From 9d5e59d503fdfe8bd2c9fe0f9e965df318a01267 Mon Sep 17 00:00:00 2001
From: Misty Stanley-Jones
Date: Thu, 29 Sep 2016 11:19:31 -0700
Subject: [PATCH] Moved engine imported docs to the engine subdirectory

---
 AUTHORS | 1522 -
 CHANGELOG.md | 2761 -
 CONTRIBUTING.md | 400 -
 Dockerfile | 272 -
 Dockerfile.aarch64 | 211 -
 Dockerfile.armhf | 218 -
 Dockerfile.gccgo | 104 -
 Dockerfile.ppc64le | 232 -
 Dockerfile.s390x | 227 -
 Dockerfile.simple | 85 -
 Dockerfile.windows | 89 -
 LICENSE | 191 -
 MAINTAINERS | 296 -
 Makefile | 126 -
 NOTICE | 19 -
 README.md | 304 -
 ROADMAP.md | 140 -
 VENDORING.md | 45 -
 VERSION | 1 -
 api/README.md | 5 -
 api/client/bundlefile/bundlefile.go | 71 -
 api/client/bundlefile/bundlefile_test.go | 79 -
 api/client/cli.go | 281 -
 api/client/client.go | 5 -
 api/client/commands.go | 11 -
 api/client/container/attach.go | 129 -
 api/client/container/commit.go | 92 -
 api/client/container/cp.go | 303 -
 api/client/container/create.go | 217 -
 api/client/container/diff.go | 60 -
 api/client/container/export.go | 59 -
 api/client/container/kill.go | 53 -
 api/client/container/logs.go | 87 -
 api/client/container/pause.go | 50 -
 api/client/container/port.go | 79 -
 api/client/container/ps.go | 125 -
 api/client/container/rename.go | 52 -
 api/client/container/restart.go | 55 -
 api/client/container/rm.go | 76 -
 api/client/container/run.go | 326 -
 api/client/container/start.go | 152 -
 api/client/container/stats.go | 233 -
 api/client/container/stats_helpers.go | 238 -
 api/client/container/stats_unit_test.go | 45 -
 api/client/container/stop.go | 56 -
 api/client/container/top.go | 58 -
 api/client/container/unpause.go | 50 -
 api/client/container/utils.go | 22 -
 api/client/container/wait.go | 51 -
 api/client/credentials.go | 44 -
 api/client/exec.go | 160 -
 api/client/exec_test.go | 122 -
 api/client/formatter/custom.go | 243 -
 api/client/formatter/custom_test.go | 192 -
 api/client/formatter/formatter.go | 307 -
 api/client/formatter/formatter_test.go | 537 -
 api/client/hijack.go | 95 -
 api/client/idresolver/idresolver.go | 70 -
 api/client/image/build.go | 432 -
 api/client/image/history.go | 99 -
 api/client/image/images.go | 103 -
 api/client/image/import.go | 86 -
 api/client/image/load.go | 67 -
 api/client/image/pull.go | 85 -
 api/client/image/push.go | 62 -
 api/client/image/remove.go | 70 -
 api/client/image/save.go | 57 -
 api/client/image/search.go | 135 -
 api/client/image/tag.go | 41 -
 api/client/info.go | 215 -
 api/client/inspect.go | 95 -
 api/client/inspect/inspector.go | 195 -
 api/client/inspect/inspector_test.go | 221 -
 api/client/network/cmd.go | 31 -
 api/client/network/connect.go | 64 -
 api/client/network/create.go | 222 -
 api/client/network/disconnect.go | 41 -
 api/client/network/inspect.go | 45 -
 api/client/network/list.go | 100 -
 api/client/network/remove.go | 43 -
 api/client/node/cmd.go | 49 -
 api/client/node/demote.go | 32 -
 api/client/node/inspect.go | 144 -
 api/client/node/list.go | 111 -
 api/client/node/opts.go | 60 -
 api/client/node/promote.go | 32 -
 api/client/node/ps.go | 63 -
 api/client/node/remove.go | 46 -
 api/client/node/update.go | 113 -
 api/client/plugin/cmd.go | 12 -
 api/client/plugin/cmd_experimental.go | 36 -
 api/client/plugin/disable.go | 45 -
 api/client/plugin/enable.go | 45 -
 api/client/plugin/inspect.go | 52 -
 api/client/plugin/install.go | 103 -
 api/client/plugin/list.go | 44 -
 api/client/plugin/push.go | 55 -
 api/client/plugin/remove.go | 55 -
 api/client/plugin/set.go | 42 -
 api/client/registry.go | 188 -
 api/client/registry/login.go | 81 -
 api/client/registry/logout.go | 52 -
 api/client/service/cmd.go | 32 -
 api/client/service/create.go | 72 -
 api/client/service/inspect.go | 188 -
 api/client/service/inspect_test.go | 84 -
 api/client/service/list.go | 124 -
 api/client/service/opts.go | 559 -
 api/client/service/opts_test.go | 176 -
 api/client/service/ps.go | 70 -
 api/client/service/remove.go | 47 -
 api/client/service/scale.go | 88 -
 api/client/service/update.go | 473 -
 api/client/service/update_test.go | 180 -
 api/client/stack/cmd.go | 38 -
 api/client/stack/cmd_stub.go | 18 -
 api/client/stack/common.go | 50 -
 api/client/stack/config.go | 41 -
 api/client/stack/deploy.go | 227 -
 api/client/stack/opts.go | 49 -
 api/client/stack/ps.go | 70 -
 api/client/stack/remove.go | 75 -
 api/client/swarm/cmd.go | 30 -
 api/client/swarm/init.go | 81 -
 api/client/swarm/join.go | 75 -
 api/client/swarm/join_token.go | 105 -
 api/client/swarm/leave.go | 44 -
 api/client/swarm/opts.go | 179 -
 api/client/swarm/opts_test.go | 37 -
 api/client/swarm/update.go | 82 -
 api/client/system/events.go | 115 -
 api/client/system/events_utils.go | 66 -
 api/client/system/version.go | 110 -
 api/client/task/print.go | 100 -
 api/client/trust.go | 605 -
 api/client/trust_test.go | 56 -
 api/client/update.go | 120 -
 api/client/utils.go | 190 -
 api/client/volume/cmd.go | 48 -
 api/client/volume/create.go | 102 -
 api/client/volume/inspect.go | 55 -
 api/client/volume/list.go | 99 -
 api/client/volume/remove.go | 54 -
 api/common.go | 169 -
 api/common_test.go | 341 -
 api/fixtures/keyfile | 7 -
 api/server/httputils/decoder.go | 16 -
 api/server/httputils/errors.go | 93 -
 api/server/httputils/form.go | 73 -
 api/server/httputils/form_test.go | 105 -
 api/server/httputils/httputils.go | 106 -
 api/server/middleware.go | 24 -
 api/server/middleware/cors.go | 37 -
 api/server/middleware/debug.go | 76 -
 api/server/middleware/middleware.go | 13 -
 api/server/middleware/user_agent.go | 47 -
 api/server/middleware/version.go | 59 -
 api/server/middleware/version_test.go | 63 -
 api/server/profiler.go | 41 -
 api/server/router/build/backend.go | 20 -
 api/server/router/build/build.go | 29 -
 api/server/router/build/build_routes.go | 194 -
 api/server/router/container/backend.go | 72 -
 api/server/router/container/container.go | 76 -
 .../router/container/container_routes.go | 527 -
 api/server/router/container/copy.go | 119 -
 api/server/router/container/exec.go | 134 -
 api/server/router/container/inspect.go | 21 -
 api/server/router/image/backend.go | 43 -
 api/server/router/image/image.go | 49 -
 api/server/router/image/image_routes.go | 319 -
 api/server/router/local.go | 96 -
 api/server/router/network/backend.go | 20 -
 api/server/router/network/filter.go | 98 -
 api/server/router/network/network.go | 42 -
 api/server/router/network/network_routes.go | 297 -
 api/server/router/plugin/backend.go | 21 -
 api/server/router/plugin/plugin.go | 23 -
 .../router/plugin/plugin_experimental.go | 20 -
 api/server/router/plugin/plugin_regular.go | 9 -
 api/server/router/plugin/plugin_routes.go | 103 -
 api/server/router/router.go | 19 -
 api/server/router/swarm/backend.go | 26 -
 api/server/router/swarm/cluster.go | 44 -
 api/server/router/swarm/cluster_routes.go | 261 -
 api/server/router/system/backend.go | 20 -
 api/server/router/system/system.go | 38 -
 api/server/router/system/system_routes.go | 154 -
 api/server/router/volume/backend.go | 15 -
 api/server/router/volume/volume.go | 35 -
 api/server/router/volume/volume_routes.go | 66 -
 api/server/router_swapper.go | 30 -
 api/server/server.go | 207 -
 api/server/server_test.go | 46 -
 api/types/backend/backend.go | 85 -
 builder/builder.go | 155 -
 builder/context.go | 260 -
 builder/context_test.go | 307 -
 builder/context_unix.go | 11 -
 builder/context_windows.go | 17 -
 builder/dockerfile/bflag.go | 176 -
 builder/dockerfile/bflag_test.go | 187 -
 builder/dockerfile/builder.go | 330 -
 builder/dockerfile/builder_unix.go | 5 -
 builder/dockerfile/builder_windows.go | 3 -
 builder/dockerfile/command/command.go | 46 -
 builder/dockerfile/dispatchers.go | 758 -
 builder/dockerfile/dispatchers_unix.go | 27 -
 builder/dockerfile/dispatchers_windows.go | 65 -
 .../dockerfile/dispatchers_windows_test.go | 34 -
 builder/dockerfile/envVarTest | 112 -
 builder/dockerfile/evaluator.go | 203 -
 builder/dockerfile/evaluator_test.go | 231 -
 builder/dockerfile/evaluator_unix.go | 9 -
 builder/dockerfile/evaluator_windows.go | 13 -
 builder/dockerfile/internals.go | 669 -
 builder/dockerfile/internals_test.go | 102 -
 builder/dockerfile/internals_unix.go | 26 -
 builder/dockerfile/internals_windows.go | 56 -
 builder/dockerfile/internals_windows_test.go | 51 -
 builder/dockerfile/parser/dumper/main.go | 35 -
 builder/dockerfile/parser/json_test.go | 61 -
 builder/dockerfile/parser/line_parsers.go | 361 -
 builder/dockerfile/parser/parser.go | 215 -
 builder/dockerfile/parser/parser_test.go | 174 -
 .../parser/testfile-line/Dockerfile | 35 -
 .../env_no_value/Dockerfile | 3 -
 .../shykes-nested-json/Dockerfile | 1 -
 .../testfiles/ADD-COPY-with-JSON/Dockerfile | 11 -
 .../testfiles/ADD-COPY-with-JSON/result | 10 -
 .../testfiles/brimstone-consuldock/Dockerfile | 26 -
 .../testfiles/brimstone-consuldock/result | 5 -
 .../brimstone-docker-consul/Dockerfile | 52 -
 .../testfiles/brimstone-docker-consul/result | 9 -
 .../testfiles/continueIndent/Dockerfile | 36 -
 .../parser/testfiles/continueIndent/result | 10 -
 .../testfiles/cpuguy83-nagios/Dockerfile | 54 -
 .../parser/testfiles/cpuguy83-nagios/result | 40 -
 .../parser/testfiles/docker/Dockerfile | 103 -
 .../dockerfile/parser/testfiles/docker/result | 24 -
 .../parser/testfiles/env/Dockerfile | 23 -
 .../dockerfile/parser/testfiles/env/result | 16 -
 .../testfiles/escape-after-comment/Dockerfile | 9 -
 .../testfiles/escape-after-comment/result | 3 -
 .../testfiles/escape-nonewline/Dockerfile | 7 -
 .../parser/testfiles/escape-nonewline/result | 3 -
 .../parser/testfiles/escape/Dockerfile | 6 -
 .../dockerfile/parser/testfiles/escape/result | 3 -
 .../parser/testfiles/escapes/Dockerfile | 14 -
 .../parser/testfiles/escapes/result | 6 -
 .../parser/testfiles/flags/Dockerfile | 10 -
 .../dockerfile/parser/testfiles/flags/result | 10 -
 .../parser/testfiles/health/Dockerfile | 10 -
 .../dockerfile/parser/testfiles/health/result | 9 -
 .../parser/testfiles/influxdb/Dockerfile | 15 -
 .../parser/testfiles/influxdb/result | 11 -
 .../Dockerfile | 1 -
 .../result | 1 -
 .../Dockerfile | 1 -
 .../result | 1 -
 .../Dockerfile | 1 -
 .../jeztah-invalid-json-single-quotes/result | 1 -
 .../Dockerfile | 1 -
 .../result | 1 -
 .../Dockerfile | 1 -
 .../result | 1 -
 .../parser/testfiles/json/Dockerfile | 8 -
 .../dockerfile/parser/testfiles/json/result | 8 -
 .../kartar-entrypoint-oddities/Dockerfile | 7 -
 .../kartar-entrypoint-oddities/result | 7 -
 .../lk4d4-the-edge-case-generator/Dockerfile | 48 -
 .../lk4d4-the-edge-case-generator/result | 29 -
 .../parser/testfiles/mail/Dockerfile | 16 -
 .../dockerfile/parser/testfiles/mail/result | 14 -
 .../testfiles/multiple-volumes/Dockerfile | 3 -
 .../parser/testfiles/multiple-volumes/result | 2 -
 .../parser/testfiles/mumble/Dockerfile | 7 -
 .../dockerfile/parser/testfiles/mumble/result | 4 -
 .../parser/testfiles/nginx/Dockerfile | 14 -
 .../dockerfile/parser/testfiles/nginx/result | 11 -
 .../parser/testfiles/tf2/Dockerfile | 23 -
 .../dockerfile/parser/testfiles/tf2/result | 20 -
 .../parser/testfiles/weechat/Dockerfile | 9 -
 .../parser/testfiles/weechat/result | 6 -
 .../parser/testfiles/znc/Dockerfile | 7 -
 .../dockerfile/parser/testfiles/znc/result | 5 -
 builder/dockerfile/parser/utils.go | 176 -
 builder/dockerfile/shell_parser.go | 314 -
 builder/dockerfile/shell_parser_test.go | 143 -
 builder/dockerfile/support.go | 19 -
 builder/dockerfile/support_test.go | 65 -
 builder/dockerfile/wordsTest | 25 -
 builder/dockerignore.go | 47 -
 builder/dockerignore/dockerignore.go | 49 -
 builder/dockerignore/dockerignore_test.go | 55 -
 builder/dockerignore_test.go | 95 -
 builder/git.go | 28 -
 builder/remote.go | 152 -
 builder/remote_test.go | 208 -
 builder/tarsum.go | 158 -
 builder/tarsum_test.go | 265 -
 builder/utils_test.go | 87 -
 cli/cli.go | 191 -
 cli/cobraadaptor/adaptor.go | 159 -
 cli/error.go | 20 -
 cli/flagerrors.go | 24 -
 cli/flags/client.go | 12 -
 cli/flags/common.go | 123 -
 cli/required.go | 96 -
 cli/usage.go | 24 -
 cliconfig/config.go | 120 -
 cliconfig/config_test.go | 545 -
 cliconfig/configfile/file.go | 177 -
 cliconfig/configfile/file_test.go | 27 -
 cliconfig/credentials/credentials.go | 17 -
 cliconfig/credentials/default_store.go | 22 -
 cliconfig/credentials/default_store_darwin.go | 3 -
 cliconfig/credentials/default_store_linux.go | 3 -
 .../credentials/default_store_unsupported.go | 5 -
 .../credentials/default_store_windows.go | 3 -
 cliconfig/credentials/file_store.go | 67 -
 cliconfig/credentials/file_store_test.go | 139 -
 cliconfig/credentials/native_store.go | 126 -
 cliconfig/credentials/native_store_test.go | 356 -
 cmd/docker/daemon.go | 18 -
 cmd/docker/daemon_none.go | 16 -
 cmd/docker/daemon_none_test.go | 20 -
 cmd/docker/daemon_unix.go | 56 -
 cmd/docker/docker.go | 118 -
 cmd/docker/docker_test.go | 23 -
 cmd/docker/docker_windows.go | 18 -
 cmd/docker/usage.go | 22 -
 cmd/docker/usage_test.go | 15 -
 cmd/dockerd/README.md | 3 -
 cmd/dockerd/daemon.go | 447 -
 cmd/dockerd/daemon_freebsd.go | 5 -
 cmd/dockerd/daemon_linux.go | 11 -
 cmd/dockerd/daemon_solaris.go | 74 -
 cmd/dockerd/daemon_test.go | 294 -
 cmd/dockerd/daemon_unix.go | 131 -
 cmd/dockerd/daemon_unix_test.go | 212 -
 cmd/dockerd/daemon_windows.go | 82 -
 cmd/dockerd/docker.go | 82 -
 cmd/dockerd/docker_windows.go | 18 -
 cmd/dockerd/hack/malformed_host_override.go | 121 -
 .../hack/malformed_host_override_test.go | 124 -
 cmd/dockerd/routes.go | 9 -
 cmd/dockerd/routes_experimental.go | 13 -
 cmd/dockerd/service_unsupported.go | 7 -
 cmd/dockerd/service_windows.go | 369 -
 container/archive.go | 76 -
 container/container.go | 974 -
 container/container_solaris.go | 95 -
 container/container_unit_test.go | 36 -
 container/container_unix.go | 419 -
 container/container_windows.go | 109 -
 container/health.go | 52 -
 container/history.go | 30 -
 container/memory_store.go | 92 -
 container/memory_store_test.go | 106 -
 container/monitor.go | 46 -
 container/mounts_unix.go | 12 -
 container/mounts_windows.go | 8 -
 container/state.go | 310 -
 container/state_solaris.go | 7 -
 container/state_test.go | 87 -
 container/state_unix.go | 10 -
 container/state_windows.go | 7 -
 container/store.go | 28 -
 contrib/README.md | 4 -
 contrib/REVIEWERS | 1 -
 contrib/apparmor/main.go | 56 -
 contrib/apparmor/template.go | 268 -
 contrib/builder/deb/amd64/README.md | 5 -
 contrib/builder/deb/amd64/build.sh | 10 -
 .../deb/amd64/debian-jessie/Dockerfile | 20 -
 .../deb/amd64/debian-stretch/Dockerfile | 20 -
 .../deb/amd64/debian-wheezy/Dockerfile | 22 -
 contrib/builder/deb/amd64/generate.sh | 147 -
 .../deb/amd64/ubuntu-precise/Dockerfile | 16 -
 .../deb/amd64/ubuntu-trusty/Dockerfile | 16 -
 .../builder/deb/amd64/ubuntu-wily/Dockerfile | 16 -
 .../deb/amd64/ubuntu-xenial/Dockerfile | 16 -
 .../deb/armhf/debian-jessie/Dockerfile | 14 -
 .../deb/armhf/raspbian-jessie/Dockerfile | 15 -
 .../deb/armhf/ubuntu-trusty/Dockerfile | 10 -
 contrib/builder/rpm/amd64/README.md | 5 -
 contrib/builder/rpm/amd64/build.sh | 10 -
 contrib/builder/rpm/amd64/centos-7/Dockerfile | 19 -
 .../builder/rpm/amd64/fedora-22/Dockerfile | 18 -
 .../builder/rpm/amd64/fedora-23/Dockerfile | 18 -
 .../builder/rpm/amd64/fedora-24/Dockerfile | 18 -
 contrib/builder/rpm/amd64/generate.sh | 173 -
 .../rpm/amd64/opensuse-13.2/Dockerfile | 18 -
 .../rpm/amd64/oraclelinux-6/Dockerfile | 28 -
 .../rpm/amd64/oraclelinux-7/Dockerfile | 18 -
 contrib/check-config.sh | 297 -
 contrib/completion/REVIEWERS | 2 -
 contrib/completion/bash/docker | 2980 -
 contrib/completion/fish/docker.fish | 400 -
 .../completion/powershell/posh-docker.psm1 | 179 -
 contrib/completion/zsh/REVIEWERS | 2 -
 contrib/completion/zsh/_docker | 2114 -
 contrib/desktop-integration/README.md | 11 -
 .../desktop-integration/chromium/Dockerfile | 36 -
 .../desktop-integration/gparted/Dockerfile | 31 -
 contrib/docker-device-tool/README.md | 14 -
 contrib/docker-device-tool/device_tool.go | 176 -
 .../docker-device-tool/device_tool_windows.go | 4 -
 contrib/dockerize-disk.sh | 118 -
 contrib/download-frozen-image-v1.sh | 108 -
 contrib/download-frozen-image-v2.sh | 121 -
 contrib/gitdm/aliases | 148 -
 contrib/gitdm/domain-map | 39 -
 contrib/gitdm/generate_aliases.sh | 16 -
 contrib/gitdm/gitdm.config | 17 -
 contrib/httpserver/Dockerfile | 4 -
 contrib/httpserver/server.go | 12 -
 contrib/init/openrc/docker.confd | 13 -
 contrib/init/openrc/docker.initd | 19 -
 contrib/init/systemd/REVIEWERS | 3 -
 contrib/init/systemd/docker.service | 29 -
 contrib/init/systemd/docker.service.rpm | 28 -
 contrib/init/systemd/docker.socket | 12 -
 contrib/init/sysvinit-debian/docker | 149 -
 contrib/init/sysvinit-debian/docker.default | 20 -
 contrib/init/sysvinit-redhat/docker | 153 -
 contrib/init/sysvinit-redhat/docker.sysconfig | 7 -
 contrib/init/upstart/REVIEWERS | 2 -
 contrib/init/upstart/docker.conf | 68 -
 contrib/mkimage-alpine.sh | 87 -
 contrib/mkimage-arch-pacman.conf | 92 -
 contrib/mkimage-arch.sh | 122 -
 contrib/mkimage-archarm-pacman.conf | 98 -
 contrib/mkimage-busybox.sh | 43 -
 contrib/mkimage-crux.sh | 75 -
 contrib/mkimage-debootstrap.sh | 297 -
 contrib/mkimage-rinse.sh | 123 -
 contrib/mkimage-yum.sh | 134 -
 contrib/mkimage.sh | 117 -
 contrib/mkimage/.febootstrap-minimize | 28 -
 contrib/mkimage/busybox-static | 34 -
 contrib/mkimage/debootstrap | 226 -
 contrib/mkimage/mageia-urpmi | 61 -
 contrib/mkimage/rinse | 25 -
 contrib/nnp-test/Dockerfile | 9 -
 contrib/nnp-test/nnp-test.c | 10 -
 contrib/nuke-graph-directory.sh | 65 -
 contrib/project-stats.sh | 22 -
 contrib/report-issue.sh | 105 -
 contrib/reprepro/suites.sh | 12 -
 .../docker-engine-selinux/LICENSE | 339 -
 .../docker-engine-selinux/Makefile | 23 -
 .../docker-engine-selinux/README.md | 1 -
 .../docker-engine-selinux/docker.fc | 29 -
 .../docker-engine-selinux/docker.if | 523 -
 .../docker-engine-selinux/docker.te | 399 -
 .../docker-engine-selinux/LICENSE | 339 -
 .../docker-engine-selinux/Makefile | 23 -
 .../docker-engine-selinux/README.md | 1 -
 .../docker-engine-selinux/docker.fc | 33 -
 .../docker-engine-selinux/docker.if | 659 -
 .../docker-engine-selinux/docker.te | 465 -
 contrib/selinux/docker-engine-selinux/LICENSE | 340 -
 .../selinux/docker-engine-selinux/Makefile | 16 -
 .../selinux/docker-engine-selinux/docker.fc | 20 -
 .../selinux/docker-engine-selinux/docker.if | 461 -
 .../selinux/docker-engine-selinux/docker.te | 414 -
 .../docker-engine-selinux/docker_selinux.8.gz | Bin 2847 -> 0 bytes
 contrib/syntax/kate/Dockerfile.xml | 67 -
 contrib/syntax/nano/Dockerfile.nanorc | 26 -
 contrib/syntax/nano/README.md | 32 -
 .../Preferences/Dockerfile.tmPreferences | 24 -
 .../Syntaxes/Dockerfile.tmLanguage | 143 -
 .../textmate/Docker.tmbundle/info.plist | 16 -
 contrib/syntax/textmate/README.md | 17 -
 contrib/syntax/textmate/REVIEWERS | 1 -
 contrib/syntax/vim/LICENSE | 22 -
 contrib/syntax/vim/README.md | 26 -
 contrib/syntax/vim/doc/dockerfile.txt | 18 -
 contrib/syntax/vim/ftdetect/dockerfile.vim | 1 -
 contrib/syntax/vim/syntax/dockerfile.vim | 31 -
 contrib/syscall-test/Dockerfile | 9 -
 contrib/syscall-test/acct.c | 16 -
 contrib/syscall-test/ns.c | 63 -
 contrib/syscall-test/userns.c | 63 -
 contrib/udev/80-docker.rules | 3 -
 contrib/vagrant-docker/README.md | 50 -
 daemon/apparmor_default.go | 30 -
 daemon/apparmor_default_unsupported.go | 6 -
 daemon/archive.go | 436 -
 daemon/archive_unix.go | 58 -
 daemon/archive_windows.go | 18 -
 daemon/attach.go | 147 -
 daemon/auth.go | 13 -
 daemon/caps/utils_unix.go | 131 -
 daemon/changes.go | 15 -
 daemon/cluster/cluster.go | 1419 -
 daemon/cluster/convert/container.go | 116 -
 daemon/cluster/convert/network.go | 199 -
 daemon/cluster/convert/node.go | 88 -
 daemon/cluster/convert/service.go | 311 -
 daemon/cluster/convert/swarm.go | 100 -
 daemon/cluster/convert/task.go | 54 -
 daemon/cluster/executor/backend.go | 43 -
 daemon/cluster/executor/container/adapter.go | 273 -
 .../cluster/executor/container/container.go | 508 -
 .../cluster/executor/container/controller.go | 457 -
 daemon/cluster/executor/container/errors.go | 15 -
 daemon/cluster/executor/container/executor.go | 160 -
 .../cluster/executor/container/health_test.go | 102 -
 daemon/cluster/executor/container/validate.go | 43 -
 .../executor/container/validate_test.go | 141 -
 .../executor/container/validate_unix_test.go | 7 -
 .../container/validate_windows_test.go | 5 -
 daemon/cluster/filters.go | 98 -
 daemon/cluster/helpers.go | 108 -
 daemon/cluster/listen_addr.go | 270 -
 daemon/cluster/provider/network.go | 37 -
 daemon/commit.go | 264 -
 daemon/config.go | 453 -
 daemon/config_experimental.go | 8 -
 daemon/config_solaris.go | 47 -
 daemon/config_stub.go | 8 -
 daemon/config_test.go | 278 -
 daemon/config_unix.go | 140 -
 daemon/config_windows.go | 68 -
 daemon/container.go | 256 -
 daemon/container_operations.go | 768 -
 daemon/container_operations_solaris.go | 50 -
 daemon/container_operations_unix.go | 385 -
 daemon/container_operations_windows.go | 57 -
 daemon/create.go | 260 -
 daemon/create_unix.go | 76 -
 daemon/create_windows.go | 80 -
 daemon/daemon.go | 1099 -
 daemon/daemon_experimental.go | 26 -
 daemon/daemon_linux.go | 80 -
 daemon/daemon_linux_test.go | 104 -
 daemon/daemon_solaris.go | 167 -
 daemon/daemon_stub.go | 19 -
 daemon/daemon_test.go | 532 -
 daemon/daemon_unix.go | 1167 -
 daemon/daemon_unix_test.go | 199 -
 daemon/daemon_unsupported.go | 5 -
 daemon/daemon_windows.go | 525 -
 daemon/debugtrap_unix.go | 21 -
 daemon/debugtrap_unsupported.go | 7 -
 daemon/debugtrap_windows.go | 30 -
 daemon/delete.go | 157 -
 daemon/delete_test.go | 42 -
 daemon/discovery.go | 203 -
 daemon/discovery_test.go | 152 -
 daemon/errors.go | 57 -
 daemon/events.go | 132 -
 daemon/events/events.go | 154 -
 daemon/events/events_test.go | 275 -
 daemon/events/filter.go | 92 -
 daemon/events/testutils/testutils.go | 76 -
 daemon/events_test.go | 94 -
 daemon/exec.go | 268 -
 daemon/exec/exec.go | 93 -
 daemon/exec_linux.go | 26 -
 daemon/exec_solaris.go | 11 -
 daemon/exec_windows.go | 13 -
 daemon/export.go | 55 -
 daemon/graphdriver/aufs/aufs.go | 652 -
 daemon/graphdriver/aufs/aufs_test.go | 801 -
 daemon/graphdriver/aufs/dirs.go | 64 -
 daemon/graphdriver/aufs/mount.go | 21 -
 daemon/graphdriver/aufs/mount_linux.go | 7 -
 daemon/graphdriver/aufs/mount_unsupported.go | 12 -
 daemon/graphdriver/btrfs/btrfs.go | 520 -
 daemon/graphdriver/btrfs/btrfs_test.go | 63 -
 daemon/graphdriver/btrfs/dummy_unsupported.go | 3 -
 daemon/graphdriver/btrfs/version.go | 26 -
 daemon/graphdriver/btrfs/version_none.go | 14 -
 daemon/graphdriver/btrfs/version_test.go | 13 -
 daemon/graphdriver/counter.go | 67 -
 daemon/graphdriver/devmapper/README.md | 96 -
 daemon/graphdriver/devmapper/deviceset.go | 2627 -
 daemon/graphdriver/devmapper/devmapper_doc.go | 106 -
 .../graphdriver/devmapper/devmapper_test.go | 110 -
 daemon/graphdriver/devmapper/driver.go | 226 -
 daemon/graphdriver/devmapper/mount.go | 89 -
 daemon/graphdriver/driver.go | 243 -
 daemon/graphdriver/driver_freebsd.go | 19 -
 daemon/graphdriver/driver_linux.go | 133 -
 daemon/graphdriver/driver_solaris.go | 65 -
 daemon/graphdriver/driver_unsupported.go | 15 -
 daemon/graphdriver/driver_windows.go | 14 -
 daemon/graphdriver/fsdiff.go | 162 -
 .../graphdriver/graphtest/graphbench_unix.go | 264 -
 .../graphdriver/graphtest/graphtest_unix.go | 359 -
 .../graphtest/graphtest_windows.go | 1 -
 daemon/graphdriver/graphtest/testutil.go | 342 -
 daemon/graphdriver/graphtest/testutil_unix.go | 143 -
 daemon/graphdriver/overlay/copy.go | 169 -
 daemon/graphdriver/overlay/overlay.go | 444 -
 daemon/graphdriver/overlay/overlay_test.go | 93 -
 .../overlay/overlay_unsupported.go | 3 -
 daemon/graphdriver/overlay2/mount.go | 88 -
 daemon/graphdriver/overlay2/overlay.go | 509 -
 daemon/graphdriver/overlay2/overlay_test.go | 106 -
 .../overlay2/overlay_unsupported.go | 3 -
 daemon/graphdriver/overlay2/randomid.go | 80 -
 daemon/graphdriver/plugin.go | 32 -
 daemon/graphdriver/plugin_unsupported.go | 7 -
 daemon/graphdriver/proxy.go | 225 -
 daemon/graphdriver/register/register_aufs.go | 8 -
 daemon/graphdriver/register/register_btrfs.go | 8 -
 .../register/register_devicemapper.go | 8 -
 .../graphdriver/register/register_overlay.go | 9 -
 daemon/graphdriver/register/register_vfs.go | 6 -
 .../graphdriver/register/register_windows.go | 6 -
 daemon/graphdriver/register/register_zfs.go | 8 -
 daemon/graphdriver/vfs/driver.go | 145 -
 daemon/graphdriver/vfs/vfs_test.go | 37 -
 daemon/graphdriver/windows/windows.go | 853 -
 .../windows/windows_windows_test.go | 18 -
 daemon/graphdriver/zfs/MAINTAINERS | 2 -
 daemon/graphdriver/zfs/zfs.go | 412 -
 daemon/graphdriver/zfs/zfs_freebsd.go | 38 -
 daemon/graphdriver/zfs/zfs_linux.go | 27 -
 daemon/graphdriver/zfs/zfs_solaris.go | 59 -
 daemon/graphdriver/zfs/zfs_test.go | 35 -
 daemon/graphdriver/zfs/zfs_unsupported.go | 11 -
 daemon/health.go | 321 -
 daemon/health_test.go | 118 -
 daemon/image.go | 124 -
 daemon/image_delete.go | 404 -
 daemon/image_exporter.go | 25 -
 daemon/image_history.go | 82 -
 daemon/image_inspect.go | 81 -
 daemon/image_pull.go | 106 -
 daemon/image_push.go | 58 -
 daemon/image_tag.go | 37 -
 daemon/images.go | 193 -
 daemon/import.go | 135 -
 daemon/info.go | 186 -
 daemon/inspect.go | 250 -
 daemon/inspect_solaris.go | 40 -
 daemon/inspect_unix.go | 91 -
 daemon/inspect_windows.go | 40 -
 daemon/keys.go | 59 -
 daemon/keys_unsupported.go | 8 -
 daemon/kill.go | 157 -
 daemon/links.go | 128 -
 daemon/links/links.go | 141 -
 daemon/links/links_test.go | 213 -
 daemon/links_test.go | 98 -
 daemon/list.go | 613 -
 daemon/list_unix.go | 11 -
 daemon/list_windows.go | 20 -
 daemon/logdrivers_linux.go | 14 -
 daemon/logdrivers_windows.go | 10 -
 daemon/logger/awslogs/cloudwatchlogs.go | 375 -
 daemon/logger/awslogs/cloudwatchlogs_test.go | 627 -
 .../logger/awslogs/cwlogsiface_mock_test.go | 77 -
 daemon/logger/context.go | 113 -
 daemon/logger/copier.go | 81 -
 daemon/logger/copier_test.go | 118 -
 daemon/logger/etwlogs/etwlogs_windows.go | 183 -
 daemon/logger/factory.go | 104 -
 daemon/logger/fluentd/fluentd.go | 200 -
 daemon/logger/gcplogs/gcplogging.go | 191 -
 daemon/logger/gelf/gelf.go | 209 -
 daemon/logger/gelf/gelf_unsupported.go | 3 -
 daemon/logger/journald/journald.go | 95 -
 .../logger/journald/journald_unsupported.go | 6 -
 daemon/logger/journald/read.go | 392 -
 daemon/logger/journald/read_native.go | 6 -
 daemon/logger/journald/read_native_compat.go | 6 -
 daemon/logger/journald/read_unsupported.go | 7 -
 daemon/logger/jsonfilelog/jsonfilelog.go | 147 -
 daemon/logger/jsonfilelog/jsonfilelog_test.go | 248 -
 daemon/logger/jsonfilelog/read.go | 239 -
 daemon/logger/logger.go | 113 -
 daemon/logger/loggerutils/log_tag.go | 28 -
 daemon/logger/loggerutils/log_tag_test.go | 47 -
 daemon/logger/loggerutils/rotatefilewriter.go | 124 -
 daemon/logger/splunk/splunk.go | 266 -
 daemon/logger/syslog/syslog.go | 258 -
 daemon/logger/syslog/syslog_test.go | 51 -
 daemon/logger/syslog/syslog_unsupported.go | 3 -
 daemon/logs.go | 166 -
 daemon/logs_test.go | 15 -
 daemon/monitor.go | 156 -
 daemon/monitor_linux.go | 19 -
 daemon/monitor_solaris.go | 18 -
 daemon/monitor_windows.go | 37 -
 daemon/mounts.go | 48 -
 daemon/names.go | 115 -
 daemon/network.go | 381 -
 daemon/network/filter.go | 94 -
 daemon/network/settings.go | 24 -
 daemon/oci_linux.go | 711 -
 daemon/oci_solaris.go | 12 -
 daemon/oci_windows.go | 200 -
 daemon/pause.go | 49 -
 daemon/rename.go | 98 -
 daemon/resize.go | 40 -
 daemon/restart.go | 48 -
 daemon/search.go | 94 -
 daemon/search_test.go | 357 -
 daemon/seccomp_disabled.go | 19 -
 daemon/seccomp_linux.go | 48 -
 daemon/seccomp_unsupported.go | 5 -
 daemon/selinux_linux.go | 17 -
 daemon/selinux_unsupported.go | 13 -
 daemon/start.go | 194 -
 daemon/start_linux.go | 26 -
 daemon/start_windows.go | 10 -
 daemon/stats.go | 194 -
 daemon/stats_collector_solaris.go | 34 -
 daemon/stats_collector_unix.go | 189 -
 daemon/stats_collector_windows.go | 35 -
 daemon/stop.go | 67 -
 daemon/top_unix.go | 126 -
 daemon/top_unix_test.go | 76 -
 daemon/top_windows.go | 32 -
 daemon/unpause.go | 43 -
 daemon/update.go | 94 -
 daemon/update_linux.go | 25 -
 daemon/update_solaris.go | 11 -
 daemon/update_windows.go | 13 -
 daemon/volumes.go | 185 -
 daemon/volumes_unit_test.go | 39 -
 daemon/volumes_unix.go | 86 -
 daemon/volumes_windows.go | 51 -
 daemon/wait.go | 32 -
 distribution/errors.go | 117 -
 .../fixtures/validate_manifest/bad_manifest | 38 -
 .../validate_manifest/extra_data_manifest | 46 -
 .../fixtures/validate_manifest/good_manifest | 38 -
 distribution/metadata/metadata.go | 75 -
 distribution/metadata/v1_id_service.go | 44 -
 distribution/metadata/v1_id_service_test.go | 83 -
 distribution/metadata/v2_metadata_service.go | 137 -
 .../metadata/v2_metadata_service_test.go | 115 -
 distribution/pull.go | 225 -
 distribution/pull_v1.go | 366 -
 distribution/pull_v2.go | 845 -
 distribution/pull_v2_test.go | 183 -
 distribution/pull_v2_unix.go | 19 -
 distribution/pull_v2_windows.go | 69 -
 distribution/push.go | 219 -
 distribution/push_v1.go | 454 -
 distribution/push_v2.go | 452 -
 distribution/registry.go | 122 -
 distribution/registry_unit_test.go | 133 -
 distribution/xfer/download.go | 452 -
 distribution/xfer/download_test.go | 341 -
 distribution/xfer/transfer.go | 401 -
 distribution/xfer/transfer_test.go | 410 -
 distribution/xfer/upload.go | 168 -
 distribution/xfer/upload_test.go | 134 -
 dockerversion/useragent.go | 74 -
 dockerversion/version_lib.go | 13 -
 {docs => engine}/.gitignore | 0
 {docs => engine}/Dockerfile | 0
 {docs => engine}/Makefile | 0
 {docs => engine}/README.md | 0
 .../admin/ambassador_pattern_linking.md | 0
 .../admin/b2d_volume_images/add_cd.png | Bin
 .../b2d_volume_images/add_new_controller.png | Bin
 .../admin/b2d_volume_images/add_volume.png | Bin
 .../admin/b2d_volume_images/boot_order.png | Bin
 .../admin/b2d_volume_images/gparted.png | Bin
 .../admin/b2d_volume_images/gparted2.png | Bin
 .../admin/b2d_volume_images/verify.png | Bin
 {docs => engine}/admin/b2d_volume_resize.md | 0
 {docs => engine}/admin/chef.md | 0
 {docs => engine}/admin/dsc.md | 0
 {docs => engine}/admin/formatting.md | 0
 {docs => engine}/admin/host_integration.md | 0
 {docs => engine}/admin/index.md | 0
 {docs => engine}/admin/live-restore.md | 0
 {docs => engine}/admin/logging/awslogs.md | 0
 {docs => engine}/admin/logging/etwlogs.md | 0
 {docs => engine}/admin/logging/fluentd.md | 0
 {docs => engine}/admin/logging/gcplogs.md | 0
 {docs => engine}/admin/logging/index.md | 0
 {docs => engine}/admin/logging/journald.md | 0
 {docs => engine}/admin/logging/log_tags.md | 0
 {docs => engine}/admin/logging/overview.md | 0
 {docs => engine}/admin/logging/splunk.md | 0
 {docs => engine}/admin/menu.md | 0
 {docs => engine}/admin/puppet.md | 0
 {docs => engine}/admin/registry_mirror.md | 0
 {docs => engine}/admin/runmetrics.md | 0
 {docs => engine}/admin/systemd.md | 0
 {docs => engine}/admin/using_supervisord.md | 0
 {docs => engine}/article-img/architecture.svg | 0
 .../article-img/engine-components-flow.png | Bin
 {docs => engine}/breaking_changes.md | 0
 {docs => engine}/deprecated.md | 0
 .../examples/apt-cacher-ng.Dockerfile | 0
 {docs => engine}/examples/apt-cacher-ng.md | 0
 {docs => engine}/examples/couchbase.md | 0
 .../examples/couchbase/web-console.png | Bin
 .../examples/couchdb_data_volumes.md | 0
 {docs => engine}/examples/index.md | 0
 {docs => engine}/examples/mongodb.md | 0
 {docs => engine}/examples/mongodb/Dockerfile | 0
 .../examples/postgresql_service.Dockerfile | 0
 .../examples/postgresql_service.md | 0
 .../examples/running_redis_service.md | 0
 .../examples/running_riak_service.Dockerfile | 0
 .../examples/running_riak_service.md | 0
 .../examples/running_ssh_service.Dockerfile | 0
 .../examples/running_ssh_service.md | 0
 {docs => engine}/examples/supervisord.conf | 0
 .../extend/images/authz_additional_info.png | Bin
 .../extend/images/authz_allow.png | Bin
 .../extend/images/authz_chunked.png | Bin
 .../extend/images/authz_connection_hijack.png | Bin
 {docs => engine}/extend/images/authz_deny.png | Bin
 {docs => engine}/extend/index.md | 0
 {docs => engine}/extend/legacy_plugins.md | 0
 {docs => engine}/extend/manifest.md | 0
 {docs => engine}/extend/menu.md | 0
 {docs => engine}/extend/plugin_api.md | 0
 .../extend/plugins_authorization.md | 0
 {docs => engine}/extend/plugins_network.md | 0
 {docs => engine}/extend/plugins_volume.md | 0
 {docs => engine}/faq.md | 0
 {docs => engine}/getstarted/index.md | 0
 {docs => engine}/getstarted/last_page.md | 0
 .../getstarted/linux_install_help.md | 0
 {docs => engine}/getstarted/menu.md | 0
 {docs => engine}/getstarted/step_five.md | 0
 {docs => engine}/getstarted/step_four.md | 0
 {docs => engine}/getstarted/step_one.md | 0
 {docs => engine}/getstarted/step_six.md | 0
 {docs => engine}/getstarted/step_three.md | 0
 {docs => engine}/getstarted/step_two.md | 0
 .../getstarted/tutimg/add_repository.png | Bin
 .../getstarted/tutimg/browse_and_search.png | Bin
 .../getstarted/tutimg/container_explainer.png | Bin
 .../getstarted/tutimg/hub_signup.png | Bin
 .../getstarted/tutimg/image_found.png | Bin
 .../getstarted/tutimg/line_one.png | Bin
 .../getstarted/tutimg/new_image.png | Bin
 {docs => engine}/getstarted/tutimg/tagger.png | Bin
 .../getstarted/tutimg/whale_repo.png | Bin
 {docs => engine}/index.md | 0
 {docs => engine}/installation/binaries.md | 0
 .../installation/cloud/cloud-ex-aws.md | 0
 .../cloud/cloud-ex-machine-ocean.md | 0
 {docs => engine}/installation/cloud/index.md | 0
 .../installation/cloud/overview.md | 0
 .../installation/images/bad_host.png | Bin
 .../installation/images/cool_view.png | Bin
 .../installation/images/ec2-ubuntu.png | Bin
 .../images/ec2_instance_details.png | Bin
 .../installation/images/ec2_instance_type.png | Bin
 .../images/ec2_launch_instance.png | Bin
 .../installation/images/good_host.png | Bin
 .../installation/images/kitematic.png | Bin
 .../installation/images/linux_docker_host.svg | 0
 .../installation/images/mac-page-finished.png | Bin
 .../installation/images/mac-page-two.png | Bin
 .../images/mac-password-prompt.png | Bin
 .../installation/images/mac-success.png | Bin
 .../installation/images/mac-welcome-page.png | Bin
 .../installation/images/mac_docker_host.svg | 0
 .../installation/images/my-docker-vm.png | Bin
 .../installation/images/newsite_view.png | Bin
 .../installation/images/nginx-webserver.png | Bin
 .../installation/images/ocean_click_api.png | Bin
 .../installation/images/ocean_droplet.png | Bin
 .../images/ocean_droplet_ubuntu.png | Bin
 .../installation/images/ocean_gen_token.png | Bin
 .../installation/images/ocean_save_token.png | Bin
 .../images/ocean_token_create.png | Bin
 .../installation/images/virtualization.png | Bin
 .../installation/images/win-page-6.png | Bin
 .../installation/images/win-welcome.png | Bin
 .../installation/images/win_docker_host.svg | 0
 .../installation/images/win_ver.png | Bin
 .../images/windows-boot2docker-cmd.png | Bin
 .../images/windows-boot2docker-powershell.png | Bin
 .../images/windows-boot2docker-start.png | Bin
 .../installation/images/windows-finish.png | Bin
 {docs => engine}/installation/index.md | 0
 {docs => engine}/installation/linux/SUSE.md | 0
 .../installation/linux/archlinux.md | 0
 {docs => engine}/installation/linux/centos.md | 0
 .../installation/linux/cruxlinux.md | 0
 {docs => engine}/installation/linux/debian.md | 0
 {docs => engine}/installation/linux/fedora.md | 0
 .../installation/linux/gentoolinux.md | 0
 {docs => engine}/installation/linux/index.md | 0
 {docs => engine}/installation/linux/oracle.md | 0
 {docs => engine}/installation/linux/rhel.md | 0
 .../installation/linux/ubuntulinux.md | 0
 {docs => engine}/installation/mac.md | 0
 {docs => engine}/installation/windows.md | 0
 {docs => engine}/migration.md | 0
 {docs => engine}/reference/api/README.md | 0
 .../_static/io_oauth_authorization_page.png | Bin
 .../reference/api/docker-io_api.md | 0
 .../reference/api/docker_io_accounts_api.md | 0
 .../reference/api/docker_remote_api.md | 0
 .../reference/api/docker_remote_api_v1.18.md | 0
 .../reference/api/docker_remote_api_v1.19.md | 0
 .../reference/api/docker_remote_api_v1.20.md | 0
 .../reference/api/docker_remote_api_v1.21.md | 0
 .../reference/api/docker_remote_api_v1.22.md | 0
 .../reference/api/docker_remote_api_v1.23.md | 0
 .../reference/api/docker_remote_api_v1.24.md | 0
 .../reference/api/docker_remote_api_v1.25.md | 0
 .../reference/api/hub_registry_spec.md | 0
 .../reference/api/images/event_state.gliffy | 0
 .../reference/api/images/event_state.png | Bin
 {docs => engine}/reference/api/index.md | 0
 .../api/remote_api_client_libraries.md | 0
 {docs => engine}/reference/builder.md | 0
 .../reference/commandline/attach.md | 0
 .../reference/commandline/build.md | 0
 {docs => engine}/reference/commandline/cli.md | 0
 .../reference/commandline/commit.md | 0
 {docs => engine}/reference/commandline/cp.md | 0
 .../reference/commandline/create.md | 0
 .../reference/commandline/deploy.md | 0
 .../reference/commandline/diff.md | 0
 .../reference/commandline/docker_images.gif | Bin
 .../reference/commandline/dockerd.md | 0
 .../reference/commandline/events.md | 0
 .../reference/commandline/exec.md | 0
 .../reference/commandline/export.md | 0
 .../reference/commandline/history.md | 0
 .../reference/commandline/images.md | 0
 .../reference/commandline/import.md | 0
 .../reference/commandline/index.md | 0
 .../reference/commandline/info.md | 0
 .../reference/commandline/inspect.md | 0
 .../reference/commandline/kill.md | 0
 .../reference/commandline/load.md | 0
 .../reference/commandline/login.md | 0
 .../reference/commandline/logout.md | 0
 .../reference/commandline/logs.md | 0
 .../reference/commandline/menu.md | 0
 .../reference/commandline/network_connect.md | 0
 .../reference/commandline/network_create.md | 0
 .../commandline/network_disconnect.md | 0
 .../reference/commandline/network_inspect.md | 0
 .../reference/commandline/network_ls.md | 0
 .../reference/commandline/network_rm.md | 0
 .../reference/commandline/node_demote.md | 0
 .../reference/commandline/node_inspect.md | 0
 .../reference/commandline/node_ls.md | 0
 .../reference/commandline/node_promote.md | 0
 .../reference/commandline/node_ps.md | 0
 .../reference/commandline/node_rm.md | 0
 .../reference/commandline/node_update.md | 0
 .../reference/commandline/pause.md | 0
 .../reference/commandline/plugin_disable.md | 0
 .../reference/commandline/plugin_enable.md | 0
 .../reference/commandline/plugin_inspect.md | 0
 .../reference/commandline/plugin_install.md | 0
 .../reference/commandline/plugin_ls.md | 0
 .../reference/commandline/plugin_rm.md | 0
 .../reference/commandline/port.md | 0
 {docs => engine}/reference/commandline/ps.md | 0
 .../reference/commandline/pull.md | 0
 .../reference/commandline/push.md | 0
 .../reference/commandline/rename.md | 0
 .../reference/commandline/restart.md | 0
 {docs => engine}/reference/commandline/rm.md | 0
 {docs => engine}/reference/commandline/rmi.md | 0
 {docs => engine}/reference/commandline/run.md | 0
 .../reference/commandline/save.md | 0
 .../reference/commandline/search.md | 0
 .../reference/commandline/service_create.md | 0
 .../reference/commandline/service_inspect.md | 0
 .../reference/commandline/service_ls.md | 0
 .../reference/commandline/service_ps.md | 0
 .../reference/commandline/service_rm.md | 0
 .../reference/commandline/service_scale.md | 0
 .../reference/commandline/service_update.md | 0
 .../reference/commandline/stack_config.md | 0
 .../reference/commandline/stack_deploy.md | 0
 .../reference/commandline/stack_rm.md | 0
 .../reference/commandline/stack_services.md | 0
 .../reference/commandline/stack_tasks.md | 0
 .../reference/commandline/start.md | 0
 .../reference/commandline/stats.md | 0
 .../reference/commandline/stop.md | 0
 .../reference/commandline/swarm_init.md | 0
 .../reference/commandline/swarm_join.md | 0
 .../reference/commandline/swarm_join_token.md | 0
 .../reference/commandline/swarm_leave.md | 0
 .../reference/commandline/swarm_update.md | 0
 {docs => engine}/reference/commandline/tag.md | 0
 {docs => engine}/reference/commandline/top.md | 0
 .../reference/commandline/unpause.md | 0
 .../reference/commandline/update.md | 0
 .../reference/commandline/version.md | 0
 .../reference/commandline/volume_create.md | 0
 .../reference/commandline/volume_inspect.md | 0
 .../reference/commandline/volume_ls.md | 0
 .../reference/commandline/volume_rm.md | 0
 .../reference/commandline/wait.md | 0
 {docs => engine}/reference/glossary.md | 0
 {docs => engine}/reference/index.md | 0
 {docs => engine}/reference/run.md | 0
 {docs => engine}/security/apparmor.md | 0
 {docs => engine}/security/certificates.md | 0
 {docs => engine}/security/https.md | 0
 {docs => engine}/security/https/Dockerfile | 0
 {docs => engine}/security/https/Makefile | 0
 {docs => engine}/security/https/README.md | 0
 {docs => engine}/security/https/make_certs.sh | 0
 {docs => engine}/security/https/parsedocs.sh | 0
 {docs => engine}/security/index.md | 0
 {docs => engine}/security/non-events.md | 0
 {docs => engine}/security/seccomp.md | 0
 {docs => engine}/security/security.md | 0
 .../security/trust/content_trust.md | 0
 .../security/trust/deploying_notary.md | 0
 .../security/trust/images/tag_signing.png | Bin
 .../security/trust/images/trust_.gliffy | 0
 .../trust/images/trust_components.gliffy | 0
 .../trust/images/trust_components.png | Bin
 .../trust/images/trust_signing.gliffy | 0
 .../security/trust/images/trust_signing.png | Bin
 .../security/trust/images/trust_view.gliffy | 0
 .../security/trust/images/trust_view.png | Bin
 {docs => engine}/security/trust/index.md | 0
 .../security/trust/trust_automation.md | 0
 .../security/trust/trust_delegation.md | 0
 .../security/trust/trust_key_mng.md | 0
 .../security/trust/trust_sandbox.md | 0
 {docs => engine}/static_files/README.md | 0
 .../static_files/contributors.png | Bin
 .../static_files/docker-logo-compressed.png | Bin
 .../static_files/docker_pull_chart.png | Bin
 .../static_files/docker_push_chart.png | Bin
 .../static_files/dockerlogo-v.png | Bin
 {docs => engine}/swarm/admin_guide.md | 0
 .../swarm/how-swarm-mode-works/menu.md | 0
 .../swarm/how-swarm-mode-works/nodes.md | 0
 .../swarm/how-swarm-mode-works/pki.md | 0
 .../swarm/how-swarm-mode-works/services.md | 0
 {docs => engine}/swarm/images/ingress-lb.png | Bin
 .../swarm/images/ingress-routing-mesh.png | Bin
 .../swarm/images/replicated-vs-global.png | Bin
 .../swarm/images/service-lifecycle.png | Bin
 {docs => engine}/swarm/images/service-vip.png | Bin
 .../swarm/images/services-diagram.png | Bin
 .../swarm/images/src/ingress-lb.svg | 0
 .../swarm/images/src/ingress-routing-mesh.svg | 0
 .../swarm/images/src/replicated-vs-global.svg | 0
 .../swarm/images/src/service-lifecycle.svg | 0
 .../swarm/images/src/service-vip.svg | 0
 .../swarm/images/src/services-diagram.svg | 0
 .../swarm/images/src/simple-cluster.svg | 0
 {docs => engine}/swarm/images/src/tls.svg | 0
 .../swarm/images/swarm-diagram.png | Bin
 {docs => engine}/swarm/images/tls.png | Bin
 {docs => engine}/swarm/index.md | 0
 {docs => engine}/swarm/ingress.md | 0
 {docs => engine}/swarm/join-nodes.md | 0
 {docs => engine}/swarm/key-concepts.md | 0
 {docs => engine}/swarm/manage-nodes.md | 0
 {docs => engine}/swarm/menu.md | 0
 {docs => engine}/swarm/networking.md | 0
 {docs => engine}/swarm/raft.md | 0
 {docs => engine}/swarm/services.md | 0
 {docs => engine}/swarm/swarm-mode.md | 0
 .../swarm/swarm-tutorial/add-nodes.md | 0
 .../swarm/swarm-tutorial/create-swarm.md | 0
 .../swarm/swarm-tutorial/delete-service.md | 0
 .../swarm/swarm-tutorial/deploy-service.md | 0
 .../swarm/swarm-tutorial/drain-node.md | 0
 .../swarm/swarm-tutorial/index.md | 0
 .../swarm/swarm-tutorial/inspect-service.md | 0
 {docs => engine}/swarm/swarm-tutorial/menu.md | 0
 .../swarm/swarm-tutorial/rolling-update.md | 0
 .../swarm/swarm-tutorial/scale-service.md | 0
 {docs => engine}/touch-up.sh | 0
 {docs => engine}/tutorials/dockerimages.md | 0
 {docs => engine}/tutorials/dockerizing.md | 0
 {docs => engine}/tutorials/dockerrepos.md | 0
 {docs => engine}/tutorials/dockervolumes.md | 0
 {docs => engine}/tutorials/index.md | 0
 {docs => engine}/tutorials/menu.md | 0
 .../tutorials/networkingcontainers.md | 0
 {docs => engine}/tutorials/search.png | Bin
 {docs => engine}/tutorials/usingdocker.md | 0
 {docs => engine}/tutorials/webapp1.png | Bin
 {docs => engine}/understanding-docker.md | 0
 .../userguide/eng-image/baseimages.md | 0
 .../eng-image/dockerfile_best-practices.md | 0
 .../userguide/eng-image/image_management.md | 0
 {docs => engine}/userguide/eng-image/index.md | 0
 {docs => engine}/userguide/index.md | 0
 {docs => engine}/userguide/intro.md | 0
 .../userguide/labels-custom-metadata.md | 0
 .../userguide/networking/configure-dns.md | 0
 .../networking/default_network/binding.md | 0
 .../default_network/build-bridges.md | 0
 .../default_network/configure-dns.md | 0
 .../container-communication.md | 0
 .../default_network/custom-docker0.md | 0
 .../networking/default_network/dockerlinks.md | 0
 .../images/ipv6_basic_host_config.gliffy | 0
 .../images/ipv6_basic_host_config.svg | 0
 .../images/ipv6_ndp_proxying.gliffy | 0
 .../images/ipv6_ndp_proxying.svg | 0
 .../images/ipv6_routed_network_example.gliffy | 0
 .../images/ipv6_routed_network_example.svg | 0
 .../images/ipv6_slash64_subnet_config.gliffy | 0
 .../images/ipv6_slash64_subnet_config.svg | 0
 .../ipv6_switched_network_example.gliffy | 0
 .../images/ipv6_switched_network_example.svg | 0
 .../networking/default_network/index.md | 0
 .../networking/default_network/ipv6.md | 0
 .../networking/get-started-macvlan.md | 0
 .../networking/get-started-overlay.md | 0
 .../networking/images/bridge_network.gliffy | 0
 .../networking/images/bridge_network.png | Bin
 .../networking/images/bridge_network.svg | 0
 .../networking/images/engine_on_net.gliffy | 0
 .../networking/images/engine_on_net.png | Bin
 .../networking/images/engine_on_net.svg | 0
 .../networking/images/key_value.gliffy | 0
 .../userguide/networking/images/key_value.png | Bin
 .../userguide/networking/images/key_value.svg | 0
 .../images/macvlan-bridge-ipvlan-l2.gliffy | 0
 .../images/macvlan-bridge-ipvlan-l2.png | Bin
 .../images/macvlan-bridge-ipvlan-l2.svg | 0
 .../images/macvlan_bridge_simple.gliffy | 0
 .../images/macvlan_bridge_simple.png | Bin
 .../images/macvlan_bridge_simple.svg | 0
 .../images/multi_tenant_8021q_vlans.gliffy | 0
 .../images/multi_tenant_8021q_vlans.png | Bin
 .../images/multi_tenant_8021q_vlans.svg | 0
 .../networking/images/network_access.gliffy | 0
 .../networking/images/network_access.png | Bin
 .../networking/images/network_access.svg | 0
 .../images/overlay-network-final.gliffy | 0
 .../images/overlay-network-final.png | Bin
 .../images/overlay-network-final.svg | 0
 .../networking/images/overlay_network.gliffy | 0
 .../networking/images/overlay_network.png | Bin
 .../networking/images/overlay_network.svg | 0
 .../networking/images/working.gliffy | 0
 .../userguide/networking/images/working.png | Bin
 .../userguide/networking/images/working.svg | 0
 .../userguide/networking/index.md | 0
 {docs => engine}/userguide/networking/menu.md | 0
 .../networking/overlay-security-model.md | 0
 .../networking/work-with-networks.md | 0
 .../userguide/storagedriver/aufs-driver.md | 0
 .../userguide/storagedriver/btrfs-driver.md | 0
 .../storagedriver/device-mapper-driver.md | 0
 .../storagedriver/images/aufs_delete.jpg | Bin
 .../storagedriver/images/aufs_layers.jpg | Bin
 .../storagedriver/images/aufs_metadata.jpg | Bin
 .../storagedriver/images/base_device.jpg | Bin
 .../storagedriver/images/btfs_constructs.jpg | Bin
 .../images/btfs_container_layer.jpg | Bin
 .../storagedriver/images/btfs_layers.png | Bin
 .../storagedriver/images/btfs_pool.jpg | Bin
 .../storagedriver/images/btfs_snapshots.jpg | Bin
 .../storagedriver/images/btfs_subvolume.jpg | Bin
 .../images/container-layers-cas.jpg | Bin
 .../storagedriver/images/container-layers.jpg | Bin
 .../storagedriver/images/dm_container.jpg | Bin
 .../storagedriver/images/driver-pros-cons.png | Bin
 .../storagedriver/images/image-layers.jpg | Bin
 .../storagedriver/images/lsblk-diagram.jpg | Bin
 .../images/overlay_constructs.jpg | Bin
 .../images/overlay_constructs2.jpg | Bin
 .../storagedriver/images/saving-space.jpg | Bin
 .../storagedriver/images/shared-uuid.jpg | Bin
 .../storagedriver/images/shared-volume.jpg | Bin
 .../storagedriver/images/sharing-layers.jpg | Bin
 .../storagedriver/images/two_dm_container.jpg | Bin
 .../storagedriver/images/zfs_clones.jpg | Bin
 .../storagedriver/images/zfs_zpool.jpg | Bin
 .../storagedriver/images/zpool_blocks.jpg | Bin
 .../storagedriver/imagesandcontainers.md | 0
 .../userguide/storagedriver/index.md | 0
 .../storagedriver/overlayfs-driver.md | 0
 .../userguide/storagedriver/selectadriver.md | 0
 .../userguide/storagedriver/zfs-driver.md | 0
 errors/errors.go | 47 -
 experimental/README.md | 82 -
 experimental/docker-stacks-and-bundles.md | 183 -
 experimental/images/ipvlan-l3.gliffy | 1 -
 experimental/images/ipvlan-l3.png | Bin 18260 -> 0 bytes
 experimental/images/ipvlan-l3.svg | 1 -
 experimental/images/ipvlan_l2_simple.gliffy | 1 -
 experimental/images/ipvlan_l2_simple.png | Bin 20145 -> 0 bytes
 experimental/images/ipvlan_l2_simple.svg | 1 -
 .../images/macvlan-bridge-ipvlan-l2.gliffy | 1 -
 .../images/macvlan-bridge-ipvlan-l2.png | Bin 14527 -> 0 bytes
 .../images/macvlan-bridge-ipvlan-l2.svg | 1 -
 .../images/multi_tenant_8021q_vlans.gliffy | 1 -
 .../images/multi_tenant_8021q_vlans.png | Bin 17879 -> 0 bytes
 .../images/multi_tenant_8021q_vlans.svg | 1 -
 experimental/images/vlans-deeper-look.gliffy | 1 -
 experimental/images/vlans-deeper-look.png | Bin 38837 -> 0 bytes
 experimental/images/vlans-deeper-look.svg | 1 -
 experimental/plugins_graphdriver.md | 334 -
 experimental/vlan-networks.md | 471 -
 hack/.vendor-helpers.sh | 164 -
 hack/Jenkins/W2L/postbuild.sh | 35 -
 hack/Jenkins/W2L/setup.sh | 309 -
 hack/Jenkins/readme.md | 3 -
 hack/dind | 33 -
 hack/generate-authors.sh | 15 -
 hack/install.sh | 525 -
 hack/make.sh | 386 -
 hack/make/.binary | 64 -
 hack/make/.binary-setup | 5 -
 hack/make/.build-deb/compat | 1 -
 hack/make/.build-deb/control | 29 -
 .../.build-deb/docker-engine.bash-completion | 1 -
 .../.build-deb/docker-engine.docker.default | 1 -
 .../make/.build-deb/docker-engine.docker.init | 1 -
 .../.build-deb/docker-engine.docker.upstart | 1 -
 hack/make/.build-deb/docker-engine.install | 12 -
 hack/make/.build-deb/docker-engine.manpages | 1 -
 hack/make/.build-deb/docker-engine.postinst | 20 -
 hack/make/.build-deb/docker-engine.udev | 1 -
 hack/make/.build-deb/docs | 1 -
 hack/make/.build-deb/rules | 54 -
 .../.build-rpm/docker-engine-selinux.spec | 109 -
 hack/make/.build-rpm/docker-engine.spec | 242 -
 hack/make/.detect-daemon-osarch | 66 -
 hack/make/.ensure-emptyfs | 23 -
 hack/make/.ensure-frozen-images | 67 -
 hack/make/.ensure-frozen-images-windows | 32 -
 hack/make/.ensure-httpserver | 15 -
 hack/make/.ensure-nnp-test | 22 -
 hack/make/.ensure-syscall-test | 23 -
 hack/make/.go-autogen | 66 -
 hack/make/.integration-daemon-setup | 14 -
 hack/make/.integration-daemon-start | 111 -
 hack/make/.integration-daemon-stop | 27 -
 hack/make/.resources-windows/common.rc | 38 -
 .../.resources-windows/docker.exe.manifest | 18 -
 hack/make/.resources-windows/docker.ico | Bin 370070 -> 0 bytes
 hack/make/.resources-windows/docker.png | Bin 658195 -> 0 bytes
 hack/make/.resources-windows/docker.rc | 3 -
 hack/make/.resources-windows/dockerd.rc | 4 -
 .../make/.resources-windows/event_messages.mc | 39 -
 hack/make/.resources-windows/resources.go | 18 -
 hack/make/.validate | 33 -
 hack/make/README.md | 17 -
 hack/make/binary | 15 -
 hack/make/binary-client | 12 -
 hack/make/binary-daemon | 16 -
 hack/make/build-deb | 101 -
 hack/make/build-rpm | 159 -
 hack/make/clean-apt-repo | 43 -
 hack/make/clean-yum-repo | 20 -
 hack/make/cover | 20 -
 hack/make/cross | 42 -
 hack/make/dynbinary | 15 -
 hack/make/dynbinary-client | 12 -
 hack/make/dynbinary-daemon | 16 -
 hack/make/dyngccgo | 11 -
 hack/make/gccgo | 68 -
 hack/make/generate-index-listing | 74 -
 hack/make/install-binary | 12 -
 hack/make/install-binary-client | 10 -
 hack/make/install-binary-daemon | 11 -
 hack/make/install-script | 63 -
 hack/make/release-deb | 162 -
 hack/make/release-rpm | 71 -
 hack/make/sign-repos | 62 -
 hack/make/test-deb-install | 71 -
 hack/make/test-docker-py | 18 -
 hack/make/test-install-script | 31 -
 hack/make/test-integration-cli | 18 -
 hack/make/test-old-apt-repo | 29 -
 hack/make/test-unit | 36 -
 hack/make/tgz | 82 -
 hack/make/ubuntu | 190 -
 hack/make/update-apt-repo | 70 -
 hack/make/validate-dco | 54 -
 hack/make/validate-default-seccomp | 27 -
 hack/make/validate-gofmt | 30 -
 hack/make/validate-lint | 30 -
 hack/make/validate-pkg | 32 -
 hack/make/validate-test | 35 -
 hack/make/validate-toml | 30 -
 hack/make/validate-vendor | 27 -
 hack/make/validate-vet | 31 -
 hack/make/win | 20 -
 hack/release.sh | 319 -
 hack/vendor.sh | 169 -
 image/fs.go | 175 -
 image/fs_test.go | 384 -
 image/image.go | 140 -
 image/image_test.go | 59 -
 image/rootfs.go | 16 -
 image/rootfs_unix.go | 18 -
 image/rootfs_windows.go | 48 -
 image/spec/v1.1.md | 640 -
 image/spec/v1.2.md | 696 -
 image/spec/v1.md | 573 -
 image/store.go | 295 -
 image/store_test.go | 300 -
 image/tarexport/load.go | 392 -
 image/tarexport/save.go | 349 -
 image/tarexport/tarexport.go | 47 -
 image/v1/imagev1.go | 156 -
 image/v1/imagev1_test.go | 55 -
 integration-cli/benchmark_test.go | 95 -
 integration-cli/check_test.go | 283 -
 integration-cli/daemon.go | 529 -
 integration-cli/daemon_swarm.go | 327 -
 integration-cli/daemon_swarm_hack.go | 20 -
 integration-cli/docker_api_attach_test.go | 171 -
 integration-cli/docker_api_auth_test.go | 25 -
 integration-cli/docker_api_build_test.go | 283 -
 integration-cli/docker_api_containers_test.go | 1434 -
 integration-cli/docker_api_create_test.go | 44 -
 integration-cli/docker_api_events_test.go | 73 -
 .../docker_api_exec_resize_test.go | 105 -
 integration-cli/docker_api_exec_test.go | 183 -
 integration-cli/docker_api_images_test.go | 129 -
 integration-cli/docker_api_info_test.go | 40 -
 integration-cli/docker_api_inspect_test.go | 183 -
 .../docker_api_inspect_unix_test.go | 35 -
 integration-cli/docker_api_logs_test.go | 87 -
 integration-cli/docker_api_network_test.go | 353 -
 integration-cli/docker_api_resize_test.go | 44 -
 .../docker_api_service_update_test.go | 39 -
 integration-cli/docker_api_stats_test.go | 281 -
 integration-cli/docker_api_stats_unix_test.go | 41 -
 integration-cli/docker_api_swarm_test.go | 938 -
 integration-cli/docker_api_test.go | 142 -
 .../docker_api_update_unix_test.go | 35 -
 integration-cli/docker_api_version_test.go | 23 -
 integration-cli/docker_api_volumes_test.go | 88 -
 integration-cli/docker_cli_attach_test.go | 166 -
 .../docker_cli_attach_unix_test.go | 230 -
 integration-cli/docker_cli_authz_unix_test.go | 477 -
 integration-cli/docker_cli_build_test.go | 6962 -
 integration-cli/docker_cli_build_unix_test.go | 206 -
 integration-cli/docker_cli_by_digest_test.go | 693 -
 integration-cli/docker_cli_commit_test.go | 189 -
 integration-cli/docker_cli_config_test.go | 138 -
 .../docker_cli_cp_from_container_test.go | 489 -
 integration-cli/docker_cli_cp_test.go | 665 -
 .../docker_cli_cp_to_container_test.go | 605 -
 .../docker_cli_cp_to_container_unix_test.go | 39 -
 integration-cli/docker_cli_cp_utils.go | 303 -
 integration-cli/docker_cli_create_test.go | 480 -
 .../docker_cli_daemon_experimental_test.go | 224 -
 integration-cli/docker_cli_daemon_test.go | 2719 -
 integration-cli/docker_cli_diff_test.go | 87 -
 integration-cli/docker_cli_events_test.go | 743 -
 .../docker_cli_events_unix_test.go | 494 -
 integration-cli/docker_cli_exec_test.go | 515 -
 integration-cli/docker_cli_exec_unix_test.go | 70 -
 .../docker_cli_experimental_test.go | 21 -
 .../docker_cli_export_import_test.go | 49 -
 ...cker_cli_external_graphdriver_unix_test.go | 399 -
 ...er_cli_external_volume_driver_unix_test.go | 522 -
 integration-cli/docker_cli_health_test.go | 167 -
 integration-cli/docker_cli_help_test.go | 339 -
 integration-cli/docker_cli_history_test.go | 125 -
 integration-cli/docker_cli_images_test.go | 359 -
 integration-cli/docker_cli_import_test.go | 127 -
 integration-cli/docker_cli_info_test.go | 189 -
 integration-cli/docker_cli_info_unix_test.go | 15 -
 .../docker_cli_inspect_experimental_test.go | 33 -
 integration-cli/docker_cli_inspect_test.go | 407 -
 integration-cli/docker_cli_kill_test.go | 95 -
 integration-cli/docker_cli_links_test.go | 239 -
 integration-cli/docker_cli_links_unix_test.go | 26 -
 integration-cli/docker_cli_login_test.go | 44 -
 integration-cli/docker_cli_logout_test.go | 56 -
 integration-cli/docker_cli_logs_bench_test.go | 32 -
 integration-cli/docker_cli_logs_test.go | 322 -
 integration-cli/docker_cli_nat_test.go | 93 -
 integration-cli/docker_cli_netmode_test.go | 94 -
 .../docker_cli_network_unix_test.go | 1674 -
 integration-cli/docker_cli_oom_killed_test.go | 30 -
 integration-cli/docker_cli_pause_test.go | 66 -
 integration-cli/docker_cli_plugins_test.go | 109 -
 integration-cli/docker_cli_port_test.go | 319 -
 integration-cli/docker_cli_proxy_test.go | 53 -
 integration-cli/docker_cli_ps_test.go | 900 -
 integration-cli/docker_cli_pull_local_test.go | 446 -
 integration-cli/docker_cli_pull_test.go | 274 -
 .../docker_cli_pull_trusted_test.go | 365 -
 integration-cli/docker_cli_push_test.go | 702 -
 .../docker_cli_registry_user_agent_test.go | 120 -
 integration-cli/docker_cli_rename_test.go | 123 -
 integration-cli/docker_cli_restart_test.go | 242 -
 integration-cli/docker_cli_rm_test.go | 86 -
 integration-cli/docker_cli_rmi_test.go | 347 -
 integration-cli/docker_cli_run_test.go | 4493 -
 integration-cli/docker_cli_run_unix_test.go | 1306 -
 integration-cli/docker_cli_save_load_test.go | 382 -
 .../docker_cli_save_load_unix_test.go | 87 -
 integration-cli/docker_cli_search_test.go | 131 -
 .../docker_cli_service_create_hack_test.go | 45 -
 .../docker_cli_service_health_test.go | 191 -
 .../docker_cli_service_update_test.go | 86 -
 integration-cli/docker_cli_sni_test.go | 44 -
 integration-cli/docker_cli_stack_test.go | 28 -
 integration-cli/docker_cli_start_test.go | 187 -
 integration-cli/docker_cli_stats_test.go | 159 -
 integration-cli/docker_cli_stop_test.go | 17 -
 integration-cli/docker_cli_swarm_test.go | 222 -
 integration-cli/docker_cli_tag_test.go | 225 -
 integration-cli/docker_cli_top_test.go | 43 -
 integration-cli/docker_cli_update_test.go | 31 -
 .../docker_cli_update_unix_test.go | 217 -
 integration-cli/docker_cli_userns_test.go | 86 -
 integration-cli/docker_cli_v2_only_test.go | 125 -
 integration-cli/docker_cli_version_test.go | 58 -
 integration-cli/docker_cli_volume_test.go | 301 -
 integration-cli/docker_cli_wait_test.go | 97 -
 .../docker_deprecated_api_v124_test.go | 227 -
 .../docker_deprecated_api_v124_unix_test.go | 30 -
 .../docker_experimental_network_test.go | 591 -
 integration-cli/docker_hub_pull_suite_test.go | 90 -
 integration-cli/docker_test_vars.go | 131 -
 integration-cli/docker_utils.go | 1569 -
 integration-cli/events_utils.go | 206 -
 .../auth/docker-credential-shell-test | 33 -
 integration-cli/fixtures/https/ca.pem | 23 -
 .../fixtures/https/client-cert.pem | 73 -
 integration-cli/fixtures/https/client-key.pem | 16 -
 .../fixtures/https/client-rogue-cert.pem | 73 -
 .../fixtures/https/client-rogue-key.pem | 16 -
 .../fixtures/https/server-cert.pem | 76 -
 integration-cli/fixtures/https/server-key.pem | 16 -
 .../fixtures/https/server-rogue-cert.pem | 76 -
 .../fixtures/https/server-rogue-key.pem | 16 -
 integration-cli/fixtures/load/emptyLayer.tar | Bin 30720 -> 0 bytes
 integration-cli/fixtures/notary/delgkey1.crt | 24 -
 integration-cli/fixtures/notary/delgkey1.key | 27 -
 integration-cli/fixtures/notary/delgkey2.crt | 24 -
 integration-cli/fixtures/notary/delgkey2.key | 27 -
 integration-cli/fixtures/notary/delgkey3.crt | 24 -
 integration-cli/fixtures/notary/delgkey3.key | 27 -
 integration-cli/fixtures/notary/delgkey4.crt | 24 -
 integration-cli/fixtures/notary/delgkey4.key | 27 -
 .../fixtures/notary/localhost.cert | 19 -
 integration-cli/fixtures/notary/localhost.key | 27 -
 integration-cli/fixtures/registry/cert.pem | 21 -
 integration-cli/npipe.go | 12 -
 integration-cli/npipe_windows.go | 12 -
 integration-cli/registry.go | 175 -
 integration-cli/registry_mock.go | 55 -
 integration-cli/requirements.go | 206 -
 integration-cli/requirements_unix.go | 106 -
 integration-cli/test_vars_exec.go | 8 -
 integration-cli/test_vars_noexec.go | 8 -
 integration-cli/test_vars_noseccomp.go | 8 -
 integration-cli/test_vars_seccomp.go | 8 -
 integration-cli/test_vars_unix.go | 16 -
 integration-cli/test_vars_windows.go | 18 -
 integration-cli/trust_server.go | 320 -
 integration-cli/utils.go | 85 -
 layer/empty.go | 48 -
 layer/empty_test.go | 46 -
 layer/filestore.go | 354 -
 layer/filestore_test.go | 104 -
 layer/layer.go | 270 -
 layer/layer_store.go | 659 -
 layer/layer_store_windows.go | 11 -
 layer/layer_test.go | 768 -
 layer/layer_unix.go | 9 -
 layer/layer_unix_test.go | 71 -
 layer/layer_windows.go | 98 -
 layer/migration.go | 256 -
 layer/migration_test.go | 435 -
 layer/mount_test.go | 230 -
 layer/mounted_layer.go | 103 -
 layer/ro_layer.go | 172 -
 layer/ro_layer_windows.go | 9 -
 libcontainerd/client.go | 46 -
 libcontainerd/client_linux.go | 627 -
 libcontainerd/client_solaris.go | 58 -
 libcontainerd/client_windows.go | 436 -
 libcontainerd/container.go | 40 -
 libcontainerd/container_linux.go | 243 -
 libcontainerd/container_solaris.go | 5 -
 libcontainerd/container_windows.go | 310 -
 libcontainerd/pausemonitor_linux.go | 31 -
 libcontainerd/process.go | 18 -
 libcontainerd/process_linux.go | 110 -
 libcontainerd/process_solaris.go | 6 -
 libcontainerd/process_windows.go | 95 -
 libcontainerd/queue_linux.go | 29 -
 libcontainerd/remote.go | 20 -
 libcontainerd/remote_linux.go | 542 -
 libcontainerd/remote_solaris.go | 34 -
 libcontainerd/remote_windows.go | 36 -
 libcontainerd/types.go | 64 -
 libcontainerd/types_linux.go | 55 -
 libcontainerd/types_solaris.go | 38 -
 libcontainerd/types_windows.go | 39 -
 libcontainerd/utils_linux.go | 52 -
 libcontainerd/utils_windows.go | 21 -
 libcontainerd/windowsoci/oci_windows.go | 179 -
 libcontainerd/windowsoci/unsupported.go | 3 -
 man/Dockerfile | 24 -
 man/Dockerfile.5.md | 473 -
 man/Dockerfile.armhf | 24 -
 man/README.md | 15 -
 man/docker-attach.1.md | 99 -
 man/docker-build.1.md | 313 -
 man/docker-commit.1.md | 71 -
 man/docker-config-json.5.md | 72 -
 man/docker-cp.1.md | 175 -
 man/docker-create.1.md | 502 -
 man/docker-diff.1.md | 49 -
 man/docker-events.1.md | 104 -
 man/docker-exec.1.md | 64 -
 man/docker-export.1.md | 46 -
 man/docker-history.1.md | 52 -
 man/docker-images.1.md | 115 -
 man/docker-import.1.md | 72 -
 man/docker-info.1.md | 146 -
 man/docker-inspect.1.md | 322 -
 man/docker-kill.1.md | 28 -
 man/docker-load.1.md | 56 -
 man/docker-login.1.md | 53 -
 man/docker-logout.1.md | 32 -
 man/docker-logs.1.md | 71 -
 man/docker-network-connect.1.md | 69 -
 man/docker-network-create.1.md | 183 -
 man/docker-network-disconnect.1.md | 36 -
 man/docker-network-inspect.1.md | 112 -
 man/docker-network-ls.1.md | 175 -
 man/docker-network-rm.1.md | 43 -
 man/docker-pause.1.md | 30 -
 man/docker-port.1.md | 47 -
 man/docker-ps.1.md | 142 -
 man/docker-pull.1.md | 220 -
 man/docker-push.1.md | 63 -
 man/docker-rename.1.md | 15 -
 man/docker-restart.1.md | 26 -
 man/docker-rm.1.md | 72 -
 man/docker-rmi.1.md | 42 -
 man/docker-run.1.md | 1005 -
 man/docker-save.1.md | 45 -
 man/docker-search.1.md | 70 -
 man/docker-start.1.md | 39 -
 man/docker-stats.1.md | 43 -
 man/docker-stop.1.md | 30 -
 man/docker-tag.1.md | 76 -
 man/docker-top.1.md | 36 -
 man/docker-unpause.1.md | 27 -
 man/docker-update.1.md | 108 -
 man/docker-version.1.md | 62 -
 man/docker-wait.1.md | 30 -
 man/docker.1.md | 237 -
 man/dockerd.8.md | 605 -
 man/generate.go | 39 -
 man/generate.sh | 15 -
 man/glide.lock | 52 -
 man/glide.yaml | 12 -
 man/md2man-all.sh | 22 -
 migrate/v1/migratev1.go | 504 -
 migrate/v1/migratev1_test.go | 435 -
 oci/defaults_linux.go | 178 -
 oci/defaults_solaris.go | 11 -
 oci/defaults_windows.go | 23 -
 opts/hosts.go | 151 -
 opts/hosts_test.go | 148 -
 opts/hosts_unix.go | 8 -
 opts/hosts_windows.go | 6 -
 opts/ip.go | 42 -
 opts/ip_test.go | 54 -
 opts/opts.go | 321 -
 opts/opts_test.go | 232 -
 opts/opts_unix.go | 6 -
 opts/opts_windows.go | 56 -
 pkg/README.md | 11 -
 pkg/aaparser/aaparser.go | 92 -
 pkg/aaparser/aaparser_test.go | 73 -
 pkg/archive/README.md | 1 -
 pkg/archive/archive.go | 1147 -
 pkg/archive/archive_linux.go | 91 -
 pkg/archive/archive_other.go | 7 -
 pkg/archive/archive_test.go | 1148 -
 pkg/archive/archive_unix.go | 112 -
 pkg/archive/archive_unix_test.go | 245 -
 pkg/archive/archive_windows.go | 70 -
 pkg/archive/archive_windows_test.go | 91 -
 pkg/archive/changes.go | 446 -
 pkg/archive/changes_linux.go | 312 -
 pkg/archive/changes_other.go | 97 -
 pkg/archive/changes_posix_test.go | 127 -
 pkg/archive/changes_test.go | 565 -
 pkg/archive/changes_unix.go | 36 -
 pkg/archive/changes_windows.go | 30 -
 pkg/archive/copy.go | 458 -
 pkg/archive/copy_unix.go | 11 -
 pkg/archive/copy_unix_test.go | 978 -
 pkg/archive/copy_windows.go | 9 -
 pkg/archive/diff.go | 279 -
 pkg/archive/diff_test.go | 386 -
 pkg/archive/example_changes.go | 97 -
 pkg/archive/testdata/broken.tar | Bin 13824 -> 0 bytes
 pkg/archive/time_linux.go | 16 -
 pkg/archive/time_unsupported.go | 16 -
 pkg/archive/utils_test.go | 166 -
 pkg/archive/whiteouts.go | 23 -
 pkg/archive/wrap.go | 59 -
 pkg/archive/wrap_test.go | 98 -
 pkg/authorization/api.go | 54 -
 pkg/authorization/authz.go | 179 -
 pkg/authorization/authz_unix_test.go | 282 -
 pkg/authorization/middleware.go | 60 -
 pkg/authorization/plugin.go | 92 -
 pkg/authorization/response.go | 203 -
 pkg/broadcaster/unbuffered.go | 49 -
 pkg/broadcaster/unbuffered_test.go | 162 -
 pkg/chrootarchive/archive.go | 97 -
 pkg/chrootarchive/archive_test.go | 394 -
 pkg/chrootarchive/archive_unix.go | 86 -
 pkg/chrootarchive/archive_windows.go | 22 -
 pkg/chrootarchive/chroot_linux.go | 103 -
 pkg/chrootarchive/chroot_unix.go | 12 -
 pkg/chrootarchive/diff.go | 19 -
 pkg/chrootarchive/diff_unix.go | 120 -
 pkg/chrootarchive/diff_windows.go | 44 -
 pkg/chrootarchive/init_unix.go | 28 -
 pkg/chrootarchive/init_windows.go | 4 -
 pkg/devicemapper/devmapper.go | 828 -
 pkg/devicemapper/devmapper_log.go | 35 -
 pkg/devicemapper/devmapper_wrapper.go | 251 -
 .../devmapper_wrapper_deferred_remove.go | 34 -
 .../devmapper_wrapper_no_deferred_remove.go | 15 -
 pkg/devicemapper/ioctl.go | 27 -
 pkg/devicemapper/log.go | 11 -
 pkg/directory/directory.go | 26 -
 pkg/directory/directory_test.go | 192 -
 pkg/directory/directory_unix.go | 48 -
 pkg/directory/directory_windows.go | 37 -
 pkg/discovery/README.md | 41 -
 pkg/discovery/backends.go | 107 -
 pkg/discovery/discovery.go | 35 -
 pkg/discovery/discovery_test.go | 137 -
 pkg/discovery/entry.go | 94 -
 pkg/discovery/file/file.go | 109 -
 pkg/discovery/file/file_test.go | 114 -
 pkg/discovery/generator.go | 35 -
 pkg/discovery/generator_test.go | 53 -
 pkg/discovery/kv/kv.go | 192 -
 pkg/discovery/kv/kv_test.go | 324 -
 pkg/discovery/memory/memory.go | 93 -
 pkg/discovery/memory/memory_test.go | 48 -
 pkg/discovery/nodes/nodes.go | 54 -
 pkg/discovery/nodes/nodes_test.go | 51 -
 pkg/filenotify/filenotify.go | 40 -
 pkg/filenotify/fsnotify.go | 18 -
 pkg/filenotify/poller.go | 204 -
 pkg/filenotify/poller_test.go | 119 -
 pkg/fileutils/fileutils.go | 283 -
 pkg/fileutils/fileutils_solaris.go | 7 -
 pkg/fileutils/fileutils_test.go | 585 -
 pkg/fileutils/fileutils_unix.go | 22 -
 pkg/fileutils/fileutils_windows.go | 7 -
 pkg/gitutils/gitutils.go | 100 -
 pkg/gitutils/gitutils_test.go | 220 -
 pkg/graphdb/conn_sqlite3.go | 15 -
 pkg/graphdb/conn_sqlite3_unix.go | 7 -
 pkg/graphdb/conn_sqlite3_windows.go | 7 -
 pkg/graphdb/conn_unsupported.go | 8 -
 pkg/graphdb/graphdb.go | 551 -
 pkg/graphdb/graphdb_test.go | 721 -
 pkg/graphdb/sort.go | 27 -
 pkg/graphdb/sort_test.go | 29 -
 pkg/graphdb/utils.go | 32 -
 pkg/homedir/homedir.go | 39 -
 pkg/homedir/homedir_test.go | 24 -
 pkg/httputils/httputils.go | 56 -
 pkg/httputils/httputils_test.go | 115 -
 pkg/httputils/mimetype.go | 30 -
 pkg/httputils/mimetype_test.go | 13 -
 pkg/httputils/resumablerequestreader.go | 95 -
 pkg/httputils/resumablerequestreader_test.go | 307 -
 pkg/idtools/idtools.go | 197 -
 pkg/idtools/idtools_unix.go | 60 -
 pkg/idtools/idtools_unix_test.go | 271 -
 pkg/idtools/idtools_windows.go | 18 -
 pkg/idtools/usergroupadd_linux.go | 188 -
 pkg/idtools/usergroupadd_unsupported.go | 12 -
 pkg/integration/checker/checker.go | 46 -
 pkg/integration/dockerCmd_utils.go | 78 -
 pkg/integration/dockerCmd_utils_test.go | 405 -
 pkg/integration/utils.go | 361 -
 pkg/integration/utils_test.go | 572 -
 pkg/ioutils/buffer.go | 51 -
 pkg/ioutils/buffer_test.go | 75 -
 pkg/ioutils/bytespipe.go | 186 -
 pkg/ioutils/bytespipe_test.go | 159 -
 pkg/ioutils/fmt.go | 22 -
 pkg/ioutils/fmt_test.go | 17 -
 pkg/ioutils/fswriters.go | 82 -
 pkg/ioutils/fswriters_test.go | 39 -
 pkg/ioutils/multireader.go | 226 -
 pkg/ioutils/multireader_test.go | 149 -
 pkg/ioutils/readers.go | 154 -
 pkg/ioutils/readers_test.go | 94 -
 pkg/ioutils/temp_unix.go | 10 -
 pkg/ioutils/temp_windows.go | 18 -
 pkg/ioutils/writeflusher.go | 92 -
 pkg/ioutils/writers.go | 66 -
 pkg/ioutils/writers_test.go | 65 -
 pkg/jsonlog/jsonlog.go | 42 -
 pkg/jsonlog/jsonlog_marshalling.go | 178 -
 pkg/jsonlog/jsonlog_marshalling_test.go | 34 -
 pkg/jsonlog/jsonlogbytes.go | 122 -
 pkg/jsonlog/jsonlogbytes_test.go | 39 -
 pkg/jsonlog/time_marshalling.go | 27 -
 pkg/jsonlog/time_marshalling_test.go | 47 -
 pkg/jsonmessage/jsonmessage.go | 221 -
 pkg/jsonmessage/jsonmessage_test.go | 245 -
 pkg/listeners/listeners_solaris.go | 31 -
 pkg/listeners/listeners_unix.go | 94 -
 pkg/listeners/listeners_windows.go | 54 -
 pkg/locker/README.md | 65 -
 pkg/locker/locker.go | 112 -
 pkg/locker/locker_test.go | 124 -
 pkg/longpath/longpath.go | 26 -
 pkg/longpath/longpath_test.go | 22 -
 pkg/loopback/attach_loopback.go | 137 -
 pkg/loopback/ioctl.go | 53 -
 pkg/loopback/loop_wrapper.go | 52 -
 pkg/loopback/loopback.go | 63 -
 pkg/mflag/LICENSE | 27 -
 pkg/mflag/README.md | 40 -
 pkg/mflag/example/example.go | 36 -
 pkg/mflag/flag.go | 1280 -
 pkg/mflag/flag_test.go | 527 -
 pkg/mount/flags.go | 149 -
 pkg/mount/flags_freebsd.go | 48 -
 pkg/mount/flags_linux.go | 85 -
 pkg/mount/flags_unsupported.go | 30 -
 pkg/mount/mount.go | 74 -
 pkg/mount/mount_unix_test.go | 162 -
 pkg/mount/mounter_freebsd.go | 59 -
 pkg/mount/mounter_linux.go | 21 -
 pkg/mount/mounter_solaris.go | 33 -
 pkg/mount/mounter_unsupported.go | 11 -
 pkg/mount/mountinfo.go | 40 -
 pkg/mount/mountinfo_freebsd.go | 41 -
 pkg/mount/mountinfo_linux.go | 95 -
 pkg/mount/mountinfo_linux_test.go | 476 -
 pkg/mount/mountinfo_solaris.go | 37 -
 pkg/mount/mountinfo_unsupported.go | 12 -
 pkg/mount/mountinfo_windows.go | 6 -
 pkg/mount/sharedsubtree_linux.go | 69 -
 pkg/mount/sharedsubtree_linux_test.go | 331 -
 .../cmd/names-generator/main.go | 11 -
 pkg/namesgenerator/names-generator.go | 543 -
 pkg/namesgenerator/names-generator_test.go | 27 -
 pkg/parsers/kernel/kernel.go | 74 -
 pkg/parsers/kernel/kernel_darwin.go | 56 -
 pkg/parsers/kernel/kernel_unix.go | 30 -
 pkg/parsers/kernel/kernel_unix_test.go | 96 -
 pkg/parsers/kernel/kernel_windows.go | 69 -
 pkg/parsers/kernel/uname_linux.go | 19 -
 pkg/parsers/kernel/uname_solaris.go | 14 -
 pkg/parsers/kernel/uname_unsupported.go | 18 -
 .../operatingsystem/operatingsystem_linux.go | 77 -
 .../operatingsystem_solaris.go | 37 -
 .../operatingsystem/operatingsystem_unix.go | 25 -
 .../operatingsystem_unix_test.go | 247 -
 .../operatingsystem_windows.go | 49 -
 pkg/parsers/parsers.go | 69 -
 pkg/parsers/parsers_test.go | 70 -
 pkg/pidfile/pidfile.go | 49 -
 pkg/pidfile/pidfile_test.go | 38 -
 pkg/pidfile/pidfile_unix.go | 16 -
 pkg/pidfile/pidfile_windows.go | 23 -
 pkg/platform/architecture_linux.go | 16 -
 pkg/platform/architecture_unix.go | 20 -
 pkg/platform/architecture_windows.go | 52 -
 pkg/platform/platform.go | 23 -
 pkg/platform/utsname_int8.go | 18 -
 pkg/platform/utsname_uint8.go | 18 -
 pkg/plugins/client.go | 188 -
 pkg/plugins/client_test.go | 134 -
 pkg/plugins/discovery.go | 132 -
 pkg/plugins/discovery_test.go | 152 -
 pkg/plugins/discovery_unix_test.go | 61 -
 pkg/plugins/errors.go | 33 -
 pkg/plugins/pluginrpc-gen/README.md | 58 -
 pkg/plugins/pluginrpc-gen/fixtures/foo.go | 89 -
 .../fixtures/otherfixture/spaceship.go | 4 -
 pkg/plugins/pluginrpc-gen/main.go | 91 -
 pkg/plugins/pluginrpc-gen/parser.go | 263 -
 pkg/plugins/pluginrpc-gen/parser_test.go | 222 -
 pkg/plugins/pluginrpc-gen/template.go | 118 -
 pkg/plugins/plugins.go | 274 -
 pkg/plugins/transport/http.go | 36 -
 pkg/plugins/transport/transport.go | 36 -
 pkg/pools/pools.go | 119 -
 pkg/pools/pools_test.go | 161 -
 pkg/progress/progress.go | 73 -
 pkg/progress/progressreader.go | 59 -
 pkg/progress/progressreader_test.go | 75 -
 pkg/promise/promise.go | 11 -
 pkg/pubsub/publisher.go | 111 -
 pkg/pubsub/publisher_test.go | 142 -
 pkg/random/random.go | 71 -
 pkg/random/random_test.go | 22 -
 pkg/reexec/README.md | 5 -
 pkg/reexec/command_linux.go | 28 -
 pkg/reexec/command_unix.go | 23 -
 pkg/reexec/command_unsupported.go | 12 -
 pkg/reexec/command_windows.go | 23 -
 pkg/reexec/reexec.go | 47 -
 pkg/registrar/registrar.go | 127 -
 pkg/registrar/registrar_test.go | 119 -
 pkg/signal/README.md | 1 -
 pkg/signal/signal.go | 54 -
 pkg/signal/signal_darwin.go | 41 -
 pkg/signal/signal_freebsd.go | 43 -
 pkg/signal/signal_linux.go | 80 -
 pkg/signal/signal_solaris.go | 42 -
 pkg/signal/signal_unix.go | 21 -
 pkg/signal/signal_unsupported.go | 10 -
 pkg/signal/signal_windows.go | 28 -
 pkg/signal/trap.go | 81 -
 pkg/stdcopy/stdcopy.go | 185 -
 pkg/stdcopy/stdcopy_test.go | 260 -
 pkg/streamformatter/streamformatter.go | 172 -
 pkg/streamformatter/streamformatter_test.go | 108 -
 pkg/stringid/README.md | 1 -
 pkg/stringid/stringid.go | 71 -
 pkg/stringid/stringid_test.go | 56 -
 pkg/stringutils/README.md | 1 -
 pkg/stringutils/stringutils.go | 87 -
 pkg/stringutils/stringutils_test.go | 105 -
 pkg/symlink/LICENSE.APACHE | 191 -
 pkg/symlink/LICENSE.BSD | 27 -
 pkg/symlink/README.md | 6 -
 pkg/symlink/fs.go | 143 -
 pkg/symlink/fs_unix.go | 11 -
 pkg/symlink/fs_unix_test.go | 407 -
 pkg/symlink/fs_windows.go | 155 -
 pkg/sysinfo/README.md | 1 -
 pkg/sysinfo/sysinfo.go | 128 -
 pkg/sysinfo/sysinfo_freebsd.go | 7 -
 pkg/sysinfo/sysinfo_linux.go | 246 -
 pkg/sysinfo/sysinfo_linux_test.go | 58 -
 pkg/sysinfo/sysinfo_solaris.go | 119 -
 pkg/sysinfo/sysinfo_test.go | 26 -
 pkg/sysinfo/sysinfo_windows.go | 7 -
 pkg/system/chtimes.go | 52 -
 pkg/system/chtimes_test.go | 94 -
 pkg/system/chtimes_unix.go | 14 -
 pkg/system/chtimes_unix_test.go | 91 -
 pkg/system/chtimes_windows.go | 27 -
 pkg/system/chtimes_windows_test.go | 86 -
 pkg/system/errors.go | 10 -
 pkg/system/events_windows.go | 83 -
 pkg/system/filesys.go | 19 -
 pkg/system/filesys_windows.go | 82 -
 pkg/system/lstat.go | 19 -
 pkg/system/lstat_unix_test.go | 30 -
 pkg/system/lstat_windows.go | 25 -
 pkg/system/meminfo.go | 17 -
 pkg/system/meminfo_linux.go | 65 -
 pkg/system/meminfo_solaris.go | 128 -
 pkg/system/meminfo_unix_test.go | 40 -
 pkg/system/meminfo_unsupported.go | 8 -
 pkg/system/meminfo_windows.go | 44 -
 pkg/system/mknod.go | 22 -
 pkg/system/mknod_windows.go | 13 -
 pkg/system/path_unix.go | 14 -
 pkg/system/path_windows.go | 37 -
 pkg/system/path_windows_test.go | 78 -
 pkg/system/stat.go | 53 -
 pkg/system/stat_freebsd.go | 27 -
 pkg/system/stat_linux.go | 33 -
 pkg/system/stat_openbsd.go | 15 -
 pkg/system/stat_solaris.go | 34 -
 pkg/system/stat_unix_test.go | 39 -
 pkg/system/stat_unsupported.go | 17 -
 pkg/system/stat_windows.go | 43 -
 pkg/system/syscall_unix.go | 17 -
 pkg/system/syscall_windows.go | 103 -
 pkg/system/syscall_windows_test.go | 9 -
 pkg/system/umask.go | 13 -
 pkg/system/umask_windows.go | 9 -
 pkg/system/utimes_darwin.go | 8 -
 pkg/system/utimes_freebsd.go | 22 -
 pkg/system/utimes_linux.go | 26 -
 pkg/system/utimes_unix_test.go | 68 -
 pkg/system/utimes_unsupported.go | 10 -
 pkg/system/xattrs_linux.go | 63 -
 pkg/system/xattrs_unsupported.go | 13 -
 pkg/tailfile/tailfile.go | 66 -
 pkg/tailfile/tailfile_test.go | 148 -
 pkg/tarsum/builder_context.go | 21 -
 pkg/tarsum/builder_context_test.go | 63 -
 pkg/tarsum/fileinfosums.go | 126 -
 pkg/tarsum/fileinfosums_test.go | 62 -
 pkg/tarsum/tarsum.go | 295 -
 pkg/tarsum/tarsum_spec.md | 230 -
 pkg/tarsum/tarsum_test.go | 656 -
 .../json | 1 -
 .../layer.tar | Bin 9216 -> 0 bytes
 .../json | 1 -
 .../layer.tar | Bin 1536 -> 0 bytes
 pkg/tarsum/testdata/collision/collision-0.tar | Bin 10240 -> 0 bytes
 pkg/tarsum/testdata/collision/collision-1.tar | Bin 10240 -> 0 bytes
 pkg/tarsum/testdata/collision/collision-2.tar | Bin 10240 -> 0 bytes
 pkg/tarsum/testdata/collision/collision-3.tar | Bin 10240 -> 0 bytes
 pkg/tarsum/testdata/xattr/json | 1 -
 pkg/tarsum/testdata/xattr/layer.tar | Bin 2560 -> 0 bytes
 pkg/tarsum/versioning.go | 150 -
 pkg/tarsum/versioning_test.go | 98 -
 pkg/tarsum/writercloser.go | 22 -
 pkg/term/ascii.go | 66 -
 pkg/term/ascii_test.go | 43 -
 pkg/term/tc_linux_cgo.go | 50 -
 pkg/term/tc_other.go | 20 -
 pkg/term/tc_solaris_cgo.go | 63 -
 pkg/term/term.go | 117 -
 pkg/term/term_solaris.go | 41 -
 pkg/term/term_unix.go | 29 -
 pkg/term/term_windows.go | 233 -
 pkg/term/termios_darwin.go | 69 -
 pkg/term/termios_freebsd.go | 69 -
 pkg/term/termios_linux.go | 47 -
 pkg/term/termios_openbsd.go | 69 -
 pkg/term/windows/ansi_reader.go | 261 -
 pkg/term/windows/ansi_writer.go | 64 -
 pkg/term/windows/console.go | 35 -
 pkg/term/windows/windows.go | 33 -
 pkg/term/windows/windows_test.go | 3 -
 pkg/testutil/assert/assert.go | 70 -
 pkg/testutil/pkg.go | 1 -
 pkg/tlsconfig/config.go | 133 -
 pkg/truncindex/truncindex.go | 137 -
 pkg/truncindex/truncindex_test.go | 429 -
 pkg/urlutil/urlutil.go | 50 -
 pkg/urlutil/urlutil_test.go | 70 -
 pkg/useragent/README.md | 1 -
 pkg/useragent/useragent.go | 55 -
 pkg/useragent/useragent_test.go | 31 -
 plugin/backend.go | 153 -
 plugin/distribution/pull.go | 212 -
 plugin/distribution/push.go | 135 -
 plugin/distribution/types.go | 19 -
 plugin/interface.go | 10 -
 plugin/legacy.go | 23 -
 plugin/manager.go | 449 -
 plugin/manager_linux.go | 188 -
 plugin/manager_windows.go | 29 -
 profiles/apparmor/apparmor.go | 115 -
 profiles/apparmor/template.go | 46 -
 profiles/seccomp/default.json | 1593 -
 profiles/seccomp/fixtures/example.json | 27 -
 profiles/seccomp/generate.go | 35 -
 profiles/seccomp/seccomp.go | 74 -
 profiles/seccomp/seccomp_default.go | 1879 -
 profiles/seccomp/seccomp_test.go | 28 -
 profiles/seccomp/seccomp_unsupported.go | 13 -
 project/ARM.md | 45 -
 project/BRANCHES-AND-TAGS.md | 35 -
 project/CONTRIBUTORS.md | 1 -
 project/GOVERNANCE.md | 17 -
 project/IRC-ADMINISTRATION.md | 37 -
 project/ISSUE-TRIAGE.md | 91 -
 project/PACKAGE-REPO-MAINTENANCE.md | 74 -
 project/PACKAGERS.md | 307 -
 project/PATCH-RELEASES.md | 68 -
 project/PRINCIPLES.md | 19 -
 project/README.md | 24 -
 project/RELEASE-CHECKLIST.md | 512 -
 project/RELEASE-PROCESS.md | 78 -
 project/REVIEWING.md | 209 -
 project/TOOLS.md | 63 -
 reference/reference.go | 211 -
 reference/reference_test.go | 275 -
 reference/store.go | 287 -
 reference/store_test.go | 356 -
 restartmanager/restartmanager.go | 128 -
 restartmanager/restartmanager_test.go | 34 -
 runconfig/compare.go | 61 -
 runconfig/compare_test.go | 126 -
 runconfig/config.go | 90 -
 runconfig/config_test.go | 134 -
 runconfig/config_unix.go | 59 -
 runconfig/config_windows.go | 19 -
 runconfig/errors.go | 40 -
 .../fixtures/unix/container_config_1_14.json | 30 -
 .../fixtures/unix/container_config_1_17.json | 50 -
 .../fixtures/unix/container_config_1_19.json | 58 -
 .../unix/container_hostconfig_1_14.json | 18 -
 .../unix/container_hostconfig_1_19.json | 30 -
 .../windows/container_config_1_19.json | 58 -
 runconfig/hostconfig.go | 35 -
 runconfig/hostconfig_solaris.go | 47 -
 runconfig/hostconfig_test.go | 222 -
 runconfig/hostconfig_unix.go | 106 -
 runconfig/hostconfig_windows.go | 51 -
 runconfig/opts/envfile.go | 67 -
 runconfig/opts/envfile_test.go | 142 -
 runconfig/opts/fixtures/valid.env | 1 -
 runconfig/opts/fixtures/valid.label | 1 -
 runconfig/opts/opts.go | 70 -
 runconfig/opts/opts_test.go | 108 -
 runconfig/opts/parse.go | 949 -
 runconfig/opts/parse_test.go | 870 -
 runconfig/opts/runtime.go | 74 -
 runconfig/opts/throttledevice.go | 113 -
 runconfig/opts/ulimit.go | 57 -
 runconfig/opts/ulimit_test.go | 42 -
 runconfig/opts/weightdevice.go | 89 -
 runconfig/streams.go | 109 -
 utils/debug.go | 26 -
 utils/debug_test.go | 43 -
 utils/experimental.go | 9 -
 utils/names.go | 12 -
 utils/process_unix.go | 22 -
 utils/process_windows.go | 20 -
 utils/stubs.go | 9 -
 utils/templates/templates.go | 33 -
 utils/templates/templates_test.go | 38 -
 utils/utils.go | 87 -
 utils/utils_test.go | 21 -
 .../src/bitbucket.org/ww/goautoneg/Makefile | 13 -
 .../src/bitbucket.org/ww/goautoneg/README.txt | 67 -
 .../src/bitbucket.org/ww/goautoneg/autoneg.go | 162 -
 .../src/github.com/Azure/go-ansiterm/LICENSE | 21 -
 .../github.com/Azure/go-ansiterm/README.md | 9 -
 .../github.com/Azure/go-ansiterm/constants.go | 188 -
 .../github.com/Azure/go-ansiterm/context.go | 7 -
 .../Azure/go-ansiterm/csi_entry_state.go | 49 -
 .../Azure/go-ansiterm/csi_param_state.go | 38 -
 .../go-ansiterm/escape_intermediate_state.go | 36 -
 .../Azure/go-ansiterm/escape_state.go | 47 -
 .../Azure/go-ansiterm/event_handler.go | 90 -
 .../Azure/go-ansiterm/ground_state.go | 24 -
 .../Azure/go-ansiterm/osc_string_state.go | 31 -
 .../github.com/Azure/go-ansiterm/parser.go | 136 -
 .../go-ansiterm/parser_action_helpers.go | 103 -
 .../Azure/go-ansiterm/parser_actions.go | 122 -
 .../github.com/Azure/go-ansiterm/states.go | 71 -
 .../github.com/Azure/go-ansiterm/utilities.go | 21 -
 .../Azure/go-ansiterm/winterm/ansi.go | 182 -
 .../Azure/go-ansiterm/winterm/api.go | 322 -
 .../go-ansiterm/winterm/attr_translation.go | 100 -
 .../go-ansiterm/winterm/cursor_helpers.go | 101 -
 .../go-ansiterm/winterm/erase_helpers.go | 84 -
 .../go-ansiterm/winterm/scroll_helper.go | 118 -
 .../Azure/go-ansiterm/winterm/utilities.go | 9 -
 .../go-ansiterm/winterm/win_event_handler.go | 726 -
 .../src/github.com/BurntSushi/toml/.gitignore | 5 -
 .../github.com/BurntSushi/toml/.travis.yml | 12 -
 .../src/github.com/BurntSushi/toml/COMPATIBLE | 3 -
 vendor/src/github.com/BurntSushi/toml/COPYING | 14 -
 .../src/github.com/BurntSushi/toml/Makefile | 19 -
 .../src/github.com/BurntSushi/toml/README.md | 220 -
 .../toml/cmd/toml-test-decoder/COPYING | 14 -
 .../toml/cmd/toml-test-encoder/COPYING | 14 -
 .../BurntSushi/toml/cmd/tomlv/COPYING | 14 -
 .../src/github.com/BurntSushi/toml/decode.go | 492 -
 .../github.com/BurntSushi/toml/decode_meta.go | 122 -
 vendor/src/github.com/BurntSushi/toml/doc.go | 27 -
 .../src/github.com/BurntSushi/toml/encode.go | 496 -
 .../BurntSushi/toml/encoding_types.go | 19 -
 .../BurntSushi/toml/encoding_types_1.1.go | 18 -
 vendor/src/github.com/BurntSushi/toml/lex.go | 874 -
 .../src/github.com/BurntSushi/toml/parse.go | 498 -
 .../github.com/BurntSushi/toml/session.vim | 1 -
 .../github.com/BurntSushi/toml/type_check.go | 91 -
 .../github.com/BurntSushi/toml/type_fields.go | 241 -
 .../src/github.com/Graylog2/go-gelf/LICENSE | 21 -
 .../Graylog2/go-gelf/gelf/reader.go | 140 -
 .../Graylog2/go-gelf/gelf/writer.go | 418 -
 .../github.com/Microsoft/go-winio/.gitignore | 1 -
 .../src/github.com/Microsoft/go-winio/LICENSE | 22 -
 .../github.com/Microsoft/go-winio/README.md | 15 -
 .../Microsoft/go-winio/archive/tar/LICENSE | 27 -
 .../Microsoft/go-winio/archive/tar/common.go | 344 -
 .../Microsoft/go-winio/archive/tar/reader.go | 1002 -
 .../go-winio/archive/tar/stat_atim.go | 20 -
 .../go-winio/archive/tar/stat_atimespec.go | 20 -
 .../go-winio/archive/tar/stat_unix.go | 32 -
 .../Microsoft/go-winio/archive/tar/writer.go | 444 -
 .../github.com/Microsoft/go-winio/backup.go | 266 -
 .../Microsoft/go-winio/backuptar/tar.go | 351 -
 .../src/github.com/Microsoft/go-winio/file.go | 219 -
 .../github.com/Microsoft/go-winio/fileinfo.go | 54 -
 .../src/github.com/Microsoft/go-winio/pipe.go | 398 -
 .../Microsoft/go-winio/privilege.go | 191 -
 .../github.com/Microsoft/go-winio/reparse.go | 128 -
 .../src/github.com/Microsoft/go-winio/sd.go | 96 -
 .../github.com/Microsoft/go-winio/syscall.go | 3 -
 .../github.com/Microsoft/go-winio/zsyscall.go | 496 -
 .../src/github.com/Microsoft/hcsshim/LICENSE | 22 -
 .../Microsoft/hcsshim/activatelayer.go | 28 -
 .../github.com/Microsoft/hcsshim/baselayer.go | 147 -
 .../github.com/Microsoft/hcsshim/callback.go | 79 -
 .../github.com/Microsoft/hcsshim/container.go | 531 -
 .../Microsoft/hcsshim/createcomputesystem.go | 22 -
 .../Microsoft/hcsshim/createlayer.go | 27 -
 .../Microsoft/hcsshim/createprocess.go | 101 -
 .../Microsoft/hcsshim/createsandboxlayer.go | 35 -
 .../Microsoft/hcsshim/deactivatelayer.go | 26 -
 .../Microsoft/hcsshim/destroylayer.go | 27 -
 .../Microsoft/hcsshim/expandsandboxsize.go | 26 -
 .../Microsoft/hcsshim/exportlayer.go | 158 -
 .../hcsshim/getcomputesystemproperties.go | 43 -
 .../Microsoft/hcsshim/getlayermountpath.go | 55 -
 .../Microsoft/hcsshim/getsharedbaseimages.go | 22 -
 .../src/github.com/Microsoft/hcsshim/guid.go | 19 -
 .../github.com/Microsoft/hcsshim/hcsshim.go | 184 -
 .../github.com/Microsoft/hcsshim/hnsfuncs.go | 149 -
 .../Microsoft/hcsshim/importlayer.go | 193 -
 .../github.com/Microsoft/hcsshim/interface.go | 171 -
 .../Microsoft/hcsshim/layerexists.go | 30 -
 .../Microsoft/hcsshim/layerutils.go | 111 -
 .../github.com/Microsoft/hcsshim/legacy.go | 441 -
 .../Microsoft/hcsshim/mksyscall_windows.go | 818 -
 .../Microsoft/hcsshim/nametoguid.go | 20 -
 .../Microsoft/hcsshim/preparelayer.go | 36 -
 .../github.com/Microsoft/hcsshim/process.go | 441 -
 .../Microsoft/hcsshim/processimage.go | 23 -
 .../Microsoft/hcsshim/resizeconsole.go | 22 -
 .../hcsshim/shutdownterminatecomputesystem.go | 43 -
 .../Microsoft/hcsshim/startcomputesystem.go | 21 -
 .../Microsoft/hcsshim/terminateprocess.go | 20 -
 .../Microsoft/hcsshim/unpreparelayer.go | 27 -
 .../src/github.com/Microsoft/hcsshim/utils.go | 11 -
 .../github.com/Microsoft/hcsshim/version.go | 7 -
 .../Microsoft/hcsshim/waithelper.go | 126 -
 .../Microsoft/hcsshim/waitprocess.go | 20 -
 .../github.com/Microsoft/hcsshim/zhcsshim.go | 1307 -
 .../src/github.com/RackSec/srslog/.gitignore | 1 -
 .../src/github.com/RackSec/srslog/.travis.yml | 18 -
 .../RackSec/srslog/CODE_OF_CONDUCT.md | 50 -
 vendor/src/github.com/RackSec/srslog/LICENSE | 27 -
 .../src/github.com/RackSec/srslog/README.md | 131 -
 .../github.com/RackSec/srslog/constants.go | 68 -
 .../src/github.com/RackSec/srslog/dialer.go | 87 -
 .../github.com/RackSec/srslog/formatter.go | 48 -
 .../src/github.com/RackSec/srslog/framer.go | 24 -
 .../src/github.com/RackSec/srslog/net_conn.go | 30 -
 .../src/github.com/RackSec/srslog/srslog.go | 100 -
 .../github.com/RackSec/srslog/srslog_unix.go | 54 -
 .../src/github.com/RackSec/srslog/writer.go | 164 -
 .../src/github.com/Sirupsen/logrus/.gitignore | 1 -
 .../github.com/Sirupsen/logrus/.travis.yml | 9 -
 .../github.com/Sirupsen/logrus/CHANGELOG.md | 66 -
 vendor/src/github.com/Sirupsen/logrus/LICENSE | 21 -
 .../src/github.com/Sirupsen/logrus/README.md | 388 -
 vendor/src/github.com/Sirupsen/logrus/doc.go | 26 -
 .../src/github.com/Sirupsen/logrus/entry.go | 264 -
 .../github.com/Sirupsen/logrus/exported.go | 193 -
 .../github.com/Sirupsen/logrus/formatter.go | 48 -
 .../src/github.com/Sirupsen/logrus/hooks.go | 34 -
 .../Sirupsen/logrus/json_formatter.go | 41 -
 .../src/github.com/Sirupsen/logrus/logger.go | 212 -
 .../src/github.com/Sirupsen/logrus/logrus.go | 143 -
 .../Sirupsen/logrus/terminal_bsd.go | 9 -
 .../Sirupsen/logrus/terminal_linux.go | 12 -
 .../Sirupsen/logrus/terminal_notwindows.go | 21 -
 .../Sirupsen/logrus/terminal_solaris.go | 15 -
 .../Sirupsen/logrus/terminal_windows.go | 27 -
 .../Sirupsen/logrus/text_formatter.go | 161 -
 .../src/github.com/Sirupsen/logrus/writer.go | 31 -
 vendor/src/github.com/agl/ed25519/LICENSE | 27 -
 vendor/src/github.com/agl/ed25519/ed25519.go | 125 -
 .../agl/ed25519/edwards25519/const.go | 1411 -
 .../agl/ed25519/edwards25519/edwards25519.go | 2127 -
 .../github.com/armon/go-metrics/.gitignore | 22 -
 .../src/github.com/armon/go-metrics/LICENSE | 20 -
 .../src/github.com/armon/go-metrics/README.md | 68 -
 .../github.com/armon/go-metrics/const_unix.go | 12 -
 .../armon/go-metrics/const_windows.go | 13 -
 .../src/github.com/armon/go-metrics/inmem.go | 239 -
 .../armon/go-metrics/inmem_signal.go | 100 -
 .../github.com/armon/go-metrics/metrics.go | 115 -
 .../src/github.com/armon/go-metrics/sink.go | 52 -
 .../src/github.com/armon/go-metrics/start.go | 95 -
 .../src/github.com/armon/go-metrics/statsd.go | 154 -
 .../github.com/armon/go-metrics/statsite.go | 142 -
 .../src/github.com/armon/go-radix/.gitignore | 22 -
 .../src/github.com/armon/go-radix/.travis.yml | 3 -
 vendor/src/github.com/armon/go-radix/LICENSE | 20 -
 .../src/github.com/armon/go-radix/README.md | 36 -
 vendor/src/github.com/armon/go-radix/radix.go | 467 -
 .../src/github.com/aws/aws-sdk-go/LICENSE.txt | 202 -
 .../aws/aws-sdk-go/aws/awserr/error.go | 145 -
 .../aws/aws-sdk-go/aws/awserr/types.go | 194 -
 .../aws/aws-sdk-go/aws/awsutil/copy.go | 100 -
 .../aws/aws-sdk-go/aws/awsutil/equal.go | 27 -
 .../aws/aws-sdk-go/aws/awsutil/path_value.go | 222 -
 .../aws/aws-sdk-go/aws/awsutil/prettify.go | 107 -
 .../aws-sdk-go/aws/awsutil/string_value.go | 89 -
 .../aws/aws-sdk-go/aws/client/client.go | 120 -
 .../aws-sdk-go/aws/client/default_retryer.go | 90 -
 .../aws/client/metadata/client_info.go | 12 -
 .../github.com/aws/aws-sdk-go/aws/config.go | 358 -
 .../aws/aws-sdk-go/aws/convert_types.go | 369 -
 .../aws-sdk-go/aws/corehandlers/handlers.go | 152 -
 .../aws/corehandlers/param_validator.go | 17 -
 .../aws/credentials/chain_provider.go | 100 -
 .../aws-sdk-go/aws/credentials/credentials.go | 223 -
 .../ec2rolecreds/ec2_role_provider.go | 178 -
 .../aws/credentials/env_provider.go | 77 -
 .../aws-sdk-go/aws/credentials/example.ini | 12 -
 .../shared_credentials_provider.go | 151 -
 .../aws/credentials/static_provider.go | 48 -
 .../aws/aws-sdk-go/aws/defaults/defaults.go | 98 -
 .../aws/aws-sdk-go/aws/ec2metadata/api.go | 140 -
 .../aws/aws-sdk-go/aws/ec2metadata/service.go | 124 -
 .../github.com/aws/aws-sdk-go/aws/errors.go | 17 -
 .../github.com/aws/aws-sdk-go/aws/logger.go | 112 -
 .../aws/aws-sdk-go/aws/request/handlers.go | 187 -
 .../aws-sdk-go/aws/request/http_request.go | 33 -
 .../aws/request/http_request_1_4.go | 31 -
 .../aws-sdk-go/aws/request/offset_reader.go | 49 -
 .../aws/aws-sdk-go/aws/request/request.go | 329 -
 .../aws/request/request_pagination.go | 104 -
 .../aws/aws-sdk-go/aws/request/retryer.go | 101 -
 .../aws/aws-sdk-go/aws/request/validation.go | 234 -
 .../aws/aws-sdk-go/aws/session/session.go | 120 -
 .../github.com/aws/aws-sdk-go/aws/types.go | 106 -
 .../github.com/aws/aws-sdk-go/aws/version.go | 8 -
 .../vendor/golang.org/x/tools/LICENSE | 27 -
 .../aws-sdk-go/private/endpoints/endpoints.go | 65 -
 .../private/endpoints/endpoints.json | 75 -
 .../private/endpoints/endpoints_map.go | 88 -
 .../private/protocol/idempotency.go | 75 -
 .../private/protocol/json/jsonutil/build.go | 254 -
 .../protocol/json/jsonutil/unmarshal.go | 213 -
 .../private/protocol/jsonrpc/jsonrpc.go | 111 -
 .../aws-sdk-go/private/protocol/rest/build.go | 256 -
 .../private/protocol/rest/payload.go | 45 -
 .../private/protocol/rest/unmarshal.go | 198 -
 .../aws-sdk-go/private/protocol/unmarshal.go | 21 -
 .../private/signer/v4/header_rules.go | 82 -
 .../aws/aws-sdk-go/private/signer/v4/v4.go | 465 -
 .../aws-sdk-go/service/cloudwatchlogs/api.go | 3141 -
 .../service/cloudwatchlogs/service.go | 116 -
 .../beorn7/perks/quantile/exampledata.txt | 2388 -
 .../beorn7/perks/quantile/stream.go | 292 -
 vendor/src/github.com/boltdb/bolt/.gitignore | 4 -
 vendor/src/github.com/boltdb/bolt/LICENSE | 20 -
 vendor/src/github.com/boltdb/bolt/Makefile | 18 -
 vendor/src/github.com/boltdb/bolt/README.md | 857 -
 .../src/github.com/boltdb/bolt/appveyor.yml | 18 -
 vendor/src/github.com/boltdb/bolt/bolt_386.go | 10 -
 .../src/github.com/boltdb/bolt/bolt_amd64.go | 10 -
 vendor/src/github.com/boltdb/bolt/bolt_arm.go | 28 -
 .../src/github.com/boltdb/bolt/bolt_arm64.go | 12 -
 .../src/github.com/boltdb/bolt/bolt_linux.go | 10 -
 .../github.com/boltdb/bolt/bolt_openbsd.go | 27 -
 vendor/src/github.com/boltdb/bolt/bolt_ppc.go | 9 -
 .../src/github.com/boltdb/bolt/bolt_ppc64.go | 9 -
 .../github.com/boltdb/bolt/bolt_ppc64le.go | 12 -
 .../src/github.com/boltdb/bolt/bolt_s390x.go | 12 -
 .../src/github.com/boltdb/bolt/bolt_unix.go | 89 -
 .../boltdb/bolt/bolt_unix_solaris.go | 90 -
 .../github.com/boltdb/bolt/bolt_windows.go | 144 -
 .../github.com/boltdb/bolt/boltsync_unix.go | 8 -
 vendor/src/github.com/boltdb/bolt/bucket.go | 778 -
 vendor/src/github.com/boltdb/bolt/cursor.go | 400 -
 vendor/src/github.com/boltdb/bolt/db.go | 1036 -
 vendor/src/github.com/boltdb/bolt/doc.go | 44 -
 vendor/src/github.com/boltdb/bolt/errors.go | 71 -
 vendor/src/github.com/boltdb/bolt/freelist.go | 248 -
 vendor/src/github.com/boltdb/bolt/node.go | 604 -
 vendor/src/github.com/boltdb/bolt/page.go | 178 -
 vendor/src/github.com/boltdb/bolt/tx.go | 682 -
 .../src/github.com/cloudflare/cfssl/LICENSE | 24 -
 .../github.com/cloudflare/cfssl/api/api.go | 231 -
 .../github.com/cloudflare/cfssl/auth/auth.go | 94 -
 .../cloudflare/cfssl/certdb/README.md | 71 -
 .../cloudflare/cfssl/certdb/certdb.go | 40 -
 .../cloudflare/cfssl/config/config.go | 563 -
 .../cloudflare/cfssl/crypto/pkcs7/pkcs7.go | 188 -
 .../github.com/cloudflare/cfssl/csr/csr.go | 431 -
 .../github.com/cloudflare/cfssl/errors/doc.go | 46 -
 .../cloudflare/cfssl/errors/error.go | 420 -
 .../cloudflare/cfssl/errors/http.go | 47 -
 .../cfssl/helpers/derhelpers/derhelpers.go | 42 -
 .../cloudflare/cfssl/helpers/helpers.go | 479 -
 .../github.com/cloudflare/cfssl/info/info.go | 15 -
 .../cloudflare/cfssl/initca/initca.go | 223 -
 .../github.com/cloudflare/cfssl/log/log.go | 170 -
 .../cloudflare/cfssl/ocsp/config/config.go | 13 -
 .../cloudflare/cfssl/signer/local/local.go | 469 -
 .../cloudflare/cfssl/signer/signer.go | 410 -
 .../cloudflare/cfssl/whitelist/LICENSE | 13 -
 vendor/src/github.com/coreos/etcd/LICENSE | 202 -
 .../github.com/coreos/etcd/client/README.md | 110 -
 .../coreos/etcd/client/auth_role.go | 237 -
 .../coreos/etcd/client/auth_user.go | 324 -
 .../coreos/etcd/client/cancelreq.go | 20 -
 .../coreos/etcd/client/cancelreq_go14.go | 17 -
 .../github.com/coreos/etcd/client/client.go | 598 -
 .../coreos/etcd/client/cluster_error.go | 33 -
 .../src/github.com/coreos/etcd/client/curl.go | 70 -
 .../github.com/coreos/etcd/client/discover.go | 21 -
 .../src/github.com/coreos/etcd/client/doc.go | 71 -
 .../coreos/etcd/client/keys.generated.go | 1000 -
 .../src/github.com/coreos/etcd/client/keys.go | 663 -
 .../github.com/coreos/etcd/client/members.go | 304 -
 .../src/github.com/coreos/etcd/client/srv.go | 65 -
 .../src/github.com/coreos/etcd/client/util.go | 23 -
 .../src/github.com/coreos/etcd/pkg/crc/crc.go | 43 -
 .../coreos/etcd/pkg/fileutil/fileutil.go | 75 -
 .../coreos/etcd/pkg/fileutil/lock.go | 29 -
 .../coreos/etcd/pkg/fileutil/lock_plan9.go | 79 -
 .../coreos/etcd/pkg/fileutil/lock_solaris.go | 87 -
 .../coreos/etcd/pkg/fileutil/lock_unix.go | 65 -
 .../coreos/etcd/pkg/fileutil/lock_windows.go | 60 -
 .../pkg/fileutil/perallocate_unsupported.go | 28 -
 .../coreos/etcd/pkg/fileutil/preallocate.go | 42 -
 .../coreos/etcd/pkg/fileutil/purge.go | 80 -
 .../coreos/etcd/pkg/fileutil/sync.go | 26 -
 .../coreos/etcd/pkg/fileutil/sync_linux.go | 29 -
 .../github.com/coreos/etcd/pkg/idutil/id.go | 78 -
 .../coreos/etcd/pkg/pathutil/path.go | 31 -
 .../coreos/etcd/pkg/pbutil/pbutil.go | 60 -
 .../github.com/coreos/etcd/pkg/types/doc.go | 17 -
 .../github.com/coreos/etcd/pkg/types/id.go | 41 -
 .../github.com/coreos/etcd/pkg/types/set.go | 178 -
 .../github.com/coreos/etcd/pkg/types/slice.go | 22 -
 .../github.com/coreos/etcd/pkg/types/urls.go | 74 -
 .../coreos/etcd/pkg/types/urlsmap.go | 93 -
 .../src/github.com/coreos/etcd/raft/design.md | 57 -
 vendor/src/github.com/coreos/etcd/raft/doc.go | 293 -
 vendor/src/github.com/coreos/etcd/raft/log.go | 361 -
 .../coreos/etcd/raft/log_unstable.go | 139 -
 .../src/github.com/coreos/etcd/raft/logger.go | 126 -
 .../src/github.com/coreos/etcd/raft/node.go | 488 -
 .../github.com/coreos/etcd/raft/progress.go | 245 -
 .../src/github.com/coreos/etcd/raft/raft.go | 898 -
 .../coreos/etcd/raft/raftpb/raft.pb.go | 1768 -
 .../coreos/etcd/raft/raftpb/raft.proto | 86 -
 .../github.com/coreos/etcd/raft/rawnode.go | 228 -
 .../src/github.com/coreos/etcd/raft/status.go | 76 -
 .../github.com/coreos/etcd/raft/storage.go | 252 -
 .../src/github.com/coreos/etcd/raft/util.go | 116 -
 vendor/src/github.com/coreos/etcd/snap/db.go | 74 -
 .../github.com/coreos/etcd/snap/message.go | 59 -
 .../github.com/coreos/etcd/snap/metrics.go | 41 -
 .../coreos/etcd/snap/snappb/snap.pb.go | 332 -
 .../coreos/etcd/snap/snappb/snap.proto | 14 -
 .../coreos/etcd/snap/snapshotter.go | 189 -
 .../src/github.com/coreos/etcd/wal/decoder.go | 103 -
 vendor/src/github.com/coreos/etcd/wal/doc.go | 68 -
 .../src/github.com/coreos/etcd/wal/encoder.go | 89 -
 .../src/github.com/coreos/etcd/wal/metrics.go | 38 -
 .../coreos/etcd/wal/multi_readcloser.go | 45 -
 .../src/github.com/coreos/etcd/wal/repair.go | 106 -
 vendor/src/github.com/coreos/etcd/wal/util.go | 93 -
 vendor/src/github.com/coreos/etcd/wal/wal.go | 562 -
 .../coreos/etcd/wal/walpb/record.go | 29 -
 .../coreos/etcd/wal/walpb/record.pb.go | 495 -
 .../coreos/etcd/wal/walpb/record.proto | 20 -
 .../src/github.com/coreos/go-systemd/LICENSE | 191 -
 .../coreos/go-systemd/activation/files.go | 52 -
 .../coreos/go-systemd/activation/listeners.go | 62 -
 .../go-systemd/activation/packetconns.go | 37 -
 .../coreos/go-systemd/daemon/sdnotify.go | 31 -
 .../coreos/go-systemd/journal/journal.go | 178 -
 vendor/src/github.com/coreos/pkg/LICENSE | 202 -
 .../github.com/coreos/pkg/capnslog/README.md | 39 -
 .../coreos/pkg/capnslog/formatters.go | 106 -
 .../coreos/pkg/capnslog/glog_formatter.go | 96 -
 .../github.com/coreos/pkg/capnslog/init.go | 49 -
 .../coreos/pkg/capnslog/init_windows.go | 25 -
 .../coreos/pkg/capnslog/journald_formatter.go | 68 -
 .../coreos/pkg/capnslog/log_hijack.go | 39 -
 .../github.com/coreos/pkg/capnslog/logmap.go | 240 -
 .../coreos/pkg/capnslog/pkg_logger.go | 158 -
 .../coreos/pkg/capnslog/syslog_formatter.go | 65 -
 .../github.com/deckarep/golang-set/.gitignore | 22 -
 .../deckarep/golang-set/.travis.yml | 9 -
 .../github.com/deckarep/golang-set/LICENSE | 22 -
 .../github.com/deckarep/golang-set/README.md | 94 -
 .../src/github.com/deckarep/golang-set/set.go | 168 -
 .../deckarep/golang-set/threadsafe.go | 204 -
 .../deckarep/golang-set/threadunsafe.go | 246 -
 .../github.com/docker/containerd/LICENSE.code | 191 -
 .../github.com/docker/containerd/LICENSE.docs | 425 -
 .../containerd/api/grpc/types/api.pb.go | 1498 -
 .../containerd/api/grpc/types/api.proto | 320 -
 .../github.com/docker/distribution/.gitignore | 37 -
 .../github.com/docker/distribution/.mailmap | 18 -
 .../github.com/docker/distribution/AUTHORS | 147 -
 .../docker/distribution/BUILDING.md | 119 -
 .../docker/distribution/CHANGELOG.md | 35 -
 .../docker/distribution/CONTRIBUTING.md | 140 -
 .../github.com/docker/distribution/Dockerfile | 18 -
 .../github.com/docker/distribution/LICENSE | 202 -
 .../docker/distribution/MAINTAINERS | 58 -
 .../github.com/docker/distribution/Makefile | 106 -
 .../github.com/docker/distribution/README.md | 131 -
 .../github.com/docker/distribution/ROADMAP.md | 267 -
 .../github.com/docker/distribution/blobs.go | 245 -
 .../github.com/docker/distribution/circle.yml | 89 -
 .../docker/distribution/context/context.go | 85 -
 .../docker/distribution/context/doc.go | 89 -
 .../docker/distribution/context/http.go | 364 -
 .../docker/distribution/context/logger.go | 116 -
 .../docker/distribution/context/trace.go | 104 -
 .../docker/distribution/context/util.go | 24 -
 .../docker/distribution/context/version.go | 16 -
 .../docker/distribution/coverpkg.sh | 7 -
 .../docker/distribution/digest/digest.go | 139 -
 .../docker/distribution/digest/digester.go | 155 -
 .../docker/distribution/digest/doc.go | 42 -
 .../docker/distribution/digest/set.go | 245 -
 .../docker/distribution/digest/verifiers.go | 44 -
 .../src/github.com/docker/distribution/doc.go | 7 -
 .../github.com/docker/distribution/errors.go | 115 -
 .../docker/distribution/manifest/doc.go | 1 -
 .../manifest/manifestlist/manifestlist.go | 155 -
 .../manifest/schema1/config_builder.go | 283 -
 .../distribution/manifest/schema1/manifest.go | 184 -
 .../manifest/schema1/reference_builder.go | 98 -
 .../distribution/manifest/schema1/sign.go | 68 -
 .../distribution/manifest/schema1/verify.go | 32 -
 .../distribution/manifest/schema2/builder.go | 80 -
 .../distribution/manifest/schema2/manifest.go | 128 -
 .../docker/distribution/manifest/versioned.go | 12 -
 .../docker/distribution/manifests.go | 117 -
 .../distribution/reference/reference.go | 334 -
 .../docker/distribution/reference/regexp.go | 124 -
 .../docker/distribution/registry.go | 97 -
 .../registry/api/errcode/errors.go | 267 -
 .../registry/api/errcode/handler.go | 44 -
 .../registry/api/errcode/register.go | 138 -
 .../registry/api/v2/descriptors.go | 1569 -
 .../distribution/registry/api/v2/doc.go | 9 -
 .../distribution/registry/api/v2/errors.go | 136 -
 .../distribution/registry/api/v2/routes.go | 49 -
 .../distribution/registry/api/v2/urls.go | 251 -
 .../registry/client/auth/api_version.go | 58 -
 .../registry/client/auth/authchallenge.go | 220 -
 .../registry/client/auth/session.go | 497 -
 .../registry/client/blob_writer.go | 162 -
 .../distribution/registry/client/errors.go | 107 -
 .../registry/client/repository.go | 863 -
 .../registry/client/transport/http_reader.go | 250 -
 .../registry/client/transport/transport.go | 147 -
 .../registry/storage/cache/cache.go | 35 -
 .../cache/cachedblobdescriptorstore.go | 101 -
 .../registry/storage/cache/memory/memory.go | 170 -
 .../github.com/docker/distribution/tags.go | 27 -
 .../docker/distribution/uuid/uuid.go | 126 -
 .../docker/docker-credential-helpers/LICENSE | 20 -
 .../client/client.go | 70 -
 .../client/command.go | 37 -
 .../credentials/credentials.go | 129 -
 .../credentials/error.go | 37 -
 .../credentials/helper.go | 12 -
 .../src/github.com/docker/engine-api/LICENSE | 191 -
 .../engine-api/client/checkpoint_create.go | 13 -
 .../engine-api/client/checkpoint_delete.go | 12 -
 .../engine-api/client/checkpoint_list.go | 22 -
 .../docker/engine-api/client/client.go | 153 -
 .../docker/engine-api/client/client_darwin.go | 4 -
 .../docker/engine-api/client/client_unix.go | 6 -
 .../engine-api/client/client_windows.go | 4 -
 .../engine-api/client/container_attach.go | 34 -
 .../engine-api/client/container_commit.go | 53 -
 .../engine-api/client/container_copy.go | 97 -
 .../engine-api/client/container_create.go | 46 -
 .../engine-api/client/container_diff.go | 23 -
 .../engine-api/client/container_exec.go | 49 -
 .../engine-api/client/container_export.go | 20 -
 .../engine-api/client/container_inspect.go | 54 -
 .../engine-api/client/container_kill.go | 17 -
 .../engine-api/client/container_list.go | 56 -
 .../engine-api/client/container_logs.go | 52 -
 .../engine-api/client/container_pause.go | 10 -
 .../engine-api/client/container_remove.go | 27 -
 .../engine-api/client/container_rename.go | 16 -
 .../engine-api/client/container_resize.go | 29 -
 .../engine-api/client/container_restart.go | 22 -
 .../engine-api/client/container_start.go | 21 -
 .../engine-api/client/container_stats.go | 24 -
 .../engine-api/client/container_stop.go | 21 -
 .../docker/engine-api/client/container_top.go | 28 -
 .../engine-api/client/container_unpause.go | 10 -
 .../engine-api/client/container_update.go | 13 -
 .../engine-api/client/container_wait.go | 26 -
 .../docker/engine-api/client/errors.go | 203 -
 .../docker/engine-api/client/events.go | 48 -
 .../docker/engine-api/client/hijack.go | 174 -
 .../docker/engine-api/client/image_build.go | 119 -
 .../docker/engine-api/client/image_create.go | 34 -
 .../docker/engine-api/client/image_history.go | 22 -
 .../docker/engine-api/client/image_import.go | 37 -
 .../docker/engine-api/client/image_inspect.go | 38 -
 .../docker/engine-api/client/image_list.go | 40 -
 .../docker/engine-api/client/image_load.go | 30 -
 .../docker/engine-api/client/image_pull.go | 46 -
 .../docker/engine-api/client/image_push.go | 54 -
 .../docker/engine-api/client/image_remove.go | 31 -
 .../docker/engine-api/client/image_save.go | 22 -
 .../docker/engine-api/client/image_search.go | 51 -
 .../docker/engine-api/client/image_tag.go | 34 -
 .../docker/engine-api/client/info.go | 26 -
 .../docker/engine-api/client/interface.go | 135 -
 .../client/interface_experimental.go | 37 -
 .../engine-api/client/interface_stable.go | 11 -
 .../docker/engine-api/client/login.go | 28 -
 .../engine-api/client/network_connect.go | 18 -
 .../engine-api/client/network_create.go | 25 -
 .../engine-api/client/network_disconnect.go | 14 -
 .../engine-api/client/network_inspect.go | 38 -
 .../docker/engine-api/client/network_list.go | 31 -
 .../engine-api/client/network_remove.go | 10 -
 .../docker/engine-api/client/node_inspect.go | 33 -
 .../docker/engine-api/client/node_list.go | 36 -
 .../docker/engine-api/client/node_remove.go | 21 -
 .../docker/engine-api/client/node_update.go | 18 -
 .../engine-api/client/plugin_disable.go | 14 -
 .../docker/engine-api/client/plugin_enable.go | 14 -
 .../engine-api/client/plugin_inspect.go | 22 -
 .../engine-api/client/plugin_install.go | 59 -
 .../docker/engine-api/client/plugin_list.go | 23 -
 .../docker/engine-api/client/plugin_push.go | 15 -
 .../docker/engine-api/client/plugin_remove.go | 14 -
 .../docker/engine-api/client/plugin_set.go | 14 -
 .../docker/engine-api/client/request.go | 207 -
 .../engine-api/client/service_create.go | 30 -
 .../engine-api/client/service_inspect.go | 33 -
 .../docker/engine-api/client/service_list.go | 35 -
 .../engine-api/client/service_remove.go | 10 -
 .../engine-api/client/service_update.go | 30 -
 .../docker/engine-api/client/swarm_init.go | 21 -
 .../docker/engine-api/client/swarm_inspect.go | 21 -
 .../docker/engine-api/client/swarm_join.go | 13 -
 .../docker/engine-api/client/swarm_leave.go | 18 -
 .../docker/engine-api/client/swarm_update.go | 21 -
 .../docker/engine-api/client/task_inspect.go | 34 -
 .../docker/engine-api/client/task_list.go | 35 -
 .../client/transport/cancellable/LICENSE | 27 -
 .../client/transport/cancellable/canceler.go | 23 -
 .../transport/cancellable/canceler_go14.go | 27 -
 .../transport/cancellable/cancellable.go | 113 -
 .../engine-api/client/transport/client.go | 47 -
 .../engine-api/client/transport/transport.go | 57 -
 .../docker/engine-api/client/version.go | 21 -
 .../docker/engine-api/client/volume_create.go | 20 -
 .../engine-api/client/volume_inspect.go | 38 -
 .../docker/engine-api/client/volume_list.go | 32 -
 .../docker/engine-api/client/volume_remove.go | 10 -
 .../docker/engine-api/types/auth.go | 22 -
 .../docker/engine-api/types/blkiodev/blkio.go | 23 -
 .../docker/engine-api/types/client.go | 291 -
 .../docker/engine-api/types/configs.go | 53 -
 .../engine-api/types/container/config.go | 62 -
 .../engine-api/types/container/host_config.go | 320 -
 .../types/container/hostconfig_unix.go | 81 -
 .../types/container/hostconfig_windows.go | 87 -
 .../docker/engine-api/types/errors.go | 6 -
 .../docker/engine-api/types/events/events.go | 42 -
 .../docker/engine-api/types/filters/parse.go | 307 -
 .../engine-api/types/network/network.go | 53 -
 .../docker/engine-api/types/plugin.go | 169 -
 .../types/reference/image_reference.go | 34 -
 .../engine-api/types/registry/registry.go | 99 -
 .../docker/engine-api/types/seccomp.go | 73 -
 .../docker/engine-api/types/stats.go | 115 -
 .../engine-api/types/strslice/strslice.go | 30 -
 .../docker/engine-api/types/swarm/common.go | 21 -
 .../engine-api/types/swarm/container.go | 67 -
 .../docker/engine-api/types/swarm/network.go | 99 -
 .../docker/engine-api/types/swarm/node.go | 107 -
 .../docker/engine-api/types/swarm/service.go | 73 -
 .../docker/engine-api/types/swarm/swarm.go | 141 -
 .../docker/engine-api/types/swarm/task.go | 115 -
 .../engine-api/types/time/duration_convert.go | 12 -
 .../docker/engine-api/types/time/timestamp.go | 124 -
 .../docker/engine-api/types/types.go | 515 -
 .../engine-api/types/versions/README.md | 14 -
 .../engine-api/types/versions/compare.go | 62 -
 .../engine-api/types/versions/v1p19/types.go | 35 -
 .../engine-api/types/versions/v1p20/types.go | 40 -
 .../github.com/docker/go-connections/LICENSE | 191 -
 .../docker/go-connections/nat/nat.go | 243 -
 .../docker/go-connections/nat/parse.go | 56 -
 .../docker/go-connections/nat/sort.go | 96 -
 .../docker/go-connections/sockets/README.md | 0
 .../go-connections/sockets/inmem_socket.go | 81 -
 .../docker/go-connections/sockets/proxy.go | 51 -
 .../docker/go-connections/sockets/sockets.go | 42 -
 .../go-connections/sockets/sockets_unix.go | 15 -
 .../go-connections/sockets/sockets_windows.go | 13 -
 .../go-connections/sockets/tcp_socket.go | 22 -
 .../go-connections/sockets/unix_socket.go | 80 -
 .../docker/go-connections/tlsconfig/config.go | 122 -
 .../tlsconfig/config_client_ciphers.go | 17 -
 .../tlsconfig/config_legacy_client_ciphers.go | 15 -
 .../github.com/docker/go-events/.gitignore | 24 -
 .../docker/go-events/CONTRIBUTING.md | 70 -
 .../src/github.com/docker/go-events/LICENSE | 201 -
 .../github.com/docker/go-events/MAINTAINERS | 46 -
 .../src/github.com/docker/go-events/README.md | 117 -
 .../github.com/docker/go-events/broadcast.go | 158 -
 .../github.com/docker/go-events/channel.go | 47 -
 .../src/github.com/docker/go-events/errors.go | 10 -
 .../src/github.com/docker/go-events/event.go | 15 -
 .../src/github.com/docker/go-events/filter.go | 52 -
 .../src/github.com/docker/go-events/queue.go | 111 -
 .../src/github.com/docker/go-events/retry.go | 249 -
 vendor/src/github.com/docker/go-units/LICENSE | 191 -
 .../src/github.com/docker/go-units/README.md | 13 -
 .../src/github.com/docker/go-units/circle.yml | 11 -
 .../github.com/docker/go-units/duration.go | 33 -
 vendor/src/github.com/docker/go-units/size.go | 95 -
 .../src/github.com/docker/go-units/ulimit.go | 109 -
 vendor/src/github.com/docker/go/LICENSE | 27 -
 .../docker/go/canonical/json/decode.go | 1094 -
 .../docker/go/canonical/json/encode.go | 1245 -
 .../docker/go/canonical/json/fold.go | 143 -
 .../docker/go/canonical/json/indent.go | 137 -
 .../docker/go/canonical/json/scanner.go | 630 -
 .../docker/go/canonical/json/stream.go | 487 -
 .../docker/go/canonical/json/tags.go | 44 -
 .../src/github.com/docker/libkv/.travis.yml | 31 -
 .../src/github.com/docker/libkv/LICENSE.code | 191 -
 .../src/github.com/docker/libkv/LICENSE.docs | 425 -
 .../src/github.com/docker/libkv/MAINTAINERS | 46 -
 vendor/src/github.com/docker/libkv/README.md | 107 -
 vendor/src/github.com/docker/libkv/libkv.go | 40 -
 .../docker/libkv/store/boltdb/boltdb.go | 469 -
 .../docker/libkv/store/consul/consul.go | 558 -
 .../docker/libkv/store/etcd/etcd.go | 606 -
 .../github.com/docker/libkv/store/helpers.go | 47 -
 .../github.com/docker/libkv/store/store.go | 132 -
 .../docker/libkv/store/zookeeper/zookeeper.go | 429 -
 .../docker/libnetwork/.dockerignore | 1 -
 .../github.com/docker/libnetwork/.gitignore | 39 -
 .../github.com/docker/libnetwork/CHANGELOG.md | 199 -
 .../docker/libnetwork/Dockerfile.build | 7 -
 .../src/github.com/docker/libnetwork/LICENSE | 202 -
 .../github.com/docker/libnetwork/MAINTAINERS | 64 -
 .../src/github.com/docker/libnetwork/Makefile | 108 -
 .../github.com/docker/libnetwork/README.md | 89 -
 .../github.com/docker/libnetwork/ROADMAP.md | 20 -
 .../src/github.com/docker/libnetwork/agent.go | 638 -
 .../github.com/docker/libnetwork/agent.pb.go | 1003 -
 .../github.com/docker/libnetwork/agent.proto | 72 -
 .../docker/libnetwork/bitseq/sequence.go | 678 -
 .../docker/libnetwork/bitseq/store.go | 141 -
 .../github.com/docker/libnetwork/circle.yml | 17 -
 .../docker/libnetwork/cluster/provider.go | 12 -
 .../docker/libnetwork/cmd/proxy/main.go | 67 -
 .../docker/libnetwork/cmd/proxy/proxy.go | 37 -
 .../docker/libnetwork/cmd/proxy/stub_proxy.go | 31 -
 .../docker/libnetwork/cmd/proxy/tcp_proxy.go | 96 -
 .../docker/libnetwork/cmd/proxy/udp_proxy.go | 169 -
 .../docker/libnetwork/config/config.go | 264 -
 .../docker/libnetwork/config/libnetwork.toml | 12 -
 .../docker/libnetwork/controller.go | 1107 -
 .../docker/libnetwork/datastore/cache.go | 177 -
 .../docker/libnetwork/datastore/datastore.go | 625 -
 .../docker/libnetwork/datastore/mock_store.go | 129 -
 .../docker/libnetwork/default_gateway.go | 174 -
 .../libnetwork/default_gateway_freebsd.go | 7 -
 .../libnetwork/default_gateway_linux.go | 26 -
 .../libnetwork/default_gateway_solaris.go | 7 -
 .../libnetwork/default_gateway_windows.go | 7 -
 .../libnetwork/discoverapi/discoverapi.go | 60 -
 .../docker/libnetwork/driverapi/driverapi.go | 171 -
 .../docker/libnetwork/driverapi/errors.go | 56 -
 .../docker/libnetwork/driverapi/ipamdata.go | 103 -
 .../libnetwork/drivers/bridge/bridge.go | 1455 -
 .../libnetwork/drivers/bridge/bridge_store.go | 378 -
 .../libnetwork/drivers/bridge/errors.go | 341 -
 .../libnetwork/drivers/bridge/interface.go | 85 -
 .../libnetwork/drivers/bridge/labels.go | 18 -
 .../docker/libnetwork/drivers/bridge/link.go | 85 -
 .../bridge/netlink_deprecated_linux.go | 131 -
 .../netlink_deprecated_linux_armppc64.go | 7 -
 .../bridge/netlink_deprecated_linux_notarm.go | 7 -
 .../bridge/netlink_deprecated_unsupported.go | 18 -
 .../libnetwork/drivers/bridge/port_mapping.go | 128 -
 .../libnetwork/drivers/bridge/resolvconf.go | 67 -
 .../docker/libnetwork/drivers/bridge/setup.go | 26 -
 .../bridge/setup_bridgenetfiltering.go | 162 -
 .../libnetwork/drivers/bridge/setup_device.go | 68 -
 .../drivers/bridge/setup_firewalld.go | 20 -
 .../drivers/bridge/setup_ip_forwarding.go | 29 -
 .../drivers/bridge/setup_ip_tables.go | 348 -
 .../libnetwork/drivers/bridge/setup_ipv4.go | 62 -
 .../libnetwork/drivers/bridge/setup_ipv6.go | 119 -
 .../libnetwork/drivers/bridge/setup_verify.go | 53 -
 .../docker/libnetwork/drivers/host/host.go | 97 -
 .../libnetwork/drivers/ipvlan/ipvlan.go | 106 -
 .../drivers/ipvlan/ipvlan_endpoint.go | 87 -
 .../drivers/ipvlan/ipvlan_joinleave.go | 199 -
 .../drivers/ipvlan/ipvlan_network.go | 240 -
 .../libnetwork/drivers/ipvlan/ipvlan_setup.go | 205 -
 .../libnetwork/drivers/ipvlan/ipvlan_state.go | 115 -
 .../libnetwork/drivers/ipvlan/ipvlan_store.go | 349 -
 .../libnetwork/drivers/macvlan/macvlan.go | 108 -
 .../drivers/macvlan/macvlan_endpoint.go | 94 -
 .../drivers/macvlan/macvlan_joinleave.go | 144 -
 .../drivers/macvlan/macvlan_network.go | 248 -
 .../drivers/macvlan/macvlan_setup.go | 209 -
 .../drivers/macvlan/macvlan_state.go | 113 -
 .../drivers/macvlan/macvlan_store.go | 351 -
 .../docker/libnetwork/drivers/null/null.go | 97 -
 .../libnetwork/drivers/overlay/encryption.go | 564 -
 .../libnetwork/drivers/overlay/filter.go | 142 -
 .../libnetwork/drivers/overlay/joinleave.go | 226 -
 .../libnetwork/drivers/overlay/ov_endpoint.go | 259 -
 .../libnetwork/drivers/overlay/ov_network.go | 971 -
 .../libnetwork/drivers/overlay/ov_serf.go | 233 -
 .../libnetwork/drivers/overlay/ov_utils.go | 155 -
 .../libnetwork/drivers/overlay/overlay.go | 377 -
 .../libnetwork/drivers/overlay/overlay.pb.go | 468 -
 .../libnetwork/drivers/overlay/overlay.proto | 27 -
 .../drivers/overlay/ovmanager/ovmanager.go | 248 -
 .../libnetwork/drivers/overlay/peerdb.go | 365 -
 .../libnetwork/drivers/remote/api/api.go | 188 -
 .../libnetwork/drivers/remote/driver.go | 368 -
 .../libnetwork/drivers/windows/labels.go | 18 -
 .../libnetwork/drivers/windows/windows.go | 595 -
 .../libnetwork/drivers_experimental_linux.go | 11 -
 .../docker/libnetwork/drivers_freebsd.go | 13 -
 .../docker/libnetwork/drivers_ipam.go | 23 -
 .../docker/libnetwork/drivers_linux.go | 24 -
 .../docker/libnetwork/drivers_solaris.go | 5 -
 .../docker/libnetwork/drivers_stub_linux.go | 7 -
 .../docker/libnetwork/drivers_windows.go | 16 -
 .../libnetwork/drvregistry/drvregistry.go | 219 -
 .../github.com/docker/libnetwork/endpoint.go | 1103 -
 .../docker/libnetwork/endpoint_cnt.go | 178 -
 .../docker/libnetwork/endpoint_info.go | 471 -
 .../src/github.com/docker/libnetwork/error.go | 185 -
 .../docker/libnetwork/etchosts/etchosts.go | 208 -
 .../libnetwork/hostdiscovery/hostdiscovery.go | 121 -
 .../hostdiscovery/hostdiscovery_api.go | 22 -
 .../libnetwork/hostdiscovery/libnetwork.toml | 6 -
 .../github.com/docker/libnetwork/idm/idm.go | 60 -
 .../docker/libnetwork/ipam/allocator.go | 589 -
 .../docker/libnetwork/ipam/store.go | 136 -
 .../docker/libnetwork/ipam/structures.go | 362 -
 .../docker/libnetwork/ipam/utils.go | 81 -
 .../docker/libnetwork/ipamapi/contract.go | 90 -
 .../libnetwork/ipams/builtin/builtin_unix.go | 43 -
 .../ipams/builtin/builtin_windows.go | 16 -
 .../docker/libnetwork/ipams/null/null.go | 71 -
 .../docker/libnetwork/ipams/remote/api/api.go | 94 -
 .../docker/libnetwork/ipams/remote/remote.go | 139 -
 .../ipams/windowsipam/windowsipam.go | 100 -
 .../docker/libnetwork/ipamutils/utils.go | 50 -
 .../docker/libnetwork/iptables/firewalld.go | 168 -
 .../docker/libnetwork/iptables/iptables.go | 431 -
 .../docker/libnetwork/ipvs/constants.go | 130 -
 .../github.com/docker/libnetwork/ipvs/ipvs.go | 113 -
 .../docker/libnetwork/ipvs/netlink.go | 234 -
 .../src/github.com/docker/libnetwork/machines | 111 -
 .../docker/libnetwork/netlabel/labels.go | 123 -
 .../docker/libnetwork/netutils/utils.go | 194 -
 .../libnetwork/netutils/utils_freebsd.go | 21 -
 .../docker/libnetwork/netutils/utils_linux.go | 125 -
 .../libnetwork/netutils/utils_solaris.go | 32 -
 .../libnetwork/netutils/utils_windows.go | 21 -
 .../github.com/docker/libnetwork/network.go | 1530 -
 .../docker/libnetwork/networkdb/broadcast.go | 163 -
 .../docker/libnetwork/networkdb/cluster.go | 619 -
 .../docker/libnetwork/networkdb/delegate.go | 463 -
 .../libnetwork/networkdb/event_delegate.go | 38 -
 .../docker/libnetwork/networkdb/message.go | 102 -
 .../docker/libnetwork/networkdb/networkdb.go | 559 -
 .../libnetwork/networkdb/networkdb.pb.go | 2554 -
 .../libnetwork/networkdb/networkdb.proto | 183 -
 .../docker/libnetwork/networkdb/watch.go | 98 -
 .../docker/libnetwork/ns/init_linux.go | 86 -
 .../docker/libnetwork/options/options.go | 88 -
 .../libnetwork/osl/interface_freebsd.go | 4 -
 .../docker/libnetwork/osl/interface_linux.go | 441 -
 .../libnetwork/osl/interface_solaris.go | 4 -
 .../libnetwork/osl/interface_windows.go | 4 -
 .../docker/libnetwork/osl/namespace_linux.go | 500 -
 .../libnetwork/osl/namespace_unsupported.go | 16 -
 .../libnetwork/osl/namespace_windows.go | 43 -
 .../docker/libnetwork/osl/neigh_freebsd.go | 4 -
 .../docker/libnetwork/osl/neigh_linux.go | 150 -
 .../docker/libnetwork/osl/neigh_solaris.go | 4 -
 .../docker/libnetwork/osl/neigh_windows.go | 4 -
 .../docker/libnetwork/osl/options_linux.go | 79 -
 .../docker/libnetwork/osl/route_linux.go | 203 -
 .../docker/libnetwork/osl/sandbox.go | 171 -
 .../docker/libnetwork/osl/sandbox_freebsd.go | 44 -
 .../libnetwork/osl/sandbox_unsupported.go | 22 -
 .../libnetwork/portallocator/portallocator.go | 270 -
 .../docker/libnetwork/portmapper/mapper.go | 234 -
 .../libnetwork/portmapper/mock_proxy.go | 18 -
 .../docker/libnetwork/portmapper/proxy.go | 148 -
 .../docker/libnetwork/resolvconf/README.md | 1 -
 .../libnetwork/resolvconf/dns/resolvconf.go | 17 -
 .../libnetwork/resolvconf/resolvconf.go | 247 -
 .../github.com/docker/libnetwork/resolver.go | 435 -
 .../docker/libnetwork/resolver_unix.go | 101 -
 .../docker/libnetwork/resolver_windows.go | 7 -
 .../github.com/docker/libnetwork/sandbox.go | 1235 -
 .../docker/libnetwork/sandbox_dns_unix.go | 354 -
 .../docker/libnetwork/sandbox_dns_windows.go | 35 -
 .../docker/libnetwork/sandbox_externalkey.go | 12 -
 .../libnetwork/sandbox_externalkey_solaris.go | 45 -
 .../libnetwork/sandbox_externalkey_unix.go | 177 -
 .../libnetwork/sandbox_externalkey_windows.go | 45 -
 .../docker/libnetwork/sandbox_store.go | 282 -
 .../github.com/docker/libnetwork/service.go | 64 -
 .../docker/libnetwork/service_linux.go | 952 -
 .../docker/libnetwork/service_unsupported.go | 25 -
 .../src/github.com/docker/libnetwork/store.go | 483 -
 .../github.com/docker/libnetwork/support.sh | 36 -
 .../docker/libnetwork/types/types.go | 636 -
 .../github.com/docker/libnetwork/wrapmake.sh | 11 -
 .../docker/libtrust/CONTRIBUTING.md | 13 -
 vendor/src/github.com/docker/libtrust/LICENSE | 191 -
 .../github.com/docker/libtrust/MAINTAINERS | 3 -
 .../src/github.com/docker/libtrust/README.md | 18 -
 .../docker/libtrust/certificates.go | 175 -
 vendor/src/github.com/docker/libtrust/doc.go | 9 -
 .../src/github.com/docker/libtrust/ec_key.go | 428 -
 .../src/github.com/docker/libtrust/filter.go | 50 -
 vendor/src/github.com/docker/libtrust/hash.go | 56 -
 .../github.com/docker/libtrust/jsonsign.go | 657 -
 vendor/src/github.com/docker/libtrust/key.go | 253 -
 .../github.com/docker/libtrust/key_files.go | 255 -
 .../github.com/docker/libtrust/key_manager.go | 175 -
 .../src/github.com/docker/libtrust/rsa_key.go | 427 -
 vendor/src/github.com/docker/libtrust/util.go | 363 -
 .../src/github.com/docker/notary/.gitignore | 11 -
 .../src/github.com/docker/notary/CHANGELOG.md | 26 -
 .../github.com/docker/notary/CONTRIBUTING.md | 85 -
 .../src/github.com/docker/notary/CONTRIBUTORS | 4 -
 .../src/github.com/docker/notary/Dockerfile | 35 -
 vendor/src/github.com/docker/notary/LICENSE | 201 -
 .../src/github.com/docker/notary/MAINTAINERS | 58 -
 vendor/src/github.com/docker/notary/Makefile | 214 -
 .../github.com/docker/notary/NOTARY_VERSION | 1 -
 vendor/src/github.com/docker/notary/README.md | 99 -
 .../src/github.com/docker/notary/ROADMAP.md | 7 -
 .../src/github.com/docker/notary/circle.yml | 87 -
 .../docker/notary/client/changelist/change.go | 101 -
 .../notary/client/changelist/changelist.go | 59 -
 .../client/changelist/file_changelist.go | 176 -
 .../notary/client/changelist/interface.go | 70 -
 .../github.com/docker/notary/client/client.go | 927 -
 .../docker/notary/client/delegations.go | 294 -
 .../docker/notary/client/helpers.go | 237 -
 .../github.com/docker/notary/client/repo.go | 29 -
 .../docker/notary/client/repo_pkcs11.go | 34 -
 .../src/github.com/docker/notary/codecov.yml | 18 -
 vendor/src/github.com/docker/notary/const.go | 68 -
 .../src/github.com/docker/notary/coverpkg.sh | 10 -
 .../notary/cryptoservice/certificate.go | 41 -
 .../notary/cryptoservice/crypto_service.go | 155 -
 .../notary/cryptoservice/import_export.go | 313 -
 .../docker/notary/development.rethink.yml | 113 -
 .../github.com/docker/notary/development.yml | 36 -
 .../docker/notary/docker-compose.rethink.yml | 102 -
 .../docker/notary/docker-compose.yml | 34 -
 .../docker/notary/passphrase/passphrase.go | 201 -
 .../docker/notary/server.Dockerfile | 25 -
 .../docker/notary/signer.Dockerfile | 28 -
 .../docker/notary/trustmanager/filestore.go | 150 -
 .../notary/trustmanager/keyfilestore.go | 497 -
 .../docker/notary/trustmanager/keystore.go | 59 -
 .../docker/notary/trustmanager/memorystore.go | 67 -
 .../docker/notary/trustmanager/store.go | 42 -
 .../docker/notary/trustmanager/x509utils.go | 524 -
 .../notary/trustmanager/yubikey/non_pkcs11.go | 9 -
 .../trustmanager/yubikey/pkcs11_darwin.go | 9 -
 .../trustmanager/yubikey/pkcs11_interface.go | 40 -
 .../trustmanager/yubikey/pkcs11_linux.go | 10 -
 .../trustmanager/yubikey/yubikeystore.go | 920 -
 .../docker/notary/trustpinning/certs.go | 268 -
 .../docker/notary/trustpinning/trustpin.go | 114 -
 .../src/github.com/docker/notary/tuf/LICENSE | 30 -
 .../github.com/docker/notary/tuf/README.md | 36 -
 .../github.com/docker/notary/tuf/builder.go | 673 -
 .../docker/notary/tuf/client/client.go | 229 -
 .../docker/notary/tuf/client/errors.go | 14 -
 .../docker/notary/tuf/data/errors.go | 44 -
 .../github.com/docker/notary/tuf/data/keys.go | 528 -
 .../docker/notary/tuf/data/roles.go | 313 -
 .../github.com/docker/notary/tuf/data/root.go | 171 -
 .../docker/notary/tuf/data/serializer.go | 36 -
 .../docker/notary/tuf/data/snapshot.go | 169 -
 .../docker/notary/tuf/data/targets.go | 198 -
 .../docker/notary/tuf/data/timestamp.go | 136 -
 .../docker/notary/tuf/data/types.go | 276 -
 .../docker/notary/tuf/signed/ed25519.go | 107 -
 .../docker/notary/tuf/signed/errors.go | 91 -
 .../docker/notary/tuf/signed/interface.go | 49 -
 .../docker/notary/tuf/signed/sign.go | 113 -
 .../docker/notary/tuf/signed/verifiers.go | 283 -
 .../docker/notary/tuf/signed/verify.go | 107 -
 .../docker/notary/tuf/store/errors.go | 13 -
 .../docker/notary/tuf/store/filestore.go | 102 -
 .../docker/notary/tuf/store/httpstore.go | 297 -
 .../docker/notary/tuf/store/interfaces.go | 31 -
 .../docker/notary/tuf/store/memorystore.go | 107 -
 .../docker/notary/tuf/store/offlinestore.go | 53 -
 .../src/github.com/docker/notary/tuf/tuf.go | 1069 -
 .../docker/notary/tuf/utils/role_sort.go | 31 -
 .../docker/notary/tuf/utils/stack.go | 85 -
 .../docker/notary/tuf/utils/util.go | 109 -
 .../docker/notary/tuf/utils/utils.go | 152 -
 .../docker/notary/tuf/validation/errors.go | 126 -
 vendor/src/github.com/docker/swarmkit/LICENSE | 201 -
 .../github.com/docker/swarmkit/agent/agent.go | 356 -
 .../docker/swarmkit/agent/config.go | 49 -
 .../docker/swarmkit/agent/errors.go | 24 -
 .../docker/swarmkit/agent/exec/controller.go | 299 -
 .../agent/exec/controller_test.mock.go | 143 -
 .../docker/swarmkit/agent/exec/errors.go | 84 -
 .../docker/swarmkit/agent/exec/executor.go | 23 -
 .../docker/swarmkit/agent/helpers.go | 13 -
 .../github.com/docker/swarmkit/agent/node.go | 762 -
 .../docker/swarmkit/agent/reporter.go | 124 -
 .../docker/swarmkit/agent/session.go | 315 -
 .../docker/swarmkit/agent/storage.go | 216 -
 .../github.com/docker/swarmkit/agent/task.go | 243 -
 .../docker/swarmkit/agent/worker.go | 260 -
 .../github.com/docker/swarmkit/api/README.md | 8 -
 .../github.com/docker/swarmkit/api/ca.pb.go | 1691 -
 .../github.com/docker/swarmkit/api/ca.proto | 57 -
 .../docker/swarmkit/api/control.pb.go | 10904 --
 .../docker/swarmkit/api/control.proto | 295 -
 .../docker/swarmkit/api/dispatcher.pb.go | 2492 -
 .../docker/swarmkit/api/dispatcher.proto | 154 -
 .../swarmkit/api/duration/duration.pb.go | 456 -
 .../swarmkit/api/duration/duration.proto | 100 -
 .../docker/swarmkit/api/duration/gen.go | 3 -
 .../docker/swarmkit/api/equality/equality.go | 29 -
 .../src/github.com/docker/swarmkit/api/gen.go | 3 -
 .../docker/swarmkit/api/health.pb.go | 727 -
 .../docker/swarmkit/api/health.proto | 34 -
 .../docker/swarmkit/api/objects.pb.go | 3649 -
 .../docker/swarmkit/api/objects.proto | 224 -
 .../github.com/docker/swarmkit/api/raft.pb.go | 3264 -
 .../github.com/docker/swarmkit/api/raft.proto | 128 -
 .../docker/swarmkit/api/snapshot.pb.go | 1115 -
 .../docker/swarmkit/api/snapshot.proto | 40 -
 .../docker/swarmkit/api/specs.pb.go | 4211 -
 .../docker/swarmkit/api/specs.proto | 255 -
 .../docker/swarmkit/api/timestamp/gen.go | 3 -
 .../swarmkit/api/timestamp/timestamp.pb.go | 469 -
 .../swarmkit/api/timestamp/timestamp.proto | 121 -
 .../docker/swarmkit/api/types.pb.go | 11470 --
 .../docker/swarmkit/api/types.proto | 698 -
 .../src/github.com/docker/swarmkit/ca/auth.go | 229 -
 .../docker/swarmkit/ca/certificates.go | 761 -
 .../github.com/docker/swarmkit/ca/config.go | 580 -
 .../github.com/docker/swarmkit/ca/external.go | 141 -
 .../github.com/docker/swarmkit/ca/forward.go | 77 -
 .../github.com/docker/swarmkit/ca/server.go | 649 -
 .../docker/swarmkit/ca/transport.go | 202 -
 .../docker/swarmkit/identity/doc.go | 17 -
 .../docker/swarmkit/identity/randomid.go | 50 -
 .../docker/swarmkit/ioutils/ioutils.go | 40 -
 .../github.com/docker/swarmkit/log/context.go | 37 -
 .../github.com/docker/swarmkit/log/grpc.go | 8 -
 .../swarmkit/manager/allocator/allocator.go | 221 -
 .../docker/swarmkit/manager/allocator/doc.go | 18 -
 .../swarmkit/manager/allocator/network.go | 836 -
 .../networkallocator/networkallocator.go | 737 -
 .../networkallocator/portallocator.go | 255 -
 .../swarmkit/manager/controlapi/cluster.go | 211 -
 .../swarmkit/manager/controlapi/common.go | 86 -
 .../manager/controlapi/hackpicker/cluster.go | 12 -
 .../controlapi/hackpicker/raftpicker.go | 141 -
 .../swarmkit/manager/controlapi/network.go | 259 -
 .../swarmkit/manager/controlapi/node.go | 295 -
 .../swarmkit/manager/controlapi/server.go | 30 -
 .../swarmkit/manager/controlapi/service.go | 439 -
 .../swarmkit/manager/controlapi/task.go | 141 -
 .../swarmkit/manager/dispatcher/dispatcher.go | 801 -
 .../manager/dispatcher/heartbeat/heartbeat.go | 39 -
 .../swarmkit/manager/dispatcher/nodes.go | 198 -
 .../swarmkit/manager/dispatcher/period.go | 28 -
 .../github.com/docker/swarmkit/manager/doc.go | 1 -
 .../docker/swarmkit/manager/health/health.go | 58 -
 .../swarmkit/manager/keymanager/keymanager.go | 242 -
 .../docker/swarmkit/manager/manager.go | 725 -
 .../swarmkit/manager/orchestrator/global.go | 424 -
 .../manager/orchestrator/replicated.go | 194 -
 .../swarmkit/manager/orchestrator/restart.go | 429 -
 .../swarmkit/manager/orchestrator/services.go | 247 -
 .../manager/orchestrator/task_reaper.go | 203 -
 .../swarmkit/manager/orchestrator/tasks.go | 233 -
 .../swarmkit/manager/orchestrator/updater.go | 371 -
 .../swarmkit/manager/raftpicker/cluster.go | 12 -
 .../swarmkit/manager/raftpicker/raftpicker.go | 127 -
 .../swarmkit/manager/scheduler/constraint.go | 97 -
 .../docker/swarmkit/manager/scheduler/expr.go | 96 -
 .../swarmkit/manager/scheduler/filter.go | 131 -
 .../manager/scheduler/indexed_node_heap.go | 153 -
 .../swarmkit/manager/scheduler/nodeinfo.go | 61 -
 .../swarmkit/manager/scheduler/pipeline.go | 59 -
 .../swarmkit/manager/scheduler/scheduler.go | 478 -
 .../docker/swarmkit/manager/state/doc.go | 32 -
 .../docker/swarmkit/manager/state/proposer.go | 17 -
 .../manager/state/raft/membership/cluster.go | 221 -
 .../swarmkit/manager/state/raft/raft.go | 1476 -
 .../swarmkit/manager/state/raft/storage.go | 468 -
 .../swarmkit/manager/state/raft/util.go | 83 -
 .../swarmkit/manager/state/raft/wait.go | 73 -
 .../swarmkit/manager/state/store/apply.go | 48 -
 .../docker/swarmkit/manager/state/store/by.go | 123 -
 .../swarmkit/manager/state/store/clusters.go | 231 -
 .../manager/state/store/combinators.go | 14 -
 .../swarmkit/manager/state/store/memory.go | 736 -
 .../swarmkit/manager/state/store/networks.go | 225 -
 .../swarmkit/manager/state/store/nodes.go | 258 -
 .../swarmkit/manager/state/store/object.go | 29 -
 .../swarmkit/manager/state/store/services.go | 225 -
 .../swarmkit/manager/state/store/tasks.go | 300 -
 .../docker/swarmkit/manager/state/watch.go | 493 -
 .../swarmkit/manager/state/watch/watch.go | 48 -
 .../docker/swarmkit/picker/picker.go | 337 -
 .../docker/swarmkit/protobuf/plugin/gen.go | 3 -
 .../swarmkit/protobuf/plugin/helpers.go | 11 -
 .../swarmkit/protobuf/plugin/plugin.pb.go | 464 -
 .../swarmkit/protobuf/plugin/plugin.proto | 25 -
 .../docker/swarmkit/protobuf/ptypes/doc.go | 9 -
 .../swarmkit/protobuf/ptypes/duration.go | 102 -
 .../swarmkit/protobuf/ptypes/timestamp.go | 135 -
 .../fluent/fluent-logger-golang/LICENSE | 202 -
 .../fluent-logger-golang/fluent/fluent.go | 309 -
 .../fluent-logger-golang/fluent/proto.go | 24 -
 .../fluent-logger-golang/fluent/proto_gen.go | 372 -
 .../fluent-logger-golang/fluent/version.go | 3 -
 .../github.com/flynn-archive/go-shlex/COPYING | 202 -
 .../flynn-archive/go-shlex/Makefile | 21 -
 .../flynn-archive/go-shlex/README.md | 2 -
 .../flynn-archive/go-shlex/shlex.go | 457 -
 .../src/github.com/go-check/check/.gitignore | 4 -
 .../src/github.com/go-check/check/.travis.yml | 10 -
 vendor/src/github.com/go-check/check/LICENSE | 25 -
 .../src/github.com/go-check/check/README.md | 10 -
 vendor/src/github.com/go-check/check/TODO | 2 -
 .../github.com/go-check/check/benchmark.go | 187 -
 vendor/src/github.com/go-check/check/check.go | 892 -
 .../src/github.com/go-check/check/checkers.go | 458 -
 .../src/github.com/go-check/check/helpers.go | 231 -
 .../src/github.com/go-check/check/printer.go | 168 -
 .../src/github.com/go-check/check/reporter.go | 88 -
 vendor/src/github.com/go-check/check/run.go | 183 -
 vendor/src/github.com/go-ini/ini/.gitignore | 4 -
 vendor/src/github.com/go-ini/ini/LICENSE | 191 -
 vendor/src/github.com/go-ini/ini/README.md | 560 -
 vendor/src/github.com/go-ini/ini/README_ZH.md | 547 -
 vendor/src/github.com/go-ini/ini/ini.go | 1226 -
 vendor/src/github.com/go-ini/ini/struct.go | 350 -
 .../github.com/godbus/dbus/CONTRIBUTING.md | 50 -
 vendor/src/github.com/godbus/dbus/LICENSE | 25 -
 vendor/src/github.com/godbus/dbus/MAINTAINERS | 2 -
 .../github.com/godbus/dbus/README.markdown | 41 -
 vendor/src/github.com/godbus/dbus/auth.go | 253 -
 .../github.com/godbus/dbus/auth_external.go | 26 -
 .../src/github.com/godbus/dbus/auth_sha1.go | 102 -
 vendor/src/github.com/godbus/dbus/call.go | 36 -
 vendor/src/github.com/godbus/dbus/conn.go | 634 -
 .../src/github.com/godbus/dbus/conn_darwin.go | 21 -
 .../src/github.com/godbus/dbus/conn_other.go | 31 -
 vendor/src/github.com/godbus/dbus/dbus.go | 258 -
 vendor/src/github.com/godbus/dbus/decoder.go | 228 -
 vendor/src/github.com/godbus/dbus/doc.go | 63 -
 vendor/src/github.com/godbus/dbus/encoder.go | 208 -
 vendor/src/github.com/godbus/dbus/export.go | 468 -
 vendor/src/github.com/godbus/dbus/homedir.go | 28 -
 .../github.com/godbus/dbus/homedir_dynamic.go | 15 -
 .../github.com/godbus/dbus/homedir_static.go | 45 -
 vendor/src/github.com/godbus/dbus/message.go | 353 -
 vendor/src/github.com/godbus/dbus/object.go | 136 -
 vendor/src/github.com/godbus/dbus/sig.go | 257 -
 .../godbus/dbus/transport_darwin.go | 6 -
 .../godbus/dbus/transport_generic.go | 35 -
 .../github.com/godbus/dbus/transport_tcp.go | 43 -
 .../github.com/godbus/dbus/transport_unix.go | 196 -
 .../dbus/transport_unixcred_dragonfly.go | 95 -
 .../godbus/dbus/transport_unixcred_linux.go | 25 -
 vendor/src/github.com/godbus/dbus/variant.go | 139 -
 .../github.com/godbus/dbus/variant_lexer.go | 284 -
 .../github.com/godbus/dbus/variant_parser.go | 817 -
 vendor/src/github.com/gogo/protobuf/LICENSE | 36 -
 .../gogo/protobuf/gogoproto/Makefile | 36 -
 .../github.com/gogo/protobuf/gogoproto/doc.go | 168 -
 .../gogo/protobuf/gogoproto/gogo.pb.go | 661 -
 .../gogo/protobuf/gogoproto/gogo.pb.golden | 45 -
 .../gogo/protobuf/gogoproto/gogo.proto | 120 -
 .../gogo/protobuf/gogoproto/helper.go | 308 -
 .../github.com/gogo/protobuf/proto/Makefile | 43 -
 .../github.com/gogo/protobuf/proto/clone.go | 228 -
 .../github.com/gogo/protobuf/proto/decode.go | 872 -
 .../gogo/protobuf/proto/decode_gogo.go | 169 -
 .../github.com/gogo/protobuf/proto/encode.go | 1325 -
 .../gogo/protobuf/proto/encode_gogo.go | 354 -
 .../github.com/gogo/protobuf/proto/equal.go | 276 -
 .../gogo/protobuf/proto/extensions.go | 518 -
 .../gogo/protobuf/proto/extensions_gogo.go | 236 -
 .../src/github.com/gogo/protobuf/proto/lib.go | 894 -
 .../gogo/protobuf/proto/lib_gogo.go | 40 -
 .../gogo/protobuf/proto/message_set.go | 280 -
 .../gogo/protobuf/proto/pointer_reflect.go | 479 -
 .../gogo/protobuf/proto/pointer_unsafe.go | 266 -
 .../protobuf/proto/pointer_unsafe_gogo.go | 108 -
 .../gogo/protobuf/proto/properties.go | 923 -
 .../gogo/protobuf/proto/properties_gogo.go | 64 -
 .../gogo/protobuf/proto/skip_gogo.go | 117 -
 .../github.com/gogo/protobuf/proto/text.go | 793 -
 .../gogo/protobuf/proto/text_gogo.go | 55 -
 .../gogo/protobuf/proto/text_parser.go | 849 -
 .../protoc-gen-gogo/descriptor/Makefile | 33 -
 .../descriptor/descriptor.pb.go | 2017 -
 .../protoc-gen-gogo/descriptor/gostring.go | 635 -
 .../protoc-gen-gogo/descriptor/helper.go | 355 -
 .../gogo/protobuf/sortkeys/sortkeys.go | 99 -
 vendor/src/github.com/golang/mock/LICENSE | 202 -
 .../src/github.com/golang/mock/gomock/call.go | 268 -
 .../github.com/golang/mock/gomock/callset.go | 76 -
 .../golang/mock/gomock/controller.go | 167 -
 .../github.com/golang/mock/gomock/matchers.go | 97 -
 vendor/src/github.com/golang/protobuf/LICENSE | 31 -
 .../github.com/golang/protobuf/proto/Makefile | 43 -
 .../github.com/golang/protobuf/proto/clone.go | 223 -
 .../golang/protobuf/proto/decode.go | 867 -
 .../golang/protobuf/proto/encode.go | 1325 -
 .../github.com/golang/protobuf/proto/equal.go | 276 -
 .../golang/protobuf/proto/extensions.go | 399 -
 .../github.com/golang/protobuf/proto/lib.go | 894 -
 .../golang/protobuf/proto/message_set.go | 280 -
 .../golang/protobuf/proto/pointer_reflect.go | 479 -
 .../golang/protobuf/proto/pointer_unsafe.go | 266 -
 .../golang/protobuf/proto/properties.go | 846 -
 .../github.com/golang/protobuf/proto/text.go | 762 -
 .../golang/protobuf/proto/text_parser.go | 806 -
 .../github.com/golang/protobuf/ptypes/doc.go | 35 -
 .../golang/protobuf/ptypes/duration.go | 102 -
 .../protobuf/ptypes/duration/duration.pb.go | 106 -
 .../protobuf/ptypes/duration/duration.proto | 97 -
 .../golang/protobuf/ptypes/regen.sh | 72 -
 .../golang/protobuf/ptypes/timestamp.go | 125 -
 .../protobuf/ptypes/timestamp/timestamp.pb.go | 119 -
 .../protobuf/ptypes/timestamp/timestamp.proto | 111 -
 .../google/certificate-transparency/LICENSE | 202 -
 .../certificate-transparency/go/README.md | 25 -
 .../certificate-transparency/go/asn1/asn1.go | 956 -
 .../go/asn1/common.go | 163 -
 .../go/asn1/marshal.go | 581 -
 .../go/client/logclient.go | 357 -
 .../go/serialization.go | 512 -
 .../certificate-transparency/go/signatures.go | 131 -
 .../certificate-transparency/go/types.go | 363 -
 .../go/x509/cert_pool.go | 116 -
 .../go/x509/pem_decrypt.go | 233 -
 .../certificate-transparency/go/x509/pkcs1.go | 124 -
 .../certificate-transparency/go/x509/pkcs8.go | 56 -
 .../go/x509/pkix/pkix.go | 173 -
 .../certificate-transparency/go/x509/root.go | 17 -
 .../go/x509/root_darwin.go | 83 -
 .../go/x509/root_plan9.go | 33 -
 .../go/x509/root_stub.go | 14 -
 .../go/x509/root_unix.go | 37 -
 .../go/x509/root_windows.go | 229 -
 .../certificate-transparency/go/x509/sec1.go | 85 -
 .../go/x509/verify.go | 476 -
 .../certificate-transparency/go/x509/x509.go | 1622 -
 .../github.com/gorilla/context/.travis.yml | 7 -
 vendor/src/github.com/gorilla/context/LICENSE | 27 -
 .../src/github.com/gorilla/context/README.md | 7 -
 .../src/github.com/gorilla/context/context.go | 143 -
 vendor/src/github.com/gorilla/context/doc.go | 82 -
 vendor/src/github.com/gorilla/mux/.travis.yml | 7 -
 vendor/src/github.com/gorilla/mux/LICENSE | 27 -
 vendor/src/github.com/gorilla/mux/README.md | 7 -
 vendor/src/github.com/gorilla/mux/doc.go | 199 -
 vendor/src/github.com/gorilla/mux/mux.go | 353 -
 vendor/src/github.com/gorilla/mux/regexp.go | 276 -
 vendor/src/github.com/gorilla/mux/route.go | 524 -
 .../src/github.com/hashicorp/consul/LICENSE | 354 -
 .../github.com/hashicorp/consul/api/README.md | 39 -
 .../github.com/hashicorp/consul/api/acl.go | 140 -
 .../github.com/hashicorp/consul/api/agent.go | 334 -
 .../github.com/hashicorp/consul/api/api.go | 442 -
 .../hashicorp/consul/api/catalog.go | 182 -
 .../github.com/hashicorp/consul/api/event.go | 104 -
 .../github.com/hashicorp/consul/api/health.go | 136 -
 .../src/github.com/hashicorp/consul/api/kv.go | 236 -
 .../github.com/hashicorp/consul/api/lock.go | 326 -
 .../github.com/hashicorp/consul/api/raw.go | 24 -
 .../hashicorp/consul/api/semaphore.go | 477 -
 .../hashicorp/consul/api/session.go | 201 -
 .../github.com/hashicorp/consul/api/status.go | 43 -
 .../hashicorp/consul/website/LICENSE.md | 10 -
 .../hashicorp/go-immutable-radix/.gitignore | 24 -
 .../hashicorp/go-immutable-radix/.travis.yml | 3 -
 .../hashicorp/go-immutable-radix/LICENSE | 363 -
 .../hashicorp/go-immutable-radix/README.md | 41 -
 .../hashicorp/go-immutable-radix/edges.go | 21 -
 .../hashicorp/go-immutable-radix/iradix.go | 333 -
 .../hashicorp/go-immutable-radix/iter.go | 81 -
 .../hashicorp/go-immutable-radix/node.go | 289 -
 .../github.com/hashicorp/go-memdb/.gitignore | 24 -
 .../src/github.com/hashicorp/go-memdb/LICENSE | 363 -
 .../github.com/hashicorp/go-memdb/README.md | 93 -
 .../github.com/hashicorp/go-memdb/index.go | 330 -
 .../github.com/hashicorp/go-memdb/memdb.go | 89 -
 .../github.com/hashicorp/go-memdb/schema.go | 76 -
 .../src/github.com/hashicorp/go-memdb/txn.go | 475 -
 .../github.com/hashicorp/go-msgpack/LICENSE | 25 -
 .../hashicorp/go-msgpack/codec/0doc.go | 143 -
 .../hashicorp/go-msgpack/codec/README.md | 174 -
 .../hashicorp/go-msgpack/codec/binc.go | 786 -
 .../hashicorp/go-msgpack/codec/decode.go | 1048 -
 .../hashicorp/go-msgpack/codec/encode.go | 1001 -
 .../hashicorp/go-msgpack/codec/helper.go | 589 -
 .../go-msgpack/codec/helper_internal.go | 127 -
 .../hashicorp/go-msgpack/codec/msgpack.go | 816 -
 .../go-msgpack/codec/msgpack_test.py | 110 -
 .../hashicorp/go-msgpack/codec/rpc.go | 152 -
 .../hashicorp/go-msgpack/codec/simple.go | 461 -
 .../hashicorp/go-msgpack/codec/time.go | 193 -
 .../hashicorp/go-multierror/LICENSE | 353 -
 .../hashicorp/go-multierror/README.md | 91 -
 .../hashicorp/go-multierror/append.go | 30 -
 .../hashicorp/go-multierror/format.go | 23 -
 .../hashicorp/go-multierror/multierror.go | 51 -
 .../github.com/hashicorp/golang-lru/LICENSE | 362 -
 .../hashicorp/golang-lru/simplelru/lru.go | 160 -
 .../hashicorp/memberlist/.gitignore | 25 -
 .../github.com/hashicorp/memberlist/LICENSE | 354 -
 .../github.com/hashicorp/memberlist/Makefile | 14 -
 .../github.com/hashicorp/memberlist/README.md | 144 -
 .../hashicorp/memberlist/alive_delegate.go | 14 -
 .../hashicorp/memberlist/broadcast.go | 100 -
 .../github.com/hashicorp/memberlist/config.go | 232 -
 .../hashicorp/memberlist/conflict_delegate.go | 10 -
 .../hashicorp/memberlist/delegate.go | 37 -
 .../hashicorp/memberlist/event_delegate.go | 61 -
 .../hashicorp/memberlist/keyring.go | 149 -
 .../hashicorp/memberlist/logging.go | 22 -
 .../hashicorp/memberlist/memberlist.go | 658 -
 .../hashicorp/memberlist/merge_delegate.go | 14 -
 .../github.com/hashicorp/memberlist/net.go | 1039 -
 .../hashicorp/memberlist/ping_delegate.go | 14 -
 .../github.com/hashicorp/memberlist/queue.go | 167 -
 .../hashicorp/memberlist/security.go | 198 -
 .../github.com/hashicorp/memberlist/state.go | 1012 -
 .../github.com/hashicorp/memberlist/todo.md | 6 -
 .../github.com/hashicorp/memberlist/util.go | 392 -
 vendor/src/github.com/hashicorp/serf/LICENSE | 354 -
 .../hashicorp/serf/coordinate/client.go | 180 -
 .../hashicorp/serf/coordinate/config.go | 70 -
 .../hashicorp/serf/coordinate/coordinate.go | 183 -
 .../hashicorp/serf/coordinate/phantom.go | 187 -
 .../hashicorp/serf/serf/broadcast.go | 27 -
 .../hashicorp/serf/serf/coalesce.go | 80 -
 .../hashicorp/serf/serf/coalesce_member.go | 68 -
 .../hashicorp/serf/serf/coalesce_user.go | 52 -
 .../github.com/hashicorp/serf/serf/config.go | 251 -
 .../hashicorp/serf/serf/conflict_delegate.go | 13 -
 .../hashicorp/serf/serf/delegate.go | 254 -
 .../github.com/hashicorp/serf/serf/event.go | 168 -
 .../hashicorp/serf/serf/event_delegate.go | 21 -
 .../hashicorp/serf/serf/internal_query.go | 312 -
 .../hashicorp/serf/serf/keymanager.go | 166 -
 .../github.com/hashicorp/serf/serf/lamport.go | 45 -
 .../hashicorp/serf/serf/merge_delegate.go | 44 -
 .../hashicorp/serf/serf/messages.go | 147 -
 .../hashicorp/serf/serf/ping_delegate.go | 89 -
 .../github.com/hashicorp/serf/serf/query.go | 210 -
 .../github.com/hashicorp/serf/serf/serf.go | 1692 -
 .../hashicorp/serf/serf/snapshot.go | 560 -
 .../hashicorp/serf/website/source/LICENSE | 10 -
 .../src/github.com/imdario/mergo/.travis.yml | 2 -
 vendor/src/github.com/imdario/mergo/LICENSE | 28 -
 vendor/src/github.com/imdario/mergo/README.md | 122 -
 vendor/src/github.com/imdario/mergo/doc.go | 44 -
 vendor/src/github.com/imdario/mergo/map.go | 154 -
 vendor/src/github.com/imdario/mergo/merge.go | 120 -
 vendor/src/github.com/imdario/mergo/mergo.go | 90 -
 .../inconshreveable/mousetrap/LICENSE | 13 -
 .../inconshreveable/mousetrap/README.md | 23 -
 .../inconshreveable/mousetrap/trap_others.go | 15 -
 .../inconshreveable/mousetrap/trap_windows.go | 98 -
 .../mousetrap/trap_windows_1.4.go | 46 -
 .../jmespath/go-jmespath/.gitignore | 4 -
 .../jmespath/go-jmespath/.travis.yml | 9 -
 .../github.com/jmespath/go-jmespath/LICENSE | 13 -
 .../github.com/jmespath/go-jmespath/Makefile | 44 -
 .../github.com/jmespath/go-jmespath/README.md | 7 -
 .../github.com/jmespath/go-jmespath/api.go | 49 -
 .../go-jmespath/astnodetype_string.go | 16 -
 .../jmespath/go-jmespath/functions.go | 842 -
 .../jmespath/go-jmespath/interpreter.go | 418 -
 .../github.com/jmespath/go-jmespath/lexer.go | 420 -
 .../github.com/jmespath/go-jmespath/parser.go | 603 -
 .../jmespath/go-jmespath/toktype_string.go | 16 -
 .../github.com/jmespath/go-jmespath/util.go | 185 -
 vendor/src/github.com/kr/pty/.gitignore | 4 -
 vendor/src/github.com/kr/pty/License | 23 -
 vendor/src/github.com/kr/pty/README.md | 36 -
 vendor/src/github.com/kr/pty/doc.go | 16 -
 vendor/src/github.com/kr/pty/ioctl.go | 11 -
 vendor/src/github.com/kr/pty/ioctl_bsd.go | 39 -
 vendor/src/github.com/kr/pty/mktypes.bash | 19 -
 vendor/src/github.com/kr/pty/pty_darwin.go | 60 -
 vendor/src/github.com/kr/pty/pty_freebsd.go | 73 -
 vendor/src/github.com/kr/pty/pty_linux.go | 46 -
 .../src/github.com/kr/pty/pty_unsupported.go | 11 -
 vendor/src/github.com/kr/pty/run.go | 28 -
 vendor/src/github.com/kr/pty/types.go | 10 -
 vendor/src/github.com/kr/pty/types_freebsd.go | 15 -
 vendor/src/github.com/kr/pty/util.go | 35 -
 vendor/src/github.com/kr/pty/ztypes_386.go | 9 -
 vendor/src/github.com/kr/pty/ztypes_amd64.go | 9 -
 vendor/src/github.com/kr/pty/ztypes_arm.go | 9 -
 vendor/src/github.com/kr/pty/ztypes_arm64.go | 11 -
 .../github.com/kr/pty/ztypes_freebsd_386.go | 13 -
 .../github.com/kr/pty/ztypes_freebsd_amd64.go | 14 -
 .../github.com/kr/pty/ztypes_freebsd_arm.go | 13 -
 vendor/src/github.com/kr/pty/ztypes_ppc64.go | 11 -
 .../src/github.com/kr/pty/ztypes_ppc64le.go | 11 -
 vendor/src/github.com/kr/pty/ztypes_s390x.go | 11 -
 .../mattn/go-shellwords/.travis.yml | 9 -
 .../github.com/mattn/go-shellwords/README.md | 47 -
 .../mattn/go-shellwords/shellwords.go | 134 -
 .../mattn/go-shellwords/util_posix.go | 19 -
 .../mattn/go-shellwords/util_windows.go | 17 -
 .../github.com/mattn/go-sqlite3/.gitignore | 3 -
 .../github.com/mattn/go-sqlite3/.travis.yml | 9 -
 .../src/github.com/mattn/go-sqlite3/LICENSE | 21 -
 .../src/github.com/mattn/go-sqlite3/README.md | 70 -
 .../src/github.com/mattn/go-sqlite3/backup.go | 70 -
 .../mattn/go-sqlite3/code/sqlite3-binding.c | 147782 ---------------
 .../mattn/go-sqlite3/code/sqlite3-binding.h | 7478 -
 .../mattn/go-sqlite3/code/sqlite3ext.h | 487 -
 vendor/src/github.com/mattn/go-sqlite3/doc.go | 95 -
 .../src/github.com/mattn/go-sqlite3/error.go | 128 -
 .../mattn/go-sqlite3/sqlite3-binding.c | 4 -
 .../mattn/go-sqlite3/sqlite3-binding.h | 5 -
 .../github.com/mattn/go-sqlite3/sqlite3.go | 660 -
 .../mattn/go-sqlite3/sqlite3_icu.go | 13 -
 .../mattn/go-sqlite3/sqlite3_libsqlite3.go | 13 -
 .../go-sqlite3/sqlite3_load_extension.go | 39 -
 .../go-sqlite3/sqlite3_omit_load_extension.go | 19 -
 .../mattn/go-sqlite3/sqlite3_other.go | 13 -
 .../mattn/go-sqlite3/sqlite3_windows.go | 14 -
 .../golang_protobuf_extensions/LICENSE | 201 -
 .../pbutil/decode.go | 75 -
 .../golang_protobuf_extensions/pbutil/doc.go | 16 -
 .../pbutil/encode.go | 46 -
 vendor/src/github.com/miekg/dns/.gitignore | 4 -
 vendor/src/github.com/miekg/dns/.travis.yml | 7 -
 vendor/src/github.com/miekg/dns/AUTHORS | 1 -
 vendor/src/github.com/miekg/dns/CONTRIBUTORS | 9 -
 vendor/src/github.com/miekg/dns/COPYRIGHT | 9 -
 vendor/src/github.com/miekg/dns/LICENSE | 32 -
 vendor/src/github.com/miekg/dns/README.md | 153 -
 vendor/src/github.com/miekg/dns/client.go | 385 -
 .../src/github.com/miekg/dns/clientconfig.go | 99 -
 vendor/src/github.com/miekg/dns/defaults.go | 278 -
 vendor/src/github.com/miekg/dns/dns.go | 100 -
 vendor/src/github.com/miekg/dns/dnssec.go | 664 -
 .../src/github.com/miekg/dns/dnssec_keygen.go | 156 -
 .../github.com/miekg/dns/dnssec_keyscan.go | 249 -
 .../github.com/miekg/dns/dnssec_privkey.go | 85 -
 vendor/src/github.com/miekg/dns/doc.go | 251 -
 vendor/src/github.com/miekg/dns/edns.go | 505 -
 vendor/src/github.com/miekg/dns/format.go | 96 -
 vendor/src/github.com/miekg/dns/labels.go | 162 -
 vendor/src/github.com/miekg/dns/msg.go | 1945 -
 vendor/src/github.com/miekg/dns/nsecx.go | 112 -
 vendor/src/github.com/miekg/dns/privaterr.go | 117 -
 vendor/src/github.com/miekg/dns/rawmsg.go | 95 -
 vendor/src/github.com/miekg/dns/sanitize.go | 84 -
 vendor/src/github.com/miekg/dns/scanner.go | 43 -
 vendor/src/github.com/miekg/dns/server.go | 690 -
 vendor/src/github.com/miekg/dns/sig0.go | 216 -
 .../github.com/miekg/dns/singleinflight.go | 57 -
 vendor/src/github.com/miekg/dns/tlsa.go | 86 -
 vendor/src/github.com/miekg/dns/tsig.go | 320 -
 vendor/src/github.com/miekg/dns/types.go | 1328 -
 .../github.com/miekg/dns/types_generate.go | 266 -
 vendor/src/github.com/miekg/dns/udp.go | 58 -
 vendor/src/github.com/miekg/dns/udp_linux.go | 73 -
 vendor/src/github.com/miekg/dns/udp_other.go | 17 -
 .../src/github.com/miekg/dns/udp_windows.go | 34 -
 vendor/src/github.com/miekg/dns/update.go | 94 -
 vendor/src/github.com/miekg/dns/xfr.go | 244 -
 vendor/src/github.com/miekg/dns/zgenerate.go | 158 -
 vendor/src/github.com/miekg/dns/zscan.go | 974 -
 vendor/src/github.com/miekg/dns/zscan_rr.go | 2270 -
 vendor/src/github.com/miekg/dns/ztypes.go | 842 -
 vendor/src/github.com/miekg/pkcs11/.gitignore | 1 -
 .../src/github.com/miekg/pkcs11/.travis.yml | 14 -
 vendor/src/github.com/miekg/pkcs11/LICENSE | 27 -
 vendor/src/github.com/miekg/pkcs11/README.md | 64 -
 vendor/src/github.com/miekg/pkcs11/const.go | 565 -
 vendor/src/github.com/miekg/pkcs11/error.go | 98 -
 vendor/src/github.com/miekg/pkcs11/hsm.db | Bin 10240 -> 0 bytes
 vendor/src/github.com/miekg/pkcs11/pkcs11.go | 1575 -
 vendor/src/github.com/miekg/pkcs11/pkcs11.h | 299 -
 vendor/src/github.com/miekg/pkcs11/pkcs11f.h | 910 -
 vendor/src/github.com/miekg/pkcs11/pkcs11t.h | 1885 -
 .../src/github.com/miekg/pkcs11/softhsm.conf | 1 -
 vendor/src/github.com/miekg/pkcs11/types.go | 267 -
 .../github.com/mistifyio/go-zfs/.gitignore | 1 -
 .../github.com/mistifyio/go-zfs/.travis.yml | 41 -
 .../mistifyio/go-zfs/CONTRIBUTING.md | 60 -
 .../src/github.com/mistifyio/go-zfs/LICENSE | 201 -
 .../src/github.com/mistifyio/go-zfs/README.md | 54 -
 .../src/github.com/mistifyio/go-zfs/error.go | 18 -
 .../src/github.com/mistifyio/go-zfs/utils.go | 352 -
 .../mistifyio/go-zfs/utils_notsolaris.go | 17 -
 .../mistifyio/go-zfs/utils_solaris.go | 17 -
 vendor/src/github.com/mistifyio/go-zfs/zfs.go | 451 -
 .../src/github.com/mistifyio/go-zfs/zpool.go | 112 -
 .../mreiferson/go-httpclient/.gitignore | 1 -
 .../mreiferson/go-httpclient/.travis.yml | 11 -
 .../mreiferson/go-httpclient/LICENSE | 17 -
 .../mreiferson/go-httpclient/README.md | 41 -
 .../mreiferson/go-httpclient/httpclient.go | 237 -
 .../github.com/opencontainers/runc/LICENSE | 191 -
 .../runc/libcontainer/apparmor/apparmor.go | 39 -
 .../apparmor/apparmor_disabled.go | 20 -
 .../runc/libcontainer/cgroups/cgroups.go | 64 -
 .../cgroups/cgroups_unsupported.go | 3 -
 .../runc/libcontainer/cgroups/stats.go | 106 -
 .../runc/libcontainer/cgroups/utils.go | 413 -
 .../runc/libcontainer/configs/blkio_device.go | 61 -
 .../runc/libcontainer/configs/cgroup_unix.go | 124 -
 .../configs/cgroup_unsupported.go | 6 -
 .../libcontainer/configs/cgroup_windows.go | 6 -
 .../runc/libcontainer/configs/config.go | 332 -
 .../runc/libcontainer/configs/config_unix.go | 51 -
 .../runc/libcontainer/configs/device.go | 57 -
 .../libcontainer/configs/device_defaults.go | 125 -
 .../libcontainer/configs/hugepage_limit.go | 9 -
 .../configs/interface_priority_map.go | 14 -
 .../runc/libcontainer/configs/mount.go | 30 -
 .../runc/libcontainer/configs/namespaces.go | 5 -
 .../configs/namespaces_syscall.go | 31 -
 .../configs/namespaces_syscall_unsupported.go | 15 -
 .../libcontainer/configs/namespaces_unix.go | 127 -
 .../configs/namespaces_unsupported.go | 8 -
 .../runc/libcontainer/configs/network.go | 72 -
 .../runc/libcontainer/devices/devices_unix.go | 102 -
 .../devices/devices_unsupported.go | 3 -
 .../runc/libcontainer/devices/number.go | 24 -
 .../runc/libcontainer/label/label.go | 80 -
 .../runc/libcontainer/label/label_selinux.go | 197 -
 .../runc/libcontainer/selinux/selinux.go | 499 -
 .../runc/libcontainer/system/linux.go | 143 -
 .../runc/libcontainer/system/proc.go | 27 -
 .../runc/libcontainer/system/setns_linux.go | 40 -
 .../libcontainer/system/syscall_linux_386.go | 25 -
 .../libcontainer/system/syscall_linux_64.go | 25 -
 .../libcontainer/system/syscall_linux_arm.go | 25 -
 .../runc/libcontainer/system/sysconfig.go | 31 -
 .../libcontainer/system/sysconfig_notcgo.go | 15 -
 .../runc/libcontainer/system/unsupported.go | 9 -
 .../runc/libcontainer/system/xattrs_linux.go | 99 -
 .../runc/libcontainer/user/MAINTAINERS | 2 -
 .../runc/libcontainer/user/lookup.go | 110 -
 .../runc/libcontainer/user/lookup_unix.go | 30 -
 .../libcontainer/user/lookup_unsupported.go | 21 -
 .../runc/libcontainer/user/user.go | 441 -
 .../github.com/opencontainers/specs/LICENSE | 191 -
 .../opencontainers/specs/specs-go/config.go | 471 -
 .../opencontainers/specs/specs-go/state.go | 17 -
 .../opencontainers/specs/specs-go/version.go | 18 -
 .../src/github.com/pborman/uuid/.travis.yml | 10 -
 .../src/github.com/pborman/uuid/CONTRIBUTORS | 1 -
 vendor/src/github.com/pborman/uuid/LICENSE | 27 -
 vendor/src/github.com/pborman/uuid/README.md | 13 -
 vendor/src/github.com/pborman/uuid/dce.go | 84 -
 vendor/src/github.com/pborman/uuid/doc.go | 8 -
 vendor/src/github.com/pborman/uuid/hash.go | 53 -
 vendor/src/github.com/pborman/uuid/json.go | 34 -
 vendor/src/github.com/pborman/uuid/node.go | 117 -
 vendor/src/github.com/pborman/uuid/sql.go | 58 -
 vendor/src/github.com/pborman/uuid/time.go | 132 -
 vendor/src/github.com/pborman/uuid/util.go | 43 -
 vendor/src/github.com/pborman/uuid/uuid.go | 176 -
 .../src/github.com/pborman/uuid/version1.go | 41 -
 .../src/github.com/pborman/uuid/version4.go | 25 -
 vendor/src/github.com/philhofer/fwd/README.md | 311 -
 vendor/src/github.com/philhofer/fwd/reader.go | 358 -
 vendor/src/github.com/philhofer/fwd/writer.go | 224 -
 .../philhofer/fwd/writer_appengine.go | 5 -
 .../github.com/philhofer/fwd/writer_unsafe.go | 18 -
 .../github.com/pivotal-golang/clock/LICENSE | 202 -
 .../github.com/pivotal-golang/clock/README.md | 1 -
 .../github.com/pivotal-golang/clock/clock.go | 42 -
 .../github.com/pivotal-golang/clock/ticker.go | 20 -
 .../github.com/pivotal-golang/clock/timer.go | 25 -
 vendor/src/github.com/pkg/errors/.gitignore | 24 -
 vendor/src/github.com/pkg/errors/.travis.yml | 10 -
 vendor/src/github.com/pkg/errors/LICENSE | 24 -
 vendor/src/github.com/pkg/errors/README.md | 50 -
 vendor/src/github.com/pkg/errors/appveyor.yml | 32 -
 vendor/src/github.com/pkg/errors/errors.go | 211 -
 vendor/src/github.com/pkg/errors/stack.go | 165 -
 .../prometheus/client_golang/LICENSE | 201 -
 .../client_golang/prometheus/.gitignore | 1 -
 .../client_golang/prometheus/README.md | 53 -
 .../client_golang/prometheus/collector.go | 75 -
 .../client_golang/prometheus/counter.go | 175 -
 .../client_golang/prometheus/desc.go | 201 -
 .../client_golang/prometheus/doc.go | 109 -
 .../client_golang/prometheus/expvar.go | 119 -
 .../client_golang/prometheus/gauge.go | 147 -
 .../client_golang/prometheus/go_collector.go | 263 -
 .../client_golang/prometheus/histogram.go | 450 -
 .../client_golang/prometheus/http.go | 361 -
 .../client_golang/prometheus/metric.go | 166 -
 .../prometheus/process_collector.go | 142 -
 .../client_golang/prometheus/push.go | 65 -
 .../client_golang/prometheus/registry.go | 726 -
 .../client_golang/prometheus/summary.go | 540 -
 .../client_golang/prometheus/untyped.go | 145 -
 .../client_golang/prometheus/value.go | 234 -
 .../client_golang/prometheus/vec.go | 247 -
 .../prometheus/client_model/LICENSE | 201 -
 .../prometheus/client_model/go/metrics.pb.go | 364 -
 .../prometheus/client_model/ruby/LICENSE | 201 -
 .../src/github.com/prometheus/common/LICENSE | 201 -
 .../prometheus/common/expfmt/decode.go | 411 -
 .../prometheus/common/expfmt/encode.go | 88 -
 .../prometheus/common/expfmt/expfmt.go | 40 -
 .../prometheus/common/expfmt/fuzz.go | 36 -
 .../prometheus/common/expfmt/json_decode.go | 162 -
 .../prometheus/common/expfmt/text_create.go | 305 -
 .../prometheus/common/expfmt/text_parse.go | 746 -
 .../prometheus/common/model/alert.go | 109 -
 .../prometheus/common/model/fingerprinting.go | 105 -
 .../prometheus/common/model/labels.go | 188 -
 .../prometheus/common/model/labelset.go | 153 -
 .../prometheus/common/model/metric.go | 81 -
 .../prometheus/common/model/model.go | 16 -
 .../prometheus/common/model/signature.go | 190 -
 .../prometheus/common/model/silence.go | 60 -
 .../prometheus/common/model/time.go | 230 -
 .../prometheus/common/model/value.go | 395 -
 .../github.com/prometheus/procfs/.travis.yml | 7 -
 .../github.com/prometheus/procfs/AUTHORS.md | 20 -
 .../prometheus/procfs/CONTRIBUTING.md | 18 -
 .../src/github.com/prometheus/procfs/LICENSE | 201 -
 .../src/github.com/prometheus/procfs/Makefile | 6 -
 .../src/github.com/prometheus/procfs/NOTICE | 7 -
 .../github.com/prometheus/procfs/README.md | 10 -
 .../src/github.com/prometheus/procfs/doc.go | 45 -
 vendor/src/github.com/prometheus/procfs/fs.go | 40 -
 .../src/github.com/prometheus/procfs/ipvs.go | 223 -
 .../github.com/prometheus/procfs/mdstat.go | 158 -
 .../src/github.com/prometheus/procfs/proc.go | 202 -
 .../github.com/prometheus/procfs/proc_io.go | 54 -
 .../prometheus/procfs/proc_limits.go | 111 -
 .../github.com/prometheus/procfs/proc_stat.go | 175 -
 .../src/github.com/prometheus/procfs/stat.go | 55 -
 .../github.com/samuel/go-zookeeper/LICENSE | 25 -
 .../github.com/samuel/go-zookeeper/zk/conn.go | 844 -
 .../samuel/go-zookeeper/zk/constants.go | 242 -
 .../github.com/samuel/go-zookeeper/zk/flw.go | 288 -
 .../github.com/samuel/go-zookeeper/zk/lock.go | 131 -
 .../samuel/go-zookeeper/zk/server_help.go | 119 -
 .../samuel/go-zookeeper/zk/server_java.go | 136 -
 .../samuel/go-zookeeper/zk/structs.go | 633 -
 .../samuel/go-zookeeper/zk/tracer.go | 148 -
 .../github.com/samuel/go-zookeeper/zk/util.go | 54 -
 .../seccomp/libseccomp-golang/LICENSE | 22 -
 .../seccomp/libseccomp-golang/README | 26 -
 .../seccomp/libseccomp-golang/seccomp.go | 857 -
 .../libseccomp-golang/seccomp_internal.go | 506 -
 vendor/src/github.com/spf13/cobra/.gitignore | 24 -
 vendor/src/github.com/spf13/cobra/.mailmap | 3 -
 vendor/src/github.com/spf13/cobra/.travis.yml | 18 -
 vendor/src/github.com/spf13/cobra/LICENSE.txt | 174 -
 vendor/src/github.com/spf13/cobra/README.md | 928 -
 vendor/src/github.com/spf13/cobra/args.go | 98 -
 .../spf13/cobra/bash_completions.go | 602 -
 .../spf13/cobra/bash_completions.md | 206 -
 vendor/src/github.com/spf13/cobra/cobra.go | 171 -
 vendor/src/github.com/spf13/cobra/command.go | 1260 -
 .../github.com/spf13/cobra/command_notwin.go | 5 -
 .../src/github.com/spf13/cobra/command_win.go | 26 -
 vendor/src/github.com/spf13/pflag/.travis.yml | 17 -
 vendor/src/github.com/spf13/pflag/LICENSE | 28 -
 vendor/src/github.com/spf13/pflag/README.md | 256 -
 vendor/src/github.com/spf13/pflag/bool.go | 97 -
 vendor/src/github.com/spf13/pflag/count.go | 97 -
 vendor/src/github.com/spf13/pflag/duration.go | 86 -
 vendor/src/github.com/spf13/pflag/flag.go | 934 -
 vendor/src/github.com/spf13/pflag/float32.go | 91 -
 vendor/src/github.com/spf13/pflag/float64.go | 87 -
 .../src/github.com/spf13/pflag/golangflag.go | 104 -
 vendor/src/github.com/spf13/pflag/int.go | 87 -
 vendor/src/github.com/spf13/pflag/int32.go | 91 -
 vendor/src/github.com/spf13/pflag/int64.go | 87 -
 vendor/src/github.com/spf13/pflag/int8.go | 91 -
 .../src/github.com/spf13/pflag/int_slice.go | 128 -
 vendor/src/github.com/spf13/pflag/ip.go | 96 -
 vendor/src/github.com/spf13/pflag/ipmask.go | 122 -
 vendor/src/github.com/spf13/pflag/ipnet.go | 100 -
 vendor/src/github.com/spf13/pflag/string.go | 82 -
 .../github.com/spf13/pflag/string_slice.go | 111 -
 vendor/src/github.com/spf13/pflag/uint.go | 91 -
 vendor/src/github.com/spf13/pflag/uint16.go | 89 -
 vendor/src/github.com/spf13/pflag/uint32.go | 89 -
 vendor/src/github.com/spf13/pflag/uint64.go | 91 -
 vendor/src/github.com/spf13/pflag/uint8.go | 91 -
 .../github.com/syndtr/gocapability/LICENSE | 24 -
 .../gocapability/capability/capability.go | 72 -
 .../capability/capability_linux.go | 608 -
 .../capability/capability_noop.go | 19 -
 .../syndtr/gocapability/capability/enum.go | 264 -
 .../gocapability/capability/enum_gen.go | 129 -
 .../gocapability/capability/syscall_linux.go | 145 -
 .../src/github.com/tchap/go-patricia/LICENSE | 20 -
 .../tchap/go-patricia/patricia/children.go | 244 -
 .../tchap/go-patricia/patricia/patricia.go | 467 -
 vendor/src/github.com/tinylib/msgp/LICENSE | 8 -
 .../github.com/tinylib/msgp/msgp/circular.go | 38 -
 .../src/github.com/tinylib/msgp/msgp/defs.go | 142 -
 .../src/github.com/tinylib/msgp/msgp/edit.go | 241 -
 .../github.com/tinylib/msgp/msgp/elsize.go | 99 -
 .../github.com/tinylib/msgp/msgp/errors.go | 142 -
 .../github.com/tinylib/msgp/msgp/extension.go | 548 -
 .../github.com/tinylib/msgp/msgp/integers.go | 174 -
 .../src/github.com/tinylib/msgp/msgp/json.go | 542 -
 .../tinylib/msgp/msgp/json_bytes.go | 363 -
 .../github.com/tinylib/msgp/msgp/number.go | 140 -
 .../tinylib/msgp/msgp/number_appengine.go | 101 -
 .../tinylib/msgp/msgp/number_unsafe.go | 159 -
 .../src/github.com/tinylib/msgp/msgp/read.go | 1118 -
 .../tinylib/msgp/msgp/read_bytes.go | 1073 -
 .../src/github.com/tinylib/msgp/msgp/size.go | 38 -
 .../src/github.com/tinylib/msgp/msgp/write.go | 768 -
 .../tinylib/msgp/msgp/write_bytes.go | 369 -
 vendor/src/github.com/ugorji/go/LICENSE | 22 -
 vendor/src/github.com/ugorji/go/codec/0doc.go | 193 -
 .../src/github.com/ugorji/go/codec/README.md | 148 -
 vendor/src/github.com/ugorji/go/codec/binc.go | 918 -
 vendor/src/github.com/ugorji/go/codec/cbor.go | 584 -
 .../src/github.com/ugorji/go/codec/decode.go | 2015 -
 .../src/github.com/ugorji/go/codec/encode.go | 1405 -
 .../ugorji/go/codec/fast-path.generated.go | 38900 ----
 .../ugorji/go/codec/fast-path.go.tmpl | 511 -
 .../ugorji/go/codec/fast-path.not.go | 32 -
 .../ugorji/go/codec/gen-dec-array.go.tmpl | 101 -
 .../ugorji/go/codec/gen-dec-map.go.tmpl | 58 -
 .../ugorji/go/codec/gen-helper.generated.go | 233 -
 .../ugorji/go/codec/gen-helper.go.tmpl | 364 -
 .../ugorji/go/codec/gen.generated.go | 172 -
 vendor/src/github.com/ugorji/go/codec/gen.go | 1920 -
 .../src/github.com/ugorji/go/codec/helper.go | 1129 -
 .../ugorji/go/codec/helper_internal.go | 242 -
 .../ugorji/go/codec/helper_not_unsafe.go | 20 -
 .../ugorji/go/codec/helper_unsafe.go | 45 -
 vendor/src/github.com/ugorji/go/codec/json.go | 1072 -
 .../src/github.com/ugorji/go/codec/msgpack.go | 844 -
 vendor/src/github.com/ugorji/go/codec/noop.go | 213 -
 .../github.com/ugorji/go/codec/prebuild.go | 3 -
 .../github.com/ugorji/go/codec/prebuild.sh | 199 -
 vendor/src/github.com/ugorji/go/codec/rpc.go | 180 -
 .../src/github.com/ugorji/go/codec/simple.go | 518 -
 .../ugorji/go/codec/test-cbor-goldens.json | 639 -
 vendor/src/github.com/ugorji/go/codec/test.py | 120 -
 .../src/github.com/ugorji/go/codec/tests.sh | 74 -
 vendor/src/github.com/ugorji/go/codec/time.go | 222 -
 .../src/github.com/vbatts/tar-split/LICENSE | 19 -
 .../vbatts/tar-split/archive/tar/common.go | 329 -
 .../vbatts/tar-split/archive/tar/reader.go | 943 -
 .../vbatts/tar-split/archive/tar/stat_atim.go | 20 -
 .../tar-split/archive/tar/stat_atimespec.go | 20 -
 .../vbatts/tar-split/archive/tar/stat_unix.go | 32 -
 .../vbatts/tar-split/archive/tar/writer.go | 396 -
 .../vbatts/tar-split/tar/asm/README.md | 44 -
 .../vbatts/tar-split/tar/asm/assemble.go | 130 -
 .../vbatts/tar-split/tar/asm/disassemble.go | 141 -
 .../vbatts/tar-split/tar/asm/doc.go | 9 -
 .../vbatts/tar-split/tar/storage/doc.go | 12 -
 .../vbatts/tar-split/tar/storage/entry.go | 78 -
 .../vbatts/tar-split/tar/storage/getter.go | 104 -
 .../vbatts/tar-split/tar/storage/packer.go | 127 -
 .../github.com/vdemeester/shakers/.gitignore | 2 -
 .../github.com/vdemeester/shakers/Dockerfile | 16 -
 .../src/github.com/vdemeester/shakers/LICENSE | 191 -
 .../github.com/vdemeester/shakers/Makefile | 37 -
 .../github.com/vdemeester/shakers/README.md | 30 -
 .../src/github.com/vdemeester/shakers/bool.go | 46 -
 .../github.com/vdemeester/shakers/circle.yml | 11 -
 .../github.com/vdemeester/shakers/common.go | 310 -
 .../github.com/vdemeester/shakers/glide.yaml | 4 -
 .../github.com/vdemeester/shakers/string.go | 168 -
 .../src/github.com/vdemeester/shakers/time.go | 234 -
 .../vishvananda/netlink/.travis.yml | 8 -
 .../github.com/vishvananda/netlink/LICENSE | 192 -
 .../github.com/vishvananda/netlink/Makefile | 29 -
 .../github.com/vishvananda/netlink/README.md | 89 -
 .../github.com/vishvananda/netlink/addr.go | 45 -
 .../vishvananda/netlink/addr_linux.go | 243 -
 .../vishvananda/netlink/bpf_linux.go | 60 -
 .../github.com/vishvananda/netlink/class.go | 78 -
 .../vishvananda/netlink/class_linux.go | 254 -
 .../github.com/vishvananda/netlink/filter.go | 244 -
 .../vishvananda/netlink/filter_linux.go | 560 -
 .../vishvananda/netlink/handle_linux.go | 86 -
 .../github.com/vishvananda/netlink/link.go | 584 -
 .../vishvananda/netlink/link_linux.go | 1391 -
 .../vishvananda/netlink/link_tuntap_linux.go | 14 -
 .../github.com/vishvananda/netlink/neigh.go | 22 -
 .../vishvananda/netlink/neigh_linux.go | 229 -
 .../github.com/vishvananda/netlink/netlink.go | 31 -
 .../vishvananda/netlink/netlink_linux.go | 10 -
 .../netlink/netlink_unspecified.go | 143 -
 .../vishvananda/netlink/nl/addr_linux.go | 47 -
 .../vishvananda/netlink/nl/link_linux.go | 396 -
 .../vishvananda/netlink/nl/nl_linux.go | 552 -
 .../vishvananda/netlink/nl/route_linux.go | 54 -
 .../vishvananda/netlink/nl/syscall.go | 37 -
 .../vishvananda/netlink/nl/tc_linux.go | 675 -
 .../vishvananda/netlink/nl/xfrm_linux.go | 276 -
 .../netlink/nl/xfrm_policy_linux.go | 119 -
 .../netlink/nl/xfrm_state_linux.go | 272 -
 .../vishvananda/netlink/protinfo.go | 53 -
 .../vishvananda/netlink/protinfo_linux.go | 64 -
 .../github.com/vishvananda/netlink/qdisc.go | 231 -
 .../vishvananda/netlink/qdisc_linux.go | 513 -
 .../github.com/vishvananda/netlink/route.go | 66 -
 .../vishvananda/netlink/route_linux.go | 463 -
 .../vishvananda/netlink/route_unspecified.go | 7 -
 .../github.com/vishvananda/netlink/rule.go | 40 -
 .../vishvananda/netlink/rule_linux.go | 215 -
 .../github.com/vishvananda/netlink/xfrm.go | 74 -
 .../vishvananda/netlink/xfrm_policy.go | 74 -
 .../vishvananda/netlink/xfrm_policy_linux.go | 257 -
 .../vishvananda/netlink/xfrm_state.go | 107 -
 .../vishvananda/netlink/xfrm_state_linux.go | 368 -
 .../src/github.com/vishvananda/netns/LICENSE | 192 -
 .../github.com/vishvananda/netns/README.md | 49 -
 .../src/github.com/vishvananda/netns/netns.go | 67 -
 .../vishvananda/netns/netns_linux.go | 205 -
 .../vishvananda/netns/netns_linux_386.go | 7 -
 .../vishvananda/netns/netns_linux_amd64.go | 7 -
 .../vishvananda/netns/netns_linux_arm.go | 7 -
 .../vishvananda/netns/netns_linux_arm64.go | 7 -
 .../vishvananda/netns/netns_linux_ppc64le.go | 7 -
 .../vishvananda/netns/netns_linux_s390x.go | 7 -
 .../vishvananda/netns/netns_unspecified.go | 35 -
 vendor/src/golang.org/x/crypto/LICENSE | 27 -
 .../golang.org/x/crypto/pkcs12/bmp-string.go | 50 -
 .../src/golang.org/x/crypto/pkcs12/crypto.go | 131 -
 .../src/golang.org/x/crypto/pkcs12/errors.go | 23 -
 .../x/crypto/pkcs12/internal/rc2/rc2.go | 274 -
 vendor/src/golang.org/x/crypto/pkcs12/mac.go | 45 -
 .../src/golang.org/x/crypto/pkcs12/pbkdf.go | 170 -
 .../src/golang.org/x/crypto/pkcs12/pkcs12.go | 342 -
 .../golang.org/x/crypto/pkcs12/safebags.go | 57 -
 vendor/src/golang.org/x/net/LICENSE | 27 -
 .../src/golang.org/x/net/context/context.go | 447 -
 .../x/net/context/ctxhttp/cancelreq.go | 19 -
 .../x/net/context/ctxhttp/cancelreq_go14.go | 23 -
 .../x/net/context/ctxhttp/ctxhttp.go | 145 -
 vendor/src/golang.org/x/net/http2/.gitignore | 2 -
 vendor/src/golang.org/x/net/http2/Dockerfile | 51 -
 vendor/src/golang.org/x/net/http2/Makefile | 3 -
 vendor/src/golang.org/x/net/http2/README | 20 -
 .../x/net/http2/client_conn_pool.go | 225 -
 .../x/net/http2/configure_transport.go | 89 -
 vendor/src/golang.org/x/net/http2/errors.go | 122 -
 .../golang.org/x/net/http2/fixed_buffer.go | 60 -
 vendor/src/golang.org/x/net/http2/flow.go | 50 -
 vendor/src/golang.org/x/net/http2/frame.go | 1496 -
 vendor/src/golang.org/x/net/http2/go15.go | 11 -
 vendor/src/golang.org/x/net/http2/gotrack.go | 170 -
 .../src/golang.org/x/net/http2/headermap.go | 78 -
 .../golang.org/x/net/http2/hpack/encode.go | 251 -
 .../src/golang.org/x/net/http2/hpack/hpack.go | 542 -
 .../golang.org/x/net/http2/hpack/huffman.go | 190 -
 .../golang.org/x/net/http2/hpack/tables.go | 352 -
 vendor/src/golang.org/x/net/http2/http2.go | 464 -
 vendor/src/golang.org/x/net/http2/not_go15.go | 11 -
 vendor/src/golang.org/x/net/http2/not_go16.go | 13 -
 vendor/src/golang.org/x/net/http2/pipe.go | 147 -
 vendor/src/golang.org/x/net/http2/server.go | 2182 -
 .../src/golang.org/x/net/http2/transport.go | 1664 -
 vendor/src/golang.org/x/net/http2/write.go | 262 -
 .../src/golang.org/x/net/http2/writesched.go | 283 -
 .../x/net/internal/timeseries/timeseries.go | 525 -
 vendor/src/golang.org/x/net/proxy/direct.go | 18 -
 vendor/src/golang.org/x/net/proxy/per_host.go | 140 -
 vendor/src/golang.org/x/net/proxy/proxy.go | 94 -
 vendor/src/golang.org/x/net/proxy/socks5.go | 210 -
 vendor/src/golang.org/x/net/trace/events.go | 532 -
 .../src/golang.org/x/net/trace/histogram.go | 365 -
 vendor/src/golang.org/x/net/trace/trace.go | 1070 -
 .../src/golang.org/x/net/websocket/client.go | 113 -
 vendor/src/golang.org/x/net/websocket/hybi.go | 586 -
 .../src/golang.org/x/net/websocket/server.go | 113 -
 .../golang.org/x/net/websocket/websocket.go | 414 -
 vendor/src/golang.org/x/oauth2/.travis.yml | 14 -
 vendor/src/golang.org/x/oauth2/AUTHORS | 3 -
 .../src/golang.org/x/oauth2/CONTRIBUTING.md | 31 -
 vendor/src/golang.org/x/oauth2/CONTRIBUTORS | 3 -
 vendor/src/golang.org/x/oauth2/LICENSE | 27 -
 vendor/src/golang.org/x/oauth2/README.md | 64 -
 .../golang.org/x/oauth2/client_appengine.go | 25 -
 .../golang.org/x/oauth2/google/appengine.go | 86 -
 .../x/oauth2/google/appengine_hook.go | 13 -
 .../x/oauth2/google/appenginevm_hook.go | 14 -
 .../src/golang.org/x/oauth2/google/default.go | 155 -
 .../src/golang.org/x/oauth2/google/google.go | 145 -
 vendor/src/golang.org/x/oauth2/google/jwt.go | 71 -
 vendor/src/golang.org/x/oauth2/google/sdk.go | 168 -
 .../golang.org/x/oauth2/internal/oauth2.go | 76 -
 .../src/golang.org/x/oauth2/internal/token.go | 221 -
 .../golang.org/x/oauth2/internal/transport.go | 69 -
 vendor/src/golang.org/x/oauth2/jws/jws.go | 172 -
 vendor/src/golang.org/x/oauth2/jwt/jwt.go | 153 -
 vendor/src/golang.org/x/oauth2/oauth2.go | 337 -
 vendor/src/golang.org/x/oauth2/token.go | 158 -
 vendor/src/golang.org/x/oauth2/transport.go | 132 -
 vendor/src/golang.org/x/sys/LICENSE | 27 -
 vendor/src/golang.org/x/sys/windows/asm.s | 8 -
 .../x/sys/windows/asm_windows_386.s | 13 -
 .../x/sys/windows/asm_windows_amd64.s | 13 -
 .../golang.org/x/sys/windows/dll_windows.go | 275 -
 .../src/golang.org/x/sys/windows/env_unset.go | 14 -
 .../golang.org/x/sys/windows/env_windows.go | 25 -
 .../src/golang.org/x/sys/windows/eventlog.go | 20 -
 .../golang.org/x/sys/windows/exec_windows.go | 97 -
 vendor/src/golang.org/x/sys/windows/race.go | 30 -
 vendor/src/golang.org/x/sys/windows/race0.go | 25 -
 .../golang.org/x/sys/windows/registry/key.go | 178 -
 .../x/sys/windows/registry/syscall.go | 33 -
 .../x/sys/windows/registry/value.go | 384 -
 .../sys/windows/registry/zsyscall_windows.go | 82 -
 .../x/sys/windows/security_windows.go | 435 -
 .../src/golang.org/x/sys/windows/service.go | 143 -
 vendor/src/golang.org/x/sys/windows/str.go | 22 -
 .../golang.org/x/sys/windows/svc/debug/log.go | 56 -
 .../x/sys/windows/svc/debug/service.go | 45 -
 .../src/golang.org/x/sys/windows/svc/event.go | 48 -
 .../x/sys/windows/svc/eventlog/install.go | 80 -
 .../x/sys/windows/svc/eventlog/log.go | 70 -
 .../src/golang.org/x/sys/windows/svc/go12.c | 24 -
 .../src/golang.org/x/sys/windows/svc/go12.go | 11 -
 .../src/golang.org/x/sys/windows/svc/go13.go | 31 -
 .../x/sys/windows/svc/mgr/config.go | 139 -
 .../golang.org/x/sys/windows/svc/mgr/mgr.go | 119 -
 .../x/sys/windows/svc/mgr/service.go | 74 -
 .../golang.org/x/sys/windows/svc/security.go | 62 -
 .../golang.org/x/sys/windows/svc/service.go | 316 -
 .../golang.org/x/sys/windows/svc/sys_386.s | 67 -
 .../golang.org/x/sys/windows/svc/sys_amd64.s | 41 -
 .../src/golang.org/x/sys/windows/syscall.go | 77 -
 .../x/sys/windows/syscall_windows.go | 990 -
 .../x/sys/windows/zsyscall_windows.go | 2220 -
 .../x/sys/windows/ztypes_windows.go | 1242 -
 .../x/sys/windows/ztypes_windows_386.go | 22 -
 .../x/sys/windows/ztypes_windows_amd64.go | 22 -
 vendor/src/google.golang.org/api/LICENSE | 27 -
 .../google.golang.org/api/gensupport/json.go | 177 -
 .../api/gensupport/params.go | 31 -
 .../api/googleapi/googleapi.go | 588 -
 .../googleapi/internal/uritemplates/LICENSE | 18 -
 .../internal/uritemplates/uritemplates.go | 359 -
 .../googleapi/internal/uritemplates/utils.go | 13 -
 .../google.golang.org/api/googleapi/types.go | 182 -
 .../api/logging/v1beta3/logging-api.json | 1692 -
 .../api/logging/v1beta3/logging-gen.go | 4787 -
 .../src/google.golang.org/cloud/.travis.yml | 11 -
 vendor/src/google.golang.org/cloud/AUTHORS | 12 -
 .../google.golang.org/cloud/CONTRIBUTING.md | 114 -
 .../src/google.golang.org/cloud/CONTRIBUTORS | 24 -
 vendor/src/google.golang.org/cloud/LICENSE | 202 -
 vendor/src/google.golang.org/cloud/README.md | 135 -
 vendor/src/google.golang.org/cloud/cloud.go | 49 -
 .../cloud/compute/metadata/metadata.go | 327 -
 .../google.golang.org/cloud/internal/cloud.go | 128 -
 .../cloud/internal/opts/option.go | 24 -
 .../cloud/internal/transport/cancelreq.go | 29 -
 .../internal/transport/cancelreq_legacy.go | 31 -
 .../cloud/internal/transport/dial.go | 134 -
 .../cloud/internal/transport/proto.go | 80 -
 .../src/google.golang.org/cloud/key.json.enc | Bin 1248 -> 0 bytes
 .../cloud/logging/logging.go | 468 -
 vendor/src/google.golang.org/cloud/option.go | 102 -
 vendor/src/google.golang.org/grpc/.travis.yml | 17 -
 .../google.golang.org/grpc/CONTRIBUTING.md | 46 -
 vendor/src/google.golang.org/grpc/LICENSE | 28 -
 vendor/src/google.golang.org/grpc/Makefile | 51 -
 vendor/src/google.golang.org/grpc/PATENTS | 22 -
 vendor/src/google.golang.org/grpc/README.md | 32 -
 vendor/src/google.golang.org/grpc/backoff.go | 80 -
 vendor/src/google.golang.org/grpc/call.go | 191 -
 .../src/google.golang.org/grpc/clientconn.go | 623 -
 vendor/src/google.golang.org/grpc/codegen.sh | 17 -
 .../grpc/codes/code_string.go | 16 -
 .../src/google.golang.org/grpc/codes/codes.go | 159 -
 vendor/src/google.golang.org/grpc/coverage.sh | 47 -
 .../grpc/credentials/credentials.go | 226 -
 .../grpc/credentials/oauth/oauth.go | 177 -
 vendor/src/google.golang.org/grpc/doc.go | 6 -
 .../google.golang.org/grpc/grpclog/logger.go | 93 -
 .../src/google.golang.org/grpc/interceptor.go | 74 -
 .../grpc/internal/internal.go | 49 -
 .../grpc/metadata/metadata.go | 134 -
 .../google.golang.org/grpc/naming/naming.go | 73 -
 .../src/google.golang.org/grpc/peer/peer.go | 65 -
 vendor/src/google.golang.org/grpc/picker.go | 243 -
 vendor/src/google.golang.org/grpc/rpc_util.go | 418 -
 vendor/src/google.golang.org/grpc/server.go | 782 -
 vendor/src/google.golang.org/grpc/stream.go | 414 -
 vendor/src/google.golang.org/grpc/trace.go | 120 -
 .../grpc/transport/control.go | 210 -
 .../grpc/transport/handler_server.go | 383 -
 .../grpc/transport/http2_client.go | 914 -
 .../grpc/transport/http2_server.go | 735 -
 .../grpc/transport/http_util.go | 411 -
 .../grpc/transport/transport.go | 508 -
 vendor/src/gopkg.in/fsnotify.v1/.gitignore | 6 -
 vendor/src/gopkg.in/fsnotify.v1/.travis.yml | 29 -
 vendor/src/gopkg.in/fsnotify.v1/AUTHORS | 43 -
 vendor/src/gopkg.in/fsnotify.v1/CHANGELOG.md | 287 -
 .../src/gopkg.in/fsnotify.v1/CONTRIBUTING.md | 77 -
 vendor/src/gopkg.in/fsnotify.v1/LICENSE | 28 -
 vendor/src/gopkg.in/fsnotify.v1/README.md | 46 -
 vendor/src/gopkg.in/fsnotify.v1/fen.go | 37 -
 vendor/src/gopkg.in/fsnotify.v1/fsnotify.go | 62 -
 vendor/src/gopkg.in/fsnotify.v1/inotify.go | 324 -
 .../gopkg.in/fsnotify.v1/inotify_poller.go | 186 -
 vendor/src/gopkg.in/fsnotify.v1/kqueue.go | 502 -
 .../src/gopkg.in/fsnotify.v1/open_mode_bsd.go | 11 -
 .../gopkg.in/fsnotify.v1/open_mode_darwin.go | 12 -
 vendor/src/gopkg.in/fsnotify.v1/windows.go | 561 -
 volume/drivers/adapter.go | 164 -
 volume/drivers/extpoint.go | 181 -
 volume/drivers/extpoint_test.go | 23 -
 volume/drivers/proxy.go | 241 -
 volume/drivers/proxy_test.go | 132 -
 volume/local/local.go | 344 -
 volume/local/local_test.go | 328 -
 volume/local/local_unix.go | 69 -
 volume/local/local_windows.go | 34 -
 volume/store/errors.go | 74 -
 volume/store/store.go | 526 -
 volume/store/store_test.go | 201 -
 volume/store/store_unix.go | 9 -
 volume/store/store_windows.go | 12 -
 volume/testutils/testutils.go | 116 -
 volume/volume.go | 190 -
 volume/volume_copy.go | 28 -
 volume/volume_propagation_linux.go | 44 -
 volume/volume_propagation_linux_test.go | 65 -
 volume/volume_propagation_unsupported.go | 22 -
 volume/volume_test.go | 212 -
 volume/volume_unix.go | 186 -
 volume/volume_windows.go | 206 -
 3979 files changed, 825464 deletions(-)
 delete mode 100644 AUTHORS
 delete mode 100644 CHANGELOG.md
 delete mode 100644 CONTRIBUTING.md
 delete mode 100644 Dockerfile
 delete mode 100644 Dockerfile.aarch64
 delete mode 100644 Dockerfile.armhf
 delete mode 100644 Dockerfile.gccgo
 delete mode 100644 Dockerfile.ppc64le
 delete mode 100644 Dockerfile.s390x
 delete mode 100644 Dockerfile.simple
 delete mode 100644 Dockerfile.windows
 delete mode 100644 LICENSE
 delete mode 100644 MAINTAINERS
 delete mode 100644 Makefile
 delete mode 100644 NOTICE
 delete mode 100644 README.md
 delete mode 100644 ROADMAP.md
 delete mode 100644 VENDORING.md
 delete mode 100644 VERSION
 delete mode 100644 api/README.md
 delete mode 100644 api/client/bundlefile/bundlefile.go
 delete mode 100644 api/client/bundlefile/bundlefile_test.go
 delete mode 100644 api/client/cli.go
 delete mode 100644 api/client/client.go
 delete mode 100644 api/client/commands.go
 delete mode 100644 api/client/container/attach.go
 delete mode 100644 api/client/container/commit.go
 delete mode 100644 api/client/container/cp.go
 delete mode 100644 api/client/container/create.go
 delete mode 100644 api/client/container/diff.go
 delete mode 100644 api/client/container/export.go
 delete mode 100644 api/client/container/kill.go
 delete mode 100644 api/client/container/logs.go
 delete mode 100644 api/client/container/pause.go
 delete mode 100644 api/client/container/port.go
 delete mode 100644 api/client/container/ps.go
 delete mode 100644 api/client/container/rename.go
 delete mode 100644 api/client/container/restart.go
 delete mode 100644 api/client/container/rm.go
 delete mode 100644 api/client/container/run.go
 delete mode 100644 api/client/container/start.go
 delete mode 100644 api/client/container/stats.go
 delete mode 100644 api/client/container/stats_helpers.go
 delete mode 100644 api/client/container/stats_unit_test.go
 delete mode 100644 api/client/container/stop.go
 delete mode 100644 api/client/container/top.go
 delete mode 100644 api/client/container/unpause.go
 delete mode 100644 api/client/container/utils.go
 delete mode 100644 api/client/container/wait.go
 delete mode 100644 api/client/credentials.go
 delete mode 100644 api/client/exec.go
 delete mode 100644 api/client/exec_test.go
 delete mode 100644 api/client/formatter/custom.go
 delete mode 100644 api/client/formatter/custom_test.go
 delete mode 100644 api/client/formatter/formatter.go
 delete mode 100644 api/client/formatter/formatter_test.go
 delete mode 100644 api/client/hijack.go
 delete mode 100644 api/client/idresolver/idresolver.go
 delete mode 100644 api/client/image/build.go
 delete mode 100644 api/client/image/history.go
 delete mode 100644 api/client/image/images.go
 delete mode 100644 api/client/image/import.go
 delete mode 100644 api/client/image/load.go
 delete mode 100644 api/client/image/pull.go
 delete mode 100644 api/client/image/push.go
 delete mode 100644 api/client/image/remove.go
 delete mode 100644 api/client/image/save.go
 delete mode 100644 api/client/image/search.go
 delete mode 100644 api/client/image/tag.go
 delete mode 100644 api/client/info.go
 delete mode 100644 api/client/inspect.go
 delete mode 100644 api/client/inspect/inspector.go
 delete mode 100644 api/client/inspect/inspector_test.go
 delete mode 100644 api/client/network/cmd.go
 delete mode 100644 api/client/network/connect.go
 delete mode 100644 api/client/network/create.go
 delete mode 100644 api/client/network/disconnect.go
 delete mode 100644 api/client/network/inspect.go
 delete mode 100644 api/client/network/list.go
 delete mode 100644 api/client/network/remove.go
 delete mode 100644 api/client/node/cmd.go
 delete mode 100644 api/client/node/demote.go
 delete mode 100644 api/client/node/inspect.go
 delete mode 100644 api/client/node/list.go
 delete mode 100644 api/client/node/opts.go
 delete mode 100644 api/client/node/promote.go
 delete mode 100644 api/client/node/ps.go
 delete mode 100644 api/client/node/remove.go
 delete mode 100644 api/client/node/update.go
 delete mode 100644 api/client/plugin/cmd.go
 delete mode 100644 api/client/plugin/cmd_experimental.go
 delete mode 100644 api/client/plugin/disable.go
 delete mode 100644 api/client/plugin/enable.go
 delete mode 100644 api/client/plugin/inspect.go
 delete mode 100644 api/client/plugin/install.go
 delete mode 100644 api/client/plugin/list.go
 delete mode 100644 api/client/plugin/push.go
 delete mode 100644 api/client/plugin/remove.go
 delete mode 100644 api/client/plugin/set.go
 delete mode 100644 api/client/registry.go
 delete mode 100644 api/client/registry/login.go
 delete mode 100644 api/client/registry/logout.go
 delete mode 100644 api/client/service/cmd.go
 delete mode 100644 api/client/service/create.go
 delete mode 100644 api/client/service/inspect.go
 delete mode 100644 api/client/service/inspect_test.go
 delete mode 100644 api/client/service/list.go
 delete mode 100644 api/client/service/opts.go
 delete mode 100644 api/client/service/opts_test.go
 delete mode 100644 api/client/service/ps.go
 delete mode 100644 api/client/service/remove.go
 delete mode 100644 api/client/service/scale.go
 delete mode 100644 api/client/service/update.go
 delete mode 100644 api/client/service/update_test.go
 delete mode 100644 api/client/stack/cmd.go
 delete mode 100644 api/client/stack/cmd_stub.go
 delete mode 100644 api/client/stack/common.go
 delete mode 100644 api/client/stack/config.go
 delete mode 100644 api/client/stack/deploy.go
 delete mode 100644 api/client/stack/opts.go
 delete mode 100644 api/client/stack/ps.go
 delete mode 100644 api/client/stack/remove.go
 delete mode 100644 api/client/swarm/cmd.go
 delete mode 100644 api/client/swarm/init.go
 delete mode 100644 api/client/swarm/join.go
 delete mode 100644 api/client/swarm/join_token.go
 delete mode 100644 api/client/swarm/leave.go
 delete mode 100644 api/client/swarm/opts.go
 delete mode 100644 api/client/swarm/opts_test.go
 delete mode 100644 api/client/swarm/update.go
 delete mode 100644 api/client/system/events.go
 delete mode 100644 api/client/system/events_utils.go
 delete mode 100644 api/client/system/version.go
 delete mode 100644 api/client/task/print.go
 delete mode 100644 api/client/trust.go
 delete mode 100644 api/client/trust_test.go
 delete mode 100644 api/client/update.go
 delete mode 100644 api/client/utils.go
 delete mode 100644 api/client/volume/cmd.go
 delete mode 100644 api/client/volume/create.go
 delete mode 100644 api/client/volume/inspect.go
 delete mode 100644 api/client/volume/list.go
 delete mode 100644 api/client/volume/remove.go
 delete mode 100644 api/common.go
 delete mode 100644 api/common_test.go
 delete mode 100644 api/fixtures/keyfile
 delete mode 100644 api/server/httputils/decoder.go
 delete mode 100644 api/server/httputils/errors.go
 delete mode 100644 api/server/httputils/form.go
 delete mode 100644 api/server/httputils/form_test.go
 delete mode 100644 api/server/httputils/httputils.go
 delete mode 100644 api/server/middleware.go
 delete mode 100644 api/server/middleware/cors.go
 delete mode 100644 api/server/middleware/debug.go
 delete mode 100644 api/server/middleware/middleware.go
 delete mode 100644 api/server/middleware/user_agent.go
 delete mode 100644 api/server/middleware/version.go
 delete mode 100644 api/server/middleware/version_test.go
 delete mode 100644 api/server/profiler.go
 delete mode 100644 api/server/router/build/backend.go
 delete mode 100644 api/server/router/build/build.go
 delete mode 100644 api/server/router/build/build_routes.go
 delete mode 100644 api/server/router/container/backend.go
 delete mode 100644 api/server/router/container/container.go
 delete mode 100644 api/server/router/container/container_routes.go
 delete mode 100644 api/server/router/container/copy.go
 delete mode 100644 api/server/router/container/exec.go
 delete mode 100644 api/server/router/container/inspect.go
 delete mode 100644 api/server/router/image/backend.go
 delete mode 100644 api/server/router/image/image.go
 delete mode 100644 api/server/router/image/image_routes.go
 delete mode 100644 api/server/router/local.go
 delete mode 100644 api/server/router/network/backend.go
 delete mode 100644 api/server/router/network/filter.go
 delete mode 100644 api/server/router/network/network.go
 delete mode 100644 api/server/router/network/network_routes.go
 delete mode 100644 api/server/router/plugin/backend.go
 delete mode 100644 api/server/router/plugin/plugin.go
 delete mode 100644 api/server/router/plugin/plugin_experimental.go
 delete mode 100644 api/server/router/plugin/plugin_regular.go
 delete mode 100644 api/server/router/plugin/plugin_routes.go
 delete mode 100644 api/server/router/router.go
 delete mode 100644 api/server/router/swarm/backend.go
 delete mode 100644 api/server/router/swarm/cluster.go
 delete mode 100644 api/server/router/swarm/cluster_routes.go
 delete mode 100644 api/server/router/system/backend.go
 delete mode 100644 api/server/router/system/system.go
 delete mode 100644 api/server/router/system/system_routes.go
 delete mode 100644 api/server/router/volume/backend.go
 delete mode 100644 api/server/router/volume/volume.go
 delete mode 100644 api/server/router/volume/volume_routes.go
 delete mode 100644 api/server/router_swapper.go
 delete mode 100644 api/server/server.go
 delete mode 100644 api/server/server_test.go
 delete mode 100644 api/types/backend/backend.go
 delete mode 100644 builder/builder.go
 delete mode 100644 builder/context.go
 delete mode 100644 builder/context_test.go
 delete mode 100644 builder/context_unix.go
 delete mode 100644 builder/context_windows.go
 delete mode 100644 builder/dockerfile/bflag.go
 delete mode 100644 builder/dockerfile/bflag_test.go
 delete mode 100644 builder/dockerfile/builder.go
 delete mode 100644 builder/dockerfile/builder_unix.go
 delete mode 100644 builder/dockerfile/builder_windows.go
 delete mode 100644 builder/dockerfile/command/command.go
 delete mode 100644 builder/dockerfile/dispatchers.go
 delete mode 100644 builder/dockerfile/dispatchers_unix.go
 delete mode 100644 builder/dockerfile/dispatchers_windows.go
 delete mode 100644 builder/dockerfile/dispatchers_windows_test.go
 delete mode 100644 builder/dockerfile/envVarTest
 delete mode 100644 builder/dockerfile/evaluator.go
 delete mode 100644 builder/dockerfile/evaluator_test.go
 delete mode 100644 builder/dockerfile/evaluator_unix.go
 delete mode 100644 builder/dockerfile/evaluator_windows.go
 delete mode 100644 builder/dockerfile/internals.go
 delete mode 100644 builder/dockerfile/internals_test.go
 delete mode 100644 builder/dockerfile/internals_unix.go
 delete mode 100644 builder/dockerfile/internals_windows.go
 delete mode 100644 builder/dockerfile/internals_windows_test.go
 delete mode 100644 builder/dockerfile/parser/dumper/main.go
 delete mode 100644 builder/dockerfile/parser/json_test.go
 delete mode 100644 builder/dockerfile/parser/line_parsers.go
 delete mode 100644 builder/dockerfile/parser/parser.go
 delete mode 100644 builder/dockerfile/parser/parser_test.go
 delete mode 100644 builder/dockerfile/parser/testfile-line/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result
 delete mode 100644 builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/brimstone-consuldock/result
 delete mode 100644 builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/brimstone-docker-consul/result
 delete mode 100644 builder/dockerfile/parser/testfiles/continueIndent/Dockerfile
 delete mode 100644 builder/dockerfile/parser/testfiles/continueIndent/result
 delete mode 100644 
builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/cpuguy83-nagios/result delete mode 100644 builder/dockerfile/parser/testfiles/docker/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/docker/result delete mode 100644 builder/dockerfile/parser/testfiles/env/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/env/result delete mode 100644 builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/escape-after-comment/result delete mode 100644 builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/escape-nonewline/result delete mode 100644 builder/dockerfile/parser/testfiles/escape/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/escape/result delete mode 100644 builder/dockerfile/parser/testfiles/escapes/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/escapes/result delete mode 100644 builder/dockerfile/parser/testfiles/flags/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/flags/result delete mode 100644 builder/dockerfile/parser/testfiles/health/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/health/result delete mode 100644 builder/dockerfile/parser/testfiles/influxdb/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/influxdb/result delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result delete mode 100644 builder/dockerfile/parser/testfiles/json/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/json/result delete mode 100644 builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result delete mode 100644 builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result delete mode 100644 builder/dockerfile/parser/testfiles/mail/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/mail/result delete mode 100644 builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/multiple-volumes/result delete mode 100644 builder/dockerfile/parser/testfiles/mumble/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/mumble/result delete mode 100644 builder/dockerfile/parser/testfiles/nginx/Dockerfile delete mode 100644 
builder/dockerfile/parser/testfiles/nginx/result delete mode 100644 builder/dockerfile/parser/testfiles/tf2/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/tf2/result delete mode 100644 builder/dockerfile/parser/testfiles/weechat/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/weechat/result delete mode 100644 builder/dockerfile/parser/testfiles/znc/Dockerfile delete mode 100644 builder/dockerfile/parser/testfiles/znc/result delete mode 100644 builder/dockerfile/parser/utils.go delete mode 100644 builder/dockerfile/shell_parser.go delete mode 100644 builder/dockerfile/shell_parser_test.go delete mode 100644 builder/dockerfile/support.go delete mode 100644 builder/dockerfile/support_test.go delete mode 100644 builder/dockerfile/wordsTest delete mode 100644 builder/dockerignore.go delete mode 100644 builder/dockerignore/dockerignore.go delete mode 100644 builder/dockerignore/dockerignore_test.go delete mode 100644 builder/dockerignore_test.go delete mode 100644 builder/git.go delete mode 100644 builder/remote.go delete mode 100644 builder/remote_test.go delete mode 100644 builder/tarsum.go delete mode 100644 builder/tarsum_test.go delete mode 100644 builder/utils_test.go delete mode 100644 cli/cli.go delete mode 100644 cli/cobraadaptor/adaptor.go delete mode 100644 cli/error.go delete mode 100644 cli/flagerrors.go delete mode 100644 cli/flags/client.go delete mode 100644 cli/flags/common.go delete mode 100644 cli/required.go delete mode 100644 cli/usage.go delete mode 100644 cliconfig/config.go delete mode 100644 cliconfig/config_test.go delete mode 100644 cliconfig/configfile/file.go delete mode 100644 cliconfig/configfile/file_test.go delete mode 100644 cliconfig/credentials/credentials.go delete mode 100644 cliconfig/credentials/default_store.go delete mode 100644 cliconfig/credentials/default_store_darwin.go delete mode 100644 cliconfig/credentials/default_store_linux.go delete mode 100644 cliconfig/credentials/default_store_unsupported.go delete mode 100644 cliconfig/credentials/default_store_windows.go delete mode 100644 cliconfig/credentials/file_store.go delete mode 100644 cliconfig/credentials/file_store_test.go delete mode 100644 cliconfig/credentials/native_store.go delete mode 100644 cliconfig/credentials/native_store_test.go delete mode 100644 cmd/docker/daemon.go delete mode 100644 cmd/docker/daemon_none.go delete mode 100644 cmd/docker/daemon_none_test.go delete mode 100644 cmd/docker/daemon_unix.go delete mode 100644 cmd/docker/docker.go delete mode 100644 cmd/docker/docker_test.go delete mode 100644 cmd/docker/docker_windows.go delete mode 100644 cmd/docker/usage.go delete mode 100644 cmd/docker/usage_test.go delete mode 100644 cmd/dockerd/README.md delete mode 100644 cmd/dockerd/daemon.go delete mode 100644 cmd/dockerd/daemon_freebsd.go delete mode 100644 cmd/dockerd/daemon_linux.go delete mode 100644 cmd/dockerd/daemon_solaris.go delete mode 100644 cmd/dockerd/daemon_test.go delete mode 100644 cmd/dockerd/daemon_unix.go delete mode 100644 cmd/dockerd/daemon_unix_test.go delete mode 100644 cmd/dockerd/daemon_windows.go delete mode 100644 cmd/dockerd/docker.go delete mode 100644 cmd/dockerd/docker_windows.go delete mode 100644 cmd/dockerd/hack/malformed_host_override.go delete mode 100644 cmd/dockerd/hack/malformed_host_override_test.go delete mode 100644 cmd/dockerd/routes.go delete mode 100644 cmd/dockerd/routes_experimental.go delete mode 100644 cmd/dockerd/service_unsupported.go delete mode 100644 cmd/dockerd/service_windows.go delete 
mode 100644 container/archive.go delete mode 100644 container/container.go delete mode 100644 container/container_solaris.go delete mode 100644 container/container_unit_test.go delete mode 100644 container/container_unix.go delete mode 100644 container/container_windows.go delete mode 100644 container/health.go delete mode 100644 container/history.go delete mode 100644 container/memory_store.go delete mode 100644 container/memory_store_test.go delete mode 100644 container/monitor.go delete mode 100644 container/mounts_unix.go delete mode 100644 container/mounts_windows.go delete mode 100644 container/state.go delete mode 100644 container/state_solaris.go delete mode 100644 container/state_test.go delete mode 100644 container/state_unix.go delete mode 100644 container/state_windows.go delete mode 100644 container/store.go delete mode 100644 contrib/README.md delete mode 100644 contrib/REVIEWERS delete mode 100644 contrib/apparmor/main.go delete mode 100644 contrib/apparmor/template.go delete mode 100644 contrib/builder/deb/amd64/README.md delete mode 100755 contrib/builder/deb/amd64/build.sh delete mode 100644 contrib/builder/deb/amd64/debian-jessie/Dockerfile delete mode 100644 contrib/builder/deb/amd64/debian-stretch/Dockerfile delete mode 100644 contrib/builder/deb/amd64/debian-wheezy/Dockerfile delete mode 100755 contrib/builder/deb/amd64/generate.sh delete mode 100644 contrib/builder/deb/amd64/ubuntu-precise/Dockerfile delete mode 100644 contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile delete mode 100644 contrib/builder/deb/amd64/ubuntu-wily/Dockerfile delete mode 100644 contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile delete mode 100644 contrib/builder/deb/armhf/debian-jessie/Dockerfile delete mode 100644 contrib/builder/deb/armhf/raspbian-jessie/Dockerfile delete mode 100644 contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile delete mode 100644 contrib/builder/rpm/amd64/README.md delete mode 100755 contrib/builder/rpm/amd64/build.sh delete mode 100644 contrib/builder/rpm/amd64/centos-7/Dockerfile delete mode 100644 contrib/builder/rpm/amd64/fedora-22/Dockerfile delete mode 100644 contrib/builder/rpm/amd64/fedora-23/Dockerfile delete mode 100644 contrib/builder/rpm/amd64/fedora-24/Dockerfile delete mode 100755 contrib/builder/rpm/amd64/generate.sh delete mode 100644 contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile delete mode 100644 contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile delete mode 100644 contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile delete mode 100755 contrib/check-config.sh delete mode 100644 contrib/completion/REVIEWERS delete mode 100644 contrib/completion/bash/docker delete mode 100644 contrib/completion/fish/docker.fish delete mode 100644 contrib/completion/powershell/posh-docker.psm1 delete mode 100644 contrib/completion/zsh/REVIEWERS delete mode 100644 contrib/completion/zsh/_docker delete mode 100644 contrib/desktop-integration/README.md delete mode 100644 contrib/desktop-integration/chromium/Dockerfile delete mode 100644 contrib/desktop-integration/gparted/Dockerfile delete mode 100644 contrib/docker-device-tool/README.md delete mode 100644 contrib/docker-device-tool/device_tool.go delete mode 100644 contrib/docker-device-tool/device_tool_windows.go delete mode 100755 contrib/dockerize-disk.sh delete mode 100755 contrib/download-frozen-image-v1.sh delete mode 100755 contrib/download-frozen-image-v2.sh delete mode 100644 contrib/gitdm/aliases delete mode 100644 contrib/gitdm/domain-map delete mode 100755 contrib/gitdm/generate_aliases.sh delete mode 
100644 contrib/gitdm/gitdm.config delete mode 100644 contrib/httpserver/Dockerfile delete mode 100644 contrib/httpserver/server.go delete mode 100644 contrib/init/openrc/docker.confd delete mode 100644 contrib/init/openrc/docker.initd delete mode 100644 contrib/init/systemd/REVIEWERS delete mode 100644 contrib/init/systemd/docker.service delete mode 100644 contrib/init/systemd/docker.service.rpm delete mode 100644 contrib/init/systemd/docker.socket delete mode 100755 contrib/init/sysvinit-debian/docker delete mode 100644 contrib/init/sysvinit-debian/docker.default delete mode 100755 contrib/init/sysvinit-redhat/docker delete mode 100644 contrib/init/sysvinit-redhat/docker.sysconfig delete mode 100644 contrib/init/upstart/REVIEWERS delete mode 100644 contrib/init/upstart/docker.conf delete mode 100755 contrib/mkimage-alpine.sh delete mode 100644 contrib/mkimage-arch-pacman.conf delete mode 100755 contrib/mkimage-arch.sh delete mode 100644 contrib/mkimage-archarm-pacman.conf delete mode 100755 contrib/mkimage-busybox.sh delete mode 100755 contrib/mkimage-crux.sh delete mode 100755 contrib/mkimage-debootstrap.sh delete mode 100755 contrib/mkimage-rinse.sh delete mode 100755 contrib/mkimage-yum.sh delete mode 100755 contrib/mkimage.sh delete mode 100755 contrib/mkimage/.febootstrap-minimize delete mode 100755 contrib/mkimage/busybox-static delete mode 100755 contrib/mkimage/debootstrap delete mode 100755 contrib/mkimage/mageia-urpmi delete mode 100755 contrib/mkimage/rinse delete mode 100644 contrib/nnp-test/Dockerfile delete mode 100644 contrib/nnp-test/nnp-test.c delete mode 100755 contrib/nuke-graph-directory.sh delete mode 100755 contrib/project-stats.sh delete mode 100755 contrib/report-issue.sh delete mode 100755 contrib/reprepro/suites.sh delete mode 100644 contrib/selinux-fedora-24/docker-engine-selinux/LICENSE delete mode 100644 contrib/selinux-fedora-24/docker-engine-selinux/Makefile delete mode 100644 contrib/selinux-fedora-24/docker-engine-selinux/README.md delete mode 100644 contrib/selinux-fedora-24/docker-engine-selinux/docker.fc delete mode 100644 contrib/selinux-fedora-24/docker-engine-selinux/docker.if delete mode 100644 contrib/selinux-fedora-24/docker-engine-selinux/docker.te delete mode 100644 contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE delete mode 100644 contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile delete mode 100644 contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md delete mode 100644 contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc delete mode 100644 contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if delete mode 100644 contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te delete mode 100644 contrib/selinux/docker-engine-selinux/LICENSE delete mode 100644 contrib/selinux/docker-engine-selinux/Makefile delete mode 100644 contrib/selinux/docker-engine-selinux/docker.fc delete mode 100644 contrib/selinux/docker-engine-selinux/docker.if delete mode 100644 contrib/selinux/docker-engine-selinux/docker.te delete mode 100644 contrib/selinux/docker-engine-selinux/docker_selinux.8.gz delete mode 100644 contrib/syntax/kate/Dockerfile.xml delete mode 100644 contrib/syntax/nano/Dockerfile.nanorc delete mode 100644 contrib/syntax/nano/README.md delete mode 100644 contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences delete mode 100644 contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage delete mode 100644 contrib/syntax/textmate/Docker.tmbundle/info.plist delete mode 
100644 contrib/syntax/textmate/README.md delete mode 100644 contrib/syntax/textmate/REVIEWERS delete mode 100644 contrib/syntax/vim/LICENSE delete mode 100644 contrib/syntax/vim/README.md delete mode 100644 contrib/syntax/vim/doc/dockerfile.txt delete mode 100644 contrib/syntax/vim/ftdetect/dockerfile.vim delete mode 100644 contrib/syntax/vim/syntax/dockerfile.vim delete mode 100644 contrib/syscall-test/Dockerfile delete mode 100644 contrib/syscall-test/acct.c delete mode 100644 contrib/syscall-test/ns.c delete mode 100644 contrib/syscall-test/userns.c delete mode 100644 contrib/udev/80-docker.rules delete mode 100644 contrib/vagrant-docker/README.md delete mode 100644 daemon/apparmor_default.go delete mode 100644 daemon/apparmor_default_unsupported.go delete mode 100644 daemon/archive.go delete mode 100644 daemon/archive_unix.go delete mode 100644 daemon/archive_windows.go delete mode 100644 daemon/attach.go delete mode 100644 daemon/auth.go delete mode 100644 daemon/caps/utils_unix.go delete mode 100644 daemon/changes.go delete mode 100644 daemon/cluster/cluster.go delete mode 100644 daemon/cluster/convert/container.go delete mode 100644 daemon/cluster/convert/network.go delete mode 100644 daemon/cluster/convert/node.go delete mode 100644 daemon/cluster/convert/service.go delete mode 100644 daemon/cluster/convert/swarm.go delete mode 100644 daemon/cluster/convert/task.go delete mode 100644 daemon/cluster/executor/backend.go delete mode 100644 daemon/cluster/executor/container/adapter.go delete mode 100644 daemon/cluster/executor/container/container.go delete mode 100644 daemon/cluster/executor/container/controller.go delete mode 100644 daemon/cluster/executor/container/errors.go delete mode 100644 daemon/cluster/executor/container/executor.go delete mode 100644 daemon/cluster/executor/container/health_test.go delete mode 100644 daemon/cluster/executor/container/validate.go delete mode 100644 daemon/cluster/executor/container/validate_test.go delete mode 100644 daemon/cluster/executor/container/validate_unix_test.go delete mode 100644 daemon/cluster/executor/container/validate_windows_test.go delete mode 100644 daemon/cluster/filters.go delete mode 100644 daemon/cluster/helpers.go delete mode 100644 daemon/cluster/listen_addr.go delete mode 100644 daemon/cluster/provider/network.go delete mode 100644 daemon/commit.go delete mode 100644 daemon/config.go delete mode 100644 daemon/config_experimental.go delete mode 100644 daemon/config_solaris.go delete mode 100644 daemon/config_stub.go delete mode 100644 daemon/config_test.go delete mode 100644 daemon/config_unix.go delete mode 100644 daemon/config_windows.go delete mode 100644 daemon/container.go delete mode 100644 daemon/container_operations.go delete mode 100644 daemon/container_operations_solaris.go delete mode 100644 daemon/container_operations_unix.go delete mode 100644 daemon/container_operations_windows.go delete mode 100644 daemon/create.go delete mode 100644 daemon/create_unix.go delete mode 100644 daemon/create_windows.go delete mode 100644 daemon/daemon.go delete mode 100644 daemon/daemon_experimental.go delete mode 100644 daemon/daemon_linux.go delete mode 100644 daemon/daemon_linux_test.go delete mode 100644 daemon/daemon_solaris.go delete mode 100644 daemon/daemon_stub.go delete mode 100644 daemon/daemon_test.go delete mode 100644 daemon/daemon_unix.go delete mode 100644 daemon/daemon_unix_test.go delete mode 100644 daemon/daemon_unsupported.go delete mode 100644 daemon/daemon_windows.go delete mode 100644 
daemon/debugtrap_unix.go delete mode 100644 daemon/debugtrap_unsupported.go delete mode 100644 daemon/debugtrap_windows.go delete mode 100644 daemon/delete.go delete mode 100644 daemon/delete_test.go delete mode 100644 daemon/discovery.go delete mode 100644 daemon/discovery_test.go delete mode 100644 daemon/errors.go delete mode 100644 daemon/events.go delete mode 100644 daemon/events/events.go delete mode 100644 daemon/events/events_test.go delete mode 100644 daemon/events/filter.go delete mode 100644 daemon/events/testutils/testutils.go delete mode 100644 daemon/events_test.go delete mode 100644 daemon/exec.go delete mode 100644 daemon/exec/exec.go delete mode 100644 daemon/exec_linux.go delete mode 100644 daemon/exec_solaris.go delete mode 100644 daemon/exec_windows.go delete mode 100644 daemon/export.go delete mode 100644 daemon/graphdriver/aufs/aufs.go delete mode 100644 daemon/graphdriver/aufs/aufs_test.go delete mode 100644 daemon/graphdriver/aufs/dirs.go delete mode 100644 daemon/graphdriver/aufs/mount.go delete mode 100644 daemon/graphdriver/aufs/mount_linux.go delete mode 100644 daemon/graphdriver/aufs/mount_unsupported.go delete mode 100644 daemon/graphdriver/btrfs/btrfs.go delete mode 100644 daemon/graphdriver/btrfs/btrfs_test.go delete mode 100644 daemon/graphdriver/btrfs/dummy_unsupported.go delete mode 100644 daemon/graphdriver/btrfs/version.go delete mode 100644 daemon/graphdriver/btrfs/version_none.go delete mode 100644 daemon/graphdriver/btrfs/version_test.go delete mode 100644 daemon/graphdriver/counter.go delete mode 100644 daemon/graphdriver/devmapper/README.md delete mode 100644 daemon/graphdriver/devmapper/deviceset.go delete mode 100644 daemon/graphdriver/devmapper/devmapper_doc.go delete mode 100644 daemon/graphdriver/devmapper/devmapper_test.go delete mode 100644 daemon/graphdriver/devmapper/driver.go delete mode 100644 daemon/graphdriver/devmapper/mount.go delete mode 100644 daemon/graphdriver/driver.go delete mode 100644 daemon/graphdriver/driver_freebsd.go delete mode 100644 daemon/graphdriver/driver_linux.go delete mode 100644 daemon/graphdriver/driver_solaris.go delete mode 100644 daemon/graphdriver/driver_unsupported.go delete mode 100644 daemon/graphdriver/driver_windows.go delete mode 100644 daemon/graphdriver/fsdiff.go delete mode 100644 daemon/graphdriver/graphtest/graphbench_unix.go delete mode 100644 daemon/graphdriver/graphtest/graphtest_unix.go delete mode 100644 daemon/graphdriver/graphtest/graphtest_windows.go delete mode 100644 daemon/graphdriver/graphtest/testutil.go delete mode 100644 daemon/graphdriver/graphtest/testutil_unix.go delete mode 100644 daemon/graphdriver/overlay/copy.go delete mode 100644 daemon/graphdriver/overlay/overlay.go delete mode 100644 daemon/graphdriver/overlay/overlay_test.go delete mode 100644 daemon/graphdriver/overlay/overlay_unsupported.go delete mode 100644 daemon/graphdriver/overlay2/mount.go delete mode 100644 daemon/graphdriver/overlay2/overlay.go delete mode 100644 daemon/graphdriver/overlay2/overlay_test.go delete mode 100644 daemon/graphdriver/overlay2/overlay_unsupported.go delete mode 100644 daemon/graphdriver/overlay2/randomid.go delete mode 100644 daemon/graphdriver/plugin.go delete mode 100644 daemon/graphdriver/plugin_unsupported.go delete mode 100644 daemon/graphdriver/proxy.go delete mode 100644 daemon/graphdriver/register/register_aufs.go delete mode 100644 daemon/graphdriver/register/register_btrfs.go delete mode 100644 daemon/graphdriver/register/register_devicemapper.go delete mode 100644 
daemon/graphdriver/register/register_overlay.go delete mode 100644 daemon/graphdriver/register/register_vfs.go delete mode 100644 daemon/graphdriver/register/register_windows.go delete mode 100644 daemon/graphdriver/register/register_zfs.go delete mode 100644 daemon/graphdriver/vfs/driver.go delete mode 100644 daemon/graphdriver/vfs/vfs_test.go delete mode 100644 daemon/graphdriver/windows/windows.go delete mode 100644 daemon/graphdriver/windows/windows_windows_test.go delete mode 100644 daemon/graphdriver/zfs/MAINTAINERS delete mode 100644 daemon/graphdriver/zfs/zfs.go delete mode 100644 daemon/graphdriver/zfs/zfs_freebsd.go delete mode 100644 daemon/graphdriver/zfs/zfs_linux.go delete mode 100644 daemon/graphdriver/zfs/zfs_solaris.go delete mode 100644 daemon/graphdriver/zfs/zfs_test.go delete mode 100644 daemon/graphdriver/zfs/zfs_unsupported.go delete mode 100644 daemon/health.go delete mode 100644 daemon/health_test.go delete mode 100644 daemon/image.go delete mode 100644 daemon/image_delete.go delete mode 100644 daemon/image_exporter.go delete mode 100644 daemon/image_history.go delete mode 100644 daemon/image_inspect.go delete mode 100644 daemon/image_pull.go delete mode 100644 daemon/image_push.go delete mode 100644 daemon/image_tag.go delete mode 100644 daemon/images.go delete mode 100644 daemon/import.go delete mode 100644 daemon/info.go delete mode 100644 daemon/inspect.go delete mode 100644 daemon/inspect_solaris.go delete mode 100644 daemon/inspect_unix.go delete mode 100644 daemon/inspect_windows.go delete mode 100644 daemon/keys.go delete mode 100644 daemon/keys_unsupported.go delete mode 100644 daemon/kill.go delete mode 100644 daemon/links.go delete mode 100644 daemon/links/links.go delete mode 100644 daemon/links/links_test.go delete mode 100644 daemon/links_test.go delete mode 100644 daemon/list.go delete mode 100644 daemon/list_unix.go delete mode 100644 daemon/list_windows.go delete mode 100644 daemon/logdrivers_linux.go delete mode 100644 daemon/logdrivers_windows.go delete mode 100644 daemon/logger/awslogs/cloudwatchlogs.go delete mode 100644 daemon/logger/awslogs/cloudwatchlogs_test.go delete mode 100644 daemon/logger/awslogs/cwlogsiface_mock_test.go delete mode 100644 daemon/logger/context.go delete mode 100644 daemon/logger/copier.go delete mode 100644 daemon/logger/copier_test.go delete mode 100644 daemon/logger/etwlogs/etwlogs_windows.go delete mode 100644 daemon/logger/factory.go delete mode 100644 daemon/logger/fluentd/fluentd.go delete mode 100644 daemon/logger/gcplogs/gcplogging.go delete mode 100644 daemon/logger/gelf/gelf.go delete mode 100644 daemon/logger/gelf/gelf_unsupported.go delete mode 100644 daemon/logger/journald/journald.go delete mode 100644 daemon/logger/journald/journald_unsupported.go delete mode 100644 daemon/logger/journald/read.go delete mode 100644 daemon/logger/journald/read_native.go delete mode 100644 daemon/logger/journald/read_native_compat.go delete mode 100644 daemon/logger/journald/read_unsupported.go delete mode 100644 daemon/logger/jsonfilelog/jsonfilelog.go delete mode 100644 daemon/logger/jsonfilelog/jsonfilelog_test.go delete mode 100644 daemon/logger/jsonfilelog/read.go delete mode 100644 daemon/logger/logger.go delete mode 100644 daemon/logger/loggerutils/log_tag.go delete mode 100644 daemon/logger/loggerutils/log_tag_test.go delete mode 100644 daemon/logger/loggerutils/rotatefilewriter.go delete mode 100644 daemon/logger/splunk/splunk.go delete mode 100644 daemon/logger/syslog/syslog.go delete mode 100644 
daemon/logger/syslog/syslog_test.go delete mode 100644 daemon/logger/syslog/syslog_unsupported.go delete mode 100644 daemon/logs.go delete mode 100644 daemon/logs_test.go delete mode 100644 daemon/monitor.go delete mode 100644 daemon/monitor_linux.go delete mode 100644 daemon/monitor_solaris.go delete mode 100644 daemon/monitor_windows.go delete mode 100644 daemon/mounts.go delete mode 100644 daemon/names.go delete mode 100644 daemon/network.go delete mode 100644 daemon/network/filter.go delete mode 100644 daemon/network/settings.go delete mode 100644 daemon/oci_linux.go delete mode 100644 daemon/oci_solaris.go delete mode 100644 daemon/oci_windows.go delete mode 100644 daemon/pause.go delete mode 100644 daemon/rename.go delete mode 100644 daemon/resize.go delete mode 100644 daemon/restart.go delete mode 100644 daemon/search.go delete mode 100644 daemon/search_test.go delete mode 100644 daemon/seccomp_disabled.go delete mode 100644 daemon/seccomp_linux.go delete mode 100644 daemon/seccomp_unsupported.go delete mode 100644 daemon/selinux_linux.go delete mode 100644 daemon/selinux_unsupported.go delete mode 100644 daemon/start.go delete mode 100644 daemon/start_linux.go delete mode 100644 daemon/start_windows.go delete mode 100644 daemon/stats.go delete mode 100644 daemon/stats_collector_solaris.go delete mode 100644 daemon/stats_collector_unix.go delete mode 100644 daemon/stats_collector_windows.go delete mode 100644 daemon/stop.go delete mode 100644 daemon/top_unix.go delete mode 100644 daemon/top_unix_test.go delete mode 100644 daemon/top_windows.go delete mode 100644 daemon/unpause.go delete mode 100644 daemon/update.go delete mode 100644 daemon/update_linux.go delete mode 100644 daemon/update_solaris.go delete mode 100644 daemon/update_windows.go delete mode 100644 daemon/volumes.go delete mode 100644 daemon/volumes_unit_test.go delete mode 100644 daemon/volumes_unix.go delete mode 100644 daemon/volumes_windows.go delete mode 100644 daemon/wait.go delete mode 100644 distribution/errors.go delete mode 100644 distribution/fixtures/validate_manifest/bad_manifest delete mode 100644 distribution/fixtures/validate_manifest/extra_data_manifest delete mode 100644 distribution/fixtures/validate_manifest/good_manifest delete mode 100644 distribution/metadata/metadata.go delete mode 100644 distribution/metadata/v1_id_service.go delete mode 100644 distribution/metadata/v1_id_service_test.go delete mode 100644 distribution/metadata/v2_metadata_service.go delete mode 100644 distribution/metadata/v2_metadata_service_test.go delete mode 100644 distribution/pull.go delete mode 100644 distribution/pull_v1.go delete mode 100644 distribution/pull_v2.go delete mode 100644 distribution/pull_v2_test.go delete mode 100644 distribution/pull_v2_unix.go delete mode 100644 distribution/pull_v2_windows.go delete mode 100644 distribution/push.go delete mode 100644 distribution/push_v1.go delete mode 100644 distribution/push_v2.go delete mode 100644 distribution/registry.go delete mode 100644 distribution/registry_unit_test.go delete mode 100644 distribution/xfer/download.go delete mode 100644 distribution/xfer/download_test.go delete mode 100644 distribution/xfer/transfer.go delete mode 100644 distribution/xfer/transfer_test.go delete mode 100644 distribution/xfer/upload.go delete mode 100644 distribution/xfer/upload_test.go delete mode 100644 dockerversion/useragent.go delete mode 100644 dockerversion/version_lib.go rename {docs => engine}/.gitignore (100%) rename {docs => engine}/Dockerfile (100%) rename {docs => 
engine}/Makefile (100%) rename {docs => engine}/README.md (100%) rename {docs => engine}/admin/ambassador_pattern_linking.md (100%) rename {docs => engine}/admin/b2d_volume_images/add_cd.png (100%) rename {docs => engine}/admin/b2d_volume_images/add_new_controller.png (100%) rename {docs => engine}/admin/b2d_volume_images/add_volume.png (100%) rename {docs => engine}/admin/b2d_volume_images/boot_order.png (100%) rename {docs => engine}/admin/b2d_volume_images/gparted.png (100%) rename {docs => engine}/admin/b2d_volume_images/gparted2.png (100%) rename {docs => engine}/admin/b2d_volume_images/verify.png (100%) rename {docs => engine}/admin/b2d_volume_resize.md (100%) rename {docs => engine}/admin/chef.md (100%) rename {docs => engine}/admin/dsc.md (100%) rename {docs => engine}/admin/formatting.md (100%) rename {docs => engine}/admin/host_integration.md (100%) rename {docs => engine}/admin/index.md (100%) rename {docs => engine}/admin/live-restore.md (100%) rename {docs => engine}/admin/logging/awslogs.md (100%) rename {docs => engine}/admin/logging/etwlogs.md (100%) rename {docs => engine}/admin/logging/fluentd.md (100%) rename {docs => engine}/admin/logging/gcplogs.md (100%) rename {docs => engine}/admin/logging/index.md (100%) rename {docs => engine}/admin/logging/journald.md (100%) rename {docs => engine}/admin/logging/log_tags.md (100%) rename {docs => engine}/admin/logging/overview.md (100%) rename {docs => engine}/admin/logging/splunk.md (100%) rename {docs => engine}/admin/menu.md (100%) rename {docs => engine}/admin/puppet.md (100%) rename {docs => engine}/admin/registry_mirror.md (100%) rename {docs => engine}/admin/runmetrics.md (100%) rename {docs => engine}/admin/systemd.md (100%) rename {docs => engine}/admin/using_supervisord.md (100%) rename {docs => engine}/article-img/architecture.svg (100%) rename {docs => engine}/article-img/engine-components-flow.png (100%) rename {docs => engine}/breaking_changes.md (100%) rename {docs => engine}/deprecated.md (100%) rename {docs => engine}/examples/apt-cacher-ng.Dockerfile (100%) rename {docs => engine}/examples/apt-cacher-ng.md (100%) rename {docs => engine}/examples/couchbase.md (100%) rename {docs => engine}/examples/couchbase/web-console.png (100%) rename {docs => engine}/examples/couchdb_data_volumes.md (100%) rename {docs => engine}/examples/index.md (100%) rename {docs => engine}/examples/mongodb.md (100%) rename {docs => engine}/examples/mongodb/Dockerfile (100%) rename {docs => engine}/examples/postgresql_service.Dockerfile (100%) rename {docs => engine}/examples/postgresql_service.md (100%) rename {docs => engine}/examples/running_redis_service.md (100%) rename {docs => engine}/examples/running_riak_service.Dockerfile (100%) rename {docs => engine}/examples/running_riak_service.md (100%) rename {docs => engine}/examples/running_ssh_service.Dockerfile (100%) rename {docs => engine}/examples/running_ssh_service.md (100%) rename {docs => engine}/examples/supervisord.conf (100%) rename {docs => engine}/extend/images/authz_additional_info.png (100%) rename {docs => engine}/extend/images/authz_allow.png (100%) rename {docs => engine}/extend/images/authz_chunked.png (100%) rename {docs => engine}/extend/images/authz_connection_hijack.png (100%) rename {docs => engine}/extend/images/authz_deny.png (100%) rename {docs => engine}/extend/index.md (100%) rename {docs => engine}/extend/legacy_plugins.md (100%) rename {docs => engine}/extend/manifest.md (100%) rename {docs => engine}/extend/menu.md (100%) rename {docs => 
engine}/extend/plugin_api.md (100%) rename {docs => engine}/extend/plugins_authorization.md (100%) rename {docs => engine}/extend/plugins_network.md (100%) rename {docs => engine}/extend/plugins_volume.md (100%) rename {docs => engine}/faq.md (100%) rename {docs => engine}/getstarted/index.md (100%) rename {docs => engine}/getstarted/last_page.md (100%) rename {docs => engine}/getstarted/linux_install_help.md (100%) rename {docs => engine}/getstarted/menu.md (100%) rename {docs => engine}/getstarted/step_five.md (100%) rename {docs => engine}/getstarted/step_four.md (100%) rename {docs => engine}/getstarted/step_one.md (100%) rename {docs => engine}/getstarted/step_six.md (100%) rename {docs => engine}/getstarted/step_three.md (100%) rename {docs => engine}/getstarted/step_two.md (100%) rename {docs => engine}/getstarted/tutimg/add_repository.png (100%) rename {docs => engine}/getstarted/tutimg/browse_and_search.png (100%) rename {docs => engine}/getstarted/tutimg/container_explainer.png (100%) rename {docs => engine}/getstarted/tutimg/hub_signup.png (100%) rename {docs => engine}/getstarted/tutimg/image_found.png (100%) rename {docs => engine}/getstarted/tutimg/line_one.png (100%) rename {docs => engine}/getstarted/tutimg/new_image.png (100%) rename {docs => engine}/getstarted/tutimg/tagger.png (100%) rename {docs => engine}/getstarted/tutimg/whale_repo.png (100%) rename {docs => engine}/index.md (100%) rename {docs => engine}/installation/binaries.md (100%) rename {docs => engine}/installation/cloud/cloud-ex-aws.md (100%) rename {docs => engine}/installation/cloud/cloud-ex-machine-ocean.md (100%) rename {docs => engine}/installation/cloud/index.md (100%) rename {docs => engine}/installation/cloud/overview.md (100%) rename {docs => engine}/installation/images/bad_host.png (100%) rename {docs => engine}/installation/images/cool_view.png (100%) rename {docs => engine}/installation/images/ec2-ubuntu.png (100%) rename {docs => engine}/installation/images/ec2_instance_details.png (100%) rename {docs => engine}/installation/images/ec2_instance_type.png (100%) rename {docs => engine}/installation/images/ec2_launch_instance.png (100%) rename {docs => engine}/installation/images/good_host.png (100%) rename {docs => engine}/installation/images/kitematic.png (100%) rename {docs => engine}/installation/images/linux_docker_host.svg (100%) rename {docs => engine}/installation/images/mac-page-finished.png (100%) rename {docs => engine}/installation/images/mac-page-two.png (100%) rename {docs => engine}/installation/images/mac-password-prompt.png (100%) rename {docs => engine}/installation/images/mac-success.png (100%) rename {docs => engine}/installation/images/mac-welcome-page.png (100%) rename {docs => engine}/installation/images/mac_docker_host.svg (100%) rename {docs => engine}/installation/images/my-docker-vm.png (100%) rename {docs => engine}/installation/images/newsite_view.png (100%) rename {docs => engine}/installation/images/nginx-webserver.png (100%) rename {docs => engine}/installation/images/ocean_click_api.png (100%) rename {docs => engine}/installation/images/ocean_droplet.png (100%) rename {docs => engine}/installation/images/ocean_droplet_ubuntu.png (100%) rename {docs => engine}/installation/images/ocean_gen_token.png (100%) rename {docs => engine}/installation/images/ocean_save_token.png (100%) rename {docs => engine}/installation/images/ocean_token_create.png (100%) rename {docs => engine}/installation/images/virtualization.png (100%) rename {docs => 
engine}/installation/images/win-page-6.png (100%) rename {docs => engine}/installation/images/win-welcome.png (100%) rename {docs => engine}/installation/images/win_docker_host.svg (100%) rename {docs => engine}/installation/images/win_ver.png (100%) rename {docs => engine}/installation/images/windows-boot2docker-cmd.png (100%) rename {docs => engine}/installation/images/windows-boot2docker-powershell.png (100%) rename {docs => engine}/installation/images/windows-boot2docker-start.png (100%) rename {docs => engine}/installation/images/windows-finish.png (100%) rename {docs => engine}/installation/index.md (100%) rename {docs => engine}/installation/linux/SUSE.md (100%) rename {docs => engine}/installation/linux/archlinux.md (100%) rename {docs => engine}/installation/linux/centos.md (100%) rename {docs => engine}/installation/linux/cruxlinux.md (100%) rename {docs => engine}/installation/linux/debian.md (100%) rename {docs => engine}/installation/linux/fedora.md (100%) rename {docs => engine}/installation/linux/gentoolinux.md (100%) rename {docs => engine}/installation/linux/index.md (100%) rename {docs => engine}/installation/linux/oracle.md (100%) rename {docs => engine}/installation/linux/rhel.md (100%) rename {docs => engine}/installation/linux/ubuntulinux.md (100%) rename {docs => engine}/installation/mac.md (100%) rename {docs => engine}/installation/windows.md (100%) rename {docs => engine}/migration.md (100%) rename {docs => engine}/reference/api/README.md (100%) rename {docs => engine}/reference/api/_static/io_oauth_authorization_page.png (100%) rename {docs => engine}/reference/api/docker-io_api.md (100%) rename {docs => engine}/reference/api/docker_io_accounts_api.md (100%) rename {docs => engine}/reference/api/docker_remote_api.md (100%) rename {docs => engine}/reference/api/docker_remote_api_v1.18.md (100%) rename {docs => engine}/reference/api/docker_remote_api_v1.19.md (100%) rename {docs => engine}/reference/api/docker_remote_api_v1.20.md (100%) rename {docs => engine}/reference/api/docker_remote_api_v1.21.md (100%) rename {docs => engine}/reference/api/docker_remote_api_v1.22.md (100%) rename {docs => engine}/reference/api/docker_remote_api_v1.23.md (100%) rename {docs => engine}/reference/api/docker_remote_api_v1.24.md (100%) rename {docs => engine}/reference/api/docker_remote_api_v1.25.md (100%) rename {docs => engine}/reference/api/hub_registry_spec.md (100%) rename {docs => engine}/reference/api/images/event_state.gliffy (100%) rename {docs => engine}/reference/api/images/event_state.png (100%) rename {docs => engine}/reference/api/index.md (100%) rename {docs => engine}/reference/api/remote_api_client_libraries.md (100%) rename {docs => engine}/reference/builder.md (100%) rename {docs => engine}/reference/commandline/attach.md (100%) rename {docs => engine}/reference/commandline/build.md (100%) rename {docs => engine}/reference/commandline/cli.md (100%) rename {docs => engine}/reference/commandline/commit.md (100%) rename {docs => engine}/reference/commandline/cp.md (100%) rename {docs => engine}/reference/commandline/create.md (100%) rename {docs => engine}/reference/commandline/deploy.md (100%) rename {docs => engine}/reference/commandline/diff.md (100%) rename {docs => engine}/reference/commandline/docker_images.gif (100%) rename {docs => engine}/reference/commandline/dockerd.md (100%) rename {docs => engine}/reference/commandline/events.md (100%) rename {docs => engine}/reference/commandline/exec.md (100%) rename {docs => engine}/reference/commandline/export.md 
(100%) rename {docs => engine}/reference/commandline/history.md (100%) rename {docs => engine}/reference/commandline/images.md (100%) rename {docs => engine}/reference/commandline/import.md (100%) rename {docs => engine}/reference/commandline/index.md (100%) rename {docs => engine}/reference/commandline/info.md (100%) rename {docs => engine}/reference/commandline/inspect.md (100%) rename {docs => engine}/reference/commandline/kill.md (100%) rename {docs => engine}/reference/commandline/load.md (100%) rename {docs => engine}/reference/commandline/login.md (100%) rename {docs => engine}/reference/commandline/logout.md (100%) rename {docs => engine}/reference/commandline/logs.md (100%) rename {docs => engine}/reference/commandline/menu.md (100%) rename {docs => engine}/reference/commandline/network_connect.md (100%) rename {docs => engine}/reference/commandline/network_create.md (100%) rename {docs => engine}/reference/commandline/network_disconnect.md (100%) rename {docs => engine}/reference/commandline/network_inspect.md (100%) rename {docs => engine}/reference/commandline/network_ls.md (100%) rename {docs => engine}/reference/commandline/network_rm.md (100%) rename {docs => engine}/reference/commandline/node_demote.md (100%) rename {docs => engine}/reference/commandline/node_inspect.md (100%) rename {docs => engine}/reference/commandline/node_ls.md (100%) rename {docs => engine}/reference/commandline/node_promote.md (100%) rename {docs => engine}/reference/commandline/node_ps.md (100%) rename {docs => engine}/reference/commandline/node_rm.md (100%) rename {docs => engine}/reference/commandline/node_update.md (100%) rename {docs => engine}/reference/commandline/pause.md (100%) rename {docs => engine}/reference/commandline/plugin_disable.md (100%) rename {docs => engine}/reference/commandline/plugin_enable.md (100%) rename {docs => engine}/reference/commandline/plugin_inspect.md (100%) rename {docs => engine}/reference/commandline/plugin_install.md (100%) rename {docs => engine}/reference/commandline/plugin_ls.md (100%) rename {docs => engine}/reference/commandline/plugin_rm.md (100%) rename {docs => engine}/reference/commandline/port.md (100%) rename {docs => engine}/reference/commandline/ps.md (100%) rename {docs => engine}/reference/commandline/pull.md (100%) rename {docs => engine}/reference/commandline/push.md (100%) rename {docs => engine}/reference/commandline/rename.md (100%) rename {docs => engine}/reference/commandline/restart.md (100%) rename {docs => engine}/reference/commandline/rm.md (100%) rename {docs => engine}/reference/commandline/rmi.md (100%) rename {docs => engine}/reference/commandline/run.md (100%) rename {docs => engine}/reference/commandline/save.md (100%) rename {docs => engine}/reference/commandline/search.md (100%) rename {docs => engine}/reference/commandline/service_create.md (100%) rename {docs => engine}/reference/commandline/service_inspect.md (100%) rename {docs => engine}/reference/commandline/service_ls.md (100%) rename {docs => engine}/reference/commandline/service_ps.md (100%) rename {docs => engine}/reference/commandline/service_rm.md (100%) rename {docs => engine}/reference/commandline/service_scale.md (100%) rename {docs => engine}/reference/commandline/service_update.md (100%) rename {docs => engine}/reference/commandline/stack_config.md (100%) rename {docs => engine}/reference/commandline/stack_deploy.md (100%) rename {docs => engine}/reference/commandline/stack_rm.md (100%) rename {docs => engine}/reference/commandline/stack_services.md (100%) 
rename {docs => engine}/reference/commandline/stack_tasks.md (100%) rename {docs => engine}/reference/commandline/start.md (100%) rename {docs => engine}/reference/commandline/stats.md (100%) rename {docs => engine}/reference/commandline/stop.md (100%) rename {docs => engine}/reference/commandline/swarm_init.md (100%) rename {docs => engine}/reference/commandline/swarm_join.md (100%) rename {docs => engine}/reference/commandline/swarm_join_token.md (100%) rename {docs => engine}/reference/commandline/swarm_leave.md (100%) rename {docs => engine}/reference/commandline/swarm_update.md (100%) rename {docs => engine}/reference/commandline/tag.md (100%) rename {docs => engine}/reference/commandline/top.md (100%) rename {docs => engine}/reference/commandline/unpause.md (100%) rename {docs => engine}/reference/commandline/update.md (100%) rename {docs => engine}/reference/commandline/version.md (100%) rename {docs => engine}/reference/commandline/volume_create.md (100%) rename {docs => engine}/reference/commandline/volume_inspect.md (100%) rename {docs => engine}/reference/commandline/volume_ls.md (100%) rename {docs => engine}/reference/commandline/volume_rm.md (100%) rename {docs => engine}/reference/commandline/wait.md (100%) rename {docs => engine}/reference/glossary.md (100%) rename {docs => engine}/reference/index.md (100%) rename {docs => engine}/reference/run.md (100%) rename {docs => engine}/security/apparmor.md (100%) rename {docs => engine}/security/certificates.md (100%) rename {docs => engine}/security/https.md (100%) rename {docs => engine}/security/https/Dockerfile (100%) rename {docs => engine}/security/https/Makefile (100%) rename {docs => engine}/security/https/README.md (100%) rename {docs => engine}/security/https/make_certs.sh (100%) rename {docs => engine}/security/https/parsedocs.sh (100%) rename {docs => engine}/security/index.md (100%) rename {docs => engine}/security/non-events.md (100%) rename {docs => engine}/security/seccomp.md (100%) rename {docs => engine}/security/security.md (100%) rename {docs => engine}/security/trust/content_trust.md (100%) rename {docs => engine}/security/trust/deploying_notary.md (100%) rename {docs => engine}/security/trust/images/tag_signing.png (100%) rename {docs => engine}/security/trust/images/trust_.gliffy (100%) rename {docs => engine}/security/trust/images/trust_components.gliffy (100%) rename {docs => engine}/security/trust/images/trust_components.png (100%) rename {docs => engine}/security/trust/images/trust_signing.gliffy (100%) rename {docs => engine}/security/trust/images/trust_signing.png (100%) rename {docs => engine}/security/trust/images/trust_view.gliffy (100%) rename {docs => engine}/security/trust/images/trust_view.png (100%) rename {docs => engine}/security/trust/index.md (100%) rename {docs => engine}/security/trust/trust_automation.md (100%) rename {docs => engine}/security/trust/trust_delegation.md (100%) rename {docs => engine}/security/trust/trust_key_mng.md (100%) rename {docs => engine}/security/trust/trust_sandbox.md (100%) rename {docs => engine}/static_files/README.md (100%) rename {docs => engine}/static_files/contributors.png (100%) rename {docs => engine}/static_files/docker-logo-compressed.png (100%) rename {docs => engine}/static_files/docker_pull_chart.png (100%) rename {docs => engine}/static_files/docker_push_chart.png (100%) rename {docs => engine}/static_files/dockerlogo-v.png (100%) rename {docs => engine}/swarm/admin_guide.md (100%) rename {docs => engine}/swarm/how-swarm-mode-works/menu.md (100%) 
rename {docs => engine}/swarm/how-swarm-mode-works/nodes.md (100%)
rename {docs => engine}/swarm/how-swarm-mode-works/pki.md (100%)
rename {docs => engine}/swarm/how-swarm-mode-works/services.md (100%)
rename {docs => engine}/swarm/images/ingress-lb.png (100%)
rename {docs => engine}/swarm/images/ingress-routing-mesh.png (100%)
rename {docs => engine}/swarm/images/replicated-vs-global.png (100%)
rename {docs => engine}/swarm/images/service-lifecycle.png (100%)
rename {docs => engine}/swarm/images/service-vip.png (100%)
rename {docs => engine}/swarm/images/services-diagram.png (100%)
rename {docs => engine}/swarm/images/src/ingress-lb.svg (100%)
rename {docs => engine}/swarm/images/src/ingress-routing-mesh.svg (100%)
rename {docs => engine}/swarm/images/src/replicated-vs-global.svg (100%)
rename {docs => engine}/swarm/images/src/service-lifecycle.svg (100%)
rename {docs => engine}/swarm/images/src/service-vip.svg (100%)
rename {docs => engine}/swarm/images/src/services-diagram.svg (100%)
rename {docs => engine}/swarm/images/src/simple-cluster.svg (100%)
rename {docs => engine}/swarm/images/src/tls.svg (100%)
rename {docs => engine}/swarm/images/swarm-diagram.png (100%)
rename {docs => engine}/swarm/images/tls.png (100%)
rename {docs => engine}/swarm/index.md (100%)
rename {docs => engine}/swarm/ingress.md (100%)
rename {docs => engine}/swarm/join-nodes.md (100%)
rename {docs => engine}/swarm/key-concepts.md (100%)
rename {docs => engine}/swarm/manage-nodes.md (100%)
rename {docs => engine}/swarm/menu.md (100%)
rename {docs => engine}/swarm/networking.md (100%)
rename {docs => engine}/swarm/raft.md (100%)
rename {docs => engine}/swarm/services.md (100%)
rename {docs => engine}/swarm/swarm-mode.md (100%)
rename {docs => engine}/swarm/swarm-tutorial/add-nodes.md (100%)
rename {docs => engine}/swarm/swarm-tutorial/create-swarm.md (100%)
rename {docs => engine}/swarm/swarm-tutorial/delete-service.md (100%)
rename {docs => engine}/swarm/swarm-tutorial/deploy-service.md (100%)
rename {docs => engine}/swarm/swarm-tutorial/drain-node.md (100%)
rename {docs => engine}/swarm/swarm-tutorial/index.md (100%)
rename {docs => engine}/swarm/swarm-tutorial/inspect-service.md (100%)
rename {docs => engine}/swarm/swarm-tutorial/menu.md (100%)
rename {docs => engine}/swarm/swarm-tutorial/rolling-update.md (100%)
rename {docs => engine}/swarm/swarm-tutorial/scale-service.md (100%)
rename {docs => engine}/touch-up.sh (100%)
rename {docs => engine}/tutorials/dockerimages.md (100%)
rename {docs => engine}/tutorials/dockerizing.md (100%)
rename {docs => engine}/tutorials/dockerrepos.md (100%)
rename {docs => engine}/tutorials/dockervolumes.md (100%)
rename {docs => engine}/tutorials/index.md (100%)
rename {docs => engine}/tutorials/menu.md (100%)
rename {docs => engine}/tutorials/networkingcontainers.md (100%)
rename {docs => engine}/tutorials/search.png (100%)
rename {docs => engine}/tutorials/usingdocker.md (100%)
rename {docs => engine}/tutorials/webapp1.png (100%)
rename {docs => engine}/understanding-docker.md (100%)
rename {docs => engine}/userguide/eng-image/baseimages.md (100%)
rename {docs => engine}/userguide/eng-image/dockerfile_best-practices.md (100%)
rename {docs => engine}/userguide/eng-image/image_management.md (100%)
rename {docs => engine}/userguide/eng-image/index.md (100%)
rename {docs => engine}/userguide/index.md (100%)
rename {docs => engine}/userguide/intro.md (100%)
rename {docs => engine}/userguide/labels-custom-metadata.md (100%)
rename {docs => engine}/userguide/networking/configure-dns.md (100%)
rename {docs => engine}/userguide/networking/default_network/binding.md (100%)
rename {docs => engine}/userguide/networking/default_network/build-bridges.md (100%)
rename {docs => engine}/userguide/networking/default_network/configure-dns.md (100%)
rename {docs => engine}/userguide/networking/default_network/container-communication.md (100%)
rename {docs => engine}/userguide/networking/default_network/custom-docker0.md (100%)
rename {docs => engine}/userguide/networking/default_network/dockerlinks.md (100%)
rename {docs => engine}/userguide/networking/default_network/images/ipv6_basic_host_config.gliffy (100%)
rename {docs => engine}/userguide/networking/default_network/images/ipv6_basic_host_config.svg (100%)
rename {docs => engine}/userguide/networking/default_network/images/ipv6_ndp_proxying.gliffy (100%)
rename {docs => engine}/userguide/networking/default_network/images/ipv6_ndp_proxying.svg (100%)
rename {docs => engine}/userguide/networking/default_network/images/ipv6_routed_network_example.gliffy (100%)
rename {docs => engine}/userguide/networking/default_network/images/ipv6_routed_network_example.svg (100%)
rename {docs => engine}/userguide/networking/default_network/images/ipv6_slash64_subnet_config.gliffy (100%)
rename {docs => engine}/userguide/networking/default_network/images/ipv6_slash64_subnet_config.svg (100%)
rename {docs => engine}/userguide/networking/default_network/images/ipv6_switched_network_example.gliffy (100%)
rename {docs => engine}/userguide/networking/default_network/images/ipv6_switched_network_example.svg (100%)
rename {docs => engine}/userguide/networking/default_network/index.md (100%)
rename {docs => engine}/userguide/networking/default_network/ipv6.md (100%)
rename {docs => engine}/userguide/networking/get-started-macvlan.md (100%)
rename {docs => engine}/userguide/networking/get-started-overlay.md (100%)
rename {docs => engine}/userguide/networking/images/bridge_network.gliffy (100%)
rename {docs => engine}/userguide/networking/images/bridge_network.png (100%)
rename {docs => engine}/userguide/networking/images/bridge_network.svg (100%)
rename {docs => engine}/userguide/networking/images/engine_on_net.gliffy (100%)
rename {docs => engine}/userguide/networking/images/engine_on_net.png (100%)
rename {docs => engine}/userguide/networking/images/engine_on_net.svg (100%)
rename {docs => engine}/userguide/networking/images/key_value.gliffy (100%)
rename {docs => engine}/userguide/networking/images/key_value.png (100%)
rename {docs => engine}/userguide/networking/images/key_value.svg (100%)
rename {docs => engine}/userguide/networking/images/macvlan-bridge-ipvlan-l2.gliffy (100%)
rename {docs => engine}/userguide/networking/images/macvlan-bridge-ipvlan-l2.png (100%)
rename {docs => engine}/userguide/networking/images/macvlan-bridge-ipvlan-l2.svg (100%)
rename {docs => engine}/userguide/networking/images/macvlan_bridge_simple.gliffy (100%)
rename {docs => engine}/userguide/networking/images/macvlan_bridge_simple.png (100%)
rename {docs => engine}/userguide/networking/images/macvlan_bridge_simple.svg (100%)
rename {docs => engine}/userguide/networking/images/multi_tenant_8021q_vlans.gliffy (100%)
rename {docs => engine}/userguide/networking/images/multi_tenant_8021q_vlans.png (100%)
rename {docs => engine}/userguide/networking/images/multi_tenant_8021q_vlans.svg (100%)
rename {docs => engine}/userguide/networking/images/network_access.gliffy (100%)
rename {docs => engine}/userguide/networking/images/network_access.png (100%)
rename {docs => engine}/userguide/networking/images/network_access.svg (100%)
rename {docs => engine}/userguide/networking/images/overlay-network-final.gliffy (100%)
rename {docs => engine}/userguide/networking/images/overlay-network-final.png (100%)
rename {docs => engine}/userguide/networking/images/overlay-network-final.svg (100%)
rename {docs => engine}/userguide/networking/images/overlay_network.gliffy (100%)
rename {docs => engine}/userguide/networking/images/overlay_network.png (100%)
rename {docs => engine}/userguide/networking/images/overlay_network.svg (100%)
rename {docs => engine}/userguide/networking/images/working.gliffy (100%)
rename {docs => engine}/userguide/networking/images/working.png (100%)
rename {docs => engine}/userguide/networking/images/working.svg (100%)
rename {docs => engine}/userguide/networking/index.md (100%)
rename {docs => engine}/userguide/networking/menu.md (100%)
rename {docs => engine}/userguide/networking/overlay-security-model.md (100%)
rename {docs => engine}/userguide/networking/work-with-networks.md (100%)
rename {docs => engine}/userguide/storagedriver/aufs-driver.md (100%)
rename {docs => engine}/userguide/storagedriver/btrfs-driver.md (100%)
rename {docs => engine}/userguide/storagedriver/device-mapper-driver.md (100%)
rename {docs => engine}/userguide/storagedriver/images/aufs_delete.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/aufs_layers.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/aufs_metadata.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/base_device.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/btfs_constructs.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/btfs_container_layer.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/btfs_layers.png (100%)
rename {docs => engine}/userguide/storagedriver/images/btfs_pool.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/btfs_snapshots.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/btfs_subvolume.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/container-layers-cas.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/container-layers.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/dm_container.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/driver-pros-cons.png (100%)
rename {docs => engine}/userguide/storagedriver/images/image-layers.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/lsblk-diagram.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/overlay_constructs.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/overlay_constructs2.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/saving-space.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/shared-uuid.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/shared-volume.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/sharing-layers.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/two_dm_container.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/zfs_clones.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/zfs_zpool.jpg (100%)
rename {docs => engine}/userguide/storagedriver/images/zpool_blocks.jpg (100%)
rename {docs => engine}/userguide/storagedriver/imagesandcontainers.md (100%)
rename {docs => engine}/userguide/storagedriver/index.md (100%)
rename {docs => engine}/userguide/storagedriver/overlayfs-driver.md (100%)
rename {docs => engine}/userguide/storagedriver/selectadriver.md (100%)
rename {docs => engine}/userguide/storagedriver/zfs-driver.md (100%)
delete mode 100644 errors/errors.go
delete mode 100644 experimental/README.md
delete mode 100644 experimental/docker-stacks-and-bundles.md
delete mode 100644 experimental/images/ipvlan-l3.gliffy
delete mode 100644 experimental/images/ipvlan-l3.png
delete mode 100644 experimental/images/ipvlan-l3.svg
delete mode 100644 experimental/images/ipvlan_l2_simple.gliffy
delete mode 100644 experimental/images/ipvlan_l2_simple.png
delete mode 100644 experimental/images/ipvlan_l2_simple.svg
delete mode 100644 experimental/images/macvlan-bridge-ipvlan-l2.gliffy
delete mode 100644 experimental/images/macvlan-bridge-ipvlan-l2.png
delete mode 100644 experimental/images/macvlan-bridge-ipvlan-l2.svg
delete mode 100644 experimental/images/multi_tenant_8021q_vlans.gliffy
delete mode 100644 experimental/images/multi_tenant_8021q_vlans.png
delete mode 100644 experimental/images/multi_tenant_8021q_vlans.svg
delete mode 100644 experimental/images/vlans-deeper-look.gliffy
delete mode 100644 experimental/images/vlans-deeper-look.png
delete mode 100644 experimental/images/vlans-deeper-look.svg
delete mode 100644 experimental/plugins_graphdriver.md
delete mode 100644 experimental/vlan-networks.md
delete mode 100755 hack/.vendor-helpers.sh
delete mode 100644 hack/Jenkins/W2L/postbuild.sh
delete mode 100644 hack/Jenkins/W2L/setup.sh
delete mode 100644 hack/Jenkins/readme.md
delete mode 100755 hack/dind
delete mode 100755 hack/generate-authors.sh
delete mode 100644 hack/install.sh
delete mode 100755 hack/make.sh
delete mode 100644 hack/make/.binary
delete mode 100644 hack/make/.binary-setup
delete mode 100644 hack/make/.build-deb/compat
delete mode 100644 hack/make/.build-deb/control
delete mode 100644 hack/make/.build-deb/docker-engine.bash-completion
delete mode 120000 hack/make/.build-deb/docker-engine.docker.default
delete mode 120000 hack/make/.build-deb/docker-engine.docker.init
delete mode 120000 hack/make/.build-deb/docker-engine.docker.upstart
delete mode 100644 hack/make/.build-deb/docker-engine.install
delete mode 100644 hack/make/.build-deb/docker-engine.manpages
delete mode 100644 hack/make/.build-deb/docker-engine.postinst
delete mode 120000 hack/make/.build-deb/docker-engine.udev
delete mode 100644 hack/make/.build-deb/docs
delete mode 100755 hack/make/.build-deb/rules
delete mode 100644 hack/make/.build-rpm/docker-engine-selinux.spec
delete mode 100644 hack/make/.build-rpm/docker-engine.spec
delete mode 100644 hack/make/.detect-daemon-osarch
delete mode 100644 hack/make/.ensure-emptyfs
delete mode 100644 hack/make/.ensure-frozen-images
delete mode 100644 hack/make/.ensure-frozen-images-windows
delete mode 100644 hack/make/.ensure-httpserver
delete mode 100644 hack/make/.ensure-nnp-test
delete mode 100644 hack/make/.ensure-syscall-test
delete mode 100644 hack/make/.go-autogen
delete mode 100644 hack/make/.integration-daemon-setup
delete mode 100644 hack/make/.integration-daemon-start
delete mode 100644 hack/make/.integration-daemon-stop
delete mode 100644 hack/make/.resources-windows/common.rc
delete mode 100644 hack/make/.resources-windows/docker.exe.manifest
delete mode 100644 hack/make/.resources-windows/docker.ico
delete mode 100644 hack/make/.resources-windows/docker.png
delete mode 100644 hack/make/.resources-windows/docker.rc
delete mode 100644 hack/make/.resources-windows/dockerd.rc
delete mode 100644 hack/make/.resources-windows/event_messages.mc
delete mode 100644 hack/make/.resources-windows/resources.go
delete mode 100644 hack/make/.validate
delete mode 100644 hack/make/README.md
delete mode 100644 hack/make/binary
delete mode 100644 hack/make/binary-client
delete mode 100644 hack/make/binary-daemon
delete mode 100644 hack/make/build-deb
delete mode 100644 hack/make/build-rpm
delete mode 100755 hack/make/clean-apt-repo
delete mode 100755 hack/make/clean-yum-repo
delete mode 100644 hack/make/cover
delete mode 100644 hack/make/cross
delete mode 100644 hack/make/dynbinary
delete mode 100644 hack/make/dynbinary-client
delete mode 100644 hack/make/dynbinary-daemon
delete mode 100644 hack/make/dyngccgo
delete mode 100644 hack/make/gccgo
delete mode 100755 hack/make/generate-index-listing
delete mode 100644 hack/make/install-binary
delete mode 100644 hack/make/install-binary-client
delete mode 100644 hack/make/install-binary-daemon
delete mode 100644 hack/make/install-script
delete mode 100755 hack/make/release-deb
delete mode 100755 hack/make/release-rpm
delete mode 100755 hack/make/sign-repos
delete mode 100755 hack/make/test-deb-install
delete mode 100644 hack/make/test-docker-py
delete mode 100755 hack/make/test-install-script
delete mode 100755 hack/make/test-integration-cli
delete mode 100755 hack/make/test-old-apt-repo
delete mode 100644 hack/make/test-unit
delete mode 100644 hack/make/tgz
delete mode 100644 hack/make/ubuntu
delete mode 100755 hack/make/update-apt-repo
delete mode 100644 hack/make/validate-dco
delete mode 100644 hack/make/validate-default-seccomp
delete mode 100644 hack/make/validate-gofmt
delete mode 100644 hack/make/validate-lint
delete mode 100644 hack/make/validate-pkg
delete mode 100644 hack/make/validate-test
delete mode 100644 hack/make/validate-toml
delete mode 100644 hack/make/validate-vendor
delete mode 100644 hack/make/validate-vet
delete mode 100644 hack/make/win
delete mode 100755 hack/release.sh
delete mode 100755 hack/vendor.sh
delete mode 100644 image/fs.go
delete mode 100644 image/fs_test.go
delete mode 100644 image/image.go
delete mode 100644 image/image_test.go
delete mode 100644 image/rootfs.go
delete mode 100644 image/rootfs_unix.go
delete mode 100644 image/rootfs_windows.go
delete mode 100644 image/spec/v1.1.md
delete mode 100644 image/spec/v1.2.md
delete mode 100644 image/spec/v1.md
delete mode 100644 image/store.go
delete mode 100644 image/store_test.go
delete mode 100644 image/tarexport/load.go
delete mode 100644 image/tarexport/save.go
delete mode 100644 image/tarexport/tarexport.go
delete mode 100644 image/v1/imagev1.go
delete mode 100644 image/v1/imagev1_test.go
delete mode 100644 integration-cli/benchmark_test.go
delete mode 100644 integration-cli/check_test.go
delete mode 100644 integration-cli/daemon.go
delete mode 100644 integration-cli/daemon_swarm.go
delete mode 100644 integration-cli/daemon_swarm_hack.go
delete mode 100644 integration-cli/docker_api_attach_test.go
delete mode 100644 integration-cli/docker_api_auth_test.go
delete mode 100644 integration-cli/docker_api_build_test.go
delete mode 100644 integration-cli/docker_api_containers_test.go
delete mode 100644 integration-cli/docker_api_create_test.go
delete mode 100644 integration-cli/docker_api_events_test.go
delete mode 100644 integration-cli/docker_api_exec_resize_test.go
delete mode 100644 integration-cli/docker_api_exec_test.go
delete mode 100644 integration-cli/docker_api_images_test.go
delete mode 100644 integration-cli/docker_api_info_test.go
delete mode 100644 integration-cli/docker_api_inspect_test.go
delete mode 100644 integration-cli/docker_api_inspect_unix_test.go
delete mode 100644 integration-cli/docker_api_logs_test.go
delete mode 100644 integration-cli/docker_api_network_test.go
delete mode 100644 integration-cli/docker_api_resize_test.go
delete mode 100644 integration-cli/docker_api_service_update_test.go
delete mode 100644 integration-cli/docker_api_stats_test.go
delete mode 100644 integration-cli/docker_api_stats_unix_test.go
delete mode 100644 integration-cli/docker_api_swarm_test.go
delete mode 100644 integration-cli/docker_api_test.go
delete mode 100644 integration-cli/docker_api_update_unix_test.go
delete mode 100644 integration-cli/docker_api_version_test.go
delete mode 100644 integration-cli/docker_api_volumes_test.go
delete mode 100644 integration-cli/docker_cli_attach_test.go
delete mode 100644 integration-cli/docker_cli_attach_unix_test.go
delete mode 100644 integration-cli/docker_cli_authz_unix_test.go
delete mode 100644 integration-cli/docker_cli_build_test.go
delete mode 100644 integration-cli/docker_cli_build_unix_test.go
delete mode 100644 integration-cli/docker_cli_by_digest_test.go
delete mode 100644 integration-cli/docker_cli_commit_test.go
delete mode 100644 integration-cli/docker_cli_config_test.go
delete mode 100644 integration-cli/docker_cli_cp_from_container_test.go
delete mode 100644 integration-cli/docker_cli_cp_test.go
delete mode 100644 integration-cli/docker_cli_cp_to_container_test.go
delete mode 100644 integration-cli/docker_cli_cp_to_container_unix_test.go
delete mode 100644 integration-cli/docker_cli_cp_utils.go
delete mode 100644 integration-cli/docker_cli_create_test.go
delete mode 100644 integration-cli/docker_cli_daemon_experimental_test.go
delete mode 100644 integration-cli/docker_cli_daemon_test.go
delete mode 100644 integration-cli/docker_cli_diff_test.go
delete mode 100644 integration-cli/docker_cli_events_test.go
delete mode 100644 integration-cli/docker_cli_events_unix_test.go
delete mode 100644 integration-cli/docker_cli_exec_test.go
delete mode 100644 integration-cli/docker_cli_exec_unix_test.go
delete mode 100644 integration-cli/docker_cli_experimental_test.go
delete mode 100644 integration-cli/docker_cli_export_import_test.go
delete mode 100644 integration-cli/docker_cli_external_graphdriver_unix_test.go
delete mode 100644 integration-cli/docker_cli_external_volume_driver_unix_test.go
delete mode 100644 integration-cli/docker_cli_health_test.go
delete mode 100644 integration-cli/docker_cli_help_test.go
delete mode 100644 integration-cli/docker_cli_history_test.go
delete mode 100644 integration-cli/docker_cli_images_test.go
delete mode 100644 integration-cli/docker_cli_import_test.go
delete mode 100644 integration-cli/docker_cli_info_test.go
delete mode 100644 integration-cli/docker_cli_info_unix_test.go
delete mode 100644 integration-cli/docker_cli_inspect_experimental_test.go
delete mode 100644 integration-cli/docker_cli_inspect_test.go
delete mode 100644 integration-cli/docker_cli_kill_test.go
delete mode 100644 integration-cli/docker_cli_links_test.go
delete mode 100644 integration-cli/docker_cli_links_unix_test.go
delete mode 100644 integration-cli/docker_cli_login_test.go
delete mode 100644 integration-cli/docker_cli_logout_test.go
delete mode 100644 integration-cli/docker_cli_logs_bench_test.go
delete mode 100644 integration-cli/docker_cli_logs_test.go
delete mode 100644 integration-cli/docker_cli_nat_test.go
delete mode 100644 integration-cli/docker_cli_netmode_test.go
delete mode 100644 integration-cli/docker_cli_network_unix_test.go
delete mode 100644 integration-cli/docker_cli_oom_killed_test.go
delete mode 100644 integration-cli/docker_cli_pause_test.go
delete mode 100644 integration-cli/docker_cli_plugins_test.go
delete mode 100644 integration-cli/docker_cli_port_test.go
delete mode 100644 integration-cli/docker_cli_proxy_test.go
delete mode 100644 integration-cli/docker_cli_ps_test.go
delete mode 100644 integration-cli/docker_cli_pull_local_test.go
delete mode 100644 integration-cli/docker_cli_pull_test.go
delete mode 100644 integration-cli/docker_cli_pull_trusted_test.go
delete mode 100644 integration-cli/docker_cli_push_test.go
delete mode 100644 integration-cli/docker_cli_registry_user_agent_test.go
delete mode 100644 integration-cli/docker_cli_rename_test.go
delete mode 100644 integration-cli/docker_cli_restart_test.go
delete mode 100644 integration-cli/docker_cli_rm_test.go
delete mode 100644 integration-cli/docker_cli_rmi_test.go
delete mode 100644 integration-cli/docker_cli_run_test.go
delete mode 100644 integration-cli/docker_cli_run_unix_test.go
delete mode 100644 integration-cli/docker_cli_save_load_test.go
delete mode 100644 integration-cli/docker_cli_save_load_unix_test.go
delete mode 100644 integration-cli/docker_cli_search_test.go
delete mode 100644 integration-cli/docker_cli_service_create_hack_test.go
delete mode 100644 integration-cli/docker_cli_service_health_test.go
delete mode 100644 integration-cli/docker_cli_service_update_test.go
delete mode 100644 integration-cli/docker_cli_sni_test.go
delete mode 100644 integration-cli/docker_cli_stack_test.go
delete mode 100644 integration-cli/docker_cli_start_test.go
delete mode 100644 integration-cli/docker_cli_stats_test.go
delete mode 100644 integration-cli/docker_cli_stop_test.go
delete mode 100644 integration-cli/docker_cli_swarm_test.go
delete mode 100644 integration-cli/docker_cli_tag_test.go
delete mode 100644 integration-cli/docker_cli_top_test.go
delete mode 100644 integration-cli/docker_cli_update_test.go
delete mode 100644 integration-cli/docker_cli_update_unix_test.go
delete mode 100644 integration-cli/docker_cli_userns_test.go
delete mode 100644 integration-cli/docker_cli_v2_only_test.go
delete mode 100644 integration-cli/docker_cli_version_test.go
delete mode 100644 integration-cli/docker_cli_volume_test.go
delete mode 100644 integration-cli/docker_cli_wait_test.go
delete mode 100644 integration-cli/docker_deprecated_api_v124_test.go
delete mode 100644 integration-cli/docker_deprecated_api_v124_unix_test.go
delete mode 100644 integration-cli/docker_experimental_network_test.go
delete mode 100644 integration-cli/docker_hub_pull_suite_test.go
delete mode 100644 integration-cli/docker_test_vars.go
delete mode 100644 integration-cli/docker_utils.go
delete mode 100644 integration-cli/events_utils.go
delete mode 100755 integration-cli/fixtures/auth/docker-credential-shell-test
delete mode 100644 integration-cli/fixtures/https/ca.pem
delete mode 100644 integration-cli/fixtures/https/client-cert.pem
delete mode 100644 integration-cli/fixtures/https/client-key.pem
delete mode 100644 integration-cli/fixtures/https/client-rogue-cert.pem
delete mode 100644 integration-cli/fixtures/https/client-rogue-key.pem
delete mode 100644 integration-cli/fixtures/https/server-cert.pem
delete mode 100644 integration-cli/fixtures/https/server-key.pem
delete mode 100644 integration-cli/fixtures/https/server-rogue-cert.pem
delete mode 100644 integration-cli/fixtures/https/server-rogue-key.pem
delete mode 100644 integration-cli/fixtures/load/emptyLayer.tar
delete mode 100644 integration-cli/fixtures/notary/delgkey1.crt
delete mode 100644 integration-cli/fixtures/notary/delgkey1.key
delete mode 100644 integration-cli/fixtures/notary/delgkey2.crt
delete mode 100644 integration-cli/fixtures/notary/delgkey2.key
delete mode 100644 integration-cli/fixtures/notary/delgkey3.crt
delete mode 100644 integration-cli/fixtures/notary/delgkey3.key
delete mode 100644 integration-cli/fixtures/notary/delgkey4.crt
delete mode 100644 integration-cli/fixtures/notary/delgkey4.key
delete mode 100644 integration-cli/fixtures/notary/localhost.cert
delete mode 100644 integration-cli/fixtures/notary/localhost.key
delete mode 100644 integration-cli/fixtures/registry/cert.pem
delete mode 100644 integration-cli/npipe.go
delete mode 100644 integration-cli/npipe_windows.go
delete mode 100644 integration-cli/registry.go
delete mode 100644 integration-cli/registry_mock.go
delete mode 100644 integration-cli/requirements.go
delete mode 100644 integration-cli/requirements_unix.go
delete mode 100644 integration-cli/test_vars_exec.go
delete mode 100644 integration-cli/test_vars_noexec.go
delete mode 100644 integration-cli/test_vars_noseccomp.go
delete mode 100644 integration-cli/test_vars_seccomp.go
delete mode 100644 integration-cli/test_vars_unix.go
delete mode 100644 integration-cli/test_vars_windows.go
delete mode 100644 integration-cli/trust_server.go
delete mode 100644 integration-cli/utils.go
delete mode 100644 layer/empty.go
delete mode 100644 layer/empty_test.go
delete mode 100644 layer/filestore.go
delete mode 100644 layer/filestore_test.go
delete mode 100644 layer/layer.go
delete mode 100644 layer/layer_store.go
delete mode 100644 layer/layer_store_windows.go
delete mode 100644 layer/layer_test.go
delete mode 100644 layer/layer_unix.go
delete mode 100644 layer/layer_unix_test.go
delete mode 100644 layer/layer_windows.go
delete mode 100644 layer/migration.go
delete mode 100644 layer/migration_test.go
delete mode 100644 layer/mount_test.go
delete mode 100644 layer/mounted_layer.go
delete mode 100644 layer/ro_layer.go
delete mode 100644 layer/ro_layer_windows.go
delete mode 100644 libcontainerd/client.go
delete mode 100644 libcontainerd/client_linux.go
delete mode 100644 libcontainerd/client_solaris.go
delete mode 100644 libcontainerd/client_windows.go
delete mode 100644 libcontainerd/container.go
delete mode 100644 libcontainerd/container_linux.go
delete mode 100644 libcontainerd/container_solaris.go
delete mode 100644 libcontainerd/container_windows.go
delete mode 100644 libcontainerd/pausemonitor_linux.go
delete mode 100644 libcontainerd/process.go
delete mode 100644 libcontainerd/process_linux.go
delete mode 100644 libcontainerd/process_solaris.go
delete mode 100644 libcontainerd/process_windows.go
delete mode 100644 libcontainerd/queue_linux.go
delete mode 100644 libcontainerd/remote.go
delete mode 100644 libcontainerd/remote_linux.go
delete mode 100644 libcontainerd/remote_solaris.go
delete mode 100644 libcontainerd/remote_windows.go
delete mode 100644 libcontainerd/types.go
delete mode 100644 libcontainerd/types_linux.go
delete mode 100644 libcontainerd/types_solaris.go
delete mode 100644 libcontainerd/types_windows.go
delete mode 100644 libcontainerd/utils_linux.go
delete mode 100644 libcontainerd/utils_windows.go
delete mode 100644 libcontainerd/windowsoci/oci_windows.go
delete mode 100644 libcontainerd/windowsoci/unsupported.go
delete mode 100644 man/Dockerfile
delete mode 100644 man/Dockerfile.5.md
delete mode 100644 man/Dockerfile.armhf
delete mode 100644 man/README.md
delete mode 100644 man/docker-attach.1.md
delete mode 100644 man/docker-build.1.md
delete mode 100644 man/docker-commit.1.md
delete mode 100644 man/docker-config-json.5.md
delete mode 100644 man/docker-cp.1.md
delete mode 100644 man/docker-create.1.md
delete mode 100644 man/docker-diff.1.md
delete mode 100644 man/docker-events.1.md
delete mode 100644 man/docker-exec.1.md
delete mode 100644 man/docker-export.1.md
delete mode 100644 man/docker-history.1.md
delete mode 100644 man/docker-images.1.md
delete mode 100644 man/docker-import.1.md
delete mode 100644 man/docker-info.1.md
delete mode 100644 man/docker-inspect.1.md
delete mode 100644 man/docker-kill.1.md
delete mode 100644 man/docker-load.1.md
delete mode 100644 man/docker-login.1.md
delete mode 100644 man/docker-logout.1.md
delete mode 100644 man/docker-logs.1.md
delete mode 100644 man/docker-network-connect.1.md
delete mode 100644 man/docker-network-create.1.md
delete mode 100644 man/docker-network-disconnect.1.md
delete mode 100644 man/docker-network-inspect.1.md
delete mode 100644 man/docker-network-ls.1.md
delete mode 100644 man/docker-network-rm.1.md
delete mode 100644 man/docker-pause.1.md
delete mode 100644 man/docker-port.1.md
delete mode 100644 man/docker-ps.1.md
delete mode 100644 man/docker-pull.1.md
delete mode 100644 man/docker-push.1.md
delete mode 100644 man/docker-rename.1.md
delete mode 100644 man/docker-restart.1.md
delete mode 100644 man/docker-rm.1.md
delete mode 100644 man/docker-rmi.1.md
delete mode 100644 man/docker-run.1.md
delete mode 100644 man/docker-save.1.md
delete mode 100644 man/docker-search.1.md
delete mode 100644 man/docker-start.1.md
delete mode 100644 man/docker-stats.1.md
delete mode 100644 man/docker-stop.1.md
delete mode 100644 man/docker-tag.1.md
delete mode 100644 man/docker-top.1.md
delete mode 100644 man/docker-unpause.1.md
delete mode 100644 man/docker-update.1.md
delete mode 100644 man/docker-version.1.md
delete mode 100644 man/docker-wait.1.md
delete mode 100644 man/docker.1.md
delete mode 100644 man/dockerd.8.md
delete mode 100644 man/generate.go
delete mode 100755 man/generate.sh
delete mode 100644 man/glide.lock
delete mode 100644 man/glide.yaml
delete mode 100755 man/md2man-all.sh
delete mode 100644 migrate/v1/migratev1.go
delete mode 100644 migrate/v1/migratev1_test.go
delete mode 100644 oci/defaults_linux.go
delete mode 100644 oci/defaults_solaris.go
delete mode 100644 oci/defaults_windows.go
delete mode 100644 opts/hosts.go
delete mode 100644 opts/hosts_test.go
delete mode 100644 opts/hosts_unix.go
delete mode 100644 opts/hosts_windows.go
delete mode 100644 opts/ip.go
delete mode 100644 opts/ip_test.go
delete mode 100644 opts/opts.go
delete mode 100644 opts/opts_test.go
delete mode 100644 opts/opts_unix.go
delete mode 100644 opts/opts_windows.go
delete mode 100644 pkg/README.md
delete mode 100644 pkg/aaparser/aaparser.go
delete mode 100644 pkg/aaparser/aaparser_test.go
delete mode 100644 pkg/archive/README.md
delete mode 100644 pkg/archive/archive.go
delete mode 100644 pkg/archive/archive_linux.go
delete mode 100644 pkg/archive/archive_other.go
delete mode 100644 pkg/archive/archive_test.go
delete mode 100644 pkg/archive/archive_unix.go
delete mode 100644 pkg/archive/archive_unix_test.go
delete mode 100644 pkg/archive/archive_windows.go
delete mode 100644 pkg/archive/archive_windows_test.go
delete mode 100644 pkg/archive/changes.go
delete mode 100644 pkg/archive/changes_linux.go
delete mode 100644 pkg/archive/changes_other.go
delete mode 100644 pkg/archive/changes_posix_test.go
delete mode 100644 pkg/archive/changes_test.go
delete mode 100644 pkg/archive/changes_unix.go
delete mode 100644 pkg/archive/changes_windows.go
delete mode 100644 pkg/archive/copy.go
delete mode 100644 pkg/archive/copy_unix.go
delete mode 100644 pkg/archive/copy_unix_test.go
delete mode 100644 pkg/archive/copy_windows.go
delete mode 100644 pkg/archive/diff.go
delete mode 100644 pkg/archive/diff_test.go
delete mode 100644 pkg/archive/example_changes.go
delete mode 100644 pkg/archive/testdata/broken.tar
delete mode 100644 pkg/archive/time_linux.go
delete mode 100644 pkg/archive/time_unsupported.go
delete mode 100644 pkg/archive/utils_test.go
delete mode 100644 pkg/archive/whiteouts.go
delete mode 100644 pkg/archive/wrap.go
delete mode 100644 pkg/archive/wrap_test.go
delete mode 100644 pkg/authorization/api.go
delete mode 100644 pkg/authorization/authz.go
delete mode 100644 pkg/authorization/authz_unix_test.go
delete mode 100644 pkg/authorization/middleware.go
delete mode 100644 pkg/authorization/plugin.go
delete mode 100644 pkg/authorization/response.go
delete mode 100644 pkg/broadcaster/unbuffered.go
delete mode 100644 pkg/broadcaster/unbuffered_test.go
delete mode 100644 pkg/chrootarchive/archive.go
delete mode 100644 pkg/chrootarchive/archive_test.go
delete mode 100644 pkg/chrootarchive/archive_unix.go
delete mode 100644 pkg/chrootarchive/archive_windows.go
delete mode 100644 pkg/chrootarchive/chroot_linux.go
delete mode 100644 pkg/chrootarchive/chroot_unix.go
delete mode 100644 pkg/chrootarchive/diff.go
delete mode 100644 pkg/chrootarchive/diff_unix.go
delete mode 100644 pkg/chrootarchive/diff_windows.go
delete mode 100644 pkg/chrootarchive/init_unix.go
delete mode 100644 pkg/chrootarchive/init_windows.go
delete mode 100644 pkg/devicemapper/devmapper.go
delete mode 100644 pkg/devicemapper/devmapper_log.go
delete mode 100644 pkg/devicemapper/devmapper_wrapper.go
delete mode 100644 pkg/devicemapper/devmapper_wrapper_deferred_remove.go
delete mode 100644 pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go
delete mode 100644 pkg/devicemapper/ioctl.go
delete mode 100644 pkg/devicemapper/log.go
delete mode 100644 pkg/directory/directory.go
delete mode 100644 pkg/directory/directory_test.go
delete mode 100644 pkg/directory/directory_unix.go
delete mode 100644 pkg/directory/directory_windows.go
delete mode 100644 pkg/discovery/README.md
delete mode 100644 pkg/discovery/backends.go
delete mode 100644 pkg/discovery/discovery.go
delete mode 100644 pkg/discovery/discovery_test.go
delete mode 100644 pkg/discovery/entry.go
delete mode 100644 pkg/discovery/file/file.go
delete mode 100644 pkg/discovery/file/file_test.go
delete mode 100644 pkg/discovery/generator.go
delete mode 100644 pkg/discovery/generator_test.go
delete mode 100644 pkg/discovery/kv/kv.go
delete mode 100644 pkg/discovery/kv/kv_test.go
delete mode 100644 pkg/discovery/memory/memory.go
delete mode 100644 pkg/discovery/memory/memory_test.go
delete mode 100644 pkg/discovery/nodes/nodes.go
delete mode 100644 pkg/discovery/nodes/nodes_test.go
delete mode 100644 pkg/filenotify/filenotify.go
delete mode 100644 pkg/filenotify/fsnotify.go
delete mode 100644 pkg/filenotify/poller.go
delete mode 100644 pkg/filenotify/poller_test.go
delete mode 100644 pkg/fileutils/fileutils.go
delete mode 100644 pkg/fileutils/fileutils_solaris.go
delete mode 100644 pkg/fileutils/fileutils_test.go
delete mode 100644 pkg/fileutils/fileutils_unix.go
delete mode 100644 pkg/fileutils/fileutils_windows.go
delete mode 100644 pkg/gitutils/gitutils.go
delete mode 100644 pkg/gitutils/gitutils_test.go
delete mode 100644 pkg/graphdb/conn_sqlite3.go
delete mode 100644 pkg/graphdb/conn_sqlite3_unix.go
delete mode 100644 pkg/graphdb/conn_sqlite3_windows.go
delete mode 100644 pkg/graphdb/conn_unsupported.go
delete mode 100644 pkg/graphdb/graphdb.go
delete mode 100644 pkg/graphdb/graphdb_test.go
delete mode 100644 pkg/graphdb/sort.go
delete mode 100644 pkg/graphdb/sort_test.go
delete mode 100644 pkg/graphdb/utils.go
delete mode 100644 pkg/homedir/homedir.go
delete mode 100644 pkg/homedir/homedir_test.go
delete mode 100644 pkg/httputils/httputils.go
delete mode 100644 pkg/httputils/httputils_test.go
delete mode 100644 pkg/httputils/mimetype.go
delete mode 100644 pkg/httputils/mimetype_test.go
delete mode 100644 pkg/httputils/resumablerequestreader.go
delete mode 100644 pkg/httputils/resumablerequestreader_test.go
delete mode 100644 pkg/idtools/idtools.go
delete mode 100644 pkg/idtools/idtools_unix.go
delete mode 100644 pkg/idtools/idtools_unix_test.go
delete mode 100644 pkg/idtools/idtools_windows.go
delete mode 100644 pkg/idtools/usergroupadd_linux.go
delete mode 100644 pkg/idtools/usergroupadd_unsupported.go
delete mode 100644 pkg/integration/checker/checker.go
delete mode 100644 pkg/integration/dockerCmd_utils.go
delete mode 100644 pkg/integration/dockerCmd_utils_test.go
delete mode 100644 pkg/integration/utils.go
delete mode 100644 pkg/integration/utils_test.go
delete mode 100644 pkg/ioutils/buffer.go
delete mode 100644 pkg/ioutils/buffer_test.go
delete mode 100644 pkg/ioutils/bytespipe.go
delete mode 100644 pkg/ioutils/bytespipe_test.go
delete mode 100644 pkg/ioutils/fmt.go
delete mode 100644 pkg/ioutils/fmt_test.go
delete mode 100644 pkg/ioutils/fswriters.go
delete mode 100644 pkg/ioutils/fswriters_test.go
delete mode 100644 pkg/ioutils/multireader.go
delete mode 100644 pkg/ioutils/multireader_test.go
delete mode 100644 pkg/ioutils/readers.go
delete mode 100644 pkg/ioutils/readers_test.go
delete mode 100644 pkg/ioutils/temp_unix.go
delete mode 100644 pkg/ioutils/temp_windows.go
delete mode 100644 pkg/ioutils/writeflusher.go
delete mode 100644 pkg/ioutils/writers.go
delete mode 100644 pkg/ioutils/writers_test.go
delete mode 100644 pkg/jsonlog/jsonlog.go
delete mode 100644 pkg/jsonlog/jsonlog_marshalling.go
delete mode 100644 pkg/jsonlog/jsonlog_marshalling_test.go
delete mode 100644 pkg/jsonlog/jsonlogbytes.go
delete mode 100644 pkg/jsonlog/jsonlogbytes_test.go
delete mode 100644 pkg/jsonlog/time_marshalling.go
delete mode 100644 pkg/jsonlog/time_marshalling_test.go
delete mode 100644 pkg/jsonmessage/jsonmessage.go
delete mode 100644 pkg/jsonmessage/jsonmessage_test.go
delete mode 100644 pkg/listeners/listeners_solaris.go
delete mode 100644 pkg/listeners/listeners_unix.go
delete mode 100644 pkg/listeners/listeners_windows.go
delete mode 100644 pkg/locker/README.md
delete mode 100644 pkg/locker/locker.go
delete mode 100644 pkg/locker/locker_test.go
delete mode 100644 pkg/longpath/longpath.go
delete mode 100644 pkg/longpath/longpath_test.go
delete mode 100644 pkg/loopback/attach_loopback.go
delete mode 100644 pkg/loopback/ioctl.go
delete mode 100644 pkg/loopback/loop_wrapper.go
delete mode 100644 pkg/loopback/loopback.go
delete mode 100644 pkg/mflag/LICENSE
delete mode 100644 pkg/mflag/README.md
delete mode 100644 pkg/mflag/example/example.go
delete mode 100644 pkg/mflag/flag.go
delete mode 100644 pkg/mflag/flag_test.go
delete mode 100644 pkg/mount/flags.go
delete mode 100644 pkg/mount/flags_freebsd.go
delete mode 100644 pkg/mount/flags_linux.go
delete mode 100644 pkg/mount/flags_unsupported.go
delete mode 100644 pkg/mount/mount.go
delete mode 100644 pkg/mount/mount_unix_test.go
delete mode 100644 pkg/mount/mounter_freebsd.go
delete mode 100644 pkg/mount/mounter_linux.go
delete mode 100644 pkg/mount/mounter_solaris.go
delete mode 100644 pkg/mount/mounter_unsupported.go
delete mode 100644 pkg/mount/mountinfo.go
delete mode 100644 pkg/mount/mountinfo_freebsd.go
delete mode 100644 pkg/mount/mountinfo_linux.go
delete mode 100644 pkg/mount/mountinfo_linux_test.go
delete mode 100644 pkg/mount/mountinfo_solaris.go
delete mode 100644 pkg/mount/mountinfo_unsupported.go
delete mode 100644 pkg/mount/mountinfo_windows.go
delete mode 100644 pkg/mount/sharedsubtree_linux.go
delete mode 100644 pkg/mount/sharedsubtree_linux_test.go
delete mode 100644 pkg/namesgenerator/cmd/names-generator/main.go
delete mode 100644 pkg/namesgenerator/names-generator.go
delete mode 100644 pkg/namesgenerator/names-generator_test.go
delete mode 100644 pkg/parsers/kernel/kernel.go
delete mode 100644 pkg/parsers/kernel/kernel_darwin.go
delete mode 100644 pkg/parsers/kernel/kernel_unix.go
delete mode 100644 pkg/parsers/kernel/kernel_unix_test.go
delete mode 100644 pkg/parsers/kernel/kernel_windows.go
delete mode 100644 pkg/parsers/kernel/uname_linux.go
delete mode 100644 pkg/parsers/kernel/uname_solaris.go
delete mode 100644 pkg/parsers/kernel/uname_unsupported.go
delete mode 100644 pkg/parsers/operatingsystem/operatingsystem_linux.go
delete mode 100644 pkg/parsers/operatingsystem/operatingsystem_solaris.go
delete mode 100644 pkg/parsers/operatingsystem/operatingsystem_unix.go
delete mode 100644 pkg/parsers/operatingsystem/operatingsystem_unix_test.go
delete mode 100644 pkg/parsers/operatingsystem/operatingsystem_windows.go
delete mode 100644 pkg/parsers/parsers.go
delete mode 100644 pkg/parsers/parsers_test.go
delete mode 100644 pkg/pidfile/pidfile.go
delete mode 100644 pkg/pidfile/pidfile_test.go
delete mode 100644 pkg/pidfile/pidfile_unix.go
delete mode 100644 pkg/pidfile/pidfile_windows.go
delete mode 100644 pkg/platform/architecture_linux.go
delete mode 100644 pkg/platform/architecture_unix.go
delete mode 100644 pkg/platform/architecture_windows.go
delete mode 100644 pkg/platform/platform.go
delete mode 100644 pkg/platform/utsname_int8.go
delete mode 100644 pkg/platform/utsname_uint8.go
delete mode 100644 pkg/plugins/client.go
delete mode 100644 pkg/plugins/client_test.go
delete mode 100644 pkg/plugins/discovery.go
delete mode 100644 pkg/plugins/discovery_test.go
delete mode 100644 pkg/plugins/discovery_unix_test.go
delete mode 100644 pkg/plugins/errors.go
delete mode 100644 pkg/plugins/pluginrpc-gen/README.md
delete mode 100644 pkg/plugins/pluginrpc-gen/fixtures/foo.go
delete mode 100644 pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go
delete mode 100644 pkg/plugins/pluginrpc-gen/main.go
delete mode 100644 pkg/plugins/pluginrpc-gen/parser.go
delete mode 100644 pkg/plugins/pluginrpc-gen/parser_test.go
delete mode 100644 pkg/plugins/pluginrpc-gen/template.go
delete mode 100644 pkg/plugins/plugins.go
delete mode 100644 pkg/plugins/transport/http.go
delete mode 100644 pkg/plugins/transport/transport.go
delete mode 100644 pkg/pools/pools.go
delete mode 100644 pkg/pools/pools_test.go
delete mode 100644 pkg/progress/progress.go
delete mode 100644 pkg/progress/progressreader.go
delete mode 100644 pkg/progress/progressreader_test.go
delete mode 100644 pkg/promise/promise.go
delete mode 100644 pkg/pubsub/publisher.go
delete mode 100644 pkg/pubsub/publisher_test.go
delete mode 100644 pkg/random/random.go
delete mode 100644 pkg/random/random_test.go
delete mode 100644 pkg/reexec/README.md
delete mode 100644 pkg/reexec/command_linux.go
delete mode 100644 pkg/reexec/command_unix.go
delete mode 100644 pkg/reexec/command_unsupported.go
delete mode 100644 pkg/reexec/command_windows.go
delete mode 100644 pkg/reexec/reexec.go
delete mode 100644 pkg/registrar/registrar.go
delete mode 100644 pkg/registrar/registrar_test.go
delete mode 100644 pkg/signal/README.md
delete mode 100644 pkg/signal/signal.go
delete mode 100644 pkg/signal/signal_darwin.go
delete mode 100644 pkg/signal/signal_freebsd.go
delete mode 100644 pkg/signal/signal_linux.go
delete mode 100644 pkg/signal/signal_solaris.go
delete mode 100644 pkg/signal/signal_unix.go
delete mode 100644 pkg/signal/signal_unsupported.go
delete mode 100644 pkg/signal/signal_windows.go
delete mode 100644 pkg/signal/trap.go
delete mode 100644 pkg/stdcopy/stdcopy.go
delete mode 100644 pkg/stdcopy/stdcopy_test.go
delete mode 100644 pkg/streamformatter/streamformatter.go
delete mode 100644 pkg/streamformatter/streamformatter_test.go
delete mode 100644 pkg/stringid/README.md
delete mode 100644 pkg/stringid/stringid.go
delete mode 100644 pkg/stringid/stringid_test.go
delete mode 100644 pkg/stringutils/README.md
delete mode 100644 pkg/stringutils/stringutils.go
delete mode 100644 pkg/stringutils/stringutils_test.go
delete mode 100644 pkg/symlink/LICENSE.APACHE
delete mode 100644 pkg/symlink/LICENSE.BSD
delete mode 100644 pkg/symlink/README.md
delete mode 100644 pkg/symlink/fs.go
delete mode 100644 pkg/symlink/fs_unix.go
delete mode 100644 pkg/symlink/fs_unix_test.go
delete mode 100644 pkg/symlink/fs_windows.go
delete mode 100644 pkg/sysinfo/README.md
delete mode 100644 pkg/sysinfo/sysinfo.go
delete mode 100644 pkg/sysinfo/sysinfo_freebsd.go
delete mode 100644 pkg/sysinfo/sysinfo_linux.go
delete mode 100644 pkg/sysinfo/sysinfo_linux_test.go
delete mode 100644 pkg/sysinfo/sysinfo_solaris.go
delete mode 100644 pkg/sysinfo/sysinfo_test.go
delete mode 100644 pkg/sysinfo/sysinfo_windows.go
delete mode 100644 pkg/system/chtimes.go
delete mode 100644 pkg/system/chtimes_test.go
delete mode 100644 pkg/system/chtimes_unix.go
delete mode 100644 pkg/system/chtimes_unix_test.go
delete mode 100644 pkg/system/chtimes_windows.go
delete mode 100644 pkg/system/chtimes_windows_test.go
delete mode 100644 pkg/system/errors.go
delete mode 100644 pkg/system/events_windows.go
delete mode 100644 pkg/system/filesys.go
delete mode 100644 pkg/system/filesys_windows.go
delete mode 100644 pkg/system/lstat.go
delete mode 100644 pkg/system/lstat_unix_test.go
delete mode 100644 pkg/system/lstat_windows.go
delete mode 100644 pkg/system/meminfo.go
delete mode 100644 pkg/system/meminfo_linux.go
delete mode 100644 pkg/system/meminfo_solaris.go
delete mode 100644 pkg/system/meminfo_unix_test.go
delete mode 100644 pkg/system/meminfo_unsupported.go
delete mode 100644 pkg/system/meminfo_windows.go
delete mode 100644 pkg/system/mknod.go
delete mode 100644 pkg/system/mknod_windows.go
delete mode 100644 pkg/system/path_unix.go
delete mode 100644 pkg/system/path_windows.go
delete mode 100644 pkg/system/path_windows_test.go
delete mode 100644 pkg/system/stat.go
delete mode 100644 pkg/system/stat_freebsd.go
delete mode 100644 pkg/system/stat_linux.go
delete mode 100644 pkg/system/stat_openbsd.go
delete mode 100644 pkg/system/stat_solaris.go
delete mode 100644 pkg/system/stat_unix_test.go
delete mode 100644 pkg/system/stat_unsupported.go
delete mode 100644 pkg/system/stat_windows.go
delete mode 100644 pkg/system/syscall_unix.go
delete mode 100644 pkg/system/syscall_windows.go
delete mode 100644 pkg/system/syscall_windows_test.go
delete mode 100644 pkg/system/umask.go
delete mode 100644 pkg/system/umask_windows.go
delete mode 100644 pkg/system/utimes_darwin.go
delete mode 100644 pkg/system/utimes_freebsd.go
delete mode 100644 pkg/system/utimes_linux.go
delete mode 100644 pkg/system/utimes_unix_test.go
delete mode 100644 pkg/system/utimes_unsupported.go
delete mode 100644 pkg/system/xattrs_linux.go
delete mode 100644 pkg/system/xattrs_unsupported.go
delete mode 100644 pkg/tailfile/tailfile.go
delete mode 100644 pkg/tailfile/tailfile_test.go
delete mode 100644 pkg/tarsum/builder_context.go
delete mode 100644 pkg/tarsum/builder_context_test.go
delete mode 100644 pkg/tarsum/fileinfosums.go
delete mode 100644 pkg/tarsum/fileinfosums_test.go
delete mode 100644 pkg/tarsum/tarsum.go
delete mode 100644 pkg/tarsum/tarsum_spec.md
delete mode 100644 pkg/tarsum/tarsum_test.go
delete mode 100644 pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json
delete mode 100644 pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar
delete mode 100644 pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json
delete mode 100644 pkg/tarsum/testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar
delete mode 100644 pkg/tarsum/testdata/collision/collision-0.tar
delete mode 100644 pkg/tarsum/testdata/collision/collision-1.tar
delete mode 100644 pkg/tarsum/testdata/collision/collision-2.tar
delete mode 100644 pkg/tarsum/testdata/collision/collision-3.tar
delete mode 100644 pkg/tarsum/testdata/xattr/json
delete mode 100644 pkg/tarsum/testdata/xattr/layer.tar
delete mode 100644 pkg/tarsum/versioning.go
delete mode 100644 pkg/tarsum/versioning_test.go
delete mode 100644 pkg/tarsum/writercloser.go
delete mode 100644 pkg/term/ascii.go
delete mode 100644 pkg/term/ascii_test.go
delete mode 100644 pkg/term/tc_linux_cgo.go
delete mode 100644 pkg/term/tc_other.go
delete mode 100644 pkg/term/tc_solaris_cgo.go
delete mode 100644 pkg/term/term.go
delete mode 100644 pkg/term/term_solaris.go
delete mode 100644 pkg/term/term_unix.go
delete mode 100644 pkg/term/term_windows.go
delete mode 100644 pkg/term/termios_darwin.go
delete mode 100644 pkg/term/termios_freebsd.go
delete mode 100644 pkg/term/termios_linux.go
delete mode 100644 pkg/term/termios_openbsd.go
delete mode 100644 pkg/term/windows/ansi_reader.go
delete mode 100644 pkg/term/windows/ansi_writer.go
delete mode 100644 pkg/term/windows/console.go
delete mode 100644 pkg/term/windows/windows.go
delete mode 100644 pkg/term/windows/windows_test.go
delete mode 100644 pkg/testutil/assert/assert.go
delete mode 100644 pkg/testutil/pkg.go
delete mode 100644 pkg/tlsconfig/config.go
delete mode 100644 pkg/truncindex/truncindex.go
delete mode 100644 pkg/truncindex/truncindex_test.go
delete mode 100644 pkg/urlutil/urlutil.go
delete mode 100644 pkg/urlutil/urlutil_test.go
delete mode 100644 pkg/useragent/README.md
delete mode 100644 pkg/useragent/useragent.go
delete mode 100644 pkg/useragent/useragent_test.go
delete mode 100644 plugin/backend.go
delete mode 100644 plugin/distribution/pull.go
delete mode 100644 plugin/distribution/push.go
delete mode 100644 plugin/distribution/types.go
delete mode 100644 plugin/interface.go
delete mode 100644 plugin/legacy.go
delete mode 100644 plugin/manager.go
delete mode 100644 plugin/manager_linux.go
delete mode 100644 plugin/manager_windows.go
delete mode 100644 profiles/apparmor/apparmor.go
delete mode 100644 profiles/apparmor/template.go
delete mode 100755 profiles/seccomp/default.json
delete mode 100755 profiles/seccomp/fixtures/example.json
delete mode 100644 profiles/seccomp/generate.go
delete mode 100644 profiles/seccomp/seccomp.go
delete mode 100644 profiles/seccomp/seccomp_default.go
delete mode 100644 profiles/seccomp/seccomp_test.go
delete mode 100644 profiles/seccomp/seccomp_unsupported.go
delete mode 100644 project/ARM.md
delete mode 100644 project/BRANCHES-AND-TAGS.md
delete mode 120000 project/CONTRIBUTORS.md
delete mode 100644 project/GOVERNANCE.md
delete mode 100644 project/IRC-ADMINISTRATION.md
delete mode 100644 project/ISSUE-TRIAGE.md
delete mode 100644 project/PACKAGE-REPO-MAINTENANCE.md
delete mode 100644 project/PACKAGERS.md
delete mode 100644 project/PATCH-RELEASES.md
delete mode 100644 project/PRINCIPLES.md
delete mode 100644 project/README.md
delete mode 100644 project/RELEASE-CHECKLIST.md
delete mode 100644 project/RELEASE-PROCESS.md
delete mode 100644 project/REVIEWING.md
delete mode 100644 project/TOOLS.md
delete mode 100644 reference/reference.go
delete mode 100644 reference/reference_test.go
delete mode 100644 reference/store.go
delete mode 100644 reference/store_test.go
delete mode 100644 restartmanager/restartmanager.go
delete mode 100644 restartmanager/restartmanager_test.go
delete mode 100644 runconfig/compare.go
delete mode 100644 runconfig/compare_test.go
delete mode 100644 runconfig/config.go
delete mode 100644 runconfig/config_test.go
delete mode 100644 runconfig/config_unix.go
delete mode 100644 runconfig/config_windows.go
delete mode 100644 runconfig/errors.go
delete mode 100644 runconfig/fixtures/unix/container_config_1_14.json
delete mode 100644 runconfig/fixtures/unix/container_config_1_17.json
delete mode 100644 runconfig/fixtures/unix/container_config_1_19.json
delete mode 100644 runconfig/fixtures/unix/container_hostconfig_1_14.json
delete mode 100644 runconfig/fixtures/unix/container_hostconfig_1_19.json
delete mode 100644 runconfig/fixtures/windows/container_config_1_19.json
delete mode 100644 runconfig/hostconfig.go
delete mode 100644 runconfig/hostconfig_solaris.go
delete mode 100644 runconfig/hostconfig_test.go
delete mode 100644 runconfig/hostconfig_unix.go
delete mode 100644 runconfig/hostconfig_windows.go
delete mode 100644 runconfig/opts/envfile.go
delete mode 100644 runconfig/opts/envfile_test.go
delete mode 100644 runconfig/opts/fixtures/valid.env
delete mode 100644 runconfig/opts/fixtures/valid.label
delete mode 100644 runconfig/opts/opts.go
delete mode 100644 runconfig/opts/opts_test.go
delete mode 100644 runconfig/opts/parse.go
delete mode 100644 runconfig/opts/parse_test.go
delete mode 100644 runconfig/opts/runtime.go
delete mode 100644 runconfig/opts/throttledevice.go
delete mode 100644 runconfig/opts/ulimit.go
delete mode 100644 runconfig/opts/ulimit_test.go
delete mode 100644 runconfig/opts/weightdevice.go
delete mode 100644 runconfig/streams.go
delete mode 100644 utils/debug.go
delete mode 100644 utils/debug_test.go
delete mode 100644 utils/experimental.go
delete mode 100644 utils/names.go
delete mode 100644 utils/process_unix.go
delete mode 100644 utils/process_windows.go
delete mode 100644 utils/stubs.go
delete mode 100644 utils/templates/templates.go
delete mode 100644 utils/templates/templates_test.go
delete mode 100644 utils/utils.go
delete mode 100644 utils/utils_test.go
delete mode 100644 vendor/src/bitbucket.org/ww/goautoneg/Makefile
delete mode 100644 vendor/src/bitbucket.org/ww/goautoneg/README.txt
delete mode 100644 vendor/src/bitbucket.org/ww/goautoneg/autoneg.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/LICENSE
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/README.md
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/constants.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/context.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/csi_entry_state.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/csi_param_state.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/escape_intermediate_state.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/escape_state.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/event_handler.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/ground_state.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/osc_string_state.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/parser.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/parser_action_helpers.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/parser_actions.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/states.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/utilities.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/ansi.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/api.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/attr_translation.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/erase_helpers.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/scroll_helper.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/utilities.go
delete mode 100644 vendor/src/github.com/Azure/go-ansiterm/winterm/win_event_handler.go
delete mode 100644 vendor/src/github.com/BurntSushi/toml/.gitignore
delete mode 100644 vendor/src/github.com/BurntSushi/toml/.travis.yml
delete mode 100644 vendor/src/github.com/BurntSushi/toml/COMPATIBLE
delete mode 100644 vendor/src/github.com/BurntSushi/toml/COPYING
delete mode 100644 vendor/src/github.com/BurntSushi/toml/Makefile
delete mode 100644 vendor/src/github.com/BurntSushi/toml/README.md
delete mode 100644 vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING
delete mode 100644 vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING
delete mode 100644 vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING
delete mode 100644 vendor/src/github.com/BurntSushi/toml/decode.go
delete mode 100644 vendor/src/github.com/BurntSushi/toml/decode_meta.go
delete mode 100644 vendor/src/github.com/BurntSushi/toml/doc.go
delete mode 100644 vendor/src/github.com/BurntSushi/toml/encode.go
delete mode 100644 vendor/src/github.com/BurntSushi/toml/encoding_types.go
delete mode 100644 vendor/src/github.com/BurntSushi/toml/encoding_types_1.1.go
delete mode 100644 vendor/src/github.com/BurntSushi/toml/lex.go
delete mode 100644 vendor/src/github.com/BurntSushi/toml/parse.go
delete mode 100644 vendor/src/github.com/BurntSushi/toml/session.vim
delete mode 100644 vendor/src/github.com/BurntSushi/toml/type_check.go
delete mode 100644 vendor/src/github.com/BurntSushi/toml/type_fields.go
delete mode 100644 vendor/src/github.com/Graylog2/go-gelf/LICENSE
delete mode 100644 vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go
delete mode 100644 vendor/src/github.com/Graylog2/go-gelf/gelf/writer.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/.gitignore
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/LICENSE
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/README.md
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/archive/tar/LICENSE
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/archive/tar/common.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/archive/tar/reader.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_atim.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_unix.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/archive/tar/writer.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/backup.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/backuptar/tar.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/file.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/fileinfo.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/pipe.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/privilege.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/reparse.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/sd.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/syscall.go
delete mode 100644 vendor/src/github.com/Microsoft/go-winio/zsyscall.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/LICENSE
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/activatelayer.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/baselayer.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/callback.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/container.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/createcomputesystem.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/createlayer.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/createprocess.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/createsandboxlayer.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/deactivatelayer.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/destroylayer.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/expandsandboxsize.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/exportlayer.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/getcomputesystemproperties.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/getlayermountpath.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/getsharedbaseimages.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/guid.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/hcsshim.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/hnsfuncs.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/importlayer.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/interface.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/layerexists.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/layerutils.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/legacy.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/mksyscall_windows.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/nametoguid.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/preparelayer.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/process.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/processimage.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/resizeconsole.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/shutdownterminatecomputesystem.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/startcomputesystem.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/terminateprocess.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/unpreparelayer.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/utils.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/version.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/waithelper.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/waitprocess.go
delete mode 100644 vendor/src/github.com/Microsoft/hcsshim/zhcsshim.go
delete mode 100644 vendor/src/github.com/RackSec/srslog/.gitignore
delete mode 100644 vendor/src/github.com/RackSec/srslog/.travis.yml
delete mode 100644 vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md
delete mode 100644 vendor/src/github.com/RackSec/srslog/LICENSE
delete mode 100644 vendor/src/github.com/RackSec/srslog/README.md
delete mode 100644 vendor/src/github.com/RackSec/srslog/constants.go
delete mode 100644 vendor/src/github.com/RackSec/srslog/dialer.go
delete mode 100644 vendor/src/github.com/RackSec/srslog/formatter.go
delete mode 100644 vendor/src/github.com/RackSec/srslog/framer.go
delete mode 100644 vendor/src/github.com/RackSec/srslog/net_conn.go
delete mode 100644 vendor/src/github.com/RackSec/srslog/srslog.go
delete mode 100644 vendor/src/github.com/RackSec/srslog/srslog_unix.go
delete mode 100644 vendor/src/github.com/RackSec/srslog/writer.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/.gitignore
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/.travis.yml
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/LICENSE
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/README.md
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/doc.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/entry.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/exported.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/formatter.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/hooks.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/json_formatter.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/logger.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/logrus.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/terminal_bsd.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/terminal_linux.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/terminal_solaris.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/terminal_windows.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/text_formatter.go
delete mode 100644 vendor/src/github.com/Sirupsen/logrus/writer.go
delete mode 100644 vendor/src/github.com/agl/ed25519/LICENSE
delete mode 100644 vendor/src/github.com/agl/ed25519/ed25519.go
delete mode 100644 vendor/src/github.com/agl/ed25519/edwards25519/const.go
delete mode 100644 vendor/src/github.com/agl/ed25519/edwards25519/edwards25519.go
delete mode 100755 vendor/src/github.com/armon/go-metrics/.gitignore
delete mode 100644 vendor/src/github.com/armon/go-metrics/LICENSE
delete mode 100644 vendor/src/github.com/armon/go-metrics/README.md
delete mode 100644 vendor/src/github.com/armon/go-metrics/const_unix.go
delete mode 100644 vendor/src/github.com/armon/go-metrics/const_windows.go
delete mode 100644 vendor/src/github.com/armon/go-metrics/inmem.go
delete mode 100644 vendor/src/github.com/armon/go-metrics/inmem_signal.go
delete mode 100755 vendor/src/github.com/armon/go-metrics/metrics.go
delete mode 100755 vendor/src/github.com/armon/go-metrics/sink.go
delete mode 100755 vendor/src/github.com/armon/go-metrics/start.go
delete mode 100644 vendor/src/github.com/armon/go-metrics/statsd.go
delete mode 100755 vendor/src/github.com/armon/go-metrics/statsite.go
delete mode 100644 vendor/src/github.com/armon/go-radix/.gitignore
delete mode 100644 vendor/src/github.com/armon/go-radix/.travis.yml
delete mode 100644 vendor/src/github.com/armon/go-radix/LICENSE
delete mode 100644 vendor/src/github.com/armon/go-radix/README.md
delete mode 100644 vendor/src/github.com/armon/go-radix/radix.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/config.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/errors.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/logger.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/request/validation.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/types.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/aws/version.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go
delete mode 100644 vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go
delete mode 100644 vendor/src/github.com/beorn7/perks/quantile/exampledata.txt
delete mode 100644 vendor/src/github.com/beorn7/perks/quantile/stream.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/.gitignore
delete mode 100644 vendor/src/github.com/boltdb/bolt/LICENSE
delete mode 100644 vendor/src/github.com/boltdb/bolt/Makefile
delete mode 100644 vendor/src/github.com/boltdb/bolt/README.md
delete mode 100644 vendor/src/github.com/boltdb/bolt/appveyor.yml
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_386.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_amd64.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_arm.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_arm64.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_linux.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_openbsd.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_ppc.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_ppc64.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_s390x.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_unix.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bolt_windows.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/boltsync_unix.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/bucket.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/cursor.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/db.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/doc.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/errors.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/freelist.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/node.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/page.go
delete mode 100644 vendor/src/github.com/boltdb/bolt/tx.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/LICENSE
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/api/api.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/auth/auth.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/certdb/README.md
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/config/config.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/csr/csr.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/errors/doc.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/errors/error.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/errors/http.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/info/info.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/initca/initca.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/log/log.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/signer/local/local.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/signer/signer.go
delete mode 100644 vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE
delete mode 100644 vendor/src/github.com/coreos/etcd/LICENSE
delete mode 100644 vendor/src/github.com/coreos/etcd/client/README.md
delete mode 100644 vendor/src/github.com/coreos/etcd/client/auth_role.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/auth_user.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/cancelreq.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/cancelreq_go14.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/client.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/cluster_error.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/curl.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/discover.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/doc.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/keys.generated.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/keys.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/members.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/srv.go
delete mode 100644 vendor/src/github.com/coreos/etcd/client/util.go
delete mode 100644
vendor/src/github.com/coreos/etcd/pkg/crc/crc.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/idutil/id.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/pathutil/path.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/doc.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/id.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/set.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/slice.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/urls.go delete mode 100644 vendor/src/github.com/coreos/etcd/pkg/types/urlsmap.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/design.md delete mode 100644 vendor/src/github.com/coreos/etcd/raft/doc.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/log.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/log_unstable.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/logger.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/node.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/progress.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/raft.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto delete mode 100644 vendor/src/github.com/coreos/etcd/raft/rawnode.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/status.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/storage.go delete mode 100644 vendor/src/github.com/coreos/etcd/raft/util.go delete mode 100644 vendor/src/github.com/coreos/etcd/snap/db.go delete mode 100644 vendor/src/github.com/coreos/etcd/snap/message.go delete mode 100644 vendor/src/github.com/coreos/etcd/snap/metrics.go delete mode 100644 vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go delete mode 100644 vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto delete mode 100644 vendor/src/github.com/coreos/etcd/snap/snapshotter.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/decoder.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/doc.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/encoder.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/metrics.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/repair.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/util.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/wal.go delete mode 100644 
vendor/src/github.com/coreos/etcd/wal/walpb/record.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go delete mode 100644 vendor/src/github.com/coreos/etcd/wal/walpb/record.proto delete mode 100644 vendor/src/github.com/coreos/go-systemd/LICENSE delete mode 100644 vendor/src/github.com/coreos/go-systemd/activation/files.go delete mode 100644 vendor/src/github.com/coreos/go-systemd/activation/listeners.go delete mode 100644 vendor/src/github.com/coreos/go-systemd/activation/packetconns.go delete mode 100644 vendor/src/github.com/coreos/go-systemd/daemon/sdnotify.go delete mode 100644 vendor/src/github.com/coreos/go-systemd/journal/journal.go delete mode 100644 vendor/src/github.com/coreos/pkg/LICENSE delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/README.md delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/formatters.go delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/init.go delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/init_windows.go delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/logmap.go delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go delete mode 100644 vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go delete mode 100644 vendor/src/github.com/deckarep/golang-set/.gitignore delete mode 100644 vendor/src/github.com/deckarep/golang-set/.travis.yml delete mode 100644 vendor/src/github.com/deckarep/golang-set/LICENSE delete mode 100644 vendor/src/github.com/deckarep/golang-set/README.md delete mode 100644 vendor/src/github.com/deckarep/golang-set/set.go delete mode 100644 vendor/src/github.com/deckarep/golang-set/threadsafe.go delete mode 100644 vendor/src/github.com/deckarep/golang-set/threadunsafe.go delete mode 100644 vendor/src/github.com/docker/containerd/LICENSE.code delete mode 100644 vendor/src/github.com/docker/containerd/LICENSE.docs delete mode 100644 vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go delete mode 100644 vendor/src/github.com/docker/containerd/api/grpc/types/api.proto delete mode 100644 vendor/src/github.com/docker/distribution/.gitignore delete mode 100644 vendor/src/github.com/docker/distribution/.mailmap delete mode 100644 vendor/src/github.com/docker/distribution/AUTHORS delete mode 100644 vendor/src/github.com/docker/distribution/BUILDING.md delete mode 100644 vendor/src/github.com/docker/distribution/CHANGELOG.md delete mode 100644 vendor/src/github.com/docker/distribution/CONTRIBUTING.md delete mode 100644 vendor/src/github.com/docker/distribution/Dockerfile delete mode 100644 vendor/src/github.com/docker/distribution/LICENSE delete mode 100644 vendor/src/github.com/docker/distribution/MAINTAINERS delete mode 100644 vendor/src/github.com/docker/distribution/Makefile delete mode 100644 vendor/src/github.com/docker/distribution/README.md delete mode 100644 vendor/src/github.com/docker/distribution/ROADMAP.md delete mode 100644 vendor/src/github.com/docker/distribution/blobs.go delete mode 100644 vendor/src/github.com/docker/distribution/circle.yml delete mode 100644 vendor/src/github.com/docker/distribution/context/context.go delete mode 100644 vendor/src/github.com/docker/distribution/context/doc.go delete mode 100644 vendor/src/github.com/docker/distribution/context/http.go delete 
mode 100644 vendor/src/github.com/docker/distribution/context/logger.go delete mode 100644 vendor/src/github.com/docker/distribution/context/trace.go delete mode 100644 vendor/src/github.com/docker/distribution/context/util.go delete mode 100644 vendor/src/github.com/docker/distribution/context/version.go delete mode 100755 vendor/src/github.com/docker/distribution/coverpkg.sh delete mode 100644 vendor/src/github.com/docker/distribution/digest/digest.go delete mode 100644 vendor/src/github.com/docker/distribution/digest/digester.go delete mode 100644 vendor/src/github.com/docker/distribution/digest/doc.go delete mode 100644 vendor/src/github.com/docker/distribution/digest/set.go delete mode 100644 vendor/src/github.com/docker/distribution/digest/verifiers.go delete mode 100644 vendor/src/github.com/docker/distribution/doc.go delete mode 100644 vendor/src/github.com/docker/distribution/errors.go delete mode 100644 vendor/src/github.com/docker/distribution/manifest/doc.go delete mode 100644 vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema1/sign.go delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema1/verify.go delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema2/builder.go delete mode 100644 vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go delete mode 100644 vendor/src/github.com/docker/distribution/manifest/versioned.go delete mode 100644 vendor/src/github.com/docker/distribution/manifests.go delete mode 100644 vendor/src/github.com/docker/distribution/reference/reference.go delete mode 100644 vendor/src/github.com/docker/distribution/reference/regexp.go delete mode 100644 vendor/src/github.com/docker/distribution/registry.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/errcode/register.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/doc.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/errors.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/routes.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/api/v2/urls.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/auth/api_version.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/auth/authchallenge.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/auth/session.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/blob_writer.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/errors.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/repository.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go delete mode 100644 
vendor/src/github.com/docker/distribution/registry/client/transport/transport.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go delete mode 100644 vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go delete mode 100644 vendor/src/github.com/docker/distribution/tags.go delete mode 100644 vendor/src/github.com/docker/distribution/uuid/uuid.go delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/LICENSE delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/client/client.go delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/client/command.go delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/credentials/credentials.go delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/credentials/error.go delete mode 100644 vendor/src/github.com/docker/docker-credential-helpers/credentials/helper.go delete mode 100644 vendor/src/github.com/docker/engine-api/LICENSE delete mode 100644 vendor/src/github.com/docker/engine-api/client/checkpoint_create.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/checkpoint_delete.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/checkpoint_list.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/client.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/client_darwin.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/client_unix.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/client_windows.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_attach.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_commit.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_copy.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_create.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_diff.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_exec.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_export.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_inspect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_kill.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_list.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_logs.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_pause.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_remove.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_rename.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_resize.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_restart.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_start.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_stats.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_stop.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_top.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_unpause.go delete mode 100644 
vendor/src/github.com/docker/engine-api/client/container_update.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/container_wait.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/errors.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/events.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/hijack.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_build.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_create.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_history.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_import.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_inspect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_list.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_load.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_pull.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_push.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_remove.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_save.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_search.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/image_tag.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/info.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/interface.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/interface_experimental.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/interface_stable.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/login.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/network_connect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/network_create.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/network_disconnect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/network_inspect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/network_list.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/network_remove.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/node_inspect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/node_list.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/node_remove.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/node_update.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_disable.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_enable.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_inspect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_install.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_list.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_push.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_remove.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/plugin_set.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/request.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/service_create.go delete mode 100644 
vendor/src/github.com/docker/engine-api/client/service_inspect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/service_list.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/service_remove.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/service_update.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/swarm_init.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/swarm_inspect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/swarm_join.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/swarm_leave.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/swarm_update.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/task_inspect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/task_list.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE delete mode 100644 vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/transport/client.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/transport/transport.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/version.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/volume_create.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/volume_inspect.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/volume_list.go delete mode 100644 vendor/src/github.com/docker/engine-api/client/volume_remove.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/auth.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/blkiodev/blkio.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/client.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/configs.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/container/config.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/container/host_config.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/container/hostconfig_unix.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/container/hostconfig_windows.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/errors.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/events/events.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/filters/parse.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/network/network.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/plugin.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/reference/image_reference.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/registry/registry.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/seccomp.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/stats.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/strslice/strslice.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/common.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/container.go delete mode 100644 
vendor/src/github.com/docker/engine-api/types/swarm/network.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/node.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/service.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/swarm.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/swarm/task.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/time/duration_convert.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/time/timestamp.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/types.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/versions/README.md delete mode 100644 vendor/src/github.com/docker/engine-api/types/versions/compare.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/versions/v1p19/types.go delete mode 100644 vendor/src/github.com/docker/engine-api/types/versions/v1p20/types.go delete mode 100644 vendor/src/github.com/docker/go-connections/LICENSE delete mode 100644 vendor/src/github.com/docker/go-connections/nat/nat.go delete mode 100644 vendor/src/github.com/docker/go-connections/nat/parse.go delete mode 100644 vendor/src/github.com/docker/go-connections/nat/sort.go delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/README.md delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/proxy.go delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/sockets.go delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go delete mode 100644 vendor/src/github.com/docker/go-connections/sockets/unix_socket.go delete mode 100644 vendor/src/github.com/docker/go-connections/tlsconfig/config.go delete mode 100644 vendor/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go delete mode 100644 vendor/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go delete mode 100644 vendor/src/github.com/docker/go-events/.gitignore delete mode 100644 vendor/src/github.com/docker/go-events/CONTRIBUTING.md delete mode 100644 vendor/src/github.com/docker/go-events/LICENSE delete mode 100644 vendor/src/github.com/docker/go-events/MAINTAINERS delete mode 100644 vendor/src/github.com/docker/go-events/README.md delete mode 100644 vendor/src/github.com/docker/go-events/broadcast.go delete mode 100644 vendor/src/github.com/docker/go-events/channel.go delete mode 100644 vendor/src/github.com/docker/go-events/errors.go delete mode 100644 vendor/src/github.com/docker/go-events/event.go delete mode 100644 vendor/src/github.com/docker/go-events/filter.go delete mode 100644 vendor/src/github.com/docker/go-events/queue.go delete mode 100644 vendor/src/github.com/docker/go-events/retry.go delete mode 100644 vendor/src/github.com/docker/go-units/LICENSE delete mode 100644 vendor/src/github.com/docker/go-units/README.md delete mode 100644 vendor/src/github.com/docker/go-units/circle.yml delete mode 100644 vendor/src/github.com/docker/go-units/duration.go delete mode 100644 vendor/src/github.com/docker/go-units/size.go delete mode 100644 vendor/src/github.com/docker/go-units/ulimit.go delete mode 100644 vendor/src/github.com/docker/go/LICENSE delete mode 100644 
vendor/src/github.com/docker/go/canonical/json/decode.go delete mode 100644 vendor/src/github.com/docker/go/canonical/json/encode.go delete mode 100644 vendor/src/github.com/docker/go/canonical/json/fold.go delete mode 100644 vendor/src/github.com/docker/go/canonical/json/indent.go delete mode 100644 vendor/src/github.com/docker/go/canonical/json/scanner.go delete mode 100644 vendor/src/github.com/docker/go/canonical/json/stream.go delete mode 100644 vendor/src/github.com/docker/go/canonical/json/tags.go delete mode 100644 vendor/src/github.com/docker/libkv/.travis.yml delete mode 100644 vendor/src/github.com/docker/libkv/LICENSE.code delete mode 100644 vendor/src/github.com/docker/libkv/LICENSE.docs delete mode 100644 vendor/src/github.com/docker/libkv/MAINTAINERS delete mode 100644 vendor/src/github.com/docker/libkv/README.md delete mode 100644 vendor/src/github.com/docker/libkv/libkv.go delete mode 100644 vendor/src/github.com/docker/libkv/store/boltdb/boltdb.go delete mode 100644 vendor/src/github.com/docker/libkv/store/consul/consul.go delete mode 100644 vendor/src/github.com/docker/libkv/store/etcd/etcd.go delete mode 100644 vendor/src/github.com/docker/libkv/store/helpers.go delete mode 100644 vendor/src/github.com/docker/libkv/store/store.go delete mode 100644 vendor/src/github.com/docker/libkv/store/zookeeper/zookeeper.go delete mode 100644 vendor/src/github.com/docker/libnetwork/.dockerignore delete mode 100644 vendor/src/github.com/docker/libnetwork/.gitignore delete mode 100644 vendor/src/github.com/docker/libnetwork/CHANGELOG.md delete mode 100644 vendor/src/github.com/docker/libnetwork/Dockerfile.build delete mode 100644 vendor/src/github.com/docker/libnetwork/LICENSE delete mode 100644 vendor/src/github.com/docker/libnetwork/MAINTAINERS delete mode 100644 vendor/src/github.com/docker/libnetwork/Makefile delete mode 100644 vendor/src/github.com/docker/libnetwork/README.md delete mode 100644 vendor/src/github.com/docker/libnetwork/ROADMAP.md delete mode 100644 vendor/src/github.com/docker/libnetwork/agent.go delete mode 100644 vendor/src/github.com/docker/libnetwork/agent.pb.go delete mode 100644 vendor/src/github.com/docker/libnetwork/agent.proto delete mode 100644 vendor/src/github.com/docker/libnetwork/bitseq/sequence.go delete mode 100644 vendor/src/github.com/docker/libnetwork/bitseq/store.go delete mode 100644 vendor/src/github.com/docker/libnetwork/circle.yml delete mode 100644 vendor/src/github.com/docker/libnetwork/cluster/provider.go delete mode 100644 vendor/src/github.com/docker/libnetwork/cmd/proxy/main.go delete mode 100644 vendor/src/github.com/docker/libnetwork/cmd/proxy/proxy.go delete mode 100644 vendor/src/github.com/docker/libnetwork/cmd/proxy/stub_proxy.go delete mode 100644 vendor/src/github.com/docker/libnetwork/cmd/proxy/tcp_proxy.go delete mode 100644 vendor/src/github.com/docker/libnetwork/cmd/proxy/udp_proxy.go delete mode 100644 vendor/src/github.com/docker/libnetwork/config/config.go delete mode 100644 vendor/src/github.com/docker/libnetwork/config/libnetwork.toml delete mode 100644 vendor/src/github.com/docker/libnetwork/controller.go delete mode 100644 vendor/src/github.com/docker/libnetwork/datastore/cache.go delete mode 100644 vendor/src/github.com/docker/libnetwork/datastore/datastore.go delete mode 100644 vendor/src/github.com/docker/libnetwork/datastore/mock_store.go delete mode 100644 vendor/src/github.com/docker/libnetwork/default_gateway.go delete mode 100644 vendor/src/github.com/docker/libnetwork/default_gateway_freebsd.go delete mode 
100644 vendor/src/github.com/docker/libnetwork/default_gateway_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/default_gateway_solaris.go delete mode 100644 vendor/src/github.com/docker/libnetwork/default_gateway_windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/discoverapi/discoverapi.go delete mode 100644 vendor/src/github.com/docker/libnetwork/driverapi/driverapi.go delete mode 100644 vendor/src/github.com/docker/libnetwork/driverapi/errors.go delete mode 100644 vendor/src/github.com/docker/libnetwork/driverapi/ipamdata.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/bridge_store.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/errors.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/interface.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/labels.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/link.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_armppc64.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_linux_notarm.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/netlink_deprecated_unsupported.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/port_mapping.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/resolvconf.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_bridgenetfiltering.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_device.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_firewalld.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_forwarding.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ip_tables.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv4.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_ipv6.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/bridge/setup_verify.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/host/host.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_endpoint.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_joinleave.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_network.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_setup.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_state.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/ipvlan/ipvlan_store.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_endpoint.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_joinleave.go delete mode 100644 
vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_network.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_setup.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_state.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/macvlan/macvlan_store.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/null/null.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/encryption.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/filter.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/joinleave.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_endpoint.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_network.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_serf.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/ov_utils.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.pb.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/overlay.proto delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/ovmanager/ovmanager.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/overlay/peerdb.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/remote/api/api.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/remote/driver.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/windows/labels.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers/windows/windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_experimental_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_freebsd.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_ipam.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_solaris.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_stub_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drivers_windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/drvregistry/drvregistry.go delete mode 100644 vendor/src/github.com/docker/libnetwork/endpoint.go delete mode 100644 vendor/src/github.com/docker/libnetwork/endpoint_cnt.go delete mode 100644 vendor/src/github.com/docker/libnetwork/endpoint_info.go delete mode 100644 vendor/src/github.com/docker/libnetwork/error.go delete mode 100644 vendor/src/github.com/docker/libnetwork/etchosts/etchosts.go delete mode 100644 vendor/src/github.com/docker/libnetwork/hostdiscovery/hostdiscovery.go delete mode 100644 vendor/src/github.com/docker/libnetwork/hostdiscovery/hostdiscovery_api.go delete mode 100644 vendor/src/github.com/docker/libnetwork/hostdiscovery/libnetwork.toml delete mode 100644 vendor/src/github.com/docker/libnetwork/idm/idm.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipam/allocator.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipam/store.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipam/structures.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipam/utils.go delete mode 100644 
vendor/src/github.com/docker/libnetwork/ipamapi/contract.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/builtin/builtin_unix.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/builtin/builtin_windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/null/null.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/remote/api/api.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/remote/remote.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipams/windowsipam/windowsipam.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipamutils/utils.go delete mode 100644 vendor/src/github.com/docker/libnetwork/iptables/firewalld.go delete mode 100644 vendor/src/github.com/docker/libnetwork/iptables/iptables.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipvs/constants.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipvs/ipvs.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ipvs/netlink.go delete mode 100755 vendor/src/github.com/docker/libnetwork/machines delete mode 100644 vendor/src/github.com/docker/libnetwork/netlabel/labels.go delete mode 100644 vendor/src/github.com/docker/libnetwork/netutils/utils.go delete mode 100644 vendor/src/github.com/docker/libnetwork/netutils/utils_freebsd.go delete mode 100644 vendor/src/github.com/docker/libnetwork/netutils/utils_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/netutils/utils_solaris.go delete mode 100644 vendor/src/github.com/docker/libnetwork/netutils/utils_windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/network.go delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/broadcast.go delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/cluster.go delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/delegate.go delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/event_delegate.go delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/message.go delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/networkdb.go delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/networkdb.pb.go delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/networkdb.proto delete mode 100644 vendor/src/github.com/docker/libnetwork/networkdb/watch.go delete mode 100644 vendor/src/github.com/docker/libnetwork/ns/init_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/options/options.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/interface_freebsd.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/interface_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/interface_solaris.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/interface_windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/namespace_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/namespace_unsupported.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/namespace_windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/neigh_freebsd.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/neigh_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/neigh_solaris.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/neigh_windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/options_linux.go 
delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/route_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/sandbox.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/sandbox_freebsd.go delete mode 100644 vendor/src/github.com/docker/libnetwork/osl/sandbox_unsupported.go delete mode 100644 vendor/src/github.com/docker/libnetwork/portallocator/portallocator.go delete mode 100644 vendor/src/github.com/docker/libnetwork/portmapper/mapper.go delete mode 100644 vendor/src/github.com/docker/libnetwork/portmapper/mock_proxy.go delete mode 100644 vendor/src/github.com/docker/libnetwork/portmapper/proxy.go delete mode 100644 vendor/src/github.com/docker/libnetwork/resolvconf/README.md delete mode 100644 vendor/src/github.com/docker/libnetwork/resolvconf/dns/resolvconf.go delete mode 100644 vendor/src/github.com/docker/libnetwork/resolvconf/resolvconf.go delete mode 100644 vendor/src/github.com/docker/libnetwork/resolver.go delete mode 100644 vendor/src/github.com/docker/libnetwork/resolver_unix.go delete mode 100644 vendor/src/github.com/docker/libnetwork/resolver_windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox.go delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_dns_unix.go delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_dns_windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_externalkey.go delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_externalkey_solaris.go delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_externalkey_unix.go delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_externalkey_windows.go delete mode 100644 vendor/src/github.com/docker/libnetwork/sandbox_store.go delete mode 100644 vendor/src/github.com/docker/libnetwork/service.go delete mode 100644 vendor/src/github.com/docker/libnetwork/service_linux.go delete mode 100644 vendor/src/github.com/docker/libnetwork/service_unsupported.go delete mode 100644 vendor/src/github.com/docker/libnetwork/store.go delete mode 100755 vendor/src/github.com/docker/libnetwork/support.sh delete mode 100644 vendor/src/github.com/docker/libnetwork/types/types.go delete mode 100755 vendor/src/github.com/docker/libnetwork/wrapmake.sh delete mode 100644 vendor/src/github.com/docker/libtrust/CONTRIBUTING.md delete mode 100644 vendor/src/github.com/docker/libtrust/LICENSE delete mode 100644 vendor/src/github.com/docker/libtrust/MAINTAINERS delete mode 100644 vendor/src/github.com/docker/libtrust/README.md delete mode 100644 vendor/src/github.com/docker/libtrust/certificates.go delete mode 100644 vendor/src/github.com/docker/libtrust/doc.go delete mode 100644 vendor/src/github.com/docker/libtrust/ec_key.go delete mode 100644 vendor/src/github.com/docker/libtrust/filter.go delete mode 100644 vendor/src/github.com/docker/libtrust/hash.go delete mode 100644 vendor/src/github.com/docker/libtrust/jsonsign.go delete mode 100644 vendor/src/github.com/docker/libtrust/key.go delete mode 100644 vendor/src/github.com/docker/libtrust/key_files.go delete mode 100644 vendor/src/github.com/docker/libtrust/key_manager.go delete mode 100644 vendor/src/github.com/docker/libtrust/rsa_key.go delete mode 100644 vendor/src/github.com/docker/libtrust/util.go delete mode 100644 vendor/src/github.com/docker/notary/.gitignore delete mode 100644 vendor/src/github.com/docker/notary/CHANGELOG.md delete mode 100644 vendor/src/github.com/docker/notary/CONTRIBUTING.md delete mode 100644 
vendor/src/github.com/docker/notary/CONTRIBUTORS delete mode 100644 vendor/src/github.com/docker/notary/Dockerfile delete mode 100644 vendor/src/github.com/docker/notary/LICENSE delete mode 100644 vendor/src/github.com/docker/notary/MAINTAINERS delete mode 100644 vendor/src/github.com/docker/notary/Makefile delete mode 100644 vendor/src/github.com/docker/notary/NOTARY_VERSION delete mode 100644 vendor/src/github.com/docker/notary/README.md delete mode 100644 vendor/src/github.com/docker/notary/ROADMAP.md delete mode 100644 vendor/src/github.com/docker/notary/circle.yml delete mode 100644 vendor/src/github.com/docker/notary/client/changelist/change.go delete mode 100644 vendor/src/github.com/docker/notary/client/changelist/changelist.go delete mode 100644 vendor/src/github.com/docker/notary/client/changelist/file_changelist.go delete mode 100644 vendor/src/github.com/docker/notary/client/changelist/interface.go delete mode 100644 vendor/src/github.com/docker/notary/client/client.go delete mode 100644 vendor/src/github.com/docker/notary/client/delegations.go delete mode 100644 vendor/src/github.com/docker/notary/client/helpers.go delete mode 100644 vendor/src/github.com/docker/notary/client/repo.go delete mode 100644 vendor/src/github.com/docker/notary/client/repo_pkcs11.go delete mode 100644 vendor/src/github.com/docker/notary/codecov.yml delete mode 100644 vendor/src/github.com/docker/notary/const.go delete mode 100755 vendor/src/github.com/docker/notary/coverpkg.sh delete mode 100644 vendor/src/github.com/docker/notary/cryptoservice/certificate.go delete mode 100644 vendor/src/github.com/docker/notary/cryptoservice/crypto_service.go delete mode 100644 vendor/src/github.com/docker/notary/cryptoservice/import_export.go delete mode 100644 vendor/src/github.com/docker/notary/development.rethink.yml delete mode 100644 vendor/src/github.com/docker/notary/development.yml delete mode 100644 vendor/src/github.com/docker/notary/docker-compose.rethink.yml delete mode 100644 vendor/src/github.com/docker/notary/docker-compose.yml delete mode 100644 vendor/src/github.com/docker/notary/passphrase/passphrase.go delete mode 100644 vendor/src/github.com/docker/notary/server.Dockerfile delete mode 100644 vendor/src/github.com/docker/notary/signer.Dockerfile delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/filestore.go delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/keyfilestore.go delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/keystore.go delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/memorystore.go delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/store.go delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/x509utils.go delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/yubikey/non_pkcs11.go delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/yubikey/pkcs11_darwin.go delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/yubikey/pkcs11_interface.go delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/yubikey/pkcs11_linux.go delete mode 100644 vendor/src/github.com/docker/notary/trustmanager/yubikey/yubikeystore.go delete mode 100644 vendor/src/github.com/docker/notary/trustpinning/certs.go delete mode 100644 vendor/src/github.com/docker/notary/trustpinning/trustpin.go delete mode 100644 vendor/src/github.com/docker/notary/tuf/LICENSE delete mode 100644 vendor/src/github.com/docker/notary/tuf/README.md delete mode 100644 
vendor/src/github.com/docker/notary/tuf/builder.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/client/client.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/client/errors.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/errors.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/keys.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/roles.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/root.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/serializer.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/snapshot.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/targets.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/timestamp.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/data/types.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/ed25519.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/errors.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/interface.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/sign.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/verifiers.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/signed/verify.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/errors.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/filestore.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/httpstore.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/interfaces.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/memorystore.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/store/offlinestore.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/tuf.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/utils/role_sort.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/utils/stack.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/utils/util.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/utils/utils.go
delete mode 100644 vendor/src/github.com/docker/notary/tuf/validation/errors.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/LICENSE
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/agent.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/config.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/errors.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/exec/controller.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/exec/controller_test.mock.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/exec/errors.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/exec/executor.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/helpers.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/node.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/reporter.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/session.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/storage.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/task.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/agent/worker.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/README.md
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/ca.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/ca.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/control.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/control.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/dispatcher.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/dispatcher.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/duration/duration.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/duration/duration.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/duration/gen.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/equality/equality.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/gen.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/health.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/health.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/objects.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/objects.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/raft.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/raft.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/snapshot.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/snapshot.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/specs.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/specs.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/timestamp/gen.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/timestamp/timestamp.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/types.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/api/types.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/auth.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/certificates.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/config.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/external.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/forward.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/server.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ca/transport.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/identity/doc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/identity/randomid.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/ioutils/ioutils.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/log/context.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/log/grpc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/allocator/allocator.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/allocator/doc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/allocator/network.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/networkallocator.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/allocator/networkallocator/portallocator.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/cluster.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/common.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/hackpicker/cluster.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/hackpicker/raftpicker.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/network.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/node.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/server.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/service.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/controlapi/task.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/dispatcher/dispatcher.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/dispatcher/heartbeat/heartbeat.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/dispatcher/nodes.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/dispatcher/period.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/doc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/health/health.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/keymanager/keymanager.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/manager.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/global.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/replicated.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/restart.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/services.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/task_reaper.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/tasks.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/orchestrator/updater.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/raftpicker/cluster.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/raftpicker/raftpicker.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/constraint.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/expr.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/filter.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/indexed_node_heap.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/nodeinfo.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/pipeline.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/scheduler/scheduler.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/doc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/proposer.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/raft/membership/cluster.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/raft/raft.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/raft/storage.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/raft/util.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/raft/wait.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/apply.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/by.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/clusters.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/combinators.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/memory.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/networks.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/nodes.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/object.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/services.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/store/tasks.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/watch.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/manager/state/watch/watch.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/picker/picker.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/plugin/gen.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/plugin/helpers.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.pb.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/plugin/plugin.proto
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/ptypes/doc.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/ptypes/duration.go
delete mode 100644 vendor/src/github.com/docker/swarmkit/protobuf/ptypes/timestamp.go
delete mode 100644 vendor/src/github.com/fluent/fluent-logger-golang/LICENSE
delete mode 100644 vendor/src/github.com/fluent/fluent-logger-golang/fluent/fluent.go
delete mode 100644 vendor/src/github.com/fluent/fluent-logger-golang/fluent/proto.go
delete mode 100644 vendor/src/github.com/fluent/fluent-logger-golang/fluent/proto_gen.go
delete mode 100644 vendor/src/github.com/fluent/fluent-logger-golang/fluent/version.go
delete mode 100644 vendor/src/github.com/flynn-archive/go-shlex/COPYING
delete mode 100644 vendor/src/github.com/flynn-archive/go-shlex/Makefile
delete mode 100644 vendor/src/github.com/flynn-archive/go-shlex/README.md
delete mode 100644 vendor/src/github.com/flynn-archive/go-shlex/shlex.go
delete mode 100644 vendor/src/github.com/go-check/check/.gitignore
delete mode 100644 vendor/src/github.com/go-check/check/.travis.yml
delete mode 100644 vendor/src/github.com/go-check/check/LICENSE
delete mode 100644 vendor/src/github.com/go-check/check/README.md
delete mode 100644 vendor/src/github.com/go-check/check/TODO
delete mode 100644 vendor/src/github.com/go-check/check/benchmark.go
delete mode 100644 vendor/src/github.com/go-check/check/check.go
delete mode 100644 vendor/src/github.com/go-check/check/checkers.go
delete mode 100644 vendor/src/github.com/go-check/check/helpers.go
delete mode 100644 vendor/src/github.com/go-check/check/printer.go
delete mode 100644 vendor/src/github.com/go-check/check/reporter.go
delete mode 100644 vendor/src/github.com/go-check/check/run.go
delete mode 100644 vendor/src/github.com/go-ini/ini/.gitignore
delete mode 100644 vendor/src/github.com/go-ini/ini/LICENSE
delete mode 100644 vendor/src/github.com/go-ini/ini/README.md
delete mode 100644 vendor/src/github.com/go-ini/ini/README_ZH.md
delete mode 100644 vendor/src/github.com/go-ini/ini/ini.go
delete mode 100644 vendor/src/github.com/go-ini/ini/struct.go
delete mode 100644 vendor/src/github.com/godbus/dbus/CONTRIBUTING.md
delete mode 100644 vendor/src/github.com/godbus/dbus/LICENSE
delete mode 100644 vendor/src/github.com/godbus/dbus/MAINTAINERS
delete mode 100644 vendor/src/github.com/godbus/dbus/README.markdown
delete mode 100644 vendor/src/github.com/godbus/dbus/auth.go
delete mode 100644 vendor/src/github.com/godbus/dbus/auth_external.go
delete mode 100644 vendor/src/github.com/godbus/dbus/auth_sha1.go
delete mode 100644 vendor/src/github.com/godbus/dbus/call.go
delete mode 100644 vendor/src/github.com/godbus/dbus/conn.go
delete mode 100644 vendor/src/github.com/godbus/dbus/conn_darwin.go
delete mode 100644 vendor/src/github.com/godbus/dbus/conn_other.go
delete mode 100644 vendor/src/github.com/godbus/dbus/dbus.go
delete mode 100644 vendor/src/github.com/godbus/dbus/decoder.go
delete mode 100644 vendor/src/github.com/godbus/dbus/doc.go
delete mode 100644 vendor/src/github.com/godbus/dbus/encoder.go
delete mode 100644 vendor/src/github.com/godbus/dbus/export.go
delete mode 100644 vendor/src/github.com/godbus/dbus/homedir.go
delete mode 100644 vendor/src/github.com/godbus/dbus/homedir_dynamic.go
delete mode 100644 vendor/src/github.com/godbus/dbus/homedir_static.go
delete mode 100644 vendor/src/github.com/godbus/dbus/message.go
delete mode 100644 vendor/src/github.com/godbus/dbus/object.go
delete mode 100644 vendor/src/github.com/godbus/dbus/sig.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_darwin.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_generic.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_tcp.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_unix.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_unixcred_dragonfly.go
delete mode 100644 vendor/src/github.com/godbus/dbus/transport_unixcred_linux.go
delete mode 100644 vendor/src/github.com/godbus/dbus/variant.go
delete mode 100644 vendor/src/github.com/godbus/dbus/variant_lexer.go
delete mode 100644 vendor/src/github.com/godbus/dbus/variant_parser.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/LICENSE
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/Makefile
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/doc.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/gogo.pb.golden
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/gogo.proto
delete mode 100644 vendor/src/github.com/gogo/protobuf/gogoproto/helper.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/Makefile
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/clone.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/decode.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/decode_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/encode.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/encode_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/equal.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/extensions.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/extensions_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/lib.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/lib_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/message_set.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/pointer_reflect.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/properties.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/properties_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/skip_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/text.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/text_gogo.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/proto/text_parser.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile
delete mode 100644 vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go
delete mode 100644 vendor/src/github.com/gogo/protobuf/sortkeys/sortkeys.go
delete mode 100644 vendor/src/github.com/golang/mock/LICENSE
delete mode 100644 vendor/src/github.com/golang/mock/gomock/call.go
delete mode 100644 vendor/src/github.com/golang/mock/gomock/callset.go
delete mode 100644 vendor/src/github.com/golang/mock/gomock/controller.go
delete mode 100644 vendor/src/github.com/golang/mock/gomock/matchers.go
delete mode 100644 vendor/src/github.com/golang/protobuf/LICENSE
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/Makefile
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/clone.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/decode.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/encode.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/equal.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/extensions.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/lib.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/message_set.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/pointer_reflect.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/pointer_unsafe.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/properties.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/text.go
delete mode 100644 vendor/src/github.com/golang/protobuf/proto/text_parser.go
delete mode 100644 vendor/src/github.com/golang/protobuf/ptypes/doc.go
delete mode 100644 vendor/src/github.com/golang/protobuf/ptypes/duration.go
delete mode 100644 vendor/src/github.com/golang/protobuf/ptypes/duration/duration.pb.go
delete mode 100644 vendor/src/github.com/golang/protobuf/ptypes/duration/duration.proto
delete mode 100755 vendor/src/github.com/golang/protobuf/ptypes/regen.sh
delete mode 100644 vendor/src/github.com/golang/protobuf/ptypes/timestamp.go
delete mode 100644 vendor/src/github.com/golang/protobuf/ptypes/timestamp/timestamp.pb.go
delete mode 100644 vendor/src/github.com/golang/protobuf/ptypes/timestamp/timestamp.proto
delete mode 100644 vendor/src/github.com/google/certificate-transparency/LICENSE
delete mode 100644 vendor/src/github.com/google/certificate-transparency/go/README.md
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/asn1/asn1.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/asn1/common.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/asn1/marshal.go
delete mode 100644 vendor/src/github.com/google/certificate-transparency/go/client/logclient.go
delete mode 100644 vendor/src/github.com/google/certificate-transparency/go/serialization.go
delete mode 100644 vendor/src/github.com/google/certificate-transparency/go/signatures.go
delete mode 100644 vendor/src/github.com/google/certificate-transparency/go/types.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/cert_pool.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/pem_decrypt.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/pkcs1.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/pkcs8.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/pkix/pkix.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root_darwin.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root_plan9.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root_stub.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root_unix.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/root_windows.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/sec1.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/verify.go
delete mode 100755 vendor/src/github.com/google/certificate-transparency/go/x509/x509.go
delete mode 100644 vendor/src/github.com/gorilla/context/.travis.yml
delete mode 100644 vendor/src/github.com/gorilla/context/LICENSE
delete mode 100644 vendor/src/github.com/gorilla/context/README.md
delete mode 100644 vendor/src/github.com/gorilla/context/context.go
delete mode 100644 vendor/src/github.com/gorilla/context/doc.go
delete mode 100644 vendor/src/github.com/gorilla/mux/.travis.yml
delete mode 100644 vendor/src/github.com/gorilla/mux/LICENSE
delete mode 100644 vendor/src/github.com/gorilla/mux/README.md
delete mode 100644 vendor/src/github.com/gorilla/mux/doc.go
delete mode 100644 vendor/src/github.com/gorilla/mux/mux.go
delete mode 100644 vendor/src/github.com/gorilla/mux/regexp.go
delete mode 100644 vendor/src/github.com/gorilla/mux/route.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/README.md
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/acl.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/agent.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/api.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/catalog.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/event.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/health.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/kv.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/lock.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/raw.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/semaphore.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/session.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/api/status.go
delete mode 100644 vendor/src/github.com/hashicorp/consul/website/LICENSE.md
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/.gitignore
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/.travis.yml
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/README.md
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/edges.go
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/iradix.go
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/iter.go
delete mode 100644 vendor/src/github.com/hashicorp/go-immutable-radix/node.go
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/.gitignore
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/README.md
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/index.go
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/memdb.go
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/schema.go
delete mode 100644 vendor/src/github.com/hashicorp/go-memdb/txn.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/0doc.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/README.md
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/binc.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/decode.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/encode.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/helper.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/helper_internal.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/msgpack.go
delete mode 100755 vendor/src/github.com/hashicorp/go-msgpack/codec/msgpack_test.py
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/rpc.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/simple.go
delete mode 100644 vendor/src/github.com/hashicorp/go-msgpack/codec/time.go
delete mode 100644 vendor/src/github.com/hashicorp/go-multierror/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/go-multierror/README.md
delete mode 100644 vendor/src/github.com/hashicorp/go-multierror/append.go
delete mode 100644 vendor/src/github.com/hashicorp/go-multierror/format.go
delete mode 100644 vendor/src/github.com/hashicorp/go-multierror/multierror.go
delete mode 100644 vendor/src/github.com/hashicorp/golang-lru/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/golang-lru/simplelru/lru.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/.gitignore
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/Makefile
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/README.md
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/alive_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/broadcast.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/config.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/conflict_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/event_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/keyring.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/logging.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/memberlist.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/merge_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/net.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/ping_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/queue.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/security.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/state.go
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/todo.md
delete mode 100644 vendor/src/github.com/hashicorp/memberlist/util.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/LICENSE
delete mode 100644 vendor/src/github.com/hashicorp/serf/coordinate/client.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/coordinate/config.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/coordinate/coordinate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/coordinate/phantom.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/broadcast.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/coalesce.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/coalesce_member.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/coalesce_user.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/config.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/conflict_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/event.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/event_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/internal_query.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/keymanager.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/lamport.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/merge_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/messages.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/ping_delegate.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/query.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/serf.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/serf/snapshot.go
delete mode 100644 vendor/src/github.com/hashicorp/serf/website/source/LICENSE
delete mode 100644 vendor/src/github.com/imdario/mergo/.travis.yml
delete mode 100644 vendor/src/github.com/imdario/mergo/LICENSE
delete mode 100644 vendor/src/github.com/imdario/mergo/README.md
delete mode 100644 vendor/src/github.com/imdario/mergo/doc.go
delete mode 100644 vendor/src/github.com/imdario/mergo/map.go
delete mode 100644 vendor/src/github.com/imdario/mergo/merge.go
delete mode 100644 vendor/src/github.com/imdario/mergo/mergo.go
delete mode 100644 vendor/src/github.com/inconshreveable/mousetrap/LICENSE
delete mode 100644 vendor/src/github.com/inconshreveable/mousetrap/README.md
delete mode 100644 vendor/src/github.com/inconshreveable/mousetrap/trap_others.go
delete mode 100644 vendor/src/github.com/inconshreveable/mousetrap/trap_windows.go
delete mode 100644 vendor/src/github.com/inconshreveable/mousetrap/trap_windows_1.4.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/.gitignore
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/.travis.yml
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/LICENSE
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/Makefile
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/README.md
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/api.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/astnodetype_string.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/functions.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/interpreter.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/lexer.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/parser.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/toktype_string.go
delete mode 100644 vendor/src/github.com/jmespath/go-jmespath/util.go
delete mode 100644 vendor/src/github.com/kr/pty/.gitignore
delete mode 100644 vendor/src/github.com/kr/pty/License
delete mode 100644 vendor/src/github.com/kr/pty/README.md
delete mode 100644 vendor/src/github.com/kr/pty/doc.go
delete mode 100644 vendor/src/github.com/kr/pty/ioctl.go
delete mode 100644 vendor/src/github.com/kr/pty/ioctl_bsd.go
delete mode 100755 vendor/src/github.com/kr/pty/mktypes.bash
delete mode 100644 vendor/src/github.com/kr/pty/pty_darwin.go
delete mode 100644 vendor/src/github.com/kr/pty/pty_freebsd.go
delete mode 100644 vendor/src/github.com/kr/pty/pty_linux.go
delete mode 100644 vendor/src/github.com/kr/pty/pty_unsupported.go
delete mode 100644 vendor/src/github.com/kr/pty/run.go
delete mode 100644 vendor/src/github.com/kr/pty/types.go
delete mode 100644 vendor/src/github.com/kr/pty/types_freebsd.go
delete mode 100644 vendor/src/github.com/kr/pty/util.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_386.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_amd64.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_arm.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_arm64.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_freebsd_386.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_freebsd_amd64.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_freebsd_arm.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_ppc64.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_ppc64le.go
delete mode 100644 vendor/src/github.com/kr/pty/ztypes_s390x.go
delete mode 100644 vendor/src/github.com/mattn/go-shellwords/.travis.yml
delete mode 100644 vendor/src/github.com/mattn/go-shellwords/README.md
delete mode 100644 vendor/src/github.com/mattn/go-shellwords/shellwords.go
delete mode 100644 vendor/src/github.com/mattn/go-shellwords/util_posix.go
delete mode 100644 vendor/src/github.com/mattn/go-shellwords/util_windows.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/.gitignore
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/.travis.yml
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/LICENSE
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/README.md
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/backup.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/code/sqlite3-binding.c
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/code/sqlite3-binding.h
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/code/sqlite3ext.h
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/doc.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/error.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.c
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3-binding.h
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_icu.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_libsqlite3.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_load_extension.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_omit_load_extension.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_other.go
delete mode 100644 vendor/src/github.com/mattn/go-sqlite3/sqlite3_windows.go
delete mode 100644 vendor/src/github.com/matttproud/golang_protobuf_extensions/LICENSE
delete mode 100644 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go
delete mode 100644 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go
delete mode 100644 vendor/src/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go
delete mode 100644 vendor/src/github.com/miekg/dns/.gitignore
delete mode 100644 vendor/src/github.com/miekg/dns/.travis.yml
delete mode 100644 vendor/src/github.com/miekg/dns/AUTHORS
delete mode 100644 vendor/src/github.com/miekg/dns/CONTRIBUTORS
delete mode 100644 vendor/src/github.com/miekg/dns/COPYRIGHT
delete mode 100644 vendor/src/github.com/miekg/dns/LICENSE
delete mode 100644 vendor/src/github.com/miekg/dns/README.md
delete mode 100644 vendor/src/github.com/miekg/dns/client.go
delete mode 100644 vendor/src/github.com/miekg/dns/clientconfig.go
delete mode 100644 vendor/src/github.com/miekg/dns/defaults.go
delete mode 100644 vendor/src/github.com/miekg/dns/dns.go
delete mode 100644 vendor/src/github.com/miekg/dns/dnssec.go
delete mode 100644 vendor/src/github.com/miekg/dns/dnssec_keygen.go
delete mode 100644 vendor/src/github.com/miekg/dns/dnssec_keyscan.go
delete mode 100644 vendor/src/github.com/miekg/dns/dnssec_privkey.go
delete mode 100644 vendor/src/github.com/miekg/dns/doc.go
delete mode 100644 vendor/src/github.com/miekg/dns/edns.go
delete mode 100644 vendor/src/github.com/miekg/dns/format.go
delete mode 100644 vendor/src/github.com/miekg/dns/labels.go
delete mode 100644 vendor/src/github.com/miekg/dns/msg.go
delete mode 100644 vendor/src/github.com/miekg/dns/nsecx.go
delete mode 100644 vendor/src/github.com/miekg/dns/privaterr.go
delete mode 100644 vendor/src/github.com/miekg/dns/rawmsg.go
delete mode 100644 vendor/src/github.com/miekg/dns/sanitize.go
delete mode 100644 vendor/src/github.com/miekg/dns/scanner.go
delete mode 100644 vendor/src/github.com/miekg/dns/server.go
delete mode 100644 vendor/src/github.com/miekg/dns/sig0.go
delete mode 100644 vendor/src/github.com/miekg/dns/singleinflight.go
delete mode 100644 vendor/src/github.com/miekg/dns/tlsa.go
delete mode 100644 vendor/src/github.com/miekg/dns/tsig.go
delete mode 100644 vendor/src/github.com/miekg/dns/types.go
delete mode 100644 vendor/src/github.com/miekg/dns/types_generate.go
delete mode 100644 vendor/src/github.com/miekg/dns/udp.go
delete mode 100644 vendor/src/github.com/miekg/dns/udp_linux.go
delete mode 100644 vendor/src/github.com/miekg/dns/udp_other.go
delete mode 100644 vendor/src/github.com/miekg/dns/udp_windows.go
delete mode 100644 vendor/src/github.com/miekg/dns/update.go
delete mode 100644 vendor/src/github.com/miekg/dns/xfr.go
delete mode 100644 vendor/src/github.com/miekg/dns/zgenerate.go
delete mode 100644 vendor/src/github.com/miekg/dns/zscan.go
delete mode 100644 vendor/src/github.com/miekg/dns/zscan_rr.go
delete mode 100644 vendor/src/github.com/miekg/dns/ztypes.go
delete mode 100644 vendor/src/github.com/miekg/pkcs11/.gitignore
delete mode 100644 vendor/src/github.com/miekg/pkcs11/.travis.yml
delete mode 100644 vendor/src/github.com/miekg/pkcs11/LICENSE
delete mode 100644 vendor/src/github.com/miekg/pkcs11/README.md
delete mode 100644 vendor/src/github.com/miekg/pkcs11/const.go
delete mode 100644 vendor/src/github.com/miekg/pkcs11/error.go
delete mode 100644 vendor/src/github.com/miekg/pkcs11/hsm.db
delete mode 100644 vendor/src/github.com/miekg/pkcs11/pkcs11.go
delete mode 100644 vendor/src/github.com/miekg/pkcs11/pkcs11.h
delete mode 100644 vendor/src/github.com/miekg/pkcs11/pkcs11f.h
delete mode 100644 vendor/src/github.com/miekg/pkcs11/pkcs11t.h
delete mode 100644 vendor/src/github.com/miekg/pkcs11/softhsm.conf
delete mode 100644 vendor/src/github.com/miekg/pkcs11/types.go
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/.gitignore
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/.travis.yml
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/CONTRIBUTING.md
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/LICENSE
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/README.md
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/error.go
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/utils.go
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/utils_notsolaris.go
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/utils_solaris.go
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/zfs.go
delete mode 100644 vendor/src/github.com/mistifyio/go-zfs/zpool.go
delete mode 100644 vendor/src/github.com/mreiferson/go-httpclient/.gitignore
delete mode 100644 vendor/src/github.com/mreiferson/go-httpclient/.travis.yml
delete mode 100644 vendor/src/github.com/mreiferson/go-httpclient/LICENSE
delete mode 100644 vendor/src/github.com/mreiferson/go-httpclient/README.md
delete mode 100644 vendor/src/github.com/mreiferson/go-httpclient/httpclient.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/LICENSE
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/apparmor/apparmor.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/apparmor/apparmor_disabled.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/cgroups_unsupported.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/stats.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/cgroups/utils.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/blkio_device.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unix.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_unsupported.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/cgroup_windows.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/config.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/config_unix.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/device.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/device_defaults.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/hugepage_limit.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/interface_priority_map.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/mount.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/namespaces.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_syscall_unsupported.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_unix.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/namespaces_unsupported.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/configs/network.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/devices/devices_unix.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/devices/devices_unsupported.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/devices/number.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/label/label.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/label/label_selinux.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/selinux/selinux.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/system/linux.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/system/proc.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/system/setns_linux.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_386.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_64.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/system/syscall_linux_arm.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/system/sysconfig.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/system/sysconfig_notcgo.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/system/unsupported.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/system/xattrs_linux.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/user/MAINTAINERS
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/user/lookup.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/user/lookup_unix.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/user/lookup_unsupported.go
delete mode 100644 vendor/src/github.com/opencontainers/runc/libcontainer/user/user.go
delete mode 100644 vendor/src/github.com/opencontainers/specs/LICENSE
delete mode 100644 vendor/src/github.com/opencontainers/specs/specs-go/config.go
delete mode 100644 vendor/src/github.com/opencontainers/specs/specs-go/state.go
delete mode 100644 vendor/src/github.com/opencontainers/specs/specs-go/version.go
delete mode 100644 vendor/src/github.com/pborman/uuid/.travis.yml
delete mode 100644 vendor/src/github.com/pborman/uuid/CONTRIBUTORS
delete mode 100644 vendor/src/github.com/pborman/uuid/LICENSE
delete mode 100644 vendor/src/github.com/pborman/uuid/README.md
delete mode 100755 vendor/src/github.com/pborman/uuid/dce.go
delete mode 100755 vendor/src/github.com/pborman/uuid/doc.go
delete mode 100644 vendor/src/github.com/pborman/uuid/hash.go
delete mode 100644 vendor/src/github.com/pborman/uuid/json.go
delete mode 100755 vendor/src/github.com/pborman/uuid/node.go
delete mode 100644 vendor/src/github.com/pborman/uuid/sql.go
delete mode 100755 vendor/src/github.com/pborman/uuid/time.go
delete mode 100644 vendor/src/github.com/pborman/uuid/util.go
delete mode 100644 vendor/src/github.com/pborman/uuid/uuid.go
delete mode 100644 vendor/src/github.com/pborman/uuid/version1.go
delete mode 100644 vendor/src/github.com/pborman/uuid/version4.go
delete mode 100644 vendor/src/github.com/philhofer/fwd/README.md
delete mode 100644 vendor/src/github.com/philhofer/fwd/reader.go
delete mode 100644 vendor/src/github.com/philhofer/fwd/writer.go
delete mode 100644 vendor/src/github.com/philhofer/fwd/writer_appengine.go
delete mode 100644 vendor/src/github.com/philhofer/fwd/writer_unsafe.go
delete mode 100644 vendor/src/github.com/pivotal-golang/clock/LICENSE
delete mode 100644 vendor/src/github.com/pivotal-golang/clock/README.md
delete mode 100644 vendor/src/github.com/pivotal-golang/clock/clock.go
delete mode 100644 vendor/src/github.com/pivotal-golang/clock/ticker.go
delete mode 100644 vendor/src/github.com/pivotal-golang/clock/timer.go
delete mode 100644 vendor/src/github.com/pkg/errors/.gitignore
delete mode 100644 vendor/src/github.com/pkg/errors/.travis.yml
delete mode 100644 vendor/src/github.com/pkg/errors/LICENSE
delete mode 100644 vendor/src/github.com/pkg/errors/README.md
delete mode 100644 vendor/src/github.com/pkg/errors/appveyor.yml
delete mode 100644 vendor/src/github.com/pkg/errors/errors.go
delete mode 100644 vendor/src/github.com/pkg/errors/stack.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/LICENSE
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/.gitignore
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/README.md
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/collector.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/counter.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/desc.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/doc.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/expvar.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/gauge.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/go_collector.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/histogram.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/http.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/metric.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/process_collector.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/push.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/registry.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/summary.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/untyped.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/value.go
delete mode 100644 vendor/src/github.com/prometheus/client_golang/prometheus/vec.go
delete mode 100644 vendor/src/github.com/prometheus/client_model/LICENSE
delete mode 100644 vendor/src/github.com/prometheus/client_model/go/metrics.pb.go
delete mode 100644 vendor/src/github.com/prometheus/client_model/ruby/LICENSE
delete mode 100644 vendor/src/github.com/prometheus/common/LICENSE
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/decode.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/encode.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/expfmt.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/fuzz.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/json_decode.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/text_create.go
delete mode 100644 vendor/src/github.com/prometheus/common/expfmt/text_parse.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/alert.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/fingerprinting.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/labels.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/labelset.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/metric.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/model.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/signature.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/silence.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/time.go
delete mode 100644 vendor/src/github.com/prometheus/common/model/value.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/.travis.yml
delete mode 100644 vendor/src/github.com/prometheus/procfs/AUTHORS.md
delete mode 100644 vendor/src/github.com/prometheus/procfs/CONTRIBUTING.md
delete mode 100644 vendor/src/github.com/prometheus/procfs/LICENSE
delete mode 100644 vendor/src/github.com/prometheus/procfs/Makefile
delete mode 100644 vendor/src/github.com/prometheus/procfs/NOTICE
delete mode 100644 vendor/src/github.com/prometheus/procfs/README.md
delete mode 100644 vendor/src/github.com/prometheus/procfs/doc.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/fs.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/ipvs.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/mdstat.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/proc.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/proc_io.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/proc_limits.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/proc_stat.go
delete mode 100644 vendor/src/github.com/prometheus/procfs/stat.go
delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/LICENSE
delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/conn.go
delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/constants.go
delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/flw.go
delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/lock.go
delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/server_help.go
delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/server_java.go
delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/structs.go
delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/tracer.go
delete mode 100644 vendor/src/github.com/samuel/go-zookeeper/zk/util.go
delete mode 100644 vendor/src/github.com/seccomp/libseccomp-golang/LICENSE
delete mode 100644 vendor/src/github.com/seccomp/libseccomp-golang/README
delete mode 100644 vendor/src/github.com/seccomp/libseccomp-golang/seccomp.go
delete mode 100644 vendor/src/github.com/seccomp/libseccomp-golang/seccomp_internal.go
delete mode 100644 vendor/src/github.com/spf13/cobra/.gitignore
delete mode 100644 vendor/src/github.com/spf13/cobra/.mailmap
delete mode 100644 vendor/src/github.com/spf13/cobra/.travis.yml
delete mode 100644 vendor/src/github.com/spf13/cobra/LICENSE.txt
delete mode 100644 vendor/src/github.com/spf13/cobra/README.md
delete mode 100644 vendor/src/github.com/spf13/cobra/args.go
delete mode 100644 vendor/src/github.com/spf13/cobra/bash_completions.go
delete mode 100644 vendor/src/github.com/spf13/cobra/bash_completions.md
delete mode 100644 vendor/src/github.com/spf13/cobra/cobra.go
delete mode 100644 vendor/src/github.com/spf13/cobra/command.go
delete mode 100644 vendor/src/github.com/spf13/cobra/command_notwin.go
delete mode 100644 vendor/src/github.com/spf13/cobra/command_win.go
delete mode 100644 vendor/src/github.com/spf13/pflag/.travis.yml
delete mode 100644 vendor/src/github.com/spf13/pflag/LICENSE
delete mode 100644 vendor/src/github.com/spf13/pflag/README.md
delete mode 100644 vendor/src/github.com/spf13/pflag/bool.go
delete mode 100644 vendor/src/github.com/spf13/pflag/count.go
delete mode 100644 vendor/src/github.com/spf13/pflag/duration.go
delete mode 100644 vendor/src/github.com/spf13/pflag/flag.go
delete mode 100644 vendor/src/github.com/spf13/pflag/float32.go
delete mode 100644 vendor/src/github.com/spf13/pflag/float64.go
delete mode 100644 vendor/src/github.com/spf13/pflag/golangflag.go
delete mode 100644 vendor/src/github.com/spf13/pflag/int.go
delete mode 100644 vendor/src/github.com/spf13/pflag/int32.go
delete mode 100644 vendor/src/github.com/spf13/pflag/int64.go
delete mode 100644 vendor/src/github.com/spf13/pflag/int8.go
delete mode 100644 vendor/src/github.com/spf13/pflag/int_slice.go
delete mode 100644 vendor/src/github.com/spf13/pflag/ip.go
delete mode 100644 vendor/src/github.com/spf13/pflag/ipmask.go
delete mode 100644 vendor/src/github.com/spf13/pflag/ipnet.go
delete mode 100644 vendor/src/github.com/spf13/pflag/string.go
delete mode 100644 vendor/src/github.com/spf13/pflag/string_slice.go
delete mode 100644 vendor/src/github.com/spf13/pflag/uint.go
delete mode 100644 vendor/src/github.com/spf13/pflag/uint16.go
delete mode 100644 vendor/src/github.com/spf13/pflag/uint32.go
delete mode 100644 vendor/src/github.com/spf13/pflag/uint64.go
delete mode 100644 vendor/src/github.com/spf13/pflag/uint8.go
delete mode 100644 vendor/src/github.com/syndtr/gocapability/LICENSE
delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/capability.go
delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/capability_linux.go
delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/capability_noop.go
delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/enum.go
delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/enum_gen.go
delete mode 100644 vendor/src/github.com/syndtr/gocapability/capability/syscall_linux.go
delete mode 100644 vendor/src/github.com/tchap/go-patricia/LICENSE
delete mode 100644 vendor/src/github.com/tchap/go-patricia/patricia/children.go
delete mode 100644 vendor/src/github.com/tchap/go-patricia/patricia/patricia.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/LICENSE
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/circular.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/defs.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/edit.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/elsize.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/errors.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/extension.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/integers.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/json.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/json_bytes.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/number.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/number_appengine.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/number_unsafe.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/read.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/read_bytes.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/size.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/write.go
delete mode 100644 vendor/src/github.com/tinylib/msgp/msgp/write_bytes.go
delete mode 100644 vendor/src/github.com/ugorji/go/LICENSE
delete mode 100644 vendor/src/github.com/ugorji/go/codec/0doc.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/README.md
delete mode 100644 vendor/src/github.com/ugorji/go/codec/binc.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/cbor.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/decode.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/encode.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/fast-path.generated.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/fast-path.go.tmpl
delete mode 100644 vendor/src/github.com/ugorji/go/codec/fast-path.not.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen-dec-array.go.tmpl
delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen-dec-map.go.tmpl
delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen-helper.generated.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen-helper.go.tmpl
delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen.generated.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/gen.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/helper.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/helper_internal.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/helper_not_unsafe.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/helper_unsafe.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/json.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/msgpack.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/noop.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/prebuild.go
delete mode 100755 vendor/src/github.com/ugorji/go/codec/prebuild.sh
delete mode 100644 vendor/src/github.com/ugorji/go/codec/rpc.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/simple.go
delete mode 100644 vendor/src/github.com/ugorji/go/codec/test-cbor-goldens.json
delete mode 100755 vendor/src/github.com/ugorji/go/codec/test.py
delete mode 100755 vendor/src/github.com/ugorji/go/codec/tests.sh
delete mode 100644 vendor/src/github.com/ugorji/go/codec/time.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/LICENSE
delete mode 100644 vendor/src/github.com/vbatts/tar-split/archive/tar/common.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/archive/tar/reader.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/archive/tar/stat_atim.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/archive/tar/stat_atimespec.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/archive/tar/stat_unix.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/archive/tar/writer.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/tar/asm/README.md
delete mode 100644 vendor/src/github.com/vbatts/tar-split/tar/asm/assemble.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/tar/asm/disassemble.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/tar/asm/doc.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/tar/storage/doc.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/tar/storage/entry.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/tar/storage/getter.go
delete mode 100644 vendor/src/github.com/vbatts/tar-split/tar/storage/packer.go
delete mode 100644 vendor/src/github.com/vdemeester/shakers/.gitignore
delete mode 100644 vendor/src/github.com/vdemeester/shakers/Dockerfile
delete mode 100644 vendor/src/github.com/vdemeester/shakers/LICENSE
delete mode 100644 vendor/src/github.com/vdemeester/shakers/Makefile
delete mode 100644 vendor/src/github.com/vdemeester/shakers/README.md
delete mode 100644 vendor/src/github.com/vdemeester/shakers/bool.go
delete mode 100644 vendor/src/github.com/vdemeester/shakers/circle.yml
delete mode 100644 vendor/src/github.com/vdemeester/shakers/common.go
delete mode 100644 vendor/src/github.com/vdemeester/shakers/glide.yaml
delete mode 100644 vendor/src/github.com/vdemeester/shakers/string.go
delete mode 100644 vendor/src/github.com/vdemeester/shakers/time.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/.travis.yml
delete mode 100644 vendor/src/github.com/vishvananda/netlink/LICENSE
delete mode 100644 vendor/src/github.com/vishvananda/netlink/Makefile
delete mode 100644 vendor/src/github.com/vishvananda/netlink/README.md
delete mode 100644 vendor/src/github.com/vishvananda/netlink/addr.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/addr_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/bpf_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/class.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/class_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/filter.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/filter_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/handle_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/link.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/link_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/link_tuntap_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/neigh.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/neigh_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/netlink.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/netlink_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/netlink_unspecified.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/addr_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/link_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/nl_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/route_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/syscall.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/tc_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/xfrm_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/xfrm_policy_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/nl/xfrm_state_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/protinfo.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/protinfo_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/qdisc.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/qdisc_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/route.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/route_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/route_unspecified.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/rule.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/rule_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/xfrm.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/xfrm_policy.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/xfrm_policy_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/xfrm_state.go
delete mode 100644 vendor/src/github.com/vishvananda/netlink/xfrm_state_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netns/LICENSE
delete mode 100644 vendor/src/github.com/vishvananda/netns/README.md
delete mode 100644 vendor/src/github.com/vishvananda/netns/netns.go
delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux.go
delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_386.go
delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_amd64.go
delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_arm.go
delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_arm64.go
delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_ppc64le.go
delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_linux_s390x.go
delete mode 100644 vendor/src/github.com/vishvananda/netns/netns_unspecified.go
delete mode 100644 vendor/src/golang.org/x/crypto/LICENSE
delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/bmp-string.go
delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/crypto.go
delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/errors.go
delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/internal/rc2/rc2.go
delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/mac.go
delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/pbkdf.go
delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/pkcs12.go
delete mode 100644 vendor/src/golang.org/x/crypto/pkcs12/safebags.go
delete mode 100644 vendor/src/golang.org/x/net/LICENSE
delete mode 100644 vendor/src/golang.org/x/net/context/context.go
delete mode 100644 vendor/src/golang.org/x/net/context/ctxhttp/cancelreq.go
delete mode 100644 vendor/src/golang.org/x/net/context/ctxhttp/cancelreq_go14.go
delete mode 100644 vendor/src/golang.org/x/net/context/ctxhttp/ctxhttp.go
delete mode 100644 vendor/src/golang.org/x/net/http2/.gitignore
delete mode 100644 vendor/src/golang.org/x/net/http2/Dockerfile
delete mode 100644 vendor/src/golang.org/x/net/http2/Makefile
delete mode 100644 vendor/src/golang.org/x/net/http2/README
delete mode 100644 vendor/src/golang.org/x/net/http2/client_conn_pool.go
delete mode 100644 vendor/src/golang.org/x/net/http2/configure_transport.go
delete mode 100644 vendor/src/golang.org/x/net/http2/errors.go
delete mode 100644 vendor/src/golang.org/x/net/http2/fixed_buffer.go
delete mode 100644 vendor/src/golang.org/x/net/http2/flow.go
delete mode 100644 vendor/src/golang.org/x/net/http2/frame.go
delete mode 100644 vendor/src/golang.org/x/net/http2/go15.go
delete mode 100644 vendor/src/golang.org/x/net/http2/gotrack.go
delete mode 100644 vendor/src/golang.org/x/net/http2/headermap.go
delete mode 100644 vendor/src/golang.org/x/net/http2/hpack/encode.go
delete mode 100644 vendor/src/golang.org/x/net/http2/hpack/hpack.go
delete mode 100644 vendor/src/golang.org/x/net/http2/hpack/huffman.go
delete mode 100644 vendor/src/golang.org/x/net/http2/hpack/tables.go
delete mode 100644 vendor/src/golang.org/x/net/http2/http2.go
delete mode 100644 vendor/src/golang.org/x/net/http2/not_go15.go
delete mode 100644 vendor/src/golang.org/x/net/http2/not_go16.go
delete mode 100644 vendor/src/golang.org/x/net/http2/pipe.go
delete mode 100644 vendor/src/golang.org/x/net/http2/server.go
delete mode 100644 vendor/src/golang.org/x/net/http2/transport.go
delete mode 100644 vendor/src/golang.org/x/net/http2/write.go
delete mode 100644 vendor/src/golang.org/x/net/http2/writesched.go
delete mode 100644 vendor/src/golang.org/x/net/internal/timeseries/timeseries.go
delete mode 100644 vendor/src/golang.org/x/net/proxy/direct.go
delete mode 100644 vendor/src/golang.org/x/net/proxy/per_host.go
delete mode 100644 vendor/src/golang.org/x/net/proxy/proxy.go
delete mode 100644 vendor/src/golang.org/x/net/proxy/socks5.go
delete mode 100644 vendor/src/golang.org/x/net/trace/events.go
delete mode 100644 vendor/src/golang.org/x/net/trace/histogram.go
delete mode 100644 vendor/src/golang.org/x/net/trace/trace.go
delete mode 100644 vendor/src/golang.org/x/net/websocket/client.go
delete mode 100644 vendor/src/golang.org/x/net/websocket/hybi.go
delete mode 100644 vendor/src/golang.org/x/net/websocket/server.go
delete mode 100644 vendor/src/golang.org/x/net/websocket/websocket.go
delete mode 100644 vendor/src/golang.org/x/oauth2/.travis.yml
delete mode 100644 vendor/src/golang.org/x/oauth2/AUTHORS
delete mode 100644 vendor/src/golang.org/x/oauth2/CONTRIBUTING.md
delete mode 100644 vendor/src/golang.org/x/oauth2/CONTRIBUTORS
delete mode 100644 vendor/src/golang.org/x/oauth2/LICENSE
delete mode 100644 vendor/src/golang.org/x/oauth2/README.md
delete mode 100644 vendor/src/golang.org/x/oauth2/client_appengine.go
delete mode 100644 vendor/src/golang.org/x/oauth2/google/appengine.go
delete mode 100644 vendor/src/golang.org/x/oauth2/google/appengine_hook.go
delete mode 100644 vendor/src/golang.org/x/oauth2/google/appenginevm_hook.go
delete mode 100644 vendor/src/golang.org/x/oauth2/google/default.go
delete mode 100644 vendor/src/golang.org/x/oauth2/google/google.go
delete mode 100644 vendor/src/golang.org/x/oauth2/google/jwt.go
delete mode 100644 vendor/src/golang.org/x/oauth2/google/sdk.go
delete mode 100644 vendor/src/golang.org/x/oauth2/internal/oauth2.go
delete mode 100644 vendor/src/golang.org/x/oauth2/internal/token.go
delete mode 100644 vendor/src/golang.org/x/oauth2/internal/transport.go
delete mode 100644 vendor/src/golang.org/x/oauth2/jws/jws.go
delete mode 100644 vendor/src/golang.org/x/oauth2/jwt/jwt.go
delete mode 100644 vendor/src/golang.org/x/oauth2/oauth2.go
delete mode 100644 vendor/src/golang.org/x/oauth2/token.go
delete mode 100644 vendor/src/golang.org/x/oauth2/transport.go
delete mode 100644 vendor/src/golang.org/x/sys/LICENSE
delete mode 100644 vendor/src/golang.org/x/sys/windows/asm.s
delete mode 100644 vendor/src/golang.org/x/sys/windows/asm_windows_386.s
delete mode 100644 vendor/src/golang.org/x/sys/windows/asm_windows_amd64.s
delete mode 100644 vendor/src/golang.org/x/sys/windows/dll_windows.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/env_unset.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/env_windows.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/eventlog.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/exec_windows.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/race.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/race0.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/registry/key.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/registry/syscall.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/registry/value.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/registry/zsyscall_windows.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/security_windows.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/service.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/str.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/debug/log.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/debug/service.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/event.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/eventlog/install.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/eventlog/log.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/go12.c
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/go12.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/go13.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/mgr/config.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/mgr/mgr.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/mgr/service.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/security.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/service.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/sys_386.s
delete mode 100644 vendor/src/golang.org/x/sys/windows/svc/sys_amd64.s
delete mode 100644 vendor/src/golang.org/x/sys/windows/syscall.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/syscall_windows.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/zsyscall_windows.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/ztypes_windows.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/ztypes_windows_386.go
delete mode 100644 vendor/src/golang.org/x/sys/windows/ztypes_windows_amd64.go
delete mode 100644 vendor/src/google.golang.org/api/LICENSE
delete mode 100644 vendor/src/google.golang.org/api/gensupport/json.go
delete mode 100644 vendor/src/google.golang.org/api/gensupport/params.go
delete mode 100644 vendor/src/google.golang.org/api/googleapi/googleapi.go
delete mode 100644 vendor/src/google.golang.org/api/googleapi/internal/uritemplates/LICENSE
delete mode 100644 vendor/src/google.golang.org/api/googleapi/internal/uritemplates/uritemplates.go
delete mode 100644 vendor/src/google.golang.org/api/googleapi/internal/uritemplates/utils.go
delete mode 100644 vendor/src/google.golang.org/api/googleapi/types.go
delete mode 100644 vendor/src/google.golang.org/api/logging/v1beta3/logging-api.json
delete mode 100644 vendor/src/google.golang.org/api/logging/v1beta3/logging-gen.go
delete mode 100644 vendor/src/google.golang.org/cloud/.travis.yml
delete mode 100644 vendor/src/google.golang.org/cloud/AUTHORS
delete mode 100644 vendor/src/google.golang.org/cloud/CONTRIBUTING.md
delete mode 100644 vendor/src/google.golang.org/cloud/CONTRIBUTORS
delete mode
100644 vendor/src/google.golang.org/cloud/LICENSE delete mode 100644 vendor/src/google.golang.org/cloud/README.md delete mode 100644 vendor/src/google.golang.org/cloud/cloud.go delete mode 100644 vendor/src/google.golang.org/cloud/compute/metadata/metadata.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/cloud.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/opts/option.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/transport/cancelreq.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/transport/cancelreq_legacy.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/transport/dial.go delete mode 100644 vendor/src/google.golang.org/cloud/internal/transport/proto.go delete mode 100644 vendor/src/google.golang.org/cloud/key.json.enc delete mode 100644 vendor/src/google.golang.org/cloud/logging/logging.go delete mode 100644 vendor/src/google.golang.org/cloud/option.go delete mode 100644 vendor/src/google.golang.org/grpc/.travis.yml delete mode 100644 vendor/src/google.golang.org/grpc/CONTRIBUTING.md delete mode 100644 vendor/src/google.golang.org/grpc/LICENSE delete mode 100644 vendor/src/google.golang.org/grpc/Makefile delete mode 100644 vendor/src/google.golang.org/grpc/PATENTS delete mode 100644 vendor/src/google.golang.org/grpc/README.md delete mode 100644 vendor/src/google.golang.org/grpc/backoff.go delete mode 100644 vendor/src/google.golang.org/grpc/call.go delete mode 100644 vendor/src/google.golang.org/grpc/clientconn.go delete mode 100755 vendor/src/google.golang.org/grpc/codegen.sh delete mode 100644 vendor/src/google.golang.org/grpc/codes/code_string.go delete mode 100644 vendor/src/google.golang.org/grpc/codes/codes.go delete mode 100755 vendor/src/google.golang.org/grpc/coverage.sh delete mode 100644 vendor/src/google.golang.org/grpc/credentials/credentials.go delete mode 100644 vendor/src/google.golang.org/grpc/credentials/oauth/oauth.go delete mode 100644 vendor/src/google.golang.org/grpc/doc.go delete mode 100644 vendor/src/google.golang.org/grpc/grpclog/logger.go delete mode 100644 vendor/src/google.golang.org/grpc/interceptor.go delete mode 100644 vendor/src/google.golang.org/grpc/internal/internal.go delete mode 100644 vendor/src/google.golang.org/grpc/metadata/metadata.go delete mode 100644 vendor/src/google.golang.org/grpc/naming/naming.go delete mode 100644 vendor/src/google.golang.org/grpc/peer/peer.go delete mode 100644 vendor/src/google.golang.org/grpc/picker.go delete mode 100644 vendor/src/google.golang.org/grpc/rpc_util.go delete mode 100644 vendor/src/google.golang.org/grpc/server.go delete mode 100644 vendor/src/google.golang.org/grpc/stream.go delete mode 100644 vendor/src/google.golang.org/grpc/trace.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/control.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/handler_server.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/http2_client.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/http2_server.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/http_util.go delete mode 100644 vendor/src/google.golang.org/grpc/transport/transport.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/.gitignore delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/.travis.yml delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/AUTHORS delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/CHANGELOG.md delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/CONTRIBUTING.md delete mode 
100644 vendor/src/gopkg.in/fsnotify.v1/LICENSE delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/README.md delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/fen.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/fsnotify.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/inotify.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/inotify_poller.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/kqueue.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/open_mode_bsd.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/open_mode_darwin.go delete mode 100644 vendor/src/gopkg.in/fsnotify.v1/windows.go delete mode 100644 volume/drivers/adapter.go delete mode 100644 volume/drivers/extpoint.go delete mode 100644 volume/drivers/extpoint_test.go delete mode 100644 volume/drivers/proxy.go delete mode 100644 volume/drivers/proxy_test.go delete mode 100644 volume/local/local.go delete mode 100644 volume/local/local_test.go delete mode 100644 volume/local/local_unix.go delete mode 100644 volume/local/local_windows.go delete mode 100644 volume/store/errors.go delete mode 100644 volume/store/store.go delete mode 100644 volume/store/store_test.go delete mode 100644 volume/store/store_unix.go delete mode 100644 volume/store/store_windows.go delete mode 100644 volume/testutils/testutils.go delete mode 100644 volume/volume.go delete mode 100644 volume/volume_copy.go delete mode 100644 volume/volume_propagation_linux.go delete mode 100644 volume/volume_propagation_linux_test.go delete mode 100644 volume/volume_propagation_unsupported.go delete mode 100644 volume/volume_test.go delete mode 100644 volume/volume_unix.go delete mode 100644 volume/volume_windows.go diff --git a/AUTHORS b/AUTHORS deleted file mode 100644 index 11cd83d14e..0000000000 --- a/AUTHORS +++ /dev/null @@ -1,1522 +0,0 @@ -# This file lists all individuals having contributed content to the repository. -# For how it is generated, see `hack/generate-authors.sh`. - -Aanand Prasad -Aaron Davidson -Aaron Feng -Aaron Huslage -Aaron Lehmann -Aaron Welch -Abel Muiño -Abhijeet Kasurde -Abhinav Ajgaonkar -Abhishek Chanda -Abin Shahab -Adam Miller -Adam Singer -Aditi Rajagopal -Aditya -Adria Casas -Adrian Mouat -Adrian Oprea -Adrien Folie -Adrien Gallouët -Ahmed Kamal -Ahmet Alp Balkan -Aidan Feldman -Aidan Hobson Sayers -AJ Bowen -Ajey Charantimath -ajneu -Akihiro Suda -Al Tobey -alambike -Alan Scherger -Alan Thompson -Albert Callarisa -Albert Zhang -Aleksa Sarai -Aleksandrs Fadins -Alena Prokharchyk -Alessandro Boch -Alessio Biancalana -Alex Chan -Alex Crawford -Alex Ellis -Alex Gaynor -Alex Samorukov -Alex Warhawk -Alexander Artemenko -Alexander Boyd -Alexander Larsson -Alexander Morozov -Alexander Shopov -Alexandre Beslic -Alexandre González -Alexandru Sfirlogea -Alexey Guskov -Alexey Kotlyarov -Alexey Shamrin -Alexis THOMAS -Ali Dehghani -Allen Madsen -Allen Sun -almoehi -Alvin Richards -amangoel -Amen Belayneh -Amit Bakshi -Amit Krishnan -Amy Lindburg -Anand Patil -AnandkumarPatel -Anatoly Borodin -Anchal Agrawal -Anders Janmyr -Andre Dublin <81dublin@gmail.com> -Andre Granovsky -Andrea Luzzardi -Andrea Turli -Andreas Köhler -Andreas Savvides -Andreas Tiefenthaler -Andrew C. 
Bodine -Andrew Clay Shafer -Andrew Duckworth -Andrew France -Andrew Gerrand -Andrew Guenther -Andrew Kuklewicz -Andrew Macgregor -Andrew Macpherson -Andrew Martin -Andrew Munsell -Andrew Weiss -Andrew Williams -Andrews Medina -Andrey Petrov -Andrey Stolbovsky -André Martins -andy -Andy Chambers -andy diller -Andy Goldstein -Andy Kipp -Andy Rothfusz -Andy Smith -Andy Wilson -Anes Hasicic -Anil Belur -Ankush Agarwal -Anonmily -Anthon van der Neut -Anthony Baire -Anthony Bishopric -Anthony Dahanne -Anton Löfgren -Anton Nikitin -Anton Polonskiy -Anton Tiurin -Antonio Murdaca -Antony Messerli -Anuj Bahuguna -Anusha Ragunathan -apocas -ArikaChen -Arnaud Porterie -Arthur Barr -Arthur Gautier -Artur Meyster -Arun Gupta -Asbjørn Enge -averagehuman -Avi Das -Avi Miller -ayoshitake -Azat Khuyiyakhmetov -Bardia Keyoumarsi -Barnaby Gray -Barry Allard -Bartłomiej Piotrowski -Bastiaan Bakker -bdevloed -Ben Firshman -Ben Golub -Ben Hall -Ben Sargent -Ben Severson -Ben Toews -Ben Wiklund -Benjamin Atkin -Benoit Chesneau -Bernerd Schaefer -Bert Goethals -Bharath Thiruveedula -Bhiraj Butala -Bill W -bin liu -Blake Geno -Boaz Shuster -bobby abbott -boucher -Bouke Haarsma -Boyd Hemphill -boynux -Bradley Cicenas -Bradley Wright -Brandon Liu -Brandon Philips -Brandon Rhodes -Brendan Dixon -Brent Salisbury -Brett Higgins -Brett Kochendorfer -Brian (bex) Exelbierd -Brian Bland -Brian DeHamer -Brian Dorsey -Brian Flad -Brian Goff -Brian McCallister -Brian Olsen -Brian Shumate -Brian Torres-Gil -Brian Trump -Brice Jaglin -Briehan Lombaard -Bruno Bigras -Bruno Binet -Bruno Gazzera -Bruno Renié -Bryan Bess -Bryan Boreham -Bryan Matsuo -Bryan Murphy -buddhamagnet -Burke Libbey -Byung Kang -Caleb Spare -Calen Pennington -Cameron Boehmer -Cameron Spear -Campbell Allen -Candid Dauth -Carl Henrik Lunde -Carl X. Su -Carlos Alexandro Becker -Carlos Sanchez -Carol Fager-Higgins -Cary -Casey Bisson -Cedric Davies -Cezar Sa Espinola -Chad Swenson -Chance Zibolski -Chander G -Charles Chan -Charles Hooper -Charles Law -Charles Lindsay -Charles Merriam -Charles Sarrazin -Charlie Lewis -Chase Bolt -ChaYoung You -Chen Chao -Chen Hanxiao -cheney90 -Chewey -Chia-liang Kao -chli -Cholerae Hu -Chris Alfonso -Chris Armstrong -Chris Dituri -Chris Fordham -Chris Khoo -Chris McKinnel -Chris Seto -Chris Snow -Chris St. Pierre -Chris Stivers -Chris Swan -Chris Wahl -Chris Weyl -chrismckinnel -Christian Berendt -Christian Böhme -Christian Persson -Christian Rotzoll -Christian Simon -Christian Stefanescu -ChristoperBiscardi -Christophe Mehay -Christophe Troestler -Christopher Currie -Christopher Jones -Christopher Latham -Christopher Rigor -Christy Perez -Chun Chen -Ciro S. 
Costa -Clayton Coleman -Clinton Kitson -Coenraad Loubser -Colin Dunklau -Colin Rice -Colin Walters -Collin Guarino -Colm Hally -companycy -Cory Forsyth -cressie176 -Cristian Staretu -cristiano balducci -Cruceru Calin-Cristian -Cyril F -Daan van Berkel -Daehyeok Mun -Dafydd Crosby -dalanlan -Damien Nadé -Damien Nozay -Damjan Georgievski -Dan Anolik -Dan Buch -Dan Cotora -Dan Griffin -Dan Hirsch -Dan Keder -Dan Levy -Dan McPherson -Dan Stine -Dan Walsh -Dan Williams -Daniel Antlinger -Daniel Exner -Daniel Farrell -Daniel Garcia -Daniel Gasienica -Daniel Hiltgen -Daniel Menet -Daniel Mizyrycki -Daniel Nephin -Daniel Norberg -Daniel Nordberg -Daniel Robinson -Daniel S -Daniel Von Fange -Daniel YC Lin -Daniel Zhang -Daniel, Dao Quang Minh -Danny Berger -Danny Yates -Darren Coxall -Darren Shepherd -Darren Stahl -Dave Barboza -Dave Henderson -Dave MacDonald -Dave Tucker -David Anderson -David Calavera -David Corking -David Cramer -David Currie -David Davis -David Gageot -David Gebler -David Lawrence -David Mackey -David Mat -David Mcanulty -David Pelaez -David R. Jenni -David Röthlisberger -David Sheets -David Sissitka -David Xia -David Young -Davide Ceretti -Dawn Chen -dcylabs -decadent -deed02392 -Deng Guangxing -Deni Bertovic -Denis Gladkikh -Denis Ollier -Dennis Docter -Derek -Derek -Derek Ch -Derek McGowan -Deric Crago -Deshi Xiao -devmeyster -Devvyn Murphy -Dharmit Shah -Dieter Reuter -Dima Stopel -Dimitri John Ledkov -Dimitry Andric -Dinesh Subhraveti -Diogo Monica -DiuDiugirl -Djibril Koné -dkumor -Dmitri Logvinenko -Dmitry Demeshchuk -Dmitry Gusev -Dmitry V. Krivenok -Dmitry Vorobev -Dolph Mathews -Dominik Finkbeiner -Dominik Honnef -Don Kirkby -Don Kjer -Don Spaulding -Donald Huang -Dong Chen -Donovan Jones -Doug Davis -Doug MacEachern -Doug Tangren -Dr Nic Williams -dragon788 -Dražen Lučanin -Dustin Sallings -Ed Costello -Edmund Wagner -Eiichi Tsukata -Eike Herzbach -Eivind Uggedal -Elan Ruusamäe -Elias Probst -Elijah Zupancic -eluck -Elvir Kuric -Emil Hernvall -Emily Maier -Emily Rose -Emir Ozer -Enguerran -Eohyung Lee -Eric Hanchrow -Eric Lee -Eric Myhre -Eric Paris -Eric Rafaloff -Eric Rosenberg -Eric Sage -Eric Windisch -Eric Yang -Eric-Olivier Lamey -Erik Bray -Erik Dubbelboer -Erik Hollensbe -Erik Inge Bolsø -Erik Kristensen -Erik Weathers -Erno Hopearuoho -Erwin van der Koogh -Euan -Eugene Yakubovich -eugenkrizo -evalle -Evan Allrich -Evan Carmi -Evan Hazlett -Evan Krall -Evan Phoenix -Evan Wies -Evgeny Vereshchagin -Ewa Czechowska -Eystein Måløy Stenberg -ezbercih -Fabiano Rosas -Fabio Falci -Fabio Rehm -Fabrizio Regini -Fabrizio Soppelsa -Faiz Khan -falmp -Fangyuan Gao <21551127@zju.edu.cn> -Fareed Dudhia -Fathi Boudra -Federico Gimenez -Felix Geisendörfer -Felix Hupfeld -Felix Rabe -Felix Schindler -Ferenc Szabo -Fernando -Fero Volar -Filipe Brandenburger -Filipe Oliveira -fl0yd -Flavio Castelli -FLGMwt -Florian -Florian Klein -Florian Maier -Florian Weingarten -Florin Asavoaie -Francesc Campoy -Francisco Carriedo -Francisco Souza -Frank Groeneveld -Frank Herrmann -Frank Macreery -Frank Rosquin -Fred Lifton -Frederick F. 
Kautz IV -Frederik Loeffert -Frederik Nordahl Jul Sabroe -Freek Kalter -fy2462 -Félix Baylac-Jacqué -Félix Cantournet -Gabe Rosenhouse -Gabor Nagy -Gabriel Monroy -GabrielNicolasAvellaneda -Galen Sampson -Gareth Rushgrove -Garrett Barboza -Gaurav -gautam, prasanna -GennadySpb -Geoffrey Bachelet -George MacRorie -George Xie -Georgi Hristozov -Gereon Frey -German DZ -Gert van Valkenhoef -Gianluca Borello -Gildas Cuisinier -gissehel -Giuseppe Mazzotta -Gleb Fotengauer-Malinovskiy -Gleb M Borisov -Glyn Normington -GoBella -Goffert van Gool -Gosuke Miyashita -Gou Rao -Govinda Fichtner -Grant Reaber -Graydon Hoare -Greg Fausak -Greg Thornton -grossws -grunny -gs11 -Guilhem Lettron -Guilherme Salgado -Guillaume Dufour -Guillaume J. Charmes -guoxiuyan -Gurjeet Singh -Guruprasad -gwx296173 -Günter Zöchbauer -Hans Kristian Flaatten -Hans Rødtang -Hao Shu Wei -Hao Zhang <21521210@zju.edu.cn> -Harald Albers -Harley Laue -Harold Cooper -Harry Zhang -He Simei -heartlock <21521209@zju.edu.cn> -Hector Castro -Henning Sprang -Hobofan -Hollie Teal -Hong Xu -hsinko <21551195@zju.edu.cn> -Hu Keping -Hu Tao -Huanzhong Zhang -Huayi Zhang -Hugo Duncan -Hugo Marisco <0x6875676f@gmail.com> -Hunter Blanks -huqun -Huu Nguyen -hyeongkyu.lee -hyp3rdino -Hyzhou <1187766782@qq.com> -Ian Babrou -Ian Bishop -Ian Bull -Ian Calvert -Ian Lee -Ian Main -Ian Truslove -Iavael -Icaro Seara -Igor Dolzhikov -Ilkka Laukkanen -Ilya Dmitrichenko -Ilya Gusev -ILYA Khlopotov -imre Fitos -inglesp -Ingo Gottwald -Isaac Dupree -Isabel Jimenez -Isao Jonas -Ivan Babrou -Ivan Fraixedes -Ivan Grcic -J Bruni -J. Nunn -Jack Danger Canty -Jacob Atzen -Jacob Edelman -Jake Champlin -Jake Moshenko -jakedt -James Allen -James Carey -James Carr -James DeFelice -James Harrison Fisher -James Kyburz -James Kyle -James Lal -James Mills -James Nugent -James Turnbull -Jamie Hannaford -Jamshid Afshar -Jan Keromnes -Jan Koprowski -Jan Pazdziora -Jan Toebes -Jan-Gerd Tenberge -Jan-Jaap Driessen -Jana Radhakrishnan -Januar Wayong -Jared Biel -Jared Hocutt -Jaroslaw Zabiello -jaseg -Jasmine Hegman -Jason Divock -Jason Giedymin -Jason Green -Jason Hall -Jason Heiss -Jason Livesay -Jason McVetta -Jason Plum -Jason Shepherd -Jason Smith -Jason Sommer -Jason Stangroome -jaxgeller -Jay -Jay -Jay Kamat -Jean-Baptiste Barth -Jean-Baptiste Dalido -Jean-Paul Calderone -Jean-Tiare Le Bigot -Jeff Anderson -Jeff Johnston -Jeff Lindsay -Jeff Mickey -Jeff Minard -Jeff Nickoloff -Jeff Welch -Jeffrey Bolle -Jeffrey Morgan -Jeffrey van Gogh -Jenny Gebske -Jeremy Grosser -Jeremy Price -Jeremy Qian -Jeremy Unruh -Jeroen Jacobs -Jesse Dearing -Jesse Dubay -Jessica Frazelle -Jezeniel Zapanta -jgeiger -Jhon Honce -Jian Zhang -jianbosun -Jilles Oldenbeuving -Jim Alateras -Jim Perrin -Jimmy Cuadra -Jimmy Puckett -jimmyxian -Jinsoo Park -Jiri Popelka -Jiří Župka -jjy -jmzwcn -Joe Beda -Joe Doliner -Joe Ferguson -Joe Gordon -Joe Shaw -Joe Van Dyk -Joel Friedly -Joel Handwell -Joel Hansson -Joel Wurtz -Joey Geiger -Joey Gibson -Joffrey F -Johan Euphrosine -Johan Rydberg -Johannes 'fish' Ziemke -John Costa -John Feminella -John Gardiner Myers -John Gossman -John Howard (VM) -John OBrien III -John Starks -John Tims -John Warwick -John Willis -Jon Wedaman -Jonas Pfenniger -Jonathan A. 
Sternberg -Jonathan Boulle -Jonathan Camp -Jonathan Dowland -Jonathan Lebon -Jonathan McCrohan -Jonathan Mueller -Jonathan Pares -Jonathan Rudenberg -Joost Cassee -Jordan -Jordan Arentsen -Jordan Sissel -Jose Diaz-Gonzalez -Joseph Anthony Pasquale Holsten -Joseph Hager -Joseph Kern -Josh -Josh Hawn -Josh Poimboeuf -Josiah Kiehl -José Tomás Albornoz -JP -jrabbit -Julian Taylor -Julien Barbier -Julien Bisconti -Julien Bordellier -Julien Dubois -Julien Pervillé -Julio Montes -Jun-Ru Chang -Jussi Nummelin -Justas Brazauskas -Justin Cormack -Justin Force -Justin Plock -Justin Simonelis -Justin Terry -Jyrki Puttonen -Jérôme Petazzoni -Jörg Thalheim -Kai Blin -Kai Qiang Wu(Kennan) -Kamil Domański -kamjar gerami -Kanstantsin Shautsou -Karan Lyons -Kareem Khazem -kargakis -Karl Grzeszczak -Karol Duleba -Katie McLaughlin -Kato Kazuyoshi -Katrina Owen -Kawsar Saiyeed -kayrus -Ke Xu -Keli Hu -Ken Cochrane -Ken ICHIKAWA -Kenfe-Mickael Laventure -Kenjiro Nakayama -Kent Johnson -Kevin "qwazerty" Houdebert -Kevin Clark -Kevin J. Lynagh -Kevin Menard -Kevin P. Kucharczyk -Kevin Shi -Kevin Wallace -Kevin Yap -kevinmeredith -Keyvan Fatehi -kies -Kim BKC Carlbacker -Kim Eik -Kimbro Staken -Kir Kolyshkin -Kiran Gangadharan -Kirill SIbirev -knappe -Kohei Tsuruta -Koichi Shiraishi -Konrad Kleine -Konstantin Pelykh -Krasimir Georgiev -Kristian Haugene -Kristina Zabunova -krrg -Kun Zhang -Kunal Kushwaha -Kyle Conroy -kyu -Lachlan Coote -Lai Jiangshan -Lajos Papp -Lakshan Perera -Lalatendu Mohanty -lalyos -Lance Chen -Lance Kinley -Lars Butler -Lars Kellogg-Stedman -Lars R. Damerow -Laszlo Meszaros -Laurent Erignoux -Laurie Voss -Leandro Siqueira -Lee, Meng-Han -leeplay -Lei Jitang -Len Weincier -Lennie -Leszek Kowalski -Levi Blackstone -Levi Gross -Lewis Marshall -Lewis Peckover -Liana Lo -Liang Mingqiang -Liang-Chi Hsieh -liaoqingwei -limsy -Lin Lu -LingFaKe -Linus Heckemann -Liran Tal -Liron Levin -Liu Bo -Liu Hua -LIZAO LI -Lloyd Dewolf -Lokesh Mandvekar -longliqiang88 <394564827@qq.com> -Lorenz Leutgeb -Lorenzo Fontana -Louis Opter -Luca Marturana -Luca Orlandi -Luca-Bogdan Grigorescu -Lucas Chan -Luis Martínez de Bartolomé Izquierdo -Lukas Waslowski -lukaspustina -Lukasz Zajaczkowski -lukemarsden -Lynda O'Leary -Lénaïc Huard -Ma Shimiao -Mabin -Madhav Puri -Madhu Venugopal -Mageee <21521230.zju.edu.cn> -Mahesh Tiyyagura -malnick -Malte Janduda -manchoz -Manfred Touron -Manfred Zabarauskas -mansinahar -Manuel Meurer -Manuel Woelker -mapk0y -Marc Abramowitz -Marc Kuo -Marc Tamsky -Marcelo Salazar -Marco Hennings -Marcus Farkas -Marcus Linke -Marcus Ramberg -Marek Goldmann -Marian Marinov -Marianna Tessel -Mario Loriedo -Marius Gundersen -Marius Sturm -Marius Voila -Mark Allen -Mark McGranaghan -Mark McKinstry -Mark West -Marko Mikulicic -Marko Tibold -Markus Fix -Martijn Dwars -Martijn van Oosterhout -Martin Honermeyer -Martin Kelly -Martin Mosegaard Amdisen -Martin Redmond -Mary Anthony -Masahito Zembutsu -Mason Malone -Mateusz Sulima -Mathias Monnerville -Mathieu Le Marec - Pasquet -Matt Apperson -Matt Bachmann -Matt Bentley -Matt Haggard -Matt McCormick -Matt Moore -Matt Robenolt -Matthew Heon -Matthew Mayer -Matthew Mueller -Matthew Riley -Matthias Klumpp -Matthias Kühnle -Matthias Rampke -Matthieu Hauglustaine -mattymo -mattyw -Mauricio Garavaglia -mauriyouth -Max Shytikov -Maxim Ivanov -Maxim Kulkin -Maxim Treskin -Maxime Petazzoni -Meaglith Ma -meejah -Megan Kostick -Mehul Kar -Mengdi Gao -Mert Yazıcıoğlu -Micah Zoltu -Michael A. 
Smith -Michael Bridgen -Michael Brown -Michael Chiang -Michael Crosby -Michael Currie -Michael Friis -Michael Gorsuch -Michael Grauer -Michael Holzheu -Michael Hudson-Doyle -Michael Huettermann -Michael Käufl -Michael Neale -Michael Prokop -Michael Scharf -Michael Stapelberg -Michael Steinert -Michael Thies -Michael West -Michal Fojtik -Michal Gebauer -Michal Jemala -Michal Minar -Michaël Pailloncy -Michał Czeraszkiewicz -Michiel@unhosted -Miguel Angel Fernández -Miguel Morales -Mihai Borobocea -Mihuleacc Sergiu -Mike Brown -Mike Chelen -Mike Danese -Mike Dillon -Mike Dougherty -Mike Gaffney -Mike Goelzer -Mike Leone -Mike MacCana -Mike Naberezny -Mike Snitzer -mikelinjie <294893458@qq.com> -Mikhail Sobolev -Miloslav Trmač -mingqing -Mingzhen Feng -Mitch Capper -mlarcher -Mohammad Banikazemi -Mohammed Aaqib Ansari -Mohit Soni -Morgan Bauer -Morgante Pell -Morgy93 -Morten Siebuhr -Morton Fox -Moysés Borges -mqliang -Mrunal Patel -msabansal -mschurenko -muge -Mustafa Akın -Muthukumar R -Máximo Cuadros -Médi-Rémi Hashim -Nahum Shalman -Nakul Pathak -Nalin Dahyabhai -Nan Monnand Deng -Naoki Orii -Natalie Parker -Natanael Copa -Nate Brennand -Nate Eagleson -Nate Jones -Nathan Hsieh -Nathan Kleyn -Nathan LeClaire -Nathan McCauley -Nathan Williams -Neal McBurnett -Nelson Chen -Nghia Tran -Niall O'Higgins -Nicholas E. Rabenau -Nick Irvine -Nick Parker -Nick Payne -Nick Stenning -Nick Stinemates -Nicolas Borboën -Nicolas De loof -Nicolas Dudebout -Nicolas Goy -Nicolas Kaiser -Nicolás Hock Isaza -Nigel Poulton -NikolaMandic -nikolas -Nirmal Mehta -Nishant Totla -NIWA Hideyuki -noducks -Nolan Darilek -nponeccop -Nuutti Kotivuori -nzwsch -O.S. Tezer -objectified -OddBloke -odk- -Oguz Bilgic -Oh Jinkyun -Ohad Schneider -Ole Reifschneider -Oliver Neal -Olivier Gambier -Olle Jonsson -Oriol Francès -Otto Kekäläinen -oyld -ozlerhakan -paetling -pandrew -panticz -Paolo G. Giarrusso -Pascal Borreli -Pascal Hartig -Patrick Devine -Patrick Hemmer -Patrick Stapleton -pattichen -Paul -paul -Paul Annesley -Paul Bellamy -Paul Bowsher -Paul Hammond -Paul Jimenez -Paul Lietar -Paul Liljenberg -Paul Morie -Paul Nasrat -Paul Weaver -Pavel Lobashov -Pavel Pospisil -Pavel Sutyrin -Pavel Tikhomirov -Pavlos Ratis -Peeyush Gupta -Peggy Li -Pei Su -Penghan Wang -perhapszzy@sina.com -Peter Bourgon -Peter Braden -Peter Choi -Peter Dave Hello -Peter Edge -Peter Ericson -Peter Esbensen -Peter Malmgren -Peter Salvatore -Peter Volpe -Peter Waller -Phil -Phil Estes -Phil Spitler -Philip Monroe -Philipp Wahala -Philipp Weissensteiner -Phillip Alexander -pidster -Piergiuliano Bossi -Pierre -Pierre Carrier -Pierre Wacrenier -Pierre-Alain RIVIERE -Piotr Bogdan -pixelistik -Porjo -Poul Kjeldager Sørensen -Pradeep Chhetri -Prasanna Gautam -Prayag Verma -Przemek Hejman -pysqz -qg <1373319223@qq.com> -qhuang -Qiang Huang -qq690388648 <690388648@qq.com> -Quentin Brossard -Quentin Perez -Quentin Tayssier -r0n22 -Rafal Jeczalik -Rafe Colton -Raghavendra K T -Raghuram Devarakonda -Rajat Pandit -Rajdeep Dua -Ralle -Ralph Bean -Ramkumar Ramachandra -Ramon van Alteren -Ray Tsang -ReadmeCritic -Recursive Madman -Regan McCooey -Remi Rampin -Renato Riccieri Santos Zannon -resouer -rgstephens -Rhys Hiltner -Rich Seymour -Richard -Richard Burnison -Richard Harvey -Richard Metzler -Richard Scothern -Richo Healey -Rick Bradley -Rick van de Loo -Rick Wieman -Rik Nijessen -Riku Voipio -Riley Guerin -Ritesh H Shukla -Riyaz Faizullabhoy -Rob Vesse -Robert Bachmann -Robert Bittle -Robert Obryk -Robert Stern -Robert Wallis -Roberto G. 
Hashioka -Robin Naundorf -Robin Schneider -Robin Speekenbrink -robpc -Rodolfo Carvalho -Rodrigo Vaz -Roel Van Nyen -Roger Peppe -Rohit Jnagal -Rohit Kadam -Roland Huß -Roland Kammerer -Roland Moriz -Roma Sokolov -Roman Strashkin -Ron Smits -root -root -root -root -Rory Hunter -Rory McCune -Ross Boucher -Rovanion Luckey -Rozhnov Alexandr -rsmoorthy -Rudolph Gottesheim -Rui Lopes -Ryan Anderson -Ryan Aslett -Ryan Belgrave -Ryan Detzel -Ryan Fowler -Ryan McLaughlin -Ryan O'Donnell -Ryan Seto -Ryan Thomas -Ryan Trauntvein -Ryan Wallner -RyanDeng -Rémy Greinhofer -s. rannou -s00318865 -Sabin Basyal -Sachin Joshi -Sagar Hani -Sainath Grandhi -Sally O'Malley -Sam Abed -Sam Alba -Sam Bailey -Sam J Sharpe -Sam Neirinck -Sam Reis -Sam Rijs -Sambuddha Basu -Sami Wagiaalla -Samuel Andaya -Samuel Dion-Girardeau -Samuel Karp -Samuel PHAN -Sankar சங்கர் -Sanket Saurav -Santhosh Manohar -sapphiredev -Satnam Singh -satoru -Satoshi Amemiya -scaleoutsean -Scott Bessler -Scott Collier -Scott Johnston -Scott Stamp -Scott Walls -sdreyesg -Sean Christopherson -Sean Cronin -Sean OMeara -Sean P. Kane -Sebastiaan van Steenis -Sebastiaan van Stijn -Senthil Kumar Selvaraj -Senthil Kumaran -SeongJae Park -Seongyeol Lim -Serge Hallyn -Sergey Alekseev -Sergey Evstifeev -Sevki Hasirci -Shane Canon -Shane da Silva -shaunol -Shawn Landden -Shawn Siefkas -Shekhar Gulati -Sheng Yang -Shengbo Song -Shih-Yuan Lee -Shijiang Wei -Shishir Mahajan -shuai-z -Shuwei Hao -Sian Lerk Lau -sidharthamani -Silas Sewell -Simei He -Simon Eskildsen -Simon Leinen -Simon Taranto -Sindhu S -Sjoerd Langkemper -Solganik Alexander -Solomon Hykes -Song Gao -Soshi Katsuta -Soulou -Spencer Brown -Spencer Smith -Sridatta Thatipamala -Sridhar Ratnakumar -Srini Brahmaroutu -srinsriv -Steeve Morin -Stefan Berger -Stefan J. Wernli -Stefan Praszalowicz -Stefan Scherer -Stefan Staudenmeyer -Stefan Weil -Stephen Crosby -Stephen Day -Stephen Rust -Steve Durrheimer -Steve Francia -Steve Koch -Steven Burgess -Steven Iveson -Steven Merrill -Steven Richards -Steven Taylor -Subhajit Ghosh -Sujith Haridasan -Suryakumar Sudar -Sven Dowideit -Swapnil Daingade -Sylvain Baubeau -Sylvain Bellemare -Sébastien -Sébastien Luttringer -Sébastien Stormacq -TAGOMORI Satoshi -tang0th -Tangi COLIN -Tatsuki Sugiura -Tatsushi Inagaki -Taylor Jones -tbonza -Ted M. Young -Tehmasp Chaudhri -Tejesh Mehta -terryding77 <550147740@qq.com> -tgic -Thatcher Peskens -theadactyl -Thell 'Bo' Fowler -Thermionix -Thijs Terlouw -Thomas Bikeev -Thomas Frössman -Thomas Gazagnaire -Thomas Grainger -Thomas Hansen -Thomas Leonard -Thomas LEVEIL -Thomas Orozco -Thomas Riccardi -Thomas Schroeter -Thomas Sjögren -Thomas Swift -Thomas Tanaka -Thomas Texier -Tianon Gravi -Tibor Vass -Tiffany Low -Tim Bosse -Tim Dettrick -Tim Düsterhus -Tim Hockin -Tim Ruffles -Tim Smith -Tim Terhorst -Tim Wang -Tim Waugh -Tim Wraight -Timothy Hobbs -tjwebb123 -tobe -Tobias Bieniek -Tobias Bradtke -Tobias Gesellchen -Tobias Klauser -Tobias Schmidt -Tobias Schwab -Todd Crane -Todd Lunter -Todd Whiteman -Toli Kuznets -Tom Barlow -Tom Denham -Tom Fotherby -Tom Howe -Tom Hulihan -Tom Maaswinkel -Tom X. 
Tobin -Tomas Tomecek -Tomasz Kopczynski -Tomasz Lipinski -Tomasz Nurkiewicz -Tommaso Visconti -Tomáš Hrčka -Tonis Tiigi -Tonny Xu -Tony Daws -Tony Miller -toogley -Torstein Husebø -tpng -tracylihui <793912329@qq.com> -Travis Cline -Travis Thieman -Trent Ogren -Trevor -Trevor Pounds -trishnaguha -Tristan Carel -Troy Denton -Tyler Brock -Tzu-Jung Lee -Tõnis Tiigi -Ulysse Carion -unknown -vagrant -Vaidas Jablonskis -Veres Lajos -vgeta -Victor Coisne -Victor Costan -Victor I. Wood -Victor Lyuboslavsky -Victor Marmol -Victor Palma -Victor Vieux -Victoria Bialas -Vijaya Kumar K -Viktor Stanchev -Viktor Vojnovski -VinayRaghavanKS -Vincent Batts -Vincent Bernat -Vincent Bernat -Vincent Demeester -Vincent Giersch -Vincent Mayers -Vincent Woo -Vinod Kulkarni -Vishal Doshi -Vishnu Kannan -Vitor Monteiro -Vivek Agarwal -Vivek Dasgupta -Vivek Goyal -Vladimir Bulyga -Vladimir Kirillov -Vladimir Rutsky -Vladimir Varankin -VladimirAus -Vojtech Vitek (V-Teq) -waitingkuo -Walter Leibbrandt -Walter Stanish -WANG Chao -Wang Xing -Ward Vandewege -WarheadsSE -Wayne Chang -Wei-Ting Kuo -weiyan -Weiyang Zhu -Wen Cheng Ma -Wendel Fleming -Wenxuan Zhao -Wenyu You <21551128@zju.edu.cn> -Wes Morgan -Will Dietz -Will Rouesnel -Will Weaver -willhf -William Delanoue -William Henry -William Hubbs -William Riancho -William Thurston -WiseTrem -wlan0 -Wolfgang Powisch -wonderflow -xamyzhao -XiaoBing Jiang -Xiaoxu Chen -xiekeyang -Xinzi Zhou -Xiuming Chen -xlgao-zju -xuzhaokui -Yahya -YAMADA Tsuyoshi -Yan Feng -Yang Bai -yangshukui -Yasunori Mahata -Yestin Sun -Yi EungJun -Yibai Zhang -Yihang Ho -Ying Li -Yohei Ueda -Yong Tang -Yongzhi Pan -yorkie -Youcef YEKHLEF -Yuan Sun -yuchangchun -yuchengxia -Yurii Rashkovskii -yuzou -Zac Dover -Zach Borboa -Zachary Jaffee -Zain Memon -Zaiste! -Zane DeGraffenried -Zefan Li -Zen Lin(Zhinan Lin) -Zhang Kun -Zhang Wei -Zhang Wentao -Zhenan Ye <21551168@zju.edu.cn> -Zhu Guihua -Zhuoyun Wei -Zilin Du -zimbatm -Ziming Dong -ZJUshuaizhou <21551191@zju.edu.cn> -zmarouf -Zoltan Tombol -zqh -Zuhayr Elahi -Zunayed Ali -Álex González -Álvaro Lázaro -Átila Camurça Alves -尹吉峰 -搏通 diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index 9cb88fce2a..0000000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,2761 +0,0 @@ -# Changelog - -Items starting with `DEPRECATE` are important deprecation notices. For more -information on the list of deprecated flags and APIs please have a look at -https://docs.docker.com/engine/deprecated/ where target removal dates can also -be found. - -## 1.12.2-rc1 (2016-09-27) - -**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm -based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When -upgrading from an older version of docker, the upgrade process may not -automatically install the updated version of the unit file, or fail to start -the docker service if; - -- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or -- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive - -Starting the docker service will produce an error: - - Failed to start docker.service: Unit docker.socket failed to load: No such file or directory. - -or - - no sockets found via socket activation: make sure the service was started by systemd. 
-
-To resolve this:
-
-- Back up the current version of the unit file, and replace the file with the
-  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
-- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file, if present
-- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
-
-After making those changes, run `sudo systemctl daemon-reload`, and `sudo
-systemctl restart docker` to reload changes and (re)start the docker daemon.
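-
-A rough sketch of those steps as shell commands (untested; it assumes an
-rpm-based install, and any drop-in files under
-`/etc/systemd/system/docker.service.d/` still need to be checked by hand):
-
-    # back up the existing unit file
-    sudo cp /usr/lib/systemd/system/docker.service /usr/lib/systemd/system/docker.service.bak
-    # replace it with the unit file that ships with docker 1.12
-    sudo curl -fsSL -o /usr/lib/systemd/system/docker.service \
-        https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm
-    # drop the socket dependency and the fd:// listener, if still present
-    sudo sed -i -e '/^Requires=docker.socket/d' -e 's| -H fd://||g' \
-        /usr/lib/systemd/system/docker.service
-    # reload systemd and (re)start the docker daemon
-    sudo systemctl daemon-reload
-    sudo systemctl restart docker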
-
-
-### Runtime
-
-- Fix a panic due to a race condition filtering `docker ps` [#26049](https://github.com/docker/docker/pull/26049)
-* Implement retry logic to prevent "Unable to remove filesystem" errors when using the aufs storage driver [#26536](https://github.com/docker/docker/pull/26536)
-* Prevent devicemapper from removing device symlinks if `dm.use_deferred_removal` is enabled [#24740](https://github.com/docker/docker/pull/24740)
-- Fix an issue where the CLI did not return correct exit codes if a command was run with invalid options [#26777](https://github.com/docker/docker/pull/26777)
-- Fix a panic due to a bug in stdout / stderr processing in health checks [#26507](https://github.com/docker/docker/pull/26507)
-- Fix handling of exec's child processes [#26874](https://github.com/docker/docker/pull/26874)
-- Fix the exec form of HEALTHCHECK CMD [#26208](https://github.com/docker/docker/pull/26208)
-
-### Networking
-
-- Fix a daemon start panic on armv5 [#24315](https://github.com/docker/docker/issues/24315)
-* Vendor libnetwork [#26879](https://github.com/docker/docker/pull/26879) [#26953](https://github.com/docker/docker/pull/26953)
-  * Avoid returning early on agent join failures [docker/libnetwork#1473](https://github.com/docker/libnetwork/pull/1473)
-  - Fix service published port cleanup issues [docker/libnetwork#1432](https://github.com/docker/libnetwork/pull/1432) [docker/libnetwork#1433](https://github.com/docker/libnetwork/pull/1433)
-  * Recover properly from transient gossip failures [docker/libnetwork#1446](https://github.com/docker/libnetwork/pull/1446)
-  * Disambiguate node names known to the gossip cluster to avoid node name collisions [docker/libnetwork#1451](https://github.com/docker/libnetwork/pull/1451)
-  * Honor the user-provided listen address for gossip [docker/libnetwork#1460](https://github.com/docker/libnetwork/pull/1460)
-  * Allow reachability via published ports across services on the same host [docker/libnetwork#1398](https://github.com/docker/libnetwork/pull/1398)
-  * Change the ingress sandbox name from a random id to just `ingress_sbox` [docker/libnetwork#1449](https://github.com/docker/libnetwork/pull/1449)
-
-### Swarm Mode
-
-* Fix remote detection of a node's address when it joins the cluster [#26211](https://github.com/docker/docker/pull/26211)
-* Vendor SwarmKit [#26765](https://github.com/docker/docker/pull/26765)
-  * Bounce the session after a failed status update [docker/swarmkit#1539](https://github.com/docker/swarmkit/pull/1539)
-  - Fix possible raft deadlocks [docker/swarmkit#1537](https://github.com/docker/swarmkit/pull/1537)
-  - Fix a panic and endpoint leak when a service is updated with no endpoints [docker/swarmkit#1481](https://github.com/docker/swarmkit/pull/1481)
-  * Produce an error if the same port is published twice on `service create` or `service update` [docker/swarmkit#1495](https://github.com/docker/swarmkit/pull/1495)
-  - Fix an issue where changes to a service were not detected, resulting in the service not being updated [docker/swarmkit#1497](https://github.com/docker/swarmkit/pull/1497)
-
-### Contrib
-
-* Update the Debian sysv-init script to use `dockerd` instead of `docker daemon` [#25869](https://github.com/docker/docker/pull/25869)
-* Improve stability when running the docker client on macOS Sierra [#26875](https://github.com/docker/docker/pull/26875)
-
-### Windows
-
-- Fix an issue where arrow-key navigation did not work when running the docker client in ConEmu [#25578](https://github.com/docker/docker/pull/25578)
-
-## 1.12.1 (2016-08-18)
-
-**IMPORTANT**: Docker 1.12 ships with an updated systemd unit file for rpm
-based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
-upgrading from an older version of docker, the upgrade process may not
-automatically install the updated version of the unit file, or fail to start
-the docker service if:
-
-- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
-- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
-
-Starting the docker service will produce an error:
-
-    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
-
-or
-
-    no sockets found via socket activation: make sure the service was started by systemd.
-
-To resolve this:
-
-- Back up the current version of the unit file, and replace the file with the
-  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
-- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file, if present
-- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
-
-After making those changes, run `sudo systemctl daemon-reload`, and `sudo
-systemctl restart docker` to reload changes and (re)start the docker daemon.
-
-
-### Client
-
-* Add `Joined at` information in `node inspect --pretty` [#25512](https://github.com/docker/docker/pull/25512)
-- Fix a crash on `service inspect` [#25454](https://github.com/docker/docker/pull/25454)
-- Fix an issue preventing `service update --env-add` from working as intended [#25427](https://github.com/docker/docker/pull/25427)
-- Fix an issue preventing `service update --publish-add` from working as intended [#25428](https://github.com/docker/docker/pull/25428)
-- Remove the `service update --network-add` and `service update --network-rm` flags
-  because this feature is not yet implemented in 1.12, but was inadvertently added
-  to the client in 1.12.0 [#25646](https://github.com/docker/docker/pull/25646)
-
-### Contrib
-
-+ Official ARM installation for Debian Jessie, Ubuntu Trusty, and Raspbian Jessie [#24815](https://github.com/docker/docker/pull/24815) [#25591](https://github.com/docker/docker/pull/25637)
-- Add a selinux policy per distro/version, fixing an issue that prevented successful installation on Fedora 24 and Oracle Linux [#25334](https://github.com/docker/docker/pull/25334) [#25593](https://github.com/docker/docker/pull/25593)
-
-### Networking
-
-- Fix an issue that prevented containers from being accessed by hostname with the Docker overlay driver in Swarm Mode [#25603](https://github.com/docker/docker/pull/25603) [#25648](https://github.com/docker/docker/pull/25648)
-- Fix random network issues on services with published ports [#25603](https://github.com/docker/docker/pull/25603)
-- Fix unreliable inter-service communication after scaling down and up [#25603](https://github.com/docker/docker/pull/25603)
-- Fix an issue where removing all tasks on a node and adding them back breaks connectivity with other services [#25603](https://github.com/docker/docker/pull/25603)
-- Fix an issue where a task that fails to start results in a race, causing a `network xxx not found` error that masks the actual error [#25550](https://github.com/docker/docker/pull/25550)
-- Relax validation of SRV records for external services that use SRV records not formatted according to RFC 2782 [#25739](https://github.com/docker/docker/pull/25739)
-
-### Plugins (experimental)
-
-* Make daemon events listen for plugin lifecycle events [#24760](https://github.com/docker/docker/pull/24760)
-* Check plugin state before enabling a plugin [#25033](https://github.com/docker/docker/pull/25033)
-- Remove the plugin root from the filesystem on `plugin rm` [#25187](https://github.com/docker/docker/pull/25187)
-- Prevent a deadlock when more than one plugin is installed [#25384](https://github.com/docker/docker/pull/25384)
-
-### Runtime
-
-* Mask join tokens in daemon logs [#25346](https://github.com/docker/docker/pull/25346)
-- Fix `docker ps --filter` causing the results to no longer be sorted by creation time [#25387](https://github.com/docker/docker/pull/25387)
-- Fix various crashes [#25053](https://github.com/docker/docker/pull/25053)
-
-### Security
-
-* Add `/proc/timer_list` to the masked paths list to prevent an information leak from the host [#25630](https://github.com/docker/docker/pull/25630)
-* Allow systemd to run with only `--cap-add SYS_ADMIN` rather than having to also add `--cap-add DAC_READ_SEARCH` or disable seccomp filtering [#25567](https://github.com/docker/docker/pull/25567)
-
-### Swarm
-
-- Fix an issue where the swarm can get stuck electing a new leader after quorum is lost [#25055](https://github.com/docker/docker/issues/25055)
-- Fix unwanted rescheduling of containers after a leader failover [#25017](https://github.com/docker/docker/issues/25017)
-- Change the swarm root CA key to the P256 curve [swarmkit#1376](https://github.com/docker/swarmkit/pull/1376)
-- Allow forced removal of a node from a swarm [#25159](https://github.com/docker/docker/pull/25159)
-- Fix a connection leak when a node leaves a swarm [swarmkit/#1277](https://github.com/docker/swarmkit/pull/1277)
-- Backdate swarm certificates by one hour to tolerate more clock skew [swarmkit/#1243](https://github.com/docker/swarmkit/pull/1243)
-- Avoid high CPU use with many unschedulable tasks [swarmkit/#1287](https://github.com/docker/swarmkit/pull/1287)
-- Fix an issue with global tasks not starting up [swarmkit/#1295](https://github.com/docker/swarmkit/pull/1295)
-- Garbage-collect raft logs [swarmkit/#1327](https://github.com/docker/swarmkit/pull/1327)
-
-### Volume
-
-- Persist local volume options after a daemon restart [#25316](https://github.com/docker/docker/pull/25316)
-- Fix an issue where the mount ID was not returned on volume unmount [#25333](https://github.com/docker/docker/pull/25333)
-- Fix an issue where a volume mount could inadvertently create a bind mount [#25309](https://github.com/docker/docker/pull/25309)
-- `docker service create --mount type=bind,...` now correctly validates that the source path exists, instead of creating it [#25494](https://github.com/docker/docker/pull/25494)
-
-## 1.12.0 (2016-07-28)
-
-
-**IMPORTANT**: Docker 1.12.0 ships with an updated systemd unit file for rpm
-based installs (which includes RHEL, Fedora, CentOS, and Oracle Linux 7). When
-upgrading from an older version of docker, the upgrade process may not
-automatically install the updated version of the unit file, or fail to start
-the docker service if:
-
-- the systemd unit file (`/usr/lib/systemd/system/docker.service`) contains local changes, or
-- a systemd drop-in file is present, and contains `-H fd://` in the `ExecStart` directive
-
-Starting the docker service will produce an error:
-
-    Failed to start docker.service: Unit docker.socket failed to load: No such file or directory.
-
-or
-
-    no sockets found via socket activation: make sure the service was started by systemd.
-
-To resolve this:
-
-- Back up the current version of the unit file, and replace the file with the
-  [version that ships with docker 1.12](https://raw.githubusercontent.com/docker/docker/v1.12.0/contrib/init/systemd/docker.service.rpm)
-- Remove the `Requires=docker.socket` directive from the `/usr/lib/systemd/system/docker.service` file, if present
-- Remove `-H fd://` from the `ExecStart` directive (both in the main unit file, and in any drop-in files present).
-
-After making those changes, run `sudo systemctl daemon-reload`, and `sudo
-systemctl restart docker` to reload changes and (re)start the docker daemon.
-
-**IMPORTANT**: With Docker 1.12, a Linux docker installation now has two
-additional binaries: `dockerd` and `docker-proxy`. If you have scripts for
-installing docker, please make sure to update them accordingly.
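-
-A quick, hedged way to confirm that an install script handled the split
-(assuming the binaries are expected on `$PATH`):
-
-    command -v docker dockerd docker-proxy   # all three paths should print
-    dockerd --version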
-
-### Builder
-
-+ New `HEALTHCHECK` Dockerfile instruction to support user-defined healthchecks [#23218](https://github.com/docker/docker/pull/23218)
-+ New `SHELL` Dockerfile instruction to specify the default shell when using the shell form for commands in a Dockerfile [#22489](https://github.com/docker/docker/pull/22489)
-+ Add `#escape=` Dockerfile directive to support platform-specific parsing of file paths in a Dockerfile [#22268](https://github.com/docker/docker/pull/22268)
-+ Add support for comments in `.dockerignore` [#23111](https://github.com/docker/docker/pull/23111)
-* Support for UTF-8 in Dockerfiles [#23372](https://github.com/docker/docker/pull/23372)
-* Skip UTF-8 BOM bytes in `Dockerfile` and `.dockerignore`, if they exist [#23234](https://github.com/docker/docker/pull/23234)
-* Windows: support for `ARG` to match Linux [#22508](https://github.com/docker/docker/pull/22508)
-- Fix the error message when building with a daemon that has the bridge network disabled [#22932](https://github.com/docker/docker/pull/22932)
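-
-As a minimal illustration of the new instruction (a hypothetical image and
-check command; it assumes `curl` is available inside the image):
-
-    cat > Dockerfile <<'EOF'
-    FROM nginx:1.11
-    HEALTHCHECK --interval=30s --timeout=3s --retries=3 \
-      CMD curl -f http://localhost/ || exit 1
-    EOF
-    docker build -t web . && docker run -d --name web web
-    # `docker ps` then reports the container's health in its STATUS column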
-
-### Contrib
-
-* Enable seccomp for CentOS 7 and Oracle Linux 7 [#22344](https://github.com/docker/docker/pull/22344)
-- Remove MountFlags from the systemd unit to allow shared mount propagation [#22806](https://github.com/docker/docker/pull/22806)
-
-### Distribution
-
-+ Add `--max-concurrent-downloads` and `--max-concurrent-uploads` daemon flags, useful for situations where network connections don't support multiple downloads/uploads [#22445](https://github.com/docker/docker/pull/22445)
-* Registry operations now honor the `ALL_PROXY` environment variable [#22316](https://github.com/docker/docker/pull/22316)
-* Provide more information to the user on `docker load` [#23377](https://github.com/docker/docker/pull/23377)
-* Always save registry digest metadata about images pushed and pulled [#23996](https://github.com/docker/docker/pull/23996)
-
-### Logging
-
-+ The syslog logging driver now supports DGRAM sockets [#21613](https://github.com/docker/docker/pull/21613)
-+ Add `--details` option to `docker logs` to also display log tags [#21889](https://github.com/docker/docker/pull/21889)
-+ Enable the syslog logger to have access to env and labels [#21724](https://github.com/docker/docker/pull/21724)
-+ Add a syslog-format option, `rfc5424micro`, to allow microsecond resolution in syslog timestamps [#21844](https://github.com/docker/docker/pull/21844)
-* Inherit the daemon log options when creating containers [#21153](https://github.com/docker/docker/pull/21153)
-* Remove the `docker/` prefix from the log message tag and replace it with `{{.DaemonName}}` so that users have the option of changing the prefix [#22384](https://github.com/docker/docker/pull/22384)
-
-### Networking
-
-+ Built-in virtual-IP-based internal and ingress load balancing using IPVS [#23361](https://github.com/docker/docker/pull/23361)
-+ Routing mesh using the ingress overlay network [#23361](https://github.com/docker/docker/pull/23361)
-+ Secured multi-host overlay networking using an encrypted control plane and data plane [#23361](https://github.com/docker/docker/pull/23361)
-+ The MacVlan driver is out of experimental [#23524](https://github.com/docker/docker/pull/23524)
-+ Add `driver` filter to `network ls` [#22319](https://github.com/docker/docker/pull/22319)
-+ Add `network` filter to `docker ps --filter` [#23300](https://github.com/docker/docker/pull/23300)
-+ Add `--link-local-ip` flag to `create`, `run` and `network connect` to specify a container's link-local address [#23415](https://github.com/docker/docker/pull/23415)
-+ Add network label filter support [#21495](https://github.com/docker/docker/pull/21495)
-* Remove the dependency on an external KV store for overlay networking in Swarm Mode [#23361](https://github.com/docker/docker/pull/23361)
-* Add the container's short ID as a default network alias [#21901](https://github.com/docker/docker/pull/21901)
-* The `run` options `--dns` and `--net=host` are no longer mutually exclusive [#22408](https://github.com/docker/docker/pull/22408)
-- Fix a DNS issue when renaming containers with generated names [#22716](https://github.com/docker/docker/pull/22716)
-- Allow both `network inspect -f {{.Id}}` and `network inspect -f {{.ID}}` to address an inconsistency with inspect output [#23226](https://github.com/docker/docker/pull/23226)
-
-### Plugins (experimental)
-
-+ New `plugin` command to manage plugins, with `install`, `enable`, `disable`, `rm`, `inspect`, and `set` subcommands [#23446](https://github.com/docker/docker/pull/23446)
-
-### Remote API (v1.24) & Client
-
-+ Split the binary into two: `docker` (client) and `dockerd` (daemon) [#20639](https://github.com/docker/docker/pull/20639)
-+ Add `before` and `since` filters to `docker images --filter` [#22908](https://github.com/docker/docker/pull/22908)
-+ Add `--limit` option to `docker search` [#23107](https://github.com/docker/docker/pull/23107)
-+ Add `--filter` option to `docker search` [#22369](https://github.com/docker/docker/pull/22369)
-+ Add security options to `docker info` output [#21172](https://github.com/docker/docker/pull/21172) [#23520](https://github.com/docker/docker/pull/23520)
-+ Add insecure registries to `docker info` output [#20410](https://github.com/docker/docker/pull/20410)
-+ Extend Docker authorization with TLS user information [#21556](https://github.com/docker/docker/pull/21556)
-+ devicemapper: expose Minimum Thin Pool Free Space through `docker info` [#21945](https://github.com/docker/docker/pull/21945)
-* The API now returns a JSON object when an error occurs, making it more consistent [#22880](https://github.com/docker/docker/pull/22880)
-- Prevent `docker run -i --restart` from hanging on exit [#22777](https://github.com/docker/docker/pull/22777)
-- Fix an API/CLI discrepancy on hostname validation [#21641](https://github.com/docker/docker/pull/21641)
-- Fix a discrepancy in the format of sizes in `stats`, from HumanSize to BytesSize [#21773](https://github.com/docker/docker/pull/21773)
-- authz: when a request is denied, return a forbidden exit code (403) [#22448](https://github.com/docker/docker/pull/22448)
-- Windows: fix tty-related display issues [#23878](https://github.com/docker/docker/pull/23878)
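-
-A couple of hedged examples of the new client filters (the image names are
-placeholders):
-
-    docker search --filter stars=3 --limit 10 alpine
-    docker images --filter since=nginx:1.11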
-
-### Runtime
-
-+ Split the userland proxy into a separate binary (`docker-proxy`) [#23312](https://github.com/docker/docker/pull/23312)
-+ Add `--live-restore` daemon flag to keep containers running when the daemon shuts down, and regain control on startup [#23213](https://github.com/docker/docker/pull/23213)
-+ Ability to add OCI-compatible runtimes (via the `--add-runtime` daemon flag) and select one with `--runtime` on `create` and `run` [#22983](https://github.com/docker/docker/pull/22983)
-+ New `overlay2` graphdriver for Linux 4.0+ with multiple lower directory support [#22126](https://github.com/docker/docker/pull/22126)
-+ New load/save image events [#22137](https://github.com/docker/docker/pull/22137)
-+ Add support for reloading the daemon configuration through systemd [#22446](https://github.com/docker/docker/pull/22446)
-+ Add disk quota support for btrfs [#19651](https://github.com/docker/docker/pull/19651)
-+ Add disk quota support for zfs [#21946](https://github.com/docker/docker/pull/21946)
-+ Add support for `docker run --pid=container:` [#22481](https://github.com/docker/docker/pull/22481)
-+ Align the default seccomp profile with the selected capabilities [#22554](https://github.com/docker/docker/pull/22554)
-+ Add a `daemon reload` event when the daemon reloads its configuration [#22590](https://github.com/docker/docker/pull/22590)
-+ Add `trace` capability to the pprof profiler to show execution traces in binary form [#22715](https://github.com/docker/docker/pull/22715)
-+ Add a `detach` event [#22898](https://github.com/docker/docker/pull/22898)
-+ Add support for setting sysctls with `--sysctl` [#19265](https://github.com/docker/docker/pull/19265)
-+ Add `--storage-opt` flag to `create` and `run`, allowing `size` to be set on devicemapper [#19367](https://github.com/docker/docker/pull/19367)
-+ Add `--oom-score-adjust` daemon flag with a default value of `-500`, making the daemon less likely to be killed before containers [#24516](https://github.com/docker/docker/pull/24516)
-* Undeprecate the `-c` short alias of `--cpu-shares` on `run`, `build`, `create`, `update` [#22621](https://github.com/docker/docker/pull/22621)
-* Prevent the aufs and overlay graphdrivers from being used on an eCryptfs mount [#23121](https://github.com/docker/docker/pull/23121)
-- Fix issues with tmpfs mount ordering [#22329](https://github.com/docker/docker/pull/22329)
-- Created containers are no longer listed on `docker ps -a -f exited=0` [#21947](https://github.com/docker/docker/pull/21947)
-- Fix an issue where containers are stuck in a "Removal In Progress" state [#22423](https://github.com/docker/docker/pull/22423)
-- Fix a bug that returned an HTTP 500 instead of a 400 when no command is specified on run/create [#22762](https://github.com/docker/docker/pull/22762)
-- Fix a bug with `--detach-keys` whereby input matching a prefix of the detach key was not preserved [#22943](https://github.com/docker/docker/pull/22943)
-- SELinux labeling is now disabled when using `--privileged` mode [#22993](https://github.com/docker/docker/pull/22993)
-- If volume-mounted into a container, `/etc/hosts`, `/etc/resolv.conf`, and `/etc/hostname` are no longer SELinux-relabeled [#22993](https://github.com/docker/docker/pull/22993)
-- Fix an inconsistency in `--tmpfs` behavior regarding mount options [#22438](https://github.com/docker/docker/pull/22438)
-- Fix an issue where the daemon hangs at startup [#23148](https://github.com/docker/docker/pull/23148)
-- Ignore SIGPIPE events to prevent journald restarts from crashing docker in some cases [#22460](https://github.com/docker/docker/pull/22460)
-- Containers are no longer removed from the stats list on error [#20835](https://github.com/docker/docker/pull/20835)
-- Fix the `on-failure` restart policy when the daemon restarts [#20853](https://github.com/docker/docker/pull/20853)
-- Fix an issue with `stats` when a container is using another container's network [#21904](https://github.com/docker/docker/pull/21904)
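-
-For example (a sketch only; `my-runc` stands in for whatever OCI-compatible
-runtime binary you register):
-
-    # register an extra runtime when starting the daemon...
-    dockerd --add-runtime my-runc=/usr/local/bin/my-runc
-    # ...then select it for a single container
-    docker run --rm --runtime=my-runc alpine echo hello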
+ (experimental) New `stack` and `deploy` commands to manage and deploy multi-service applications [#23522](https://github.com/docker/docker/pull/23522) [#25140](https://github.com/docker/docker/pull/25140) - -### Volume - -+ Add support for local and global volume scopes (analogous to network scopes) [#22077](https://github.com/docker/docker/pull/22077) -+ Allow volume drivers to provide a `Status` field [#21006](https://github.com/docker/docker/pull/21006) -+ Add name/driver filter support for volumes [#21361](https://github.com/docker/docker/pull/21361) -* Mount/Unmount operations now receive an opaque ID to allow volume drivers to differentiate between two callers [#21015](https://github.com/docker/docker/pull/21015) -- Fix an issue preventing a volume from being removed in a corner case [#22103](https://github.com/docker/docker/pull/22103) -- Windows: Enable auto-creation of host-path to match Linux [#22094](https://github.com/docker/docker/pull/22094) - - -### DEPRECATION -* Environment variables `DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and `DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` have been renamed - to `DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE` and `DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE` respectively [#22574](https://github.com/docker/docker/pull/22574) -* Remove deprecated `syslog-tag`, `gelf-tag`, `fluentd-tag` log options in favor of the generic `tag` option [#22620](https://github.com/docker/docker/pull/22620) -* Remove deprecated feature of passing HostConfig at API container start [#22570](https://github.com/docker/docker/pull/22570) -* Remove deprecated `-f`/`--force` flag on `docker tag` [#23090](https://github.com/docker/docker/pull/23090) -* Remove deprecated `/containers//copy` endpoint [#22149](https://github.com/docker/docker/pull/22149) -* Remove deprecated `docker ps` flags `--since` and `--before` [#22138](https://github.com/docker/docker/pull/22138) -* Deprecate the old 3-args form of `docker import` [#23273](https://github.com/docker/docker/pull/23273) - -## 1.11.2 (2016-05-31) - -### Networking - -- Fix a stale endpoint issue on overlay networks during ungraceful restart ([#23015](https://github.com/docker/docker/pull/23015)) -- Fix an issue where the wrong port could be reported by `docker inspect/ps/port` ([#22997](https://github.com/docker/docker/pull/22997)) - -### Runtime - -- Fix a potential panic when running `docker build` ([#23032](https://github.com/docker/docker/pull/23032)) -- Fix interpretation of the `--user` parameter ([#22998](https://github.com/docker/docker/pull/22998)) -- Fix a bug preventing container statistics from being correctly reported ([#22955](https://github.com/docker/docker/pull/22955)) -- Fix an issue preventing containers from being restarted after a daemon restart ([#22947](https://github.com/docker/docker/pull/22947)) -- Fix issues when running 32-bit binaries on Ubuntu 16.04 ([#22922](https://github.com/docker/docker/pull/22922)) -- Fix a possible deadlock on image deletion and container attach ([#22918](https://github.com/docker/docker/pull/22918)) -- Fix an issue where containers fail to start after a daemon restart if they depend on a containerized cluster store ([#22561](https://github.com/docker/docker/pull/22561)) -- Fix an issue causing `docker ps` to hang on CentOS when using devicemapper 
([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067)) -- Fix a bug preventing `docker exec` from entering a container when using devicemapper ([#22168](https://github.com/docker/docker/pull/22168), [#23067](https://github.com/docker/docker/pull/23067)) - - ## 1.11.1 (2016-04-26) - -### Distribution - -- Fix schema2 manifest media type to be of type `application/vnd.docker.container.image.v1+json` ([#21949](https://github.com/docker/docker/pull/21949)) - -### Documentation - -+ Add missing API documentation for changes introduced with 1.11.0 ([#22048](https://github.com/docker/docker/pull/22048)) - -### Builder - -* Labels passed to `docker build` as arguments are now appended as an implicit `LABEL` command at the end of the processed `Dockerfile` ([#22184](https://github.com/docker/docker/pull/22184)) - -### Networking - -- Fix a panic that would occur when forwarding DNS queries ([#22261](https://github.com/docker/docker/pull/22261)) -- Fix an issue where OS threads could end up within an incorrect network namespace when using user-defined networks ([#22261](https://github.com/docker/docker/pull/22261)) - -### Runtime - -- Fix a bug preventing the labels configuration from being reloaded via the config file ([#22299](https://github.com/docker/docker/pull/22299)) -- Fix a regression where a container mounting `/var/run` would prevent other containers from being removed ([#22256](https://github.com/docker/docker/pull/22256)) -- Fix an issue where it would be impossible to update both the `memory-swap` and `memory` values together ([#22255](https://github.com/docker/docker/pull/22255)) -- Fix a regression from 1.11.0 where the `/auth` endpoint would not initialize `serveraddress` if it is not provided ([#22254](https://github.com/docker/docker/pull/22254)) -- Add missing cleanup of container temporary files when cancelling a scheduled restart ([#22237](https://github.com/docker/docker/pull/22237)) -- Remove scary error message when no restart policy is specified ([#21993](https://github.com/docker/docker/pull/21993)) -- Fix a panic that would occur when plugins were activated via the json spec ([#22191](https://github.com/docker/docker/pull/22191)) -- Fix restart backoff logic to correctly reset the delay if the container ran for at least 10 seconds ([#22125](https://github.com/docker/docker/pull/22125)) -- Remove error message when a container restart gets cancelled ([#22123](https://github.com/docker/docker/pull/22123)) -- Fix an issue where `docker` would not correctly clean up after `docker exec` ([#22121](https://github.com/docker/docker/pull/22121)) -- Fix a panic that could occur when serving concurrent `docker stats` commands ([#22120](https://github.com/docker/docker/pull/22120)) -- Revert deprecation of non-existent host directories auto-creation ([#22065](https://github.com/docker/docker/pull/22065)) -- Hide misleading rpc error on daemon shutdown ([#22058](https://github.com/docker/docker/pull/22058)) - -## 1.11.0 (2016-04-13) - -**IMPORTANT**: With Docker 1.11, a Linux docker installation is now made of 4 binaries (`docker`, [`docker-containerd`](https://github.com/docker/containerd), [`docker-containerd-shim`](https://github.com/docker/containerd) and [`docker-runc`](https://github.com/opencontainers/runc)). If you have scripts relying on docker being a single static binary, please make sure to update them. Interaction with the daemon stays the same otherwise; the usage of the other binaries should be transparent. A Windows docker installation remains a single binary, `docker.exe`.
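Scripts that assumed a single static binary can sanity-check the new layout; a minimal sketch:

```bash
# All four binaries should be on the PATH after upgrading to 1.11+.
for bin in docker docker-containerd docker-containerd-shim docker-runc; do
  command -v "$bin" >/dev/null || echo "missing: $bin"
done
```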
### Builder - -- Fix a bug where Docker would not use the correct uid/gid when processing the `WORKDIR` command ([#21033](https://github.com/docker/docker/pull/21033)) -- Fix a bug where copy operations with userns would not use the proper uid/gid ([#20782](https://github.com/docker/docker/pull/20782), [#21162](https://github.com/docker/docker/pull/21162)) - -### Client - -* Usage of the `:` separator for security options has been deprecated. `=` should be used instead ([#21232](https://github.com/docker/docker/pull/21232)) -+ The client user agent is now passed to the registry on `pull`, `build`, `push`, `login` and `search` operations ([#21306](https://github.com/docker/docker/pull/21306), [#21373](https://github.com/docker/docker/pull/21373)) -* Allow setting the Domainname and Hostname separately through the API ([#20200](https://github.com/docker/docker/pull/20200)) -* Docker info will now warn users if it cannot detect the kernel version or the operating system ([#21128](https://github.com/docker/docker/pull/21128)) -- Fix an issue where `docker stats --no-stream` output could be all 0s ([#20803](https://github.com/docker/docker/pull/20803)) -- Fix a bug where some newly started containers would not appear in a running `docker stats` command ([#20792](https://github.com/docker/docker/pull/20792)) -* Post processing is no longer enabled for linux-cgo terminals ([#20587](https://github.com/docker/docker/pull/20587)) -- Values to `--hostname` are now refused if they do not comply with [RFC1123](https://tools.ietf.org/html/rfc1123) ([#20566](https://github.com/docker/docker/pull/20566)) -+ Docker learned how to use a SOCKS proxy ([#20366](https://github.com/docker/docker/pull/20366), [#18373](https://github.com/docker/docker/pull/18373)) -+ Docker now supports external credential stores ([#20107](https://github.com/docker/docker/pull/20107)) -* `docker ps` now supports displaying the list of volumes mounted inside a container ([#20017](https://github.com/docker/docker/pull/20017)) -* `docker info` now also reports Docker's root directory location ([#19986](https://github.com/docker/docker/pull/19986)) -- Docker now prohibits logging in with an empty username (spaces are trimmed) ([#19806](https://github.com/docker/docker/pull/19806)) -* Docker events attributes are now sorted by key ([#19761](https://github.com/docker/docker/pull/19761)) -* `docker ps` no longer shows exported ports for stopped containers ([#19483](https://github.com/docker/docker/pull/19483)) -- Docker now cleans up after itself if a save/export command fails ([#17849](https://github.com/docker/docker/pull/17849)) -* Docker load learned how to display a progress bar ([#17329](https://github.com/docker/docker/pull/17329), [#20078](https://github.com/docker/docker/pull/20078)) - -### Distribution - -- Fix a panic that occurred when pulling an image with 0 layers ([#21222](https://github.com/docker/docker/pull/21222)) -- Fix a panic that could occur on error while pushing to a registry with a misconfigured token service ([#21212](https://github.com/docker/docker/pull/21212)) -+ All first-level delegation roles are now signed when doing a trusted push ([#21046](https://github.com/docker/docker/pull/21046)) -+ OAuth support for registries was added ([#20970](https://github.com/docker/docker/pull/20970)) -* `docker login` now handles tokens using the implementation found in [docker/distribution](https://github.com/docker/distribution) 
([#20832](https://github.com/docker/docker/pull/20832)) -* `docker login` will no longer prompt for an email ([#20565](https://github.com/docker/docker/pull/20565)) -* Docker will now fall back to registry V1 if no basic auth credentials are available ([#20241](https://github.com/docker/docker/pull/20241)) -* Docker will now try to resume layer download where it left off after a network error/timeout ([#19840](https://github.com/docker/docker/pull/19840)) -- Fix generated manifest mediaType when pushing cross-repository ([#19509](https://github.com/docker/docker/pull/19509)) -- Fix docker requesting additional push credentials when pulling an image if Content Trust is enabled ([#20382](https://github.com/docker/docker/pull/20382)) - -### Logging - -- Fix a race in the journald log driver ([#21311](https://github.com/docker/docker/pull/21311)) -* Docker syslog driver now uses the RFC-5424 format when emitting logs ([#20121](https://github.com/docker/docker/pull/20121)) -* Docker GELF log driver now allows specifying the compression algorithm and level via the `gelf-compression-type` and `gelf-compression-level` options ([#19831](https://github.com/docker/docker/pull/19831)) -* Docker daemon learned to output uncolorized logs via the `--raw-logs` option ([#19794](https://github.com/docker/docker/pull/19794)) -+ Docker, on the Windows platform, now includes an ETW (Event Tracing for Windows) logging driver named `etwlogs` ([#19689](https://github.com/docker/docker/pull/19689)) -* Journald log driver learned how to handle tags ([#19564](https://github.com/docker/docker/pull/19564)) -+ The fluentd log driver learned the following options: `fluentd-address`, `fluentd-buffer-limit`, `fluentd-retry-wait`, `fluentd-max-retries` and `fluentd-async-connect` ([#19439](https://github.com/docker/docker/pull/19439)) -+ Docker learned to send logs to Google Cloud via the new `gcplogs` logging driver. ([#18766](https://github.com/docker/docker/pull/18766))
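A minimal sketch of the GELF and fluentd options named above; the endpoints are hypothetical:

```bash
docker run --log-driver=gelf \
  --log-opt gelf-address=udp://graylog.example.com:12201 \
  --log-opt gelf-compression-type=gzip \
  --log-opt gelf-compression-level=6 \
  alpine echo hello
docker run --log-driver=fluentd \
  --log-opt fluentd-address=localhost:24224 \
  alpine echo hello
```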
### Misc - -+ When saving linked images together with `docker save`, a subsequent `docker load` will correctly restore their parent/child relationship ([#21385](https://github.com/docker/docker/pull/21385)) -+ Support for building the Docker cli for OpenBSD was added ([#21325](https://github.com/docker/docker/pull/21325)) -+ Labels can now be applied at network, volume and image creation ([#21270](https://github.com/docker/docker/pull/21270)) -* The `dockremap` user is now created as a system user ([#21266](https://github.com/docker/docker/pull/21266)) -- Fix a few response body leaks ([#21258](https://github.com/docker/docker/pull/21258)) -- Docker, when run as a service with systemd, will now properly manage the cgroups of its processes ([#20633](https://github.com/docker/docker/pull/20633)) -* `docker info` now reports the value of cgroup KernelMemory or emits a warning if it is not supported ([#20863](https://github.com/docker/docker/pull/20863)) -* `docker info` now also reports the cgroup driver in use ([#20388](https://github.com/docker/docker/pull/20388)) -* Docker completion is now available on PowerShell ([#19894](https://github.com/docker/docker/pull/19894)) -* `dockerinit` is no more ([#19490](https://github.com/docker/docker/pull/19490),[#19851](https://github.com/docker/docker/pull/19851)) -+ Support for building Docker on arm64 was added ([#19013](https://github.com/docker/docker/pull/19013)) -+ Experimental support for building docker.exe in a native Windows Docker installation ([#18348](https://github.com/docker/docker/pull/18348)) - -### Networking - -- Fix panic if a node is forcibly removed from the cluster ([#21671](https://github.com/docker/docker/pull/21671)) -- Fix "error creating vxlan interface" when starting a container in a Swarm cluster ([#21671](https://github.com/docker/docker/pull/21671)) -* `docker network inspect` will now report all endpoints whether they have an active container or not ([#21160](https://github.com/docker/docker/pull/21160)) -+ Experimental support for the MacVlan and IPVlan network drivers has been added ([#21122](https://github.com/docker/docker/pull/21122)) -* Output of `docker network ls` is now sorted by network name ([#20383](https://github.com/docker/docker/pull/20383)) -- Fix a bug where Docker would allow a network to be created with the reserved `default` name ([#19431](https://github.com/docker/docker/pull/19431)) -* `docker network inspect` returns whether a network is internal or not ([#19357](https://github.com/docker/docker/pull/19357)) -+ Control IPv6 via an explicit option when creating a network (`docker network create --ipv6`). This shows up as a new `EnableIPv6` field in `docker network inspect` ([#17513](https://github.com/docker/docker/pull/17513))
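A minimal sketch of the `--ipv6` option above; the subnet is a documentation prefix, and `--format` on `network inspect` is described in the 1.10.0 notes below:

```bash
docker network create --ipv6 --subnet=2001:db8:1::/64 v6net
docker network inspect --format '{{ .EnableIPv6 }}' v6net   # prints "true"
```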
* Support for AAAA Records (aka IPv6 Service Discovery) in the embedded DNS Server ([#21396](https://github.com/docker/docker/pull/21396)) -- Fix to not forward docker domain IPv6 queries to external servers ([#21396](https://github.com/docker/docker/pull/21396)) -* Multiple A/AAAA records from the embedded DNS Server for DNS round robin ([#21019](https://github.com/docker/docker/pull/21019)) -- Fix endpoint count inconsistency after an ungraceful daemon restart ([#21261](https://github.com/docker/docker/pull/21261)) -- Move the ownership of exposed ports and port-mapping options from Endpoint to Sandbox ([#21019](https://github.com/docker/docker/pull/21019)) -- Fixed a bug which prevented docker reload when the host is configured with ipv6.disable=1 ([#21019](https://github.com/docker/docker/pull/21019)) -- Added an inbuilt nil IPAM driver ([#21019](https://github.com/docker/docker/pull/21019)) -- Fixed bug in iptables.Exists() logic [#21019](https://github.com/docker/docker/pull/21019) -- Fixed a Veth interface leak when using an overlay network ([#21019](https://github.com/docker/docker/pull/21019)) -- Fixed a bug which prevented docker reload after a network delete during shutdown ([#20214](https://github.com/docker/docker/pull/20214)) -- Make sure iptables chains are recreated on firewalld reload ([#20419](https://github.com/docker/docker/pull/20419)) -- Allow passing the global datastore during config reload ([#20419](https://github.com/docker/docker/pull/20419)) -- For anonymous containers, use the alias name for IP-to-name mapping, i.e. the DNS PTR record ([#21019](https://github.com/docker/docker/pull/21019)) -- Fix a panic when deleting an entry from the /etc/hosts file ([#21019](https://github.com/docker/docker/pull/21019)) -- Source the forwarded DNS queries from the container net namespace ([#21019](https://github.com/docker/docker/pull/21019)) -- Fix to retain the network internal mode config for bridge networks on daemon reload ([#21780](https://github.com/docker/docker/pull/21780)) -- Fix to retain IPAM driver option configs on daemon reload ([#21914](https://github.com/docker/docker/pull/21914)) - -### Plugins - -- Fix a file descriptor leak that would occur every time plugins were enumerated ([#20686](https://github.com/docker/docker/pull/20686)) -- Fix an issue where the Authz plugin would corrupt the payload body when faced with a large amount of data ([#20602](https://github.com/docker/docker/pull/20602)) - -### Runtime - -- Fix a panic that could occur when cleaning up after a container started with invalid parameters ([#21716](https://github.com/docker/docker/pull/21716)) -- Fix a race with event timers stopping early ([#21692](https://github.com/docker/docker/pull/21692)) -- Fix race conditions in the layer store, potentially corrupting the map and crashing the process ([#21677](https://github.com/docker/docker/pull/21677)) -- Un-deprecate auto-creation of host directories for mounts. This feature was marked deprecated in Docker 1.9, but was considered too much of a backward-incompatible change, so the feature was kept ([#21666](https://github.com/docker/docker/pull/21666)).
-+ It is now possible for containers to share the NET and IPC namespaces when `userns` is enabled ([#21383](https://github.com/docker/docker/pull/21383)) -+ `docker inspect` will now expose the rootfs layers ([#21370](https://github.com/docker/docker/pull/21370)) -+ Docker Windows gained a minimal `top` implementation ([#21354](https://github.com/docker/docker/pull/21354)) -* Docker learned to report the faulty executable when a container cannot be started due to its condition ([#21345](https://github.com/docker/docker/pull/21345)) -* Docker with device mapper will now refuse to run if `udev sync` is not available ([#21097](https://github.com/docker/docker/pull/21097)) -- Fix a bug where Docker would not validate the config file upon configuration reload ([#21089](https://github.com/docker/docker/pull/21089)) -- Fix a hang that would happen on attach if the initial start failed ([#21048](https://github.com/docker/docker/pull/21048)) -- Fix an issue where registry service options in the daemon configuration file were not properly taken into account ([#21045](https://github.com/docker/docker/pull/21045)) -- Fix a race between the exec and resize operations ([#21022](https://github.com/docker/docker/pull/21022)) -- Fix an issue where nanoseconds were not correctly taken into account when filtering Docker events ([#21013](https://github.com/docker/docker/pull/21013)) -- Fix the handling of a Docker command when passed a 64-byte id ([#21002](https://github.com/docker/docker/pull/21002)) -* Docker will now return a `204` (i.e. http.StatusNoContent) code when it has successfully deleted a network ([#20977](https://github.com/docker/docker/pull/20977)) -- Fix a bug where the daemon would wait indefinitely in case the process it was about to kill had already exited on its own ([#20967](https://github.com/docker/docker/pull/20967)) -* The devmapper driver learned the `dm.min_free_space` option. If the mapped device's free space reaches the passed value, new device creation will be prohibited. ([#20786](https://github.com/docker/docker/pull/20786)) -+ Docker can now prevent processes in containers from gaining new privileges via the `--security-opt=no-new-privileges` flag ([#20727](https://github.com/docker/docker/pull/20727)) -- Starting a container with the `--device` option will now correctly resolve symlinks ([#20684](https://github.com/docker/docker/pull/20684)) -+ Docker now relies on [`containerd`](https://github.com/docker/containerd) and [`runc`](https://github.com/opencontainers/runc) to spawn containers. ([#20662](https://github.com/docker/docker/pull/20662))
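A minimal sketch of the `--security-opt=no-new-privileges` flag mentioned above:

```bash
# Processes in this container cannot gain privileges, e.g. via setuid binaries.
docker run --rm --security-opt=no-new-privileges alpine id
```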
- Fix docker configuration reloading to only alter values present in the given config file ([#20604](https://github.com/docker/docker/pull/20604)) -+ Docker now allows setting a container hostname via the `--hostname` flag when using `--net=host` ([#20177](https://github.com/docker/docker/pull/20177)) -+ Docker now allows executing privileged containers while running with `--userns-remap` if both `--privileged` and the new `--userns=host` flag are specified ([#20111](https://github.com/docker/docker/pull/20111)) -- Fix Docker not correctly cleaning up old containers upon restarting after a crash ([#19679](https://github.com/docker/docker/pull/19679)) -* Docker will now error out if it doesn't recognize a configuration key within the config file ([#19517](https://github.com/docker/docker/pull/19517)) -- Fix loading of containers, on daemon startup, when they depend on a plugin running within a container ([#19500](https://github.com/docker/docker/pull/19500)) -* `docker update` learned how to change a container's restart policy ([#19116](https://github.com/docker/docker/pull/19116)) -* `docker inspect` now also returns a new `State` field containing the container state in a human-readable way (i.e. one of `created`, `restarting`, `running`, `paused`, `exited` or `dead`) ([#18966](https://github.com/docker/docker/pull/18966)) -+ Docker learned to limit the number of active pids (i.e. processes) within the container via the `pids-limit` flag. NOTE: This requires `CGROUP_PIDS=y` to be in the kernel configuration. ([#18697](https://github.com/docker/docker/pull/18697)) -- `docker load` now has a `--quiet` option to suppress the load output ([#20078](https://github.com/docker/docker/pull/20078)) -- Fix a bug in neighbor discovery for IPv6 peers ([#20842](https://github.com/docker/docker/pull/20842)) -- Fix a panic during cleanup if a container was started with invalid options ([#21802](https://github.com/docker/docker/pull/21802)) -- Fix a situation where a container could not be stopped if the terminal was closed ([#21840](https://github.com/docker/docker/pull/21840)) - -### Security - -* Objects with the `pcp_pmcd_t` SELinux type were given management access to `/var/lib/docker(/.*)?` ([#21370](https://github.com/docker/docker/pull/21370)) -* `restart_syscall`, `copy_file_range`, `mlock2` joined the list of allowed calls in the default seccomp profile ([#21117](https://github.com/docker/docker/pull/21117), [#21262](https://github.com/docker/docker/pull/21262)) -* `send`, `recv` and `x32` were added to the list of allowed syscalls and arches in the default seccomp profile ([#19432](https://github.com/docker/docker/pull/19432)) -* Docker Content Trust now requests the server to perform snapshot signing ([#21046](https://github.com/docker/docker/pull/21046)) -* Support for using YubiKeys for Content Trust signing has been moved out of experimental ([#21591](https://github.com/docker/docker/pull/21591)) - -### Volumes - -* Output of `docker volume ls` is now sorted by volume name ([#20389](https://github.com/docker/docker/pull/20389)) -* Local volumes can now accept options similar to the unix `mount` tool ([#20262](https://github.com/docker/docker/pull/20262)) -- Fix an issue where a one-letter directory name could not be used as the source for volumes ([#21106](https://github.com/docker/docker/pull/21106)) -+ `docker run -v` now accepts a new flag `nocopy`. This tells the runtime not to copy the container path content into the volume (which is the default behavior) ([#21223](https://github.com/docker/docker/pull/21223))
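A minimal sketch of the `nocopy` mode; the volume names are arbitrary:

```bash
# Default: the image's /etc content is copied into the freshly created volume.
docker run --rm -v config:/etc alpine ls /etc
# With nocopy, the volume is mounted empty instead.
docker run --rm -v config2:/etc:nocopy alpine ls /etc
```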
## 1.10.3 (2016-03-10) - -### Runtime - -- Fix the Docker client exiting with an "Unrecognized input header" error [#20706](https://github.com/docker/docker/pull/20706) -- Fix Docker exiting if Exec is started with both `AttachStdin` and `Detach` [#20647](https://github.com/docker/docker/pull/20647) - -### Distribution - -- Fix a crash when pushing multiple images sharing the same layers to the same repository in parallel [#20831](https://github.com/docker/docker/pull/20831) -- Fix a panic when pushing images to a registry which uses a misconfigured token service [#21030](https://github.com/docker/docker/pull/21030) - -### Plugin system - -- Fix an issue preventing volume plugins from starting when SELinux is enabled [#20834](https://github.com/docker/docker/pull/20834) -- Prevent Docker from exiting if a volume plugin returns a null response for Get requests [#20682](https://github.com/docker/docker/pull/20682) -- Fix the plugin system leaking file descriptors if a plugin has an error [#20680](https://github.com/docker/docker/pull/20680) - -### Security - -- Fix linux32 emulation failing during docker build [#20672](https://github.com/docker/docker/pull/20672) - It was due to the `personality` syscall being blocked by the default seccomp profile. -- Fix Oracle XE 10g failing to start in a container [#20981](https://github.com/docker/docker/pull/20981) - It was due to the `ipc` syscall being blocked by the default seccomp profile. -- Fix user namespaces not working on Linux From Scratch [#20685](https://github.com/docker/docker/pull/20685) -- Fix an issue preventing the daemon from starting if userns is enabled and the `subuid` or `subgid` files contain comments [#20725](https://github.com/docker/docker/pull/20725) - -## 1.10.2 (2016-02-22) - -### Runtime - -- Prevent systemd from deleting containers' cgroups when its configuration is reloaded [#20518](https://github.com/docker/docker/pull/20518) -- Fix SELinux issues by disregarding `--read-only` when mounting `/dev/mqueue` [#20333](https://github.com/docker/docker/pull/20333) -- Fix chown permissions used during `docker cp` when userns is used [#20446](https://github.com/docker/docker/pull/20446) -- Fix configuration loading issue with all booleans defaulting to `true` [#20471](https://github.com/docker/docker/pull/20471) -- Fix occasional panic with `docker logs -f` [#20522](https://github.com/docker/docker/pull/20522) - -### Distribution - -- Keep the layer reference if deletion failed to avoid a badly inconsistent state [#20513](https://github.com/docker/docker/pull/20513) -- Gracefully handle a corner case when canceling migration [#20372](https://github.com/docker/docker/pull/20372) -- Fix docker import on compressed data [#20367](https://github.com/docker/docker/pull/20367) -- Fix tar-split file corruption during migration that later caused docker push and docker save to fail [#20458](https://github.com/docker/docker/pull/20458) - -### Networking - -- Fix daemon crash if embedded DNS is sent garbage [#20510](https://github.com/docker/docker/pull/20510) - -### Volumes - -- Fix issue with multiple volume references with the same name [#20381](https://github.com/docker/docker/pull/20381) - -### Security - -- Fix potential cache corruption and delegation conflict issues [#20523](https://github.com/docker/docker/pull/20523) - -## 1.10.1 (2016-02-11) - -### Runtime - -* Do not stop the daemon on migration hard 
failure [#20156](https://github.com/docker/docker/pull/20156) -- Fix various issues with migration to content-addressable images [#20058](https://github.com/docker/docker/pull/20058) -- Fix ZFS permission bug with user namespaces [#20045](https://github.com/docker/docker/pull/20045) -- Do not leak /dev/mqueue from the host to all containers; keep it container-specific [#19876](https://github.com/docker/docker/pull/19876) [#20133](https://github.com/docker/docker/pull/20133) -- Fix `docker ps --filter before=...` to not show stopped containers without the `-a` flag [#20135](https://github.com/docker/docker/pull/20135) - -### Security - -- Fix an issue preventing docker events from working properly with an authorization plugin [#20002](https://github.com/docker/docker/pull/20002) - -### Distribution - -* Add additional verifications and prevent uploading invalid data to registries [#20164](https://github.com/docker/docker/pull/20164) -- Fix regression preventing uppercase characters in image reference hostnames [#20175](https://github.com/docker/docker/pull/20175) - -### Networking - -- Fix embedded DNS for user-defined networks in the presence of firewalld [#20060](https://github.com/docker/docker/pull/20060) -- Fix issue where removing a network during shutdown left Docker inoperable [#20181](https://github.com/docker/docker/issues/20181) [#20235](https://github.com/docker/docker/issues/20235) -- Embedded DNS is now able to return compressed results [#20181](https://github.com/docker/docker/issues/20181) -- Fix port-mapping issue with `userland-proxy=false` [#20181](https://github.com/docker/docker/issues/20181) - -### Logging - -- Fix bug where the tcp+tls protocol would be rejected [#20109](https://github.com/docker/docker/pull/20109) - -### Volumes - -- Fix issue whereby older volume drivers would not receive volume options [#19983](https://github.com/docker/docker/pull/19983) - -### Misc - -- Remove TasksMax from the Docker systemd service [#20167](https://github.com/docker/docker/pull/20167) - -## 1.10.0 (2016-02-04) - -**IMPORTANT**: Docker 1.10 uses a new content-addressable storage for images and layers. -A migration is performed the first time docker is run, and can take a significant amount of time depending on the number of images present. -Refer to this page on the wiki for more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration -We also released a cool migration utility that enables you to perform the migration before updating to reduce downtime. -Engine 1.10 migrator can be found on Docker Hub: https://hub.docker.com/r/docker/v1.10-migrator/ - -### Runtime - -+ New `docker update` command that allows updating resource constraints on running containers [#15078](https://github.com/docker/docker/pull/15078) -+ Add `--tmpfs` flag to `docker run` to create a tmpfs mount in a container [#13587](https://github.com/docker/docker/pull/13587) -+ Add `--format` flag to the `docker images` command [#17692](https://github.com/docker/docker/pull/17692) -+ Allow daemon configuration to be set in a file and hot-reloaded with the `SIGHUP` signal [#18587](https://github.com/docker/docker/pull/18587) -+ Updated docker events to include more meta-data and event types [#18888](https://github.com/docker/docker/pull/18888) - This change is backward compatible in the API, but not on the CLI.
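A minimal sketch of the file-based configuration and `SIGHUP` reload above; the paths are the Linux defaults, and only a subset of options can be hot-reloaded:

```bash
# /etc/docker/daemon.json is the default location of the daemon config file.
cat > /etc/docker/daemon.json <<'EOF'
{
  "labels": ["env=staging"]
}
EOF
# Ask the running daemon to re-read the file without restarting containers.
kill -SIGHUP "$(cat /var/run/docker.pid)"
```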
-+ Add `--blkio-weight-device` flag to `docker run` [#13959](https://github.com/docker/docker/pull/13959) -+ Add `--device-read-bps` and `--device-write-bps` flags to `docker run` [#14466](https://github.com/docker/docker/pull/14466) -+ Add `--device-read-iops` and `--device-write-iops` flags to `docker run` [#15879](https://github.com/docker/docker/pull/15879) -+ Add `--oom-score-adj` flag to `docker run` [#16277](https://github.com/docker/docker/pull/16277) -+ Add `--detach-keys` flag to `attach`, `run`, `start` and `exec` commands to override the default key sequence that detaches from a container [#15666](https://github.com/docker/docker/pull/15666) -+ Add `--shm-size` flag to `run`, `create` and `build` to set the size of `/dev/shm` [#16168](https://github.com/docker/docker/pull/16168) -+ Show the number of running, stopped, and paused containers in `docker info` [#19249](https://github.com/docker/docker/pull/19249) -+ Show the `OSType` and `Architecture` in `docker info` [#17478](https://github.com/docker/docker/pull/17478) -+ Add `--cgroup-parent` flag on `daemon` to set the cgroup parent for all containers [#19062](https://github.com/docker/docker/pull/19062) -+ Add `-L` flag to `docker cp` to follow symlinks [#16613](https://github.com/docker/docker/pull/16613) -+ New `status=dead` filter for `docker ps` [#17908](https://github.com/docker/docker/pull/17908) -* Change `docker run` exit codes to distinguish between runtime and application errors [#14012](https://github.com/docker/docker/pull/14012) -* Enhance `docker events --since` and `--until` to support nanoseconds and timezones [#17495](https://github.com/docker/docker/pull/17495) -* Add `--all`/`-a` flag to `stats` to include both running and stopped containers [#16742](https://github.com/docker/docker/pull/16742) -* Change the default cgroup-driver to `cgroupfs` [#17704](https://github.com/docker/docker/pull/17704) -* Emit a "tag" event when tagging an image with `build -t` [#17115](https://github.com/docker/docker/pull/17115) -* Best effort for linked containers' start order when starting the daemon [#18208](https://github.com/docker/docker/pull/18208) -* Add ability to add multiple tags on `build` [#15780](https://github.com/docker/docker/pull/15780) -* Permit `OPTIONS` requests against any URL, thus fixing an issue with CORS [#19569](https://github.com/docker/docker/pull/19569) -- Fix the `--quiet` flag on `docker build` to actually be quiet [#17428](https://github.com/docker/docker/pull/17428) -- Fix `docker images --filter dangling=false` to now show all non-dangling images [#19326](https://github.com/docker/docker/pull/19326) -- Fix race condition causing autorestart to turn off on restart [#17629](https://github.com/docker/docker/pull/17629) -- Recognize GPFS filesystems [#19216](https://github.com/docker/docker/pull/19216) -- Fix an obscure bug preventing containers from starting [#19751](https://github.com/docker/docker/pull/19751) -- Forbid `exec` during container restart [#19722](https://github.com/docker/docker/pull/19722) -- devicemapper: Increasing `--storage-opt dm.basesize` will now increase the base device size on daemon restart [#19123](https://github.com/docker/docker/pull/19123) - -### Security - -+ Add `--userns-remap` flag to `daemon` to support user namespaces (previously in experimental) [#19187](https://github.com/docker/docker/pull/19187) -+ Add support for custom seccomp profiles in `--security-opt` [#17989](https://github.com/docker/docker/pull/17989) -+ Add a default seccomp profile [#18780](https://github.com/docker/docker/pull/18780)
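A minimal sketch of the seccomp options above; `/path/to/profile.json` is a placeholder for a custom profile:

```bash
# Run with the default profile, with a custom profile, or with none at all.
docker run --rm alpine id
docker run --rm --security-opt seccomp=/path/to/profile.json alpine id
docker run --rm --security-opt seccomp=unconfined alpine id
```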
+ Add `--authorization-plugin` flag to `daemon` to customize ACLs [#15365](https://github.com/docker/docker/pull/15365) -+ Docker Content Trust now supports the ability to read and write user delegations [#18887](https://github.com/docker/docker/pull/18887) - This is an optional, opt-in feature that requires the explicit use of the Notary command-line utility in order to be enabled. - Enabling delegation support in a specific repository will break the ability of Docker 1.9 and 1.8 to pull from that repository, if content trust is enabled. -* Allow SELinux to run in a container when using the BTRFS storage driver [#16452](https://github.com/docker/docker/pull/16452) - -### Distribution - -* Use content-addressable storage for images and layers [#17924](https://github.com/docker/docker/pull/17924) - Note that a migration is performed the first time docker is run; it can take a significant amount of time depending on the number of images and containers present. - Images no longer depend on the parent chain but contain a list of layer references. - `docker load`/`docker save` tarballs now also contain content-addressable image configurations. - For more information: https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration -* Add support for the new [manifest format ("schema2")](https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-2.md) [#18785](https://github.com/docker/docker/pull/18785) -* Lots of improvements for push and pull: performance++, retries on failed downloads, cancelling on client disconnect [#18353](https://github.com/docker/docker/pull/18353), [#18418](https://github.com/docker/docker/pull/18418), [#19109](https://github.com/docker/docker/pull/19109) -* Limit v1 protocol fallbacks [#18590](https://github.com/docker/docker/pull/18590) -- Fix issue where docker could hang indefinitely waiting for a nonexistent process to pull an image [#19743](https://github.com/docker/docker/pull/19743) - -### Networking - -+ Use DNS-based discovery instead of `/etc/hosts` [#19198](https://github.com/docker/docker/pull/19198) -+ Support for network-scoped aliases using `--net-alias` on `run` and `--alias` on `network connect` [#19242](https://github.com/docker/docker/pull/19242) -+ Add `--ip` and `--ip6` on `run` and `network connect` to support custom IP addresses for a container in a network [#19001](https://github.com/docker/docker/pull/19001) -+ Add `--ipam-opt` to `network create` for passing custom IPAM options [#17316](https://github.com/docker/docker/pull/17316) -+ Add `--internal` flag to `network create` to restrict external access to and from the network [#19276](https://github.com/docker/docker/pull/19276) -+ Add `kv.path` option to `--cluster-store-opt` [#19167](https://github.com/docker/docker/pull/19167) -+ Add `discovery.heartbeat` and `discovery.ttl` options to `--cluster-store-opt` to configure discovery TTL and heartbeat timer [#18204](https://github.com/docker/docker/pull/18204) -+ Add `--format` flag to `network inspect` [#17481](https://github.com/docker/docker/pull/17481) -+ Add `--link` to `network connect` to provide a container-local alias [#19229](https://github.com/docker/docker/pull/19229) -+ Support for Capability exchange with remote IPAM plugins [#18775](https://github.com/docker/docker/pull/18775) -+ Add `--force` to `network disconnect` to force a container to be disconnected from a network 
[#19317](https://github.com/docker/docker/pull/19317) -* Support for multi-host networking using the built-in overlay driver for all engine-supported kernels: 3.10+ [#18775](https://github.com/docker/docker/pull/18775) -* `--link` is now supported on `docker run` for containers in user-defined networks [#19229](https://github.com/docker/docker/pull/19229) -* Enhance `docker network rm` to allow removing multiple networks [#17489](https://github.com/docker/docker/pull/17489) -* Include container names in `network inspect` [#17615](https://github.com/docker/docker/pull/17615) -* Include auto-generated subnets for user-defined networks in `network inspect` [#17316](https://github.com/docker/docker/pull/17316) -* Add `--filter` flag to `network ls` to hide predefined networks [#17782](https://github.com/docker/docker/pull/17782) -* Add support for network connect/disconnect to stopped containers [#18906](https://github.com/docker/docker/pull/18906) -* Add network ID to container inspect [#19323](https://github.com/docker/docker/pull/19323) -- Fix MTU issue where Docker would not start with two or more default routes [#18108](https://github.com/docker/docker/pull/18108) -- Fix duplicate IP addresses for containers [#18106](https://github.com/docker/docker/pull/18106) -- Fix an issue that sometimes prevented docker from creating the bridge network [#19338](https://github.com/docker/docker/pull/19338) -- Do not substitute 127.0.0.1 name server when using `--net=host` [#19573](https://github.com/docker/docker/pull/19573) - -### Logging - -+ New logging driver for Splunk [#16488](https://github.com/docker/docker/pull/16488) -+ Add support for syslog over TCP+TLS [#18998](https://github.com/docker/docker/pull/18998) -* Enhance `docker logs --since` and `--until` to support nanoseconds and time [#17495](https://github.com/docker/docker/pull/17495) -* Enhance AWS logs to auto-detect the region [#16640](https://github.com/docker/docker/pull/16640) - -### Volumes - -+ Add support to set the mount propagation mode for a volume [#17034](https://github.com/docker/docker/pull/17034) -* Add `ls` and `inspect` endpoints to the volume plugin API [#16534](https://github.com/docker/docker/pull/16534) - Existing plugins need to make use of these new APIs to satisfy users' expectations - For that, please use the new MIME type `application/vnd.docker.plugins.v1.2+json` [#19549](https://github.com/docker/docker/pull/19549) -- Fix data not being copied to named volumes [#19175](https://github.com/docker/docker/pull/19175) -- Fix issues preventing volume drivers from being containerized [#19500](https://github.com/docker/docker/pull/19500) -- Fix `docker volume ls --dangling=false` to now show all non-dangling volumes [#19671](https://github.com/docker/docker/pull/19671) -- Do not remove named volumes on container removal [#19568](https://github.com/docker/docker/pull/19568) -- Allow external volume drivers to host anonymous volumes [#19190](https://github.com/docker/docker/pull/19190) - -### Builder - -+ Add support for `**` in `.dockerignore` to wildcard multiple levels of directories [#17090](https://github.com/docker/docker/pull/17090) -- Fix handling of UTF-8 characters in Dockerfiles [#17055](https://github.com/docker/docker/pull/17055) -- Fix permissions problem when reading from STDIN [#19283](https://github.com/docker/docker/pull/19283) - -### Client - -+ Add support for overriding the API version to use via a `DOCKER_API_VERSION` environment variable [#15964](https://github.com/docker/docker/pull/15964) -- Fix a bug preventing Windows clients from logging in to Docker Hub [#19891](https://github.com/docker/docker/pull/19891)
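A minimal sketch of the `DOCKER_API_VERSION` override above; the version value is an example:

```bash
# Pin the client to an older API version, e.g. when talking to an older daemon.
DOCKER_API_VERSION=1.21 docker version
```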
### Misc - -* systemd: Set TasksMax in addition to LimitNPROC in the systemd service file [#19391](https://github.com/docker/docker/pull/19391) - -### Deprecations - -* Remove LXC support. The LXC driver was deprecated in Docker 1.8, and has now been removed [#17700](https://github.com/docker/docker/pull/17700) -* Remove the `--exec-driver` daemon flag, because it is no longer in use [#17700](https://github.com/docker/docker/pull/17700) -* Remove old deprecated single-dashed long CLI flags (such as `-rm`; use `--rm` instead) [#17724](https://github.com/docker/docker/pull/17724) -* Deprecate HostConfig at API container start [#17799](https://github.com/docker/docker/pull/17799) -* Deprecate docker packages for newly EOL'd Linux distributions: Fedora 21 and Ubuntu 15.04 (Vivid) [#18794](https://github.com/docker/docker/pull/18794), [#18809](https://github.com/docker/docker/pull/18809) -* Deprecate the `-f` flag for `docker tag` [#18350](https://github.com/docker/docker/pull/18350) - -## 1.9.1 (2015-11-21) - -### Runtime - -- Do not prevent the daemon from booting if images could not be restored (#17695) -- Force IPC mount to unmount on daemon shutdown/init (#17539) -- Turn IPC unmount errors into warnings (#17554) -- Fix `docker stats` performance regression (#17638) -- Clarify cryptic error message upon `docker logs` if `--log-driver=none` (#17767) -- Fix rare panics (#17639, #17634, #17703) -- Fix opaque (opq) whiteout problems for files with a dot prefix (#17819) -- devicemapper: try defaulting to xfs instead of ext4 for performance reasons (#17903, #17918) -- devicemapper: fix displayed fs in docker info (#17974) -- selinux: only relabel if the user requested it with the `z` option (#17450, #17834) -- Do not make network calls when normalizing names (#18014) - -### Client - -- Fix `docker login` on Windows (#17738) -- Fix bug with `docker inspect` output when not connected to the daemon (#17715) -- Fix `docker inspect -f {{.HostConfig.Dns}} somecontainer` (#17680) - -### Builder - -- Fix regression with symlink behavior in ADD/COPY (#17710) - -### Networking - -- Allow passing a network ID as an argument for `--net` (#17558) -- Fix connect to host and prevent disconnect from host for `host` network (#17476) -- Fix `--fixed-cidr` issue when the gateway IP falls in the ip-range and the ip-range is - not the first block in the network (#17853) -- Restore deterministic `IPv6` generation from `MAC` address on the default `bridge` network (#17890) -- Allow port-mapping only for endpoints created on docker run (#17858) -- Fixed an endpoint delete issue with a possible stale sbox (#18102) - -### Distribution - -- Correct parent chain in v2 push when v1Compatibility files on the disk are inconsistent (#18047) - -## 1.9.0 (2015-11-03) - -### Runtime - -+ `docker stats` now returns block IO metrics (#15005) -+ `docker stats` now details network stats per interface (#15786) -+ Add `ancestor=` filter to `docker ps --filter` flag to filter -containers based on their ancestor images (#14570) -+ Add `label=` filter to `docker ps --filter` to filter containers -based on label (#16530) -+ Add `--kernel-memory` flag to `docker run` (#14006) -+ Add `--message` flag to `docker import` allowing an optional -message to be specified (#15711) -+ Add `--privileged` flag to `docker exec` (#14113) -+ Add `--stop-signal` flag to `docker run` allowing the container -process stop signal to be overridden (#15307) -+ Add a new `unless-stopped` restart policy (#15348) -+ Inspecting 
an image now returns tags (#13185) -+ Add container size information to `docker inspect` (#15796) -+ Add `RepoTags` and `RepoDigests` fields to `/images/{name:.*}/json` (#17275) -- Remove the deprecated `/container/ps` endpoint from the API (#15972) -- Send and document correct HTTP codes for `/exec//start` (#16250) -- Share shm and mqueue between containers sharing the IPC namespace (#15862) -- Event stream now shows OOM status when `--oom-kill-disable` is set (#16235) -- Ensure special network files (/etc/hosts etc.) are read-only if bind-mounted -with the `ro` option (#14965) -- Improve `rmi` performance (#16890) -- Do not update /etc/hosts for the default bridge network, except for links (#17325) -- Fix conflict with duplicate container names (#17389) -- Fix an issue with incorrect template execution in `docker inspect` (#17284) -- DEPRECATE the `-c` short flag variant for `--cpu-shares` in docker run (#16271) - -### Client - -+ Allow `docker import` to import from local files (#11907) - -### Builder - -+ Add a `STOPSIGNAL` Dockerfile instruction allowing a different -stop signal to be set for the container process (#15307) -+ Add an `ARG` Dockerfile instruction and a `--build-arg` flag to `docker build` -that allow adding build-time environment variables (#15182) -- Improve cache miss performance (#16890) - -### Storage - -- devicemapper: Implement deferred deletion capability (#16381) - -### Networking - -+ `docker network` exits experimental and is part of the standard release (#16645) -+ New network top-level concept, with associated subcommands and API (#16645) - WARNING: the API is different from the experimental API -+ Support for multiple isolated/micro-segmented networks (#16645) -+ Built-in multihost networking using a VXLAN-based overlay driver (#14071) -+ Support for third-party network plugins (#13424) -+ Ability to dynamically connect containers to multiple networks (#16645) -+ Support for user-defined IP address management via pluggable IPAM drivers (#16910) -+ Add daemon flags `--cluster-store` and `--cluster-advertise` for built-in node discovery (#16229) -+ Add `--cluster-store-opt` for setting up TLS settings (#16644) -+ Add `--dns-opt` to the daemon (#16031) -- DEPRECATE the following container `NetworkSettings` fields in API v1.21: `EndpointID`, `Gateway`, - `GlobalIPv6Address`, `GlobalIPv6PrefixLen`, `IPAddress`, `IPPrefixLen`, `IPv6Gateway` and `MacAddress`. - Those are now specific to the `bridge` network. Use `NetworkSettings.Networks` to inspect - the networking settings of a container per network. - -### Volumes - -+ New top-level `volume` subcommand and API (#14242) -- Move API volume driver settings to host-specific config (#15798) -- Print an error message if the volume name is not unique (#16009) -- Ensure volumes created from Dockerfiles always use the local volume driver -(#15507) -- DEPRECATE auto-creating missing host paths for bind mounts (#16349) - -### Logging - -+ Add `awslogs` logging driver for Amazon CloudWatch (#15495) -+ Add generic `tag` log option to allow customizing container/image -information passed to driver (e.g. show container names) (#15384) -- Implement the `docker logs` endpoint for the journald driver (#13707) -- DEPRECATE driver-specific log tags (e.g. `syslog-tag`, etc.) (#15384)
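A minimal sketch of the generic `tag` log option above; it accepts template markup such as `{{.Name}}` and `{{.ID}}`:

```bash
# Log lines are tagged with the container name and short ID instead of the default.
docker run --log-driver=syslog --log-opt tag='{{.Name}}/{{.ID}}' alpine echo hi
```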
### Distribution - -+ `docker search` now works with partial names (#16509) -- Push optimization: avoid buffering to file (#15493) -- The daemon will display progress for images that were already being pulled -by another client (#15489) -- Only permissions required for the current action being performed are requested (#) -+ Renaming trust keys (and respective environment variables) from `offline` to -`root` and `tagging` to `repository` (#16894) -- DEPRECATE trust key environment variables -`DOCKER_CONTENT_TRUST_OFFLINE_PASSPHRASE` and -`DOCKER_CONTENT_TRUST_TAGGING_PASSPHRASE` (#16894) - -### Security - -+ Add SELinux profiles to the rpm package (#15832) -- Fix various issues with AppArmor profiles provided in the deb package -(#14609) -- Add AppArmor policy that prevents writing to /proc (#15571) - -## 1.8.3 (2015-10-12) - -### Distribution - -- Fix layer IDs leading to local graph poisoning (CVE-2014-8178) -- Fix manifest validation and parsing logic errors that allowed a pull-by-digest validation bypass (CVE-2014-8179) -+ Add `--disable-legacy-registry` to prevent a daemon from using a v1 registry - -## 1.8.2 (2015-09-10) - -### Distribution - -- Fix rare edge case of handling GNU LongLink and LongName entries. -- Fix ^C on docker pull. -- Fix docker pull issues on client disconnection. -- Fix issue that caused the daemon to panic when loggers weren't configured properly. -- Fix goroutine leak pulling images from registry V2. - -### Runtime - -- Fix a bug mounting cgroups for docker daemons running inside docker containers. -- Initialize log configuration properly. - -### Client - -- Handle `-q` flag in `docker ps` properly when there is a default format. - -### Networking - -- Fix several corner cases with netlink. - -### Contrib - -- Fix several issues with bash completion.
- -## 1.8.1 (2015-08-12) - -### Distribution - -* Fix a bug where pushing multiple tags would result in invalid images - -## 1.8.0 (2015-08-11) - -### Distribution - -+ Trusted pull, push and build, disabled by default -* Make tar layers deterministic between registries -* Don't allow deleting the image of running containers -* Check if a tag name to load is a valid digest -* Allow one-character repository names -* Add a more accurate error description for invalid tag names -* Make build cache ignore mtime - -### Cli - -+ Add support for DOCKER_CONFIG/--config to specify the config file dir -+ Add --type flag for the docker inspect command -+ Add formatting options to `docker ps` with `--format` -+ Replace `docker -d` with new subcommand `docker daemon` -* Zsh completion updates and improvements -* Add some missing events to bash completion -* Support daemon URLs with base paths in `docker -H` -* Validate status= filter to docker ps -* Display when a container is in --net=host in docker ps -* Extend docker inspect to export image metadata related to the graph driver -* Restore --default-gateway{,-v6} daemon options -* Add missing unpublished ports in docker ps -* Allow duration strings in `docker events` as --since/--until -* Expose more mount information in `docker inspect` - -### Runtime - -+ Add new Fluentd logging driver -+ Allow `docker import` to load from local files -+ Add logging driver for GELF via UDP -+ Allow copying files from the host to containers with `docker cp` -+ Promote volume drivers from experimental to master -+ Add rollover options to the json-file log driver, and the --log-driver-opts flag -+ Add memory swappiness tuning options -* Remove cgroup read-only flag when privileged -* Make /proc, /sys, & /dev readonly for readonly containers -* Add cgroup bind mount by default -* Overlay: Export metadata for container and image in `docker inspect` -* Devicemapper: external device activation -* Devicemapper: Compare UUID of base device on startup -* Remove RC4 from the list of registry cipher suites -* Add syslog-facility option -* LXC execdriver compatibility with recent LXC versions -* Mark LXC execdriver as deprecated (to be removed with the migration to runc) - -### Plugins - -* Separate plugin sockets and specs locations -* Allow TLS connections to plugins - -### Bug fixes - -- Add missing 'Names' field to /containers/json API output -- Make `docker rmi` of dangling images safe while pulling -- Devicemapper: Change default basesize to 100G -- Go Scheduler issue with sync.Mutex and gcc -- Fix issue where the Search API endpoint would panic due to empty AuthConfig -- Set image canonical names correctly -- Check dockerinit only if the lxc driver is used -- Fix ulimit usage of nproc -- Always attach STDIN if -i,--interactive is specified -- Show error messages when saving container state fails -- Fixed the incorrect assumption that --bridge=none disables networking -- Check for invalid port specifications in host configuration -- Fix endpoint leave failure for --net=host mode -- Fix goroutine leak in the stats API if the container is not running -- Check for apparmor file before reading it -- Fix DOCKER_TLS_VERIFY being ignored -- Set umask to the default on startup -- Correct the message when pausing or unpausing a non-running container -- Adjust disallowed CpuShares in container creation -- ZFS: correctly apply SELinux context -- Display an empty string instead of `<nil>` when the IP opt is nil -- `docker kill` returns an error when the container is not running -- Fix COPY/ADD quoted/json form -- Fix goroutine leak on logs -f 
with no output -- Remove panic in nat package on invalid hostport -- Fix container linking in Fedora 22 -- Fix error caused by using default gateways outside of the allocated range -- Format times in inspect command with a template as RFC3339Nano -- Make the registry client accept 2xx and 3xx HTTP status responses as successful -- Fix race issue that caused the daemon to crash when certain layer downloads failed in a specific order. -- Fix error when the docker ps format was not valid. -- Remove redundant IP forward check. -- Fix issue trying to push images to repository mirrors. -- Fix error cleaning up network endpoints when there is an initialization issue. - -## 1.7.1 (2015-07-14) - -#### Runtime - -- Fix default user spawning exec process with `docker exec` -- Make `--bridge=none` not configure the network bridge -- Publish networking stats properly -- Fix implicit devicemapper selection with static binaries -- Fix socket connections that hung intermittently -- Fix bridge interface creation on CentOS/RHEL 6.6 -- Fix local DNS lookups added to resolv.conf -- Fix copy command mounting volumes -- Fix read/write privileges in volumes mounted with --volumes-from - -#### Remote API - -- Fix unmarshalling of Command and Entrypoint -- Set limit for minimum client version supported -- Validate port specification -- Return proper errors when attach/reattach fail - -#### Distribution - -- Fix pulling private images -- Fix fallback between registry V2 and V1 - -## 1.7.0 (2015-06-16) - -#### Runtime -+ Experimental feature: support for out-of-process volume plugins -* The userland proxy can be disabled in favor of hairpin NAT using the daemon’s `--userland-proxy=false` flag -* The `exec` command supports the `-u|--user` flag to specify the new process owner -+ Default gateway for containers can be specified daemon-wide using the `--default-gateway` and `--default-gateway-v6` flags -+ The CPU CFS (Completely Fair Scheduler) quota can be set in `docker run` using `--cpu-quota` -+ Container block IO can be controlled in `docker run` using `--blkio-weight` -+ ZFS support -+ The `docker logs` command supports a `--since` argument -+ UTS namespace can be shared with the host with `docker run --uts=host` - -#### Quality -* Networking stack was entirely rewritten as part of the libnetwork effort -* Engine internals refactoring -* Volumes code was entirely rewritten to support the plugins effort -+ Sending SIGUSR1 to a daemon will dump all goroutine stacks without exiting - -#### Build -+ Support ${variable:-value} and ${variable:+value} syntax for environment variables -+ Support resource management flags `--cgroup-parent`, `--cpu-period`, `--cpu-quota`, `--cpuset-cpus`, `--cpuset-mems` -+ git context changes with branches and directories -* The .dockerignore file supports exclusion rules - -#### Distribution -+ Client support for v2 mirroring for the official registry - -#### Bugfixes -* Firewalld is now supported and will automatically be used when available -* mounting --device recursively - -## 1.6.2 (2015-05-13) - -#### Runtime -- Revert change prohibiting mounting into /sys - -## 1.6.1 (2015-05-07) - -#### Security -- Fix read/write /proc paths (CVE-2015-3630) -- Prohibit VOLUME /proc and VOLUME / (CVE-2015-3631) -- Fix opening of file-descriptor 1 (CVE-2015-3627) -- Fix symlink traversal on container respawn allowing local privilege escalation (CVE-2015-3629) -- Prohibit mount of /sys - -#### Runtime -- Update AppArmor policy to not allow mounts - -## 1.6.0 (2015-04-07) - -#### Builder -+ 

## 1.6.0 (2015-04-07)

#### Builder

+ Building images from an image ID
+ Build containers with resource constraints, i.e. `docker build --cpu-shares=100 --memory=1024m...`
+ `commit --change` to apply specified Dockerfile instructions while committing the image
+ `import --change` to apply specified Dockerfile instructions while importing the image
+ Builds no longer continue in the background when canceled with CTRL-C

#### Client

+ Windows Support

#### Runtime

+ Container and image Labels
+ `--cgroup-parent` for specifying a parent cgroup to place the container cgroup within
+ Logging drivers, `json-file`, `syslog`, or `none`
+ Pulling images by ID
+ `--ulimit` to set the ulimit on a container
+ `--default-ulimit` option on the daemon which applies to all created containers (and is overwritten by `--ulimit` on run)

## 1.5.0 (2015-02-10)

#### Builder

+ The Dockerfile to use for a given `docker build` can be specified with the `-f` flag
* Dockerfile and .dockerignore files can themselves be excluded as part of the .dockerignore file, thus preventing modifications to these files from invalidating the ADD or COPY instructions cache
* ADD and COPY instructions accept relative paths
* Dockerfile `FROM scratch` instruction is now interpreted as a no-base specifier
* Improve performance when exposing a large number of ports

#### Hack

+ Allow client-side only integration tests for Windows
* Include docker-py integration tests against the Docker daemon as part of our test suites

#### Packaging

+ Support for the new version of the registry HTTP API
* Speed up `docker push` for images with a majority of already existing layers
- Fixed contacting a private registry through a proxy

#### Remote API

+ A new endpoint will stream live container resource metrics and can be accessed with the `docker stats` command
+ Containers can be renamed using the new `rename` endpoint and the associated `docker rename` command
* Container `inspect` endpoint shows the ID of `exec` commands running in this container
* Container `inspect` endpoint shows the number of times Docker auto-restarted the container
* New types of events can be streamed by the `events` endpoint: 'OOM' (container died out of memory), 'exec_create', and 'exec_start'
- Fixed returned string fields which hold numeric characters incorrectly omitting surrounding double quotes

#### Runtime

+ Docker daemon has full IPv6 support
+ The `docker run` command can take the `--pid=host` flag to use the host PID namespace, which makes it possible, for example, to debug host processes using containerized debugging tools
+ The `docker run` command can take the `--read-only` flag to mount the container's root filesystem as read-only, which can be used in combination with volumes to force a container's processes to only write to locations that will be persisted
+ Container total memory usage can be limited for `docker run` using the `--memory-swap` flag
* Major stability improvements for the devicemapper storage driver
* Better integration with host system: containers will reflect changes to the host's `/etc/resolv.conf` file when restarted
* Better integration with host system: per-container iptables rules are moved to the DOCKER chain
- Fixed containers exiting on out of memory returning an invalid exit code

#### Other

* The HTTP_PROXY, HTTPS_PROXY, and NO_PROXY environment variables are properly taken into account by the client when connecting to the Docker daemon

## 1.4.1 (2014-12-15)

#### Runtime

- Fix issue with volumes-from and bind mounts not being honored after create

## 1.4.0 (2014-12-11)

#### Notable Features since 1.3.0

+ Set key=value labels to the daemon (displayed in `docker info`), applied with the new `-label` daemon flag
+ Add support for `ENV` in Dockerfile of the form: `ENV name=value name2=value2...`
+ New Overlayfs Storage Driver
+ `docker info` now returns an `ID` and `Name` field
+ Filter events by event name, container, or image
+ `docker cp` now supports copying from container volumes
- Fixed `docker tag`, so it honors `--force` when overriding a tag for an existing image

## 1.3.3 (2014-12-11)

#### Security

- Fix path traversal vulnerability in processing of absolute symbolic links (CVE-2014-9356)
- Fix decompression of xz image archives, preventing privilege escalation (CVE-2014-9357)
- Validate image IDs (CVE-2014-9358)

#### Runtime

- Fix an issue when image archives are being read slowly

#### Client

- Fix a regression related to stdin redirection
- Fix a regression with `docker cp` when the destination is the current directory

## 1.3.2 (2014-11-20)

#### Security

- Fix tar breakout vulnerability
* Extractions are now sandboxed with chroot
- Security options are no longer committed to images

#### Runtime

- Fix deadlock in `docker ps -f exited=1`
- Fix a bug when `--volumes-from` references a container that failed to start

#### Registry

+ `--insecure-registry` now accepts CIDR notation such as 10.1.0.0/16
* Private registries whose IPs fall in the 127.0.0.0/8 range do not need the `--insecure-registry` flag
- Skip the experimental registry v2 API when mirroring is enabled

## 1.3.1 (2014-10-28)

#### Security

* Prevent fallback to SSL protocols < TLS 1.0 for client, daemon and registry
+ Secure HTTPS connection to registries with certificate verification and without HTTP fallback unless `--insecure-registry` is specified

#### Runtime

- Fix issue where volumes would not be shared

#### Client

- Fix issue with `--iptables=false` not automatically setting `--ip-masq=false`
- Fix docker run output to non-TTY stdout

#### Builder

- Fix escaping `$` for environment variables
- Fix issue with lowercase `onbuild` Dockerfile instruction
- Restrict environment variable expansion to `ENV`, `ADD`, `COPY`, `WORKDIR`, `EXPOSE`, `VOLUME` and `USER`

## 1.3.0 (2014-10-14)

#### Notable features since 1.2.0

+ Docker `exec` allows you to run additional processes inside existing containers
+ Docker `create` gives you the ability to create a container via the CLI without executing a process
+ `--security-opts` options to allow users to customize container labels and apparmor profiles
+ Docker `ps` filters
- Wildcard support to COPY/ADD
+ Move production URLs to get.docker.com from get.docker.io
+ Allocate IP address on the bridge inside a valid CIDR
+ Use drone.io for PR and CI testing
+ Ability to set up an official registry mirror
+ Ability to save multiple images with docker `save`

## 1.2.0 (2014-08-20)

#### Runtime

+ Make /etc/hosts, /etc/resolv.conf and /etc/hostname editable at runtime
+ Auto-restart containers using policies
+ Use /var/lib/docker/tmp for large temporary files
+ `--cap-add` and `--cap-drop` to tweak which Linux capabilities you want
+ `--device` to use devices in containers

#### Client

+ `docker search` on private registries
+ Add `exited` filter to `docker ps --filter`
* `docker rm -f` now kills instead of stopping
+ Support for IPv6 addresses in the `--dns` flag

#### Proxy

+ Proxy instances in separate processes
* Small bug fix on UDP proxy

## 1.1.2 (2014-07-23)

#### Runtime

+ Fix port allocation for existing containers
+ Fix container restarts on daemon restart

#### Packaging

+ Fix /etc/init.d/docker issue on Debian

## 1.1.1 (2014-07-09)

#### Builder

* Fix issue with ADD

## 1.1.0 (2014-07-03)

#### Notable features since 1.0.1

+ Add `.dockerignore` support
+ Pause containers during `docker commit`
+ Add `--tail` to `docker logs`

#### Builder

+ Allow a tar file as context for `docker build`
* Fix issue with whitespace and multi-lines in `Dockerfiles`

#### Runtime

* Overall performance improvements
* Allow `/` as source of `docker run -v`
* Fix port allocation
* Fix bug in `docker save`
* Add links information to `docker inspect`

#### Client

* Improve command line parsing for `docker commit`

#### Remote API

* Improve status code for the `start` and `stop` endpoints

## 1.0.1 (2014-06-19)

#### Notable features since 1.0.0

* Enhance security for the LXC driver

#### Builder

* Fix `ONBUILD` instruction passed to grandchildren

#### Runtime

* Fix events subscription
* Fix /etc/hostname file with host networking
* Allow `-h` and `--net=none`
* Fix issue with hotplug devices in `--privileged`

#### Client

* Fix artifacts with events
* Fix a panic with empty flags
* Fix `docker cp` on Mac OS X

#### Miscellaneous

* Fix compilation on Mac OS X
* Fix several races

## 1.0.0 (2014-06-09)

#### Notable features since 0.12.0

* Production support

## 0.12.0 (2014-06-05)

#### Notable features since 0.11.0

* 40+ various improvements to stability, performance and usability
* New `COPY` Dockerfile instruction to allow copying a local file from the context into the container without ever extracting if the file is a tar file
* Inherit file permissions from the host on `ADD`
* New `pause` and `unpause` commands to allow pausing and unpausing of containers using the cgroup freezer
* The `images` command has a `-f`/`--filter` option to filter the list of images
* Add `--force-rm` to clean up after a failed build
* Standardize JSON keys in Remote API to CamelCase
* Pull from a docker run now assumes the `latest` tag if not specified
* Enhance security on Linux capabilities and device nodes

## 0.11.1 (2014-05-07)

#### Registry

- Fix push and pull to private registry

## 0.11.0 (2014-05-07)

#### Notable features since 0.10.0

* SELinux support for mount and process labels
* Linked containers can be accessed by hostname
* Use the new `--net` flag to allow advanced network configuration such as host networking so that containers can use the host's network interfaces
* Add a ping endpoint to the Remote API to do healthchecks of your docker daemon
* Logs can now be returned with an optional timestamp
* Docker now works with registries that support SHA-512
* Multiple registry endpoints are supported to allow registry mirrors

## 0.10.0 (2014-04-08)

#### Builder

- Fix printing multiple messages on a single line. Fixes broken output during builds.
- Follow symlinks inside container's root for ADD build instructions.
- Fix EXPOSE caching.

#### Documentation

- Add the new options of `docker ps` to the documentation.
- Add the options of `docker restart` to the documentation.
- Update daemon docs and help messages for --iptables and --ip-forward.
- Updated apt-cacher-ng docs example.
- Remove duplicate description of --mtu from docs.
- Add missing -t and -v for `docker images` to the docs.
- Add fixes to the CLI docs.
-- Update libcontainer docs. -- Update images in docs to remove references to AUFS and LXC. -- Update the nodejs_web_app in the docs to use the new epel RPM address. -- Fix external link on security of containers. -- Update remote API docs. -- Add image size to history docs. -- Be explicit about binding to all interfaces in redis example. -- Document DisableNetwork flag in the 1.10 remote api. -- Document that `--lxc-conf` is lxc only. -- Add chef usage documentation. -- Add example for an image with multiple for `docker load`. -- Explain what `docker run -a` does in the docs. - -#### Contrib -- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. -- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. -- Remove inotifywait hack from the upstart host-integration example because it's not necessary any more. -- Add check-config script to contrib. -- Fix fish shell completion. - -#### Hack -* Clean up "go test" output from "make test" to be much more readable/scannable. -* Exclude more "definitely not unit tested Go source code" directories from hack/make/test. -+ Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. -- Include contributed completions in Ubuntu PPA. -+ Add cli integration tests. -* Add tweaks to the hack scripts to make them simpler. - -#### Remote API -+ Add TLS auth support for API. -* Move git clone from daemon to client. -- Fix content-type detection in docker cp. -* Split API into 2 go packages. - -#### Runtime -* Support hairpin NAT without going through Docker server. -- devicemapper: succeed immediately when removing non-existent devices. -- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time and unlock while sleeping). -- devicemapper: increase timeout in waitClose to 10 seconds. -- devicemapper: ensure we shut down thin pool cleanly. -- devicemapper: pass info, rather than hash to activateDeviceIfNeeded, deactivateDevice, setInitialized, deleteDevice. -- devicemapper: avoid AB-BA deadlock. -- devicemapper: make shutdown better/faster. -- improve alpha sorting in mflag. -- Remove manual http cookie management because the cookiejar is being used. -- Use BSD raw mode on Darwin. Fixes nano, tmux and others. -- Add FreeBSD support for the client. -- Merge auth package into registry. -- Add deprecation warning for -t on `docker pull`. -- Remove goroutine leak on error. -- Update parseLxcInfo to comply with new lxc1.0 format. -- Fix attach exit on darwin. -- Improve deprecation message. -- Retry to retrieve the layer metadata up to 5 times for `docker pull`. -- Only unshare the mount namespace for execin. -- Merge existing config when committing. -- Disable daemon startup timeout. -- Fix issue #4681: add loopback interface when networking is disabled. -- Add failing test case for issue #4681. -- Send SIGTERM to child, instead of SIGKILL. -- Show the driver and the kernel version in `docker info` even when not in debug mode. -- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. -- Fix issue caused by the absence of /etc/apparmor.d. -- Don't leave empty cidFile behind when failing to create the container. -- Mount cgroups automatically if they're not mounted already. -- Use mock for search tests. -- Update to double-dash everywhere. -- Move .dockerenv parsing to lxc driver. -- Move all bind-mounts in the container inside the namespace. 
-- Don't use separate bind mount for container. -- Always symlink /dev/ptmx for libcontainer. -- Don't kill by pid for other drivers. -- Add initial logging to libcontainer. -* Sort by port in `docker ps`. -- Move networking drivers into runtime top level package. -+ Add --no-prune to `docker rmi`. -+ Add time since exit in `docker ps`. -- graphdriver: add build tags. -- Prevent allocation of previously allocated ports & prevent improve port allocation. -* Add support for --since/--before in `docker ps`. -- Clean up container stop. -+ Add support for configurable dns search domains. -- Add support for relative WORKDIR instructions. -- Add --output flag for docker save. -- Remove duplication of DNS entries in config merging. -- Add cpuset.cpus to cgroups and native driver options. -- Remove docker-ci. -- Promote btrfs. btrfs is no longer considered experimental. -- Add --input flag to `docker load`. -- Return error when existing bridge doesn't match IP address. -- Strip comments before parsing line continuations to avoid interpreting instructions as comments. -- Fix TestOnlyLoopbackExistsWhenUsingDisableNetworkOption to ignore "DOWN" interfaces. -- Add systemd implementation of cgroups and make containers show up as systemd units. -- Fix commit and import when no repository is specified. -- Remount /var/lib/docker as --private to fix scaling issue. -- Use the environment's proxy when pinging the remote registry. -- Reduce error level from harmless errors. -* Allow --volumes-from to be individual files. -- Fix expanding buffer in StdCopy. -- Set error regardless of attach or stdin. This fixes #3364. -- Add support for --env-file to load environment variables from files. -- Symlink /etc/mtab and /proc/mounts. -- Allow pushing a single tag. -- Shut down containers cleanly at shutdown and wait forever for the containers to shut down. This makes container shutdown on daemon shutdown work properly via SIGTERM. -- Don't throw error when starting an already running container. -- Fix dynamic port allocation limit. -- remove setupDev from libcontainer. -- Add API version to `docker version`. -- Return correct exit code when receiving signal and make SIGQUIT quit without cleanup. -- Fix --volumes-from mount failure. -- Allow non-privileged containers to create device nodes. -- Skip login tests because of external dependency on a hosted service. -- Deprecate `docker images --tree` and `docker images --viz`. -- Deprecate `docker insert`. -- Include base abstraction for apparmor. This fixes some apparmor related problems on Ubuntu 14.04. -- Add specific error message when hitting 401 over HTTP on push. -- Fix absolute volume check. -- Remove volumes-from from the config. -- Move DNS options to hostconfig. -- Update the apparmor profile for libcontainer. -- Add deprecation notice for `docker commit -run`. - -## 0.9.1 (2014-03-24) - -#### Builder -- Fix printing multiple messages on a single line. Fixes broken output during builds. - -#### Documentation -- Fix external link on security of containers. - -#### Contrib -- Fix init script cgroup mounting workarounds to be more similar to cgroupfs-mount and thus work properly. -- Add variable for DOCKER_LOGFILE to sysvinit and use append instead of overwrite in opening the logfile. - -#### Hack -- Generate md5 and sha256 hashes when building, and upload them via hack/release.sh. - -#### Remote API -- Fix content-type detection in `docker cp`. - -#### Runtime -- Use BSD raw mode on Darwin. Fixes nano, tmux and others. 
-- Only unshare the mount namespace for execin. -- Retry to retrieve the layer metadata up to 5 times for `docker pull`. -- Merge existing config when committing. -- Fix panic in monitor. -- Disable daemon startup timeout. -- Fix issue #4681: add loopback interface when networking is disabled. -- Add failing test case for issue #4681. -- Send SIGTERM to child, instead of SIGKILL. -- Show the driver and the kernel version in `docker info` even when not in debug mode. -- Always symlink /dev/ptmx for libcontainer. This fixes console related problems. -- Fix issue caused by the absence of /etc/apparmor.d. -- Don't leave empty cidFile behind when failing to create the container. -- Improve deprecation message. -- Fix attach exit on darwin. -- devicemapper: improve handling of devicemapper devices (add per device lock, increase sleep time, unlock while sleeping). -- devicemapper: succeed immediately when removing non-existent devices. -- devicemapper: increase timeout in waitClose to 10 seconds. -- Remove goroutine leak on error. -- Update parseLxcInfo to comply with new lxc1.0 format. - -## 0.9.0 (2014-03-10) - -#### Builder -- Avoid extra mount/unmount during build. This fixes mount/unmount related errors during build. -- Add error to docker build --rm. This adds missing error handling. -- Forbid chained onbuild, `onbuild from` and `onbuild maintainer` triggers. -- Make `--rm` the default for `docker build`. - -#### Documentation -- Download the docker client binary for Mac over https. -- Update the titles of the install instructions & descriptions. -* Add instructions for upgrading boot2docker. -* Add port forwarding example in OS X install docs. -- Attempt to disentangle repository and registry. -- Update docs to explain more about `docker ps`. -- Update sshd example to use a Dockerfile. -- Rework some examples, including the Python examples. -- Update docs to include instructions for a container's lifecycle. -- Update docs documentation to discuss the docs branch. -- Don't skip cert check for an example & use HTTPS. -- Bring back the memory and swap accounting section which was lost when the kernel page was removed. -- Explain DNS warnings and how to fix them on systems running and using a local nameserver. - -#### Contrib -- Add Tanglu support for mkimage-debootstrap. -- Add SteamOS support for mkimage-debootstrap. - -#### Hack -- Get package coverage when running integration tests. -- Remove the Vagrantfile. This is being replaced with boot2docker. -- Fix tests on systems where aufs isn't available. -- Update packaging instructions and remove the dependency on lxc. - -#### Remote API -* Move code specific to the API to the api package. -- Fix header content type for the API. Makes all endpoints use proper content type. -- Fix registry auth & remove ping calls from CmdPush and CmdPull. -- Add newlines to the JSON stream functions. - -#### Runtime -* Do not ping the registry from the CLI. All requests to registries flow through the daemon. -- Check for nil information return in the lxc driver. This fixes panics with older lxc versions. -- Devicemapper: cleanups and fix for unmount. Fixes two problems which were causing unmount to fail intermittently. -- Devicemapper: remove directory when removing device. Directories don't get left behind when removing the device. -* Devicemapper: enable skip_block_zeroing. Improves performance by not zeroing blocks. -- Devicemapper: fix shutdown warnings. Fixes shutdown warnings concerning pool device removal. -- Ensure docker cp stream is closed properly. 
Fixes problems with files not being copied by `docker cp`. -- Stop making `tcp://` default to `127.0.0.1:4243` and remove the default port for tcp. -- Fix `--run` in `docker commit`. This makes `docker commit --run` work again. -- Fix custom bridge related options. This makes custom bridges work again. -+ Mount-bind the PTY as container console. This allows tmux/screen to run. -+ Add the pure Go libcontainer library to make it possible to run containers using only features of the Linux kernel. -+ Add native exec driver which uses libcontainer and make it the default exec driver. -- Add support for handling extended attributes in archives. -* Set the container MTU to be the same as the host MTU. -+ Add simple sha256 checksums for layers to speed up `docker push`. -* Improve kernel version parsing. -* Allow flag grouping (`docker run -it`). -- Remove chroot exec driver. -- Fix divide by zero to fix panic. -- Rewrite `docker rmi`. -- Fix docker info with lxc 1.0.0. -- Fix fedora tty with apparmor. -* Don't always append env vars, replace defaults with vars from config. -* Fix a goroutine leak. -* Switch to Go 1.2.1. -- Fix unique constraint error checks. -* Handle symlinks for Docker's data directory and for TMPDIR. -- Add deprecation warnings for flags (-flag is deprecated in favor of --flag) -- Add apparmor profile for the native execution driver. -* Move system specific code from archive to pkg/system. -- Fix duplicate signal for `docker run -i -t` (issue #3336). -- Return correct process pid for lxc. -- Add a -G option to specify the group which unix sockets belong to. -+ Add `-f` flag to `docker rm` to force removal of running containers. -+ Kill ghost containers and restart all ghost containers when the docker daemon restarts. -+ Add `DOCKER_RAMDISK` environment variable to make Docker work when the root is on a ramdisk. - -## 0.8.1 (2014-02-18) - -#### Builder - -- Avoid extra mount/unmount during build. This removes an unneeded mount/unmount operation which was causing problems with devicemapper -- Fix regression with ADD of tar files. This stops Docker from decompressing tarballs added via ADD from the local file system -- Add error to `docker build --rm`. This adds a missing error check to ensure failures to remove containers are detected and reported - -#### Documentation - -* Update issue filing instructions -* Warn against the use of symlinks for Docker's storage folder -* Replace the Firefox example with an IceWeasel example -* Rewrite the PostgreSQL example using a Dockerfile and add more details to it -* Improve the OS X documentation - -#### Remote API - -- Fix broken images API for version less than 1.7 -- Use the right encoding for all API endpoints which return JSON -- Move remote api client to api/ -- Queue calls to the API using generic socket wait - -#### Runtime - -- Fix the use of custom settings for bridges and custom bridges -- Refactor the devicemapper code to avoid many mount/unmount race conditions and failures -- Remove two panics which could make Docker crash in some situations -- Don't ping registry from the CLI client -- Enable skip_block_zeroing for devicemapper. This stops devicemapper from always zeroing entire blocks -- Fix --run in `docker commit`. This makes docker commit store `--run` in the image configuration -- Remove directory when removing devicemapper device. This cleans up leftover mount directories -- Drop NET_ADMIN capability for non-privileged containers. 
Unprivileged containers can't change their network configuration
- Ensure `docker cp` stream is closed properly
- Avoid extra mount/unmount during container registration. This removes an unneeded mount/unmount operation which was causing problems with devicemapper
- Stop allowing tcp:// as a default tcp bind address which binds to 127.0.0.1:4243 and remove the default port
+ Mount-bind the PTY as container console. This allows tmux and screen to run in a container
- Clean up archive closing. This fixes and improves archive handling
- Fix engine tests on systems where temp directories are symlinked
- Add test methods for save and load
- Avoid temporarily unmounting the container when restarting it. This fixes a race for devicemapper during restart
- Support submodules when building from a GitHub repository
- Quote volume paths to allow spaces
- Fix remote tar ADD behavior. This fixes a regression which was causing Docker to extract tarballs

## 0.8.0 (2014-02-04)

#### Notable features since 0.7.0

* Images and containers can be removed much faster
* Building an image from source with docker build is now much faster
* The Docker daemon starts and stops much faster
* The memory footprint of many common operations has been reduced, by streaming files instead of buffering them in memory, fixing memory leaks, and fixing various suboptimal memory allocations
* Several race conditions were fixed, making Docker more stable under very high concurrency load. This makes Docker more stable and less likely to crash and reduces the memory footprint of many common operations
* All packaging operations are now built on the Go language's standard tar implementation, which is bundled with Docker itself. This makes packaging more portable across host distributions, and solves several issues caused by quirks and incompatibilities between different distributions of tar
* Docker can now create, remove and modify larger numbers of containers and images gracefully thanks to more aggressive releasing of system resources. For example, the storage driver API now allows Docker to do reference counting on mounts created by the drivers
* With the ongoing changes to the networking and execution subsystems of Docker, testing these areas has been a focus of the refactoring. By moving these subsystems into separate packages we can test, analyze, and monitor coverage and quality of these packages
* Many components have been separated into smaller sub-packages, each with a dedicated test suite. As a result the code is better-tested, more readable and easier to change

* The ADD instruction now supports caching, which avoids unnecessarily re-uploading the same source content again and again when it hasn't changed
* The new ONBUILD instruction adds to your image a “trigger” instruction to be executed at a later time, when the image is used as the base for another build
* Docker now ships with an experimental storage driver which uses the BTRFS filesystem for copy-on-write
* Docker is officially supported on Mac OS X
* The Docker daemon supports systemd socket activation

## 0.7.6 (2014-01-14)

#### Builder

* Do not follow symlinks outside of the build context

#### Runtime

- Remount bind mounts when ro is specified
* Use https for fetching docker version

#### Other

* Inline the test.docker.io fingerprint
* Add ca-certificates to packaging documentation

## 0.7.5 (2014-01-09)

#### Builder

* Disable compression for build.
More space usage but a much faster upload -- Fix ADD caching for certain paths -- Do not compress archive from git build - -#### Documentation - -- Fix error in GROUP add example -* Make sure the GPG fingerprint is inline in the documentation -* Give more specific advice on setting up signing of commits for DCO - -#### Runtime - -- Fix misspelled container names -- Do not add hostname when networking is disabled -* Return most recent image from the cache by date -- Return all errors from docker wait -* Add Content-Type Header "application/json" to GET /version and /info responses - -#### Other - -* Update DCO to version 1.1 -+ Update Makefile to use "docker:GIT_BRANCH" as the generated image name -* Update Travis to check for new 1.1 DCO version - -## 0.7.4 (2014-01-07) - -#### Builder - -- Fix ADD caching issue with . prefixed path -- Fix docker build on devicemapper by reverting sparse file tar option -- Fix issue with file caching and prevent wrong cache hit -* Use same error handling while unmarshalling CMD and ENTRYPOINT - -#### Documentation - -* Simplify and streamline Amazon Quickstart -* Install instructions use unprefixed Fedora image -* Update instructions for mtu flag for Docker on GCE -+ Add Ubuntu Saucy to installation -- Fix for wrong version warning on master instead of latest - -#### Runtime - -- Only get the image's rootfs when we need to calculate the image size -- Correctly handle unmapping UDP ports -* Make CopyFileWithTar use a pipe instead of a buffer to save memory on docker build -- Fix login message to say pull instead of push -- Fix "docker load" help by removing "SOURCE" prompt and mentioning STDIN -* Make blank -H option default to the same as no -H was sent -* Extract cgroups utilities to own submodule - -#### Other - -+ Add Travis CI configuration to validate DCO and gofmt requirements -+ Add Developer Certificate of Origin Text -* Upgrade VBox Guest Additions -* Check standalone header when pinging a registry server - -## 0.7.3 (2014-01-02) - -#### Builder - -+ Update ADD to use the image cache, based on a hash of the added content -* Add error message for empty Dockerfile - -#### Documentation - -- Fix outdated link to the "Introduction" on www.docker.io -+ Update the docs to get wider when the screen does -- Add information about needing to install LXC when using raw binaries -* Update Fedora documentation to disentangle the docker and docker.io conflict -* Add a note about using the new `-mtu` flag in several GCE zones -+ Add FrugalWare installation instructions -+ Add a more complete example of `docker run` -- Fix API documentation for creating and starting Privileged containers -- Add missing "name" parameter documentation on "/containers/create" -* Add a mention of `lxc-checkconfig` as a way to check for some of the necessary kernel configuration -- Update the 1.8 API documentation with some additions that were added to the docs for 1.7 - -#### Hack - -- Add missing libdevmapper dependency to the packagers documentation -* Update minimum Go requirement to a hard line at Go 1.2+ -* Many minor improvements to the Vagrantfile -+ Add ability to customize dockerinit search locations when compiling (to be used very sparingly only by packagers of platforms who require a nonstandard location) -+ Add coverprofile generation reporting -- Add `-a` to our Go build flags, removing the need for recompiling the stdlib manually -* Update Dockerfile to be more canonical and have less spurious warnings during build -- Fix some miscellaneous `docker pull` progress bar 
display issues -* Migrate more miscellaneous packages under the "pkg" folder -* Update TextMate highlighting to automatically be enabled for files named "Dockerfile" -* Reorganize syntax highlighting files under a common "contrib/syntax" directory -* Update install.sh script (https://get.docker.io/) to not fail if busybox fails to download or run at the end of the Ubuntu/Debian installation -* Add support for container names in bash completion - -#### Packaging - -+ Add an official Docker client binary for Darwin (Mac OS X) -* Remove empty "Vendor" string and added "License" on deb package -+ Add a stubbed version of "/etc/default/docker" in the deb package - -#### Runtime - -* Update layer application to extract tars in place, avoiding file churn while handling whiteouts -- Fix permissiveness of mtime comparisons in tar handling (since GNU tar and Go tar do not yet support sub-second mtime precision) -* Reimplement `docker top` in pure Go to work more consistently, and even inside Docker-in-Docker (thus removing the shell injection vulnerability present in some versions of `lxc-ps`) -+ Update `-H unix://` to work similarly to `-H tcp://` by inserting the default values for missing portions -- Fix more edge cases regarding dockerinit and deleted or replaced docker or dockerinit files -* Update container name validation to include '.' -- Fix use of a symlink or non-absolute path as the argument to `-g` to work as expected -* Update to handle external mounts outside of LXC, fixing many small mounting quirks and making future execution backends and other features simpler -* Update to use proper box-drawing characters everywhere in `docker images -tree` -* Move MTU setting from LXC configuration to directly use netlink -* Add `-S` option to external tar invocation for more efficient spare file handling -+ Add arch/os info to User-Agent string, especially for registry requests -+ Add `-mtu` option to Docker daemon for configuring MTU -- Fix `docker build` to exit with a non-zero exit code on error -+ Add `DOCKER_HOST` environment variable to configure the client `-H` flag without specifying it manually for every invocation - -## 0.7.2 (2013-12-16) - -#### Runtime - -+ Validate container names on creation with standard regex -* Increase maximum image depth to 127 from 42 -* Continue to move api endpoints to the job api -+ Add -bip flag to allow specification of dynamic bridge IP via CIDR -- Allow bridge creation when ipv6 is not enabled on certain systems -* Set hostname and IP address from within dockerinit -* Drop capabilities from within dockerinit -- Fix volumes on host when symlink is present the image -- Prevent deletion of image if ANY container is depending on it even if the container is not running -* Update docker push to use new progress display -* Use os.Lstat to allow mounting unix sockets when inspecting volumes -- Adjust handling of inactive user login -- Add missing defines in devicemapper for older kernels -- Allow untag operations with no container validation -- Add auth config to docker build - -#### Documentation - -* Add more information about Docker logging -+ Add RHEL documentation -* Add a direct example for changing the CMD that is run in a container -* Update Arch installation documentation -+ Add section on Trusted Builds -+ Add Network documentation page - -#### Other - -+ Add new cover bundle for providing code coverage reporting -* Separate integration tests in bundles -* Make Tianon the hack maintainer -* Update mkimage-debootstrap with more tweaks for keeping 
images small
* Use https to get the install script
* Remove vendored dotcloud/tar now that Go 1.2 has been released

## 0.7.1 (2013-12-05)

#### Documentation

+ Add @SvenDowideit as documentation maintainer
+ Add links example
+ Add documentation regarding ambassador pattern
+ Add Google Cloud Platform docs
+ Add Dockerfile best practices
* Update doc for RHEL
* Update doc for registry
* Update Postgres examples
* Update doc for Ubuntu install
* Improve remote API doc

#### Runtime

+ Add hostconfig to docker inspect
+ Implement `docker logs -f` to stream logs
+ Add env variable to disable kernel version warning
+ Add -format to `docker inspect`
+ Support bind-mount for files
- Fix bridge creation on RHEL
- Fix image size calculation
- Make sure iptables are called even if the bridge already exists
- Fix issue with stderr-only attach
- Remove init layer when destroying a container
- Fix same port binding on different interfaces
- `docker build` now returns the correct exit code
- Fix `docker port` to display the correct port
- `docker build` now checks that the Dockerfile exists client-side
- `docker attach` now returns the correct exit code
- Remove the name entry when the container does not exist

#### Registry

* Improve progress bars, add ETA for downloads
* Simultaneous pulls now wait for the first to finish instead of failing
- Tag only the top-layer image when pushing to the registry
- Fix issue with offline image transfer
- Fix issue preventing using ':' in password for registry

#### Other

+ Add pprof handler for debug
+ Create a Makefile
* Use stdlib tar that now includes the fix
* Improve make.sh test script
* Handle SIGQUIT on the daemon
* Disable verbose during tests
* Upgrade to go1.2 for official build
* Improve unit tests
* The test suite now runs all tests even if one fails
* Refactor C in Go (Devmapper)
- Fix OS X compilation

## 0.7.0 (2013-11-25)

#### Notable features since 0.6.0

* Storage drivers: choose from aufs, device-mapper, or vfs.
* Standard Linux support: docker now runs on unmodified Linux kernels and all major distributions.
* Links: compose complex software stacks by connecting containers to each other.
* Container naming: organize your containers by giving them memorable names.
* Advanced port redirects: specify port redirects per interface, or keep sensitive ports private.
* Offline transfer: push and pull images to the filesystem without losing information.
* Quality: numerous bugfixes and small usability improvements. Significant increase in test coverage.
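As a usage sketch of the 0.7.1 additions above (`docker logs -f` and the `-format` flag for `docker inspect`): clients of this era spelled long flags with a single dash, and the container name here is a placeholder:

```bash
# Follow a container's log stream (added in 0.7.1)
docker logs -f my_container

# Pull one field out of `docker inspect` with a Go template (added in 0.7.1)
docker inspect -format '{{ .NetworkSettings.IPAddress }}' my_container
```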

## 0.6.7 (2013-11-21)

#### Runtime

* Improve stability, fixes some race conditions
* Skip mounted volumes when deleting the volumes of a container
* Fix layer size computation: handle hard links correctly
* Use the work path for docker cp CONTAINER:PATH
* Fix tmp dir never being cleaned up
* Speed up docker ps
* More informative error message on name collisions
* Fix nameserver regex
* Always return long IDs
* Fix container restart race condition
* Keep published ports on docker stop; docker start
* Fix container networking on Fedora
* Correctly express "any address" to iptables
* Fix network setup when reconnecting to ghost container
* Prevent deletion if image is used by a running container
* Lock around read operations in graph

#### RemoteAPI

* Return full ID on docker rmi

#### Client

+ Add -tree option to images
+ Offline image transfer
* Exit with status 2 on usage error and display usage on stderr
* Do not forward SIGCHLD to container
* Use string timestamp for docker events -since

#### Other

* Update to go 1.2rc5
+ Add /etc/default/docker support to upstart

## 0.6.6 (2013-11-06)

#### Runtime

* Ensure container name on register
* Fix regression in /etc/hosts
+ Add lock around write operations in graph
* Check if port is valid
* Fix restart runtime error with ghost container networking
+ Add some more colors and animals to increase the pool of generated names
* Fix issues in docker inspect
+ Escape apparmor confinement
+ Set environment variables using a file
* Prevent docker insert from erasing something
+ Prevent DNS server conflicts in CreateBridgeIface
+ Validate bind mounts on the server side
+ Use parent image config in docker build

#### Client

+ Add -P flag to publish all exposed ports
+ Add -notrunc and -q flags to docker history
* Fix docker commit, tag and import usage
+ Add stars, trusted builds and library flags in docker search
* Fix docker logs with tty

#### RemoteAPI

* Make /events API send headers immediately
* Do not split last column in docker top
+ Add size to history

#### Other

+ Contrib: Desktop integration. Firefox use case.
-+ Dockerfile: bump to go1.2rc3 - -## 0.6.5 (2013-10-29) - -#### Runtime - -+ Containers can now be named -+ Containers can now be linked together for service discovery -+ 'run -a', 'start -a' and 'attach' can forward signals to the container for better integration with process supervisors -+ Automatically start crashed containers after a reboot -+ Expose IP, port, and proto as separate environment vars for container links -* Allow ports to be published to specific ips -* Prohibit inter-container communication by default -- Ignore ErrClosedPipe for stdin in Container.Attach -- Remove unused field kernelVersion -* Fix issue when mounting subdirectories of /mnt in container -- Fix untag during removal of images -* Check return value of syscall.Chdir when changing working directory inside dockerinit - -#### Client - -- Only pass stdin to hijack when needed to avoid closed pipe errors -* Use less reflection in command-line method invocation -- Monitor the tty size after starting the container, not prior -- Remove useless os.Exit() calls after log.Fatal - -#### Hack - -+ Add initial init scripts library and a safer Ubuntu packaging script that works for Debian -* Add -p option to invoke debootstrap with http_proxy -- Update install.sh with $sh_c to get sudo/su for modprobe -* Update all the mkimage scripts to use --numeric-owner as a tar argument -* Update hack/release.sh process to automatically invoke hack/make.sh and bail on build and test issues - -#### Other - -* Documentation: Fix the flags for nc in example -* Testing: Remove warnings and prevent mount issues -- Testing: Change logic for tty resize to avoid warning in tests -- Builder: Fix race condition in docker build with verbose output -- Registry: Fix content-type for PushImageJSONIndex method -* Contrib: Improve helper tools to generate debian and Arch linux server images - -## 0.6.4 (2013-10-16) - -#### Runtime - -- Add cleanup of container when Start() fails -* Add better comments to utils/stdcopy.go -* Add utils.Errorf for error logging -+ Add -rm to docker run for removing a container on exit -- Remove error messages which are not actually errors -- Fix `docker rm` with volumes -- Fix some error cases where an HTTP body might not be closed -- Fix panic with wrong dockercfg file -- Fix the attach behavior with -i -* Record termination time in state. 
-- Use empty string so TempDir uses the OS's temp dir automatically -- Make sure to close the network allocators -+ Autorestart containers by default -* Bump vendor kr/pty to commit 3b1f6487b `(syscall.O_NOCTTY)` -* lxc: Allow set_file_cap capability in container -- Move run -rm to the cli only -* Split stdout stderr -* Always create a new session for the container - -#### Testing - -- Add aggregated docker-ci email report -- Add cleanup to remove leftover containers -* Add nightly release to docker-ci -* Add more tests around auth.ResolveAuthConfig -- Remove a few errors in tests -- Catch errClosing error when TCP and UDP proxies are terminated -* Only run certain tests with TESTFLAGS='-run TestName' make.sh -* Prevent docker-ci to test closing PRs -* Replace panic by log.Fatal in tests -- Increase TestRunDetach timeout - -#### Documentation - -* Add initial draft of the Docker infrastructure doc -* Add devenvironment link to CONTRIBUTING.md -* Add `apt-get install curl` to Ubuntu docs -* Add explanation for export restrictions -* Add .dockercfg doc -* Remove Gentoo install notes about #1422 workaround -* Fix help text for -v option -* Fix Ping endpoint documentation -- Fix parameter names in docs for ADD command -- Fix ironic typo in changelog -* Various command fixes in postgres example -* Document how to edit and release docs -- Minor updates to `postgresql_service.rst` -* Clarify LGTM process to contributors -- Corrected error in the package name -* Document what `vagrant up` is actually doing -+ improve doc search results -* Cleanup whitespace in API 1.5 docs -* use angle brackets in MAINTAINER example email -* Update archlinux.rst -+ Changes to a new style for the docs. Includes version switcher. -* Formatting, add information about multiline json -* Improve registry and index REST API documentation -- Replace deprecated upgrading reference to docker-latest.tgz, which hasn't been updated since 0.5.3 -* Update Gentoo installation documentation now that we're in the portage tree proper -* Cleanup and reorganize docs and tooling for contributors and maintainers -- Minor spelling correction of protocoll -> protocol - -#### Contrib - -* Add vim syntax highlighting for Dockerfiles from @honza -* Add mkimage-arch.sh -* Reorganize contributed completion scripts to add zsh completion - -#### Hack - -* Add vagrant user to the docker group -* Add proper bash completion for "docker push" -* Add xz utils as a runtime dep -* Add cleanup/refactor portion of #2010 for hack and Dockerfile updates -+ Add contrib/mkimage-centos.sh back (from #1621), and associated documentation link -* Add several of the small make.sh fixes from #1920, and make the output more consistent and contributor-friendly -+ Add @tianon to hack/MAINTAINERS -* Improve network performance for VirtualBox -* Revamp install.sh to be usable by more people, and to use official install methods whenever possible (apt repo, portage tree, etc.) -- Fix contrib/mkimage-debian.sh apt caching prevention -+ Add Dockerfile.tmLanguage to contrib -* Configured FPM to make /etc/init/docker.conf a config file -* Enable SSH Agent forwarding in Vagrant VM -* Several small tweaks/fixes for contrib/mkimage-debian.sh - -#### Other - -- Builder: Abort build if mergeConfig returns an error and fix duplicate error message -- Packaging: Remove deprecated packaging directory -- Registry: Use correct auth config when logging in. 
- Registry: Fix the error message so it is the same as the regex

## 0.6.3 (2013-09-23)

#### Packaging

* Add 'docker' group on install for ubuntu package
* Update tar vendor dependency
* Download apt key over HTTPS

#### Runtime

- Only copy and change permissions on non-bindmount volumes
* Allow multiple volumes-from
- Fix HTTP imports from STDIN

#### Documentation

* Update section on extracting the docker binary after build
* Update development environment docs for new build process
* Remove 'base' image from documentation

#### Other

- Client: Fix detach issue
- Registry: Update regular expression to match index

## 0.6.2 (2013-09-17)

#### Runtime

+ Add domainname support
+ Implement image filtering with path.Match
* Remove unnecessary warnings
* Remove os/user dependency
* Only mount the hostname file when the config exists
* Handle signals within the `docker login` command
- UID and GID are now also applied to volumes
- `docker start` sets an error code upon error
- `docker run` sets the same error code as the process started

#### Builder

+ Add -rm option in order to remove intermediate containers
* Allow multiline for the RUN instruction

#### Registry

* Implement login with private registry
- Fix push issues

#### Other

+ Hack: Vendor all dependencies
* Remote API: Bump to v1.5
* Packaging: Break down hack/make.sh into small scripts, one per 'bundle': test, binary, ubuntu etc.
* Documentation: General improvements

## 0.6.1 (2013-08-23)

#### Registry

* Pass "meta" headers in API calls to the registry

#### Packaging

- Use correct upstart script with new build tool
- Use libffi-dev, don't build it from sources
- Remove duplicate mercurial install command
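To make the 0.6.2 builder additions above concrete, here is a small illustration; the single-dash flag spelling matches 0.6.x clients, and the image tag is a placeholder:

```bash
# Multiline RUN instructions were allowed in 0.6.2
cat > Dockerfile <<'EOF'
FROM ubuntu
RUN echo one && \
    echo two
EOF

# Remove intermediate containers after the build (-rm, added in 0.6.2)
docker build -rm -t myimage .
```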

## 0.6.0 (2013-08-22)

#### Runtime

+ Add lxc-conf flag to allow custom lxc options
+ Add an option to set the working directory
* Add Image name to LogEvent tests
+ Add -privileged flag and relevant tests, docs, and examples
* Add websocket support to /container/<id>/attach/ws
* Add warning when net.ipv4.ip_forwarding = 0
* Add hostname to environment
* Add last stable version in `docker version`
- Fix race conditions in parallel pull
- Fix Graph ByParent() to generate list of child images per parent image
- Fix typo: fmt.Sprint -> fmt.Sprintf
- Fix small \n error in docker build
* Fix to "Inject dockerinit at /.dockerinit"
* Fix #910: print user name in docker info output
* Use Go 1.1.2 for dockerbuilder
* Use ranged for loop on channels
- Use utils.ParseRepositoryTag instead of strings.Split(name, ":") in server.ImageDelete
- Improve CMD, ENTRYPOINT, and attach docs
- Improve connect message with socket error
- Load authConfig only when needed and fix useless WARNING
- Show tag used when image is missing
* Apply volumes-from before creating volumes
- Make docker run handle SIGINT/SIGTERM
- Prevent crash when .dockercfg is not readable
- Install script should be fetched over https, not http
* API, issue 1471: Use groups for socket permissions
- Correctly detect IPv4 forwarding
* Mount /dev/shm as a tmpfs
- Switch from http to https for get.docker.io
* Let userland proxy handle container-bound traffic
* Update the Docker CLI to specify a value for the "Host" header
- Change network range to avoid conflict with EC2 DNS
- Reduce connect and read timeout when pinging the registry
* Parallel pull
- Handle ip route showing mask-less IP addresses
* Allow ENTRYPOINT without CMD
- Always consider localhost as a domain name when parsing the FQN repos name
* Refactor checksum

#### Documentation

* Add MongoDB image example
* Add instructions for creating and using the docker group
* Add sudo to examples and installation to documentation
* Add ufw doc
* Add a reference to ps -a
* Add information about Docker's high-level tools over LXC
* Fix typo in docs for docker run -dns
* Fix a typo in the ubuntu installation guide
* Fix to docs regarding adding docker groups
* Update default -H docs
* Update readme with dependencies for building
* Update amazon.rst to explain that Vagrant is not necessary for running Docker on EC2
* PostgreSQL service example in documentation
* Suggest installing linux-headers by default
* Change the twitter handle
* Clarify Amazon EC2 installation
* 'Base' image is deprecated and should no longer be referenced in the docs
* Move note about officially supported kernel
- Fix the logo being squished in Safari

#### Builder

+ Add USER instruction to Dockerfile
+ Add workdir support for the Buildfile
* Add no cache for docker build
- Fix docker build and docker events output
- Only count known instructions as build steps
- Make sure the ENV instruction within a build performs a commit each time
- Forbid certain paths within docker build ADD
- Repository name (and optionally a tag) in build usage
- Make sure ADD will create everything in 0755

#### Remote API

* Sort Images by most recent creation date
* Reworking opaque requests in registry module
* Add image name in /events
* Use mime pkg to parse Content-Type
* 650 http utils and user agent field

#### Hack

+ Bash Completion: Limit commands to containers of a relevant state
* Add docker dependencies coverage testing into docker-ci

#### Packaging

+ Docker-brew 0.5.2 support and memory footprint reduction
* Add new docker dependencies into docker-ci
- Revert "docker.upstart: avoid spawning a `sh` process"
+ Docker-brew and Docker standard library
+ Release docker with docker
* Fix the upstart script generated by get.docker.io
* Enable the docs to generate manpages
* Revert Bind daemon to 0.0.0.0 in Vagrant
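A hedged sketch of the 0.6.0 runtime flags above; 0.6.0 used single-dash flags, and the lxc option shown is just an example value:

```bash
# Run a privileged container (-privileged, added in 0.6.0)
docker run -privileged ubuntu bash

# Pass a custom LXC option through -lxc-conf (added in 0.6.0)
docker run -lxc-conf="lxc.utsname=docker" ubuntu hostname
```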

#### Registry

* Improve auth push
* Registry unit tests + mock registry

#### Tests

* Improve TestKillDifferentUser to prevent timeout on buildbot
- Fix typo in TestBindMounts (runContainer called without image)
* Improve TestGetContainersTop so it does not rely on sleep
* Relax the lo interface test to allow iface index != 1
* Add registry functional test to docker-ci
* Add some tests in server and utils

#### Other

* Contrib: bash completion script
* Client: Add docker cp command and copy API endpoint to copy container files/folders to the host
* Don't read from stdout when only attached to stdin

## 0.5.3 (2013-08-13)

#### Runtime

* Use docker group for socket permissions
- Spawn shell within upstart script
- Handle ip route showing mask-less IP addresses
- Add hostname to environment

#### Builder

- Make sure the ENV instruction within a build performs a commit each time

## 0.5.2 (2013-08-08)

* Builder: Forbid certain paths within docker build ADD
- Runtime: Change network range to avoid conflict with EC2 DNS
* API: Change daemon to listen on unix socket by default

## 0.5.1 (2013-07-30)

#### Runtime

+ Add `ps` args to `docker top`
+ Add support for container ID files (pidfile-like)
+ Add container=lxc in default env
+ Support networkless containers with `docker run -n` and `docker -d -b=none`
* Stdout/stderr logs are now stored in the same file as JSON
* Allocate a /16 IP range by default, with fallback to /24. Try 12 ranges instead of 3.
* Change .dockercfg format to json and support multiple auth remotes
- Do not override volumes from config
- Fix issue with EXPOSE override

#### API

+ Docker client now sets useragent (RFC 2616)
+ Add /events endpoint

#### Builder

+ ADD command now understands URLs
+ CmdAdd and CmdEnv now respect Dockerfile-set ENV variables
- Create directories with 755 instead of 700 within ADD instruction

#### Hack

* Simplify unit tests with helpers
* Improve docker.upstart event
* Add coverage testing into docker-ci

## 0.5.0 (2013-07-17)

#### Runtime

+ List all processes running inside a container with 'docker top'
+ Host directories can be mounted as volumes with 'docker run -v'
+ Containers can expose public UDP ports (e.g. '-p 123/udp')
+ Optionally specify an exact public port (e.g. '-p 80:4500')
* 'docker login' supports additional options
- Don't save a container's hostname when committing an image

#### Registry

+ New image naming scheme inspired by Go packaging convention allows arbitrary combinations of registries
- Fix issues when uploading images to a private registry

#### Builder

+ ENTRYPOINT instruction sets a default binary entry point to a container
+ VOLUME instruction marks a part of the container as persistent data
* 'docker build' displays the full output of a build by default

## 0.4.8 (2013-07-01)

+ Builder: New build operation ENTRYPOINT adds an executable entry point to the container.
- Runtime: Fix a bug which caused 'docker run -d' to no longer print the container ID.
-- Tests: Fix issues in the test suite - -## 0.4.7 (2013-06-28) - -#### Remote API - -* The progress bar updates faster when downloading and uploading large files -- Fix a bug in the optional unix socket transport - -#### Runtime - -* Improve detection of kernel version -+ Host directories can be mounted as volumes with 'docker run -b' -- fix an issue when only attaching to stdin -* Use 'tar --numeric-owner' to avoid uid mismatch across multiple hosts - -#### Hack - -* Improve test suite and dev environment -* Remove dependency on unit tests on 'os/user' - -#### Other - -* Registry: easier push/pull to a custom registry -+ Documentation: add terminology section - -## 0.4.6 (2013-06-22) - -- Runtime: fix a bug which caused creation of empty images (and volumes) to crash. - -## 0.4.5 (2013-06-21) - -+ Builder: 'docker build git://URL' fetches and builds a remote git repository -* Runtime: 'docker ps -s' optionally prints container size -* Tests: improved and simplified -- Runtime: fix a regression introduced in 0.4.3 which caused the logs command to fail. -- Builder: fix a regression when using ADD with single regular file. - -## 0.4.4 (2013-06-19) - -- Builder: fix a regression introduced in 0.4.3 which caused builds to fail on new clients. - -## 0.4.3 (2013-06-19) - -#### Builder - -+ ADD of a local file will detect tar archives and unpack them -* ADD improvements: use tar for copy + automatically unpack local archives -* ADD uses tar/untar for copies instead of calling 'cp -ar' -* Fix the behavior of ADD to be (mostly) reverse-compatible, predictable and well-documented. -- Fix a bug which caused builds to fail if ADD was the first command -* Nicer output for 'docker build' - -#### Runtime - -* Remove bsdtar dependency -* Add unix socket and multiple -H support -* Prevent rm of running containers -* Use go1.1 cookiejar -- Fix issue detaching from running TTY container -- Forbid parallel push/pull for a single image/repo. Fixes #311 -- Fix race condition within Run command when attaching. - -#### Client - -* HumanReadable ProgressBar sizes in pull -* Fix docker version`s git commit output - -#### API - -* Send all tags on History API call -* Add tag lookup to history command. 
-
-#### Documentation
-
-- Fix missing command in IRC bouncer example
-
-## 0.4.2 (2013-06-17)
-
-- Packaging: Bumped version to work around an Ubuntu bug
-
-## 0.4.1 (2013-06-17)
-
-#### Remote API
-
-+ Add flag to enable cross domain requests
-+ Add images and containers sizes in docker ps and docker images
-
-#### Runtime
-
-+ Configure dns configuration host-wide with 'docker -d -dns'
-+ Detect faulty DNS configuration and replace it with a public default
-+ Allow docker run <name>:<id>
-+ You can now specify a public port (e.g. -p 80:4500)
-* Improve image removal to garbage-collect unreferenced parents
-
-#### Client
-
-* Allow multiple params in inspect
-* Print the container ID before the hijack in `docker run`
-
-#### Registry
-
-* Add regexp check on repo's name
-* Move auth to the client
-- Remove login check on pull
-
-#### Other
-
-* Vagrantfile: Add the REST API port to the Vagrantfile's port_forward
-* Upgrade to Go 1.1
-- Builder: don't ignore the last line in a Dockerfile when it doesn't end with \n
-
-## 0.4.0 (2013-06-03)
-
-#### Builder
-
-+ Introducing Builder
-+ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
-
-#### Remote API
-
-+ Introducing Remote API
-+ Control Docker programmatically using a simple HTTP/JSON API
-
-#### Runtime
-
-* Various reliability and usability improvements
-
-## 0.3.4 (2013-05-30)
-
-#### Builder
-
-+ 'docker build' builds a container, layer by layer, from a source repository containing a Dockerfile
-+ 'docker build -t FOO' applies the tag FOO to the newly built container.
-
-#### Runtime
-
-+ Interactive TTYs correctly handle window resize
-* Fix how configuration is merged between layers
-
-#### Remote API
-
-+ Split stdout and stderr on 'docker run'
-+ Optionally listen on a different IP and port (use at your own risk)
-
-#### Documentation
-
-* Improve install instructions.
-
-## 0.3.3 (2013-05-23)
-
-- Registry: Fix push regression
-- Various bugfixes
-
-## 0.3.2 (2013-05-09)
-
-#### Registry
-
-* Improve the checksum process
-* Use the size to have a good progress bar while pushing
-* Use the actual archive if it exists in order to speed up the push
-- Fix error 400 on push
-
-#### Runtime
-
-* Store the actual archive on commit
-
-## 0.3.1 (2013-05-08)
-
-#### Builder
-
-+ Implement the autorun capability within docker builder
-+ Add caching to docker builder
-+ Add support for docker builder with native API as top level command
-+ Implement ENV within docker builder
-- Check that the command exists prior to creation, and add unit tests for this case
-* Use any whitespace instead of tabs
-
-#### Runtime
-
-+ Add the Go version to debug info
-* Kernel version - don't show the dash if flavor is empty
-
-#### Registry
-
-+ Add docker search top level command in order to search a repository
-- Fix pull for official images with a specific tag
-- Fix issue when logging in with a different user and trying to push
-* Improve checksum - async calculation
-
-#### Images
-
-+ Output graph of images to dot (graphviz)
-- Fix ByParent function
-
-#### Documentation
-
-+ New introduction and high-level overview
-+ Add the documentation for docker builder
-- CSS fix for docker documentation to make REST API docs look better.
-- Fix CouchDB example page header mistake
-- Fix README formatting
-* Update www.docker.io website.
-
-#### Other
-
-+ Website: new high-level overview
-- Makefile: Swap "go get" for "go get -d", especially to compile on go1.1rc
-* Packaging: Ubuntu packaging; issue #510: use the golang-stable PPA package to build docker
-
-## 0.3.0 (2013-05-06)
-
-#### Runtime
-
-- Fix the command existence check
-- strings.Split may return an empty string on no match
-- Fix an index out of range crash if cgroup memory is not
-
-#### Documentation
-
-* Various improvements
-+ New example: sharing data between two CouchDB databases
-
-#### Other
-
-* Vagrant: Use only one deb line in /etc/apt
-+ Registry: Implement the new registry
-
-## 0.2.2 (2013-05-03)
-
-+ Support for data volumes ('docker run -v=PATH')
-+ Share data volumes between containers ('docker run -volumes-from')
-+ Improve documentation
-* Upgrade to Go 1.0.3
-* Various upgrades to the dev environment for contributors
-
-## 0.2.1 (2013-05-01)
-
-+ 'docker commit -run' bundles a layer with default runtime options: command, ports, etc.
-* Improve install process on Vagrant
-+ New Dockerfile operation: "maintainer"
-+ New Dockerfile operation: "expose"
-+ New Dockerfile operation: "cmd"
-+ Contrib script to build a Debian base layer
-+ 'docker -d -r': restart crashed containers at daemon startup
-* Runtime: improve test coverage
-
-## 0.2.0 (2013-04-23)
-
-- Runtime: ghost containers can be killed and waited for
-* Documentation: update install instructions
-- Packaging: fix Vagrantfile
-- Development: automate releasing binaries and ubuntu packages
-+ Add a changelog
-- Various bugfixes
-
-## 0.1.8 (2013-04-22)
-
-- Dynamically detect cgroup capabilities
-- Issue stability warning on kernels <3.8
-- 'docker push' buffers on disk instead of memory
-- Fix 'docker diff' for removed files
-- Fix 'docker stop' for ghost containers
-- Fix handling of pidfile
-- Various bugfixes and stability improvements
-
-## 0.1.7 (2013-04-18)
-
-- Container ports are available on localhost
-- 'docker ps' shows allocated TCP ports
-- Contributors can run 'make hack' to start a continuous integration VM
-- Streamline ubuntu packaging & uploading
-- Various bugfixes and stability improvements
-
-## 0.1.6 (2013-04-17)
-
-- Record the author of an image with 'docker commit -author'
-
-## 0.1.5 (2013-04-17)
-
-- Disable standalone mode
-- Use a custom DNS resolver with 'docker -d -dns'
-- Detect ghost containers
-- Improve diagnosis of missing system capabilities
-- Allow disabling memory limits at compile time
-- Add debian packaging
-- Documentation: installing on Arch Linux
-- Documentation: running Redis on docker
-- Fix lxc 0.9 compatibility
-- Automatically load aufs module
-- Various bugfixes and stability improvements
-
-## 0.1.4 (2013-04-09)
-
-- Full support for TTY emulation
-- Detach from a TTY session with the escape sequence `C-p C-q`
-- Various bugfixes and stability improvements
-- Minor UI improvements
-- Automatically create our own bridge interface 'docker0'
-
-## 0.1.3 (2013-04-04)
-
-- Choose TCP frontend port with '-p :PORT'
-- Layer format is versioned
-- Major reliability improvements to the process manager
-- Various bugfixes and stability improvements
-
-## 0.1.2 (2013-04-03)
-
-- Set container hostname with 'docker run -h'
-- Selective attach at run with 'docker run -a [stdin[,stdout[,stderr]]]'
-- Various bugfixes and stability improvements
-- UI polish
-- Progress bar on push/pull
-- Use XZ compression by default
-- Make IP allocator lazy
-
-## 0.1.1 (2013-03-31)
-
-- Display shorthand IDs for convenience
-- Stabilize process management
-- Layers can include a commit message
-- Simplified 'docker attach'
-- Fix support for re-attaching
-- Various bugfixes and stability improvements
-- Auto-download at run
-- Auto-login on push
-- Beefed up documentation
-
-## 0.1.0 (2013-03-23)
-
-Initial public release
-
-- Implement registry in order to push/pull images
-- TCP port allocation
-- Fix termcaps on Linux
-- Add documentation
-- Add Vagrant support with Vagrantfile
-- Add unit tests
-- Add repository/tags to ease image management
-- Improve the layer implementation
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
deleted file mode 100644
index 386180597c..0000000000
--- a/CONTRIBUTING.md
+++ /dev/null
@@ -1,400 +0,0 @@
-# Contributing to Docker
-
-Want to hack on Docker? Awesome! We have a contributor's guide that explains
-[setting up a Docker development environment and the contribution
-process](https://docs.docker.com/opensource/project/who-written-for/).
-
-[![Contributors guide](docs/static_files/contributors.png)](https://docs.docker.com/opensource/project/who-written-for/)
-
-This page contains information about reporting issues as well as some tips and
-guidelines useful to experienced open source contributors. Finally, make sure
-you read our [community guidelines](#docker-community-guidelines) before you
-start participating.
-
-## Topics
-
-* [Reporting Security Issues](#reporting-security-issues)
-* [Design and Cleanup Proposals](#design-and-cleanup-proposals)
-* [Reporting Issues](#reporting-other-issues)
-* [Quick Contribution Tips and Guidelines](#quick-contribution-tips-and-guidelines)
-* [Community Guidelines](#docker-community-guidelines)
-
-## Reporting security issues
-
-The Docker maintainers take security seriously. If you discover a security
-issue, please bring it to their attention right away!
-
-Please **DO NOT** file a public issue, instead send your report privately to
-[security@docker.com](mailto:security@docker.com).
-
-Security reports are greatly appreciated and we will publicly thank you for it.
-We also like to send gifts—if you're into Docker schwag, make sure to let
-us know. We currently do not offer a paid security bounty program, but are not
-ruling it out in the future.
-
-
-## Reporting other issues
-
-A great way to contribute to the project is to send a detailed report when you
-encounter an issue. We always appreciate a well-written, thorough bug report,
-and will thank you for it!
-
-Check that [our issue database](https://github.com/docker/docker/issues)
-doesn't already include that problem or suggestion before submitting an issue.
-If you find a match, you can use the "subscribe" button to get notified of
-updates. Do *not* leave random "+1" or "I have this too" comments, as they
-only clutter the discussion, and don't help resolve it. However, if you
-have ways to reproduce the issue or have additional information that may help
-resolve the issue, please leave a comment.
-
-When reporting issues, always include:
-
-* The output of `docker version`.
-* The output of `docker info`.
-
-Also include the steps required to reproduce the problem if possible and
-applicable. This information will help us review and fix your issue faster.
-When sending lengthy log files, consider posting them as a gist (https://gist.github.com).
-Don't forget to remove sensitive data from your log files before posting (you can
-replace those parts with "REDACTED").
-
-## Quick contribution tips and guidelines
-
-This section gives the experienced contributor some tips and guidelines.
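-
-First, a quick sketch of the issue-report checklist from the previous section;
-the file names here are just examples, not a required layout:
-
-```bash
-# Capture the two outputs that every report should include.
-docker version > docker-version.txt
-docker info > docker-info.txt
-
-# Review and redact anything sensitive before posting the files as a gist.
-grep -n 'Username' docker-info.txt
-```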
-
-### Pull requests are always welcome
-
-Not sure if that typo is worth a pull request? Found a bug and know how to fix
-it? Do it! We will appreciate it. Any significant improvement should be
-documented as [a GitHub issue](https://github.com/docker/docker/issues) before
-anybody starts working on it.
-
-We are always thrilled to receive pull requests. We do our best to process them
-quickly. If your pull request is not accepted on the first try,
-don't get discouraged! Our contributor's guide explains [the review process we
-use for simple changes](https://docs.docker.com/opensource/workflow/make-a-contribution/).
-
-### Design and cleanup proposals
-
-You can propose new designs for existing Docker features. You can also design
-entirely new features. We really appreciate contributors who want to refactor or
-otherwise clean up our project. For information on making these types of
-contributions, see [the advanced contribution
-section](https://docs.docker.com/opensource/workflow/advanced-contributing/) in
-the contributors guide.
-
-We try hard to keep Docker lean and focused. Docker can't do everything for
-everybody. This means that we might decide against incorporating a new feature.
-However, there might be a way to implement that feature *on top of* Docker.
-
-### Talking to other Docker users and contributors
-
-| Channel                   | Description |
-|---------------------------|-------------|
-| Forums                    | A public forum for users to discuss questions and explore current design patterns and best practices about Docker and related projects in the Docker ecosystem. To participate, just log in with your Docker Hub account on https://forums.docker.com. |
-| Internet Relay Chat (IRC) | IRC is a direct line to our most knowledgeable Docker users; we have both the #docker and #docker-dev group on irc.freenode.net. IRC is a rich chat protocol but it can overwhelm new users. You can search our chat archives. Read our IRC quickstart guide for an easy way to get started. |
-| Google Group              | The docker-dev group is for contributors and other people contributing to the Docker project. You can join it without a Google account by sending an email to docker-dev+subscribe@googlegroups.com. After receiving the join-request message, simply reply to it to confirm your subscription. |
-| Twitter                   | You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories. |
-| Stack Overflow            | Stack Overflow has over 17000 Docker questions listed. We regularly monitor Docker questions there, and so do many other knowledgeable Docker users. |
-
-### Conventions
-
-Fork the repository and make changes on your fork in a feature branch:
-
-- If it's a bug fix branch, name it XXXX-something where XXXX is the number of
-  the issue.
-- If it's a feature branch, create an enhancement issue to announce
-  your intentions, and name it XXXX-something where XXXX is the number of the
-  issue.
-
-Submit unit tests for your changes. Go has a great test framework built in; use
-it! Take a look at existing tests for inspiration. [Run the full test
-suite](https://docs.docker.com/opensource/project/test-and-docs/) on your branch before
-submitting a pull request.
-
-Update the documentation when creating or modifying features. Test your
-documentation changes for clarity, concision, and correctness, as well as a
-clean documentation build. See our contributors guide for [our style
-guide](https://docs.docker.com/opensource/doc-style) and instructions on [building
-the documentation](https://docs.docker.com/opensource/project/test-and-docs/#build-and-test-the-documentation).
-
-Write clean code. Universally formatted code promotes ease of writing, reading,
-and maintenance. Always run `gofmt -s -w file.go` on each changed file before
-committing your changes. Most editors have plug-ins that do this automatically.
-
-Pull request descriptions should be as clear as possible and include a reference
-to all the issues that they address.
-
-Commit messages must start with a capitalized and short summary (max. 50 chars)
-written in the imperative, followed by an optional, more detailed explanatory
-text which is separated from the summary by an empty line.
-
-Code review comments may be added to your pull request. Discuss, then make the
-suggested modifications and push additional commits to your feature branch. Post
-a comment after pushing. New commits show up in the pull request automatically,
-but the reviewers are notified only when you comment.
-
-Pull requests must be cleanly rebased on top of master without multiple branches
-mixed into the PR.
-
-**Git tip**: If your PR no longer merges cleanly, use `git rebase master` in your
-feature branch to update your pull request rather than `git merge master`.
-
-Before you make a pull request, squash your commits into logical units of work
-using `git rebase -i` and `git push -f`. A logical unit of work is a consistent
-set of patches that should be reviewed together: for example, upgrading the
-version of a vendored dependency and taking advantage of its now available new
-feature constitute two separate units of work. Implementing a new function and
-calling it in another file constitute a single logical unit of work. The vast
-majority of submissions should have a single commit, so if in doubt: squash
-down to one.
-
-After every commit, [make sure the test suite passes](https://docs.docker.com/opensource/project/test-and-docs/).
-Include documentation changes in the same pull request so that a revert would
-remove all traces of the feature or fix.
-
-Include an issue reference like `Closes #XXXX` or `Fixes #XXXX` in commits that
-close an issue. Including references automatically closes the issue on a merge.
-
-Please do not add yourself to the `AUTHORS` file, as it is regenerated regularly
-from the Git history.
-
-Please see the [Coding Style](#coding-style) for further guidelines.
-
-A condensed sketch of this whole workflow appears just after the merge-approval
-note below.
-
-### Merge approval
-
-Docker maintainers use LGTM (Looks Good To Me) in comments on the code review to
-indicate acceptance.
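-
-As promised above, a condensed sketch of one contribution cycle. The issue
-number 1234, the branch name, the file name, and the commit message are all
-illustrative, not taken from a real issue:
-
-```bash
-# Branch off master, named after the issue you are fixing.
-git checkout -b 1234-fix-logs-timeout master
-
-# ...hack, then format and test what you changed...
-gofmt -s -w daemon/logs.go
-go test ./daemon/...
-
-# Capitalized, imperative, <=50-char summary; blank line; body; sign-off.
-git commit -s -m 'Fix timeout handling in docker logs
-
-Closes #1234'
-
-# Squash fixup commits into logical units, then update your fork.
-git rebase -i master
-git push -f origin 1234-fix-logs-timeout
-```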
-
-A change requires LGTMs from an absolute majority of the maintainers of each
-component affected. For example, if a change affects `docs/` and `registry/`, it
-needs an absolute majority from the maintainers of `docs/` AND, separately, an
-absolute majority of the maintainers of `registry/`.
-
-For more details, see the [MAINTAINERS](MAINTAINERS) page.
-
-### Sign your work
-
-The sign-off is a simple line at the end of the explanation for the patch. Your
-signature certifies that you wrote the patch or otherwise have the right to pass
-it on as an open-source patch. The rules are pretty simple: if you can certify
-the below (from [developercertificate.org](http://developercertificate.org/)):
-
-```
-Developer Certificate of Origin
-Version 1.1
-
-Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
-660 York Street, Suite 102,
-San Francisco, CA 94110 USA
-
-Everyone is permitted to copy and distribute verbatim copies of this
-license document, but changing it is not allowed.
-
-Developer's Certificate of Origin 1.1
-
-By making a contribution to this project, I certify that:
-
-(a) The contribution was created in whole or in part by me and I
-    have the right to submit it under the open source license
-    indicated in the file; or
-
-(b) The contribution is based upon previous work that, to the best
-    of my knowledge, is covered under an appropriate open source
-    license and I have the right under that license to submit that
-    work with modifications, whether created in whole or in part
-    by me, under the same open source license (unless I am
-    permitted to submit under a different license), as indicated
-    in the file; or
-
-(c) The contribution was provided directly to me by some other
-    person who certified (a), (b) or (c) and I have not modified
-    it.
-
-(d) I understand and agree that this project and the contribution
-    are public and that a record of the contribution (including all
-    personal information I submit with it, including my sign-off) is
-    maintained indefinitely and may be redistributed consistent with
-    this project or the open source license(s) involved.
-```
-
-Then you just add a line to every git commit message:
-
-    Signed-off-by: Joe Smith <joe.smith@email.com>
-
-Use your real name (sorry, no pseudonyms or anonymous contributions).
-
-If you set your `user.name` and `user.email` git configs, you can sign your
-commit automatically with `git commit -s`.
-
-### How can I become a maintainer?
-
-The procedures for adding new maintainers are explained in the
-global [MAINTAINERS](https://github.com/docker/opensource/blob/master/MAINTAINERS)
-file in the [https://github.com/docker/opensource/](https://github.com/docker/opensource/)
-repository.
-
-Don't forget: being a maintainer is a time investment. Make sure you
-will have time to make yourself available. You don't have to be a
-maintainer to make a difference on the project!
-
-## Docker community guidelines
-
-We want to keep the Docker community awesome, growing and collaborative. We need
-your help to keep it that way. To help with this we've come up with some general
-guidelines for the community as a whole:
-
-* Be nice: Be courteous, respectful and polite to fellow community members:
-  no regional, racial, gender, or other abuse will be tolerated. We like
-  nice people way better than mean ones!
-
-* Encourage diversity and participation: Make everyone in our community feel
-  welcome, regardless of their background and the extent of their
-  contributions, and do everything possible to encourage participation in
-  our community.
-
-* Keep it legal: Basically, don't get us in trouble. Share only content that
-  you own, do not share private or sensitive information, and don't break
-  the law.
-
-* Stay on topic: Make sure that you are posting to the correct channel and
-  avoid off-topic discussions. Remember when you update an issue or respond
-  to an email you are potentially sending to a large number of people. Please
-  consider this before you update. Also remember that nobody likes spam.
-
-* Don't send email to the maintainers: There's no need to send email to the
-  maintainers to ask them to investigate an issue or to take a look at a
-  pull request. Instead of sending an email, GitHub mentions should be
-  used to ping maintainers to review a pull request, a proposal or an
-  issue.
-
-### Guideline violations — 3 strikes method
-
-The point of this section is not to find opportunities to punish people, but we
-do need a fair way to deal with people who are making our community suck.
-
-1. First occurrence: We'll give you a friendly, but public reminder that the
-   behavior is inappropriate according to our guidelines.
-
-2. Second occurrence: We will send you a private message with a warning that
-   any additional violations will result in removal from the community.
-
-3. Third occurrence: Depending on the violation, we may need to delete or ban
-   your account.
-
-**Notes:**
-
-* Obvious spammers are banned on first occurrence. If we don't do this, we'll
-  have spam all over the place.
-
-* Violations are forgiven after 6 months of good behavior, and we won't hold a
-  grudge.
-
-* People who commit minor infractions will get some education rather than
-  being run through the full 3-strikes process.
-
-* The rules apply equally to everyone in the community, no matter how much
-  you've contributed.
-
-* Extreme violations of a threatening, abusive, destructive or illegal nature
-  will be addressed immediately and are not subject to 3 strikes or forgiveness.
-
-* Contact abuse@docker.com to report abuse or appeal violations. In the case of
-  appeals, we know that mistakes happen, and we'll work with you to come up with a
-  fair solution if there has been a misunderstanding.
-
-## Coding Style
-
-Unless explicitly stated, we follow all coding guidelines from the Go
-community. While some of these standards may seem arbitrary, they somehow seem
-to result in a solid, consistent codebase.
-
-It is possible that the code base does not currently comply with these
-guidelines. We are not looking for a massive PR that fixes this, since that
-goes against the spirit of the guidelines. All new contributions should make a
-best effort to clean up and make the code base better than they found it.
-Obviously, apply your best judgement. Remember, the goal here is to make the
-code base easier for humans to navigate and understand. Always keep that in
-mind when nudging others to comply.
-
-The rules:
-
-1. All code should be formatted with `gofmt -s`.
-2. All code should pass the default levels of
-   [`golint`](https://github.com/golang/lint).
-3. All code should follow the guidelines covered in [Effective
-   Go](http://golang.org/doc/effective_go.html) and [Go Code Review
-   Comments](https://github.com/golang/go/wiki/CodeReviewComments).
-4. Comment the code.
Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](https://golang.org/doc/effective_go.html). The -[Go Blog](https://blog.golang.org) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 064502f4e7..0000000000 --- a/Dockerfile +++ /dev/null @@ -1,272 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# # Publish a release: -# docker run --privileged \ -# -e AWS_S3_BUCKET=baz \ -# -e AWS_ACCESS_KEY=foo \ -# -e AWS_SECRET_KEY=bar \ -# -e GPG_PASSPHRASE=gloubiboulga \ -# docker hack/release.sh -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. 
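-# The usage examples above rely on one pattern: the image is a self-contained
-# build and test environment, and bind-mounting your checkout at
-# /go/src/github.com/docker/docker makes host-side edits visible in the
-# container without rebuilding the image.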
-# - -FROM debian:jessie - -# add zfs ppa -RUN apt-key adv --keyserver hkp://p80.pool.sks-keyservers.net:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 \ - || apt-key adv --keyserver hkp://pgp.mit.edu:80 --recv-keys E871F18B51E0147C77796AC81196BA81F6B0FC61 -RUN echo deb http://ppa.launchpad.net/zfs-native/stable/ubuntu trusty main > /etc/apt/sources.list.d/zfs.list - - -# allow replacing httpredir mirror -ARG APT_MIRROR=httpredir.debian.org -RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - apt-utils \ - aufs-tools \ - automake \ - bash-completion \ - binutils-mingw-w64 \ - bsdmainutils \ - btrfs-tools \ - build-essential \ - clang \ - createrepo \ - curl \ - dpkg-sig \ - gcc-mingw-w64 \ - git \ - iptables \ - jq \ - libapparmor-dev \ - libcap-dev \ - libltdl-dev \ - libsqlite3-dev \ - libsystemd-journal-dev \ - libtool \ - mercurial \ - net-tools \ - pkg-config \ - python-dev \ - python-mock \ - python-pip \ - python-websocket \ - ubuntu-zfs \ - xfsprogs \ - libzfs-dev \ - tar \ - zip \ - --no-install-recommends \ - && pip install awscli==1.10.15 -# Get lvm2 source for compiling statically -ENV LVM2_VERSION 2.02.103 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Configure the container for OSX cross compilation -ENV OSX_SDK MacOSX10.11.sdk -ENV OSX_CROSS_COMMIT 8aa9b71a394905e6c5f4b59e2b97b87a004658a4 -RUN set -x \ - && export OSXCROSS_PATH="/osxcross" \ - && git clone https://github.com/tpoechtrager/osxcross.git $OSXCROSS_PATH \ - && ( cd $OSXCROSS_PATH && git checkout -q $OSX_CROSS_COMMIT) \ - && curl -sSL https://s3.dockerproject.org/darwin/v2/${OSX_SDK}.tar.xz -o "${OSXCROSS_PATH}/tarballs/${OSX_SDK}.tar.xz" \ - && UNATTENDED=yes OSX_VERSION_MIN=10.6 ${OSXCROSS_PATH}/build.sh -ENV PATH /osxcross/target/bin:$PATH - -# install seccomp: the version shipped in trusty is too old -ENV SECCOMP_VERSION 2.3.1 -RUN set -x \ - && export SECCOMP_PATH="$(mktemp -d)" \ - && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ - | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ - && ( \ - cd "$SECCOMP_PATH" \ - && ./configure --prefix=/usr/local \ - && make \ - && make install \ - && ldconfig \ - ) \ - && rm -rf "$SECCOMP_PATH" - -# Install Go -# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines -# will need updating, to avoid errors. Ping #docker-maintainers on IRC -# with a heads-up. 
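-#
-# Bootstrap note: Go >= 1.5 is itself written in Go, so the RUN below first
-# unpacks go1.4.3 into /root/go1.4 (the default GOROOT_BOOTSTRAP used by
-# make.bash) and then builds $GO_VERSION from source, applying a small inline
-# patch that backports the macOS Sierra gettimeofday fix (golang/go issue
-# 16570) so darwin/amd64 cross-builds keep working.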
-ENV GO_VERSION 1.6.3 - -# Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS \ - linux/386 linux/arm \ - darwin/amd64 \ - freebsd/amd64 freebsd/386 freebsd/arm \ - windows/amd64 windows/386 - -RUN curl -fsSL "https://storage.googleapis.com/golang/go1.4.3.linux-amd64.tar.gz" \ - | tar -xzC /root && \ - mv /root/go /root/go1.4 && \ - cd /usr/local && \ - curl -fsSL "https://storage.googleapis.com/golang/go$GO_VERSION.src.tar.gz" \ - | tar -xzC /usr/local && \ - cd go && \ - printf 'diff --git a/src/runtime/sys_darwin_amd64.s b/src/runtime/sys_darwin_amd64.s\nindex e09b906..fa8ff2f 100644\n--- a/src/runtime/sys_darwin_amd64.s\n+++ b/src/runtime/sys_darwin_amd64.s\n@@ -157,6 +157,7 @@ systime:\n\t// Fall back to system call (usually first call in this thread).\n\tMOVQ\tSP, DI\n\tMOVQ\t$0, SI\n+\tMOVQ\t$0, DX // required as of Sierra; Issue 16570\n\tMOVL\t$(0x2000000+116), AX\n\tSYSCALL\n\tCMPQ\tAX, $0\n' | patch -p1 && \ - cd src && \ - ./make.bash - -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# This has been commented out and kept as reference because we don't support compiling with older Go anymore. -# ENV GOFMT_VERSION 1.3.3 -# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt - -ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 -# Grab Go's cover tool for dead-simple code coverage testing -# Grab Go's vet tool for examining go code to find suspicious constructs -# and help prevent errors that the compiler might not catch -RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ - && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ - && go install -v golang.org/x/tools/cmd/cover \ - && go install -v golang.org/x/tools/cmd/vet -# Grab Go's lint tool -ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 -RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ - && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ - && go install -v github.com/golang/lint/golint - -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. 
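-# (In the builds below, GOPATH is temporarily prefixed with the distribution
-# repo's vendored Godeps workspace so each registry binary compiles against
-# its pinned dependencies rather than anything already in the image-wide
-# GOPATH.)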
-ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION v0.3.0 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. 
-RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - buildpack-deps:jessie@sha256:25785f89240fbcdd8a74bdaf30dd5599a9523882c6dfc567f2e9ef7cf6f79db6 \ - busybox:latest@sha256:e4f93f6ed15a0cdd342f5aae387886fba0ab98af0a102da6276eaf24d6e6ade0 \ - debian:jessie@sha256:f968f10b4b523737e253a97eac59b0d1420b5c19b69928d35801a6373ffe330e \ - hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7 -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone --depth 1 -b v1.0.5 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Install runc -ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="seccomp apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.4 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd "$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.aarch64 b/Dockerfile.aarch64 deleted file mode 100644 index c3199a2e9f..0000000000 --- a/Dockerfile.aarch64 +++ /dev/null @@ -1,211 +0,0 @@ -# This file describes the standard way to build Docker on aarch64, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.aarch64 . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. 
Therefore, you don't have to disable it anymore.
-#
-
-FROM aarch64/ubuntu:wily
-
-# Packaged dependencies
-RUN apt-get update && apt-get install -y \
	apparmor \
	aufs-tools \
	automake \
	bash-completion \
	btrfs-tools \
	build-essential \
	createrepo \
	curl \
	dpkg-sig \
	g++ \
	gcc \
	git \
	iptables \
	jq \
	libapparmor-dev \
	libc6-dev \
	libcap-dev \
	libltdl-dev \
	libsqlite3-dev \
	libsystemd-dev \
	mercurial \
	net-tools \
	parallel \
	pkg-config \
	python-dev \
	python-mock \
	python-pip \
	python-websocket \
	gccgo \
	--no-install-recommends
-
-# Install armhf loader to use armv6 binaries on armv8
-RUN dpkg --add-architecture armhf \
	&& apt-get update \
	&& apt-get install -y libc6:armhf
-
-# Get lvm2 source for compiling statically
-ENV LVM2_VERSION 2.02.103
-RUN mkdir -p /usr/local/lvm2 \
	&& curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \
		| tar -xzC /usr/local/lvm2 --strip-components=1
-# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
-
-# fix platform enablement in lvm2 to support aarch64 properly
-RUN set -e \
	&& for f in config.guess config.sub; do \
		curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \
	done
-# "arch.c:78:2: error: #error the arch code needs to know about your machine type"
-
-# Compile and install lvm2
-RUN cd /usr/local/lvm2 \
	&& ./configure \
		--build="$(gcc -print-multiarch)" \
		--enable-static_link \
	&& make device-mapper \
	&& make install_device-mapper
-# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
-
-# install seccomp: the version shipped in trusty is too old
-ENV SECCOMP_VERSION 2.3.1
-RUN set -x \
	&& export SECCOMP_PATH="$(mktemp -d)" \
	&& curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \
		| tar -xzC "$SECCOMP_PATH" --strip-components=1 \
	&& ( \
		cd "$SECCOMP_PATH" \
		&& ./configure --prefix=/usr/local \
		&& make \
		&& make install \
		&& ldconfig \
	) \
	&& rm -rf "$SECCOMP_PATH"
-
-# Install Go
-# We don't have official binary tarballs for ARM64, either for Go itself or for
-# a bootstrap toolchain, so we use the official armv6 released binaries as
-# GOROOT_BOOTSTRAP and build Go from source.
-ENV GO_VERSION 1.6.3
-RUN mkdir /usr/src/go && curl -fsSL https://storage.googleapis.com/golang/go${GO_VERSION}.src.tar.gz | tar -v -C /usr/src/go -xz --strip-components=1 \
	&& cd /usr/src/go/src \
	&& GOOS=linux GOARCH=arm64 GOROOT_BOOTSTRAP="$(go env GOROOT)" ./make.bash
-
-ENV PATH /usr/src/go/bin:$PATH
-ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
-
-# Only install one version of the registry: the older version that supports
-# schema1 manifests does not work on ARM64, so the integration-cli tests for
-# schema1 manifests are skipped on ARM64.
-ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION v0.3.0 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. 
-RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - aarch64/buildpack-deps:jessie@sha256:6aa1d6910791b7ac78265fd0798e5abd6cb3f27ae992f6f960f6c303ec9535f2 \ - aarch64/busybox:latest@sha256:b23a6a37cf269dff6e46d2473b6e227afa42b037e6d23435f1d2bc40fc8c2828 \ - aarch64/debian:jessie@sha256:4be74a41a7c70ebe887b634b11ffe516cf4fcd56864a54941e56bb49883c3170 \ - aarch64/hello-world:latest@sha256:65a4a158587b307bb02db4de41b836addb0c35175bdc801367b1ac1ddeb9afda -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone --depth 1 -b v1.0.5 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Install runc -ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="seccomp apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.4 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd "$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.armhf b/Dockerfile.armhf deleted file mode 100644 index 066fad5ded..0000000000 --- a/Dockerfile.armhf +++ /dev/null @@ -1,218 +0,0 @@ -# This file describes the standard way to build Docker on ARMv7, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.armhf . 
-# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. -# - -FROM armhf/debian:jessie - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - createrepo \ - curl \ - dpkg-sig \ - git \ - iptables \ - jq \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libltdl-dev \ - libsqlite3-dev \ - libsystemd-journal-dev \ - libtool \ - mercurial \ - pkg-config \ - python-dev \ - python-mock \ - python-pip \ - python-websocket \ - xfsprogs \ - tar \ - --no-install-recommends - -# Get lvm2 source for compiling statically -ENV LVM2_VERSION 2.02.103 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Install Go -ENV GO_VERSION 1.6.3 -RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-armv6l.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# we're building for armhf, which is ARMv7, so let's be explicit about that -ENV GOARCH arm -ENV GOARM 7 - -# This has been commented out and kept as reference because we don't support compiling with older Go anymore. -# ENV GOFMT_VERSION 1.3.3 -# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt - -ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 -# Grab Go's cover tool for dead-simple code coverage testing -# Grab Go's vet tool for examining go code to find suspicious constructs -# and help prevent errors that the compiler might not catch -RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ - && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ - && go install -v golang.org/x/tools/cmd/cover \ - && go install -v golang.org/x/tools/cmd/vet -# Grab Go's lint tool -ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 -RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ - && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ - && go install -v github.com/golang/lint/golint - -# install seccomp: the version shipped in trusty is too old -ENV SECCOMP_VERSION 2.3.1 -RUN set -x \ - && export SECCOMP_PATH="$(mktemp -d)" \ - && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ - | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ - && ( \ - cd "$SECCOMP_PATH" \ - && ./configure --prefix=/usr/local \ - && make \ - && make install \ - && ldconfig \ - ) \ - && rm -rf "$SECCOMP_PATH" - -# Install two versions of the registry. 
The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. -ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT cb08de17d74bef86ce6c5abe8b240e282f5750be -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION v0.3.0 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. 
-RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - armhf/buildpack-deps:jessie@sha256:ca6cce8e5bf5c952129889b5cc15cd6aa8d995d77e55e3749bbaadae50e476cb \ - armhf/busybox:latest@sha256:d98a7343ac750ffe387e3d514f8521ba69846c216778919b01414b8617cfb3d4 \ - armhf/debian:jessie@sha256:4a2187483f04a84f9830910fe3581d69b3c985cc045d9f01d8e2f3795b28107b \ - armhf/hello-world:latest@sha256:161dcecea0225975b2ad5f768058212c1e0d39e8211098666ffa1ac74cfb7791 -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone --depth 1 -b v1.0.5 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Install runc -ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="seccomp apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.4 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd "$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.gccgo b/Dockerfile.gccgo deleted file mode 100644 index 1f6420d638..0000000000 --- a/Dockerfile.gccgo +++ /dev/null @@ -1,104 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.gccgo . 
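-#
-# Note: there is no "Install Go" step in this variant; the daemon is compiled
-# with gccgo, which (we assume) is provided by the gcc:6.1 base image rather
-# than installed via apt below.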
-#
-
-FROM gcc:6.1
-
-# Packaged dependencies
-RUN apt-get update && apt-get install -y \
	apparmor \
	aufs-tools \
	btrfs-tools \
	build-essential \
	curl \
	git \
	iptables \
	jq \
	net-tools \
	libapparmor-dev \
	libcap-dev \
	libsqlite3-dev \
	mercurial \
	parallel \
	python-dev \
	python-mock \
	python-pip \
	python-websocket \
	--no-install-recommends
-
-# Get lvm2 source for compiling statically
-RUN git clone -b v2_02_103 https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2
-# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags
-
-# Compile and install lvm2
-RUN cd /usr/local/lvm2 \
	&& ./configure --enable-static_link \
	&& make device-mapper \
	&& make install_device-mapper
-# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL
-
-# install seccomp: the version shipped in jessie is too old
-ENV SECCOMP_VERSION v2.3.1
-RUN set -x \
	&& export SECCOMP_PATH=$(mktemp -d) \
	&& git clone https://github.com/seccomp/libseccomp.git "$SECCOMP_PATH" \
	&& ( \
		cd "$SECCOMP_PATH" \
		&& git checkout "$SECCOMP_VERSION" \
		&& ./autogen.sh \
		&& ./configure --prefix=/usr \
		&& make \
		&& make install \
	) \
	&& rm -rf "$SECCOMP_PATH"
-
-ENV GOPATH /go:/go/src/github.com/docker/docker/vendor
-
-# Get the "docker-py" source so we can run their integration tests
-ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13
-RUN git clone https://github.com/docker/docker-py.git /docker-py \
	&& cd /docker-py \
	&& git checkout -q $DOCKER_PY_COMMIT
-
-# Add an unprivileged user to be used for tests which need it
-RUN groupadd -r docker
-RUN useradd --create-home --gid docker unprivilegeduser
-
-VOLUME /var/lib/docker
-WORKDIR /go/src/github.com/docker/docker
-ENV DOCKER_BUILDTAGS apparmor seccomp selinux
-
-# Install runc
-ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28
-RUN set -x \
	&& export GOPATH="$(mktemp -d)" \
	&& git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \
	&& cd "$GOPATH/src/github.com/opencontainers/runc" \
	&& git checkout -q "$RUNC_COMMIT" \
	&& make static BUILDTAGS="seccomp apparmor selinux" \
	&& cp runc /usr/local/bin/docker-runc \
	&& rm -rf "$GOPATH"
-
-# Install containerd
-ENV CONTAINERD_COMMIT v0.2.4
-RUN set -x \
	&& export GOPATH="$(mktemp -d)" \
	&& git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \
	&& cd "$GOPATH/src/github.com/docker/containerd" \
	&& git checkout -q "$CONTAINERD_COMMIT" \
	&& make static \
	&& cp bin/containerd /usr/local/bin/docker-containerd \
	&& cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \
	&& cp bin/ctr /usr/local/bin/docker-containerd-ctr \
	&& rm -rf "$GOPATH"
-
-# Wrap all commands in the "docker-in-docker" script to allow nested containers
-ENTRYPOINT ["hack/dind"]
-
-# Upload docker source
-COPY . /go/src/github.com/docker/docker
diff --git a/Dockerfile.ppc64le b/Dockerfile.ppc64le
deleted file mode 100644
index 294d11d41d..0000000000
--- a/Dockerfile.ppc64le
+++ /dev/null
@@ -1,232 +0,0 @@
-# This file describes the standard way to build Docker on ppc64le, using docker
-#
-# Usage:
-#
-# # Assemble the full dev environment. This is slow the first time.
-# docker build -t docker -f Dockerfile.ppc64le .
-# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. -# - -FROM ppc64le/gcc:6.1 - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - createrepo \ - curl \ - dpkg-sig \ - git \ - iptables \ - jq \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libltdl-dev \ - libsqlite3-dev \ - libsystemd-journal-dev \ - libtool \ - mercurial \ - pkg-config \ - python-dev \ - python-mock \ - python-pip \ - python-websocket \ - xfsprogs \ - tar \ - --no-install-recommends - -# Get lvm2 source for compiling statically -ENV LVM2_VERSION 2.02.103 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# fix platform enablement in lvm2 to support ppc64le properly -RUN set -e \ - && for f in config.guess config.sub; do \ - curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ - done -# "arch.c:78:2: error: #error the arch code needs to know about your machine type" - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# install seccomp: the version shipped in jessie is too old -ENV SECCOMP_VERSION 2.3.1 -RUN set -x \ - && export SECCOMP_PATH="$(mktemp -d)" \ - && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ - | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ - && ( \ - cd "$SECCOMP_PATH" \ - && ./configure --prefix=/usr/local \ - && make \ - && make install \ - && ldconfig \ - ) \ - && rm -rf "$SECCOMP_PATH" - - -## BUILD GOLANG 1.6 -# NOTE: ppc64le has compatibility issues with older versions of go, so make sure the version >= 1.6 -ENV GO_VERSION 1.6.3 -ENV GO_DOWNLOAD_URL https://golang.org/dl/go${GO_VERSION}.src.tar.gz -ENV GOROOT_BOOTSTRAP /usr/local - -RUN curl -fsSL "$GO_DOWNLOAD_URL" -o golang.tar.gz \ - && tar -C /usr/src -xzf golang.tar.gz \ - && rm golang.tar.gz \ - && cd /usr/src/go/src && ./make.bash 2>&1 - -ENV GOROOT_BOOTSTRAP /usr/src/ - -ENV PATH /usr/src/go/bin/:/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
-# ENV GOFMT_VERSION 1.3.3 -# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt - -ENV GO_TOOLS_COMMIT 823804e1ae08dbb14eb807afc7db9993bc9e3cc3 -# Grab Go's cover tool for dead-simple code coverage testing -# Grab Go's vet tool for examining go code to find suspicious constructs -# and help prevent errors that the compiler might not catch -RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ - && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ - && go install -v golang.org/x/tools/cmd/cover \ - && go install -v golang.org/x/tools/cmd/vet -# Grab Go's lint tool -ENV GO_LINT_COMMIT 32a87160691b3c96046c0c678fe57c5bef761456 -RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ - && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ - && go install -v github.com/golang/lint/golint - -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. -ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -ENV NOTARY_VERSION v0.3.0 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION") \ - && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# 
Register Docker's bash completion. -RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - ppc64le/buildpack-deps:jessie@sha256:902bfe4ef1389f94d143d64516dd50a2de75bca2e66d4a44b1d73f63ddf05dda \ - ppc64le/busybox:latest@sha256:38bb82085248d5a3c24bd7a5dc146f2f2c191e189da0441f1c2ca560e3fc6f1b \ - ppc64le/debian:jessie@sha256:412845f51b6ab662afba71bc7a716e20fdb9b84f185d180d4c7504f8a75c4f91 \ - ppc64le/hello-world:latest@sha256:186a40a9a02ca26df0b6c8acdfb8ac2f3ae6678996a838f977e57fac9d963974 -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone --depth 1 -b v1.0.5 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Install runc -ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="apparmor seccomp selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.4 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd "$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.s390x b/Dockerfile.s390x deleted file mode 100644 index 20d10bb5e7..0000000000 --- a/Dockerfile.s390x +++ /dev/null @@ -1,227 +0,0 @@ -# This file describes the standard way to build Docker on s390x, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker -f Dockerfile.s390x . 
-# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. -# - -FROM s390x/gcc:6.1 - -# Packaged dependencies -RUN apt-get update && apt-get install -y \ - apparmor \ - aufs-tools \ - automake \ - bash-completion \ - btrfs-tools \ - build-essential \ - createrepo \ - curl \ - dpkg-sig \ - git \ - iptables \ - jq \ - net-tools \ - libapparmor-dev \ - libcap-dev \ - libltdl-dev \ - libsqlite3-dev \ - libsystemd-journal-dev \ - libtool \ - mercurial \ - pkg-config \ - python-dev \ - python-mock \ - python-pip \ - python-websocket \ - xfsprogs \ - tar \ - --no-install-recommends - -# install seccomp: the version shipped in jessie is too old -ENV SECCOMP_VERSION 2.3.1 -RUN set -x \ - && export SECCOMP_PATH="$(mktemp -d)" \ - && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ - | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ - && ( \ - cd "$SECCOMP_PATH" \ - && ./configure --prefix=/usr/local \ - && make \ - && make install \ - && ldconfig \ - ) \ - && rm -rf "$SECCOMP_PATH" - -# Get lvm2 source for compiling statically -ENV LVM2_VERSION 2.02.103 -RUN mkdir -p /usr/local/lvm2 \ - && curl -fsSL "https://mirrors.kernel.org/sourceware/lvm2/LVM2.${LVM2_VERSION}.tgz" \ - | tar -xzC /usr/local/lvm2 --strip-components=1 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags - -# fix platform enablement in lvm2 to support s390x properly -RUN set -e \ - && for f in config.guess config.sub; do \ - curl -fsSL -o "/usr/local/lvm2/autoconf/$f" "http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=$f;hb=HEAD"; \ - done -# "arch.c:78:2: error: #error the arch code needs to know about your machine type" - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 \ - && ./configure \ - --build="$(gcc -print-multiarch)" \ - --enable-static_link \ - && make device-mapper \ - && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Note: Go comes from the base image (gccgo, specifically) -# We can't compile Go proper because s390x isn't an officially supported architecture yet. - -ENV PATH /go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor - -# This has been commented out and kept as reference because we don't support compiling with older Go anymore. 
-# ENV GOFMT_VERSION 1.3.3 -# RUN curl -sSL https://storage.googleapis.com/golang/go${GOFMT_VERSION}.$(go env GOOS)-$(go env GOARCH).tar.gz | tar -C /go/bin -xz --strip-components=2 go/bin/gofmt - -# TODO update this sha when we upgrade to Go 1.5+ -ENV GO_TOOLS_COMMIT 069d2f3bcb68257b627205f0486d6cc69a231ff9 -# Grab Go's cover tool for dead-simple code coverage testing -# Grab Go's vet tool for examining go code to find suspicious constructs -# and help prevent errors that the compiler might not catch -RUN git clone https://github.com/golang/tools.git /go/src/golang.org/x/tools \ - && (cd /go/src/golang.org/x/tools && git checkout -q $GO_TOOLS_COMMIT) \ - && go install -v golang.org/x/tools/cmd/cover \ - && go install -v golang.org/x/tools/cmd/vet -# Grab Go's lint tool -ENV GO_LINT_COMMIT f42f5c1c440621302702cb0741e9d2ca547ae80f -RUN git clone https://github.com/golang/lint.git /go/src/github.com/golang/lint \ - && (cd /go/src/github.com/golang/lint && git checkout -q $GO_LINT_COMMIT) \ - && go install -v github.com/golang/lint/golint - - -# Install two versions of the registry. The first is an older version that -# only supports schema1 manifests. The second is a newer version that supports -# both. This allows integration-cli tests to cover push/pull with both schema1 -# and schema2 manifests. -ENV REGISTRY_COMMIT_SCHEMA1 ec87e9b6971d831f0eff752ddb54fb64693e51cd -ENV REGISTRY_COMMIT 47a064d4195a9b56133891bbb13620c3ac83a827 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/distribution.git "$GOPATH/src/github.com/docker/distribution" \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2 github.com/docker/distribution/cmd/registry \ - && (cd "$GOPATH/src/github.com/docker/distribution" && git checkout -q "$REGISTRY_COMMIT_SCHEMA1") \ - && GOPATH="$GOPATH/src/github.com/docker/distribution/Godeps/_workspace:$GOPATH" \ - go build -o /usr/local/bin/registry-v2-schema1 github.com/docker/distribution/cmd/registry \ - && rm -rf "$GOPATH" - -# Install notary and notary-server -# -# Note: We have to explicitly set GO15VENDOREXPERIMENT=0 because gccgo does not -# support vendoring: https://github.com/golang/go/issues/15628 -ENV NOTARY_VERSION v0.3.0 -RUN set -x \ - && export GO15VENDOREXPERIMENT=0 \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/notary.git "$GOPATH/src/github.com/docker/notary" \ - && (cd "$GOPATH/src/github.com/docker/notary" && git checkout -q "$NOTARY_VERSION" && ln -s . 
vendor/src) \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary-server github.com/docker/notary/cmd/notary-server \ - && GOPATH="$GOPATH/src/github.com/docker/notary/vendor:$GOPATH" \ - go build -o /usr/local/bin/notary github.com/docker/notary/cmd/notary \ - && rm -rf "$GOPATH" - -# Get the "docker-py" source so we can run their integration tests -ENV DOCKER_PY_COMMIT 7befe694bd21e3c54bb1d7825270ea4bd6864c13 -RUN git clone https://github.com/docker/docker-py.git /docker-py \ - && cd /docker-py \ - && git checkout -q $DOCKER_PY_COMMIT \ - && pip install -r test-requirements.txt - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor selinux seccomp - -# Let us use a .bashrc file -RUN ln -sfv $PWD/.bashrc ~/.bashrc - -# Register Docker's bash completion. -RUN ln -sv $PWD/contrib/completion/bash/docker /etc/bash_completion.d/docker - -# Get useful and necessary Hub images so we can "docker load" locally instead of pulling -COPY contrib/download-frozen-image-v2.sh /go/src/github.com/docker/docker/contrib/ -RUN ./contrib/download-frozen-image-v2.sh /docker-frozen-images \ - s390x/buildpack-deps:jessie@sha256:4d1381224acaca6c4bfe3604de3af6972083a8558a99672cb6989c7541780099 \ - s390x/busybox:latest@sha256:dd61522c983884a66ed72d60301925889028c6d2d5e0220a8fe1d9b4c6a4f01b \ - s390x/debian:jessie@sha256:b74c863400909eff3c5e196cac9bfd1f6333ce47aae6a38398d87d5875da170a \ - s390x/hello-world:latest@sha256:780d80b3a7677c3788c0d5cd9168281320c8d4a6d9183892d8ee5cdd610f5699 -# see also "hack/make/.ensure-frozen-images" (which needs to be updated any time this list is) - -# Download man page generator -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone --depth 1 -b v1.0.5 https://github.com/cpuguy83/go-md2man.git "$GOPATH/src/github.com/cpuguy83/go-md2man" \ - && git clone --depth 1 -b v1.4 https://github.com/russross/blackfriday.git "$GOPATH/src/github.com/russross/blackfriday" \ - && go get -v -d github.com/cpuguy83/go-md2man \ - && go build -v -o /usr/local/bin/go-md2man github.com/cpuguy83/go-md2man \ - && rm -rf "$GOPATH" - -# Download toml validator -ENV TOMLV_COMMIT 9baf8a8a9f2ed20a8e54160840c492f937eeaf9a -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/BurntSushi/toml.git "$GOPATH/src/github.com/BurntSushi/toml" \ - && (cd "$GOPATH/src/github.com/BurntSushi/toml" && git checkout -q "$TOMLV_COMMIT") \ - && go build -v -o /usr/local/bin/tomlv github.com/BurntSushi/toml/cmd/tomlv \ - && rm -rf "$GOPATH" - -# Install runc -ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="seccomp apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.4 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd 
"$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . /go/src/github.com/docker/docker diff --git a/Dockerfile.simple b/Dockerfile.simple deleted file mode 100644 index 708289e3b0..0000000000 --- a/Dockerfile.simple +++ /dev/null @@ -1,85 +0,0 @@ -# docker build -t docker:simple -f Dockerfile.simple . -# docker run --rm docker:simple hack/make.sh dynbinary -# docker run --rm --privileged docker:simple hack/dind hack/make.sh test-unit -# docker run --rm --privileged -v /var/lib/docker docker:simple hack/dind hack/make.sh dynbinary test-integration-cli - -# This represents the bare minimum required to build and test Docker. - -FROM debian:jessie - -# compile and runtime deps -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#build-dependencies -# https://github.com/docker/docker/blob/master/project/PACKAGERS.md#runtime-dependencies -RUN apt-get update && apt-get install -y --no-install-recommends \ - btrfs-tools \ - build-essential \ - curl \ - gcc \ - git \ - libapparmor-dev \ - libdevmapper-dev \ - libsqlite3-dev \ - \ - ca-certificates \ - e2fsprogs \ - iptables \ - procps \ - xfsprogs \ - xz-utils \ - \ - aufs-tools \ - && rm -rf /var/lib/apt/lists/* - -# install seccomp: the version shipped in trusty is too old -ENV SECCOMP_VERSION 2.3.1 -RUN set -x \ - && export SECCOMP_PATH="$(mktemp -d)" \ - && curl -fsSL "https://github.com/seccomp/libseccomp/releases/download/v${SECCOMP_VERSION}/libseccomp-${SECCOMP_VERSION}.tar.gz" \ - | tar -xzC "$SECCOMP_PATH" --strip-components=1 \ - && ( \ - cd "$SECCOMP_PATH" \ - && ./configure --prefix=/usr/local \ - && make \ - && make install \ - && ldconfig \ - ) \ - && rm -rf "$SECCOMP_PATH" - -# Install Go -# IMPORTANT: If the version of Go is updated, the Windows to Linux CI machines -# will need updating, to avoid errors. Ping #docker-maintainers on IRC -# with a heads-up. -ENV GO_VERSION 1.6.3 -RUN curl -fsSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" \ - | tar -xzC /usr/local -ENV PATH /go/bin:/usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor -ENV CGO_LDFLAGS -L/lib - -# Install runc -ENV RUNC_COMMIT cc29e3dded8e27ba8f65738f40d251c885030a28 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/opencontainers/runc.git "$GOPATH/src/github.com/opencontainers/runc" \ - && cd "$GOPATH/src/github.com/opencontainers/runc" \ - && git checkout -q "$RUNC_COMMIT" \ - && make static BUILDTAGS="seccomp apparmor selinux" \ - && cp runc /usr/local/bin/docker-runc \ - && rm -rf "$GOPATH" - -# Install containerd -ENV CONTAINERD_COMMIT v0.2.4 -RUN set -x \ - && export GOPATH="$(mktemp -d)" \ - && git clone https://github.com/docker/containerd.git "$GOPATH/src/github.com/docker/containerd" \ - && cd "$GOPATH/src/github.com/docker/containerd" \ - && git checkout -q "$CONTAINERD_COMMIT" \ - && make static \ - && cp bin/containerd /usr/local/bin/docker-containerd \ - && cp bin/containerd-shim /usr/local/bin/docker-containerd-shim \ - && cp bin/ctr /usr/local/bin/docker-containerd-ctr \ - && rm -rf "$GOPATH" - -ENV AUTO_GOPATH 1 -WORKDIR /usr/src/docker -COPY . 
/usr/src/docker
diff --git a/Dockerfile.windows b/Dockerfile.windows
deleted file mode 100644
index 698439f735..0000000000
--- a/Dockerfile.windows
+++ /dev/null
@@ -1,89 +0,0 @@
-# This file describes the standard way to build Docker, using a docker container on Windows
-# Server 2016
-#
-# Usage:
-#
-# # Assemble the full dev environment. This is slow the first time. Run this from
-# # a directory containing the sources you are validating. For example from
-# # c:\go\src\github.com\docker\docker
-#
-# docker build -t docker -f Dockerfile.windows .
-#
-#
-# # Build docker in a container. Run the following from a Windows cmd command prompt,
-# # replacing c:\built with the directory you want the binaries to be placed on the
-# # host system.
-#
-# docker run --rm -v "c:\built:c:\target" docker sh -c 'cd /c/go/src/github.com/docker/docker; hack/make.sh binary; ec=$?; if [ $ec -eq 0 ]; then robocopy /c/go/src/github.com/docker/docker/bundles/$(cat VERSION)/binary /c/target/binary; fi; exit $ec'
-#
-# Important notes:
-# ---------------
-#
-# The posix utilities from GIT aren't usable interactively as of January 2016. This
-# is because they require a console window which isn't present in a container in Windows.
-# See the example at the top of this file. Do NOT use -it in that docker run!!!
-#
-# Don't try to use a volume for passing the source through. The posix utilities will
-# balk at reparse points. Again, see the example at the top of this file on how to use a volume
-# to get the built binary out of the container.
-#
-# The steps are minimised dramatically to improve performance
-
-FROM windowsservercore
-
-# Environment variable notes:
-# - GO_VERSION must be consistent with the 'Dockerfile' used by Linux.
-# - FROM_DOCKERFILE is used for detection of building within a container.
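For readability, the dense one-liner in the usage notes above can be unpacked as below. This is a sketch only: cmd requires the command on a single line as shown above, and `c:\built` is whatever host directory you choose for the output.

```bash
# The same binary-extraction command as in the usage notes, split out for readability.
docker run --rm -v "c:\built:c:\target" docker sh -c '
  cd /c/go/src/github.com/docker/docker;
  hack/make.sh binary;    # build the engine binary inside the container
  ec=$?;
  if [ $ec -eq 0 ]; then
    # on success, robocopy the versioned bundle out via the mounted host directory
    robocopy /c/go/src/github.com/docker/docker/bundles/$(cat VERSION)/binary /c/target/binary;
  fi;
  exit $ec'
```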
-ENV GO_VERSION=1.6.3 \
-    GIT_LOCATION=https://github.com/git-for-windows/git/releases/download/v2.7.2.windows.1/Git-2.7.2-64-bit.exe \
-    GOPATH=C:/go;C:/go/src/github.com/docker/docker/vendor \
-    FROM_DOCKERFILE=1
-
-WORKDIR c:/
-
-# Everything downloaded/installed in one go (better performance, esp on TP4)
-RUN \
-  setx /M Path "c:\git\cmd;c:\git\bin;c:\git\usr\bin;%Path%;c:\gcc\bin;c:\go\bin" && \
-  setx GOROOT "c:\go" && \
-  powershell -command \
-    $ErrorActionPreference = 'Stop'; \
-    Function Download-File([string] $source, [string] $target) { \
-      $wc = New-Object net.webclient; $wc.Downloadfile($source, $target) \
-    } \
-    \
-    Write-Host INFO: Downloading git...; \
-    Download-File %GIT_LOCATION% gitsetup.exe; \
-    \
-    Write-Host INFO: Downloading go...; \
-    Download-File https://storage.googleapis.com/golang/go%GO_VERSION%.windows-amd64.msi go.msi; \
-    \
-    Write-Host INFO: Downloading compiler 1 of 3...; \
-    Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/gcc.zip gcc.zip; \
-    \
-    Write-Host INFO: Downloading compiler 2 of 3...; \
-    Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/runtime.zip runtime.zip; \
-    \
-    Write-Host INFO: Downloading compiler 3 of 3...; \
-    Download-File https://raw.githubusercontent.com/jhowardmsft/docker-tdmgcc/master/binutils.zip binutils.zip; \
-    \
-    Write-Host INFO: Installing git...; \
-    Start-Process gitsetup.exe -ArgumentList '/VERYSILENT /SUPPRESSMSGBOXES /CLOSEAPPLICATIONS /DIR=c:\git\' -Wait; \
-    \
-    Write-Host INFO: Installing go...; \
-    Start-Process msiexec -ArgumentList '-i go.msi -quiet' -Wait; \
-    \
-    Write-Host INFO: Unzipping compiler...; \
-    c:\git\usr\bin\unzip.exe -q -o gcc.zip -d /c/gcc; \
-    c:\git\usr\bin\unzip.exe -q -o runtime.zip -d /c/gcc; \
-    c:\git\usr\bin\unzip.exe -q -o binutils.zip -d /c/gcc; \
-    \
-    Write-Host INFO: Removing interim files; \
-    Remove-Item *.zip; \
-    Remove-Item go.msi; \
-    Remove-Item gitsetup.exe; \
-    \
-    Write-Host INFO: Completed
-
-# Prepare for building
-COPY . /go/src/github.com/docker/docker
-
diff --git a/LICENSE b/LICENSE
deleted file mode 100644
index 8f3fee627a..0000000000
--- a/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        https://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
-
-      "Source" form shall mean the preferred form for making modifications,
-      including but not limited to software source code, documentation
-      source, and configuration files.
- - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/MAINTAINERS b/MAINTAINERS deleted file mode 100644 index d3fcbfe7db..0000000000 --- a/MAINTAINERS +++ /dev/null @@ -1,296 +0,0 @@ -# Docker maintainers file -# -# This file describes who runs the docker/docker project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant -# parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - - [Org."Core maintainers"] - - # The Core maintainers are the ghostbusters of the project: when there's a problem others - # can't solve, they show up and fix it with bizarre devices and weaponry. - # They have final say on technical implementation and coding style. - # They are ultimately responsible for quality in all its forms: usability polish, - # bugfixes, performance, stability, etc. When ownership can cleanly be passed to - # a subsystem, they are responsible for doing so and holding the - # subsystem maintainers accountable. If ownership is unclear, they are the de facto owners. - - # For each release (including minor releases), a "release captain" is assigned from the - # pool of core maintainers. Rotation is encouraged across all maintainers, to ensure - # the release process is clear and up-to-date. 
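-    # (The short names below are keys into the [people] section at the bottom of
-    # this file, which maps each key to a full name, email address, and GitHub
-    # handle, as that section's own comments describe.)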
-
-    people = [
-        "aaronlehmann",
-        "calavera",
-        "coolljt0725",
-        "cpuguy83",
-        "crosbymichael",
-        "duglin",
-        "estesp",
-        "icecrime",
-        "jhowardmsft",
-        "justincormack",
-        "lk4d4",
-        "mavenugo",
-        "mhbauer",
-        "runcom",
-        "tianon",
-        "tibor",
-        "tonistiigi",
-        "unclejack",
-        "vdemeester"
-    ]
-
-    [Org."Docs maintainers"]
-
-        # TODO Describe the docs maintainers role.
-
-        people = [
-            "jamtur01",
-            "moxiegirl",
-            "sven",
-            "thajeztah"
-        ]
-
-    [Org.Curators]
-
-    # The curators help ensure that incoming issues and pull requests are properly triaged and
-    # that our various contribution and reviewing processes are respected. With their knowledge of
-    # the repository activity, they can also guide contributors to relevant material or
-    # discussions.
-    #
-    # They are neither code nor docs reviewers, so they are never expected to merge. They can
-    # however:
-    # - close an issue or pull request when it's an exact duplicate
-    # - close an issue or pull request when it's inappropriate or off-topic
-
-    people = [
-        "programmerq",
-        "thajeztah"
-    ]
-
-    [Org.Alumni]
-
-    # This list contains maintainers that are no longer active on the project.
-    # It is thanks to these people that the project has become what it is today.
-    # Thank you!
-
-    people = [
-        # As a maintainer, Erik was responsible for the "builder", and
-        # started the first designs for the new networking model in
-        # Docker. Erik is now working on all kinds of plugins for Docker
-        # (https://github.com/contiv) and various open source projects
-        # in his own repository https://github.com/erikh. You may
-        # still stumble into him in our issue tracker, or on IRC.
-        "erikh",
-
-        # Jessica Frazelle, also known as the "Keyser Söze of containers",
-        # runs *everything* in containers. She started contributing to
-        # Docker with a (fun fun) change involving both iptables and regular
-        # expressions (coz, YOLO!) on July 10, 2014
-        # https://github.com/docker/docker/pull/6950/commits/f3a68ffa390fb851115c77783fa4031f1d3b2995.
-        # Jess was Release Captain for Docker 1.4, 1.6 and 1.7, and contributed
-        # many features and improvements, among which "seccomp profiles" (making
-        # containers a lot more secure). Besides being a maintainer, she
-        # set up the CI infrastructure for the project, giving everyone
-        # something to shout at if a PR failed ("noooo Janky!").
-        # Jess is currently working on the DCOS security team at Mesosphere,
-        # and contributing to various open source projects.
-        # Be sure you don't miss her talks at a conference near you (a must-see),
-        # read her blog at https://blog.jessfraz.com (a must-read), and
-        # check out her open source projects on GitHub https://github.com/jfrazelle (a must-try).
-        "jfrazelle",
-
-        # Vincent "vbatts!" Batts made his first contribution to the project
-        # in November 2013, to become a maintainer a few months later, on
-        # May 10, 2014 (https://github.com/docker/docker/commit/d6e666a87a01a5634c250358a94c814bf26cb778).
-        # As a maintainer, Vincent made important contributions to core elements
-        # of Docker, such as "distribution" (tarsum) and graphdrivers (btrfs, devicemapper).
-        # He also contributed the "tar-split" library, an important element
-        # for the content-addressable store.
-        # Vincent is currently a member of the Open Containers Initiative
-        # Technical Oversight Board (TOB), besides his work at Red Hat and
-        # Project Atomic. You can still find him regularly hanging out in
-        # our repository and the #docker-dev and #docker-maintainers IRC channels
-        # for a chat, as he's always a lot of fun.
- "vbatts", - - # Victor is one of the earliest contributors to Docker, having worked on the - # project when it was still "dotCloud" in April 2013. He's been responsible - # for multiple releases (https://github.com/docker/docker/pulls?q=is%3Apr+bump+in%3Atitle+author%3Avieux), - # and up until today (2015), our number 2 contributor. Although he's no longer - # a maintainer for the Docker "Engine", he's still actively involved in other - # Docker projects, and most likely can be found in the Docker Swarm repository, - # for which he's a core maintainer. - "vieux", - - # Vishnu became a maintainer to help out on the daemon codebase and - # libcontainer integration. He's currently involved in the - # Open Containers Initiative, working on the specifications, - # besides his work on cAdvisor and Kubernetes for Google. - "vishh" - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.calavera] - Name = "David Calavera" - Email = "david.calavera@gmail.com" - GitHub = "calavera" - - [people.coolljt0725] - Name = "Lei Jitang" - Email = "leijitang@huawei.com" - GitHub = "coolljt0725" - - [people.cpuguy83] - Name = "Brian Goff" - Email = "cpuguy83@gmail.com" - Github = "cpuguy83" - - [people.crosbymichael] - Name = "Michael Crosby" - Email = "crosbymichael@gmail.com" - GitHub = "crosbymichael" - - [people.duglin] - Name = "Doug Davis" - Email = "dug@us.ibm.com" - GitHub = "duglin" - - [people.erikh] - Name = "Erik Hollensbe" - Email = "erik@docker.com" - GitHub = "erikh" - - [people.estesp] - Name = "Phil Estes" - Email = "estesp@linux.vnet.ibm.com" - GitHub = "estesp" - - [people.icecrime] - Name = "Arnaud Porterie" - Email = "arnaud@docker.com" - GitHub = "icecrime" - - [people.jamtur01] - Name = "James Turnbull" - Email = "james@lovedthanlost.net" - GitHub = "jamtur01" - - [people.jhowardmsft] - Name = "John Howard" - Email = "jhoward@microsoft.com" - GitHub = "jhowardmsft" - - [people.jfrazelle] - Name = "Jessie Frazelle" - Email = "jess@linux.com" - GitHub = "jfrazelle" - - [people.justincormack] - Name = "Justin Cormack" - Email = "justin.cormack@docker.com" - GitHub = "justincormack" - - [people.lk4d4] - Name = "Alexander Morozov" - Email = "lk4d4@docker.com" - GitHub = "lk4d4" - - [people.mavenugo] - Name = "Madhu Venugopal" - Email = "madhu@docker.com" - GitHub = "mavenugo" - - [people.mhbauer] - Name = "Morgan Bauer" - Email = "mbauer@us.ibm.com" - GitHub = "mhbauer" - - [people.moxiegirl] - Name = "Mary Anthony" - Email = "mary.anthony@docker.com" - GitHub = "moxiegirl" - - [people.programmerq] - Name = "Jeff Anderson" - Email = "jeff@docker.com" - GitHub = "programmerq" - - [people.runcom] - Name = "Antonio Murdaca" - Email = "runcom@redhat.com" - GitHub = "runcom" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - - [people.sven] - Name = "Sven Dowideit" - Email = "SvenDowideit@home.org.au" - GitHub = "SvenDowideit" - - [people.thajeztah] - Name = "Sebastiaan van Stijn" - Email = "github@gone.nl" - GitHub = "thaJeztah" - - [people.tianon] - Name = "Tianon Gravi" - Email = "admwiggin@gmail.com" - GitHub = "tianon" - - [people.tibor] - Name = "Tibor Vass" - Email = "tibor@docker.com" - GitHub = "tiborvass" - - [people.tonistiigi] - Name = "Tõnis Tiigi" - 
Email = "tonis@docker.com" - GitHub = "tonistiigi" - - [people.unclejack] - Name = "Cristian Staretu" - Email = "cristian.staretu@gmail.com" - GitHub = "unclejack" - - [people.vbatts] - Name = "Vincent Batts" - Email = "vbatts@redhat.com" - GitHub = "vbatts" - - [people.vdemeester] - Name = "Vincent Demeester" - Email = "vincent@sbr.pm" - GitHub = "vdemeester" - - [people.vieux] - Name = "Victor Vieux" - Email = "vieux@docker.com" - GitHub = "vieux" - - [people.vishh] - Name = "Vishnu Kannan" - Email = "vishnuk@google.com" - GitHub = "vishh" diff --git a/Makefile b/Makefile deleted file mode 100644 index 445ddd760a..0000000000 --- a/Makefile +++ /dev/null @@ -1,126 +0,0 @@ -.PHONY: all binary build build-gccgo cross default docs docs-build docs-shell shell gccgo test test-docker-py test-integration-cli test-unit validate help - -# set the graph driver as the current graphdriver if not set -DOCKER_GRAPHDRIVER := $(if $(DOCKER_GRAPHDRIVER),$(DOCKER_GRAPHDRIVER),$(shell docker info 2>&1 | grep "Storage Driver" | sed 's/.*: //')) - -# get OS/Arch of docker engine -DOCKER_OSARCH := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKER_ENGINE_OSARCH:-$$DOCKER_CLIENT_OSARCH}') -DOCKERFILE := $(shell bash -c 'source hack/make/.detect-daemon-osarch && echo $${DOCKERFILE}') - -# env vars passed through directly to Docker's build scripts -# to allow things like `make KEEPBUNDLE=1 binary` easily -# `docs/sources/contributing/devenvironment.md ` and `project/PACKAGERS.md` have some limited documentation of some of these -DOCKER_ENVS := \ - -e BUILDFLAGS \ - -e KEEPBUNDLE \ - -e DOCKER_BUILD_GOGC \ - -e DOCKER_BUILD_PKGS \ - -e DOCKER_DEBUG \ - -e DOCKER_EXPERIMENTAL \ - -e DOCKER_GITCOMMIT \ - -e DOCKER_GRAPHDRIVER=$(DOCKER_GRAPHDRIVER) \ - -e DOCKER_INCREMENTAL_BINARY \ - -e DOCKER_REMAP_ROOT \ - -e DOCKER_STORAGE_OPTS \ - -e DOCKER_USERLANDPROXY \ - -e TESTDIRS \ - -e TESTFLAGS \ - -e TIMEOUT -# note: we _cannot_ add "-e DOCKER_BUILDTAGS" here because even if it's unset in the shell, that would shadow the "ENV DOCKER_BUILDTAGS" set in our Dockerfile, which is very important for our official builds - -# to allow `make BIND_DIR=. shell` or `make BIND_DIR= test` -# (default to no bind mount if DOCKER_HOST is set) -# note: BINDDIR is supported for backwards-compatibility here -BIND_DIR := $(if $(BINDDIR),$(BINDDIR),$(if $(DOCKER_HOST),,bundles)) -DOCKER_MOUNT := $(if $(BIND_DIR),-v "$(CURDIR)/$(BIND_DIR):/go/src/github.com/docker/docker/$(BIND_DIR)") - -# This allows the test suite to be able to run without worrying about the underlying fs used by the container running the daemon (e.g. aufs-on-aufs), so long as the host running the container is running a supported fs. -# The volume will be cleaned up when the container is removed due to `--rm`. -# Note that `BIND_DIR` will already be set to `bundles` if `DOCKER_HOST` is not set (see above BIND_DIR line), in such case this will do nothing since `DOCKER_MOUNT` will already be set. 
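As a usage sketch (invocations taken from the comments in this Makefile; the fallback assignment for `DOCKER_MOUNT` follows just below):

```bash
# How BIND_DIR/KEEPBUNDLE surface on the command line (a sketch, not exhaustive).
make binary                # BIND_DIR defaults to "bundles" when DOCKER_HOST is unset
make BIND_DIR=. shell      # bind-mount the whole source tree into the dev container
make BIND_DIR= test        # no bind mount; an anonymous volume backs .../bundles instead
make KEEPBUNDLE=1 binary   # KEEPBUNDLE passes through to the build scripts via DOCKER_ENVS
```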
-DOCKER_MOUNT := $(if $(DOCKER_MOUNT),$(DOCKER_MOUNT),-v "/go/src/github.com/docker/docker/bundles") - -GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD 2>/dev/null) -GIT_BRANCH_CLEAN := $(shell echo $(GIT_BRANCH) | sed -e "s/[^[:alnum:]]/-/g") -DOCKER_IMAGE := docker-dev$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) -DOCKER_DOCS_IMAGE := docker-docs$(if $(GIT_BRANCH_CLEAN),:$(GIT_BRANCH_CLEAN)) - -DOCKER_FLAGS := docker run --rm -i --privileged $(DOCKER_ENVS) $(DOCKER_MOUNT) - -# if this session isn't interactive, then we don't want to allocate a -# TTY, which would fail, but if it is interactive, we do want to attach -# so that the user can send e.g. ^C through. -INTERACTIVE := $(shell [ -t 0 ] && echo 1 || echo 0) -ifeq ($(INTERACTIVE), 1) - DOCKER_FLAGS += -t -endif - -DOCKER_RUN_DOCKER := $(DOCKER_FLAGS) "$(DOCKER_IMAGE)" - -default: binary - -all: build ## validate all checks, build linux binaries, run all tests\ncross build non-linux binaries and generate archives - $(DOCKER_RUN_DOCKER) hack/make.sh - -binary: build ## build the linux binaries - $(DOCKER_RUN_DOCKER) hack/make.sh binary - -build: bundles - docker build ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)" -f "$(DOCKERFILE)" . - -build-gccgo: bundles - docker build ${DOCKER_BUILD_ARGS} -t "$(DOCKER_IMAGE)-gccgo" -f Dockerfile.gccgo . - -bundles: - mkdir bundles - -cross: build ## cross build the binaries for darwin, freebsd and\nwindows - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross - -win: build ## cross build the binary for windows - $(DOCKER_RUN_DOCKER) hack/make.sh win - -tgz: build ## build the archives (.zip on windows and .tgz\notherwise) containing the binaries - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary binary cross tgz - -deb: build ## build the deb packages - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-deb - -docs: ## build the docs - $(MAKE) -C docs docs - -gccgo: build-gccgo ## build the gcc-go linux binaries - $(DOCKER_FLAGS) "$(DOCKER_IMAGE)-gccgo" hack/make.sh gccgo - -install: ## install the linux binaries - KEEPBUNDLE=1 hack/make.sh install-binary - -rpm: build ## build the rpm packages - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary build-rpm - -shell: build ## start a shell inside the build env - $(DOCKER_RUN_DOCKER) bash - -test: build ## run the unit, integration and docker-py tests - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary cross test-unit test-integration-cli test-docker-py - -test-docker-py: build ## run the docker-py tests - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-docker-py - -test-integration-cli: build ## run the integration tests - $(DOCKER_RUN_DOCKER) hack/make.sh dynbinary test-integration-cli - -test-unit: build ## run the unit tests - $(DOCKER_RUN_DOCKER) hack/make.sh test-unit - -validate: build ## validate DCO, Seccomp profile generation, gofmt,\n./pkg/ isolation, golint, tests, tomls, go vet and vendor - $(DOCKER_RUN_DOCKER) hack/make.sh validate-dco validate-default-seccomp validate-gofmt validate-pkg validate-lint validate-test validate-toml validate-vet validate-vendor - -manpages: ## Generate man pages from go source and markdown - docker build -t docker-manpage-dev -f "man/$(DOCKERFILE)" ./man - docker run \ - -v $(PWD):/go/src/github.com/docker/docker/ \ - docker-manpage-dev - -help: ## this help - @awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST) - diff --git a/NOTICE b/NOTICE deleted file mode 100644 index 8a37c1c7bc..0000000000 --- a/NOTICE +++ 
/dev/null @@ -1,19 +0,0 @@ -Docker -Copyright 2012-2016 Docker, Inc. - -This product includes software developed at Docker, Inc. (https://www.docker.com). - -This product contains software (https://github.com/kr/pty) developed -by Keith Rarick, licensed under the MIT License. - -The following is courtesy of our legal counsel: - - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - -See also https://www.apache.org/dev/crypto.html and/or seek legal counsel. diff --git a/README.md b/README.md deleted file mode 100644 index c4530448cb..0000000000 --- a/README.md +++ /dev/null @@ -1,304 +0,0 @@ -Docker: the container engine [![Release](https://img.shields.io/github/release/docker/docker.svg)](https://github.com/docker/docker/releases/latest) -============================ - -Docker is an open source project to pack, ship and run any application -as a lightweight container. - -Docker containers are both *hardware-agnostic* and *platform-agnostic*. -This means they can run anywhere, from your laptop to the largest -cloud compute instance and everything in between - and they don't require -you to use a particular language, framework or packaging system. That -makes them great building blocks for deploying and scaling web apps, -databases, and backend services without depending on a particular stack -or provider. - -Docker began as an open-source implementation of the deployment engine which -powered [dotCloud](http://web.archive.org/web/20130530031104/https://www.dotcloud.com/), -a popular Platform-as-a-Service. It benefits directly from the experience -accumulated over several years of large-scale operation and support of hundreds -of thousands of applications and databases. - -![Docker logo](docs/static_files/docker-logo-compressed.png "Docker") - -## Security Disclosure - -Security is very important to us. If you have any issue regarding security, -please disclose the information responsibly by sending an email to -security@docker.com and not by creating a github issue. - -## Better than VMs - -A common method for distributing applications and sandboxing their -execution is to use virtual machines, or VMs. Typical VM formats are -VMware's vmdk, Oracle VirtualBox's vdi, and Amazon EC2's ami. In theory -these formats should allow every developer to automatically package -their application into a "machine" for easy distribution and deployment. -In practice, that almost never happens, for a few reasons: - - * *Size*: VMs are very large which makes them impractical to store - and transfer. - * *Performance*: running VMs consumes significant CPU and memory, - which makes them impractical in many scenarios, for example local - development of multi-tier applications, and large-scale deployment - of cpu and memory-intensive applications on large numbers of - machines. - * *Portability*: competing VM environments don't play well with each - other. Although conversion tools do exist, they are limited and - add even more overhead. - * *Hardware-centric*: VMs were designed with machine operators in - mind, not software developers. As a result, they offer very - limited tooling for what developers need most: building, testing - and running their software. For example, VMs offer no facilities - for application versioning, monitoring, configuration, logging or - service discovery. 
- -By contrast, Docker relies on a different sandboxing method known as -*containerization*. Unlike traditional virtualization, containerization -takes place at the kernel level. Most modern operating system kernels -now support the primitives necessary for containerization, including -Linux with [openvz](https://openvz.org), -[vserver](http://linux-vserver.org) and more recently -[lxc](https://linuxcontainers.org/), Solaris with -[zones](https://docs.oracle.com/cd/E26502_01/html/E29024/preface-1.html#scrolltoc), -and FreeBSD with -[Jails](https://www.freebsd.org/doc/handbook/jails.html). - -Docker builds on top of these low-level primitives to offer developers a -portable format and runtime environment that solves all four problems. -Docker containers are small (and their transfer can be optimized with -layers), they have basically zero memory and cpu overhead, they are -completely portable, and are designed from the ground up with an -application-centric design. - -Perhaps best of all, because Docker operates at the OS level, it can still be -run inside a VM! - -## Plays well with others - -Docker does not require you to buy into a particular programming -language, framework, packaging system, or configuration language. - -Is your application a Unix process? Does it use files, tcp connections, -environment variables, standard Unix streams and command-line arguments -as inputs and outputs? Then Docker can run it. - -Can your application's build be expressed as a sequence of such -commands? Then Docker can build it. - -## Escape dependency hell - -A common problem for developers is the difficulty of managing all -their application's dependencies in a simple and automated way. - -This is usually difficult for several reasons: - - * *Cross-platform dependencies*. Modern applications often depend on - a combination of system libraries and binaries, language-specific - packages, framework-specific modules, internal components - developed for another project, etc. These dependencies live in - different "worlds" and require different tools - these tools - typically don't work well with each other, requiring awkward - custom integrations. - - * *Conflicting dependencies*. Different applications may depend on - different versions of the same dependency. Packaging tools handle - these situations with various degrees of ease - but they all - handle them in different and incompatible ways, which again forces - the developer to do extra work. - - * *Custom dependencies*. A developer may need to prepare a custom - version of their application's dependency. Some packaging systems - can handle custom versions of a dependency, others can't - and all - of them handle it differently. - - -Docker solves the problem of dependency hell by giving the developer a simple -way to express *all* their application's dependencies in one place, while -streamlining the process of assembling them. If this makes you think of -[XKCD 927](https://xkcd.com/927/), don't worry. Docker doesn't -*replace* your favorite packaging systems. It simply orchestrates -their use in a simple and repeatable way. How does it do that? With -layers. - -Docker defines a build as running a sequence of Unix commands, one -after the other, in the same container. Build commands modify the -contents of the container (usually by installing new files on the -filesystem), the next command modifies it some more, etc. 
Since each -build command inherits the result of the previous commands, the -*order* in which the commands are executed expresses *dependencies*. - -Here's a typical Docker build process: - -```bash -FROM ubuntu:12.04 -RUN apt-get update && apt-get install -y python python-pip curl -RUN curl -sSL https://github.com/shykes/helloflask/archive/master.tar.gz | tar -xzv -RUN cd helloflask-master && pip install -r requirements.txt -``` - -Note that Docker doesn't care *how* dependencies are built - as long -as they can be built by running a Unix command in a container. - - -Getting started -=============== - -Docker can be installed either on your computer for building applications or -on servers for running them. To get started, [check out the installation -instructions in the -documentation](https://docs.docker.com/engine/installation/). - -Usage examples -============== - -Docker can be used to run short-lived commands, long-running daemons -(app servers, databases, etc.), interactive shell sessions, etc. - -You can find a [list of real-world -examples](https://docs.docker.com/engine/examples/) in the -documentation. - -Under the hood --------------- - -Under the hood, Docker is built on the following components: - -* The - [cgroups](https://www.kernel.org/doc/Documentation/cgroup-v1/cgroups.txt) - and - [namespaces](http://man7.org/linux/man-pages/man7/namespaces.7.html) - capabilities of the Linux kernel -* The [Go](https://golang.org) programming language -* The [Docker Image Specification](https://github.com/docker/docker/blob/master/image/spec/v1.md) -* The [Libcontainer Specification](https://github.com/opencontainers/runc/blob/master/libcontainer/SPEC.md) - -Contributing to Docker [![GoDoc](https://godoc.org/github.com/docker/docker?status.svg)](https://godoc.org/github.com/docker/docker) -====================== - -| **Master** (Linux) | **Experimental** (linux) | **Windows** | **FreeBSD** | -|------------------|----------------------|---------|---------| -| [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master/) | [![Jenkins Build Status](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/badge/icon)](https://jenkins.dockerproject.org/view/Docker/job/Docker%20Master%20%28experimental%29/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(windows)/) | [![Build Status](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/badge/icon)](http://jenkins.dockerproject.org/job/Docker%20Master%20(freebsd)/) | - -Want to hack on Docker? Awesome! We have [instructions to help you get -started contributing code or documentation](https://docs.docker.com/opensource/project/who-written-for/). - -These instructions are probably not perfect, please let us know if anything -feels wrong or incomplete. Better yet, submit a PR and improve them yourself. - -Getting the development builds -============================== - -Want to run Docker from a master build? You can download -master builds at [master.dockerproject.org](https://master.dockerproject.org). -They are updated with each commit merged into the master branch. - -Don't know how to use that super cool new feature in the master build? Check -out the master docs at -[docs.master.dockerproject.org](http://docs.master.dockerproject.org). 
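To make the kernel primitives listed under "Under the hood" more concrete, here is a minimal, illustrative Go sketch. It is not part of this repository, is Linux-only, and requires root; it simply starts a shell inside fresh UTS, PID and mount namespaces using the standard library's `syscall` package. It shows the raw building blocks only; Docker itself layers cgroups, image management and networking on top of them.

```go
package main

import (
	"os"
	"os/exec"
	"syscall"
)

// Illustration only: spawn /bin/sh inside new UTS, PID and mount
// namespaces, the raw Linux kernel primitives that container
// runtimes build upon. Requires Linux and root privileges.
func main() {
	cmd := exec.Command("/bin/sh")
	cmd.SysProcAttr = &syscall.SysProcAttr{
		Cloneflags: syscall.CLONE_NEWUTS | syscall.CLONE_NEWPID | syscall.CLONE_NEWNS,
	}
	cmd.Stdin, cmd.Stdout, cmd.Stderr = os.Stdin, os.Stdout, os.Stderr
	if err := cmd.Run(); err != nil {
		panic(err)
	}
}
```

Inside the child shell, changing the hostname does not affect the host, and the shell sees itself as PID 1 in its own PID namespace; a real container runtime additionally applies cgroup limits, a layered filesystem and network namespaces.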
- -How the project is run -====================== - -Docker is a very, very active project. If you want to learn more about how it is run, -or want to get more involved, the best place to start is [the project directory](https://github.com/docker/docker/tree/master/project). - -We are always open to suggestions on process improvements, and are always looking for more maintainers. - -### Talking to other Docker users and contributors - -| Channel | Details | -|---------|---------| -| Internet Relay Chat (IRC) | IRC is a direct line to our most knowledgeable Docker users; we have both the #docker and #docker-dev channels on irc.freenode.net. IRC is a rich chat protocol but it can overwhelm new users. You can search our chat archives. Read our IRC quickstart guide for an easy way to get started. | -| Docker Community Forums | The Docker Engine group is for users of the Docker Engine project. | -| Google Groups | The docker-dev group is for contributors and others working on the Docker project. You can join this group without a Google account by sending an email to docker-dev+subscribe@googlegroups.com. You'll receive a join-request message; simply reply to the message to confirm your subscription. | -| Twitter | You can follow Docker's Twitter feed to get updates on our products. You can also tweet us questions or just share blogs or stories. | -| Stack Overflow | Stack Overflow has over 7000 Docker questions listed. We regularly monitor Docker questions, and so do many other knowledgeable Docker users. |
- -### Legal - -*Brought to you courtesy of our legal counsel. For more context, -please see the [NOTICE](https://github.com/docker/docker/blob/master/NOTICE) document in this repo.* - -Use and transfer of Docker may be subject to certain restrictions by the -United States and other governments. - -It is your responsibility to ensure that your use and/or transfer does not -violate applicable laws. - -For more information, please see https://www.bis.doc.gov - - -Licensing -========= -Docker is licensed under the Apache License, Version 2.0. See -[LICENSE](https://github.com/docker/docker/blob/master/LICENSE) for the full -license text. - -Other Docker Related Projects -============================= -There are a number of projects under development that are based on Docker's -core technology. These projects expand the tooling built around the -Docker platform to broaden its application and utility. - -* [Docker Registry](https://github.com/docker/distribution): Registry -server for Docker (hosting/delivery of repositories and images) -* [Docker Machine](https://github.com/docker/machine): Machine management -for a container-centric world -* [Docker Swarm](https://github.com/docker/swarm): A Docker-native clustering -system -* [Docker Compose](https://github.com/docker/compose) (formerly Fig): -Define and run multi-container apps -* [Kitematic](https://github.com/docker/kitematic): The easiest way to use -Docker on Mac and Windows - -If you know of another project underway that should be listed here, please help -us keep this list up-to-date by submitting a PR. - -Awesome-Docker -============== -You can find more projects, tools and articles related to Docker on the [awesome-docker list](https://github.com/veggiemonk/awesome-docker). Add your project there. diff --git a/ROADMAP.md b/ROADMAP.md deleted file mode 100644 index 514fdb7423..0000000000 --- a/ROADMAP.md +++ /dev/null @@ -1,140 +0,0 @@ -Docker Engine Roadmap -===================== - -### How should I use this document? - -This document provides a description of the items the project has decided to prioritize. It should -serve as a reference point for Docker contributors to understand where the project is going, and -help determine whether a contribution might conflict with longer-term plans. - -The fact that a feature isn't listed here doesn't mean that a patch for it will automatically be -refused (except for those mentioned as "frozen features" below)! We are always happy to receive -patches for cool new features we haven't thought about, or didn't consider a priority. Please -understand, however, that such patches might take longer for us to review. - -### How can I help? - -Short-term objectives are listed in the [wiki](https://github.com/docker/docker/wiki) and described -in [Issues](https://github.com/docker/docker/issues?q=is%3Aopen+is%3Aissue+label%3Aroadmap). Our -goal is to divide the workload in such a way that anybody can jump in and help. Please comment on -issues if you want to take one, to avoid duplicating effort! Similarly, if a maintainer is already -assigned to an issue you'd like to participate in, pinging them on IRC or GitHub to offer your help is -the best way to go. - -### How can I add something to the roadmap? - -The roadmap process is new to the Docker Engine: we are only beginning to structure and document the -project objectives. Our immediate goal is to be more transparent, and work with our community to -focus our efforts on fewer prioritized topics.
- -We hope to offer in the near future a process allowing anyone to propose a topic for the roadmap, but -we are not quite there yet. For the time being, the BDFL remains the keeper of the roadmap, and we -won't be accepting pull requests adding or removing items from this file. - -# 1. Features and refactoring - -## 1.1 Runtime improvements - -We recently introduced [`runC`](https://runc.io) as a standalone low-level tool for container -execution. The initial goal was to integrate runC into the Engine as a replacement for the traditional -default libcontainer `execdriver`, but the Engine internals were not ready for this. - -As runC continued evolving, and the OCI specification along with it, we created -[`containerd`](https://containerd.tools/), a daemon to control and monitor multiple `runC` instances. This is -the new target for Engine integration, as it can entirely replace the `execdriver` -architecture, and container monitoring along with it. - -Docker Engine will rely on a long-running `containerd` companion daemon for all container-execution-related -operations. This could open the door in the future to Engine restarts without interrupting -running containers. - -## 1.2 Plugins improvements - -Docker Engine 1.7.0 introduced plugin support, initially for the use cases of volume and network -extensions. The plugin infrastructure was kept minimal as we were collecting use cases and real-world -feedback before optimizing for any particular workflow. - -In the future, we'd like plugins to become first-class citizens, and to encourage an ecosystem of -plugins. This implies in particular making it trivially easy to distribute plugins as containers -through any Registry instance, as well as solving the commonly heard pain points of plugins needing -to be treated as somewhat special (active at all times, started before any other user -containers, and not as easily dismissed). - -## 1.3 Internal decoupling - -A lot of work has been done to decouple the Docker Engine's internals. In particular, the -API implementation has been refactored, ongoing work is happening to move the code to a separate -repository ([`docker/engine-api`](https://github.com/docker/engine-api)), and the Builder side of -the daemon is now [fully independent](https://github.com/docker/docker/tree/master/builder) while -still residing in the same repository. - -We are exploring ways to go further with that decoupling, capitalizing on the work introduced by the -runtime renovation and plugins improvement efforts. Indeed, the combination of `containerd` support -with the concept of "special" containers opens the door for bootstrapping more Engine internals -using the same facilities. - -## 1.4 Cluster capable Engine - -The community has been pushing for a more cluster-capable Docker Engine, and a huge effort was spent -adding features such as multihost networking and node discovery down at the Engine level. Yet the -Engine is currently incapable of making scheduling decisions alone, and continues to rely on Swarm -for that. - -We plan to complete this effort and make the Engine fully cluster capable. Multiple instances of the -Docker Engine are already capable of discovering each other and establishing overlay networking so that -their containers can communicate; the next step is for a given Engine to gain the ability to dispatch work -to another node in the cluster. This will be introduced in a backward compatible way, such that a -`docker run` invocation on a particular node remains fully deterministic.
- -# 2 Frozen features - -## 2.1 Docker exec - -We won't accept patches expanding the surface of `docker exec`, which we intend to keep as a -*debugging* feature and which remains strongly dependent on the runtime improvements effort. - -## 2.2 Dockerfile syntax - -The Dockerfile syntax as we know it is simple, and has proven successful in supporting all our -[official images](https://github.com/docker-library/official-images). Although this is *not* a -definitive move, we temporarily won't accept more patches to the Dockerfile syntax for several -reasons: - - - The long-term impact of syntax changes is a sensitive matter that requires an amount of attention -the volume of the Engine codebase and activity today doesn't allow us to provide. - - Allowing the Builder to be implemented as a separate utility consuming the Engine's API will -open the door for many possibilities, such as offering alternate syntaxes or DSLs for existing -languages without cluttering the Engine's codebase. - - A standalone Builder will also give a dedicated group of maintainers the opportunity -to own the Dockerfile syntax and decide collectively on the direction to give it. - - Our experience with official images tends to show that no new instruction or syntax expansion is -*strictly* necessary for the majority of use cases, and although we are aware many things are -still lacking for many users, we cannot make this a priority yet for the above reasons. - -Again, this is not about saying that the Dockerfile syntax is done; it's about making choices about -what we want to do first! - -## 2.3 Remote Registry Operations - -A large amount of work is ongoing in the area of image distribution and provenance. This includes -moving to the V2 Registry API and heavily refactoring the code that powers these features. The -desired result is more secure, reliable and easier-to-use image distribution. - -Part of the problem in this area of the code base is the lack of a stable and flexible interface. -If new features are added that access the registry without solidifying these interfaces, achieving -feature parity will continue to be elusive. While we get a handle on this situation, we are imposing -a moratorium on new code that accesses the Registry API in commands that don't already make remote -calls. - -Currently, only the following commands cause interaction with a remote registry: - - - push - - pull - - run - - build - - search - - login - -In the interest of stabilizing the registry access model during this ongoing work, we are not -accepting additions to other commands that would cause remote interaction with the Registry API. This -moratorium will be lifted when the goals of the distribution project have been met. diff --git a/VENDORING.md b/VENDORING.md deleted file mode 100644 index cc12f46492..0000000000 --- a/VENDORING.md +++ /dev/null @@ -1,45 +0,0 @@ -# Vendoring policies - -This document outlines recommended vendoring policies for Docker repositories. -(For example, libnetwork is a Docker repo and logrus is not.) - -## Vendoring using tags - -Commit-ID-based vendoring provides little or no information about the updates -being vendored. To fix this, vendors will now require that repositories use annotated -tags along with commit IDs to snapshot commits. Annotated tags by themselves -are not sufficient, since the same tag can be force-updated to reference -different commits.
- -Each tag should: -- Follow Semantic Versioning rules (refer to the section on "Semantic Versioning") -- Have a corresponding entry in the change tracking document. - -Each repo should: -- Have a change tracking document between tags/releases, e.g., CHANGELOG.md or a -GitHub releases page. - -The goal here is for consuming repos to be able to use the tag version and -changelog updates to determine whether the vendoring will cause any breaking or -backward-incompatible changes. This also means that repos can specify a -dependency on a package of a specific version or greater up to the next major -release, without encountering breaking changes. - -## Semantic Versioning -Annotated version tags should follow Semantic Versioning policies. -According to http://semver.org: - -"Given a version number MAJOR.MINOR.PATCH, increment the: - MAJOR version when you make incompatible API changes, - MINOR version when you add functionality in a backwards-compatible manner, and - PATCH version when you make backwards-compatible bug fixes. -Additional labels for pre-release and build metadata are available as extensions -to the MAJOR.MINOR.PATCH format." - -## Vendoring cadence -In order to avoid huge vendoring changes, it is recommended to have a regular -cadence for vendoring updates, e.g., monthly. - -## Pre-merge vendoring tests -All related repos will be vendored into docker/docker. -CI on docker/docker should catch any breaking changes involving multiple repos. diff --git a/VERSION b/VERSION deleted file mode 100644 index b01f5b9601..0000000000 --- a/VERSION +++ /dev/null @@ -1 +0,0 @@ -1.12.2-rc1 diff --git a/api/README.md b/api/README.md deleted file mode 100644 index 453f61a1a1..0000000000 --- a/api/README.md +++ /dev/null @@ -1,5 +0,0 @@ -This directory contains code pertaining to the Docker API: - - - Used by the docker client when communicating with the docker daemon - - - Used by third-party tools wishing to interface with the docker daemon diff --git a/api/client/bundlefile/bundlefile.go b/api/client/bundlefile/bundlefile.go deleted file mode 100644 index 75c2d07433..0000000000 --- a/api/client/bundlefile/bundlefile.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build experimental - -package bundlefile - -import ( - "encoding/json" - "fmt" - "io" -) - -// Bundlefile stores the contents of a bundlefile -type Bundlefile struct { - Version string - Services map[string]Service -} - -// Service is a service from a bundlefile -type Service struct { - Image string - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Env []string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Ports []Port `json:",omitempty"` - WorkingDir *string `json:",omitempty"` - User *string `json:",omitempty"` - Networks []string `json:",omitempty"` -} - -// Port is a port as defined in a bundlefile -type Port struct { - Protocol string - Port uint32 -} - -// LoadFile loads a bundlefile from the given reader -func LoadFile(reader io.Reader) (*Bundlefile, error) { - bundlefile := &Bundlefile{} - - decoder := json.NewDecoder(reader) - if err := decoder.Decode(bundlefile); err != nil { - switch jsonErr := err.(type) { - case *json.SyntaxError: - return nil, fmt.Errorf( - "JSON syntax error at byte %v: %s", - jsonErr.Offset, - jsonErr.Error()) - case *json.UnmarshalTypeError: - return nil, fmt.Errorf( - "Unexpected type at byte %v.
Expected %s but received %s.", - jsonErr.Offset, - jsonErr.Type, - jsonErr.Value) - } - return nil, err - } - - return bundlefile, nil -} - -// Print writes the contents of the bundlefile to the output writer -// as human readable json -func Print(out io.Writer, bundle *Bundlefile) error { - bytes, err := json.MarshalIndent(*bundle, "", " ") - if err != nil { - return err - } - - _, err = out.Write(bytes) - return err -} diff --git a/api/client/bundlefile/bundlefile_test.go b/api/client/bundlefile/bundlefile_test.go deleted file mode 100644 index 1ff8235ff8..0000000000 --- a/api/client/bundlefile/bundlefile_test.go +++ /dev/null @@ -1,79 +0,0 @@ -// +build experimental - -package bundlefile - -import ( - "bytes" - "strings" - "testing" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestLoadFileV01Success(t *testing.T) { - reader := strings.NewReader(`{ - "Version": "0.1", - "Services": { - "redis": { - "Image": "redis@sha256:4b24131101fa0117bcaa18ac37055fffd9176aa1a240392bb8ea85e0be50f2ce", - "Networks": ["default"] - }, - "web": { - "Image": "dockercloud/hello-world@sha256:fe79a2cfbd17eefc344fb8419420808df95a1e22d93b7f621a7399fd1e9dca1d", - "Networks": ["default"], - "User": "web" - } - } - }`) - - bundle, err := LoadFile(reader) - assert.NilError(t, err) - assert.Equal(t, bundle.Version, "0.1") - assert.Equal(t, len(bundle.Services), 2) -} - -func TestLoadFileSyntaxError(t *testing.T) { - reader := strings.NewReader(`{ - "Version": "0.1", - "Services": unquoted string - }`) - - _, err := LoadFile(reader) - assert.Error(t, err, "syntax error at byte 37: invalid character 'u'") -} - -func TestLoadFileTypeError(t *testing.T) { - reader := strings.NewReader(`{ - "Version": "0.1", - "Services": { - "web": { - "Image": "redis", - "Networks": "none" - } - } - }`) - - _, err := LoadFile(reader) - assert.Error(t, err, "Unexpected type at byte 94. Expected []string but received string") -} - -func TestPrint(t *testing.T) { - var buffer bytes.Buffer - bundle := &Bundlefile{ - Version: "0.1", - Services: map[string]Service{ - "web": { - Image: "image", - Command: []string{"echo", "something"}, - }, - }, - } - assert.NilError(t, Print(&buffer, bundle)) - output := buffer.String() - assert.Contains(t, output, "\"Image\": \"image\"") - assert.Contains(t, output, - `"Command": [ - "echo", - "something" - ]`) -} diff --git a/api/client/cli.go b/api/client/cli.go deleted file mode 100644 index 1732c518c0..0000000000 --- a/api/client/cli.go +++ /dev/null @@ -1,281 +0,0 @@ -package client - -import ( - "errors" - "fmt" - "io" - "net/http" - "os" - "runtime" - - "github.com/docker/docker/api" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/cliconfig/credentials" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/term" - "github.com/docker/engine-api/client" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" -) - -// DockerCli represents the docker command line client. -// Instances of the client can be returned from NewDockerCli. -type DockerCli struct { - // initializing closure - init func() error - - // configFile has the client configuration file - configFile *configfile.ConfigFile - // in holds the input stream and closer (io.ReadCloser) for the client. - in io.ReadCloser - // out holds the output stream (io.Writer) for the client. 
- out io.Writer - // err holds the error stream (io.Writer) for the client. - err io.Writer - // keyFile holds the key file as a string. - keyFile string - // inFd holds the file descriptor of the client's STDIN (if valid). - inFd uintptr - // outFd holds the file descriptor of the client's STDOUT (if valid). - outFd uintptr - // isTerminalIn indicates whether the client's STDIN is a TTY - isTerminalIn bool - // isTerminalOut indicates whether the client's STDOUT is a TTY - isTerminalOut bool - // client is the http client that performs all API operations - client client.APIClient - // inState holds the terminal input state - inState *term.State - // outState holds the terminal output state - outState *term.State -} - -// Initialize calls the init function that will set up the configuration for the client -// such as the TLS, tcp and other parameters used to run the client. -func (cli *DockerCli) Initialize() error { - if cli.init == nil { - return nil - } - return cli.init() -} - -// Client returns the APIClient -func (cli *DockerCli) Client() client.APIClient { - return cli.client -} - -// Out returns the writer used for stdout -func (cli *DockerCli) Out() io.Writer { - return cli.out -} - -// Err returns the writer used for stderr -func (cli *DockerCli) Err() io.Writer { - return cli.err -} - -// In returns the reader used for stdin -func (cli *DockerCli) In() io.ReadCloser { - return cli.in -} - -// ConfigFile returns the ConfigFile -func (cli *DockerCli) ConfigFile() *configfile.ConfigFile { - return cli.configFile -} - -// IsTerminalOut returns true if the client's STDOUT is a TTY -func (cli *DockerCli) IsTerminalOut() bool { - return cli.isTerminalOut -} - -// OutFd returns the fd for the stdout stream -func (cli *DockerCli) OutFd() uintptr { - return cli.outFd -} - -// CheckTtyInput checks if we are trying to attach to a container tty -// from a non-tty client input stream, and if so, returns an error. -func (cli *DockerCli) CheckTtyInput(attachStdin, ttyMode bool) error { - // In order to attach to a container tty, input stream for the client must - // be a tty itself: redirecting or piping the client standard input is - // incompatible with `docker run -t`, `docker exec -t` or `docker attach`. - if ttyMode && attachStdin && !cli.isTerminalIn { - eText := "the input device is not a TTY" - if runtime.GOOS == "windows" { - return errors.New(eText + ". If you are using mintty, try prefixing the command with 'winpty'") - } - return errors.New(eText) - } - return nil -} - -// PsFormat returns the format string specified in the configuration. -// String contains columns and format specification, for example {{ID}}\t{{Name}}. -func (cli *DockerCli) PsFormat() string { - return cli.configFile.PsFormat -} - -// ImagesFormat returns the format string specified in the configuration. -// String contains columns and format specification, for example {{ID}}\t{{Name}}.
-func (cli *DockerCli) ImagesFormat() string { - return cli.configFile.ImagesFormat -} - -func (cli *DockerCli) setRawTerminal() error { - if os.Getenv("NORAW") == "" { - if cli.isTerminalIn { - state, err := term.SetRawTerminal(cli.inFd) - if err != nil { - return err - } - cli.inState = state - } - if cli.isTerminalOut { - state, err := term.SetRawTerminalOutput(cli.outFd) - if err != nil { - return err - } - cli.outState = state - } - } - return nil -} - -func (cli *DockerCli) restoreTerminal(in io.Closer) error { - if cli.inState != nil { - term.RestoreTerminal(cli.inFd, cli.inState) - } - if cli.outState != nil { - term.RestoreTerminal(cli.outFd, cli.outState) - } - // WARNING: DO NOT REMOVE THE OS CHECK !!! - // For some reason this Close call blocks on darwin. - // As the client exits right after, simply discard the close - // until we find a better solution. - if in != nil && runtime.GOOS != "darwin" { - return in.Close() - } - return nil -} - -// NewDockerCli returns a DockerCli instance with input, output and error streams set by in, out and err. -// The key file, protocol (i.e. unix) and address are passed in as strings, along with the tls.Config. If the tls.Config -// is set the client scheme will be set to https. -// The client will be given a 32-second timeout (see https://github.com/docker/docker/pull/8035). -func NewDockerCli(in io.ReadCloser, out, err io.Writer, clientFlags *cliflags.ClientFlags) *DockerCli { - cli := &DockerCli{ - in: in, - out: out, - err: err, - keyFile: clientFlags.Common.TrustKey, - } - - cli.init = func() error { - clientFlags.PostParse() - cli.configFile = LoadDefaultConfigFile(err) - - client, err := NewAPIClientFromFlags(clientFlags, cli.configFile) - if err != nil { - return err - } - - cli.client = client - - if cli.in != nil { - cli.inFd, cli.isTerminalIn = term.GetFdInfo(cli.in) - } - if cli.out != nil { - cli.outFd, cli.isTerminalOut = term.GetFdInfo(cli.out) - } - - return nil - } - - return cli -} - -// LoadDefaultConfigFile attempts to load the default config file and returns -// an initialized ConfigFile struct if none is found.
-func LoadDefaultConfigFile(err io.Writer) *configfile.ConfigFile { - configFile, e := cliconfig.Load(cliconfig.ConfigDir()) - if e != nil { - fmt.Fprintf(err, "WARNING: Error loading config file:%v\n", e) - } - if !configFile.ContainsAuth() { - credentials.DetectDefaultStore(configFile) - } - return configFile -} - -// NewAPIClientFromFlags creates a new APIClient from command line flags -func NewAPIClientFromFlags(clientFlags *cliflags.ClientFlags, configFile *configfile.ConfigFile) (client.APIClient, error) { - host, err := getServerHost(clientFlags.Common.Hosts, clientFlags.Common.TLSOptions) - if err != nil { - return &client.Client{}, err - } - - customHeaders := configFile.HTTPHeaders - if customHeaders == nil { - customHeaders = map[string]string{} - } - customHeaders["User-Agent"] = clientUserAgent() - - verStr := api.DefaultVersion - if tmpStr := os.Getenv("DOCKER_API_VERSION"); tmpStr != "" { - verStr = tmpStr - } - - httpClient, err := newHTTPClient(host, clientFlags.Common.TLSOptions) - if err != nil { - return &client.Client{}, err - } - - return client.NewClient(host, verStr, httpClient, customHeaders) -} - -func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (host string, err error) { - switch len(hosts) { - case 0: - host = os.Getenv("DOCKER_HOST") - case 1: - host = hosts[0] - default: - return "", errors.New("Please specify only one -H") - } - - host, err = opts.ParseHost(tlsOptions != nil, host) - return -} - -func newHTTPClient(host string, tlsOptions *tlsconfig.Options) (*http.Client, error) { - if tlsOptions == nil { - // let the api client configure the default transport. - return nil, nil - } - - config, err := tlsconfig.Client(*tlsOptions) - if err != nil { - return nil, err - } - tr := &http.Transport{ - TLSClientConfig: config, - } - proto, addr, _, err := client.ParseHost(host) - if err != nil { - return nil, err - } - - sockets.ConfigureTransport(tr, proto, addr) - - return &http.Client{ - Transport: tr, - }, nil -} - -func clientUserAgent() string { - return "Docker-Client/" + dockerversion.Version + " (" + runtime.GOOS + ")" -} diff --git a/api/client/client.go b/api/client/client.go deleted file mode 100644 index 4cfce5f684..0000000000 --- a/api/client/client.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package client provides a command-line interface for Docker. -// -// Run "docker help SUBCOMMAND" or "docker SUBCOMMAND --help" to see more information on any Docker subcommand, including the full list of options supported for the subcommand. -// See https://docs.docker.com/installation/ for instructions on installing Docker. 
-package client diff --git a/api/client/commands.go b/api/client/commands.go deleted file mode 100644 index ed328b8b57..0000000000 --- a/api/client/commands.go +++ /dev/null @@ -1,11 +0,0 @@ -package client - -// Command returns a cli command handler if one exists -func (cli *DockerCli) Command(name string) func(...string) error { - return map[string]func(...string) error{ - "exec": cli.CmdExec, - "info": cli.CmdInfo, - "inspect": cli.CmdInspect, - "update": cli.CmdUpdate, - }[name] -} diff --git a/api/client/container/attach.go b/api/client/container/attach.go deleted file mode 100644 index 8c8d675169..0000000000 --- a/api/client/container/attach.go +++ /dev/null @@ -1,129 +0,0 @@ -package container - -import ( - "fmt" - "io" - "net/http/httputil" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/signal" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type attachOptions struct { - noStdin bool - proxy bool - detachKeys string - - container string -} - -// NewAttachCommand creates a new cobra.Command for `docker attach` -func NewAttachCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts attachOptions - - cmd := &cobra.Command{ - Use: "attach [OPTIONS] CONTAINER", - Short: "Attach to a running container", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runAttach(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&opts.noStdin, "no-stdin", false, "Do not attach STDIN") - flags.BoolVar(&opts.proxy, "sig-proxy", true, "Proxy all received signals to the process") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - return cmd -} - -func runAttach(dockerCli *client.DockerCli, opts *attachOptions) error { - ctx := context.Background() - - c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) - if err != nil { - return err - } - - if !c.State.Running { - return fmt.Errorf("You cannot attach to a stopped container, start it first") - } - - if c.State.Paused { - return fmt.Errorf("You cannot attach to a paused container, unpause it first") - } - - if err := dockerCli.CheckTtyInput(!opts.noStdin, c.Config.Tty); err != nil { - return err - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: !opts.noStdin && c.Config.OpenStdin, - Stdout: true, - Stderr: true, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - var in io.ReadCloser - if options.Stdin { - in = dockerCli.In() - } - - if opts.proxy && !c.Config.Tty { - sigc := dockerCli.ForwardAllSignals(ctx, opts.container) - defer signal.StopCatch(sigc) - } - - resp, errAttach := dockerCli.Client().ContainerAttach(ctx, opts.container, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach returning an ErrPersistEOF (connection closed) - // means the server met an error and put it in the hijacked connection; - // keep the error and read the detailed error message from the hijacked connection later - return errAttach - } - defer resp.Close() - - if c.Config.Tty && dockerCli.IsTerminalOut() { - height, width := dockerCli.GetTtySize() - // To handle the case where a user repeatedly attaches/detaches without resizing their - // terminal, the only way to get the shell prompt to display for attaches 2+ is to artificially -
// resize it, then go back to normal. Without this, every attach after the first will - // require the user to manually resize or hit enter. - dockerCli.ResizeTtyTo(ctx, opts.container, height+1, width+1, false) - - // After the above resizing occurs, the call to MonitorTtySize below will handle resetting back - // to the actual size. - if err := dockerCli.MonitorTtySize(ctx, opts.container, false); err != nil { - logrus.Debugf("Error monitoring TTY size: %s", err) - } - } - if err := dockerCli.HoldHijackedConnection(ctx, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp); err != nil { - return err - } - - if errAttach != nil { - return errAttach - } - - _, status, err := getExitCode(dockerCli, ctx, opts.container) - if err != nil { - return err - } - if status != 0 { - return cli.StatusError{StatusCode: status} - } - - return nil -} diff --git a/api/client/container/commit.go b/api/client/container/commit.go deleted file mode 100644 index 34f5c230a5..0000000000 --- a/api/client/container/commit.go +++ /dev/null @@ -1,92 +0,0 @@ -package container - -import ( - "encoding/json" - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - dockeropts "github.com/docker/docker/opts" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" - "github.com/spf13/cobra" -) - -type commitOptions struct { - container string - reference string - - pause bool - comment string - author string - changes dockeropts.ListOpts - config string -} - -// NewCommitCommand creates a new cobra.Command for `docker commit` -func NewCommitCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts commitOptions - - cmd := &cobra.Command{ - Use: "commit [OPTIONS] CONTAINER [REPOSITORY[:TAG]]", - Short: "Create a new image from a container's changes", - Args: cli.RequiresRangeArgs(1, 2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - if len(args) > 1 { - opts.reference = args[1] - } - return runCommit(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - flags.BoolVarP(&opts.pause, "pause", "p", true, "Pause container during commit") - flags.StringVarP(&opts.comment, "message", "m", "", "Commit message") - flags.StringVarP(&opts.author, "author", "a", "", "Author (e.g., \"John Hannibal Smith \")") - - opts.changes = dockeropts.NewListOpts(nil) - flags.VarP(&opts.changes, "change", "c", "Apply Dockerfile instruction to the created image") - - // FIXME: --run is deprecated, it will be replaced with inline Dockerfile commands.
- flags.StringVar(&opts.config, "run", "", "This option is deprecated and will be removed in a future version in favor of inline Dockerfile-compatible commands") - flags.MarkDeprecated("run", "it will be replaced with inline Dockerfile commands.") - - return cmd -} - -func runCommit(dockerCli *client.DockerCli, opts *commitOptions) error { - ctx := context.Background() - - name := opts.container - reference := opts.reference - - var config *containertypes.Config - if opts.config != "" { - config = &containertypes.Config{} - if err := json.Unmarshal([]byte(opts.config), config); err != nil { - return err - } - } - - options := types.ContainerCommitOptions{ - Reference: reference, - Comment: opts.comment, - Author: opts.author, - Changes: opts.changes.GetAll(), - Pause: opts.pause, - Config: config, - } - - response, err := dockerCli.Client().ContainerCommit(ctx, name, options) - if err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), response.ID) - return nil -} diff --git a/api/client/container/cp.go b/api/client/container/cp.go deleted file mode 100644 index a0031c8b00..0000000000 --- a/api/client/container/cp.go +++ /dev/null @@ -1,303 +0,0 @@ -package container - -import ( - "fmt" - "io" - "os" - "path/filepath" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/system" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type copyOptions struct { - source string - destination string - followLink bool -} - -type copyDirection int - -const ( - fromContainer copyDirection = (1 << iota) - toContainer - acrossContainers = fromContainer | toContainer -) - -type cpConfig struct { - followLink bool -} - -// NewCopyCommand creates a new `docker cp` command -func NewCopyCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts copyOptions - - cmd := &cobra.Command{ - Use: `cp [OPTIONS] CONTAINER:SRC_PATH DEST_PATH|- - docker cp [OPTIONS] SRC_PATH|- CONTAINER:DEST_PATH`, - Short: "Copy files/folders between a container and the local filesystem", - Long: strings.Join([]string{ - "Copy files/folders between a container and the local filesystem\n", - "\nUse '-' as the source to read a tar archive from stdin\n", - "and extract it to a directory destination in a container.\n", - "Use '-' as the destination to stream a tar archive of a\n", - "container source to stdout.", - }, ""), - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - if args[0] == "" { - return fmt.Errorf("source cannot be empty") - } - if args[1] == "" { - return fmt.Errorf("destination cannot be empty") - } - opts.source = args[0] - opts.destination = args[1] - return runCopy(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.followLink, "follow-link", "L", false, "Always follow symbolic links in SRC_PATH") - - return cmd -} - -func runCopy(dockerCli *client.DockerCli, opts copyOptions) error { - srcContainer, srcPath := splitCpArg(opts.source) - dstContainer, dstPath := splitCpArg(opts.destination) - - var direction copyDirection - if srcContainer != "" { - direction |= fromContainer - } - if dstContainer != "" { - direction |= toContainer - } - - cpParam := &cpConfig{ - followLink: opts.followLink, - } - - ctx := context.Background() - - switch direction { - case fromContainer: - return copyFromContainer(ctx, dockerCli, srcContainer, srcPath, dstPath, cpParam) - case toContainer: - return
copyToContainer(ctx, dockerCli, srcPath, dstContainer, dstPath, cpParam) - case acrossContainers: - // Copying between containers isn't supported. - return fmt.Errorf("copying between containers is not supported") - default: - // User didn't specify any container. - return fmt.Errorf("must specify at least one container source") - } -} - -func statContainerPath(ctx context.Context, dockerCli *client.DockerCli, containerName, path string) (types.ContainerPathStat, error) { - return dockerCli.Client().ContainerStatPath(ctx, containerName, path) -} - -func resolveLocalPath(localPath string) (absPath string, err error) { - if absPath, err = filepath.Abs(localPath); err != nil { - return - } - - return archive.PreserveTrailingDotOrSeparator(absPath, localPath), nil -} - -func copyFromContainer(ctx context.Context, dockerCli *client.DockerCli, srcContainer, srcPath, dstPath string, cpParam *cpConfig) (err error) { - if dstPath != "-" { - // Get an absolute destination path. - dstPath, err = resolveLocalPath(dstPath) - if err != nil { - return err - } - } - - // if the client requests to follow symlinks, we must decide which target file to copy - var rebaseName string - if cpParam.followLink { - srcStat, err := statContainerPath(ctx, dockerCli, srcContainer, srcPath) - - // If the source is a symbolic link, we should follow it. - if err == nil && srcStat.Mode&os.ModeSymlink != 0 { - linkTarget := srcStat.LinkTarget - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - srcParent, _ := archive.SplitPathDirEntry(srcPath) - linkTarget = filepath.Join(srcParent, linkTarget) - } - - linkTarget, rebaseName = archive.GetRebaseName(srcPath, linkTarget) - srcPath = linkTarget - } - - } - - content, stat, err := dockerCli.Client().CopyFromContainer(ctx, srcContainer, srcPath) - if err != nil { - return err - } - defer content.Close() - - if dstPath == "-" { - // Send the response to STDOUT. - _, err = io.Copy(os.Stdout, content) - - return err - } - - // Prepare source copy info. - srcInfo := archive.CopyInfo{ - Path: srcPath, - Exists: true, - IsDir: stat.Mode.IsDir(), - RebaseName: rebaseName, - } - - preArchive := content - if len(srcInfo.RebaseName) != 0 { - _, srcBase := archive.SplitPathDirEntry(srcInfo.Path) - preArchive = archive.RebaseArchiveEntries(content, srcBase, srcInfo.RebaseName) - } - // See comments in the implementation of `archive.CopyTo` for exactly what - // goes into deciding how and whether the source archive needs to be - // altered for the correct copy behavior. - return archive.CopyTo(preArchive, srcInfo, dstPath) -} - -func copyToContainer(ctx context.Context, dockerCli *client.DockerCli, srcPath, dstContainer, dstPath string, cpParam *cpConfig) (err error) { - if srcPath != "-" { - // Get an absolute source path. - srcPath, err = resolveLocalPath(srcPath) - if err != nil { - return err - } - } - - // In order to get the copy behavior right, we need to know information - // about both the source and destination. The API is a simple tar - // archive/extract API but we can use the stat info header about the - // destination to be more informed about exactly what the destination is. - - // Prepare destination copy info by stat-ing the container path. - dstInfo := archive.CopyInfo{Path: dstPath} - dstStat, err := statContainerPath(ctx, dockerCli, dstContainer, dstPath) - - // If the destination is a symbolic link, we should evaluate it.
- if err == nil && dstStat.Mode&os.ModeSymlink != 0 { - linkTarget := dstStat.LinkTarget - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := archive.SplitPathDirEntry(dstPath) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - dstInfo.Path = linkTarget - dstStat, err = statContainerPath(ctx, dockerCli, dstContainer, linkTarget) - } - - // Ignore any error and assume that the parent directory of the destination - // path exists, in which case the copy may still succeed. If there is any - // type of conflict (e.g., non-directory overwriting an existing directory - // or vice versa) the extraction will fail. If the destination simply did - // not exist, but the parent directory does, the extraction will still - // succeed. - if err == nil { - dstInfo.Exists, dstInfo.IsDir = true, dstStat.Mode.IsDir() - } - - var ( - content io.Reader - resolvedDstPath string - ) - - if srcPath == "-" { - // Use STDIN. - content = os.Stdin - resolvedDstPath = dstInfo.Path - if !dstInfo.IsDir { - return fmt.Errorf("destination %q must be a directory", fmt.Sprintf("%s:%s", dstContainer, dstPath)) - } - } else { - // Prepare source copy info. - srcInfo, err := archive.CopyInfoSourcePath(srcPath, cpParam.followLink) - if err != nil { - return err - } - - srcArchive, err := archive.TarResource(srcInfo) - if err != nil { - return err - } - defer srcArchive.Close() - - // With the stat info about the local source as well as the - // destination, we have enough information to know whether we need to - // alter the archive that we upload so that when the server extracts - // it to the specified directory in the container we get the desired - // copy behavior. - - // See comments in the implementation of `archive.PrepareArchiveCopy` - // for exactly what goes into deciding how and whether the source - // archive needs to be altered for the correct copy behavior when it is - // extracted. This function also infers from the source and destination - // info which directory to extract to, which may be the parent of the - // destination that the user specified. - dstDir, preparedArchive, err := archive.PrepareArchiveCopy(srcArchive, srcInfo, dstInfo) - if err != nil { - return err - } - defer preparedArchive.Close() - - resolvedDstPath = dstDir - content = preparedArchive - } - - options := types.CopyToContainerOptions{ - AllowOverwriteDirWithFile: false, - } - - return dockerCli.Client().CopyToContainer(ctx, dstContainer, resolvedDstPath, content, options) -} - -// We use `:` as a delimiter between CONTAINER and PATH, but `:` could also be -// in a valid LOCALPATH, like `file:name.txt`. We can resolve this ambiguity by -// requiring a LOCALPATH with a `:` to be made explicit with a relative or -// absolute path: -// `/path/to/file:name.txt` or `./file:name.txt` -// -// This is apparently how `scp` handles this as well: -// http://www.cyberciti.biz/faq/rsync-scp-file-name-with-colon-punctuation-in-it/ -// -// We can't simply check for a filepath separator because container names may -// have a separator, e.g., "host0/cname1" if a container is in a Docker cluster, -// so we have to check for a `/` or `.` prefix. Also, in the case of a Windows -// client, a `:` could be part of an absolute Windows path, in which case it -// is immediately followed by a backslash. -func splitCpArg(arg string) (container, path string) { - if system.IsAbs(arg) { - // Explicit local absolute path, e.g., `C:\foo` or `/foo`.
- return "", arg - } - - parts := strings.SplitN(arg, ":", 2) - - if len(parts) == 1 || strings.HasPrefix(parts[0], ".") { - // Either there's no `:` in the arg - // OR it's an explicit local relative path like `./file:name.txt`. - return "", arg - } - - return parts[0], parts[1] -} diff --git a/api/client/container/create.go b/api/client/container/create.go deleted file mode 100644 index a5e90d636c..0000000000 --- a/api/client/container/create.go +++ /dev/null @@ -1,217 +0,0 @@ -package container - -import ( - "fmt" - "io" - "os" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - // FIXME migrate to docker/distribution/reference - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - runconfigopts "github.com/docker/docker/runconfig/opts" - apiclient "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -type createOptions struct { - name string -} - -// NewCreateCommand creats a new cobra.Command for `docker create` -func NewCreateCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts createOptions - var copts *runconfigopts.ContainerOptions - - cmd := &cobra.Command{ - Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Create a new container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - copts.Image = args[0] - if len(args) > 1 { - copts.Args = args[1:] - } - return runCreate(dockerCli, cmd.Flags(), &opts, copts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - flags.StringVar(&opts.name, "name", "", "Assign a name to the container") - - // Add an explicit help that doesn't have a `-h` to prevent the conflict - // with hostname - flags.Bool("help", false, "Print usage") - - client.AddTrustedFlags(flags, true) - copts = runconfigopts.AddFlags(flags) - return cmd -} - -func runCreate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *createOptions, copts *runconfigopts.ContainerOptions) error { - config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) - if err != nil { - reportError(dockerCli.Err(), "create", err.Error(), true) - return cli.StatusError{StatusCode: 125} - } - response, err := createContainer(context.Background(), dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) - if err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) - return nil -} - -func pullImage(ctx context.Context, dockerCli *client.DockerCli, image string, out io.Writer) error { - ref, err := reference.ParseNamed(image) - if err != nil { - return err - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - - authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index) - encodedAuth, err := client.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - options := types.ImageCreateOptions{ - RegistryAuth: encodedAuth, - } - - responseBody, err := dockerCli.Client().ImageCreate(ctx, image, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream( - responseBody, - out, - dockerCli.OutFd(), - dockerCli.IsTerminalOut(), - nil) -} - -type cidFile struct { - 
path string - file *os.File - written bool -} - -func (cid *cidFile) Close() error { - cid.file.Close() - - if !cid.written { - if err := os.Remove(cid.path); err != nil { - return fmt.Errorf("failed to remove the CID file '%s': %s \n", cid.path, err) - } - } - - return nil -} - -func (cid *cidFile) Write(id string) error { - if _, err := cid.file.Write([]byte(id)); err != nil { - return fmt.Errorf("Failed to write the container ID to the file: %s", err) - } - cid.written = true - return nil -} - -func newCIDFile(path string) (*cidFile, error) { - if _, err := os.Stat(path); err == nil { - return nil, fmt.Errorf("Container ID file found, make sure the other container isn't running or delete %s", path) - } - - f, err := os.Create(path) - if err != nil { - return nil, fmt.Errorf("Failed to create the container ID file: %s", err) - } - - return &cidFile{path: path, file: f}, nil -} - -func createContainer(ctx context.Context, dockerCli *client.DockerCli, config *container.Config, hostConfig *container.HostConfig, networkingConfig *networktypes.NetworkingConfig, cidfile, name string) (*types.ContainerCreateResponse, error) { - stderr := dockerCli.Err() - - var containerIDFile *cidFile - if cidfile != "" { - var err error - if containerIDFile, err = newCIDFile(cidfile); err != nil { - return nil, err - } - defer containerIDFile.Close() - } - - var trustedRef reference.Canonical - _, ref, err := reference.ParseIDOrReference(config.Image) - if err != nil { - return nil, err - } - if ref != nil { - ref = reference.WithDefaultTag(ref) - - if ref, ok := ref.(reference.NamedTagged); ok && client.IsTrusted() { - var err error - trustedRef, err = dockerCli.TrustedReference(ctx, ref) - if err != nil { - return nil, err - } - config.Image = trustedRef.String() - } - } - - //create the container - response, err := dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) - - //if image not found try to pull it - if err != nil { - if apiclient.IsErrImageNotFound(err) && ref != nil { - fmt.Fprintf(stderr, "Unable to find image '%s' locally\n", ref.String()) - - // we don't want to write to stdout anything apart from container.ID - if err = pullImage(ctx, dockerCli, config.Image, stderr); err != nil { - return nil, err - } - if ref, ok := ref.(reference.NamedTagged); ok && trustedRef != nil { - if err := dockerCli.TagTrusted(ctx, trustedRef, ref); err != nil { - return nil, err - } - } - // Retry - var retryErr error - response, retryErr = dockerCli.Client().ContainerCreate(ctx, config, hostConfig, networkingConfig, name) - if retryErr != nil { - return nil, retryErr - } - } else { - return nil, err - } - } - - for _, warning := range response.Warnings { - fmt.Fprintf(stderr, "WARNING: %s\n", warning) - } - if containerIDFile != nil { - if err = containerIDFile.Write(response.ID); err != nil { - return nil, err - } - } - return &response, nil -} diff --git a/api/client/container/diff.go b/api/client/container/diff.go deleted file mode 100644 index b0ae915d7c..0000000000 --- a/api/client/container/diff.go +++ /dev/null @@ -1,60 +0,0 @@ -package container - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/archive" - "github.com/spf13/cobra" -) - -type diffOptions struct { - container string -} - -// NewDiffCommand creates a new cobra.Command for `docker diff` -func NewDiffCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts diffOptions - - cmd := &cobra.Command{ - 
Use: "diff CONTAINER", - Short: "Inspect changes on a container's filesystem", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runDiff(dockerCli, &opts) - }, - } - - return cmd -} - -func runDiff(dockerCli *client.DockerCli, opts *diffOptions) error { - if opts.container == "" { - return fmt.Errorf("Container name cannot be empty") - } - ctx := context.Background() - - changes, err := dockerCli.Client().ContainerDiff(ctx, opts.container) - if err != nil { - return err - } - - for _, change := range changes { - var kind string - switch change.Kind { - case archive.ChangeModify: - kind = "C" - case archive.ChangeAdd: - kind = "A" - case archive.ChangeDelete: - kind = "D" - } - fmt.Fprintf(dockerCli.Out(), "%s %s\n", kind, change.Path) - } - - return nil -} diff --git a/api/client/container/export.go b/api/client/container/export.go deleted file mode 100644 index 8dbea9f7b0..0000000000 --- a/api/client/container/export.go +++ /dev/null @@ -1,59 +0,0 @@ -package container - -import ( - "errors" - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type exportOptions struct { - container string - output string -} - -// NewExportCommand creates a new `docker export` command -func NewExportCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts exportOptions - - cmd := &cobra.Command{ - Use: "export [OPTIONS] CONTAINER", - Short: "Export a container's filesystem as a tar archive", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runExport(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") - - return cmd -} - -func runExport(dockerCli *client.DockerCli, opts exportOptions) error { - if opts.output == "" && dockerCli.IsTerminalOut() { - return errors.New("Cowardly refusing to save to a terminal. 
Use the -o flag or redirect.") - } - - clnt := dockerCli.Client() - - responseBody, err := clnt.ContainerExport(context.Background(), opts.container) - if err != nil { - return err - } - defer responseBody.Close() - - if opts.output == "" { - _, err := io.Copy(dockerCli.Out(), responseBody) - return err - } - - return client.CopyToFile(opts.output, responseBody) -} diff --git a/api/client/container/kill.go b/api/client/container/kill.go deleted file mode 100644 index 78c0870c31..0000000000 --- a/api/client/container/kill.go +++ /dev/null @@ -1,53 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type killOptions struct { - signal string - - containers []string -} - -// NewKillCommand creates a new cobra.Command for `docker kill` -func NewKillCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts killOptions - - cmd := &cobra.Command{ - Use: "kill [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Kill one or more running containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runKill(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.signal, "signal", "s", "KILL", "Signal to send to the container") - return cmd -} - -func runKill(dockerCli *client.DockerCli, opts *killOptions) error { - var errs []string - ctx := context.Background() - for _, name := range opts.containers { - if err := dockerCli.Client().ContainerKill(ctx, name, opts.signal); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/container/logs.go b/api/client/container/logs.go deleted file mode 100644 index d0c7c03d2f..0000000000 --- a/api/client/container/logs.go +++ /dev/null @@ -1,87 +0,0 @@ -package container - -import ( - "fmt" - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -var validDrivers = map[string]bool{ - "json-file": true, - "journald": true, -} - -type logsOptions struct { - follow bool - since string - timestamps bool - details bool - tail string - - container string -} - -// NewLogsCommand creates a new cobra.Command for `docker logs` -func NewLogsCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts logsOptions - - cmd := &cobra.Command{ - Use: "logs [OPTIONS] CONTAINER", - Short: "Fetch the logs of a container", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - return runLogs(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.follow, "follow", "f", false, "Follow log output") - flags.StringVar(&opts.since, "since", "", "Show logs since timestamp") - flags.BoolVarP(&opts.timestamps, "timestamps", "t", false, "Show timestamps") - flags.BoolVar(&opts.details, "details", false, "Show extra details provided to logs") - flags.StringVar(&opts.tail, "tail", "all", "Number of lines to show from the end of the logs") - return cmd -} - -func runLogs(dockerCli *client.DockerCli, opts *logsOptions) error { - ctx := context.Background() - - c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) - if err != nil { -
return err - } - - if !validDrivers[c.HostConfig.LogConfig.Type] { - return fmt.Errorf("\"logs\" command is supported only for \"json-file\" and \"journald\" logging drivers (got: %s)", c.HostConfig.LogConfig.Type) - } - - options := types.ContainerLogsOptions{ - ShowStdout: true, - ShowStderr: true, - Since: opts.since, - Timestamps: opts.timestamps, - Follow: opts.follow, - Tail: opts.tail, - Details: opts.details, - } - responseBody, err := dockerCli.Client().ContainerLogs(ctx, opts.container, options) - if err != nil { - return err - } - defer responseBody.Close() - - if c.Config.Tty { - _, err = io.Copy(dockerCli.Out(), responseBody) - } else { - _, err = stdcopy.StdCopy(dockerCli.Out(), dockerCli.Err(), responseBody) - } - return err -} diff --git a/api/client/container/pause.go b/api/client/container/pause.go deleted file mode 100644 index fe170455c6..0000000000 --- a/api/client/container/pause.go +++ /dev/null @@ -1,50 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type pauseOptions struct { - containers []string -} - -// NewPauseCommand creats a new cobra.Command for `docker pause` -func NewPauseCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts pauseOptions - - cmd := &cobra.Command{ - Use: "pause CONTAINER [CONTAINER...]", - Short: "Pause all processes within one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runPause(dockerCli, &opts) - }, - } - - return cmd -} - -func runPause(dockerCli *client.DockerCli, opts *pauseOptions) error { - ctx := context.Background() - - var errs []string - for _, container := range opts.containers { - if err := dockerCli.Client().ContainerPause(ctx, container); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/container/port.go b/api/client/container/port.go deleted file mode 100644 index 1a12528d36..0000000000 --- a/api/client/container/port.go +++ /dev/null @@ -1,79 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/go-connections/nat" - "github.com/spf13/cobra" -) - -type portOptions struct { - container string - - port string -} - -// NewPortCommand creats a new cobra.Command for `docker port` -func NewPortCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts portOptions - - cmd := &cobra.Command{ - Use: "port CONTAINER [PRIVATE_PORT[/PROTO]]", - Short: "List port mappings or a specific mapping for the container", - Args: cli.RequiresRangeArgs(1, 2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.container = args[0] - if len(args) > 1 { - opts.port = args[1] - } - return runPort(dockerCli, &opts) - }, - } - - return cmd -} - -func runPort(dockerCli *client.DockerCli, opts *portOptions) error { - ctx := context.Background() - - c, err := dockerCli.Client().ContainerInspect(ctx, opts.container) - if err != nil { - return err - } - - if opts.port != "" { - port := opts.port - proto := "tcp" - parts := strings.SplitN(port, "/", 2) - - if len(parts) == 2 && len(parts[1]) != 0 { - port = parts[0] - proto = parts[1] - } - natPort := port + "/" + proto - 
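A note on the port lookup above: the optional PRIVATE_PORT[/PROTO] argument is split on "/" and turned into a `nat.Port` key before consulting the container's port map. A minimal, self-contained sketch of those steps using github.com/docker/go-connections/nat; the PortMap literal here is fabricated for illustration, whereas runPort reads the real one from ContainerInspect:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/go-connections/nat"
)

func main() {
	arg := "80/tcp" // stands in for the PRIVATE_PORT[/PROTO] argument

	port, proto := arg, "tcp"
	if parts := strings.SplitN(arg, "/", 2); len(parts) == 2 && parts[1] != "" {
		port, proto = parts[0], parts[1]
	}

	// nat.NewPort validates the port number (or range) and builds the
	// "80/tcp"-style key used by the API types.
	p, err := nat.NewPort(proto, port)
	if err != nil {
		panic(err)
	}

	// Fabricated bindings, standing in for c.NetworkSettings.Ports.
	bindings := nat.PortMap{
		p: []nat.PortBinding{{HostIP: "0.0.0.0", HostPort: "8080"}},
	}
	for _, b := range bindings[p] {
		fmt.Printf("%s:%s\n", b.HostIP, b.HostPort)
	}
}
```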
newP, err := nat.NewPort(proto, port) - if err != nil { - return err - } - if frontends, exists := c.NetworkSettings.Ports[newP]; exists && frontends != nil { - for _, frontend := range frontends { - fmt.Fprintf(dockerCli.Out(), "%s:%s\n", frontend.HostIP, frontend.HostPort) - } - return nil - } - return fmt.Errorf("Error: No public port '%s' published for %s", natPort, opts.container) - } - - for from, frontends := range c.NetworkSettings.Ports { - for _, frontend := range frontends { - fmt.Fprintf(dockerCli.Out(), "%s -> %s:%s\n", from, frontend.HostIP, frontend.HostPort) - } - } - - return nil -} diff --git a/api/client/container/ps.go b/api/client/container/ps.go deleted file mode 100644 index e4624b17d8..0000000000 --- a/api/client/container/ps.go +++ /dev/null @@ -1,125 +0,0 @@ -package container - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/formatter" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - - "github.com/docker/docker/utils/templates" - "github.com/spf13/cobra" - "io/ioutil" -) - -type psOptions struct { - quiet bool - size bool - all bool - noTrunc bool - nLatest bool - last int - format string - filter []string -} - -type preProcessor struct { - opts *types.ContainerListOptions -} - -// Size sets the size option when called by a template execution. -func (p *preProcessor) Size() bool { - p.opts.Size = true - return true -} - -// NewPsCommand creates a new cobra.Command for `docker ps` -func NewPsCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts psOptions - - cmd := &cobra.Command{ - Use: "ps [OPTIONS]", - Short: "List containers", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runPs(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display numeric IDs") - flags.BoolVarP(&opts.size, "size", "s", false, "Display total file sizes") - flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.BoolVarP(&opts.nLatest, "latest", "l", false, "Show the latest created container (includes all states)") - flags.IntVarP(&opts.last, "last", "n", -1, "Show n last created containers (includes all states)") - flags.StringVarP(&opts.format, "format", "", "", "Pretty-print containers using a Go template") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Filter output based on conditions provided") - - return cmd -} - -func runPs(dockerCli *client.DockerCli, opts *psOptions) error { - ctx := context.Background() - - if opts.nLatest && opts.last == -1 { - opts.last = 1 - } - - containerFilterArgs := filters.NewArgs() - for _, f := range opts.filter { - var err error - containerFilterArgs, err = filters.ParseFlag(f, containerFilterArgs) - if err != nil { - return err - } - } - - options := types.ContainerListOptions{ - All: opts.all, - Limit: opts.last, - Size: opts.size, - Filter: containerFilterArgs, - } - - pre := &preProcessor{opts: &options} - tmpl, err := templates.Parse(opts.format) - - if err != nil { - return err - } - - _ = tmpl.Execute(ioutil.Discard, pre) - - containers, err := dockerCli.Client().ContainerList(ctx, options) - if err != nil { - return err - } - - f := opts.format - if len(f) == 0 { - if len(dockerCli.PsFormat()) > 0 && !opts.quiet { - f = dockerCli.PsFormat() - } else { - f 
= "table" - } - } - - psCtx := formatter.ContainerContext{ - Context: formatter.Context{ - Output: dockerCli.Out(), - Format: f, - Quiet: opts.quiet, - Trunc: !opts.noTrunc, - }, - Size: opts.size, - Containers: containers, - } - - psCtx.Write() - - return nil -} diff --git a/api/client/container/rename.go b/api/client/container/rename.go deleted file mode 100644 index dcb04469e1..0000000000 --- a/api/client/container/rename.go +++ /dev/null @@ -1,52 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type renameOptions struct { - oldName string - newName string -} - -// NewRenameCommand creats a new cobra.Command for `docker rename` -func NewRenameCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts renameOptions - - cmd := &cobra.Command{ - Use: "rename CONTAINER NEW_NAME", - Short: "Rename a container", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.oldName = args[0] - opts.newName = args[1] - return runRename(dockerCli, &opts) - }, - } - - return cmd -} - -func runRename(dockerCli *client.DockerCli, opts *renameOptions) error { - ctx := context.Background() - - oldName := strings.TrimSpace(opts.oldName) - newName := strings.TrimSpace(opts.newName) - - if oldName == "" || newName == "" { - return fmt.Errorf("Error: Neither old nor new names may be empty") - } - - if err := dockerCli.Client().ContainerRename(ctx, oldName, newName); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - return fmt.Errorf("Error: failed to rename container named %s", oldName) - } - return nil -} diff --git a/api/client/container/restart.go b/api/client/container/restart.go deleted file mode 100644 index 4e80e1250a..0000000000 --- a/api/client/container/restart.go +++ /dev/null @@ -1,55 +0,0 @@ -package container - -import ( - "fmt" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type restartOptions struct { - nSeconds int - - containers []string -} - -// NewRestartCommand creates a new cobra.Command for `docker restart` -func NewRestartCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts restartOptions - - cmd := &cobra.Command{ - Use: "restart [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Restart a container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runRestart(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.IntVarP(&opts.nSeconds, "time", "t", 10, "Seconds to wait for stop before killing the container") - return cmd -} - -func runRestart(dockerCli *client.DockerCli, opts *restartOptions) error { - ctx := context.Background() - var errs []string - for _, name := range opts.containers { - timeout := time.Duration(opts.nSeconds) * time.Second - if err := dockerCli.Client().ContainerRestart(ctx, name, &timeout); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/container/rm.go b/api/client/container/rm.go deleted file mode 100644 index 2ccc6c21ff..0000000000 --- a/api/client/container/rm.go +++ /dev/null @@ -1,76 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - 
"github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type rmOptions struct { - rmVolumes bool - rmLink bool - force bool - - containers []string -} - -// NewRmCommand creates a new cobra.Command for `docker rm` -func NewRmCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts rmOptions - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Remove one or more containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runRm(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.rmVolumes, "volumes", "v", false, "Remove the volumes associated with the container") - flags.BoolVarP(&opts.rmLink, "link", "l", false, "Remove the specified link") - flags.BoolVarP(&opts.force, "force", "f", false, "Force the removal of a running container (uses SIGKILL)") - return cmd -} - -func runRm(dockerCli *client.DockerCli, opts *rmOptions) error { - ctx := context.Background() - - var errs []string - for _, name := range opts.containers { - if name == "" { - return fmt.Errorf("Container name cannot be empty") - } - name = strings.Trim(name, "/") - - if err := removeContainer(dockerCli, ctx, name, opts.rmVolumes, opts.rmLink, opts.force); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} - -func removeContainer(dockerCli *client.DockerCli, ctx context.Context, container string, removeVolumes, removeLinks, force bool) error { - options := types.ContainerRemoveOptions{ - RemoveVolumes: removeVolumes, - RemoveLinks: removeLinks, - Force: force, - } - if err := dockerCli.Client().ContainerRemove(ctx, container, options); err != nil { - return err - } - return nil -} diff --git a/api/client/container/run.go b/api/client/container/run.go deleted file mode 100644 index fe87ccb51c..0000000000 --- a/api/client/container/run.go +++ /dev/null @@ -1,326 +0,0 @@ -package container - -import ( - "fmt" - "io" - "net/http/httputil" - "os" - "runtime" - "strings" - "syscall" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - opttypes "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/libnetwork/resolvconf/dns" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -const ( - errCmdNotFound = "not found or does not exist" - errCmdCouldNotBeInvoked = "could not be invoked" -) - -type runOptions struct { - autoRemove bool - detach bool - sigProxy bool - name string - detachKeys string -} - -// NewRunCommand create a new `docker run` command -func NewRunCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts runOptions - var copts *runconfigopts.ContainerOptions - - cmd := &cobra.Command{ - Use: "run [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Run a command in a new container", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - copts.Image = args[0] - if len(args) > 1 { - copts.Args = args[1:] - } - return runRun(dockerCli, cmd.Flags(), &opts, copts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - // These are flags 
not stored in Config/HostConfig - flags.BoolVar(&opts.autoRemove, "rm", false, "Automatically remove the container when it exits") - flags.BoolVarP(&opts.detach, "detach", "d", false, "Run container in background and print container ID") - flags.BoolVar(&opts.sigProxy, "sig-proxy", true, "Proxy received signals to the process") - flags.StringVar(&opts.name, "name", "", "Assign a name to the container") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - - // Add an explicit help that doesn't have a `-h` to prevent the conflict - // with hostname - flags.Bool("help", false, "Print usage") - - client.AddTrustedFlags(flags, true) - copts = runconfigopts.AddFlags(flags) - return cmd -} - -func runRun(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts *runOptions, copts *runconfigopts.ContainerOptions) error { - stdout, stderr, stdin := dockerCli.Out(), dockerCli.Err(), dockerCli.In() - client := dockerCli.Client() - // TODO: pass this as an argument - cmdPath := "run" - - var ( - flAttach *opttypes.ListOpts - ErrConflictAttachDetach = fmt.Errorf("Conflicting options: -a and -d") - ErrConflictRestartPolicyAndAutoRemove = fmt.Errorf("Conflicting options: --restart and --rm") - ErrConflictDetachAutoRemove = fmt.Errorf("Conflicting options: --rm and -d") - ) - - config, hostConfig, networkingConfig, err := runconfigopts.Parse(flags, copts) - - // just in case the Parse does not exit - if err != nil { - reportError(stderr, cmdPath, err.Error(), true) - return cli.StatusError{StatusCode: 125} - } - - if hostConfig.OomKillDisable != nil && *hostConfig.OomKillDisable && hostConfig.Memory == 0 { - fmt.Fprintf(stderr, "WARNING: Disabling the OOM killer on containers without setting a '-m/--memory' limit may be dangerous.\n") - } - - if len(hostConfig.DNS) > 0 { - // check the DNS settings passed via --dns against - // localhost regexp to warn if they are trying to - // set a DNS to a localhost address - for _, dnsIP := range hostConfig.DNS { - if dns.IsLocalhost(dnsIP) { - fmt.Fprintf(stderr, "WARNING: Localhost DNS setting (--dns=%s) may fail in containers.\n", dnsIP) - break - } - } - } - - config.ArgsEscaped = false - - if !opts.detach { - if err := dockerCli.CheckTtyInput(config.AttachStdin, config.Tty); err != nil { - return err - } - } else { - if fl := flags.Lookup("attach"); fl != nil { - flAttach = fl.Value.(*opttypes.ListOpts) - if flAttach.Len() != 0 { - return ErrConflictAttachDetach - } - } - if opts.autoRemove { - return ErrConflictDetachAutoRemove - } - - config.AttachStdin = false - config.AttachStdout = false - config.AttachStderr = false - config.StdinOnce = false - } - - // Disable sigProxy when in TTY mode - if config.Tty { - opts.sigProxy = false - } - - // Telling the Windows daemon the initial size of the tty during start makes - // a far better user experience rather than relying on subsequent resizes - // to cause things to catch up. 
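As an aside on the conflict checks above (-a with -d, --rm with -d): they follow the usual cobra/pflag pattern of inspecting the parsed flag set before acting. A reduced, runnable sketch of the same check; the attach flag is simplified here to a plain string array, while the real command registers an opts.ListOpts value and reads it back through flags.Lookup:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	flags := pflag.NewFlagSet("run", pflag.ContinueOnError)
	detach := flags.BoolP("detach", "d", false, "Run container in background")
	flags.StringArrayP("attach", "a", nil, "Attach to STDIN, STDOUT or STDERR")

	if err := flags.Parse([]string{"-d", "-a", "stdout"}); err != nil {
		panic(err)
	}

	// Reject the combination, as runRun does.
	if *detach && flags.Changed("attach") {
		fmt.Println(errors.New("Conflicting options: -a and -d"))
	}
}
```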
- if runtime.GOOS == "windows" { - hostConfig.ConsoleSize[0], hostConfig.ConsoleSize[1] = dockerCli.GetTtySize() - } - - ctx, cancelFun := context.WithCancel(context.Background()) - - createResponse, err := createContainer(ctx, dockerCli, config, hostConfig, networkingConfig, hostConfig.ContainerIDFile, opts.name) - if err != nil { - reportError(stderr, cmdPath, err.Error(), true) - return runStartContainerErr(err) - } - if opts.sigProxy { - sigc := dockerCli.ForwardAllSignals(ctx, createResponse.ID) - defer signal.StopCatch(sigc) - } - var ( - waitDisplayID chan struct{} - errCh chan error - ) - if !config.AttachStdout && !config.AttachStderr { - // Make this asynchronous to allow the client to write to stdin before having to read the ID - waitDisplayID = make(chan struct{}) - go func() { - defer close(waitDisplayID) - fmt.Fprintf(stdout, "%s\n", createResponse.ID) - }() - } - if opts.autoRemove && (hostConfig.RestartPolicy.IsAlways() || hostConfig.RestartPolicy.IsOnFailure()) { - return ErrConflictRestartPolicyAndAutoRemove - } - attach := config.AttachStdin || config.AttachStdout || config.AttachStderr - if attach { - var ( - out, cerr io.Writer - in io.ReadCloser - ) - if config.AttachStdin { - in = stdin - } - if config.AttachStdout { - out = stdout - } - if config.AttachStderr { - if config.Tty { - cerr = stdout - } else { - cerr = stderr - } - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: config.AttachStdin, - Stdout: config.AttachStdout, - Stderr: config.AttachStderr, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - resp, errAttach := client.ContainerAttach(ctx, createResponse.ID, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach returns an ErrPersistEOF (connection closed) - // means server met an error and put it in Hijacked connection - // keep the error and read detailed error message from hijacked connection later - return errAttach - } - defer resp.Close() - - errCh = promise.Go(func() error { - errHijack := dockerCli.HoldHijackedConnection(ctx, config.Tty, in, out, cerr, resp) - if errHijack == nil { - return errAttach - } - return errHijack - }) - } - - if opts.autoRemove { - defer func() { - // Explicitly not sharing the context as it could be "Done" (by calling cancelFun) - // and thus the container would not be removed. - if err := removeContainer(dockerCli, context.Background(), createResponse.ID, true, false, true); err != nil { - fmt.Fprintf(stderr, "%v\n", err) - } - }() - } - - //start the container - if err := client.ContainerStart(ctx, createResponse.ID, types.ContainerStartOptions{}); err != nil { - // If we have holdHijackedConnection, we should notify - // holdHijackedConnection we are going to exit and wait - // to avoid the terminal are not restored. - if attach { - cancelFun() - <-errCh - } - - reportError(stderr, cmdPath, err.Error(), false) - return runStartContainerErr(err) - } - - if (config.AttachStdin || config.AttachStdout || config.AttachStderr) && config.Tty && dockerCli.IsTerminalOut() { - if err := dockerCli.MonitorTtySize(ctx, createResponse.ID, false); err != nil { - fmt.Fprintf(stderr, "Error monitoring TTY size: %s\n", err) - } - } - - if errCh != nil { - if err := <-errCh; err != nil { - logrus.Debugf("Error hijack: %s", err) - return err - } - } - - // Detached mode: wait for the id to be displayed and return. 
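Before the detached-mode handling continues below, a note on promise.Go, used above to pump the hijacked connection: it is a tiny helper that runs a function on a goroutine and returns a buffered channel carrying its eventual error. To the best of my reading it is equivalent to this sketch:

```go
package main

import (
	"errors"
	"fmt"
)

// goPromise mirrors github.com/docker/docker/pkg/promise.Go: run f in a
// goroutine and expose its eventual error on a buffered channel, so the
// caller can select on it (or ignore it) without leaking the goroutine.
func goPromise(f func() error) chan error {
	ch := make(chan error, 1)
	go func() {
		ch <- f()
	}()
	return ch
}

func main() {
	errCh := goPromise(func() error {
		// Stand-in for dockerCli.HoldHijackedConnection(...).
		return errors.New("stream closed")
	})
	fmt.Println(<-errCh)
}
```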
- if !config.AttachStdout && !config.AttachStderr { - // Detached mode - <-waitDisplayID - return nil - } - - var status int - - // Attached mode - if opts.autoRemove { - // Autoremove: wait for the container to finish, retrieve - // the exit code and remove the container - if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil { - return runStartContainerErr(err) - } - if _, status, err = getExitCode(dockerCli, ctx, createResponse.ID); err != nil { - return err - } - } else { - // No Autoremove: Simply retrieve the exit code - if !config.Tty && hostConfig.RestartPolicy.IsNone() { - // In non-TTY mode, we can't detach, so we must wait for container exit - if status, err = client.ContainerWait(ctx, createResponse.ID); err != nil { - return err - } - } else { - // In TTY mode, there is a race: if the process dies too slowly, the state could - // be updated after the getExitCode call and result in the wrong exit code being reported - if _, status, err = getExitCode(dockerCli, ctx, createResponse.ID); err != nil { - return err - } - } - } - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} - -// reportError is a utility method that prints a user-friendly message -// containing the error that occurred during parsing and a suggestion to get help -func reportError(stderr io.Writer, name string, str string, withHelp bool) { - if withHelp { - str += ".\nSee '" + os.Args[0] + " " + name + " --help'" - } - fmt.Fprintf(stderr, "%s: %s.\n", os.Args[0], str) -} - -// if container start fails with 'not found'/'no such' error, return 127 -// if container start fails with 'permission denied' error, return 126 -// return 125 for generic docker daemon failures -func runStartContainerErr(err error) error { - trimmedErr := strings.TrimPrefix(err.Error(), "Error response from daemon: ") - statusError := cli.StatusError{StatusCode: 125} - if strings.Contains(trimmedErr, "executable file not found") || - strings.Contains(trimmedErr, "no such file or directory") || - strings.Contains(trimmedErr, "system cannot find the file specified") { - statusError = cli.StatusError{StatusCode: 127} - } else if strings.Contains(trimmedErr, syscall.EACCES.Error()) { - statusError = cli.StatusError{StatusCode: 126} - } - - return statusError -} diff --git a/api/client/container/start.go b/api/client/container/start.go deleted file mode 100644 index 97f4a7a631..0000000000 --- a/api/client/container/start.go +++ /dev/null @@ -1,152 +0,0 @@ -package container - -import ( - "fmt" - "io" - "net/http/httputil" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type startOptions struct { - attach bool - openStdin bool - detachKeys string - - containers []string -} - -// NewStartCommand creats a new cobra.Command for `docker start` -func NewStartCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts startOptions - - cmd := &cobra.Command{ - Use: "start [OPTIONS] CONTAINER [CONTAINER...]", - Short: "Start one or more stopped containers", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runStart(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.attach, "attach", "a", false, "Attach STDOUT/STDERR and forward signals") - flags.BoolVarP(&opts.openStdin, "interactive", 
"i", false, "Attach container's STDIN") - flags.StringVar(&opts.detachKeys, "detach-keys", "", "Override the key sequence for detaching a container") - return cmd -} - -func runStart(dockerCli *client.DockerCli, opts *startOptions) error { - ctx, cancelFun := context.WithCancel(context.Background()) - - if opts.attach || opts.openStdin { - // We're going to attach to a container. - // 1. Ensure we only have one container. - if len(opts.containers) > 1 { - return fmt.Errorf("You cannot start and attach multiple containers at once.") - } - - // 2. Attach to the container. - container := opts.containers[0] - c, err := dockerCli.Client().ContainerInspect(ctx, container) - if err != nil { - return err - } - - // We always use c.ID instead of container to maintain consistency during `docker start` - if !c.Config.Tty { - sigc := dockerCli.ForwardAllSignals(ctx, c.ID) - defer signal.StopCatch(sigc) - } - - if opts.detachKeys != "" { - dockerCli.ConfigFile().DetachKeys = opts.detachKeys - } - - options := types.ContainerAttachOptions{ - Stream: true, - Stdin: opts.openStdin && c.Config.OpenStdin, - Stdout: true, - Stderr: true, - DetachKeys: dockerCli.ConfigFile().DetachKeys, - } - - var in io.ReadCloser - - if options.Stdin { - in = dockerCli.In() - } - - resp, errAttach := dockerCli.Client().ContainerAttach(ctx, c.ID, options) - if errAttach != nil && errAttach != httputil.ErrPersistEOF { - // ContainerAttach return an ErrPersistEOF (connection closed) - // means server met an error and put it in Hijacked connection - // keep the error and read detailed error message from hijacked connection - return errAttach - } - defer resp.Close() - cErr := promise.Go(func() error { - errHijack := dockerCli.HoldHijackedConnection(ctx, c.Config.Tty, in, dockerCli.Out(), dockerCli.Err(), resp) - if errHijack == nil { - return errAttach - } - return errHijack - }) - - // 3. Start the container. - if err := dockerCli.Client().ContainerStart(ctx, c.ID, types.ContainerStartOptions{}); err != nil { - cancelFun() - <-cErr - return err - } - - // 4. Wait for attachment to break. - if c.Config.Tty && dockerCli.IsTerminalOut() { - if err := dockerCli.MonitorTtySize(ctx, c.ID, false); err != nil { - fmt.Fprintf(dockerCli.Err(), "Error monitoring TTY size: %s\n", err) - } - } - if attchErr := <-cErr; attchErr != nil { - return attchErr - } - _, status, err := getExitCode(dockerCli, ctx, c.ID) - if err != nil { - return err - } - if status != 0 { - return cli.StatusError{StatusCode: status} - } - } else { - // We're not going to attach to anything. - // Start as many containers as we want. 
- return startContainersWithoutAttachments(dockerCli, ctx, opts.containers) - } - - return nil -} - -func startContainersWithoutAttachments(dockerCli *client.DockerCli, ctx context.Context, containers []string) error { - var failedContainers []string - for _, container := range containers { - if err := dockerCli.Client().ContainerStart(ctx, container, types.ContainerStartOptions{}); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - failedContainers = append(failedContainers, container) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - - if len(failedContainers) > 0 { - return fmt.Errorf("Error: failed to start containers: %v", strings.Join(failedContainers, ", ")) - } - return nil -} diff --git a/api/client/container/stats.go b/api/client/container/stats.go deleted file mode 100644 index b2a45a984c..0000000000 --- a/api/client/container/stats.go +++ /dev/null @@ -1,233 +0,0 @@ -package container - -import ( - "fmt" - "io" - "strings" - "sync" - "text/tabwriter" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/system" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "github.com/spf13/cobra" -) - -type statsOptions struct { - all bool - noStream bool - - containers []string -} - -// NewStatsCommand creats a new cobra.Command for `docker stats` -func NewStatsCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts statsOptions - - cmd := &cobra.Command{ - Use: "stats [OPTIONS] [CONTAINER...]", - Short: "Display a live stream of container(s) resource usage statistics", - Args: cli.RequiresMinArgs(0), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runStats(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.all, "all", "a", false, "Show all containers (default shows just running)") - flags.BoolVar(&opts.noStream, "no-stream", false, "Disable streaming stats and only pull the first result") - return cmd -} - -// runStats displays a live stream of resource usage statistics for one or more containers. -// This shows real-time information on CPU usage, memory usage, and network I/O. -func runStats(dockerCli *client.DockerCli, opts *statsOptions) error { - showAll := len(opts.containers) == 0 - closeChan := make(chan error) - - ctx := context.Background() - - // monitorContainerEvents watches for container creation and removal (only - // used when calling `docker stats` without arguments). - monitorContainerEvents := func(started chan<- struct{}, c chan events.Message) { - f := filters.NewArgs() - f.Add("type", "container") - options := types.EventsOptions{ - Filters: f, - } - resBody, err := dockerCli.Client().Events(ctx, options) - // Whether we successfully subscribed to events or not, we can now - // unblock the main goroutine. - close(started) - if err != nil { - closeChan <- err - return - } - defer resBody.Close() - - system.DecodeEvents(resBody, func(event events.Message, err error) error { - if err != nil { - closeChan <- err - return nil - } - c <- event - return nil - }) - } - - // waitFirst is a WaitGroup to wait first stat data's reach for each container - waitFirst := &sync.WaitGroup{} - - cStats := stats{} - // getContainerList simulates creation event for all previously existing - // containers (only used when calling `docker stats` without arguments). 
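runStats is careful to subscribe to the event stream (above) before listing the running containers (below): list first, and anything created in the gap is silently missed. A toy model of that ordering, independent of the Docker API, with channels standing in for the event subscription:

```go
package main

import "fmt"

func main() {
	events := make(chan string, 16)
	started := make(chan struct{})
	done := make(chan struct{})

	// Consumer: signal readiness only once the subscription is live,
	// like monitorContainerEvents closing `started` after Events() returns.
	go func() {
		defer close(done)
		close(started)
		for e := range events {
			fmt.Println("event:", e)
		}
	}()

	<-started // from here on, nothing published can be missed

	// Snapshot of pre-existing containers, analogous to getContainerList.
	for _, c := range []string{"c1", "c2"} {
		fmt.Println("existing:", c)
	}

	events <- "create:c3" // anything created later arrives as an event
	close(events)
	<-done
}
```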
-	getContainerList := func() {
-		options := types.ContainerListOptions{
-			All: opts.all,
-		}
-		cs, err := dockerCli.Client().ContainerList(ctx, options)
-		if err != nil {
-			closeChan <- err
-		}
-		for _, container := range cs {
-			s := &containerStats{Name: container.ID[:12]}
-			if cStats.add(s) {
-				waitFirst.Add(1)
-				go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
-			}
-		}
-	}
-
-	if showAll {
-		// If no names were specified, start a long running goroutine which
-		// monitors container events. We make sure we're subscribed before
-		// retrieving the list of running containers to avoid a race where we
-		// would "miss" a creation.
-		started := make(chan struct{})
-		eh := system.InitEventHandler()
-		eh.Handle("create", func(e events.Message) {
-			if opts.all {
-				s := &containerStats{Name: e.ID[:12]}
-				if cStats.add(s) {
-					waitFirst.Add(1)
-					go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
-				}
-			}
-		})
-
-		eh.Handle("start", func(e events.Message) {
-			s := &containerStats{Name: e.ID[:12]}
-			if cStats.add(s) {
-				waitFirst.Add(1)
-				go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
-			}
-		})
-
-		eh.Handle("die", func(e events.Message) {
-			if !opts.all {
-				cStats.remove(e.ID[:12])
-			}
-		})
-
-		eventChan := make(chan events.Message)
-		go eh.Watch(eventChan)
-		go monitorContainerEvents(started, eventChan)
-		defer close(eventChan)
-		<-started
-
-		// Start a short-lived goroutine to retrieve the initial list of
-		// containers.
-		getContainerList()
-	} else {
-		// Artificially send creation events for the containers we were asked to
-		// monitor (same code path as we use when monitoring all containers).
-		for _, name := range opts.containers {
-			s := &containerStats{Name: name}
-			if cStats.add(s) {
-				waitFirst.Add(1)
-				go s.Collect(ctx, dockerCli.Client(), !opts.noStream, waitFirst)
-			}
-		}
-
-		// We don't expect any asynchronous errors: closeChan can be closed.
-		close(closeChan)
-
-		// Do a quick pause to detect any error with the provided list of
-		// container names.
- time.Sleep(1500 * time.Millisecond) - var errs []string - cStats.mu.Lock() - for _, c := range cStats.cs { - c.mu.Lock() - if c.err != nil { - errs = append(errs, fmt.Sprintf("%s: %v", c.Name, c.err)) - } - c.mu.Unlock() - } - cStats.mu.Unlock() - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, ", ")) - } - } - - // before print to screen, make sure each container get at least one valid stat data - waitFirst.Wait() - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - printHeader := func() { - if !opts.noStream { - fmt.Fprint(dockerCli.Out(), "\033[2J") - fmt.Fprint(dockerCli.Out(), "\033[H") - } - io.WriteString(w, "CONTAINER\tCPU %\tMEM USAGE / LIMIT\tMEM %\tNET I/O\tBLOCK I/O\tPIDS\n") - } - - for range time.Tick(500 * time.Millisecond) { - printHeader() - toRemove := []string{} - cStats.mu.Lock() - for _, s := range cStats.cs { - if err := s.Display(w); err != nil && !opts.noStream { - logrus.Debugf("stats: got error for %s: %v", s.Name, err) - if err == io.EOF { - toRemove = append(toRemove, s.Name) - } - } - } - cStats.mu.Unlock() - for _, name := range toRemove { - cStats.remove(name) - } - if len(cStats.cs) == 0 && !showAll { - return nil - } - w.Flush() - if opts.noStream { - break - } - select { - case err, ok := <-closeChan: - if ok { - if err != nil { - // this is suppressing "unexpected EOF" in the cli when the - // daemon restarts so it shutdowns cleanly - if err == io.ErrUnexpectedEOF { - return nil - } - return err - } - } - default: - // just skip - } - } - return nil -} diff --git a/api/client/container/stats_helpers.go b/api/client/container/stats_helpers.go deleted file mode 100644 index 336ad14760..0000000000 --- a/api/client/container/stats_helpers.go +++ /dev/null @@ -1,238 +0,0 @@ -package container - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" - "github.com/docker/go-units" - "golang.org/x/net/context" -) - -type containerStats struct { - Name string - CPUPercentage float64 - Memory float64 - MemoryLimit float64 - MemoryPercentage float64 - NetworkRx float64 - NetworkTx float64 - BlockRead float64 - BlockWrite float64 - PidsCurrent uint64 - mu sync.Mutex - err error -} - -type stats struct { - mu sync.Mutex - cs []*containerStats -} - -func (s *stats) add(cs *containerStats) bool { - s.mu.Lock() - defer s.mu.Unlock() - if _, exists := s.isKnownContainer(cs.Name); !exists { - s.cs = append(s.cs, cs) - return true - } - return false -} - -func (s *stats) remove(id string) { - s.mu.Lock() - if i, exists := s.isKnownContainer(id); exists { - s.cs = append(s.cs[:i], s.cs[i+1:]...) 
- } - s.mu.Unlock() -} - -func (s *stats) isKnownContainer(cid string) (int, bool) { - for i, c := range s.cs { - if c.Name == cid { - return i, true - } - } - return -1, false -} - -func (s *containerStats) Collect(ctx context.Context, cli client.APIClient, streamStats bool, waitFirst *sync.WaitGroup) { - logrus.Debugf("collecting stats for %s", s.Name) - var ( - getFirst bool - previousCPU uint64 - previousSystem uint64 - u = make(chan error, 1) - ) - - defer func() { - // if error happens and we get nothing of stats, release wait group whatever - if !getFirst { - getFirst = true - waitFirst.Done() - } - }() - - responseBody, err := cli.ContainerStats(ctx, s.Name, streamStats) - if err != nil { - s.mu.Lock() - s.err = err - s.mu.Unlock() - return - } - defer responseBody.Close() - - dec := json.NewDecoder(responseBody) - go func() { - for { - var v *types.StatsJSON - - if err := dec.Decode(&v); err != nil { - dec = json.NewDecoder(io.MultiReader(dec.Buffered(), responseBody)) - u <- err - if err == io.EOF { - break - } - time.Sleep(100 * time.Millisecond) - continue - } - - var memPercent = 0.0 - var cpuPercent = 0.0 - - // MemoryStats.Limit will never be 0 unless the container is not running and we haven't - // got any data from cgroup - if v.MemoryStats.Limit != 0 { - memPercent = float64(v.MemoryStats.Usage) / float64(v.MemoryStats.Limit) * 100.0 - } - - previousCPU = v.PreCPUStats.CPUUsage.TotalUsage - previousSystem = v.PreCPUStats.SystemUsage - cpuPercent = calculateCPUPercent(previousCPU, previousSystem, v) - blkRead, blkWrite := calculateBlockIO(v.BlkioStats) - s.mu.Lock() - s.CPUPercentage = cpuPercent - s.Memory = float64(v.MemoryStats.Usage) - s.MemoryLimit = float64(v.MemoryStats.Limit) - s.MemoryPercentage = memPercent - s.NetworkRx, s.NetworkTx = calculateNetwork(v.Networks) - s.BlockRead = float64(blkRead) - s.BlockWrite = float64(blkWrite) - s.PidsCurrent = v.PidsStats.Current - s.mu.Unlock() - u <- nil - if !streamStats { - return - } - } - }() - for { - select { - case <-time.After(2 * time.Second): - // zero out the values if we have not received an update within - // the specified duration. - s.mu.Lock() - s.CPUPercentage = 0 - s.Memory = 0 - s.MemoryPercentage = 0 - s.MemoryLimit = 0 - s.NetworkRx = 0 - s.NetworkTx = 0 - s.BlockRead = 0 - s.BlockWrite = 0 - s.PidsCurrent = 0 - s.err = errors.New("timeout waiting for stats") - s.mu.Unlock() - // if this is the first stat you get, release WaitGroup - if !getFirst { - getFirst = true - waitFirst.Done() - } - case err := <-u: - if err != nil { - s.mu.Lock() - s.err = err - s.mu.Unlock() - continue - } - s.err = nil - // if this is the first stat you get, release WaitGroup - if !getFirst { - getFirst = true - waitFirst.Done() - } - } - if !streamStats { - return - } - } -} - -func (s *containerStats) Display(w io.Writer) error { - s.mu.Lock() - defer s.mu.Unlock() - // NOTE: if you change this format, you must also change the err format below! 
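One subtle move in Collect above deserves a note: after a failed Decode, the decoder is rebuilt as json.NewDecoder(io.MultiReader(dec.Buffered(), responseBody)), so bytes the old decoder had already read ahead are replayed rather than lost. The mechanism in isolation, on a fabricated two-object stream:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	var body io.Reader = strings.NewReader(`{"n":1} {"n":2}`)
	dec := json.NewDecoder(body)

	var v struct{ N int }
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}
	fmt.Println("decoded:", v.N)

	// The decoder has read ahead of the first object. If we abandon it
	// (as Collect does after a decode error), Buffered() hands back the
	// read-ahead bytes, and MultiReader splices them in front of the
	// rest of the stream so nothing is dropped.
	rest, _ := ioutil.ReadAll(io.MultiReader(dec.Buffered(), body))
	fmt.Printf("remainder: %q\n", strings.TrimSpace(string(rest)))
}
```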
- format := "%s\t%.2f%%\t%s / %s\t%.2f%%\t%s / %s\t%s / %s\t%d\n" - if s.err != nil { - format = "%s\t%s\t%s / %s\t%s\t%s / %s\t%s / %s\t%s\n" - errStr := "--" - fmt.Fprintf(w, format, - s.Name, errStr, errStr, errStr, errStr, errStr, errStr, errStr, errStr, errStr, - ) - err := s.err - return err - } - fmt.Fprintf(w, format, - s.Name, - s.CPUPercentage, - units.BytesSize(s.Memory), units.BytesSize(s.MemoryLimit), - s.MemoryPercentage, - units.HumanSize(s.NetworkRx), units.HumanSize(s.NetworkTx), - units.HumanSize(s.BlockRead), units.HumanSize(s.BlockWrite), - s.PidsCurrent) - return nil -} - -func calculateCPUPercent(previousCPU, previousSystem uint64, v *types.StatsJSON) float64 { - var ( - cpuPercent = 0.0 - // calculate the change for the cpu usage of the container in between readings - cpuDelta = float64(v.CPUStats.CPUUsage.TotalUsage) - float64(previousCPU) - // calculate the change for the entire system between readings - systemDelta = float64(v.CPUStats.SystemUsage) - float64(previousSystem) - ) - - if systemDelta > 0.0 && cpuDelta > 0.0 { - cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 - } - return cpuPercent -} - -func calculateBlockIO(blkio types.BlkioStats) (blkRead uint64, blkWrite uint64) { - for _, bioEntry := range blkio.IoServiceBytesRecursive { - switch strings.ToLower(bioEntry.Op) { - case "read": - blkRead = blkRead + bioEntry.Value - case "write": - blkWrite = blkWrite + bioEntry.Value - } - } - return -} - -func calculateNetwork(network map[string]types.NetworkStats) (float64, float64) { - var rx, tx float64 - - for _, v := range network { - rx += float64(v.RxBytes) - tx += float64(v.TxBytes) - } - return rx, tx -} diff --git a/api/client/container/stats_unit_test.go b/api/client/container/stats_unit_test.go deleted file mode 100644 index 83f24bb295..0000000000 --- a/api/client/container/stats_unit_test.go +++ /dev/null @@ -1,45 +0,0 @@ -package container - -import ( - "bytes" - "testing" - - "github.com/docker/engine-api/types" -) - -func TestDisplay(t *testing.T) { - c := &containerStats{ - Name: "app", - CPUPercentage: 30.0, - Memory: 100 * 1024 * 1024.0, - MemoryLimit: 2048 * 1024 * 1024.0, - MemoryPercentage: 100.0 / 2048.0 * 100.0, - NetworkRx: 100 * 1024 * 1024, - NetworkTx: 800 * 1024 * 1024, - BlockRead: 100 * 1024 * 1024, - BlockWrite: 800 * 1024 * 1024, - PidsCurrent: 1, - } - var b bytes.Buffer - if err := c.Display(&b); err != nil { - t.Fatalf("c.Display() gave error: %s", err) - } - got := b.String() - want := "app\t30.00%\t100 MiB / 2 GiB\t4.88%\t104.9 MB / 838.9 MB\t104.9 MB / 838.9 MB\t1\n" - if got != want { - t.Fatalf("c.Display() = %q, want %q", got, want) - } -} - -func TestCalculBlockIO(t *testing.T) { - blkio := types.BlkioStats{ - IoServiceBytesRecursive: []types.BlkioStatEntry{{8, 0, "read", 1234}, {8, 1, "read", 4567}, {8, 0, "write", 123}, {8, 1, "write", 456}}, - } - blkRead, blkWrite := calculateBlockIO(blkio) - if blkRead != 5801 { - t.Fatalf("blkRead = %d, want 5801", blkRead) - } - if blkWrite != 579 { - t.Fatalf("blkWrite = %d, want 579", blkWrite) - } -} diff --git a/api/client/container/stop.go b/api/client/container/stop.go deleted file mode 100644 index cf2dc25839..0000000000 --- a/api/client/container/stop.go +++ /dev/null @@ -1,56 +0,0 @@ -package container - -import ( - "fmt" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type stopOptions struct { - time int - - 
containers []string
-}
-
-// NewStopCommand creates a new cobra.Command for `docker stop`
-func NewStopCommand(dockerCli *client.DockerCli) *cobra.Command {
-	var opts stopOptions
-
-	cmd := &cobra.Command{
-		Use:   "stop [OPTIONS] CONTAINER [CONTAINER...]",
-		Short: "Stop one or more running containers",
-		Args:  cli.RequiresMinArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			opts.containers = args
-			return runStop(dockerCli, &opts)
-		},
-	}
-
-	flags := cmd.Flags()
-	flags.IntVarP(&opts.time, "time", "t", 10, "Seconds to wait for stop before killing it")
-	return cmd
-}
-
-func runStop(dockerCli *client.DockerCli, opts *stopOptions) error {
-	ctx := context.Background()
-
-	var errs []string
-	for _, container := range opts.containers {
-		timeout := time.Duration(opts.time) * time.Second
-		if err := dockerCli.Client().ContainerStop(ctx, container, &timeout); err != nil {
-			errs = append(errs, err.Error())
-		} else {
-			fmt.Fprintf(dockerCli.Out(), "%s\n", container)
-		}
-	}
-	if len(errs) > 0 {
-		return fmt.Errorf("%s", strings.Join(errs, "\n"))
-	}
-	return nil
-}
diff --git a/api/client/container/top.go b/api/client/container/top.go
deleted file mode 100644
index 3ade5fbd6d..0000000000
--- a/api/client/container/top.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package container
-
-import (
-	"fmt"
-	"strings"
-	"text/tabwriter"
-
-	"golang.org/x/net/context"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/spf13/cobra"
-)
-
-type topOptions struct {
-	container string
-
-	args []string
-}
-
-// NewTopCommand creates a new cobra.Command for `docker top`
-func NewTopCommand(dockerCli *client.DockerCli) *cobra.Command {
-	var opts topOptions
-
-	cmd := &cobra.Command{
-		Use:   "top CONTAINER [ps OPTIONS]",
-		Short: "Display the running processes of a container",
-		Args:  cli.RequiresMinArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			opts.container = args[0]
-			opts.args = args[1:]
-			return runTop(dockerCli, &opts)
-		},
-	}
-
-	flags := cmd.Flags()
-	flags.SetInterspersed(false)
-
-	return cmd
-}
-
-func runTop(dockerCli *client.DockerCli, opts *topOptions) error {
-	ctx := context.Background()
-
-	procList, err := dockerCli.Client().ContainerTop(ctx, opts.container, opts.args)
-	if err != nil {
-		return err
-	}
-
-	w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0)
-	fmt.Fprintln(w, strings.Join(procList.Titles, "\t"))
-
-	for _, proc := range procList.Processes {
-		fmt.Fprintln(w, strings.Join(proc, "\t"))
-	}
-	w.Flush()
-	return nil
-}
diff --git a/api/client/container/unpause.go b/api/client/container/unpause.go
deleted file mode 100644
index 75e2ff94ae..0000000000
--- a/api/client/container/unpause.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package container
-
-import (
-	"fmt"
-	"strings"
-
-	"golang.org/x/net/context"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/cli"
-	"github.com/spf13/cobra"
-)
-
-type unpauseOptions struct {
-	containers []string
-}
-
-// NewUnpauseCommand creates a new cobra.Command for `docker unpause`
-func NewUnpauseCommand(dockerCli *client.DockerCli) *cobra.Command {
-	var opts unpauseOptions
-
-	cmd := &cobra.Command{
-		Use:   "unpause CONTAINER [CONTAINER...]",
-		Short: "Unpause all processes within one or more containers",
-		Args:  cli.RequiresMinArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			opts.containers = args
-			return runUnpause(dockerCli, &opts)
-		},
-	}
-
-	return cmd
-}
-
-func runUnpause(dockerCli *client.DockerCli, opts *unpauseOptions) error {
- ctx := context.Background() - - var errs []string - for _, container := range opts.containers { - if err := dockerCli.Client().ContainerUnpause(ctx, container); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%s\n", container) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/container/utils.go b/api/client/container/utils.go deleted file mode 100644 index b25e0dff57..0000000000 --- a/api/client/container/utils.go +++ /dev/null @@ -1,22 +0,0 @@ -package container - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - clientapi "github.com/docker/engine-api/client" -) - -// getExitCode performs an inspect on the container. It returns -// the running state and the exit code. -func getExitCode(dockerCli *client.DockerCli, ctx context.Context, containerID string) (bool, int, error) { - c, err := dockerCli.Client().ContainerInspect(ctx, containerID) - if err != nil { - // If we can't connect, then the daemon probably died. - if err != clientapi.ErrConnectionFailed { - return false, -1, err - } - return false, -1, nil - } - return c.State.Running, c.State.ExitCode, nil -} diff --git a/api/client/container/wait.go b/api/client/container/wait.go deleted file mode 100644 index 297cabd147..0000000000 --- a/api/client/container/wait.go +++ /dev/null @@ -1,51 +0,0 @@ -package container - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type waitOptions struct { - containers []string -} - -// NewWaitCommand creates a new cobra.Command for `docker wait` -func NewWaitCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts waitOptions - - cmd := &cobra.Command{ - Use: "wait CONTAINER [CONTAINER...]", - Short: "Block until a container stops, then print its exit code", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.containers = args - return runWait(dockerCli, &opts) - }, - } - - return cmd -} - -func runWait(dockerCli *client.DockerCli, opts *waitOptions) error { - ctx := context.Background() - - var errs []string - for _, container := range opts.containers { - status, err := dockerCli.Client().ContainerWait(ctx, container) - if err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(dockerCli.Out(), "%d\n", status) - } - } - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/credentials.go b/api/client/credentials.go deleted file mode 100644 index 9f5e80c7a9..0000000000 --- a/api/client/credentials.go +++ /dev/null @@ -1,44 +0,0 @@ -package client - -import ( - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/cliconfig/credentials" - "github.com/docker/engine-api/types" -) - -// GetCredentials loads the user credentials from a credentials store. -// The store is determined by the config file settings. -func GetCredentials(c *configfile.ConfigFile, serverAddress string) (types.AuthConfig, error) { - s := LoadCredentialsStore(c) - return s.Get(serverAddress) -} - -// GetAllCredentials loads all credentials from a credentials store. -// The store is determined by the config file settings. 
-func GetAllCredentials(c *configfile.ConfigFile) (map[string]types.AuthConfig, error) { - s := LoadCredentialsStore(c) - return s.GetAll() -} - -// StoreCredentials saves the user credentials in a credentials store. -// The store is determined by the config file settings. -func StoreCredentials(c *configfile.ConfigFile, auth types.AuthConfig) error { - s := LoadCredentialsStore(c) - return s.Store(auth) -} - -// EraseCredentials removes the user credentials from a credentials store. -// The store is determined by the config file settings. -func EraseCredentials(c *configfile.ConfigFile, serverAddress string) error { - s := LoadCredentialsStore(c) - return s.Erase(serverAddress) -} - -// LoadCredentialsStore initializes a new credentials store based -// in the settings provided in the configuration file. -func LoadCredentialsStore(c *configfile.ConfigFile) credentials.Store { - if c.CredentialsStore != "" { - return credentials.NewNativeStore(c) - } - return credentials.NewFileStore(c) -} diff --git a/api/client/exec.go b/api/client/exec.go deleted file mode 100644 index a61c16f376..0000000000 --- a/api/client/exec.go +++ /dev/null @@ -1,160 +0,0 @@ -package client - -import ( - "fmt" - "io" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/promise" - "github.com/docker/engine-api/types" -) - -// CmdExec runs a command in a running container. -// -// Usage: docker exec [OPTIONS] CONTAINER COMMAND [ARG...] -func (cli *DockerCli) CmdExec(args ...string) error { - cmd := Cli.Subcmd("exec", []string{"[OPTIONS] CONTAINER COMMAND [ARG...]"}, Cli.DockerCommands["exec"].Description, true) - detachKeys := cmd.String([]string{"-detach-keys"}, "", "Override the key sequence for detaching a container") - - execConfig, err := ParseExec(cmd, args) - container := cmd.Arg(0) - // just in case the ParseExec does not exit - if container == "" || err != nil { - return Cli.StatusError{StatusCode: 1} - } - - if *detachKeys != "" { - cli.configFile.DetachKeys = *detachKeys - } - - // Send client escape keys - execConfig.DetachKeys = cli.configFile.DetachKeys - - ctx := context.Background() - - response, err := cli.client.ContainerExecCreate(ctx, container, *execConfig) - if err != nil { - return err - } - - execID := response.ID - if execID == "" { - fmt.Fprintf(cli.out, "exec ID empty") - return nil - } - - //Temp struct for execStart so that we don't need to transfer all the execConfig - if !execConfig.Detach { - if err := cli.CheckTtyInput(execConfig.AttachStdin, execConfig.Tty); err != nil { - return err - } - } else { - execStartCheck := types.ExecStartCheck{ - Detach: execConfig.Detach, - Tty: execConfig.Tty, - } - - if err := cli.client.ContainerExecStart(ctx, execID, execStartCheck); err != nil { - return err - } - // For now don't print this - wait for when we support exec wait() - // fmt.Fprintf(cli.out, "%s\n", execID) - return nil - } - - // Interactive exec requested. 
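Before the interactive path continues below: the detached branch above reduces to two engine-api calls, creating the exec instance and then starting it with an ExecStartCheck. A condensed sketch of that flow; the container name and command are placeholders, and a reachable daemon with a running container is assumed:

```go
package main

import (
	"fmt"

	"golang.org/x/net/context"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		panic(err)
	}
	ctx := context.Background()

	// Create the exec instance on a (hypothetical) running container.
	resp, err := cli.ContainerExecCreate(ctx, "mycontainer", types.ExecConfig{
		Cmd:    []string{"touch", "/tmp/ping"},
		Detach: true,
	})
	if err != nil {
		panic(err)
	}

	// Kick it off without attaching, mirroring the ExecStartCheck branch.
	if err := cli.ContainerExecStart(ctx, resp.ID, types.ExecStartCheck{Detach: true}); err != nil {
		panic(err)
	}
	fmt.Println("started exec", resp.ID)
}
```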
- var ( - out, stderr io.Writer - in io.ReadCloser - errCh chan error - ) - - if execConfig.AttachStdin { - in = cli.in - } - if execConfig.AttachStdout { - out = cli.out - } - if execConfig.AttachStderr { - if execConfig.Tty { - stderr = cli.out - } else { - stderr = cli.err - } - } - - resp, err := cli.client.ContainerExecAttach(ctx, execID, *execConfig) - if err != nil { - return err - } - defer resp.Close() - errCh = promise.Go(func() error { - return cli.HoldHijackedConnection(ctx, execConfig.Tty, in, out, stderr, resp) - }) - - if execConfig.Tty && cli.isTerminalIn { - if err := cli.MonitorTtySize(ctx, execID, true); err != nil { - fmt.Fprintf(cli.err, "Error monitoring TTY size: %s\n", err) - } - } - - if err := <-errCh; err != nil { - logrus.Debugf("Error hijack: %s", err) - return err - } - - var status int - if _, status, err = cli.getExecExitCode(ctx, execID); err != nil { - return err - } - - if status != 0 { - return Cli.StatusError{StatusCode: status} - } - - return nil -} - -// ParseExec parses the specified args for the specified command and generates -// an ExecConfig from it. -// If the minimal number of specified args is not right or if specified args are -// not valid, it will return an error. -func ParseExec(cmd *flag.FlagSet, args []string) (*types.ExecConfig, error) { - var ( - flStdin = cmd.Bool([]string{"i", "-interactive"}, false, "Keep STDIN open even if not attached") - flTty = cmd.Bool([]string{"t", "-tty"}, false, "Allocate a pseudo-TTY") - flDetach = cmd.Bool([]string{"d", "-detach"}, false, "Detached mode: run command in the background") - flUser = cmd.String([]string{"u", "-user"}, "", "Username or UID (format: [:])") - flPrivileged = cmd.Bool([]string{"-privileged"}, false, "Give extended privileges to the command") - execCmd []string - ) - cmd.Require(flag.Min, 2) - if err := cmd.ParseFlags(args, true); err != nil { - return nil, err - } - parsedArgs := cmd.Args() - execCmd = parsedArgs[1:] - - execConfig := &types.ExecConfig{ - User: *flUser, - Privileged: *flPrivileged, - Tty: *flTty, - Cmd: execCmd, - Detach: *flDetach, - } - - // If -d is not set, attach to everything by default - if !*flDetach { - execConfig.AttachStdout = true - execConfig.AttachStderr = true - if *flStdin { - execConfig.AttachStdin = true - } - } - - return execConfig, nil -} diff --git a/api/client/exec_test.go b/api/client/exec_test.go deleted file mode 100644 index 8b1a3674e9..0000000000 --- a/api/client/exec_test.go +++ /dev/null @@ -1,122 +0,0 @@ -package client - -import ( - "fmt" - "io/ioutil" - "testing" - - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types" -) - -type arguments struct { - args []string -} - -func TestParseExec(t *testing.T) { - invalids := map[*arguments]error{ - &arguments{[]string{"-unknown"}}: fmt.Errorf("flag provided but not defined: -unknown"), - &arguments{[]string{"-u"}}: fmt.Errorf("flag needs an argument: -u"), - &arguments{[]string{"--user"}}: fmt.Errorf("flag needs an argument: --user"), - } - valids := map[*arguments]*types.ExecConfig{ - &arguments{ - []string{"container", "command"}, - }: { - Cmd: []string{"command"}, - AttachStdout: true, - AttachStderr: true, - }, - &arguments{ - []string{"container", "command1", "command2"}, - }: { - Cmd: []string{"command1", "command2"}, - AttachStdout: true, - AttachStderr: true, - }, - &arguments{ - []string{"-i", "-t", "-u", "uid", "container", "command"}, - }: { - User: "uid", - AttachStdin: true, - AttachStdout: true, - AttachStderr: true, - Tty: true, - Cmd: 
[]string{"command"}, - }, - &arguments{ - []string{"-d", "container", "command"}, - }: { - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - Detach: true, - Cmd: []string{"command"}, - }, - &arguments{ - []string{"-t", "-i", "-d", "container", "command"}, - }: { - AttachStdin: false, - AttachStdout: false, - AttachStderr: false, - Detach: true, - Tty: true, - Cmd: []string{"command"}, - }, - } - for invalid, expectedError := range invalids { - cmd := flag.NewFlagSet("exec", flag.ContinueOnError) - cmd.ShortUsage = func() {} - cmd.SetOutput(ioutil.Discard) - _, err := ParseExec(cmd, invalid.args) - if err == nil || err.Error() != expectedError.Error() { - t.Fatalf("Expected an error [%v] for %v, got %v", expectedError, invalid, err) - } - - } - for valid, expectedExecConfig := range valids { - cmd := flag.NewFlagSet("exec", flag.ContinueOnError) - cmd.ShortUsage = func() {} - cmd.SetOutput(ioutil.Discard) - execConfig, err := ParseExec(cmd, valid.args) - if err != nil { - t.Fatal(err) - } - if !compareExecConfig(expectedExecConfig, execConfig) { - t.Fatalf("Expected [%v] for %v, got [%v]", expectedExecConfig, valid, execConfig) - } - } -} - -func compareExecConfig(config1 *types.ExecConfig, config2 *types.ExecConfig) bool { - if config1.AttachStderr != config2.AttachStderr { - return false - } - if config1.AttachStdin != config2.AttachStdin { - return false - } - if config1.AttachStdout != config2.AttachStdout { - return false - } - if config1.Detach != config2.Detach { - return false - } - if config1.Privileged != config2.Privileged { - return false - } - if config1.Tty != config2.Tty { - return false - } - if config1.User != config2.User { - return false - } - if len(config1.Cmd) != len(config2.Cmd) { - return false - } - for index, value := range config1.Cmd { - if value != config2.Cmd[index] { - return false - } - } - return true -} diff --git a/api/client/formatter/custom.go b/api/client/formatter/custom.go deleted file mode 100644 index 079a71cff8..0000000000 --- a/api/client/formatter/custom.go +++ /dev/null @@ -1,243 +0,0 @@ -package formatter - -import ( - "fmt" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/engine-api/types" - "github.com/docker/go-units" -) - -const ( - tableKey = "table" - - containerIDHeader = "CONTAINER ID" - imageHeader = "IMAGE" - namesHeader = "NAMES" - commandHeader = "COMMAND" - createdSinceHeader = "CREATED" - createdAtHeader = "CREATED AT" - runningForHeader = "CREATED" - statusHeader = "STATUS" - portsHeader = "PORTS" - sizeHeader = "SIZE" - labelsHeader = "LABELS" - imageIDHeader = "IMAGE ID" - repositoryHeader = "REPOSITORY" - tagHeader = "TAG" - digestHeader = "DIGEST" - mountsHeader = "MOUNTS" -) - -type containerContext struct { - baseSubContext - trunc bool - c types.Container -} - -func (c *containerContext) ID() string { - c.addHeader(containerIDHeader) - if c.trunc { - return stringid.TruncateID(c.c.ID) - } - return c.c.ID -} - -func (c *containerContext) Names() string { - c.addHeader(namesHeader) - names := stripNamePrefix(c.c.Names) - if c.trunc { - for _, name := range names { - if len(strings.Split(name, "/")) == 1 { - names = []string{name} - break - } - } - } - return strings.Join(names, ",") -} - -func (c *containerContext) Image() string { - c.addHeader(imageHeader) - if c.c.Image == "" { - return "" - } - if c.trunc { - if trunc := stringid.TruncateID(c.c.ImageID); trunc == 
stringid.TruncateID(c.c.Image) { - return trunc - } - } - return c.c.Image -} - -func (c *containerContext) Command() string { - c.addHeader(commandHeader) - command := c.c.Command - if c.trunc { - command = stringutils.Truncate(command, 20) - } - return strconv.Quote(command) -} - -func (c *containerContext) CreatedAt() string { - c.addHeader(createdAtHeader) - return time.Unix(int64(c.c.Created), 0).String() -} - -func (c *containerContext) RunningFor() string { - c.addHeader(runningForHeader) - createdAt := time.Unix(int64(c.c.Created), 0) - return units.HumanDuration(time.Now().UTC().Sub(createdAt)) -} - -func (c *containerContext) Ports() string { - c.addHeader(portsHeader) - return api.DisplayablePorts(c.c.Ports) -} - -func (c *containerContext) Status() string { - c.addHeader(statusHeader) - return c.c.Status -} - -func (c *containerContext) Size() string { - c.addHeader(sizeHeader) - srw := units.HumanSize(float64(c.c.SizeRw)) - sv := units.HumanSize(float64(c.c.SizeRootFs)) - - sf := srw - if c.c.SizeRootFs > 0 { - sf = fmt.Sprintf("%s (virtual %s)", srw, sv) - } - return sf -} - -func (c *containerContext) Labels() string { - c.addHeader(labelsHeader) - if c.c.Labels == nil { - return "" - } - - var joinLabels []string - for k, v := range c.c.Labels { - joinLabels = append(joinLabels, fmt.Sprintf("%s=%s", k, v)) - } - return strings.Join(joinLabels, ",") -} - -func (c *containerContext) Label(name string) string { - n := strings.Split(name, ".") - r := strings.NewReplacer("-", " ", "_", " ") - h := r.Replace(n[len(n)-1]) - - c.addHeader(h) - - if c.c.Labels == nil { - return "" - } - return c.c.Labels[name] -} - -func (c *containerContext) Mounts() string { - c.addHeader(mountsHeader) - - var name string - var mounts []string - for _, m := range c.c.Mounts { - if m.Name == "" { - name = m.Source - } else { - name = m.Name - } - if c.trunc { - name = stringutils.Truncate(name, 15) - } - mounts = append(mounts, name) - } - return strings.Join(mounts, ",") -} - -type imageContext struct { - baseSubContext - trunc bool - i types.Image - repo string - tag string - digest string -} - -func (c *imageContext) ID() string { - c.addHeader(imageIDHeader) - if c.trunc { - return stringid.TruncateID(c.i.ID) - } - return c.i.ID -} - -func (c *imageContext) Repository() string { - c.addHeader(repositoryHeader) - return c.repo -} - -func (c *imageContext) Tag() string { - c.addHeader(tagHeader) - return c.tag -} - -func (c *imageContext) Digest() string { - c.addHeader(digestHeader) - return c.digest -} - -func (c *imageContext) CreatedSince() string { - c.addHeader(createdSinceHeader) - createdAt := time.Unix(int64(c.i.Created), 0) - return units.HumanDuration(time.Now().UTC().Sub(createdAt)) -} - -func (c *imageContext) CreatedAt() string { - c.addHeader(createdAtHeader) - return time.Unix(int64(c.i.Created), 0).String() -} - -func (c *imageContext) Size() string { - c.addHeader(sizeHeader) - return units.HumanSize(float64(c.i.Size)) -} - -type subContext interface { - fullHeader() string - addHeader(header string) -} - -type baseSubContext struct { - header []string -} - -func (c *baseSubContext) fullHeader() string { - if c.header == nil { - return "" - } - return strings.Join(c.header, "\t") -} - -func (c *baseSubContext) addHeader(header string) { - if c.header == nil { - c.header = []string{} - } - c.header = append(c.header, strings.ToUpper(header)) -} - -func stripNamePrefix(ss []string) []string { - sss := make([]string, len(ss)) - for i, s := range ss { - sss[i] = s[1:] - } - - return 
sss -} diff --git a/api/client/formatter/custom_test.go b/api/client/formatter/custom_test.go deleted file mode 100644 index 6a21f2bcd4..0000000000 --- a/api/client/formatter/custom_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package formatter - -import ( - "reflect" - "strings" - "testing" - "time" - - "github.com/docker/docker/pkg/stringid" - "github.com/docker/engine-api/types" -) - -func TestContainerPsContext(t *testing.T) { - containerID := stringid.GenerateRandomID() - unix := time.Now().Add(-65 * time.Second).Unix() - - var ctx containerContext - cases := []struct { - container types.Container - trunc bool - expValue string - expHeader string - call func() string - }{ - {types.Container{ID: containerID}, true, stringid.TruncateID(containerID), containerIDHeader, ctx.ID}, - {types.Container{ID: containerID}, false, containerID, containerIDHeader, ctx.ID}, - {types.Container{Names: []string{"/foobar_baz"}}, true, "foobar_baz", namesHeader, ctx.Names}, - {types.Container{Image: "ubuntu"}, true, "ubuntu", imageHeader, ctx.Image}, - {types.Container{Image: "verylongimagename"}, true, "verylongimagename", imageHeader, ctx.Image}, - {types.Container{Image: "verylongimagename"}, false, "verylongimagename", imageHeader, ctx.Image}, - {types.Container{ - Image: "a5a665ff33eced1e0803148700880edab4", - ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", - }, - true, - "a5a665ff33ec", - imageHeader, - ctx.Image, - }, - {types.Container{ - Image: "a5a665ff33eced1e0803148700880edab4", - ImageID: "a5a665ff33eced1e0803148700880edab4269067ed77e27737a708d0d293fbf5", - }, - false, - "a5a665ff33eced1e0803148700880edab4", - imageHeader, - ctx.Image, - }, - {types.Container{Image: ""}, true, "", imageHeader, ctx.Image}, - {types.Container{Command: "sh -c 'ls -la'"}, true, `"sh -c 'ls -la'"`, commandHeader, ctx.Command}, - {types.Container{Created: unix}, true, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, - {types.Container{Ports: []types.Port{{PrivatePort: 8080, PublicPort: 8080, Type: "tcp"}}}, true, "8080/tcp", portsHeader, ctx.Ports}, - {types.Container{Status: "RUNNING"}, true, "RUNNING", statusHeader, ctx.Status}, - {types.Container{SizeRw: 10}, true, "10 B", sizeHeader, ctx.Size}, - {types.Container{SizeRw: 10, SizeRootFs: 20}, true, "10 B (virtual 20 B)", sizeHeader, ctx.Size}, - {types.Container{}, true, "", labelsHeader, ctx.Labels}, - {types.Container{Labels: map[string]string{"cpu": "6", "storage": "ssd"}}, true, "cpu=6,storage=ssd", labelsHeader, ctx.Labels}, - {types.Container{Created: unix}, true, "About a minute", runningForHeader, ctx.RunningFor}, - } - - for _, c := range cases { - ctx = containerContext{c: c.container, trunc: c.trunc} - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - - h := ctx.fullHeader() - if h != c.expHeader { - t.Fatalf("Expected %s, was %s\n", c.expHeader, h) - } - } - - c1 := types.Container{Labels: map[string]string{"com.docker.swarm.swarm-id": "33", "com.docker.swarm.node_name": "ubuntu"}} - ctx = containerContext{c: c1, trunc: true} - - sid := ctx.Label("com.docker.swarm.swarm-id") - node := ctx.Label("com.docker.swarm.node_name") - if sid != "33" { - t.Fatalf("Expected 33, was %s\n", sid) - } - - if node != "ubuntu" { - t.Fatalf("Expected ubuntu, was %s\n", node) - } - - h := ctx.fullHeader() - if h != "SWARM ID\tNODE NAME" { - t.Fatalf("Expected %s, was %s\n", "SWARM ID\tNODE NAME", h) - - } - - 
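// Editor's note (illustrative, not part of the original test): the header for
// a custom label column is derived from the last dot-separated segment of the
// label name, with "-" and "_" replaced by spaces and the result upper-cased
// by addHeader, e.g.
//
//	ctx.Label("com.docker.swarm.node_name") // column header "NODE NAME"
//	ctx.Label("com.docker.swarm.swarm-id")  // column header "SWARM ID"
//
// which is exactly what the "SWARM ID\tNODE NAME" assertion above checks.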
c2 := types.Container{} - ctx = containerContext{c: c2, trunc: true} - - label := ctx.Label("anything.really") - if label != "" { - t.Fatalf("Expected an empty string, was %s", label) - } - - ctx = containerContext{c: c2, trunc: true} - fullHeader := ctx.fullHeader() - if fullHeader != "" { - t.Fatalf("Expected fullHeader to be empty, was %s", fullHeader) - } - -} - -func TestImagesContext(t *testing.T) { - imageID := stringid.GenerateRandomID() - unix := time.Now().Unix() - - var ctx imageContext - cases := []struct { - imageCtx imageContext - expValue string - expHeader string - call func() string - }{ - {imageContext{ - i: types.Image{ID: imageID}, - trunc: true, - }, stringid.TruncateID(imageID), imageIDHeader, ctx.ID}, - {imageContext{ - i: types.Image{ID: imageID}, - trunc: false, - }, imageID, imageIDHeader, ctx.ID}, - {imageContext{ - i: types.Image{Size: 10}, - trunc: true, - }, "10 B", sizeHeader, ctx.Size}, - {imageContext{ - i: types.Image{Created: unix}, - trunc: true, - }, time.Unix(unix, 0).String(), createdAtHeader, ctx.CreatedAt}, - // FIXME - // {imageContext{ - // i: types.Image{Created: unix}, - // trunc: true, - // }, units.HumanDuration(time.Unix(unix, 0)), createdSinceHeader, ctx.CreatedSince}, - {imageContext{ - i: types.Image{}, - repo: "busybox", - }, "busybox", repositoryHeader, ctx.Repository}, - {imageContext{ - i: types.Image{}, - tag: "latest", - }, "latest", tagHeader, ctx.Tag}, - {imageContext{ - i: types.Image{}, - digest: "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", - }, "sha256:d149ab53f8718e987c3a3024bb8aa0e2caadf6c0328f1d9d850b2a2a67f2819a", digestHeader, ctx.Digest}, - } - - for _, c := range cases { - ctx = c.imageCtx - v := c.call() - if strings.Contains(v, ",") { - compareMultipleValues(t, v, c.expValue) - } else if v != c.expValue { - t.Fatalf("Expected %s, was %s\n", c.expValue, v) - } - - h := ctx.fullHeader() - if h != c.expHeader { - t.Fatalf("Expected %s, was %s\n", c.expHeader, h) - } - } -} - -func compareMultipleValues(t *testing.T, value, expected string) { - // comma-separated values means probably a map input, which won't - // be guaranteed to have the same order as our expected value - // We'll create maps and use reflect.DeepEquals to check instead: - entriesMap := make(map[string]string) - expMap := make(map[string]string) - entries := strings.Split(value, ",") - expectedEntries := strings.Split(expected, ",") - for _, entry := range entries { - keyval := strings.Split(entry, "=") - entriesMap[keyval[0]] = keyval[1] - } - for _, expected := range expectedEntries { - keyval := strings.Split(expected, "=") - expMap[keyval[0]] = keyval[1] - } - if !reflect.DeepEqual(expMap, entriesMap) { - t.Fatalf("Expected entries: %v, got: %v", expected, value) - } -} diff --git a/api/client/formatter/formatter.go b/api/client/formatter/formatter.go deleted file mode 100644 index 1e250a2522..0000000000 --- a/api/client/formatter/formatter.go +++ /dev/null @@ -1,307 +0,0 @@ -package formatter - -import ( - "bytes" - "fmt" - "io" - "strings" - "text/tabwriter" - "text/template" - - "github.com/docker/docker/reference" - "github.com/docker/docker/utils/templates" - "github.com/docker/engine-api/types" -) - -const ( - tableFormatKey = "table" - rawFormatKey = "raw" - - defaultContainerTableFormat = "table {{.ID}}\t{{.Image}}\t{{.Command}}\t{{.RunningFor}} ago\t{{.Status}}\t{{.Ports}}\t{{.Names}}" - defaultImageTableFormat = "table {{.Repository}}\t{{.Tag}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" - 
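// Editor's sketch of how these defaults are consumed: preformat below strips
// the leading "table" token and turns the escaped \t and \n sequences into
// real characters before template parsing, so a user-supplied format such as
//
//	docker images --format 'table {{.Repository}}\t{{.Tag}}'
//
// renders a two-column table with a REPOSITORY and TAG header row.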
defaultImageTableFormatWithDigest = "table {{.Repository}}\t{{.Tag}}\t{{.Digest}}\t{{.ID}}\t{{.CreatedSince}} ago\t{{.Size}}" - defaultQuietFormat = "{{.ID}}" -) - -// Context contains information required by the formatter to print the output as desired. -type Context struct { - // Output is the output stream to which the formatted string is written. - Output io.Writer - // Format is used to choose raw, table or custom format for the output. - Format string - // Quiet when set to true will simply print minimal information. - Quiet bool - // Trunc when set to true will truncate the output of certain fields such as Container ID. - Trunc bool - - // internal element - table bool - finalFormat string - header string - buffer *bytes.Buffer -} - -func (c *Context) preformat() { - c.finalFormat = c.Format - - if strings.HasPrefix(c.Format, tableKey) { - c.table = true - c.finalFormat = c.finalFormat[len(tableKey):] - } - - c.finalFormat = strings.Trim(c.finalFormat, " ") - r := strings.NewReplacer(`\t`, "\t", `\n`, "\n") - c.finalFormat = r.Replace(c.finalFormat) -} - -func (c *Context) parseFormat() (*template.Template, error) { - tmpl, err := templates.Parse(c.finalFormat) - if err != nil { - c.buffer.WriteString(fmt.Sprintf("Template parsing error: %v\n", err)) - c.buffer.WriteTo(c.Output) - } - return tmpl, err -} - -func (c *Context) postformat(tmpl *template.Template, subContext subContext) { - if c.table { - if len(c.header) == 0 { - // if we still don't have a header, we didn't have any containers so we need to fake it to get the right headers from the template - tmpl.Execute(bytes.NewBufferString(""), subContext) - c.header = subContext.fullHeader() - } - - t := tabwriter.NewWriter(c.Output, 20, 1, 3, ' ', 0) - t.Write([]byte(c.header)) - t.Write([]byte("\n")) - c.buffer.WriteTo(t) - t.Flush() - } else { - c.buffer.WriteTo(c.Output) - } -} - -func (c *Context) contextFormat(tmpl *template.Template, subContext subContext) error { - if err := tmpl.Execute(c.buffer, subContext); err != nil { - c.buffer = bytes.NewBufferString(fmt.Sprintf("Template parsing error: %v\n", err)) - c.buffer.WriteTo(c.Output) - return err - } - if c.table && len(c.header) == 0 { - c.header = subContext.fullHeader() - } - c.buffer.WriteString("\n") - return nil -} - -// ContainerContext contains container specific information required by the formater, encapsulate a Context struct. -type ContainerContext struct { - Context - // Size when set to true will display the size of the output. - Size bool - // Containers - Containers []types.Container -} - -// ImageContext contains image specific information required by the formater, encapsulate a Context struct. 
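// A minimal usage sketch (editor's addition; `images` is a []types.Image the
// caller obtained from ImageList, and the field values mirror how runImages
// in api/client/image/images.go wires this up):
//
//	imagesCtx := formatter.ImageContext{
//		Context: formatter.Context{
//			Output: dockerCli.Out(),
//			Format: "table",
//			Quiet:  false,
//			Trunc:  true,
//		},
//		Digest: false,
//		Images: images,
//	}
//	imagesCtx.Write()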
-type ImageContext struct { - Context - Digest bool - // Images - Images []types.Image -} - -func (ctx ContainerContext) Write() { - switch ctx.Format { - case tableFormatKey: - if ctx.Quiet { - ctx.Format = defaultQuietFormat - } else { - ctx.Format = defaultContainerTableFormat - if ctx.Size { - ctx.Format += `\t{{.Size}}` - } - } - case rawFormatKey: - if ctx.Quiet { - ctx.Format = `container_id: {{.ID}}` - } else { - ctx.Format = `container_id: {{.ID}}\nimage: {{.Image}}\ncommand: {{.Command}}\ncreated_at: {{.CreatedAt}}\nstatus: {{.Status}}\nnames: {{.Names}}\nlabels: {{.Labels}}\nports: {{.Ports}}\n` - if ctx.Size { - ctx.Format += `size: {{.Size}}\n` - } - } - } - - ctx.buffer = bytes.NewBufferString("") - ctx.preformat() - - tmpl, err := ctx.parseFormat() - if err != nil { - return - } - - for _, container := range ctx.Containers { - containerCtx := &containerContext{ - trunc: ctx.Trunc, - c: container, - } - err = ctx.contextFormat(tmpl, containerCtx) - if err != nil { - return - } - } - - ctx.postformat(tmpl, &containerContext{}) -} - -func isDangling(image types.Image) bool { - return len(image.RepoTags) == 1 && image.RepoTags[0] == ":" && len(image.RepoDigests) == 1 && image.RepoDigests[0] == "@" -} - -func (ctx ImageContext) Write() { - switch ctx.Format { - case tableFormatKey: - ctx.Format = defaultImageTableFormat - if ctx.Digest { - ctx.Format = defaultImageTableFormatWithDigest - } - if ctx.Quiet { - ctx.Format = defaultQuietFormat - } - case rawFormatKey: - if ctx.Quiet { - ctx.Format = `image_id: {{.ID}}` - } else { - if ctx.Digest { - ctx.Format = `repository: {{ .Repository }} -tag: {{.Tag}} -digest: {{.Digest}} -image_id: {{.ID}} -created_at: {{.CreatedAt}} -virtual_size: {{.Size}} -` - } else { - ctx.Format = `repository: {{ .Repository }} -tag: {{.Tag}} -image_id: {{.ID}} -created_at: {{.CreatedAt}} -virtual_size: {{.Size}} -` - } - } - } - - ctx.buffer = bytes.NewBufferString("") - ctx.preformat() - if ctx.table && ctx.Digest && !strings.Contains(ctx.Format, "{{.Digest}}") { - ctx.finalFormat += "\t{{.Digest}}" - } - - tmpl, err := ctx.parseFormat() - if err != nil { - return - } - - for _, image := range ctx.Images { - images := []*imageContext{} - if isDangling(image) { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: "", - tag: "", - digest: "", - }) - } else { - repoTags := map[string][]string{} - repoDigests := map[string][]string{} - - for _, refString := range append(image.RepoTags) { - ref, err := reference.ParseNamed(refString) - if err != nil { - continue - } - if nt, ok := ref.(reference.NamedTagged); ok { - repoTags[ref.Name()] = append(repoTags[ref.Name()], nt.Tag()) - } - } - for _, refString := range append(image.RepoDigests) { - ref, err := reference.ParseNamed(refString) - if err != nil { - continue - } - if c, ok := ref.(reference.Canonical); ok { - repoDigests[ref.Name()] = append(repoDigests[ref.Name()], c.Digest().String()) - } - } - - for repo, tags := range repoTags { - digests := repoDigests[repo] - - // Do not display digests as their own row - delete(repoDigests, repo) - - if !ctx.Digest { - // Ignore digest references, just show tag once - digests = nil - } - - for _, tag := range tags { - if len(digests) == 0 { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: tag, - digest: "", - }) - continue - } - // Display the digests for each tag - for _, dgst := range digests { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: 
tag, - digest: dgst, - }) - } - - } - } - - // Show rows for remaining digest only references - for repo, digests := range repoDigests { - // If digests are displayed, show row per digest - if ctx.Digest { - for _, dgst := range digests { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: "", - digest: dgst, - }) - } - } else { - images = append(images, &imageContext{ - trunc: ctx.Trunc, - i: image, - repo: repo, - tag: "", - }) - } - } - } - for _, imageCtx := range images { - err = ctx.contextFormat(tmpl, imageCtx) - if err != nil { - return - } - } - } - - ctx.postformat(tmpl, &imageContext{}) -} diff --git a/api/client/formatter/formatter_test.go b/api/client/formatter/formatter_test.go deleted file mode 100644 index 07cde63f95..0000000000 --- a/api/client/formatter/formatter_test.go +++ /dev/null @@ -1,537 +0,0 @@ -package formatter - -import ( - "bytes" - "fmt" - "testing" - "time" - - "github.com/docker/engine-api/types" -) - -func TestContainerContextWrite(t *testing.T) { - unixTime := time.Now().AddDate(0, 0, -1).Unix() - expectedTime := time.Unix(unixTime, 0).String() - - contexts := []struct { - context ContainerContext - expected string - }{ - // Errors - { - ContainerContext{ - Context: Context{ - Format: "{{InvalidFunction}}", - }, - }, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - ContainerContext{ - Context: Context{ - Format: "{{nil}}", - }, - }, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table Format - { - ContainerContext{ - Context: Context{ - Format: "table", - }, - }, - `CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES -containerID1 ubuntu "" 24 hours ago foobar_baz -containerID2 ubuntu "" 24 hours ago foobar_bar -`, - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}", - }, - }, - "IMAGE\nubuntu\nubuntu\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}", - }, - Size: true, - }, - "IMAGE\nubuntu\nubuntu\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}", - Quiet: true, - }, - }, - "IMAGE\nubuntu\nubuntu\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "table", - Quiet: true, - }, - }, - "containerID1\ncontainerID2\n", - }, - // Raw Format - { - ContainerContext{ - Context: Context{ - Format: "raw", - }, - }, - fmt.Sprintf(`container_id: containerID1 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_baz -labels: -ports: - -container_id: containerID2 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_bar -labels: -ports: - -`, expectedTime, expectedTime), - }, - { - ContainerContext{ - Context: Context{ - Format: "raw", - }, - Size: true, - }, - fmt.Sprintf(`container_id: containerID1 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_baz -labels: -ports: -size: 0 B - -container_id: containerID2 -image: ubuntu -command: "" -created_at: %s -status: -names: foobar_bar -labels: -ports: -size: 0 B - -`, expectedTime, expectedTime), - }, - { - ContainerContext{ - Context: Context{ - Format: "raw", - Quiet: true, - }, - }, - "container_id: containerID1\ncontainer_id: containerID2\n", - }, - // Custom Format - { - ContainerContext{ - Context: Context{ - Format: "{{.Image}}", - }, - }, - "ubuntu\nubuntu\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "{{.Image}}", - }, - Size: true, - }, - "ubuntu\nubuntu\n", - }, - } - - for _, context := range contexts { - 
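// Editor's note: each iteration of this loop renders the same two fixture
// containers (declared just below) through one of the Format cases above and
// compares the buffered output byte-for-byte; for the custom "{{.Image}}"
// case the expected output is simply
//
//	ubuntu
//	ubuntu
//
// one line per container, with no table header.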
containers := []types.Container{ - {ID: "containerID1", Names: []string{"/foobar_baz"}, Image: "ubuntu", Created: unixTime}, - {ID: "containerID2", Names: []string{"/foobar_bar"}, Image: "ubuntu", Created: unixTime}, - } - out := bytes.NewBufferString("") - context.context.Output = out - context.context.Containers = containers - context.context.Write() - actual := out.String() - if actual != context.expected { - t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) - } - // Clean buffer - out.Reset() - } -} - -func TestContainerContextWriteWithNoContainers(t *testing.T) { - out := bytes.NewBufferString("") - containers := []types.Container{} - - contexts := []struct { - context ContainerContext - expected string - }{ - { - ContainerContext{ - Context: Context{ - Format: "{{.Image}}", - Output: out, - }, - }, - "", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}", - Output: out, - }, - }, - "IMAGE\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "{{.Image}}", - Output: out, - }, - Size: true, - }, - "", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}", - Output: out, - }, - Size: true, - }, - "IMAGE\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}\t{{.Size}}", - Output: out, - }, - }, - "IMAGE SIZE\n", - }, - { - ContainerContext{ - Context: Context{ - Format: "table {{.Image}}\t{{.Size}}", - Output: out, - }, - Size: true, - }, - "IMAGE SIZE\n", - }, - } - - for _, context := range contexts { - context.context.Containers = containers - context.context.Write() - actual := out.String() - if actual != context.expected { - t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) - } - // Clean buffer - out.Reset() - } -} - -func TestImageContextWrite(t *testing.T) { - unixTime := time.Now().AddDate(0, 0, -1).Unix() - expectedTime := time.Unix(unixTime, 0).String() - - contexts := []struct { - context ImageContext - expected string - }{ - // Errors - { - ImageContext{ - Context: Context{ - Format: "{{InvalidFunction}}", - }, - }, - `Template parsing error: template: :1: function "InvalidFunction" not defined -`, - }, - { - ImageContext{ - Context: Context{ - Format: "{{nil}}", - }, - }, - `Template parsing error: template: :1:2: executing "" at : nil is not a command -`, - }, - // Table Format - { - ImageContext{ - Context: Context{ - Format: "table", - }, - }, - `REPOSITORY TAG IMAGE ID CREATED SIZE -image tag1 imageID1 24 hours ago 0 B -image tag2 imageID2 24 hours ago 0 B - imageID3 24 hours ago 0 B -`, - }, - { - ImageContext{ - Context: Context{ - Format: "table {{.Repository}}", - }, - }, - "REPOSITORY\nimage\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: "table {{.Repository}}", - }, - Digest: true, - }, - `REPOSITORY DIGEST -image sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf -image - -`, - }, - { - ImageContext{ - Context: Context{ - Format: "table {{.Repository}}", - Quiet: true, - }, - }, - "REPOSITORY\nimage\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: "table", - Quiet: true, - }, - }, - "imageID1\nimageID2\nimageID3\n", - }, - { - ImageContext{ - Context: Context{ - Format: "table", - Quiet: false, - }, - Digest: true, - }, - `REPOSITORY TAG DIGEST IMAGE ID CREATED SIZE -image tag1 sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf imageID1 24 hours ago 0 B -image tag2 imageID2 24 hours ago 0 B - imageID3 24 hours ago 0 B -`, - }, - { - ImageContext{ - Context: Context{ - 
Format: "table", - Quiet: true, - }, - Digest: true, - }, - "imageID1\nimageID2\nimageID3\n", - }, - // Raw Format - { - ImageContext{ - Context: Context{ - Format: "raw", - }, - }, - fmt.Sprintf(`repository: image -tag: tag1 -image_id: imageID1 -created_at: %s -virtual_size: 0 B - -repository: image -tag: tag2 -image_id: imageID2 -created_at: %s -virtual_size: 0 B - -repository: -tag: -image_id: imageID3 -created_at: %s -virtual_size: 0 B - -`, expectedTime, expectedTime, expectedTime), - }, - { - ImageContext{ - Context: Context{ - Format: "raw", - }, - Digest: true, - }, - fmt.Sprintf(`repository: image -tag: tag1 -digest: sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf -image_id: imageID1 -created_at: %s -virtual_size: 0 B - -repository: image -tag: tag2 -digest: -image_id: imageID2 -created_at: %s -virtual_size: 0 B - -repository: -tag: -digest: -image_id: imageID3 -created_at: %s -virtual_size: 0 B - -`, expectedTime, expectedTime, expectedTime), - }, - { - ImageContext{ - Context: Context{ - Format: "raw", - Quiet: true, - }, - }, - `image_id: imageID1 -image_id: imageID2 -image_id: imageID3 -`, - }, - // Custom Format - { - ImageContext{ - Context: Context{ - Format: "{{.Repository}}", - }, - }, - "image\nimage\n\n", - }, - { - ImageContext{ - Context: Context{ - Format: "{{.Repository}}", - }, - Digest: true, - }, - "image\nimage\n\n", - }, - } - - for _, context := range contexts { - images := []types.Image{ - {ID: "imageID1", RepoTags: []string{"image:tag1"}, RepoDigests: []string{"image@sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"}, Created: unixTime}, - {ID: "imageID2", RepoTags: []string{"image:tag2"}, Created: unixTime}, - {ID: "imageID3", RepoTags: []string{":"}, RepoDigests: []string{"@"}, Created: unixTime}, - } - out := bytes.NewBufferString("") - context.context.Output = out - context.context.Images = images - context.context.Write() - actual := out.String() - if actual != context.expected { - t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) - } - // Clean buffer - out.Reset() - } -} - -func TestImageContextWriteWithNoImage(t *testing.T) { - out := bytes.NewBufferString("") - images := []types.Image{} - - contexts := []struct { - context ImageContext - expected string - }{ - { - ImageContext{ - Context: Context{ - Format: "{{.Repository}}", - Output: out, - }, - }, - "", - }, - { - ImageContext{ - Context: Context{ - Format: "table {{.Repository}}", - Output: out, - }, - }, - "REPOSITORY\n", - }, - { - ImageContext{ - Context: Context{ - Format: "{{.Repository}}", - Output: out, - }, - Digest: true, - }, - "", - }, - { - ImageContext{ - Context: Context{ - Format: "table {{.Repository}}", - Output: out, - }, - Digest: true, - }, - "REPOSITORY DIGEST\n", - }, - } - - for _, context := range contexts { - context.context.Images = images - context.context.Write() - actual := out.String() - if actual != context.expected { - t.Fatalf("Expected \n%s, got \n%s", context.expected, actual) - } - // Clean buffer - out.Reset() - } -} diff --git a/api/client/hijack.go b/api/client/hijack.go deleted file mode 100644 index 294078e44c..0000000000 --- a/api/client/hijack.go +++ /dev/null @@ -1,95 +0,0 @@ -package client - -import ( - "io" - "sync" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/engine-api/types" -) - -// HoldHijackedConnection handles copying input to and output from streams to the -// connection -func (cli *DockerCli) 
HoldHijackedConnection(ctx context.Context, tty bool, inputStream io.ReadCloser, outputStream, errorStream io.Writer, resp types.HijackedResponse) error { - var ( - err error - restoreOnce sync.Once - ) - if inputStream != nil && tty { - if err := cli.setRawTerminal(); err != nil { - return err - } - defer func() { - restoreOnce.Do(func() { - cli.restoreTerminal(inputStream) - }) - }() - } - - receiveStdout := make(chan error, 1) - if outputStream != nil || errorStream != nil { - go func() { - // When TTY is ON, use regular copy - if tty && outputStream != nil { - _, err = io.Copy(outputStream, resp.Reader) - // we should restore the terminal as soon as possible once connection end - // so any following print messages will be in normal type. - if inputStream != nil { - restoreOnce.Do(func() { - cli.restoreTerminal(inputStream) - }) - } - } else { - _, err = stdcopy.StdCopy(outputStream, errorStream, resp.Reader) - } - - logrus.Debug("[hijack] End of stdout") - receiveStdout <- err - }() - } - - stdinDone := make(chan struct{}) - go func() { - if inputStream != nil { - io.Copy(resp.Conn, inputStream) - // we should restore the terminal as soon as possible once connection end - // so any following print messages will be in normal type. - if tty { - restoreOnce.Do(func() { - cli.restoreTerminal(inputStream) - }) - } - logrus.Debug("[hijack] End of stdin") - } - - if err := resp.CloseWrite(); err != nil { - logrus.Debugf("Couldn't send EOF: %s", err) - } - close(stdinDone) - }() - - select { - case err := <-receiveStdout: - if err != nil { - logrus.Debugf("Error receiveStdout: %s", err) - return err - } - case <-stdinDone: - if outputStream != nil || errorStream != nil { - select { - case err := <-receiveStdout: - if err != nil { - logrus.Debugf("Error receiveStdout: %s", err) - return err - } - case <-ctx.Done(): - } - } - case <-ctx.Done(): - } - - return nil -} diff --git a/api/client/idresolver/idresolver.go b/api/client/idresolver/idresolver.go deleted file mode 100644 index 9b38d151bb..0000000000 --- a/api/client/idresolver/idresolver.go +++ /dev/null @@ -1,70 +0,0 @@ -package idresolver - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types/swarm" -) - -// IDResolver provides ID to Name resolution. -type IDResolver struct { - client client.APIClient - noResolve bool - cache map[string]string -} - -// New creates a new IDResolver. -func New(client client.APIClient, noResolve bool) *IDResolver { - return &IDResolver{ - client: client, - noResolve: noResolve, - cache: make(map[string]string), - } -} - -func (r *IDResolver) get(ctx context.Context, t interface{}, id string) (string, error) { - switch t.(type) { - case swarm.Node: - node, _, err := r.client.NodeInspectWithRaw(ctx, id) - if err != nil { - return id, nil - } - if node.Spec.Annotations.Name != "" { - return node.Spec.Annotations.Name, nil - } - if node.Description.Hostname != "" { - return node.Description.Hostname, nil - } - return id, nil - case swarm.Service: - service, _, err := r.client.ServiceInspectWithRaw(ctx, id) - if err != nil { - return id, nil - } - return service.Spec.Annotations.Name, nil - default: - return "", fmt.Errorf("unsupported type") - } - -} - -// Resolve will attempt to resolve an ID to a Name by querying the manager. -// Results are stored into a cache. -// If the `-n` flag is used in the command-line, resolution is disabled. 
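// A short usage sketch (editor's addition; `apiClient` is any engine-api
// client.APIClient and `nodeID` a swarm node ID; repeated lookups for the
// same ID are served from the internal cache):
//
//	resolver := idresolver.New(apiClient, false)
//	name, err := resolver.Resolve(ctx, swarm.Node{}, nodeID)
//	if err != nil {
//		name = nodeID // unsupported type; fall back to the raw ID
//	}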
-func (r *IDResolver) Resolve(ctx context.Context, t interface{}, id string) (string, error) { - if r.noResolve { - return id, nil - } - if name, ok := r.cache[id]; ok { - return name, nil - } - name, err := r.get(ctx, t, id) - if err != nil { - return "", err - } - r.cache[id] = name - return name, nil -} diff --git a/api/client/image/build.go b/api/client/image/build.go deleted file mode 100644 index 6080aec83b..0000000000 --- a/api/client/image/build.go +++ /dev/null @@ -1,432 +0,0 @@ -package image - -import ( - "archive/tar" - "bufio" - "bytes" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "runtime" - - "golang.org/x/net/context" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/client" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/docker/reference" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type buildOptions struct { - context string - dockerfileName string - tags opts.ListOpts - labels []string - buildArgs opts.ListOpts - ulimits *runconfigopts.UlimitOpt - memory string - memorySwap string - shmSize string - cpuShares int64 - cpuPeriod int64 - cpuQuota int64 - cpuSetCpus string - cpuSetMems string - cgroupParent string - isolation string - quiet bool - noCache bool - rm bool - forceRm bool - pull bool -} - -// NewBuildCommand creates a new `docker build` command -func NewBuildCommand(dockerCli *client.DockerCli) *cobra.Command { - ulimits := make(map[string]*units.Ulimit) - options := buildOptions{ - tags: opts.NewListOpts(validateTag), - buildArgs: opts.NewListOpts(runconfigopts.ValidateEnv), - ulimits: runconfigopts.NewUlimitOpt(&ulimits), - } - - cmd := &cobra.Command{ - Use: "build [OPTIONS] PATH | URL | -", - Short: "Build an image from a Dockerfile", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - options.context = args[0] - return runBuild(dockerCli, options) - }, - } - - flags := cmd.Flags() - - flags.VarP(&options.tags, "tag", "t", "Name and optionally a tag in the 'name:tag' format") - flags.Var(&options.buildArgs, "build-arg", "Set build-time variables") - flags.Var(options.ulimits, "ulimit", "Ulimit options") - flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (Default is 'PATH/Dockerfile')") - flags.StringVarP(&options.memory, "memory", "m", "", "Memory limit") - flags.StringVar(&options.memorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.StringVar(&options.shmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") - flags.Int64VarP(&options.cpuShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.Int64Var(&options.cpuPeriod, "cpu-period", 0, "Limit the CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&options.cpuQuota, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota") - flags.StringVar(&options.cpuSetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&options.cpuSetMems, 
"cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - flags.StringVar(&options.isolation, "isolation", "", "Container isolation technology") - flags.StringSliceVar(&options.labels, "label", []string{}, "Set metadata for an image") - flags.BoolVar(&options.noCache, "no-cache", false, "Do not use cache when building the image") - flags.BoolVar(&options.rm, "rm", true, "Remove intermediate containers after a successful build") - flags.BoolVar(&options.forceRm, "force-rm", false, "Always remove intermediate containers") - flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success") - flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image") - - client.AddTrustedFlags(flags, true) - - return cmd -} - -func runBuild(dockerCli *client.DockerCli, options buildOptions) error { - - var ( - buildCtx io.ReadCloser - err error - ) - - specifiedContext := options.context - - var ( - contextDir string - tempDir string - relDockerfile string - progBuff io.Writer - buildBuff io.Writer - ) - - progBuff = dockerCli.Out() - buildBuff = dockerCli.Out() - if options.quiet { - progBuff = bytes.NewBuffer(nil) - buildBuff = bytes.NewBuffer(nil) - } - - switch { - case specifiedContext == "-": - buildCtx, relDockerfile, err = builder.GetContextFromReader(dockerCli.In(), options.dockerfileName) - case urlutil.IsGitURL(specifiedContext): - tempDir, relDockerfile, err = builder.GetContextFromGitURL(specifiedContext, options.dockerfileName) - case urlutil.IsURL(specifiedContext): - buildCtx, relDockerfile, err = builder.GetContextFromURL(progBuff, specifiedContext, options.dockerfileName) - default: - contextDir, relDockerfile, err = builder.GetContextFromLocalDir(specifiedContext, options.dockerfileName) - } - - if err != nil { - if options.quiet && urlutil.IsURL(specifiedContext) { - fmt.Fprintln(dockerCli.Err(), progBuff) - } - return fmt.Errorf("unable to prepare context: %s", err) - } - - if tempDir != "" { - defer os.RemoveAll(tempDir) - contextDir = tempDir - } - - if buildCtx == nil { - // And canonicalize dockerfile name to a platform-independent one - relDockerfile, err = archive.CanonicalTarNameForPath(relDockerfile) - if err != nil { - return fmt.Errorf("cannot canonicalize dockerfile path %s: %v", relDockerfile, err) - } - - f, err := os.Open(filepath.Join(contextDir, ".dockerignore")) - if err != nil && !os.IsNotExist(err) { - return err - } - - var excludes []string - if err == nil { - excludes, err = dockerignore.ReadAll(f) - if err != nil { - return err - } - } - - if err := builder.ValidateContextDirectory(contextDir, excludes); err != nil { - return fmt.Errorf("Error checking context: '%s'.", err) - } - - // If .dockerignore mentions .dockerignore or the Dockerfile - // then make sure we send both files over to the daemon - // because Dockerfile is, obviously, needed no matter what, and - // .dockerignore is needed to know if either one needs to be - // removed. The daemon will remove them for us, if needed, after it - // parses the Dockerfile. Ignore errors here, as they will have been - // caught by validateContextDirectory above. 
- var includes = []string{"."} - keepThem1, _ := fileutils.Matches(".dockerignore", excludes) - keepThem2, _ := fileutils.Matches(relDockerfile, excludes) - if keepThem1 || keepThem2 { - includes = append(includes, ".dockerignore", relDockerfile) - } - - buildCtx, err = archive.TarWithOptions(contextDir, &archive.TarOptions{ - Compression: archive.Uncompressed, - ExcludePatterns: excludes, - IncludeFiles: includes, - }) - if err != nil { - return err - } - } - - ctx := context.Background() - - var resolvedTags []*resolvedTag - if client.IsTrusted() { - // Wrap the tar archive to replace the Dockerfile entry with the rewritten - // Dockerfile which uses trusted pulls. - buildCtx = replaceDockerfileTarWrapper(ctx, buildCtx, relDockerfile, dockerCli.TrustedReference, &resolvedTags) - } - - // Setup an upload progress bar - progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(progBuff, true) - - var body io.Reader = progress.NewProgressReader(buildCtx, progressOutput, 0, "", "Sending build context to Docker daemon") - - var memory int64 - if options.memory != "" { - parsedMemory, err := units.RAMInBytes(options.memory) - if err != nil { - return err - } - memory = parsedMemory - } - - var memorySwap int64 - if options.memorySwap != "" { - if options.memorySwap == "-1" { - memorySwap = -1 - } else { - parsedMemorySwap, err := units.RAMInBytes(options.memorySwap) - if err != nil { - return err - } - memorySwap = parsedMemorySwap - } - } - - var shmSize int64 - if options.shmSize != "" { - shmSize, err = units.RAMInBytes(options.shmSize) - if err != nil { - return err - } - } - - buildOptions := types.ImageBuildOptions{ - Memory: memory, - MemorySwap: memorySwap, - Tags: options.tags.GetAll(), - SuppressOutput: options.quiet, - NoCache: options.noCache, - Remove: options.rm, - ForceRemove: options.forceRm, - PullParent: options.pull, - Isolation: container.Isolation(options.isolation), - CPUSetCPUs: options.cpuSetCpus, - CPUSetMems: options.cpuSetMems, - CPUShares: options.cpuShares, - CPUQuota: options.cpuQuota, - CPUPeriod: options.cpuPeriod, - CgroupParent: options.cgroupParent, - Dockerfile: relDockerfile, - ShmSize: shmSize, - Ulimits: options.ulimits.GetList(), - BuildArgs: runconfigopts.ConvertKVStringsToMap(options.buildArgs.GetAll()), - AuthConfigs: dockerCli.RetrieveAuthConfigs(), - Labels: runconfigopts.ConvertKVStringsToMap(options.labels), - } - - response, err := dockerCli.Client().ImageBuild(ctx, body, buildOptions) - if err != nil { - return err - } - defer response.Body.Close() - - err = jsonmessage.DisplayJSONMessagesStream(response.Body, buildBuff, dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil) - if err != nil { - if jerr, ok := err.(*jsonmessage.JSONError); ok { - // If no error code is set, default to 1 - if jerr.Code == 0 { - jerr.Code = 1 - } - if options.quiet { - fmt.Fprintf(dockerCli.Err(), "%s%s", progBuff, buildBuff) - } - return cli.StatusError{Status: jerr.Message, StatusCode: jerr.Code} - } - } - - // Windows: show error message about modified file permissions if the - // daemon isn't running Windows. - if response.OSType != "windows" && runtime.GOOS == "windows" { - fmt.Fprintln(dockerCli.Err(), `SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host. All files and directories added to build context will have '-rwxr-xr-x' permissions. 
It is recommended to double check and reset permissions for sensitive files and directories.`) - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. - if options.quiet { - fmt.Fprintf(dockerCli.Out(), "%s", buildBuff) - } - - if client.IsTrusted() { - // Since the build was successful, now we must tag any of the resolved - // images from the above Dockerfile rewrite. - for _, resolved := range resolvedTags { - if err := dockerCli.TagTrusted(ctx, resolved.digestRef, resolved.tagRef); err != nil { - return err - } - } - } - - return nil -} - -type translatorFunc func(context.Context, reference.NamedTagged) (reference.Canonical, error) - -// validateTag checks if the given image name can be resolved. -func validateTag(rawRepo string) (string, error) { - _, err := reference.ParseNamed(rawRepo) - if err != nil { - return "", err - } - - return rawRepo, nil -} - -var dockerfileFromLinePattern = regexp.MustCompile(`(?i)^[\s]*FROM[ \f\r\t\v]+(?P[^ \f\r\t\v\n#]+)`) - -// resolvedTag records the repository, tag, and resolved digest reference -// from a Dockerfile rewrite. -type resolvedTag struct { - digestRef reference.Canonical - tagRef reference.NamedTagged -} - -// rewriteDockerfileFrom rewrites the given Dockerfile by resolving images in -// "FROM " instructions to a digest reference. `translator` is a -// function that takes a repository name and tag reference and returns a -// trusted digest reference. -func rewriteDockerfileFrom(ctx context.Context, dockerfile io.Reader, translator translatorFunc) (newDockerfile []byte, resolvedTags []*resolvedTag, err error) { - scanner := bufio.NewScanner(dockerfile) - buf := bytes.NewBuffer(nil) - - // Scan the lines of the Dockerfile, looking for a "FROM" line. - for scanner.Scan() { - line := scanner.Text() - - matches := dockerfileFromLinePattern.FindStringSubmatch(line) - if matches != nil && matches[1] != api.NoBaseImageSpecifier { - // Replace the line with a resolved "FROM repo@digest" - ref, err := reference.ParseNamed(matches[1]) - if err != nil { - return nil, nil, err - } - ref = reference.WithDefaultTag(ref) - if ref, ok := ref.(reference.NamedTagged); ok && client.IsTrusted() { - trustedRef, err := translator(ctx, ref) - if err != nil { - return nil, nil, err - } - - line = dockerfileFromLinePattern.ReplaceAllLiteralString(line, fmt.Sprintf("FROM %s", trustedRef.String())) - resolvedTags = append(resolvedTags, &resolvedTag{ - digestRef: trustedRef, - tagRef: ref, - }) - } - } - - _, err := fmt.Fprintln(buf, line) - if err != nil { - return nil, nil, err - } - } - - return buf.Bytes(), resolvedTags, scanner.Err() -} - -// replaceDockerfileTarWrapper wraps the given input tar archive stream and -// replaces the entry with the given Dockerfile name with the contents of the -// new Dockerfile. Returns a new tar archive stream with the replaced -// Dockerfile. -func replaceDockerfileTarWrapper(ctx context.Context, inputTarStream io.ReadCloser, dockerfileName string, translator translatorFunc, resolvedTags *[]*resolvedTag) io.ReadCloser { - pipeReader, pipeWriter := io.Pipe() - go func() { - tarReader := tar.NewReader(inputTarStream) - tarWriter := tar.NewWriter(pipeWriter) - - defer inputTarStream.Close() - - for { - hdr, err := tarReader.Next() - if err == io.EOF { - // Signals end of archive. 
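// Editor's note on the two Close calls that follow: the tar writer must be
// closed first so the trailing zero blocks that terminate a tar stream are
// flushed into the pipe; only then is the pipe closed to deliver EOF to the
// reader consuming the rewritten build context:
//
//	tarWriter.Close()  // flush tar footer
//	pipeWriter.Close() // signal EOF downstream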
- tarWriter.Close() - pipeWriter.Close() - return - } - if err != nil { - pipeWriter.CloseWithError(err) - return - } - - var content io.Reader = tarReader - if hdr.Name == dockerfileName { - // This entry is the Dockerfile. Since the tar archive was - // generated from a directory on the local filesystem, the - // Dockerfile will only appear once in the archive. - var newDockerfile []byte - newDockerfile, *resolvedTags, err = rewriteDockerfileFrom(ctx, content, translator) - if err != nil { - pipeWriter.CloseWithError(err) - return - } - hdr.Size = int64(len(newDockerfile)) - content = bytes.NewBuffer(newDockerfile) - } - - if err := tarWriter.WriteHeader(hdr); err != nil { - pipeWriter.CloseWithError(err) - return - } - - if _, err := io.Copy(tarWriter, content); err != nil { - pipeWriter.CloseWithError(err) - return - } - } - }() - - return pipeReader -} diff --git a/api/client/image/history.go b/api/client/image/history.go deleted file mode 100644 index abf2a0bb3b..0000000000 --- a/api/client/image/history.go +++ /dev/null @@ -1,99 +0,0 @@ -package image - -import ( - "fmt" - "strconv" - "strings" - "text/tabwriter" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type historyOptions struct { - image string - - human bool - quiet bool - noTrunc bool -} - -// NewHistoryCommand create a new `docker history` command -func NewHistoryCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts historyOptions - - cmd := &cobra.Command{ - Use: "history [OPTIONS] IMAGE", - Short: "Show the history of an image", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - return runHistory(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.human, "human", "H", true, "Print sizes and dates in human readable format") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - - return cmd -} - -func runHistory(dockerCli *client.DockerCli, opts historyOptions) error { - ctx := context.Background() - - history, err := dockerCli.Client().ImageHistory(ctx, opts.image) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - - if opts.quiet { - for _, entry := range history { - if opts.noTrunc { - fmt.Fprintf(w, "%s\n", entry.ID) - } else { - fmt.Fprintf(w, "%s\n", stringid.TruncateID(entry.ID)) - } - } - w.Flush() - return nil - } - - var imageID string - var createdBy string - var created string - var size string - - fmt.Fprintln(w, "IMAGE\tCREATED\tCREATED BY\tSIZE\tCOMMENT") - for _, entry := range history { - imageID = entry.ID - createdBy = strings.Replace(entry.CreatedBy, "\t", " ", -1) - if opts.noTrunc == false { - createdBy = stringutils.Truncate(createdBy, 45) - imageID = stringid.TruncateID(entry.ID) - } - - if opts.human { - created = units.HumanDuration(time.Now().UTC().Sub(time.Unix(entry.Created, 0))) + " ago" - size = units.HumanSize(float64(entry.Size)) - } else { - created = time.Unix(entry.Created, 0).Format(time.RFC3339) - size = strconv.FormatInt(entry.Size, 10) - } - - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\n", imageID, created, createdBy, size, entry.Comment) - } - w.Flush() - return nil -} diff --git a/api/client/image/images.go b/api/client/image/images.go 
deleted file mode 100644
index 460fb79801..0000000000
--- a/api/client/image/images.go
+++ /dev/null
@@ -1,103 +0,0 @@
-package image
-
-import (
-	"golang.org/x/net/context"
-
-	"github.com/docker/docker/api/client"
-	"github.com/docker/docker/api/client/formatter"
-	"github.com/docker/docker/cli"
-	"github.com/docker/engine-api/types"
-	"github.com/docker/engine-api/types/filters"
-	"github.com/spf13/cobra"
-)
-
-type imagesOptions struct {
-	matchName string
-
-	quiet       bool
-	all         bool
-	noTrunc     bool
-	showDigests bool
-	format      string
-	filter      []string
-}
-
-// NewImagesCommand creates a new `docker images` command
-func NewImagesCommand(dockerCli *client.DockerCli) *cobra.Command {
-	var opts imagesOptions
-
-	cmd := &cobra.Command{
-		Use:   "images [OPTIONS] [REPOSITORY[:TAG]]",
-		Short: "List images",
-		Args:  cli.RequiresMaxArgs(1),
-		RunE: func(cmd *cobra.Command, args []string) error {
-			if len(args) > 0 {
-				opts.matchName = args[0]
-			}
-			return runImages(dockerCli, opts)
-		},
-	}
-
-	flags := cmd.Flags()
-
-	flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only show numeric IDs")
-	flags.BoolVarP(&opts.all, "all", "a", false, "Show all images (default hides intermediate images)")
-	flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output")
-	flags.BoolVar(&opts.showDigests, "digests", false, "Show digests")
-	flags.StringVar(&opts.format, "format", "", "Pretty-print images using a Go template")
-	flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Filter output based on conditions provided")
-
-	return cmd
-}
-
-func runImages(dockerCli *client.DockerCli, opts imagesOptions) error {
-	ctx := context.Background()
-
-	// Consolidate all filter flags, and sanity check them early.
-	// They'll get processed in the daemon/server.
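// Editor's illustration: each -f/--filter occurrence is folded into a single
// filters.Args value by the loop below, so a hypothetical invocation such as
//
//	docker images -f dangling=true -f label=env=prod
//
// carries both conditions in one Args; a malformed flag (for example
// "-f dangling" with no value) fails filters.ParseFlag here, before any
// request reaches the daemon.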
- imageFilterArgs := filters.NewArgs() - for _, f := range opts.filter { - var err error - imageFilterArgs, err = filters.ParseFlag(f, imageFilterArgs) - if err != nil { - return err - } - } - - matchName := opts.matchName - - options := types.ImageListOptions{ - MatchName: matchName, - All: opts.all, - Filters: imageFilterArgs, - } - - images, err := dockerCli.Client().ImageList(ctx, options) - if err != nil { - return err - } - - f := opts.format - if len(f) == 0 { - if len(dockerCli.ImagesFormat()) > 0 && !opts.quiet { - f = dockerCli.ImagesFormat() - } else { - f = "table" - } - } - - imagesCtx := formatter.ImageContext{ - Context: formatter.Context{ - Output: dockerCli.Out(), - Format: f, - Quiet: opts.quiet, - Trunc: !opts.noTrunc, - }, - Digest: opts.showDigests, - Images: images, - } - - imagesCtx.Write() - - return nil -} diff --git a/api/client/image/import.go b/api/client/image/import.go deleted file mode 100644 index 2a7c37b905..0000000000 --- a/api/client/image/import.go +++ /dev/null @@ -1,86 +0,0 @@ -package image - -import ( - "io" - "os" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type importOptions struct { - source string - reference string - changes []string - message string -} - -// NewImportCommand creates a new `docker import` command -func NewImportCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts importOptions - - cmd := &cobra.Command{ - Use: "import [OPTIONS] file|URL|- [REPOSITORY[:TAG]]", - Short: "Import the contents from a tarball to create a filesystem image", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.source = args[0] - if len(args) > 1 { - opts.reference = args[1] - } - return runImport(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringSliceVarP(&opts.changes, "change", "c", []string{}, "Apply Dockerfile instruction to the created image") - flags.StringVarP(&opts.message, "message", "m", "", "Set commit message for imported image") - - return cmd -} - -func runImport(dockerCli *client.DockerCli, opts importOptions) error { - var ( - in io.Reader - srcName = opts.source - ) - - if opts.source == "-" { - in = dockerCli.In() - } else if !urlutil.IsURL(opts.source) { - srcName = "-" - file, err := os.Open(opts.source) - if err != nil { - return err - } - defer file.Close() - in = file - } - - source := types.ImageImportSource{ - Source: in, - SourceName: srcName, - } - - options := types.ImageImportOptions{ - Message: opts.message, - Changes: opts.changes, - } - - clnt := dockerCli.Client() - - responseBody, err := clnt.ImageImport(context.Background(), source, opts.reference, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream(responseBody, dockerCli.Out(), dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil) -} diff --git a/api/client/image/load.go b/api/client/image/load.go deleted file mode 100644 index 240362d1e7..0000000000 --- a/api/client/image/load.go +++ /dev/null @@ -1,67 +0,0 @@ -package image - -import ( - "io" - "os" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/spf13/cobra" -) - -type loadOptions struct { - input string - quiet bool -} - -// NewLoadCommand creates 
a new `docker load` command -func NewLoadCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts loadOptions - - cmd := &cobra.Command{ - Use: "load [OPTIONS]", - Short: "Load an image from a tar archive or STDIN", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runLoad(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.input, "input", "i", "", "Read from tar archive file, instead of STDIN") - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Suppress the load output") - - return cmd -} - -func runLoad(dockerCli *client.DockerCli, opts loadOptions) error { - - var input io.Reader = dockerCli.In() - if opts.input != "" { - file, err := os.Open(opts.input) - if err != nil { - return err - } - defer file.Close() - input = file - } - if !dockerCli.IsTerminalOut() { - opts.quiet = true - } - response, err := dockerCli.Client().ImageLoad(context.Background(), input, opts.quiet) - if err != nil { - return err - } - defer response.Body.Close() - - if response.Body != nil && response.JSON { - return jsonmessage.DisplayJSONMessagesStream(response.Body, dockerCli.Out(), dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil) - } - - _, err = io.Copy(dockerCli.Out(), response.Body) - return err -} diff --git a/api/client/image/pull.go b/api/client/image/pull.go deleted file mode 100644 index e5968db269..0000000000 --- a/api/client/image/pull.go +++ /dev/null @@ -1,85 +0,0 @@ -package image - -import ( - "errors" - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -type pullOptions struct { - remote string - all bool -} - -// NewPullCommand creates a new `docker pull` command -func NewPullCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts pullOptions - - cmd := &cobra.Command{ - Use: "pull [OPTIONS] NAME[:TAG|@DIGEST]", - Short: "Pull an image or a repository from a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.remote = args[0] - return runPull(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.all, "all-tags", "a", false, "Download all tagged images in the repository") - client.AddTrustedFlags(flags, true) - - return cmd -} - -func runPull(dockerCli *client.DockerCli, opts pullOptions) error { - distributionRef, err := reference.ParseNamed(opts.remote) - if err != nil { - return err - } - if opts.all && !reference.IsNameOnly(distributionRef) { - return errors.New("tag can't be used with --all-tags/-a") - } - - if !opts.all && reference.IsNameOnly(distributionRef) { - distributionRef = reference.WithDefaultTag(distributionRef) - fmt.Fprintf(dockerCli.Out(), "Using default tag: %s\n", reference.DefaultTag) - } - - var tag string - switch x := distributionRef.(type) { - case reference.Canonical: - tag = x.Digest().String() - case reference.NamedTagged: - tag = x.Tag() - } - - registryRef := registry.ParseReference(tag) - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(distributionRef) - if err != nil { - return err - } - - ctx := context.Background() - - authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index) - requestPrivilege := dockerCli.RegistryAuthenticationPrivilegedFunc(repoInfo.Index, "pull") - - if client.IsTrusted() && !registryRef.HasDigest() { - // Check if tag is digest - return 
dockerCli.TrustedPull(ctx, repoInfo, registryRef, authConfig, requestPrivilege) - } - - return dockerCli.ImagePullPrivileged(ctx, authConfig, distributionRef.String(), requestPrivilege, opts.all) - -} diff --git a/api/client/image/push.go b/api/client/image/push.go deleted file mode 100644 index 1526b2a26b..0000000000 --- a/api/client/image/push.go +++ /dev/null @@ -1,62 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -// NewPushCommand creates a new `docker push` command -func NewPushCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "push [OPTIONS] NAME[:TAG]", - Short: "Push an image or a repository to a registry", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPush(dockerCli, args[0]) - }, - } - - flags := cmd.Flags() - - client.AddTrustedFlags(flags, true) - - return cmd -} - -func runPush(dockerCli *client.DockerCli, remote string) error { - ref, err := reference.ParseNamed(remote) - if err != nil { - return err - } - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return err - } - - ctx := context.Background() - - // Resolve the Auth config relevant for this server - authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index) - requestPrivilege := dockerCli.RegistryAuthenticationPrivilegedFunc(repoInfo.Index, "push") - - if client.IsTrusted() { - return dockerCli.TrustedPush(ctx, repoInfo, ref, authConfig, requestPrivilege) - } - - responseBody, err := dockerCli.ImagePushPrivileged(ctx, authConfig, ref.String(), requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream(responseBody, dockerCli.Out(), dockerCli.OutFd(), dockerCli.IsTerminalOut(), nil) -} diff --git a/api/client/image/remove.go b/api/client/image/remove.go deleted file mode 100644 index c7c8322b89..0000000000 --- a/api/client/image/remove.go +++ /dev/null @@ -1,70 +0,0 @@ -package image - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type removeOptions struct { - force bool - noPrune bool -} - -// NewRemoveCommand creates a new `docker rmi` command -func NewRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rmi [OPTIONS] IMAGE [IMAGE...]", - Short: "Remove one or more images", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, opts, args) - }, - } - - flags := cmd.Flags() - - flags.BoolVarP(&opts.force, "force", "f", false, "Force removal of the image") - flags.BoolVar(&opts.noPrune, "no-prune", false, "Do not delete untagged parents") - - return cmd -} - -func runRemove(dockerCli *client.DockerCli, opts removeOptions, images []string) error { - client := dockerCli.Client() - ctx := context.Background() - - options := types.ImageRemoveOptions{ - Force: opts.force, - PruneChildren: !opts.noPrune, - } - - var errs []string - for _, image := range images { - dels, err := client.ImageRemove(ctx, image, options) - if err != nil { - errs = 
append(errs, err.Error()) - } else { - for _, del := range dels { - if del.Deleted != "" { - fmt.Fprintf(dockerCli.Out(), "Deleted: %s\n", del.Deleted) - } else { - fmt.Fprintf(dockerCli.Out(), "Untagged: %s\n", del.Untagged) - } - } - } - } - - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/image/save.go b/api/client/image/save.go deleted file mode 100644 index c5ea849a38..0000000000 --- a/api/client/image/save.go +++ /dev/null @@ -1,57 +0,0 @@ -package image - -import ( - "errors" - "io" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type saveOptions struct { - images []string - output string -} - -// NewSaveCommand creates a new `docker save` command -func NewSaveCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts saveOptions - - cmd := &cobra.Command{ - Use: "save [OPTIONS] IMAGE [IMAGE...]", - Short: "Save one or more images to a tar archive (streamed to STDOUT by default)", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.images = args - return runSave(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.output, "output", "o", "", "Write to a file, instead of STDOUT") - - return cmd -} - -func runSave(dockerCli *client.DockerCli, opts saveOptions) error { - if opts.output == "" && dockerCli.IsTerminalOut() { - return errors.New("Cowardly refusing to save to a terminal. Use the -o flag or redirect.") - } - - responseBody, err := dockerCli.Client().ImageSave(context.Background(), opts.images) - if err != nil { - return err - } - defer responseBody.Close() - - if opts.output == "" { - _, err := io.Copy(dockerCli.Out(), responseBody) - return err - } - - return client.CopyToFile(opts.output, responseBody) -} diff --git a/api/client/image/search.go b/api/client/image/search.go deleted file mode 100644 index d42b8aaf6d..0000000000 --- a/api/client/image/search.go +++ /dev/null @@ -1,135 +0,0 @@ -package image - -import ( - "fmt" - "sort" - "strings" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - registrytypes "github.com/docker/engine-api/types/registry" - "github.com/spf13/cobra" -) - -type searchOptions struct { - term string - noTrunc bool - limit int - filter []string - - // Deprecated - stars uint - automated bool -} - -// NewSearchCommand creates a new `docker search` command -func NewSearchCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts searchOptions - - cmd := &cobra.Command{ - Use: "search [OPTIONS] TERM", - Short: "Search the Docker Hub for images", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.term = args[0] - return runSearch(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Don't truncate output") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Filter output based on conditions provided") - flags.IntVar(&opts.limit, "limit", registry.DefaultSearchLimit, "Max number of search results") - - flags.BoolVar(&opts.automated, "automated", false, "Only show automated builds") - flags.UintVarP(&opts.stars, "stars", "s", 0, "Only display images with at least x stars") - - 
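// Note: cobra's MarkDeprecated (standard cobra behavior, not specific to this file) hides
// each named flag from help output and prints the given message whenever the flag is still
// used, so the two legacy flags above keep working while steering users toward --filter.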
flags.MarkDeprecated("automated", "use --filter=automated=true instead") - flags.MarkDeprecated("stars", "use --filter=stars=3 instead") - - return cmd -} - -func runSearch(dockerCli *client.DockerCli, opts searchOptions) error { - indexInfo, err := registry.ParseSearchIndexInfo(opts.term) - if err != nil { - return err - } - - ctx := context.Background() - - authConfig := dockerCli.ResolveAuthConfig(ctx, indexInfo) - requestPrivilege := dockerCli.RegistryAuthenticationPrivilegedFunc(indexInfo, "search") - - encodedAuth, err := client.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - searchFilters := filters.NewArgs() - for _, f := range opts.filter { - var err error - searchFilters, err = filters.ParseFlag(f, searchFilters) - if err != nil { - return err - } - } - - options := types.ImageSearchOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - Filters: searchFilters, - Limit: opts.limit, - } - - clnt := dockerCli.Client() - - unorderedResults, err := clnt.ImageSearch(ctx, opts.term, options) - if err != nil { - return err - } - - results := searchResultsByStars(unorderedResults) - sort.Sort(results) - - w := tabwriter.NewWriter(dockerCli.Out(), 10, 1, 3, ' ', 0) - fmt.Fprintf(w, "NAME\tDESCRIPTION\tSTARS\tOFFICIAL\tAUTOMATED\n") - for _, res := range results { - // --automated and -s, --stars are deprecated since Docker 1.12 - if (opts.automated && !res.IsAutomated) || (int(opts.stars) > res.StarCount) { - continue - } - desc := strings.Replace(res.Description, "\n", " ", -1) - desc = strings.Replace(desc, "\r", " ", -1) - if !opts.noTrunc && len(desc) > 45 { - desc = stringutils.Truncate(desc, 42) + "..." - } - fmt.Fprintf(w, "%s\t%s\t%d\t", res.Name, desc, res.StarCount) - if res.IsOfficial { - fmt.Fprint(w, "[OK]") - - } - fmt.Fprint(w, "\t") - if res.IsAutomated { - fmt.Fprint(w, "[OK]") - } - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} - -// SearchResultsByStars sorts search results in descending order by number of stars. 
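// A minimal usage sketch of the comparator defined below, as exercised by runSearch above
// (names and star counts are made up):
//
//	results := searchResultsByStars{{Name: "b", StarCount: 1}, {Name: "a", StarCount: 5}}
//	sort.Sort(results) // "a" (5 stars) now sorts first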
-type searchResultsByStars []registrytypes.SearchResult - -func (r searchResultsByStars) Len() int { return len(r) } -func (r searchResultsByStars) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r searchResultsByStars) Less(i, j int) bool { return r[j].StarCount < r[i].StarCount } diff --git a/api/client/image/tag.go b/api/client/image/tag.go deleted file mode 100644 index 665e3430c7..0000000000 --- a/api/client/image/tag.go +++ /dev/null @@ -1,41 +0,0 @@ -package image - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type tagOptions struct { - image string - name string -} - -// NewTagCommand creates a new `docker tag` command -func NewTagCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts tagOptions - - cmd := &cobra.Command{ - Use: "tag IMAGE[:TAG] IMAGE[:TAG]", - Short: "Tag an image into a repository", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - opts.name = args[1] - return runTag(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.SetInterspersed(false) - - return cmd -} - -func runTag(dockerCli *client.DockerCli, opts tagOptions) error { - ctx := context.Background() - - return dockerCli.Client().ImageTag(ctx, opts.image, opts.name) -} diff --git a/api/client/info.go b/api/client/info.go deleted file mode 100644 index 43b94c23b1..0000000000 --- a/api/client/info.go +++ /dev/null @@ -1,215 +0,0 @@ -package client - -import ( - "fmt" - "strings" - "time" - - "golang.org/x/net/context" - - Cli "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/ioutils" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/utils" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-units" -) - -// CmdInfo displays system-wide information. - // - // Usage: docker info -func (cli *DockerCli) CmdInfo(args ...string) error { - cmd := Cli.Subcmd("info", nil, Cli.DockerCommands["info"].Description, true) - cmd.Require(flag.Exact, 0) - - cmd.ParseFlags(args, true) - - ctx := context.Background() - info, err := cli.client.Info(ctx) - if err != nil { - return err - } - - fmt.Fprintf(cli.out, "Containers: %d\n", info.Containers) - fmt.Fprintf(cli.out, " Running: %d\n", info.ContainersRunning) - fmt.Fprintf(cli.out, " Paused: %d\n", info.ContainersPaused) - fmt.Fprintf(cli.out, " Stopped: %d\n", info.ContainersStopped) - fmt.Fprintf(cli.out, "Images: %d\n", info.Images) - ioutils.FprintfIfNotEmpty(cli.out, "Server Version: %s\n", info.ServerVersion) - ioutils.FprintfIfNotEmpty(cli.out, "Storage Driver: %s\n", info.Driver) - if info.DriverStatus != nil { - for _, pair := range info.DriverStatus { - fmt.Fprintf(cli.out, " %s: %s\n", pair[0], pair[1]) - - // print a warning if devicemapper is using a loopback file - if pair[0] == "Data loop file" { - fmt.Fprintln(cli.err, " WARNING: Usage of loopback devices is strongly discouraged for production use. 
Use `--storage-opt dm.thinpooldev` to specify a custom block storage device.") - } - } - - } - if info.SystemStatus != nil { - for _, pair := range info.SystemStatus { - fmt.Fprintf(cli.out, "%s: %s\n", pair[0], pair[1]) - } - } - ioutils.FprintfIfNotEmpty(cli.out, "Logging Driver: %s\n", info.LoggingDriver) - ioutils.FprintfIfNotEmpty(cli.out, "Cgroup Driver: %s\n", info.CgroupDriver) - - fmt.Fprintf(cli.out, "Plugins:\n") - fmt.Fprintf(cli.out, " Volume:") - fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Volume, " ")) - fmt.Fprintf(cli.out, "\n") - fmt.Fprintf(cli.out, " Network:") - fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Network, " ")) - fmt.Fprintf(cli.out, "\n") - - if len(info.Plugins.Authorization) != 0 { - fmt.Fprintf(cli.out, " Authorization:") - fmt.Fprintf(cli.out, " %s", strings.Join(info.Plugins.Authorization, " ")) - fmt.Fprintf(cli.out, "\n") - } - - fmt.Fprintf(cli.out, "Swarm: %v\n", info.Swarm.LocalNodeState) - if info.Swarm.LocalNodeState != swarm.LocalNodeStateInactive { - fmt.Fprintf(cli.out, " NodeID: %s\n", info.Swarm.NodeID) - if info.Swarm.Error != "" { - fmt.Fprintf(cli.out, " Error: %v\n", info.Swarm.Error) - } - fmt.Fprintf(cli.out, " Is Manager: %v\n", info.Swarm.ControlAvailable) - if info.Swarm.ControlAvailable { - fmt.Fprintf(cli.out, " ClusterID: %s\n", info.Swarm.Cluster.ID) - fmt.Fprintf(cli.out, " Managers: %d\n", info.Swarm.Managers) - fmt.Fprintf(cli.out, " Nodes: %d\n", info.Swarm.Nodes) - fmt.Fprintf(cli.out, " Orchestration:\n") - fmt.Fprintf(cli.out, " Task History Retention Limit: %d\n", info.Swarm.Cluster.Spec.Orchestration.TaskHistoryRetentionLimit) - fmt.Fprintf(cli.out, " Raft:\n") - fmt.Fprintf(cli.out, " Snapshot Interval: %d\n", info.Swarm.Cluster.Spec.Raft.SnapshotInterval) - fmt.Fprintf(cli.out, " Heartbeat Tick: %d\n", info.Swarm.Cluster.Spec.Raft.HeartbeatTick) - fmt.Fprintf(cli.out, " Election Tick: %d\n", info.Swarm.Cluster.Spec.Raft.ElectionTick) - fmt.Fprintf(cli.out, " Dispatcher:\n") - fmt.Fprintf(cli.out, " Heartbeat Period: %s\n", units.HumanDuration(time.Duration(info.Swarm.Cluster.Spec.Dispatcher.HeartbeatPeriod))) - fmt.Fprintf(cli.out, " CA Configuration:\n") - fmt.Fprintf(cli.out, " Expiry Duration: %s\n", units.HumanDuration(info.Swarm.Cluster.Spec.CAConfig.NodeCertExpiry)) - if len(info.Swarm.Cluster.Spec.CAConfig.ExternalCAs) > 0 { - fmt.Fprintf(cli.out, " External CAs:\n") - for _, entry := range info.Swarm.Cluster.Spec.CAConfig.ExternalCAs { - fmt.Fprintf(cli.out, " %s: %s\n", entry.Protocol, entry.URL) - } - } - } - fmt.Fprintf(cli.out, " Node Address: %s\n", info.Swarm.NodeAddr) - } - - if len(info.Runtimes) > 0 { - fmt.Fprintf(cli.out, "Runtimes:") - for name := range info.Runtimes { - fmt.Fprintf(cli.out, " %s", name) - } - fmt.Fprint(cli.out, "\n") - fmt.Fprintf(cli.out, "Default Runtime: %s\n", info.DefaultRuntime) - } - - fmt.Fprintf(cli.out, "Security Options:") - ioutils.FprintfIfNotEmpty(cli.out, " %s", strings.Join(info.SecurityOptions, " ")) - fmt.Fprintf(cli.out, "\n") - - ioutils.FprintfIfNotEmpty(cli.out, "Kernel Version: %s\n", info.KernelVersion) - ioutils.FprintfIfNotEmpty(cli.out, "Operating System: %s\n", info.OperatingSystem) - ioutils.FprintfIfNotEmpty(cli.out, "OSType: %s\n", info.OSType) - ioutils.FprintfIfNotEmpty(cli.out, "Architecture: %s\n", info.Architecture) - fmt.Fprintf(cli.out, "CPUs: %d\n", info.NCPU) - fmt.Fprintf(cli.out, "Total Memory: %s\n", units.BytesSize(float64(info.MemTotal))) - ioutils.FprintfIfNotEmpty(cli.out, "Name: %s\n", info.Name) - 
ioutils.FprintfIfNotEmpty(cli.out, "ID: %s\n", info.ID) - fmt.Fprintf(cli.out, "Docker Root Dir: %s\n", info.DockerRootDir) - fmt.Fprintf(cli.out, "Debug Mode (client): %v\n", utils.IsDebugEnabled()) - fmt.Fprintf(cli.out, "Debug Mode (server): %v\n", info.Debug) - - if info.Debug { - fmt.Fprintf(cli.out, " File Descriptors: %d\n", info.NFd) - fmt.Fprintf(cli.out, " Goroutines: %d\n", info.NGoroutines) - fmt.Fprintf(cli.out, " System Time: %s\n", info.SystemTime) - fmt.Fprintf(cli.out, " EventsListeners: %d\n", info.NEventsListener) - } - - ioutils.FprintfIfNotEmpty(cli.out, "Http Proxy: %s\n", info.HTTPProxy) - ioutils.FprintfIfNotEmpty(cli.out, "Https Proxy: %s\n", info.HTTPSProxy) - ioutils.FprintfIfNotEmpty(cli.out, "No Proxy: %s\n", info.NoProxy) - - if info.IndexServerAddress != "" { - u := cli.configFile.AuthConfigs[info.IndexServerAddress].Username - if len(u) > 0 { - fmt.Fprintf(cli.out, "Username: %v\n", u) - } - fmt.Fprintf(cli.out, "Registry: %v\n", info.IndexServerAddress) - } - - // Only output these warnings if the server does not support these features - if info.OSType != "windows" { - if !info.MemoryLimit { - fmt.Fprintln(cli.err, "WARNING: No memory limit support") - } - if !info.SwapLimit { - fmt.Fprintln(cli.err, "WARNING: No swap limit support") - } - if !info.KernelMemory { - fmt.Fprintln(cli.err, "WARNING: No kernel memory limit support") - } - if !info.OomKillDisable { - fmt.Fprintln(cli.err, "WARNING: No oom kill disable support") - } - if !info.CPUCfsQuota { - fmt.Fprintln(cli.err, "WARNING: No cpu cfs quota support") - } - if !info.CPUCfsPeriod { - fmt.Fprintln(cli.err, "WARNING: No cpu cfs period support") - } - if !info.CPUShares { - fmt.Fprintln(cli.err, "WARNING: No cpu shares support") - } - if !info.CPUSet { - fmt.Fprintln(cli.err, "WARNING: No cpuset support") - } - if !info.IPv4Forwarding { - fmt.Fprintln(cli.err, "WARNING: IPv4 forwarding is disabled") - } - if !info.BridgeNfIptables { - fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-iptables is disabled") - } - if !info.BridgeNfIP6tables { - fmt.Fprintln(cli.err, "WARNING: bridge-nf-call-ip6tables is disabled") - } - } - - if info.Labels != nil { - fmt.Fprintln(cli.out, "Labels:") - for _, attribute := range info.Labels { - fmt.Fprintf(cli.out, " %s\n", attribute) - } - } - - ioutils.FprintfIfTrue(cli.out, "Experimental: %v\n", info.ExperimentalBuild) - if info.ClusterStore != "" { - fmt.Fprintf(cli.out, "Cluster Store: %s\n", info.ClusterStore) - } - - if info.ClusterAdvertise != "" { - fmt.Fprintf(cli.out, "Cluster Advertise: %s\n", info.ClusterAdvertise) - } - - if info.RegistryConfig != nil && (len(info.RegistryConfig.InsecureRegistryCIDRs) > 0 || len(info.RegistryConfig.IndexConfigs) > 0) { - fmt.Fprintln(cli.out, "Insecure Registries:") - for _, registry := range info.RegistryConfig.IndexConfigs { - if registry.Secure == false { - fmt.Fprintf(cli.out, " %s\n", registry.Name) - } - } - - for _, registry := range info.RegistryConfig.InsecureRegistryCIDRs { - mask, _ := registry.Mask.Size() - fmt.Fprintf(cli.out, " %s/%d\n", registry.IP.String(), mask) - } - } - return nil -} diff --git a/api/client/inspect.go b/api/client/inspect.go deleted file mode 100644 index 2c22d5dc65..0000000000 --- a/api/client/inspect.go +++ /dev/null @@ -1,95 +0,0 @@ -package client - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client/inspect" - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/client" -) - -// CmdInspect 
displays low-level information on one or more containers, images or tasks. -// -// Usage: docker inspect [OPTIONS] CONTAINER|IMAGE|TASK [CONTAINER|IMAGE|TASK...] -func (cli *DockerCli) CmdInspect(args ...string) error { - cmd := Cli.Subcmd("inspect", []string{"[OPTIONS] CONTAINER|IMAGE|TASK [CONTAINER|IMAGE|TASK...]"}, Cli.DockerCommands["inspect"].Description, true) - tmplStr := cmd.String([]string{"f", "-format"}, "", "Format the output using the given go template") - inspectType := cmd.String([]string{"-type"}, "", "Return JSON for specified type (e.g. image, container or task)") - size := cmd.Bool([]string{"s", "-size"}, false, "Display total file sizes if the type is container") - cmd.Require(flag.Min, 1) - - cmd.ParseFlags(args, true) - - if *inspectType != "" && *inspectType != "container" && *inspectType != "image" && *inspectType != "task" { - return fmt.Errorf("%q is not a valid value for --type", *inspectType) - } - - ctx := context.Background() - - var elementSearcher inspect.GetRefFunc - switch *inspectType { - case "container": - elementSearcher = cli.inspectContainers(ctx, *size) - case "image": - elementSearcher = cli.inspectImages(ctx, *size) - case "task": - if *size { - fmt.Fprintln(cli.err, "WARNING: --size ignored for tasks") - } - elementSearcher = cli.inspectTasks(ctx) - default: - elementSearcher = cli.inspectAll(ctx, *size) - } - - return inspect.Inspect(cli.out, cmd.Args(), *tmplStr, elementSearcher) -} - -func (cli *DockerCli) inspectContainers(ctx context.Context, getSize bool) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return cli.client.ContainerInspectWithRaw(ctx, ref, getSize) - } -} - -func (cli *DockerCli) inspectImages(ctx context.Context, getSize bool) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return cli.client.ImageInspectWithRaw(ctx, ref, getSize) - } -} - -func (cli *DockerCli) inspectTasks(ctx context.Context) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - return cli.client.TaskInspectWithRaw(ctx, ref) - } -} - -func (cli *DockerCli) inspectAll(ctx context.Context, getSize bool) inspect.GetRefFunc { - return func(ref string) (interface{}, []byte, error) { - c, rawContainer, err := cli.client.ContainerInspectWithRaw(ctx, ref, getSize) - if err != nil { - // Search for image with that id if a container doesn't exist. - if client.IsErrContainerNotFound(err) { - i, rawImage, err := cli.client.ImageInspectWithRaw(ctx, ref, getSize) - if err != nil { - if client.IsErrImageNotFound(err) { - // Search for task with that id if an image doesn't exist. 
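// In other words, when no --type is given, resolution falls through
// container -> image -> task, trying the next type on each "not found"
// error before reporting that no object matched.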
- t, rawTask, err := cli.client.TaskInspectWithRaw(ctx, ref) - if err != nil { - return nil, nil, fmt.Errorf("Error: No such image, container or task: %s", ref) - } - if getSize { - fmt.Fprintln(cli.err, "WARNING: --size ignored for tasks") - } - return t, rawTask, nil - } - return nil, nil, err - } - return i, rawImage, nil - } - return nil, nil, err - } - return c, rawContainer, nil - } -} diff --git a/api/client/inspect/inspector.go b/api/client/inspect/inspector.go deleted file mode 100644 index b0537e8464..0000000000 --- a/api/client/inspect/inspector.go +++ /dev/null @@ -1,195 +0,0 @@ -package inspect - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "text/template" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/cli" - "github.com/docker/docker/utils/templates" -) - -// Inspector defines an interface to implement to process elements -type Inspector interface { - Inspect(typedElement interface{}, rawElement []byte) error - Flush() error -} - -// TemplateInspector uses a text template to inspect elements. -type TemplateInspector struct { - outputStream io.Writer - buffer *bytes.Buffer - tmpl *template.Template -} - -// NewTemplateInspector creates a new inspector with a template. -func NewTemplateInspector(outputStream io.Writer, tmpl *template.Template) Inspector { - return &TemplateInspector{ - outputStream: outputStream, - buffer: new(bytes.Buffer), - tmpl: tmpl, - } -} - -// NewTemplateInspectorFromString creates a new TemplateInspector from a string -// which is compiled into a template. -func NewTemplateInspectorFromString(out io.Writer, tmplStr string) (Inspector, error) { - if tmplStr == "" { - return NewIndentedInspector(out), nil - } - - tmpl, err := templates.Parse(tmplStr) - if err != nil { - return nil, fmt.Errorf("Template parsing error: %s", err) - } - return NewTemplateInspector(out, tmpl), nil -} - -// GetRefFunc is a function which used by Inspect to fetch an object from a -// reference -type GetRefFunc func(ref string) (interface{}, []byte, error) - -// Inspect fetches objects by reference using GetRefFunc and writes the json -// representation to the output writer. -func Inspect(out io.Writer, references []string, tmplStr string, getRef GetRefFunc) error { - inspector, err := NewTemplateInspectorFromString(out, tmplStr) - if err != nil { - return cli.StatusError{StatusCode: 64, Status: err.Error()} - } - - var inspectErr error - for _, ref := range references { - element, raw, err := getRef(ref) - if err != nil { - inspectErr = err - break - } - - if err := inspector.Inspect(element, raw); err != nil { - inspectErr = err - break - } - } - - if err := inspector.Flush(); err != nil { - logrus.Errorf("%s\n", err) - } - - if inspectErr != nil { - return cli.StatusError{StatusCode: 1, Status: inspectErr.Error()} - } - return nil -} - -// Inspect executes the inspect template. -// It decodes the raw element into a map if the initial execution fails. -// This allows docker cli to parse inspect structs injected with Swarm fields. -func (i *TemplateInspector) Inspect(typedElement interface{}, rawElement []byte) error { - buffer := new(bytes.Buffer) - if err := i.tmpl.Execute(buffer, typedElement); err != nil { - if rawElement == nil { - return fmt.Errorf("Template parsing error: %v", err) - } - return i.tryRawInspectFallback(rawElement) - } - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} - -// tryRawInspectFallback executes the inspect template with a raw interface. 
-// This allows docker cli to parse inspect structs injected with Swarm fields. -func (i *TemplateInspector) tryRawInspectFallback(rawElement []byte) error { - var raw interface{} - buffer := new(bytes.Buffer) - rdr := bytes.NewReader(rawElement) - dec := json.NewDecoder(rdr) - - if rawErr := dec.Decode(&raw); rawErr != nil { - return fmt.Errorf("unable to read inspect data: %v", rawErr) - } - - tmplMissingKey := i.tmpl.Option("missingkey=error") - if rawErr := tmplMissingKey.Execute(buffer, raw); rawErr != nil { - return fmt.Errorf("Template parsing error: %v", rawErr) - } - - i.buffer.Write(buffer.Bytes()) - i.buffer.WriteByte('\n') - return nil -} - -// Flush writes the result of inspecting all elements into the output stream. -func (i *TemplateInspector) Flush() error { - if i.buffer.Len() == 0 { - _, err := io.WriteString(i.outputStream, "\n") - return err - } - _, err := io.Copy(i.outputStream, i.buffer) - return err -} - -// IndentedInspector uses a buffer to store the indented representation of an element. -type IndentedInspector struct { - outputStream io.Writer - elements []interface{} - rawElements [][]byte -} - -// NewIndentedInspector generates a new IndentedInspector. -func NewIndentedInspector(outputStream io.Writer) Inspector { - return &IndentedInspector{ - outputStream: outputStream, - } -} - -// Inspect writes the raw element with an indented json format. -func (i *IndentedInspector) Inspect(typedElement interface{}, rawElement []byte) error { - if rawElement != nil { - i.rawElements = append(i.rawElements, rawElement) - } else { - i.elements = append(i.elements, typedElement) - } - return nil -} - -// Flush writes the result of inspecting all elements into the output stream. -func (i *IndentedInspector) Flush() error { - if len(i.elements) == 0 && len(i.rawElements) == 0 { - _, err := io.WriteString(i.outputStream, "[]\n") - return err - } - - var buffer io.Reader - if len(i.rawElements) > 0 { - bytesBuffer := new(bytes.Buffer) - bytesBuffer.WriteString("[") - for idx, r := range i.rawElements { - bytesBuffer.Write(r) - if idx < len(i.rawElements)-1 { - bytesBuffer.WriteString(",") - } - } - bytesBuffer.WriteString("]") - indented := new(bytes.Buffer) - if err := json.Indent(indented, bytesBuffer.Bytes(), "", " "); err != nil { - return err - } - buffer = indented - } else { - b, err := json.MarshalIndent(i.elements, "", " ") - if err != nil { - return err - } - buffer = bytes.NewReader(b) - } - - if _, err := io.Copy(i.outputStream, buffer); err != nil { - return err - } - _, err := io.WriteString(i.outputStream, "\n") - return err -} diff --git a/api/client/inspect/inspector_test.go b/api/client/inspect/inspector_test.go deleted file mode 100644 index 1ce1593ab7..0000000000 --- a/api/client/inspect/inspector_test.go +++ /dev/null @@ -1,221 +0,0 @@ -package inspect - -import ( - "bytes" - "strings" - "testing" - - "github.com/docker/docker/utils/templates" -) - -type testElement struct { - DNS string `json:"Dns"` -} - -func TestTemplateInspectorDefault(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n" { - t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorEmpty(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != 
nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "\n" { - t.Fatalf("Expected `\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorTemplateError(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Foo}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - err = i.Inspect(testElement{"0.0.0.0"}, nil) - if err == nil { - t.Fatal("Expected error got nil") - } - - if !strings.HasPrefix(err.Error(), "Template parsing error") { - t.Fatalf("Expected template error, got %v", err) - } -} - -func TestTemplateInspectorRawFallback(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Dns}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n" { - t.Fatalf("Expected `0.0.0.0\\n`, got `%s`", b.String()) - } -} - -func TestTemplateInspectorRawFallbackError(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.Dns}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - err = i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Foo": "0.0.0.0"}`)) - if err == nil { - t.Fatal("Expected error got nil") - } - - if !strings.HasPrefix(err.Error(), "Template parsing error") { - t.Fatalf("Expected template error, got %v", err) - } -} - -func TestTemplateInspectorMultiple(t *testing.T) { - b := new(bytes.Buffer) - tmpl, err := templates.Parse("{{.DNS}}") - if err != nil { - t.Fatal(err) - } - i := NewTemplateInspector(b, tmpl) - - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - if b.String() != "0.0.0.0\n1.1.1.1\n" { - t.Fatalf("Expected `0.0.0.0\\n1.1.1.1\\n`, got `%s`", b.String()) - } -} - -func TestIndentedInspectorDefault(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorMultiple(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Inspect(testElement{"1.1.1.1"}, nil); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0" - }, - { - "Dns": "1.1.1.1" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorEmpty(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := "[]\n" - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} - -func TestIndentedInspectorRawElements(t *testing.T) { - b := new(bytes.Buffer) - i := NewIndentedInspector(b) - if err := i.Inspect(testElement{"0.0.0.0"}, []byte(`{"Dns": "0.0.0.0", "Node": "0"}`)); err != nil { - t.Fatal(err) - } - - if err := 
i.Inspect(testElement{"1.1.1.1"}, []byte(`{"Dns": "1.1.1.1", "Node": "1"}`)); err != nil { - t.Fatal(err) - } - - if err := i.Flush(); err != nil { - t.Fatal(err) - } - - expected := `[ - { - "Dns": "0.0.0.0", - "Node": "0" - }, - { - "Dns": "1.1.1.1", - "Node": "1" - } -] -` - if b.String() != expected { - t.Fatalf("Expected `%s`, got `%s`", expected, b.String()) - } -} diff --git a/api/client/network/cmd.go b/api/client/network/cmd.go deleted file mode 100644 index f616129f15..0000000000 --- a/api/client/network/cmd.go +++ /dev/null @@ -1,31 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" -) - -// NewNetworkCommand returns a cobra command for `network` subcommands -func NewNetworkCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "network", - Short: "Manage Docker networks", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newConnectCommand(dockerCli), - newCreateCommand(dockerCli), - newDisconnectCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - ) - return cmd -} diff --git a/api/client/network/connect.go b/api/client/network/connect.go deleted file mode 100644 index 57a7299c14..0000000000 --- a/api/client/network/connect.go +++ /dev/null @@ -1,64 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types/network" - "github.com/spf13/cobra" -) - -type connectOptions struct { - network string - container string - ipaddress string - ipv6address string - links opts.ListOpts - aliases []string - linklocalips []string -} - -func newConnectCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := connectOptions{ - links: opts.NewListOpts(runconfigopts.ValidateLink), - } - - cmd := &cobra.Command{ - Use: "connect [OPTIONS] NETWORK CONTAINER", - Short: "Connect a container to a network", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.network = args[0] - opts.container = args[1] - return runConnect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVar(&opts.ipaddress, "ip", "", "IP Address") - flags.StringVar(&opts.ipv6address, "ip6", "", "IPv6 Address") - flags.Var(&opts.links, "link", "Add link to another container") - flags.StringSliceVar(&opts.aliases, "alias", []string{}, "Add network-scoped alias for the container") - flags.StringSliceVar(&opts.linklocalips, "link-local-ip", []string{}, "Add a link-local address for the container") - - return cmd -} - -func runConnect(dockerCli *client.DockerCli, opts connectOptions) error { - client := dockerCli.Client() - - epConfig := &network.EndpointSettings{ - IPAMConfig: &network.EndpointIPAMConfig{ - IPv4Address: opts.ipaddress, - IPv6Address: opts.ipv6address, - LinkLocalIPs: opts.linklocalips, - }, - Links: opts.links.GetAll(), - Aliases: opts.aliases, - } - - return client.NetworkConnect(context.Background(), opts.network, opts.container, epConfig) -} diff --git a/api/client/network/create.go b/api/client/network/create.go deleted file mode 100644 index f2fb45c9a3..0000000000 --- a/api/client/network/create.go +++ /dev/null @@ -1,222 +0,0 @@ -package network - -import ( - "fmt" 
- "net" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - "github.com/spf13/cobra" -) - -type createOptions struct { - name string - driver string - driverOpts opts.MapOpts - labels []string - internal bool - ipv6 bool - - ipamDriver string - ipamSubnet []string - ipamIPRange []string - ipamGateway []string - ipamAux opts.MapOpts - ipamOpt opts.MapOpts -} - -func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := createOptions{ - driverOpts: *opts.NewMapOpts(nil, nil), - ipamAux: *opts.NewMapOpts(nil, nil), - ipamOpt: *opts.NewMapOpts(nil, nil), - } - - cmd := &cobra.Command{ - Use: "create [OPTIONS] NETWORK", - Short: "Create a network", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.name = args[0] - return runCreate(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.driver, "driver", "d", "bridge", "Driver to manage the Network") - flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") - flags.StringSliceVar(&opts.labels, "label", []string{}, "Set metadata on a network") - flags.BoolVar(&opts.internal, "internal", false, "Restrict external access to the network") - flags.BoolVar(&opts.ipv6, "ipv6", false, "Enable IPv6 networking") - - flags.StringVar(&opts.ipamDriver, "ipam-driver", "default", "IP Address Management Driver") - flags.StringSliceVar(&opts.ipamSubnet, "subnet", []string{}, "Subnet in CIDR format that represents a network segment") - flags.StringSliceVar(&opts.ipamIPRange, "ip-range", []string{}, "Allocate container ip from a sub-range") - flags.StringSliceVar(&opts.ipamGateway, "gateway", []string{}, "IPv4 or IPv6 Gateway for the master subnet") - - flags.Var(&opts.ipamAux, "aux-address", "Auxiliary IPv4 or IPv6 addresses used by Network driver") - flags.Var(&opts.ipamOpt, "ipam-opt", "Set IPAM driver specific options") - - return cmd -} - -func runCreate(dockerCli *client.DockerCli, opts createOptions) error { - client := dockerCli.Client() - - ipamCfg, err := consolidateIpam(opts.ipamSubnet, opts.ipamIPRange, opts.ipamGateway, opts.ipamAux.GetAll()) - if err != nil { - return err - } - - // Construct network create request body - nc := types.NetworkCreate{ - Driver: opts.driver, - Options: opts.driverOpts.GetAll(), - IPAM: network.IPAM{ - Driver: opts.ipamDriver, - Config: ipamCfg, - Options: opts.ipamOpt.GetAll(), - }, - CheckDuplicate: true, - Internal: opts.internal, - EnableIPv6: opts.ipv6, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels), - } - - resp, err := client.NetworkCreate(context.Background(), opts.name, nc) - if err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "%s\n", resp.ID) - return nil -} - -// Consolidates the ipam configuration as a group from different related configurations -// user can configure network with multiple non-overlapping subnets and hence it is -// possible to correlate the various related parameters and consolidate them. -// consoidateIpam consolidates subnets, ip-ranges, gateways and auxiliary addresses into -// structured ipam data. 
-func consolidateIpam(subnets, ranges, gateways []string, auxaddrs map[string]string) ([]network.IPAMConfig, error) { - if len(subnets) < len(ranges) || len(subnets) < len(gateways) { - return nil, fmt.Errorf("every ip-range or gateway must have a corresponding subnet") - } - iData := map[string]*network.IPAMConfig{} - - // Populate non-overlapping subnets into consolidation map - for _, s := range subnets { - for k := range iData { - ok1, err := subnetMatches(s, k) - if err != nil { - return nil, err - } - ok2, err := subnetMatches(k, s) - if err != nil { - return nil, err - } - if ok1 || ok2 { - return nil, fmt.Errorf("multiple overlapping subnet configuration is not supported") - } - } - iData[s] = &network.IPAMConfig{Subnet: s, AuxAddress: map[string]string{}} - } - - // Validate and add valid ip ranges - for _, r := range ranges { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, r) - if err != nil { - return nil, err - } - if !ok { - continue - } - if iData[s].IPRange != "" { - return nil, fmt.Errorf("cannot configure multiple ranges (%s, %s) on the same subnet (%s)", r, iData[s].IPRange, s) - } - d := iData[s] - d.IPRange = r - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for range %s", r) - } - } - - // Validate and add valid gateways - for _, g := range gateways { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, g) - if err != nil { - return nil, err - } - if !ok { - continue - } - if iData[s].Gateway != "" { - return nil, fmt.Errorf("cannot configure multiple gateways (%s, %s) for the same subnet (%s)", g, iData[s].Gateway, s) - } - d := iData[s] - d.Gateway = g - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for gateway %s", g) - } - } - - // Validate and add aux-addresses - for key, aa := range auxaddrs { - match := false - for _, s := range subnets { - ok, err := subnetMatches(s, aa) - if err != nil { - return nil, err - } - if !ok { - continue - } - iData[s].AuxAddress[key] = aa - match = true - } - if !match { - return nil, fmt.Errorf("no matching subnet for aux-address %s", aa) - } - } - - idl := []network.IPAMConfig{} - for _, v := range iData { - idl = append(idl, *v) - } - return idl, nil -} - -func subnetMatches(subnet, data string) (bool, error) { - var ( - ip net.IP - ) - - _, s, err := net.ParseCIDR(subnet) - if err != nil { - return false, fmt.Errorf("Invalid subnet %s : %v", subnet, err) - } - - if strings.Contains(data, "/") { - ip, _, err = net.ParseCIDR(data) - if err != nil { - return false, fmt.Errorf("Invalid cidr %s : %v", data, err) - } - } else { - ip = net.ParseIP(data) - } - - return s.Contains(ip), nil -} diff --git a/api/client/network/disconnect.go b/api/client/network/disconnect.go deleted file mode 100644 index 7d5111ba67..0000000000 --- a/api/client/network/disconnect.go +++ /dev/null @@ -1,41 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type disconnectOptions struct { - network string - container string - force bool -} - -func newDisconnectCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := disconnectOptions{} - - cmd := &cobra.Command{ - Use: "disconnect [OPTIONS] NETWORK CONTAINER", - Short: "Disconnect a container from a network", - Args: cli.ExactArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - opts.network = args[0] - opts.container = args[1] - return runDisconnect(dockerCli, 
opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.force, "force", "f", false, "Force the container to disconnect from a network") - - return cmd -} - -func runDisconnect(dockerCli *client.DockerCli, opts disconnectOptions) error { - client := dockerCli.Client() - - return client.NetworkDisconnect(context.Background(), opts.network, opts.container, opts.force) -} diff --git a/api/client/network/inspect.go b/api/client/network/inspect.go deleted file mode 100644 index 3a503e14d4..0000000000 --- a/api/client/network/inspect.go +++ /dev/null @@ -1,45 +0,0 @@ -package network - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/inspect" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - names []string -} - -func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] NETWORK [NETWORK...]", - Short: "Display detailed information on one or more networks", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.names = args - return runInspect(dockerCli, opts) - }, - } - - cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template") - - return cmd -} - -func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - - ctx := context.Background() - - getNetFunc := func(name string) (interface{}, []byte, error) { - return client.NetworkInspectWithRaw(ctx, name) - } - - return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getNetFunc) -} diff --git a/api/client/network/list.go b/api/client/network/list.go deleted file mode 100644 index 71c8f1a0b6..0000000000 --- a/api/client/network/list.go +++ /dev/null @@ -1,100 +0,0 @@ -package network - -import ( - "fmt" - "sort" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/spf13/cobra" -) - -type byNetworkName []types.NetworkResource - -func (r byNetworkName) Len() int { return len(r) } -func (r byNetworkName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byNetworkName) Less(i, j int) bool { return r[i].Name < r[j].Name } - -type listOptions struct { - quiet bool - noTrunc bool - filter []string -} - -func newListCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts listOptions - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List networks", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display network IDs") - flags.BoolVar(&opts.noTrunc, "no-trunc", false, "Do not truncate the output") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Provide filter values (i.e. 
'dangling=true')") - - return cmd -} - -func runList(dockerCli *client.DockerCli, opts listOptions) error { - client := dockerCli.Client() - - netFilterArgs := filters.NewArgs() - for _, f := range opts.filter { - var err error - netFilterArgs, err = filters.ParseFlag(f, netFilterArgs) - if err != nil { - return err - } - } - - options := types.NetworkListOptions{ - Filters: netFilterArgs, - } - - networkResources, err := client.NetworkList(context.Background(), options) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - if !opts.quiet { - fmt.Fprintf(w, "NETWORK ID\tNAME\tDRIVER\tSCOPE") - fmt.Fprintf(w, "\n") - } - - sort.Sort(byNetworkName(networkResources)) - for _, networkResource := range networkResources { - ID := networkResource.ID - netName := networkResource.Name - driver := networkResource.Driver - scope := networkResource.Scope - if !opts.noTrunc { - ID = stringid.TruncateID(ID) - } - if opts.quiet { - fmt.Fprintln(w, ID) - continue - } - fmt.Fprintf(w, "%s\t%s\t%s\t%s\t", - ID, - netName, - driver, - scope) - fmt.Fprint(w, "\n") - } - w.Flush() - return nil -} diff --git a/api/client/network/remove.go b/api/client/network/remove.go deleted file mode 100644 index 0313cf0be8..0000000000 --- a/api/client/network/remove.go +++ /dev/null @@ -1,43 +0,0 @@ -package network - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "rm NETWORK [NETWORK...]", - Aliases: []string{"remove"}, - Short: "Remove one or more networks", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } -} - -func runRemove(dockerCli *client.DockerCli, networks []string) error { - client := dockerCli.Client() - ctx := context.Background() - status := 0 - - for _, name := range networks { - if err := client.NetworkRemove(ctx, name); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - status = 1 - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} diff --git a/api/client/node/cmd.go b/api/client/node/cmd.go deleted file mode 100644 index bf17819c58..0000000000 --- a/api/client/node/cmd.go +++ /dev/null @@ -1,49 +0,0 @@ -package node - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - apiclient "github.com/docker/engine-api/client" -) - -// NewNodeCommand returns a cobra command for `node` subcommands -func NewNodeCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "node", - Short: "Manage Docker Swarm nodes", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newDemoteCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newPromoteCommand(dockerCli), - newRemoveCommand(dockerCli), - newPSCommand(dockerCli), - newUpdateCommand(dockerCli), - ) - return cmd -} - -// Reference returns the reference of a node. The special value "self" for a node -// reference is mapped to the current node, hence the node ID is retrieved using -// the `/info` endpoint. 
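// A usage sketch (hypothetical caller): Reference(apiClient, ctx, "self") resolves to
// the local node's ID as reported by the Info call, while any other ref is returned
// unchanged for the daemon to resolve.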
-func Reference(client apiclient.APIClient, ctx context.Context, ref string) (string, error) { - if ref == "self" { - info, err := client.Info(ctx) - if err != nil { - return "", err - } - return info.Swarm.NodeID, nil - } - return ref, nil -} diff --git a/api/client/node/demote.go b/api/client/node/demote.go deleted file mode 100644 index 3819b1a399..0000000000 --- a/api/client/node/demote.go +++ /dev/null @@ -1,32 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" -) - -func newDemoteCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "demote NODE [NODE...]", - Short: "Demote one or more nodes from manager in the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runDemote(dockerCli, args) - }, - } -} - -func runDemote(dockerCli *client.DockerCli, nodes []string) error { - demote := func(node *swarm.Node) error { - node.Spec.Role = swarm.NodeRoleWorker - return nil - } - success := func(nodeID string) { - fmt.Fprintf(dockerCli.Out(), "Manager %s demoted in the swarm.\n", nodeID) - } - return updateNodes(dockerCli, nodes, demote, success) -} diff --git a/api/client/node/inspect.go b/api/client/node/inspect.go deleted file mode 100644 index 6fcd78e932..0000000000 --- a/api/client/node/inspect.go +++ /dev/null @@ -1,144 +0,0 @@ -package node - -import ( - "fmt" - "io" - "sort" - "strings" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/inspect" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-units" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type inspectOptions struct { - nodeIds []string - format string - pretty bool -} - -func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] self|NODE [NODE...]", - Short: "Display detailed information on one or more nodes", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.nodeIds = args - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template") - flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") - return cmd -} - -func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - getRef := func(ref string) (interface{}, []byte, error) { - nodeRef, err := Reference(client, ctx, ref) - if err != nil { - return nil, nil, err - } - node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) - return node, nil, err - } - - if !opts.pretty { - return inspect.Inspect(dockerCli.Out(), opts.nodeIds, opts.format, getRef) - } - return printHumanFriendly(dockerCli.Out(), opts.nodeIds, getRef) -} - -func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error { - for idx, ref := range refs { - obj, _, err := getRef(ref) - if err != nil { - return err - } - printNode(out, obj.(swarm.Node)) - - // TODO: better way to do this? 
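// (That is: each node is separated from the next by one blank line, and the
// output always ends with a single trailing newline.)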
- // print extra space between objects, but not after the last one - if idx+1 != len(refs) { - fmt.Fprintf(out, "\n\n") - } else { - fmt.Fprintf(out, "\n") - } - } - return nil -} - -// TODO: use a template -func printNode(out io.Writer, node swarm.Node) { - fmt.Fprintf(out, "ID:\t\t\t%s\n", node.ID) - ioutils.FprintfIfNotEmpty(out, "Name:\t\t\t%s\n", node.Spec.Name) - if node.Spec.Labels != nil { - fmt.Fprintln(out, "Labels:") - for k, v := range node.Spec.Labels { - fmt.Fprintf(out, " - %s = %s\n", k, v) - } - } - - ioutils.FprintfIfNotEmpty(out, "Hostname:\t\t%s\n", node.Description.Hostname) - fmt.Fprintf(out, "Joined at:\t\t%s\n", client.PrettyPrint(node.CreatedAt)) - fmt.Fprintln(out, "Status:") - fmt.Fprintf(out, " State:\t\t\t%s\n", client.PrettyPrint(node.Status.State)) - ioutils.FprintfIfNotEmpty(out, " Message:\t\t%s\n", client.PrettyPrint(node.Status.Message)) - fmt.Fprintf(out, " Availability:\t\t%s\n", client.PrettyPrint(node.Spec.Availability)) - - if node.ManagerStatus != nil { - fmt.Fprintln(out, "Manager Status:") - fmt.Fprintf(out, " Address:\t\t%s\n", node.ManagerStatus.Addr) - fmt.Fprintf(out, " Raft Status:\t\t%s\n", client.PrettyPrint(node.ManagerStatus.Reachability)) - leader := "No" - if node.ManagerStatus.Leader { - leader = "Yes" - } - fmt.Fprintf(out, " Leader:\t\t%s\n", leader) - } - - fmt.Fprintln(out, "Platform:") - fmt.Fprintf(out, " Operating System:\t%s\n", node.Description.Platform.OS) - fmt.Fprintf(out, " Architecture:\t\t%s\n", node.Description.Platform.Architecture) - - fmt.Fprintln(out, "Resources:") - fmt.Fprintf(out, " CPUs:\t\t\t%d\n", node.Description.Resources.NanoCPUs/1e9) - fmt.Fprintf(out, " Memory:\t\t%s\n", units.BytesSize(float64(node.Description.Resources.MemoryBytes))) - - var pluginTypes []string - pluginNamesByType := map[string][]string{} - for _, p := range node.Description.Engine.Plugins { - // append to pluginTypes only if not done previously - if _, ok := pluginNamesByType[p.Type]; !ok { - pluginTypes = append(pluginTypes, p.Type) - } - pluginNamesByType[p.Type] = append(pluginNamesByType[p.Type], p.Name) - } - - if len(pluginTypes) > 0 { - fmt.Fprintln(out, "Plugins:") - sort.Strings(pluginTypes) // ensure stable output - for _, pluginType := range pluginTypes { - fmt.Fprintf(out, " %s:\t\t%s\n", pluginType, strings.Join(pluginNamesByType[pluginType], ", ")) - } - } - fmt.Fprintf(out, "Engine Version:\t\t%s\n", node.Description.Engine.EngineVersion) - - if len(node.Description.Engine.Labels) != 0 { - fmt.Fprintln(out, "Engine Labels:") - for k, v := range node.Description.Engine.Labels { - fmt.Fprintf(out, " - %s = %s\n", k, v) - } - } - -} diff --git a/api/client/node/list.go b/api/client/node/list.go deleted file mode 100644 index 55929adabe..0000000000 --- a/api/client/node/list.go +++ /dev/null @@ -1,111 +0,0 @@ -package node - -import ( - "fmt" - "io" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" -) - -const ( - listItemFmt = "%s\t%s\t%s\t%s\t%s\n" -) - -type listOptions struct { - quiet bool - filter opts.FilterOpt -} - -func newListCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List nodes in the swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args 
[]string) error { - return runList(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runList(dockerCli *client.DockerCli, opts listOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - nodes, err := client.NodeList( - ctx, - types.NodeListOptions{Filter: opts.filter.Value()}) - if err != nil { - return err - } - - info, err := client.Info(ctx) - if err != nil { - return err - } - - out := dockerCli.Out() - if opts.quiet { - printQuiet(out, nodes) - } else { - printTable(out, nodes, info) - } - return nil -} - -func printTable(out io.Writer, nodes []swarm.Node, info types.Info) { - writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - - fmt.Fprintf(writer, listItemFmt, "ID", "HOSTNAME", "STATUS", "AVAILABILITY", "MANAGER STATUS") - for _, node := range nodes { - name := node.Description.Hostname - availability := string(node.Spec.Availability) - - reachability := "" - if node.ManagerStatus != nil { - if node.ManagerStatus.Leader { - reachability = "Leader" - } else { - reachability = string(node.ManagerStatus.Reachability) - } - } - - ID := node.ID - if node.ID == info.Swarm.NodeID { - ID = ID + " *" - } - - fmt.Fprintf( - writer, - listItemFmt, - ID, - name, - client.PrettyPrint(string(node.Status.State)), - client.PrettyPrint(availability), - client.PrettyPrint(reachability)) - } -} - -func printQuiet(out io.Writer, nodes []swarm.Node) { - for _, node := range nodes { - fmt.Fprintln(out, node.ID) - } -} diff --git a/api/client/node/opts.go b/api/client/node/opts.go deleted file mode 100644 index f387bafc47..0000000000 --- a/api/client/node/opts.go +++ /dev/null @@ -1,60 +0,0 @@ -package node - -import ( - "fmt" - "strings" - - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types/swarm" -) - -type nodeOptions struct { - annotations - role string - availability string -} - -type annotations struct { - name string - labels opts.ListOpts -} - -func newNodeOptions() *nodeOptions { - return &nodeOptions{ - annotations: annotations{ - labels: opts.NewListOpts(nil), - }, - } -} - -func (opts *nodeOptions) ToNodeSpec() (swarm.NodeSpec, error) { - var spec swarm.NodeSpec - - spec.Annotations.Name = opts.annotations.name - spec.Annotations.Labels = runconfigopts.ConvertKVStringsToMap(opts.annotations.labels.GetAll()) - - switch swarm.NodeRole(strings.ToLower(opts.role)) { - case swarm.NodeRoleWorker: - spec.Role = swarm.NodeRoleWorker - case swarm.NodeRoleManager: - spec.Role = swarm.NodeRoleManager - case "": - default: - return swarm.NodeSpec{}, fmt.Errorf("invalid role %q, only worker and manager are supported", opts.role) - } - - switch swarm.NodeAvailability(strings.ToLower(opts.availability)) { - case swarm.NodeAvailabilityActive: - spec.Availability = swarm.NodeAvailabilityActive - case swarm.NodeAvailabilityPause: - spec.Availability = swarm.NodeAvailabilityPause - case swarm.NodeAvailabilityDrain: - spec.Availability = swarm.NodeAvailabilityDrain - case "": - default: - return swarm.NodeSpec{}, fmt.Errorf("invalid availability %q, only active, pause and drain are supported", opts.availability) - } - - return spec, nil -} diff --git a/api/client/node/promote.go b/api/client/node/promote.go deleted file mode 100644 index 39e284f937..0000000000 --- 
a/api/client/node/promote.go +++ /dev/null @@ -1,32 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" -) - -func newPromoteCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "promote NODE [NODE...]", - Short: "Promote one or more nodes to manager in the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPromote(dockerCli, args) - }, - } -} - -func runPromote(dockerCli *client.DockerCli, nodes []string) error { - promote := func(node *swarm.Node) error { - node.Spec.Role = swarm.NodeRoleManager - return nil - } - success := func(nodeID string) { - fmt.Fprintf(dockerCli.Out(), "Node %s promoted to a manager in the swarm.\n", nodeID) - } - return updateNodes(dockerCli, nodes, promote, success) -} diff --git a/api/client/node/ps.go b/api/client/node/ps.go deleted file mode 100644 index 12e8b8d9d0..0000000000 --- a/api/client/node/ps.go +++ /dev/null @@ -1,63 +0,0 @@ -package node - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/idresolver" - "github.com/docker/docker/api/client/task" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type psOptions struct { - nodeID string - noResolve bool - filter opts.FilterOpt -} - -func newPSCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ps [OPTIONS] self|NODE", - Short: "List tasks running on a node", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.nodeID = args[0] - return runPS(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runPS(dockerCli *client.DockerCli, opts psOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - nodeRef, err := Reference(client, ctx, opts.nodeID) - if err != nil { - return err - } - node, _, err := client.NodeInspectWithRaw(ctx, nodeRef) - if err != nil { - return err - } - - filter := opts.filter.Value() - filter.Add("node", node.ID) - tasks, err := client.TaskList( - ctx, - types.TaskListOptions{Filter: filter}) - if err != nil { - return err - } - - return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve)) -} diff --git a/api/client/node/remove.go b/api/client/node/remove.go deleted file mode 100644 index bb54a831e8..0000000000 --- a/api/client/node/remove.go +++ /dev/null @@ -1,46 +0,0 @@ -package node - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type removeOptions struct { - force bool -} - -func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := removeOptions{} - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] NODE [NODE...]", - Aliases: []string{"remove"}, - Short: "Remove one or more nodes from the swarm", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args, opts) - }, - } - flags := cmd.Flags() - flags.BoolVar(&opts.force,
"force", false, "Force remove an active node") - return cmd -} - -func runRemove(dockerCli *client.DockerCli, args []string, opts removeOptions) error { - client := dockerCli.Client() - ctx := context.Background() - for _, nodeID := range args { - err := client.NodeRemove(ctx, nodeID, types.NodeRemoveOptions{Force: opts.force}) - if err != nil { - return err - } - fmt.Fprintf(dockerCli.Out(), "%s\n", nodeID) - } - return nil -} diff --git a/api/client/node/update.go b/api/client/node/update.go deleted file mode 100644 index 1d070dd536..0000000000 --- a/api/client/node/update.go +++ /dev/null @@ -1,113 +0,0 @@ -package node - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" - "github.com/spf13/pflag" - "golang.org/x/net/context" -) - -func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command { - nodeOpts := newNodeOptions() - - cmd := &cobra.Command{ - Use: "update [OPTIONS] NODE", - Short: "Update a node", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), args[0]) - }, - } - - flags := cmd.Flags() - flags.StringVar(&nodeOpts.role, flagRole, "", "Role of the node (worker/manager)") - flags.StringVar(&nodeOpts.availability, flagAvailability, "", "Availability of the node (active/pause/drain)") - flags.Var(&nodeOpts.annotations.labels, flagLabelAdd, "Add or update a node label (key=value)") - labelKeys := opts.NewListOpts(nil) - flags.Var(&labelKeys, flagLabelRemove, "Remove a node label if exists") - return cmd -} - -func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, nodeID string) error { - success := func(_ string) { - fmt.Fprintln(dockerCli.Out(), nodeID) - } - return updateNodes(dockerCli, []string{nodeID}, mergeNodeUpdate(flags), success) -} - -func updateNodes(dockerCli *client.DockerCli, nodes []string, mergeNode func(node *swarm.Node) error, success func(nodeID string)) error { - client := dockerCli.Client() - ctx := context.Background() - - for _, nodeID := range nodes { - node, _, err := client.NodeInspectWithRaw(ctx, nodeID) - if err != nil { - return err - } - - err = mergeNode(&node) - if err != nil { - return err - } - err = client.NodeUpdate(ctx, node.ID, node.Version, node.Spec) - if err != nil { - return err - } - success(nodeID) - } - return nil -} - -func mergeNodeUpdate(flags *pflag.FlagSet) func(*swarm.Node) error { - return func(node *swarm.Node) error { - spec := &node.Spec - - if flags.Changed(flagRole) { - str, err := flags.GetString(flagRole) - if err != nil { - return err - } - spec.Role = swarm.NodeRole(str) - } - if flags.Changed(flagAvailability) { - str, err := flags.GetString(flagAvailability) - if err != nil { - return err - } - spec.Availability = swarm.NodeAvailability(str) - } - if spec.Annotations.Labels == nil { - spec.Annotations.Labels = make(map[string]string) - } - if flags.Changed(flagLabelAdd) { - labels := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() - for k, v := range runconfigopts.ConvertKVStringsToMap(labels) { - spec.Annotations.Labels[k] = v - } - } - if flags.Changed(flagLabelRemove) { - keys := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() - for _, k := range keys { - // if a key doesn't exist, fail the command explicitly - if _, exists := spec.Annotations.Labels[k]; !exists { - return fmt.Errorf("key %s 
doesn't exist in node's labels", k) - } - delete(spec.Annotations.Labels, k) - } - } - return nil - } -} - -const ( - flagRole = "role" - flagAvailability = "availability" - flagLabelAdd = "label-add" - flagLabelRemove = "label-rm" -) diff --git a/api/client/plugin/cmd.go b/api/client/plugin/cmd.go deleted file mode 100644 index f9ecc519f0..0000000000 --- a/api/client/plugin/cmd.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !experimental - -package plugin - -import ( - "github.com/docker/docker/api/client" - "github.com/spf13/cobra" -) - -// NewPluginCommand returns a cobra command for `plugin` subcommands -func NewPluginCommand(cmd *cobra.Command, dockerCli *client.DockerCli) { -} diff --git a/api/client/plugin/cmd_experimental.go b/api/client/plugin/cmd_experimental.go deleted file mode 100644 index 6c991937fe..0000000000 --- a/api/client/plugin/cmd_experimental.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build experimental - -package plugin - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -// NewPluginCommand returns a cobra command for `plugin` subcommands -func NewPluginCommand(rootCmd *cobra.Command, dockerCli *client.DockerCli) { - cmd := &cobra.Command{ - Use: "plugin", - Short: "Manage Docker plugins", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - - cmd.AddCommand( - newDisableCommand(dockerCli), - newEnableCommand(dockerCli), - newInspectCommand(dockerCli), - newInstallCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newSetCommand(dockerCli), - newPushCommand(dockerCli), - ) - - rootCmd.AddCommand(cmd) -} diff --git a/api/client/plugin/disable.go b/api/client/plugin/disable.go deleted file mode 100644 index 704eb75286..0000000000 --- a/api/client/plugin/disable.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build experimental - -package plugin - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newDisableCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "disable PLUGIN", - Short: "Disable a plugin", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runDisable(dockerCli, args[0]) - }, - } - - return cmd -} - -func runDisable(dockerCli *client.DockerCli, name string) error { - named, err := reference.ParseNamed(name) // FIXME: validate - if err != nil { - return err - } - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid name: %s", named.String()) - } - if err := dockerCli.Client().PluginDisable(context.Background(), ref.String()); err != nil { - return err - } - fmt.Fprintln(dockerCli.Out(), name) - return nil -} diff --git a/api/client/plugin/enable.go b/api/client/plugin/enable.go deleted file mode 100644 index c31258bbb6..0000000000 --- a/api/client/plugin/enable.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build experimental - -package plugin - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newEnableCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "enable PLUGIN", - Short: "Enable a 
plugin", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runEnable(dockerCli, args[0]) - }, - } - - return cmd -} - -func runEnable(dockerCli *client.DockerCli, name string) error { - named, err := reference.ParseNamed(name) // FIXME: validate - if err != nil { - return err - } - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid name: %s", named.String()) - } - if err := dockerCli.Client().PluginEnable(context.Background(), ref.String()); err != nil { - return err - } - fmt.Fprintln(dockerCli.Out(), name) - return nil -} diff --git a/api/client/plugin/inspect.go b/api/client/plugin/inspect.go deleted file mode 100644 index 8f7e98d441..0000000000 --- a/api/client/plugin/inspect.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build experimental - -package plugin - -import ( - "encoding/json" - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "inspect PLUGIN", - Short: "Inspect a plugin", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runInspect(dockerCli, args[0]) - }, - } - - return cmd -} - -func runInspect(dockerCli *client.DockerCli, name string) error { - named, err := reference.ParseNamed(name) // FIXME: validate - if err != nil { - return err - } - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid name: %s", named.String()) - } - p, err := dockerCli.Client().PluginInspect(context.Background(), ref.String()) - if err != nil { - return err - } - - b, err := json.MarshalIndent(p, "", "\t") - if err != nil { - return err - } - _, err = dockerCli.Out().Write(b) - return err -} diff --git a/api/client/plugin/install.go b/api/client/plugin/install.go deleted file mode 100644 index af8c5b5b0a..0000000000 --- a/api/client/plugin/install.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build experimental - -package plugin - -import ( - "bufio" - "fmt" - "strings" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type pluginOptions struct { - name string - grantPerms bool - disable bool -} - -func newInstallCommand(dockerCli *client.DockerCli) *cobra.Command { - var options pluginOptions - cmd := &cobra.Command{ - Use: "install [OPTIONS] PLUGIN", - Short: "Install a plugin", - Args: cli.ExactArgs(1), // TODO: allow for set args - RunE: func(cmd *cobra.Command, args []string) error { - options.name = args[0] - return runInstall(dockerCli, options) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&options.grantPerms, "grant-all-permissions", false, "grant all permissions necessary to run the plugin") - flags.BoolVar(&options.disable, "disable", false, "do not enable the plugin on install") - - return cmd -} - -func runInstall(dockerCli *client.DockerCli, opts pluginOptions) error { - named, err := reference.ParseNamed(opts.name) // FIXME: validate - if err != nil { - return err - } - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := 
named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid name: %s", named.String()) - } - - ctx := context.Background() - - repoInfo, err := registry.ParseRepositoryInfo(named) - if err != nil { - return err - } - - authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index) - - encodedAuth, err := client.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - - registryAuthFunc := dockerCli.RegistryAuthenticationPrivilegedFunc(repoInfo.Index, "plugin install") - - options := types.PluginInstallOptions{ - RegistryAuth: encodedAuth, - Disabled: opts.disable, - AcceptAllPermissions: opts.grantPerms, - AcceptPermissionsFunc: acceptPrivileges(dockerCli, opts.name), - // TODO: Rename PrivilegeFunc, it has nothing to do with privileges - PrivilegeFunc: registryAuthFunc, - } - if err := dockerCli.Client().PluginInstall(ctx, ref.String(), options); err != nil { - return err - } - fmt.Fprintln(dockerCli.Out(), opts.name) - return nil -} - -func acceptPrivileges(dockerCli *client.DockerCli, name string) func(privileges types.PluginPrivileges) (bool, error) { - return func(privileges types.PluginPrivileges) (bool, error) { - fmt.Fprintf(dockerCli.Out(), "Plugin %q is requesting the following privileges:\n", name) - for _, privilege := range privileges { - fmt.Fprintf(dockerCli.Out(), " - %s: %v\n", privilege.Name, privilege.Value) - } - - fmt.Fprint(dockerCli.Out(), "Do you grant the above permissions? [y/N] ") - reader := bufio.NewReader(dockerCli.In()) - line, _, err := reader.ReadLine() - if err != nil { - return false, err - } - return strings.ToLower(string(line)) == "y", nil - } -} diff --git a/api/client/plugin/list.go b/api/client/plugin/list.go deleted file mode 100644 index 9813a3ce4f..0000000000 --- a/api/client/plugin/list.go +++ /dev/null @@ -1,44 +0,0 @@ -// +build experimental - -package plugin - -import ( - "fmt" - "text/tabwriter" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newListCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "ls", - Short: "List plugins", - Aliases: []string{"list"}, - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli) - }, - } - - return cmd -} - -func runList(dockerCli *client.DockerCli) error { - plugins, err := dockerCli.Client().PluginList(context.Background()) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - fmt.Fprintf(w, "NAME \tTAG \tACTIVE") - fmt.Fprintf(w, "\n") - - for _, p := range plugins { - fmt.Fprintf(w, "%s\t%s\t%v\n", p.Name, p.Tag, p.Active) - } - w.Flush() - return nil -} diff --git a/api/client/plugin/push.go b/api/client/plugin/push.go deleted file mode 100644 index 9ef4907961..0000000000 --- a/api/client/plugin/push.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build experimental - -package plugin - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/spf13/cobra" -) - -func newPushCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "push PLUGIN", - Short: "Push a plugin", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runPush(dockerCli, args[0]) - }, - } - return cmd -} - -func runPush(dockerCli *client.DockerCli, name string) error { - named, err 
:= reference.ParseNamed(name) // FIXME: validate - if err != nil { - return err - } - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid name: %s", named.String()) - } - - ctx := context.Background() - - repoInfo, err := registry.ParseRepositoryInfo(named) - if err != nil { - return err - } - authConfig := dockerCli.ResolveAuthConfig(ctx, repoInfo.Index) - - encodedAuth, err := client.EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - return dockerCli.Client().PluginPush(ctx, ref.String(), encodedAuth) -} diff --git a/api/client/plugin/remove.go b/api/client/plugin/remove.go deleted file mode 100644 index 5aba24415b..0000000000 --- a/api/client/plugin/remove.go +++ /dev/null @@ -1,55 +0,0 @@ -// +build experimental - -package plugin - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "rm PLUGIN", - Short: "Remove a plugin", - Aliases: []string{"remove"}, - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } - - return cmd -} - -func runRemove(dockerCli *client.DockerCli, names []string) error { - var errs cli.Errors - for _, name := range names { - named, err := reference.ParseNamed(name) // FIXME: validate - if err != nil { - return err - } - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid name: %s", named.String()) - } - // TODO: pass names to api instead of making multiple api calls - if err := dockerCli.Client().PluginRemove(context.Background(), ref.String()); err != nil { - errs = append(errs, err) - continue - } - fmt.Fprintln(dockerCli.Out(), name) - } - // Do not simplify to `return errs` because even if errs == nil, it is not a nil-error interface value. 
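- // (a nil cli.Errors slice assigned to an error interface is non-nil: the interface still carries the concrete type, so err != nil would hold)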
- if errs != nil { - return errs - } - return nil -} diff --git a/api/client/plugin/set.go b/api/client/plugin/set.go deleted file mode 100644 index 188bd63cc4..0000000000 --- a/api/client/plugin/set.go +++ /dev/null @@ -1,42 +0,0 @@ -// +build experimental - -package plugin - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/reference" - "github.com/spf13/cobra" -) - -func newSetCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "set PLUGIN key1=value1 [key2=value2...]", - Short: "Change settings for a plugin", - Args: cli.RequiresMinArgs(2), - RunE: func(cmd *cobra.Command, args []string) error { - return runSet(dockerCli, args[0], args[1:]) - }, - } - - return cmd -} - -func runSet(dockerCli *client.DockerCli, name string, args []string) error { - named, err := reference.ParseNamed(name) // FIXME: validate - if err != nil { - return err - } - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid name: %s", named.String()) - } - return dockerCli.Client().PluginSet(context.Background(), ref.String(), args) -} diff --git a/api/client/registry.go b/api/client/registry.go deleted file mode 100644 index e67ea7d9c7..0000000000 --- a/api/client/registry.go +++ /dev/null @@ -1,188 +0,0 @@ -package client - -import ( - "bufio" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "os" - "runtime" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" -) - -// ElectAuthServer returns the default registry to use (by asking the daemon) -func (cli *DockerCli) ElectAuthServer(ctx context.Context) string { - // The daemon `/info` endpoint informs us of the default registry being - // used. This is essential in cross-platforms environment, where for - // example a Linux client might be interacting with a Windows daemon, hence - // the default registry URL might be Windows specific. - serverAddress := registry.IndexServer - if info, err := cli.client.Info(ctx); err != nil { - fmt.Fprintf(cli.out, "Warning: failed to get default registry endpoint from daemon (%v). Using system default: %s\n", err, serverAddress) - } else { - serverAddress = info.IndexServerAddress - } - return serverAddress -} - -// EncodeAuthToBase64 serializes the auth configuration as JSON base64 payload -func EncodeAuthToBase64(authConfig types.AuthConfig) (string, error) { - buf, err := json.Marshal(authConfig) - if err != nil { - return "", err - } - return base64.URLEncoding.EncodeToString(buf), nil -} - -// RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info -// for the given command. 
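-// The returned func prompts the user to log in (via ConfigureAuth) and hands back the credentials
-// base64-encoded, so the client can retry a request that failed for lack of authorization.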
-func (cli *DockerCli) RegistryAuthenticationPrivilegedFunc(index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc { - return func() (string, error) { - fmt.Fprintf(cli.out, "\nPlease login prior to %s:\n", cmdName) - indexServer := registry.GetAuthConfigKey(index) - authConfig, err := cli.ConfigureAuth("", "", indexServer, false) - if err != nil { - return "", err - } - return EncodeAuthToBase64(authConfig) - } -} - -func (cli *DockerCli) promptWithDefault(prompt string, configDefault string) { - if configDefault == "" { - fmt.Fprintf(cli.out, "%s: ", prompt) - } else { - fmt.Fprintf(cli.out, "%s (%s): ", prompt, configDefault) - } -} - -// ResolveAuthConfig is like registry.ResolveAuthConfig, but if using the -// default index, it uses the default index name for the daemon's platform, -// not the client's platform. -func (cli *DockerCli) ResolveAuthConfig(ctx context.Context, index *registrytypes.IndexInfo) types.AuthConfig { - configKey := index.Name - if index.Official { - configKey = cli.ElectAuthServer(ctx) - } - - a, _ := GetCredentials(cli.configFile, configKey) - return a -} - -// RetrieveAuthConfigs return all credentials. -func (cli *DockerCli) RetrieveAuthConfigs() map[string]types.AuthConfig { - acs, _ := GetAllCredentials(cli.configFile) - return acs -} - -// ConfigureAuth returns an AuthConfig from the specified user, password and server. -func (cli *DockerCli) ConfigureAuth(flUser, flPassword, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) { - // On Windows, force the use of the regular OS stdin stream. Fixes #14336/#14210 - if runtime.GOOS == "windows" { - cli.in = os.Stdin - } - - authconfig, err := GetCredentials(cli.configFile, serverAddress) - if err != nil { - return authconfig, err - } - - // Some links documenting this: - // - https://code.google.com/archive/p/mintty/issues/56 - // - https://github.com/docker/docker/issues/15272 - // - https://mintty.github.io/ (compatibility) - // Linux will hit this if you attempt `cat | docker login`, and Windows - // will hit this if you attempt docker login from mintty where stdin - // is a pipe, not a character based console. - if flPassword == "" && !cli.isTerminalIn { - return authconfig, fmt.Errorf("Error: Cannot perform an interactive login from a non TTY device") - } - - authconfig.Username = strings.TrimSpace(authconfig.Username) - - if flUser = strings.TrimSpace(flUser); flUser == "" { - if isDefaultRegistry { - // if this is a default registry (docker hub), then display the following message. - fmt.Fprintln(cli.out, "Login with your Docker ID to push and pull images from Docker Hub. 
If you don't have a Docker ID, head over to https://hub.docker.com to create one.") - } - cli.promptWithDefault("Username", authconfig.Username) - flUser = readInput(cli.in, cli.out) - flUser = strings.TrimSpace(flUser) - if flUser == "" { - flUser = authconfig.Username - } - } - if flUser == "" { - return authconfig, fmt.Errorf("Error: Non-null Username Required") - } - if flPassword == "" { - oldState, err := term.SaveState(cli.inFd) - if err != nil { - return authconfig, err - } - fmt.Fprintf(cli.out, "Password: ") - term.DisableEcho(cli.inFd, oldState) - - flPassword = readInput(cli.in, cli.out) - fmt.Fprint(cli.out, "\n") - - term.RestoreTerminal(cli.inFd, oldState) - if flPassword == "" { - return authconfig, fmt.Errorf("Error: Password Required") - } - } - - authconfig.Username = flUser - authconfig.Password = flPassword - authconfig.ServerAddress = serverAddress - authconfig.IdentityToken = "" - - return authconfig, nil -} - -// resolveAuthConfigFromImage retrieves the AuthConfig using the image string -func (cli *DockerCli) resolveAuthConfigFromImage(ctx context.Context, image string) (types.AuthConfig, error) { - registryRef, err := reference.ParseNamed(image) - if err != nil { - return types.AuthConfig{}, err - } - repoInfo, err := registry.ParseRepositoryInfo(registryRef) - if err != nil { - return types.AuthConfig{}, err - } - authConfig := cli.ResolveAuthConfig(ctx, repoInfo.Index) - return authConfig, nil -} - -// RetrieveAuthTokenFromImage retrieves an encoded auth token given a complete image -func (cli *DockerCli) RetrieveAuthTokenFromImage(ctx context.Context, image string) (string, error) { - // Retrieve encoded auth token from the image reference - authConfig, err := cli.resolveAuthConfigFromImage(ctx, image) - if err != nil { - return "", err - } - encodedAuth, err := EncodeAuthToBase64(authConfig) - if err != nil { - return "", err - } - return encodedAuth, nil -} - -func readInput(in io.Reader, out io.Writer) string { - reader := bufio.NewReader(in) - line, _, err := reader.ReadLine() - if err != nil { - fmt.Fprintln(out, err.Error()) - os.Exit(1) - } - return string(line) -} diff --git a/api/client/registry/login.go b/api/client/registry/login.go deleted file mode 100644 index 452ac71513..0000000000 --- a/api/client/registry/login.go +++ /dev/null @@ -1,81 +0,0 @@ -package registry - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type loginOptions struct { - serverAddress string - user string - password string - email string -} - -// NewLoginCommand creates a new `docker login` command -func NewLoginCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts loginOptions - - cmd := &cobra.Command{ - Use: "login [OPTIONS] [SERVER]", - Short: "Log in to a Docker registry.", - Long: "Log in to a Docker registry.\nIf no server is specified, the default is defined by the daemon.", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - if len(args) > 0 { - opts.serverAddress = args[0] - } - return runLogin(dockerCli, opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.user, "username", "u", "", "Username") - flags.StringVarP(&opts.password, "password", "p", "", "Password") - - // Deprecated in 1.11: Should be removed in docker 1.13 - flags.StringVarP(&opts.email, "email", "e", "", "Email") - flags.MarkDeprecated("email", "will be removed in 1.13.") - - return cmd -} - -func runLogin(dockerCli
*client.DockerCli, opts loginOptions) error { - ctx := context.Background() - clnt := dockerCli.Client() - - var serverAddress string - var isDefaultRegistry bool - if opts.serverAddress != "" { - serverAddress = opts.serverAddress - } else { - serverAddress = dockerCli.ElectAuthServer(ctx) - isDefaultRegistry = true - } - authConfig, err := dockerCli.ConfigureAuth(opts.user, opts.password, serverAddress, isDefaultRegistry) - if err != nil { - return err - } - response, err := clnt.RegistryLogin(ctx, authConfig) - if err != nil { - return err - } - if response.IdentityToken != "" { - authConfig.Password = "" - authConfig.IdentityToken = response.IdentityToken - } - if err := client.StoreCredentials(dockerCli.ConfigFile(), authConfig); err != nil { - return fmt.Errorf("Error saving credentials: %v", err) - } - - if response.Status != "" { - fmt.Fprintln(dockerCli.Out(), response.Status) - } - return nil -} diff --git a/api/client/registry/logout.go b/api/client/registry/logout.go deleted file mode 100644 index dad27644a6..0000000000 --- a/api/client/registry/logout.go +++ /dev/null @@ -1,52 +0,0 @@ -package registry - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -// NewLogoutCommand creates a new `docker logout` command -func NewLogoutCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "logout [SERVER]", - Short: "Log out from a Docker registry.", - Long: "Log out from a Docker registry.\nIf no server is specified, the default is defined by the daemon.", - Args: cli.RequiresMaxArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - var serverAddress string - if len(args) > 0 { - serverAddress = args[0] - } - return runLogout(dockerCli, serverAddress) - }, - } - - return cmd -} - -func runLogout(dockerCli *client.DockerCli, serverAddress string) error { - ctx := context.Background() - - if serverAddress == "" { - serverAddress = dockerCli.ElectAuthServer(ctx) - } - - // check if we're logged in based on the records in the config file, - // which may not contain user/pass because they may be kept in the creds store - if _, ok := dockerCli.ConfigFile().AuthConfigs[serverAddress]; !ok { - fmt.Fprintf(dockerCli.Out(), "Not logged in to %s\n", serverAddress) - return nil - } - - fmt.Fprintf(dockerCli.Out(), "Remove login credentials for %s\n", serverAddress) - if err := client.EraseCredentials(dockerCli.ConfigFile(), serverAddress); err != nil { - fmt.Fprintf(dockerCli.Err(), "WARNING: could not erase credentials: %v\n", err) - } - - return nil -} diff --git a/api/client/service/cmd.go b/api/client/service/cmd.go deleted file mode 100644 index 27ed30b946..0000000000 --- a/api/client/service/cmd.go +++ /dev/null @@ -1,32 +0,0 @@ -package service - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" -) - -// NewServiceCommand returns a cobra command for `service` subcommands
-func NewServiceCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "service", - Short: "Manage Docker services", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newCreateCommand(dockerCli), - newInspectCommand(dockerCli), - newPSCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - newScaleCommand(dockerCli), - newUpdateCommand(dockerCli), - )
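- // When called without a subcommand, the Run func above just prints the usage text.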
- return cmd -} diff --git a/api/client/service/create.go b/api/client/service/create.go deleted file mode 100644 index e55b2a4723..0000000000 --- a/api/client/service/create.go +++ /dev/null @@ -1,72 +0,0 @@ -package service - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := newServiceOptions() - - cmd := &cobra.Command{ - Use: "create [OPTIONS] IMAGE [COMMAND] [ARG...]", - Short: "Create a new service", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.image = args[0] - if len(args) > 1 { - opts.args = args[1:] - } - return runCreate(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.StringVar(&opts.mode, flagMode, "replicated", "Service mode (replicated or global)") - addServiceFlags(cmd, opts) - - flags.VarP(&opts.labels, flagLabel, "l", "Service labels") - flags.Var(&opts.containerLabels, flagContainerLabel, "Container labels") - flags.VarP(&opts.env, flagEnv, "e", "Set environment variables") - flags.Var(&opts.mounts, flagMount, "Attach a mount to the service") - flags.StringSliceVar(&opts.constraints, flagConstraint, []string{}, "Placement constraints") - flags.StringSliceVar(&opts.networks, flagNetwork, []string{}, "Network attachments") - flags.VarP(&opts.endpoint.ports, flagPublish, "p", "Publish a port as a node port") - - flags.SetInterspersed(false) - return cmd -} - -func runCreate(dockerCli *client.DockerCli, opts *serviceOptions) error { - apiClient := dockerCli.Client() - createOpts := types.ServiceCreateOptions{} - - service, err := opts.ToService() - if err != nil { - return err - } - - ctx := context.Background() - - // only send auth if flag was set - if opts.registryAuth { - // Retrieve encoded auth token from the image reference - encodedAuth, err := dockerCli.RetrieveAuthTokenFromImage(ctx, opts.image) - if err != nil { - return err - } - createOpts.EncodedRegistryAuth = encodedAuth - } - - response, err := apiClient.ServiceCreate(ctx, service, createOpts) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", response.ID) - return nil -} diff --git a/api/client/service/inspect.go b/api/client/service/inspect.go deleted file mode 100644 index 0abdf5adb3..0000000000 --- a/api/client/service/inspect.go +++ /dev/null @@ -1,188 +0,0 @@ -package service - -import ( - "fmt" - "io" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/inspect" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/ioutils" - apiclient "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - refs []string - format string - pretty bool -} - -func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] SERVICE [SERVICE...]", - Short: "Display detailed information on one or more services", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.refs = args - - if opts.pretty && len(opts.format) > 0 { - return fmt.Errorf("--format is incompatible with human friendly format") - } - return runInspect(dockerCli, opts) - }, - } - - flags := cmd.Flags() - 
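// --pretty and --format are mutually exclusive; the RunE check above rejects combining them. -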
flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given go template") - flags.BoolVar(&opts.pretty, "pretty", false, "Print the information in a human friendly format.") - return cmd -} - -func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - getRef := func(ref string) (interface{}, []byte, error) { - service, _, err := client.ServiceInspectWithRaw(ctx, ref) - if err == nil || !apiclient.IsErrServiceNotFound(err) { - return service, nil, err - } - return nil, nil, fmt.Errorf("Error: no such service: %s", ref) - } - - if !opts.pretty { - return inspect.Inspect(dockerCli.Out(), opts.refs, opts.format, getRef) - } - - return printHumanFriendly(dockerCli.Out(), opts.refs, getRef) -} - -func printHumanFriendly(out io.Writer, refs []string, getRef inspect.GetRefFunc) error { - for idx, ref := range refs { - obj, _, err := getRef(ref) - if err != nil { - return err - } - printService(out, obj.(swarm.Service)) - - // TODO: better way to do this? - // print extra space between objects, but not after the last one - if idx+1 != len(refs) { - fmt.Fprintf(out, "\n\n") - } - } - return nil -} - -// TODO: use a template -func printService(out io.Writer, service swarm.Service) { - fmt.Fprintf(out, "ID:\t\t%s\n", service.ID) - fmt.Fprintf(out, "Name:\t\t%s\n", service.Spec.Name) - if service.Spec.Labels != nil { - fmt.Fprintln(out, "Labels:") - for k, v := range service.Spec.Labels { - fmt.Fprintf(out, " - %s=%s\n", k, v) - } - } - - if service.Spec.Mode.Global != nil { - fmt.Fprintln(out, "Mode:\t\tGlobal") - } else { - fmt.Fprintln(out, "Mode:\t\tReplicated") - if service.Spec.Mode.Replicated.Replicas != nil { - fmt.Fprintf(out, " Replicas:\t%d\n", *service.Spec.Mode.Replicated.Replicas) - } - } - - if service.UpdateStatus.State != "" { - fmt.Fprintln(out, "Update status:") - fmt.Fprintf(out, " State:\t\t%s\n", service.UpdateStatus.State) - fmt.Fprintf(out, " Started:\t%s ago\n", strings.ToLower(units.HumanDuration(time.Since(service.UpdateStatus.StartedAt)))) - if service.UpdateStatus.State == swarm.UpdateStateCompleted { - fmt.Fprintf(out, " Completed:\t%s ago\n", strings.ToLower(units.HumanDuration(time.Since(service.UpdateStatus.CompletedAt)))) - } - fmt.Fprintf(out, " Message:\t%s\n", service.UpdateStatus.Message) - } - - fmt.Fprintln(out, "Placement:") - if service.Spec.TaskTemplate.Placement != nil && len(service.Spec.TaskTemplate.Placement.Constraints) > 0 { - ioutils.FprintfIfNotEmpty(out, " Constraints\t: %s\n", strings.Join(service.Spec.TaskTemplate.Placement.Constraints, ", ")) - } - if service.Spec.UpdateConfig != nil { - fmt.Fprintf(out, "UpdateConfig:\n") - fmt.Fprintf(out, " Parallelism:\t%d\n", service.Spec.UpdateConfig.Parallelism) - if service.Spec.UpdateConfig.Delay.Nanoseconds() > 0 { - fmt.Fprintf(out, " Delay:\t\t%s\n", service.Spec.UpdateConfig.Delay) - } - fmt.Fprintf(out, " On failure:\t%s\n", service.Spec.UpdateConfig.FailureAction) - } - - fmt.Fprintf(out, "ContainerSpec:\n") - printContainerSpec(out, service.Spec.TaskTemplate.ContainerSpec) - - resources := service.Spec.TaskTemplate.Resources - if resources != nil { - fmt.Fprintln(out, "Resources:") - printResources := func(out io.Writer, requirement string, r *swarm.Resources) { - if r == nil || (r.MemoryBytes == 0 && r.NanoCPUs == 0) { - return - } - fmt.Fprintf(out, " %s:\n", requirement) - if r.NanoCPUs != 0 { - fmt.Fprintf(out, " CPU:\t\t%g\n", float64(r.NanoCPUs)/1e9) - } - if r.MemoryBytes != 0 { - 
fmt.Fprintf(out, " Memory:\t%s\n", units.BytesSize(float64(r.MemoryBytes))) - } - } - printResources(out, "Reservations", resources.Reservations) - printResources(out, "Limits", resources.Limits) - } - if len(service.Spec.Networks) > 0 { - fmt.Fprintf(out, "Networks:") - for _, n := range service.Spec.Networks { - fmt.Fprintf(out, " %s", n.Target) - } - fmt.Fprintln(out, "") - } - - if len(service.Endpoint.Ports) > 0 { - fmt.Fprintln(out, "Ports:") - for _, port := range service.Endpoint.Ports { - ioutils.FprintfIfNotEmpty(out, " Name = %s\n", port.Name) - fmt.Fprintf(out, " Protocol = %s\n", port.Protocol) - fmt.Fprintf(out, " TargetPort = %d\n", port.TargetPort) - fmt.Fprintf(out, " PublishedPort = %d\n", port.PublishedPort) - } - } -} - -func printContainerSpec(out io.Writer, containerSpec swarm.ContainerSpec) { - fmt.Fprintf(out, " Image:\t\t%s\n", containerSpec.Image) - if len(containerSpec.Args) > 0 { - fmt.Fprintf(out, " Args:\t\t%s\n", strings.Join(containerSpec.Args, " ")) - } - if len(containerSpec.Env) > 0 { - fmt.Fprintf(out, " Env:\t\t%s\n", strings.Join(containerSpec.Env, " ")) - } - ioutils.FprintfIfNotEmpty(out, " Dir\t\t%s\n", containerSpec.Dir) - ioutils.FprintfIfNotEmpty(out, " User\t\t%s\n", containerSpec.User) - if len(containerSpec.Mounts) > 0 { - fmt.Fprintln(out, " Mounts:") - for _, v := range containerSpec.Mounts { - fmt.Fprintf(out, " Target = %s\n", v.Target) - fmt.Fprintf(out, " Source = %s\n", v.Source) - fmt.Fprintf(out, " ReadOnly = %v\n", v.ReadOnly) - fmt.Fprintf(out, " Type = %v\n", v.Type) - } - } -} diff --git a/api/client/service/inspect_test.go b/api/client/service/inspect_test.go deleted file mode 100644 index 7d7f03ffc9..0000000000 --- a/api/client/service/inspect_test.go +++ /dev/null @@ -1,84 +0,0 @@ -package service - -import ( - "bytes" - "strings" - "testing" - "time" - - "github.com/docker/engine-api/types/swarm" -) - -func TestPrettyPrintWithNoUpdateConfig(t *testing.T) { - b := new(bytes.Buffer) - - endpointSpec := &swarm.EndpointSpec{ - Mode: "vip", - Ports: []swarm.PortConfig{ - { - Protocol: swarm.PortConfigProtocolTCP, - TargetPort: 5000, - }, - }, - } - - two := uint64(2) - - s := swarm.Service{ - ID: "de179gar9d0o7ltdybungplod", - Meta: swarm.Meta{ - Version: swarm.Version{Index: 315}, - CreatedAt: time.Now(), - UpdatedAt: time.Now(), - }, - Spec: swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: "my_service", - Labels: map[string]string{"com.label": "foo"}, - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: "foo/bar@sha256:this_is_a_test", - }, - }, - Mode: swarm.ServiceMode{ - Replicated: &swarm.ReplicatedService{ - Replicas: &two, - }, - }, - UpdateConfig: nil, - Networks: []swarm.NetworkAttachmentConfig{ - { - Target: "5vpyomhb6ievnk0i0o60gcnei", - Aliases: []string{"web"}, - }, - }, - EndpointSpec: endpointSpec, - }, - Endpoint: swarm.Endpoint{ - Spec: *endpointSpec, - Ports: []swarm.PortConfig{ - { - Protocol: swarm.PortConfigProtocolTCP, - TargetPort: 5000, - PublishedPort: 30000, - }, - }, - VirtualIPs: []swarm.EndpointVirtualIP{ - { - NetworkID: "6o4107cj2jx9tihgb0jyts6pj", - Addr: "10.255.0.4/16", - }, - }, - }, - UpdateStatus: swarm.UpdateStatus{ - StartedAt: time.Now(), - CompletedAt: time.Now(), - }, - } - - printService(b, s) - if strings.Contains(b.String(), "UpdateStatus") { - t.Fatal("Pretty print failed before parsing UpdateStatus") - } -} diff --git a/api/client/service/list.go b/api/client/service/list.go deleted file mode 100644 index 01ff66ee2b..0000000000 --- 
a/api/client/service/list.go +++ /dev/null @@ -1,124 +0,0 @@ -package service - -import ( - "fmt" - "io" - "strings" - "text/tabwriter" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -const ( - listItemFmt = "%s\t%s\t%s\t%s\t%s\n" -) - -type listOptions struct { - quiet bool - filter opts.FilterOpt -} - -func newListCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := listOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List services", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display IDs") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runList(dockerCli *client.DockerCli, opts listOptions) error { - ctx := context.Background() - client := dockerCli.Client() - - services, err := client.ServiceList(ctx, types.ServiceListOptions{Filter: opts.filter.Value()}) - if err != nil { - return err - } - - out := dockerCli.Out() - if opts.quiet { - printQuiet(out, services) - } else { - taskFilter := filters.NewArgs() - for _, service := range services { - taskFilter.Add("service", service.ID) - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: taskFilter}) - if err != nil { - return err - } - - nodes, err := client.NodeList(ctx, types.NodeListOptions{}) - if err != nil { - return err - } - activeNodes := make(map[string]struct{}) - for _, n := range nodes { - if n.Status.State == swarm.NodeStateReady { - activeNodes[n.ID] = struct{}{} - } - } - - running := map[string]int{} - for _, task := range tasks { - if _, nodeActive := activeNodes[task.NodeID]; nodeActive && task.Status.State == "running" { - running[task.ServiceID]++ - } - } - - printTable(out, services, running) - } - return nil -} - -func printTable(out io.Writer, services []swarm.Service, running map[string]int) { - writer := tabwriter.NewWriter(out, 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - - fmt.Fprintf(writer, listItemFmt, "ID", "NAME", "REPLICAS", "IMAGE", "COMMAND") - for _, service := range services { - replicas := "" - if service.Spec.Mode.Replicated != nil && service.Spec.Mode.Replicated.Replicas != nil { - replicas = fmt.Sprintf("%d/%d", running[service.ID], *service.Spec.Mode.Replicated.Replicas) - } else if service.Spec.Mode.Global != nil { - replicas = "global" - } - fmt.Fprintf( - writer, - listItemFmt, - stringid.TruncateID(service.ID), - service.Spec.Name, - replicas, - service.Spec.TaskTemplate.ContainerSpec.Image, - strings.Join(service.Spec.TaskTemplate.ContainerSpec.Args, " ")) - } -} - -func printQuiet(out io.Writer, services []swarm.Service) { - for _, service := range services { - fmt.Fprintln(out, service.ID) - } -} diff --git a/api/client/service/opts.go b/api/client/service/opts.go deleted file mode 100644 index c702ba4025..0000000000 --- a/api/client/service/opts.go +++ /dev/null @@ -1,559 +0,0 @@ -package service - -import ( - "encoding/csv" - "fmt" - "math/big" - "strconv" - "strings" - "time" - - "github.com/docker/docker/opts" - runconfigopts 
"github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" - "github.com/spf13/cobra" -) - -type int64Value interface { - Value() int64 -} - -type memBytes int64 - -func (m *memBytes) String() string { - return units.BytesSize(float64(m.Value())) -} - -func (m *memBytes) Set(value string) error { - val, err := units.RAMInBytes(value) - *m = memBytes(val) - return err -} - -func (m *memBytes) Type() string { - return "MemoryBytes" -} - -func (m *memBytes) Value() int64 { - return int64(*m) -} - -type nanoCPUs int64 - -func (c *nanoCPUs) String() string { - return big.NewRat(c.Value(), 1e9).FloatString(3) -} - -func (c *nanoCPUs) Set(value string) error { - cpu, ok := new(big.Rat).SetString(value) - if !ok { - return fmt.Errorf("Failed to parse %v as a rational number", value) - } - nano := cpu.Mul(cpu, big.NewRat(1e9, 1)) - if !nano.IsInt() { - return fmt.Errorf("value is too precise") - } - *c = nanoCPUs(nano.Num().Int64()) - return nil -} - -func (c *nanoCPUs) Type() string { - return "NanoCPUs" -} - -func (c *nanoCPUs) Value() int64 { - return int64(*c) -} - -// DurationOpt is an option type for time.Duration that uses a pointer. This -// allows us to get nil values outside, instead of defaulting to 0 -type DurationOpt struct { - value *time.Duration -} - -// Set a new value on the option -func (d *DurationOpt) Set(s string) error { - v, err := time.ParseDuration(s) - d.value = &v - return err -} - -// Type returns the type of this option -func (d *DurationOpt) Type() string { - return "duration-ptr" -} - -// String returns a string repr of this option -func (d *DurationOpt) String() string { - if d.value != nil { - return d.value.String() - } - return "none" -} - -// Value returns the time.Duration -func (d *DurationOpt) Value() *time.Duration { - return d.value -} - -// Uint64Opt represents a uint64. 
-type Uint64Opt struct { - value *uint64 -} - -// Set a new value on the option -func (i *Uint64Opt) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - i.value = &v - return err -} - -// Type returns the type of this option -func (i *Uint64Opt) Type() string { - return "uint64-ptr" -} - -// String returns a string repr of this option -func (i *Uint64Opt) String() string { - if i.value != nil { - return fmt.Sprintf("%v", *i.value) - } - return "none" -} - -// Value returns the uint64 -func (i *Uint64Opt) Value() *uint64 { - return i.value -} - -// MountOpt is a Value type for parsing mounts -type MountOpt struct { - values []swarm.Mount -} - -// Set a new mount value -func (m *MountOpt) Set(value string) error { - csvReader := csv.NewReader(strings.NewReader(value)) - fields, err := csvReader.Read() - if err != nil { - return err - } - - mount := swarm.Mount{} - - volumeOptions := func() *swarm.VolumeOptions { - if mount.VolumeOptions == nil { - mount.VolumeOptions = &swarm.VolumeOptions{ - Labels: make(map[string]string), - } - } - if mount.VolumeOptions.DriverConfig == nil { - mount.VolumeOptions.DriverConfig = &swarm.Driver{} - } - return mount.VolumeOptions - } - - bindOptions := func() *swarm.BindOptions { - if mount.BindOptions == nil { - mount.BindOptions = new(swarm.BindOptions) - } - return mount.BindOptions - } - - setValueOnMap := func(target map[string]string, value string) { - parts := strings.SplitN(value, "=", 2) - if len(parts) == 1 { - target[value] = "" - } else { - target[parts[0]] = parts[1] - } - } - - mount.Type = swarm.MountTypeVolume // default to volume mounts - // Set writable as the default - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - key := strings.ToLower(parts[0]) - - if len(parts) == 1 { - switch key { - case "readonly", "ro": - mount.ReadOnly = true - continue - case "volume-nocopy": - volumeOptions().NoCopy = true - continue - } - } - - if len(parts) != 2 { - return fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - value := parts[1] - switch key { - case "type": - mount.Type = swarm.MountType(strings.ToLower(value)) - case "source", "src": - mount.Source = value - case "target", "dst", "destination": - mount.Target = value - case "readonly", "ro": - mount.ReadOnly, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for %s: %s", key, value) - } - case "bind-propagation": - bindOptions().Propagation = swarm.MountPropagation(strings.ToLower(value)) - case "volume-nocopy": - volumeOptions().NoCopy, err = strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for populate: %s", value) - } - case "volume-label": - setValueOnMap(volumeOptions().Labels, value) - case "volume-driver": - volumeOptions().DriverConfig.Name = value - case "volume-opt": - if volumeOptions().DriverConfig.Options == nil { - volumeOptions().DriverConfig.Options = make(map[string]string) - } - setValueOnMap(volumeOptions().DriverConfig.Options, value) - default: - return fmt.Errorf("unexpected key '%s' in '%s'", key, field) - } - } - - if mount.Type == "" { - return fmt.Errorf("type is required") - } - - if mount.Target == "" { - return fmt.Errorf("target is required") - } - - if mount.VolumeOptions != nil && mount.Source == "" { - return fmt.Errorf("source is required when specifying volume-* options") - } - - if mount.Type == swarm.MountTypeBind && mount.VolumeOptions != nil { - return fmt.Errorf("cannot mix 'volume-*' options with mount type '%s'", swarm.MountTypeBind) 
- } - if mount.Type == swarm.MountTypeVolume && mount.BindOptions != nil { - return fmt.Errorf("cannot mix 'bind-*' options with mount type '%s'", swarm.MountTypeVolume) - } - - m.values = append(m.values, mount) - return nil -} - -// Type returns the type of this option -func (m *MountOpt) Type() string { - return "mount" -} - -// String returns a string repr of this option -func (m *MountOpt) String() string { - mounts := []string{} - for _, mount := range m.values { - repr := fmt.Sprintf("%s %s %s", mount.Type, mount.Source, mount.Target) - mounts = append(mounts, repr) - } - return strings.Join(mounts, ", ") -} - -// Value returns the mounts -func (m *MountOpt) Value() []swarm.Mount { - return m.values -} - -type updateOptions struct { - parallelism uint64 - delay time.Duration - onFailure string -} - -type resourceOptions struct { - limitCPU nanoCPUs - limitMemBytes memBytes - resCPU nanoCPUs - resMemBytes memBytes -} - -func (r *resourceOptions) ToResourceRequirements() *swarm.ResourceRequirements { - return &swarm.ResourceRequirements{ - Limits: &swarm.Resources{ - NanoCPUs: r.limitCPU.Value(), - MemoryBytes: r.limitMemBytes.Value(), - }, - Reservations: &swarm.Resources{ - NanoCPUs: r.resCPU.Value(), - MemoryBytes: r.resMemBytes.Value(), - }, - } -} - -type restartPolicyOptions struct { - condition string - delay DurationOpt - maxAttempts Uint64Opt - window DurationOpt -} - -func (r *restartPolicyOptions) ToRestartPolicy() *swarm.RestartPolicy { - return &swarm.RestartPolicy{ - Condition: swarm.RestartPolicyCondition(r.condition), - Delay: r.delay.Value(), - MaxAttempts: r.maxAttempts.Value(), - Window: r.window.Value(), - } -} - -func convertNetworks(networks []string) []swarm.NetworkAttachmentConfig { - nets := []swarm.NetworkAttachmentConfig{} - for _, network := range networks { - nets = append(nets, swarm.NetworkAttachmentConfig{Target: network}) - } - return nets -} - -type endpointOptions struct { - mode string - ports opts.ListOpts -} - -func (e *endpointOptions) ToEndpointSpec() *swarm.EndpointSpec { - portConfigs := []swarm.PortConfig{} - // We can ignore errors because the format was already validated by ValidatePort - ports, portBindings, _ := nat.ParsePortSpecs(e.ports.GetAll()) - - for port := range ports { - portConfigs = append(portConfigs, convertPortToPortConfig(port, portBindings)...) - } - - return &swarm.EndpointSpec{ - Mode: swarm.ResolutionMode(strings.ToLower(e.mode)), - Ports: portConfigs, - } -} - -func convertPortToPortConfig( - port nat.Port, - portBindings map[nat.Port][]nat.PortBinding, -) []swarm.PortConfig { - ports := []swarm.PortConfig{} - - for _, binding := range portBindings[port] { - hostPort, _ := strconv.ParseUint(binding.HostPort, 10, 16) - ports = append(ports, swarm.PortConfig{ - //TODO Name: ? - Protocol: swarm.PortConfigProtocol(strings.ToLower(port.Proto())), - TargetPort: uint32(port.Int()), - PublishedPort: uint32(hostPort), - }) - } - return ports -} - -type logDriverOptions struct { - name string - opts opts.ListOpts -} - -func newLogDriverOptions() logDriverOptions { - return logDriverOptions{opts: opts.NewListOpts(runconfigopts.ValidateEnv)} -} - -func (ldo *logDriverOptions) toLogDriver() *swarm.Driver { - if ldo.name == "" { - return nil - } - - // set the log driver only if specified. 
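- // (a nil Driver, as returned above when no name is set, leaves the daemon's default logging driver in effect)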
- return &swarm.Driver{ - Name: ldo.name, - Options: runconfigopts.ConvertKVStringsToMap(ldo.opts.GetAll()), - } -} - -// ValidatePort validates a string is in the expected format for a port definition -func ValidatePort(value string) (string, error) { - portMappings, err := nat.ParsePortSpec(value) - for _, portMapping := range portMappings { - if portMapping.Binding.HostIP != "" { - return "", fmt.Errorf("HostIP is not supported by a service.") - } - } - return value, err -} - -type serviceOptions struct { - name string - labels opts.ListOpts - containerLabels opts.ListOpts - image string - args []string - env opts.ListOpts - workdir string - user string - mounts MountOpt - - resources resourceOptions - stopGrace DurationOpt - - replicas Uint64Opt - mode string - - restartPolicy restartPolicyOptions - constraints []string - update updateOptions - networks []string - endpoint endpointOptions - - registryAuth bool - - logDriver logDriverOptions -} - -func newServiceOptions() *serviceOptions { - return &serviceOptions{ - labels: opts.NewListOpts(runconfigopts.ValidateEnv), - containerLabels: opts.NewListOpts(runconfigopts.ValidateEnv), - env: opts.NewListOpts(runconfigopts.ValidateEnv), - endpoint: endpointOptions{ - ports: opts.NewListOpts(ValidatePort), - }, - logDriver: newLogDriverOptions(), - } -} - -func (opts *serviceOptions) ToService() (swarm.ServiceSpec, error) { - var service swarm.ServiceSpec - - service = swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - Name: opts.name, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels.GetAll()), - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: opts.image, - Args: opts.args, - Env: opts.env.GetAll(), - Labels: runconfigopts.ConvertKVStringsToMap(opts.containerLabels.GetAll()), - Dir: opts.workdir, - User: opts.user, - Mounts: opts.mounts.Value(), - StopGracePeriod: opts.stopGrace.Value(), - }, - Resources: opts.resources.ToResourceRequirements(), - RestartPolicy: opts.restartPolicy.ToRestartPolicy(), - Placement: &swarm.Placement{ - Constraints: opts.constraints, - }, - LogDriver: opts.logDriver.toLogDriver(), - }, - Mode: swarm.ServiceMode{}, - UpdateConfig: &swarm.UpdateConfig{ - Parallelism: opts.update.parallelism, - Delay: opts.update.delay, - FailureAction: opts.update.onFailure, - }, - Networks: convertNetworks(opts.networks), - EndpointSpec: opts.endpoint.ToEndpointSpec(), - } - - switch opts.mode { - case "global": - if opts.replicas.Value() != nil { - return service, fmt.Errorf("replicas can only be used with replicated mode") - } - - service.Mode.Global = &swarm.GlobalService{} - case "replicated": - service.Mode.Replicated = &swarm.ReplicatedService{ - Replicas: opts.replicas.Value(), - } - default: - return service, fmt.Errorf("Unknown mode: %s", opts.mode) - } - return service, nil -} - -// addServiceFlags adds all flags that are common to both `create` and `update`. 
-// Any flags that are not common are added separately in the individual command -func addServiceFlags(cmd *cobra.Command, opts *serviceOptions) { - flags := cmd.Flags() - flags.StringVar(&opts.name, flagName, "", "Service name") - - flags.StringVarP(&opts.workdir, "workdir", "w", "", "Working directory inside the container") - flags.StringVarP(&opts.user, flagUser, "u", "", "Username or UID") - - flags.Var(&opts.resources.limitCPU, flagLimitCPU, "Limit CPUs") - flags.Var(&opts.resources.limitMemBytes, flagLimitMemory, "Limit Memory") - flags.Var(&opts.resources.resCPU, flagReserveCPU, "Reserve CPUs") - flags.Var(&opts.resources.resMemBytes, flagReserveMemory, "Reserve Memory") - flags.Var(&opts.stopGrace, flagStopGracePeriod, "Time to wait before force killing a container") - - flags.Var(&opts.replicas, flagReplicas, "Number of tasks") - - flags.StringVar(&opts.restartPolicy.condition, flagRestartCondition, "", "Restart when condition is met (none, on-failure, or any)") - flags.Var(&opts.restartPolicy.delay, flagRestartDelay, "Delay between restart attempts") - flags.Var(&opts.restartPolicy.maxAttempts, flagRestartMaxAttempts, "Maximum number of restarts before giving up") - flags.Var(&opts.restartPolicy.window, flagRestartWindow, "Window used to evaluate the restart policy") - - flags.Uint64Var(&opts.update.parallelism, flagUpdateParallelism, 1, "Maximum number of tasks updated simultaneously (0 to update all at once)") - flags.DurationVar(&opts.update.delay, flagUpdateDelay, time.Duration(0), "Delay between updates") - flags.StringVar(&opts.update.onFailure, flagUpdateFailureAction, "pause", "Action on update failure (pause|continue)") - - flags.StringVar(&opts.endpoint.mode, flagEndpointMode, "", "Endpoint mode (vip or dnsrr)") - - flags.BoolVar(&opts.registryAuth, flagRegistryAuth, false, "Send registry authentication details to swarm agents") - - flags.StringVar(&opts.logDriver.name, flagLogDriver, "", "Logging driver for service") - flags.Var(&opts.logDriver.opts, flagLogOpt, "Logging driver options") -} - -const ( - flagConstraint = "constraint" - flagConstraintRemove = "constraint-rm" - flagConstraintAdd = "constraint-add" - flagContainerLabel = "container-label" - flagContainerLabelRemove = "container-label-rm" - flagContainerLabelAdd = "container-label-add" - flagEndpointMode = "endpoint-mode" - flagEnv = "env" - flagEnvRemove = "env-rm" - flagEnvAdd = "env-add" - flagLabel = "label" - flagLabelRemove = "label-rm" - flagLabelAdd = "label-add" - flagLimitCPU = "limit-cpu" - flagLimitMemory = "limit-memory" - flagMode = "mode" - flagMount = "mount" - flagMountRemove = "mount-rm" - flagMountAdd = "mount-add" - flagName = "name" - flagNetwork = "network" - flagPublish = "publish" - flagPublishRemove = "publish-rm" - flagPublishAdd = "publish-add" - flagReplicas = "replicas" - flagReserveCPU = "reserve-cpu" - flagReserveMemory = "reserve-memory" - flagRestartCondition = "restart-condition" - flagRestartDelay = "restart-delay" - flagRestartMaxAttempts = "restart-max-attempts" - flagRestartWindow = "restart-window" - flagStopGracePeriod = "stop-grace-period" - flagUpdateDelay = "update-delay" - flagUpdateFailureAction = "update-failure-action" - flagUpdateParallelism = "update-parallelism" - flagUser = "user" - flagRegistryAuth = "with-registry-auth" - flagLogDriver = "log-driver" - flagLogOpt = "log-opt" -) diff --git a/api/client/service/opts_test.go b/api/client/service/opts_test.go deleted file mode 100644 index c4d56c1ab6..0000000000 --- a/api/client/service/opts_test.go +++ 
/dev/null @@ -1,176 +0,0 @@ -package service - -import ( - "testing" - "time" - - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/engine-api/types/swarm" -) - -func TestMemBytesString(t *testing.T) { - var mem memBytes = 1048576 - assert.Equal(t, mem.String(), "1 MiB") -} - -func TestMemBytesSetAndValue(t *testing.T) { - var mem memBytes - assert.NilError(t, mem.Set("5kb")) - assert.Equal(t, mem.Value(), int64(5120)) -} - -func TestNanoCPUsString(t *testing.T) { - var cpus nanoCPUs = 6100000000 - assert.Equal(t, cpus.String(), "6.100") -} - -func TestNanoCPUsSetAndValue(t *testing.T) { - var cpus nanoCPUs - assert.NilError(t, cpus.Set("0.35")) - assert.Equal(t, cpus.Value(), int64(350000000)) -} - -func TestDurationOptString(t *testing.T) { - dur := time.Duration(300 * 10e8) - duration := DurationOpt{value: &dur} - assert.Equal(t, duration.String(), "5m0s") -} - -func TestDurationOptSetAndValue(t *testing.T) { - var duration DurationOpt - assert.NilError(t, duration.Set("300s")) - assert.Equal(t, *duration.Value(), time.Duration(300*10e8)) -} - -func TestUint64OptString(t *testing.T) { - value := uint64(2345678) - opt := Uint64Opt{value: &value} - assert.Equal(t, opt.String(), "2345678") - - opt = Uint64Opt{} - assert.Equal(t, opt.String(), "none") -} - -func TestUint64OptSetAndValue(t *testing.T) { - var opt Uint64Opt - assert.NilError(t, opt.Set("14445")) - assert.Equal(t, *opt.Value(), uint64(14445)) -} - -func TestMountOptString(t *testing.T) { - mount := MountOpt{ - values: []swarm.Mount{ - { - Type: swarm.MountTypeBind, - Source: "/home/path", - Target: "/target", - }, - { - Type: swarm.MountTypeVolume, - Source: "foo", - Target: "/target/foo", - }, - }, - } - expected := "bind /home/path /target, volume foo /target/foo" - assert.Equal(t, mount.String(), expected) -} - -func TestMountOptSetNoError(t *testing.T) { - for _, testcase := range []string{ - // tests several aliases that should have the same result. - "type=bind,target=/target,source=/source", - "type=bind,src=/source,dst=/target", - "type=bind,source=/source,dst=/target", - "type=bind,src=/source,target=/target", - } { - var mount MountOpt - - assert.NilError(t, mount.Set(testcase)) - - mounts := mount.Value() - assert.Equal(t, len(mounts), 1) - assert.Equal(t, mounts[0], swarm.Mount{ - Type: swarm.MountTypeBind, - Source: "/source", - Target: "/target", - }) - } -} - -// TestMountOptDefaultType ensures that a mount without the type defaults to a -// volume mount. 
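-// (The spec below omits the "type" key, so Set is expected to fall back to -// swarm.MountTypeVolume.)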
-func TestMountOptDefaultType(t *testing.T) { - var mount MountOpt - assert.NilError(t, mount.Set("target=/target,source=/foo")) - assert.Equal(t, mount.values[0].Type, swarm.MountTypeVolume) -} - -func TestMountOptSetErrorNoTarget(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,source=/foo"), "target is required") -} - -func TestMountOptSetErrorInvalidKey(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,bogus=foo"), "unexpected key 'bogus'") -} - -func TestMountOptSetErrorInvalidField(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,bogus"), "invalid field 'bogus'") -} - -func TestMountOptSetErrorInvalidReadOnly(t *testing.T) { - var mount MountOpt - assert.Error(t, mount.Set("type=volume,readonly=no"), "invalid value for readonly: no") - assert.Error(t, mount.Set("type=volume,readonly=invalid"), "invalid value for readonly: invalid") -} - -func TestMountOptDefaultEnableReadOnly(t *testing.T) { - var m MountOpt - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo")) - assert.Equal(t, m.values[0].ReadOnly, false) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly")) - assert.Equal(t, m.values[0].ReadOnly, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=1")) - assert.Equal(t, m.values[0].ReadOnly, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=bind,target=/foo,source=/foo,readonly=0")) - assert.Equal(t, m.values[0].ReadOnly, false) -} - -func TestMountOptVolumeNoCopy(t *testing.T) { - var m MountOpt - assert.Error(t, m.Set("type=volume,target=/foo,volume-nocopy"), "source is required") - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo")) - assert.Equal(t, m.values[0].VolumeOptions == nil, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=true")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) - - m = MountOpt{} - assert.NilError(t, m.Set("type=volume,target=/foo,source=foo,volume-nocopy=1")) - assert.Equal(t, m.values[0].VolumeOptions != nil, true) - assert.Equal(t, m.values[0].VolumeOptions.NoCopy, true) -} - -func TestMountOptTypeConflict(t *testing.T) { - var m MountOpt - assert.Error(t, m.Set("type=bind,target=/foo,source=/foo,volume-nocopy=true"), "cannot mix") - assert.Error(t, m.Set("type=volume,target=/foo,source=/foo,bind-propagation=rprivate"), "cannot mix") -} diff --git a/api/client/service/ps.go b/api/client/service/ps.go deleted file mode 100644 index 01df5a8139..0000000000 --- a/api/client/service/ps.go +++ /dev/null @@ -1,70 +0,0 @@ -package service - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/idresolver" - "github.com/docker/docker/api/client/node" - "github.com/docker/docker/api/client/task" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type psOptions struct { - serviceID string - noResolve bool - filter opts.FilterOpt -} - -func newPSCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := 
&cobra.Command{ - Use: "ps [OPTIONS] SERVICE", - Short: "List the tasks of a service", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.serviceID = args[0] - return runPS(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runPS(dockerCli *client.DockerCli, opts psOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - service, _, err := client.ServiceInspectWithRaw(ctx, opts.serviceID) - if err != nil { - return err - } - - filter := opts.filter.Value() - filter.Add("service", service.ID) - if filter.Include("node") { - nodeFilters := filter.Get("node") - for _, nodeFilter := range nodeFilters { - nodeReference, err := node.Reference(client, ctx, nodeFilter) - if err != nil { - return err - } - filter.Del("node", nodeFilter) - filter.Add("node", nodeReference) - } - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter}) - if err != nil { - return err - } - - return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve)) -} diff --git a/api/client/service/remove.go b/api/client/service/remove.go deleted file mode 100644 index a304761d98..0000000000 --- a/api/client/service/remove.go +++ /dev/null @@ -1,47 +0,0 @@ -package service - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - - cmd := &cobra.Command{ - Use: "rm [OPTIONS] SERVICE [SERVICE...]", - Aliases: []string{"remove"}, - Short: "Remove one or more services", - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } - cmd.Flags() - - return cmd -} - -func runRemove(dockerCli *client.DockerCli, sids []string) error { - client := dockerCli.Client() - - ctx := context.Background() - - var errs []string - for _, sid := range sids { - err := client.ServiceRemove(ctx, sid) - if err != nil { - errs = append(errs, err.Error()) - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", sid) - } - if len(errs) > 0 { - return fmt.Errorf(strings.Join(errs, "\n")) - } - return nil -} diff --git a/api/client/service/scale.go b/api/client/service/scale.go deleted file mode 100644 index 1a3ea80eaf..0000000000 --- a/api/client/service/scale.go +++ /dev/null @@ -1,88 +0,0 @@ -package service - -import ( - "fmt" - "strconv" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -func newScaleCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "scale SERVICE=REPLICAS [SERVICE=REPLICAS...]", - Short: "Scale one or multiple services", - Args: scaleArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runScale(dockerCli, args) - }, - } -} - -func scaleArgs(cmd *cobra.Command, args []string) error { - if err := cli.RequiresMinArgs(1)(cmd, args); err != nil { - return err - } - for _, arg := range args { - if parts := strings.SplitN(arg, "=", 2); len(parts) != 2 { - return fmt.Errorf( - "Invalid scale specifier '%s'.\nSee '%s --help'.\n\nUsage: %s\n\n%s", - arg, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } - } - 
return nil -} - -func runScale(dockerCli *client.DockerCli, args []string) error { - var errors []string - for _, arg := range args { - parts := strings.SplitN(arg, "=", 2) - serviceID, scale := parts[0], parts[1] - if err := runServiceScale(dockerCli, serviceID, scale); err != nil { - errors = append(errors, fmt.Sprintf("%s: %s", serviceID, err.Error())) - } - } - - if len(errors) == 0 { - return nil - } - return fmt.Errorf(strings.Join(errors, "\n")) -} - -func runServiceScale(dockerCli *client.DockerCli, serviceID string, scale string) error { - client := dockerCli.Client() - ctx := context.Background() - - service, _, err := client.ServiceInspectWithRaw(ctx, serviceID) - - if err != nil { - return err - } - - serviceMode := &service.Spec.Mode - if serviceMode.Replicated == nil { - return fmt.Errorf("scale can only be used with replicated mode") - } - uintScale, err := strconv.ParseUint(scale, 10, 64) - if err != nil { - return fmt.Errorf("invalid replicas value %s: %s", scale, err.Error()) - } - serviceMode.Replicated.Replicas = &uintScale - - err = client.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, types.ServiceUpdateOptions{}) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s scaled to %s\n", serviceID, scale) - return nil -} diff --git a/api/client/service/update.go b/api/client/service/update.go deleted file mode 100644 index 9504028bd6..0000000000 --- a/api/client/service/update.go +++ /dev/null @@ -1,473 +0,0 @@ -package service - -import ( - "fmt" - "sort" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-connections/nat" - shlex "github.com/flynn-archive/go-shlex" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := newServiceOptions() - - cmd := &cobra.Command{ - Use: "update [OPTIONS] SERVICE", - Short: "Update a service", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), args[0]) - }, - } - - flags := cmd.Flags() - flags.String("image", "", "Service image tag") - flags.String("args", "", "Service command args") - addServiceFlags(cmd, opts) - - flags.Var(newListOptsVar(), flagEnvRemove, "Remove an environment variable") - flags.Var(newListOptsVar(), flagLabelRemove, "Remove a label by its key") - flags.Var(newListOptsVar(), flagContainerLabelRemove, "Remove a container label by its key") - flags.Var(newListOptsVar(), flagMountRemove, "Remove a mount by its target path") - flags.Var(newListOptsVar(), flagPublishRemove, "Remove a published port by its target port") - flags.Var(newListOptsVar(), flagConstraintRemove, "Remove a constraint") - flags.Var(&opts.labels, flagLabelAdd, "Add or update service labels") - flags.Var(&opts.containerLabels, flagContainerLabelAdd, "Add or update container labels") - flags.Var(&opts.env, flagEnvAdd, "Add or update environment variables") - flags.Var(&opts.mounts, flagMountAdd, "Add or update a mount on a service") - flags.StringSliceVar(&opts.constraints, flagConstraintAdd, []string{}, "Add or update placement constraints") - flags.Var(&opts.endpoint.ports, flagPublishAdd, "Add or update a published port") - return cmd -} - -func newListOptsVar() *opts.ListOpts { - return 
opts.NewListOptsRef(&[]string{}, nil) -} - -func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, serviceID string) error { - apiClient := dockerCli.Client() - ctx := context.Background() - updateOpts := types.ServiceUpdateOptions{} - - service, _, err := apiClient.ServiceInspectWithRaw(ctx, serviceID) - if err != nil { - return err - } - - err = updateService(flags, &service.Spec) - if err != nil { - return err - } - - // only send auth if flag was set - sendAuth, err := flags.GetBool(flagRegistryAuth) - if err != nil { - return err - } - if sendAuth { - // Retrieve encoded auth token from the image reference - // This would be the old image if it didn't change in this update - image := service.Spec.TaskTemplate.ContainerSpec.Image - encodedAuth, err := dockerCli.RetrieveAuthTokenFromImage(ctx, image) - if err != nil { - return err - } - updateOpts.EncodedRegistryAuth = encodedAuth - } - - err = apiClient.ServiceUpdate(ctx, service.ID, service.Version, service.Spec, updateOpts) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", serviceID) - return nil -} - -func updateService(flags *pflag.FlagSet, spec *swarm.ServiceSpec) error { - updateString := func(flag string, field *string) { - if flags.Changed(flag) { - *field, _ = flags.GetString(flag) - } - } - - updateInt64Value := func(flag string, field *int64) { - if flags.Changed(flag) { - *field = flags.Lookup(flag).Value.(int64Value).Value() - } - } - - updateDuration := func(flag string, field *time.Duration) { - if flags.Changed(flag) { - *field, _ = flags.GetDuration(flag) - } - } - - updateDurationOpt := func(flag string, field **time.Duration) { - if flags.Changed(flag) { - val := *flags.Lookup(flag).Value.(*DurationOpt).Value() - *field = &val - } - } - - updateUint64 := func(flag string, field *uint64) { - if flags.Changed(flag) { - *field, _ = flags.GetUint64(flag) - } - } - - updateUint64Opt := func(flag string, field **uint64) { - if flags.Changed(flag) { - val := *flags.Lookup(flag).Value.(*Uint64Opt).Value() - *field = &val - } - } - - cspec := &spec.TaskTemplate.ContainerSpec - task := &spec.TaskTemplate - - taskResources := func() *swarm.ResourceRequirements { - if task.Resources == nil { - task.Resources = &swarm.ResourceRequirements{} - } - return task.Resources - } - - updateString(flagName, &spec.Name) - updateLabels(flags, &spec.Labels) - updateContainerLabels(flags, &cspec.Labels) - updateString("image", &cspec.Image) - updateStringToSlice(flags, "args", &cspec.Args) - updateEnvironment(flags, &cspec.Env) - updateString("workdir", &cspec.Dir) - updateString(flagUser, &cspec.User) - updateMounts(flags, &cspec.Mounts) - - if flags.Changed(flagLimitCPU) || flags.Changed(flagLimitMemory) { - taskResources().Limits = &swarm.Resources{} - updateInt64Value(flagLimitCPU, &task.Resources.Limits.NanoCPUs) - updateInt64Value(flagLimitMemory, &task.Resources.Limits.MemoryBytes) - } - if flags.Changed(flagReserveCPU) || flags.Changed(flagReserveMemory) { - taskResources().Reservations = &swarm.Resources{} - updateInt64Value(flagReserveCPU, &task.Resources.Reservations.NanoCPUs) - updateInt64Value(flagReserveMemory, &task.Resources.Reservations.MemoryBytes) - } - - updateDurationOpt(flagStopGracePeriod, &cspec.StopGracePeriod) - - if anyChanged(flags, flagRestartCondition, flagRestartDelay, flagRestartMaxAttempts, flagRestartWindow) { - if task.RestartPolicy == nil { - task.RestartPolicy = &swarm.RestartPolicy{} - } - - if flags.Changed(flagRestartCondition) { - value, _ := 
flags.GetString(flagRestartCondition) - task.RestartPolicy.Condition = swarm.RestartPolicyCondition(value) - } - updateDurationOpt(flagRestartDelay, &task.RestartPolicy.Delay) - updateUint64Opt(flagRestartMaxAttempts, &task.RestartPolicy.MaxAttempts) - updateDurationOpt(flagRestartWindow, &task.RestartPolicy.Window) - } - - if anyChanged(flags, flagConstraintAdd, flagConstraintRemove) { - if task.Placement == nil { - task.Placement = &swarm.Placement{} - } - updatePlacement(flags, task.Placement) - } - - if err := updateReplicas(flags, &spec.Mode); err != nil { - return err - } - - if anyChanged(flags, flagUpdateParallelism, flagUpdateDelay, flagUpdateFailureAction) { - if spec.UpdateConfig == nil { - spec.UpdateConfig = &swarm.UpdateConfig{} - } - updateUint64(flagUpdateParallelism, &spec.UpdateConfig.Parallelism) - updateDuration(flagUpdateDelay, &spec.UpdateConfig.Delay) - updateString(flagUpdateFailureAction, &spec.UpdateConfig.FailureAction) - } - - if flags.Changed(flagEndpointMode) { - value, _ := flags.GetString(flagEndpointMode) - if spec.EndpointSpec == nil { - spec.EndpointSpec = &swarm.EndpointSpec{} - } - spec.EndpointSpec.Mode = swarm.ResolutionMode(value) - } - - if anyChanged(flags, flagPublishAdd, flagPublishRemove) { - if spec.EndpointSpec == nil { - spec.EndpointSpec = &swarm.EndpointSpec{} - } - if err := updatePorts(flags, &spec.EndpointSpec.Ports); err != nil { - return err - } - } - - if err := updateLogDriver(flags, &spec.TaskTemplate); err != nil { - return err - } - - return nil -} - -func updateStringToSlice(flags *pflag.FlagSet, flag string, field *[]string) error { - if !flags.Changed(flag) { - return nil - } - - value, _ := flags.GetString(flag) - valueSlice, err := shlex.Split(value) - *field = valueSlice - return err -} - -func anyChanged(flags *pflag.FlagSet, fields ...string) bool { - for _, flag := range fields { - if flags.Changed(flag) { - return true - } - } - return false -} - -func updatePlacement(flags *pflag.FlagSet, placement *swarm.Placement) { - field, _ := flags.GetStringSlice(flagConstraintAdd) - placement.Constraints = append(placement.Constraints, field...) 
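- // Additions are applied before removals; the removal pass below matches - // constraints by exact string (removeItems with the identity itemKey), so - // e.g. --constraint-add node=toadd --constraint-rm node!=toremove appends - // "node=toadd" and then filters out any "node!=toremove" entry.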
- - toRemove := buildToRemoveSet(flags, flagConstraintRemove) - placement.Constraints = removeItems(placement.Constraints, toRemove, itemKey) -} - -func updateContainerLabels(flags *pflag.FlagSet, field *map[string]string) { - if flags.Changed(flagContainerLabelAdd) { - if *field == nil { - *field = map[string]string{} - } - - values := flags.Lookup(flagContainerLabelAdd).Value.(*opts.ListOpts).GetAll() - for key, value := range runconfigopts.ConvertKVStringsToMap(values) { - (*field)[key] = value - } - } - - if *field != nil && flags.Changed(flagContainerLabelRemove) { - toRemove := flags.Lookup(flagContainerLabelRemove).Value.(*opts.ListOpts).GetAll() - for _, label := range toRemove { - delete(*field, label) - } - } -} - -func updateLabels(flags *pflag.FlagSet, field *map[string]string) { - if flags.Changed(flagLabelAdd) { - if *field == nil { - *field = map[string]string{} - } - - values := flags.Lookup(flagLabelAdd).Value.(*opts.ListOpts).GetAll() - for key, value := range runconfigopts.ConvertKVStringsToMap(values) { - (*field)[key] = value - } - } - - if *field != nil && flags.Changed(flagLabelRemove) { - toRemove := flags.Lookup(flagLabelRemove).Value.(*opts.ListOpts).GetAll() - for _, label := range toRemove { - delete(*field, label) - } - } -} - -func updateEnvironment(flags *pflag.FlagSet, field *[]string) { - envSet := map[string]string{} - for _, v := range *field { - envSet[envKey(v)] = v - } - if flags.Changed(flagEnvAdd) { - value := flags.Lookup(flagEnvAdd).Value.(*opts.ListOpts) - for _, v := range value.GetAll() { - envSet[envKey(v)] = v - } - } - - *field = []string{} - for _, v := range envSet { - *field = append(*field, v) - } - - toRemove := buildToRemoveSet(flags, flagEnvRemove) - *field = removeItems(*field, toRemove, envKey) -} - -func envKey(value string) string { - kv := strings.SplitN(value, "=", 2) - return kv[0] -} - -func itemKey(value string) string { - return value -} - -func buildToRemoveSet(flags *pflag.FlagSet, flag string) map[string]struct{} { - var empty struct{} - toRemove := make(map[string]struct{}) - - if !flags.Changed(flag) { - return toRemove - } - - toRemoveSlice := flags.Lookup(flag).Value.(*opts.ListOpts).GetAll() - for _, key := range toRemoveSlice { - toRemove[key] = empty - } - return toRemove -} - -func removeItems( - seq []string, - toRemove map[string]struct{}, - keyFunc func(string) string, -) []string { - newSeq := []string{} - for _, item := range seq { - if _, exists := toRemove[keyFunc(item)]; !exists { - newSeq = append(newSeq, item) - } - } - return newSeq -} - -func updateMounts(flags *pflag.FlagSet, mounts *[]swarm.Mount) { - if flags.Changed(flagMountAdd) { - values := flags.Lookup(flagMountAdd).Value.(*MountOpt).Value() - *mounts = append(*mounts, values...) 
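- // Mounts from --mount-add are appended as-is; the removal pass below - // drops any mount whose Target matches a --mount-rm value.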
- } - toRemove := buildToRemoveSet(flags, flagMountRemove) - - newMounts := []swarm.Mount{} - for _, mount := range *mounts { - if _, exists := toRemove[mount.Target]; !exists { - newMounts = append(newMounts, mount) - } - } - *mounts = newMounts -} - -type byPortConfig []swarm.PortConfig - -func (r byPortConfig) Len() int { return len(r) } -func (r byPortConfig) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byPortConfig) Less(i, j int) bool { - // We convert PortConfig into `port/protocol`, e.g., `80/tcp` - // In updatePorts we already filter out duplicates with a map, so there are no duplicate entries here - return portConfigToString(&r[i]) < portConfigToString(&r[j]) -} - -func portConfigToString(portConfig *swarm.PortConfig) string { - protocol := portConfig.Protocol - if protocol == "" { - protocol = "tcp" - } - return fmt.Sprintf("%v/%s", portConfig.PublishedPort, protocol) -} - -func updatePorts(flags *pflag.FlagSet, portConfig *[]swarm.PortConfig) error { - // The key of the map is `port/protocol`, e.g., `80/tcp` - portSet := map[string]swarm.PortConfig{} - // Check to see if there are any conflicts in the flags. - if flags.Changed(flagPublishAdd) { - values := flags.Lookup(flagPublishAdd).Value.(*opts.ListOpts).GetAll() - ports, portBindings, _ := nat.ParsePortSpecs(values) - - for port := range ports { - newConfigs := convertPortToPortConfig(port, portBindings) - for _, entry := range newConfigs { - if v, ok := portSet[portConfigToString(&entry)]; ok && v != entry { - return fmt.Errorf("conflicting port mapping between %v:%v/%s and %v:%v/%s", entry.PublishedPort, entry.TargetPort, entry.Protocol, v.PublishedPort, v.TargetPort, v.Protocol) - } - portSet[portConfigToString(&entry)] = entry - } - } - } - - // Override the previous PortConfig in the service if there are any duplicates - for _, entry := range *portConfig { - if _, ok := portSet[portConfigToString(&entry)]; !ok { - portSet[portConfigToString(&entry)] = entry - } - } - - toRemove := flags.Lookup(flagPublishRemove).Value.(*opts.ListOpts).GetAll() - newPorts := []swarm.PortConfig{} -portLoop: - for _, port := range portSet { - for _, rawTargetPort := range toRemove { - targetPort := nat.Port(rawTargetPort) - if equalPort(targetPort, port) { - continue portLoop - } - } - newPorts = append(newPorts, port) - } - // Sort the PortConfigs to avoid unnecessary updates - sort.Sort(byPortConfig(newPorts)) - *portConfig = newPorts - return nil -} - -func equalPort(targetPort nat.Port, port swarm.PortConfig) bool { - return (string(port.Protocol) == targetPort.Proto() && - port.TargetPort == uint32(targetPort.Int())) -} - -func updateReplicas(flags *pflag.FlagSet, serviceMode *swarm.ServiceMode) error { - if !flags.Changed(flagReplicas) { - return nil - } - - if serviceMode == nil || serviceMode.Replicated == nil { - return fmt.Errorf("replicas can only be used with replicated mode") - } - serviceMode.Replicated.Replicas = flags.Lookup(flagReplicas).Value.(*Uint64Opt).Value() - return nil -} - -// updateLogDriver updates the log driver only if the log driver flag is set. -// All options will be replaced with those provided on the command line. 
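-// An empty --log-driver value is treated as "not set" and leaves the service's -// current log driver untouched.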
-func updateLogDriver(flags *pflag.FlagSet, taskTemplate *swarm.TaskSpec) error { - if !flags.Changed(flagLogDriver) { - return nil - } - - name, err := flags.GetString(flagLogDriver) - if err != nil { - return err - } - - if name == "" { - return nil - } - - taskTemplate.LogDriver = &swarm.Driver{ - Name: name, - Options: runconfigopts.ConvertKVStringsToMap(flags.Lookup(flagLogOpt).Value.(*opts.ListOpts).GetAll()), - } - - return nil -} diff --git a/api/client/service/update_test.go b/api/client/service/update_test.go deleted file mode 100644 index 3f9697d999..0000000000 --- a/api/client/service/update_test.go +++ /dev/null @@ -1,180 +0,0 @@ -package service - -import ( - "sort" - "testing" - - "github.com/docker/docker/pkg/testutil/assert" - "github.com/docker/engine-api/types/swarm" -) - -func TestUpdateServiceArgs(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("args", "the \"new args\"") - - spec := &swarm.ServiceSpec{} - cspec := &spec.TaskTemplate.ContainerSpec - cspec.Args = []string{"old", "args"} - - updateService(flags, spec) - assert.EqualStringSlice(t, cspec.Args, []string{"the", "new args"}) -} - -func TestUpdateLabels(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("label-add", "toadd=newlabel") - flags.Set("label-rm", "toremove") - - labels := map[string]string{ - "toremove": "thelabeltoremove", - "tokeep": "value", - } - - updateLabels(flags, &labels) - assert.Equal(t, len(labels), 2) - assert.Equal(t, labels["tokeep"], "value") - assert.Equal(t, labels["toadd"], "newlabel") -} - -func TestUpdateLabelsRemoveALabelThatDoesNotExist(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("label-rm", "dne") - - labels := map[string]string{"foo": "theoldlabel"} - updateLabels(flags, &labels) - assert.Equal(t, len(labels), 1) -} - -func TestUpdatePlacement(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("constraint-add", "node=toadd") - flags.Set("constraint-rm", "node!=toremove") - - placement := &swarm.Placement{ - Constraints: []string{"node!=toremove", "container=tokeep"}, - } - - updatePlacement(flags, placement) - assert.Equal(t, len(placement.Constraints), 2) - assert.Equal(t, placement.Constraints[0], "container=tokeep") - assert.Equal(t, placement.Constraints[1], "node=toadd") -} - -func TestUpdateEnvironment(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("env-add", "toadd=newenv") - flags.Set("env-rm", "toremove") - - envs := []string{"toremove=theenvtoremove", "tokeep=value"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 2) - // Order has been removed in updateEnvironment (map) - sort.Strings(envs) - assert.Equal(t, envs[0], "toadd=newenv") - assert.Equal(t, envs[1], "tokeep=value") -} - -func TestUpdateEnvironmentWithDuplicateValues(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("env-add", "foo=newenv") - flags.Set("env-add", "foo=dupe") - flags.Set("env-rm", "foo") - - envs := []string{"foo=value"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 0) -} - -func TestUpdateEnvironmentWithDuplicateKeys(t *testing.T) { - // Test case for #25404 - flags := newUpdateCommand(nil).Flags() - flags.Set("env-add", "A=b") - - envs := []string{"A=c"} - - updateEnvironment(flags, &envs) - assert.Equal(t, len(envs), 1) - assert.Equal(t, envs[0], "A=b") -} - -func TestUpdateMounts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("mount-add", "type=volume,target=/toadd") - flags.Set("mount-rm", "/toremove") - - 
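// The fixture below seeds two bind mounts; "/toremove" matches the mount-rm - // target above and should be dropped by updateMounts. - 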
mounts := []swarm.Mount{ - {Target: "/toremove", Type: swarm.MountTypeBind}, - {Target: "/tokeep", Type: swarm.MountTypeBind}, - } - - updateMounts(flags, &mounts) - assert.Equal(t, len(mounts), 2) - assert.Equal(t, mounts[0].Target, "/tokeep") - assert.Equal(t, mounts[1].Target, "/toadd") -} - -func TestUpdatePorts(t *testing.T) { - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "1000:1000") - flags.Set("publish-rm", "333/udp") - - portConfigs := []swarm.PortConfig{ - {TargetPort: 333, Protocol: swarm.PortConfigProtocolUDP}, - {TargetPort: 555}, - } - - err := updatePorts(flags, &portConfigs) - assert.Equal(t, err, nil) - assert.Equal(t, len(portConfigs), 2) - // Sort to get a deterministic order (map iteration may have changed it) - targetPorts := []int{int(portConfigs[0].TargetPort), int(portConfigs[1].TargetPort)} - sort.Ints(targetPorts) - assert.Equal(t, targetPorts[0], 555) - assert.Equal(t, targetPorts[1], 1000) -} - -func TestUpdatePortsDuplicateEntries(t *testing.T) { - // Test case for #25375 - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "80:80") - - portConfigs := []swarm.PortConfig{ - {TargetPort: 80, PublishedPort: 80}, - } - - err := updatePorts(flags, &portConfigs) - assert.Equal(t, err, nil) - assert.Equal(t, len(portConfigs), 1) - assert.Equal(t, portConfigs[0].TargetPort, uint32(80)) -} - -func TestUpdatePortsDuplicateKeys(t *testing.T) { - // Test case for #25375 - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "80:20") - - portConfigs := []swarm.PortConfig{ - {TargetPort: 80, PublishedPort: 80}, - } - - err := updatePorts(flags, &portConfigs) - assert.Equal(t, err, nil) - assert.Equal(t, len(portConfigs), 1) - assert.Equal(t, portConfigs[0].TargetPort, uint32(20)) -} - -func TestUpdatePortsConflictingFlags(t *testing.T) { - // Test case for #25375 - flags := newUpdateCommand(nil).Flags() - flags.Set("publish-add", "80:80") - flags.Set("publish-add", "80:20") - - portConfigs := []swarm.PortConfig{ - {TargetPort: 80, PublishedPort: 80}, - } - - err := updatePorts(flags, &portConfigs) - assert.Error(t, err, "conflicting port mapping") -} diff --git a/api/client/stack/cmd.go b/api/client/stack/cmd.go deleted file mode 100644 index cf247b474f..0000000000 --- a/api/client/stack/cmd.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build experimental - -package stack - -import ( - "fmt" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -// NewStackCommand returns a cobra command for `stack` subcommands -func NewStackCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "stack", - Short: "Manage Docker stacks", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newConfigCommand(dockerCli), - newDeployCommand(dockerCli), - newRemoveCommand(dockerCli), - newPSCommand(dockerCli), - ) - return cmd -} - -// NewTopLevelDeployCommand returns a command for `docker deploy` -func NewTopLevelDeployCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := newDeployCommand(dockerCli) - // Remove the aliases at the top level - cmd.Aliases = []string{} - return cmd -} diff --git a/api/client/stack/cmd_stub.go b/api/client/stack/cmd_stub.go deleted file mode 100644 index 0efc56300b..0000000000 --- a/api/client/stack/cmd_stub.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !experimental - -package stack - -import ( - "github.com/docker/docker/api/client" - 
"github.com/spf13/cobra" -) - -// NewStackCommand returns no command -func NewStackCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{} -} - -// NewTopLevelDeployCommand returns no command -func NewTopLevelDeployCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{} -} diff --git a/api/client/stack/common.go b/api/client/stack/common.go deleted file mode 100644 index 46c9957250..0000000000 --- a/api/client/stack/common.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build experimental - -package stack - -import ( - "golang.org/x/net/context" - - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/swarm" -) - -const ( - labelNamespace = "com.docker.stack.namespace" -) - -func getStackLabels(namespace string, labels map[string]string) map[string]string { - if labels == nil { - labels = make(map[string]string) - } - labels[labelNamespace] = namespace - return labels -} - -func getStackFilter(namespace string) filters.Args { - filter := filters.NewArgs() - filter.Add("label", labelNamespace+"="+namespace) - return filter -} - -func getServices( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]swarm.Service, error) { - return apiclient.ServiceList( - ctx, - types.ServiceListOptions{Filter: getStackFilter(namespace)}) -} - -func getNetworks( - ctx context.Context, - apiclient client.APIClient, - namespace string, -) ([]types.NetworkResource, error) { - return apiclient.NetworkList( - ctx, - types.NetworkListOptions{Filters: getStackFilter(namespace)}) -} diff --git a/api/client/stack/config.go b/api/client/stack/config.go deleted file mode 100644 index 696c0c3fc7..0000000000 --- a/api/client/stack/config.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build experimental - -package stack - -import ( - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/bundlefile" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type configOptions struct { - bundlefile string - namespace string -} - -func newConfigCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts configOptions - - cmd := &cobra.Command{ - Use: "config [OPTIONS] STACK", - Short: "Print the stack configuration", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runConfig(dockerCli, opts) - }, - } - - flags := cmd.Flags() - addBundlefileFlag(&opts.bundlefile, flags) - return cmd -} - -func runConfig(dockerCli *client.DockerCli, opts configOptions) error { - bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile) - if err != nil { - return err - } - return bundlefile.Print(dockerCli.Out(), bundle) -} diff --git a/api/client/stack/deploy.go b/api/client/stack/deploy.go deleted file mode 100644 index 5a1a5a5318..0000000000 --- a/api/client/stack/deploy.go +++ /dev/null @@ -1,227 +0,0 @@ -// +build experimental - -package stack - -import ( - "fmt" - - "github.com/spf13/cobra" - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/bundlefile" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/swarm" -) - -const ( - defaultNetworkDriver = "overlay" -) - -type deployOptions struct { - bundlefile string - namespace string - sendRegistryAuth bool -} - -func newDeployCommand(dockerCli 
*client.DockerCli) *cobra.Command { - var opts deployOptions - - cmd := &cobra.Command{ - Use: "deploy [OPTIONS] STACK", - Aliases: []string{"up"}, - Short: "Create and update a stack from a Distributed Application Bundle (DAB)", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runDeploy(dockerCli, opts) - }, - } - - flags := cmd.Flags() - addBundlefileFlag(&opts.bundlefile, flags) - addRegistryAuthFlag(&opts.sendRegistryAuth, flags) - return cmd -} - -func runDeploy(dockerCli *client.DockerCli, opts deployOptions) error { - bundle, err := loadBundlefile(dockerCli.Err(), opts.namespace, opts.bundlefile) - if err != nil { - return err - } - - networks := getUniqueNetworkNames(bundle.Services) - ctx := context.Background() - - if err := updateNetworks(ctx, dockerCli, networks, opts.namespace); err != nil { - return err - } - return deployServices(ctx, dockerCli, bundle.Services, opts.namespace, opts.sendRegistryAuth) -} - -func getUniqueNetworkNames(services map[string]bundlefile.Service) []string { - networkSet := make(map[string]bool) - for _, service := range services { - for _, network := range service.Networks { - networkSet[network] = true - } - } - - networks := []string{} - for network := range networkSet { - networks = append(networks, network) - } - return networks -} - -func updateNetworks( - ctx context.Context, - dockerCli *client.DockerCli, - networks []string, - namespace string, -) error { - client := dockerCli.Client() - - existingNetworks, err := getNetworks(ctx, client, namespace) - if err != nil { - return err - } - - existingNetworkMap := make(map[string]types.NetworkResource) - for _, network := range existingNetworks { - existingNetworkMap[network.Name] = network - } - - createOpts := types.NetworkCreate{ - Labels: getStackLabels(namespace, nil), - Driver: defaultNetworkDriver, - // TODO: remove when engine-api uses omitempty for IPAM - IPAM: network.IPAM{Driver: "default"}, - } - - for _, internalName := range networks { - name := fmt.Sprintf("%s_%s", namespace, internalName) - - if _, exists := existingNetworkMap[name]; exists { - continue - } - fmt.Fprintf(dockerCli.Out(), "Creating network %s\n", name) - if _, err := client.NetworkCreate(ctx, name, createOpts); err != nil { - return err - } - } - return nil -} - -func convertNetworks(networks []string, namespace string, name string) []swarm.NetworkAttachmentConfig { - nets := []swarm.NetworkAttachmentConfig{} - for _, network := range networks { - nets = append(nets, swarm.NetworkAttachmentConfig{ - Target: namespace + "_" + network, - Aliases: []string{name}, - }) - } - return nets -} - -func deployServices( - ctx context.Context, - dockerCli *client.DockerCli, - services map[string]bundlefile.Service, - namespace string, - sendAuth bool, -) error { - apiClient := dockerCli.Client() - out := dockerCli.Out() - - existingServices, err := getServices(ctx, apiClient, namespace) - if err != nil { - return err - } - - existingServiceMap := make(map[string]swarm.Service) - for _, service := range existingServices { - existingServiceMap[service.Spec.Name] = service - } - - for internalName, service := range services { - name := fmt.Sprintf("%s_%s", namespace, internalName) - - var ports []swarm.PortConfig - for _, portSpec := range service.Ports { - ports = append(ports, swarm.PortConfig{ - Protocol: swarm.PortConfigProtocol(portSpec.Protocol), - TargetPort: portSpec.Port, - }) - } - - serviceSpec := swarm.ServiceSpec{ - Annotations: swarm.Annotations{ - 
Name: name, - Labels: getStackLabels(namespace, service.Labels), - }, - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: service.Image, - Command: service.Command, - Args: service.Args, - Env: service.Env, - }, - }, - EndpointSpec: &swarm.EndpointSpec{ - Ports: ports, - }, - Networks: convertNetworks(service.Networks, namespace, internalName), - } - - cspec := &serviceSpec.TaskTemplate.ContainerSpec - if service.WorkingDir != nil { - cspec.Dir = *service.WorkingDir - } - if service.User != nil { - cspec.User = *service.User - } - - encodedAuth := "" - if sendAuth { - // Retrieve encoded auth token from the image reference - image := serviceSpec.TaskTemplate.ContainerSpec.Image - encodedAuth, err = dockerCli.RetrieveAuthTokenFromImage(ctx, image) - if err != nil { - return err - } - } - - if service, exists := existingServiceMap[name]; exists { - fmt.Fprintf(out, "Updating service %s (id: %s)\n", name, service.ID) - - updateOpts := types.ServiceUpdateOptions{} - if sendAuth { - updateOpts.EncodedRegistryAuth = encodedAuth - } - if err := apiClient.ServiceUpdate( - ctx, - service.ID, - service.Version, - serviceSpec, - updateOpts, - ); err != nil { - return err - } - } else { - fmt.Fprintf(out, "Creating service %s\n", name) - - createOpts := types.ServiceCreateOptions{} - if sendAuth { - createOpts.EncodedRegistryAuth = encodedAuth - } - if _, err := apiClient.ServiceCreate(ctx, serviceSpec, createOpts); err != nil { - return err - } - } - } - - return nil -} diff --git a/api/client/stack/opts.go b/api/client/stack/opts.go deleted file mode 100644 index 345bdc38f5..0000000000 --- a/api/client/stack/opts.go +++ /dev/null @@ -1,49 +0,0 @@ -// +build experimental - -package stack - -import ( - "fmt" - "io" - "os" - - "github.com/docker/docker/api/client/bundlefile" - "github.com/spf13/pflag" -) - -func addBundlefileFlag(opt *string, flags *pflag.FlagSet) { - flags.StringVar( - opt, - "file", "", - "Path to a Distributed Application Bundle file (Default: STACK.dab)") -} - -func addRegistryAuthFlag(opt *bool, flags *pflag.FlagSet) { - flags.BoolVar(opt, "with-registry-auth", false, "Send registry authentication details to Swarm agents") -} - -func loadBundlefile(stderr io.Writer, namespace string, path string) (*bundlefile.Bundlefile, error) { - defaultPath := fmt.Sprintf("%s.dab", namespace) - - if path == "" { - path = defaultPath - } - if _, err := os.Stat(path); err != nil { - return nil, fmt.Errorf( - "Bundle %s not found. 
Specify the path with --file", - path) - } - - fmt.Fprintf(stderr, "Loading bundle from %s\n", path) - reader, err := os.Open(path) - if err != nil { - return nil, err - } - defer reader.Close() - - bundle, err := bundlefile.LoadFile(reader) - if err != nil { - return nil, fmt.Errorf("Error reading %s: %v\n", path, err) - } - return bundle, err -} diff --git a/api/client/stack/ps.go b/api/client/stack/ps.go deleted file mode 100644 index 2571529b46..0000000000 --- a/api/client/stack/ps.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build experimental - -package stack - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/idresolver" - "github.com/docker/docker/api/client/task" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" -) - -type psOptions struct { - all bool - filter opts.FilterOpt - namespace string - noResolve bool -} - -func newPSCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := psOptions{filter: opts.NewFilterOpt()} - - cmd := &cobra.Command{ - Use: "ps [OPTIONS] STACK", - Short: "List the tasks in the stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runPS(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.BoolVarP(&opts.all, "all", "a", false, "Display all tasks") - flags.BoolVar(&opts.noResolve, "no-resolve", false, "Do not map IDs to Names") - flags.VarP(&opts.filter, "filter", "f", "Filter output based on conditions provided") - - return cmd -} - -func runPS(dockerCli *client.DockerCli, opts psOptions) error { - namespace := opts.namespace - client := dockerCli.Client() - ctx := context.Background() - - filter := opts.filter.Value() - filter.Add("label", labelNamespace+"="+opts.namespace) - if !opts.all && !filter.Include("desired-state") { - filter.Add("desired-state", string(swarm.TaskStateRunning)) - filter.Add("desired-state", string(swarm.TaskStateAccepted)) - } - - tasks, err := client.TaskList(ctx, types.TaskListOptions{Filter: filter}) - if err != nil { - return err - } - - if len(tasks) == 0 { - fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) - return nil - } - - return task.Print(dockerCli, ctx, tasks, idresolver.New(client, opts.noResolve)) -} diff --git a/api/client/stack/remove.go b/api/client/stack/remove.go deleted file mode 100644 index 9ba91e5c23..0000000000 --- a/api/client/stack/remove.go +++ /dev/null @@ -1,75 +0,0 @@ -// +build experimental - -package stack - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type removeOptions struct { - namespace string -} - -func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts removeOptions - - cmd := &cobra.Command{ - Use: "rm STACK", - Aliases: []string{"remove", "down"}, - Short: "Remove the stack", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.namespace = args[0] - return runRemove(dockerCli, opts) - }, - } - return cmd -} - -func runRemove(dockerCli *client.DockerCli, opts removeOptions) error { - namespace := opts.namespace - client := dockerCli.Client() - stderr := dockerCli.Err() - ctx := context.Background() - hasError := false - - services, err := getServices(ctx, client, namespace) - if err != nil { - return err - } - for 
_, service := range services { - fmt.Fprintf(stderr, "Removing service %s\n", service.Spec.Name) - if err := client.ServiceRemove(ctx, service.ID); err != nil { - hasError = true - fmt.Fprintf(stderr, "Failed to remove service %s: %s\n", service.ID, err) - } - } - - networks, err := getNetworks(ctx, client, namespace) - if err != nil { - return err - } - for _, network := range networks { - fmt.Fprintf(stderr, "Removing network %s\n", network.Name) - if err := client.NetworkRemove(ctx, network.ID); err != nil { - hasError = true - fmt.Fprintf(stderr, "Failed to remove network %s: %s\n", network.ID, err) - } - } - - if len(services) == 0 && len(networks) == 0 { - fmt.Fprintf(dockerCli.Out(), "Nothing found in stack: %s\n", namespace) - return nil - } - - if hasError { - return fmt.Errorf("Failed to remove some resources") - } - return nil -} diff --git a/api/client/swarm/cmd.go b/api/client/swarm/cmd.go deleted file mode 100644 index 37b035b495..0000000000 --- a/api/client/swarm/cmd.go +++ /dev/null @@ -1,30 +0,0 @@ -package swarm - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" -) - -// NewSwarmCommand returns a cobra command for `swarm` subcommands -func NewSwarmCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "swarm", - Short: "Manage Docker Swarm", - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newInitCommand(dockerCli), - newJoinCommand(dockerCli), - newJoinTokenCommand(dockerCli), - newUpdateCommand(dockerCli), - newLeaveCommand(dockerCli), - ) - return cmd -} diff --git a/api/client/swarm/init.go b/api/client/swarm/init.go deleted file mode 100644 index 6a59019067..0000000000 --- a/api/client/swarm/init.go +++ /dev/null @@ -1,81 +0,0 @@ -package swarm - -import ( - "errors" - "fmt" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -const ( - generatedSecretEntropyBytes = 16 - generatedSecretBase = 36 - // floor(log(2^128-1, 36)) + 1 - maxGeneratedSecretLength = 25 -) - -type initOptions struct { - swarmOptions - listenAddr NodeAddrOption - // Not a NodeAddrOption because it has no default port. 
- advertiseAddr string - forceNewCluster bool -} - -func newInitCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := initOptions{ - listenAddr: NewListenAddrOption(), - } - - cmd := &cobra.Command{ - Use: "init [OPTIONS]", - Short: "Initialize a swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runInit(dockerCli, cmd.Flags(), opts) - }, - } - - flags := cmd.Flags() - flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") - flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") - flags.BoolVar(&opts.forceNewCluster, "force-new-cluster", false, "Force create a new cluster from current state.") - addSwarmFlags(flags, &opts.swarmOptions) - return cmd -} - -func runInit(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts initOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - req := swarm.InitRequest{ - ListenAddr: opts.listenAddr.String(), - AdvertiseAddr: opts.advertiseAddr, - ForceNewCluster: opts.forceNewCluster, - Spec: opts.swarmOptions.ToSpec(), - } - - nodeID, err := client.SwarmInit(ctx, req) - if err != nil { - if strings.Contains(err.Error(), "could not choose an IP address to advertise") || strings.Contains(err.Error(), "could not find the system's IP address") { - return errors.New(err.Error() + " - specify one with --advertise-addr") - } - return err - } - - fmt.Fprintf(dockerCli.Out(), "Swarm initialized: current node (%s) is now a manager.\n\n", nodeID) - - if err := printJoinCommand(ctx, dockerCli, nodeID, true, false); err != nil { - return err - } - - fmt.Fprint(dockerCli.Out(), "To add a manager to this swarm, run 'docker swarm join-token manager' and follow the instructions.\n\n") - return nil -} diff --git a/api/client/swarm/join.go b/api/client/swarm/join.go deleted file mode 100644 index de2e15ab23..0000000000 --- a/api/client/swarm/join.go +++ /dev/null @@ -1,75 +0,0 @@ -package swarm - -import ( - "fmt" - "strings" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" - "golang.org/x/net/context" -) - -type joinOptions struct { - remote string - listenAddr NodeAddrOption - // Not a NodeAddrOption because it has no default port. 
- advertiseAddr string - token string -} - -func newJoinCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := joinOptions{ - listenAddr: NewListenAddrOption(), - } - - cmd := &cobra.Command{ - Use: "join [OPTIONS] HOST:PORT", - Short: "Join a swarm as a node and/or manager", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.remote = args[0] - return runJoin(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.Var(&opts.listenAddr, flagListenAddr, "Listen address (format: [:port])") - flags.StringVar(&opts.advertiseAddr, flagAdvertiseAddr, "", "Advertised address (format: [:port])") - flags.StringVar(&opts.token, flagToken, "", "Token for entry into the swarm") - return cmd -} - -func runJoin(dockerCli *client.DockerCli, opts joinOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - req := swarm.JoinRequest{ - JoinToken: opts.token, - ListenAddr: opts.listenAddr.String(), - AdvertiseAddr: opts.advertiseAddr, - RemoteAddrs: []string{opts.remote}, - } - err := client.SwarmJoin(ctx, req) - if err != nil { - return err - } - - info, err := client.Info(ctx) - if err != nil { - return err - } - - _, _, err = client.NodeInspectWithRaw(ctx, info.Swarm.NodeID) - if err != nil { - // TODO(aaronl): is there a better way to do this? - if strings.Contains(err.Error(), "This node is not a swarm manager.") { - fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a worker.") - } - } else { - fmt.Fprintln(dockerCli.Out(), "This node joined a swarm as a manager.") - } - - return nil -} diff --git a/api/client/swarm/join_token.go b/api/client/swarm/join_token.go deleted file mode 100644 index 130b55f40b..0000000000 --- a/api/client/swarm/join_token.go +++ /dev/null @@ -1,105 +0,0 @@ -package swarm - -import ( - "errors" - "fmt" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -func newJoinTokenCommand(dockerCli *client.DockerCli) *cobra.Command { - var rotate, quiet bool - - cmd := &cobra.Command{ - Use: "join-token [-q] [--rotate] (worker|manager)", - Short: "Manage join tokens", - Args: cli.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - worker := args[0] == "worker" - manager := args[0] == "manager" - - if !worker && !manager { - return errors.New("unknown role " + args[0]) - } - - client := dockerCli.Client() - ctx := context.Background() - - if rotate { - var flags swarm.UpdateFlags - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - flags.RotateWorkerToken = worker - flags.RotateManagerToken = manager - - err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, flags) - if err != nil { - return err - } - if !quiet { - fmt.Fprintf(dockerCli.Out(), "Successfully rotated %s join token.\n\n", args[0]) - } - } - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - if quiet { - if worker { - fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Worker) - } else { - fmt.Fprintln(dockerCli.Out(), swarm.JoinTokens.Manager) - } - } else { - info, err := client.Info(ctx) - if err != nil { - return err - } - return printJoinCommand(ctx, dockerCli, info.Swarm.NodeID, worker, manager) - } - return nil - }, - } - - flags := cmd.Flags() - flags.BoolVar(&rotate, flagRotate, false, "Rotate join token") - flags.BoolVarP(&quiet, flagQuiet, "q", false, "Only display token") - - return cmd -} - -func printJoinCommand(ctx 
context.Context, dockerCli *client.DockerCli, nodeID string, worker bool, manager bool) error { - client := dockerCli.Client() - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - node, _, err := client.NodeInspectWithRaw(ctx, nodeID) - if err != nil { - return err - } - - if node.ManagerStatus != nil { - if worker { - fmt.Fprintf(dockerCli.Out(), "To add a worker to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", swarm.JoinTokens.Worker, node.ManagerStatus.Addr) - } - if manager { - fmt.Fprintf(dockerCli.Out(), "To add a manager to this swarm, run the following command:\n\n docker swarm join \\\n --token %s \\\n %s\n\n", swarm.JoinTokens.Manager, node.ManagerStatus.Addr) - } - } - - return nil -} diff --git a/api/client/swarm/leave.go b/api/client/swarm/leave.go deleted file mode 100644 index 52d83dcee6..0000000000 --- a/api/client/swarm/leave.go +++ /dev/null @@ -1,44 +0,0 @@ -package swarm - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type leaveOptions struct { - force bool -} - -func newLeaveCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := leaveOptions{} - - cmd := &cobra.Command{ - Use: "leave [OPTIONS]", - Short: "Leave the swarm (workers only)", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runLeave(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVar(&opts.force, "force", false, "Force this node to leave the swarm, ignoring warnings") - return cmd -} - -func runLeave(dockerCli *client.DockerCli, opts leaveOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - if err := client.SwarmLeave(ctx, opts.force); err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), "Node left the swarm.") - return nil -} diff --git a/api/client/swarm/opts.go b/api/client/swarm/opts.go deleted file mode 100644 index ae91b8530e..0000000000 --- a/api/client/swarm/opts.go +++ /dev/null @@ -1,179 +0,0 @@ -package swarm - -import ( - "encoding/csv" - "errors" - "fmt" - "strings" - "time" - - "github.com/docker/docker/opts" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/pflag" -) - -const ( - defaultListenAddr = "0.0.0.0:2377" - - flagCertExpiry = "cert-expiry" - flagDispatcherHeartbeat = "dispatcher-heartbeat" - flagListenAddr = "listen-addr" - flagAdvertiseAddr = "advertise-addr" - flagQuiet = "quiet" - flagRotate = "rotate" - flagToken = "token" - flagTaskHistoryLimit = "task-history-limit" - flagExternalCA = "external-ca" -) - -type swarmOptions struct { - taskHistoryLimit int64 - dispatcherHeartbeat time.Duration - nodeCertExpiry time.Duration - externalCA ExternalCAOption -} - -// NodeAddrOption is a pflag.Value for listen and remote addresses -type NodeAddrOption struct { - addr string -} - -// String prints the representation of this flag -func (a *NodeAddrOption) String() string { - return a.Value() -} - -// Set the value for this flag -func (a *NodeAddrOption) Set(value string) error { - addr, err := opts.ParseTCPAddr(value, a.addr) - if err != nil { - return err - } - a.addr = addr - return nil -} - -// Type returns the type of this flag -func (a *NodeAddrOption) Type() string { - return "node-addr" -} - -// Value returns the value of this option as addr:port -func (a *NodeAddrOption) Value() string { - return strings.TrimPrefix(a.addr, "tcp://") -} - -// NewNodeAddrOption returns a new node address 
option -func NewNodeAddrOption(addr string) NodeAddrOption { - return NodeAddrOption{addr} -} - -// NewListenAddrOption returns a NodeAddrOption with default values -func NewListenAddrOption() NodeAddrOption { - return NewNodeAddrOption(defaultListenAddr) -} - -// ExternalCAOption is a Value type for parsing external CA specifications. -type ExternalCAOption struct { - values []*swarm.ExternalCA -} - -// Set parses an external CA option. -func (m *ExternalCAOption) Set(value string) error { - parsed, err := parseExternalCA(value) - if err != nil { - return err - } - - m.values = append(m.values, parsed) - return nil -} - -// Type returns the type of this option. -func (m *ExternalCAOption) Type() string { - return "external-ca" -} - -// String returns a string repr of this option. -func (m *ExternalCAOption) String() string { - externalCAs := []string{} - for _, externalCA := range m.values { - repr := fmt.Sprintf("%s: %s", externalCA.Protocol, externalCA.URL) - externalCAs = append(externalCAs, repr) - } - return strings.Join(externalCAs, ", ") -} - -// Value returns the external CAs -func (m *ExternalCAOption) Value() []*swarm.ExternalCA { - return m.values -} - -// parseExternalCA parses an external CA specification from the command line, -// such as protocol=cfssl,url=https://example.com. -func parseExternalCA(caSpec string) (*swarm.ExternalCA, error) { - csvReader := csv.NewReader(strings.NewReader(caSpec)) - fields, err := csvReader.Read() - if err != nil { - return nil, err - } - - externalCA := swarm.ExternalCA{ - Options: make(map[string]string), - } - - var ( - hasProtocol bool - hasURL bool - ) - - for _, field := range fields { - parts := strings.SplitN(field, "=", 2) - - if len(parts) != 2 { - return nil, fmt.Errorf("invalid field '%s' must be a key=value pair", field) - } - - key, value := parts[0], parts[1] - - switch strings.ToLower(key) { - case "protocol": - hasProtocol = true - if strings.ToLower(value) == string(swarm.ExternalCAProtocolCFSSL) { - externalCA.Protocol = swarm.ExternalCAProtocolCFSSL - } else { - return nil, fmt.Errorf("unrecognized external CA protocol %s", value) - } - case "url": - hasURL = true - externalCA.URL = value - default: - externalCA.Options[key] = value - } - } - - if !hasProtocol { - return nil, errors.New("the external-ca option needs a protocol= parameter") - } - if !hasURL { - return nil, errors.New("the external-ca option needs a url= parameter") - } - - return &externalCA, nil -} - -func addSwarmFlags(flags *pflag.FlagSet, opts *swarmOptions) { - flags.Int64Var(&opts.taskHistoryLimit, flagTaskHistoryLimit, 5, "Task history retention limit") - flags.DurationVar(&opts.dispatcherHeartbeat, flagDispatcherHeartbeat, time.Duration(5*time.Second), "Dispatcher heartbeat period") - flags.DurationVar(&opts.nodeCertExpiry, flagCertExpiry, time.Duration(90*24*time.Hour), "Validity period for node certificates") - flags.Var(&opts.externalCA, flagExternalCA, "Specifications of one or more certificate signing endpoints") -} - -func (opts *swarmOptions) ToSpec() swarm.Spec { - spec := swarm.Spec{} - spec.Orchestration.TaskHistoryRetentionLimit = opts.taskHistoryLimit - spec.Dispatcher.HeartbeatPeriod = uint64(opts.dispatcherHeartbeat.Nanoseconds()) - spec.CAConfig.NodeCertExpiry = opts.nodeCertExpiry - spec.CAConfig.ExternalCAs = opts.externalCA.Value() - return spec -} diff --git a/api/client/swarm/opts_test.go b/api/client/swarm/opts_test.go deleted file mode 100644 index 568dc87302..0000000000 --- a/api/client/swarm/opts_test.go +++ /dev/null @@ 
-1,37 +0,0 @@ -package swarm - -import ( - "testing" - - "github.com/docker/docker/pkg/testutil/assert" -) - -func TestNodeAddrOptionSetHostAndPort(t *testing.T) { - opt := NewNodeAddrOption("old:123") - addr := "newhost:5555" - assert.NilError(t, opt.Set(addr)) - assert.Equal(t, opt.Value(), addr) -} - -func TestNodeAddrOptionSetHostOnly(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set("newhost")) - assert.Equal(t, opt.Value(), "newhost:2377") -} - -func TestNodeAddrOptionSetHostOnlyIPv6(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set("::1")) - assert.Equal(t, opt.Value(), "[::1]:2377") -} - -func TestNodeAddrOptionSetPortOnly(t *testing.T) { - opt := NewListenAddrOption() - assert.NilError(t, opt.Set(":4545")) - assert.Equal(t, opt.Value(), "0.0.0.0:4545") -} - -func TestNodeAddrOptionSetInvalidFormat(t *testing.T) { - opt := NewListenAddrOption() - assert.Error(t, opt.Set("http://localhost:4545"), "Invalid") -} diff --git a/api/client/swarm/update.go b/api/client/swarm/update.go deleted file mode 100644 index 05d4e91ee0..0000000000 --- a/api/client/swarm/update.go +++ /dev/null @@ -1,82 +0,0 @@ -package swarm - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types/swarm" - "github.com/spf13/cobra" - "github.com/spf13/pflag" -) - -func newUpdateCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := swarmOptions{} - - cmd := &cobra.Command{ - Use: "update [OPTIONS]", - Short: "Update the swarm", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runUpdate(dockerCli, cmd.Flags(), opts) - }, - } - - addSwarmFlags(cmd.Flags(), &opts) - return cmd -} - -func runUpdate(dockerCli *client.DockerCli, flags *pflag.FlagSet, opts swarmOptions) error { - client := dockerCli.Client() - ctx := context.Background() - - var updateFlags swarm.UpdateFlags - - swarm, err := client.SwarmInspect(ctx) - if err != nil { - return err - } - - err = mergeSwarm(&swarm, flags) - if err != nil { - return err - } - - err = client.SwarmUpdate(ctx, swarm.Version, swarm.Spec, updateFlags) - if err != nil { - return err - } - - fmt.Fprintln(dockerCli.Out(), "Swarm updated.") - - return nil -} - -func mergeSwarm(swarm *swarm.Swarm, flags *pflag.FlagSet) error { - spec := &swarm.Spec - - if flags.Changed(flagTaskHistoryLimit) { - spec.Orchestration.TaskHistoryRetentionLimit, _ = flags.GetInt64(flagTaskHistoryLimit) - } - - if flags.Changed(flagDispatcherHeartbeat) { - if v, err := flags.GetDuration(flagDispatcherHeartbeat); err == nil { - spec.Dispatcher.HeartbeatPeriod = uint64(v.Nanoseconds()) - } - } - - if flags.Changed(flagCertExpiry) { - if v, err := flags.GetDuration(flagCertExpiry); err == nil { - spec.CAConfig.NodeCertExpiry = v - } - } - - if flags.Changed(flagExternalCA) { - value := flags.Lookup(flagExternalCA).Value.(*ExternalCAOption) - spec.CAConfig.ExternalCAs = value.Value() - } - - return nil -} diff --git a/api/client/system/events.go b/api/client/system/events.go deleted file mode 100644 index 6faf11ee80..0000000000 --- a/api/client/system/events.go +++ /dev/null @@ -1,115 +0,0 @@ -package system - -import ( - "fmt" - "io" - "sort" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/engine-api/types" - eventtypes "github.com/docker/engine-api/types/events" - 
"github.com/docker/engine-api/types/filters" - "github.com/spf13/cobra" -) - -type eventsOptions struct { - since string - until string - filter []string -} - -// NewEventsCommand creats a new cobra.Command for `docker events` -func NewEventsCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts eventsOptions - - cmd := &cobra.Command{ - Use: "events [OPTIONS]", - Short: "Get real time events from the server", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runEvents(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - flags.StringVar(&opts.since, "since", "", "Show all events created since timestamp") - flags.StringVar(&opts.until, "until", "", "Stream events until this timestamp") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Filter output based on conditions provided") - - return cmd -} - -func runEvents(dockerCli *client.DockerCli, opts *eventsOptions) error { - eventFilterArgs := filters.NewArgs() - - // Consolidate all filter flags, and sanity check them early. - // They'll get process in the daemon/server. - for _, f := range opts.filter { - var err error - eventFilterArgs, err = filters.ParseFlag(f, eventFilterArgs) - if err != nil { - return err - } - } - - options := types.EventsOptions{ - Since: opts.since, - Until: opts.until, - Filters: eventFilterArgs, - } - - responseBody, err := dockerCli.Client().Events(context.Background(), options) - if err != nil { - return err - } - defer responseBody.Close() - - return streamEvents(responseBody, dockerCli.Out()) -} - -// streamEvents decodes prints the incoming events in the provided output. -func streamEvents(input io.Reader, output io.Writer) error { - return DecodeEvents(input, func(event eventtypes.Message, err error) error { - if err != nil { - return err - } - printOutput(event, output) - return nil - }) -} - -type eventProcessor func(event eventtypes.Message, err error) error - -// printOutput prints all types of event information. -// Each output includes the event type, actor id, name and action. -// Actor attributes are printed at the end if the actor has any. 
 - -// printOutput prints all types of event information. -// Each output includes the event type, actor id, name and action. -// Actor attributes are printed at the end if the actor has any. -func printOutput(event eventtypes.Message, output io.Writer) { - if event.TimeNano != 0 { - fmt.Fprintf(output, "%s ", time.Unix(0, event.TimeNano).Format(jsonlog.RFC3339NanoFixed)) - } else if event.Time != 0 { - fmt.Fprintf(output, "%s ", time.Unix(event.Time, 0).Format(jsonlog.RFC3339NanoFixed)) - } - - fmt.Fprintf(output, "%s %s %s", event.Type, event.Action, event.Actor.ID) - - if len(event.Actor.Attributes) > 0 { - var attrs []string - var keys []string - for k := range event.Actor.Attributes { - keys = append(keys, k) - } - sort.Strings(keys) - for _, k := range keys { - v := event.Actor.Attributes[k] - attrs = append(attrs, fmt.Sprintf("%s=%s", k, v)) - } - fmt.Fprintf(output, " (%s)", strings.Join(attrs, ", ")) - } - fmt.Fprint(output, "\n") -} diff --git a/api/client/system/events_utils.go b/api/client/system/events_utils.go deleted file mode 100644 index 685225daa6..0000000000 --- a/api/client/system/events_utils.go +++ /dev/null @@ -1,66 +0,0 @@ -package system - -import ( - "encoding/json" - "io" - "sync" - - "github.com/Sirupsen/logrus" - eventtypes "github.com/docker/engine-api/types/events" -) - -// EventHandler is an abstract interface that lets users register -// their own handler functions for each type of event -type EventHandler interface { - Handle(action string, h func(eventtypes.Message)) - Watch(c <-chan eventtypes.Message) -} - -// InitEventHandler initializes and returns an EventHandler -func InitEventHandler() EventHandler { - return &eventHandler{handlers: make(map[string]func(eventtypes.Message))} -} - -type eventHandler struct { - handlers map[string]func(eventtypes.Message) - mu sync.Mutex -} - -func (w *eventHandler) Handle(action string, h func(eventtypes.Message)) { - w.mu.Lock() - w.handlers[action] = h - w.mu.Unlock() -}
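A quick illustration of the EventHandler API above: register one handler per action with Handle, then hand Watch a channel. The wiring below is hypothetical, not from the original tree, and assumes the caller owns the events channel:

    package system

    import eventtypes "github.com/docker/engine-api/types/events"

    // watchContainerStarts registers a handler for "start" actions and blocks
    // until ch is closed. Watch dispatches each matching event in its own
    // goroutine, so onStart must be safe for concurrent use.
    func watchContainerStarts(ch <-chan eventtypes.Message, onStart func(eventtypes.Message)) {
    	h := InitEventHandler()
    	h.Handle("start", onStart)
    	h.Watch(ch)
    }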
 - -// Watch ranges over the passed in event chan and processes the events based on the -// handlers created for a given action. -// To stop watching, close the event chan. -func (w *eventHandler) Watch(c <-chan eventtypes.Message) { - for e := range c { - w.mu.Lock() - h, exists := w.handlers[e.Action] - w.mu.Unlock() - if !exists { - continue - } - logrus.Debugf("event handler: received event: %v", e) - go h(e) - } -} - -// DecodeEvents decodes events from the input stream -func DecodeEvents(input io.Reader, ep eventProcessor) error { - dec := json.NewDecoder(input) - for { - var event eventtypes.Message - err := dec.Decode(&event) - if err != nil && err == io.EOF { - break - } - - if procErr := ep(event, err); procErr != nil { - return procErr - } - } - return nil -} diff --git a/api/client/system/version.go b/api/client/system/version.go deleted file mode 100644 index 899bc7bddf..0000000000 --- a/api/client/system/version.go +++ /dev/null @@ -1,110 +0,0 @@ -package system - -import ( - "runtime" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/utils" - "github.com/docker/docker/utils/templates" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -var versionTemplate = `Client: - Version: {{.Client.Version}} - API version: {{.Client.APIVersion}} - Go version: {{.Client.GoVersion}} - Git commit: {{.Client.GitCommit}} - Built: {{.Client.BuildTime}} - OS/Arch: {{.Client.Os}}/{{.Client.Arch}}{{if .Client.Experimental}} - Experimental: {{.Client.Experimental}}{{end}}{{if .ServerOK}} - -Server: - Version: {{.Server.Version}} - API version: {{.Server.APIVersion}} - Go version: {{.Server.GoVersion}} - Git commit: {{.Server.GitCommit}} - Built: {{.Server.BuildTime}} - OS/Arch: {{.Server.Os}}/{{.Server.Arch}}{{if .Server.Experimental}} - Experimental: {{.Server.Experimental}}{{end}}{{end}}` -type versionOptions struct { - format string -} - -// NewVersionCommand creates a new cobra.Command for `docker version` -func NewVersionCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts versionOptions - - cmd := &cobra.Command{ - Use: "version [OPTIONS]", - Short: "Show the Docker version information", - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runVersion(dockerCli, &opts) - }, - } - - flags := cmd.Flags() - - flags.StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - - return cmd -} - -func runVersion(dockerCli *client.DockerCli, opts *versionOptions) error { - ctx := context.Background() - - templateFormat := versionTemplate - if opts.format != "" { - templateFormat = opts.format - } - - tmpl, err := templates.Parse(templateFormat) - if err != nil { - return cli.StatusError{StatusCode: 64, - Status: "Template parsing error: " + err.Error()} - } - - vd := types.VersionResponse{ - Client: &types.Version{ - Version: dockerversion.Version, - APIVersion: dockerCli.Client().ClientVersion(), - GoVersion: runtime.Version(), - GitCommit: dockerversion.GitCommit, - BuildTime: dockerversion.BuildTime, - Os: runtime.GOOS, - Arch: runtime.GOARCH, - Experimental: utils.ExperimentalBuild(), - }, - } - - serverVersion, err := dockerCli.Client().ServerVersion(ctx) - if err == nil { - vd.Server = &serverVersion - } - - // first we need to make BuildTime more human friendly - t, errTime := time.Parse(time.RFC3339Nano, vd.Client.BuildTime) - if errTime == nil { - vd.Client.BuildTime = t.Format(time.ANSIC) - } - - if vd.ServerOK() { - t, errTime = time.Parse(time.RFC3339Nano, vd.Server.BuildTime) - if errTime == nil { - vd.Server.BuildTime = 
t.Format(time.ANSIC) - } - } - - if err2 := tmpl.Execute(dockerCli.Out(), vd); err2 != nil && err == nil { - err = err2 - } - dockerCli.Out().Write([]byte{'\n'}) - return err -} diff --git a/api/client/task/print.go b/api/client/task/print.go deleted file mode 100644 index 954aef7eb4..0000000000 --- a/api/client/task/print.go +++ /dev/null @@ -1,100 +0,0 @@ -package task - -import ( - "fmt" - "sort" - "strings" - "text/tabwriter" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/idresolver" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-units" -) - -const ( - psTaskItemFmt = "%s\t%s\t%s\t%s\t%s\t%s %s ago\t%s\n" - maxErrLength = 30 -) - -type tasksBySlot []swarm.Task - -func (t tasksBySlot) Len() int { - return len(t) -} - -func (t tasksBySlot) Swap(i, j int) { - t[i], t[j] = t[j], t[i] -} - -func (t tasksBySlot) Less(i, j int) bool { - // Sort by slot. - if t[i].Slot != t[j].Slot { - return t[i].Slot < t[j].Slot - } - - // If same slot, sort by most recent. - return t[j].Meta.CreatedAt.Before(t[i].CreatedAt) -} - -// Print task information in a table format -func Print(dockerCli *client.DockerCli, ctx context.Context, tasks []swarm.Task, resolver *idresolver.IDResolver) error { - sort.Stable(tasksBySlot(tasks)) - - writer := tabwriter.NewWriter(dockerCli.Out(), 0, 4, 2, ' ', 0) - - // Ignore flushing errors - defer writer.Flush() - fmt.Fprintln(writer, strings.Join([]string{"ID", "NAME", "IMAGE", "NODE", "DESIRED STATE", "CURRENT STATE", "ERROR"}, "\t")) - - prevName := "" - for _, task := range tasks { - serviceValue, err := resolver.Resolve(ctx, swarm.Service{}, task.ServiceID) - if err != nil { - return err - } - nodeValue, err := resolver.Resolve(ctx, swarm.Node{}, task.NodeID) - if err != nil { - return err - } - - name := serviceValue - if task.Slot > 0 { - name = fmt.Sprintf("%s.%d", name, task.Slot) - } - - // Indent the name if necessary - indentedName := name - if prevName == name { - indentedName = fmt.Sprintf(" \\_ %s", indentedName) - } - prevName = name - - // Trim and quote the error message. 
- taskErr := task.Status.Err - if len(taskErr) > maxErrLength { - taskErr = fmt.Sprintf("%s…", taskErr[:maxErrLength-1]) - } - if len(taskErr) > 0 { - taskErr = fmt.Sprintf("\"%s\"", taskErr) - } - - fmt.Fprintf( - writer, - psTaskItemFmt, - task.ID, - indentedName, - task.Spec.ContainerSpec.Image, - nodeValue, - client.PrettyPrint(task.DesiredState), - client.PrettyPrint(task.Status.State), - strings.ToLower(units.HumanDuration(time.Since(task.Status.Timestamp))), - taskErr, - ) - } - - return nil -} diff --git a/api/client/trust.go b/api/client/trust.go deleted file mode 100644 index 273b5e4c4a..0000000000 --- a/api/client/trust.go +++ /dev/null @@ -1,605 +0,0 @@ -package client - -import ( - "encoding/hex" - "encoding/json" - "errors" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "path" - "path/filepath" - "sort" - "strconv" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/distribution" - "github.com/docker/docker/pkg/jsonmessage" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" - "github.com/docker/go-connections/tlsconfig" - "github.com/docker/notary/client" - "github.com/docker/notary/passphrase" - "github.com/docker/notary/trustmanager" - "github.com/docker/notary/trustpinning" - "github.com/docker/notary/tuf/data" - "github.com/docker/notary/tuf/signed" - "github.com/docker/notary/tuf/store" - "github.com/spf13/pflag" -) - -var ( - releasesRole = path.Join(data.CanonicalTargetsRole, "releases") - untrusted bool -) - -// addTrustedFlags is the mflag version of AddTrustedFlags -func addTrustedFlags(fs *flag.FlagSet, verify bool) { - trusted, message := setupTrustedFlag(verify) - fs.BoolVar(&untrusted, []string{"-disable-content-trust"}, !trusted, message) -} - -// AddTrustedFlags adds content trust flags to the current command flagset -func AddTrustedFlags(fs *pflag.FlagSet, verify bool) { - trusted, message := setupTrustedFlag(verify) - fs.BoolVar(&untrusted, "disable-content-trust", !trusted, message) -} - -func setupTrustedFlag(verify bool) (bool, string) { - var trusted bool - if e := os.Getenv("DOCKER_CONTENT_TRUST"); e != "" { - if t, err := strconv.ParseBool(e); t || err != nil { - // treat any other value as true - trusted = true - } - } - message := "Skip image signing" - if verify { - message = "Skip image verification" - } - return trusted, message -} - -// IsTrusted returns true if content trust is enabled -func IsTrusted() bool { - return !untrusted -} - -type target struct { - reference registry.Reference - digest digest.Digest - size int64 -} - -func (cli *DockerCli) trustDirectory() string { - return filepath.Join(cliconfig.ConfigDir(), "trust") -} - -// certificateDirectory returns the directory containing -// TLS certificates for the given server. An error is -// returned if there was an error parsing the server string. 
-func (cli *DockerCli) certificateDirectory(server string) (string, error) { - u, err := url.Parse(server) - if err != nil { - return "", err - } - - return filepath.Join(cliconfig.ConfigDir(), "tls", u.Host), nil -} - -func trustServer(index *registrytypes.IndexInfo) (string, error) { - if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" { - urlObj, err := url.Parse(s) - if err != nil || urlObj.Scheme != "https" { - return "", fmt.Errorf("valid https URL required for trust server, got %s", s) - } - - return s, nil - } - if index.Official { - return registry.NotaryServer, nil - } - return "https://" + index.Name, nil -} - -type simpleCredentialStore struct { - auth types.AuthConfig -} - -func (scs simpleCredentialStore) Basic(u *url.URL) (string, string) { - return scs.auth.Username, scs.auth.Password -} - -func (scs simpleCredentialStore) RefreshToken(u *url.URL, service string) string { - return scs.auth.IdentityToken -} - -func (scs simpleCredentialStore) SetRefreshToken(*url.URL, string, string) { -} - -// getNotaryRepository returns a NotaryRepository which stores all the -// information needed to operate on a notary repository. -// It creates an HTTP transport providing authentication support. -func (cli *DockerCli) getNotaryRepository(repoInfo *registry.RepositoryInfo, authConfig types.AuthConfig, actions ...string) (*client.NotaryRepository, error) { - server, err := trustServer(repoInfo.Index) - if err != nil { - return nil, err - } - - var cfg = tlsconfig.ClientDefault - cfg.InsecureSkipVerify = !repoInfo.Index.Secure - - // Get certificate base directory - certDir, err := cli.certificateDirectory(server) - if err != nil { - return nil, err - } - logrus.Debugf("reading certificate directory: %s", certDir) - - if err := registry.ReadCertsDirectory(&cfg, certDir); err != nil { - return nil, err - } - - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: &cfg, - DisableKeepAlives: true, - } - - // Skip configuration headers since request is not going to Docker daemon - modifiers := registry.DockerHeaders(clientUserAgent(), http.Header{}) - authTransport := transport.NewTransport(base, modifiers...) - pingClient := &http.Client{ - Transport: authTransport, - Timeout: 5 * time.Second, - } - endpointStr := server + "/v2/" - req, err := http.NewRequest("GET", endpointStr, nil) - if err != nil { - return nil, err - } - - challengeManager := auth.NewSimpleChallengeManager() - - resp, err := pingClient.Do(req) - if err != nil { - // Ignore error on ping to operate in offline mode - logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err) - } else { - defer resp.Body.Close() - - // Add response to the challenge manager to parse out - // authentication header and register authentication method - if err := challengeManager.AddResponse(resp); err != nil { - return nil, err - } - } - - creds := simpleCredentialStore{auth: authConfig} - tokenHandler := auth.NewTokenHandler(authTransport, creds, repoInfo.FullName(), actions...) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, transport.RequestModifier(auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))) - tr := transport.NewTransport(base, modifiers...) 
- - return client.NewNotaryRepository( - cli.trustDirectory(), repoInfo.FullName(), server, tr, cli.getPassphraseRetriever(), - trustpinning.TrustPinConfig{}) -} - -func convertTarget(t client.Target) (target, error) { - h, ok := t.Hashes["sha256"] - if !ok { - return target{}, errors.New("no valid hash, expecting sha256") - } - return target{ - reference: registry.ParseReference(t.Name), - digest: digest.NewDigestFromHex("sha256", hex.EncodeToString(h)), - size: t.Length, - }, nil -} - -func (cli *DockerCli) getPassphraseRetriever() passphrase.Retriever { - aliasMap := map[string]string{ - "root": "root", - "snapshot": "repository", - "targets": "repository", - "default": "repository", - } - baseRetriever := passphrase.PromptRetrieverWithInOut(cli.in, cli.out, aliasMap) - env := map[string]string{ - "root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"), - "snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - "targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - "default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"), - } - - return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) { - if v := env[alias]; v != "" { - return v, numAttempts > 1, nil - } - // For non-root roles, we can also try the "default" alias if it is specified - if v := env["default"]; v != "" && alias != data.CanonicalRootRole { - return v, numAttempts > 1, nil - } - return baseRetriever(keyName, alias, createNew, numAttempts) - } -} - -// TrustedReference returns the canonical trusted reference for an image reference -func (cli *DockerCli) TrustedReference(ctx context.Context, ref reference.NamedTagged) (reference.Canonical, error) { - repoInfo, err := registry.ParseRepositoryInfo(ref) - if err != nil { - return nil, err - } - - // Resolve the Auth config relevant for this server - authConfig := cli.ResolveAuthConfig(ctx, repoInfo.Index) - - notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig, "pull") - if err != nil { - fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) - return nil, err - } - - t, err := notaryRepo.GetTargetByName(ref.Tag(), releasesRole, data.CanonicalTargetsRole) - if err != nil { - return nil, err - } - // Only list tags in the top level targets role or the releases delegation role - ignore - // all other delegation roles - if t.Role != releasesRole && t.Role != data.CanonicalTargetsRole { - return nil, notaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.Tag())) - } - r, err := convertTarget(t.Target) - if err != nil { - return nil, err - - } - - return reference.WithDigest(ref, r.digest) -} - -// TagTrusted tags a trusted ref -func (cli *DockerCli) TagTrusted(ctx context.Context, trustedRef reference.Canonical, ref reference.NamedTagged) error { - fmt.Fprintf(cli.out, "Tagging %s as %s\n", trustedRef.String(), ref.String()) - - return cli.client.ImageTag(ctx, trustedRef.String(), ref.String()) -} - -func notaryError(repoName string, err error) error { - switch err.(type) { - case *json.SyntaxError: - logrus.Debugf("Notary syntax error: %s", err) - return fmt.Errorf("Error: no trust data available for remote repository %s. 
Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName) - case signed.ErrExpired: - return fmt.Errorf("Error: remote repository %s out-of-date: %v", repoName, err) - case trustmanager.ErrKeyNotFound: - return fmt.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err) - case *net.OpError: - return fmt.Errorf("Error: error contacting notary server: %v", err) - case store.ErrMetaNotFound: - return fmt.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err) - case signed.ErrInvalidKeyType: - return fmt.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err) - case signed.ErrNoKeys: - return fmt.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err) - case signed.ErrLowVersion: - return fmt.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err) - case signed.ErrRoleThreshold: - return fmt.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err) - case client.ErrRepositoryNotExist: - return fmt.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err) - case signed.ErrInsufficientSignatures: - return fmt.Errorf("Error: could not produce valid signature for %s. If Yubikey was used, was touch input provided?: %v", repoName, err) - } - - return err -} - -// TrustedPull handles content trust pulling of an image -func (cli *DockerCli) TrustedPull(ctx context.Context, repoInfo *registry.RepositoryInfo, ref registry.Reference, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { - var refs []target - - notaryRepo, err := cli.getNotaryRepository(repoInfo, authConfig, "pull") - if err != nil { - fmt.Fprintf(cli.out, "Error establishing connection to trust repository: %s\n", err) - return err - } - - if ref.String() == "" { - // List all targets - targets, err := notaryRepo.ListTargets(releasesRole, data.CanonicalTargetsRole) - if err != nil { - return notaryError(repoInfo.FullName(), err) - } - for _, tgt := range targets { - t, err := convertTarget(tgt.Target) - if err != nil { - fmt.Fprintf(cli.out, "Skipping target for %q\n", repoInfo.Name()) - continue - } - // Only list tags in the top level targets role or the releases delegation role - ignore - // all other delegation roles - if tgt.Role != releasesRole && tgt.Role != data.CanonicalTargetsRole { - continue - } - refs = append(refs, t) - } - if len(refs) == 0 { - return notaryError(repoInfo.FullName(), fmt.Errorf("No trusted tags for %s", repoInfo.FullName())) - } - } else { - t, err := notaryRepo.GetTargetByName(ref.String(), releasesRole, data.CanonicalTargetsRole) - if err != nil { - return notaryError(repoInfo.FullName(), err) - } - // Only get the tag if it's in the top level targets role or the releases delegation role - // ignore it if it's in any other delegation roles - if t.Role != releasesRole && t.Role != data.CanonicalTargetsRole { - return notaryError(repoInfo.FullName(), fmt.Errorf("No trust data for %s", ref.String())) - } - - logrus.Debugf("retrieving target for %s role\n", t.Role) - r, err := convertTarget(t.Target) - if err != nil { - return err - - } - refs = append(refs, r) - } - - for i, r := range refs { - displayTag := r.reference.String() - if displayTag != "" { - displayTag 
= ":" + displayTag - } - fmt.Fprintf(cli.out, "Pull (%d of %d): %s%s@%s\n", i+1, len(refs), repoInfo.Name(), displayTag, r.digest) - - ref, err := reference.WithDigest(repoInfo, r.digest) - if err != nil { - return err - } - if err := cli.ImagePullPrivileged(ctx, authConfig, ref.String(), requestPrivilege, false); err != nil { - return err - } - - // If reference is not trusted, tag by trusted reference - if !r.reference.HasDigest() { - tagged, err := reference.WithTag(repoInfo, r.reference.String()) - if err != nil { - return err - } - trustedRef, err := reference.WithDigest(repoInfo, r.digest) - if err != nil { - return err - } - if err := cli.TagTrusted(ctx, trustedRef, tagged); err != nil { - return err - } - } - } - return nil -} - -// TrustedPush handles content trust pushing of an image -func (cli *DockerCli) TrustedPush(ctx context.Context, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig types.AuthConfig, requestPrivilege types.RequestPrivilegeFunc) error { - responseBody, err := cli.ImagePushPrivileged(ctx, authConfig, ref.String(), requestPrivilege) - if err != nil { - return err - } - - defer responseBody.Close() - - // If it is a trusted push we would like to find the target entry which match the - // tag provided in the function and then do an AddTarget later. - target := &client.Target{} - // Count the times of calling for handleTarget, - // if it is called more that once, that should be considered an error in a trusted push. - cnt := 0 - handleTarget := func(aux *json.RawMessage) { - cnt++ - if cnt > 1 { - // handleTarget should only be called one. This will be treated as an error. - return - } - - var pushResult distribution.PushResult - err := json.Unmarshal(*aux, &pushResult) - if err == nil && pushResult.Tag != "" && pushResult.Digest.Validate() == nil { - h, err := hex.DecodeString(pushResult.Digest.Hex()) - if err != nil { - target = nil - return - } - target.Name = registry.ParseReference(pushResult.Tag).String() - target.Hashes = data.Hashes{string(pushResult.Digest.Algorithm()): h} - target.Length = int64(pushResult.Size) - } - } - - var tag string - switch x := ref.(type) { - case reference.Canonical: - return errors.New("cannot push a digest reference") - case reference.NamedTagged: - tag = x.Tag() - } - - // We want trust signatures to always take an explicit tag, - // otherwise it will act as an untrusted push. 
- if tag == "" { - if err = jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil); err != nil { - return err - } - fmt.Fprintln(cli.out, "No tag specified, skipping trust metadata push") - return nil - } - - if err = jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, handleTarget); err != nil { - return err - } - - if cnt > 1 { - return fmt.Errorf("internal error: only one call to handleTarget expected") - } - - if target == nil { - fmt.Fprintln(cli.out, "No targets found, please provide a specific tag in order to sign it") - return nil - } - - fmt.Fprintln(cli.out, "Signing and pushing trust metadata") - - repo, err := cli.getNotaryRepository(repoInfo, authConfig, "push", "pull") - if err != nil { - fmt.Fprintf(cli.out, "Error establishing connection to notary repository: %s\n", err) - return err - } - - // get the latest repository metadata so we can figure out which roles to sign - err = repo.Update(false) - - switch err.(type) { - case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist: - keys := repo.CryptoService.ListKeys(data.CanonicalRootRole) - var rootKeyID string - // always select the first root key - if len(keys) > 0 { - sort.Strings(keys) - rootKeyID = keys[0] - } else { - rootPublicKey, err := repo.CryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey) - if err != nil { - return err - } - rootKeyID = rootPublicKey.ID() - } - - // Initialize the notary repository with a remotely managed snapshot key - if err := repo.Initialize(rootKeyID, data.CanonicalSnapshotRole); err != nil { - return notaryError(repoInfo.FullName(), err) - } - fmt.Fprintf(cli.out, "Finished initializing %q\n", repoInfo.FullName()) - err = repo.AddTarget(target, data.CanonicalTargetsRole) - case nil: - // already initialized and we have successfully downloaded the latest metadata - err = cli.addTargetToAllSignableRoles(repo, target) - default: - return notaryError(repoInfo.FullName(), err) - } - - if err == nil { - err = repo.Publish() - } - - if err != nil { - fmt.Fprintf(cli.out, "Failed to sign %q:%s - %s\n", repoInfo.FullName(), tag, err.Error()) - return notaryError(repoInfo.FullName(), err) - } - - fmt.Fprintf(cli.out, "Successfully signed %q:%s\n", repoInfo.FullName(), tag) - return nil -} - -// Attempt to add the image target to all the top level delegation roles we can -// (based on whether we have the signing key and whether the role's path allows -// us to). -// If there are no delegation roles, we add to the targets role. -func (cli *DockerCli) addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { - var signableRoles []string - - // translate the full key names, which includes the GUN, into just the key IDs - allCanonicalKeyIDs := make(map[string]struct{}) - for fullKeyID := range repo.CryptoService.ListAllKeys() { - allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{} - } - - allDelegationRoles, err := repo.GetDelegationRoles() - if err != nil { - return err - } - - // if there are no delegation roles, then just try to sign it into the targets role - if len(allDelegationRoles) == 0 { - return repo.AddTarget(target, data.CanonicalTargetsRole) - } - - // there are delegation roles, find every delegation role we have a key for, and - // attempt to sign into into all those roles. - for _, delegationRole := range allDelegationRoles { - // We do not support signing any delegation role that isn't a direct child of the targets role. 
 - -// Attempt to add the image target to all the top level delegation roles we can -// (based on whether we have the signing key and whether the role's path allows -// us to). -// If there are no delegation roles, we add to the targets role. -func (cli *DockerCli) addTargetToAllSignableRoles(repo *client.NotaryRepository, target *client.Target) error { - var signableRoles []string - - // translate the full key names, which include the GUN, into just the key IDs - allCanonicalKeyIDs := make(map[string]struct{}) - for fullKeyID := range repo.CryptoService.ListAllKeys() { - allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{} - } - - allDelegationRoles, err := repo.GetDelegationRoles() - if err != nil { - return err - } - - // if there are no delegation roles, then just try to sign it into the targets role - if len(allDelegationRoles) == 0 { - return repo.AddTarget(target, data.CanonicalTargetsRole) - } - - // there are delegation roles, find every delegation role we have a key for, and - // attempt to sign into all those roles. - for _, delegationRole := range allDelegationRoles { - // We do not support signing any delegation role that isn't a direct child of the targets role. - // Also don't bother checking the keys if we can't add the target - // to this role due to path restrictions - if path.Dir(delegationRole.Name) != data.CanonicalTargetsRole || !delegationRole.CheckPaths(target.Name) { - continue - } - - for _, canonicalKeyID := range delegationRole.KeyIDs { - if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok { - signableRoles = append(signableRoles, delegationRole.Name) - break - } - } - } - - if len(signableRoles) == 0 { - return fmt.Errorf("no valid signing keys for delegation roles") - } - - return repo.AddTarget(target, signableRoles...) -} - -// ImagePullPrivileged pulls the image and displays it to the output -func (cli *DockerCli) ImagePullPrivileged(ctx context.Context, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc, all bool) error { - - encodedAuth, err := EncodeAuthToBase64(authConfig) - if err != nil { - return err - } - options := types.ImagePullOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - All: all, - } - - responseBody, err := cli.client.ImagePull(ctx, ref, options) - if err != nil { - return err - } - defer responseBody.Close() - - return jsonmessage.DisplayJSONMessagesStream(responseBody, cli.out, cli.outFd, cli.isTerminalOut, nil) -} - -// ImagePushPrivileged pushes the image -func (cli *DockerCli) ImagePushPrivileged(ctx context.Context, authConfig types.AuthConfig, ref string, requestPrivilege types.RequestPrivilegeFunc) (io.ReadCloser, error) { - encodedAuth, err := EncodeAuthToBase64(authConfig) - if err != nil { - return nil, err - } - options := types.ImagePushOptions{ - RegistryAuth: encodedAuth, - PrivilegeFunc: requestPrivilege, - } - - return cli.client.ImagePush(ctx, ref, options) -} diff --git a/api/client/trust_test.go b/api/client/trust_test.go deleted file mode 100644 index ec95bd9d35..0000000000 --- a/api/client/trust_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package client - -import ( - "os" - "testing" - - "github.com/docker/docker/registry" - registrytypes "github.com/docker/engine-api/types/registry" -) - -func unsetENV() { - os.Unsetenv("DOCKER_CONTENT_TRUST") - os.Unsetenv("DOCKER_CONTENT_TRUST_SERVER") -} - -func TestENVTrustServer(t *testing.T) { - defer unsetENV() - indexInfo := &registrytypes.IndexInfo{Name: "testserver"} - if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "https://notary-test.com:5000"); err != nil { - t.Fatal("Failed to set ENV variable") - } - output, err := trustServer(indexInfo) - expectedStr := "https://notary-test.com:5000" - if err != nil || output != expectedStr { - t.Fatalf("Expected server to be %s, got %s", expectedStr, output) - } -} - -func TestHTTPENVTrustServer(t *testing.T) { - defer unsetENV() - indexInfo := &registrytypes.IndexInfo{Name: "testserver"} - if err := os.Setenv("DOCKER_CONTENT_TRUST_SERVER", "http://notary-test.com:5000"); err != nil { - t.Fatal("Failed to set ENV variable") - } - _, err := trustServer(indexInfo) - if err == nil { - t.Fatal("Expected error with invalid scheme") - } -} - -func TestOfficialTrustServer(t *testing.T) { - indexInfo := &registrytypes.IndexInfo{Name: "testserver", Official: true} - output, err := trustServer(indexInfo) - if err != nil || output != registry.NotaryServer { - t.Fatalf("Expected server to be %s, got %s", registry.NotaryServer, output) - } -} - -func TestNonOfficialTrustServer(t *testing.T) { - indexInfo := &registrytypes.IndexInfo{Name: "testserver", Official: false} - output, err := trustServer(indexInfo) - expectedStr := "https://" + indexInfo.Name - if err != nil || 
output != expectedStr { - t.Fatalf("Expected server to be %s, got %s", expectedStr, output) - } -} diff --git a/api/client/update.go b/api/client/update.go deleted file mode 100644 index ee1b49de72..0000000000 --- a/api/client/update.go +++ /dev/null @@ -1,120 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - "golang.org/x/net/context" - - Cli "github.com/docker/docker/cli" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types/container" - "github.com/docker/go-units" -) - -// CmdUpdate updates resources of one or more containers. -// -// Usage: docker update [OPTIONS] CONTAINER [CONTAINER...] -func (cli *DockerCli) CmdUpdate(args ...string) error { - cmd := Cli.Subcmd("update", []string{"CONTAINER [CONTAINER...]"}, Cli.DockerCommands["update"].Description, true) - flBlkioWeight := cmd.Uint16([]string{"-blkio-weight"}, 0, "Block IO (relative weight), between 10 and 1000") - flCPUPeriod := cmd.Int64([]string{"-cpu-period"}, 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flCPUQuota := cmd.Int64([]string{"-cpu-quota"}, 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flCpusetCpus := cmd.String([]string{"-cpuset-cpus"}, "", "CPUs in which to allow execution (0-3, 0,1)") - flCpusetMems := cmd.String([]string{"-cpuset-mems"}, "", "MEMs in which to allow execution (0-3, 0,1)") - flCPUShares := cmd.Int64([]string{"c", "-cpu-shares"}, 0, "CPU shares (relative weight)") - flMemoryString := cmd.String([]string{"m", "-memory"}, "", "Memory limit") - flMemoryReservation := cmd.String([]string{"-memory-reservation"}, "", "Memory soft limit") - flMemorySwap := cmd.String([]string{"-memory-swap"}, "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flKernelMemory := cmd.String([]string{"-kernel-memory"}, "", "Kernel memory limit") - flRestartPolicy := cmd.String([]string{"-restart"}, "", "Restart policy to apply when a container exits") - - cmd.Require(flag.Min, 1) - cmd.ParseFlags(args, true) - if cmd.NFlag() == 0 { - return fmt.Errorf("You must provide one or more flags when using this command.") - } - - var err error - var flMemory int64 - if *flMemoryString != "" { - flMemory, err = units.RAMInBytes(*flMemoryString) - if err != nil { - return err - } - } - - var memoryReservation int64 - if *flMemoryReservation != "" { - memoryReservation, err = units.RAMInBytes(*flMemoryReservation) - if err != nil { - return err - } - } - - var memorySwap int64 - if *flMemorySwap != "" { - if *flMemorySwap == "-1" { - memorySwap = -1 - } else { - memorySwap, err = units.RAMInBytes(*flMemorySwap) - if err != nil { - return err - } - } - } - - var kernelMemory int64 - if *flKernelMemory != "" { - kernelMemory, err = units.RAMInBytes(*flKernelMemory) - if err != nil { - return err - } - } - - var restartPolicy container.RestartPolicy - if *flRestartPolicy != "" { - restartPolicy, err = opts.ParseRestartPolicy(*flRestartPolicy) - if err != nil { - return err - } - } - - resources := container.Resources{ - BlkioWeight: *flBlkioWeight, - CpusetCpus: *flCpusetCpus, - CpusetMems: *flCpusetMems, - CPUShares: *flCPUShares, - Memory: flMemory, - MemoryReservation: memoryReservation, - MemorySwap: memorySwap, - KernelMemory: kernelMemory, - CPUPeriod: *flCPUPeriod, - CPUQuota: *flCPUQuota, - } - - updateConfig := container.UpdateConfig{ - Resources: resources, - RestartPolicy: restartPolicy, - } - - ctx := context.Background() - - names := cmd.Args() - var errs []string - - for _, name := range names { - 
if err := cli.client.ContainerUpdate(ctx, name, updateConfig); err != nil { - errs = append(errs, err.Error()) - } else { - fmt.Fprintf(cli.out, "%s\n", name) - } - } - - if len(errs) > 0 { - return fmt.Errorf("%s", strings.Join(errs, "\n")) - } - - return nil -} diff --git a/api/client/utils.go b/api/client/utils.go deleted file mode 100644 index 99b0f86189..0000000000 --- a/api/client/utils.go +++ /dev/null @@ -1,190 +0,0 @@ -package client - -import ( - "fmt" - "io" - "io/ioutil" - "os" - gosignal "os/signal" - "path/filepath" - "runtime" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/term" - "github.com/docker/engine-api/client" - "github.com/docker/engine-api/types" -) - -func (cli *DockerCli) resizeTty(ctx context.Context, id string, isExec bool) { - height, width := cli.GetTtySize() - cli.ResizeTtyTo(ctx, id, height, width, isExec) -} - -// ResizeTtyTo resizes tty to specific height and width -// TODO: this can be unexported again once all container related commands move to package container -func (cli *DockerCli) ResizeTtyTo(ctx context.Context, id string, height, width int, isExec bool) { - if height == 0 && width == 0 { - return - } - - options := types.ResizeOptions{ - Height: height, - Width: width, - } - - var err error - if isExec { - err = cli.client.ContainerExecResize(ctx, id, options) - } else { - err = cli.client.ContainerResize(ctx, id, options) - } - - if err != nil { - logrus.Debugf("Error resize: %s", err) - } -} - -// getExecExitCode perform an inspect on the exec command. It returns -// the running state and the exit code. -func (cli *DockerCli) getExecExitCode(ctx context.Context, execID string) (bool, int, error) { - resp, err := cli.client.ContainerExecInspect(ctx, execID) - if err != nil { - // If we can't connect, then the daemon probably died. 
- if err != client.ErrConnectionFailed { - return false, -1, err - } - return false, -1, nil - } - - return resp.Running, resp.ExitCode, nil -} - -// MonitorTtySize updates the container tty size when the terminal tty changes size -func (cli *DockerCli) MonitorTtySize(ctx context.Context, id string, isExec bool) error { - cli.resizeTty(ctx, id, isExec) - - if runtime.GOOS == "windows" { - go func() { - prevH, prevW := cli.GetTtySize() - for { - time.Sleep(time.Millisecond * 250) - h, w := cli.GetTtySize() - - if prevW != w || prevH != h { - cli.resizeTty(ctx, id, isExec) - } - prevH = h - prevW = w - } - }() - } else { - sigchan := make(chan os.Signal, 1) - gosignal.Notify(sigchan, signal.SIGWINCH) - go func() { - for range sigchan { - cli.resizeTty(ctx, id, isExec) - } - }() - } - return nil -} - -// GetTtySize returns the height and width in characters of the tty -func (cli *DockerCli) GetTtySize() (int, int) { - if !cli.isTerminalOut { - return 0, 0 - } - ws, err := term.GetWinsize(cli.outFd) - if err != nil { - logrus.Debugf("Error getting size: %s", err) - if ws == nil { - return 0, 0 - } - } - return int(ws.Height), int(ws.Width) -} - -// CopyToFile writes the content of the reader to the specified file -func CopyToFile(outfile string, r io.Reader) error { - tmpFile, err := ioutil.TempFile(filepath.Dir(outfile), ".docker_temp_") - if err != nil { - return err - } - - tmpPath := tmpFile.Name() - - _, err = io.Copy(tmpFile, r) - tmpFile.Close() - - if err != nil { - os.Remove(tmpPath) - return err - } - - if err = os.Rename(tmpPath, outfile); err != nil { - os.Remove(tmpPath) - return err - } - - return nil -} - -// ForwardAllSignals forwards signals to the container -// TODO: this can be unexported again once all container commands are under -// api/client/container -func (cli *DockerCli) ForwardAllSignals(ctx context.Context, cid string) chan os.Signal { - sigc := make(chan os.Signal, 128) - signal.CatchAll(sigc) - go func() { - for s := range sigc { - if s == signal.SIGCHLD || s == signal.SIGPIPE { - continue - } - var sig string - for sigStr, sigN := range signal.SignalMap { - if sigN == s { - sig = sigStr - break - } - } - if sig == "" { - fmt.Fprintf(cli.err, "Unsupported signal: %v. Discarding.\n", s) - continue - } - - if err := cli.client.ContainerKill(ctx, cid, sig); err != nil { - logrus.Debugf("Error sending signal: %s", err) - } - } - }() - return sigc -} - -// capitalizeFirst capitalizes the first character of string -func capitalizeFirst(s string) string { - switch l := len(s); l { - case 0: - return s - case 1: - return strings.ToLower(s) - default: - return strings.ToUpper(string(s[0])) + strings.ToLower(s[1:]) - } -} - -// PrettyPrint outputs arbitrary data for human formatted output by uppercasing the first letter. 
-func PrettyPrint(i interface{}) string { - switch t := i.(type) { - case nil: - return "None" - case string: - return capitalizeFirst(t) - default: - return capitalizeFirst(fmt.Sprintf("%s", t)) - } -} diff --git a/api/client/volume/cmd.go b/api/client/volume/cmd.go deleted file mode 100644 index 7a18a8b64a..0000000000 --- a/api/client/volume/cmd.go +++ /dev/null @@ -1,48 +0,0 @@ -package volume - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" -) - -// NewVolumeCommand returns a cobra command for `volume` subcommands -func NewVolumeCommand(dockerCli *client.DockerCli) *cobra.Command { - cmd := &cobra.Command{ - Use: "volume COMMAND", - Short: "Manage Docker volumes", - Long: volumeDescription, - Args: cli.NoArgs, - Run: func(cmd *cobra.Command, args []string) { - fmt.Fprintf(dockerCli.Err(), "\n"+cmd.UsageString()) - }, - } - cmd.AddCommand( - newCreateCommand(dockerCli), - newInspectCommand(dockerCli), - newListCommand(dockerCli), - newRemoveCommand(dockerCli), - ) - return cmd -} - -var volumeDescription = ` -The **docker volume** command has subcommands for managing data volumes. A data -volume is a specially-designated directory that by-passes storage driver -management. - -Data volumes persist data independent of a container's life cycle. When you -delete a container, the Engine daemon does not delete any data volumes. You can -share volumes across multiple containers. Moreover, you can share data volumes -with other computing resources in your system. - -To see help for a subcommand, use: - - docker volume CMD help - -For full details on using docker volume visit Docker's online documentation. - -` diff --git a/api/client/volume/create.go b/api/client/volume/create.go deleted file mode 100644 index 97ed064f97..0000000000 --- a/api/client/volume/create.go +++ /dev/null @@ -1,102 +0,0 @@ -package volume - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/opts" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/spf13/cobra" -) - -type createOptions struct { - name string - driver string - driverOpts opts.MapOpts - labels []string -} - -func newCreateCommand(dockerCli *client.DockerCli) *cobra.Command { - opts := createOptions{ - driverOpts: *opts.NewMapOpts(nil, nil), - } - - cmd := &cobra.Command{ - Use: "create [OPTIONS]", - Short: "Create a volume", - Long: createDescription, - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runCreate(dockerCli, opts) - }, - } - flags := cmd.Flags() - flags.StringVarP(&opts.driver, "driver", "d", "local", "Specify volume driver name") - flags.StringVar(&opts.name, "name", "", "Specify volume name") - flags.VarP(&opts.driverOpts, "opt", "o", "Set driver specific options") - flags.StringSliceVar(&opts.labels, "label", []string{}, "Set metadata for a volume") - - return cmd -} - -func runCreate(dockerCli *client.DockerCli, opts createOptions) error { - client := dockerCli.Client() - - volReq := types.VolumeCreateRequest{ - Driver: opts.driver, - DriverOpts: opts.driverOpts.GetAll(), - Name: opts.name, - Labels: runconfigopts.ConvertKVStringsToMap(opts.labels), - } - - vol, err := client.VolumeCreate(context.Background(), volReq) - if err != nil { - return err - } - - fmt.Fprintf(dockerCli.Out(), "%s\n", vol.Name) - return nil -} - -var createDescription = ` -Creates a new volume that 
containers can consume and store data in. If a name -is not specified, Docker generates a random name. You create a volume and then -configure the container to use it, for example: - - $ docker volume create --name hello - hello - $ docker run -d -v hello:/world busybox ls /world - -The mount is created inside the container's **/world** directory. Docker does -not support relative paths for mount points inside the container. - -Multiple containers can use the same volume at the same time. This is -useful if two containers need access to shared data. For example, if one -container writes and the other reads the data. - -## Driver specific options - -Some volume drivers may take options to customize the volume creation. Use the -**-o** or **--opt** flags to pass driver options: - - $ docker volume create --driver fake --opt tardis=blue --opt timey=wimey - -These options are passed directly to the volume driver. Options for different -volume drivers may do different things (or nothing at all). - -The built-in **local** driver on Windows does not support any options. - -The built-in **local** driver on Linux accepts options similar to the Linux -**mount** command: - - $ docker volume create --driver local --opt type=tmpfs --opt device=tmpfs --opt o=size=100m,uid=1000 - -Another example: - - $ docker volume create --driver local --opt type=btrfs --opt device=/dev/sda2 - -` diff --git a/api/client/volume/inspect.go b/api/client/volume/inspect.go deleted file mode 100644 index 7e310a5783..0000000000 --- a/api/client/volume/inspect.go +++ /dev/null @@ -1,55 +0,0 @@ -package volume - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/inspect" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -type inspectOptions struct { - format string - names []string -} - -func newInspectCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts inspectOptions - - cmd := &cobra.Command{ - Use: "inspect [OPTIONS] VOLUME [VOLUME...]", - Short: "Display detailed information on one or more volumes", - Long: inspectDescription, - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - opts.names = args - return runInspect(dockerCli, opts) - }, - } - - cmd.Flags().StringVarP(&opts.format, "format", "f", "", "Format the output using the given Go template") - - return cmd -} - -func runInspect(dockerCli *client.DockerCli, opts inspectOptions) error { - client := dockerCli.Client() - - ctx := context.Background() - - getVolFunc := func(name string) (interface{}, []byte, error) { - i, err := client.VolumeInspect(ctx, name) - return i, nil, err - } - - return inspect.Inspect(dockerCli.Out(), opts.names, opts.format, getVolFunc) -} - -var inspectDescription = ` -Returns information about one or more volumes. By default, this command renders -all results in a JSON array. You can specify an alternate format; the -given template is executed for each result. Go's https://golang.org/pkg/text/template/ -package describes all the details of the format. - -`
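The --format flag above accepts a Go template executed against each inspected volume; for example (hypothetical volume name, with typical local-driver output):

    $ docker volume inspect --format '{{ .Mountpoint }}' hello
    /var/lib/docker/volumes/hello/_data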
diff --git a/api/client/volume/list.go b/api/client/volume/list.go deleted file mode 100644 index 0cebe4d556..0000000000 --- a/api/client/volume/list.go +++ /dev/null @@ -1,99 +0,0 @@ -package volume - -import ( - "fmt" - "sort" - "text/tabwriter" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/spf13/cobra" -) - -type byVolumeName []*types.Volume - -func (r byVolumeName) Len() int { return len(r) } -func (r byVolumeName) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byVolumeName) Less(i, j int) bool { - return r[i].Name < r[j].Name -} - -type listOptions struct { - quiet bool - filter []string -} - -func newListCommand(dockerCli *client.DockerCli) *cobra.Command { - var opts listOptions - - cmd := &cobra.Command{ - Use: "ls [OPTIONS]", - Aliases: []string{"list"}, - Short: "List volumes", - Long: listDescription, - Args: cli.NoArgs, - RunE: func(cmd *cobra.Command, args []string) error { - return runList(dockerCli, opts) - }, - } - - flags := cmd.Flags() - flags.BoolVarP(&opts.quiet, "quiet", "q", false, "Only display volume names") - flags.StringSliceVarP(&opts.filter, "filter", "f", []string{}, "Provide filter values (e.g. 'dangling=true')") - - return cmd -} - -func runList(dockerCli *client.DockerCli, opts listOptions) error { - client := dockerCli.Client() - - volFilterArgs := filters.NewArgs() - for _, f := range opts.filter { - var err error - volFilterArgs, err = filters.ParseFlag(f, volFilterArgs) - if err != nil { - return err - } - } - - volumes, err := client.VolumeList(context.Background(), volFilterArgs) - if err != nil { - return err - } - - w := tabwriter.NewWriter(dockerCli.Out(), 20, 1, 3, ' ', 0) - if !opts.quiet { - for _, warn := range volumes.Warnings { - fmt.Fprintln(dockerCli.Err(), warn) - } - fmt.Fprintf(w, "DRIVER \tVOLUME NAME") - fmt.Fprintf(w, "\n") - } - - sort.Sort(byVolumeName(volumes.Volumes)) - for _, vol := range volumes.Volumes { - if opts.quiet { - fmt.Fprintln(w, vol.Name) - continue - } - fmt.Fprintf(w, "%s\t%s\n", vol.Driver, vol.Name) - } - w.Flush() - return nil -} - -var listDescription = ` - -Lists all the volumes Docker knows about. You can filter using the **-f** or -**--filter** flag. The filtering format is a **key=value** pair. To specify -more than one filter, pass multiple flags (for example, -**--filter "foo=bar" --filter "bif=baz"**) - -There is a single supported filter **dangling=value** which takes a boolean of -**true** or **false**. 
- -` diff --git a/api/client/volume/remove.go b/api/client/volume/remove.go deleted file mode 100644 index 0ed982572e..0000000000 --- a/api/client/volume/remove.go +++ /dev/null @@ -1,54 +0,0 @@ -package volume - -import ( - "fmt" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/spf13/cobra" -) - -func newRemoveCommand(dockerCli *client.DockerCli) *cobra.Command { - return &cobra.Command{ - Use: "rm VOLUME [VOLUME...]", - Aliases: []string{"remove"}, - Short: "Remove one or more volumes", - Long: removeDescription, - Example: removeExample, - Args: cli.RequiresMinArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - return runRemove(dockerCli, args) - }, - } -} - -func runRemove(dockerCli *client.DockerCli, volumes []string) error { - client := dockerCli.Client() - ctx := context.Background() - status := 0 - - for _, name := range volumes { - if err := client.VolumeRemove(ctx, name); err != nil { - fmt.Fprintf(dockerCli.Err(), "%s\n", err) - status = 1 - continue - } - fmt.Fprintf(dockerCli.Out(), "%s\n", name) - } - - if status != 0 { - return cli.StatusError{StatusCode: status} - } - return nil -} - -var removeDescription = ` -Remove one or more volumes. You cannot remove a volume that is in use by a container. -` - -var removeExample = ` -$ docker volume rm hello -hello -` diff --git a/api/common.go b/api/common.go deleted file mode 100644 index d62e65e8af..0000000000 --- a/api/common.go +++ /dev/null @@ -1,169 +0,0 @@ -package api - -import ( - "encoding/json" - "encoding/pem" - "fmt" - "mime" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/system" - "github.com/docker/engine-api/types" - "github.com/docker/libtrust" -) - -// Common constants for daemon and client. -const ( - // Version of Current REST API - DefaultVersion string = "1.24" - - // MinVersion represents Minimum REST API version supported - MinVersion string = "1.12" - - // NoBaseImageSpecifier is the symbol used by the FROM - // command to specify that no base image is to be used. - NoBaseImageSpecifier string = "scratch" -) - -// byPortInfo is a temporary type used to sort types.Port by its fields -type byPortInfo []types.Port - -func (r byPortInfo) Len() int { return len(r) } -func (r byPortInfo) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byPortInfo) Less(i, j int) bool { - if r[i].PrivatePort != r[j].PrivatePort { - return r[i].PrivatePort < r[j].PrivatePort - } - - if r[i].IP != r[j].IP { - return r[i].IP < r[j].IP - } - - if r[i].PublicPort != r[j].PublicPort { - return r[i].PublicPort < r[j].PublicPort - } - - return r[i].Type < r[j].Type -} - -// DisplayablePorts returns formatted string representing open ports of container -// e.g. 
"0.0.0.0:80->9090/tcp, 9988/tcp" -// it's used by command 'docker ps' -func DisplayablePorts(ports []types.Port) string { - type portGroup struct { - first int - last int - } - groupMap := make(map[string]*portGroup) - var result []string - var hostMappings []string - var groupMapKeys []string - sort.Sort(byPortInfo(ports)) - for _, port := range ports { - current := port.PrivatePort - portKey := port.Type - if port.IP != "" { - if port.PublicPort != current { - hostMappings = append(hostMappings, fmt.Sprintf("%s:%d->%d/%s", port.IP, port.PublicPort, port.PrivatePort, port.Type)) - continue - } - portKey = fmt.Sprintf("%s/%s", port.IP, port.Type) - } - group := groupMap[portKey] - - if group == nil { - groupMap[portKey] = &portGroup{first: current, last: current} - // record order that groupMap keys are created - groupMapKeys = append(groupMapKeys, portKey) - continue - } - if current == (group.last + 1) { - group.last = current - continue - } - - result = append(result, formGroup(portKey, group.first, group.last)) - groupMap[portKey] = &portGroup{first: current, last: current} - } - for _, portKey := range groupMapKeys { - g := groupMap[portKey] - result = append(result, formGroup(portKey, g.first, g.last)) - } - result = append(result, hostMappings...) - return strings.Join(result, ", ") -} - -func formGroup(key string, start, last int) string { - parts := strings.Split(key, "/") - groupType := parts[0] - var ip string - if len(parts) > 1 { - ip = parts[0] - groupType = parts[1] - } - group := strconv.Itoa(start) - if start != last { - group = fmt.Sprintf("%s-%d", group, last) - } - if ip != "" { - group = fmt.Sprintf("%s:%s->%s", ip, group, group) - } - return fmt.Sprintf("%s/%s", group, groupType) -} - -// MatchesContentType validates the content type against the expected one -func MatchesContentType(contentType, expectedType string) bool { - mimetype, _, err := mime.ParseMediaType(contentType) - if err != nil { - logrus.Errorf("Error parsing media type: %s error: %v", contentType, err) - } - return err == nil && mimetype == expectedType -} - -// LoadOrCreateTrustKey attempts to load the libtrust key at the given path, -// otherwise generates a new one -func LoadOrCreateTrustKey(trustKeyPath string) (libtrust.PrivateKey, error) { - err := system.MkdirAll(filepath.Dir(trustKeyPath), 0700) - if err != nil { - return nil, err - } - trustKey, err := libtrust.LoadKeyFile(trustKeyPath) - if err == libtrust.ErrKeyFileDoesNotExist { - trustKey, err = libtrust.GenerateECP256PrivateKey() - if err != nil { - return nil, fmt.Errorf("Error generating key: %s", err) - } - encodedKey, err := serializePrivateKey(trustKey, filepath.Ext(trustKeyPath)) - if err != nil { - return nil, fmt.Errorf("Error serializing key: %s", err) - } - if err := ioutils.AtomicWriteFile(trustKeyPath, encodedKey, os.FileMode(0600)); err != nil { - return nil, fmt.Errorf("Error saving key file: %s", err) - } - } else if err != nil { - return nil, fmt.Errorf("Error loading key file %s: %s", trustKeyPath, err) - } - return trustKey, nil -} - -func serializePrivateKey(key libtrust.PrivateKey, ext string) (encoded []byte, err error) { - if ext == ".json" || ext == ".jwk" { - encoded, err = json.Marshal(key) - if err != nil { - return nil, fmt.Errorf("unable to encode private key JWK: %s", err) - } - } else { - pemBlock, err := key.PEMBlock() - if err != nil { - return nil, fmt.Errorf("unable to encode private key PEM: %s", err) - } - encoded = pem.EncodeToMemory(pemBlock) - } - return -} diff --git a/api/common_test.go 
b/api/common_test.go deleted file mode 100644 index c214660cc4..0000000000 --- a/api/common_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package api - -import ( - "io/ioutil" - "path/filepath" - "testing" - - "os" - - "github.com/docker/engine-api/types" -) - -type ports struct { - ports []types.Port - expected string -} - -// DisplayablePorts -func TestDisplayablePorts(t *testing.T) { - cases := []ports{ - { - []types.Port{ - { - PrivatePort: 9988, - Type: "tcp", - }, - }, - "9988/tcp"}, - { - []types.Port{ - { - PrivatePort: 9988, - Type: "udp", - }, - }, - "9988/udp", - }, - { - []types.Port{ - { - IP: "0.0.0.0", - PrivatePort: 9988, - Type: "tcp", - }, - }, - "0.0.0.0:0->9988/tcp", - }, - { - []types.Port{ - { - PrivatePort: 9988, - PublicPort: 8899, - Type: "tcp", - }, - }, - "9988/tcp", - }, - { - []types.Port{ - { - IP: "4.3.2.1", - PrivatePort: 9988, - PublicPort: 8899, - Type: "tcp", - }, - }, - "4.3.2.1:8899->9988/tcp", - }, - { - []types.Port{ - { - IP: "4.3.2.1", - PrivatePort: 9988, - PublicPort: 9988, - Type: "tcp", - }, - }, - "4.3.2.1:9988->9988/tcp", - }, - { - []types.Port{ - { - PrivatePort: 9988, - Type: "udp", - }, { - PrivatePort: 9988, - Type: "udp", - }, - }, - "9988/udp, 9988/udp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PublicPort: 9998, - PrivatePort: 9998, - Type: "udp", - }, { - IP: "1.2.3.4", - PublicPort: 9999, - PrivatePort: 9999, - Type: "udp", - }, - }, - "1.2.3.4:9998-9999->9998-9999/udp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PublicPort: 8887, - PrivatePort: 9998, - Type: "udp", - }, { - IP: "1.2.3.4", - PublicPort: 8888, - PrivatePort: 9999, - Type: "udp", - }, - }, - "1.2.3.4:8887->9998/udp, 1.2.3.4:8888->9999/udp", - }, - { - []types.Port{ - { - PrivatePort: 9998, - Type: "udp", - }, { - PrivatePort: 9999, - Type: "udp", - }, - }, - "9998-9999/udp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PrivatePort: 6677, - PublicPort: 7766, - Type: "tcp", - }, { - PrivatePort: 9988, - PublicPort: 8899, - Type: "udp", - }, - }, - "9988/udp, 1.2.3.4:7766->6677/tcp", - }, - { - []types.Port{ - { - IP: "1.2.3.4", - PrivatePort: 9988, - PublicPort: 8899, - Type: "udp", - }, { - IP: "1.2.3.4", - PrivatePort: 9988, - PublicPort: 8899, - Type: "tcp", - }, { - IP: "4.3.2.1", - PrivatePort: 2233, - PublicPort: 3322, - Type: "tcp", - }, - }, - "4.3.2.1:3322->2233/tcp, 1.2.3.4:8899->9988/tcp, 1.2.3.4:8899->9988/udp", - }, - { - []types.Port{ - { - PrivatePort: 9988, - PublicPort: 8899, - Type: "udp", - }, { - IP: "1.2.3.4", - PrivatePort: 6677, - PublicPort: 7766, - Type: "tcp", - }, { - IP: "4.3.2.1", - PrivatePort: 2233, - PublicPort: 3322, - Type: "tcp", - }, - }, - "9988/udp, 4.3.2.1:3322->2233/tcp, 1.2.3.4:7766->6677/tcp", - }, - { - []types.Port{ - { - PrivatePort: 80, - Type: "tcp", - }, { - PrivatePort: 1024, - Type: "tcp", - }, { - PrivatePort: 80, - Type: "udp", - }, { - PrivatePort: 1024, - Type: "udp", - }, { - IP: "1.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "tcp", - }, { - IP: "1.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "udp", - }, { - IP: "1.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "tcp", - }, { - IP: "1.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "udp", - }, { - IP: "2.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "tcp", - }, { - IP: "2.1.1.1", - PublicPort: 80, - PrivatePort: 1024, - Type: "udp", - }, { - IP: "2.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "tcp", - }, { - IP: "2.1.1.1", - PublicPort: 1024, - PrivatePort: 80, - Type: "udp", - }, - }, - "80/tcp, 80/udp, 
1024/tcp, 1024/udp, 1.1.1.1:1024->80/tcp, 1.1.1.1:1024->80/udp, 2.1.1.1:1024->80/tcp, 2.1.1.1:1024->80/udp, 1.1.1.1:80->1024/tcp, 1.1.1.1:80->1024/udp, 2.1.1.1:80->1024/tcp, 2.1.1.1:80->1024/udp", - }, - } - - for _, port := range cases { - actual := DisplayablePorts(port.ports) - if port.expected != actual { - t.Fatalf("Expected %s, got %s.", port.expected, actual) - } - } -} - -// MatchesContentType -func TestJsonContentType(t *testing.T) { - if !MatchesContentType("application/json", "application/json") { - t.Fail() - } - - if !MatchesContentType("application/json; charset=utf-8", "application/json") { - t.Fail() - } - - if MatchesContentType("dockerapplication/json", "application/json") { - t.Fail() - } -} - -// LoadOrCreateTrustKey -func TestLoadOrCreateTrustKeyInvalidKeyFile(t *testing.T) { - tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpKeyFolderPath) - - tmpKeyFile, err := ioutil.TempFile(tmpKeyFolderPath, "keyfile") - if err != nil { - t.Fatal(err) - } - - if _, err := LoadOrCreateTrustKey(tmpKeyFile.Name()); err == nil { - t.Fatalf("expected an error, got nothing.") - } - -} - -func TestLoadOrCreateTrustKeyCreateKey(t *testing.T) { - tmpKeyFolderPath, err := ioutil.TempDir("", "api-trustkey-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpKeyFolderPath) - - // Without the need to create the folder hierarchy - tmpKeyFile := filepath.Join(tmpKeyFolderPath, "keyfile") - - if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { - t.Fatalf("expected a new key file, got : %v and %v", err, key) - } - - if _, err := os.Stat(tmpKeyFile); err != nil { - t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) - } - - // With the need to create the folder hierarchy as tmpKeyFie is in a path - // where some folders do not exist. 
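-	// (This works because LoadOrCreateTrustKey calls system.MkdirAll on the
-	// key file's parent directory with mode 0700, so intermediate directories
-	// such as "folder/hierarchy" below are created on demand.)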
- tmpKeyFile = filepath.Join(tmpKeyFolderPath, "folder/hierarchy/keyfile") - - if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { - t.Fatalf("expected a new key file, got : %v and %v", err, key) - } - - if _, err := os.Stat(tmpKeyFile); err != nil { - t.Fatalf("Expected to find a file %s, got %v", tmpKeyFile, err) - } - - // With no path at all - defer os.Remove("keyfile") - if key, err := LoadOrCreateTrustKey("keyfile"); err != nil || key == nil { - t.Fatalf("expected a new key file, got : %v and %v", err, key) - } - - if _, err := os.Stat("keyfile"); err != nil { - t.Fatalf("Expected to find a file keyfile, got %v", err) - } -} - -func TestLoadOrCreateTrustKeyLoadValidKey(t *testing.T) { - tmpKeyFile := filepath.Join("fixtures", "keyfile") - - if key, err := LoadOrCreateTrustKey(tmpKeyFile); err != nil || key == nil { - t.Fatalf("expected a key file, got : %v and %v", err, key) - } -} diff --git a/api/fixtures/keyfile b/api/fixtures/keyfile deleted file mode 100644 index 322f254404..0000000000 --- a/api/fixtures/keyfile +++ /dev/null @@ -1,7 +0,0 @@ ------BEGIN EC PRIVATE KEY----- -keyID: AWX2:I27X:WQFX:IOMK:CNAK:O7PW:VYNB:ZLKC:CVAE:YJP2:SI4A:XXAY - -MHcCAQEEILHTRWdcpKWsnORxSFyBnndJ4ROU41hMtr/GCiLVvwBQoAoGCCqGSM49 -AwEHoUQDQgAElpVFbQ2V2UQKajqdE3fVxJ+/pE/YuEFOxWbOxF2be19BY209/iky -NzeFFK7SLpQ4CBJ7zDVXOHsMzrkY/GquGA== ------END EC PRIVATE KEY----- diff --git a/api/server/httputils/decoder.go b/api/server/httputils/decoder.go deleted file mode 100644 index dbe469cca6..0000000000 --- a/api/server/httputils/decoder.go +++ /dev/null @@ -1,16 +0,0 @@ -package httputils - -import ( - "io" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" -) - -// ContainerDecoder specifies how -// to translate an io.Reader into -// container configuration. -type ContainerDecoder interface { - DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *network.NetworkingConfig, error) - DecodeHostConfig(src io.Reader) (*container.HostConfig, error) -} diff --git a/api/server/httputils/errors.go b/api/server/httputils/errors.go deleted file mode 100644 index da4db97915..0000000000 --- a/api/server/httputils/errors.go +++ /dev/null @@ -1,93 +0,0 @@ -package httputils - -import ( - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions" - "github.com/gorilla/mux" - "google.golang.org/grpc" -) - -// httpStatusError is an interface -// that errors with custom status codes -// implement to tell the api layer -// which response status to set. -type httpStatusError interface { - HTTPErrorStatusCode() int -} - -// inputValidationError is an interface -// that errors generated by invalid -// inputs can implement to tell the -// api layer to set a 400 status code -// in the response. 
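-// A minimal implementer needs only the single method. Illustrative
-// sketch (mirroring the validationError type defined in the container
-// router later in this patch):
-//
-//	type validationError struct{ error }
-//
-//	func (validationError) IsValidationError() bool { return true }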
-type inputValidationError interface { - IsValidationError() bool -} - -// GetHTTPErrorStatusCode retrieve status code from error message -func GetHTTPErrorStatusCode(err error) int { - if err == nil { - logrus.WithFields(logrus.Fields{"error": err}).Error("unexpected HTTP error handling") - return http.StatusInternalServerError - } - - var statusCode int - errMsg := err.Error() - - switch e := err.(type) { - case httpStatusError: - statusCode = e.HTTPErrorStatusCode() - case inputValidationError: - statusCode = http.StatusBadRequest - default: - // FIXME: this is brittle and should not be necessary, but we still need to identify if - // there are errors falling back into this logic. - // If we need to differentiate between different possible error types, - // we should create appropriate error types that implement the httpStatusError interface. - errStr := strings.ToLower(errMsg) - for keyword, status := range map[string]int{ - "not found": http.StatusNotFound, - "no such": http.StatusNotFound, - "bad parameter": http.StatusBadRequest, - "no command": http.StatusBadRequest, - "conflict": http.StatusConflict, - "impossible": http.StatusNotAcceptable, - "wrong login/password": http.StatusUnauthorized, - "unauthorized": http.StatusUnauthorized, - "hasn't been activated": http.StatusForbidden, - "this node": http.StatusNotAcceptable, - } { - if strings.Contains(errStr, keyword) { - statusCode = status - break - } - } - } - - if statusCode == 0 { - statusCode = http.StatusInternalServerError - } - - return statusCode -} - -// MakeErrorHandler makes an HTTP handler that decodes a Docker error and -// returns it in the response. -func MakeErrorHandler(err error) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - statusCode := GetHTTPErrorStatusCode(err) - vars := mux.Vars(r) - if vars["version"] == "" || versions.GreaterThan(vars["version"], "1.23") { - response := &types.ErrorResponse{ - Message: err.Error(), - } - WriteJSON(w, statusCode, response) - } else { - http.Error(w, grpc.ErrorDesc(err), statusCode) - } - } -} diff --git a/api/server/httputils/form.go b/api/server/httputils/form.go deleted file mode 100644 index 20188c12d8..0000000000 --- a/api/server/httputils/form.go +++ /dev/null @@ -1,73 +0,0 @@ -package httputils - -import ( - "fmt" - "net/http" - "path/filepath" - "strconv" - "strings" -) - -// BoolValue transforms a form value in different formats into a boolean type. -func BoolValue(r *http.Request, k string) bool { - s := strings.ToLower(strings.TrimSpace(r.FormValue(k))) - return !(s == "" || s == "0" || s == "no" || s == "false" || s == "none") -} - -// BoolValueOrDefault returns the default bool passed if the query param is -// missing, otherwise it's just a proxy to boolValue above -func BoolValueOrDefault(r *http.Request, k string, d bool) bool { - if _, ok := r.Form[k]; !ok { - return d - } - return BoolValue(r, k) -} - -// Int64ValueOrZero parses a form value into an int64 type. -// It returns 0 if the parsing fails. -func Int64ValueOrZero(r *http.Request, k string) int64 { - val, err := Int64ValueOrDefault(r, k, 0) - if err != nil { - return 0 - } - return val -} - -// Int64ValueOrDefault parses a form value into an int64 type. If there is an -// error, returns the error. If there is no value returns the default value. 
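-// For example, with the form value "42" set, Int64ValueOrDefault(r, "t", 30)
-// returns (42, nil); with no value set it returns (30, nil); with a
-// non-numeric value it returns the strconv error (see form_test.go below).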
-func Int64ValueOrDefault(r *http.Request, field string, def int64) (int64, error) { - if r.Form.Get(field) != "" { - value, err := strconv.ParseInt(r.Form.Get(field), 10, 64) - if err != nil { - return value, err - } - return value, nil - } - return def, nil -} - -// ArchiveOptions stores archive information for different operations. -type ArchiveOptions struct { - Name string - Path string -} - -// ArchiveFormValues parses form values and turns them into ArchiveOptions. -// It fails if the archive name and path are not in the request. -func ArchiveFormValues(r *http.Request, vars map[string]string) (ArchiveOptions, error) { - if err := ParseForm(r); err != nil { - return ArchiveOptions{}, err - } - - name := vars["name"] - path := filepath.FromSlash(r.Form.Get("path")) - - switch { - case name == "": - return ArchiveOptions{}, fmt.Errorf("bad parameter: 'name' cannot be empty") - case path == "": - return ArchiveOptions{}, fmt.Errorf("bad parameter: 'path' cannot be empty") - } - - return ArchiveOptions{name, path}, nil -} diff --git a/api/server/httputils/form_test.go b/api/server/httputils/form_test.go deleted file mode 100644 index c56f7c15e3..0000000000 --- a/api/server/httputils/form_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package httputils - -import ( - "net/http" - "net/url" - "testing" -) - -func TestBoolValue(t *testing.T) { - cases := map[string]bool{ - "": false, - "0": false, - "no": false, - "false": false, - "none": false, - "1": true, - "yes": true, - "true": true, - "one": true, - "100": true, - } - - for c, e := range cases { - v := url.Values{} - v.Set("test", c) - r, _ := http.NewRequest("POST", "", nil) - r.Form = v - - a := BoolValue(r, "test") - if a != e { - t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) - } - } -} - -func TestBoolValueOrDefault(t *testing.T) { - r, _ := http.NewRequest("GET", "", nil) - if !BoolValueOrDefault(r, "queryparam", true) { - t.Fatal("Expected to get true default value, got false") - } - - v := url.Values{} - v.Set("param", "") - r, _ = http.NewRequest("GET", "", nil) - r.Form = v - if BoolValueOrDefault(r, "param", true) { - t.Fatal("Expected not to get true") - } -} - -func TestInt64ValueOrZero(t *testing.T) { - cases := map[string]int64{ - "": 0, - "asdf": 0, - "0": 0, - "1": 1, - } - - for c, e := range cases { - v := url.Values{} - v.Set("test", c) - r, _ := http.NewRequest("POST", "", nil) - r.Form = v - - a := Int64ValueOrZero(r, "test") - if a != e { - t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) - } - } -} - -func TestInt64ValueOrDefault(t *testing.T) { - cases := map[string]int64{ - "": -1, - "-1": -1, - "42": 42, - } - - for c, e := range cases { - v := url.Values{} - v.Set("test", c) - r, _ := http.NewRequest("POST", "", nil) - r.Form = v - - a, err := Int64ValueOrDefault(r, "test", -1) - if a != e { - t.Fatalf("Value: %s, expected: %v, actual: %v", c, e, a) - } - if err != nil { - t.Fatalf("Error should be nil, but received: %s", err) - } - } -} - -func TestInt64ValueOrDefaultWithError(t *testing.T) { - v := url.Values{} - v.Set("test", "invalid") - r, _ := http.NewRequest("POST", "", nil) - r.Form = v - - _, err := Int64ValueOrDefault(r, "test", -1) - if err == nil { - t.Fatalf("Expected an error.") - } -} diff --git a/api/server/httputils/httputils.go b/api/server/httputils/httputils.go deleted file mode 100644 index 3b0f7a0dc0..0000000000 --- a/api/server/httputils/httputils.go +++ /dev/null @@ -1,106 +0,0 @@ -package httputils - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - 
- "golang.org/x/net/context" - - "github.com/docker/docker/api" -) - -// APIVersionKey is the client's requested API version. -const APIVersionKey = "api-version" - -// UAStringKey is used as key type for user-agent string in net/context struct -const UAStringKey = "upstream-user-agent" - -// APIFunc is an adapter to allow the use of ordinary functions as Docker API endpoints. -// Any function that has the appropriate signature can be registered as an API endpoint (e.g. getVersion). -type APIFunc func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error - -// HijackConnection interrupts the http response writer to get the -// underlying connection and operate with it. -func HijackConnection(w http.ResponseWriter) (io.ReadCloser, io.Writer, error) { - conn, _, err := w.(http.Hijacker).Hijack() - if err != nil { - return nil, nil, err - } - // Flush the options to make sure the client sets the raw mode - conn.Write([]byte{}) - return conn, conn, nil -} - -// CloseStreams ensures that a list for http streams are properly closed. -func CloseStreams(streams ...interface{}) { - for _, stream := range streams { - if tcpc, ok := stream.(interface { - CloseWrite() error - }); ok { - tcpc.CloseWrite() - } else if closer, ok := stream.(io.Closer); ok { - closer.Close() - } - } -} - -// CheckForJSON makes sure that the request's Content-Type is application/json. -func CheckForJSON(r *http.Request) error { - ct := r.Header.Get("Content-Type") - - // No Content-Type header is ok as long as there's no Body - if ct == "" { - if r.Body == nil || r.ContentLength == 0 { - return nil - } - } - - // Otherwise it better be json - if api.MatchesContentType(ct, "application/json") { - return nil - } - return fmt.Errorf("Content-Type specified (%s) must be 'application/json'", ct) -} - -// ParseForm ensures the request form is parsed even with invalid content types. -// If we don't do this, POST method without Content-type (even with empty body) will fail. -func ParseForm(r *http.Request) error { - if r == nil { - return nil - } - if err := r.ParseForm(); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -// ParseMultipartForm ensures the request form is parsed, even with invalid content types. -func ParseMultipartForm(r *http.Request) error { - if err := r.ParseMultipartForm(4096); err != nil && !strings.HasPrefix(err.Error(), "mime:") { - return err - } - return nil -} - -// WriteJSON writes the value v to the http response stream as json with standard json encoding. -func WriteJSON(w http.ResponseWriter, code int, v interface{}) error { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - return json.NewEncoder(w).Encode(v) -} - -// VersionFromContext returns an API version from the context using APIVersionKey. -// It panics if the context value does not have version.Version type. 
-func VersionFromContext(ctx context.Context) (ver string) {
-	if ctx == nil {
-		return
-	}
-	val := ctx.Value(APIVersionKey)
-	if val == nil {
-		return
-	}
-	return val.(string)
-}
diff --git a/api/server/middleware.go b/api/server/middleware.go
deleted file mode 100644
index 108e3c077c..0000000000
--- a/api/server/middleware.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package server
-
-import (
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/api/server/httputils"
-	"github.com/docker/docker/api/server/middleware"
-)
-
-// handleWithGlobalMiddlewares wraps the handler function for a request with
-// the server's global middlewares. The order of the middlewares is backwards,
-// meaning that the first in the list will be evaluated last.
-func (s *Server) handleWithGlobalMiddlewares(handler httputils.APIFunc) httputils.APIFunc {
-	next := handler
-
-	for _, m := range s.middlewares {
-		next = m.WrapHandler(next)
-	}
-
-	if s.cfg.Logging && logrus.GetLevel() == logrus.DebugLevel {
-		next = middleware.DebugRequestMiddleware(next)
-	}
-
-	return next
-}
diff --git a/api/server/middleware/cors.go b/api/server/middleware/cors.go
deleted file mode 100644
index ea725dbc72..0000000000
--- a/api/server/middleware/cors.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package middleware
-
-import (
-	"net/http"
-
-	"github.com/Sirupsen/logrus"
-	"golang.org/x/net/context"
-)
-
-// CORSMiddleware injects CORS headers into each request
-// when it's configured.
-type CORSMiddleware struct {
-	defaultHeaders string
-}
-
-// NewCORSMiddleware creates a new CORSMiddleware with default headers.
-func NewCORSMiddleware(d string) CORSMiddleware {
-	return CORSMiddleware{defaultHeaders: d}
-}
-
-// WrapHandler returns a new handler function wrapping the previous one in the request chain.
-func (c CORSMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // If "api-cors-header" is not given, but "api-enable-cors" is true, we set cors to "*" - // otherwise, all head values will be passed to HTTP handler - corsHeaders := c.defaultHeaders - if corsHeaders == "" { - corsHeaders = "*" - } - - logrus.Debugf("CORS header is enabled and set to: %s", corsHeaders) - w.Header().Add("Access-Control-Allow-Origin", corsHeaders) - w.Header().Add("Access-Control-Allow-Headers", "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth") - w.Header().Add("Access-Control-Allow-Methods", "HEAD, GET, POST, DELETE, PUT, OPTIONS") - return handler(ctx, w, r, vars) - } -} diff --git a/api/server/middleware/debug.go b/api/server/middleware/debug.go deleted file mode 100644 index c4835fb9d0..0000000000 --- a/api/server/middleware/debug.go +++ /dev/null @@ -1,76 +0,0 @@ -package middleware - -import ( - "bufio" - "encoding/json" - "io" - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/pkg/ioutils" - "golang.org/x/net/context" -) - -// DebugRequestMiddleware dumps the request to logger -func DebugRequestMiddleware(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - logrus.Debugf("Calling %s %s", r.Method, r.RequestURI) - - if r.Method != "POST" { - return handler(ctx, w, r, vars) - } - if err := httputils.CheckForJSON(r); err != nil { - return handler(ctx, w, r, vars) - } - maxBodySize := 4096 // 4KB - if r.ContentLength > int64(maxBodySize) { - return handler(ctx, w, r, vars) - } - - body := r.Body - bufReader := bufio.NewReaderSize(body, maxBodySize) - r.Body = ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) - - b, err := bufReader.Peek(maxBodySize) - if err != io.EOF { - // either there was an error reading, or the buffer is full (in which case the request is too large) - return handler(ctx, w, r, vars) - } - - var postForm map[string]interface{} - if err := json.Unmarshal(b, &postForm); err == nil { - maskSecretKeys(postForm) - formStr, errMarshal := json.Marshal(postForm) - if errMarshal == nil { - logrus.Debugf("form data: %s", string(formStr)) - } else { - logrus.Debugf("form data: %q", postForm) - } - } - - return handler(ctx, w, r, vars) - } -} - -func maskSecretKeys(inp interface{}) { - if arr, ok := inp.([]interface{}); ok { - for _, f := range arr { - maskSecretKeys(f) - } - return - } - if form, ok := inp.(map[string]interface{}); ok { - loop0: - for k, v := range form { - for _, m := range []string{"password", "secret", "jointoken"} { - if strings.EqualFold(m, k) { - form[k] = "*****" - continue loop0 - } - } - maskSecretKeys(v) - } - } -} diff --git a/api/server/middleware/middleware.go b/api/server/middleware/middleware.go deleted file mode 100644 index dc1f5bfa0d..0000000000 --- a/api/server/middleware/middleware.go +++ /dev/null @@ -1,13 +0,0 @@ -package middleware - -import ( - "net/http" - - "golang.org/x/net/context" -) - -// 
Middleware is an interface to allow the use of ordinary functions as Docker API filters. -// Any struct that has the appropriate signature can be registered as a middleware. -type Middleware interface { - WrapHandler(func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error -} diff --git a/api/server/middleware/user_agent.go b/api/server/middleware/user_agent.go deleted file mode 100644 index 87c8cb83e1..0000000000 --- a/api/server/middleware/user_agent.go +++ /dev/null @@ -1,47 +0,0 @@ -package middleware - -import ( - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" -) - -// UserAgentMiddleware is a middleware that -// validates the client user-agent. -type UserAgentMiddleware struct { - serverVersion string -} - -// NewUserAgentMiddleware creates a new UserAgentMiddleware -// with the server version. -func NewUserAgentMiddleware(s string) UserAgentMiddleware { - return UserAgentMiddleware{ - serverVersion: s, - } -} - -// WrapHandler returns a new handler function wrapping the previous one in the request chain. -func (u UserAgentMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - ctx = context.WithValue(ctx, httputils.UAStringKey, r.Header.Get("User-Agent")) - - if strings.Contains(r.Header.Get("User-Agent"), "Docker-Client/") { - userAgent := strings.Split(r.Header.Get("User-Agent"), "/") - - // v1.20 onwards includes the GOOS of the client after the version - // such as Docker/1.7.0 (linux) - if len(userAgent) == 2 && strings.Contains(userAgent[1], " ") { - userAgent[1] = strings.Split(userAgent[1], " ")[0] - } - - if len(userAgent) == 2 && !versions.Equal(u.serverVersion, userAgent[1]) { - logrus.Debugf("Client and server don't have the same version (client: %s, server: %s)", userAgent[1], u.serverVersion) - } - } - return handler(ctx, w, r, vars) - } -} diff --git a/api/server/middleware/version.go b/api/server/middleware/version.go deleted file mode 100644 index eb7bbf3a3e..0000000000 --- a/api/server/middleware/version.go +++ /dev/null @@ -1,59 +0,0 @@ -package middleware - -import ( - "fmt" - "net/http" - "runtime" - - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" -) - -type badRequestError struct { - error -} - -func (badRequestError) HTTPErrorStatusCode() int { - return http.StatusBadRequest -} - -// VersionMiddleware is a middleware that -// validates the client and server versions. -type VersionMiddleware struct { - serverVersion string - defaultVersion string - minVersion string -} - -// NewVersionMiddleware creates a new VersionMiddleware -// with the default versions. -func NewVersionMiddleware(s, d, m string) VersionMiddleware { - return VersionMiddleware{ - serverVersion: s, - defaultVersion: d, - minVersion: m, - } -} - -// WrapHandler returns a new handler function wrapping the previous one in the request chain. 
-func (v VersionMiddleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - apiVersion := vars["version"] - if apiVersion == "" { - apiVersion = v.defaultVersion - } - - if versions.GreaterThan(apiVersion, v.defaultVersion) { - return badRequestError{fmt.Errorf("client is newer than server (client API version: %s, server API version: %s)", apiVersion, v.defaultVersion)} - } - if versions.LessThan(apiVersion, v.minVersion) { - return badRequestError{fmt.Errorf("client version %s is too old. Minimum supported API version is %s, please upgrade your client to a newer version", apiVersion, v.minVersion)} - } - - header := fmt.Sprintf("Docker/%s (%s)", v.serverVersion, runtime.GOOS) - w.Header().Set("Server", header) - ctx = context.WithValue(ctx, "api-version", apiVersion) - return handler(ctx, w, r, vars) - } - -} diff --git a/api/server/middleware/version_test.go b/api/server/middleware/version_test.go deleted file mode 100644 index 90dee7138c..0000000000 --- a/api/server/middleware/version_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package middleware - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/docker/docker/api/server/httputils" - "golang.org/x/net/context" -) - -func TestVersionMiddleware(t *testing.T) { - handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if httputils.VersionFromContext(ctx) == "" { - t.Fatalf("Expected version, got empty string") - } - return nil - } - - defaultVersion := "1.10.0" - minVersion := "1.2.0" - m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) - h := m.WrapHandler(handler) - - req, _ := http.NewRequest("GET", "/containers/json", nil) - resp := httptest.NewRecorder() - ctx := context.Background() - if err := h(ctx, resp, req, map[string]string{}); err != nil { - t.Fatal(err) - } -} - -func TestVersionMiddlewareWithErrors(t *testing.T) { - handler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if httputils.VersionFromContext(ctx) == "" { - t.Fatalf("Expected version, got empty string") - } - return nil - } - - defaultVersion := "1.10.0" - minVersion := "1.2.0" - m := NewVersionMiddleware(defaultVersion, defaultVersion, minVersion) - h := m.WrapHandler(handler) - - req, _ := http.NewRequest("GET", "/containers/json", nil) - resp := httptest.NewRecorder() - ctx := context.Background() - - vars := map[string]string{"version": "0.1"} - err := h(ctx, resp, req, vars) - - if !strings.Contains(err.Error(), "client version 0.1 is too old. 
Minimum supported API version is 1.2.0") { - t.Fatalf("Expected too old client error, got %v", err) - } - - vars["version"] = "100000" - err = h(ctx, resp, req, vars) - if !strings.Contains(err.Error(), "client is newer than server") { - t.Fatalf("Expected client newer than server error, got %v", err) - } -} diff --git a/api/server/profiler.go b/api/server/profiler.go deleted file mode 100644 index 8bf8384fdb..0000000000 --- a/api/server/profiler.go +++ /dev/null @@ -1,41 +0,0 @@ -package server - -import ( - "expvar" - "fmt" - "net/http" - "net/http/pprof" - - "github.com/gorilla/mux" -) - -const debugPathPrefix = "/debug/" - -func profilerSetup(mainRouter *mux.Router) { - var r = mainRouter.PathPrefix(debugPathPrefix).Subrouter() - r.HandleFunc("/vars", expVars) - r.HandleFunc("/pprof/", pprof.Index) - r.HandleFunc("/pprof/cmdline", pprof.Cmdline) - r.HandleFunc("/pprof/profile", pprof.Profile) - r.HandleFunc("/pprof/symbol", pprof.Symbol) - r.HandleFunc("/pprof/trace", pprof.Trace) - r.HandleFunc("/pprof/block", pprof.Handler("block").ServeHTTP) - r.HandleFunc("/pprof/heap", pprof.Handler("heap").ServeHTTP) - r.HandleFunc("/pprof/goroutine", pprof.Handler("goroutine").ServeHTTP) - r.HandleFunc("/pprof/threadcreate", pprof.Handler("threadcreate").ServeHTTP) -} - -// Replicated from expvar.go as not public. -func expVars(w http.ResponseWriter, r *http.Request) { - first := true - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} diff --git a/api/server/router/build/backend.go b/api/server/router/build/backend.go deleted file mode 100644 index 18ba1b2769..0000000000 --- a/api/server/router/build/backend.go +++ /dev/null @@ -1,20 +0,0 @@ -package build - -import ( - "io" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// Backend abstracts an image builder whose only purpose is to build an image referenced by an imageID. -type Backend interface { - // Build builds a Docker image referenced by an imageID string. - // - // Note: Tagging an image should not be done by a Builder, it should instead be done - // by the caller. 
- // - // TODO: make this return a reference instead of string - BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) -} diff --git a/api/server/router/build/build.go b/api/server/router/build/build.go deleted file mode 100644 index 959498e0f1..0000000000 --- a/api/server/router/build/build.go +++ /dev/null @@ -1,29 +0,0 @@ -package build - -import "github.com/docker/docker/api/server/router" - -// buildRouter is a router to talk with the build controller -type buildRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new build router -func NewRouter(b Backend) router.Router { - r := &buildRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routers to the build controller -func (r *buildRouter) Routes() []router.Route { - return r.routes -} - -func (r *buildRouter) initRoutes() { - r.routes = []router.Route{ - router.Cancellable(router.NewPostRoute("/build", r.postBuild)), - } -} diff --git a/api/server/router/build/build_routes.go b/api/server/router/build/build_routes.go deleted file mode 100644 index b5a7c28b69..0000000000 --- a/api/server/router/build/build_routes.go +++ /dev/null @@ -1,194 +0,0 @@ -package build - -import ( - "bytes" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/versions" - "github.com/docker/go-units" - "golang.org/x/net/context" -) - -func newImageBuildOptions(ctx context.Context, r *http.Request) (*types.ImageBuildOptions, error) { - version := httputils.VersionFromContext(ctx) - options := &types.ImageBuildOptions{} - if httputils.BoolValue(r, "forcerm") && versions.GreaterThanOrEqualTo(version, "1.12") { - options.Remove = true - } else if r.FormValue("rm") == "" && versions.GreaterThanOrEqualTo(version, "1.12") { - options.Remove = true - } else { - options.Remove = httputils.BoolValue(r, "rm") - } - if httputils.BoolValue(r, "pull") && versions.GreaterThanOrEqualTo(version, "1.16") { - options.PullParent = true - } - - options.Dockerfile = r.FormValue("dockerfile") - options.SuppressOutput = httputils.BoolValue(r, "q") - options.NoCache = httputils.BoolValue(r, "nocache") - options.ForceRemove = httputils.BoolValue(r, "forcerm") - options.MemorySwap = httputils.Int64ValueOrZero(r, "memswap") - options.Memory = httputils.Int64ValueOrZero(r, "memory") - options.CPUShares = httputils.Int64ValueOrZero(r, "cpushares") - options.CPUPeriod = httputils.Int64ValueOrZero(r, "cpuperiod") - options.CPUQuota = httputils.Int64ValueOrZero(r, "cpuquota") - options.CPUSetCPUs = r.FormValue("cpusetcpus") - options.CPUSetMems = r.FormValue("cpusetmems") - options.CgroupParent = r.FormValue("cgroupparent") - options.Tags = r.Form["t"] - - if r.Form.Get("shmsize") != "" { - shmSize, err := strconv.ParseInt(r.Form.Get("shmsize"), 10, 64) - if err != nil { - return nil, err - } - options.ShmSize = shmSize - } - - if i := container.Isolation(r.FormValue("isolation")); i != "" { - if !container.Isolation.IsValid(i) { - return nil, fmt.Errorf("Unsupported isolation: %q", i) - } 
- options.Isolation = i - } - - var buildUlimits = []*units.Ulimit{} - ulimitsJSON := r.FormValue("ulimits") - if ulimitsJSON != "" { - if err := json.NewDecoder(strings.NewReader(ulimitsJSON)).Decode(&buildUlimits); err != nil { - return nil, err - } - options.Ulimits = buildUlimits - } - - var buildArgs = map[string]string{} - buildArgsJSON := r.FormValue("buildargs") - if buildArgsJSON != "" { - if err := json.NewDecoder(strings.NewReader(buildArgsJSON)).Decode(&buildArgs); err != nil { - return nil, err - } - options.BuildArgs = buildArgs - } - var labels = map[string]string{} - labelsJSON := r.FormValue("labels") - if labelsJSON != "" { - if err := json.NewDecoder(strings.NewReader(labelsJSON)).Decode(&labels); err != nil { - return nil, err - } - options.Labels = labels - } - - return options, nil -} - -type syncWriter struct { - w io.Writer - mu sync.Mutex -} - -func (s *syncWriter) Write(b []byte) (count int, err error) { - s.mu.Lock() - count, err = s.w.Write(b) - s.mu.Unlock() - return -} - -func (br *buildRouter) postBuild(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var ( - authConfigs = map[string]types.AuthConfig{} - authConfigsEncoded = r.Header.Get("X-Registry-Config") - notVerboseBuffer = bytes.NewBuffer(nil) - ) - - if authConfigsEncoded != "" { - authConfigsJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authConfigsEncoded)) - if err := json.NewDecoder(authConfigsJSON).Decode(&authConfigs); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting - // to be empty. - } - } - - w.Header().Set("Content-Type", "application/json") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - sf := streamformatter.NewJSONStreamFormatter() - errf := func(err error) error { - if httputils.BoolValue(r, "q") && notVerboseBuffer.Len() > 0 { - output.Write(notVerboseBuffer.Bytes()) - } - // Do not write the error in the http output if it's still empty. - // This prevents from writing a 200(OK) when there is an internal error. - if !output.Flushed() { - return err - } - _, err = w.Write(sf.FormatError(err)) - if err != nil { - logrus.Warnf("could not write error response: %v", err) - } - return nil - } - - buildOptions, err := newImageBuildOptions(ctx, r) - if err != nil { - return errf(err) - } - buildOptions.AuthConfigs = authConfigs - - remoteURL := r.FormValue("remote") - - // Currently, only used if context is from a remote url. - // Look at code in DetectContextFromRemoteURL for more information. 
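-	// (The wrapper below reports "Downloading context" progress while the
-	// remote context is read; when -q/SuppressOutput is set, the progress is
-	// diverted to notVerboseBuffer instead of the client stream.)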
- createProgressReader := func(in io.ReadCloser) io.ReadCloser { - progressOutput := sf.NewProgressOutput(output, true) - if buildOptions.SuppressOutput { - progressOutput = sf.NewProgressOutput(notVerboseBuffer, true) - } - return progress.NewProgressReader(in, progressOutput, r.ContentLength, "Downloading context", remoteURL) - } - - var out io.Writer = output - if buildOptions.SuppressOutput { - out = notVerboseBuffer - } - out = &syncWriter{w: out} - stdout := &streamformatter.StdoutFormatter{Writer: out, StreamFormatter: sf} - stderr := &streamformatter.StderrFormatter{Writer: out, StreamFormatter: sf} - - pg := backend.ProgressWriter{ - Output: out, - StdoutFormatter: stdout, - StderrFormatter: stderr, - ProgressReaderFunc: createProgressReader, - } - - imgID, err := br.backend.BuildFromContext(ctx, r.Body, remoteURL, buildOptions, pg) - if err != nil { - return errf(err) - } - - // Everything worked so if -q was provided the output from the daemon - // should be just the image ID and we'll print that to stdout. - if buildOptions.SuppressOutput { - stdout := &streamformatter.StdoutFormatter{Writer: output, StreamFormatter: sf} - fmt.Fprintf(stdout, "%s\n", string(imgID)) - } - - return nil -} diff --git a/api/server/router/container/backend.go b/api/server/router/container/backend.go deleted file mode 100644 index 444260af9f..0000000000 --- a/api/server/router/container/backend.go +++ /dev/null @@ -1,72 +0,0 @@ -package container - -import ( - "io" - "time" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/pkg/archive" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" -) - -// execBackend includes functions to implement to provide exec functionality. -type execBackend interface { - ContainerExecCreate(name string, config *types.ExecConfig) (string, error) - ContainerExecInspect(id string) (*backend.ExecInspect, error) - ContainerExecResize(name string, height, width int) error - ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) error - ExecExists(name string) (bool, error) -} - -// copyBackend includes functions to implement to provide container copy functionality. -type copyBackend interface { - ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) - ContainerCopy(name string, res string) (io.ReadCloser, error) - ContainerExport(name string, out io.Writer) error - ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error - ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) -} - -// stateBackend includes functions to implement to provide container state lifecycle functionality. 
-type stateBackend interface { - ContainerCreate(config types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) - ContainerKill(name string, sig uint64) error - ContainerPause(name string) error - ContainerRename(oldName, newName string) error - ContainerResize(name string, height, width int) error - ContainerRestart(name string, seconds int) error - ContainerRm(name string, config *types.ContainerRmConfig) error - ContainerStart(name string, hostConfig *container.HostConfig, validateHostname bool) error - ContainerStop(name string, seconds int) error - ContainerUnpause(name string) error - ContainerUpdate(name string, hostConfig *container.HostConfig, validateHostname bool) ([]string, error) - ContainerWait(name string, timeout time.Duration) (int, error) -} - -// monitorBackend includes functions to implement to provide containers monitoring functionality. -type monitorBackend interface { - ContainerChanges(name string) ([]archive.Change, error) - ContainerInspect(name string, size bool, version string) (interface{}, error) - ContainerLogs(ctx context.Context, name string, config *backend.ContainerLogsConfig, started chan struct{}) error - ContainerStats(ctx context.Context, name string, config *backend.ContainerStatsConfig) error - ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) - - Containers(config *types.ContainerListOptions) ([]*types.Container, error) -} - -// attachBackend includes function to implement to provide container attaching functionality. -type attachBackend interface { - ContainerAttach(name string, c *backend.ContainerAttachConfig) error -} - -// Backend is all the methods that need to be implemented to provide container specific functionality. -type Backend interface { - execBackend - copyBackend - stateBackend - monitorBackend - attachBackend -} diff --git a/api/server/router/container/container.go b/api/server/router/container/container.go deleted file mode 100644 index d6fea4c353..0000000000 --- a/api/server/router/container/container.go +++ /dev/null @@ -1,76 +0,0 @@ -package container - -import ( - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/router" -) - -type validationError struct { - error -} - -func (validationError) IsValidationError() bool { - return true -} - -// containerRouter is a router to talk with the container controller -type containerRouter struct { - backend Backend - decoder httputils.ContainerDecoder - routes []router.Route -} - -// NewRouter initializes a new container router -func NewRouter(b Backend, decoder httputils.ContainerDecoder) router.Router { - r := &containerRouter{ - backend: b, - decoder: decoder, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the container controller -func (r *containerRouter) Routes() []router.Route { - return r.routes -} - -// initRoutes initializes the routes in container router -func (r *containerRouter) initRoutes() { - r.routes = []router.Route{ - // HEAD - router.NewHeadRoute("/containers/{name:.*}/archive", r.headContainersArchive), - // GET - router.NewGetRoute("/containers/json", r.getContainersJSON), - router.NewGetRoute("/containers/{name:.*}/export", r.getContainersExport), - router.NewGetRoute("/containers/{name:.*}/changes", r.getContainersChanges), - router.NewGetRoute("/containers/{name:.*}/json", r.getContainersByName), - router.NewGetRoute("/containers/{name:.*}/top", r.getContainersTop), - 
router.Cancellable(router.NewGetRoute("/containers/{name:.*}/logs", r.getContainersLogs)), - router.Cancellable(router.NewGetRoute("/containers/{name:.*}/stats", r.getContainersStats)), - router.NewGetRoute("/containers/{name:.*}/attach/ws", r.wsContainersAttach), - router.NewGetRoute("/exec/{id:.*}/json", r.getExecByID), - router.NewGetRoute("/containers/{name:.*}/archive", r.getContainersArchive), - // POST - router.NewPostRoute("/containers/create", r.postContainersCreate), - router.NewPostRoute("/containers/{name:.*}/kill", r.postContainersKill), - router.NewPostRoute("/containers/{name:.*}/pause", r.postContainersPause), - router.NewPostRoute("/containers/{name:.*}/unpause", r.postContainersUnpause), - router.NewPostRoute("/containers/{name:.*}/restart", r.postContainersRestart), - router.NewPostRoute("/containers/{name:.*}/start", r.postContainersStart), - router.NewPostRoute("/containers/{name:.*}/stop", r.postContainersStop), - router.NewPostRoute("/containers/{name:.*}/wait", r.postContainersWait), - router.NewPostRoute("/containers/{name:.*}/resize", r.postContainersResize), - router.NewPostRoute("/containers/{name:.*}/attach", r.postContainersAttach), - router.NewPostRoute("/containers/{name:.*}/copy", r.postContainersCopy), // Deprecated since 1.8, Errors out since 1.12 - router.NewPostRoute("/containers/{name:.*}/exec", r.postContainerExecCreate), - router.NewPostRoute("/exec/{name:.*}/start", r.postContainerExecStart), - router.NewPostRoute("/exec/{name:.*}/resize", r.postContainerExecResize), - router.NewPostRoute("/containers/{name:.*}/rename", r.postContainerRename), - router.NewPostRoute("/containers/{name:.*}/update", r.postContainerUpdate), - // PUT - router.NewPutRoute("/containers/{name:.*}/archive", r.putContainersArchive), - // DELETE - router.NewDeleteRoute("/containers/{name:.*}", r.deleteContainers), - } -} diff --git a/api/server/router/container/container_routes.go b/api/server/router/container/container_routes.go deleted file mode 100644 index 977ce2522d..0000000000 --- a/api/server/router/container/container_routes.go +++ /dev/null @@ -1,527 +0,0 @@ -package container - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/signal" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" - "golang.org/x/net/websocket" -) - -func (s *containerRouter) getContainersJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - config := &types.ContainerListOptions{ - All: httputils.BoolValue(r, "all"), - Size: httputils.BoolValue(r, "size"), - Since: r.Form.Get("since"), - Before: r.Form.Get("before"), - Filter: filter, - } - - if tmpLimit := r.Form.Get("limit"); tmpLimit != "" { - limit, err := strconv.Atoi(tmpLimit) - if err != nil { - return err - } - config.Limit = limit - } - - containers, err := s.backend.Containers(config) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, containers) -} - -func (s *containerRouter) 
getContainersStats(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - stream := httputils.BoolValueOrDefault(r, "stream", true) - if !stream { - w.Header().Set("Content-Type", "application/json") - } - - config := &backend.ContainerStatsConfig{ - Stream: stream, - OutStream: w, - Version: string(httputils.VersionFromContext(ctx)), - } - - return s.backend.ContainerStats(ctx, vars["name"], config) -} - -func (s *containerRouter) getContainersLogs(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - // Args are validated before the stream starts because when it starts we're - // sending HTTP 200 by writing an empty chunk of data to tell the client that - // daemon is going to stream. By sending this initial HTTP 200 we can't report - // any error after the stream starts (i.e. container not found, wrong parameters) - // with the appropriate status code. - stdout, stderr := httputils.BoolValue(r, "stdout"), httputils.BoolValue(r, "stderr") - if !(stdout || stderr) { - return fmt.Errorf("Bad parameters: you must choose at least one stream") - } - - containerName := vars["name"] - logsConfig := &backend.ContainerLogsConfig{ - ContainerLogsOptions: types.ContainerLogsOptions{ - Follow: httputils.BoolValue(r, "follow"), - Timestamps: httputils.BoolValue(r, "timestamps"), - Since: r.Form.Get("since"), - Tail: r.Form.Get("tail"), - ShowStdout: stdout, - ShowStderr: stderr, - Details: httputils.BoolValue(r, "details"), - }, - OutStream: w, - } - - chStarted := make(chan struct{}) - if err := s.backend.ContainerLogs(ctx, containerName, logsConfig, chStarted); err != nil { - select { - case <-chStarted: - // The client may be expecting all of the data we're sending to - // be multiplexed, so send it through OutStream, which will - // have been set up to handle that if needed. - fmt.Fprintf(logsConfig.OutStream, "Error running logs job: %v\n", err) - default: - return err - } - } - - return nil -} - -func (s *containerRouter) getContainersExport(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return s.backend.ContainerExport(vars["name"], w) -} - -func (s *containerRouter) postContainersStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // If contentLength is -1, we can assumed chunked encoding - // or more technically that the length is unknown - // https://golang.org/src/pkg/net/http/request.go#L139 - // net/http otherwise seems to swallow any headers related to chunked encoding - // including r.TransferEncoding - // allow a nil body for backwards compatibility - - version := httputils.VersionFromContext(ctx) - var hostConfig *container.HostConfig - // A non-nil json object is at least 7 characters. 
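-	// (Presumably counting the smallest useful JSON object, e.g. {"a":1},
-	// which is exactly 7 bytes; shorter known-length bodies are treated as
-	// carrying no HostConfig at all.)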
- if r.ContentLength > 7 || r.ContentLength == -1 { - if versions.GreaterThanOrEqualTo(version, "1.24") { - return validationError{fmt.Errorf("starting container with HostConfig was deprecated since v1.10 and removed in v1.12")} - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - c, err := s.decoder.DecodeHostConfig(r.Body) - if err != nil { - return err - } - hostConfig = c - } - - validateHostname := versions.GreaterThanOrEqualTo(version, "1.24") - if err := s.backend.ContainerStart(vars["name"], hostConfig, validateHostname); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainersStop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - seconds, _ := strconv.Atoi(r.Form.Get("t")) - - if err := s.backend.ContainerStop(vars["name"], seconds); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - - return nil -} - -type errContainerIsRunning interface { - ContainerIsRunning() bool -} - -func (s *containerRouter) postContainersKill(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - var sig syscall.Signal - name := vars["name"] - - // If we have a signal, look at it. Otherwise, do nothing - if sigStr := r.Form.Get("signal"); sigStr != "" { - var err error - if sig, err = signal.ParseSignal(sigStr); err != nil { - return err - } - } - - if err := s.backend.ContainerKill(name, uint64(sig)); err != nil { - var isStopped bool - if e, ok := err.(errContainerIsRunning); ok { - isStopped = !e.ContainerIsRunning() - } - - // Return error that's not caused because the container is stopped. - // Return error if the container is not running and the api is >= 1.20 - // to keep backwards compatibility. 
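// Spelled out, the check below suppresses the error only when the
// backend reports the container as already stopped AND the client's API
// version predates 1.20; older clients expect kill on a stopped
// container to succeed. As a decision table (illustrative):
//
//	API >= 1.20, running  -> error returned
//	API >= 1.20, stopped  -> error returned
//	API <  1.20, running  -> error returned
//	API <  1.20, stopped  -> error suppressed, 204 No Content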
- version := httputils.VersionFromContext(ctx) - if versions.GreaterThanOrEqualTo(version, "1.20") || !isStopped { - return fmt.Errorf("Cannot kill container %s: %v", name, err) - } - } - - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainersRestart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - timeout, _ := strconv.Atoi(r.Form.Get("t")) - - if err := s.backend.ContainerRestart(vars["name"], timeout); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersPause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := s.backend.ContainerPause(vars["name"]); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersUnpause(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := s.backend.ContainerUnpause(vars["name"]); err != nil { - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersWait(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - status, err := s.backend.ContainerWait(vars["name"], -1*time.Second) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, &types.ContainerWaitResponse{ - StatusCode: status, - }) -} - -func (s *containerRouter) getContainersChanges(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - changes, err := s.backend.ContainerChanges(vars["name"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, changes) -} - -func (s *containerRouter) getContainersTop(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - procList, err := s.backend.ContainerTop(vars["name"], r.Form.Get("ps_args")) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, procList) -} - -func (s *containerRouter) postContainerRename(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - newName := r.Form.Get("name") - if err := s.backend.ContainerRename(name, newName); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (s *containerRouter) postContainerUpdate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - version := httputils.VersionFromContext(ctx) - var updateConfig container.UpdateConfig - - decoder := json.NewDecoder(r.Body) - if err := decoder.Decode(&updateConfig); err != nil { - return err - } - - hostConfig := &container.HostConfig{ - Resources: updateConfig.Resources, - RestartPolicy: updateConfig.RestartPolicy, - } - - name := vars["name"] - validateHostname := versions.GreaterThanOrEqualTo(version, "1.24") - warnings, err := s.backend.ContainerUpdate(name, 
hostConfig, validateHostname) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, &types.ContainerUpdateResponse{ - Warnings: warnings, - }) -} - -func (s *containerRouter) postContainersCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - name := r.Form.Get("name") - - config, hostConfig, networkingConfig, err := s.decoder.DecodeConfig(r.Body) - if err != nil { - return err - } - version := httputils.VersionFromContext(ctx) - adjustCPUShares := versions.LessThan(version, "1.19") - - validateHostname := versions.GreaterThanOrEqualTo(version, "1.24") - ccr, err := s.backend.ContainerCreate(types.ContainerCreateConfig{ - Name: name, - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - AdjustCPUShares: adjustCPUShares, - }, validateHostname) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, ccr) -} - -func (s *containerRouter) deleteContainers(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - config := &types.ContainerRmConfig{ - ForceRemove: httputils.BoolValue(r, "force"), - RemoveVolume: httputils.BoolValue(r, "v"), - RemoveLink: httputils.BoolValue(r, "link"), - } - - if err := s.backend.ContainerRm(name, config); err != nil { - // Force a 404 for the empty string - if strings.Contains(strings.ToLower(err.Error()), "prefix can't be empty") { - return fmt.Errorf("no such container: \"\"") - } - return err - } - - w.WriteHeader(http.StatusNoContent) - - return nil -} - -func (s *containerRouter) postContainersResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - height, err := strconv.Atoi(r.Form.Get("h")) - if err != nil { - return err - } - width, err := strconv.Atoi(r.Form.Get("w")) - if err != nil { - return err - } - - return s.backend.ContainerResize(vars["name"], height, width) -} - -func (s *containerRouter) postContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - err := httputils.ParseForm(r) - if err != nil { - return err - } - containerName := vars["name"] - - _, upgrade := r.Header["Upgrade"] - detachKeys := r.FormValue("detachKeys") - - hijacker, ok := w.(http.Hijacker) - if !ok { - return fmt.Errorf("error attaching to container %s, hijack connection missing", containerName) - } - - setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { - conn, _, err := hijacker.Hijack() - if err != nil { - return nil, nil, nil, err - } - - // set raw mode - conn.Write([]byte{}) - - if upgrade { - fmt.Fprintf(conn, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") - } else { - fmt.Fprintf(conn, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - } - - closer := func() error { - httputils.CloseStreams(conn) - return nil - } - return ioutils.NewReadCloserWrapper(conn, closer), conn, conn, nil - } - - attachConfig := &backend.ContainerAttachConfig{ - GetStreams: setupStreams, - UseStdin: httputils.BoolValue(r, "stdin"), - UseStdout: httputils.BoolValue(r, "stdout"), - UseStderr: 
httputils.BoolValue(r, "stderr"), - Logs: httputils.BoolValue(r, "logs"), - Stream: httputils.BoolValue(r, "stream"), - DetachKeys: detachKeys, - MuxStreams: true, - } - - if err = s.backend.ContainerAttach(containerName, attachConfig); err != nil { - logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) - // Remember to close stream if error happens - conn, _, errHijack := hijacker.Hijack() - if errHijack == nil { - statusCode := httputils.GetHTTPErrorStatusCode(err) - statusText := http.StatusText(statusCode) - fmt.Fprintf(conn, "HTTP/1.1 %d %s\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n%s\r\n", statusCode, statusText, err.Error()) - httputils.CloseStreams(conn) - } else { - logrus.Errorf("Error Hijacking: %v", err) - } - } - return nil -} - -func (s *containerRouter) wsContainersAttach(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - containerName := vars["name"] - - var err error - detachKeys := r.FormValue("detachKeys") - - done := make(chan struct{}) - started := make(chan struct{}) - - setupStreams := func() (io.ReadCloser, io.Writer, io.Writer, error) { - wsChan := make(chan *websocket.Conn) - h := func(conn *websocket.Conn) { - wsChan <- conn - <-done - } - - srv := websocket.Server{Handler: h, Handshake: nil} - go func() { - close(started) - srv.ServeHTTP(w, r) - }() - - conn := <-wsChan - return conn, conn, conn, nil - } - - attachConfig := &backend.ContainerAttachConfig{ - GetStreams: setupStreams, - Logs: httputils.BoolValue(r, "logs"), - Stream: httputils.BoolValue(r, "stream"), - DetachKeys: detachKeys, - UseStdin: true, - UseStdout: true, - UseStderr: true, - MuxStreams: false, // TODO: this should be true since it's a single stream for both stdout and stderr - } - - err = s.backend.ContainerAttach(containerName, attachConfig) - close(done) - select { - case <-started: - logrus.Errorf("Error attaching websocket: %s", err) - return nil - default: - } - return err -} diff --git a/api/server/router/container/copy.go b/api/server/router/container/copy.go deleted file mode 100644 index 554b40e8be..0000000000 --- a/api/server/router/container/copy.go +++ /dev/null @@ -1,119 +0,0 @@ -package container - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "os" - "strings" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" -) - -// postContainersCopy is deprecated in favor of getContainersArchive. 
-func (s *containerRouter) postContainersCopy(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - // Deprecated since 1.8, Errors out since 1.12 - version := httputils.VersionFromContext(ctx) - if versions.GreaterThanOrEqualTo(version, "1.24") { - w.WriteHeader(http.StatusNotFound) - return nil - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - cfg := types.CopyConfig{} - if err := json.NewDecoder(r.Body).Decode(&cfg); err != nil { - return err - } - - if cfg.Resource == "" { - return fmt.Errorf("Path cannot be empty") - } - - data, err := s.backend.ContainerCopy(vars["name"], cfg.Resource) - if err != nil { - if strings.Contains(strings.ToLower(err.Error()), "no such container") { - w.WriteHeader(http.StatusNotFound) - return nil - } - if os.IsNotExist(err) { - return fmt.Errorf("Could not find the file %s in container %s", cfg.Resource, vars["name"]) - } - return err - } - defer data.Close() - - w.Header().Set("Content-Type", "application/x-tar") - if _, err := io.Copy(w, data); err != nil { - return err - } - - return nil -} - -// // Encode the stat to JSON, base64 encode, and place in a header. -func setContainerPathStatHeader(stat *types.ContainerPathStat, header http.Header) error { - statJSON, err := json.Marshal(stat) - if err != nil { - return err - } - - header.Set( - "X-Docker-Container-Path-Stat", - base64.StdEncoding.EncodeToString(statJSON), - ) - - return nil -} - -func (s *containerRouter) headContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - stat, err := s.backend.ContainerStatPath(v.Name, v.Path) - if err != nil { - return err - } - - return setContainerPathStatHeader(stat, w.Header()) -} - -func (s *containerRouter) getContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - tarArchive, stat, err := s.backend.ContainerArchivePath(v.Name, v.Path) - if err != nil { - return err - } - defer tarArchive.Close() - - if err := setContainerPathStatHeader(stat, w.Header()); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/x-tar") - _, err = io.Copy(w, tarArchive) - - return err -} - -func (s *containerRouter) putContainersArchive(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - v, err := httputils.ArchiveFormValues(r, vars) - if err != nil { - return err - } - - noOverwriteDirNonDir := httputils.BoolValue(r, "noOverwriteDirNonDir") - return s.backend.ContainerExtractToDir(v.Name, v.Path, noOverwriteDirNonDir, r.Body) -} diff --git a/api/server/router/container/exec.go b/api/server/router/container/exec.go deleted file mode 100644 index 21f5dc8300..0000000000 --- a/api/server/router/container/exec.go +++ /dev/null @@ -1,134 +0,0 @@ -package container - -import ( - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" -) - -func (s *containerRouter) getExecByID(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - eConfig, err := s.backend.ContainerExecInspect(vars["id"]) - if 
err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, eConfig) -} - -func (s *containerRouter) postContainerExecCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := httputils.CheckForJSON(r); err != nil { - return err - } - name := vars["name"] - - execConfig := &types.ExecConfig{} - if err := json.NewDecoder(r.Body).Decode(execConfig); err != nil { - return err - } - - if len(execConfig.Cmd) == 0 { - return fmt.Errorf("No exec command specified") - } - - // Register an instance of Exec in container. - id, err := s.backend.ContainerExecCreate(name, execConfig) - if err != nil { - logrus.Errorf("Error setting up exec command in container %s: %v", name, err) - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerExecCreateResponse{ - ID: id, - }) -} - -// TODO(vishh): Refactor the code to avoid having to specify stream config as part of both create and start. -func (s *containerRouter) postContainerExecStart(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - version := httputils.VersionFromContext(ctx) - if versions.GreaterThan(version, "1.21") { - if err := httputils.CheckForJSON(r); err != nil { - return err - } - } - - var ( - execName = vars["name"] - stdin, inStream io.ReadCloser - stdout, stderr, outStream io.Writer - ) - - execStartCheck := &types.ExecStartCheck{} - if err := json.NewDecoder(r.Body).Decode(execStartCheck); err != nil { - return err - } - - if exists, err := s.backend.ExecExists(execName); !exists { - return err - } - - if !execStartCheck.Detach { - var err error - // Setting up the streaming http interface. - inStream, outStream, err = httputils.HijackConnection(w) - if err != nil { - return err - } - defer httputils.CloseStreams(inStream, outStream) - - if _, ok := r.Header["Upgrade"]; ok { - fmt.Fprintf(outStream, "HTTP/1.1 101 UPGRADED\r\nContent-Type: application/vnd.docker.raw-stream\r\nConnection: Upgrade\r\nUpgrade: tcp\r\n\r\n") - } else { - fmt.Fprintf(outStream, "HTTP/1.1 200 OK\r\nContent-Type: application/vnd.docker.raw-stream\r\n\r\n") - } - - stdin = inStream - stdout = outStream - if !execStartCheck.Tty { - stderr = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - stdout = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - } - - // Now run the user process in container. - // Maybe we should we pass ctx here if we're not detaching? 
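// When Tty is false, the stdout and stderr writers set up above share
// the single hijacked connection, framed by pkg/stdcopy. A Go client
// would split the two streams back apart with something like this
// minimal sketch, where rawStream stands in for the hijacked connection:
//
//	_, err := stdcopy.StdCopy(os.Stdout, os.Stderr, rawStream)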
- if err := s.backend.ContainerExecStart(context.Background(), execName, stdin, stdout, stderr); err != nil { - if execStartCheck.Detach { - return err - } - stdout.Write([]byte(err.Error() + "\r\n")) - logrus.Errorf("Error running exec in container: %v", err) - } - return nil -} - -func (s *containerRouter) postContainerExecResize(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - height, err := strconv.Atoi(r.Form.Get("h")) - if err != nil { - return err - } - width, err := strconv.Atoi(r.Form.Get("w")) - if err != nil { - return err - } - - return s.backend.ContainerExecResize(vars["name"], height, width) -} diff --git a/api/server/router/container/inspect.go b/api/server/router/container/inspect.go deleted file mode 100644 index dbbced7eee..0000000000 --- a/api/server/router/container/inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package container - -import ( - "net/http" - - "github.com/docker/docker/api/server/httputils" - "golang.org/x/net/context" -) - -// getContainersByName inspects container's configuration and serializes it as json. -func (s *containerRouter) getContainersByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - displaySize := httputils.BoolValue(r, "size") - - version := httputils.VersionFromContext(ctx) - json, err := s.backend.ContainerInspect(vars["name"], displaySize, version) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, json) -} diff --git a/api/server/router/image/backend.go b/api/server/router/image/backend.go deleted file mode 100644 index 08101736e0..0000000000 --- a/api/server/router/image/backend.go +++ /dev/null @@ -1,43 +0,0 @@ -package image - -import ( - "io" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/registry" - "golang.org/x/net/context" -) - -// Backend is all the methods that need to be implemented -// to provide image specific functionality. 
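// Backend is assembled from the four narrower interfaces declared
// below, so a consumer or a test double only has to depend on the slice
// it needs. A hypothetical stub faking just the registry side (the
// embedded interface satisfies the rest, panicking if those methods are
// ever called):
//
//	type fakeRegistry struct{ registryBackend }
//
//	func (fakeRegistry) PullImage(ctx context.Context, image, tag string,
//		metaHeaders map[string][]string, authConfig *types.AuthConfig,
//		outStream io.Writer) error {
//		return nil // pretend the pull succeeded
//	}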
-type Backend interface { - containerBackend - imageBackend - importExportBackend - registryBackend -} - -type containerBackend interface { - Commit(name string, config *backend.ContainerCommitConfig) (imageID string, err error) -} - -type imageBackend interface { - ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) - ImageHistory(imageName string) ([]*types.ImageHistory, error) - Images(filterArgs string, filter string, all bool) ([]*types.Image, error) - LookupImage(name string) (*types.ImageInspect, error) - TagImage(imageName, repository, tag string) error -} - -type importExportBackend interface { - LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error - ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error - ExportImage(names []string, outStream io.Writer) error -} - -type registryBackend interface { - PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error - PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error - SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, authConfig *types.AuthConfig, metaHeaders map[string][]string) (*registry.SearchResults, error) -} diff --git a/api/server/router/image/image.go b/api/server/router/image/image.go deleted file mode 100644 index 71c95e1e3d..0000000000 --- a/api/server/router/image/image.go +++ /dev/null @@ -1,49 +0,0 @@ -package image - -import ( - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/router" -) - -// imageRouter is a router to talk with the image controller -type imageRouter struct { - backend Backend - decoder httputils.ContainerDecoder - routes []router.Route -} - -// NewRouter initializes a new image router -func NewRouter(backend Backend, decoder httputils.ContainerDecoder) router.Router { - r := &imageRouter{ - backend: backend, - decoder: decoder, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the image controller -func (r *imageRouter) Routes() []router.Route { - return r.routes -} - -// initRoutes initializes the routes in the image router -func (r *imageRouter) initRoutes() { - r.routes = []router.Route{ - // GET - router.NewGetRoute("/images/json", r.getImagesJSON), - router.NewGetRoute("/images/search", r.getImagesSearch), - router.NewGetRoute("/images/get", r.getImagesGet), - router.NewGetRoute("/images/{name:.*}/get", r.getImagesGet), - router.NewGetRoute("/images/{name:.*}/history", r.getImagesHistory), - router.NewGetRoute("/images/{name:.*}/json", r.getImagesByName), - // POST - router.NewPostRoute("/commit", r.postCommit), - router.NewPostRoute("/images/load", r.postImagesLoad), - router.Cancellable(router.NewPostRoute("/images/create", r.postImagesCreate)), - router.Cancellable(router.NewPostRoute("/images/{name:.*}/push", r.postImagesPush)), - router.NewPostRoute("/images/{name:.*}/tag", r.postImagesTag), - // DELETE - router.NewDeleteRoute("/images/{name:.*}", r.deleteImages), - } -} diff --git a/api/server/router/image/image_routes.go b/api/server/router/image/image_routes.go deleted file mode 100644 index 18a36fda6d..0000000000 --- a/api/server/router/image/image_routes.go +++ /dev/null @@ -1,319 +0,0 @@ -package image - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "strconv" - "strings" 
- - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/versions" - "golang.org/x/net/context" -) - -func (s *imageRouter) postCommit(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - cname := r.Form.Get("container") - - pause := httputils.BoolValue(r, "pause") - version := httputils.VersionFromContext(ctx) - if r.FormValue("pause") == "" && versions.GreaterThanOrEqualTo(version, "1.13") { - pause = true - } - - c, _, _, err := s.decoder.DecodeConfig(r.Body) - if err != nil && err != io.EOF { //Do not fail if body is empty. - return err - } - if c == nil { - c = &container.Config{} - } - - commitCfg := &backend.ContainerCommitConfig{ - ContainerCommitConfig: types.ContainerCommitConfig{ - Pause: pause, - Repo: r.Form.Get("repo"), - Tag: r.Form.Get("tag"), - Author: r.Form.Get("author"), - Comment: r.Form.Get("comment"), - Config: c, - MergeConfigs: true, - }, - Changes: r.Form["changes"], - } - - imgID, err := s.backend.Commit(cname, commitCfg) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &types.ContainerCommitResponse{ - ID: string(imgID), - }) -} - -// Creates an image from Pull or from Import -func (s *imageRouter) postImagesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - var ( - image = r.Form.Get("fromImage") - repo = r.Form.Get("repo") - tag = r.Form.Get("tag") - message = r.Form.Get("message") - err error - output = ioutils.NewWriteFlusher(w) - ) - defer output.Close() - - w.Header().Set("Content-Type", "application/json") - - if image != "" { //pull - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &types.AuthConfig{} - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - // for a pull it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - authConfig = &types.AuthConfig{} - } - } - - err = s.backend.PullImage(ctx, image, tag, metaHeaders, authConfig, output) - } else { //import - src := r.Form.Get("fromSrc") - // 'err' MUST NOT be defined within this block, we need any error - // generated from the download to be available to the output - // stream processing below - err = s.backend.ImportImage(src, repo, tag, message, r.Body, output, r.Form["changes"]) - } - if err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - - return nil -} - -func (s *imageRouter) postImagesPush(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - if err := 
httputils.ParseForm(r); err != nil { - return err - } - authConfig := &types.AuthConfig{} - - authEncoded := r.Header.Get("X-Registry-Auth") - if authEncoded != "" { - // the new format is to handle the authConfig as a header - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - // to increase compatibility to existing api it is defaulting to be empty - authConfig = &types.AuthConfig{} - } - } else { - // the old format is supported for compatibility if there was no authConfig header - if err := json.NewDecoder(r.Body).Decode(authConfig); err != nil { - return fmt.Errorf("Bad parameters and missing X-Registry-Auth: %v", err) - } - } - - image := vars["name"] - tag := r.Form.Get("tag") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - - w.Header().Set("Content-Type", "application/json") - - if err := s.backend.PushImage(ctx, image, tag, metaHeaders, authConfig, output); err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - return nil -} - -func (s *imageRouter) getImagesGet(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - w.Header().Set("Content-Type", "application/x-tar") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - var names []string - if name, ok := vars["name"]; ok { - names = []string{name} - } else { - names = r.Form["names"] - } - - if err := s.backend.ExportImage(names, output); err != nil { - if !output.Flushed() { - return err - } - sf := streamformatter.NewJSONStreamFormatter() - output.Write(sf.FormatError(err)) - } - return nil -} - -func (s *imageRouter) postImagesLoad(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - quiet := httputils.BoolValueOrDefault(r, "quiet", true) - - if !quiet { - w.Header().Set("Content-Type", "application/json") - - output := ioutils.NewWriteFlusher(w) - defer output.Close() - if err := s.backend.LoadImage(r.Body, output, quiet); err != nil { - output.Write(streamformatter.NewJSONStreamFormatter().FormatError(err)) - } - return nil - } - return s.backend.LoadImage(r.Body, w, quiet) -} - -func (s *imageRouter) deleteImages(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - name := vars["name"] - - if strings.TrimSpace(name) == "" { - return fmt.Errorf("image name cannot be blank") - } - - force := httputils.BoolValue(r, "force") - prune := !httputils.BoolValue(r, "noprune") - - list, err := s.backend.ImageDelete(name, force, prune) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, list) -} - -func (s *imageRouter) getImagesByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - imageInspect, err := s.backend.LookupImage(vars["name"]) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, imageInspect) -} - -func (s *imageRouter) getImagesJSON(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - // FIXME: The filter parameter could just be a match filter - images, err := 
s.backend.Images(r.Form.Get("filters"), r.Form.Get("filter"), httputils.BoolValue(r, "all")) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, images) -} - -func (s *imageRouter) getImagesHistory(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - name := vars["name"] - history, err := s.backend.ImageHistory(name) - if err != nil { - return err - } - - return httputils.WriteJSON(w, http.StatusOK, history) -} - -func (s *imageRouter) postImagesTag(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := s.backend.TagImage(vars["name"], r.Form.Get("repo"), r.Form.Get("tag")); err != nil { - return err - } - w.WriteHeader(http.StatusCreated) - return nil -} - -func (s *imageRouter) getImagesSearch(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - var ( - config *types.AuthConfig - authEncoded = r.Header.Get("X-Registry-Auth") - headers = map[string][]string{} - ) - - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(&config); err != nil { - // for a search it is not an error if no auth was given - // to increase compatibility with the existing api it is defaulting to be empty - config = &types.AuthConfig{} - } - } - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - headers[k] = v - } - } - limit := registry.DefaultSearchLimit - if r.Form.Get("limit") != "" { - limitValue, err := strconv.Atoi(r.Form.Get("limit")) - if err != nil { - return err - } - limit = limitValue - } - query, err := s.backend.SearchRegistryForImages(ctx, r.Form.Get("filters"), r.Form.Get("term"), limit, config, headers) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, query.Results) -} diff --git a/api/server/router/local.go b/api/server/router/local.go deleted file mode 100644 index 7cb2a5a2f3..0000000000 --- a/api/server/router/local.go +++ /dev/null @@ -1,96 +0,0 @@ -package router - -import ( - "net/http" - - "github.com/docker/docker/api/server/httputils" - "golang.org/x/net/context" -) - -// localRoute defines an individual API route to connect -// with the docker daemon. It implements Route. -type localRoute struct { - method string - path string - handler httputils.APIFunc -} - -// Handler returns the APIFunc to let the server wrap it in middlewares. -func (l localRoute) Handler() httputils.APIFunc { - return l.handler -} - -// Method returns the http method that the route responds to. -func (l localRoute) Method() string { - return l.method -} - -// Path returns the subpath where the route responds to. -func (l localRoute) Path() string { - return l.path -} - -// NewRoute initializes a new local route for the router. -func NewRoute(method, path string, handler httputils.APIFunc) Route { - return localRoute{method, path, handler} -} - -// NewGetRoute initializes a new route with the http method GET. -func NewGetRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("GET", path, handler) -} - -// NewPostRoute initializes a new route with the http method POST. -func NewPostRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("POST", path, handler) -} - -// NewPutRoute initializes a new route with the http method PUT. 
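// These helpers all funnel into NewRoute with the method fixed; a
// handler is any httputils.APIFunc, so wiring up an endpoint takes one
// line. Sketch with a made-up ping handler (not part of this file):
//
//	func ping(ctx context.Context, w http.ResponseWriter,
//		r *http.Request, vars map[string]string) error {
//		_, err := w.Write([]byte("OK"))
//		return err
//	}
//
//	route := NewGetRoute("/_ping", ping)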
-func NewPutRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("PUT", path, handler) -} - -// NewDeleteRoute initializes a new route with the http method DELETE. -func NewDeleteRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("DELETE", path, handler) -} - -// NewOptionsRoute initializes a new route with the http method OPTIONS. -func NewOptionsRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("OPTIONS", path, handler) -} - -// NewHeadRoute initializes a new route with the http method HEAD. -func NewHeadRoute(path string, handler httputils.APIFunc) Route { - return NewRoute("HEAD", path, handler) -} - -func cancellableHandler(h httputils.APIFunc) httputils.APIFunc { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if notifier, ok := w.(http.CloseNotifier); ok { - notify := notifier.CloseNotify() - notifyCtx, cancel := context.WithCancel(ctx) - finished := make(chan struct{}) - defer close(finished) - ctx = notifyCtx - go func() { - select { - case <-notify: - cancel() - case <-finished: - } - }() - } - return h(ctx, w, r, vars) - } -} - -// Cancellable makes new route which embeds http.CloseNotifier feature to -// context.Context of handler. -func Cancellable(r Route) Route { - return localRoute{ - method: r.Method(), - path: r.Path(), - handler: cancellableHandler(r.Handler()), - } -} diff --git a/api/server/router/network/backend.go b/api/server/router/network/backend.go deleted file mode 100644 index 6e322fa378..0000000000 --- a/api/server/router/network/backend.go +++ /dev/null @@ -1,20 +0,0 @@ -package network - -import ( - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" -) - -// Backend is all the methods that need to be implemented -// to provide network specific functionality. 
-type Backend interface { - FindNetwork(idName string) (libnetwork.Network, error) - GetNetworkByName(idName string) (libnetwork.Network, error) - GetNetworksByID(partialID string) []libnetwork.Network - GetNetworks() []libnetwork.Network - CreateNetwork(nc types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) - ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error - DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error - DeleteNetwork(name string) error -} diff --git a/api/server/router/network/filter.go b/api/server/router/network/filter.go deleted file mode 100644 index 60a59a4f13..0000000000 --- a/api/server/router/network/filter.go +++ /dev/null @@ -1,98 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" -) - -type filterHandler func([]types.NetworkResource, string) ([]types.NetworkResource, error) - -var ( - // AcceptedFilters is an acceptable filters for validation - AcceptedFilters = map[string]bool{ - "driver": true, - "type": true, - "name": true, - "id": true, - "label": true, - } -) - -func filterNetworkByType(nws []types.NetworkResource, netType string) (retNws []types.NetworkResource, err error) { - switch netType { - case "builtin": - for _, nw := range nws { - if runconfig.IsPreDefinedNetwork(nw.Name) { - retNws = append(retNws, nw) - } - } - case "custom": - for _, nw := range nws { - if !runconfig.IsPreDefinedNetwork(nw.Name) { - retNws = append(retNws, nw) - } - } - default: - return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) - } - return retNws, nil -} - -// filterNetworks filters network list according to user specified filter -// and returns user chosen networks -func filterNetworks(nws []types.NetworkResource, filter filters.Args) ([]types.NetworkResource, error) { - // if filter is empty, return original network list - if filter.Len() == 0 { - return nws, nil - } - - if err := filter.Validate(AcceptedFilters); err != nil { - return nil, err - } - - displayNet := []types.NetworkResource{} - for _, nw := range nws { - if filter.Include("driver") { - if !filter.ExactMatch("driver", nw.Driver) { - continue - } - } - if filter.Include("name") { - if !filter.Match("name", nw.Name) { - continue - } - } - if filter.Include("id") { - if !filter.Match("id", nw.ID) { - continue - } - } - if filter.Include("label") { - if !filter.MatchKVList("label", nw.Labels) { - continue - } - } - displayNet = append(displayNet, nw) - } - - if filter.Include("type") { - var typeNet []types.NetworkResource - errFilter := filter.WalkValues("type", func(fval string) error { - passList, err := filterNetworkByType(displayNet, fval) - if err != nil { - return err - } - typeNet = append(typeNet, passList...) 
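// (WalkValues runs this closure once per value supplied for the "type"
// key, so a request passing both values, e.g.
// filters={"type":{"custom":true,"builtin":true}}, unions the passes
// into typeNet rather than intersecting them.)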
- return nil - }) - if errFilter != nil { - return nil, errFilter - } - displayNet = typeNet - } - - return displayNet, nil -} diff --git a/api/server/router/network/network.go b/api/server/router/network/network.go deleted file mode 100644 index 8688c3ed1f..0000000000 --- a/api/server/router/network/network.go +++ /dev/null @@ -1,42 +0,0 @@ -package network - -import ( - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/daemon/cluster" -) - -// networkRouter is a router to talk with the network controller -type networkRouter struct { - backend Backend - clusterProvider *cluster.Cluster - routes []router.Route -} - -// NewRouter initializes a new network router -func NewRouter(b Backend, c *cluster.Cluster) router.Router { - r := &networkRouter{ - backend: b, - clusterProvider: c, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the network controller -func (r *networkRouter) Routes() []router.Route { - return r.routes -} - -func (r *networkRouter) initRoutes() { - r.routes = []router.Route{ - // GET - router.NewGetRoute("/networks", r.getNetworksList), - router.NewGetRoute("/networks/{id:.*}", r.getNetwork), - // POST - router.NewPostRoute("/networks/create", r.postNetworkCreate), - router.NewPostRoute("/networks/{id:.*}/connect", r.postNetworkConnect), - router.NewPostRoute("/networks/{id:.*}/disconnect", r.postNetworkDisconnect), - // DELETE - router.NewDeleteRoute("/networks/{id:.*}", r.deleteNetwork), - } -} diff --git a/api/server/router/network/network_routes.go b/api/server/router/network/network_routes.go deleted file mode 100644 index 7a50e6c31b..0000000000 --- a/api/server/router/network/network_routes.go +++ /dev/null @@ -1,297 +0,0 @@ -package network - -import ( - "encoding/json" - "fmt" - "net/http" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/errors" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" -) - -func (n *networkRouter) getNetworksList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - filter := r.Form.Get("filters") - netFilters, err := filters.FromParam(filter) - if err != nil { - return err - } - - list := []types.NetworkResource{} - - if nr, err := n.clusterProvider.GetNetworks(); err == nil { - for _, nw := range nr { - list = append(list, nw) - } - } - - // Combine the network list returned by Docker daemon if it is not already - // returned by the cluster manager -SKIP: - for _, nw := range n.backend.GetNetworks() { - for _, nl := range list { - if nl.ID == nw.ID() { - continue SKIP - } - } - list = append(list, *n.buildNetworkResource(nw)) - } - - list, err = filterNetworks(list, netFilters) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, list) -} - -func (n *networkRouter) getNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - nw, err := n.backend.FindNetwork(vars["id"]) - if err != nil { - if nr, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil { - return httputils.WriteJSON(w, http.StatusOK, nr) - } - return err - } - return httputils.WriteJSON(w, http.StatusOK, n.buildNetworkResource(nw)) -} - -func (n *networkRouter) postNetworkCreate(ctx 
context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var create types.NetworkCreateRequest - - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&create); err != nil { - return err - } - - if _, err := n.clusterProvider.GetNetwork(create.Name); err == nil { - return libnetwork.NetworkNameError(create.Name) - } - - nw, err := n.backend.CreateNetwork(create) - if err != nil { - if _, ok := err.(libnetwork.ManagerRedirectError); !ok { - return err - } - id, err := n.clusterProvider.CreateNetwork(create) - if err != nil { - return err - } - nw = &types.NetworkCreateResponse{ID: id} - } - - return httputils.WriteJSON(w, http.StatusCreated, nw) -} - -func (n *networkRouter) postNetworkConnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var connect types.NetworkConnect - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&connect); err != nil { - return err - } - - nw, err := n.backend.FindNetwork(vars["id"]) - if err != nil { - return err - } - - if nw.Info().Dynamic() { - err := fmt.Errorf("operation not supported for swarm scoped networks") - return errors.NewRequestForbiddenError(err) - } - - return n.backend.ConnectContainerToNetwork(connect.Container, nw.Name(), connect.EndpointConfig) -} - -func (n *networkRouter) postNetworkDisconnect(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var disconnect types.NetworkDisconnect - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - if err := json.NewDecoder(r.Body).Decode(&disconnect); err != nil { - return err - } - - nw, err := n.backend.FindNetwork(vars["id"]) - if err != nil { - return err - } - - if nw.Info().Dynamic() { - err := fmt.Errorf("operation not supported for swarm scoped networks") - return errors.NewRequestForbiddenError(err) - } - - return n.backend.DisconnectContainerFromNetwork(disconnect.Container, nw, disconnect.Force) -} - -func (n *networkRouter) deleteNetwork(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if _, err := n.clusterProvider.GetNetwork(vars["id"]); err == nil { - return n.clusterProvider.RemoveNetwork(vars["id"]) - } - if err := n.backend.DeleteNetwork(vars["id"]); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} - -func (n *networkRouter) buildNetworkResource(nw libnetwork.Network) *types.NetworkResource { - r := &types.NetworkResource{} - if nw == nil { - return r - } - - info := nw.Info() - r.Name = nw.Name() - r.ID = nw.ID() - r.Scope = info.Scope() - if n.clusterProvider.IsManager() { - if _, err := n.clusterProvider.GetNetwork(nw.Name()); err == nil { - r.Scope = "swarm" - } - } else if info.Dynamic() { - r.Scope = "swarm" - } - r.Driver = nw.Type() - r.EnableIPv6 = info.IPv6Enabled() - r.Internal = info.Internal() - r.Options = info.DriverOptions() - r.Containers = make(map[string]types.EndpointResource) - buildIpamResources(r, info) - r.Internal = info.Internal() - r.Labels = info.Labels() - - epl := nw.Endpoints() - for _, e := range epl { - ei := e.Info() - if ei == nil { - continue 
- } - sb := ei.Sandbox() - key := "ep-" + e.ID() - if sb != nil { - key = sb.ContainerID() - } - - r.Containers[key] = buildEndpointResource(e) - } - return r -} - -func buildIpamResources(r *types.NetworkResource, nwInfo libnetwork.NetworkInfo) { - id, opts, ipv4conf, ipv6conf := nwInfo.IpamConfig() - - ipv4Info, ipv6Info := nwInfo.IpamInfo() - - r.IPAM.Driver = id - - r.IPAM.Options = opts - - r.IPAM.Config = []network.IPAMConfig{} - for _, ip4 := range ipv4conf { - if ip4.PreferredPool == "" { - continue - } - iData := network.IPAMConfig{} - iData.Subnet = ip4.PreferredPool - iData.IPRange = ip4.SubPool - iData.Gateway = ip4.Gateway - iData.AuxAddress = ip4.AuxAddresses - r.IPAM.Config = append(r.IPAM.Config, iData) - } - - if len(r.IPAM.Config) == 0 { - for _, ip4Info := range ipv4Info { - iData := network.IPAMConfig{} - iData.Subnet = ip4Info.IPAMData.Pool.String() - iData.Gateway = ip4Info.IPAMData.Gateway.String() - r.IPAM.Config = append(r.IPAM.Config, iData) - } - } - - hasIpv6Conf := false - for _, ip6 := range ipv6conf { - if ip6.PreferredPool == "" { - continue - } - hasIpv6Conf = true - iData := network.IPAMConfig{} - iData.Subnet = ip6.PreferredPool - iData.IPRange = ip6.SubPool - iData.Gateway = ip6.Gateway - iData.AuxAddress = ip6.AuxAddresses - r.IPAM.Config = append(r.IPAM.Config, iData) - } - - if !hasIpv6Conf { - for _, ip6Info := range ipv6Info { - iData := network.IPAMConfig{} - iData.Subnet = ip6Info.IPAMData.Pool.String() - iData.Gateway = ip6Info.IPAMData.Gateway.String() - r.IPAM.Config = append(r.IPAM.Config, iData) - } - } -} - -func buildEndpointResource(e libnetwork.Endpoint) types.EndpointResource { - er := types.EndpointResource{} - if e == nil { - return er - } - - er.EndpointID = e.ID() - er.Name = e.Name() - ei := e.Info() - if ei == nil { - return er - } - - if iface := ei.Iface(); iface != nil { - if mac := iface.MacAddress(); mac != nil { - er.MacAddress = mac.String() - } - if ip := iface.Address(); ip != nil && len(ip.IP) > 0 { - er.IPv4Address = ip.String() - } - - if ipv6 := iface.AddressIPv6(); ipv6 != nil && len(ipv6.IP) > 0 { - er.IPv6Address = ipv6.String() - } - } - return er -} diff --git a/api/server/router/plugin/backend.go b/api/server/router/plugin/backend.go deleted file mode 100644 index 0eb4f5b8f1..0000000000 --- a/api/server/router/plugin/backend.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build experimental - -package plugin - -import ( - "net/http" - - enginetypes "github.com/docker/engine-api/types" -) - -// Backend for Plugin -type Backend interface { - Disable(name string) error - Enable(name string) error - List() ([]enginetypes.Plugin, error) - Inspect(name string) (enginetypes.Plugin, error) - Remove(name string) error - Set(name string, args []string) error - Pull(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) (enginetypes.PluginPrivileges, error) - Push(name string, metaHeaders http.Header, authConfig *enginetypes.AuthConfig) error -} diff --git a/api/server/router/plugin/plugin.go b/api/server/router/plugin/plugin.go deleted file mode 100644 index 999ba6b746..0000000000 --- a/api/server/router/plugin/plugin.go +++ /dev/null @@ -1,23 +0,0 @@ -package plugin - -import "github.com/docker/docker/api/server/router" - -// pluginRouter is a router to talk with the plugin controller -type pluginRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new plugin router -func NewRouter(b Backend) router.Router { - r := &pluginRouter{ - backend: b, - } - r.initRoutes() - return r 
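// (initRoutes above is supplied by one of two build-tagged files shown
// further down: with the experimental tag, plugin_experimental.go
// registers the full plugin route table, while plugin_regular.go
// compiles in a no-op, so a non-experimental daemon exposes no /plugins
// endpoints at all.)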
-} - -// Routes returns the available routers to the plugin controller -func (r *pluginRouter) Routes() []router.Route { - return r.routes -} diff --git a/api/server/router/plugin/plugin_experimental.go b/api/server/router/plugin/plugin_experimental.go deleted file mode 100644 index 4e437ee65e..0000000000 --- a/api/server/router/plugin/plugin_experimental.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build experimental - -package plugin - -import ( - "github.com/docker/docker/api/server/router" -) - -func (r *pluginRouter) initRoutes() { - r.routes = []router.Route{ - router.NewGetRoute("/plugins", r.listPlugins), - router.NewGetRoute("/plugins/{name:.*}", r.inspectPlugin), - router.NewDeleteRoute("/plugins/{name:.*}", r.removePlugin), - router.NewPostRoute("/plugins/{name:.*}/enable", r.enablePlugin), // PATCH? - router.NewPostRoute("/plugins/{name:.*}/disable", r.disablePlugin), - router.NewPostRoute("/plugins/pull", r.pullPlugin), - router.NewPostRoute("/plugins/{name:.*}/push", r.pushPlugin), - router.NewPostRoute("/plugins/{name:.*}/set", r.setPlugin), - } -} diff --git a/api/server/router/plugin/plugin_regular.go b/api/server/router/plugin/plugin_regular.go deleted file mode 100644 index f987faecd2..0000000000 --- a/api/server/router/plugin/plugin_regular.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !experimental - -package plugin - -func (r *pluginRouter) initRoutes() {} - -// Backend is empty so that the package can compile in non-experimental -// (Needed by volume driver) -type Backend interface{} diff --git a/api/server/router/plugin/plugin_routes.go b/api/server/router/plugin/plugin_routes.go deleted file mode 100644 index dfdde72483..0000000000 --- a/api/server/router/plugin/plugin_routes.go +++ /dev/null @@ -1,103 +0,0 @@ -// +build experimental - -package plugin - -import ( - "encoding/base64" - "encoding/json" - "net/http" - "strings" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func (pr *pluginRouter) pullPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - // Get X-Registry-Auth - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &types.AuthConfig{} - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - authConfig = &types.AuthConfig{} - } - } - - privileges, err := pr.backend.Pull(r.FormValue("name"), metaHeaders, authConfig) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, privileges) -} - -func (pr *pluginRouter) enablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return pr.backend.Enable(vars["name"]) -} - -func (pr *pluginRouter) disablePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return pr.backend.Disable(vars["name"]) -} - -func (pr *pluginRouter) removePlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return pr.backend.Remove(vars["name"]) -} - -func (pr *pluginRouter) pushPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err 
!= nil { - return err - } - - metaHeaders := map[string][]string{} - for k, v := range r.Header { - if strings.HasPrefix(k, "X-Meta-") { - metaHeaders[k] = v - } - } - - // Get X-Registry-Auth - authEncoded := r.Header.Get("X-Registry-Auth") - authConfig := &types.AuthConfig{} - if authEncoded != "" { - authJSON := base64.NewDecoder(base64.URLEncoding, strings.NewReader(authEncoded)) - if err := json.NewDecoder(authJSON).Decode(authConfig); err != nil { - authConfig = &types.AuthConfig{} - } - } - return pr.backend.Push(vars["name"], metaHeaders, authConfig) -} - -func (pr *pluginRouter) setPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var args []string - if err := json.NewDecoder(r.Body).Decode(&args); err != nil { - return err - } - return pr.backend.Set(vars["name"], args) -} - -func (pr *pluginRouter) listPlugins(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - l, err := pr.backend.List() - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, l) -} - -func (pr *pluginRouter) inspectPlugin(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - result, err := pr.backend.Inspect(vars["name"]) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, result) -} diff --git a/api/server/router/router.go b/api/server/router/router.go deleted file mode 100644 index 2de25c27ff..0000000000 --- a/api/server/router/router.go +++ /dev/null @@ -1,19 +0,0 @@ -package router - -import "github.com/docker/docker/api/server/httputils" - -// Router defines an interface to specify a group of routes to add to the docker server. -type Router interface { - // Routes returns the list of routes to add to the docker server. - Routes() []Route -} - -// Route defines an individual API route in the docker server. -type Route interface { - // Handler returns the raw function to create the http handler. - Handler() httputils.APIFunc - // Method returns the http method that the route responds to. - Method() string - // Path returns the subpath where the route responds to. - Path() string -} diff --git a/api/server/router/swarm/backend.go b/api/server/router/swarm/backend.go deleted file mode 100644 index e51ce7ba6b..0000000000 --- a/api/server/router/swarm/backend.go +++ /dev/null @@ -1,26 +0,0 @@ -package swarm - -import ( - basictypes "github.com/docker/engine-api/types" - types "github.com/docker/engine-api/types/swarm" -) - -// Backend abstracts an swarm commands manager. 
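// The bare uint64 parameters on Update, UpdateService and UpdateNode
// below carry the object's last-known version, giving the backend
// optimistic concurrency control over stale writes. The routes read the
// value from the request's query string, so a client echoes back the
// version it saw at inspect time, e.g. (illustrative request):
//
//	POST /services/{id}/update?version=123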
-type Backend interface { - Init(req types.InitRequest) (string, error) - Join(req types.JoinRequest) error - Leave(force bool) error - Inspect() (types.Swarm, error) - Update(uint64, types.Spec, types.UpdateFlags) error - GetServices(basictypes.ServiceListOptions) ([]types.Service, error) - GetService(string) (types.Service, error) - CreateService(types.ServiceSpec, string) (string, error) - UpdateService(string, uint64, types.ServiceSpec, string) error - RemoveService(string) error - GetNodes(basictypes.NodeListOptions) ([]types.Node, error) - GetNode(string) (types.Node, error) - UpdateNode(string, uint64, types.NodeSpec) error - RemoveNode(string, bool) error - GetTasks(basictypes.TaskListOptions) ([]types.Task, error) - GetTask(string) (types.Task, error) -} diff --git a/api/server/router/swarm/cluster.go b/api/server/router/swarm/cluster.go deleted file mode 100644 index a67ffa9632..0000000000 --- a/api/server/router/swarm/cluster.go +++ /dev/null @@ -1,44 +0,0 @@ -package swarm - -import "github.com/docker/docker/api/server/router" - -// buildRouter is a router to talk with the build controller -type swarmRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new build router -func NewRouter(b Backend) router.Router { - r := &swarmRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routers to the swarm controller -func (sr *swarmRouter) Routes() []router.Route { - return sr.routes -} - -func (sr *swarmRouter) initRoutes() { - sr.routes = []router.Route{ - router.NewPostRoute("/swarm/init", sr.initCluster), - router.NewPostRoute("/swarm/join", sr.joinCluster), - router.NewPostRoute("/swarm/leave", sr.leaveCluster), - router.NewGetRoute("/swarm", sr.inspectCluster), - router.NewPostRoute("/swarm/update", sr.updateCluster), - router.NewGetRoute("/services", sr.getServices), - router.NewGetRoute("/services/{id:.*}", sr.getService), - router.NewPostRoute("/services/create", sr.createService), - router.NewPostRoute("/services/{id:.*}/update", sr.updateService), - router.NewDeleteRoute("/services/{id:.*}", sr.removeService), - router.NewGetRoute("/nodes", sr.getNodes), - router.NewGetRoute("/nodes/{id:.*}", sr.getNode), - router.NewDeleteRoute("/nodes/{id:.*}", sr.removeNode), - router.NewPostRoute("/nodes/{id:.*}/update", sr.updateNode), - router.NewGetRoute("/tasks", sr.getTasks), - router.NewGetRoute("/tasks/{id:.*}", sr.getTask), - } -} diff --git a/api/server/router/swarm/cluster_routes.go b/api/server/router/swarm/cluster_routes.go deleted file mode 100644 index 53b49b7d59..0000000000 --- a/api/server/router/swarm/cluster_routes.go +++ /dev/null @@ -1,261 +0,0 @@ -package swarm - -import ( - "encoding/json" - "fmt" - "net/http" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - basictypes "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - types "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -func (sr *swarmRouter) initCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var req types.InitRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return err - } - nodeID, err := sr.backend.Init(req) - if err != nil { - logrus.Errorf("Error initializing swarm: %v", err) - return err - } - return httputils.WriteJSON(w, http.StatusOK, nodeID) -} - -func (sr *swarmRouter) joinCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, 
vars map[string]string) error { - var req types.JoinRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return err - } - return sr.backend.Join(req) -} - -func (sr *swarmRouter) leaveCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - force := httputils.BoolValue(r, "force") - return sr.backend.Leave(force) -} - -func (sr *swarmRouter) inspectCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - swarm, err := sr.backend.Inspect() - if err != nil { - logrus.Errorf("Error getting swarm: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, swarm) -} - -func (sr *swarmRouter) updateCluster(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var swarm types.Spec - if err := json.NewDecoder(r.Body).Decode(&swarm); err != nil { - return err - } - - rawVersion := r.URL.Query().Get("version") - version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { - return fmt.Errorf("Invalid swarm version '%s': %s", rawVersion, err.Error()) - } - - var flags types.UpdateFlags - - if value := r.URL.Query().Get("rotateWorkerToken"); value != "" { - rot, err := strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for rotateWorkerToken: %s", value) - } - - flags.RotateWorkerToken = rot - } - - if value := r.URL.Query().Get("rotateManagerToken"); value != "" { - rot, err := strconv.ParseBool(value) - if err != nil { - return fmt.Errorf("invalid value for rotateManagerToken: %s", value) - } - - flags.RotateManagerToken = rot - } - - if err := sr.backend.Update(version, swarm, flags); err != nil { - logrus.Errorf("Error configuring swarm: %v", err) - return err - } - return nil -} - -func (sr *swarmRouter) getServices(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - services, err := sr.backend.GetServices(basictypes.ServiceListOptions{Filter: filter}) - if err != nil { - logrus.Errorf("Error getting services: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, services) -} - -func (sr *swarmRouter) getService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - service, err := sr.backend.GetService(vars["id"]) - if err != nil { - logrus.Errorf("Error getting service %s: %v", vars["id"], err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, service) -} - -func (sr *swarmRouter) createService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var service types.ServiceSpec - if err := json.NewDecoder(r.Body).Decode(&service); err != nil { - return err - } - - // Get returns "" if the header does not exist - encodedAuth := r.Header.Get("X-Registry-Auth") - - id, err := sr.backend.CreateService(service, encodedAuth) - if err != nil { - logrus.Errorf("Error creating service %s: %v", id, err) - return err - } - - return httputils.WriteJSON(w, http.StatusCreated, &basictypes.ServiceCreateResponse{ - ID: id, - }) -} - -func (sr *swarmRouter) updateService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var service types.ServiceSpec - if err := 
json.NewDecoder(r.Body).Decode(&service); err != nil { - return err - } - - rawVersion := r.URL.Query().Get("version") - version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { - return fmt.Errorf("Invalid service version '%s': %s", rawVersion, err.Error()) - } - - // Get returns "" if the header does not exist - encodedAuth := r.Header.Get("X-Registry-Auth") - - if err := sr.backend.UpdateService(vars["id"], version, service, encodedAuth); err != nil { - logrus.Errorf("Error updating service %s: %v", vars["id"], err) - return err - } - return nil -} - -func (sr *swarmRouter) removeService(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := sr.backend.RemoveService(vars["id"]); err != nil { - logrus.Errorf("Error removing service %s: %v", vars["id"], err) - return err - } - return nil -} - -func (sr *swarmRouter) getNodes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - nodes, err := sr.backend.GetNodes(basictypes.NodeListOptions{Filter: filter}) - if err != nil { - logrus.Errorf("Error getting nodes: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, nodes) -} - -func (sr *swarmRouter) getNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - node, err := sr.backend.GetNode(vars["id"]) - if err != nil { - logrus.Errorf("Error getting node %s: %v", vars["id"], err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, node) -} - -func (sr *swarmRouter) updateNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var node types.NodeSpec - if err := json.NewDecoder(r.Body).Decode(&node); err != nil { - return err - } - - rawVersion := r.URL.Query().Get("version") - version, err := strconv.ParseUint(rawVersion, 10, 64) - if err != nil { - return fmt.Errorf("Invalid node version '%s': %s", rawVersion, err.Error()) - } - - if err := sr.backend.UpdateNode(vars["id"], version, node); err != nil { - logrus.Errorf("Error updating node %s: %v", vars["id"], err) - return err - } - return nil -} - -func (sr *swarmRouter) removeNode(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - force := httputils.BoolValue(r, "force") - - if err := sr.backend.RemoveNode(vars["id"], force); err != nil { - logrus.Errorf("Error removing node %s: %v", vars["id"], err) - return err - } - return nil -} - -func (sr *swarmRouter) getTasks(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - filter, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - tasks, err := sr.backend.GetTasks(basictypes.TaskListOptions{Filter: filter}) - if err != nil { - logrus.Errorf("Error getting tasks: %v", err) - return err - } - - return httputils.WriteJSON(w, http.StatusOK, tasks) -} - -func (sr *swarmRouter) getTask(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - task, err := sr.backend.GetTask(vars["id"]) - if err != nil { - logrus.Errorf("Error getting task %s: %v", vars["id"], err) - return err - } - - return httputils.WriteJSON(w, 
http.StatusOK, task) -} diff --git a/api/server/router/system/backend.go b/api/server/router/system/backend.go deleted file mode 100644 index 83609ab55a..0000000000 --- a/api/server/router/system/backend.go +++ /dev/null @@ -1,20 +0,0 @@ -package system - -import ( - "time" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// Backend is the methods that need to be implemented to provide -// system specific functionality. -type Backend interface { - SystemInfo() (*types.Info, error) - SystemVersion() types.Version - SubscribeToEvents(since, until time.Time, ef filters.Args) ([]events.Message, chan interface{}) - UnsubscribeFromEvents(chan interface{}) - AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) -} diff --git a/api/server/router/system/system.go b/api/server/router/system/system.go deleted file mode 100644 index e5742c9fe8..0000000000 --- a/api/server/router/system/system.go +++ /dev/null @@ -1,38 +0,0 @@ -package system - -import ( - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/daemon/cluster" -) - -// systemRouter provides information about the Docker system overall. -// It gathers information about host, daemon and container events. -type systemRouter struct { - backend Backend - clusterProvider *cluster.Cluster - routes []router.Route -} - -// NewRouter initializes a new system router -func NewRouter(b Backend, c *cluster.Cluster) router.Router { - r := &systemRouter{ - backend: b, - clusterProvider: c, - } - - r.routes = []router.Route{ - router.NewOptionsRoute("/{anyroute:.*}", optionsHandler), - router.NewGetRoute("/_ping", pingHandler), - router.Cancellable(router.NewGetRoute("/events", r.getEvents)), - router.NewGetRoute("/info", r.getInfo), - router.NewGetRoute("/version", r.getVersion), - router.NewPostRoute("/auth", r.postAuth), - } - - return r -} - -// Routes returns all the API routes dedicated to the docker system -func (s *systemRouter) Routes() []router.Route { - return s.routes -} diff --git a/api/server/router/system/system_routes.go b/api/server/router/system/system_routes.go deleted file mode 100644 index 8050301c9d..0000000000 --- a/api/server/router/system/system_routes.go +++ /dev/null @@ -1,154 +0,0 @@ -package system - -import ( - "encoding/json" - "fmt" - "net/http" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - timetypes "github.com/docker/engine-api/types/time" - "golang.org/x/net/context" -) - -func optionsHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - w.WriteHeader(http.StatusOK) - return nil -} - -func pingHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - _, err := w.Write([]byte{'O', 'K'}) - return err -} - -func (s *systemRouter) getInfo(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - info, err := s.backend.SystemInfo() - if err != nil { - return err - } - if s.clusterProvider != nil { - info.Swarm = s.clusterProvider.Info() - } - - return httputils.WriteJSON(w, http.StatusOK, info) -} - -func (s *systemRouter) 
getVersion(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - info := s.backend.SystemVersion() - info.APIVersion = api.DefaultVersion - - return httputils.WriteJSON(w, http.StatusOK, info) -} - -func (s *systemRouter) getEvents(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - since, err := eventTime(r.Form.Get("since")) - if err != nil { - return err - } - until, err := eventTime(r.Form.Get("until")) - if err != nil { - return err - } - - var ( - timeout <-chan time.Time - onlyPastEvents bool - ) - if !until.IsZero() { - if until.Before(since) { - return errors.NewBadRequestError(fmt.Errorf("`since` time (%s) cannot be after `until` time (%s)", r.Form.Get("since"), r.Form.Get("until"))) - } - - now := time.Now() - - onlyPastEvents = until.Before(now) - - if !onlyPastEvents { - dur := until.Sub(now) - timeout = time.NewTimer(dur).C - } - } - - ef, err := filters.FromParam(r.Form.Get("filters")) - if err != nil { - return err - } - - w.Header().Set("Content-Type", "application/json") - output := ioutils.NewWriteFlusher(w) - defer output.Close() - output.Flush() - - enc := json.NewEncoder(output) - - buffered, l := s.backend.SubscribeToEvents(since, until, ef) - defer s.backend.UnsubscribeFromEvents(l) - - for _, ev := range buffered { - if err := enc.Encode(ev); err != nil { - return err - } - } - - if onlyPastEvents { - return nil - } - - for { - select { - case ev := <-l: - jev, ok := ev.(events.Message) - if !ok { - logrus.Warnf("unexpected event message: %q", ev) - continue - } - if err := enc.Encode(jev); err != nil { - return err - } - case <-timeout: - return nil - case <-ctx.Done(): - logrus.Debug("Client context cancelled, stop sending events") - return nil - } - } -} - -func (s *systemRouter) postAuth(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - var config *types.AuthConfig - err := json.NewDecoder(r.Body).Decode(&config) - r.Body.Close() - if err != nil { - return err - } - status, token, err := s.backend.AuthenticateToRegistry(ctx, config) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, &types.AuthResponse{ - Status: status, - IdentityToken: token, - }) -} - -func eventTime(formTime string) (time.Time, error) { - t, tNano, err := timetypes.ParseTimestamps(formTime, -1) - if err != nil { - return time.Time{}, err - } - if t == -1 { - return time.Time{}, nil - } - return time.Unix(t, tNano), nil -} diff --git a/api/server/router/volume/backend.go b/api/server/router/volume/backend.go deleted file mode 100644 index fbf5ed27f6..0000000000 --- a/api/server/router/volume/backend.go +++ /dev/null @@ -1,15 +0,0 @@ -package volume - -import ( - // TODO return types need to be refactored into pkg - "github.com/docker/engine-api/types" -) - -// Backend is the methods that need to be implemented to provide -// volume specific functionality -type Backend interface { - Volumes(filter string) ([]*types.Volume, []string, error) - VolumeInspect(name string) (*types.Volume, error) - VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) - VolumeRm(name string) error -} diff --git a/api/server/router/volume/volume.go b/api/server/router/volume/volume.go deleted file mode 100644 index 2683dcec52..0000000000 --- a/api/server/router/volume/volume.go +++ /dev/null @@ -1,35 +0,0 @@ -package volume - -import 
"github.com/docker/docker/api/server/router" - -// volumeRouter is a router to talk with the volumes controller -type volumeRouter struct { - backend Backend - routes []router.Route -} - -// NewRouter initializes a new volume router -func NewRouter(b Backend) router.Router { - r := &volumeRouter{ - backend: b, - } - r.initRoutes() - return r -} - -// Routes returns the available routes to the volumes controller -func (r *volumeRouter) Routes() []router.Route { - return r.routes -} - -func (r *volumeRouter) initRoutes() { - r.routes = []router.Route{ - // GET - router.NewGetRoute("/volumes", r.getVolumesList), - router.NewGetRoute("/volumes/{name:.*}", r.getVolumeByName), - // POST - router.NewPostRoute("/volumes/create", r.postVolumesCreate), - // DELETE - router.NewDeleteRoute("/volumes/{name:.*}", r.deleteVolumes), - } -} diff --git a/api/server/router/volume/volume_routes.go b/api/server/router/volume/volume_routes.go deleted file mode 100644 index 5aa0d4a7a7..0000000000 --- a/api/server/router/volume/volume_routes.go +++ /dev/null @@ -1,66 +0,0 @@ -package volume - -import ( - "encoding/json" - "net/http" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -func (v *volumeRouter) getVolumesList(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - volumes, warnings, err := v.backend.Volumes(r.Form.Get("filters")) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, &types.VolumesListResponse{Volumes: volumes, Warnings: warnings}) -} - -func (v *volumeRouter) getVolumeByName(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - volume, err := v.backend.VolumeInspect(vars["name"]) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusOK, volume) -} - -func (v *volumeRouter) postVolumesCreate(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - - if err := httputils.CheckForJSON(r); err != nil { - return err - } - - var req types.VolumeCreateRequest - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - return err - } - - volume, err := v.backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels) - if err != nil { - return err - } - return httputils.WriteJSON(w, http.StatusCreated, volume) -} - -func (v *volumeRouter) deleteVolumes(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if err := httputils.ParseForm(r); err != nil { - return err - } - if err := v.backend.VolumeRm(vars["name"]); err != nil { - return err - } - w.WriteHeader(http.StatusNoContent) - return nil -} diff --git a/api/server/router_swapper.go b/api/server/router_swapper.go deleted file mode 100644 index 1ecc7a7f39..0000000000 --- a/api/server/router_swapper.go +++ /dev/null @@ -1,30 +0,0 @@ -package server - -import ( - "net/http" - "sync" - - "github.com/gorilla/mux" -) - -// routerSwapper is an http.Handler that allows you to swap -// mux routers. -type routerSwapper struct { - mu sync.Mutex - router *mux.Router -} - -// Swap changes the old router with the new one. 
-func (rs *routerSwapper) Swap(newRouter *mux.Router) { - rs.mu.Lock() - rs.router = newRouter - rs.mu.Unlock() -} - -// ServeHTTP makes the routerSwapper implement the http.Handler interface. -func (rs *routerSwapper) ServeHTTP(w http.ResponseWriter, r *http.Request) { - rs.mu.Lock() - router := rs.router - rs.mu.Unlock() - router.ServeHTTP(w, r) -} diff --git a/api/server/server.go b/api/server/server.go deleted file mode 100644 index b67fe52846..0000000000 --- a/api/server/server.go +++ /dev/null @@ -1,207 +0,0 @@ -package server - -import ( - "crypto/tls" - "fmt" - "net" - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/middleware" - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/errors" - "github.com/gorilla/mux" - "golang.org/x/net/context" -) - -// versionMatcher defines a variable matcher to be parsed by the router -// when a request is about to be served. -const versionMatcher = "/v{version:[0-9.]+}" - -// Config provides the configuration for the API server -type Config struct { - Logging bool - EnableCors bool - CorsHeaders string - Version string - SocketGroup string - TLSConfig *tls.Config -} - -// Server contains instance details for the server -type Server struct { - cfg *Config - servers []*HTTPServer - routers []router.Router - routerSwapper *routerSwapper - middlewares []middleware.Middleware -} - -// New returns a new instance of the server based on the specified configuration. -// It allocates resources which will be needed for ServeAPI(ports, unix-sockets). -func New(cfg *Config) *Server { - return &Server{ - cfg: cfg, - } -} - -// UseMiddleware appends a new middleware to the request chain. -// This needs to be called before the API routes are configured. -func (s *Server) UseMiddleware(m middleware.Middleware) { - s.middlewares = append(s.middlewares, m) -} - -// Accept sets a listener the server accepts connections on. -func (s *Server) Accept(addr string, listeners ...net.Listener) { - for _, listener := range listeners { - httpServer := &HTTPServer{ - srv: &http.Server{ - Addr: addr, - }, - l: listener, - } - s.servers = append(s.servers, httpServer) - } -} - -// Close closes servers and thus stops receiving requests -func (s *Server) Close() { - for _, srv := range s.servers { - if err := srv.Close(); err != nil { - logrus.Error(err) - } - } -} - -// serveAPI loops through all initialized servers and spawns a goroutine -// with the Serve method for each. It also sets the routerSwapper as each server's Handler. -func (s *Server) serveAPI() error { - var chErrors = make(chan error, len(s.servers)) - for _, srv := range s.servers { - srv.srv.Handler = s.routerSwapper - go func(srv *HTTPServer) { - var err error - logrus.Infof("API listen on %s", srv.l.Addr()) - if err = srv.Serve(); err != nil && strings.Contains(err.Error(), "use of closed network connection") { - err = nil - } - chErrors <- err - }(srv) - } - - for i := 0; i < len(s.servers); i++ { - err := <-chErrors - if err != nil { - return err - } - } - - return nil -} - -// HTTPServer contains an instance of an HTTP server and the listener. -// srv *http.Server, contains configuration to create an HTTP server and a mux router with all api end points. -// l net.Listener, is a TCP or Socket listener that dispatches incoming requests to the router. -type HTTPServer struct { - srv *http.Server - l net.Listener -} - -// Serve starts listening for inbound requests.
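Editor's note: for orientation, this is roughly how a daemon front-end drives the Server type deleted above. A sketch using only the API shown in this hunk (New, Accept, Wait); the TCP address is made up and the usual imports ("net", "log", the api/server package) are assumed:

    cfg := &server.Config{Version: "1.24"}
    srv := server.New(cfg)

    l, err := net.Listen("tcp", "127.0.0.1:2375")
    if err != nil {
        log.Fatal(err)
    }
    srv.Accept("127.0.0.1:2375", l)

    // srv.InitRouter(false, routers...) would be called next; Wait then
    // runs serveAPI and reports the first listener error, if any.
    waitChan := make(chan error)
    go srv.Wait(waitChan)
    log.Println(<-waitChan)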
-func (s *HTTPServer) Serve() error { - return s.srv.Serve(s.l) -} - -// Close stops the HTTPServer from listening for inbound requests. -func (s *HTTPServer) Close() error { - return s.l.Close() -} - -func (s *Server) makeHTTPHandler(handler httputils.APIFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - // Define the context that we'll pass around to share info - // like the docker-request-id. - // - // The 'context' will be used for global data that should - // apply to all requests. Data that is specific to the - // immediate function being called should still be passed - // as 'args' on the function call. - ctx := context.Background() - handlerFunc := s.handleWithGlobalMiddlewares(handler) - - vars := mux.Vars(r) - if vars == nil { - vars = make(map[string]string) - } - - if err := handlerFunc(ctx, w, r, vars); err != nil { - logrus.Errorf("Handler for %s %s returned error: %v", r.Method, r.URL.Path, err) - httputils.MakeErrorHandler(err)(w, r) - } - } -} - -// InitRouter initializes the list of routers for the server. -// This method also enables the Go profiler if enableProfiler is true. -func (s *Server) InitRouter(enableProfiler bool, routers ...router.Router) { - for _, r := range routers { - s.routers = append(s.routers, r) - } - - m := s.createMux() - if enableProfiler { - profilerSetup(m) - } - s.routerSwapper = &routerSwapper{ - router: m, - } -} - -// createMux initializes the main router the server uses. -func (s *Server) createMux() *mux.Router { - m := mux.NewRouter() - - logrus.Debug("Registering routers") - for _, apiRouter := range s.routers { - for _, r := range apiRouter.Routes() { - f := s.makeHTTPHandler(r.Handler()) - - logrus.Debugf("Registering %s, %s", r.Method(), r.Path()) - m.Path(versionMatcher + r.Path()).Methods(r.Method()).Handler(f) - m.Path(r.Path()).Methods(r.Method()).Handler(f) - } - } - - err := errors.NewRequestNotFoundError(fmt.Errorf("page not found")) - notFoundHandler := httputils.MakeErrorHandler(err) - m.HandleFunc(versionMatcher+"/{path:.*}", notFoundHandler) - m.NotFoundHandler = notFoundHandler - - return m -} - -// Wait blocks the server goroutine until it exits. -// It sends an error message if there is any error during -// the API execution. -func (s *Server) Wait(waitChan chan error) { - if err := s.serveAPI(); err != nil { - logrus.Errorf("ServeAPI error: %v", err) - waitChan <- err - return - } - waitChan <- nil -} - -// DisableProfiler reloads the server mux without adding the profiler routes. -func (s *Server) DisableProfiler() { - s.routerSwapper.Swap(s.createMux()) -} - -// EnableProfiler reloads the server mux adding the profiler routes.
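Editor's note: createMux above registers every route twice, once under versionMatcher and once bare, so /v1.24/info and /info reach the same handler. A self-contained gorilla/mux sketch of that pattern (handler body and address are illustrative only):

    package main

    import (
        "fmt"
        "net/http"

        "github.com/gorilla/mux"
    )

    func main() {
        m := mux.NewRouter()
        h := func(w http.ResponseWriter, r *http.Request) {
            // mux.Vars(r)["version"] is empty for the unversioned form
            fmt.Fprintf(w, "version=%q\n", mux.Vars(r)["version"])
        }
        const versionMatcher = "/v{version:[0-9.]+}"
        m.Path(versionMatcher + "/info").Methods("GET").HandlerFunc(h)
        m.Path("/info").Methods("GET").HandlerFunc(h)
        http.ListenAndServe("127.0.0.1:8080", m)
    }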
-func (s *Server) EnableProfiler() { - m := s.createMux() - profilerSetup(m) - s.routerSwapper.Swap(m) -} diff --git a/api/server/server_test.go b/api/server/server_test.go deleted file mode 100644 index 13c28eab78..0000000000 --- a/api/server/server_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package server - -import ( - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/docker/docker/api" - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/api/server/middleware" - - "golang.org/x/net/context" -) - -func TestMiddlewares(t *testing.T) { - cfg := &Config{ - Version: "0.1omega2", - } - srv := &Server{ - cfg: cfg, - } - - srv.UseMiddleware(middleware.NewVersionMiddleware("0.1omega2", api.DefaultVersion, api.MinVersion)) - - req, _ := http.NewRequest("GET", "/containers/json", nil) - resp := httptest.NewRecorder() - ctx := context.Background() - - localHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - if httputils.VersionFromContext(ctx) == "" { - t.Fatalf("Expected version, got empty string") - } - - if sv := w.Header().Get("Server"); !strings.Contains(sv, "Docker/0.1omega2") { - t.Fatalf("Expected server version in the header `Docker/0.1omega2`, got %s", sv) - } - - return nil - } - - handlerFunc := srv.handleWithGlobalMiddlewares(localHandler) - if err := handlerFunc(ctx, resp, req, map[string]string{}); err != nil { - t.Fatal(err) - } -} diff --git a/api/types/backend/backend.go b/api/types/backend/backend.go deleted file mode 100644 index c7b4f01757..0000000000 --- a/api/types/backend/backend.go +++ /dev/null @@ -1,85 +0,0 @@ -// Package backend includes types to send information to server backends. -// TODO(calavera): This package is pending extraction to engine-api -// when the server package is clean of daemon dependencies. -package backend - -import ( - "io" - - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/engine-api/types" -) - -// ContainerAttachConfig holds the streams to use when connecting to a container to view logs. -type ContainerAttachConfig struct { - GetStreams func() (io.ReadCloser, io.Writer, io.Writer, error) - UseStdin bool - UseStdout bool - UseStderr bool - Logs bool - Stream bool - DetachKeys string - - // Used to signify that streams are multiplexed and therefore need a StdWriter to encode stdout/stderr messages accordingly. - // TODO @cpuguy83: This shouldn't be needed. It was only added so that http and websocket endpoints can use the same function, and the websocket function was not using a stdwriter prior to this change... - // HOWEVER, the websocket endpoint is using a single stream and SHOULD be encoded with stdout/stderr as is done for HTTP since it is still just a single stream. - // Since such a change is an API change unrelated to the current changeset we'll keep it as is here and change separately. - MuxStreams bool -} - -// ContainerLogsConfig holds configs for logging operations. Exists -// for users of the backend to pass it a logging configuration. -type ContainerLogsConfig struct { - types.ContainerLogsOptions - OutStream io.Writer -} - -// ContainerStatsConfig holds information for configuring the runtime -// behavior of a backend.ContainerStats() call. -type ContainerStatsConfig struct { - Stream bool - OutStream io.Writer - Version string -} - -// ExecInspect holds information about a running process started -// with docker exec.
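Editor's note: ContainerAttachConfig above defers stream acquisition to the caller through GetStreams. A hedged sketch of what a caller might supply, wiring the process's own stdio purely for illustration (imports "io" and "os" assumed):

    cfg := backend.ContainerAttachConfig{
        UseStdin:  true,
        UseStdout: true,
        UseStderr: true,
        Stream:    true,
        GetStreams: func() (io.ReadCloser, io.Writer, io.Writer, error) {
            // stdin is read from; stdout and stderr are written to
            return os.Stdin, os.Stdout, os.Stderr, nil
        },
    }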
-type ExecInspect struct { - ID string - Running bool - ExitCode *int - ProcessConfig *ExecProcessConfig - OpenStdin bool - OpenStderr bool - OpenStdout bool - CanRemove bool - ContainerID string - DetachKeys []byte -} - -// ExecProcessConfig holds information about the exec process -// running on the host. -type ExecProcessConfig struct { - Tty bool `json:"tty"` - Entrypoint string `json:"entrypoint"` - Arguments []string `json:"arguments"` - Privileged *bool `json:"privileged,omitempty"` - User string `json:"user,omitempty"` -} - -// ContainerCommitConfig is a wrapper around -// types.ContainerCommitConfig that also -// transports configuration changes for a container. -type ContainerCommitConfig struct { - types.ContainerCommitConfig - Changes []string -} - -// ProgressWriter is a helper struct -// to transport progress streams. -type ProgressWriter struct { - Output io.Writer - StdoutFormatter *streamformatter.StdoutFormatter - StderrFormatter *streamformatter.StderrFormatter - ProgressReaderFunc func(io.ReadCloser) io.ReadCloser -} diff --git a/builder/builder.go b/builder/builder.go deleted file mode 100644 index 125e56ab22..0000000000 --- a/builder/builder.go +++ /dev/null @@ -1,155 +0,0 @@ -// Package builder defines interfaces for any Docker builder to implement. -// -// Historically, only server-side Dockerfile interpreters existed. -// This package allows for other implementations of Docker builders. -package builder - -import ( - "io" - "os" - "time" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/image" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "golang.org/x/net/context" -) - -const ( - // DefaultDockerfileName is the default filename with Docker commands, read by docker build - DefaultDockerfileName string = "Dockerfile" -) - -// Context represents a file system tree. -type Context interface { - // Close signals that the filesystem tree won't be used anymore. - // For Context implementations using a temporary directory, it is recommended to - // delete the temporary directory in Close(). - Close() error - // Stat returns an entry corresponding to path if any. - // It is recommended to return an error if path was not found. - // If path is a symlink it also returns the path to the target file. - Stat(path string) (string, FileInfo, error) - // Open opens path from the context and returns a readable stream of it. - Open(path string) (io.ReadCloser, error) - // Walk walks the tree of the context with the function passed to it. - Walk(root string, walkFn WalkFunc) error -} - -// WalkFunc is the type of the function called for each file or directory visited by Context.Walk(). -type WalkFunc func(path string, fi FileInfo, err error) error - -// ModifiableContext represents a modifiable Context. -// TODO: remove this interface once we can get rid of Remove() -type ModifiableContext interface { - Context - // Remove deletes the entry specified by `path`. - // It is usual for directory entries to delete all their subentries. - Remove(path string) error -} - -// FileInfo extends os.FileInfo to allow retrieving an absolute path to the file. -// TODO: remove this interface once pkg/archive exposes a walk function that Context can use. -type FileInfo interface { - os.FileInfo - Path() string -} - -// PathFileInfo is a convenience struct that implements the FileInfo interface. -type PathFileInfo struct { - os.FileInfo - // FilePath holds the absolute path to the file.
- FilePath string - // Name holds the basename for the file. - FileName string -} - -// Path returns the absolute path to the file. -func (fi PathFileInfo) Path() string { - return fi.FilePath -} - -// Name returns the basename of the file. -func (fi PathFileInfo) Name() string { - if fi.FileName != "" { - return fi.FileName - } - return fi.FileInfo.Name() -} - -// Hashed defines an extra method intended for implementations of os.FileInfo. -type Hashed interface { - // Hash returns the hash of a file. - Hash() string - SetHash(string) -} - -// HashedFileInfo is a convenient struct that augments FileInfo with a field. -type HashedFileInfo struct { - FileInfo - // FileHash represents the hash of a file. - FileHash string -} - -// Hash returns the hash of a file. -func (fi HashedFileInfo) Hash() string { - return fi.FileHash -} - -// SetHash sets the hash of a file. -func (fi *HashedFileInfo) SetHash(h string) { - fi.FileHash = h -} - -// Backend abstracts calls to a Docker Daemon. -type Backend interface { - // TODO: use digest reference instead of name - - // GetImageOnBuild looks up a Docker image referenced by `name`. - GetImageOnBuild(name string) (Image, error) - // TagImage tags an image with newTag - TagImageWithReference(image.ID, reference.Named) error - // PullOnBuild tells Docker to pull image referenced by `name`. - PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (Image, error) - // ContainerAttachRaw attaches to container. - ContainerAttachRaw(cID string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error - // ContainerCreate creates a new Docker container and returns potential warnings - ContainerCreate(config types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) - // ContainerRm removes a container specified by `id`. - ContainerRm(name string, config *types.ContainerRmConfig) error - // Commit creates a new Docker image from an existing Docker container. - Commit(string, *backend.ContainerCommitConfig) (string, error) - // ContainerKill stops the container execution abruptly. - ContainerKill(containerID string, sig uint64) error - // ContainerStart starts a new container - ContainerStart(containerID string, hostConfig *container.HostConfig, validateHostname bool) error - // ContainerWait stops processing until the given container is stopped. - ContainerWait(containerID string, timeout time.Duration) (int, error) - // ContainerUpdateCmdOnBuild updates container.Path and container.Args - ContainerUpdateCmdOnBuild(containerID string, cmd []string) error - - // ContainerCopy copies/extracts a source FileInfo to a destination path inside a container - // specified by a container object. - // TODO: make an Extract method instead of passing `decompress` - // TODO: do not pass a FileInfo, instead refactor the archive package to export a Walk function that can be used - // with Context.Walk - //ContainerCopy(name string, res string) (io.ReadCloser, error) - // TODO: use copyBackend api - CopyOnBuild(containerID string, destPath string, src FileInfo, decompress bool) error -} - -// Image represents a Docker image used by the builder. -type Image interface { - ImageID() string - RunConfig() *container.Config -} - -// ImageCache abstracts an image cache store. -// (parent image, child runconfig) -> child image -type ImageCache interface { - // GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` - // and runconfig equals `cfg`. 
A cache miss is expected to return an empty ID and a nil error. - GetCachedImageOnBuild(parentID string, cfg *container.Config) (imageID string, err error) -} diff --git a/builder/context.go b/builder/context.go deleted file mode 100644 index 600f42319b..0000000000 --- a/builder/context.go +++ /dev/null @@ -1,260 +0,0 @@ -package builder - -import ( - "bufio" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/gitutils" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" -) - -// ValidateContextDirectory checks if all the contents of the directory -// can be read and returns an error if some files can't be read. -// Symlinks which point to non-existing files don't trigger an error. -func ValidateContextDirectory(srcPath string, excludes []string) error { - contextRoot, err := getContextRoot(srcPath) - if err != nil { - return err - } - return filepath.Walk(contextRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - if os.IsPermission(err) { - return fmt.Errorf("can't stat '%s'", filePath) - } - if os.IsNotExist(err) { - return nil - } - return err - } - - // skip this directory/file if it's not in the path, it won't get added to the context - if relFilePath, err := filepath.Rel(contextRoot, filePath); err != nil { - return err - } else if skip, err := fileutils.Matches(relFilePath, excludes); err != nil { - return err - } else if skip { - if f.IsDir() { - return filepath.SkipDir - } - return nil - } - - // skip checking if symlinks point to non-existing files, such symlinks can be useful - // also skip named pipes, because they hang on open - if f.Mode()&(os.ModeSymlink|os.ModeNamedPipe) != 0 { - return nil - } - - if !f.IsDir() { - currentFile, err := os.Open(filePath) - if err != nil && os.IsPermission(err) { - return fmt.Errorf("no permission to read from '%s'", filePath) - } - currentFile.Close() - } - return nil - }) -} - -// GetContextFromReader will read the contents of the given reader as either a -// Dockerfile or tar archive. Returns a tar archive used as a context and a -// path to the Dockerfile inside the tar. -func GetContextFromReader(r io.ReadCloser, dockerfileName string) (out io.ReadCloser, relDockerfile string, err error) { - buf := bufio.NewReader(r) - - magic, err := buf.Peek(archive.HeaderSize) - if err != nil && err != io.EOF { - return nil, "", fmt.Errorf("failed to peek context header from STDIN: %v", err) - } - - if archive.IsArchive(magic) { - return ioutils.NewReadCloserWrapper(buf, func() error { return r.Close() }), dockerfileName, nil - } - - // Input should be read as a Dockerfile.
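Editor's note: the non-archive branch below turns a bare Dockerfile on stdin into a single-file tar context. Usage mirrors TestGetContextFromReaderString further down in this patch (imports "io/ioutil", "log", "strings" and the builder package assumed):

    dockerfile := "FROM busybox\n"
    ctxTar, relPath, err := builder.GetContextFromReader(
        ioutil.NopCloser(strings.NewReader(dockerfile)), "")
    if err != nil {
        log.Fatal(err)
    }
    defer ctxTar.Close()
    // relPath is "Dockerfile"; ctxTar streams a one-entry tar archive.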
- tmpDir, err := ioutil.TempDir("", "docker-build-context-") - if err != nil { - return nil, "", fmt.Errorf("unable to create temporary context directory: %v", err) - } - - f, err := os.Create(filepath.Join(tmpDir, DefaultDockerfileName)) - if err != nil { - return nil, "", err - } - _, err = io.Copy(f, buf) - if err != nil { - f.Close() - return nil, "", err - } - - if err := f.Close(); err != nil { - return nil, "", err - } - if err := r.Close(); err != nil { - return nil, "", err - } - - tar, err := archive.Tar(tmpDir, archive.Uncompressed) - if err != nil { - return nil, "", err - } - - return ioutils.NewReadCloserWrapper(tar, func() error { - err := tar.Close() - os.RemoveAll(tmpDir) - return err - }), DefaultDockerfileName, nil - -} - -// GetContextFromGitURL uses a Git URL as context for a `docker build`. The -// git repo is cloned into a temporary directory used as the context directory. -// Returns the absolute path to the temporary context directory, the relative -// path of the dockerfile in that context directory, and a nil error on -// success. -func GetContextFromGitURL(gitURL, dockerfileName string) (absContextDir, relDockerfile string, err error) { - if _, err := exec.LookPath("git"); err != nil { - return "", "", fmt.Errorf("unable to find 'git': %v", err) - } - if absContextDir, err = gitutils.Clone(gitURL); err != nil { - return "", "", fmt.Errorf("unable to 'git clone' to temporary context directory: %v", err) - } - - return getDockerfileRelPath(absContextDir, dockerfileName) -} - -// GetContextFromURL uses a remote URL as context for a `docker build`. The -// remote resource is downloaded as either a Dockerfile or a tar archive. -// Returns the tar archive used for the context and a path of the -// dockerfile inside the tar. -func GetContextFromURL(out io.Writer, remoteURL, dockerfileName string) (io.ReadCloser, string, error) { - response, err := httputils.Download(remoteURL) - if err != nil { - return nil, "", fmt.Errorf("unable to download remote context %s: %v", remoteURL, err) - } - progressOutput := streamformatter.NewStreamFormatter().NewProgressOutput(out, true) - - // Pass the response body through a progress reader. - progReader := progress.NewProgressReader(response.Body, progressOutput, response.ContentLength, "", fmt.Sprintf("Downloading build context from remote url: %s", remoteURL)) - - return GetContextFromReader(ioutils.NewReadCloserWrapper(progReader, func() error { return response.Body.Close() }), dockerfileName) -} - -// GetContextFromLocalDir uses the given local directory as context for a -// `docker build`. Returns the absolute path to the local context directory, -// the relative path of the dockerfile in that context directory, and a nil -// error on success. -func GetContextFromLocalDir(localDir, dockerfileName string) (absContextDir, relDockerfile string, err error) { - // When using a local context directory, when the Dockerfile is specified - // with the `-f/--file` option then it is considered relative to the - // current directory and not the context directory.
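Editor's note: the four GetContextFrom* helpers correspond to the ways `docker build` can receive a context. A sketch of the dispatch a CLI might do; urlutil here is docker/docker/pkg/urlutil, and the variable names are made up:

    switch {
    case contextSpec == "-":
        tarCtx, relDockerfile, err = builder.GetContextFromReader(os.Stdin, dockerfileName)
    case urlutil.IsGitURL(contextSpec):
        contextDir, relDockerfile, err = builder.GetContextFromGitURL(contextSpec, dockerfileName)
    case urlutil.IsURL(contextSpec):
        tarCtx, relDockerfile, err = builder.GetContextFromURL(os.Stderr, contextSpec, dockerfileName)
    default:
        contextDir, relDockerfile, err = builder.GetContextFromLocalDir(contextSpec, dockerfileName)
    }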
- if dockerfileName != "" { - if dockerfileName, err = filepath.Abs(dockerfileName); err != nil { - return "", "", fmt.Errorf("unable to get absolute path to Dockerfile: %v", err) - } - } - - return getDockerfileRelPath(localDir, dockerfileName) -} - -// getDockerfileRelPath uses the given context directory for a `docker build` -// and returns the absolute path to the context directory, the relative path of -// the dockerfile in that context directory, and a nil error on success. -func getDockerfileRelPath(givenContextDir, givenDockerfile string) (absContextDir, relDockerfile string, err error) { - if absContextDir, err = filepath.Abs(givenContextDir); err != nil { - return "", "", fmt.Errorf("unable to get absolute context directory of given context directory %q: %v", givenContextDir, err) - } - - // The context dir might be a symbolic link, so follow it to the actual - // target directory. - // - // FIXME. We use isUNC (always false on non-Windows platforms) to work around - // an issue in golang. On Windows, EvalSymLinks does not work on UNC file - // paths (those starting with \\). This hack means that when using links - // on UNC paths, they will not be followed. - if !isUNC(absContextDir) { - absContextDir, err = filepath.EvalSymlinks(absContextDir) - if err != nil { - return "", "", fmt.Errorf("unable to evaluate symlinks in context path: %v", err) - } - } - - stat, err := os.Lstat(absContextDir) - if err != nil { - return "", "", fmt.Errorf("unable to stat context directory %q: %v", absContextDir, err) - } - - if !stat.IsDir() { - return "", "", fmt.Errorf("context must be a directory: %s", absContextDir) - } - - absDockerfile := givenDockerfile - if absDockerfile == "" { - // No -f/--file was specified so use the default relative to the - // context directory. - absDockerfile = filepath.Join(absContextDir, DefaultDockerfileName) - - // Just to be nice ;-) look for 'dockerfile' too but only - // use it if we found it, otherwise ignore this check - if _, err = os.Lstat(absDockerfile); os.IsNotExist(err) { - altPath := filepath.Join(absContextDir, strings.ToLower(DefaultDockerfileName)) - if _, err = os.Lstat(altPath); err == nil { - absDockerfile = altPath - } - } - } - - // If not already an absolute path, the Dockerfile path should be joined to - // the base directory. - if !filepath.IsAbs(absDockerfile) { - absDockerfile = filepath.Join(absContextDir, absDockerfile) - } - - // Evaluate symlinks in the path to the Dockerfile too. - // - // FIXME. We use isUNC (always false on non-Windows platforms) to work around - // an issue in golang. On Windows, EvalSymLinks does not work on UNC file - // paths (those starting with \\). This hack means that when using links - // on UNC paths, they will not be followed.
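Editor's note: the "../" prefix check a little further below is what keeps a -f path from escaping the build context. The mechanics in isolation; the paths are examples only:

    rel, _ := filepath.Rel("/home/me/ctx", "/etc/passwd")
    fmt.Println(rel) // "../../../etc/passwd" — caught by the HasPrefix(".."+separator) test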
- if !isUNC(absDockerfile) { - absDockerfile, err = filepath.EvalSymlinks(absDockerfile) - if err != nil { - return "", "", fmt.Errorf("unable to evaluate symlinks in Dockerfile path: %v", err) - } - } - - if _, err := os.Lstat(absDockerfile); err != nil { - if os.IsNotExist(err) { - return "", "", fmt.Errorf("Cannot locate Dockerfile: %q", absDockerfile) - } - return "", "", fmt.Errorf("unable to stat Dockerfile: %v", err) - } - - if relDockerfile, err = filepath.Rel(absContextDir, absDockerfile); err != nil { - return "", "", fmt.Errorf("unable to get relative Dockerfile path: %v", err) - } - - if strings.HasPrefix(relDockerfile, ".."+string(filepath.Separator)) { - return "", "", fmt.Errorf("The Dockerfile (%s) must be within the build context (%s)", givenDockerfile, givenContextDir) - } - - return absContextDir, relDockerfile, nil -} - -// isUNC returns true if the path is UNC (one starting \\). It always returns -// false on Linux. -func isUNC(path string) bool { - return runtime.GOOS == "windows" && strings.HasPrefix(path, `\\`) -} diff --git a/builder/context_test.go b/builder/context_test.go deleted file mode 100644 index 27d29d79f4..0000000000 --- a/builder/context_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package builder - -import ( - "archive/tar" - "bytes" - "io" - "io/ioutil" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/docker/docker/pkg/archive" -) - -var prepareEmpty = func(t *testing.T) (string, func()) { - return "", func() {} -} - -var prepareNoFiles = func(t *testing.T) (string, func()) { - return createTestTempDir(t, "", "builder-context-test") -} - -var prepareOneFile = func(t *testing.T) (string, func()) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - return contextDir, cleanup -} - -func testValidateContextDirectory(t *testing.T, prepare func(t *testing.T) (string, func()), excludes []string) { - contextDir, cleanup := prepare(t) - defer cleanup() - - err := ValidateContextDirectory(contextDir, excludes) - - if err != nil { - t.Fatalf("Error should be nil, got: %s", err) - } -} - -func TestGetContextFromLocalDirNoDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirNotExistingDir(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - fakePath := filepath.Join(contextDir, "fake") - - absContextDir, relDockerfile, err := GetContextFromLocalDir(fakePath, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirNotExistingDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - fakePath := filepath.Join(contextDir, "fake") - - absContextDir, relDockerfile, err := 
GetContextFromLocalDir(contextDir, fakePath) - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirWithNoDirectory(t *testing.T) { - contextDir, dirCleanup := createTestTempDir(t, "", "builder-context-test") - defer dirCleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - chdirCleanup := chdir(t, contextDir) - defer chdirCleanup() - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") - - if err != nil { - t.Fatalf("Error when getting context from local dir: %s", err) - } - - if absContextDir != contextDir { - t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestGetContextFromLocalDirWithDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, "") - - if err != nil { - t.Fatalf("Error when getting context from local dir: %s", err) - } - - if absContextDir != contextDir { - t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestGetContextFromLocalDirLocalFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - testFilename := createTestTempFile(t, contextDir, "tmpTest", "test", 0777) - - absContextDir, relDockerfile, err := GetContextFromLocalDir(testFilename, "") - - if err == nil { - t.Fatalf("Error should not be nil") - } - - if absContextDir != "" { - t.Fatalf("Absolute directory path should be empty, got: %s", absContextDir) - } - - if relDockerfile != "" { - t.Fatalf("Relative path to Dockerfile should be empty, got: %s", relDockerfile) - } -} - -func TestGetContextFromLocalDirWithCustomDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - chdirCleanup := chdir(t, contextDir) - defer chdirCleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - absContextDir, relDockerfile, err := GetContextFromLocalDir(contextDir, DefaultDockerfileName) - - if err != nil { - t.Fatalf("Error when getting context from local dir: %s", err) - } - - if absContextDir != contextDir { - t.Fatalf("Absolute directory path should be equal to %s, got: %s", contextDir, absContextDir) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path to dockerfile should be equal to %s, got: %s", DefaultDockerfileName, relDockerfile) - } - -} - -func TestGetContextFromReaderString(t *testing.T) { - tarArchive, relDockerfile, err := GetContextFromReader(ioutil.NopCloser(strings.NewReader(dockerfileContents)), "") - - if err != nil { - t.Fatalf("Error when 
executing GetContextFromReader: %s", err) - } - - tarReader := tar.NewReader(tarArchive) - - _, err = tarReader.Next() - - if err != nil { - t.Fatalf("Error when reading tar archive: %s", err) - } - - buff := new(bytes.Buffer) - buff.ReadFrom(tarReader) - contents := buff.String() - - _, err = tarReader.Next() - - if err != io.EOF { - t.Fatalf("Tar stream too long: %s", err) - } - - if err = tarArchive.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - - if dockerfileContents != contents { - t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestGetContextFromReaderTar(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-context-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("Error when creating tar: %s", err) - } - - tarArchive, relDockerfile, err := GetContextFromReader(tarStream, DefaultDockerfileName) - - if err != nil { - t.Fatalf("Error when executing GetContextFromReader: %s", err) - } - - tarReader := tar.NewReader(tarArchive) - - header, err := tarReader.Next() - - if err != nil { - t.Fatalf("Error when reading tar archive: %s", err) - } - - if header.Name != DefaultDockerfileName { - t.Fatalf("Dockerfile name should be: %s, got: %s", DefaultDockerfileName, header.Name) - } - - buff := new(bytes.Buffer) - buff.ReadFrom(tarReader) - contents := buff.String() - - _, err = tarReader.Next() - - if err != io.EOF { - t.Fatalf("Tar stream too long: %s", err) - } - - if err = tarArchive.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - - if dockerfileContents != contents { - t.Fatalf("Uncompressed tar archive does not equal: %s, got: %s", dockerfileContents, contents) - } - - if relDockerfile != DefaultDockerfileName { - t.Fatalf("Relative path not equals %s, got: %s", DefaultDockerfileName, relDockerfile) - } -} - -func TestValidateContextDirectoryEmptyContext(t *testing.T) { - // This isn't a valid test on Windows. See https://play.golang.org/p/RR6z6jxR81. - // The test will ultimately end up calling filepath.Abs(""). On Windows, - // golang will error. On Linux, golang will return /. Due to there being - // drive letters on Windows, this is probably the correct behaviour for - // Windows. 
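Editor's note: the skip below exists because an empty context ultimately calls filepath.Abs(""). Per the comment and playground link above, the result is platform-dependent; a sketch of the probe:

    p, err := filepath.Abs("")
    // Linux: err == nil and p is the working directory ("/" on the playground);
    // Windows: err is non-nil, per the play.golang.org link cited above.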
- if runtime.GOOS == "windows" { - t.Skip("Invalid test on Windows") - } - testValidateContextDirectory(t, prepareEmpty, []string{}) -} - -func TestValidateContextDirectoryContextWithNoFiles(t *testing.T) { - testValidateContextDirectory(t, prepareNoFiles, []string{}) -} - -func TestValidateContextDirectoryWithOneFile(t *testing.T) { - testValidateContextDirectory(t, prepareOneFile, []string{}) -} - -func TestValidateContextDirectoryWithOneFileExcludes(t *testing.T) { - testValidateContextDirectory(t, prepareOneFile, []string{DefaultDockerfileName}) -} diff --git a/builder/context_unix.go b/builder/context_unix.go deleted file mode 100644 index d1f72e0573..0000000000 --- a/builder/context_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package builder - -import ( - "path/filepath" -) - -func getContextRoot(srcPath string) (string, error) { - return filepath.Join(srcPath, "."), nil -} diff --git a/builder/context_windows.go b/builder/context_windows.go deleted file mode 100644 index b8ba2ba231..0000000000 --- a/builder/context_windows.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build windows - -package builder - -import ( - "path/filepath" - - "github.com/docker/docker/pkg/longpath" -) - -func getContextRoot(srcPath string) (string, error) { - cr, err := filepath.Abs(srcPath) - if err != nil { - return "", err - } - return longpath.AddPrefix(cr), nil -} diff --git a/builder/dockerfile/bflag.go b/builder/dockerfile/bflag.go deleted file mode 100644 index c2e6c7dae0..0000000000 --- a/builder/dockerfile/bflag.go +++ /dev/null @@ -1,176 +0,0 @@ -package dockerfile - -import ( - "fmt" - "strings" -) - -// FlagType is the type of the build flag -type FlagType int - -const ( - boolType FlagType = iota - stringType -) - -// BFlags contains all flag information for the builder -type BFlags struct { - Args []string // actual flags/args from cmd line - flags map[string]*Flag - used map[string]*Flag - Err error -} - -// Flag contains all information for a flag -type Flag struct { - bf *BFlags - name string - flagType FlagType - Value string -} - -// NewBFlags returns a new BFlags struct -func NewBFlags() *BFlags { - return &BFlags{ - flags: make(map[string]*Flag), - used: make(map[string]*Flag), - } -} - -// AddBool adds a bool flag to BFlags -// Note, any error will be generated when Parse() is called (see Parse). -func (bf *BFlags) AddBool(name string, def bool) *Flag { - flag := bf.addFlag(name, boolType) - if flag == nil { - return nil - } - if def { - flag.Value = "true" - } else { - flag.Value = "false" - } - return flag -} - -// AddString adds a string flag to BFlags -// Note, any error will be generated when Parse() is called (see Parse). -func (bf *BFlags) AddString(name string, def string) *Flag { - flag := bf.addFlag(name, stringType) - if flag == nil { - return nil - } - flag.Value = def - return flag -} - -// addFlag is a generic func used by the other AddXXX() funcs -// to add a new flag to the BFlags struct. -// Note, any error will be generated when Parse() is called (see Parse).
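Editor's note: BFlags is the builder's small parser for Dockerfile instruction flags. A usage sketch built only from the API above; the flag names are invented:

    bf := NewBFlags()
    from := bf.AddString("from", "")
    force := bf.AddBool("force", false)

    bf.Args = []string{"--from=builder", "--force"}
    if err := bf.Parse(); err != nil {
        log.Fatal(err)
    }
    // from.Value == "builder"; force.IsTrue() and both flags report IsUsed().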
-func (bf *BFlags) addFlag(name string, flagType FlagType) *Flag { - if _, ok := bf.flags[name]; ok { - bf.Err = fmt.Errorf("Duplicate flag defined: %s", name) - return nil - } - - newFlag := &Flag{ - bf: bf, - name: name, - flagType: flagType, - } - bf.flags[name] = newFlag - - return newFlag -} - -// IsUsed checks if the flag is used -func (fl *Flag) IsUsed() bool { - if _, ok := fl.bf.used[fl.name]; ok { - return true - } - return false -} - -// IsTrue checks if a bool flag is true -func (fl *Flag) IsTrue() bool { - if fl.flagType != boolType { - // Should never get here - panic(fmt.Errorf("Trying to use IsTrue on a non-boolean: %s", fl.name)) - } - return fl.Value == "true" -} - -// Parse parses and checks if the BFlags is valid. -// Any error noticed during the AddXXX() funcs will be generated/returned -// here. We do this because an error during AddXXX() is more like a -// compile time error so it doesn't matter too much when we stop our -// processing as long as we do stop it, so this allows the code -// around AddXXX() to be just: -// defFlag := AddString("description", "") -// w/o needing to add an if-statement around each one. -func (bf *BFlags) Parse() error { - // If there was an error while defining the possible flags - // go ahead and bubble it back up here since we didn't do it - // earlier in the processing - if bf.Err != nil { - return fmt.Errorf("Error setting up flags: %s", bf.Err) - } - - for _, arg := range bf.Args { - if !strings.HasPrefix(arg, "--") { - return fmt.Errorf("Arg should start with -- : %s", arg) - } - - if arg == "--" { - return nil - } - - arg = arg[2:] - value := "" - - index := strings.Index(arg, "=") - if index >= 0 { - value = arg[index+1:] - arg = arg[:index] - } - - flag, ok := bf.flags[arg] - if !ok { - return fmt.Errorf("Unknown flag: %s", arg) - } - - if _, ok = bf.used[arg]; ok { - return fmt.Errorf("Duplicate flag specified: %s", arg) - } - - bf.used[arg] = flag - - switch flag.flagType { - case boolType: - // value == "" is only ok if no "=" was specified - if index >= 0 && value == "" { - return fmt.Errorf("Missing a value on flag: %s", arg) - } - - lower := strings.ToLower(value) - if lower == "" { - flag.Value = "true" - } else if lower == "true" || lower == "false" { - flag.Value = lower - } else { - return fmt.Errorf("Expecting boolean value for flag %s, not: %s", arg, value) - } - - case stringType: - if index < 0 { - return fmt.Errorf("Missing a value on flag: %s", arg) - } - flag.Value = value - - default: - panic(fmt.Errorf("No idea what kind of flag we have! 
Should never get here!")) - } - - } - - return nil -} diff --git a/builder/dockerfile/bflag_test.go b/builder/dockerfile/bflag_test.go deleted file mode 100644 index 65cfceadd0..0000000000 --- a/builder/dockerfile/bflag_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package dockerfile - -import ( - "testing" -) - -func TestBuilderFlags(t *testing.T) { - var expected string - var err error - - // --- - - bf := NewBFlags() - bf.Args = []string{} - if err := bf.Parse(); err != nil { - t.Fatalf("Test1 of %q was supposed to work: %s", bf.Args, err) - } - - // --- - - bf = NewBFlags() - bf.Args = []string{"--"} - if err := bf.Parse(); err != nil { - t.Fatalf("Test2 of %q was supposed to work: %s", bf.Args, err) - } - - // --- - - bf = NewBFlags() - flStr1 := bf.AddString("str1", "") - flBool1 := bf.AddBool("bool1", false) - bf.Args = []string{} - if err = bf.Parse(); err != nil { - t.Fatalf("Test3 of %q was supposed to work: %s", bf.Args, err) - } - - if flStr1.IsUsed() == true { - t.Fatalf("Test3 - str1 was not used!") - } - if flBool1.IsUsed() == true { - t.Fatalf("Test3 - bool1 was not used!") - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test4 of %q was supposed to work: %s", bf.Args, err) - } - - if flStr1.Value != "HI" { - t.Fatalf("Str1 was supposed to default to: HI") - } - if flBool1.IsTrue() { - t.Fatalf("Bool1 was supposed to default to: false") - } - if flStr1.IsUsed() == true { - t.Fatalf("Str1 was not used!") - } - if flBool1.IsUsed() == true { - t.Fatalf("Bool1 was not used!") - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - bf.Args = []string{"--str1"} - - if err = bf.Parse(); err == nil { - t.Fatalf("Test %q was supposed to fail", bf.Args) - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - bf.Args = []string{"--str1="} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - expected = "" - if flStr1.Value != expected { - t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - bf.Args = []string{"--str1=BYE"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - expected = "BYE" - if flStr1.Value != expected { - t.Fatalf("Str1 (%q) should be: %q", flStr1.Value, expected) - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if !flBool1.IsTrue() { - t.Fatalf("Test-b1 Bool1 was supposed to be true") - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1=true"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if !flBool1.IsTrue() { - t.Fatalf("Test-b2 Bool1 was supposed to be true") - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1=false"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if flBool1.IsTrue() { - t.Fatalf("Test-b3 Bool1 was supposed to be false") - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1=false1"} - - if err = bf.Parse(); err == nil { - t.Fatalf("Test %q was supposed to 
fail", bf.Args) - } - - // --- - - bf = NewBFlags() - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool2"} - - if err = bf.Parse(); err == nil { - t.Fatalf("Test %q was supposed to fail", bf.Args) - } - - // --- - - bf = NewBFlags() - flStr1 = bf.AddString("str1", "HI") - flBool1 = bf.AddBool("bool1", false) - bf.Args = []string{"--bool1", "--str1=BYE"} - - if err = bf.Parse(); err != nil { - t.Fatalf("Test %q was supposed to work: %s", bf.Args, err) - } - - if flStr1.Value != "BYE" { - t.Fatalf("Teset %s, str1 should be BYE", bf.Args) - } - if !flBool1.IsTrue() { - t.Fatalf("Teset %s, bool1 should be true", bf.Args) - } -} diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go deleted file mode 100644 index 7bd9013fef..0000000000 --- a/builder/dockerfile/builder.go +++ /dev/null @@ -1,330 +0,0 @@ -package dockerfile - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile/parser" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "golang.org/x/net/context" -) - -var validCommitCommands = map[string]bool{ - "cmd": true, - "entrypoint": true, - "healthcheck": true, - "env": true, - "expose": true, - "label": true, - "onbuild": true, - "user": true, - "volume": true, - "workdir": true, -} - -// BuiltinAllowedBuildArgs is list of built-in allowed build args -var BuiltinAllowedBuildArgs = map[string]bool{ - "HTTP_PROXY": true, - "http_proxy": true, - "HTTPS_PROXY": true, - "https_proxy": true, - "FTP_PROXY": true, - "ftp_proxy": true, - "NO_PROXY": true, - "no_proxy": true, -} - -// Builder is a Dockerfile builder -// It implements the builder.Backend interface. -type Builder struct { - options *types.ImageBuildOptions - - Stdout io.Writer - Stderr io.Writer - Output io.Writer - - docker builder.Backend - context builder.Context - clientCtx context.Context - cancel context.CancelFunc - - dockerfile *parser.Node - runConfig *container.Config // runconfig for cmd, run, entrypoint etc. - flags *BFlags - tmpContainers map[string]struct{} - image string // imageID - noBaseImage bool - maintainer string - cmdSet bool - disableCommit bool - cacheBusted bool - allowedBuildArgs map[string]bool // list of build-time args that are allowed for expansion/substitution and passing to commands in 'run'. - directive parser.Directive - - // TODO: remove once docker.Commit can receive a tag - id string -} - -// BuildManager implements builder.Backend and is shared across all Builder objects. -type BuildManager struct { - backend builder.Backend -} - -// NewBuildManager creates a BuildManager. -func NewBuildManager(b builder.Backend) (bm *BuildManager) { - return &BuildManager{backend: b} -} - -// BuildFromContext builds a new image from a given context. 
-func (bm *BuildManager) BuildFromContext(ctx context.Context, src io.ReadCloser, remote string, buildOptions *types.ImageBuildOptions, pg backend.ProgressWriter) (string, error) { - buildContext, dockerfileName, err := builder.DetectContextFromRemoteURL(src, remote, pg.ProgressReaderFunc) - if err != nil { - return "", err - } - defer func() { - if err := buildContext.Close(); err != nil { - logrus.Debugf("[BUILDER] failed to remove temporary context: %v", err) - } - }() - if len(dockerfileName) > 0 { - buildOptions.Dockerfile = dockerfileName - } - b, err := NewBuilder(ctx, buildOptions, bm.backend, builder.DockerIgnoreContext{ModifiableContext: buildContext}, nil) - if err != nil { - return "", err - } - return b.build(pg.StdoutFormatter, pg.StderrFormatter, pg.Output) -} - -// NewBuilder creates a new Dockerfile builder from an optional dockerfile and a Config. -// If dockerfile is nil, the Dockerfile specified by Config.DockerfileName, -// will be read from the Context passed to Build(). -func NewBuilder(clientCtx context.Context, config *types.ImageBuildOptions, backend builder.Backend, buildContext builder.Context, dockerfile io.ReadCloser) (b *Builder, err error) { - if config == nil { - config = new(types.ImageBuildOptions) - } - if config.BuildArgs == nil { - config.BuildArgs = make(map[string]string) - } - ctx, cancel := context.WithCancel(clientCtx) - b = &Builder{ - clientCtx: ctx, - cancel: cancel, - options: config, - Stdout: os.Stdout, - Stderr: os.Stderr, - docker: backend, - context: buildContext, - runConfig: new(container.Config), - tmpContainers: map[string]struct{}{}, - id: stringid.GenerateNonCryptoID(), - allowedBuildArgs: make(map[string]bool), - directive: parser.Directive{ - EscapeSeen: false, - LookingForDirectives: true, - }, - } - parser.SetEscapeToken(parser.DefaultEscapeToken, &b.directive) // Assume the default token for escape - - if dockerfile != nil { - b.dockerfile, err = parser.Parse(dockerfile, &b.directive) - if err != nil { - return nil, err - } - } - - return b, nil -} - -// sanitizeRepoAndTags parses the raw "t" parameter received from the client -// to a slice of repoAndTag. -// It also validates each repoName and tag. -func sanitizeRepoAndTags(names []string) ([]reference.Named, error) { - var ( - repoAndTags []reference.Named - // This map is used for deduplicating the "-t" parameter. - uniqNames = make(map[string]struct{}) - ) - for _, repo := range names { - if repo == "" { - continue - } - - ref, err := reference.ParseNamed(repo) - if err != nil { - return nil, err - } - - ref = reference.WithDefaultTag(ref) - - if _, isCanonical := ref.(reference.Canonical); isCanonical { - return nil, errors.New("build tag cannot contain a digest") - } - - if _, isTagged := ref.(reference.NamedTagged); !isTagged { - ref, err = reference.WithTag(ref, reference.DefaultTag) - if err != nil { - return nil, err - } - } - - nameWithTag := ref.String() - - if _, exists := uniqNames[nameWithTag]; !exists { - uniqNames[nameWithTag] = struct{}{} - repoAndTags = append(repoAndTags, ref) - } - } - return repoAndTags, nil -} - -// build runs the Dockerfile builder from a context and a docker object that allows to make calls -// to Docker. -// -// This will (barring errors): -// -// * read the dockerfile from context -// * parse the dockerfile if not already parsed -// * walk the AST and execute it by dispatching to handlers. If Remove -// or ForceRemove is set, additional cleanup around containers happens after -// processing. -// * Tag image, if applicable. 
-// * Print a happy message and return the image ID. -// -func (b *Builder) build(stdout io.Writer, stderr io.Writer, out io.Writer) (string, error) { - b.Stdout = stdout - b.Stderr = stderr - b.Output = out - - // If Dockerfile was not parsed yet, extract it from the Context - if b.dockerfile == nil { - if err := b.readDockerfile(); err != nil { - return "", err - } - } - - repoAndTags, err := sanitizeRepoAndTags(b.options.Tags) - if err != nil { - return "", err - } - - if len(b.options.Labels) > 0 { - line := "LABEL " - for k, v := range b.options.Labels { - line += fmt.Sprintf("%q=%q ", k, v) - } - _, node, err := parser.ParseLine(line, &b.directive) - if err != nil { - return "", err - } - b.dockerfile.Children = append(b.dockerfile.Children, node) - } - - var shortImgID string - for i, n := range b.dockerfile.Children { - select { - case <-b.clientCtx.Done(): - logrus.Debug("Builder: build cancelled!") - fmt.Fprintf(b.Stdout, "Build cancelled") - return "", fmt.Errorf("Build cancelled") - default: - // Not cancelled yet, keep going... - } - if err := b.dispatch(i, n); err != nil { - if b.options.ForceRemove { - b.clearTmp() - } - return "", err - } - - shortImgID = stringid.TruncateID(b.image) - fmt.Fprintf(b.Stdout, " ---> %s\n", shortImgID) - if b.options.Remove { - b.clearTmp() - } - } - - // check if there are any leftover build-args that were passed but not - // consumed during build. Return an error, if there are any. - leftoverArgs := []string{} - for arg := range b.options.BuildArgs { - if !b.isBuildArgAllowed(arg) { - leftoverArgs = append(leftoverArgs, arg) - } - } - if len(leftoverArgs) > 0 { - return "", fmt.Errorf("One or more build-args %v were not consumed, failing build.", leftoverArgs) - } - - if b.image == "" { - return "", fmt.Errorf("No image was generated. Is your Dockerfile empty?") - } - - imageID := image.ID(b.image) - for _, rt := range repoAndTags { - if err := b.docker.TagImageWithReference(imageID, rt); err != nil { - return "", err - } - } - - fmt.Fprintf(b.Stdout, "Successfully built %s\n", shortImgID) - return b.image, nil -} - -// Cancel cancels an ongoing Dockerfile build. -func (b *Builder) Cancel() { - b.cancel() -} - -// BuildFromConfig builds directly from `changes`, treating it as if it were the contents of a Dockerfile -// It will: -// - Call parse.Parse() to get an AST root for the concatenated Dockerfile entries. -// - Do build by calling builder.dispatch() to call all entries' handling routines -// -// BuildFromConfig is used by the /commit endpoint, with the changes -// coming from the query parameter of the same name. -// -// TODO: Remove? 
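For instance, a /commit request carrying two changes would be handled roughly as follows (ctr.Config standing in for the committed container's config):

    changes := []string{`CMD ["nginx", "-g", "daemon off;"]`, "ENV DEBUG=1"}
    newConfig, err := BuildFromConfig(ctr.Config, changes)
    // each change must be one of the validCommitCommands declared at the top of this file
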
-func BuildFromConfig(config *container.Config, changes []string) (*container.Config, error) { - b, err := NewBuilder(context.Background(), nil, nil, nil, nil) - if err != nil { - return nil, err - } - - ast, err := parser.Parse(bytes.NewBufferString(strings.Join(changes, "\n")), &b.directive) - if err != nil { - return nil, err - } - - // ensure that the commands are valid - for _, n := range ast.Children { - if !validCommitCommands[n.Value] { - return nil, fmt.Errorf("%s is not a valid change command", n.Value) - } - } - - b.runConfig = config - b.Stdout = ioutil.Discard - b.Stderr = ioutil.Discard - b.disableCommit = true - - for i, n := range ast.Children { - if err := b.dispatch(i, n); err != nil { - return nil, err - } - } - - return b.runConfig, nil -} diff --git a/builder/dockerfile/builder_unix.go b/builder/dockerfile/builder_unix.go deleted file mode 100644 index 76a7ce74f9..0000000000 --- a/builder/dockerfile/builder_unix.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows - -package dockerfile - -var defaultShell = []string{"/bin/sh", "-c"} diff --git a/builder/dockerfile/builder_windows.go b/builder/dockerfile/builder_windows.go deleted file mode 100644 index 37e9fbcf4b..0000000000 --- a/builder/dockerfile/builder_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package dockerfile - -var defaultShell = []string{"cmd", "/S", "/C"} diff --git a/builder/dockerfile/command/command.go b/builder/dockerfile/command/command.go deleted file mode 100644 index f23c6874b5..0000000000 --- a/builder/dockerfile/command/command.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package command contains the set of Dockerfile commands. -package command - -// Define constants for the command strings -const ( - Add = "add" - Arg = "arg" - Cmd = "cmd" - Copy = "copy" - Entrypoint = "entrypoint" - Env = "env" - Expose = "expose" - From = "from" - Healthcheck = "healthcheck" - Label = "label" - Maintainer = "maintainer" - Onbuild = "onbuild" - Run = "run" - Shell = "shell" - StopSignal = "stopsignal" - User = "user" - Volume = "volume" - Workdir = "workdir" -) - -// Commands is list of all Dockerfile commands -var Commands = map[string]struct{}{ - Add: {}, - Arg: {}, - Cmd: {}, - Copy: {}, - Entrypoint: {}, - Env: {}, - Expose: {}, - From: {}, - Healthcheck: {}, - Label: {}, - Maintainer: {}, - Onbuild: {}, - Run: {}, - Shell: {}, - StopSignal: {}, - User: {}, - Volume: {}, - Workdir: {}, -} diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go deleted file mode 100644 index 3e1bb822be..0000000000 --- a/builder/dockerfile/dispatchers.go +++ /dev/null @@ -1,758 +0,0 @@ -package dockerfile - -// This file contains the dispatchers for each command. Note that -// `nullDispatch` is not actually a command, but support for commands we parse -// but do nothing with. -// -// See evaluator.go for a higher level discussion of the whole evaluator -// package. - -import ( - "fmt" - "regexp" - "runtime" - "sort" - "strconv" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api" - "github.com/docker/docker/builder" - "github.com/docker/docker/pkg/signal" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// ENV foo bar -// -// Sets the environment variable foo to bar, also makes interpolation -// in the dockerfile available from the next statement on via ${foo}. 
-// -func env(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("ENV") - } - - if len(args)%2 != 0 { - // should never get here, but just in case - return errTooManyArguments("ENV") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - // TODO/FIXME/NOT USED - // Just here to show how to use the builder flags stuff within the - // context of a builder command. Will remove once we actually add - // a builder command to something! - /* - flBool1 := b.flags.AddBool("bool1", false) - flStr1 := b.flags.AddString("str1", "HI") - - if err := b.flags.Parse(); err != nil { - return err - } - - fmt.Printf("Bool1:%v\n", flBool1) - fmt.Printf("Str1:%v\n", flStr1) - */ - - commitStr := "ENV" - - for j := 0; j < len(args); j++ { - // name ==> args[j] - // value ==> args[j+1] - newVar := args[j] + "=" + args[j+1] + "" - commitStr += " " + newVar - - gotOne := false - for i, envVar := range b.runConfig.Env { - envParts := strings.SplitN(envVar, "=", 2) - if envParts[0] == args[j] { - b.runConfig.Env[i] = newVar - gotOne = true - break - } - } - if !gotOne { - b.runConfig.Env = append(b.runConfig.Env, newVar) - } - j++ - } - - return b.commit("", b.runConfig.Cmd, commitStr) -} - -// MAINTAINER some text -// -// Sets the maintainer metadata. -func maintainer(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("MAINTAINER") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - b.maintainer = args[0] - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("MAINTAINER %s", b.maintainer)) -} - -// LABEL some json data describing the image -// -// Sets the Label variable foo to bar, -// -func label(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("LABEL") - } - if len(args)%2 != 0 { - // should never get here, but just in case - return errTooManyArguments("LABEL") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - commitStr := "LABEL" - - if b.runConfig.Labels == nil { - b.runConfig.Labels = map[string]string{} - } - - for j := 0; j < len(args); j++ { - // name ==> args[j] - // value ==> args[j+1] - newVar := args[j] + "=" + args[j+1] + "" - commitStr += " " + newVar - - b.runConfig.Labels[args[j]] = args[j+1] - j++ - } - return b.commit("", b.runConfig.Cmd, commitStr) -} - -// ADD foo /path -// -// Add the file 'foo' to '/path'. Tarball and Remote URL (git, http) handling -// exist here. If you do not wish to have this automatic handling, use COPY. -// -func add(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) < 2 { - return errAtLeastOneArgument("ADD") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - return b.runContextCommand(args, true, true, "ADD") -} - -// COPY foo /path -// -// Same as 'ADD' but without the tar and remote url handling. -// -func dispatchCopy(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) < 2 { - return errAtLeastOneArgument("COPY") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - return b.runContextCommand(args, false, false, "COPY") -} - -// FROM imagename -// -// This sets the image the dockerfile will build on top of. 
-// -func from(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("FROM") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - name := args[0] - - var ( - image builder.Image - err error - ) - - // Windows cannot support a container with no base image. - if name == api.NoBaseImageSpecifier { - if runtime.GOOS == "windows" { - return fmt.Errorf("Windows does not support FROM scratch") - } - b.image = "" - b.noBaseImage = true - } else { - // TODO: don't use `name`, instead resolve it to a digest - if !b.options.PullParent { - image, err = b.docker.GetImageOnBuild(name) - // TODO: shouldn't we error out if error is different from "not found" ? - } - if image == nil { - image, err = b.docker.PullOnBuild(b.clientCtx, name, b.options.AuthConfigs, b.Output) - if err != nil { - return err - } - } - } - - return b.processImageFrom(image) -} - -// ONBUILD RUN echo yo -// -// ONBUILD triggers run when the image is used in a FROM statement. -// -// ONBUILD handling has a lot of special-case functionality, the heading in -// evaluator.go and comments around dispatch() in the same file explain the -// special cases. search for 'OnBuild' in internals.go for additional special -// cases. -// -func onbuild(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("ONBUILD") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - triggerInstruction := strings.ToUpper(strings.TrimSpace(args[0])) - switch triggerInstruction { - case "ONBUILD": - return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", triggerInstruction) - } - - original = regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(original, "") - - b.runConfig.OnBuild = append(b.runConfig.OnBuild, original) - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ONBUILD %s", original)) -} - -// WORKDIR /tmp -// -// Set the working directory for future RUN/CMD/etc statements. -// -func workdir(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("WORKDIR") - } - - err := b.flags.Parse() - if err != nil { - return err - } - - // This is from the Dockerfile and will not necessarily be in platform - // specific semantics, hence ensure it is converted. - b.runConfig.WorkingDir, err = normaliseWorkdir(b.runConfig.WorkingDir, args[0]) - if err != nil { - return err - } - - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("WORKDIR %v", b.runConfig.WorkingDir)) -} - -// RUN some command yo -// -// run a command and commit the image. Args are automatically prepended with -// the current SHELL which defaults to 'sh -c' under linux or 'cmd /S /C' under -// Windows, in the event there is only one argument The difference in processing: -// -// RUN echo hi # sh -c echo hi (Linux) -// RUN echo hi # cmd /S /C echo hi (Windows) -// RUN [ "echo", "hi" ] # echo hi -// -func run(b *Builder, args []string, attributes map[string]bool, original string) error { - if b.image == "" && !b.noBaseImage { - return fmt.Errorf("Please provide a source image with `from` prior to run") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - args = handleJSONArgs(args, attributes) - - if !attributes["json"] { - args = append(getShell(b.runConfig), args...) 
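    // e.g. shell-form `RUN apt-get update` becomes
    //   ["/bin/sh", "-c", "apt-get update"] on Linux (defaultShell, builder_unix.go),
    // while the JSON form `RUN ["apt-get", "update"]` is used verbatim.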
- } - config := &container.Config{ - Cmd: strslice.StrSlice(args), - Image: b.image, - } - - // stash the cmd - cmd := b.runConfig.Cmd - if len(b.runConfig.Entrypoint) == 0 && len(b.runConfig.Cmd) == 0 { - b.runConfig.Cmd = config.Cmd - } - - // stash the config environment - env := b.runConfig.Env - - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - defer func(env []string) { b.runConfig.Env = env }(env) - - // derive the net build-time environment for this run. We let config - // environment override the build time environment. - // This means that we take the b.buildArgs list of env vars and remove - // any of those variables that are defined as part of the container. In other - // words, anything in b.Config.Env. What's left is the list of build-time env - // vars that we need to add to each RUN command - note the list could be empty. - // - // We don't persist the build time environment with container's config - // environment, but just sort and prepend it to the command string at time - // of commit. - // This helps with tracing back the image's actual environment at the time - // of RUN, without leaking it to the final image. It also aids cache - // lookup for same image built with same build time environment. - cmdBuildEnv := []string{} - configEnv := runconfigopts.ConvertKVStringsToMap(b.runConfig.Env) - for key, val := range b.options.BuildArgs { - if !b.isBuildArgAllowed(key) { - // skip build-args that are not in allowed list, meaning they have - // not been defined by an "ARG" Dockerfile command yet. - // This is an error condition but only if there is no "ARG" in the entire - // Dockerfile, so we'll generate any necessary errors after we parsed - // the entire file (see 'leftoverArgs' processing in evaluator.go ) - continue - } - if _, ok := configEnv[key]; !ok { - cmdBuildEnv = append(cmdBuildEnv, fmt.Sprintf("%s=%s", key, val)) - } - } - - // derive the command to use for probeCache() and to commit in this container. - // Note that we only do this if there are any build-time env vars. Also, we - // use the special argument "|#" at the start of the args array. This will - // avoid conflicts with any RUN command since commands can not - // start with | (vertical bar). The "#" (number of build envs) is there to - // help ensure proper cache matches. We don't want a RUN command - // that starts with "foo=abc" to be considered part of a build-time env var. - saveCmd := config.Cmd - if len(cmdBuildEnv) > 0 { - sort.Strings(cmdBuildEnv) - tmpEnv := append([]string{fmt.Sprintf("|%d", len(cmdBuildEnv))}, cmdBuildEnv...) - saveCmd = strslice.StrSlice(append(tmpEnv, saveCmd...)) - } - - b.runConfig.Cmd = saveCmd - hit, err := b.probeCache() - if err != nil { - return err - } - if hit { - return nil - } - - // set Cmd manually, this is special case only for Dockerfiles - b.runConfig.Cmd = config.Cmd - // set build-time environment for 'run'. - b.runConfig.Env = append(b.runConfig.Env, cmdBuildEnv...) - // set config as already being escaped, this prevents double escaping on windows - b.runConfig.ArgsEscaped = true - - logrus.Debugf("[BUILDER] Command to be executed: %v", b.runConfig.Cmd) - - cID, err := b.create() - if err != nil { - return err - } - - if err := b.run(cID); err != nil { - return err - } - - // revert to original config environment and set the command string to - // have the build-time env vars in it (if any) so that future cache look-ups - // properly match it. 
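    // e.g. with two consumed build args, the probe/commit command takes the shape
    //   ["|2", "BAR=2", "FOO=1", "/bin/sh", "-c", "apt-get update"]
    // i.e. "|<count>", then the sorted build-time env, then the original command.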
- b.runConfig.Env = env - b.runConfig.Cmd = saveCmd - return b.commit(cID, cmd, "run") -} - -// CMD foo -// -// Set the default command to run in the container (which may be empty). -// Argument handling is the same as RUN. -// -func cmd(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { - return err - } - - cmdSlice := handleJSONArgs(args, attributes) - - if !attributes["json"] { - cmdSlice = append(getShell(b.runConfig), cmdSlice...) - } - - b.runConfig.Cmd = strslice.StrSlice(cmdSlice) - - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("CMD %q", cmdSlice)); err != nil { - return err - } - - if len(args) != 0 { - b.cmdSet = true - } - - return nil -} - -// parseOptInterval(flag) is the duration of flag.Value, or 0 if -// empty. An error is reported if the value is given and is not positive. -func parseOptInterval(f *Flag) (time.Duration, error) { - s := f.Value - if s == "" { - return 0, nil - } - d, err := time.ParseDuration(s) - if err != nil { - return 0, err - } - if d <= 0 { - return 0, fmt.Errorf("Interval %#v must be positive", f.name) - } - return d, nil -} - -// HEALTHCHECK foo -// -// Set the default healthcheck command to run in the container (which may be empty). -// Argument handling is the same as RUN. -// -func healthcheck(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return fmt.Errorf("HEALTHCHECK requires an argument") - } - typ := strings.ToUpper(args[0]) - args = args[1:] - if typ == "NONE" { - if len(args) != 0 { - return fmt.Errorf("HEALTHCHECK NONE takes no arguments") - } - test := strslice.StrSlice{typ} - b.runConfig.Healthcheck = &container.HealthConfig{ - Test: test, - } - } else { - if b.runConfig.Healthcheck != nil { - oldCmd := b.runConfig.Healthcheck.Test - if len(oldCmd) > 0 && oldCmd[0] != "NONE" { - fmt.Fprintf(b.Stdout, "Note: overriding previous HEALTHCHECK: %v\n", oldCmd) - } - } - - healthcheck := container.HealthConfig{} - - flInterval := b.flags.AddString("interval", "") - flTimeout := b.flags.AddString("timeout", "") - flRetries := b.flags.AddString("retries", "") - - if err := b.flags.Parse(); err != nil { - return err - } - - switch typ { - case "CMD": - cmdSlice := handleJSONArgs(args, attributes) - if len(cmdSlice) == 0 { - return fmt.Errorf("Missing command after HEALTHCHECK CMD") - } - - if !attributes["json"] { - typ = "CMD-SHELL" - } - - healthcheck.Test = strslice.StrSlice(append([]string{typ}, cmdSlice...)) - default: - return fmt.Errorf("Unknown type %#v in HEALTHCHECK (try CMD)", typ) - } - - interval, err := parseOptInterval(flInterval) - if err != nil { - return err - } - healthcheck.Interval = interval - - timeout, err := parseOptInterval(flTimeout) - if err != nil { - return err - } - healthcheck.Timeout = timeout - - if flRetries.Value != "" { - retries, err := strconv.ParseInt(flRetries.Value, 10, 32) - if err != nil { - return err - } - if retries < 1 { - return fmt.Errorf("--retries must be at least 1 (not %d)", retries) - } - healthcheck.Retries = int(retries) - } else { - healthcheck.Retries = 0 - } - - b.runConfig.Healthcheck = &healthcheck - } - - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("HEALTHCHECK %q", b.runConfig.Healthcheck)); err != nil { - return err - } - - return nil -} - -// ENTRYPOINT /usr/sbin/nginx -// -// Set the entrypoint to /usr/sbin/nginx. Will accept the CMD as the arguments -// to /usr/sbin/nginx. Uses the default shell if not in JSON format. 
-// -// Handles command processing similar to CMD and RUN, only b.runConfig.Entrypoint -// is initialized at NewBuilder time instead of through argument parsing. -// -func entrypoint(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { - return err - } - - parsed := handleJSONArgs(args, attributes) - - switch { - case attributes["json"]: - // ENTRYPOINT ["echo", "hi"] - b.runConfig.Entrypoint = strslice.StrSlice(parsed) - case len(parsed) == 0: - // ENTRYPOINT [] - b.runConfig.Entrypoint = nil - default: - // ENTRYPOINT echo hi - b.runConfig.Entrypoint = strslice.StrSlice(append(getShell(b.runConfig), parsed[0])) - } - - // when setting the entrypoint if a CMD was not explicitly set then - // set the command to nil - if !b.cmdSet { - b.runConfig.Cmd = nil - } - - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("ENTRYPOINT %q", b.runConfig.Entrypoint)); err != nil { - return err - } - - return nil -} - -// EXPOSE 6667/tcp 7000/tcp -// -// Expose ports for links and port mappings. This all ends up in -// b.runConfig.ExposedPorts for runconfig. -// -func expose(b *Builder, args []string, attributes map[string]bool, original string) error { - portsTab := args - - if len(args) == 0 { - return errAtLeastOneArgument("EXPOSE") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - if b.runConfig.ExposedPorts == nil { - b.runConfig.ExposedPorts = make(nat.PortSet) - } - - ports, _, err := nat.ParsePortSpecs(portsTab) - if err != nil { - return err - } - - // instead of using ports directly, we build a list of ports and sort it so - // the order is consistent. This prevents cache burst where map ordering - // changes between builds - portList := make([]string, len(ports)) - var i int - for port := range ports { - if _, exists := b.runConfig.ExposedPorts[port]; !exists { - b.runConfig.ExposedPorts[port] = struct{}{} - } - portList[i] = string(port) - i++ - } - sort.Strings(portList) - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("EXPOSE %s", strings.Join(portList, " "))) -} - -// USER foo -// -// Set the user to 'foo' for future commands and when running the -// ENTRYPOINT/CMD at container run time. -// -func user(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return errExactlyOneArgument("USER") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - b.runConfig.User = args[0] - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("USER %v", args)) -} - -// VOLUME /foo -// -// Expose the volume /foo for use. Will also accept the JSON array form. -// -func volume(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) == 0 { - return errAtLeastOneArgument("VOLUME") - } - - if err := b.flags.Parse(); err != nil { - return err - } - - if b.runConfig.Volumes == nil { - b.runConfig.Volumes = map[string]struct{}{} - } - for _, v := range args { - v = strings.TrimSpace(v) - if v == "" { - return fmt.Errorf("Volume specified can not be an empty string") - } - b.runConfig.Volumes[v] = struct{}{} - } - if err := b.commit("", b.runConfig.Cmd, fmt.Sprintf("VOLUME %v", args)); err != nil { - return err - } - return nil -} - -// STOPSIGNAL signal -// -// Set the signal that will be used to kill the container. 
-func stopSignal(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return fmt.Errorf("STOPSIGNAL requires exactly one argument") - } - - sig := args[0] - _, err := signal.ParseSignal(sig) - if err != nil { - return err - } - - b.runConfig.StopSignal = sig - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("STOPSIGNAL %v", args)) -} - -// ARG name[=value] -// -// Adds the variable foo to the trusted list of variables that can be passed -// to builder using the --build-arg flag for expansion/subsitution or passing to 'run'. -// Dockerfile author may optionally set a default value of this variable. -func arg(b *Builder, args []string, attributes map[string]bool, original string) error { - if len(args) != 1 { - return fmt.Errorf("ARG requires exactly one argument definition") - } - - var ( - name string - value string - hasDefault bool - ) - - arg := args[0] - // 'arg' can just be a name or name-value pair. Note that this is different - // from 'env' that handles the split of name and value at the parser level. - // The reason for doing it differently for 'arg' is that we support just - // defining an arg and not assign it a value (while 'env' always expects a - // name-value pair). If possible, it will be good to harmonize the two. - if strings.Contains(arg, "=") { - parts := strings.SplitN(arg, "=", 2) - name = parts[0] - value = parts[1] - hasDefault = true - } else { - name = arg - hasDefault = false - } - // add the arg to allowed list of build-time args from this step on. - b.allowedBuildArgs[name] = true - - // If there is a default value associated with this arg then add it to the - // b.buildArgs if one is not already passed to the builder. The args passed - // to builder override the default value of 'arg'. - if _, ok := b.options.BuildArgs[name]; !ok && hasDefault { - b.options.BuildArgs[name] = value - } - - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("ARG %s", arg)) -} - -// SHELL powershell -command -// -// Set the non-default shell to use. 
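As the switch below enforces, only the JSON form is accepted, for example `SHELL ["powershell", "-command"]`; from then on, shell-form RUN, CMD and ENTRYPOINT instructions are prefixed with that shell (via getShell) instead of the platform default.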
-func shell(b *Builder, args []string, attributes map[string]bool, original string) error { - if err := b.flags.Parse(); err != nil { - return err - } - shellSlice := handleJSONArgs(args, attributes) - switch { - case len(shellSlice) == 0: - // SHELL [] - return errAtLeastOneArgument("SHELL") - case attributes["json"]: - // SHELL ["powershell", "-command"] - b.runConfig.Shell = strslice.StrSlice(shellSlice) - default: - // SHELL powershell -command - not JSON - return errNotJSON("SHELL", original) - } - return b.commit("", b.runConfig.Cmd, fmt.Sprintf("SHELL %v", shellSlice)) -} - -func errAtLeastOneArgument(command string) error { - return fmt.Errorf("%s requires at least one argument", command) -} - -func errExactlyOneArgument(command string) error { - return fmt.Errorf("%s requires exactly one argument", command) -} - -func errTooManyArguments(command string) error { - return fmt.Errorf("Bad input to %s, too many arguments", command) -} - -// getShell is a helper function which gets the right shell for prefixing the -// shell-form of RUN, ENTRYPOINT and CMD instructions -func getShell(c *container.Config) []string { - if 0 == len(c.Shell) { - return defaultShell[:] - } - return c.Shell[:] -} diff --git a/builder/dockerfile/dispatchers_unix.go b/builder/dockerfile/dispatchers_unix.go deleted file mode 100644 index 8b0dfc3911..0000000000 --- a/builder/dockerfile/dispatchers_unix.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build !windows - -package dockerfile - -import ( - "fmt" - "os" - "path/filepath" -) - -// normaliseWorkdir normalises a user requested working directory in a -// platform sematically consistent way. -func normaliseWorkdir(current string, requested string) (string, error) { - if requested == "" { - return "", fmt.Errorf("cannot normalise nothing") - } - current = filepath.FromSlash(current) - requested = filepath.FromSlash(requested) - if !filepath.IsAbs(requested) { - return filepath.Join(string(os.PathSeparator), current, requested), nil - } - return requested, nil -} - -func errNotJSON(command, _ string) error { - return fmt.Errorf("%s requires the arguments to be in JSON form", command) -} diff --git a/builder/dockerfile/dispatchers_windows.go b/builder/dockerfile/dispatchers_windows.go deleted file mode 100644 index 5a40ae09d3..0000000000 --- a/builder/dockerfile/dispatchers_windows.go +++ /dev/null @@ -1,65 +0,0 @@ -package dockerfile - -import ( - "fmt" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// normaliseWorkdir normalises a user requested working directory in a -// platform sematically consistent way. -func normaliseWorkdir(current string, requested string) (string, error) { - if requested == "" { - return "", fmt.Errorf("cannot normalise nothing") - } - - current = filepath.FromSlash(current) - requested = filepath.FromSlash(requested) - - // Target semantics is C:\somefolder, specifically in the format: - // UPPERCASEDriveLetter-Colon-Backslash-FolderName. We are already - // guaranteed that `current`, if set, is consistent. 
This allows us to - // cope correctly with any of the following in a Dockerfile: - // WORKDIR a --> C:\a - // WORKDIR c:\\foo --> C:\foo - // WORKDIR \\foo --> C:\foo - // WORKDIR /foo --> C:\foo - // WORKDIR c:\\foo \ WORKDIR bar --> C:\foo --> C:\foo\bar - // WORKDIR C:/foo \ WORKDIR bar --> C:\foo --> C:\foo\bar - // WORKDIR C:/foo \ WORKDIR \\bar --> C:\foo --> C:\bar - // WORKDIR /foo \ WORKDIR c:/bar --> C:\foo --> C:\bar - if len(current) == 0 || system.IsAbs(requested) { - if (requested[0] == os.PathSeparator) || - (len(requested) > 1 && string(requested[1]) != ":") || - (len(requested) == 1) { - requested = filepath.Join(`C:\`, requested) - } - } else { - requested = filepath.Join(current, requested) - } - // Upper-case drive letter - return (strings.ToUpper(string(requested[0])) + requested[1:]), nil -} - -func errNotJSON(command, original string) error { - // For Windows users, give a hint if it looks like it might contain - // a path which hasn't been escaped such as ["c:\windows\system32\prog.exe", "-param"], - // as JSON must be escaped. Unfortunate... - // - // Specifically looking for quote-driveletter-colon-backslash, there's no - // double backslash and a [] pair. No, this is not perfect, but it doesn't - // have to be. It's simply a hint to make life a little easier. - extra := "" - original = filepath.FromSlash(strings.ToLower(strings.Replace(strings.ToLower(original), strings.ToLower(command)+" ", "", -1))) - if len(regexp.MustCompile(`"[a-z]:\\.*`).FindStringSubmatch(original)) > 0 && - !strings.Contains(original, `\\`) && - strings.Contains(original, "[") && - strings.Contains(original, "]") { - extra = fmt.Sprintf(`. It looks like '%s' includes a file path without an escaped back-slash. JSON requires back-slashes to be escaped such as ["c:\\path\\to\\file.exe", "/parameter"]`, original) - } - return fmt.Errorf("%s requires the arguments to be in JSON form%s", command, extra) -} diff --git a/builder/dockerfile/dispatchers_windows_test.go b/builder/dockerfile/dispatchers_windows_test.go deleted file mode 100644 index 4c53713197..0000000000 --- a/builder/dockerfile/dispatchers_windows_test.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build windows - -package dockerfile - -import "testing" - -func TestNormaliseWorkdir(t *testing.T) { - tests := []struct{ current, requested, expected, etext string }{ - {``, ``, ``, `cannot normalise nothing`}, - {``, `a`, `C:\a`, ``}, - {``, `c:\foo`, `C:\foo`, ``}, - {``, `\foo`, `C:\foo`, ``}, - {``, `/foo`, `C:\foo`, ``}, - {``, `C:/foo`, `C:\foo`, ``}, - {`C:\foo`, `bar`, `C:\foo\bar`, ``}, - {`C:\foo`, `/bar`, `C:\bar`, ``}, - {`C:\foo`, `\bar`, `C:\bar`, ``}, - } - for _, i := range tests { - r, e := normaliseWorkdir(i.current, i.requested) - - if i.etext != "" && e == nil { - t.Fatalf("TestNormaliseWorkingDir Expected error %s", i.etext) - } - - if i.etext != "" && e.Error() != i.etext { - t.Fatalf("TestNormaliseWorkingDir Expected error %s, got %s", i.etext, e.Error()) - } - - if r != i.expected { - t.Fatalf("TestNormaliseWorkingDir Expected %s for %s %s", i.expected, i.current, i.requested) - } - } -} diff --git a/builder/dockerfile/envVarTest b/builder/dockerfile/envVarTest deleted file mode 100644 index 1a7fe975a7..0000000000 --- a/builder/dockerfile/envVarTest +++ /dev/null @@ -1,112 +0,0 @@ -hello | hello -he'll'o | hello -he'llo | hello -he\'llo | he'llo -he\\'llo | he\llo -abc\tdef | abctdef -"abc\tdef" | abc\tdef -'abc\tdef' | abc\tdef -hello\ | hello -hello\\ | hello\ -"hello | hello -"hello\" | hello" -"hel'lo" | hel'lo 
-'hello | hello -'hello\' | hello\ -"''" | '' -$. | $. -$1 | -he$1x | hex -he$.x | he$.x -he$pwd. | he. -he$PWD | he/home -he\$PWD | he$PWD -he\\$PWD | he\/home -he\${} | he${} -he\${}xx | he${}xx -he${} | he -he${}xx | hexx -he${hi} | he -he${hi}xx | hexx -he${PWD} | he/home -he${.} | error -he${XXX:-000}xx | he000xx -he${PWD:-000}xx | he/homexx -he${XXX:-$PWD}xx | he/homexx -he${XXX:-${PWD:-yyy}}xx | he/homexx -he${XXX:-${YYY:-yyy}}xx | heyyyxx -he${XXX:YYY} | error -he${XXX:+${PWD}}xx | hexx -he${PWD:+${XXX}}xx | hexx -he${PWD:+${SHELL}}xx | hebashxx -he${XXX:+000}xx | hexx -he${PWD:+000}xx | he000xx -'he${XX}' | he${XX} -"he${PWD}" | he/home -"he'$PWD'" | he'/home' -"$PWD" | /home -'$PWD' | $PWD -'\$PWD' | \$PWD -'"hello"' | "hello" -he\$PWD | he$PWD -"he\$PWD" | he$PWD -'he\$PWD' | he\$PWD -he${PWD | error -he${PWD:=000}xx | error -he${PWD:+${PWD}:}xx | he/home:xx -he${XXX:-\$PWD:}xx | he$PWD:xx -he${XXX:-\${PWD}z}xx | he${PWDz}xx -안녕하세요 | 안녕하세요 -안'녕'하세요 | 안녕하세요 -안'녕하세요 | 안녕하세요 -안녕\'하세요 | 안녕'하세요 -안\\'녕하세요 | 안\녕하세요 -안녕\t하세요 | 안녕t하세요 -"안녕\t하세요" | 안녕\t하세요 -'안녕\t하세요 | 안녕\t하세요 -안녕하세요\ | 안녕하세요 -안녕하세요\\ | 안녕하세요\ -"안녕하세요 | 안녕하세요 -"안녕하세요\" | 안녕하세요" -"안녕'하세요" | 안녕'하세요 -'안녕하세요 | 안녕하세요 -'안녕하세요\' | 안녕하세요\ -안녕$1x | 안녕x -안녕$.x | 안녕$.x -안녕$pwd. | 안녕. -안녕$PWD | 안녕/home -안녕\$PWD | 안녕$PWD -안녕\\$PWD | 안녕\/home -안녕\${} | 안녕${} -안녕\${}xx | 안녕${}xx -안녕${} | 안녕 -안녕${}xx | 안녕xx -안녕${hi} | 안녕 -안녕${hi}xx | 안녕xx -안녕${PWD} | 안녕/home -안녕${.} | error -안녕${XXX:-000}xx | 안녕000xx -안녕${PWD:-000}xx | 안녕/homexx -안녕${XXX:-$PWD}xx | 안녕/homexx -안녕${XXX:-${PWD:-yyy}}xx | 안녕/homexx -안녕${XXX:-${YYY:-yyy}}xx | 안녕yyyxx -안녕${XXX:YYY} | error -안녕${XXX:+${PWD}}xx | 안녕xx -안녕${PWD:+${XXX}}xx | 안녕xx -안녕${PWD:+${SHELL}}xx | 안녕bashxx -안녕${XXX:+000}xx | 안녕xx -안녕${PWD:+000}xx | 안녕000xx -'안녕${XX}' | 안녕${XX} -"안녕${PWD}" | 안녕/home -"안녕'$PWD'" | 안녕'/home' -'"안녕"' | "안녕" -안녕\$PWD | 안녕$PWD -"안녕\$PWD" | 안녕$PWD -'안녕\$PWD' | 안녕\$PWD -안녕${PWD | error -안녕${PWD:=000}xx | error -안녕${PWD:+${PWD}:}xx | 안녕/home:xx -안녕${XXX:-\$PWD:}xx | 안녕$PWD:xx -안녕${XXX:-\${PWD}z}xx | 안녕${PWDz}xx -$KOREAN | 한국어 -안녕$KOREAN | 안녕한국어 diff --git a/builder/dockerfile/evaluator.go b/builder/dockerfile/evaluator.go deleted file mode 100644 index 4c9d425f9b..0000000000 --- a/builder/dockerfile/evaluator.go +++ /dev/null @@ -1,203 +0,0 @@ -// Package dockerfile is the evaluation step in the Dockerfile parse/evaluate pipeline. -// -// It incorporates a dispatch table based on the parser.Node values (see the -// parser package for more information) that are yielded from the parser itself. -// Calling NewBuilder with the BuildOpts struct can be used to customize the -// experience for execution purposes only. Parsing is controlled in the parser -// package, and this division of responsibility should be respected. -// -// Please see the jump table targets for the actual invocations, most of which -// will call out to the functions in internals.go to deal with their tasks. -// -// ONBUILD is a special case, which is covered in the onbuild() func in -// dispatchers.go. -// -// The evaluator uses the concept of "steps", which are usually each processable -// line in the Dockerfile. Each step is numbered and certain actions are taken -// before and after each step, such as creating an image ID and removing temporary -// containers and images. Note that ONBUILD creates a kinda-sorta "sub run" which -// includes its own set of steps (usually only one of them). 
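Concretely, every step is echoed to the build output as it is dispatched (see dispatch() below and build() in builder.go), so a two-instruction Dockerfile produces output shaped like:

    Step 1 : FROM busybox
     ---> <image ID>
    Step 2 : RUN echo hi
     ---> <image ID>

where the image IDs stand in for the truncated IDs printed by build().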
-package dockerfile - -import ( - "fmt" - "strings" - - "github.com/docker/docker/builder/dockerfile/command" - "github.com/docker/docker/builder/dockerfile/parser" -) - -// Environment variable interpolation will happen on these statements only. -var replaceEnvAllowed = map[string]bool{ - command.Env: true, - command.Label: true, - command.Add: true, - command.Copy: true, - command.Workdir: true, - command.Expose: true, - command.Volume: true, - command.User: true, - command.StopSignal: true, - command.Arg: true, -} - -// Certain commands are allowed to have their args split into more -// words after env var replacements. Meaning: -// ENV foo="123 456" -// EXPOSE $foo -// should result in the same thing as: -// EXPOSE 123 456 -// and not treat "123 456" as a single word. -// Note that: EXPOSE "$foo" and EXPOSE $foo are not the same thing. -// Quotes will cause it to still be treated as single word. -var allowWordExpansion = map[string]bool{ - command.Expose: true, -} - -var evaluateTable map[string]func(*Builder, []string, map[string]bool, string) error - -func init() { - evaluateTable = map[string]func(*Builder, []string, map[string]bool, string) error{ - command.Add: add, - command.Arg: arg, - command.Cmd: cmd, - command.Copy: dispatchCopy, // copy() is a go builtin - command.Entrypoint: entrypoint, - command.Env: env, - command.Expose: expose, - command.From: from, - command.Healthcheck: healthcheck, - command.Label: label, - command.Maintainer: maintainer, - command.Onbuild: onbuild, - command.Run: run, - command.Shell: shell, - command.StopSignal: stopSignal, - command.User: user, - command.Volume: volume, - command.Workdir: workdir, - } -} - -// This method is the entrypoint to all statement handling routines. -// -// Almost all nodes will have this structure: -// Child[Node, Node, Node] where Child is from parser.Node.Children and each -// node comes from parser.Node.Next. This forms a "line" with a statement and -// arguments and we process them in this normalized form by hitting -// evaluateTable with the leaf nodes of the command and the Builder object. -// -// ONBUILD is a special case; in this case the parser will emit: -// Child[Node, Child[Node, Node...]] where the first node is the literal -// "onbuild" and the child entrypoint is the command of the ONBUILD statement, -// such as `RUN` in ONBUILD RUN foo. There is special case logic in here to -// deal with that, at least until it becomes more of a general concern with new -// features. -func (b *Builder) dispatch(stepN int, ast *parser.Node) error { - cmd := ast.Value - upperCasedCmd := strings.ToUpper(cmd) - - // To ensure the user is given a decent error message if the platform - // on which the daemon is running does not support a builder command. - if err := platformSupports(strings.ToLower(cmd)); err != nil { - return err - } - - attrs := ast.Attributes - original := ast.Original - flags := ast.Flags - strList := []string{} - msg := fmt.Sprintf("Step %d : %s", stepN+1, upperCasedCmd) - - if len(ast.Flags) > 0 { - msg += " " + strings.Join(ast.Flags, " ") - } - - if cmd == "onbuild" { - if ast.Next == nil { - return fmt.Errorf("ONBUILD requires at least one argument") - } - ast = ast.Next.Children[0] - strList = append(strList, ast.Value) - msg += " " + ast.Value - - if len(ast.Flags) > 0 { - msg += " " + strings.Join(ast.Flags, " ") - } - - } - - // count the number of nodes that we are going to traverse first - // so we can pre-create the argument and message array. 
This speeds up the - // allocation of those list a lot when they have a lot of arguments - cursor := ast - var n int - for cursor.Next != nil { - cursor = cursor.Next - n++ - } - msgList := make([]string, n) - - var i int - // Append the build-time args to config-environment. - // This allows builder config to override the variables, making the behavior similar to - // a shell script i.e. `ENV foo bar` overrides value of `foo` passed in build - // context. But `ENV foo $foo` will use the value from build context if one - // isn't already been defined by a previous ENV primitive. - // Note, we get this behavior because we know that ProcessWord() will - // stop on the first occurrence of a variable name and not notice - // a subsequent one. So, putting the buildArgs list after the Config.Env - // list, in 'envs', is safe. - envs := b.runConfig.Env - for key, val := range b.options.BuildArgs { - if !b.isBuildArgAllowed(key) { - // skip build-args that are not in allowed list, meaning they have - // not been defined by an "ARG" Dockerfile command yet. - // This is an error condition but only if there is no "ARG" in the entire - // Dockerfile, so we'll generate any necessary errors after we parsed - // the entire file (see 'leftoverArgs' processing in evaluator.go ) - continue - } - envs = append(envs, fmt.Sprintf("%s=%s", key, val)) - } - for ast.Next != nil { - ast = ast.Next - var str string - str = ast.Value - if replaceEnvAllowed[cmd] { - var err error - var words []string - - if allowWordExpansion[cmd] { - words, err = ProcessWords(str, envs) - if err != nil { - return err - } - strList = append(strList, words...) - } else { - str, err = ProcessWord(str, envs) - if err != nil { - return err - } - strList = append(strList, str) - } - } else { - strList = append(strList, str) - } - msgList[i] = ast.Value - i++ - } - - msg += " " + strings.Join(msgList, " ") - fmt.Fprintln(b.Stdout, msg) - - // XXX yes, we skip any cmds that are not valid; the parser should have - // picked these out already. 
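    // e.g. for the line `ENV foo bar`: cmd is "env", strList is ["foo", "bar"]
    // (the parser already split name and value), attrs carries the parser
    // attributes, and original is the raw line for commands that need it.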
- if f, ok := evaluateTable[cmd]; ok { - b.flags = NewBFlags() - b.flags.Args = flags - return f(b, strList, attrs, original) - } - - return fmt.Errorf("Unknown instruction: %s", upperCasedCmd) -} diff --git a/builder/dockerfile/evaluator_test.go b/builder/dockerfile/evaluator_test.go deleted file mode 100644 index 6f5ce767bc..0000000000 --- a/builder/dockerfile/evaluator_test.go +++ /dev/null @@ -1,231 +0,0 @@ -package dockerfile - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile/parser" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" -) - -type dispatchTestCase struct { - name, dockerfile, expectedError string - files map[string]string -} - -func init() { - reexec.Init() -} - -func initDispatchTestCases() []dispatchTestCase { - dispatchTestCases := []dispatchTestCase{{ - name: "copyEmptyWhitespace", - dockerfile: `COPY - quux \ - bar`, - expectedError: "COPY requires at least one argument", - }, - { - name: "ONBUILD forbidden FROM", - dockerfile: "ONBUILD FROM scratch", - expectedError: "FROM isn't allowed as an ONBUILD trigger", - files: nil, - }, - { - name: "ONBUILD forbidden MAINTAINER", - dockerfile: "ONBUILD MAINTAINER docker.io", - expectedError: "MAINTAINER isn't allowed as an ONBUILD trigger", - files: nil, - }, - { - name: "ARG two arguments", - dockerfile: "ARG foo bar", - expectedError: "ARG requires exactly one argument definition", - files: nil, - }, - { - name: "MAINTAINER unknown flag", - dockerfile: "MAINTAINER --boo joe@example.com", - expectedError: "Unknown flag: boo", - files: nil, - }, - { - name: "ADD multiple files to file", - dockerfile: "ADD file1.txt file2.txt test", - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "JSON ADD multiple files to file", - dockerfile: `ADD ["file1.txt", "file2.txt", "test"]`, - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "Wildcard ADD multiple files to file", - dockerfile: "ADD file*.txt test", - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "Wildcard JSON ADD multiple files to file", - dockerfile: `ADD ["file*.txt", "test"]`, - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "COPY multiple files to file", - dockerfile: "COPY file1.txt file2.txt test", - expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "JSON COPY multiple files to file", - dockerfile: `COPY ["file1.txt", "file2.txt", "test"]`, - expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"file1.txt": "test1", "file2.txt": "test2"}, - }, - { - name: "ADD multiple files 
to file with whitespace", - dockerfile: `ADD [ "test file1.txt", "test file2.txt", "test" ]`, - expectedError: "When using ADD with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, - }, - { - name: "COPY multiple files to file with whitespace", - dockerfile: `COPY [ "test file1.txt", "test file2.txt", "test" ]`, - expectedError: "When using COPY with more than one source file, the destination must be a directory and end with a /", - files: map[string]string{"test file1.txt": "test1", "test file2.txt": "test2"}, - }, - { - name: "COPY wildcard no files", - dockerfile: `COPY file*.txt /tmp/`, - expectedError: "No source files were specified", - files: nil, - }, - { - name: "COPY url", - dockerfile: `COPY https://index.docker.io/robots.txt /`, - expectedError: "Source can't be a URL for COPY", - files: nil, - }, - { - name: "Chaining ONBUILD", - dockerfile: `ONBUILD ONBUILD RUN touch foobar`, - expectedError: "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed", - files: nil, - }, - { - name: "Invalid instruction", - dockerfile: `foo bar`, - expectedError: "Unknown instruction: FOO", - files: nil, - }} - - return dispatchTestCases -} - -func TestDispatch(t *testing.T) { - testCases := initDispatchTestCases() - - for _, testCase := range testCases { - executeTestCase(t, testCase) - } -} - -func executeTestCase(t *testing.T, testCase dispatchTestCase) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") - defer cleanup() - - for filename, content := range testCase.files { - createTestTempFile(t, contextDir, filename, content, 0777) - } - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("Error when creating tar stream: %s", err) - } - - defer func() { - if err = tarStream.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - }() - - context, err := builder.MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when creating tar context: %s", err) - } - - defer func() { - if err = context.Close(); err != nil { - t.Fatalf("Error when closing tar context: %s", err) - } - }() - - r := strings.NewReader(testCase.dockerfile) - d := parser.Directive{} - parser.SetEscapeToken(parser.DefaultEscapeToken, &d) - n, err := parser.Parse(r, &d) - - if err != nil { - t.Fatalf("Error when parsing Dockerfile: %s", err) - } - - config := &container.Config{} - options := &types.ImageBuildOptions{} - - b := &Builder{runConfig: config, options: options, Stdout: ioutil.Discard, context: context} - - err = b.dispatch(0, n.Children[0]) - - if err == nil { - t.Fatalf("No error when executing test %s", testCase.name) - } - - if !strings.Contains(err.Error(), testCase.expectedError) { - t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", testCase.expectedError, err.Error()) - } - -} - -// createTestTempDir creates a temporary directory for testing. -// It returns the created path and a cleanup function which is meant to be used as deferred call. -// When an error occurs, it terminates the test. 
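Typical usage, as in executeTestCase above:

    contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test")
    defer cleanup()
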
-func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { - path, err := ioutil.TempDir(dir, prefix) - - if err != nil { - t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) - } - - return path, func() { - err = os.RemoveAll(path) - - if err != nil { - t.Fatalf("Error when removing directory %s: %s", path, err) - } - } -} - -// createTestTempFile creates a temporary file within dir with specific contents and permissions. -// When an error occurs, it terminates the test. -func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { - filePath := filepath.Join(dir, filename) - err := ioutil.WriteFile(filePath, []byte(contents), perm) - - if err != nil { - t.Fatalf("Error when creating %s file: %s", filename, err) - } - - return filePath -} diff --git a/builder/dockerfile/evaluator_unix.go b/builder/dockerfile/evaluator_unix.go deleted file mode 100644 index 28fd5b156b..0000000000 --- a/builder/dockerfile/evaluator_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !windows - -package dockerfile - -// platformSupports is a short-term function to give users a quality error -// message if a Dockerfile uses a command not supported on the platform. -func platformSupports(command string) error { - return nil -} diff --git a/builder/dockerfile/evaluator_windows.go b/builder/dockerfile/evaluator_windows.go deleted file mode 100644 index 43a0b70be7..0000000000 --- a/builder/dockerfile/evaluator_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -package dockerfile - -import "fmt" - -// platformSupports gives users a quality error message if a Dockerfile uses -// a command not supported on the platform. -func platformSupports(command string) error { - switch command { - case "user", "stopsignal": - return fmt.Errorf("The daemon on this platform does not support the command '%s'", command) - } - return nil -} diff --git a/builder/dockerfile/internals.go b/builder/dockerfile/internals.go deleted file mode 100644 index 2dc3fe6bd1..0000000000 --- a/builder/dockerfile/internals.go +++ /dev/null @@ -1,669 +0,0 @@ -package dockerfile - -// internals for handling commands. Covers many areas and a lot of -// non-contiguous functionality. Please read the comments.
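The two helpers above funnel every failure through t.Fatalf so call sites stay linear. A minimal standalone sketch of the same create-then-clean-up flow (this main package is illustrative and not part of the patch):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	// Create a scratch directory, as createTestTempDir does.
	dir, err := ioutil.TempDir("", "builder-dockerfile-test")
	if err != nil {
		panic(err)
	}
	// The closure returned by the helper performs exactly this removal.
	defer os.RemoveAll(dir)

	// Drop a file into it, as createTestTempFile does.
	path := filepath.Join(dir, "Dockerfile")
	if err := ioutil.WriteFile(path, []byte("FROM scratch\n"), 0777); err != nil {
		panic(err)
	}
	fmt.Println("wrote", path)
}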
- -import ( - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "os" - "path/filepath" - "sort" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/builder" - "github.com/docker/docker/builder/dockerfile/parser" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/tarsum" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/strslice" -) - -func (b *Builder) commit(id string, autoCmd strslice.StrSlice, comment string) error { - if b.disableCommit { - return nil - } - if b.image == "" && !b.noBaseImage { - return fmt.Errorf("Please provide a source image with `from` prior to commit") - } - b.runConfig.Image = b.image - - if id == "" { - cmd := b.runConfig.Cmd - b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), "#(nop) ", comment)) - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - - hit, err := b.probeCache() - if err != nil { - return err - } else if hit { - return nil - } - id, err = b.create() - if err != nil { - return err - } - } - - // Note: Actually copy the struct - autoConfig := *b.runConfig - autoConfig.Cmd = autoCmd - - commitCfg := &backend.ContainerCommitConfig{ - ContainerCommitConfig: types.ContainerCommitConfig{ - Author: b.maintainer, - Pause: true, - Config: &autoConfig, - }, - } - - // Commit the container - imageID, err := b.docker.Commit(id, commitCfg) - if err != nil { - return err - } - - b.image = imageID - return nil -} - -type copyInfo struct { - builder.FileInfo - decompress bool -} - -func (b *Builder) runContextCommand(args []string, allowRemote bool, allowLocalDecompression bool, cmdName string) error { - if b.context == nil { - return fmt.Errorf("No context given. Impossible to use %s", cmdName) - } - - if len(args) < 2 { - return fmt.Errorf("Invalid %s format - at least two arguments required", cmdName) - } - - // Work in daemon-specific filepath semantics - dest := filepath.FromSlash(args[len(args)-1]) // last one is always the dest - - b.runConfig.Image = b.image - - var infos []copyInfo - - // Loop through each src file and calculate the info we need to - // do the copy (e.g. hash value if cached). Don't actually do - // the copy until we've looked at all src files - var err error - for _, orig := range args[0 : len(args)-1] { - var fi builder.FileInfo - decompress := allowLocalDecompression - if urlutil.IsURL(orig) { - if !allowRemote { - return fmt.Errorf("Source can't be a URL for %s", cmdName) - } - fi, err = b.download(orig) - if err != nil { - return err - } - defer os.RemoveAll(filepath.Dir(fi.Path())) - decompress = false - infos = append(infos, copyInfo{fi, decompress}) - continue - } - // not a URL - subInfos, err := b.calcCopyInfo(cmdName, orig, allowLocalDecompression, true) - if err != nil { - return err - } - - infos = append(infos, subInfos...) 
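commit above briefly swaps runConfig.Cmd for a "#(nop)" marker so the cache probe keys on the instruction text rather than on a real command, then restores the original command via defer. A reduced sketch of that swap-and-restore, with hypothetical names in place of the builder's types:

package main

import "fmt"

type runConfig struct {
	Cmd []string
}

// probe is a stand-in for the builder's cache lookup; it just shows
// which command line the cache key would be derived from.
func probe(c *runConfig) {
	fmt.Printf("cache key derived from: %q\n", c.Cmd)
}

func main() {
	cfg := &runConfig{Cmd: []string{"/bin/sh", "-c", "apt-get update"}}

	// Swap in a no-op marker for the duration of the probe, then restore,
	// mirroring the defer in commit(). "MAINTAINER foo" is a made-up comment.
	saved := cfg.Cmd
	cfg.Cmd = []string{"/bin/sh", "-c", "#(nop) ", "MAINTAINER foo"}
	defer func() { cfg.Cmd = saved }()

	probe(cfg)
}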
- } - - if len(infos) == 0 { - return fmt.Errorf("No source files were specified") - } - if len(infos) > 1 && !strings.HasSuffix(dest, string(os.PathSeparator)) { - return fmt.Errorf("When using %s with more than one source file, the destination must be a directory and end with a /", cmdName) - } - - // For backwards compat, if there's just one info then use it as the - // cache look-up string, otherwise hash 'em all into one - var srcHash string - var origPaths string - - if len(infos) == 1 { - fi := infos[0].FileInfo - origPaths = fi.Name() - if hfi, ok := fi.(builder.Hashed); ok { - srcHash = hfi.Hash() - } - } else { - var hashs []string - var origs []string - for _, info := range infos { - fi := info.FileInfo - origs = append(origs, fi.Name()) - if hfi, ok := fi.(builder.Hashed); ok { - hashs = append(hashs, hfi.Hash()) - } - } - hasher := sha256.New() - hasher.Write([]byte(strings.Join(hashs, ","))) - srcHash = "multi:" + hex.EncodeToString(hasher.Sum(nil)) - origPaths = strings.Join(origs, " ") - } - - cmd := b.runConfig.Cmd - b.runConfig.Cmd = strslice.StrSlice(append(getShell(b.runConfig), fmt.Sprintf("#(nop) %s %s in %s ", cmdName, srcHash, dest))) - defer func(cmd strslice.StrSlice) { b.runConfig.Cmd = cmd }(cmd) - - if hit, err := b.probeCache(); err != nil { - return err - } else if hit { - return nil - } - - container, err := b.docker.ContainerCreate(types.ContainerCreateConfig{Config: b.runConfig}, true) - if err != nil { - return err - } - b.tmpContainers[container.ID] = struct{}{} - - comment := fmt.Sprintf("%s %s in %s", cmdName, origPaths, dest) - - // Twiddle the destination when its a relative path - meaning, make it - // relative to the WORKINGDIR - if dest, err = normaliseDest(cmdName, b.runConfig.WorkingDir, dest); err != nil { - return err - } - - for _, info := range infos { - if err := b.docker.CopyOnBuild(container.ID, dest, info.FileInfo, info.decompress); err != nil { - return err - } - } - - return b.commit(container.ID, cmd, comment) -} - -func (b *Builder) download(srcURL string) (fi builder.FileInfo, err error) { - // get filename from URL - u, err := url.Parse(srcURL) - if err != nil { - return - } - path := filepath.FromSlash(u.Path) // Ensure in platform semantics - if strings.HasSuffix(path, string(os.PathSeparator)) { - path = path[:len(path)-1] - } - parts := strings.Split(path, string(os.PathSeparator)) - filename := parts[len(parts)-1] - if filename == "" { - err = fmt.Errorf("cannot determine filename from url: %s", u) - return - } - - // Initiate the download - resp, err := httputils.Download(srcURL) - if err != nil { - return - } - - // Prepare file in a tmp dir - tmpDir, err := ioutils.TempDir("", "docker-remote") - if err != nil { - return - } - defer func() { - if err != nil { - os.RemoveAll(tmpDir) - } - }() - tmpFileName := filepath.Join(tmpDir, filename) - tmpFile, err := os.OpenFile(tmpFileName, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600) - if err != nil { - return - } - - stdoutFormatter := b.Stdout.(*streamformatter.StdoutFormatter) - progressOutput := stdoutFormatter.StreamFormatter.NewProgressOutput(stdoutFormatter.Writer, true) - progressReader := progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Downloading") - // Download and dump result to tmp file - if _, err = io.Copy(tmpFile, progressReader); err != nil { - tmpFile.Close() - return - } - fmt.Fprintln(b.Stdout) - // ignoring error because the file was already opened successfully - tmpFileSt, err := tmpFile.Stat() - if err != nil { - return - } - 
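The srcHash computed above collapses any number of sources into one cache key: a single source contributes its file hash directly, while several sources are hashed as a comma-joined list under a "multi:" prefix. A self-contained sketch of that computation with fabricated per-file hashes:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"strings"
)

func main() {
	// Per-file hashes as the tarsum layer would report them (made-up values).
	hashes := []string{"file:aaa111", "file:bbb222"}

	hasher := sha256.New()
	hasher.Write([]byte(strings.Join(hashes, ",")))
	srcHash := "multi:" + hex.EncodeToString(hasher.Sum(nil))

	fmt.Println(srcHash) // one stable key for the whole source set
}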
tmpFile.Close() - - // Set the mtime to the Last-Modified header value if present - // Otherwise just remove atime and mtime - mTime := time.Time{} - - lastMod := resp.Header.Get("Last-Modified") - if lastMod != "" { - // If we can't parse it then just let it default to 'zero' - // otherwise use the parsed time value - if parsedMTime, err := http.ParseTime(lastMod); err == nil { - mTime = parsedMTime - } - } - - if err = system.Chtimes(tmpFileName, mTime, mTime); err != nil { - return - } - - // Calc the checksum, even if we're using the cache - r, err := archive.Tar(tmpFileName, archive.Uncompressed) - if err != nil { - return - } - tarSum, err := tarsum.NewTarSum(r, true, tarsum.Version1) - if err != nil { - return - } - if _, err = io.Copy(ioutil.Discard, tarSum); err != nil { - return - } - hash := tarSum.Sum(nil) - r.Close() - return &builder.HashedFileInfo{FileInfo: builder.PathFileInfo{FileInfo: tmpFileSt, FilePath: tmpFileName}, FileHash: hash}, nil -} - -func (b *Builder) calcCopyInfo(cmdName, origPath string, allowLocalDecompression, allowWildcards bool) ([]copyInfo, error) { - - // Work in daemon-specific OS filepath semantics - origPath = filepath.FromSlash(origPath) - - if origPath != "" && origPath[0] == os.PathSeparator && len(origPath) > 1 { - origPath = origPath[1:] - } - origPath = strings.TrimPrefix(origPath, "."+string(os.PathSeparator)) - - // Deal with wildcards - if allowWildcards && containsWildcards(origPath) { - var copyInfos []copyInfo - if err := b.context.Walk("", func(path string, info builder.FileInfo, err error) error { - if err != nil { - return err - } - if info.Name() == "" { - // Why are we doing this check? - return nil - } - if match, _ := filepath.Match(origPath, path); !match { - return nil - } - - // Note we set allowWildcards to false in case the name has - // a * in it - subInfos, err := b.calcCopyInfo(cmdName, path, allowLocalDecompression, false) - if err != nil { - return err - } - copyInfos = append(copyInfos, subInfos...) - return nil - }); err != nil { - return nil, err - } - return copyInfos, nil - } - - // Must be a dir or a file - - statPath, fi, err := b.context.Stat(origPath) - if err != nil { - return nil, err - } - - copyInfos := []copyInfo{{FileInfo: fi, decompress: allowLocalDecompression}} - - hfi, handleHash := fi.(builder.Hashed) - if !handleHash { - return copyInfos, nil - } - - // Deal with the single file case - if !fi.IsDir() { - hfi.SetHash("file:" + hfi.Hash()) - return copyInfos, nil - } - // Must be a dir - var subfiles []string - err = b.context.Walk(statPath, func(path string, info builder.FileInfo, err error) error { - if err != nil { - return err - } - // we already checked handleHash above - subfiles = append(subfiles, info.(builder.Hashed).Hash()) - return nil - }) - if err != nil { - return nil, err - } - - sort.Strings(subfiles) - hasher := sha256.New() - hasher.Write([]byte(strings.Join(subfiles, ","))) - hfi.SetHash("dir:" + hex.EncodeToString(hasher.Sum(nil))) - - return copyInfos, nil -} - -func containsWildcards(name string) bool { - for i := 0; i < len(name); i++ { - ch := name[i] - if ch == '\\' { - i++ - } else if ch == '*' || ch == '?' 
|| ch == '[' { - return true - } - } - return false -} - -func (b *Builder) processImageFrom(img builder.Image) error { - if img != nil { - b.image = img.ImageID() - - if img.RunConfig() != nil { - b.runConfig = img.RunConfig() - } - } - - // Check to see if we have a default PATH, note that windows won't - // have one as it's set by HCS - if system.DefaultPathEnv != "" { - // Convert the slice of strings that represent the current list - // of env vars into a map so we can see if PATH is already set. - // If it's not set then go ahead and give it our default value - configEnv := opts.ConvertKVStringsToMap(b.runConfig.Env) - if _, ok := configEnv["PATH"]; !ok { - b.runConfig.Env = append(b.runConfig.Env, - "PATH="+system.DefaultPathEnv) - } - } - - if img == nil { - // Typically this means they used "FROM scratch" - return nil - } - - // Process ONBUILD triggers if they exist - if nTriggers := len(b.runConfig.OnBuild); nTriggers != 0 { - word := "trigger" - if nTriggers > 1 { - word = "triggers" - } - fmt.Fprintf(b.Stderr, "# Executing %d build %s...\n", nTriggers, word) - } - - // Copy the ONBUILD triggers, and remove them from the config, since the config will be committed. - onBuildTriggers := b.runConfig.OnBuild - b.runConfig.OnBuild = []string{} - - // parse the ONBUILD triggers by invoking the parser - for _, step := range onBuildTriggers { - ast, err := parser.Parse(strings.NewReader(step), &b.directive) - if err != nil { - return err - } - - for i, n := range ast.Children { - switch strings.ToUpper(n.Value) { - case "ONBUILD": - return fmt.Errorf("Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") - case "MAINTAINER", "FROM": - return fmt.Errorf("%s isn't allowed as an ONBUILD trigger", n.Value) - } - - if err := b.dispatch(i, n); err != nil { - return err - } - } - } - - return nil -} - -// probeCache checks if `b.docker` implements builder.ImageCache and image-caching -// is enabled (`b.UseCache`). -// If so, it attempts to look up the current `b.image` and `b.runConfig` pair with `b.docker`. -// If an image is found, probeCache returns `(true, nil)`. -// If no image is found, it returns `(false, nil)`. -// If there is any error, it returns `(false, err)`. -func (b *Builder) probeCache() (bool, error) { - c, ok := b.docker.(builder.ImageCache) - if !ok || b.options.NoCache || b.cacheBusted { - return false, nil - } - cache, err := c.GetCachedImageOnBuild(b.image, b.runConfig) - if err != nil { - return false, err - } - if len(cache) == 0 { - logrus.Debugf("[BUILDER] Cache miss: %s", b.runConfig.Cmd) - b.cacheBusted = true - return false, nil - } - - fmt.Fprintf(b.Stdout, " ---> Using cache\n") - logrus.Debugf("[BUILDER] Use cached version: %s", b.runConfig.Cmd) - b.image = string(cache) - - return true, nil -} - -func (b *Builder) create() (string, error) { - if b.image == "" && !b.noBaseImage { - return "", fmt.Errorf("Please provide a source image with `from` prior to run") - } - b.runConfig.Image = b.image - - resources := container.Resources{ - CgroupParent: b.options.CgroupParent, - CPUShares: b.options.CPUShares, - CPUPeriod: b.options.CPUPeriod, - CPUQuota: b.options.CPUQuota, - CpusetCpus: b.options.CPUSetCPUs, - CpusetMems: b.options.CPUSetMems, - Memory: b.options.Memory, - MemorySwap: b.options.MemorySwap, - Ulimits: b.options.Ulimits, - } - - // TODO: why not embed a hostconfig in builder?
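containsWildcards above treats a backslash as an escape and only reports *, ?, and [ when they are unescaped. A standalone probe of that behaviour (the function body is copied from the diff; main is illustrative):

package main

import "fmt"

func containsWildcards(name string) bool {
	for i := 0; i < len(name); i++ {
		ch := name[i]
		if ch == '\\' {
			i++ // skip the escaped character
		} else if ch == '*' || ch == '?' || ch == '[' {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(containsWildcards("file*.txt"))  // true
	fmt.Println(containsWildcards(`file\*.txt`)) // false: the * is escaped
	fmt.Println(containsWildcards("plain.txt"))  // false
}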
- hostConfig := &container.HostConfig{ - Isolation: b.options.Isolation, - ShmSize: b.options.ShmSize, - Resources: resources, - } - - config := *b.runConfig - - // Create the container - c, err := b.docker.ContainerCreate(types.ContainerCreateConfig{ - Config: b.runConfig, - HostConfig: hostConfig, - }, true) - if err != nil { - return "", err - } - for _, warning := range c.Warnings { - fmt.Fprintf(b.Stdout, " ---> [Warning] %s\n", warning) - } - - b.tmpContainers[c.ID] = struct{}{} - fmt.Fprintf(b.Stdout, " ---> Running in %s\n", stringid.TruncateID(c.ID)) - - // override the entry point that may have been picked up from the base image - if err := b.docker.ContainerUpdateCmdOnBuild(c.ID, config.Cmd); err != nil { - return "", err - } - - return c.ID, nil -} - -var errCancelled = errors.New("build cancelled") - -func (b *Builder) run(cID string) (err error) { - errCh := make(chan error) - go func() { - errCh <- b.docker.ContainerAttachRaw(cID, nil, b.Stdout, b.Stderr, true) - }() - - finished := make(chan struct{}) - var once sync.Once - finish := func() { close(finished) } - cancelErrCh := make(chan error, 1) - defer once.Do(finish) - go func() { - select { - case <-b.clientCtx.Done(): - logrus.Debugln("Build cancelled, killing and removing container:", cID) - b.docker.ContainerKill(cID, 0) - b.removeContainer(cID) - cancelErrCh <- errCancelled - case <-finished: - cancelErrCh <- nil - } - }() - - if err := b.docker.ContainerStart(cID, nil, true); err != nil { - return err - } - - // Block on reading output from container, stop on err or chan closed - if err := <-errCh; err != nil { - return err - } - - if ret, _ := b.docker.ContainerWait(cID, -1); ret != 0 { - // TODO: change error type, because jsonmessage.JSONError assumes HTTP - return &jsonmessage.JSONError{ - Message: fmt.Sprintf("The command '%s' returned a non-zero code: %d", strings.Join(b.runConfig.Cmd, " "), ret), - Code: ret, - } - } - once.Do(finish) - return <-cancelErrCh -} - -func (b *Builder) removeContainer(c string) error { - rmConfig := &types.ContainerRmConfig{ - ForceRemove: true, - RemoveVolume: true, - } - if err := b.docker.ContainerRm(c, rmConfig); err != nil { - fmt.Fprintf(b.Stdout, "Error removing intermediate container %s: %v\n", stringid.TruncateID(c), err) - return err - } - return nil -} - -func (b *Builder) clearTmp() { - for c := range b.tmpContainers { - if err := b.removeContainer(c); err != nil { - return - } - delete(b.tmpContainers, c) - fmt.Fprintf(b.Stdout, "Removing intermediate container %s\n", stringid.TruncateID(c)) - } -} - -// readDockerfile reads a Dockerfile from the current context. -func (b *Builder) readDockerfile() error { - // If no -f was specified then look for 'Dockerfile'. If we can't find - // that then look for 'dockerfile'. If neither are found then default - // back to 'Dockerfile' and use that in the error message. - if b.options.Dockerfile == "" { - b.options.Dockerfile = builder.DefaultDockerfileName - if _, _, err := b.context.Stat(b.options.Dockerfile); os.IsNotExist(err) { - lowercase := strings.ToLower(b.options.Dockerfile) - if _, _, err := b.context.Stat(lowercase); err == nil { - b.options.Dockerfile = lowercase - } - } - } - - err := b.parseDockerfile() - - if err != nil { - return err - } - - // After the Dockerfile has been parsed, we need to check the .dockerignore - // file for either "Dockerfile" or ".dockerignore", and if either are - // present then erase them from the build context. 
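run above pairs the attach goroutine with a watchdog that selects on either cancellation or normal completion, with sync.Once guaranteeing the finished channel closes exactly once. A reduced sketch of the same pattern against a stand-in work function instead of a container:

package main

import (
	"context"
	"errors"
	"fmt"
	"sync"
	"time"
)

var errCancelled = errors.New("build cancelled")

func run(ctx context.Context, work func() error) error {
	finished := make(chan struct{})
	var once sync.Once
	finish := func() { close(finished) }
	defer once.Do(finish)

	cancelErrCh := make(chan error, 1)
	go func() {
		select {
		case <-ctx.Done():
			// Here the builder would kill and remove the container.
			cancelErrCh <- errCancelled
		case <-finished:
			cancelErrCh <- nil
		}
	}()

	if err := work(); err != nil {
		return err
	}
	once.Do(finish)
	return <-cancelErrCh
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() { time.Sleep(10 * time.Millisecond); cancel() }()

	err := run(ctx, func() error { time.Sleep(50 * time.Millisecond); return nil })
	fmt.Println(err) // expected here: "build cancelled"
}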
These files should never - // have been sent from the client but we did send them to make sure that - // we had the Dockerfile to actually parse, and then we also need the - // .dockerignore file to know whether either file should be removed. - // Note that this assumes the Dockerfile has been read into memory and - // is now safe to be removed. - if dockerIgnore, ok := b.context.(builder.DockerIgnoreContext); ok { - dockerIgnore.Process([]string{b.options.Dockerfile}) - } - return nil -} - -func (b *Builder) parseDockerfile() error { - f, err := b.context.Open(b.options.Dockerfile) - if err != nil { - if os.IsNotExist(err) { - return fmt.Errorf("Cannot locate specified Dockerfile: %s", b.options.Dockerfile) - } - return err - } - defer f.Close() - if f, ok := f.(*os.File); ok { - // ignoring error because Open already succeeded - fi, err := f.Stat() - if err != nil { - return fmt.Errorf("Unexpected error reading Dockerfile: %v", err) - } - if fi.Size() == 0 { - return fmt.Errorf("The Dockerfile (%s) cannot be empty", b.options.Dockerfile) - } - } - b.dockerfile, err = parser.Parse(f, &b.directive) - if err != nil { - return err - } - - return nil -} - -// determine if build arg is part of built-in args or user -// defined args in Dockerfile at any point in time. -func (b *Builder) isBuildArgAllowed(arg string) bool { - if _, ok := BuiltinAllowedBuildArgs[arg]; ok { - return true - } - if _, ok := b.allowedBuildArgs[arg]; ok { - return true - } - return false -} diff --git a/builder/dockerfile/internals_test.go b/builder/dockerfile/internals_test.go deleted file mode 100644 index 5583cee9f2..0000000000 --- a/builder/dockerfile/internals_test.go +++ /dev/null @@ -1,102 +0,0 @@ -package dockerfile - -import ( - "strings" - "testing" - - "github.com/docker/docker/builder" - "github.com/docker/docker/pkg/archive" - "github.com/docker/engine-api/types" -) - -func TestEmptyDockerfile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") - defer cleanup() - - createTestTempFile(t, contextDir, builder.DefaultDockerfileName, "", 0777) - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("Error when creating tar stream: %s", err) - } - - defer func() { - if err = tarStream.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - }() - - context, err := builder.MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when creating tar context: %s", err) - } - - defer func() { - if err = context.Close(); err != nil { - t.Fatalf("Error when closing tar context: %s", err) - } - }() - - options := &types.ImageBuildOptions{} - - b := &Builder{options: options, context: context} - - err = b.readDockerfile() - - if err == nil { - t.Fatalf("No error when executing test for empty Dockerfile") - } - - if !strings.Contains(err.Error(), "The Dockerfile (Dockerfile) cannot be empty") { - t.Fatalf("Wrong error message. Should be \"%s\". 
Got \"%s\"", "The Dockerfile (Dockerfile) cannot be empty", err.Error()) - } -} - -func TestDockerfileOutsideTheBuildContext(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerfile-test") - defer cleanup() - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("Error when creating tar stream: %s", err) - } - - defer func() { - if err = tarStream.Close(); err != nil { - t.Fatalf("Error when closing tar stream: %s", err) - } - }() - - context, err := builder.MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when creating tar context: %s", err) - } - - defer func() { - if err = context.Close(); err != nil { - t.Fatalf("Error when closing tar context: %s", err) - } - }() - - options := &types.ImageBuildOptions{ - Dockerfile: "../../Dockerfile", - } - - b := &Builder{options: options, context: context} - - err = b.readDockerfile() - - if err == nil { - t.Fatalf("No error when executing test for Dockerfile outside the build context") - } - - expectedError := "Forbidden path outside the build context" - - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Wrong error message. Should be \"%s\". Got \"%s\"", expectedError, err.Error()) - } -} diff --git a/builder/dockerfile/internals_unix.go b/builder/dockerfile/internals_unix.go deleted file mode 100644 index 6cd990d892..0000000000 --- a/builder/dockerfile/internals_unix.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !windows - -package dockerfile - -import ( - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// normaliseDest normalises the destination of a COPY/ADD command in a -// platform semantically consistent way. -func normaliseDest(cmdName, workingDir, requested string) (string, error) { - dest := filepath.FromSlash(requested) - endsInSlash := strings.HasSuffix(requested, string(os.PathSeparator)) - if !system.IsAbs(requested) { - dest = filepath.Join(string(os.PathSeparator), filepath.FromSlash(workingDir), dest) - // Make sure we preserve any trailing slash - if endsInSlash { - dest += string(os.PathSeparator) - } - } - return dest, nil -} diff --git a/builder/dockerfile/internals_windows.go b/builder/dockerfile/internals_windows.go deleted file mode 100644 index f70360300d..0000000000 --- a/builder/dockerfile/internals_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package dockerfile - -import ( - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// normaliseDest normalises the destination of a COPY/ADD command in a -// platform semantically consistent way. -func normaliseDest(cmdName, workingDir, requested string) (string, error) { - dest := filepath.FromSlash(requested) - endsInSlash := strings.HasSuffix(dest, string(os.PathSeparator)) - - // We are guaranteed that the working directory is already consistent, - // However, Windows also has, for now, the limitation that ADD/COPY can - // only be done to the system drive, not any drives that might be present - // as a result of a bind mount. - // - // So... if the path requested is Linux-style absolute (/foo or \\foo), - // we assume it is the system drive. If it is a Windows-style absolute - // (DRIVE:\\foo), error if DRIVE is not C. And finally, ensure we - // strip any configured working directories drive letter so that it - // can be subsequently legitimately converted to a Windows volume-style - // pathname. 
- - // Not a typo - filepath.IsAbs, not system.IsAbs on this next check as - // we only want to validate where the DriveColon part has been supplied. - if filepath.IsAbs(dest) { - if strings.ToUpper(string(dest[0])) != "C" { - return "", fmt.Errorf("Windows does not support %s with destinations not on the system drive (C:)", cmdName) - } - dest = dest[2:] // Strip the drive letter - } - - // Cannot handle relative where WorkingDir is not the system drive. - if len(workingDir) > 0 { - if ((len(workingDir) > 1) && !system.IsAbs(workingDir[2:])) || (len(workingDir) == 1) { - return "", fmt.Errorf("Current WorkingDir %s is not platform consistent", workingDir) - } - if !system.IsAbs(dest) { - if string(workingDir[0]) != "C" { - return "", fmt.Errorf("Windows does not support %s with relative paths when WORKDIR is not the system drive", cmdName) - } - dest = filepath.Join(string(os.PathSeparator), workingDir[2:], dest) - // Make sure we preserve any trailing slash - if endsInSlash { - dest += string(os.PathSeparator) - } - } - } - return dest, nil -} diff --git a/builder/dockerfile/internals_windows_test.go b/builder/dockerfile/internals_windows_test.go deleted file mode 100644 index 868a6671a3..0000000000 --- a/builder/dockerfile/internals_windows_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build windows - -package dockerfile - -import "testing" - -func TestNormaliseDest(t *testing.T) { - tests := []struct{ current, requested, expected, etext string }{ - {``, `D:\`, ``, `Windows does not support TEST with destinations not on the system drive (C:)`}, - {``, `e:/`, ``, `Windows does not support TEST with destinations not on the system drive (C:)`}, - {`invalid`, `./c1`, ``, `Current WorkingDir invalid is not platform consistent`}, - {`C:`, ``, ``, `Current WorkingDir C: is not platform consistent`}, - {`C`, ``, ``, `Current WorkingDir C is not platform consistent`}, - {`D:\`, `.`, ``, "Windows does not support TEST with relative paths when WORKDIR is not the system drive"}, - {``, `D`, `D`, ``}, - {``, `./a1`, `.\a1`, ``}, - {``, `.\b1`, `.\b1`, ``}, - {``, `/`, `\`, ``}, - {``, `\`, `\`, ``}, - {``, `c:/`, `\`, ``}, - {``, `c:\`, `\`, ``}, - {``, `.`, `.`, ``}, - {`C:\wdd`, `./a1`, `\wdd\a1`, ``}, - {`C:\wde`, `.\b1`, `\wde\b1`, ``}, - {`C:\wdf`, `/`, `\`, ``}, - {`C:\wdg`, `\`, `\`, ``}, - {`C:\wdh`, `c:/`, `\`, ``}, - {`C:\wdi`, `c:\`, `\`, ``}, - {`C:\wdj`, `.`, `\wdj`, ``}, - {`C:\wdk`, `foo/bar`, `\wdk\foo\bar`, ``}, - {`C:\wdl`, `foo\bar`, `\wdl\foo\bar`, ``}, - {`C:\wdm`, `foo/bar/`, `\wdm\foo\bar\`, ``}, - {`C:\wdn`, `foo\bar/`, `\wdn\foo\bar\`, ``}, - } - for _, i := range tests { - got, err := normaliseDest("TEST", i.current, i.requested) - if err != nil && i.etext == "" { - t.Fatalf("TestNormaliseDest Got unexpected error %q for %s %s. ", err.Error(), i.current, i.requested) - } - if i.etext != "" && ((err == nil) || (err != nil && err.Error() != i.etext)) { - if err == nil { - t.Fatalf("TestNormaliseDest Expected an error for %s %s but didn't get one", i.current, i.requested) - } else { - t.Fatalf("TestNormaliseDest Wrong error text for %s %s - %s", i.current, i.requested, err.Error()) - } - } - if i.etext == "" && got != i.expected { - t.Fatalf("TestNormaliseDest Expected %q for %q and %q. 
Got %q", i.expected, i.current, i.requested, got) - } - } -} diff --git a/builder/dockerfile/parser/dumper/main.go b/builder/dockerfile/parser/dumper/main.go deleted file mode 100644 index 6561708c23..0000000000 --- a/builder/dockerfile/parser/dumper/main.go +++ /dev/null @@ -1,35 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/docker/docker/builder/dockerfile/parser" -) - -func main() { - var f *os.File - var err error - - if len(os.Args) < 2 { - fmt.Println("please supply filename(s)") - os.Exit(1) - } - - for _, fn := range os.Args[1:] { - f, err = os.Open(fn) - if err != nil { - panic(err) - } - - d := parser.Directive{LookingForDirectives: true} - parser.SetEscapeToken(parser.DefaultEscapeToken, &d) - - ast, err := parser.Parse(f, &d) - if err != nil { - panic(err) - } else { - fmt.Println(ast.Dump()) - } - } -} diff --git a/builder/dockerfile/parser/json_test.go b/builder/dockerfile/parser/json_test.go deleted file mode 100644 index 60d74d9c36..0000000000 --- a/builder/dockerfile/parser/json_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package parser - -import ( - "testing" -) - -var invalidJSONArraysOfStrings = []string{ - `["a",42,"b"]`, - `["a",123.456,"b"]`, - `["a",{},"b"]`, - `["a",{"c": "d"},"b"]`, - `["a",["c"],"b"]`, - `["a",true,"b"]`, - `["a",false,"b"]`, - `["a",null,"b"]`, -} - -var validJSONArraysOfStrings = map[string][]string{ - `[]`: {}, - `[""]`: {""}, - `["a"]`: {"a"}, - `["a","b"]`: {"a", "b"}, - `[ "a", "b" ]`: {"a", "b"}, - `[ "a", "b" ]`: {"a", "b"}, - ` [ "a", "b" ] `: {"a", "b"}, - `["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"]`: {"abc 123", "♥", "☃", "\" \\ / \b \f \n \r \t \u0000"}, -} - -func TestJSONArraysOfStrings(t *testing.T) { - for json, expected := range validJSONArraysOfStrings { - d := Directive{} - SetEscapeToken(DefaultEscapeToken, &d) - - if node, _, err := parseJSON(json, &d); err != nil { - t.Fatalf("%q should be a valid JSON array of strings, but wasn't! (err: %q)", json, err) - } else { - i := 0 - for node != nil { - if i >= len(expected) { - t.Fatalf("expected result is shorter than parsed result (%d vs %d+) in %q", len(expected), i+1, json) - } - if node.Value != expected[i] { - t.Fatalf("expected %q (not %q) in %q at pos %d", expected[i], node.Value, json, i) - } - node = node.Next - i++ - } - if i != len(expected) { - t.Fatalf("expected result is longer than parsed result (%d vs %d) in %q", len(expected), i+1, json) - } - } - } - for _, json := range invalidJSONArraysOfStrings { - d := Directive{} - SetEscapeToken(DefaultEscapeToken, &d) - - if _, _, err := parseJSON(json, &d); err != errDockerfileNotStringArray { - t.Fatalf("%q should be an invalid JSON array of strings, but wasn't!", json) - } - } -} diff --git a/builder/dockerfile/parser/line_parsers.go b/builder/dockerfile/parser/line_parsers.go deleted file mode 100644 index d82b19a081..0000000000 --- a/builder/dockerfile/parser/line_parsers.go +++ /dev/null @@ -1,361 +0,0 @@ -package parser - -// line parsers are dispatch calls that parse a single unit of text into a -// Node object which contains the whole statement. Dockerfiles have varied -// (but not usually unique, see ONBUILD for a unique example) parsing rules -// per-command, and these unify the processing in a way that makes it -// manageable. - -import ( - "encoding/json" - "errors" - "fmt" - "strings" - "unicode" - "unicode/utf8" -) - -var ( - errDockerfileNotStringArray = errors.New("When using JSON array syntax, arrays must be comprised of strings only.") -) - -// ignore the current argument. 
This will still leave a command parsed, but -// will not incorporate the arguments into the ast. -func parseIgnore(rest string, d *Directive) (*Node, map[string]bool, error) { - return &Node{}, nil, nil -} - -// used for onbuild. Could potentially be used for anything that represents a -// statement with sub-statements. -// -// ONBUILD RUN foo bar -> (onbuild (run foo bar)) -// -func parseSubCommand(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - _, child, err := ParseLine(rest, d) - if err != nil { - return nil, nil, err - } - - return &Node{Children: []*Node{child}}, nil, nil -} - -// helper to parse words (i.e space delimited or quoted strings) in a statement. -// The quotes are preserved as part of this function and they are stripped later -// as part of processWords(). -func parseWords(rest string, d *Directive) []string { - const ( - inSpaces = iota // looking for start of a word - inWord - inQuote - ) - - words := []string{} - phase := inSpaces - word := "" - quote := '\000' - blankOK := false - var ch rune - var chWidth int - - for pos := 0; pos <= len(rest); pos += chWidth { - if pos != len(rest) { - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - - if phase == inSpaces { // Looking for start of word - if pos == len(rest) { // end of input - break - } - if unicode.IsSpace(ch) { // skip spaces - continue - } - phase = inWord // found it, fall through - } - if (phase == inWord || phase == inQuote) && (pos == len(rest)) { - if blankOK || len(word) > 0 { - words = append(words, word) - } - break - } - if phase == inWord { - if unicode.IsSpace(ch) { - phase = inSpaces - if blankOK || len(word) > 0 { - words = append(words, word) - } - word = "" - blankOK = false - continue - } - if ch == '\'' || ch == '"' { - quote = ch - blankOK = true - phase = inQuote - } - if ch == d.EscapeToken { - if pos+chWidth == len(rest) { - continue // just skip an escape token at end of line - } - // If we're not quoted and we see an escape token, then always just - // add the escape token plus the char to the word, even if the char - // is a quote. - word += string(ch) - pos += chWidth - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - word += string(ch) - continue - } - if phase == inQuote { - if ch == quote { - phase = inWord - } - // The escape token is special except for ' quotes - can't escape anything for ' - if ch == d.EscapeToken && quote != '\'' { - if pos+chWidth == len(rest) { - phase = inWord - continue // just skip the escape token at end - } - pos += chWidth - word += string(ch) - ch, chWidth = utf8.DecodeRuneInString(rest[pos:]) - } - word += string(ch) - } - } - - return words -} - -// parse environment like statements. Note that this does *not* handle -// variable interpolation, which will be handled in the evaluator. -func parseNameVal(rest string, key string, d *Directive) (*Node, map[string]bool, error) { - // This is kind of tricky because we need to support the old - // variant: KEY name value - // as well as the new one: KEY name=value ... - // The trigger to know which one is being used will be whether we hit - // a space or = first. 
space ==> old, "=" ==> new - - words := parseWords(rest, d) - if len(words) == 0 { - return nil, nil, nil - } - - var rootnode *Node - - // Old format (KEY name value) - if !strings.Contains(words[0], "=") { - node := &Node{} - rootnode = node - strs := tokenWhitespace.Split(rest, 2) - - if len(strs) < 2 { - return nil, nil, fmt.Errorf(key + " must have two arguments") - } - - node.Value = strs[0] - node.Next = &Node{} - node.Next.Value = strs[1] - } else { - var prevNode *Node - for i, word := range words { - if !strings.Contains(word, "=") { - return nil, nil, fmt.Errorf("Syntax error - can't find = in %q. Must be of the form: name=value", word) - } - parts := strings.SplitN(word, "=", 2) - - name := &Node{} - value := &Node{} - - name.Next = value - name.Value = parts[0] - value.Value = parts[1] - - if i == 0 { - rootnode = name - } else { - prevNode.Next = name - } - prevNode = value - } - } - - return rootnode, nil, nil -} - -func parseEnv(rest string, d *Directive) (*Node, map[string]bool, error) { - return parseNameVal(rest, "ENV", d) -} - -func parseLabel(rest string, d *Directive) (*Node, map[string]bool, error) { - return parseNameVal(rest, "LABEL", d) -} - -// parses a statement containing one or more keyword definition(s) and/or -// value assignments, like `name1 name2= name3="" name4=value`. -// Note that this is a stricter format than the old format of assignment, -// allowed by parseNameVal(), in that it only allows assignment of the -// form `keyword=[]` like `name2=`, `name3=""`, and `name4=value` above. -// In addition, a keyword definition alone is of the form `keyword` like `name1` -// above. And the assignments `name2=` and `name3=""` are equivalent and -// assign an empty value to the respective keywords. -func parseNameOrNameVal(rest string, d *Directive) (*Node, map[string]bool, error) { - words := parseWords(rest, d) - if len(words) == 0 { - return nil, nil, nil - } - - var ( - rootnode *Node - prevNode *Node - ) - for i, word := range words { - node := &Node{} - node.Value = word - if i == 0 { - rootnode = node - } else { - prevNode.Next = node - } - prevNode = node - } - - return rootnode, nil, nil -} - -// parses a whitespace-delimited set of arguments. The result is effectively a -// linked list of string arguments. -func parseStringsWhitespaceDelimited(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - node := &Node{} - rootnode := node - prevnode := node - for _, str := range tokenWhitespace.Split(rest, -1) { // use regexp - prevnode = node - node.Value = str - node.Next = &Node{} - node = node.Next - } - - // XXX to get around regexp.Split *always* providing an empty string at the - // end due to how our loop is constructed, nil out the last node in the - // chain. - prevnode.Next = nil - - return rootnode, nil, nil -} - -// parseString just wraps the rest of the line in a single node and returns it. -func parseString(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - n := &Node{} - n.Value = rest - return n, nil, nil -} - -// parseJSON converts JSON arrays to an AST.
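Both ENV spellings handled above end up as the same shape: a linked chain of alternating name and value nodes. A toy illustration of the chains that `ENV a b` and `ENV a=b c=d` would produce, with Node pared down to the two fields involved:

package main

import "fmt"

type Node struct {
	Value string
	Next  *Node
}

// chain links the given values into a singly linked list, the way the
// name/value pairs are linked in parseNameVal above.
func chain(values ...string) *Node {
	var root, prev *Node
	for _, v := range values {
		n := &Node{Value: v}
		if root == nil {
			root = n
		} else {
			prev.Next = n
		}
		prev = n
	}
	return root
}

func dump(n *Node) {
	for ; n != nil; n = n.Next {
		fmt.Printf("%q ", n.Value)
	}
	fmt.Println()
}

func main() {
	dump(chain("a", "b"))           // old form: ENV a b
	dump(chain("a", "b", "c", "d")) // new form: ENV a=b c=d
}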
-func parseJSON(rest string, d *Directive) (*Node, map[string]bool, error) { - rest = strings.TrimLeftFunc(rest, unicode.IsSpace) - if !strings.HasPrefix(rest, "[") { - return nil, nil, fmt.Errorf(`Error parsing "%s" as a JSON array`, rest) - } - - var myJSON []interface{} - if err := json.NewDecoder(strings.NewReader(rest)).Decode(&myJSON); err != nil { - return nil, nil, err - } - - var top, prev *Node - for _, str := range myJSON { - s, ok := str.(string) - if !ok { - return nil, nil, errDockerfileNotStringArray - } - - node := &Node{Value: s} - if prev == nil { - top = node - } else { - prev.Next = node - } - prev = node - } - - return top, map[string]bool{"json": true}, nil -} - -// parseMaybeJSON determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, quotes the result and returns a single -// node. -func parseMaybeJSON(rest string, d *Directive) (*Node, map[string]bool, error) { - if rest == "" { - return nil, nil, nil - } - - node, attrs, err := parseJSON(rest, d) - - if err == nil { - return node, attrs, nil - } - if err == errDockerfileNotStringArray { - return nil, nil, err - } - - node = &Node{} - node.Value = rest - return node, nil, nil -} - -// parseMaybeJSONToList determines if the argument appears to be a JSON array. If -// so, passes to parseJSON; if not, attempts to parse it as a whitespace -// delimited string. -func parseMaybeJSONToList(rest string, d *Directive) (*Node, map[string]bool, error) { - node, attrs, err := parseJSON(rest, d) - - if err == nil { - return node, attrs, nil - } - if err == errDockerfileNotStringArray { - return nil, nil, err - } - - return parseStringsWhitespaceDelimited(rest, d) -} - -// The HEALTHCHECK command is like parseMaybeJSON, but has an extra type argument. -func parseHealthConfig(rest string, d *Directive) (*Node, map[string]bool, error) { - // Find end of first argument - var sep int - for ; sep < len(rest); sep++ { - if unicode.IsSpace(rune(rest[sep])) { - break - } - } - next := sep - for ; next < len(rest); next++ { - if !unicode.IsSpace(rune(rest[next])) { - break - } - } - - if sep == 0 { - return nil, nil, nil - } - - typ := rest[:sep] - cmd, attrs, err := parseMaybeJSON(rest[next:], d) - if err != nil { - return nil, nil, err - } - - return &Node{Value: typ, Next: cmd}, attrs, err -} diff --git a/builder/dockerfile/parser/parser.go b/builder/dockerfile/parser/parser.go deleted file mode 100644 index 6fb84d921c..0000000000 --- a/builder/dockerfile/parser/parser.go +++ /dev/null @@ -1,215 +0,0 @@ -// Package parser implements a parser and parse tree dumper for Dockerfiles. -package parser - -import ( - "bufio" - "bytes" - "fmt" - "io" - "regexp" - "strings" - "unicode" - - "github.com/docker/docker/builder/dockerfile/command" -) - -// Node is a structure used to represent a parse tree. -// -// In the node there are three fields, Value, Next, and Children. Value is the -// current token's string value. Next is always the next non-child token, and -// children contains all the children. Here's an example: -// -// (value next (child child-next child-next-next) next-next) -// -// This data structure is frankly pretty lousy for handling complex languages, -// but lucky for us the Dockerfile isn't very complicated. This structure -// works a little more effectively than a "proper" parse tree for our needs. 
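parseJSON above leans on encoding/json and then rejects any element that is not a string, which is the case errDockerfileNotStringArray guards. A standalone check of that rule (names here are stand-ins for the parser's internals):

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"strings"
)

var errNotStringArray = errors.New("arrays must be comprised of strings only")

func stringsOnly(rest string) ([]string, error) {
	var raw []interface{}
	if err := json.NewDecoder(strings.NewReader(rest)).Decode(&raw); err != nil {
		return nil, err
	}
	out := make([]string, 0, len(raw))
	for _, v := range raw {
		s, ok := v.(string)
		if !ok {
			return nil, errNotStringArray
		}
		out = append(out, s)
	}
	return out, nil
}

func main() {
	fmt.Println(stringsOnly(`["echo", "hi"]`)) // [echo hi] <nil>
	fmt.Println(stringsOnly(`["echo", 42]`))   // [] arrays must be comprised of strings only
}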
- -type Node struct { - Value string // actual content - Next *Node // the next item in the current sexp - Children []*Node // the children of this sexp - Attributes map[string]bool // special attributes for this node - Original string // original line used before parsing - Flags []string // only top Node should have this set - StartLine int // the line in the original dockerfile where the node begins - EndLine int // the line in the original dockerfile where the node ends -} - -// Directive is the structure used during a build run to hold the state of -// parsing directives. -type Directive struct { - EscapeToken rune // Current escape token - LineContinuationRegex *regexp.Regexp // Current line continuation regex - LookingForDirectives bool // Whether we are currently looking for directives - EscapeSeen bool // Whether the escape directive has been seen -} - -var ( - dispatch map[string]func(string, *Directive) (*Node, map[string]bool, error) - tokenWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`) - tokenComment = regexp.MustCompile(`^#.*$`) -) - -// DefaultEscapeToken is the default escape token -const DefaultEscapeToken = "\\" - -// SetEscapeToken sets the default token for escaping characters in a Dockerfile. -func SetEscapeToken(s string, d *Directive) error { - if s != "`" && s != "\\" { - return fmt.Errorf("invalid ESCAPE '%s'. Must be ` or \\", s) - } - d.EscapeToken = rune(s[0]) - d.LineContinuationRegex = regexp.MustCompile(`\` + s + `[ \t]*$`) - return nil -} - -func init() { - // Dispatch Table. See line_parsers.go for the parse functions. - // The command is parsed and mapped to the line parser. The line parser - // receives the arguments but not the command, and returns an AST after - // reformulating the arguments according to the rules in the parser - // functions. Errors are propagated up by Parse() and the resulting AST can - // be incorporated directly into the existing AST as a next. - dispatch = map[string]func(string, *Directive) (*Node, map[string]bool, error){ - command.Add: parseMaybeJSONToList, - command.Arg: parseNameOrNameVal, - command.Cmd: parseMaybeJSON, - command.Copy: parseMaybeJSONToList, - command.Entrypoint: parseMaybeJSON, - command.Env: parseEnv, - command.Expose: parseStringsWhitespaceDelimited, - command.From: parseString, - command.Healthcheck: parseHealthConfig, - command.Label: parseLabel, - command.Maintainer: parseString, - command.Onbuild: parseSubCommand, - command.Run: parseMaybeJSON, - command.Shell: parseMaybeJSON, - command.StopSignal: parseString, - command.User: parseString, - command.Volume: parseMaybeJSONToList, - command.Workdir: parseString, - } -} - -// ParseLine parses a line and returns the remainder. -func ParseLine(line string, d *Directive) (string, *Node, error) { - // Handle the parser directive '# escape='. Parser directives must precede - // any builder instruction or other comments, and cannot be repeated.
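The escape parser directive is recognised by the tokenEscapeCommand pattern above, with the named escapechar group carrying the chosen token. A quick standalone probe of that regex:

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var tokenEscapeCommand = regexp.MustCompile(`^#[ \t]*escape[ \t]*=[ \t]*(?P<escapechar>.).*$`)

func main() {
	for _, line := range []string{"# escape=`", "# escape=\\", "# not a directive"} {
		// ParseLine lowercases the line before matching, so do the same here.
		m := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line))
		if len(m) == 0 {
			fmt.Printf("%-20q no directive\n", line)
			continue
		}
		for i, name := range tokenEscapeCommand.SubexpNames() {
			if name == "escapechar" {
				fmt.Printf("%-20q escape token: %q\n", line, m[i])
			}
		}
	}
}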
- if d.LookingForDirectives { - tecMatch := tokenEscapeCommand.FindStringSubmatch(strings.ToLower(line)) - if len(tecMatch) > 0 { - if d.EscapeSeen == true { - return "", nil, fmt.Errorf("only one escape parser directive can be used") - } - for i, n := range tokenEscapeCommand.SubexpNames() { - if n == "escapechar" { - if err := SetEscapeToken(tecMatch[i], d); err != nil { - return "", nil, err - } - d.EscapeSeen = true - return "", nil, nil - } - } - } - } - - d.LookingForDirectives = false - - if line = stripComments(line); line == "" { - return "", nil, nil - } - - if d.LineContinuationRegex.MatchString(line) { - line = d.LineContinuationRegex.ReplaceAllString(line, "") - return line, nil, nil - } - - cmd, flags, args, err := splitCommand(line) - if err != nil { - return "", nil, err - } - - node := &Node{} - node.Value = cmd - - sexp, attrs, err := fullDispatch(cmd, args, d) - if err != nil { - return "", nil, err - } - - node.Next = sexp - node.Attributes = attrs - node.Original = line - node.Flags = flags - - return "", node, nil -} - -// Parse is the main parse routine. -// It handles an io.ReadWriteCloser and returns the root of the AST. -func Parse(rwc io.Reader, d *Directive) (*Node, error) { - currentLine := 0 - root := &Node{} - root.StartLine = -1 - scanner := bufio.NewScanner(rwc) - - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - scannedLine := strings.TrimLeftFunc(string(scannedBytes), unicode.IsSpace) - currentLine++ - line, child, err := ParseLine(scannedLine, d) - if err != nil { - return nil, err - } - startLine := currentLine - - if line != "" && child == nil { - for scanner.Scan() { - newline := scanner.Text() - currentLine++ - - if stripComments(strings.TrimSpace(newline)) == "" { - continue - } - - line, child, err = ParseLine(line+newline, d) - if err != nil { - return nil, err - } - - if child != nil { - break - } - } - if child == nil && line != "" { - _, child, err = ParseLine(line, d) - if err != nil { - return nil, err - } - } - } - - if child != nil { - // Update the line information for the current child. - child.StartLine = startLine - child.EndLine = currentLine - // Update the line information for the root. The starting line of the root is always the - // starting line of the first child and the ending line is the ending line of the last child. 
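The accumulation loop in Parse keeps appending physical lines while ParseLine reports a leftover, that is, while the continuation regex matches; with the default escape token that regex is `\\[ \t]*$`, as built by SetEscapeToken. A small demonstration of the strip-and-join step:

package main

import (
	"fmt"
	"regexp"
)

// With the default escape token "\", SetEscapeToken produces this pattern.
var lineContinuation = regexp.MustCompile(`\\[ \t]*$`)

func main() {
	physical := []string{`RUN echo hello \`, `    world`}

	logical := ""
	for _, line := range physical {
		if lineContinuation.MatchString(line) {
			// Drop the trailing escape and keep accumulating.
			logical += lineContinuation.ReplaceAllString(line, "")
			continue
		}
		logical += line
	}
	fmt.Printf("%q\n", logical) // "RUN echo hello     world"
}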
- if root.StartLine < 0 { - root.StartLine = currentLine - } - root.EndLine = currentLine - root.Children = append(root.Children, child) - } - } - - return root, nil -} diff --git a/builder/dockerfile/parser/parser_test.go b/builder/dockerfile/parser/parser_test.go deleted file mode 100644 index e7b0c07c53..0000000000 --- a/builder/dockerfile/parser/parser_test.go +++ /dev/null @@ -1,174 +0,0 @@ -package parser - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "testing" -) - -const testDir = "testfiles" -const negativeTestDir = "testfiles-negative" -const testFileLineInfo = "testfile-line/Dockerfile" - -func getDirs(t *testing.T, dir string) []string { - f, err := os.Open(dir) - if err != nil { - t.Fatal(err) - } - - defer f.Close() - - dirs, err := f.Readdirnames(0) - if err != nil { - t.Fatal(err) - } - - return dirs -} - -func TestTestNegative(t *testing.T) { - for _, dir := range getDirs(t, negativeTestDir) { - dockerfile := filepath.Join(negativeTestDir, dir, "Dockerfile") - - df, err := os.Open(dockerfile) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", dir, err) - } - - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - _, err = Parse(df, &d) - if err == nil { - t.Fatalf("No error parsing broken dockerfile for %s", dir) - } - - df.Close() - } -} - -func TestTestData(t *testing.T) { - for _, dir := range getDirs(t, testDir) { - dockerfile := filepath.Join(testDir, dir, "Dockerfile") - resultfile := filepath.Join(testDir, dir, "result") - - df, err := os.Open(dockerfile) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", dir, err) - } - defer df.Close() - - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - ast, err := Parse(df, &d) - if err != nil { - t.Fatalf("Error parsing %s's dockerfile: %v", dir, err) - } - - content, err := ioutil.ReadFile(resultfile) - if err != nil { - t.Fatalf("Error reading %s's result file: %v", dir, err) - } - - if runtime.GOOS == "windows" { - // CRLF --> CR to match Unix behavior - content = bytes.Replace(content, []byte{'\x0d', '\x0a'}, []byte{'\x0a'}, -1) - } - - if ast.Dump()+"\n" != string(content) { - fmt.Fprintln(os.Stderr, "Result:\n"+ast.Dump()) - fmt.Fprintln(os.Stderr, "Expected:\n"+string(content)) - t.Fatalf("%s: AST dump of dockerfile does not match result", dir) - } - } -} - -func TestParseWords(t *testing.T) { - tests := []map[string][]string{ - { - "input": {"foo"}, - "expect": {"foo"}, - }, - { - "input": {"foo bar"}, - "expect": {"foo", "bar"}, - }, - { - "input": {"foo\\ bar"}, - "expect": {"foo\\ bar"}, - }, - { - "input": {"foo=bar"}, - "expect": {"foo=bar"}, - }, - { - "input": {"foo bar 'abc xyz'"}, - "expect": {"foo", "bar", "'abc xyz'"}, - }, - { - "input": {`foo bar "abc xyz"`}, - "expect": {"foo", "bar", `"abc xyz"`}, - }, - { - "input": {"àöû"}, - "expect": {"àöû"}, - }, - { - "input": {`föo bàr "âbc xÿz"`}, - "expect": {"föo", "bàr", `"âbc xÿz"`}, - }, - } - - for _, test := range tests { - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - words := parseWords(test["input"][0], &d) - if len(words) != len(test["expect"]) { - t.Fatalf("length check failed. input: %v, expect: %q, output: %q", test["input"][0], test["expect"], words) - } - for i, word := range words { - if word != test["expect"][i] { - t.Fatalf("word check failed for word: %q. 
input: %q, expect: %q, output: %q", word, test["input"][0], test["expect"], words) - } - } - } -} - -func TestLineInformation(t *testing.T) { - df, err := os.Open(testFileLineInfo) - if err != nil { - t.Fatalf("Dockerfile missing for %s: %v", testFileLineInfo, err) - } - defer df.Close() - - d := Directive{LookingForDirectives: true} - SetEscapeToken(DefaultEscapeToken, &d) - ast, err := Parse(df, &d) - if err != nil { - t.Fatalf("Error parsing dockerfile %s: %v", testFileLineInfo, err) - } - - if ast.StartLine != 5 || ast.EndLine != 31 { - fmt.Fprintf(os.Stderr, "Wrong root line information: expected(%d-%d), actual(%d-%d)\n", 5, 31, ast.StartLine, ast.EndLine) - t.Fatalf("Root line information doesn't match result.") - } - if len(ast.Children) != 3 { - fmt.Fprintf(os.Stderr, "Wrong number of child: expected(%d), actual(%d)\n", 3, len(ast.Children)) - t.Fatalf("Root line information doesn't match result for %s", testFileLineInfo) - } - expected := [][]int{ - {5, 5}, - {11, 12}, - {17, 31}, - } - for i, child := range ast.Children { - if child.StartLine != expected[i][0] || child.EndLine != expected[i][1] { - t.Logf("Wrong line information for child %d: expected(%d-%d), actual(%d-%d)\n", - i, expected[i][0], expected[i][1], child.StartLine, child.EndLine) - t.Fatalf("Root line information doesn't match result.") - } - } -} diff --git a/builder/dockerfile/parser/testfile-line/Dockerfile b/builder/dockerfile/parser/testfile-line/Dockerfile deleted file mode 100644 index c7601c9f69..0000000000 --- a/builder/dockerfile/parser/testfile-line/Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ -# ESCAPE=\ - - - -FROM brimstone/ubuntu:14.04 - - -# TORUN -v /var/run/docker.sock:/var/run/docker.sock - - -ENV GOPATH \ -/go - - - -# Install the packages we need, clean up after them and us -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - - - && apt-get install -y --no-install-recommends git golang ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/brimstone/consuldock \ - && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH - - - - diff --git a/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile b/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile deleted file mode 100644 index 1d65578794..0000000000 --- a/builder/dockerfile/parser/testfiles-negative/env_no_value/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM busybox - -ENV PATH diff --git a/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile b/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile deleted file mode 100644 index d1be4596c7..0000000000 --- a/builder/dockerfile/parser/testfiles-negative/shykes-nested-json/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD [ "echo", [ "nested json" ] ] diff --git a/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile b/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile deleted file mode 100644 index 00b444cba5..0000000000 --- a/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/Dockerfile +++ /dev/null @@ -1,11 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Seongyeol Lim - -COPY . /go/src/github.com/docker/docker -ADD . 
/ -ADD null / -COPY nullfile /tmp -ADD [ "vimrc", "/tmp" ] -COPY [ "bashrc", "/tmp" ] -COPY [ "test file", "/tmp" ] -ADD [ "test file", "/tmp/test file" ] diff --git a/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result b/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result deleted file mode 100644 index 85aee64018..0000000000 --- a/builder/dockerfile/parser/testfiles/ADD-COPY-with-JSON/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Seongyeol Lim ") -(copy "." "/go/src/github.com/docker/docker") -(add "." "/") -(add "null" "/") -(copy "nullfile" "/tmp") -(add "vimrc" "/tmp") -(copy "bashrc" "/tmp") -(copy "test file" "/tmp") -(add "test file" "/tmp/test file") diff --git a/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile b/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile deleted file mode 100644 index 0364ef9d96..0000000000 --- a/builder/dockerfile/parser/testfiles/brimstone-consuldock/Dockerfile +++ /dev/null @@ -1,26 +0,0 @@ -#escape=\ -FROM brimstone/ubuntu:14.04 - -MAINTAINER brimstone@the.narro.ws - -# TORUN -v /var/run/docker.sock:/var/run/docker.sock - -ENV GOPATH /go - -# Set our command -ENTRYPOINT ["/usr/local/bin/consuldock"] - -# Install the packages we need, clean up after them and us -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends git golang ca-certificates \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/brimstone/consuldock \ - && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH diff --git a/builder/dockerfile/parser/testfiles/brimstone-consuldock/result b/builder/dockerfile/parser/testfiles/brimstone-consuldock/result deleted file mode 100644 index 227f748cda..0000000000 --- a/builder/dockerfile/parser/testfiles/brimstone-consuldock/result +++ /dev/null @@ -1,5 +0,0 @@ -(from "brimstone/ubuntu:14.04") -(maintainer "brimstone@the.narro.ws") -(env "GOPATH" "/go") -(entrypoint "/usr/local/bin/consuldock") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/brimstone/consuldock && mv $GOPATH/bin/consuldock /usr/local/bin/consuldock \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile b/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile deleted file mode 100644 index 25ae352166..0000000000 --- a/builder/dockerfile/parser/testfiles/brimstone-docker-consul/Dockerfile +++ /dev/null @@ -1,52 +0,0 @@ -FROM brimstone/ubuntu:14.04 - -CMD [] - -ENTRYPOINT ["/usr/bin/consul", "agent", "-server", "-data-dir=/consul", "-client=0.0.0.0", "-ui-dir=/webui"] - -EXPOSE 8500 8600 8400 8301 8302 - -RUN apt-get update \ - && apt-get install -y unzip wget \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists - -RUN cd /tmp \ - && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ - -O web_ui.zip \ - && unzip web_ui.zip \ - && mv dist /webui \ - && rm web_ui.zip - -RUN apt-get 
update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends unzip wget \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && cd /tmp \ - && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip \ - -O web_ui.zip \ - && unzip web_ui.zip \ - && mv dist /webui \ - && rm web_ui.zip \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* - -ENV GOPATH /go - -RUN apt-get update \ - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean \ - && apt-get install -y --no-install-recommends git golang ca-certificates build-essential \ - && apt-get clean \ - && rm -rf /var/lib/apt/lists \ - - && go get -v github.com/hashicorp/consul \ - && mv $GOPATH/bin/consul /usr/bin/consul \ - - && dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \ - && apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \ - && rm /tmp/dpkg.* \ - && rm -rf $GOPATH diff --git a/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result b/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result deleted file mode 100644 index 16492e516a..0000000000 --- a/builder/dockerfile/parser/testfiles/brimstone-docker-consul/result +++ /dev/null @@ -1,9 +0,0 @@ -(from "brimstone/ubuntu:14.04") -(cmd) -(entrypoint "/usr/bin/consul" "agent" "-server" "-data-dir=/consul" "-client=0.0.0.0" "-ui-dir=/webui") -(expose "8500" "8600" "8400" "8301" "8302") -(run "apt-get update && apt-get install -y unzip wget \t&& apt-get clean \t&& rm -rf /var/lib/apt/lists") -(run "cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends unzip wget && apt-get clean && rm -rf /var/lib/apt/lists && cd /tmp && wget https://dl.bintray.com/mitchellh/consul/0.3.1_web_ui.zip -O web_ui.zip && unzip web_ui.zip && mv dist /webui && rm web_ui.zip \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.*") -(env "GOPATH" "/go") -(run "apt-get update \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.clean && apt-get install -y --no-install-recommends git golang ca-certificates build-essential && apt-get clean && rm -rf /var/lib/apt/lists \t&& go get -v github.com/hashicorp/consul \t&& mv $GOPATH/bin/consul /usr/bin/consul \t&& dpkg -l | awk '/^ii/ {print $2}' > /tmp/dpkg.dirty \t&& apt-get remove --purge -y $(diff /tmp/dpkg.clean /tmp/dpkg.dirty | awk '/^>/ {print $2}') \t&& rm /tmp/dpkg.* \t&& rm -rf $GOPATH") diff --git a/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile b/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile deleted file mode 100644 index 42b324e77b..0000000000 --- a/builder/dockerfile/parser/testfiles/continueIndent/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -FROM ubuntu:14.04 - -RUN echo hello\ - world\ - goodnight \ - moon\ - light\ -ning -RUN echo hello \ - world -RUN echo hello \ -world -RUN echo hello \ -goodbye\ -frog -RUN echo hello \ -world -RUN echo hi \ - \ - world \ -\ - good\ -\ -night -RUN echo goodbye\ -frog -RUN echo good\ -bye\ -frog - -RUN echo hello \ -# this is a comment - -# this is a comment with a blank line surrounding it - -this is some more useful stuff 
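The continueIndent fixture deleted above exercises line-continuation folding: a trailing escape character joins the next physical line into the same instruction, and comment or blank lines inside a continuation are skipped, which is why the expected sexps in the result file that follows show single collapsed commands. A minimal Go sketch of that folding rule, assuming a fixed `\` escape token rather than the parser's configurable escape directive (an illustration only, not the parser's actual implementation):

package main

import (
	"fmt"
	"strings"
)

// joinContinuations folds physical lines ending in the escape token into one
// logical line, dropping comment and blank lines that occur inside a
// continuation. Whitespace normalization, which the real parser also
// performs, is deliberately omitted here.
func joinContinuations(input, escape string) []string {
	var logical []string
	current := ""
	for _, line := range strings.Split(input, "\n") {
		trimmed := strings.TrimSpace(line)
		if current != "" && (trimmed == "" || strings.HasPrefix(trimmed, "#")) {
			continue // a continuation swallows interior comments and blanks
		}
		if strings.HasSuffix(line, escape) {
			current += strings.TrimSuffix(line, escape)
			continue
		}
		current += line
		if strings.TrimSpace(current) != "" {
			logical = append(logical, current)
		}
		current = ""
	}
	return logical
}

func main() {
	df := "RUN echo goodbye\\\nfrog\nRUN echo hello \\\n# interior comment\n    world"
	for _, l := range joinContinuations(df, "\\") {
		fmt.Println(l) // "RUN echo goodbyefrog", then "RUN echo hello     world"
	}
}

Note that the next line's leading whitespace is preserved by the fold itself; the collapsed single spaces in the result file come from the later word-splitting pass, not from this step.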
diff --git a/builder/dockerfile/parser/testfiles/continueIndent/result b/builder/dockerfile/parser/testfiles/continueIndent/result deleted file mode 100644 index 268ae073c8..0000000000 --- a/builder/dockerfile/parser/testfiles/continueIndent/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "ubuntu:14.04") -(run "echo hello world goodnight moon lightning") -(run "echo hello world") -(run "echo hello world") -(run "echo hello goodbyefrog") -(run "echo hello world") -(run "echo hi world goodnight") -(run "echo goodbyefrog") -(run "echo goodbyefrog") -(run "echo hello this is some more useful stuff") diff --git a/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile b/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile deleted file mode 100644 index 8ccb71a578..0000000000 --- a/builder/dockerfile/parser/testfiles/cpuguy83-nagios/Dockerfile +++ /dev/null @@ -1,54 +0,0 @@ -FROM cpuguy83/ubuntu -ENV NAGIOS_HOME /opt/nagios -ENV NAGIOS_USER nagios -ENV NAGIOS_GROUP nagios -ENV NAGIOS_CMDUSER nagios -ENV NAGIOS_CMDGROUP nagios -ENV NAGIOSADMIN_USER nagiosadmin -ENV NAGIOSADMIN_PASS nagios -ENV APACHE_RUN_USER nagios -ENV APACHE_RUN_GROUP nagios -ENV NAGIOS_TIMEZONE UTC - -RUN sed -i 's/universe/universe multiverse/' /etc/apt/sources.list -RUN apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx -RUN ( egrep -i "^${NAGIOS_GROUP}" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i "^${NAGIOS_CMDGROUP}" /etc/group || groupadd $NAGIOS_CMDGROUP ) -RUN ( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER ) - -ADD http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3 /tmp/nagios.tar.gz -RUN cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf -ADD http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz /tmp/ -RUN cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install - -RUN sed -i.bak 's/.*\=www\-data//g' /etc/apache2/envvars -RUN export DOC_ROOT="DocumentRoot $(echo $NAGIOS_HOME/share)"; sed -i "s,DocumentRoot.*,$DOC_ROOT," /etc/apache2/sites-enabled/000-default - -RUN ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo - -RUN echo "use_timezone=$NAGIOS_TIMEZONE" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo "SetEnv TZ \"${NAGIOS_TIMEZONE}\"" >> /etc/apache2/conf.d/nagios.conf - -RUN mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs ${NAGIOS_HOME}/libexec/mibs -RUN echo "cfg_dir=${NAGIOS_HOME}/etc/conf.d" >> ${NAGIOS_HOME}/etc/nagios.cfg -RUN echo "cfg_dir=${NAGIOS_HOME}/etc/monitor" >> ${NAGIOS_HOME}/etc/nagios.cfg -RUN download-mibs && echo "mibs +ALL" > /etc/snmp/snmp.conf - 
-RUN sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && \ - sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg -RUN cp /etc/services /var/spool/postfix/etc/ - -RUN mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix -ADD nagios.init /etc/sv/nagios/run -ADD apache.init /etc/sv/apache/run -ADD postfix.init /etc/sv/postfix/run -ADD postfix.stop /etc/sv/postfix/finish - -ADD start.sh /usr/local/bin/start_nagios - -ENV APACHE_LOCK_DIR /var/run -ENV APACHE_LOG_DIR /var/log/apache2 - -EXPOSE 80 - -VOLUME ["/opt/nagios/var", "/opt/nagios/etc", "/opt/nagios/libexec", "/var/log/apache2", "/usr/share/snmp/mibs"] - -CMD ["/usr/local/bin/start_nagios"] diff --git a/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result b/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result deleted file mode 100644 index 25dd3ddfe5..0000000000 --- a/builder/dockerfile/parser/testfiles/cpuguy83-nagios/result +++ /dev/null @@ -1,40 +0,0 @@ -(from "cpuguy83/ubuntu") -(env "NAGIOS_HOME" "/opt/nagios") -(env "NAGIOS_USER" "nagios") -(env "NAGIOS_GROUP" "nagios") -(env "NAGIOS_CMDUSER" "nagios") -(env "NAGIOS_CMDGROUP" "nagios") -(env "NAGIOSADMIN_USER" "nagiosadmin") -(env "NAGIOSADMIN_PASS" "nagios") -(env "APACHE_RUN_USER" "nagios") -(env "APACHE_RUN_GROUP" "nagios") -(env "NAGIOS_TIMEZONE" "UTC") -(run "sed -i 's/universe/universe multiverse/' /etc/apt/sources.list") -(run "apt-get update && apt-get install -y iputils-ping netcat build-essential snmp snmpd snmp-mibs-downloader php5-cli apache2 libapache2-mod-php5 runit bc postfix bsd-mailx") -(run "( egrep -i \"^${NAGIOS_GROUP}\" /etc/group || groupadd $NAGIOS_GROUP ) && ( egrep -i \"^${NAGIOS_CMDGROUP}\" /etc/group || groupadd $NAGIOS_CMDGROUP )") -(run "( id -u $NAGIOS_USER || useradd --system $NAGIOS_USER -g $NAGIOS_GROUP -d $NAGIOS_HOME ) && ( id -u $NAGIOS_CMDUSER || useradd --system -d $NAGIOS_HOME -g $NAGIOS_CMDGROUP $NAGIOS_CMDUSER )") -(add "http://downloads.sourceforge.net/project/nagios/nagios-3.x/nagios-3.5.1/nagios-3.5.1.tar.gz?r=http%3A%2F%2Fwww.nagios.org%2Fdownload%2Fcore%2Fthanks%2F%3Ft%3D1398863696&ts=1398863718&use_mirror=superb-dca3" "/tmp/nagios.tar.gz") -(run "cd /tmp && tar -zxvf nagios.tar.gz && cd nagios && ./configure --prefix=${NAGIOS_HOME} --exec-prefix=${NAGIOS_HOME} --enable-event-broker --with-nagios-command-user=${NAGIOS_CMDUSER} --with-command-group=${NAGIOS_CMDGROUP} --with-nagios-user=${NAGIOS_USER} --with-nagios-group=${NAGIOS_GROUP} && make all && make install && make install-config && make install-commandmode && cp sample-config/httpd.conf /etc/apache2/conf.d/nagios.conf") -(add "http://www.nagios-plugins.org/download/nagios-plugins-1.5.tar.gz" "/tmp/") -(run "cd /tmp && tar -zxvf nagios-plugins-1.5.tar.gz && cd nagios-plugins-1.5 && ./configure --prefix=${NAGIOS_HOME} && make && make install") -(run "sed -i.bak 's/.*\\=www\\-data//g' /etc/apache2/envvars") -(run "export DOC_ROOT=\"DocumentRoot $(echo $NAGIOS_HOME/share)\"; sed -i \"s,DocumentRoot.*,$DOC_ROOT,\" /etc/apache2/sites-enabled/000-default") -(run "ln -s ${NAGIOS_HOME}/bin/nagios /usr/local/bin/nagios && mkdir -p /usr/share/snmp/mibs && chmod 0755 /usr/share/snmp/mibs && touch /usr/share/snmp/mibs/.foo") -(run "echo \"use_timezone=$NAGIOS_TIMEZONE\" >> ${NAGIOS_HOME}/etc/nagios.cfg && echo \"SetEnv TZ \\\"${NAGIOS_TIMEZONE}\\\"\" >> /etc/apache2/conf.d/nagios.conf") -(run "mkdir -p ${NAGIOS_HOME}/etc/conf.d && mkdir -p ${NAGIOS_HOME}/etc/monitor && ln -s /usr/share/snmp/mibs 
${NAGIOS_HOME}/libexec/mibs") -(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/conf.d\" >> ${NAGIOS_HOME}/etc/nagios.cfg") -(run "echo \"cfg_dir=${NAGIOS_HOME}/etc/monitor\" >> ${NAGIOS_HOME}/etc/nagios.cfg") -(run "download-mibs && echo \"mibs +ALL\" > /etc/snmp/snmp.conf") -(run "sed -i 's,/bin/mail,/usr/bin/mail,' /opt/nagios/etc/objects/commands.cfg && sed -i 's,/usr/usr,/usr,' /opt/nagios/etc/objects/commands.cfg") -(run "cp /etc/services /var/spool/postfix/etc/") -(run "mkdir -p /etc/sv/nagios && mkdir -p /etc/sv/apache && rm -rf /etc/sv/getty-5 && mkdir -p /etc/sv/postfix") -(add "nagios.init" "/etc/sv/nagios/run") -(add "apache.init" "/etc/sv/apache/run") -(add "postfix.init" "/etc/sv/postfix/run") -(add "postfix.stop" "/etc/sv/postfix/finish") -(add "start.sh" "/usr/local/bin/start_nagios") -(env "APACHE_LOCK_DIR" "/var/run") -(env "APACHE_LOG_DIR" "/var/log/apache2") -(expose "80") -(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") -(cmd "/usr/local/bin/start_nagios") diff --git a/builder/dockerfile/parser/testfiles/docker/Dockerfile b/builder/dockerfile/parser/testfiles/docker/Dockerfile deleted file mode 100644 index 9717adbd6f..0000000000 --- a/builder/dockerfile/parser/testfiles/docker/Dockerfile +++ /dev/null @@ -1,103 +0,0 @@ -# This file describes the standard way to build Docker, using docker -# -# Usage: -# -# # Assemble the full dev environment. This is slow the first time. -# docker build -t docker . -# -# # Mount your source in an interactive container for quick testing: -# docker run -v `pwd`:/go/src/github.com/docker/docker --privileged -i -t docker bash -# -# # Run the test suite: -# docker run --privileged docker hack/make.sh test -# -# # Publish a release: -# docker run --privileged \ -# -e AWS_S3_BUCKET=baz \ -# -e AWS_ACCESS_KEY=foo \ -# -e AWS_SECRET_KEY=bar \ -# -e GPG_PASSPHRASE=gloubiboulga \ -# docker hack/release.sh -# -# Note: AppArmor used to mess with privileged mode, but this is no longer -# the case. Therefore, you don't have to disable it anymore. 
-# - -FROM ubuntu:14.04 -MAINTAINER Tianon Gravi (@tianon) - -# Packaged dependencies -RUN apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \ - apt-utils \ - aufs-tools \ - automake \ - btrfs-tools \ - build-essential \ - curl \ - dpkg-sig \ - git \ - iptables \ - libapparmor-dev \ - libcap-dev \ - libsqlite3-dev \ - mercurial \ - pandoc \ - parallel \ - reprepro \ - ruby1.9.1 \ - ruby1.9.1-dev \ - s3cmd=1.1.0* \ - --no-install-recommends - -# Get lvm2 source for compiling statically -RUN git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103 -# see https://git.fedorahosted.org/cgit/lvm2.git/refs/tags for release tags -# note: we don't use "git clone -b" above because it then spews big nasty warnings about 'detached HEAD' state that we can't silence as easily as we can silence them using "git checkout" directly - -# Compile and install lvm2 -RUN cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper -# see https://git.fedorahosted.org/cgit/lvm2.git/tree/INSTALL - -# Install Go -RUN curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz -ENV PATH /usr/local/go/bin:$PATH -ENV GOPATH /go:/go/src/github.com/docker/docker/vendor -RUN cd /usr/local/go/src && ./make.bash --no-clean 2>&1 - -# Compile Go for cross compilation -ENV DOCKER_CROSSPLATFORMS \ - linux/386 linux/arm \ - darwin/amd64 darwin/386 \ - freebsd/amd64 freebsd/386 freebsd/arm -# (set an explicit GOARM of 5 for maximum compatibility) -ENV GOARM 5 -RUN cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done' - -# Grab Go's cover tool for dead-simple code coverage testing -RUN go get golang.org/x/tools/cmd/cover - -# TODO replace FPM with some very minimal debhelper stuff -RUN gem install --no-rdoc --no-ri fpm --version 1.0.2 - -# Get the "busybox" image source so we can build locally instead of pulling -RUN git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox - -# Setup s3cmd config -RUN /bin/echo -e '[default]\naccess_key=$AWS_ACCESS_KEY\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg - -# Set user.email so crosbymichael's in-container merge commits go smoothly -RUN git config --global user.email 'docker-dummy@example.com' - -# Add an unprivileged user to be used for tests which need it -RUN groupadd -r docker -RUN useradd --create-home --gid docker unprivilegeduser - -VOLUME /var/lib/docker -WORKDIR /go/src/github.com/docker/docker -ENV DOCKER_BUILDTAGS apparmor selinux - -# Wrap all commands in the "docker-in-docker" script to allow nested containers -ENTRYPOINT ["hack/dind"] - -# Upload docker source -COPY . 
/go/src/github.com/docker/docker diff --git a/builder/dockerfile/parser/testfiles/docker/result b/builder/dockerfile/parser/testfiles/docker/result deleted file mode 100644 index d032f9bac4..0000000000 --- a/builder/dockerfile/parser/testfiles/docker/result +++ /dev/null @@ -1,24 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Tianon Gravi (@tianon)") -(run "apt-get update && DEBIAN_FRONTEND=noninteractive apt-get install -yq \tapt-utils \taufs-tools \tautomake \tbtrfs-tools \tbuild-essential \tcurl \tdpkg-sig \tgit \tiptables \tlibapparmor-dev \tlibcap-dev \tlibsqlite3-dev \tmercurial \tpandoc \tparallel \treprepro \truby1.9.1 \truby1.9.1-dev \ts3cmd=1.1.0* \t--no-install-recommends") -(run "git clone --no-checkout https://git.fedorahosted.org/git/lvm2.git /usr/local/lvm2 && cd /usr/local/lvm2 && git checkout -q v2_02_103") -(run "cd /usr/local/lvm2 && ./configure --enable-static_link && make device-mapper && make install_device-mapper") -(run "curl -sSL https://golang.org/dl/go1.3.src.tar.gz | tar -v -C /usr/local -xz") -(env "PATH" "/usr/local/go/bin:$PATH") -(env "GOPATH" "/go:/go/src/github.com/docker/docker/vendor") -(run "cd /usr/local/go/src && ./make.bash --no-clean 2>&1") -(env "DOCKER_CROSSPLATFORMS" "linux/386 linux/arm \tdarwin/amd64 darwin/386 \tfreebsd/amd64 freebsd/386 freebsd/arm") -(env "GOARM" "5") -(run "cd /usr/local/go/src && bash -xc 'for platform in $DOCKER_CROSSPLATFORMS; do GOOS=${platform%/*} GOARCH=${platform##*/} ./make.bash --no-clean 2>&1; done'") -(run "go get golang.org/x/tools/cmd/cover") -(run "gem install --no-rdoc --no-ri fpm --version 1.0.2") -(run "git clone -b buildroot-2014.02 https://github.com/jpetazzo/docker-busybox.git /docker-busybox") -(run "/bin/echo -e '[default]\\naccess_key=$AWS_ACCESS_KEY\\nsecret_key=$AWS_SECRET_KEY' > /.s3cfg") -(run "git config --global user.email 'docker-dummy@example.com'") -(run "groupadd -r docker") -(run "useradd --create-home --gid docker unprivilegeduser") -(volume "/var/lib/docker") -(workdir "/go/src/github.com/docker/docker") -(env "DOCKER_BUILDTAGS" "apparmor selinux") -(entrypoint "hack/dind") -(copy "." 
"/go/src/github.com/docker/docker") diff --git a/builder/dockerfile/parser/testfiles/env/Dockerfile b/builder/dockerfile/parser/testfiles/env/Dockerfile deleted file mode 100644 index 08fa18acec..0000000000 --- a/builder/dockerfile/parser/testfiles/env/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM ubuntu -ENV name value -ENV name=value -ENV name=value name2=value2 -ENV name="value value1" -ENV name=value\ value2 -ENV name="value'quote space'value2" -ENV name='value"double quote"value2' -ENV name=value\ value2 name2=value2\ value3 -ENV name="a\"b" -ENV name="a\'b" -ENV name='a\'b' -ENV name='a\'b'' -ENV name='a\"b' -ENV name="''" -# don't put anything after the next line - it must be the last line of the -# Dockerfile and it must end with \ -ENV name=value \ - name1=value1 \ - name2="value2a \ - value2b" \ - name3="value3a\n\"value3b\"" \ - name4="value4a\\nvalue4b" \ diff --git a/builder/dockerfile/parser/testfiles/env/result b/builder/dockerfile/parser/testfiles/env/result deleted file mode 100644 index ba0a6dd7cb..0000000000 --- a/builder/dockerfile/parser/testfiles/env/result +++ /dev/null @@ -1,16 +0,0 @@ -(from "ubuntu") -(env "name" "value") -(env "name" "value") -(env "name" "value" "name2" "value2") -(env "name" "\"value value1\"") -(env "name" "value\\ value2") -(env "name" "\"value'quote space'value2\"") -(env "name" "'value\"double quote\"value2'") -(env "name" "value\\ value2" "name2" "value2\\ value3") -(env "name" "\"a\\\"b\"") -(env "name" "\"a\\'b\"") -(env "name" "'a\\'b'") -(env "name" "'a\\'b''") -(env "name" "'a\\\"b'") -(env "name" "\"''\"") -(env "name" "value" "name1" "value1" "name2" "\"value2a value2b\"" "name3" "\"value3a\\n\\\"value3b\\\"\"" "name4" "\"value4a\\\\nvalue4b\"") diff --git a/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile b/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile deleted file mode 100644 index 6def7efdcd..0000000000 --- a/builder/dockerfile/parser/testfiles/escape-after-comment/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -# Comment here. Should not be looking for the following parser directive. -# Hence the following line will be ignored, and the subsequent backslash -# continuation will be the default. -# escape = ` - -FROM image -MAINTAINER foo@bar.com -ENV GOPATH \ -\go \ No newline at end of file diff --git a/builder/dockerfile/parser/testfiles/escape-after-comment/result b/builder/dockerfile/parser/testfiles/escape-after-comment/result deleted file mode 100644 index 21522a880b..0000000000 --- a/builder/dockerfile/parser/testfiles/escape-after-comment/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(maintainer "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile b/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile deleted file mode 100644 index 08a8cc4326..0000000000 --- a/builder/dockerfile/parser/testfiles/escape-nonewline/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -# escape = `` -# There is no white space line after the directives. This still succeeds, but goes -# against best practices. 
-FROM image -MAINTAINER foo@bar.com -ENV GOPATH ` -\go \ No newline at end of file diff --git a/builder/dockerfile/parser/testfiles/escape-nonewline/result b/builder/dockerfile/parser/testfiles/escape-nonewline/result deleted file mode 100644 index 21522a880b..0000000000 --- a/builder/dockerfile/parser/testfiles/escape-nonewline/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(maintainer "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/builder/dockerfile/parser/testfiles/escape/Dockerfile b/builder/dockerfile/parser/testfiles/escape/Dockerfile deleted file mode 100644 index ef30414a5e..0000000000 --- a/builder/dockerfile/parser/testfiles/escape/Dockerfile +++ /dev/null @@ -1,6 +0,0 @@ -#escape = ` - -FROM image -MAINTAINER foo@bar.com -ENV GOPATH ` -\go \ No newline at end of file diff --git a/builder/dockerfile/parser/testfiles/escape/result b/builder/dockerfile/parser/testfiles/escape/result deleted file mode 100644 index 21522a880b..0000000000 --- a/builder/dockerfile/parser/testfiles/escape/result +++ /dev/null @@ -1,3 +0,0 @@ -(from "image") -(maintainer "foo@bar.com") -(env "GOPATH" "\\go") diff --git a/builder/dockerfile/parser/testfiles/escapes/Dockerfile b/builder/dockerfile/parser/testfiles/escapes/Dockerfile deleted file mode 100644 index 1ffb17ef08..0000000000 --- a/builder/dockerfile/parser/testfiles/escapes/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Erik \\Hollensbe \" - -RUN apt-get \update && \ - apt-get \"install znc -y -ADD \conf\\" /.znc - -RUN foo \ - -bar \ - -baz - -CMD [ "\/usr\\\"/bin/znc", "-f", "-r" ] diff --git a/builder/dockerfile/parser/testfiles/escapes/result b/builder/dockerfile/parser/testfiles/escapes/result deleted file mode 100644 index 13e409cb1a..0000000000 --- a/builder/dockerfile/parser/testfiles/escapes/result +++ /dev/null @@ -1,6 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Erik \\\\Hollensbe \\\"") -(run "apt-get \\update && apt-get \\\"install znc -y") -(add "\\conf\\\\\"" "/.znc") -(run "foo bar baz") -(cmd "/usr\\\"/bin/znc" "-f" "-r") diff --git a/builder/dockerfile/parser/testfiles/flags/Dockerfile b/builder/dockerfile/parser/testfiles/flags/Dockerfile deleted file mode 100644 index 2418e0f069..0000000000 --- a/builder/dockerfile/parser/testfiles/flags/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM scratch -COPY foo /tmp/ -COPY --user=me foo /tmp/ -COPY --doit=true foo /tmp/ -COPY --user=me --doit=true foo /tmp/ -COPY --doit=true -- foo /tmp/ -COPY -- foo /tmp/ -CMD --doit [ "a", "b" ] -CMD --doit=true -- [ "a", "b" ] -CMD --doit -- [ ] diff --git a/builder/dockerfile/parser/testfiles/flags/result b/builder/dockerfile/parser/testfiles/flags/result deleted file mode 100644 index 4578f4cba4..0000000000 --- a/builder/dockerfile/parser/testfiles/flags/result +++ /dev/null @@ -1,10 +0,0 @@ -(from "scratch") -(copy "foo" "/tmp/") -(copy ["--user=me"] "foo" "/tmp/") -(copy ["--doit=true"] "foo" "/tmp/") -(copy ["--user=me" "--doit=true"] "foo" "/tmp/") -(copy ["--doit=true"] "foo" "/tmp/") -(copy "foo" "/tmp/") -(cmd ["--doit"] "a" "b") -(cmd ["--doit=true"] "a" "b") -(cmd ["--doit"]) diff --git a/builder/dockerfile/parser/testfiles/health/Dockerfile b/builder/dockerfile/parser/testfiles/health/Dockerfile deleted file mode 100644 index 081e442882..0000000000 --- a/builder/dockerfile/parser/testfiles/health/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM debian -ADD check.sh main.sh /app/ -CMD /app/main.sh -HEALTHCHECK -HEALTHCHECK --interval=5s --timeout=3s --retries=3 \ - CMD /app/check.sh --quiet 
-HEALTHCHECK CMD -HEALTHCHECK CMD a b -HEALTHCHECK --timeout=3s CMD ["foo"] -HEALTHCHECK CONNECT TCP 7000 diff --git a/builder/dockerfile/parser/testfiles/health/result b/builder/dockerfile/parser/testfiles/health/result deleted file mode 100644 index 092924f88c..0000000000 --- a/builder/dockerfile/parser/testfiles/health/result +++ /dev/null @@ -1,9 +0,0 @@ -(from "debian") -(add "check.sh" "main.sh" "/app/") -(cmd "/app/main.sh") -(healthcheck) -(healthcheck ["--interval=5s" "--timeout=3s" "--retries=3"] "CMD" "/app/check.sh --quiet") -(healthcheck "CMD") -(healthcheck "CMD" "a b") -(healthcheck ["--timeout=3s"] "CMD" "foo") -(healthcheck "CONNECT" "TCP 7000") diff --git a/builder/dockerfile/parser/testfiles/influxdb/Dockerfile b/builder/dockerfile/parser/testfiles/influxdb/Dockerfile deleted file mode 100644 index 587fb9b54b..0000000000 --- a/builder/dockerfile/parser/testfiles/influxdb/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update && apt-get install wget -y -RUN wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb -RUN dpkg -i influxdb_latest_amd64.deb -RUN rm -r /opt/influxdb/shared - -VOLUME /opt/influxdb/shared - -CMD /usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml - -EXPOSE 8083 -EXPOSE 8086 -EXPOSE 8090 -EXPOSE 8099 diff --git a/builder/dockerfile/parser/testfiles/influxdb/result b/builder/dockerfile/parser/testfiles/influxdb/result deleted file mode 100644 index 0998e87e63..0000000000 --- a/builder/dockerfile/parser/testfiles/influxdb/result +++ /dev/null @@ -1,11 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update && apt-get install wget -y") -(run "wget http://s3.amazonaws.com/influxdb/influxdb_latest_amd64.deb") -(run "dpkg -i influxdb_latest_amd64.deb") -(run "rm -r /opt/influxdb/shared") -(volume "/opt/influxdb/shared") -(cmd "/usr/bin/influxdb --pidfile /var/run/influxdb.pid -config /opt/influxdb/shared/config.toml") -(expose "8083") -(expose "8086") -(expose "8090") -(expose "8099") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile deleted file mode 100644 index 39fe27d99c..0000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD "[\"echo\", \"Phew, I just managed to escaped those double quotes\"]" diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result deleted file mode 100644 index afc220c2a7..0000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string-double/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "\"[\\\"echo\\\", \\\"Phew, I just managed to escaped those double quotes\\\"]\"") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile deleted file mode 100644 index eaae081a06..0000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD '["echo", "Well, JSON in a string is JSON too?"]' diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result deleted file mode 
100644 index 484804e2b2..0000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-json-inside-string/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "'[\"echo\", \"Well, JSON in a string is JSON too?\"]'") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile deleted file mode 100644 index c3ac63c07a..0000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ['echo','single quotes are invalid JSON'] diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result deleted file mode 100644 index 6147891207..0000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-single-quotes/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "['echo','single quotes are invalid JSON']") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile deleted file mode 100644 index 5fd4afa522..0000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ["echo", "Please, close the brackets when you're done" diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result deleted file mode 100644 index 1ffbb8ff85..0000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-bracket/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "[\"echo\", \"Please, close the brackets when you're done\"") diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile deleted file mode 100644 index 30cc4bb48f..0000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -CMD ["echo", "look ma, no quote!] 
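The four jeztah-invalid-json fixtures above pin down a fallback rule rather than an error path: a CMD argument is treated as exec form only when it parses as a JSON array, while single-quoted arrays, unterminated brackets, and unterminated strings quietly degrade to shell form and are preserved as a single literal string, as the paired result files show. A rough Go approximation of that classification, using encoding/json as an assumed stand-in for the parser's own JSON detection (the real parser additionally rejects nested arrays outright, per the shykes-nested-json negative fixture earlier, which this sketch does not model):

package main

import (
	"encoding/json"
	"fmt"
)

// classifyCMD treats the argument as exec form only if it unmarshals as a
// JSON array of strings; anything else falls back to shell form unchanged.
func classifyCMD(args string) (execForm []string, shellForm string) {
	var parsed []string
	if err := json.Unmarshal([]byte(args), &parsed); err == nil {
		return parsed, ""
	}
	return nil, args
}

func main() {
	for _, args := range []string{
		`["echo", "ok"]`,                            // valid JSON array: exec form
		`['echo','single quotes are invalid JSON']`, // falls back to shell form
		`["echo", "look ma, no quote!]`,             // falls back to shell form
	} {
		if execForm, shellForm := classifyCMD(args); execForm != nil {
			fmt.Printf("exec form:  %q\n", execForm)
		} else {
			fmt.Printf("shell form: %q\n", shellForm)
		}
	}
}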
diff --git a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result b/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result deleted file mode 100644 index 32048147b5..0000000000 --- a/builder/dockerfile/parser/testfiles/jeztah-invalid-json-unterminated-string/result +++ /dev/null @@ -1 +0,0 @@ -(cmd "[\"echo\", \"look ma, no quote!]") diff --git a/builder/dockerfile/parser/testfiles/json/Dockerfile b/builder/dockerfile/parser/testfiles/json/Dockerfile deleted file mode 100644 index a586917110..0000000000 --- a/builder/dockerfile/parser/testfiles/json/Dockerfile +++ /dev/null @@ -1,8 +0,0 @@ -CMD [] -CMD [""] -CMD ["a"] -CMD ["a","b"] -CMD [ "a", "b" ] -CMD [ "a", "b" ] -CMD [ "a", "b" ] -CMD ["abc 123", "♥", "☃", "\" \\ \/ \b \f \n \r \t \u0000"] diff --git a/builder/dockerfile/parser/testfiles/json/result b/builder/dockerfile/parser/testfiles/json/result deleted file mode 100644 index c6553e6e1a..0000000000 --- a/builder/dockerfile/parser/testfiles/json/result +++ /dev/null @@ -1,8 +0,0 @@ -(cmd) -(cmd "") -(cmd "a") -(cmd "a" "b") -(cmd "a" "b") -(cmd "a" "b") -(cmd "a" "b") -(cmd "abc 123" "♥" "☃" "\" \\ / \b \f \n \r \t \x00") diff --git a/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile b/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile deleted file mode 100644 index 35f9c24aa6..0000000000 --- a/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER James Turnbull "james@example.com" -ENV REFRESHED_AT 2014-06-01 -RUN apt-get update -RUN apt-get -y install redis-server redis-tools -EXPOSE 6379 -ENTRYPOINT [ "/usr/bin/redis-server" ] diff --git a/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result b/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result deleted file mode 100644 index b5ac6fe445..0000000000 --- a/builder/dockerfile/parser/testfiles/kartar-entrypoint-oddities/result +++ /dev/null @@ -1,7 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "James Turnbull \"james@example.com\"") -(env "REFRESHED_AT" "2014-06-01") -(run "apt-get update") -(run "apt-get -y install redis-server redis-tools") -(expose "6379") -(entrypoint "/usr/bin/redis-server") diff --git a/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile b/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile deleted file mode 100644 index 188395fe83..0000000000 --- a/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/Dockerfile +++ /dev/null @@ -1,48 +0,0 @@ -FROM busybox:buildroot-2014.02 - -MAINTAINER docker - -ONBUILD RUN ["echo", "test"] -ONBUILD RUN echo test -ONBUILD COPY . / - - -# RUN Commands \ -# linebreak in comment \ -RUN ["ls", "-la"] -RUN ["echo", "'1234'"] -RUN echo "1234" -RUN echo 1234 -RUN echo '1234' && \ - echo "456" && \ - echo 789 -RUN sh -c 'echo root:testpass \ - > /tmp/passwd' -RUN mkdir -p /test /test2 /test3/test - -# ENV \ -ENV SCUBA 1 DUBA 3 -ENV SCUBA "1 DUBA 3" - -# CMD \ -CMD ["echo", "test"] -CMD echo test -CMD echo "test" -CMD echo 'test' -CMD echo 'test' | wc - - -#EXPOSE\ -EXPOSE 3000 -EXPOSE 9000 5000 6000 - -USER docker -USER docker:root - -VOLUME ["/test"] -VOLUME ["/test", "/test2"] -VOLUME /test3 - -WORKDIR /test - -ADD . / -COPY . 
copy diff --git a/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result b/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result deleted file mode 100644 index 6f7d57a396..0000000000 --- a/builder/dockerfile/parser/testfiles/lk4d4-the-edge-case-generator/result +++ /dev/null @@ -1,29 +0,0 @@ -(from "busybox:buildroot-2014.02") -(maintainer "docker ") -(onbuild (run "echo" "test")) -(onbuild (run "echo test")) -(onbuild (copy "." "/")) -(run "ls" "-la") -(run "echo" "'1234'") -(run "echo \"1234\"") -(run "echo 1234") -(run "echo '1234' && echo \"456\" && echo 789") -(run "sh -c 'echo root:testpass > /tmp/passwd'") -(run "mkdir -p /test /test2 /test3/test") -(env "SCUBA" "1 DUBA 3") -(env "SCUBA" "\"1 DUBA 3\"") -(cmd "echo" "test") -(cmd "echo test") -(cmd "echo \"test\"") -(cmd "echo 'test'") -(cmd "echo 'test' | wc -") -(expose "3000") -(expose "9000" "5000" "6000") -(user "docker") -(user "docker:root") -(volume "/test") -(volume "/test" "/test2") -(volume "/test3") -(workdir "/test") -(add "." "/") -(copy "." "copy") diff --git a/builder/dockerfile/parser/testfiles/mail/Dockerfile b/builder/dockerfile/parser/testfiles/mail/Dockerfile deleted file mode 100644 index f64c1168c1..0000000000 --- a/builder/dockerfile/parser/testfiles/mail/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y -ADD .muttrc / -ADD .offlineimaprc / -ADD .tmux.conf / -ADD mutt /.mutt -ADD vim /.vim -ADD vimrc /.vimrc -ADD crontab /etc/crontab -RUN chmod 644 /etc/crontab -RUN mkdir /Mail -RUN mkdir /.offlineimap -RUN echo "export TERM=screen-256color" >/.zshenv - -CMD setsid cron; tmux -2 diff --git a/builder/dockerfile/parser/testfiles/mail/result b/builder/dockerfile/parser/testfiles/mail/result deleted file mode 100644 index a0efcf04b6..0000000000 --- a/builder/dockerfile/parser/testfiles/mail/result +++ /dev/null @@ -1,14 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update -qy && apt-get install mutt offlineimap vim-nox abook elinks curl tmux cron zsh -y") -(add ".muttrc" "/") -(add ".offlineimaprc" "/") -(add ".tmux.conf" "/") -(add "mutt" "/.mutt") -(add "vim" "/.vim") -(add "vimrc" "/.vimrc") -(add "crontab" "/etc/crontab") -(run "chmod 644 /etc/crontab") -(run "mkdir /Mail") -(run "mkdir /.offlineimap") -(run "echo \"export TERM=screen-256color\" >/.zshenv") -(cmd "setsid cron; tmux -2") diff --git a/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile b/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile deleted file mode 100644 index 57bb5976a3..0000000000 --- a/builder/dockerfile/parser/testfiles/multiple-volumes/Dockerfile +++ /dev/null @@ -1,3 +0,0 @@ -FROM foo - -VOLUME /opt/nagios/var /opt/nagios/etc /opt/nagios/libexec /var/log/apache2 /usr/share/snmp/mibs diff --git a/builder/dockerfile/parser/testfiles/multiple-volumes/result b/builder/dockerfile/parser/testfiles/multiple-volumes/result deleted file mode 100644 index 18dbdeeaa0..0000000000 --- a/builder/dockerfile/parser/testfiles/multiple-volumes/result +++ /dev/null @@ -1,2 +0,0 @@ -(from "foo") -(volume "/opt/nagios/var" "/opt/nagios/etc" "/opt/nagios/libexec" "/var/log/apache2" "/usr/share/snmp/mibs") diff --git a/builder/dockerfile/parser/testfiles/mumble/Dockerfile b/builder/dockerfile/parser/testfiles/mumble/Dockerfile deleted file mode 100644 index 5b9ec06a6c..0000000000 --- a/builder/dockerfile/parser/testfiles/mumble/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM 
ubuntu:14.04 - -RUN apt-get update && apt-get install libcap2-bin mumble-server -y - -ADD ./mumble-server.ini /etc/mumble-server.ini - -CMD /usr/sbin/murmurd diff --git a/builder/dockerfile/parser/testfiles/mumble/result b/builder/dockerfile/parser/testfiles/mumble/result deleted file mode 100644 index a0036a943e..0000000000 --- a/builder/dockerfile/parser/testfiles/mumble/result +++ /dev/null @@ -1,4 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update && apt-get install libcap2-bin mumble-server -y") -(add "./mumble-server.ini" "/etc/mumble-server.ini") -(cmd "/usr/sbin/murmurd") diff --git a/builder/dockerfile/parser/testfiles/nginx/Dockerfile b/builder/dockerfile/parser/testfiles/nginx/Dockerfile deleted file mode 100644 index bf8368e1ca..0000000000 --- a/builder/dockerfile/parser/testfiles/nginx/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Erik Hollensbe - -RUN apt-get update && apt-get install nginx-full -y -RUN rm -rf /etc/nginx -ADD etc /etc/nginx -RUN chown -R root:root /etc/nginx -RUN /usr/sbin/nginx -qt -RUN mkdir /www - -CMD ["/usr/sbin/nginx"] - -VOLUME /www -EXPOSE 80 diff --git a/builder/dockerfile/parser/testfiles/nginx/result b/builder/dockerfile/parser/testfiles/nginx/result deleted file mode 100644 index 56ddb6f258..0000000000 --- a/builder/dockerfile/parser/testfiles/nginx/result +++ /dev/null @@ -1,11 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Erik Hollensbe ") -(run "apt-get update && apt-get install nginx-full -y") -(run "rm -rf /etc/nginx") -(add "etc" "/etc/nginx") -(run "chown -R root:root /etc/nginx") -(run "/usr/sbin/nginx -qt") -(run "mkdir /www") -(cmd "/usr/sbin/nginx") -(volume "/www") -(expose "80") diff --git a/builder/dockerfile/parser/testfiles/tf2/Dockerfile b/builder/dockerfile/parser/testfiles/tf2/Dockerfile deleted file mode 100644 index 72b79bdd7d..0000000000 --- a/builder/dockerfile/parser/testfiles/tf2/Dockerfile +++ /dev/null @@ -1,23 +0,0 @@ -FROM ubuntu:12.04 - -EXPOSE 27015 -EXPOSE 27005 -EXPOSE 26901 -EXPOSE 27020 - -RUN apt-get update && apt-get install libc6-dev-i386 curl unzip -y -RUN mkdir -p /steam -RUN curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam -ADD ./script /steam/script -RUN /steam/steamcmd.sh +runscript /steam/script -RUN curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf -RUN curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf -ADD ./server.cfg /steam/tf2/tf/cfg/server.cfg -ADD ./ctf_2fort.cfg /steam/tf2/tf/cfg/ctf_2fort.cfg -ADD ./sourcemod.cfg /steam/tf2/tf/cfg/sourcemod/sourcemod.cfg -RUN rm -r /steam/tf2/tf/addons/sourcemod/configs -ADD ./configs /steam/tf2/tf/addons/sourcemod/configs -RUN mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en -RUN cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en - -CMD cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill diff --git a/builder/dockerfile/parser/testfiles/tf2/result b/builder/dockerfile/parser/testfiles/tf2/result deleted file mode 100644 index d4f94cd8be..0000000000 --- a/builder/dockerfile/parser/testfiles/tf2/result +++ /dev/null @@ -1,20 +0,0 @@ -(from "ubuntu:12.04") -(expose "27015") -(expose "27005") -(expose "26901") -(expose "27020") -(run "apt-get update && apt-get install libc6-dev-i386 curl unzip -y") -(run "mkdir -p 
/steam") -(run "curl http://media.steampowered.com/client/steamcmd_linux.tar.gz | tar vxz -C /steam") -(add "./script" "/steam/script") -(run "/steam/steamcmd.sh +runscript /steam/script") -(run "curl http://mirror.pointysoftware.net/alliedmodders/mmsource-1.10.0-linux.tar.gz | tar vxz -C /steam/tf2/tf") -(run "curl http://mirror.pointysoftware.net/alliedmodders/sourcemod-1.5.3-linux.tar.gz | tar vxz -C /steam/tf2/tf") -(add "./server.cfg" "/steam/tf2/tf/cfg/server.cfg") -(add "./ctf_2fort.cfg" "/steam/tf2/tf/cfg/ctf_2fort.cfg") -(add "./sourcemod.cfg" "/steam/tf2/tf/cfg/sourcemod/sourcemod.cfg") -(run "rm -r /steam/tf2/tf/addons/sourcemod/configs") -(add "./configs" "/steam/tf2/tf/addons/sourcemod/configs") -(run "mkdir -p /steam/tf2/tf/addons/sourcemod/translations/en") -(run "cp /steam/tf2/tf/addons/sourcemod/translations/*.txt /steam/tf2/tf/addons/sourcemod/translations/en") -(cmd "cd /steam/tf2 && ./srcds_run -port 27015 +ip 0.0.0.0 +map ctf_2fort -autoupdate -steam_dir /steam -steamcmd_script /steam/script +tf_bot_quota 12 +tf_bot_quota_mode fill") diff --git a/builder/dockerfile/parser/testfiles/weechat/Dockerfile b/builder/dockerfile/parser/testfiles/weechat/Dockerfile deleted file mode 100644 index 4842088166..0000000000 --- a/builder/dockerfile/parser/testfiles/weechat/Dockerfile +++ /dev/null @@ -1,9 +0,0 @@ -FROM ubuntu:14.04 - -RUN apt-get update -qy && apt-get install tmux zsh weechat-curses -y - -ADD .weechat /.weechat -ADD .tmux.conf / -RUN echo "export TERM=screen-256color" >/.zshenv - -CMD zsh -c weechat diff --git a/builder/dockerfile/parser/testfiles/weechat/result b/builder/dockerfile/parser/testfiles/weechat/result deleted file mode 100644 index c3abb4c54f..0000000000 --- a/builder/dockerfile/parser/testfiles/weechat/result +++ /dev/null @@ -1,6 +0,0 @@ -(from "ubuntu:14.04") -(run "apt-get update -qy && apt-get install tmux zsh weechat-curses -y") -(add ".weechat" "/.weechat") -(add ".tmux.conf" "/") -(run "echo \"export TERM=screen-256color\" >/.zshenv") -(cmd "zsh -c weechat") diff --git a/builder/dockerfile/parser/testfiles/znc/Dockerfile b/builder/dockerfile/parser/testfiles/znc/Dockerfile deleted file mode 100644 index 3a4da6e916..0000000000 --- a/builder/dockerfile/parser/testfiles/znc/Dockerfile +++ /dev/null @@ -1,7 +0,0 @@ -FROM ubuntu:14.04 -MAINTAINER Erik Hollensbe - -RUN apt-get update && apt-get install znc -y -ADD conf /.znc - -CMD [ "/usr/bin/znc", "-f", "-r" ] diff --git a/builder/dockerfile/parser/testfiles/znc/result b/builder/dockerfile/parser/testfiles/znc/result deleted file mode 100644 index 5493b255fd..0000000000 --- a/builder/dockerfile/parser/testfiles/znc/result +++ /dev/null @@ -1,5 +0,0 @@ -(from "ubuntu:14.04") -(maintainer "Erik Hollensbe ") -(run "apt-get update && apt-get install znc -y") -(add "conf" "/.znc") -(cmd "/usr/bin/znc" "-f" "-r") diff --git a/builder/dockerfile/parser/utils.go b/builder/dockerfile/parser/utils.go deleted file mode 100644 index cd7af75e79..0000000000 --- a/builder/dockerfile/parser/utils.go +++ /dev/null @@ -1,176 +0,0 @@ -package parser - -import ( - "fmt" - "strconv" - "strings" - "unicode" -) - -// Dump dumps the AST defined by `node` as a list of sexps. -// Returns a string suitable for printing. 
-func (node *Node) Dump() string { - str := "" - str += node.Value - - if len(node.Flags) > 0 { - str += fmt.Sprintf(" %q", node.Flags) - } - - for _, n := range node.Children { - str += "(" + n.Dump() + ")\n" - } - - if node.Next != nil { - for n := node.Next; n != nil; n = n.Next { - if len(n.Children) > 0 { - str += " " + n.Dump() - } else { - str += " " + strconv.Quote(n.Value) - } - } - } - - return strings.TrimSpace(str) -} - -// performs the dispatch based on the two primal strings, cmd and args. Please - // look at the dispatch table in parser.go to see how these dispatchers work. -func fullDispatch(cmd, args string, d *Directive) (*Node, map[string]bool, error) { - fn := dispatch[cmd] - - // Ignore invalid Dockerfile instructions - if fn == nil { - fn = parseIgnore - } - - sexp, attrs, err := fn(args, d) - if err != nil { - return nil, nil, err - } - - return sexp, attrs, nil -} - -// splitCommand takes a single line of text and parses out the cmd and args, - // which are used for dispatching to more exact parsing functions. -func splitCommand(line string) (string, []string, string, error) { - var args string - var flags []string - - // Make sure we get the same results irrespective of leading/trailing spaces - cmdline := tokenWhitespace.Split(strings.TrimSpace(line), 2) - cmd := strings.ToLower(cmdline[0]) - - if len(cmdline) == 2 { - var err error - args, flags, err = extractBuilderFlags(cmdline[1]) - if err != nil { - return "", nil, "", err - } - } - - return cmd, flags, strings.TrimSpace(args), nil -} - -// covers comments and empty lines. Lines should be trimmed before passing to - // this function. -func stripComments(line string) string { - // string is already trimmed at this point - if tokenComment.MatchString(line) { - return tokenComment.ReplaceAllString(line, "") - } - - return line -} - -func extractBuilderFlags(line string) (string, []string, error) { - // Parses the BuilderFlags and returns the remaining part of the line - - const ( - inSpaces = iota // looking for start of a word - inWord - inQuote - ) - - words := []string{} - phase := inSpaces - word := "" - quote := '\000' - blankOK := false - var ch rune - - for pos := 0; pos <= len(line); pos++ { - if pos != len(line) { - ch = rune(line[pos]) - } - - if phase == inSpaces { // Looking for start of word - if pos == len(line) { // end of input - break - } - if unicode.IsSpace(ch) { // skip spaces - continue - } - - // Only keep going if the next word starts with -- - if ch != '-' || pos+1 == len(line) || rune(line[pos+1]) != '-' { - return line[pos:], words, nil - } - - phase = inWord // found something with "--", fall through - } - if (phase == inWord || phase == inQuote) && (pos == len(line)) { - if word != "--" && (blankOK || len(word) > 0) { - words = append(words, word) - } - break - } - if phase == inWord { - if unicode.IsSpace(ch) { - phase = inSpaces - if word == "--" { - return line[pos:], words, nil - } - if blankOK || len(word) > 0 { - words = append(words, word) - } - word = "" - blankOK = false - continue - } - if ch == '\'' || ch == '"' { - quote = ch - blankOK = true - phase = inQuote - continue - } - if ch == '\\' { - if pos+1 == len(line) { - continue // just skip \ at end - } - pos++ - ch = rune(line[pos]) - } - word += string(ch) - continue - } - if phase == inQuote { - if ch == quote { - phase = inWord - continue - } - if ch == '\\' { - if pos+1 == len(line) { - phase = inWord - continue // just skip \ at end - } - pos++ - ch = rune(line[pos]) - } - word += string(ch) - } - } - - return "", 
words, nil -} diff --git a/builder/dockerfile/shell_parser.go b/builder/dockerfile/shell_parser.go deleted file mode 100644 index c714266778..0000000000 --- a/builder/dockerfile/shell_parser.go +++ /dev/null @@ -1,314 +0,0 @@ -package dockerfile - -// This will take a single word and an array of env variables and -// process all quotes (" and ') as well as $xxx and ${xxx} env variable -// tokens. It tries to mimic the bash shell's processing. -// It doesn't support all flavors of ${xx:...} formats but new ones can -// be added by adding code to the "special ${} format processing" section - -import ( - "fmt" - "strings" - "text/scanner" - "unicode" -) - -type shellWord struct { - word string - scanner scanner.Scanner - envs []string - pos int -} - -// ProcessWord will use the 'env' list of environment variables, -// and replace any env var references in 'word'. -func ProcessWord(word string, env []string) (string, error) { - sw := &shellWord{ - word: word, - envs: env, - pos: 0, - } - sw.scanner.Init(strings.NewReader(word)) - word, _, err := sw.process() - return word, err } - -// ProcessWords will use the 'env' list of environment variables, -// and replace any env var references in 'word' then it will also -// return a slice of strings which represents the 'word' -// split up based on spaces - taking into account quotes. Note that -// this splitting is done **after** the env var substitutions are done. -// Note, each one is trimmed to remove leading and trailing spaces (unless -// they are quoted), but ProcessWord retains spaces between words. -func ProcessWords(word string, env []string) ([]string, error) { - sw := &shellWord{ - word: word, - envs: env, - pos: 0, - } - sw.scanner.Init(strings.NewReader(word)) - _, words, err := sw.process() - return words, err -} - -func (sw *shellWord) process() (string, []string, error) { - return sw.processStopOn(scanner.EOF) -} - -type wordsStruct struct { - word string - words []string - inWord bool -} - -func (w *wordsStruct) addChar(ch rune) { - if unicode.IsSpace(ch) && w.inWord { - if len(w.word) != 0 { - w.words = append(w.words, w.word) - w.word = "" - w.inWord = false - } - } else if !unicode.IsSpace(ch) { - w.addRawChar(ch) - } -} - -func (w *wordsStruct) addRawChar(ch rune) { - w.word += string(ch) - w.inWord = true -} - -func (w *wordsStruct) addString(str string) { - var scan scanner.Scanner - scan.Init(strings.NewReader(str)) - for scan.Peek() != scanner.EOF { - w.addChar(scan.Next()) - } -} - -func (w *wordsStruct) addRawString(str string) { - w.word += str - w.inWord = true -} - -func (w *wordsStruct) getWords() []string { - if len(w.word) > 0 { - w.words = append(w.words, w.word) - - // Just in case we're called again by mistake - w.word = "" - w.inWord = false - } - return w.words -} - -// Process the word, starting at 'pos', and stop when we get to the -// end of the word or the 'stopChar' character -func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { - var result string - var words wordsStruct - - var charFuncMapping = map[rune]func() (string, error){ - '\'': sw.processSingleQuote, - '"': sw.processDoubleQuote, - '$': sw.processDollar, - } - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - - if stopChar != scanner.EOF && ch == stopChar { - sw.scanner.Next() - break - } - if fn, ok := charFuncMapping[ch]; ok { - // Call special processing func for certain chars - tmp, err := fn() - if err != nil { - return "", []string{}, err - } - result += tmp - - if ch == rune('$') { - words.addString(tmp) - } 
else { - words.addRawString(tmp) - } - } else { - // Not special, just add it to the result - ch = sw.scanner.Next() - - if ch == '\\' { - // '\' escapes, except end of line - - ch = sw.scanner.Next() - - if ch == scanner.EOF { - break - } - - words.addRawChar(ch) - } else { - words.addChar(ch) - } - - result += string(ch) - } - } - - return result, words.getWords(), nil -} - -func (sw *shellWord) processSingleQuote() (string, error) { - // All chars between single quotes are taken as-is - // Note, you can't escape ' - var result string - - sw.scanner.Next() - - for { - ch := sw.scanner.Next() - if ch == '\'' || ch == scanner.EOF { - break - } - result += string(ch) - } - - return result, nil -} - -func (sw *shellWord) processDoubleQuote() (string, error) { - // All chars up to the next " are taken as-is, even ', except any $ chars - // But you can escape " with a \ - var result string - - sw.scanner.Next() - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - if ch == '"' { - sw.scanner.Next() - break - } - if ch == '$' { - tmp, err := sw.processDollar() - if err != nil { - return "", err - } - result += tmp - } else { - ch = sw.scanner.Next() - if ch == '\\' { - chNext := sw.scanner.Peek() - - if chNext == scanner.EOF { - // Ignore \ at end of word - continue - } - - if chNext == '"' || chNext == '$' { - // \" and \$ can be escaped, all other \'s are left as-is - ch = sw.scanner.Next() - } - } - result += string(ch) - } - } - - return result, nil -} - -func (sw *shellWord) processDollar() (string, error) { - sw.scanner.Next() - ch := sw.scanner.Peek() - if ch == '{' { - sw.scanner.Next() - name := sw.processName() - ch = sw.scanner.Peek() - if ch == '}' { - // Normal ${xx} case - sw.scanner.Next() - return sw.getEnv(name), nil - } - if ch == ':' { - // Special ${xx:...} format processing - // Yes it allows for recursive $'s in the ... 
spot - - sw.scanner.Next() // skip over : - modifier := sw.scanner.Next() - - word, _, err := sw.processStopOn('}') - if err != nil { - return "", err - } - - // Grab the current value of the variable in question so we - // can use it to determine what to do based on the modifier - newValue := sw.getEnv(name) - - switch modifier { - case '+': - if newValue != "" { - newValue = word - } - return newValue, nil - - case '-': - if newValue == "" { - newValue = word - } - return newValue, nil - - default: - return "", fmt.Errorf("Unsupported modifier (%c) in substitution: %s", modifier, sw.word) - } - } - return "", fmt.Errorf("Missing ':' in substitution: %s", sw.word) - } - // $xxx case - name := sw.processName() - if name == "" { - return "$", nil - } - return sw.getEnv(name), nil -} - -func (sw *shellWord) processName() string { - // Read in a name (alphanumeric or _) - // If it starts with a numeric then just return $# - var name string - - for sw.scanner.Peek() != scanner.EOF { - ch := sw.scanner.Peek() - if len(name) == 0 && unicode.IsDigit(ch) { - ch = sw.scanner.Next() - return string(ch) - } - if !unicode.IsLetter(ch) && !unicode.IsDigit(ch) && ch != '_' { - break - } - ch = sw.scanner.Next() - name += string(ch) - } - - return name -} - -func (sw *shellWord) getEnv(name string) string { - for _, env := range sw.envs { - i := strings.Index(env, "=") - if i < 0 { - if name == env { - // Should probably never get here, but just in case treat - // it like "var" and "var=" are the same - return "" - } - continue - } - if name != env[:i] { - continue - } - return env[i+1:] - } - return "" -} diff --git a/builder/dockerfile/shell_parser_test.go b/builder/dockerfile/shell_parser_test.go deleted file mode 100644 index 81ac591e99..0000000000 --- a/builder/dockerfile/shell_parser_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package dockerfile - -import ( - "bufio" - "os" - "strings" - "testing" -) - -func TestShellParser4EnvVars(t *testing.T) { - fn := "envVarTest" - - file, err := os.Open(fn) - if err != nil { - t.Fatalf("Can't open '%s': %s", fn, err) - } - defer file.Close() - - scanner := bufio.NewScanner(file) - envs := []string{"PWD=/home", "SHELL=bash", "KOREAN=한국어"} - for scanner.Scan() { - line := scanner.Text() - - // Trim comments and blank lines - i := strings.Index(line, "#") - if i >= 0 { - line = line[:i] - } - line = strings.TrimSpace(line) - - if line == "" { - continue - } - - words := strings.Split(line, "|") - if len(words) != 2 { - t.Fatalf("Error in '%s' - should be exactly one | in: %q", fn, line) - } - - words[0] = strings.TrimSpace(words[0]) - words[1] = strings.TrimSpace(words[1]) - - newWord, err := ProcessWord(words[0], envs) - - if err != nil { - newWord = "error" - } - - if newWord != words[1] { - t.Fatalf("Error. 
Src: %s Calc: %s Expected: %s", words[0], newWord, words[1]) - } - } -} - -func TestShellParser4Words(t *testing.T) { - fn := "wordsTest" - - file, err := os.Open(fn) - if err != nil { - t.Fatalf("Can't open '%s': %s", fn, err) - } - defer file.Close() - - envs := []string{} - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := scanner.Text() - - if strings.HasPrefix(line, "#") { - continue - } - - if strings.HasPrefix(line, "ENV ") { - line = strings.TrimLeft(line[3:], " ") - envs = append(envs, line) - continue - } - - words := strings.Split(line, "|") - if len(words) != 2 { - t.Fatalf("Error in '%s' - should be exactly one | in: %q", fn, line) - } - test := strings.TrimSpace(words[0]) - expected := strings.Split(strings.TrimLeft(words[1], " "), ",") - - result, err := ProcessWords(test, envs) - - if err != nil { - result = []string{"error"} - } - - if len(result) != len(expected) { - t.Fatalf("Error. %q was supposed to result in %q, but got %q instead", test, expected, result) - } - for i, w := range expected { - if w != result[i] { - t.Fatalf("Error. %q was supposed to result in %q, but got %q instead", test, expected, result) - } - } - } -} - -func TestGetEnv(t *testing.T) { - sw := &shellWord{ - word: "", - envs: nil, - pos: 0, - } - - sw.envs = []string{} - if sw.getEnv("foo") != "" { - t.Fatalf("2 - 'foo' should map to ''") - } - - sw.envs = []string{"foo"} - if sw.getEnv("foo") != "" { - t.Fatalf("3 - 'foo' should map to ''") - } - - sw.envs = []string{"foo="} - if sw.getEnv("foo") != "" { - t.Fatalf("4 - 'foo' should map to ''") - } - - sw.envs = []string{"foo=bar"} - if sw.getEnv("foo") != "bar" { - t.Fatalf("5 - 'foo' should map to 'bar'") - } - - sw.envs = []string{"foo=bar", "car=hat"} - if sw.getEnv("foo") != "bar" { - t.Fatalf("6 - 'foo' should map to 'bar'") - } - if sw.getEnv("car") != "hat" { - t.Fatalf("7 - 'car' should map to 'hat'") - } - - // Make sure we grab the first 'car' in the list - sw.envs = []string{"foo=bar", "car=hat", "car=bike"} - if sw.getEnv("car") != "hat" { - t.Fatalf("8 - 'car' should map to 'hat'") - } -} diff --git a/builder/dockerfile/support.go b/builder/dockerfile/support.go deleted file mode 100644 index e87588910b..0000000000 --- a/builder/dockerfile/support.go +++ /dev/null @@ -1,19 +0,0 @@ -package dockerfile - -import "strings" - -// handleJSONArgs parses the command passed to the CMD, ENTRYPOINT, RUN and SHELL instructions in a Dockerfile - // for exec form it returns the untouched args slice - // for shell form it returns the concatenated args as the first element of a slice -func handleJSONArgs(args []string, attributes map[string]bool) []string { - if len(args) == 0 { - return []string{} - } - - if attributes != nil && attributes["json"] { - return args - } - - // literal string command, not an exec array - return []string{strings.Join(args, " ")} -} diff --git a/builder/dockerfile/support_test.go b/builder/dockerfile/support_test.go deleted file mode 100644 index 7cc6fe9dcb..0000000000 --- a/builder/dockerfile/support_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package dockerfile - -import "testing" - -type testCase struct { - name string - args []string - attributes map[string]bool - expected []string -} - -func initTestCases() []testCase { - testCases := []testCase{} - - testCases = append(testCases, testCase{ - name: "empty args", - args: []string{}, - attributes: make(map[string]bool), - expected: []string{}, - }) - - jsonAttributes := make(map[string]bool) - jsonAttributes["json"] = true - - testCases = append(testCases, testCase{ - name: 
"json attribute with one element", - args: []string{"foo"}, - attributes: jsonAttributes, - expected: []string{"foo"}, - }) - - testCases = append(testCases, testCase{ - name: "json attribute with two elements", - args: []string{"foo", "bar"}, - attributes: jsonAttributes, - expected: []string{"foo", "bar"}, - }) - - testCases = append(testCases, testCase{ - name: "no attributes", - args: []string{"foo", "bar"}, - attributes: nil, - expected: []string{"foo bar"}, - }) - - return testCases -} - -func TestHandleJSONArgs(t *testing.T) { - testCases := initTestCases() - - for _, test := range testCases { - arguments := handleJSONArgs(test.args, test.attributes) - - if len(arguments) != len(test.expected) { - t.Fatalf("In test \"%s\": length of returned slice is incorrect. Expected: %d, got: %d", test.name, len(test.expected), len(arguments)) - } - - for i := range test.expected { - if arguments[i] != test.expected[i] { - t.Fatalf("In test \"%s\": element as position %d is incorrect. Expected: %s, got: %s", test.name, i, test.expected[i], arguments[i]) - } - } - } -} diff --git a/builder/dockerfile/wordsTest b/builder/dockerfile/wordsTest deleted file mode 100644 index fa916c67f9..0000000000 --- a/builder/dockerfile/wordsTest +++ /dev/null @@ -1,25 +0,0 @@ -hello | hello -hello${hi}bye | hellobye -ENV hi=hi -hello${hi}bye | hellohibye -ENV space=abc def -hello${space}bye | helloabc,defbye -hello"${space}"bye | helloabc defbye -hello "${space}"bye | hello,abc defbye -ENV leading= ab c -hello${leading}def | hello,ab,cdef -hello"${leading}" def | hello ab c,def -hello"${leading}" | hello ab c -hello${leading} | hello,ab,c -# next line MUST have 3 trailing spaces, don't erase them! -ENV trailing=ab c -hello${trailing} | helloab,c -hello${trailing}d | helloab,c,d -hello"${trailing}"d | helloab c d -# next line MUST have 3 trailing spaces, don't erase them! -hel"lo${trailing}" | helloab c -hello" there " | hello there -hello there | hello,there -hello\ there | hello there -hello" there | hello there -hello\" there | hello",there diff --git a/builder/dockerignore.go b/builder/dockerignore.go deleted file mode 100644 index 2990770a4a..0000000000 --- a/builder/dockerignore.go +++ /dev/null @@ -1,47 +0,0 @@ -package builder - -import ( - "os" - - "github.com/docker/docker/builder/dockerignore" - "github.com/docker/docker/pkg/fileutils" -) - -// DockerIgnoreContext wraps a ModifiableContext to add a method -// for handling the .dockerignore file at the root of the context. -type DockerIgnoreContext struct { - ModifiableContext -} - -// Process reads the .dockerignore file at the root of the embedded context. -// If .dockerignore does not exist in the context, then nil is returned. -// -// It can take a list of files to be removed after .dockerignore is removed. -// This is used for server-side implementations of builders that need to send -// the .dockerignore file as well as the special files specified in filesToRemove, -// but expect them to be excluded from the context after they were processed. -// -// For example, server-side Dockerfile builders are expected to pass in the name -// of the Dockerfile to be removed after it was parsed. -// -// TODO: Don't require a ModifiableContext (use Context instead) and don't remove -// files, instead handle a list of files to be excluded from the context. 
-func (c DockerIgnoreContext) Process(filesToRemove []string) error { - f, err := c.Open(".dockerignore") - // Note that a missing .dockerignore file isn't treated as an error - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - excludes, _ := dockerignore.ReadAll(f) - filesToRemove = append([]string{".dockerignore"}, filesToRemove...) - for _, fileToRemove := range filesToRemove { - rm, _ := fileutils.Matches(fileToRemove, excludes) - if rm { - c.Remove(fileToRemove) - } - } - return nil -} diff --git a/builder/dockerignore/dockerignore.go b/builder/dockerignore/dockerignore.go deleted file mode 100644 index 9ddf5dd51e..0000000000 --- a/builder/dockerignore/dockerignore.go +++ /dev/null @@ -1,49 +0,0 @@ -package dockerignore - -import ( - "bufio" - "bytes" - "fmt" - "io" - "path/filepath" - "strings" -) - -// ReadAll reads a .dockerignore file and returns the list of file patterns -// to ignore. Note this will trim whitespace from each line as well -// as use GO's "clean" func to get the shortest/cleanest path for each. -func ReadAll(reader io.ReadCloser) ([]string, error) { - if reader == nil { - return nil, nil - } - defer reader.Close() - scanner := bufio.NewScanner(reader) - var excludes []string - currentLine := 0 - - utf8bom := []byte{0xEF, 0xBB, 0xBF} - for scanner.Scan() { - scannedBytes := scanner.Bytes() - // We trim UTF8 BOM - if currentLine == 0 { - scannedBytes = bytes.TrimPrefix(scannedBytes, utf8bom) - } - pattern := string(scannedBytes) - currentLine++ - // Lines starting with # (comments) are ignored before processing - if strings.HasPrefix(pattern, "#") { - continue - } - pattern = strings.TrimSpace(pattern) - if pattern == "" { - continue - } - pattern = filepath.Clean(pattern) - pattern = filepath.ToSlash(pattern) - excludes = append(excludes, pattern) - } - if err := scanner.Err(); err != nil { - return nil, fmt.Errorf("Error reading .dockerignore: %v", err) - } - return excludes, nil -} diff --git a/builder/dockerignore/dockerignore_test.go b/builder/dockerignore/dockerignore_test.go deleted file mode 100644 index 361b041912..0000000000 --- a/builder/dockerignore/dockerignore_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package dockerignore - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestReadAll(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "dockerignore-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - di, err := ReadAll(nil) - if err != nil { - t.Fatalf("Expected not to have error, got %v", err) - } - - if diLen := len(di); diLen != 0 { - t.Fatalf("Expected to have zero dockerignore entry, got %d", diLen) - } - - diName := filepath.Join(tmpDir, ".dockerignore") - content := fmt.Sprintf("test1\n/test2\n/a/file/here\n\nlastfile") - err = ioutil.WriteFile(diName, []byte(content), 0777) - if err != nil { - t.Fatal(err) - } - - diFd, err := os.Open(diName) - if err != nil { - t.Fatal(err) - } - di, err = ReadAll(diFd) - if err != nil { - t.Fatal(err) - } - - if di[0] != "test1" { - t.Fatalf("First element is not test1") - } - if di[1] != "/test2" { - t.Fatalf("Second element is not /test2") - } - if di[2] != "/a/file/here" { - t.Fatalf("Third element is not /a/file/here") - } - if di[3] != "lastfile" { - t.Fatalf("Fourth element is not lastfile") - } -} diff --git a/builder/dockerignore_test.go b/builder/dockerignore_test.go deleted file mode 100644 index 3c0ceda4cf..0000000000 --- a/builder/dockerignore_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package builder - -import ( - 
"io/ioutil" - "log" - "os" - "sort" - "testing" -) - -const shouldStayFilename = "should_stay" - -func extractFilenames(files []os.FileInfo) []string { - filenames := make([]string, len(files), len(files)) - - for i, file := range files { - filenames[i] = file.Name() - } - - return filenames -} - -func checkDirectory(t *testing.T, dir string, expectedFiles []string) { - files, err := ioutil.ReadDir(dir) - - if err != nil { - t.Fatalf("Could not read directory: %s", err) - } - - if len(files) != len(expectedFiles) { - log.Fatalf("Directory should contain exactly %d file(s), got %d", len(expectedFiles), len(files)) - } - - filenames := extractFilenames(files) - sort.Strings(filenames) - sort.Strings(expectedFiles) - - for i, filename := range filenames { - if filename != expectedFiles[i] { - t.Fatalf("File %s should be in the directory, got: %s", expectedFiles[i], filename) - } - } -} - -func executeProcess(t *testing.T, contextDir string) { - modifiableCtx := &tarSumContext{root: contextDir} - ctx := DockerIgnoreContext{ModifiableContext: modifiableCtx} - - err := ctx.Process([]string{DefaultDockerfileName}) - - if err != nil { - t.Fatalf("Error when executing Process: %s", err) - } -} - -func TestProcessShouldRemoveDockerfileDockerignore(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") - defer cleanup() - - createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) - createTestTempFile(t, contextDir, dockerignoreFilename, "Dockerfile\n.dockerignore", 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - executeProcess(t, contextDir) - - checkDirectory(t, contextDir, []string{shouldStayFilename}) - -} - -func TestProcessNoDockerignore(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") - defer cleanup() - - createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - executeProcess(t, contextDir) - - checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName}) - -} - -func TestProcessShouldLeaveAllFiles(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-dockerignore-process-test") - defer cleanup() - - createTestTempFile(t, contextDir, shouldStayFilename, testfileContents, 0777) - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - createTestTempFile(t, contextDir, dockerignoreFilename, "input1\ninput2", 0777) - - executeProcess(t, contextDir) - - checkDirectory(t, contextDir, []string{shouldStayFilename, DefaultDockerfileName, dockerignoreFilename}) - -} diff --git a/builder/git.go b/builder/git.go deleted file mode 100644 index 74df244611..0000000000 --- a/builder/git.go +++ /dev/null @@ -1,28 +0,0 @@ -package builder - -import ( - "os" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/gitutils" -) - -// MakeGitContext returns a Context from gitURL that is cloned in a temporary directory. -func MakeGitContext(gitURL string) (ModifiableContext, error) { - root, err := gitutils.Clone(gitURL) - if err != nil { - return nil, err - } - - c, err := archive.Tar(root, archive.Uncompressed) - if err != nil { - return nil, err - } - - defer func() { - // TODO: print errors? 
- c.Close() - os.RemoveAll(root) - }() - return MakeTarSumContext(c) -} diff --git a/builder/remote.go b/builder/remote.go deleted file mode 100644 index 12f34c7b60..0000000000 --- a/builder/remote.go +++ /dev/null @@ -1,152 +0,0 @@ -package builder - -import ( - "bytes" - "errors" - "fmt" - "io" - "io/ioutil" - "regexp" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/urlutil" -) - -// When downloading remote contexts, limit the amount (in bytes) -// to be read from the response body in order to detect its Content-Type -const maxPreambleLength = 100 - -const acceptableRemoteMIME = `(?:application/(?:(?:x\-)?tar|octet\-stream|((?:x\-)?(?:gzip|bzip2?|xz)))|(?:text/plain))` - -var mimeRe = regexp.MustCompile(acceptableRemoteMIME) - -// MakeRemoteContext downloads a context from remoteURL and returns it. -// -// If contentTypeHandlers is non-nil, then the Content-Type header is read along with a maximum of -// maxPreambleLength bytes from the body to help detecting the MIME type. -// Look at acceptableRemoteMIME for more details. -// -// If a match is found, then the body is sent to the contentType handler and a (potentially compressed) tar stream is expected -// to be returned. If no match is found, it is assumed the body is a tar stream (compressed or not). -// In either case, an (assumed) tar stream is passed to MakeTarSumContext whose result is returned. -func MakeRemoteContext(remoteURL string, contentTypeHandlers map[string]func(io.ReadCloser) (io.ReadCloser, error)) (ModifiableContext, error) { - f, err := httputils.Download(remoteURL) - if err != nil { - return nil, fmt.Errorf("Error downloading remote context %s: %v", remoteURL, err) - } - defer f.Body.Close() - - var contextReader io.ReadCloser - if contentTypeHandlers != nil { - contentType := f.Header.Get("Content-Type") - clen := f.ContentLength - - contentType, contextReader, err = inspectResponse(contentType, f.Body, clen) - if err != nil { - return nil, fmt.Errorf("Error detecting content type for remote %s: %v", remoteURL, err) - } - defer contextReader.Close() - - // This loop tries to find a content-type handler for the detected content-type. - // If it could not find one from the caller-supplied map, it tries the empty content-type `""` - // which is interpreted as a fallback handler (usually used for raw tar contexts). - for _, ct := range []string{contentType, ""} { - if fn, ok := contentTypeHandlers[ct]; ok { - defer contextReader.Close() - if contextReader, err = fn(contextReader); err != nil { - return nil, err - } - break - } - } - } - - // Pass through - this is a pre-packaged context, presumably - // with a Dockerfile with the right name inside it. - return MakeTarSumContext(contextReader) -} - -// DetectContextFromRemoteURL returns a context and in certain cases the name of the dockerfile to be used -// irrespective of user input. -// progressReader is only used if remoteURL is actually a URL (not empty, and not a Git endpoint). 
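`MakeRemoteContext` above sniffs at most `maxPreambleLength` bytes of the response and then stitches them back in front of the body (see `inspectResponse` further down). The same idiom, reduced to the standard library; the helper name is made up for the example:

```go
package sniffexample

import (
	"bytes"
	"io"
	"net/http"
)

// sniffAndRestitch reads up to limit bytes for MIME sniffing and returns the
// detected type together with a reader that still yields the complete body.
func sniffAndRestitch(body io.Reader, limit int) (string, io.Reader, error) {
	preamble := make([]byte, limit)
	n, err := io.ReadFull(body, preamble)
	if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
		return "", nil, err
	}
	ct := http.DetectContentType(preamble[:n])
	// MultiReader replays the sniffed bytes before the rest of the stream,
	// so downstream consumers never notice the peek.
	return ct, io.MultiReader(bytes.NewReader(preamble[:n]), body), nil
}
```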
-func DetectContextFromRemoteURL(r io.ReadCloser, remoteURL string, createProgressReader func(in io.ReadCloser) io.ReadCloser) (context ModifiableContext, dockerfileName string, err error) { - switch { - case remoteURL == "": - context, err = MakeTarSumContext(r) - case urlutil.IsGitURL(remoteURL): - context, err = MakeGitContext(remoteURL) - case urlutil.IsURL(remoteURL): - context, err = MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ - httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { - dockerfile, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - - // dockerfileName is set to signal that the remote was interpreted as a single Dockerfile, in which case the caller - // should use dockerfileName as the new name for the Dockerfile, irrespective of any other user input. - dockerfileName = DefaultDockerfileName - - // TODO: return a context without tarsum - return archive.Generate(dockerfileName, string(dockerfile)) - }, - // fallback handler (tar context) - "": func(rc io.ReadCloser) (io.ReadCloser, error) { - return createProgressReader(rc), nil - }, - }) - default: - err = fmt.Errorf("remoteURL (%s) could not be recognized as URL", remoteURL) - } - return -} - -// inspectResponse looks into the http response data at r to determine whether its -// content-type is on the list of acceptable content types for remote build contexts. -// This function returns: -// - a string representation of the detected content-type -// - an io.Reader for the response body -// - an error value which will be non-nil either when something goes wrong while -// reading bytes from r or when the detected content-type is not acceptable. -func inspectResponse(ct string, r io.ReadCloser, clen int64) (string, io.ReadCloser, error) { - plen := clen - if plen <= 0 || plen > maxPreambleLength { - plen = maxPreambleLength - } - - preamble := make([]byte, plen, plen) - rlen, err := r.Read(preamble) - if rlen == 0 { - return ct, r, errors.New("Empty response") - } - if err != nil && err != io.EOF { - return ct, r, err - } - - preambleR := bytes.NewReader(preamble) - bodyReader := ioutil.NopCloser(io.MultiReader(preambleR, r)) - // Some web servers will use application/octet-stream as the default - // content type for files without an extension (e.g. 
'Dockerfile') - // so if we receive this value we better check for text content - contentType := ct - if len(ct) == 0 || ct == httputils.MimeTypes.OctetStream { - contentType, _, err = httputils.DetectContentType(preamble) - if err != nil { - return contentType, bodyReader, err - } - } - - contentType = selectAcceptableMIME(contentType) - var cterr error - if len(contentType) == 0 { - cterr = fmt.Errorf("unsupported Content-Type %q", ct) - contentType = ct - } - - return contentType, bodyReader, cterr -} - -func selectAcceptableMIME(ct string) string { - return mimeRe.FindString(ct) -} diff --git a/builder/remote_test.go b/builder/remote_test.go deleted file mode 100644 index 20ee02a57b..0000000000 --- a/builder/remote_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package builder - -import ( - "bytes" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "testing" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" -) - -var binaryContext = []byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00} //xz magic - -func TestSelectAcceptableMIME(t *testing.T) { - validMimeStrings := []string{ - "application/x-bzip2", - "application/bzip2", - "application/gzip", - "application/x-gzip", - "application/x-xz", - "application/xz", - "application/tar", - "application/x-tar", - "application/octet-stream", - "text/plain", - } - - invalidMimeStrings := []string{ - "", - "application/octet", - "application/json", - } - - for _, m := range invalidMimeStrings { - if len(selectAcceptableMIME(m)) > 0 { - t.Fatalf("Should not have accepted %q", m) - } - } - - for _, m := range validMimeStrings { - if str := selectAcceptableMIME(m); str == "" { - t.Fatalf("Should have accepted %q", m) - } - } -} - -func TestInspectEmptyResponse(t *testing.T) { - ct := "application/octet-stream" - br := ioutil.NopCloser(bytes.NewReader([]byte(""))) - contentType, bReader, err := inspectResponse(ct, br, 0) - if err == nil { - t.Fatalf("Should have generated an error for an empty response") - } - if contentType != "application/octet-stream" { - t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) - } - body, err := ioutil.ReadAll(bReader) - if err != nil { - t.Fatal(err) - } - if len(body) != 0 { - t.Fatal("response body should remain empty") - } -} - -func TestInspectResponseBinary(t *testing.T) { - ct := "application/octet-stream" - br := ioutil.NopCloser(bytes.NewReader(binaryContext)) - contentType, bReader, err := inspectResponse(ct, br, int64(len(binaryContext))) - if err != nil { - t.Fatal(err) - } - if contentType != "application/octet-stream" { - t.Fatalf("Content type should be 'application/octet-stream' but is %q", contentType) - } - body, err := ioutil.ReadAll(bReader) - if err != nil { - t.Fatal(err) - } - if len(body) != len(binaryContext) { - t.Fatalf("Wrong response size %d, should be == len(binaryContext)", len(body)) - } - for i := range body { - if body[i] != binaryContext[i] { - t.Fatalf("Corrupted response body at byte index %d", i) - } - } -} - -func TestResponseUnsupportedContentType(t *testing.T) { - content := []byte(dockerfileContents) - ct := "application/json" - br := ioutil.NopCloser(bytes.NewReader(content)) - contentType, bReader, err := inspectResponse(ct, br, int64(len(dockerfileContents))) - - if err == nil { - t.Fatal("Should have returned an error on content-type 'application/json'") - } - if contentType != ct { - t.Fatalf("Should not have altered content-type: orig: %s, altered: %s", ct, contentType) - } - body, err := 
ioutil.ReadAll(bReader) - if err != nil { - t.Fatal(err) - } - if string(body) != dockerfileContents { - t.Fatalf("Corrupted response body %s", body) - } -} - -func TestInspectResponseTextSimple(t *testing.T) { - content := []byte(dockerfileContents) - ct := "text/plain" - br := ioutil.NopCloser(bytes.NewReader(content)) - contentType, bReader, err := inspectResponse(ct, br, int64(len(content))) - if err != nil { - t.Fatal(err) - } - if contentType != "text/plain" { - t.Fatalf("Content type should be 'text/plain' but is %q", contentType) - } - body, err := ioutil.ReadAll(bReader) - if err != nil { - t.Fatal(err) - } - if string(body) != dockerfileContents { - t.Fatalf("Corrupted response body %s", body) - } -} - -func TestInspectResponseEmptyContentType(t *testing.T) { - content := []byte(dockerfileContents) - br := ioutil.NopCloser(bytes.NewReader(content)) - contentType, bodyReader, err := inspectResponse("", br, int64(len(content))) - if err != nil { - t.Fatal(err) - } - if contentType != "text/plain" { - t.Fatalf("Content type should be 'text/plain' but is %q", contentType) - } - body, err := ioutil.ReadAll(bodyReader) - if err != nil { - t.Fatal(err) - } - if string(body) != dockerfileContents { - t.Fatalf("Corrupted response body %s", body) - } -} - -func TestMakeRemoteContext(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - createTestTempFile(t, contextDir, DefaultDockerfileName, dockerfileContents, 0777) - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/" + DefaultDockerfileName - remoteURL := serverURL.String() - - mux.Handle("/", http.FileServer(http.Dir(contextDir))) - - remoteContext, err := MakeRemoteContext(remoteURL, map[string]func(io.ReadCloser) (io.ReadCloser, error){ - httputils.MimeTypes.TextPlain: func(rc io.ReadCloser) (io.ReadCloser, error) { - dockerfile, err := ioutil.ReadAll(rc) - if err != nil { - return nil, err - } - return archive.Generate(DefaultDockerfileName, string(dockerfile)) - }, - }) - - if err != nil { - t.Fatalf("Error when executing DetectContextFromRemoteURL: %s", err) - } - - if remoteContext == nil { - t.Fatalf("Remote context should not be nil") - } - - tarSumCtx, ok := remoteContext.(*tarSumContext) - - if !ok { - t.Fatalf("Cast error, remote context should be casted to tarSumContext") - } - - fileInfoSums := tarSumCtx.sums - - if fileInfoSums.Len() != 1 { - t.Fatalf("Size of file info sums should be 1, got: %d", fileInfoSums.Len()) - } - - fileInfo := fileInfoSums.GetFile(DefaultDockerfileName) - - if fileInfo == nil { - t.Fatalf("There should be file named %s in fileInfoSums", DefaultDockerfileName) - } - - if fileInfo.Pos() != 0 { - t.Fatalf("File %s should have position 0, got %d", DefaultDockerfileName, fileInfo.Pos()) - } -} diff --git a/builder/tarsum.go b/builder/tarsum.go deleted file mode 100644 index 48372cb01c..0000000000 --- a/builder/tarsum.go +++ /dev/null @@ -1,158 +0,0 @@ -package builder - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/tarsum" -) - -type tarSumContext struct { - root string - sums tarsum.FileInfoSums -} - -func (c *tarSumContext) Close() error { - return os.RemoveAll(c.root) -} - -func convertPathError(err error, cleanpath string) error { - if err, ok := 
err.(*os.PathError); ok { - err.Path = cleanpath - return err - } - return err -} - -func (c *tarSumContext) Open(path string) (io.ReadCloser, error) { - cleanpath, fullpath, err := c.normalize(path) - if err != nil { - return nil, err - } - r, err := os.Open(fullpath) - if err != nil { - return nil, convertPathError(err, cleanpath) - } - return r, nil -} - -func (c *tarSumContext) Stat(path string) (string, FileInfo, error) { - cleanpath, fullpath, err := c.normalize(path) - if err != nil { - return "", nil, err - } - - st, err := os.Lstat(fullpath) - if err != nil { - return "", nil, convertPathError(err, cleanpath) - } - - rel, err := filepath.Rel(c.root, fullpath) - if err != nil { - return "", nil, convertPathError(err, cleanpath) - } - - // We set sum to path by default for the case where GetFile returns nil. - // The usual case is if relative path is empty. - sum := path - // Use the checksum of the followed path(not the possible symlink) because - // this is the file that is actually copied. - if tsInfo := c.sums.GetFile(rel); tsInfo != nil { - sum = tsInfo.Sum() - } - fi := &HashedFileInfo{PathFileInfo{st, fullpath, filepath.Base(cleanpath)}, sum} - return rel, fi, nil -} - -// MakeTarSumContext returns a build Context from a tar stream. -// -// It extracts the tar stream to a temporary folder that is deleted as soon as -// the Context is closed. -// As the extraction happens, a tarsum is calculated for every file, and the set of -// all those sums then becomes the source of truth for all operations on this Context. -// -// Closing tarStream has to be done by the caller. -func MakeTarSumContext(tarStream io.Reader) (ModifiableContext, error) { - root, err := ioutils.TempDir("", "docker-builder") - if err != nil { - return nil, err - } - - tsc := &tarSumContext{root: root} - - // Make sure we clean-up upon error. In the happy case the caller - // is expected to manage the clean-up - defer func() { - if err != nil { - tsc.Close() - } - }() - - decompressedStream, err := archive.DecompressStream(tarStream) - if err != nil { - return nil, err - } - - sum, err := tarsum.NewTarSum(decompressedStream, true, tarsum.Version1) - if err != nil { - return nil, err - } - - if err := chrootarchive.Untar(sum, root, nil); err != nil { - return nil, err - } - - tsc.sums = sum.GetSums() - - return tsc, nil -} - -func (c *tarSumContext) normalize(path string) (cleanpath, fullpath string, err error) { - cleanpath = filepath.Clean(string(os.PathSeparator) + path)[1:] - fullpath, err = symlink.FollowSymlinkInScope(filepath.Join(c.root, path), c.root) - if err != nil { - return "", "", fmt.Errorf("Forbidden path outside the build context: %s (%s)", path, fullpath) - } - _, err = os.Lstat(fullpath) - if err != nil { - return "", "", convertPathError(err, path) - } - return -} - -func (c *tarSumContext) Walk(root string, walkFn WalkFunc) error { - root = filepath.Join(c.root, filepath.Join(string(filepath.Separator), root)) - return filepath.Walk(root, func(fullpath string, info os.FileInfo, err error) error { - rel, err := filepath.Rel(c.root, fullpath) - if err != nil { - return err - } - if rel == "." 
{ - return nil - } - - sum := rel - if tsInfo := c.sums.GetFile(filepath.ToSlash(rel)); tsInfo != nil { - sum = tsInfo.Sum() - } - fi := &HashedFileInfo{PathFileInfo{FileInfo: info, FilePath: fullpath}, sum} - if err := walkFn(rel, fi, nil); err != nil { - return err - } - return nil - }) -} - -func (c *tarSumContext) Remove(path string) error { - _, fullpath, err := c.normalize(path) - if err != nil { - return err - } - return os.RemoveAll(fullpath) -} diff --git a/builder/tarsum_test.go b/builder/tarsum_test.go deleted file mode 100644 index cc3f6f5154..0000000000 --- a/builder/tarsum_test.go +++ /dev/null @@ -1,265 +0,0 @@ -package builder - -import ( - "bufio" - "bytes" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" -) - -const ( - filename = "test" - contents = "contents test" -) - -func init() { - reexec.Init() -} - -func TestCloseRootDirectory(t *testing.T) { - contextDir, err := ioutil.TempDir("", "builder-tarsum-test") - - if err != nil { - t.Fatalf("Error with creating temporary directory: %s", err) - } - - tarsum := &tarSumContext{root: contextDir} - - err = tarsum.Close() - - if err != nil { - t.Fatalf("Error while executing Close: %s", err) - } - - _, err = os.Stat(contextDir) - - if !os.IsNotExist(err) { - t.Fatalf("Directory should not exist at this point") - defer os.RemoveAll(contextDir) - } -} - -func TestOpenFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - createTestTempFile(t, contextDir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - file, err := tarSum.Open(filename) - - if err != nil { - t.Fatalf("Error when executing Open: %s", err) - } - - defer file.Close() - - scanner := bufio.NewScanner(file) - buff := bytes.NewBufferString("") - - for scanner.Scan() { - buff.WriteString(scanner.Text()) - } - - if contents != buff.String() { - t.Fatalf("Contents are not equal. 
Expected: %s, got: %s", contents, buff.String()) - } - -} - -func TestOpenNotExisting(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - tarSum := &tarSumContext{root: contextDir} - - file, err := tarSum.Open("not-existing") - - if file != nil { - t.Fatal("Opened file should be nil") - } - - if !os.IsNotExist(err) { - t.Fatalf("Error when executing Open: %s", err) - } -} - -func TestStatFile(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - testFilename := createTestTempFile(t, contextDir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - relPath, fileInfo, err := tarSum.Stat(filename) - - if err != nil { - t.Fatalf("Error when executing Stat: %s", err) - } - - if relPath != filename { - t.Fatalf("Relative path should be equal to %s, got %s", filename, relPath) - } - - if fileInfo.Path() != testFilename { - t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) - } -} - -func TestStatSubdir(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - testFilename := createTestTempFile(t, contextSubdir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - relativePath, err := filepath.Rel(contextDir, testFilename) - - if err != nil { - t.Fatalf("Error when getting relative path: %s", err) - } - - relPath, fileInfo, err := tarSum.Stat(relativePath) - - if err != nil { - t.Fatalf("Error when executing Stat: %s", err) - } - - if relPath != relativePath { - t.Fatalf("Relative path should be equal to %s, got %s", relativePath, relPath) - } - - if fileInfo.Path() != testFilename { - t.Fatalf("Full path should be equal to %s, got %s", testFilename, fileInfo.Path()) - } -} - -func TestStatNotExisting(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - tarSum := &tarSumContext{root: contextDir} - - relPath, fileInfo, err := tarSum.Stat("not-existing") - - if relPath != "" { - t.Fatal("Relative path should be nil") - } - - if fileInfo != nil { - t.Fatalf("File info should be nil") - } - - if !os.IsNotExist(err) { - t.Fatalf("This file should not exist: %s", err) - } -} - -func TestRemoveDirectory(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - relativePath, err := filepath.Rel(contextDir, contextSubdir) - - if err != nil { - t.Fatalf("Error when getting relative path: %s", err) - } - - tarSum := &tarSumContext{root: contextDir} - - err = tarSum.Remove(relativePath) - - if err != nil { - t.Fatalf("Error when executing Remove: %s", err) - } - - _, err = os.Stat(contextSubdir) - - if !os.IsNotExist(err) { - t.Fatalf("Directory should not exist at this point") - } -} - -func TestMakeSumTarContext(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - createTestTempFile(t, contextDir, filename, contents, 0777) - - tarStream, err := archive.Tar(contextDir, archive.Uncompressed) - - if err != nil { - t.Fatalf("error: %s", err) - } - - defer tarStream.Close() - - tarSum, err := MakeTarSumContext(tarStream) - - if err != nil { - t.Fatalf("Error when executing MakeSumContext: %s", err) - } - - if tarSum == nil { - 
t.Fatalf("Tar sum context should not be nil") - } -} - -func TestWalkWithoutError(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - createTestTempFile(t, contextSubdir, filename, contents, 0777) - - tarSum := &tarSumContext{root: contextDir} - - walkFun := func(path string, fi FileInfo, err error) error { - return nil - } - - err := tarSum.Walk(contextSubdir, walkFun) - - if err != nil { - t.Fatalf("Error when executing Walk: %s", err) - } -} - -type WalkError struct { -} - -func (we WalkError) Error() string { - return "Error when executing Walk" -} - -func TestWalkWithError(t *testing.T) { - contextDir, cleanup := createTestTempDir(t, "", "builder-tarsum-test") - defer cleanup() - - contextSubdir := createTestTempSubdir(t, contextDir, "builder-tarsum-test-subdir") - - tarSum := &tarSumContext{root: contextDir} - - walkFun := func(path string, fi FileInfo, err error) error { - return WalkError{} - } - - err := tarSum.Walk(contextSubdir, walkFun) - - if err == nil { - t.Fatalf("Error should not be nil") - } -} diff --git a/builder/utils_test.go b/builder/utils_test.go deleted file mode 100644 index 1101ff1d1d..0000000000 --- a/builder/utils_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package builder - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -const ( - dockerfileContents = "FROM busybox" - dockerignoreFilename = ".dockerignore" - testfileContents = "test" -) - -// createTestTempDir creates a temporary directory for testing. -// It returns the created path and a cleanup function which is meant to be used as deferred call. -// When an error occurs, it terminates the test. -func createTestTempDir(t *testing.T, dir, prefix string) (string, func()) { - path, err := ioutil.TempDir(dir, prefix) - - if err != nil { - t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) - } - - return path, func() { - err = os.RemoveAll(path) - - if err != nil { - t.Fatalf("Error when removing directory %s: %s", path, err) - } - } -} - -// createTestTempSubdir creates a temporary directory for testing. -// It returns the created path but doesn't provide a cleanup function, -// so createTestTempSubdir should be used only for creating temporary subdirectories -// whose parent directories are properly cleaned up. -// When an error occurs, it terminates the test. -func createTestTempSubdir(t *testing.T, dir, prefix string) string { - path, err := ioutil.TempDir(dir, prefix) - - if err != nil { - t.Fatalf("Error when creating directory %s with prefix %s: %s", dir, prefix, err) - } - - return path -} - -// createTestTempFile creates a temporary file within dir with specific contents and permissions. -// When an error occurs, it terminates the test -func createTestTempFile(t *testing.T, dir, filename, contents string, perm os.FileMode) string { - filePath := filepath.Join(dir, filename) - err := ioutil.WriteFile(filePath, []byte(contents), perm) - - if err != nil { - t.Fatalf("Error when creating %s file: %s", filename, err) - } - - return filePath -} - -// chdir changes current working directory to dir. -// It returns a function which changes working directory back to the previous one. -// This function is meant to be executed as a deferred call. -// When an error occurs, it terminates the test. 
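All of these helpers share one pattern: perform the setup, fail the test immediately on error, and hand back a closure for the caller to defer. A condensed restatement of that pattern (the helper name is hypothetical):

```go
package builder

import (
	"io/ioutil"
	"os"
	"testing"
)

// tempDir is a hypothetical condensation of createTestTempDir above:
// do the setup, fail fast on error, and return the matching teardown.
func tempDir(t *testing.T) (string, func()) {
	dir, err := ioutil.TempDir("", "example")
	if err != nil {
		t.Fatalf("setup: %s", err)
	}
	return dir, func() { os.RemoveAll(dir) }
}
```

Callers pair it with defer (`dir, cleanup := tempDir(t); defer cleanup()`), which keeps teardown adjacent to setup.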
-func chdir(t *testing.T, dir string) func() { - workingDirectory, err := os.Getwd() - - if err != nil { - t.Fatalf("Error when retrieving working directory: %s", err) - } - - err = os.Chdir(dir) - - if err != nil { - t.Fatalf("Error when changing directory to %s: %s", dir, err) - } - - return func() { - err = os.Chdir(workingDirectory) - - if err != nil { - t.Fatalf("Error when changing back to working directory (%s): %s", workingDirectory, err) - } - } -} diff --git a/cli/cli.go b/cli/cli.go deleted file mode 100644 index 8d21cda69d..0000000000 --- a/cli/cli.go +++ /dev/null @@ -1,191 +0,0 @@ -package cli - -import ( - "errors" - "fmt" - "io" - "os" - "strings" - - flag "github.com/docker/docker/pkg/mflag" -) - -// Cli represents a command line interface. -type Cli struct { - Stderr io.Writer - handlers []Handler - Usage func() -} - -// Handler holds the different commands Cli will call -// It should have methods with names starting with `Cmd` like: -// func (h myHandler) CmdFoo(args ...string) error -type Handler interface { - Command(name string) func(...string) error -} - -// Initializer can be optionally implemented by a Handler to -// initialize before each call to one of its commands. -type Initializer interface { - Initialize() error -} - -// New instantiates a ready-to-use Cli. -func New(handlers ...Handler) *Cli { - // make the generic Cli object the first cli handler - // in order to handle `docker help` appropriately - cli := new(Cli) - cli.handlers = append([]Handler{cli}, handlers...) - return cli -} - -var errCommandNotFound = errors.New("command not found") - -func (cli *Cli) command(args ...string) (func(...string) error, error) { - for _, c := range cli.handlers { - if c == nil { - continue - } - if cmd := c.Command(strings.Join(args, " ")); cmd != nil { - if ci, ok := c.(Initializer); ok { - if err := ci.Initialize(); err != nil { - return nil, err - } - } - return cmd, nil - } - } - return nil, errCommandNotFound -} - -// Run executes the specified command. -func (cli *Cli) Run(args ...string) error { - if len(args) > 1 { - command, err := cli.command(args[:2]...) - if err == nil { - return command(args[2:]...) - } - if err != errCommandNotFound { - return err - } - } - if len(args) > 0 { - command, err := cli.command(args[0]) - if err != nil { - if err == errCommandNotFound { - cli.noSuchCommand(args[0]) - return nil - } - return err - } - return command(args[1:]...) - } - return cli.CmdHelp() -} - -func (cli *Cli) noSuchCommand(command string) { - if cli.Stderr == nil { - cli.Stderr = os.Stderr - } - fmt.Fprintf(cli.Stderr, "docker: '%s' is not a docker command.\nSee 'docker --help'.\n", command) - os.Exit(1) -} - -// Command returns a command handler, or nil if the command does not exist -func (cli *Cli) Command(name string) func(...string) error { - return map[string]func(...string) error{ - "help": cli.CmdHelp, - }[name] -} - -// CmdHelp displays information on a Docker command. -// -// If more than one command is specified, information is only shown for the first command. -// -// Usage: docker help COMMAND or docker COMMAND --help -func (cli *Cli) CmdHelp(args ...string) error { - if len(args) > 1 { - command, err := cli.command(args[:2]...) 
- if err == nil { - command("--help") - return nil - } - if err != errCommandNotFound { - return err - } - } - if len(args) > 0 { - command, err := cli.command(args[0]) - if err != nil { - if err == errCommandNotFound { - cli.noSuchCommand(args[0]) - return nil - } - return err - } - command("--help") - return nil - } - - if cli.Usage == nil { - flag.Usage() - } else { - cli.Usage() - } - - return nil -} - -// Subcmd is a subcommand of the main "docker" command. -// A subcommand represents an action that can be performed -// from the Docker command line client. -// -// To see all available subcommands, run "docker --help". -func Subcmd(name string, synopses []string, description string, exitOnError bool) *flag.FlagSet { - var errorHandling flag.ErrorHandling - if exitOnError { - errorHandling = flag.ExitOnError - } else { - errorHandling = flag.ContinueOnError - } - flags := flag.NewFlagSet(name, errorHandling) - flags.Usage = func() { - flags.ShortUsage() - flags.PrintDefaults() - } - - flags.ShortUsage = func() { - if len(synopses) == 0 { - synopses = []string{""} - } - - // Allow for multiple command usage synopses. - for i, synopsis := range synopses { - lead := "\t" - if i == 0 { - // First line needs the word 'Usage'. - lead = "Usage:\t" - } - - if synopsis != "" { - synopsis = " " + synopsis - } - - fmt.Fprintf(flags.Out(), "\n%sdocker %s%s", lead, name, synopsis) - } - - fmt.Fprintf(flags.Out(), "\n\n%s\n", description) - } - - return flags -} - -// StatusError reports an unsuccessful exit by a command. -type StatusError struct { - Status string - StatusCode int -} - -func (e StatusError) Error() string { - return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode) -} diff --git a/cli/cobraadaptor/adaptor.go b/cli/cobraadaptor/adaptor.go deleted file mode 100644 index 04c705cd21..0000000000 --- a/cli/cobraadaptor/adaptor.go +++ /dev/null @@ -1,159 +0,0 @@ -package cobraadaptor - -import ( - "github.com/docker/docker/api/client" - "github.com/docker/docker/api/client/container" - "github.com/docker/docker/api/client/image" - "github.com/docker/docker/api/client/network" - "github.com/docker/docker/api/client/node" - "github.com/docker/docker/api/client/plugin" - "github.com/docker/docker/api/client/registry" - "github.com/docker/docker/api/client/service" - "github.com/docker/docker/api/client/stack" - "github.com/docker/docker/api/client/swarm" - "github.com/docker/docker/api/client/system" - "github.com/docker/docker/api/client/volume" - "github.com/docker/docker/cli" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/pkg/term" - "github.com/spf13/cobra" -) - -// CobraAdaptor is an adaptor for supporting spf13/cobra commands in the -// docker/cli framework -type CobraAdaptor struct { - rootCmd *cobra.Command - dockerCli *client.DockerCli -} - -// NewCobraAdaptor returns a new handler -func NewCobraAdaptor(clientFlags *cliflags.ClientFlags) CobraAdaptor { - stdin, stdout, stderr := term.StdStreams() - dockerCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags) - - var rootCmd = &cobra.Command{ - Use: "docker [OPTIONS]", - Short: "A self-sufficient runtime for containers", - SilenceUsage: true, - SilenceErrors: true, - } - rootCmd.SetUsageTemplate(usageTemplate) - rootCmd.SetHelpTemplate(helpTemplate) - rootCmd.SetFlagErrorFunc(cli.FlagErrorFunc) - rootCmd.SetOutput(stdout) - rootCmd.AddCommand( - node.NewNodeCommand(dockerCli), - service.NewServiceCommand(dockerCli), - stack.NewStackCommand(dockerCli), - 
stack.NewTopLevelDeployCommand(dockerCli), - swarm.NewSwarmCommand(dockerCli), - container.NewAttachCommand(dockerCli), - container.NewCommitCommand(dockerCli), - container.NewCopyCommand(dockerCli), - container.NewCreateCommand(dockerCli), - container.NewDiffCommand(dockerCli), - container.NewExportCommand(dockerCli), - container.NewKillCommand(dockerCli), - container.NewLogsCommand(dockerCli), - container.NewPauseCommand(dockerCli), - container.NewPortCommand(dockerCli), - container.NewPsCommand(dockerCli), - container.NewRenameCommand(dockerCli), - container.NewRestartCommand(dockerCli), - container.NewRmCommand(dockerCli), - container.NewRunCommand(dockerCli), - container.NewStartCommand(dockerCli), - container.NewStatsCommand(dockerCli), - container.NewStopCommand(dockerCli), - container.NewTopCommand(dockerCli), - container.NewUnpauseCommand(dockerCli), - container.NewWaitCommand(dockerCli), - image.NewBuildCommand(dockerCli), - image.NewHistoryCommand(dockerCli), - image.NewImagesCommand(dockerCli), - image.NewLoadCommand(dockerCli), - image.NewRemoveCommand(dockerCli), - image.NewSaveCommand(dockerCli), - image.NewPullCommand(dockerCli), - image.NewPushCommand(dockerCli), - image.NewSearchCommand(dockerCli), - image.NewImportCommand(dockerCli), - image.NewTagCommand(dockerCli), - network.NewNetworkCommand(dockerCli), - system.NewEventsCommand(dockerCli), - registry.NewLoginCommand(dockerCli), - registry.NewLogoutCommand(dockerCli), - system.NewVersionCommand(dockerCli), - volume.NewVolumeCommand(dockerCli), - ) - plugin.NewPluginCommand(rootCmd, dockerCli) - - rootCmd.PersistentFlags().BoolP("help", "h", false, "Print usage") - rootCmd.PersistentFlags().MarkShorthandDeprecated("help", "please use --help") - - return CobraAdaptor{ - rootCmd: rootCmd, - dockerCli: dockerCli, - } -} - -// Usage returns the list of commands and their short usage string for -// all top level cobra commands. -func (c CobraAdaptor) Usage() []cli.Command { - cmds := []cli.Command{} - for _, cmd := range c.rootCmd.Commands() { - if cmd.Name() != "" { - cmds = append(cmds, cli.Command{Name: cmd.Name(), Description: cmd.Short}) - } - } - return cmds -} - -func (c CobraAdaptor) run(cmd string, args []string) error { - if err := c.dockerCli.Initialize(); err != nil { - return err - } - // Prepend the command name to support normal cobra command delegation - c.rootCmd.SetArgs(append([]string{cmd}, args...)) - return c.rootCmd.Execute() -} - -// Command returns a cli command handler if one exists -func (c CobraAdaptor) Command(name string) func(...string) error { - for _, cmd := range c.rootCmd.Commands() { - if cmd.Name() == name { - return func(args ...string) error { - return c.run(name, args) - } - } - } - return nil -} - -// GetRootCommand returns the root command. Required to generate the man pages -// and reference docs from a script outside this package. 
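The bridge back to the legacy dispatcher works by name lookup: `Command` returns a closure that prepends the command name and hands the full argument list to cobra via `run`. A hypothetical caller, using only the constructors shown in this file:

```go
package example

import (
	"github.com/docker/docker/cli/cobraadaptor"
	cliflags "github.com/docker/docker/cli/flags"
)

func dispatch(clientFlags *cliflags.ClientFlags) error {
	adaptor := cobraadaptor.NewCobraAdaptor(clientFlags)
	// Command returns nil for unknown names, letting the legacy Cli keep
	// probing its other handlers.
	if run := adaptor.Command("ps"); run != nil {
		// Internally: rootCmd.SetArgs([]string{"ps", "-a"}); rootCmd.Execute()
		return run("-a")
	}
	return nil
}
```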
-func (c CobraAdaptor) GetRootCommand() *cobra.Command { - return c.rootCmd -} - -var usageTemplate = `Usage: {{if not .HasSubCommands}}{{.UseLine}}{{end}}{{if .HasSubCommands}}{{ .CommandPath}} COMMAND{{end}} - -{{ .Short | trim }}{{if gt .Aliases 0}} - -Aliases: - {{.NameAndAliases}}{{end}}{{if .HasExample}} - -Examples: -{{ .Example }}{{end}}{{if .HasFlags}} - -Options: -{{.Flags.FlagUsages | trimRightSpace}}{{end}}{{ if .HasAvailableSubCommands}} - -Commands:{{range .Commands}}{{if .IsAvailableCommand}} - {{rpad .Name .NamePadding }} {{.Short}}{{end}}{{end}}{{end}}{{ if .HasSubCommands }} - -Run '{{.CommandPath}} COMMAND --help' for more information on a command.{{end}} -` - -var helpTemplate = ` -{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}` diff --git a/cli/error.go b/cli/error.go deleted file mode 100644 index e421c7f7c7..0000000000 --- a/cli/error.go +++ /dev/null @@ -1,20 +0,0 @@ -package cli - -import "strings" - -// Errors is a list of errors. -// Useful in a loop if you don't want to return the error right away and you want to display after the loop, -// all the errors that happened during the loop. -type Errors []error - -func (errList Errors) Error() string { - if len(errList) < 1 { - return "" - } - - out := make([]string, len(errList)) - for i := range errList { - out[i] = errList[i].Error() - } - return strings.Join(out, ", ") -} diff --git a/cli/flagerrors.go b/cli/flagerrors.go deleted file mode 100644 index 23225e8ad6..0000000000 --- a/cli/flagerrors.go +++ /dev/null @@ -1,24 +0,0 @@ -package cli - -import ( - "fmt" - - "github.com/spf13/cobra" -) - -// FlagErrorFunc prints an error messages which matches the format of the -// docker/docker/cli error messages -func FlagErrorFunc(cmd *cobra.Command, err error) error { - if err == nil { - return err - } - - usage := "" - if cmd.HasSubCommands() { - usage = "\n\n" + cmd.UsageString() - } - return StatusError{ - Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage), - StatusCode: 125, - } -} diff --git a/cli/flags/client.go b/cli/flags/client.go deleted file mode 100644 index cc7309db4b..0000000000 --- a/cli/flags/client.go +++ /dev/null @@ -1,12 +0,0 @@ -package flags - -import flag "github.com/docker/docker/pkg/mflag" - -// ClientFlags represents flags for the docker client. -type ClientFlags struct { - FlagSet *flag.FlagSet - Common *CommonFlags - PostParse func() - - ConfigDir string -} diff --git a/cli/flags/common.go b/cli/flags/common.go deleted file mode 100644 index 4726b04f2a..0000000000 --- a/cli/flags/common.go +++ /dev/null @@ -1,123 +0,0 @@ -package flags - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/go-connections/tlsconfig" -) - -const ( - // DefaultTrustKeyFile is the default filename for the trust key - DefaultTrustKeyFile = "key.json" - // DefaultCaFile is the default filename for the CA pem file - DefaultCaFile = "ca.pem" - // DefaultKeyFile is the default filename for the key pem file - DefaultKeyFile = "key.pem" - // DefaultCertFile is the default filename for the cert pem file - DefaultCertFile = "cert.pem" - // TLSVerifyKey is the default flag name for the tls verification option - TLSVerifyKey = "tlsverify" -) - -var ( - dockerCertPath = os.Getenv("DOCKER_CERT_PATH") - dockerTLSVerify = os.Getenv("DOCKER_TLS_VERIFY") != "" -) - -// CommonFlags are flags common to both the client and the daemon. 
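`CommonFlags` (below) is consumed in two steps: parse the arguments with mflag, then run the `PostParse` hook that normalizes the TLS settings. A minimal sketch of that wiring, assuming the package's own `InitCommonFlags` and an arbitrary exit code:

```go
package flags

import "os"

// exampleWiring shows the intended call order; error handling is reduced
// to a bare exit for brevity.
func exampleWiring() {
	common := InitCommonFlags()
	if err := common.FlagSet.Parse(os.Args[1:]); err != nil {
		os.Exit(125)
	}
	common.PostParse() // applies the TLS rules in postParseCommon
}
```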
-type CommonFlags struct { - FlagSet *flag.FlagSet - PostParse func() - - Debug bool - Hosts []string - LogLevel string - TLS bool - TLSVerify bool - TLSOptions *tlsconfig.Options - TrustKey string -} - -// InitCommonFlags initializes flags common to both client and daemon -func InitCommonFlags() *CommonFlags { - var commonFlags = &CommonFlags{FlagSet: new(flag.FlagSet)} - - if dockerCertPath == "" { - dockerCertPath = cliconfig.ConfigDir() - } - - commonFlags.PostParse = func() { postParseCommon(commonFlags) } - - cmd := commonFlags.FlagSet - - cmd.BoolVar(&commonFlags.Debug, []string{"D", "-debug"}, false, "Enable debug mode") - cmd.StringVar(&commonFlags.LogLevel, []string{"l", "-log-level"}, "info", "Set the logging level") - cmd.BoolVar(&commonFlags.TLS, []string{"-tls"}, false, "Use TLS; implied by --tlsverify") - cmd.BoolVar(&commonFlags.TLSVerify, []string{"-tlsverify"}, dockerTLSVerify, "Use TLS and verify the remote") - - // TODO use flag flag.String([]string{"i", "-identity"}, "", "Path to libtrust key file") - - var tlsOptions tlsconfig.Options - commonFlags.TLSOptions = &tlsOptions - cmd.StringVar(&tlsOptions.CAFile, []string{"-tlscacert"}, filepath.Join(dockerCertPath, DefaultCaFile), "Trust certs signed only by this CA") - cmd.StringVar(&tlsOptions.CertFile, []string{"-tlscert"}, filepath.Join(dockerCertPath, DefaultCertFile), "Path to TLS certificate file") - cmd.StringVar(&tlsOptions.KeyFile, []string{"-tlskey"}, filepath.Join(dockerCertPath, DefaultKeyFile), "Path to TLS key file") - - cmd.Var(opts.NewNamedListOptsRef("hosts", &commonFlags.Hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to") - return commonFlags -} - -func postParseCommon(commonFlags *CommonFlags) { - cmd := commonFlags.FlagSet - - SetDaemonLogLevel(commonFlags.LogLevel) - - // Regardless of whether the user sets it to true or false, if they - // specify --tlsverify at all then we need to turn on tls - // TLSVerify can be true even if not set due to DOCKER_TLS_VERIFY env var, so we need - // to check that here as well - if cmd.IsSet("-"+TLSVerifyKey) || commonFlags.TLSVerify { - commonFlags.TLS = true - } - - if !commonFlags.TLS { - commonFlags.TLSOptions = nil - } else { - tlsOptions := commonFlags.TLSOptions - tlsOptions.InsecureSkipVerify = !commonFlags.TLSVerify - - // Reset CertFile and KeyFile to empty string if the user did not specify - // the respective flags and the respective default files were not found. - if !cmd.IsSet("-tlscert") { - if _, err := os.Stat(tlsOptions.CertFile); os.IsNotExist(err) { - tlsOptions.CertFile = "" - } - } - if !cmd.IsSet("-tlskey") { - if _, err := os.Stat(tlsOptions.KeyFile); os.IsNotExist(err) { - tlsOptions.KeyFile = "" - } - } - } -} - -// SetDaemonLogLevel sets the logrus logging level -// TODO: this is a bad name, it applies to the client as well. 
-func SetDaemonLogLevel(logLevel string) { - if logLevel != "" { - lvl, err := logrus.ParseLevel(logLevel) - if err != nil { - fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel) - os.Exit(1) - } - logrus.SetLevel(lvl) - } else { - logrus.SetLevel(logrus.InfoLevel) - } -} diff --git a/cli/required.go b/cli/required.go deleted file mode 100644 index 8ee02c8429..0000000000 --- a/cli/required.go +++ /dev/null @@ -1,96 +0,0 @@ -package cli - -import ( - "fmt" - "strings" - - "github.com/spf13/cobra" -) - -// NoArgs validates args and returns an error if there are any args -func NoArgs(cmd *cobra.Command, args []string) error { - if len(args) == 0 { - return nil - } - - if cmd.HasSubCommands() { - return fmt.Errorf("\n" + strings.TrimRight(cmd.UsageString(), "\n")) - } - - return fmt.Errorf( - "\"%s\" accepts no argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) -} - -// RequiresMinArgs returns an error if there is not at least min args -func RequiresMinArgs(min int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) >= min { - return nil - } - return fmt.Errorf( - "\"%s\" requires at least %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - min, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// RequiresMaxArgs returns an error if there is not at most max args -func RequiresMaxArgs(max int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) <= max { - return nil - } - return fmt.Errorf( - "\"%s\" requires at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - max, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// RequiresRangeArgs returns an error if there is not at least min args and at most max args -func RequiresRangeArgs(min int, max int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) >= min && len(args) <= max { - return nil - } - return fmt.Errorf( - "\"%s\" requires at least %d and at most %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - min, - max, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} - -// ExactArgs returns an error if there is not the exact number of args -func ExactArgs(number int) cobra.PositionalArgs { - return func(cmd *cobra.Command, args []string) error { - if len(args) == number { - return nil - } - return fmt.Errorf( - "\"%s\" requires exactly %d argument(s).\nSee '%s --help'.\n\nUsage: %s\n\n%s", - cmd.CommandPath(), - number, - cmd.CommandPath(), - cmd.UseLine(), - cmd.Short, - ) - } -} diff --git a/cli/usage.go b/cli/usage.go deleted file mode 100644 index 0e2923740f..0000000000 --- a/cli/usage.go +++ /dev/null @@ -1,24 +0,0 @@ -package cli - -// Command is the struct containing the command name and description -type Command struct { - Name string - Description string -} - -// DockerCommandUsage lists the top level docker commands and their short usage -var DockerCommandUsage = []Command{ - {"exec", "Run a command in a running container"}, - {"info", "Display system-wide information"}, - {"inspect", "Return low-level information on a container, image or task"}, - {"update", "Update configuration of one or more containers"}, -} - -// DockerCommands stores all the docker command -var DockerCommands = make(map[string]Command) - -func init() { - for _, cmd := range DockerCommandUsage { - 
DockerCommands[cmd.Name] = cmd - } -} diff --git a/cliconfig/config.go b/cliconfig/config.go deleted file mode 100644 index 9d5df0ac4b..0000000000 --- a/cliconfig/config.go +++ /dev/null @@ -1,120 +0,0 @@ -package cliconfig - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/pkg/homedir" - "github.com/docker/engine-api/types" -) - -const ( - // ConfigFileName is the name of config file - ConfigFileName = "config.json" - configFileDir = ".docker" - oldConfigfile = ".dockercfg" -) - -var ( - configDir = os.Getenv("DOCKER_CONFIG") -) - -func init() { - if configDir == "" { - configDir = filepath.Join(homedir.Get(), configFileDir) - } -} - -// ConfigDir returns the directory the configuration file is stored in -func ConfigDir() string { - return configDir -} - -// SetConfigDir sets the directory the configuration file is stored in -func SetConfigDir(dir string) { - configDir = dir -} - -// NewConfigFile initializes an empty configuration file for the given filename 'fn' -func NewConfigFile(fn string) *configfile.ConfigFile { - return &configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - HTTPHeaders: make(map[string]string), - Filename: fn, - } -} - -// LegacyLoadFromReader is a convenience function that creates a ConfigFile object from -// a non-nested reader -func LegacyLoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LegacyLoadFromReader(configData) - return &configFile, err -} - -// LoadFromReader is a convenience function that creates a ConfigFile object from -// a reader -func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) { - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - } - err := configFile.LoadFromReader(configData) - return &configFile, err -} - -// Load reads the configuration files in the given directory, and sets up -// the auth config information and returns values. 
-// FIXME: use the internal golang config parser -func Load(configDir string) (*configfile.ConfigFile, error) { - if configDir == "" { - configDir = ConfigDir() - } - - configFile := configfile.ConfigFile{ - AuthConfigs: make(map[string]types.AuthConfig), - Filename: filepath.Join(configDir, ConfigFileName), - } - - // Try happy path first - latest config file - if _, err := os.Stat(configFile.Filename); err == nil { - file, err := os.Open(configFile.Filename) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) - } - defer file.Close() - err = configFile.LoadFromReader(file) - if err != nil { - err = fmt.Errorf("%s - %v", configFile.Filename, err) - } - return &configFile, err - } else if !os.IsNotExist(err) { - // if file is there but we can't stat it for any reason other - // than it doesn't exist then stop - return &configFile, fmt.Errorf("%s - %v", configFile.Filename, err) - } - - // Can't find latest config file so check for the old one - confFile := filepath.Join(homedir.Get(), oldConfigfile) - if _, err := os.Stat(confFile); err != nil { - return &configFile, nil //missing file is not an error - } - file, err := os.Open(confFile) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", confFile, err) - } - defer file.Close() - err = configFile.LegacyLoadFromReader(file) - if err != nil { - return &configFile, fmt.Errorf("%s - %v", confFile, err) - } - - if configFile.HTTPHeaders == nil { - configFile.HTTPHeaders = map[string]string{} - } - return &configFile, nil -} diff --git a/cliconfig/config_test.go b/cliconfig/config_test.go deleted file mode 100644 index 78b360cc2c..0000000000 --- a/cliconfig/config_test.go +++ /dev/null @@ -1,545 +0,0 @@ -package cliconfig - -import ( - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/docker/pkg/homedir" -) - -func TestEmptyConfigDir(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - SetConfigDir(tmpHome) - - config, err := Load("") - if err != nil { - t.Fatalf("Failed loading on empty config dir: %q", err) - } - - expectedConfigFilename := filepath.Join(tmpHome, ConfigFileName) - if config.Filename != expectedConfigFilename { - t.Fatalf("Expected config filename %s, got %s", expectedConfigFilename, config.Filename) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestMissingFile(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on missing file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestSaveFileToDirs(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - tmpHome += "/.docker" - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on missing file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestEmptyFile(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - if err := 
ioutil.WriteFile(fn, []byte(""), 0600); err != nil { - t.Fatal(err) - } - - _, err = Load(tmpHome) - if err == nil { - t.Fatalf("Was supposed to fail") - } -} - -func TestEmptyJson(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - if err := ioutil.WriteFile(fn, []byte("{}"), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - // Now save it and make sure it shows up in new form - saveConfigAndValidateNewFormat(t, config, tmpHome) -} - -func TestOldInvalidsAuth(t *testing.T) { - invalids := map[string]string{ - `username = test`: "The Auth config file is empty", - `username -password`: "Invalid Auth config file", - `username = test -email`: "Invalid auth configuration file", - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - for content, expectedError := range invalids { - fn := filepath.Join(tmpHome, oldConfigfile) - if err := ioutil.WriteFile(fn, []byte(content), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - // Use Contains instead of == since the file name will change each time - if err == nil || !strings.Contains(err.Error(), expectedError) { - t.Fatalf("Should have failed\nConfig: %v\nGot: %v\nExpected: %v", config, err, expectedError) - } - - } -} - -func TestOldValidAuth(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `username = am9lam9lOmhlbGxv - email = user@example.com` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatal(err) - } - - // defaultIndexserver is https://index.docker.io/v1/ - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) - } -} - -func TestOldJsonInvalid(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `{"https://index.docker.io/v1/":{"auth":"test","email":"user@example.com"}}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - // Use Contains instead of == since the file name will change each time - if err == nil || !strings.Contains(err.Error(), "Invalid auth configuration file") { - 
t.Fatalf("Expected an error got : %v, %v", config, err) - } -} - -func TestOldJson(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - homeKey := homedir.Key() - homeVal := homedir.Get() - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpHome) - - fn := filepath.Join(tmpHome, oldConfigfile) - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv", - "email": "user@example.com" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n'%s'\n not \n'%s'\n", configStr, expConfStr) - } -} - -func TestNewJson(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) - } -} - -func TestNewJsonNoEmail(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv" } } }` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - } -}` - - if configStr != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", configStr, expConfStr) - } -} - -func TestJsonWithPsFormat(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - js := `{ - "auths": { 
"https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - if err := ioutil.WriteFile(fn, []byte(js), 0600); err != nil { - t.Fatal(err) - } - - config, err := Load(tmpHome) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { - t.Fatalf("Unknown ps format: %s\n", config.PsFormat) - } - - // Now save it and make sure it shows up in new form - configStr := saveConfigAndValidateNewFormat(t, config, tmpHome) - if !strings.Contains(configStr, `"psFormat":`) || - !strings.Contains(configStr, "{{.ID}}") { - t.Fatalf("Should have save in new form: %s", configStr) - } -} - -// Save it and make sure it shows up in new form -func saveConfigAndValidateNewFormat(t *testing.T, config *configfile.ConfigFile, homeFolder string) string { - if err := config.Save(); err != nil { - t.Fatalf("Failed to save: %q", err) - } - - buf, err := ioutil.ReadFile(filepath.Join(homeFolder, ConfigFileName)) - if err != nil { - t.Fatal(err) - } - if !strings.Contains(string(buf), `"auths":`) { - t.Fatalf("Should have save in new form: %s", string(buf)) - } - return string(buf) -} - -func TestConfigDir(t *testing.T) { - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpHome) - - if ConfigDir() == tmpHome { - t.Fatalf("Expected ConfigDir to be different than %s by default, but was the same", tmpHome) - } - - // Update configDir - SetConfigDir(tmpHome) - - if ConfigDir() != tmpHome { - t.Fatalf("Expected ConfigDir to %s, but was %s", tmpHome, ConfigDir()) - } -} - -func TestConfigFile(t *testing.T) { - configFilename := "configFilename" - configFile := NewConfigFile(configFilename) - - if configFile.Filename != configFilename { - t.Fatalf("Expected %s, got %s", configFilename, configFile.Filename) - } -} - -func TestJsonReaderNoFile(t *testing.T) { - js := ` { "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } } }` - - config, err := LoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } - -} - -func TestOldJsonReaderNoFile(t *testing.T) { - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - - config, err := LegacyLoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - ac := config.AuthConfigs["https://index.docker.io/v1/"] - if ac.Username != "joejoe" || ac.Password != "hello" { - t.Fatalf("Missing data from parsing:\n%q", config) - } -} - -func TestJsonWithPsFormatNoFile(t *testing.T) { - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": "am9lam9lOmhlbGxv", "email": "user@example.com" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - config, err := LoadFromReader(strings.NewReader(js)) - if err != nil { - t.Fatalf("Failed loading on empty json file: %q", err) - } - - if config.PsFormat != `table {{.ID}}\t{{.Label "com.docker.label.cpu"}}` { - t.Fatalf("Unknown ps format: %s\n", config.PsFormat) - } - -} - -func TestJsonSaveWithNoFile(t *testing.T) { - js := `{ - "auths": { "https://index.docker.io/v1/": { "auth": 
"am9lam9lOmhlbGxv" } }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - config, err := LoadFromReader(strings.NewReader(js)) - err = config.Save() - if err == nil { - t.Fatalf("Expected error. File should not have been able to save with no file name.") - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatalf("Failed to create a temp dir: %q", err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - err = config.SaveToWriter(f) - if err != nil { - t.Fatalf("Failed saving to file: %q", err) - } - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if err != nil { - t.Fatal(err) - } - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv" - } - }, - "psFormat": "table {{.ID}}\\t{{.Label \"com.docker.label.cpu\"}}" -}` - if string(buf) != expConfStr { - t.Fatalf("Should have save in new form: \n%s\nnot \n%s", string(buf), expConfStr) - } -} - -func TestLegacyJsonSaveWithNoFile(t *testing.T) { - - js := `{"https://index.docker.io/v1/":{"auth":"am9lam9lOmhlbGxv","email":"user@example.com"}}` - config, err := LegacyLoadFromReader(strings.NewReader(js)) - err = config.Save() - if err == nil { - t.Fatalf("Expected error. File should not have been able to save with no file name.") - } - - tmpHome, err := ioutil.TempDir("", "config-test") - if err != nil { - t.Fatalf("Failed to create a temp dir: %q", err) - } - defer os.RemoveAll(tmpHome) - - fn := filepath.Join(tmpHome, ConfigFileName) - f, _ := os.OpenFile(fn, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err = config.SaveToWriter(f); err != nil { - t.Fatalf("Failed saving to file: %q", err) - } - buf, err := ioutil.ReadFile(filepath.Join(tmpHome, ConfigFileName)) - if err != nil { - t.Fatal(err) - } - - expConfStr := `{ - "auths": { - "https://index.docker.io/v1/": { - "auth": "am9lam9lOmhlbGxv", - "email": "user@example.com" - } - } -}` - - if string(buf) != expConfStr { - t.Fatalf("Should have save in new form: \n%s\n not \n%s", string(buf), expConfStr) - } -} diff --git a/cliconfig/configfile/file.go b/cliconfig/configfile/file.go deleted file mode 100644 index 7c94e27dce..0000000000 --- a/cliconfig/configfile/file.go +++ /dev/null @@ -1,177 +0,0 @@ -package configfile - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/docker/engine-api/types" -) - -const ( - // This constant is only used for really old config files when the - // URL wasn't saved as part of the config file and it was just - // assumed to be this value. 
- defaultIndexserver = "https://index.docker.io/v1/" -) - -// ConfigFile ~/.docker/config.json file info -type ConfigFile struct { - AuthConfigs map[string]types.AuthConfig `json:"auths"` - HTTPHeaders map[string]string `json:"HttpHeaders,omitempty"` - PsFormat string `json:"psFormat,omitempty"` - ImagesFormat string `json:"imagesFormat,omitempty"` - DetachKeys string `json:"detachKeys,omitempty"` - CredentialsStore string `json:"credsStore,omitempty"` - Filename string `json:"-"` // Note: for internal use only -} - -// LegacyLoadFromReader reads the non-nested configuration data given and sets up the -// auth config information with given directory and populates the receiver object -func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error { - b, err := ioutil.ReadAll(configData) - if err != nil { - return err - } - - if err := json.Unmarshal(b, &configFile.AuthConfigs); err != nil { - arr := strings.Split(string(b), "\n") - if len(arr) < 2 { - return fmt.Errorf("The Auth config file is empty") - } - authConfig := types.AuthConfig{} - origAuth := strings.Split(arr[0], " = ") - if len(origAuth) != 2 { - return fmt.Errorf("Invalid Auth config file") - } - authConfig.Username, authConfig.Password, err = decodeAuth(origAuth[1]) - if err != nil { - return err - } - authConfig.ServerAddress = defaultIndexserver - configFile.AuthConfigs[defaultIndexserver] = authConfig - } else { - for k, authConfig := range configFile.AuthConfigs { - authConfig.Username, authConfig.Password, err = decodeAuth(authConfig.Auth) - if err != nil { - return err - } - authConfig.Auth = "" - authConfig.ServerAddress = k - configFile.AuthConfigs[k] = authConfig - } - } - return nil -} - -// LoadFromReader reads the configuration data given and sets up the auth config -// information with given directory and populates the receiver object -func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error { - if err := json.NewDecoder(configData).Decode(&configFile); err != nil { - return err - } - var err error - for addr, ac := range configFile.AuthConfigs { - ac.Username, ac.Password, err = decodeAuth(ac.Auth) - if err != nil { - return err - } - ac.Auth = "" - ac.ServerAddress = addr - configFile.AuthConfigs[addr] = ac - } - return nil -} - -// ContainsAuth returns whether there is authentication configured -// in this file or not. 
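Before the method bodies below, a note on the encoding: the `auth` values in these files are plain base64 of "username:password", which is what the encodeAuth/decodeAuth helpers further down implement. A stdlib-only sketch using the fixture value from the tests above:

enc := base64.StdEncoding.EncodeToString([]byte("joejoe:hello"))
// enc == "am9lam9lOmhlbGxv"
dec, _ := base64.StdEncoding.DecodeString(enc)
parts := strings.SplitN(string(dec), ":", 2)
// parts[0] == "joejoe", parts[1] == "hello"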
-func (configFile *ConfigFile) ContainsAuth() bool { - return configFile.CredentialsStore != "" || - (configFile.AuthConfigs != nil && len(configFile.AuthConfigs) > 0) -} - -// SaveToWriter encodes and writes out all the authorization information to -// the given writer -func (configFile *ConfigFile) SaveToWriter(writer io.Writer) error { - // Encode sensitive data into a new/temp struct - tmpAuthConfigs := make(map[string]types.AuthConfig, len(configFile.AuthConfigs)) - for k, authConfig := range configFile.AuthConfigs { - authCopy := authConfig - // encode and save the authstring, while blanking out the original fields - authCopy.Auth = encodeAuth(&authCopy) - authCopy.Username = "" - authCopy.Password = "" - authCopy.ServerAddress = "" - tmpAuthConfigs[k] = authCopy - } - - saveAuthConfigs := configFile.AuthConfigs - configFile.AuthConfigs = tmpAuthConfigs - defer func() { configFile.AuthConfigs = saveAuthConfigs }() - - data, err := json.MarshalIndent(configFile, "", "\t") - if err != nil { - return err - } - _, err = writer.Write(data) - return err -} - -// Save encodes and writes out all the authorization information -func (configFile *ConfigFile) Save() error { - if configFile.Filename == "" { - return fmt.Errorf("Can't save config with empty filename") - } - - if err := os.MkdirAll(filepath.Dir(configFile.Filename), 0700); err != nil { - return err - } - f, err := os.OpenFile(configFile.Filename, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - defer f.Close() - return configFile.SaveToWriter(f) -} - -// encodeAuth creates a base64-encoded string containing the authorization information -func encodeAuth(authConfig *types.AuthConfig) string { - if authConfig.Username == "" && authConfig.Password == "" { - return "" - } - - authStr := authConfig.Username + ":" + authConfig.Password - msg := []byte(authStr) - encoded := make([]byte, base64.StdEncoding.EncodedLen(len(msg))) - base64.StdEncoding.Encode(encoded, msg) - return string(encoded) -} - -// decodeAuth decodes a base64-encoded string and returns the username and password -func decodeAuth(authStr string) (string, string, error) { - if authStr == "" { - return "", "", nil - } - - decLen := base64.StdEncoding.DecodedLen(len(authStr)) - decoded := make([]byte, decLen) - authByte := []byte(authStr) - n, err := base64.StdEncoding.Decode(decoded, authByte) - if err != nil { - return "", "", err - } - if n > decLen { - return "", "", fmt.Errorf("Something went wrong decoding auth config") - } - arr := strings.SplitN(string(decoded), ":", 2) - if len(arr) != 2 { - return "", "", fmt.Errorf("Invalid auth configuration file") - } - password := strings.Trim(arr[1], "\x00") - return arr[0], password, nil -} diff --git a/cliconfig/configfile/file_test.go b/cliconfig/configfile/file_test.go deleted file mode 100644 index 15eecb73e2..0000000000 --- a/cliconfig/configfile/file_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package configfile - -import ( - "testing" - - "github.com/docker/engine-api/types" -) - -func TestEncodeAuth(t *testing.T) { - newAuthConfig := &types.AuthConfig{Username: "ken", Password: "test"} - authStr := encodeAuth(newAuthConfig) - decAuthConfig := &types.AuthConfig{} - var err error - decAuthConfig.Username, decAuthConfig.Password, err = decodeAuth(authStr) - if err != nil { - t.Fatal(err) - } - if newAuthConfig.Username != decAuthConfig.Username { - t.Fatal("Encode Username doesn't match decoded Username") - } - if newAuthConfig.Password != decAuthConfig.Password { - t.Fatal("Encode Password doesn't 
match decoded Password") - } - if authStr != "a2VuOnRlc3Q=" { - t.Fatal("AuthString encoding isn't correct.") - } -} diff --git a/cliconfig/credentials/credentials.go b/cliconfig/credentials/credentials.go deleted file mode 100644 index 510cf8cf0e..0000000000 --- a/cliconfig/credentials/credentials.go +++ /dev/null @@ -1,17 +0,0 @@ -package credentials - -import ( - "github.com/docker/engine-api/types" -) - -// Store is the interface that any credentials store must implement. -type Store interface { - // Erase removes credentials from the store for a given server. - Erase(serverAddress string) error - // Get retrieves credentials from the store for a given server. - Get(serverAddress string) (types.AuthConfig, error) - // GetAll retrieves all the credentials from the store. - GetAll() (map[string]types.AuthConfig, error) - // Store saves credentials in the store. - Store(authConfig types.AuthConfig) error -} diff --git a/cliconfig/credentials/default_store.go b/cliconfig/credentials/default_store.go deleted file mode 100644 index b4733709b1..0000000000 --- a/cliconfig/credentials/default_store.go +++ /dev/null @@ -1,22 +0,0 @@ -package credentials - -import ( - "os/exec" - - "github.com/docker/docker/cliconfig/configfile" -) - -// DetectDefaultStore sets the default credentials store -// if the host includes the default store helper program. -func DetectDefaultStore(c *configfile.ConfigFile) { - if c.CredentialsStore != "" { - // user defined - return - } - - if defaultCredentialsStore != "" { - if _, err := exec.LookPath(remoteCredentialsPrefix + defaultCredentialsStore); err == nil { - c.CredentialsStore = defaultCredentialsStore - } - } -} diff --git a/cliconfig/credentials/default_store_darwin.go b/cliconfig/credentials/default_store_darwin.go deleted file mode 100644 index 63e8ed4010..0000000000 --- a/cliconfig/credentials/default_store_darwin.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "osxkeychain" diff --git a/cliconfig/credentials/default_store_linux.go b/cliconfig/credentials/default_store_linux.go deleted file mode 100644 index 864c540f6c..0000000000 --- a/cliconfig/credentials/default_store_linux.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "secretservice" diff --git a/cliconfig/credentials/default_store_unsupported.go b/cliconfig/credentials/default_store_unsupported.go deleted file mode 100644 index 519ef53dcd..0000000000 --- a/cliconfig/credentials/default_store_unsupported.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !windows,!darwin,!linux - -package credentials - -const defaultCredentialsStore = "" diff --git a/cliconfig/credentials/default_store_windows.go b/cliconfig/credentials/default_store_windows.go deleted file mode 100644 index fb6a9745cf..0000000000 --- a/cliconfig/credentials/default_store_windows.go +++ /dev/null @@ -1,3 +0,0 @@ -package credentials - -const defaultCredentialsStore = "wincred" diff --git a/cliconfig/credentials/file_store.go b/cliconfig/credentials/file_store.go deleted file mode 100644 index cf1c89fcc5..0000000000 --- a/cliconfig/credentials/file_store.go +++ /dev/null @@ -1,67 +0,0 @@ -package credentials - -import ( - "strings" - - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/engine-api/types" -) - -// fileStore implements a credentials store using -// the docker configuration file to keep the credentials in plain text. 
-type fileStore struct { - file *configfile.ConfigFile -} - -// NewFileStore creates a new file credentials store. -func NewFileStore(file *configfile.ConfigFile) Store { - return &fileStore{ - file: file, - } -} - -// Erase removes the given credentials from the file store. -func (c *fileStore) Erase(serverAddress string) error { - delete(c.file.AuthConfigs, serverAddress) - return c.file.Save() -} - -// Get retrieves credentials for a specific server from the file store. -func (c *fileStore) Get(serverAddress string) (types.AuthConfig, error) { - authConfig, ok := c.file.AuthConfigs[serverAddress] - if !ok { - // Maybe they have a legacy config file, we will iterate the keys converting - // them to the new format and testing - for registry, ac := range c.file.AuthConfigs { - if serverAddress == convertToHostname(registry) { - return ac, nil - } - } - - authConfig = types.AuthConfig{} - } - return authConfig, nil -} - -func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) { - return c.file.AuthConfigs, nil -} - -// Store saves the given credentials in the file store. -func (c *fileStore) Store(authConfig types.AuthConfig) error { - c.file.AuthConfigs[authConfig.ServerAddress] = authConfig - return c.file.Save() -} - -func convertToHostname(url string) string { - stripped := url - if strings.HasPrefix(url, "http://") { - stripped = strings.Replace(url, "http://", "", 1) - } else if strings.HasPrefix(url, "https://") { - stripped = strings.Replace(url, "https://", "", 1) - } - - nameParts := strings.SplitN(stripped, "/", 2) - - return nameParts[0] -} diff --git a/cliconfig/credentials/file_store_test.go b/cliconfig/credentials/file_store_test.go deleted file mode 100644 index f087f04e75..0000000000 --- a/cliconfig/credentials/file_store_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package credentials - -import ( - "io/ioutil" - "testing" - - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/engine-api/types" -) - -func newConfigFile(auths map[string]types.AuthConfig) *configfile.ConfigFile { - tmp, _ := ioutil.TempFile("", "docker-test") - name := tmp.Name() - tmp.Close() - - c := cliconfig.NewConfigFile(name) - c.AuthConfigs = auths - return c -} - -func TestFileStoreAddCredentials(t *testing.T) { - f := newConfigFile(make(map[string]types.AuthConfig)) - - s := NewFileStore(f) - err := s.Store(types.AuthConfig{ - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }) - - if err != nil { - t.Fatal(err) - } - - if len(f.AuthConfigs) != 1 { - t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) - } - - a, ok := f.AuthConfigs["https://example.com"] - if !ok { - t.Fatalf("expected auth for https://example.com, got %v", f.AuthConfigs) - } - if a.Auth != "super_secret_token" { - t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestFileStoreGet(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - "https://example.com": { - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }, - }) - - s := NewFileStore(f) - a, err := s.Get("https://example.com") - if err != nil { - t.Fatal(err) - } - if a.Auth != "super_secret_token" { - t.Fatalf("expected auth `super_secret_token`, got %s", a.Auth) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - 
} -} - -func TestFileStoreGetAll(t *testing.T) { - s1 := "https://example.com" - s2 := "https://example2.com" - f := newConfigFile(map[string]types.AuthConfig{ - s1: { - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }, - s2: { - Auth: "super_secret_token2", - Email: "foo@example2.com", - ServerAddress: "https://example2.com", - }, - }) - - s := NewFileStore(f) - as, err := s.GetAll() - if err != nil { - t.Fatal(err) - } - if len(as) != 2 { - t.Fatalf("wanted 2, got %d", len(as)) - } - if as[s1].Auth != "super_secret_token" { - t.Fatalf("expected auth `super_secret_token`, got %s", as[s1].Auth) - } - if as[s1].Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", as[s1].Email) - } - if as[s2].Auth != "super_secret_token2" { - t.Fatalf("expected auth `super_secret_token2`, got %s", as[s2].Auth) - } - if as[s2].Email != "foo@example2.com" { - t.Fatalf("expected email `foo@example2.com`, got %s", as[s2].Email) - } -} - -func TestFileStoreErase(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - "https://example.com": { - Auth: "super_secret_token", - Email: "foo@example.com", - ServerAddress: "https://example.com", - }, - }) - - s := NewFileStore(f) - err := s.Erase("https://example.com") - if err != nil { - t.Fatal(err) - } - - // the file store never returns errors; check that the auth config is now empty - a, err := s.Get("https://example.com") - if err != nil { - t.Fatal(err) - } - - if a.Auth != "" { - t.Fatalf("expected empty auth token, got %s", a.Auth) - } - if a.Email != "" { - t.Fatalf("expected empty email, got %s", a.Email) - } -} diff --git a/cliconfig/credentials/native_store.go b/cliconfig/credentials/native_store.go deleted file mode 100644 index 1717ce7b39..0000000000 --- a/cliconfig/credentials/native_store.go +++ /dev/null @@ -1,126 +0,0 @@ -package credentials - -import ( - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/docker/cliconfig/configfile" - "github.com/docker/engine-api/types" -) - -const ( - remoteCredentialsPrefix = "docker-credential-" - tokenUsername = "" -) - -// nativeStore implements a credentials store -// using the native keychain to keep credentials secure. -// It piggybacks on a file store to keep users' emails. -type nativeStore struct { - programFunc client.ProgramFunc - fileStore Store -} - -// NewNativeStore creates a new native store that -// uses a remote helper program to manage credentials. -func NewNativeStore(file *configfile.ConfigFile) Store { - name := remoteCredentialsPrefix + file.CredentialsStore - return &nativeStore{ - programFunc: client.NewShellProgramFunc(name), - fileStore: NewFileStore(file), - } -} - -// Erase removes the given credentials from the native store. -func (c *nativeStore) Erase(serverAddress string) error { - if err := client.Erase(c.programFunc, serverAddress); err != nil { - return err - } - - // Fall back to the plain-text store to remove the email - return c.fileStore.Erase(serverAddress) -} - -// Get retrieves credentials for a specific server from the native store. -func (c *nativeStore) Get(serverAddress string) (types.AuthConfig, error) { - // load the user email if it exists, otherwise start from an empty auth config.
- auth, _ := c.fileStore.Get(serverAddress) - - creds, err := c.getCredentialsFromStore(serverAddress) - if err != nil { - return auth, err - } - auth.Username = creds.Username - auth.IdentityToken = creds.IdentityToken - auth.Password = creds.Password - - return auth, nil -} - -// GetAll retrieves all the credentials from the native store. -func (c *nativeStore) GetAll() (map[string]types.AuthConfig, error) { - auths, _ := c.fileStore.GetAll() - - for s, ac := range auths { - creds, _ := c.getCredentialsFromStore(s) - ac.Username = creds.Username - ac.Password = creds.Password - ac.IdentityToken = creds.IdentityToken - auths[s] = ac - } - - return auths, nil -} - -// Store saves the given credentials in the file store. -func (c *nativeStore) Store(authConfig types.AuthConfig) error { - if err := c.storeCredentialsInStore(authConfig); err != nil { - return err - } - authConfig.Username = "" - authConfig.Password = "" - authConfig.IdentityToken = "" - - // Fall back to the plain-text store to save only the email - return c.fileStore.Store(authConfig) -} - -// storeCredentialsInStore executes the command to store the credentials in the native store. -func (c *nativeStore) storeCredentialsInStore(config types.AuthConfig) error { - creds := &credentials.Credentials{ - ServerURL: config.ServerAddress, - Username: config.Username, - Secret: config.Password, - } - - if config.IdentityToken != "" { - creds.Username = tokenUsername - creds.Secret = config.IdentityToken - } - - return client.Store(c.programFunc, creds) -} - -// getCredentialsFromStore executes the command to get the credentials from the native store. -func (c *nativeStore) getCredentialsFromStore(serverAddress string) (types.AuthConfig, error) { - var ret types.AuthConfig - - creds, err := client.Get(c.programFunc, serverAddress) - if err != nil { - if credentials.IsErrCredentialsNotFound(err) { - // do not return an error if the credentials are not - // in the keychain. Let docker ask for new credentials. - return ret, nil - } - return ret, err - } - - if creds.Username == tokenUsername { - ret.IdentityToken = creds.Secret - } else { - ret.Password = creds.Secret - ret.Username = creds.Username - } - - ret.ServerAddress = serverAddress - return ret, nil -} diff --git a/cliconfig/credentials/native_store_test.go b/cliconfig/credentials/native_store_test.go deleted file mode 100644 index 952e447dfe..0000000000 --- a/cliconfig/credentials/native_store_test.go +++ /dev/null @@ -1,356 +0,0 @@ -package credentials - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "strings" - "testing" - - "github.com/docker/docker-credential-helpers/client" - "github.com/docker/docker-credential-helpers/credentials" - "github.com/docker/engine-api/types" -) - -const ( - validServerAddress = "https://index.docker.io/v1" - validServerAddress2 = "https://example.com:5002" - invalidServerAddress = "https://foobar.example.com" - missingCredsAddress = "https://missing.docker.io/v1" -) - -var errCommandExited = fmt.Errorf("exited 1") - -// mockCommand simulates interactions between the docker client and a remote -// credentials helper. -// Unit tests inject this mocked command into the remote to control execution. -type mockCommand struct { - arg string - input io.Reader -} - -// Output returns responses from the remote credentials helper. -// It mocks those responses based on the input set in the mock.
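For context on what these mocks stand in for: the native store above shells out to a docker-credential-&lt;name&gt; binary through the helpers client. A hedged sketch of that real exchange (the osxkeychain name is the darwin default from earlier in this patch; error handling elided):

p := client.NewShellProgramFunc("docker-credential-osxkeychain")
creds, err := client.Get(p, "https://index.docker.io/v1/")
if err == nil {
	fmt.Println(creds.Username, creds.Secret)
}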
-func (m *mockCommand) Output() ([]byte, error) { - in, err := ioutil.ReadAll(m.input) - if err != nil { - return nil, err - } - inS := string(in) - - switch m.arg { - case "erase": - switch inS { - case validServerAddress: - return nil, nil - default: - return []byte("program failed"), errCommandExited - } - case "get": - switch inS { - case validServerAddress: - return []byte(`{"Username": "foo", "Secret": "bar"}`), nil - case validServerAddress2: - return []byte(`{"Username": "", "Secret": "abcd1234"}`), nil - case missingCredsAddress: - return []byte(credentials.NewErrCredentialsNotFound().Error()), errCommandExited - case invalidServerAddress: - return []byte("program failed"), errCommandExited - } - case "store": - var c credentials.Credentials - err := json.NewDecoder(strings.NewReader(inS)).Decode(&c) - if err != nil { - return []byte("program failed"), errCommandExited - } - switch c.ServerURL { - case validServerAddress: - return nil, nil - default: - return []byte("program failed"), errCommandExited - } - } - - return []byte(fmt.Sprintf("unknown argument %q with %q", m.arg, inS)), errCommandExited -} - -// Input sets the input to send to a remote credentials helper. -func (m *mockCommand) Input(in io.Reader) { - m.input = in -} - -func mockCommandFn(args ...string) client.Program { - return &mockCommand{ - arg: args[0], - } -} - -func TestNativeStoreAddCredentials(t *testing.T) { - f := newConfigFile(make(map[string]types.AuthConfig)) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Store(types.AuthConfig{ - Username: "foo", - Password: "bar", - Email: "foo@example.com", - ServerAddress: validServerAddress, - }) - - if err != nil { - t.Fatal(err) - } - - if len(f.AuthConfigs) != 1 { - t.Fatalf("expected 1 auth config, got %d", len(f.AuthConfigs)) - } - - a, ok := f.AuthConfigs[validServerAddress] - if !ok { - t.Fatalf("expected auth for %s, got %v", validServerAddress, f.AuthConfigs) - } - if a.Auth != "" { - t.Fatalf("expected auth to be empty, got %s", a.Auth) - } - if a.Username != "" { - t.Fatalf("expected username to be empty, got %s", a.Username) - } - if a.Password != "" { - t.Fatalf("expected password to be empty, got %s", a.Password) - } - if a.IdentityToken != "" { - t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestNativeStoreAddInvalidCredentials(t *testing.T) { - f := newConfigFile(make(map[string]types.AuthConfig)) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Store(types.AuthConfig{ - Username: "foo", - Password: "bar", - Email: "foo@example.com", - ServerAddress: invalidServerAddress, - }) - - if err == nil { - t.Fatal("expected error, got nil") - } - - if !strings.Contains(err.Error(), "program failed") { - t.Fatalf("expected `program failed`, got %v", err) - } - - if len(f.AuthConfigs) != 0 { - t.Fatalf("expected 0 auth config, got %d", len(f.AuthConfigs)) - } -} - -func TestNativeStoreGet(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - a, err := s.Get(validServerAddress) - if err != nil { - t.Fatal(err) - } - - if a.Username != "foo" { - t.Fatalf("expected 
username `foo`, got %s", a.Username) - } - if a.Password != "bar" { - t.Fatalf("expected password `bar`, got %s", a.Password) - } - if a.IdentityToken != "" { - t.Fatalf("expected identity token to be empty, got %s", a.IdentityToken) - } - if a.Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com`, got %s", a.Email) - } -} - -func TestNativeStoreGetIdentityToken(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress2: { - Email: "foo@example2.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - a, err := s.Get(validServerAddress2) - if err != nil { - t.Fatal(err) - } - - if a.Username != "" { - t.Fatalf("expected username to be empty, got %s", a.Username) - } - if a.Password != "" { - t.Fatalf("expected password to be empty, got %s", a.Password) - } - if a.IdentityToken != "abcd1234" { - t.Fatalf("expected identity token `abcd1234`, got %s", a.IdentityToken) - } - if a.Email != "foo@example2.com" { - t.Fatalf("expected email `foo@example2.com`, got %s", a.Email) - } -} - -func TestNativeStoreGetAll(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - validServerAddress2: { - Email: "foo@example2.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - as, err := s.GetAll() - if err != nil { - t.Fatal(err) - } - - if len(as) != 2 { - t.Fatalf("wanted 2, got %d", len(as)) - } - - if as[validServerAddress].Username != "foo" { - t.Fatalf("expected username `foo` for %s, got %s", validServerAddress, as[validServerAddress].Username) - } - if as[validServerAddress].Password != "bar" { - t.Fatalf("expected password `bar` for %s, got %s", validServerAddress, as[validServerAddress].Password) - } - if as[validServerAddress].IdentityToken != "" { - t.Fatalf("expected identity to be empty for %s, got %s", validServerAddress, as[validServerAddress].IdentityToken) - } - if as[validServerAddress].Email != "foo@example.com" { - t.Fatalf("expected email `foo@example.com` for %s, got %s", validServerAddress, as[validServerAddress].Email) - } - if as[validServerAddress2].Username != "" { - t.Fatalf("expected username to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Username) - } - if as[validServerAddress2].Password != "" { - t.Fatalf("expected password to be empty for %s, got %s", validServerAddress2, as[validServerAddress2].Password) - } - if as[validServerAddress2].IdentityToken != "abcd1234" { - t.Fatalf("expected identity token `abcd1234` for %s, got %s", validServerAddress2, as[validServerAddress2].IdentityToken) - } - if as[validServerAddress2].Email != "foo@example2.com" { - t.Fatalf("expected email `foo@example2.com` for %s, got %s", validServerAddress2, as[validServerAddress2].Email) - } -} - -func TestNativeStoreGetMissingCredentials(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - _, err := s.Get(missingCredsAddress) - if err != nil { - // missing credentials do not produce an error - t.Fatal(err) - } -} - -func TestNativeStoreGetInvalidAddress(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" 
- - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - _, err := s.Get(invalidServerAddress) - if err == nil { - t.Fatal("expected error, got nil") - } - - if !strings.Contains(err.Error(), "program failed") { - t.Fatalf("expected `program failed`, got %v", err) - } -} - -func TestNativeStoreErase(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Erase(validServerAddress) - if err != nil { - t.Fatal(err) - } - - if len(f.AuthConfigs) != 0 { - t.Fatalf("expected 0 auth configs, got %d", len(f.AuthConfigs)) - } -} - -func TestNativeStoreEraseInvalidAddress(t *testing.T) { - f := newConfigFile(map[string]types.AuthConfig{ - validServerAddress: { - Email: "foo@example.com", - }, - }) - f.CredentialsStore = "mock" - - s := &nativeStore{ - programFunc: mockCommandFn, - fileStore: NewFileStore(f), - } - err := s.Erase(invalidServerAddress) - if err == nil { - t.Fatal("expected error, got nil") - } - - if !strings.Contains(err.Error(), "program failed") { - t.Fatalf("expected `program failed`, got %v", err) - } -} diff --git a/cmd/docker/daemon.go b/cmd/docker/daemon.go deleted file mode 100644 index 8fe3484761..0000000000 --- a/cmd/docker/daemon.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -const daemonBinary = "dockerd" - -// DaemonProxy acts as a cli.Handler to proxy calls to the daemon binary -type DaemonProxy struct{} - -// NewDaemonProxy returns a new handler -func NewDaemonProxy() DaemonProxy { - return DaemonProxy{} -} - -// Command returns a cli command handler if one exists -func (p DaemonProxy) Command(name string) func(...string) error { - return map[string]func(...string) error{ - "daemon": p.CmdDaemon, - }[name] -} diff --git a/cmd/docker/daemon_none.go b/cmd/docker/daemon_none.go deleted file mode 100644 index d66bf1a546..0000000000 --- a/cmd/docker/daemon_none.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !daemon - -package main - -import ( - "fmt" - "runtime" - "strings" -) - -// CmdDaemon reports on an error on windows, because there is no exec -func (p DaemonProxy) CmdDaemon(args ...string) error { - return fmt.Errorf( - "`docker daemon` is not supported on %s. Please run `dockerd` directly", - strings.Title(runtime.GOOS)) -} diff --git a/cmd/docker/daemon_none_test.go b/cmd/docker/daemon_none_test.go deleted file mode 100644 index d75453bcc5..0000000000 --- a/cmd/docker/daemon_none_test.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !daemon - -package main - -import ( - "strings" - "testing" -) - -func TestCmdDaemon(t *testing.T) { - proxy := NewDaemonProxy() - err := proxy.CmdDaemon("--help") - if err == nil { - t.Fatal("Expected CmdDaemon to fail on Windows.") - } - - if !strings.Contains(err.Error(), "Please run `dockerd`") { - t.Fatalf("Expected an error about running dockerd, got %s", err) - } -} diff --git a/cmd/docker/daemon_unix.go b/cmd/docker/daemon_unix.go deleted file mode 100644 index d515b82914..0000000000 --- a/cmd/docker/daemon_unix.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build daemon - -package main - -import ( - "os" - "os/exec" - "path/filepath" - "syscall" -) - -// CmdDaemon execs dockerd with the same flags -func (p DaemonProxy) CmdDaemon(args ...string) error { - // Special case for handling `docker help daemon`. 
When pkg/mflag is removed - // we can support this on the daemon side, but that is not possible with - // pkg/mflag because it uses os.Exit(1) instead of returning an error on - // unexpected args. - if len(args) == 0 || args[0] != "--help" { - // Use os.Args[1:] so that "global" args are passed to dockerd - args = stripDaemonArg(os.Args[1:]) - } - - binaryPath, err := findDaemonBinary() - if err != nil { - return err - } - - return syscall.Exec( - binaryPath, - append([]string{daemonBinary}, args...), - os.Environ()) -} - -// findDaemonBinary looks for the path to the dockerd binary starting with -// the directory of the current executable (if one exists) and followed by $PATH -func findDaemonBinary() (string, error) { - execDirname := filepath.Dir(os.Args[0]) - if execDirname != "" { - binaryPath := filepath.Join(execDirname, daemonBinary) - if _, err := os.Stat(binaryPath); err == nil { - return binaryPath, nil - } - } - - return exec.LookPath(daemonBinary) -} - -// stripDaemonArg removes the `daemon` argument from the list -func stripDaemonArg(args []string) []string { - for i, arg := range args { - if arg == "daemon" { - return append(args[:i], args[i+1:]...) - } - } - return args -} diff --git a/cmd/docker/docker.go b/cmd/docker/docker.go deleted file mode 100644 index 0c727e32c9..0000000000 --- a/cmd/docker/docker.go +++ /dev/null @@ -1,118 +0,0 @@ -package main - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/client" - "github.com/docker/docker/cli" - "github.com/docker/docker/cli/cobraadaptor" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/dockerversion" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/utils" -) - -var ( - commonFlags = cliflags.InitCommonFlags() - clientFlags = initClientFlags(commonFlags) - flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage") - flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") -) - -func main() { - // Set terminal emulation based on platform as required. - stdin, stdout, stderr := term.StdStreams() - - logrus.SetOutput(stderr) - - flag.Merge(flag.CommandLine, clientFlags.FlagSet, commonFlags.FlagSet) - - cobraAdaptor := cobraadaptor.NewCobraAdaptor(clientFlags) - - flag.Usage = func() { - fmt.Fprint(stdout, "Usage: docker [OPTIONS] COMMAND [arg...]\n docker [ --help | -v | --version ]\n\n") - fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n") - - flag.CommandLine.SetOutput(stdout) - flag.PrintDefaults() - - help := "\nCommands:\n" - - dockerCommands := append(cli.DockerCommandUsage, cobraAdaptor.Usage()...) - for _, cmd := range sortCommands(dockerCommands) { - help += fmt.Sprintf(" %-10.10s%s\n", cmd.Name, cmd.Description) - } - - help += "\nRun 'docker COMMAND --help' for more information on a command." - fmt.Fprintf(stdout, "%s\n", help) - } - - flag.Parse() - - if *flVersion { - showVersion() - return - } - - if *flHelp { - // if global flag --help is present, regardless of what other options and commands there are, - // just print the usage. 
- flag.Usage() - return - } - - clientCli := client.NewDockerCli(stdin, stdout, stderr, clientFlags) - - c := cli.New(clientCli, NewDaemonProxy(), cobraAdaptor) - if err := c.Run(flag.Args()...); err != nil { - if sterr, ok := err.(cli.StatusError); ok { - if sterr.Status != "" { - fmt.Fprintln(stderr, sterr.Status) - } - // StatusError should only be used for errors, and all errors should - // have a non-zero exit status, so never exit with 0 - if sterr.StatusCode == 0 { - os.Exit(1) - } - os.Exit(sterr.StatusCode) - } - fmt.Fprintln(stderr, err) - os.Exit(1) - } -} - -func showVersion() { - if utils.ExperimentalBuild() { - fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.Version, dockerversion.GitCommit) - } else { - fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) - } -} - -func initClientFlags(commonFlags *cliflags.CommonFlags) *cliflags.ClientFlags { - clientFlags := &cliflags.ClientFlags{FlagSet: new(flag.FlagSet), Common: commonFlags} - client := clientFlags.FlagSet - client.StringVar(&clientFlags.ConfigDir, []string{"-config"}, cliconfig.ConfigDir(), "Location of client config files") - - clientFlags.PostParse = func() { - clientFlags.Common.PostParse() - - if clientFlags.ConfigDir != "" { - cliconfig.SetConfigDir(clientFlags.ConfigDir) - } - - if clientFlags.Common.TrustKey == "" { - clientFlags.Common.TrustKey = filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) - } - - if clientFlags.Common.Debug { - utils.EnableDebug() - } - } - return clientFlags -} diff --git a/cmd/docker/docker_test.go b/cmd/docker/docker_test.go deleted file mode 100644 index 5708c96cb5..0000000000 --- a/cmd/docker/docker_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "os" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/utils" -) - -func TestClientDebugEnabled(t *testing.T) { - defer utils.DisableDebug() - - clientFlags.Common.FlagSet.Parse([]string{"-D"}) - clientFlags.PostParse() - - if os.Getenv("DEBUG") != "1" { - t.Fatal("expected debug enabled, got false") - } - if logrus.GetLevel() != logrus.DebugLevel { - t.Fatalf("expected logrus debug level, got %v", logrus.GetLevel()) - } -} diff --git a/cmd/docker/docker_windows.go b/cmd/docker/docker_windows.go deleted file mode 100644 index 9bc507e20c..0000000000 --- a/cmd/docker/docker_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "sync/atomic" - - _ "github.com/docker/docker/autogen/winresources/docker" -) - -//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" - -var dummy uintptr - -func init() { - // Ensure that this import is not removed by the linker. This is used to - // ensure that shell32.dll is loaded by the system loader, preventing - // go#15286 from triggering on Nano Server TP5. 
- atomic.LoadUintptr(&dummy) -} diff --git a/cmd/docker/usage.go b/cmd/docker/usage.go deleted file mode 100644 index 792d178073..0000000000 --- a/cmd/docker/usage.go +++ /dev/null @@ -1,22 +0,0 @@ -package main - -import ( - "sort" - - "github.com/docker/docker/cli" -) - -type byName []cli.Command - -func (a byName) Len() int { return len(a) } -func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byName) Less(i, j int) bool { return a[i].Name < a[j].Name } - -// TODO(tiborvass): do not show 'daemon' on client-only binaries - -func sortCommands(commands []cli.Command) []cli.Command { - dockerCommands := make([]cli.Command, len(commands)) - copy(dockerCommands, commands) - sort.Sort(byName(dockerCommands)) - return dockerCommands -} diff --git a/cmd/docker/usage_test.go b/cmd/docker/usage_test.go deleted file mode 100644 index 0453265db8..0000000000 --- a/cmd/docker/usage_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package main - -import ( - "sort" - "testing" - - "github.com/docker/docker/cli" -) - -// Tests if the subcommands of docker are sorted -func TestDockerSubcommandsAreSorted(t *testing.T) { - if !sort.IsSorted(byName(cli.DockerCommandUsage)) { - t.Fatal("Docker subcommands are not in sorted order") - } -} diff --git a/cmd/dockerd/README.md b/cmd/dockerd/README.md deleted file mode 100644 index a8c20b3549..0000000000 --- a/cmd/dockerd/README.md +++ /dev/null @@ -1,3 +0,0 @@ -docker.go contains Docker daemon's main function. - -This file provides first line CLI argument parsing and environment variable setting. diff --git a/cmd/dockerd/daemon.go b/cmd/dockerd/daemon.go deleted file mode 100644 index 0d3cace22a..0000000000 --- a/cmd/dockerd/daemon.go +++ /dev/null @@ -1,447 +0,0 @@ -package main - -import ( - "crypto/tls" - "fmt" - "io" - "os" - "path/filepath" - "runtime" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/uuid" - "github.com/docker/docker/api" - apiserver "github.com/docker/docker/api/server" - "github.com/docker/docker/api/server/middleware" - "github.com/docker/docker/api/server/router" - "github.com/docker/docker/api/server/router/build" - "github.com/docker/docker/api/server/router/container" - "github.com/docker/docker/api/server/router/image" - "github.com/docker/docker/api/server/router/network" - swarmrouter "github.com/docker/docker/api/server/router/swarm" - systemrouter "github.com/docker/docker/api/server/router/system" - "github.com/docker/docker/api/server/router/volume" - "github.com/docker/docker/builder/dockerfile" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/daemon" - "github.com/docker/docker/daemon/cluster" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/authorization" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/listeners" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/pidfile" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/registry" - "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" - "github.com/docker/go-connections/tlsconfig" -) - -const ( - daemonConfigFileFlag = "-config-file" -) - -// DaemonCli represents the daemon CLI. 
-type DaemonCli struct { - *daemon.Config - commonFlags *cliflags.CommonFlags - configFile *string - - api *apiserver.Server - d *daemon.Daemon -} - -func presentInHelp(usage string) string { return usage } -func absentFromHelp(string) string { return "" } - -// NewDaemonCli returns a pre-configured daemon CLI -func NewDaemonCli() *DaemonCli { - // TODO(tiborvass): remove InstallFlags? - daemonConfig := new(daemon.Config) - daemonConfig.LogConfig.Config = make(map[string]string) - daemonConfig.ClusterOpts = make(map[string]string) - - if runtime.GOOS != "linux" { - daemonConfig.V2Only = true - } - - daemonConfig.InstallFlags(flag.CommandLine, presentInHelp) - configFile := flag.CommandLine.String([]string{daemonConfigFileFlag}, defaultDaemonConfigFile, "Daemon configuration file") - flag.CommandLine.Require(flag.Exact, 0) - - return &DaemonCli{ - Config: daemonConfig, - commonFlags: cliflags.InitCommonFlags(), - configFile: configFile, - } -} - -func migrateKey() (err error) { - // Migrate trust key if exists at ~/.docker/key.json and owned by current user - oldPath := filepath.Join(cliconfig.ConfigDir(), cliflags.DefaultTrustKeyFile) - newPath := filepath.Join(getDaemonConfDir(), cliflags.DefaultTrustKeyFile) - if _, statErr := os.Stat(newPath); os.IsNotExist(statErr) && currentUserIsOwner(oldPath) { - defer func() { - // Ensure old path is removed if no error occurred - if err == nil { - err = os.Remove(oldPath) - } else { - logrus.Warnf("Key migration failed, key file not removed at %s", oldPath) - os.Remove(newPath) - } - }() - - if err := system.MkdirAll(getDaemonConfDir(), os.FileMode(0644)); err != nil { - return fmt.Errorf("Unable to create daemon configuration directory: %s", err) - } - - newFile, err := os.OpenFile(newPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return fmt.Errorf("error creating key file %q: %s", newPath, err) - } - defer newFile.Close() - - oldFile, err := os.Open(oldPath) - if err != nil { - return fmt.Errorf("error opening key file %q: %s", oldPath, err) - } - defer oldFile.Close() - - if _, err := io.Copy(newFile, oldFile); err != nil { - return fmt.Errorf("error copying key: %s", err) - } - - logrus.Infof("Migrated key from %s to %s", oldPath, newPath) - } - - return nil -} - -func (cli *DaemonCli) start() (err error) { - stopc := make(chan bool) - defer close(stopc) - - // warn from uuid package when running the daemon - uuid.Loggerf = logrus.Warnf - - flags := flag.CommandLine - cli.commonFlags.PostParse() - - if cli.commonFlags.TrustKey == "" { - cli.commonFlags.TrustKey = filepath.Join(getDaemonConfDir(), cliflags.DefaultTrustKeyFile) - } - cliConfig, err := loadDaemonCliConfig(cli.Config, flags, cli.commonFlags, *cli.configFile) - if err != nil { - return err - } - cli.Config = cliConfig - - if cli.Config.Debug { - utils.EnableDebug() - } - - if utils.ExperimentalBuild() { - logrus.Warn("Running experimental build") - } - - logrus.SetFormatter(&logrus.TextFormatter{ - TimestampFormat: jsonlog.RFC3339NanoFixed, - DisableColors: cli.Config.RawLogs, - }) - - if err := setDefaultUmask(); err != nil { - return fmt.Errorf("Failed to set umask: %v", err) - } - - if len(cli.LogConfig.Config) > 0 { - if err := logger.ValidateLogOpts(cli.LogConfig.Type, cli.LogConfig.Config); err != nil { - return fmt.Errorf("Failed to set log opts: %v", err) - } - } - - if cli.Pidfile != "" { - pf, err := pidfile.New(cli.Pidfile) - if err != nil { - return fmt.Errorf("Error starting daemon: %v", err) - } - defer func() { - if err := pf.Remove(); err != nil { 
- logrus.Error(err) - } - }() - } - - serverConfig := &apiserver.Config{ - Logging: true, - SocketGroup: cli.Config.SocketGroup, - Version: dockerversion.Version, - EnableCors: cli.Config.EnableCors, - CorsHeaders: cli.Config.CorsHeaders, - } - - if cli.Config.TLS { - tlsOptions := tlsconfig.Options{ - CAFile: cli.Config.CommonTLSOptions.CAFile, - CertFile: cli.Config.CommonTLSOptions.CertFile, - KeyFile: cli.Config.CommonTLSOptions.KeyFile, - } - - if cli.Config.TLSVerify { - // server requires and verifies client's certificate - tlsOptions.ClientAuth = tls.RequireAndVerifyClientCert - } - tlsConfig, err := tlsconfig.Server(tlsOptions) - if err != nil { - return err - } - serverConfig.TLSConfig = tlsConfig - } - - if len(cli.Config.Hosts) == 0 { - cli.Config.Hosts = make([]string, 1) - } - - api := apiserver.New(serverConfig) - cli.api = api - - for i := 0; i < len(cli.Config.Hosts); i++ { - var err error - if cli.Config.Hosts[i], err = opts.ParseHost(cli.Config.TLS, cli.Config.Hosts[i]); err != nil { - return fmt.Errorf("error parsing -H %s : %v", cli.Config.Hosts[i], err) - } - - protoAddr := cli.Config.Hosts[i] - protoAddrParts := strings.SplitN(protoAddr, "://", 2) - if len(protoAddrParts) != 2 { - return fmt.Errorf("bad format %s, expected PROTO://ADDR", protoAddr) - } - - proto := protoAddrParts[0] - addr := protoAddrParts[1] - - // It's a bad idea to bind to TCP without tlsverify. - if proto == "tcp" && (serverConfig.TLSConfig == nil || serverConfig.TLSConfig.ClientAuth != tls.RequireAndVerifyClientCert) { - logrus.Warn("[!] DON'T BIND ON ANY IP ADDRESS WITHOUT setting -tlsverify IF YOU DON'T KNOW WHAT YOU'RE DOING [!]") - } - ls, err := listeners.Init(proto, addr, serverConfig.SocketGroup, serverConfig.TLSConfig) - if err != nil { - return err - } - ls = wrapListeners(proto, ls) - // If we're binding to a TCP port, make sure that a container doesn't try to use it. - if proto == "tcp" { - if err := allocateDaemonPort(addr); err != nil { - return err - } - } - logrus.Debugf("Listener created for HTTP on %s (%s)", protoAddrParts[0], protoAddrParts[1]) - api.Accept(protoAddrParts[1], ls...) - } - - if err := migrateKey(); err != nil { - return err - } - cli.TrustKeyPath = cli.commonFlags.TrustKey - - registryService := registry.NewService(cli.Config.ServiceOptions) - containerdRemote, err := libcontainerd.New(cli.getLibcontainerdRoot(), cli.getPlatformRemoteOptions()...) 
-	if err != nil {
-		return err
-	}
-	signal.Trap(func() {
-		cli.stop()
-		<-stopc // wait for daemonCli.start() to return
-	})
-
-	d, err := daemon.NewDaemon(cli.Config, registryService, containerdRemote)
-	if err != nil {
-		return fmt.Errorf("Error starting daemon: %v", err)
-	}
-
-	name, _ := os.Hostname()
-
-	c, err := cluster.New(cluster.Config{
-		Root:                   cli.Config.Root,
-		Name:                   name,
-		Backend:                d,
-		NetworkSubnetsProvider: d,
-		DefaultAdvertiseAddr:   cli.Config.SwarmDefaultAdvertiseAddr,
-	})
-	if err != nil {
-		logrus.Fatalf("Error creating cluster component: %v", err)
-	}
-
-	logrus.Info("Daemon has completed initialization")
-
-	logrus.WithFields(logrus.Fields{
-		"version":     dockerversion.Version,
-		"commit":      dockerversion.GitCommit,
-		"graphdriver": d.GraphDriverName(),
-	}).Info("Docker daemon")
-
-	cli.initMiddlewares(api, serverConfig)
-	initRouter(api, d, c)
-
-	cli.d = d
-	cli.setupConfigReloadTrap()
-
-	// The serve API routine never exits unless an error occurs.
-	// Start it as a goroutine and wait on it so the daemon doesn't exit.
-	serveAPIWait := make(chan error)
-	go api.Wait(serveAPIWait)
-
-	// After the daemon has finished setting up, notify systemd.
-	notifySystem()
-
-	// Daemon is fully initialized and handling API traffic.
-	// Wait for the serve API goroutine to complete.
-	errAPI := <-serveAPIWait
-	c.Cleanup()
-	shutdownDaemon(d, 15)
-	containerdRemote.Cleanup()
-	if errAPI != nil {
-		return fmt.Errorf("Shutting down due to ServeAPI error: %v", errAPI)
-	}
-
-	return nil
-}
-
-func (cli *DaemonCli) reloadConfig() {
-	reload := func(config *daemon.Config) {
-		if err := cli.d.Reload(config); err != nil {
-			logrus.Errorf("Error reconfiguring the daemon: %v", err)
-			return
-		}
-		if config.IsValueSet("debug") {
-			debugEnabled := utils.IsDebugEnabled()
-			switch {
-			case debugEnabled && !config.Debug: // disable debug
-				utils.DisableDebug()
-				cli.api.DisableProfiler()
-			case config.Debug && !debugEnabled: // enable debug
-				utils.EnableDebug()
-				cli.api.EnableProfiler()
-			}
-		}
-	}
-
-	if err := daemon.ReloadConfiguration(*cli.configFile, flag.CommandLine, reload); err != nil {
-		logrus.Error(err)
-	}
-}
-
-func (cli *DaemonCli) stop() {
-	cli.api.Close()
-}
-
-// shutdownDaemon wraps daemon.Shutdown() with a timeout in case
-// d.Shutdown() takes too long to kill a container or, worse, blocks
-// entirely.
-func shutdownDaemon(d *daemon.Daemon, timeout time.Duration) {
-	ch := make(chan struct{})
-	go func() {
-		d.Shutdown()
-		close(ch)
-	}()
-	select {
-	case <-ch:
-		logrus.Debug("Clean shutdown succeeded")
-	case <-time.After(timeout * time.Second):
-		logrus.Error("Force shutdown daemon")
-	}
-}
-
-func loadDaemonCliConfig(config *daemon.Config, flags *flag.FlagSet, commonConfig *cliflags.CommonFlags, configFile string) (*daemon.Config, error) {
-	config.Debug = commonConfig.Debug
-	config.Hosts = commonConfig.Hosts
-	config.LogLevel = commonConfig.LogLevel
-	config.TLS = commonConfig.TLS
-	config.TLSVerify = commonConfig.TLSVerify
-	config.CommonTLSOptions = daemon.CommonTLSOptions{}
-
-	if commonConfig.TLSOptions != nil {
-		config.CommonTLSOptions.CAFile = commonConfig.TLSOptions.CAFile
-		config.CommonTLSOptions.CertFile = commonConfig.TLSOptions.CertFile
-		config.CommonTLSOptions.KeyFile = commonConfig.TLSOptions.KeyFile
-	}
-
-	if configFile != "" {
-		c, err := daemon.MergeDaemonConfigurations(config, flags, configFile)
-		if err != nil {
-			if flags.IsSet(daemonConfigFileFlag) || !os.IsNotExist(err) {
-				return nil, fmt.Errorf("unable to 
configure the Docker daemon with file %s: %v\n", configFile, err) - } - } - // the merged configuration can be nil if the config file didn't exist. - // leave the current configuration as it is if when that happens. - if c != nil { - config = c - } - } - - if err := daemon.ValidateConfiguration(config); err != nil { - return nil, err - } - - // Regardless of whether the user sets it to true or false, if they - // specify TLSVerify at all then we need to turn on TLS - if config.IsValueSet(cliflags.TLSVerifyKey) { - config.TLS = true - } - - // ensure that the log level is the one set after merging configurations - cliflags.SetDaemonLogLevel(config.LogLevel) - - return config, nil -} - -func initRouter(s *apiserver.Server, d *daemon.Daemon, c *cluster.Cluster) { - decoder := runconfig.ContainerDecoder{} - - routers := []router.Router{ - container.NewRouter(d, decoder), - image.NewRouter(d, decoder), - systemrouter.NewRouter(d, c), - volume.NewRouter(d), - build.NewRouter(dockerfile.NewBuildManager(d)), - swarmrouter.NewRouter(c), - } - if d.NetworkControllerEnabled() { - routers = append(routers, network.NewRouter(d, c)) - } - routers = addExperimentalRouters(routers) - - s.InitRouter(utils.IsDebugEnabled(), routers...) -} - -func (cli *DaemonCli) initMiddlewares(s *apiserver.Server, cfg *apiserver.Config) { - v := cfg.Version - - vm := middleware.NewVersionMiddleware(v, api.DefaultVersion, api.MinVersion) - s.UseMiddleware(vm) - - if cfg.EnableCors { - c := middleware.NewCORSMiddleware(cfg.CorsHeaders) - s.UseMiddleware(c) - } - - u := middleware.NewUserAgentMiddleware(v) - s.UseMiddleware(u) - - if len(cli.Config.AuthorizationPlugins) > 0 { - authZPlugins := authorization.NewPlugins(cli.Config.AuthorizationPlugins) - handleAuthorization := authorization.NewMiddleware(authZPlugins) - s.UseMiddleware(handleAuthorization) - } -} diff --git a/cmd/dockerd/daemon_freebsd.go b/cmd/dockerd/daemon_freebsd.go deleted file mode 100644 index 623aaf4b09..0000000000 --- a/cmd/dockerd/daemon_freebsd.go +++ /dev/null @@ -1,5 +0,0 @@ -package main - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { -} diff --git a/cmd/dockerd/daemon_linux.go b/cmd/dockerd/daemon_linux.go deleted file mode 100644 index a556daa187..0000000000 --- a/cmd/dockerd/daemon_linux.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux - -package main - -import systemdDaemon "github.com/coreos/go-systemd/daemon" - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { - // Tell the init daemon we are accepting requests - go systemdDaemon.SdNotify("READY=1") -} diff --git a/cmd/dockerd/daemon_solaris.go b/cmd/dockerd/daemon_solaris.go deleted file mode 100644 index a0f4908601..0000000000 --- a/cmd/dockerd/daemon_solaris.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build solaris - -package main - -import ( - "fmt" - "net" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/system" -) - -const defaultDaemonConfigFile = "" - -// currentUserIsOwner checks whether the current user is the owner of the given -// file. 
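A subtlety of loadDaemonCliConfig above deserves isolating: for tlsverify, mere presence of the key, on the command line or in daemon.json, enables TLS regardless of the value it carries. A minimal standalone sketch of that presence check, using a plain JSON map in place of daemon.Config's IsValueSet bookkeeping:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    func main() {
        for _, raw := range []string{`{}`, `{"tlsverify": false}`, `{"tlsverify": true}`} {
            var conf map[string]interface{}
            if err := json.Unmarshal([]byte(raw), &conf); err != nil {
                panic(err)
            }
            // Mirrors: if config.IsValueSet(cliflags.TLSVerifyKey) { config.TLS = true }
            _, present := conf["tlsverify"]
            fmt.Printf("%-22s -> TLS enabled: %v\n", raw, present)
        }
    }

TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse below pins exactly this behavior down.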
-func currentUserIsOwner(f string) bool { - if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { - if int(fileInfo.UID()) == os.Getuid() { - return true - } - } - return false -} - -// setDefaultUmask sets the umask to 0022 to avoid problems -// caused by custom umask -func setDefaultUmask() error { - desiredUmask := 0022 - syscall.Umask(desiredUmask) - if umask := syscall.Umask(desiredUmask); umask != desiredUmask { - return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) - } - - return nil -} - -func getDaemonConfDir() string { - return "/etc/docker" -} - -// setupConfigReloadTrap configures the USR2 signal to reload the configuration. -func (cli *DaemonCli) setupConfigReloadTrap() { -} - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { -} - -func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { - opts := []libcontainerd.RemoteOption{} - return opts -} - -// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to -// store their state. -func (cli *DaemonCli) getLibcontainerdRoot() string { - return filepath.Join(cli.Config.ExecRoot, "libcontainerd") -} - -func allocateDaemonPort(addr string) error { - return nil -} - -// notifyShutdown is called after the daemon shuts down but before the process exits. -func notifyShutdown(err error) { -} - -func wrapListeners(proto string, ls []net.Listener) []net.Listener { - return ls -} diff --git a/cmd/dockerd/daemon_test.go b/cmd/dockerd/daemon_test.go deleted file mode 100644 index c16e11aec9..0000000000 --- a/cmd/dockerd/daemon_test.go +++ /dev/null @@ -1,294 +0,0 @@ -package main - -import ( - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/Sirupsen/logrus" - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/daemon" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/mflag" - "github.com/docker/go-connections/tlsconfig" -) - -func TestLoadDaemonCliConfigWithoutOverriding(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - Debug: true, - } - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - loadedConfig, err := loadDaemonCliConfig(c, flags, common, "/tmp/fooobarbaz") - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - if !loadedConfig.Debug { - t.Fatalf("expected debug to be copied from the common flags, got false") - } -} - -func TestLoadDaemonCliConfigWithTLS(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - TLS: true, - TLSOptions: &tlsconfig.Options{ - CAFile: "/tmp/ca.pem", - }, - } - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - loadedConfig, err := loadDaemonCliConfig(c, flags, common, "/tmp/fooobarbaz") - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - if loadedConfig.CommonTLSOptions.CAFile != "/tmp/ca.pem" { - t.Fatalf("expected /tmp/ca.pem, got %s: %q", loadedConfig.CommonTLSOptions.CAFile, loadedConfig) - } -} - -func TestLoadDaemonCliConfigWithConflicts(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"labels": ["l3=foo"]}`)) - f.Close() - - var labels []string - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - 
flags.String([]string{daemonConfigFileFlag}, "", "") - flags.Var(opts.NewNamedListOptsRef("labels", &labels, opts.ValidateLabel), []string{"-label"}, "") - - flags.Set(daemonConfigFileFlag, configFile) - if err := flags.Set("-label", "l1=bar"); err != nil { - t.Fatal(err) - } - if err := flags.Set("-label", "l2=baz"); err != nil { - t.Fatal(err) - } - - _, err = loadDaemonCliConfig(c, flags, common, configFile) - if err == nil { - t.Fatalf("expected configuration error, got nil") - } - if !strings.Contains(err.Error(), "labels") { - t.Fatalf("expected labels conflict, got %v", err) - } -} - -func TestLoadDaemonCliConfigWithTLSVerify(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - TLSOptions: &tlsconfig.Options{ - CAFile: "/tmp/ca.pem", - }, - } - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"tlsverify": true}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.Bool([]string{"-tlsverify"}, false, "") - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - - if !loadedConfig.TLS { - t.Fatalf("expected TLS enabled, got %q", loadedConfig) - } -} - -func TestLoadDaemonCliConfigWithExplicitTLSVerifyFalse(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - TLSOptions: &tlsconfig.Options{ - CAFile: "/tmp/ca.pem", - }, - } - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"tlsverify": false}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.Bool([]string{"-tlsverify"}, false, "") - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - - if !loadedConfig.TLS { - t.Fatalf("expected TLS enabled, got %q", loadedConfig) - } -} - -func TestLoadDaemonCliConfigWithoutTLSVerify(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - TLSOptions: &tlsconfig.Options{ - CAFile: "/tmp/ca.pem", - }, - } - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - - if loadedConfig.TLS { - t.Fatalf("expected TLS disabled, got %q", loadedConfig) - } -} - -func TestLoadDaemonCliConfigWithLogLevel(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"log-level": "warn"}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{"-log-level"}, "", "") - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - if loadedConfig.LogLevel != "warn" { - 
t.Fatalf("expected warn log level, got %v", loadedConfig.LogLevel) - } - - if logrus.GetLevel() != logrus.WarnLevel { - t.Fatalf("expected warn log level, got %v", logrus.GetLevel()) - } -} - -func TestLoadDaemonConfigWithEmbeddedOptions(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{"-tlscacert"}, "", "") - flags.String([]string{"-log-driver"}, "", "") - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"tlscacert": "/etc/certs/ca.pem", "log-driver": "syslog"}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatal("expected configuration, got nil") - } - if loadedConfig.CommonTLSOptions.CAFile != "/etc/certs/ca.pem" { - t.Fatalf("expected CA file path /etc/certs/ca.pem, got %v", loadedConfig.CommonTLSOptions.CAFile) - } - if loadedConfig.LogConfig.Type != "syslog" { - t.Fatalf("expected LogConfig type syslog, got %v", loadedConfig.LogConfig.Type) - } -} - -func TestLoadDaemonConfigWithRegistryOptions(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - c.ServiceOptions.InstallCliFlags(flags, absentFromHelp) - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - configFile := f.Name() - defer os.Remove(configFile) - - f.Write([]byte(`{"registry-mirrors": ["https://mirrors.docker.com"], "insecure-registries": ["https://insecure.docker.com"], "disable-legacy-registry": true}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatal("expected configuration, got nil") - } - - m := loadedConfig.Mirrors - if len(m) != 1 { - t.Fatalf("expected 1 mirror, got %d", len(m)) - } - - r := loadedConfig.InsecureRegistries - if len(r) != 1 { - t.Fatalf("expected 1 insecure registries, got %d", len(r)) - } - - if !loadedConfig.V2Only { - t.Fatal("expected disable-legacy-registry to be true, got false") - } -} diff --git a/cmd/dockerd/daemon_unix.go b/cmd/dockerd/daemon_unix.go deleted file mode 100644 index 114e9426fd..0000000000 --- a/cmd/dockerd/daemon_unix.go +++ /dev/null @@ -1,131 +0,0 @@ -// +build !windows,!solaris - -package main - -import ( - "fmt" - "net" - "os" - "os/signal" - "path/filepath" - "strconv" - "syscall" - - "github.com/docker/docker/cmd/dockerd/hack" - "github.com/docker/docker/daemon" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/system" - "github.com/docker/libnetwork/portallocator" -) - -const defaultDaemonConfigFile = "/etc/docker/daemon.json" - -// currentUserIsOwner checks whether the current user is the owner of the given -// file. 
-func currentUserIsOwner(f string) bool { - if fileInfo, err := system.Stat(f); err == nil && fileInfo != nil { - if int(fileInfo.UID()) == os.Getuid() { - return true - } - } - return false -} - -// setDefaultUmask sets the umask to 0022 to avoid problems -// caused by custom umask -func setDefaultUmask() error { - desiredUmask := 0022 - syscall.Umask(desiredUmask) - if umask := syscall.Umask(desiredUmask); umask != desiredUmask { - return fmt.Errorf("failed to set umask: expected %#o, got %#o", desiredUmask, umask) - } - - return nil -} - -func getDaemonConfDir() string { - return "/etc/docker" -} - -// setupConfigReloadTrap configures the USR2 signal to reload the configuration. -func (cli *DaemonCli) setupConfigReloadTrap() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGHUP) - go func() { - for range c { - cli.reloadConfig() - } - }() -} - -func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { - opts := []libcontainerd.RemoteOption{ - libcontainerd.WithDebugLog(cli.Config.Debug), - libcontainerd.WithOOMScore(cli.Config.OOMScoreAdjust), - } - if cli.Config.ContainerdAddr != "" { - opts = append(opts, libcontainerd.WithRemoteAddr(cli.Config.ContainerdAddr)) - } else { - opts = append(opts, libcontainerd.WithStartDaemon(true)) - } - if daemon.UsingSystemd(cli.Config) { - args := []string{"--systemd-cgroup=true"} - opts = append(opts, libcontainerd.WithRuntimeArgs(args)) - } - if cli.Config.LiveRestore { - opts = append(opts, libcontainerd.WithLiveRestore(true)) - } - opts = append(opts, libcontainerd.WithRuntimePath(daemon.DefaultRuntimeBinary)) - return opts -} - -// getLibcontainerdRoot gets the root directory for libcontainerd/containerd to -// store their state. -func (cli *DaemonCli) getLibcontainerdRoot() string { - return filepath.Join(cli.Config.ExecRoot, "libcontainerd") -} - -// allocateDaemonPort ensures that there are no containers -// that try to use any port allocated for the docker server. -func allocateDaemonPort(addr string) error { - host, port, err := net.SplitHostPort(addr) - if err != nil { - return err - } - - intPort, err := strconv.Atoi(port) - if err != nil { - return err - } - - var hostIPs []net.IP - if parsedIP := net.ParseIP(host); parsedIP != nil { - hostIPs = append(hostIPs, parsedIP) - } else if hostIPs, err = net.LookupIP(host); err != nil { - return fmt.Errorf("failed to lookup %s address in host specification", host) - } - - pa := portallocator.Get() - for _, hostIP := range hostIPs { - if _, err := pa.RequestPort(hostIP, "tcp", intPort); err != nil { - return fmt.Errorf("failed to allocate daemon listening port %d (err: %v)", intPort, err) - } - } - return nil -} - -// notifyShutdown is called after the daemon shuts down but before the process exits. 
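setupConfigReloadTrap above is a stock signal.Notify loop. Isolated, with a print standing in for cli.reloadConfig(), it reduces to:

    package main

    import (
        "fmt"
        "os"
        "os/signal"
        "syscall"
        "time"
    )

    func main() {
        c := make(chan os.Signal, 1)
        signal.Notify(c, syscall.SIGHUP)
        go func() {
            for range c {
                fmt.Println("SIGHUP received: reloading configuration")
            }
        }()

        // Deliver a SIGHUP to ourselves to demonstrate the trap, then give
        // the goroutine a moment to run before exiting.
        syscall.Kill(os.Getpid(), syscall.SIGHUP)
        time.Sleep(100 * time.Millisecond)
    }

A buffer of one is enough here: a reload requested while another is already pending can simply be coalesced with it.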
-func notifyShutdown(err error) { -} - -func wrapListeners(proto string, ls []net.Listener) []net.Listener { - switch proto { - case "unix": - ls[0] = &hack.MalformedHostHeaderOverride{ls[0]} - case "fd": - for i := range ls { - ls[i] = &hack.MalformedHostHeaderOverride{ls[i]} - } - } - return ls -} diff --git a/cmd/dockerd/daemon_unix_test.go b/cmd/dockerd/daemon_unix_test.go deleted file mode 100644 index a72468eddb..0000000000 --- a/cmd/dockerd/daemon_unix_test.go +++ /dev/null @@ -1,212 +0,0 @@ -// +build !windows - -package main - -import ( - "io/ioutil" - "testing" - - cliflags "github.com/docker/docker/cli/flags" - "github.com/docker/docker/daemon" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/mflag" -) - -func TestLoadDaemonCliConfigWithDaemonFlags(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{ - Debug: true, - LogLevel: "info", - } - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"log-opts": {"max-size": "1k"}}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{daemonConfigFileFlag}, "", "") - flags.BoolVar(&c.EnableSelinuxSupport, []string{"-selinux-enabled"}, true, "") - flags.StringVar(&c.LogConfig.Type, []string{"-log-driver"}, "json-file", "") - flags.Var(opts.NewNamedMapOpts("log-opts", c.LogConfig.Config, nil), []string{"-log-opt"}, "") - flags.Set(daemonConfigFileFlag, configFile) - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - if !loadedConfig.Debug { - t.Fatalf("expected debug mode, got false") - } - if loadedConfig.LogLevel != "info" { - t.Fatalf("expected info log level, got %v", loadedConfig.LogLevel) - } - if !loadedConfig.EnableSelinuxSupport { - t.Fatalf("expected enabled selinux support, got disabled") - } - if loadedConfig.LogConfig.Type != "json-file" { - t.Fatalf("expected LogConfig type json-file, got %v", loadedConfig.LogConfig.Type) - } - if maxSize := loadedConfig.LogConfig.Config["max-size"]; maxSize != "1k" { - t.Fatalf("expected log max-size `1k`, got %s", maxSize) - } -} - -func TestLoadDaemonConfigWithNetwork(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{"-bip"}, "", "") - flags.String([]string{"-ip"}, "", "") - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"bip": "127.0.0.2", "ip": "127.0.0.1"}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatalf("expected configuration %v, got nil", c) - } - if loadedConfig.IP != "127.0.0.2" { - t.Fatalf("expected IP 127.0.0.2, got %v", loadedConfig.IP) - } - if loadedConfig.DefaultIP.String() != "127.0.0.1" { - t.Fatalf("expected DefaultIP 127.0.0.1, got %s", loadedConfig.DefaultIP) - } -} - -func TestLoadDaemonConfigWithMapOptions(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - - flags.Var(opts.NewNamedMapOpts("cluster-store-opts", c.ClusterOpts, nil), []string{"-cluster-store-opt"}, "") - flags.Var(opts.NewNamedMapOpts("log-opts", c.LogConfig.Config, nil), []string{"-log-opt"}, "") - - f, err := 
ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{ - "cluster-store-opts": {"kv.cacertfile": "/var/lib/docker/discovery_certs/ca.pem"}, - "log-opts": {"tag": "test"} -}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatal("expected configuration, got nil") - } - if loadedConfig.ClusterOpts == nil { - t.Fatal("expected cluster options, got nil") - } - - expectedPath := "/var/lib/docker/discovery_certs/ca.pem" - if caPath := loadedConfig.ClusterOpts["kv.cacertfile"]; caPath != expectedPath { - t.Fatalf("expected %s, got %s", expectedPath, caPath) - } - - if loadedConfig.LogConfig.Config == nil { - t.Fatal("expected log config options, got nil") - } - if tag := loadedConfig.LogConfig.Config["tag"]; tag != "test" { - t.Fatalf("expected log tag `test`, got %s", tag) - } -} - -func TestLoadDaemonConfigWithTrueDefaultValues(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.BoolVar(&c.EnableUserlandProxy, []string{"-userland-proxy"}, true, "") - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - if err := flags.ParseFlags([]string{}, false); err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{ - "userland-proxy": false -}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatal("expected configuration, got nil") - } - - if loadedConfig.EnableUserlandProxy { - t.Fatal("expected userland proxy to be disabled, got enabled") - } - - // make sure reloading doesn't generate configuration - // conflicts after normalizing boolean values. 
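The userland-proxy tests here and below circle one pitfall: with a default of true and a plain bool, "explicitly false" and "not set" are indistinguishable after parsing. A standalone sketch of the distinction using a *bool (a hypothetical stand-in; the daemon tracks which flags were set rather than using pointers):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type proxyConfig struct {
        // A *bool stays nil when the key is absent, so an explicit false survives.
        UserlandProxy *bool `json:"userland-proxy"`
    }

    func main() {
        for _, raw := range []string{`{}`, `{"userland-proxy": false}`} {
            var c proxyConfig
            if err := json.Unmarshal([]byte(raw), &c); err != nil {
                panic(err)
            }
            enabled := true // the daemon's default
            if c.UserlandProxy != nil {
                enabled = *c.UserlandProxy
            }
            fmt.Printf("%-26s -> userland proxy enabled: %v\n", raw, enabled)
        }
    }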
- err = daemon.ReloadConfiguration(configFile, flags, func(reloadedConfig *daemon.Config) { - if reloadedConfig.EnableUserlandProxy { - t.Fatal("expected userland proxy to be disabled, got enabled") - } - }) - if err != nil { - t.Fatal(err) - } -} - -func TestLoadDaemonConfigWithTrueDefaultValuesLeaveDefaults(t *testing.T) { - c := &daemon.Config{} - common := &cliflags.CommonFlags{} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.BoolVar(&c.EnableUserlandProxy, []string{"-userland-proxy"}, true, "") - - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - if err := flags.ParseFlags([]string{}, false); err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{}`)) - f.Close() - - loadedConfig, err := loadDaemonCliConfig(c, flags, common, configFile) - if err != nil { - t.Fatal(err) - } - if loadedConfig == nil { - t.Fatal("expected configuration, got nil") - } - - if !loadedConfig.EnableUserlandProxy { - t.Fatal("expected userland proxy to be enabled, got disabled") - } -} diff --git a/cmd/dockerd/daemon_windows.go b/cmd/dockerd/daemon_windows.go deleted file mode 100644 index 9772f2b2ef..0000000000 --- a/cmd/dockerd/daemon_windows.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "fmt" - "net" - "os" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/system" -) - -var defaultDaemonConfigFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker" + string(os.PathSeparator) + "config" + string(os.PathSeparator) + "daemon.json" - -// currentUserIsOwner checks whether the current user is the owner of the given -// file. -func currentUserIsOwner(f string) bool { - return false -} - -// setDefaultUmask doesn't do anything on windows -func setDefaultUmask() error { - return nil -} - -func getDaemonConfDir() string { - return os.Getenv("PROGRAMDATA") + `\docker\config` -} - -// notifySystem sends a message to the host when the server is ready to be used -func notifySystem() { - if service != nil { - err := service.started() - if err != nil { - logrus.Fatal(err) - } - } -} - -// notifyShutdown is called after the daemon shuts down but before the process exits. -func notifyShutdown(err error) { - if service != nil { - service.stopped(err) - } -} - -// setupConfigReloadTrap configures a Win32 event to reload the configuration. -func (cli *DaemonCli) setupConfigReloadTrap() { - go func() { - sa := syscall.SecurityAttributes{ - Length: 0, - } - ev := "Global\\docker-daemon-config-" + fmt.Sprint(os.Getpid()) - if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { - logrus.Debugf("Config reload - waiting signal at %s", ev) - for { - syscall.WaitForSingleObject(h, syscall.INFINITE) - cli.reloadConfig() - } - } - }() -} - -func (cli *DaemonCli) getPlatformRemoteOptions() []libcontainerd.RemoteOption { - return nil -} - -// getLibcontainerdRoot gets the root directory for libcontainerd to store its -// state. The Windows libcontainerd implementation does not need to write a spec -// or state to disk, so this is a no-op. 
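The defaultDaemonConfigFile assembly above concatenates os.PathSeparator by hand; the equivalent built with filepath.Join, shown for contrast:

    package main

    import (
        "fmt"
        "os"
        "path/filepath"
    )

    func main() {
        // On Windows this yields e.g. C:\ProgramData\docker\config\daemon.json;
        // environment variable lookup is case-insensitive there.
        fmt.Println(filepath.Join(os.Getenv("programdata"), "docker", "config", "daemon.json"))
    }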
-func (cli *DaemonCli) getLibcontainerdRoot() string { - return "" -} - -func allocateDaemonPort(addr string) error { - return nil -} - -func wrapListeners(proto string, ls []net.Listener) []net.Listener { - return ls -} diff --git a/cmd/dockerd/docker.go b/cmd/dockerd/docker.go deleted file mode 100644 index b1c9b803a1..0000000000 --- a/cmd/dockerd/docker.go +++ /dev/null @@ -1,82 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/dockerversion" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/term" - "github.com/docker/docker/utils" -) - -var ( - daemonCli = NewDaemonCli() - flHelp = flag.Bool([]string{"h", "-help"}, false, "Print usage") - flVersion = flag.Bool([]string{"v", "-version"}, false, "Print version information and quit") -) - -func main() { - if reexec.Init() { - return - } - - // Set terminal emulation based on platform as required. - _, stdout, stderr := term.StdStreams() - - logrus.SetOutput(stderr) - - flag.Merge(flag.CommandLine, daemonCli.commonFlags.FlagSet) - - flag.Usage = func() { - fmt.Fprint(stdout, "Usage: dockerd [OPTIONS]\n\n") - fmt.Fprint(stdout, "A self-sufficient runtime for containers.\n\nOptions:\n") - - flag.CommandLine.SetOutput(stdout) - flag.PrintDefaults() - } - flag.CommandLine.ShortUsage = func() { - fmt.Fprint(stderr, "\nUsage:\tdockerd [OPTIONS]\n") - } - - if err := flag.CommandLine.ParseFlags(os.Args[1:], false); err != nil { - os.Exit(1) - } - - if *flVersion { - showVersion() - return - } - - if *flHelp { - // if global flag --help is present, regardless of what other options and commands there are, - // just print the usage. - flag.Usage() - return - } - - // On Windows, this may be launching as a service or with an option to - // register the service. - stop, err := initService() - if err != nil { - logrus.Fatal(err) - } - - if !stop { - err = daemonCli.start() - notifyShutdown(err) - if err != nil { - logrus.Fatal(err) - } - } -} - -func showVersion() { - if utils.ExperimentalBuild() { - fmt.Printf("Docker version %s, build %s, experimental\n", dockerversion.Version, dockerversion.GitCommit) - } else { - fmt.Printf("Docker version %s, build %s\n", dockerversion.Version, dockerversion.GitCommit) - } -} diff --git a/cmd/dockerd/docker_windows.go b/cmd/dockerd/docker_windows.go deleted file mode 100644 index 19c5587cb6..0000000000 --- a/cmd/dockerd/docker_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package main - -import ( - "sync/atomic" - - _ "github.com/docker/docker/autogen/winresources/dockerd" -) - -//go:cgo_import_dynamic main.dummy CommandLineToArgvW%2 "shell32.dll" - -var dummy uintptr - -func init() { - // Ensure that this import is not removed by the linker. This is used to - // ensure that shell32.dll is loaded by the system loader, preventing - // go#15286 from triggering on Nano Server TP5. - atomic.LoadUintptr(&dummy) -} diff --git a/cmd/dockerd/hack/malformed_host_override.go b/cmd/dockerd/hack/malformed_host_override.go deleted file mode 100644 index d4aa3ddd73..0000000000 --- a/cmd/dockerd/hack/malformed_host_override.go +++ /dev/null @@ -1,121 +0,0 @@ -// +build !windows - -package hack - -import "net" - -// MalformedHostHeaderOverride is a wrapper to be able -// to overcome the 400 Bad request coming from old docker -// clients that send an invalid Host header. 
-type MalformedHostHeaderOverride struct {
-	net.Listener
-}
-
-// MalformedHostHeaderOverrideConn wraps the underlying unix
-// connection and keeps track of the first read from http.Server,
-// which just reads the headers.
-type MalformedHostHeaderOverrideConn struct {
-	net.Conn
-	first bool
-}
-
-var closeConnHeader = []byte("\r\nConnection: close\r")
-
-// Read inspects the first read that http.Server performs, which contains
-// the request headers. If the Host value starts with / then we're talking
-// to an old docker client that sends an invalid Host header. To avoid an
-// error in http.Server we rewrite the first bytes of the request to
-// sanitize the Host header itself.
-// If we're not dealing with an old docker client, the data is passed
-// to the server without modification.
-func (l *MalformedHostHeaderOverrideConn) Read(b []byte) (n int, err error) {
-	// http.Server uses a 4k buffer
-	if l.first && len(b) == 4096 {
-		// This keeps track of the first read from http.Server, which just
-		// reads the headers
-		l.first = false
-		// The first read of the connection by http.Server is limited to
-		// DefaultMaxHeaderBytes (usually 1 << 20) + 4096.
-		// Here we do the first read, which gets us all the http headers to
-		// be inspected and modified below.
-		c, err := l.Conn.Read(b)
-		if err != nil {
-			return c, err
-		}
-
-		var (
-			start, end    int
-			firstLineFeed = -1
-			buf           []byte
-		)
-		for i := 0; i <= c-1-7; i++ {
-			if b[i] == '\n' && firstLineFeed == -1 {
-				firstLineFeed = i
-			}
-			if b[i] != '\n' {
-				continue
-			}
-
-			if b[i+1] == '\r' && b[i+2] == '\n' {
-				return c, nil
-			}
-
-			if b[i+1] != 'H' {
-				continue
-			}
-			if b[i+2] != 'o' {
-				continue
-			}
-			if b[i+3] != 's' {
-				continue
-			}
-			if b[i+4] != 't' {
-				continue
-			}
-			if b[i+5] != ':' {
-				continue
-			}
-			if b[i+6] != ' ' {
-				continue
-			}
-			if b[i+7] != '/' {
-				continue
-			}
-			// ensure that clients other than the docker client do not get this hack
-			if i != firstLineFeed {
-				return c, nil
-			}
-			start = i + 7
-			// now find where the value ends
-			for ii, bbb := range b[start:c] {
-				if bbb == '\n' {
-					end = start + ii
-					break
-				}
-			}
-			buf = make([]byte, 0, c+len(closeConnHeader)-(end-start))
-			// strip the value of the host header and
-			// inject `Connection: close` to ensure we don't reuse this connection
-			buf = append(buf, b[:start]...)
-			buf = append(buf, closeConnHeader...)
-			buf = append(buf, b[end:c]...)
-			copy(b, buf)
-			break
-		}
-		if len(buf) == 0 {
-			return c, nil
-		}
-		return len(buf), nil
-	}
-	return l.Conn.Read(b)
-}
-
-// Accept accepts a connection from the wrapped listener and wraps it in
-// a MalformedHostHeaderOverrideConn with first initialized to true.
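Concretely, the rewrite Read performs blanks the unix-socket Host value and injects Connection: close so the connection is never reused. This before/after pair is lifted from TestHeaderOverrideHack, which appears later in this patch:

    package main

    import "fmt"

    func main() {
        before := "GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"
        after := "GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"
        fmt.Printf("old client sends:     %q\nhttp.Server now sees: %q\n", before, after)
    }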
-func (l *MalformedHostHeaderOverride) Accept() (net.Conn, error) { - c, err := l.Listener.Accept() - if err != nil { - return c, err - } - return &MalformedHostHeaderOverrideConn{c, true}, nil -} diff --git a/cmd/dockerd/hack/malformed_host_override_test.go b/cmd/dockerd/hack/malformed_host_override_test.go deleted file mode 100644 index 1a0a60baf3..0000000000 --- a/cmd/dockerd/hack/malformed_host_override_test.go +++ /dev/null @@ -1,124 +0,0 @@ -// +build !windows - -package hack - -import ( - "bytes" - "io" - "net" - "strings" - "testing" -) - -type bufConn struct { - net.Conn - buf *bytes.Buffer -} - -func (bc *bufConn) Read(b []byte) (int, error) { - return bc.buf.Read(b) -} - -func TestHeaderOverrideHack(t *testing.T) { - tests := [][2][]byte{ - { - []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), - []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\n"), - }, - { - []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\nFoo: Bar\r\n"), - []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\nFoo: Bar\r\n"), - }, - { - []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something!"), - []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something!"), - }, - { - []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), - []byte("GET /foo\nHost: \r\nConnection: close\r\nUser-Agent: Docker\r\n\r\ntest something! " + strings.Repeat("test", 15000)), - }, - { - []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), - []byte("GET /foo\nFoo: Bar\nHost: /var/run/docker.sock\nUser-Agent: Docker\r\n\r\n"), - }, - } - - // Test for https://github.com/docker/docker/issues/23045 - h0 := "GET /foo\nUser-Agent: Docker\r\n\r\n" - h0 = h0 + strings.Repeat("a", 4096-len(h0)-1) + "\n" - tests = append(tests, [2][]byte{[]byte(h0), []byte(h0)}) - - for _, pair := range tests { - read := make([]byte, 4096) - client := &bufConn{ - buf: bytes.NewBuffer(pair[0]), - } - l := MalformedHostHeaderOverrideConn{client, true} - - n, err := l.Read(read) - if err != nil && err != io.EOF { - t.Fatalf("read: %d - %d, err: %v\n%s", n, len(pair[0]), err, string(read[:n])) - } - if !bytes.Equal(read[:n], pair[1][:n]) { - t.Fatalf("\n%s\n%s\n", read[:n], pair[1][:n]) - } - } -} - -func BenchmarkWithHack(b *testing.B) { - client, srv := net.Pipe() - done := make(chan struct{}) - req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") - read := make([]byte, 4096) - b.SetBytes(int64(len(req) * 30)) - - l := MalformedHostHeaderOverrideConn{client, true} - go func() { - for { - if _, err := srv.Write(req); err != nil { - srv.Close() - break - } - l.first = true // make sure each subsequent run uses the hack parsing - } - close(done) - }() - - for i := 0; i < b.N; i++ { - for i := 0; i < 30; i++ { - if n, err := l.Read(read); err != nil && err != io.EOF { - b.Fatalf("read: %d - %d, err: %v\n%s", n, len(req), err, string(read[:n])) - } - } - } - l.Close() - <-done -} - -func BenchmarkNoHack(b *testing.B) { - client, srv := net.Pipe() - done := make(chan struct{}) - req := []byte("GET /foo\nHost: /var/run/docker.sock\nUser-Agent: Docker\n") - read := make([]byte, 4096) - b.SetBytes(int64(len(req) * 30)) - - go func() { - for { - if _, err := srv.Write(req); err != nil { - srv.Close() - break - } - } - close(done) - }() - - for i := 0; i < b.N; i++ { - for i := 0; i < 30; i++ { - if _, err := 
client.Read(read); err != nil && err != io.EOF { - b.Fatal(err) - } - } - } - client.Close() - <-done -} diff --git a/cmd/dockerd/routes.go b/cmd/dockerd/routes.go deleted file mode 100644 index 65b97bd8c2..0000000000 --- a/cmd/dockerd/routes.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !experimental - -package main - -import "github.com/docker/docker/api/server/router" - -func addExperimentalRouters(routers []router.Router) []router.Router { - return routers -} diff --git a/cmd/dockerd/routes_experimental.go b/cmd/dockerd/routes_experimental.go deleted file mode 100644 index 665df9499a..0000000000 --- a/cmd/dockerd/routes_experimental.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build experimental - -package main - -import ( - "github.com/docker/docker/api/server/router" - pluginrouter "github.com/docker/docker/api/server/router/plugin" - "github.com/docker/docker/plugin" -) - -func addExperimentalRouters(routers []router.Router) []router.Router { - return append(routers, pluginrouter.NewRouter(plugin.GetManager())) -} diff --git a/cmd/dockerd/service_unsupported.go b/cmd/dockerd/service_unsupported.go deleted file mode 100644 index dd53802a5e..0000000000 --- a/cmd/dockerd/service_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !windows - -package main - -func initService() (bool, error) { - return false, nil -} diff --git a/cmd/dockerd/service_windows.go b/cmd/dockerd/service_windows.go deleted file mode 100644 index e78dad20c5..0000000000 --- a/cmd/dockerd/service_windows.go +++ /dev/null @@ -1,369 +0,0 @@ -package main - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "syscall" - - "github.com/Sirupsen/logrus" - flag "github.com/docker/docker/pkg/mflag" - "golang.org/x/sys/windows" - "golang.org/x/sys/windows/svc" - "golang.org/x/sys/windows/svc/debug" - "golang.org/x/sys/windows/svc/eventlog" - "golang.org/x/sys/windows/svc/mgr" -) - -var ( - flServiceName = flag.String([]string{"-service-name"}, "docker", "Set the Windows service name") - flRegisterService = flag.Bool([]string{"-register-service"}, false, "Register the service and exit") - flUnregisterService = flag.Bool([]string{"-unregister-service"}, false, "Unregister the service and exit") - flRunService = flag.Bool([]string{"-run-service"}, false, "") - - setStdHandle = syscall.NewLazyDLL("kernel32.dll").NewProc("SetStdHandle") - oldStderr syscall.Handle - panicFile *os.File - - service *handler -) - -const ( - // These should match the values in event_messages.mc. 
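routes.go and routes_experimental.go, removed just above, pick an implementation at compile time: two files declare the same function under opposite build tags. A single-file sketch of the experimental side, with strings standing in for router.Router; run it with go run -tags experimental (without the tag the file is excluded from the build entirely, which is exactly how the !experimental variant takes over):

    // +build experimental

    package main

    import "fmt"

    func addExperimentalRouters(routers []string) []string {
        return append(routers, "plugin")
    }

    func main() {
        fmt.Println(addExperimentalRouters([]string{"container", "image", "network"}))
    }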
- eventInfo = 1 - eventWarn = 1 - eventError = 1 - eventDebug = 2 - eventPanic = 3 - eventFatal = 4 - - eventExtraOffset = 10 // Add this to any event to get a string that supports extended data -) - -type handler struct { - tosvc chan bool - fromsvc chan error -} - -type etwHook struct { - log *eventlog.Log -} - -func (h *etwHook) Levels() []logrus.Level { - return []logrus.Level{ - logrus.PanicLevel, - logrus.FatalLevel, - logrus.ErrorLevel, - logrus.WarnLevel, - logrus.InfoLevel, - logrus.DebugLevel, - } -} - -func (h *etwHook) Fire(e *logrus.Entry) error { - var ( - etype uint16 - eid uint32 - ) - - switch e.Level { - case logrus.PanicLevel: - etype = windows.EVENTLOG_ERROR_TYPE - eid = eventPanic - case logrus.FatalLevel: - etype = windows.EVENTLOG_ERROR_TYPE - eid = eventFatal - case logrus.ErrorLevel: - etype = windows.EVENTLOG_ERROR_TYPE - eid = eventError - case logrus.WarnLevel: - etype = windows.EVENTLOG_WARNING_TYPE - eid = eventWarn - case logrus.InfoLevel: - etype = windows.EVENTLOG_INFORMATION_TYPE - eid = eventInfo - case logrus.DebugLevel: - etype = windows.EVENTLOG_INFORMATION_TYPE - eid = eventDebug - default: - return errors.New("unknown level") - } - - // If there is additional data, include it as a second string. - exts := "" - if len(e.Data) > 0 { - fs := bytes.Buffer{} - for k, v := range e.Data { - fs.WriteString(k) - fs.WriteByte('=') - fmt.Fprint(&fs, v) - fs.WriteByte(' ') - } - - exts = fs.String()[:fs.Len()-1] - eid += eventExtraOffset - } - - if h.log == nil { - fmt.Fprintf(os.Stderr, "%s [%s]\n", e.Message, exts) - return nil - } - - var ( - ss [2]*uint16 - err error - ) - - ss[0], err = syscall.UTF16PtrFromString(e.Message) - if err != nil { - return err - } - - count := uint16(1) - if exts != "" { - ss[1], err = syscall.UTF16PtrFromString(exts) - if err != nil { - return err - } - - count++ - } - - return windows.ReportEvent(h.log.Handle, etype, 0, eid, 0, count, 0, &ss[0], nil) -} - -func getServicePath() (string, error) { - p, err := exec.LookPath(os.Args[0]) - if err != nil { - return "", err - } - return filepath.Abs(p) -} - -func registerService() error { - p, err := getServicePath() - if err != nil { - return err - } - m, err := mgr.Connect() - if err != nil { - return err - } - defer m.Disconnect() - c := mgr.Config{ - ServiceType: windows.SERVICE_WIN32_OWN_PROCESS, - StartType: mgr.StartAutomatic, - ErrorControl: mgr.ErrorNormal, - DisplayName: "Docker Engine", - } - - // Configure the service to launch with the arguments that were just passed. - args := []string{"--run-service"} - for _, a := range os.Args[1:] { - if a != "--register-service" && a != "--unregister-service" { - args = append(args, a) - } - } - - s, err := m.CreateService(*flServiceName, p, c, args...) 
- if err != nil { - return err - } - defer s.Close() - err = eventlog.Install(*flServiceName, p, false, eventlog.Info|eventlog.Warning|eventlog.Error) - if err != nil { - return err - } - - return nil -} - -func unregisterService() error { - m, err := mgr.Connect() - if err != nil { - return err - } - defer m.Disconnect() - - s, err := m.OpenService(*flServiceName) - if err != nil { - return err - } - defer s.Close() - - eventlog.Remove(*flServiceName) - err = s.Delete() - if err != nil { - return err - } - return nil -} - -func initService() (bool, error) { - if *flUnregisterService { - if *flRegisterService { - return true, errors.New("--register-service and --unregister-service cannot be used together") - } - return true, unregisterService() - } - - if *flRegisterService { - return true, registerService() - } - - if !*flRunService { - return false, nil - } - - interactive, err := svc.IsAnInteractiveSession() - if err != nil { - return false, err - } - - h := &handler{ - tosvc: make(chan bool), - fromsvc: make(chan error), - } - - var log *eventlog.Log - if !interactive { - log, err = eventlog.Open(*flServiceName) - if err != nil { - return false, err - } - } - - logrus.AddHook(&etwHook{log}) - logrus.SetOutput(ioutil.Discard) - - service = h - go func() { - if interactive { - err = debug.Run(*flServiceName, h) - } else { - err = svc.Run(*flServiceName, h) - } - - h.fromsvc <- err - }() - - // Wait for the first signal from the service handler. - err = <-h.fromsvc - if err != nil { - return false, err - } - return false, nil -} - -func (h *handler) started() error { - // This must be delayed until daemonCli initializes Config.Root - err := initPanicFile(filepath.Join(daemonCli.Config.Root, "panic.log")) - if err != nil { - return err - } - - h.tosvc <- false - return nil -} - -func (h *handler) stopped(err error) { - logrus.Debugf("Stopping service: %v", err) - h.tosvc <- err != nil - <-h.fromsvc -} - -func (h *handler) Execute(_ []string, r <-chan svc.ChangeRequest, s chan<- svc.Status) (bool, uint32) { - s <- svc.Status{State: svc.StartPending, Accepts: 0} - // Unblock initService() - h.fromsvc <- nil - - // Wait for initialization to complete. - failed := <-h.tosvc - if failed { - logrus.Debug("Aborting service start due to failure during initialization") - return true, 1 - } - - s <- svc.Status{State: svc.Running, Accepts: svc.AcceptStop | svc.AcceptShutdown | svc.Accepted(windows.SERVICE_ACCEPT_PARAMCHANGE)} - logrus.Debug("Service running") -Loop: - for { - select { - case failed = <-h.tosvc: - break Loop - case c := <-r: - switch c.Cmd { - case svc.Cmd(windows.SERVICE_CONTROL_PARAMCHANGE): - daemonCli.reloadConfig() - case svc.Interrogate: - s <- c.CurrentStatus - case svc.Stop, svc.Shutdown: - s <- svc.Status{State: svc.StopPending, Accepts: 0} - daemonCli.stop() - } - } - } - - removePanicFile() - if failed { - return true, 1 - } - return false, 0 -} - -func initPanicFile(path string) error { - var err error - panicFile, err = os.OpenFile(path, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0) - if err != nil { - return err - } - - st, err := panicFile.Stat() - if err != nil { - return err - } - - // If there are contents in the file already, move the file out of the way - // and replace it. - if st.Size() > 0 { - panicFile.Close() - os.Rename(path, path+".old") - panicFile, err = os.Create(path) - if err != nil { - return err - } - } - - // Update STD_ERROR_HANDLE to point to the panic file so that Go writes to - // it when it panics. 
Remember the old stderr to restore it before removing
-// the panic file.
-	sh := syscall.STD_ERROR_HANDLE
-	h, err := syscall.GetStdHandle(sh)
-	if err != nil {
-		return err
-	}
-
-	oldStderr = h
-
-	r, _, err := setStdHandle.Call(uintptr(sh), uintptr(panicFile.Fd()))
-	if r == 0 && err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func removePanicFile() {
-	if st, err := panicFile.Stat(); err == nil {
-		if st.Size() == 0 {
-			sh := syscall.STD_ERROR_HANDLE
-			setStdHandle.Call(uintptr(sh), uintptr(oldStderr))
-			panicFile.Close()
-			os.Remove(panicFile.Name())
-		}
-	}
-}
diff --git a/container/archive.go b/container/archive.go
deleted file mode 100644
index e22a001c59..0000000000
--- a/container/archive.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package container
-
-import (
-	"os"
-	"path/filepath"
-
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/system"
-	"github.com/docker/engine-api/types"
-)
-
-// ResolvePath resolves the given path in the container to a resource on the
-// host. Returns a resolved path (absolute path to the resource on the host),
-// the absolute path to the resource relative to the container's rootfs, and
-// an error if the path points outside the container's rootfs.
-func (container *Container) ResolvePath(path string) (resolvedPath, absPath string, err error) {
-	// Check if a drive letter was supplied; if so, it must be the system drive. No-op except on Windows
-	path, err = system.CheckSystemDriveAndRemoveDriveLetter(path)
-	if err != nil {
-		return "", "", err
-	}
-
-	// Consider the given path as an absolute path in the container.
-	absPath = archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path)
-
-	// Split the absPath into its directory and base components. We will
-	// resolve the dir in the scope of the container then append the base.
-	dirPath, basePath := filepath.Split(absPath)
-
-	resolvedDirPath, err := container.GetResourcePath(dirPath)
-	if err != nil {
-		return "", "", err
-	}
-
-	// resolvedDirPath will have been cleaned (no trailing path separators) so
-	// we can manually join it with the base path element.
-	resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath
-
-	return resolvedPath, absPath, nil
-}
-
-// StatPath stats the resource at the given resolved path. Locks and mounts
-// should be acquired before calling this method, and the given path should
-// be fully resolved to a path on the host corresponding to the given
-// absolute path inside the container.
-func (container *Container) StatPath(resolvedPath, absPath string) (stat *types.ContainerPathStat, err error) {
-	lstat, err := os.Lstat(resolvedPath)
-	if err != nil {
-		return nil, err
-	}
-
-	var linkTarget string
-	if lstat.Mode()&os.ModeSymlink != 0 {
-		// Fully evaluate the symlink in the scope of the container rootfs.
-		hostPath, err := container.GetResourcePath(absPath)
-		if err != nil {
-			return nil, err
-		}
-
-		linkTarget, err = filepath.Rel(container.BaseFS, hostPath)
-		if err != nil {
-			return nil, err
-		}
-
-		// Make it an absolute path.
- linkTarget = filepath.Join(string(filepath.Separator), linkTarget) - } - - return &types.ContainerPathStat{ - Name: filepath.Base(absPath), - Size: lstat.Size(), - Mode: lstat.Mode(), - Mtime: lstat.ModTime(), - LinkTarget: linkTarget, - }, nil -} diff --git a/container/container.go b/container/container.go deleted file mode 100644 index d34226922a..0000000000 --- a/container/container.go +++ /dev/null @@ -1,974 +0,0 @@ -package container - -import ( - "encoding/json" - "fmt" - "io" - "net" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/jsonfilelog" - "github.com/docker/docker/daemon/network" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/restartmanager" - "github.com/docker/docker/runconfig" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/docker/volume" - containertypes "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/go-connections/nat" - "github.com/docker/libnetwork" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - "github.com/docker/libnetwork/types" - "github.com/opencontainers/runc/libcontainer/label" -) - -const configFileName = "config.v2.json" - -var ( - errInvalidEndpoint = fmt.Errorf("invalid endpoint while building port map info") - errInvalidNetwork = fmt.Errorf("invalid network settings while building port map info") -) - -// DetachError is special error which returned in case of container detach. -type DetachError struct{} - -func (DetachError) Error() string { - return "detached from container" -} - -// CommonContainer holds the fields for a container which are -// applicable across all platforms supported by the daemon. -type CommonContainer struct { - *runconfig.StreamConfig - // embed for Container to support states directly. - *State `json:"State"` // Needed for remote api version <= 1.11 - Root string `json:"-"` // Path to the "home" of the container, including metadata. - BaseFS string `json:"-"` // Path to the graphdriver mountpoint - RWLayer layer.RWLayer `json:"-"` - ID string - Created time.Time - Managed bool - Path string - Args []string - Config *containertypes.Config - ImageID image.ID `json:"Image"` - NetworkSettings *network.Settings - LogPath string - Name string - Driver string - // MountLabel contains the options for the 'mount' command - MountLabel string - ProcessLabel string - RestartCount int - HasBeenStartedBefore bool - HasBeenManuallyStopped bool // used for unless-stopped restart policy - MountPoints map[string]*volume.MountPoint - HostConfig *containertypes.HostConfig `json:"-"` // do not serialize the host config in the json, otherwise we'll make the container unportable - ExecCommands *exec.Store `json:"-"` - // logDriver for closing - LogDriver logger.Logger `json:"-"` - LogCopier *logger.Copier `json:"-"` - restartManager restartmanager.RestartManager - attachContext *attachContext -} - -// NewBaseContainer creates a new container with its -// basic configuration. 
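The linkTarget handling in StatPath above, filepath.Rel against the container's BaseFS followed by re-rooting at /, in isolation with a hypothetical BaseFS:

    package main

    import (
        "fmt"
        "path/filepath"
    )

    func main() {
        baseFS := "/var/lib/docker/aufs/mnt/abc123" // hypothetical container BaseFS
        hostPath := filepath.Join(baseFS, "etc", "localtime")

        rel, err := filepath.Rel(baseFS, hostPath)
        if err != nil {
            panic(err)
        }
        // Re-root the relative target at the container's own "/".
        fmt.Println(filepath.Join(string(filepath.Separator), rel)) // /etc/localtime
    }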
-func NewBaseContainer(id, root string) *Container { - return &Container{ - CommonContainer: CommonContainer{ - ID: id, - State: NewState(), - ExecCommands: exec.NewStore(), - Root: root, - MountPoints: make(map[string]*volume.MountPoint), - StreamConfig: runconfig.NewStreamConfig(), - attachContext: &attachContext{}, - }, - } -} - -// FromDisk loads the container configuration stored in the host. -func (container *Container) FromDisk() error { - pth, err := container.ConfigPath() - if err != nil { - return err - } - - jsonSource, err := os.Open(pth) - if err != nil { - return err - } - defer jsonSource.Close() - - dec := json.NewDecoder(jsonSource) - - // Load container settings - if err := dec.Decode(container); err != nil { - return err - } - - if err := label.ReserveLabel(container.ProcessLabel); err != nil { - return err - } - return container.readHostConfig() -} - -// ToDisk saves the container configuration on disk. -func (container *Container) ToDisk() error { - pth, err := container.ConfigPath() - if err != nil { - return err - } - - jsonSource, err := ioutils.NewAtomicFileWriter(pth, 0666) - if err != nil { - return err - } - defer jsonSource.Close() - - enc := json.NewEncoder(jsonSource) - - // Save container settings - if err := enc.Encode(container); err != nil { - return err - } - - return container.WriteHostConfig() -} - -// ToDiskLocking saves the container configuration on disk in a thread safe way. -func (container *Container) ToDiskLocking() error { - container.Lock() - err := container.ToDisk() - container.Unlock() - return err -} - -// readHostConfig reads the host configuration from disk for the container. -func (container *Container) readHostConfig() error { - container.HostConfig = &containertypes.HostConfig{} - // If the hostconfig file does not exist, do not read it. - // (We still have to initialize container.HostConfig, - // but that's OK, since we just did that above.) - pth, err := container.HostConfigPath() - if err != nil { - return err - } - - f, err := os.Open(pth) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - defer f.Close() - - if err := json.NewDecoder(f).Decode(&container.HostConfig); err != nil { - return err - } - - container.InitDNSHostConfig() - - return nil -} - -// WriteHostConfig saves the host configuration on disk for the container. -func (container *Container) WriteHostConfig() error { - pth, err := container.HostConfigPath() - if err != nil { - return err - } - - f, err := ioutils.NewAtomicFileWriter(pth, 0666) - if err != nil { - return err - } - defer f.Close() - - return json.NewEncoder(f).Encode(&container.HostConfig) -} - -// SetupWorkingDirectory sets up the container's working directory as set in container.Config.WorkingDir -func (container *Container) SetupWorkingDirectory(rootUID, rootGID int) error { - if container.Config.WorkingDir == "" { - return nil - } - - container.Config.WorkingDir = filepath.Clean(container.Config.WorkingDir) - - // If can't mount container FS at this point (eg Hyper-V Containers on - // Windows) bail out now with no action. 
- if !container.canMountFS() { - return nil - } - - pth, err := container.GetResourcePath(container.Config.WorkingDir) - if err != nil { - return err - } - - if err := idtools.MkdirAllNewAs(pth, 0755, rootUID, rootGID); err != nil { - pthInfo, err2 := os.Stat(pth) - if err2 == nil && pthInfo != nil && !pthInfo.IsDir() { - return fmt.Errorf("Cannot mkdir: %s is not a directory", container.Config.WorkingDir) - } - - return err - } - - return nil -} - -// GetResourcePath evaluates `path` in the scope of the container's BaseFS, with proper path -// sanitisation. Symlinks are all scoped to the BaseFS of the container, as -// though the container's BaseFS was `/`. -// -// The BaseFS of a container is the host-facing path which is bind-mounted as -// `/` inside the container. This method is essentially used to access a -// particular path inside the container as though you were a process in that -// container. -// -// NOTE: The returned path is *only* safely scoped inside the container's BaseFS -// if no component of the returned path changes (such as a component -// symlinking to a different path) between using this method and using the -// path. See symlink.FollowSymlinkInScope for more details. -func (container *Container) GetResourcePath(path string) (string, error) { - // IMPORTANT - These are paths on the OS where the daemon is running, hence - // any filepath operations must be done in an OS agnostic way. - - cleanPath := cleanResourcePath(path) - r, e := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, cleanPath), container.BaseFS) - - // Log this here on the daemon side as there's otherwise no indication apart - // from the error being propagated all the way back to the client. This makes - // debugging significantly easier and clearly indicates the error comes from the daemon. - if e != nil { - logrus.Errorf("Failed to FollowSymlinkInScope BaseFS %s cleanPath %s path %s %s\n", container.BaseFS, cleanPath, path, e) - } - return r, e -} - -// GetRootResourcePath evaluates `path` in the scope of the container's root, with proper path -// sanitisation. Symlinks are all scoped to the root of the container, as -// though the container's root was `/`. -// -// The root of a container is the host-facing configuration metadata directory. -// Only use this method to safely access the container's `container.json` or -// other metadata files. If in doubt, use container.GetResourcePath. -// -// NOTE: The returned path is *only* safely scoped inside the container's root -// if no component of the returned path changes (such as a component -// symlinking to a different path) between using this method and using the -// path. See symlink.FollowSymlinkInScope for more details. -func (container *Container) GetRootResourcePath(path string) (string, error) { - // IMPORTANT - These are paths on the OS where the daemon is running, hence - // any filepath operations must be done in an OS agnostic way. - cleanPath := filepath.Join(string(os.PathSeparator), path) - return symlink.FollowSymlinkInScope(filepath.Join(container.Root, cleanPath), container.Root) -} - -// ExitOnNext signals to the monitor that it should not restart the container -// after we send the kill signal. 
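GetResourcePath's safety rests on symlink.FollowSymlinkInScope. A sketch of the call with a hypothetical BaseFS (building this one requires the Docker source tree, since the symlink package lives there):

    package main

    import (
        "fmt"
        "path/filepath"

        "github.com/docker/docker/pkg/symlink"
    )

    func main() {
        baseFS := "/var/lib/docker/aufs/mnt/abc123" // hypothetical container BaseFS

        // filepath.Join already collapses the "../.." traversal; the scoped
        // resolution additionally keeps any symlinks from escaping baseFS.
        requested := filepath.Join(string(filepath.Separator), "../../etc/passwd")
        resolved, err := symlink.FollowSymlinkInScope(filepath.Join(baseFS, requested), baseFS)
        fmt.Println(resolved, err) // on success, always prefixed by baseFS
    }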
-func (container *Container) ExitOnNext() { - if container.restartManager != nil { - container.restartManager.Cancel() - } -} - -// HostConfigPath returns the path to the container's JSON hostconfig -func (container *Container) HostConfigPath() (string, error) { - return container.GetRootResourcePath("hostconfig.json") -} - -// ConfigPath returns the path to the container's JSON config -func (container *Container) ConfigPath() (string, error) { - return container.GetRootResourcePath(configFileName) -} - -// StartLogger starts a new logger driver for the container. -func (container *Container) StartLogger(cfg containertypes.LogConfig) (logger.Logger, error) { - c, err := logger.GetLogDriver(cfg.Type) - if err != nil { - return nil, fmt.Errorf("Failed to get logging factory: %v", err) - } - ctx := logger.Context{ - Config: cfg.Config, - ContainerID: container.ID, - ContainerName: container.Name, - ContainerEntrypoint: container.Path, - ContainerArgs: container.Args, - ContainerImageID: container.ImageID.String(), - ContainerImageName: container.Config.Image, - ContainerCreated: container.Created, - ContainerEnv: container.Config.Env, - ContainerLabels: container.Config.Labels, - DaemonName: "docker", - } - - // Set logging file for "json-logger" - if cfg.Type == jsonfilelog.Name { - ctx.LogPath, err = container.GetRootResourcePath(fmt.Sprintf("%s-json.log", container.ID)) - if err != nil { - return nil, err - } - } - return c(ctx) -} - -// GetProcessLabel returns the process label for the container. -func (container *Container) GetProcessLabel() string { - // even if we have a process label return "" if we are running - // in privileged mode - if container.HostConfig.Privileged { - return "" - } - return container.ProcessLabel -} - -// GetMountLabel returns the mounting label for the container. -// This label is empty if the container is privileged. -func (container *Container) GetMountLabel() string { - return container.MountLabel -} - -// GetExecIDs returns the list of exec commands running on the container. -func (container *Container) GetExecIDs() []string { - return container.ExecCommands.List() -} - -// Attach connects to the container's TTY, delegating to standard -// streams or websockets depending on the configuration. -func (container *Container) Attach(stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { - ctx := container.InitAttachContext() - return AttachStreams(ctx, container.StreamConfig, container.Config.OpenStdin, container.Config.StdinOnce, container.Config.Tty, stdin, stdout, stderr, keys) -} - -// AttachStreams connects streams to a TTY. -// Used by exec too. Should this move somewhere else? -func AttachStreams(ctx context.Context, streamConfig *runconfig.StreamConfig, openStdin, stdinOnce, tty bool, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer, keys []byte) chan error { - var ( - cStdout, cStderr io.ReadCloser - cStdin io.WriteCloser - wg sync.WaitGroup - errors = make(chan error, 3) - ) - - if stdin != nil && openStdin { - cStdin = streamConfig.StdinPipe() - wg.Add(1) - } - - if stdout != nil { - cStdout = streamConfig.StdoutPipe() - wg.Add(1) - } - - if stderr != nil { - cStderr = streamConfig.StderrPipe() - wg.Add(1) - } - - // Connect stdin of container to the http conn. 
- go func() { - if stdin == nil || !openStdin { - return - } - logrus.Debug("attach: stdin: begin") - - var err error - if tty { - _, err = copyEscapable(cStdin, stdin, keys) - } else { - _, err = io.Copy(cStdin, stdin) - } - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - logrus.Errorf("attach: stdin: %s", err) - errors <- err - } - if stdinOnce && !tty { - cStdin.Close() - } else { - // No matter what, when stdin is closed (io.Copy unblock), close stdout and stderr - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - } - logrus.Debug("attach: stdin: end") - wg.Done() - }() - - attachStream := func(name string, stream io.Writer, streamPipe io.ReadCloser) { - if stream == nil { - return - } - - logrus.Debugf("attach: %s: begin", name) - _, err := io.Copy(stream, streamPipe) - if err == io.ErrClosedPipe { - err = nil - } - if err != nil { - logrus.Errorf("attach: %s: %v", name, err) - errors <- err - } - // Make sure stdin gets closed - if stdin != nil { - stdin.Close() - } - streamPipe.Close() - logrus.Debugf("attach: %s: end", name) - wg.Done() - } - - go attachStream("stdout", stdout, cStdout) - go attachStream("stderr", stderr, cStderr) - - return promise.Go(func() error { - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - select { - case <-done: - case <-ctx.Done(): - // close all pipes - if cStdin != nil { - cStdin.Close() - } - if cStdout != nil { - cStdout.Close() - } - if cStderr != nil { - cStderr.Close() - } - <-done - } - close(errors) - for err := range errors { - if err != nil { - return err - } - } - return nil - }) -} - -// Code c/c from io.Copy() modified to handle escape sequence -func copyEscapable(dst io.Writer, src io.ReadCloser, keys []byte) (written int64, err error) { - if len(keys) == 0 { - // Default keys : ctrl-p ctrl-q - keys = []byte{16, 17} - } - buf := make([]byte, 32*1024) - for { - nr, er := src.Read(buf) - if nr > 0 { - // ---- Docker addition - preservBuf := []byte{} - for i, key := range keys { - preservBuf = append(preservBuf, buf[0:nr]...) - if nr != 1 || buf[0] != key { - break - } - if i == len(keys)-1 { - src.Close() - return 0, DetachError{} - } - nr, er = src.Read(buf) - } - var nw int - var ew error - if len(preservBuf) > 0 { - nw, ew = dst.Write(preservBuf) - nr = len(preservBuf) - } else { - // ---- End of docker - nw, ew = dst.Write(buf[0:nr]) - } - if nw > 0 { - written += int64(nw) - } - if ew != nil { - err = ew - break - } - if nr != nw { - err = io.ErrShortWrite - break - } - } - if er == io.EOF { - break - } - if er != nil { - err = er - break - } - } - return written, err -} - -// ShouldRestart decides whether the daemon should restart the container or not. -// This is based on the container's restart policy. -func (container *Container) ShouldRestart() bool { - shouldRestart, _, _ := container.restartManager.ShouldRestart(uint32(container.ExitCode()), container.HasBeenManuallyStopped, container.FinishedAt.Sub(container.StartedAt)) - return shouldRestart -} - -// AddMountPointWithVolume adds a new mount point configured with a volume to the container. -func (container *Container) AddMountPointWithVolume(destination string, vol volume.Volume, rw bool) { - container.MountPoints[destination] = &volume.MountPoint{ - Name: vol.Name(), - Driver: vol.DriverName(), - Destination: destination, - RW: rw, - Volume: vol, - CopyData: volume.DefaultCopyMode, - } -} - -// IsDestinationMounted checks whether a path is mounted on the container or not. 
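
// copyEscapable above scans the raw TTY byte stream for the detach sequence
// (ctrl-p ctrl-q, bytes 16 and 17 by default) while copying it. A
// stripped-down sketch of the same detection over an in-memory buffer;
// detachIndex is illustrative and simpler than the real reader-driven loop
// (it is exact for the two-byte default, while longer sequences with
// repeated prefixes would need KMP-style backtracking).
package main

import "fmt"

// detachIndex returns the offset at which the detach sequence completes,
// or -1 if the stream does not contain it.
func detachIndex(stream, keys []byte) int {
	matched := 0
	for i, b := range stream {
		if b == keys[matched] {
			matched++
			if matched == len(keys) {
				return i
			}
			continue
		}
		matched = 0
		if b == keys[0] {
			matched = 1
		}
	}
	return -1
}

func main() {
	keys := []byte{16, 17} // ctrl-p, ctrl-q
	fmt.Println(detachIndex([]byte{'a', 16, 17, 'b'}, keys)) // 2
}
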
-func (container *Container) IsDestinationMounted(destination string) bool { - return container.MountPoints[destination] != nil -} - -// StopSignal returns the signal used to stop the container. -func (container *Container) StopSignal() int { - var stopSignal syscall.Signal - if container.Config.StopSignal != "" { - stopSignal, _ = signal.ParseSignal(container.Config.StopSignal) - } - - if int(stopSignal) == 0 { - stopSignal, _ = signal.ParseSignal(signal.DefaultStopSignal) - } - return int(stopSignal) -} - -// InitDNSHostConfig ensures that the dns fields are never nil. -// New containers don't ever have those fields nil, -// but pre created containers can still have those nil values. -// The non-recommended host configuration in the start api can -// make these fields nil again, this corrects that issue until -// we remove that behavior for good. -// See https://github.com/docker/docker/pull/17779 -// for a more detailed explanation on why we don't want that. -func (container *Container) InitDNSHostConfig() { - container.Lock() - defer container.Unlock() - if container.HostConfig.DNS == nil { - container.HostConfig.DNS = make([]string, 0) - } - - if container.HostConfig.DNSSearch == nil { - container.HostConfig.DNSSearch = make([]string, 0) - } - - if container.HostConfig.DNSOptions == nil { - container.HostConfig.DNSOptions = make([]string, 0) - } -} - -// GetEndpointInNetwork returns the container's endpoint to the provided network. -func (container *Container) GetEndpointInNetwork(n libnetwork.Network) (libnetwork.Endpoint, error) { - endpointName := strings.TrimPrefix(container.Name, "/") - return n.EndpointByName(endpointName) -} - -func (container *Container) buildPortMapInfo(ep libnetwork.Endpoint) error { - if ep == nil { - return errInvalidEndpoint - } - - networkSettings := container.NetworkSettings - if networkSettings == nil { - return errInvalidNetwork - } - - if len(networkSettings.Ports) == 0 { - pm, err := getEndpointPortMapInfo(ep) - if err != nil { - return err - } - networkSettings.Ports = pm - } - return nil -} - -func getEndpointPortMapInfo(ep libnetwork.Endpoint) (nat.PortMap, error) { - pm := nat.PortMap{} - driverInfo, err := ep.DriverInfo() - if err != nil { - return pm, err - } - - if driverInfo == nil { - // It is not an error for epInfo to be nil - return pm, nil - } - - if expData, ok := driverInfo[netlabel.ExposedPorts]; ok { - if exposedPorts, ok := expData.([]types.TransportPort); ok { - for _, tp := range exposedPorts { - natPort, err := nat.NewPort(tp.Proto.String(), strconv.Itoa(int(tp.Port))) - if err != nil { - return pm, fmt.Errorf("Error parsing Port value(%v):%v", tp.Port, err) - } - pm[natPort] = nil - } - } - } - - mapData, ok := driverInfo[netlabel.PortMap] - if !ok { - return pm, nil - } - - if portMapping, ok := mapData.([]types.PortBinding); ok { - for _, pp := range portMapping { - natPort, err := nat.NewPort(pp.Proto.String(), strconv.Itoa(int(pp.Port))) - if err != nil { - return pm, err - } - natBndg := nat.PortBinding{HostIP: pp.HostIP.String(), HostPort: strconv.Itoa(int(pp.HostPort))} - pm[natPort] = append(pm[natPort], natBndg) - } - } - - return pm, nil -} - -// GetSandboxPortMapInfo retrieves the current port-mapping programmed for the given sandbox -func GetSandboxPortMapInfo(sb libnetwork.Sandbox) nat.PortMap { - pm := nat.PortMap{} - if sb == nil { - return pm - } - - for _, ep := range sb.Endpoints() { - pm, _ = getEndpointPortMapInfo(ep) - if len(pm) > 0 { - break - } - } - return pm -} - -// BuildEndpointInfo sets 
endpoint-related fields on container.NetworkSettings based on the provided network and endpoint. -func (container *Container) BuildEndpointInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { - if ep == nil { - return errInvalidEndpoint - } - - networkSettings := container.NetworkSettings - if networkSettings == nil { - return errInvalidNetwork - } - - epInfo := ep.Info() - if epInfo == nil { - // It is not an error to get an empty endpoint info - return nil - } - - if _, ok := networkSettings.Networks[n.Name()]; !ok { - networkSettings.Networks[n.Name()] = new(networktypes.EndpointSettings) - } - networkSettings.Networks[n.Name()].NetworkID = n.ID() - networkSettings.Networks[n.Name()].EndpointID = ep.ID() - - iface := epInfo.Iface() - if iface == nil { - return nil - } - - if iface.MacAddress() != nil { - networkSettings.Networks[n.Name()].MacAddress = iface.MacAddress().String() - } - - if iface.Address() != nil { - ones, _ := iface.Address().Mask.Size() - networkSettings.Networks[n.Name()].IPAddress = iface.Address().IP.String() - networkSettings.Networks[n.Name()].IPPrefixLen = ones - } - - if iface.AddressIPv6() != nil && iface.AddressIPv6().IP.To16() != nil { - onesv6, _ := iface.AddressIPv6().Mask.Size() - networkSettings.Networks[n.Name()].GlobalIPv6Address = iface.AddressIPv6().IP.String() - networkSettings.Networks[n.Name()].GlobalIPv6PrefixLen = onesv6 - } - - return nil -} - -// UpdateJoinInfo updates network settings when container joins network n with endpoint ep. -func (container *Container) UpdateJoinInfo(n libnetwork.Network, ep libnetwork.Endpoint) error { - if err := container.buildPortMapInfo(ep); err != nil { - return err - } - - epInfo := ep.Info() - if epInfo == nil { - // It is not an error to get an empty endpoint info - return nil - } - if epInfo.Gateway() != nil { - container.NetworkSettings.Networks[n.Name()].Gateway = epInfo.Gateway().String() - } - if epInfo.GatewayIPv6().To16() != nil { - container.NetworkSettings.Networks[n.Name()].IPv6Gateway = epInfo.GatewayIPv6().String() - } - - return nil -} - -// UpdateSandboxNetworkSettings updates the sandbox ID and Key. -func (container *Container) UpdateSandboxNetworkSettings(sb libnetwork.Sandbox) error { - container.NetworkSettings.SandboxID = sb.ID() - container.NetworkSettings.SandboxKey = sb.Key() - return nil -} - -// BuildJoinOptions builds endpoint Join options from a given network. -func (container *Container) BuildJoinOptions(n libnetwork.Network) ([]libnetwork.EndpointOption, error) { - var joinOptions []libnetwork.EndpointOption - if epConfig, ok := container.NetworkSettings.Networks[n.Name()]; ok { - for _, str := range epConfig.Links { - name, alias, err := runconfigopts.ParseLink(str) - if err != nil { - return nil, err - } - joinOptions = append(joinOptions, libnetwork.CreateOptionAlias(name, alias)) - } - } - return joinOptions, nil -} - -// BuildCreateEndpointOptions builds endpoint options from a given network. 
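
// BuildEndpointInfo above splits the endpoint's *net.IPNet into the separate
// IPAddress and IPPrefixLen fields via Mask.Size(). A minimal illustration of
// that split using only the standard library; the CIDR value is a placeholder.
package main

import (
	"fmt"
	"net"
)

func main() {
	ip, ipnet, err := net.ParseCIDR("172.17.0.2/16")
	if err != nil {
		panic(err)
	}
	ones, _ := ipnet.Mask.Size() // prefix length; the second value is total bits
	fmt.Printf("IPAddress=%s IPPrefixLen=%d\n", ip, ones)
}
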
-func (container *Container) BuildCreateEndpointOptions(n libnetwork.Network, epConfig *networktypes.EndpointSettings, sb libnetwork.Sandbox) ([]libnetwork.EndpointOption, error) { - var ( - bindings = make(nat.PortMap) - pbList []types.PortBinding - exposeList []types.TransportPort - createOptions []libnetwork.EndpointOption - ) - - defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() - - if n.Name() == defaultNetName || container.NetworkSettings.IsAnonymousEndpoint { - createOptions = append(createOptions, libnetwork.CreateOptionAnonymous()) - } - - if epConfig != nil { - ipam := epConfig.IPAMConfig - if ipam != nil && (ipam.IPv4Address != "" || ipam.IPv6Address != "" || len(ipam.LinkLocalIPs) > 0) { - var ipList []net.IP - for _, ips := range ipam.LinkLocalIPs { - if ip := net.ParseIP(ips); ip != nil { - ipList = append(ipList, ip) - } - } - createOptions = append(createOptions, - libnetwork.CreateOptionIpam(net.ParseIP(ipam.IPv4Address), net.ParseIP(ipam.IPv6Address), ipList, nil)) - } - - for _, alias := range epConfig.Aliases { - createOptions = append(createOptions, libnetwork.CreateOptionMyAlias(alias)) - } - } - - if container.NetworkSettings.Service != nil { - svcCfg := container.NetworkSettings.Service - - var vip string - if svcCfg.VirtualAddresses[n.ID()] != nil { - vip = svcCfg.VirtualAddresses[n.ID()].IPv4 - } - - var portConfigs []*libnetwork.PortConfig - for _, portConfig := range svcCfg.ExposedPorts { - portConfigs = append(portConfigs, &libnetwork.PortConfig{ - Name: portConfig.Name, - Protocol: libnetwork.PortConfig_Protocol(portConfig.Protocol), - TargetPort: portConfig.TargetPort, - PublishedPort: portConfig.PublishedPort, - }) - } - - createOptions = append(createOptions, libnetwork.CreateOptionService(svcCfg.Name, svcCfg.ID, net.ParseIP(vip), portConfigs, svcCfg.Aliases[n.ID()])) - } - - if !containertypes.NetworkMode(n.Name()).IsUserDefined() { - createOptions = append(createOptions, libnetwork.CreateOptionDisableResolution()) - } - - // configs that are applicable only for the endpoint in the network - // to which container was connected to on docker run. 
-	// Ideally, all these network-specific endpoint configurations should be moved under
-	// container.NetworkSettings.Networks[n.Name()]
-	if n.Name() == container.HostConfig.NetworkMode.NetworkName() ||
-		(n.Name() == defaultNetName && container.HostConfig.NetworkMode.IsDefault()) {
-		if container.Config.MacAddress != "" {
-			mac, err := net.ParseMAC(container.Config.MacAddress)
-			if err != nil {
-				return nil, err
-			}
-
-			genericOption := options.Generic{
-				netlabel.MacAddress: mac,
-			}
-
-			createOptions = append(createOptions, libnetwork.EndpointOptionGeneric(genericOption))
-		}
-	}
-
-	// Port-mapping rules belong to the container and are applicable only to non-internal networks.
-	portmaps := GetSandboxPortMapInfo(sb)
-	if n.Info().Internal() || len(portmaps) > 0 {
-		return createOptions, nil
-	}
-
-	if container.HostConfig.PortBindings != nil {
-		for p, b := range container.HostConfig.PortBindings {
-			bindings[p] = []nat.PortBinding{}
-			for _, bb := range b {
-				bindings[p] = append(bindings[p], nat.PortBinding{
-					HostIP:   bb.HostIP,
-					HostPort: bb.HostPort,
-				})
-			}
-		}
-	}
-
-	portSpecs := container.Config.ExposedPorts
-	ports := make([]nat.Port, len(portSpecs))
-	var i int
-	for p := range portSpecs {
-		ports[i] = p
-		i++
-	}
-	nat.SortPortMap(ports, bindings)
-	for _, port := range ports {
-		expose := types.TransportPort{}
-		expose.Proto = types.ParseProtocol(port.Proto())
-		expose.Port = uint16(port.Int())
-		exposeList = append(exposeList, expose)
-
-		pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto}
-		binding := bindings[port]
-		for i := 0; i < len(binding); i++ {
-			pbCopy := pb.GetCopy()
-			newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort))
-			var portStart, portEnd int
-			if err == nil {
-				portStart, portEnd, err = newP.Range()
-			}
-			if err != nil {
-				return nil, fmt.Errorf("Error parsing HostPort value (%s): %v", binding[i].HostPort, err)
-			}
-			pbCopy.HostPort = uint16(portStart)
-			pbCopy.HostPortEnd = uint16(portEnd)
-			pbCopy.HostIP = net.ParseIP(binding[i].HostIP)
-			pbList = append(pbList, pbCopy)
-		}
-
-		if container.HostConfig.PublishAllPorts && len(binding) == 0 {
-			pbList = append(pbList, pb)
-		}
-	}
-
-	createOptions = append(createOptions,
-		libnetwork.CreateOptionPortMapping(pbList),
-		libnetwork.CreateOptionExposedPorts(exposeList))
-
-	return createOptions, nil
-}
-
-// UpdateMonitor updates the monitor configuration for a running container.
-func (container *Container) UpdateMonitor(restartPolicy containertypes.RestartPolicy) {
-	type policySetter interface {
-		SetPolicy(containertypes.RestartPolicy)
-	}
-
-	if rm, ok := container.RestartManager(false).(policySetter); ok {
-		rm.SetPolicy(restartPolicy)
-	}
-}
-
-// FullHostname returns the hostname, with the domain name appended if one is set.
-func (container *Container) FullHostname() string {
-	fullHostname := container.Config.Hostname
-	if container.Config.Domainname != "" {
-		fullHostname = fmt.Sprintf("%s.%s", fullHostname, container.Config.Domainname)
-	}
-	return fullHostname
-}
-
-// RestartManager returns the current restartmanager instance connected to the container.
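
// BuildCreateEndpointOptions above accepts host ports as either a single
// port or a range ("8080-8090"), which nat.NewPort/Range expand into the
// HostPort/HostPortEnd pair. A rough stdlib-only sketch of that expansion;
// parsePortRange is illustrative, not the actual nat implementation.
package main

import (
	"fmt"
	"strconv"
	"strings"
)

func parsePortRange(spec string) (start, end uint16, err error) {
	parts := strings.SplitN(spec, "-", 2)
	s, err := strconv.ParseUint(parts[0], 10, 16)
	if err != nil {
		return 0, 0, err
	}
	e := s
	if len(parts) == 2 {
		if e, err = strconv.ParseUint(parts[1], 10, 16); err != nil {
			return 0, 0, err
		}
	}
	if e < s {
		return 0, 0, fmt.Errorf("invalid range %q", spec)
	}
	return uint16(s), uint16(e), nil
}

func main() {
	start, end, _ := parsePortRange("8080-8090")
	fmt.Println(start, end) // 8080 8090
}
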
-func (container *Container) RestartManager(reset bool) restartmanager.RestartManager {
-	if reset {
-		container.RestartCount = 0
-		container.restartManager = nil
-	}
-	if container.restartManager == nil {
-		container.restartManager = restartmanager.New(container.HostConfig.RestartPolicy, container.RestartCount)
-	}
-
-	return container.restartManager
-}
-
-type attachContext struct {
-	ctx    context.Context
-	cancel context.CancelFunc
-	mu     sync.Mutex
-}
-
-// InitAttachContext initializes or returns the existing context for attach
-// calls to track container liveness.
-func (container *Container) InitAttachContext() context.Context {
-	container.attachContext.mu.Lock()
-	defer container.attachContext.mu.Unlock()
-	if container.attachContext.ctx == nil {
-		container.attachContext.ctx, container.attachContext.cancel = context.WithCancel(context.Background())
-	}
-	return container.attachContext.ctx
-}
-
-// CancelAttachContext cancels the attach context. All attach calls should
-// detach after this call.
-func (container *Container) CancelAttachContext() {
-	container.attachContext.mu.Lock()
-	if container.attachContext.ctx != nil {
-		container.attachContext.cancel()
-		container.attachContext.ctx = nil
-	}
-	container.attachContext.mu.Unlock()
-}
diff --git a/container/container_solaris.go b/container/container_solaris.go
deleted file mode 100644
index ca02d8ea89..0000000000
--- a/container/container_solaris.go
+++ /dev/null
@@ -1,95 +0,0 @@
-// +build solaris
-
-package container
-
-import (
-	"os"
-	"path/filepath"
-
-	"github.com/docker/docker/volume"
-	"github.com/docker/engine-api/types/container"
-)
-
-// Container holds fields specific to the Solaris implementation. See
-// CommonContainer for standard fields common to all containers.
-type Container struct {
-	CommonContainer
-
-	// fields below here are platform specific.
-	HostnamePath   string
-	HostsPath      string
-	ResolvConfPath string
-}
-
-// ExitStatus provides exit reasons for a container.
-type ExitStatus struct {
-	// The exit code with which the container exited.
-	ExitCode int
-}
-
-// CreateDaemonEnvironment creates a new environment variable slice for this container.
-func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string {
-	return nil
-}
-
-func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) {
-	return volumeMounts, nil
-}
-
-// TrySetNetworkMount attempts to set the network mounts given a provided destination and
-// the path to use for it; it returns true if the given destination was a network mount file.
-func (container *Container) TrySetNetworkMount(destination string, path string) bool {
-	return true
-}
-
-// NetworkMounts returns the list of network mounts.
-func (container *Container) NetworkMounts() []Mount {
-	var mount []Mount
-	return mount
-}
-
-// CopyImagePathContent copies files in destination to the volume.
-func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error {
-	return nil
-}
-
-// UnmountIpcMounts unmounts IPC-related mounts.
-func (container *Container) UnmountIpcMounts(unmount func(pth string) error) {
-}
-
-// IpcMounts returns the list of IPC-related mounts.
-func (container *Container) IpcMounts() []Mount {
-	return nil
-}
-
-// UpdateContainer updates the configuration of a container.
-func (container *Container) UpdateContainer(hostConfig *container.HostConfig) error {
-	return nil
-}
-
-// UnmountVolumes explicitly unmounts volumes from the container.
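
// InitAttachContext/CancelAttachContext above lazily create one shared
// context so that every attached stream can be torn down with a single
// cancel. A minimal reproduction of that mutex-guarded pattern; the lazyCtx
// type and its names are illustrative.
package main

import (
	"context"
	"fmt"
	"sync"
)

type lazyCtx struct {
	mu     sync.Mutex
	ctx    context.Context
	cancel context.CancelFunc
}

// get returns the shared context, creating it on first use.
func (l *lazyCtx) get() context.Context {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.ctx == nil {
		l.ctx, l.cancel = context.WithCancel(context.Background())
	}
	return l.ctx
}

// stop cancels the current context; the next get() hands out a fresh one.
func (l *lazyCtx) stop() {
	l.mu.Lock()
	defer l.mu.Unlock()
	if l.ctx != nil {
		l.cancel()
		l.ctx = nil
	}
}

func main() {
	var l lazyCtx
	ctx := l.get()
	l.stop()
	<-ctx.Done()
	fmt.Println("attach context canceled:", ctx.Err())
}
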
-func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { - return nil -} - -// TmpfsMounts returns the list of tmpfs mounts -func (container *Container) TmpfsMounts() []Mount { - var mounts []Mount - return mounts -} - -// cleanResourcePath cleans a resource path and prepares to combine with mnt path -func cleanResourcePath(path string) string { - return filepath.Join(string(os.PathSeparator), path) -} - -// BuildHostnameFile writes the container's hostname file. -func (container *Container) BuildHostnameFile() error { - return nil -} - -// canMountFS determines if the file system for the container -// can be mounted locally. A no-op on non-Windows platforms -func (container *Container) canMountFS() bool { - return true -} diff --git a/container/container_unit_test.go b/container/container_unit_test.go deleted file mode 100644 index 67b829f9f9..0000000000 --- a/container/container_unit_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package container - -import ( - "testing" - - "github.com/docker/docker/pkg/signal" - "github.com/docker/engine-api/types/container" -) - -func TestContainerStopSignal(t *testing.T) { - c := &Container{ - CommonContainer: CommonContainer{ - Config: &container.Config{}, - }, - } - - def, err := signal.ParseSignal(signal.DefaultStopSignal) - if err != nil { - t.Fatal(err) - } - - s := c.StopSignal() - if s != int(def) { - t.Fatalf("Expected %v, got %v", def, s) - } - - c = &Container{ - CommonContainer: CommonContainer{ - Config: &container.Config{StopSignal: "SIGKILL"}, - }, - } - s = c.StopSignal() - if s != 9 { - t.Fatalf("Expected 9, got %v", s) - } -} diff --git a/container/container_unix.go b/container/container_unix.go deleted file mode 100644 index 2727b818f5..0000000000 --- a/container/container_unix.go +++ /dev/null @@ -1,419 +0,0 @@ -// +build linux freebsd - -package container - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/utils" - "github.com/docker/docker/volume" - containertypes "github.com/docker/engine-api/types/container" - "github.com/opencontainers/runc/libcontainer/label" -) - -// DefaultSHMSize is the default size (64MB) of the SHM which will be mounted in the container -const DefaultSHMSize int64 = 67108864 - -// Container holds the fields specific to unixen implementations. -// See CommonContainer for standard fields common to all containers. -type Container struct { - CommonContainer - - // Fields below here are platform specific. - AppArmorProfile string - HostnamePath string - HostsPath string - ShmPath string - ResolvConfPath string - SeccompProfile string - NoNewPrivileges bool -} - -// ExitStatus provides exit reasons for a container. -type ExitStatus struct { - // The exit code with which the container exited. - ExitCode int - - // Whether the container encountered an OOM. - OOMKilled bool -} - -// CreateDaemonEnvironment returns the list of all environment variables given the list of -// environment variables related to links. -// Sets PATH, HOSTNAME and if container.Config.Tty is set: TERM. 
-// The defaults set here do not override the values in container.Config.Env -func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { - // Setup environment - env := []string{ - "PATH=" + system.DefaultPathEnv, - "HOSTNAME=" + container.Config.Hostname, - } - if container.Config.Tty { - env = append(env, "TERM=xterm") - } - env = append(env, linkedEnv...) - // because the env on the container can override certain default values - // we need to replace the 'env' keys where they match and append anything - // else. - env = utils.ReplaceOrAppendEnvValues(env, container.Config.Env) - return env -} - -// TrySetNetworkMount attempts to set the network mounts given a provided destination and -// the path to use for it; return true if the given destination was a network mount file -func (container *Container) TrySetNetworkMount(destination string, path string) bool { - if destination == "/etc/resolv.conf" { - container.ResolvConfPath = path - return true - } - if destination == "/etc/hostname" { - container.HostnamePath = path - return true - } - if destination == "/etc/hosts" { - container.HostsPath = path - return true - } - - return false -} - -// BuildHostnameFile writes the container's hostname file. -func (container *Container) BuildHostnameFile() error { - hostnamePath, err := container.GetRootResourcePath("hostname") - if err != nil { - return err - } - container.HostnamePath = hostnamePath - return ioutil.WriteFile(container.HostnamePath, []byte(container.Config.Hostname+"\n"), 0644) -} - -// appendNetworkMounts appends any network mounts to the array of mount points passed in -func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { - for _, mnt := range container.NetworkMounts() { - dest, err := container.GetResourcePath(mnt.Destination) - if err != nil { - return nil, err - } - volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest}) - } - return volumeMounts, nil -} - -// NetworkMounts returns the list of network mounts. 
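
// CreateDaemonEnvironment above merges container.Config.Env over the
// defaults with utils.ReplaceOrAppendEnvValues: an override replaces the
// default sharing its key, and anything new is appended. A small sketch of
// that merge, assuming well-formed KEY=VALUE entries; replaceOrAppend is
// illustrative, not the actual utils implementation.
package main

import (
	"fmt"
	"strings"
)

func replaceOrAppend(defaults, overrides []string) []string {
	out := append([]string(nil), defaults...)
	for _, kv := range overrides {
		key := strings.SplitN(kv, "=", 2)[0] + "="
		replaced := false
		for i, existing := range out {
			if strings.HasPrefix(existing, key) {
				out[i] = kv
				replaced = true
				break
			}
		}
		if !replaced {
			out = append(out, kv)
		}
	}
	return out
}

func main() {
	env := replaceOrAppend(
		[]string{"PATH=/usr/bin", "HOSTNAME=abc"},
		[]string{"PATH=/custom/bin", "FOO=bar"},
	)
	fmt.Println(env) // [PATH=/custom/bin HOSTNAME=abc FOO=bar]
}
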
-func (container *Container) NetworkMounts() []Mount { - var mounts []Mount - shared := container.HostConfig.NetworkMode.IsContainer() - if container.ResolvConfPath != "" { - if _, err := os.Stat(container.ResolvConfPath); err != nil { - logrus.Warnf("ResolvConfPath set to %q, but can't stat this filename (err = %v); skipping", container.ResolvConfPath, err) - } else { - if !container.HasMountFor("/etc/resolv.conf") { - label.Relabel(container.ResolvConfPath, container.MountLabel, shared) - } - writable := !container.HostConfig.ReadonlyRootfs - if m, exists := container.MountPoints["/etc/resolv.conf"]; exists { - writable = m.RW - } - mounts = append(mounts, Mount{ - Source: container.ResolvConfPath, - Destination: "/etc/resolv.conf", - Writable: writable, - Propagation: volume.DefaultPropagationMode, - }) - } - } - if container.HostnamePath != "" { - if _, err := os.Stat(container.HostnamePath); err != nil { - logrus.Warnf("HostnamePath set to %q, but can't stat this filename (err = %v); skipping", container.HostnamePath, err) - } else { - if !container.HasMountFor("/etc/hostname") { - label.Relabel(container.HostnamePath, container.MountLabel, shared) - } - writable := !container.HostConfig.ReadonlyRootfs - if m, exists := container.MountPoints["/etc/hostname"]; exists { - writable = m.RW - } - mounts = append(mounts, Mount{ - Source: container.HostnamePath, - Destination: "/etc/hostname", - Writable: writable, - Propagation: volume.DefaultPropagationMode, - }) - } - } - if container.HostsPath != "" { - if _, err := os.Stat(container.HostsPath); err != nil { - logrus.Warnf("HostsPath set to %q, but can't stat this filename (err = %v); skipping", container.HostsPath, err) - } else { - if !container.HasMountFor("/etc/hosts") { - label.Relabel(container.HostsPath, container.MountLabel, shared) - } - writable := !container.HostConfig.ReadonlyRootfs - if m, exists := container.MountPoints["/etc/hosts"]; exists { - writable = m.RW - } - mounts = append(mounts, Mount{ - Source: container.HostsPath, - Destination: "/etc/hosts", - Writable: writable, - Propagation: volume.DefaultPropagationMode, - }) - } - } - return mounts -} - -// CopyImagePathContent copies files in destination to the volume. 
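
// Each of the three stanzas in NetworkMounts above repeats the same shape:
// stat the source, relabel it unless the user supplied their own mount, and
// honor a read-only rootfs unless an explicit mount point overrides it. A
// hypothetical sketch of how that shared shape could be factored out; the
// mount struct and networkFileMount helper are illustrative only, not part
// of the original code.
package main

import "fmt"

type mount struct {
	Source      string
	Destination string
	Writable    bool
}

// networkFileMount builds one /etc/* bind-mount entry, defaulting to the
// rootfs writability unless an explicit mount point overrides it.
func networkFileMount(src, dest string, readonlyRootfs bool, overrides map[string]bool) mount {
	writable := !readonlyRootfs
	if rw, ok := overrides[dest]; ok {
		writable = rw
	}
	return mount{Source: src, Destination: dest, Writable: writable}
}

func main() {
	overrides := map[string]bool{"/etc/hosts": true}
	fmt.Printf("%+v\n", networkFileMount("/var/lib/docker/containers/abc/hosts", "/etc/hosts", true, overrides))
}
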
-func (container *Container) CopyImagePathContent(v volume.Volume, destination string) error { - rootfs, err := symlink.FollowSymlinkInScope(filepath.Join(container.BaseFS, destination), container.BaseFS) - if err != nil { - return err - } - - if _, err = ioutil.ReadDir(rootfs); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - id := stringid.GenerateNonCryptoID() - path, err := v.Mount(id) - if err != nil { - return err - } - - defer func() { - if err := v.Unmount(id); err != nil { - logrus.Warnf("error while unmounting volume %s: %v", v.Name(), err) - } - }() - return copyExistingContents(rootfs, path) -} - -// ShmResourcePath returns path to shm -func (container *Container) ShmResourcePath() (string, error) { - return container.GetRootResourcePath("shm") -} - -// HasMountFor checks if path is a mountpoint -func (container *Container) HasMountFor(path string) bool { - _, exists := container.MountPoints[path] - return exists -} - -// UnmountIpcMounts uses the provided unmount function to unmount shm and mqueue if they were mounted -func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { - if container.HostConfig.IpcMode.IsContainer() || container.HostConfig.IpcMode.IsHost() { - return - } - - var warnings []string - - if !container.HasMountFor("/dev/shm") { - shmPath, err := container.ShmResourcePath() - if err != nil { - logrus.Error(err) - warnings = append(warnings, err.Error()) - } else if shmPath != "" { - if err := unmount(shmPath); err != nil && !os.IsNotExist(err) { - warnings = append(warnings, fmt.Sprintf("failed to umount %s: %v", shmPath, err)) - } - - } - } - - if len(warnings) > 0 { - logrus.Warnf("failed to cleanup ipc mounts:\n%v", strings.Join(warnings, "\n")) - } -} - -// IpcMounts returns the list of IPC mounts -func (container *Container) IpcMounts() []Mount { - var mounts []Mount - - if !container.HasMountFor("/dev/shm") { - label.SetFileLabel(container.ShmPath, container.MountLabel) - mounts = append(mounts, Mount{ - Source: container.ShmPath, - Destination: "/dev/shm", - Writable: true, - Propagation: volume.DefaultPropagationMode, - }) - } - - return mounts -} - -// UpdateContainer updates configuration of a container. 
-func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { - container.Lock() - defer container.Unlock() - - // update resources of container - resources := hostConfig.Resources - cResources := &container.HostConfig.Resources - if resources.BlkioWeight != 0 { - cResources.BlkioWeight = resources.BlkioWeight - } - if resources.CPUShares != 0 { - cResources.CPUShares = resources.CPUShares - } - if resources.CPUPeriod != 0 { - cResources.CPUPeriod = resources.CPUPeriod - } - if resources.CPUQuota != 0 { - cResources.CPUQuota = resources.CPUQuota - } - if resources.CpusetCpus != "" { - cResources.CpusetCpus = resources.CpusetCpus - } - if resources.CpusetMems != "" { - cResources.CpusetMems = resources.CpusetMems - } - if resources.Memory != 0 { - cResources.Memory = resources.Memory - } - if resources.MemorySwap != 0 { - cResources.MemorySwap = resources.MemorySwap - } - if resources.MemoryReservation != 0 { - cResources.MemoryReservation = resources.MemoryReservation - } - if resources.KernelMemory != 0 { - cResources.KernelMemory = resources.KernelMemory - } - - // update HostConfig of container - if hostConfig.RestartPolicy.Name != "" { - container.HostConfig.RestartPolicy = hostConfig.RestartPolicy - } - - if err := container.ToDisk(); err != nil { - logrus.Errorf("Error saving updated container: %v", err) - return err - } - - return nil -} - -func detachMounted(path string) error { - return syscall.Unmount(path, syscall.MNT_DETACH) -} - -// UnmountVolumes unmounts all volumes -func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { - var ( - volumeMounts []volume.MountPoint - err error - ) - - for _, mntPoint := range container.MountPoints { - dest, err := container.GetResourcePath(mntPoint.Destination) - if err != nil { - return err - } - - volumeMounts = append(volumeMounts, volume.MountPoint{Destination: dest, Volume: mntPoint.Volume, ID: mntPoint.ID}) - } - - // Append any network mounts to the list (this is a no-op on Windows) - if volumeMounts, err = appendNetworkMounts(container, volumeMounts); err != nil { - return err - } - - for _, volumeMount := range volumeMounts { - if forceSyscall { - if err := detachMounted(volumeMount.Destination); err != nil { - logrus.Warnf("%s unmountVolumes: Failed to do lazy umount %v", container.ID, err) - } - } - - if volumeMount.Volume != nil { - if err := volumeMount.Volume.Unmount(volumeMount.ID); err != nil { - return err - } - volumeMount.ID = "" - - attributes := map[string]string{ - "driver": volumeMount.Volume.DriverName(), - "container": container.ID, - } - volumeEventLog(volumeMount.Volume.Name(), "unmount", attributes) - } - } - - return nil -} - -// copyExistingContents copies from the source to the destination and -// ensures the ownership is appropriately set. 
-func copyExistingContents(source, destination string) error { - volList, err := ioutil.ReadDir(source) - if err != nil { - return err - } - if len(volList) > 0 { - srcList, err := ioutil.ReadDir(destination) - if err != nil { - return err - } - if len(srcList) == 0 { - // If the source volume is empty, copies files from the root into the volume - if err := chrootarchive.CopyWithTar(source, destination); err != nil { - return err - } - } - } - return copyOwnership(source, destination) -} - -// copyOwnership copies the permissions and uid:gid of the source file -// to the destination file -func copyOwnership(source, destination string) error { - stat, err := system.Stat(source) - if err != nil { - return err - } - - if err := os.Chown(destination, int(stat.UID()), int(stat.GID())); err != nil { - return err - } - - return os.Chmod(destination, os.FileMode(stat.Mode())) -} - -// TmpfsMounts returns the list of tmpfs mounts -func (container *Container) TmpfsMounts() []Mount { - var mounts []Mount - for dest, data := range container.HostConfig.Tmpfs { - mounts = append(mounts, Mount{ - Source: "tmpfs", - Destination: dest, - Data: data, - }) - } - return mounts -} - -// cleanResourcePath cleans a resource path and prepares to combine with mnt path -func cleanResourcePath(path string) string { - return filepath.Join(string(os.PathSeparator), path) -} - -// canMountFS determines if the file system for the container -// can be mounted locally. A no-op on non-Windows platforms -func (container *Container) canMountFS() bool { - return true -} diff --git a/container/container_windows.go b/container/container_windows.go deleted file mode 100644 index 38560bb59a..0000000000 --- a/container/container_windows.go +++ /dev/null @@ -1,109 +0,0 @@ -// +build windows - -package container - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/utils" - "github.com/docker/docker/volume" - containertypes "github.com/docker/engine-api/types/container" -) - -// Container holds fields specific to the Windows implementation. See -// CommonContainer for standard fields common to all containers. -type Container struct { - CommonContainer - - HostnamePath string - HostsPath string - ResolvConfPath string - // Fields below here are platform specific. -} - -// ExitStatus provides exit reasons for a container. -type ExitStatus struct { - // The exit code with which the container exited. - ExitCode int -} - -// CreateDaemonEnvironment creates a new environment variable slice for this container. -func (container *Container) CreateDaemonEnvironment(linkedEnv []string) []string { - // because the env on the container can override certain default values - // we need to replace the 'env' keys where they match and append anything - // else. - return utils.ReplaceOrAppendEnvValues(linkedEnv, container.Config.Env) -} - -// UnmountIpcMounts unmount Ipc related mounts. -// This is a NOOP on windows. -func (container *Container) UnmountIpcMounts(unmount func(pth string) error) { -} - -// IpcMounts returns the list of Ipc related mounts. -func (container *Container) IpcMounts() []Mount { - return nil -} - -// UnmountVolumes explicitly unmounts volumes from the container. 
-func (container *Container) UnmountVolumes(forceSyscall bool, volumeEventLog func(name, action string, attributes map[string]string)) error { - return nil -} - -// TmpfsMounts returns the list of tmpfs mounts -func (container *Container) TmpfsMounts() []Mount { - var mounts []Mount - return mounts -} - -// UpdateContainer updates configuration of a container -func (container *Container) UpdateContainer(hostConfig *containertypes.HostConfig) error { - container.Lock() - defer container.Unlock() - resources := hostConfig.Resources - if resources.BlkioWeight != 0 || resources.CPUShares != 0 || - resources.CPUPeriod != 0 || resources.CPUQuota != 0 || - resources.CpusetCpus != "" || resources.CpusetMems != "" || - resources.Memory != 0 || resources.MemorySwap != 0 || - resources.MemoryReservation != 0 || resources.KernelMemory != 0 { - return fmt.Errorf("Resource updating isn't supported on Windows") - } - // update HostConfig of container - if hostConfig.RestartPolicy.Name != "" { - container.HostConfig.RestartPolicy = hostConfig.RestartPolicy - } - return nil -} - -// appendNetworkMounts appends any network mounts to the array of mount points passed in. -// Windows does not support network mounts (not to be confused with SMB network mounts), so -// this is a no-op. -func appendNetworkMounts(container *Container, volumeMounts []volume.MountPoint) ([]volume.MountPoint, error) { - return volumeMounts, nil -} - -// cleanResourcePath cleans a resource path by removing C:\ syntax, and prepares -// to combine with a volume path -func cleanResourcePath(path string) string { - if len(path) >= 2 { - c := path[0] - if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') { - path = path[2:] - } - } - return filepath.Join(string(os.PathSeparator), path) -} - -// BuildHostnameFile writes the container's hostname file. -func (container *Container) BuildHostnameFile() error { - return nil -} - -// canMountFS determines if the file system for the container -// can be mounted locally. In the case of Windows, this is not possible -// for Hyper-V containers during WORKDIR execution for example. -func (container *Container) canMountFS() bool { - return !containertypes.Isolation.IsHyperV(container.HostConfig.Isolation) -} diff --git a/container/health.go b/container/health.go deleted file mode 100644 index 7a459d8182..0000000000 --- a/container/health.go +++ /dev/null @@ -1,52 +0,0 @@ -package container - -import ( - "github.com/Sirupsen/logrus" - "github.com/docker/engine-api/types" -) - -// Health holds the current container health-check state -type Health struct { - types.Health - stop chan struct{} // Write struct{} to stop the monitor -} - -// String returns a human-readable description of the health-check state -func (s *Health) String() string { - // This happens when the container is being shutdown and the monitor has stopped - // or the monitor has yet to be setup. - if s.stop == nil { - return types.Unhealthy - } - - switch s.Status { - case types.Starting: - return "health: starting" - default: // Healthy and Unhealthy are clear on their own - return s.Status - } -} - -// OpenMonitorChannel creates and returns a new monitor channel. If there already is one, -// it returns nil. -func (s *Health) OpenMonitorChannel() chan struct{} { - if s.stop == nil { - logrus.Debug("OpenMonitorChannel") - s.stop = make(chan struct{}) - return s.stop - } - return nil -} - -// CloseMonitorChannel closes any existing monitor channel. 
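
// The Windows cleanResourcePath above strips a leading drive letter so that
// "C:\foo" and "\foo" resolve to the same in-layer path before being
// re-rooted. A tiny self-contained demonstration of the same transformation;
// note that on a non-Windows host filepath.Join uses "/" and treats
// backslashes as ordinary characters.
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func cleanResourcePath(path string) string {
	if len(path) >= 2 {
		c := path[0]
		if path[1] == ':' && ('a' <= c && c <= 'z' || 'A' <= c && c <= 'Z') {
			path = path[2:]
		}
	}
	return filepath.Join(string(os.PathSeparator), path)
}

func main() {
	fmt.Println(cleanResourcePath(`C:\foo\bar`)) // \foo\bar on Windows
	fmt.Println(cleanResourcePath(`\foo\bar`))   // \foo\bar on Windows
}
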
-func (s *Health) CloseMonitorChannel() {
-	if s.stop != nil {
-		logrus.Debug("CloseMonitorChannel: waiting for probe to stop")
-		// This channel does not buffer. Once the write succeeds, the monitor
-		// has read the stop request and will not make any further updates
-		// to c.State.Health.
-		s.stop <- struct{}{}
-		s.stop = nil
-		logrus.Debug("CloseMonitorChannel done")
-	}
-}
diff --git a/container/history.go b/container/history.go
deleted file mode 100644
index c80c2aa0cc..0000000000
--- a/container/history.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package container
-
-import "sort"
-
-// History is a convenience type for storing a list of containers,
-// sorted by creation date in descending order.
-type History []*Container
-
-// Len returns the number of containers in the history.
-func (history *History) Len() int {
-	return len(*history)
-}
-
-// Less compares two containers and returns true if the second one
-// was created before the first one.
-func (history *History) Less(i, j int) bool {
-	containers := *history
-	return containers[j].Created.Before(containers[i].Created)
-}
-
-// Swap switches containers i and j positions in the history.
-func (history *History) Swap(i, j int) {
-	containers := *history
-	containers[i], containers[j] = containers[j], containers[i]
-}
-
-// sort orders the history by creation date in descending order.
-func (history *History) sort() {
-	sort.Sort(history)
-}
diff --git a/container/memory_store.go b/container/memory_store.go
deleted file mode 100644
index 9fa1165d9a..0000000000
--- a/container/memory_store.go
+++ /dev/null
@@ -1,92 +0,0 @@
-package container
-
-import "sync"
-
-// memoryStore implements a Store in memory.
-type memoryStore struct {
-	s map[string]*Container
-	sync.RWMutex
-}
-
-// NewMemoryStore initializes a new memory store.
-func NewMemoryStore() Store {
-	return &memoryStore{
-		s: make(map[string]*Container),
-	}
-}
-
-// Add appends a new container to the memory store.
-// It overwrites any existing container stored under the same id.
-func (c *memoryStore) Add(id string, cont *Container) {
-	c.Lock()
-	c.s[id] = cont
-	c.Unlock()
-}
-
-// Get returns a container from the store by id.
-func (c *memoryStore) Get(id string) *Container {
-	c.RLock()
-	res := c.s[id]
-	c.RUnlock()
-	return res
-}
-
-// Delete removes a container from the store by id.
-func (c *memoryStore) Delete(id string) {
-	c.Lock()
-	delete(c.s, id)
-	c.Unlock()
-}
-
-// List returns a sorted list of containers from the store.
-// The containers are ordered by creation date.
-func (c *memoryStore) List() []*Container {
-	containers := History(c.all())
-	containers.sort()
-	return containers
-}
-
-// Size returns the number of containers in the store.
-func (c *memoryStore) Size() int {
-	c.RLock()
-	defer c.RUnlock()
-	return len(c.s)
-}
-
-// First returns the first container found in the store by a given filter.
-func (c *memoryStore) First(filter StoreFilter) *Container {
-	for _, cont := range c.all() {
-		if filter(cont) {
-			return cont
-		}
-	}
-	return nil
-}
-
-// ApplyAll calls the reducer function with every container in the store.
-// This operation runs concurrently (one goroutine per container) in the
-// memory store and blocks until every call has returned.
-// NOTE: Modifications to the store MUST NOT be done by the StoreReducer.
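
// History above implements sort.Interface with Less ordering the most
// recently created container first, so sort.Sort yields descending creation
// order. A compact usage sketch with a placeholder container type.
package main

import (
	"fmt"
	"sort"
	"time"
)

type ctr struct {
	ID      string
	Created time.Time
}

type history []*ctr

func (h history) Len() int           { return len(h) }
func (h history) Less(i, j int) bool { return h[j].Created.Before(h[i].Created) }
func (h history) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func main() {
	now := time.Now()
	h := history{
		{ID: "old", Created: now.Add(-time.Hour)},
		{ID: "new", Created: now},
	}
	sort.Sort(h)
	fmt.Println(h[0].ID) // "new": the most recently created sorts first
}
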
-func (c *memoryStore) ApplyAll(apply StoreReducer) {
-	wg := new(sync.WaitGroup)
-	for _, cont := range c.all() {
-		wg.Add(1)
-		go func(container *Container) {
-			apply(container)
-			wg.Done()
-		}(cont)
-	}
-
-	wg.Wait()
-}
-
-func (c *memoryStore) all() []*Container {
-	c.RLock()
-	containers := make([]*Container, 0, len(c.s))
-	for _, cont := range c.s {
-		containers = append(containers, cont)
-	}
-	c.RUnlock()
-	return containers
-}
-
-var _ Store = &memoryStore{}
diff --git a/container/memory_store_test.go b/container/memory_store_test.go
deleted file mode 100644
index f81738fae1..0000000000
--- a/container/memory_store_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package container
-
-import (
-	"testing"
-	"time"
-)
-
-func TestNewMemoryStore(t *testing.T) {
-	s := NewMemoryStore()
-	m, ok := s.(*memoryStore)
-	if !ok {
-		t.Fatalf("store is not a memory store %v", s)
-	}
-	if m.s == nil {
-		t.Fatal("expected store map to not be nil")
-	}
-}
-
-func TestAddContainers(t *testing.T) {
-	s := NewMemoryStore()
-	s.Add("id", NewBaseContainer("id", "root"))
-	if s.Size() != 1 {
-		t.Fatalf("expected store size 1, got %v", s.Size())
-	}
-}
-
-func TestGetContainer(t *testing.T) {
-	s := NewMemoryStore()
-	s.Add("id", NewBaseContainer("id", "root"))
-	c := s.Get("id")
-	if c == nil {
-		t.Fatal("expected container to not be nil")
-	}
-}
-
-func TestDeleteContainer(t *testing.T) {
-	s := NewMemoryStore()
-	s.Add("id", NewBaseContainer("id", "root"))
-	s.Delete("id")
-	if c := s.Get("id"); c != nil {
-		t.Fatalf("expected container to be nil after removal, got %v", c)
-	}
-
-	if s.Size() != 0 {
-		t.Fatalf("expected store size to be 0, got %v", s.Size())
-	}
-}
-
-func TestListContainers(t *testing.T) {
-	s := NewMemoryStore()
-
-	cont := NewBaseContainer("id", "root")
-	cont.Created = time.Now()
-	cont2 := NewBaseContainer("id2", "root")
-	cont2.Created = time.Now().Add(24 * time.Hour)
-
-	s.Add("id", cont)
-	s.Add("id2", cont2)
-
-	list := s.List()
-	if len(list) != 2 {
-		t.Fatalf("expected list size 2, got %v", len(list))
-	}
-	if list[0].ID != "id2" {
-		t.Fatalf("expected the newest container to be first, got %v", list[0].ID)
-	}
-}
-
-func TestFirstContainer(t *testing.T) {
-	s := NewMemoryStore()
-
-	s.Add("id", NewBaseContainer("id", "root"))
-	s.Add("id2", NewBaseContainer("id2", "root"))
-
-	first := s.First(func(cont *Container) bool {
-		return cont.ID == "id2"
-	})
-
-	if first == nil {
-		t.Fatal("expected container to not be nil")
-	}
-	if first.ID != "id2" {
-		t.Fatalf("expected id2, got %v", first.ID)
-	}
-}
-
-func TestApplyAllContainer(t *testing.T) {
-	s := NewMemoryStore()
-
-	s.Add("id", NewBaseContainer("id", "root"))
-	s.Add("id2", NewBaseContainer("id2", "root"))
-
-	s.ApplyAll(func(cont *Container) {
-		if cont.ID == "id2" {
-			cont.ID = "newID"
-		}
-	})
-
-	cont := s.Get("id2")
-	if cont == nil {
-		t.Fatal("expected container to not be nil")
-	}
-	if cont.ID != "newID" {
-		t.Fatalf("expected newID, got %v", cont.ID)
-	}
-}
diff --git a/container/monitor.go b/container/monitor.go
deleted file mode 100644
index 6a7ece654d..0000000000
--- a/container/monitor.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package container
-
-import (
-	"time"
-
-	"github.com/Sirupsen/logrus"
-)
-
-const (
-	loggerCloseTimeout = 10 * time.Second
-)
-
-// Reset puts a container into a state where it can be restarted again.
-func (container *Container) Reset(lock bool) {
-	if lock {
-		container.Lock()
-		defer container.Unlock()
-	}
-
-	if err := container.CloseStreams(); err != nil {
-		logrus.Errorf("%s: %s", container.ID, err)
-	}
-
-	// Re-create a brand-new stdin pipe once the container has exited
-	if container.Config.OpenStdin {
-		container.NewInputPipes()
-	}
-
-	if container.LogDriver != nil {
-		if container.LogCopier != nil {
-			exit := make(chan struct{})
-			go func() {
-				container.LogCopier.Wait()
-				close(exit)
-			}()
-			select {
-			case <-time.After(loggerCloseTimeout):
-				logrus.Warn("Logger didn't exit in time: logs may be truncated")
-			case <-exit:
-			}
-		}
-		container.LogDriver.Close()
-		container.LogCopier = nil
-		container.LogDriver = nil
-	}
-}
diff --git a/container/mounts_unix.go b/container/mounts_unix.go
deleted file mode 100644
index c52abed2dc..0000000000
--- a/container/mounts_unix.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// +build !windows
-
-package container
-
-// Mount contains information for a mount operation.
-type Mount struct {
-	Source      string `json:"source"`
-	Destination string `json:"destination"`
-	Writable    bool   `json:"writable"`
-	Data        string `json:"data"`
-	Propagation string `json:"mountpropagation"`
-}
diff --git a/container/mounts_windows.go b/container/mounts_windows.go
deleted file mode 100644
index 01b327f788..0000000000
--- a/container/mounts_windows.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package container
-
-// Mount contains information for a mount operation.
-type Mount struct {
-	Source      string `json:"source"`
-	Destination string `json:"destination"`
-	Writable    bool   `json:"writable"`
-}
diff --git a/container/state.go b/container/state.go
deleted file mode 100644
index 081d91f41e..0000000000
--- a/container/state.go
+++ /dev/null
@@ -1,310 +0,0 @@
-package container
-
-import (
-	"fmt"
-	"sync"
-	"time"
-
-	"golang.org/x/net/context"
-
-	"github.com/docker/go-units"
-)
-
-// State holds the current container state, and has methods to get and
-// set the state. Container embeds State, which allows all of the
-// functions defined on State to be called on Container as well.
-type State struct {
-	sync.Mutex
-	// FIXME: Why do we have both paused and running if a
-	// container cannot be paused and running at the same time?
-	Running           bool
-	Paused            bool
-	Restarting        bool
-	OOMKilled         bool
-	RemovalInProgress bool // No need for this to be persisted on disk.
-	Dead              bool
-	Pid               int
-	exitCode          int
-	error             string // contains last known error when starting the container
-	StartedAt         time.Time
-	FinishedAt        time.Time
-	waitChan          chan struct{}
-	Health            *Health
-}
-
-// NewState creates a default state object with a fresh channel for state changes.
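
// Reset above bounds how long it waits for the log copier by racing a
// completion channel against time.After. The same wait-with-timeout shape in
// isolation; waitTimeout is illustrative, not part of the original code.
package main

import (
	"fmt"
	"time"
)

// waitTimeout reports whether done was closed before the deadline elapsed.
func waitTimeout(done <-chan struct{}, d time.Duration) bool {
	select {
	case <-done:
		return true
	case <-time.After(d):
		return false
	}
}

func main() {
	done := make(chan struct{})
	go func() {
		time.Sleep(10 * time.Millisecond)
		close(done)
	}()
	fmt.Println(waitTimeout(done, time.Second)) // true: finished in time
}
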
-func NewState() *State {
-	return &State{
-		waitChan: make(chan struct{}),
-	}
-}
-
-// String returns a human-readable description of the state
-func (s *State) String() string {
-	if s.Running {
-		if s.Paused {
-			return fmt.Sprintf("Up %s (Paused)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
-		}
-		if s.Restarting {
-			return fmt.Sprintf("Restarting (%d) %s ago", s.exitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
-		}
-
-		if h := s.Health; h != nil {
-			return fmt.Sprintf("Up %s (%s)", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)), h.String())
-		}
-		return fmt.Sprintf("Up %s", units.HumanDuration(time.Now().UTC().Sub(s.StartedAt)))
-	}
-
-	if s.RemovalInProgress {
-		return "Removal In Progress"
-	}
-
-	if s.Dead {
-		return "Dead"
-	}
-
-	if s.StartedAt.IsZero() {
-		return "Created"
-	}
-
-	if s.FinishedAt.IsZero() {
-		return ""
-	}
-
-	return fmt.Sprintf("Exited (%d) %s ago", s.exitCode, units.HumanDuration(time.Now().UTC().Sub(s.FinishedAt)))
-}
-
-// StateString returns a single string to describe state
-func (s *State) StateString() string {
-	if s.Running {
-		if s.Paused {
-			return "paused"
-		}
-		if s.Restarting {
-			return "restarting"
-		}
-		return "running"
-	}
-
-	if s.Dead {
-		return "dead"
-	}
-
-	if s.StartedAt.IsZero() {
-		return "created"
-	}
-
-	return "exited"
-}
-
-// IsValidStateString checks if the provided string is a valid container state or not.
-func IsValidStateString(s string) bool {
-	if s != "paused" &&
-		s != "restarting" &&
-		s != "running" &&
-		s != "dead" &&
-		s != "created" &&
-		s != "exited" {
-		return false
-	}
-	return true
-}
-
-func wait(waitChan <-chan struct{}, timeout time.Duration) error {
-	if timeout < 0 {
-		<-waitChan
-		return nil
-	}
-	select {
-	case <-time.After(timeout):
-		return fmt.Errorf("Timed out: %v", timeout)
-	case <-waitChan:
-		return nil
-	}
-}
-
-// WaitStop waits until the state is stopped. If the state is already stopped
-// it returns immediately. To wait forever, supply a negative timeout.
-// Returns the exit code that was passed to SetStoppedLocking.
-func (s *State) WaitStop(timeout time.Duration) (int, error) {
-	s.Lock()
-	if !s.Running {
-		exitCode := s.exitCode
-		s.Unlock()
-		return exitCode, nil
-	}
-	waitChan := s.waitChan
-	s.Unlock()
-	if err := wait(waitChan, timeout); err != nil {
-		return -1, err
-	}
-	s.Lock()
-	defer s.Unlock()
-	return s.ExitCode(), nil
-}
-
-// WaitWithContext waits for the container to stop. An optional context can be
-// passed for canceling the request.
-func (s *State) WaitWithContext(ctx context.Context) error {
-	// todo(tonistiigi): make other wait functions use this
-	s.Lock()
-	if !s.Running {
-		state := *s
-		defer s.Unlock()
-		if state.exitCode == 0 {
-			return nil
-		}
-		return &state
-	}
-	waitChan := s.waitChan
-	s.Unlock()
-	select {
-	case <-waitChan:
-		s.Lock()
-		state := *s
-		s.Unlock()
-		if state.exitCode == 0 {
-			return nil
-		}
-		return &state
-	case <-ctx.Done():
-		return ctx.Err()
-	}
-}
-
-// IsRunning returns whether the running flag is set. Used by Container to check whether a container is running.
-func (s *State) IsRunning() bool {
-	s.Lock()
-	res := s.Running
-	s.Unlock()
-	return res
-}
-
-// GetPID returns the process ID of the container.
-func (s *State) GetPID() int {
-	s.Lock()
-	res := s.Pid
-	s.Unlock()
-	return res
-}
-
-// ExitCode returns the current exit code for the state. Take the lock first
-// if the state may be shared.
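
// State above signals "stopped" by closing waitChan, which wakes every
// WaitStop caller at once, and then installs a fresh channel for the next
// run. A minimal reproduction of that broadcast-and-replace idiom; the
// notifier type is illustrative, not part of the original code.
package main

import (
	"fmt"
	"sync"
)

type notifier struct {
	mu sync.Mutex
	ch chan struct{}
}

func newNotifier() *notifier { return &notifier{ch: make(chan struct{})} }

// wait returns the current channel to block on; callers must not cache it
// across fires.
func (n *notifier) wait() <-chan struct{} {
	n.mu.Lock()
	defer n.mu.Unlock()
	return n.ch
}

// fire wakes all current waiters and resets for the next round.
func (n *notifier) fire() {
	n.mu.Lock()
	defer n.mu.Unlock()
	close(n.ch) // a closed channel is immediately readable by every waiter
	n.ch = make(chan struct{})
}

func main() {
	n := newNotifier()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		ch := n.wait()
		go func() { <-ch; wg.Done() }()
	}
	n.fire()
	wg.Wait()
	fmt.Println("all waiters released")
}
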
-func (s *State) ExitCode() int {
-	res := s.exitCode
-	return res
-}
-
-// SetExitCode sets the current exit code for the state. Take the lock first
-// if the state may be shared.
-func (s *State) SetExitCode(ec int) {
-	s.exitCode = ec
-}
-
-// SetRunning sets the state of the container to "running".
-func (s *State) SetRunning(pid int, initial bool) {
-	s.error = ""
-	s.Running = true
-	s.Paused = false
-	s.Restarting = false
-	s.exitCode = 0
-	s.Pid = pid
-	if initial {
-		s.StartedAt = time.Now().UTC()
-	}
-}
-
-// SetStoppedLocking locks the container state and sets it to "stopped".
-func (s *State) SetStoppedLocking(exitStatus *ExitStatus) {
-	s.Lock()
-	s.SetStopped(exitStatus)
-	s.Unlock()
-}
-
-// SetStopped sets the container state to "stopped" without locking.
-func (s *State) SetStopped(exitStatus *ExitStatus) {
-	s.Running = false
-	s.Paused = false
-	s.Restarting = false
-	s.Pid = 0
-	s.FinishedAt = time.Now().UTC()
-	s.setFromExitStatus(exitStatus)
-	close(s.waitChan) // fire waiters for stop
-	s.waitChan = make(chan struct{})
-}
-
-// SetRestartingLocking locks the state and marks the container as restarting.
-// It is used when Docker handles the automatic restart of containers that are
-// in the middle of a stop and are being restarted again.
-func (s *State) SetRestartingLocking(exitStatus *ExitStatus) {
-	s.Lock()
-	s.SetRestarting(exitStatus)
-	s.Unlock()
-}
-
-// SetRestarting sets the container state to "restarting".
-// It also sets the container PID to 0.
-func (s *State) SetRestarting(exitStatus *ExitStatus) {
-	// we should consider the container running when it is restarting because of
-	// all the checks in docker around rm/stop/etc
-	s.Running = true
-	s.Restarting = true
-	s.Pid = 0
-	s.FinishedAt = time.Now().UTC()
-	s.setFromExitStatus(exitStatus)
-	close(s.waitChan) // fire waiters for stop
-	s.waitChan = make(chan struct{})
-}
-
-// SetError sets the container's error state. This is useful for knowing, when
-// inspecting the container, which error occurred while it was transitioning
-// to another state.
-func (s *State) SetError(err error) {
-	s.error = err.Error()
-}
-
-// IsPaused returns whether the container is paused or not.
-func (s *State) IsPaused() bool {
-	s.Lock()
-	res := s.Paused
-	s.Unlock()
-	return res
-}
-
-// IsRestarting returns whether the container is restarting or not.
-func (s *State) IsRestarting() bool {
-	s.Lock()
-	res := s.Restarting
-	s.Unlock()
-	return res
-}
-
-// SetRemovalInProgress sets the container state as being removed.
-// It returns true if the container was already in that state.
-func (s *State) SetRemovalInProgress() bool {
-	s.Lock()
-	defer s.Unlock()
-	if s.RemovalInProgress {
-		return true
-	}
-	s.RemovalInProgress = true
-	return false
-}
-
-// ResetRemovalInProgress sets the RemovalInProgress flag back to false.
-func (s *State) ResetRemovalInProgress() {
-	s.Lock()
-	s.RemovalInProgress = false
-	s.Unlock()
-}
-
-// SetDead sets the container state to "dead"
-func (s *State) SetDead() {
-	s.Lock()
-	s.Dead = true
-	s.Unlock()
-}
-
-// Error returns the current error for the state.
-func (s *State) Error() string {
-	return s.error
-}
diff --git a/container/state_solaris.go b/container/state_solaris.go
deleted file mode 100644
index 9aef1d518e..0000000000
--- a/container/state_solaris.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package container
-
-// setFromExitStatus is a platform specific helper function to set the state
-// based on the ExitStatus structure.
diff --git a/container/state_solaris.go b/container/state_solaris.go deleted file mode 100644 index 9aef1d518e..0000000000 --- a/container/state_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package container - -// setFromExitStatus is a platform specific helper function to set the state -// based on the ExitStatus structure. -func (s *State) setFromExitStatus(exitStatus *ExitStatus) { - s.exitCode = exitStatus.ExitCode -} diff --git a/container/state_test.go b/container/state_test.go deleted file mode 100644 index 83ff1efcb6..0000000000 --- a/container/state_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package container - -import ( - "sync/atomic" - "testing" - "time" -) - -func TestStateRunStop(t *testing.T) { - s := NewState() - for i := 1; i < 3; i++ { // full lifecycle two times - s.Lock() - s.SetRunning(i+100, false) - s.Unlock() - - if !s.IsRunning() { - t.Fatal("State not running") - } - if s.Pid != i+100 { - t.Fatalf("Pid %v, expected %v", s.Pid, i+100) - } - if s.ExitCode() != 0 { - t.Fatalf("ExitCode %v, expected 0", s.ExitCode()) - } - - stopped := make(chan struct{}) - var exit int64 - go func() { - exitCode, _ := s.WaitStop(-1 * time.Second) - atomic.StoreInt64(&exit, int64(exitCode)) - close(stopped) - }() - s.SetStoppedLocking(&ExitStatus{ExitCode: i}) - if s.IsRunning() { - t.Fatal("State is running") - } - if s.ExitCode() != i { - t.Fatalf("ExitCode %v, expected %v", s.ExitCode(), i) - } - if s.Pid != 0 { - t.Fatalf("Pid %v, expected 0", s.Pid) - } - select { - case <-time.After(100 * time.Millisecond): - t.Fatal("Stop callback doesn't fire in 100 milliseconds") - case <-stopped: - t.Log("Stop callback fired") - } - exitCode := int(atomic.LoadInt64(&exit)) - if exitCode != i { - t.Fatalf("ExitCode %v, expected %v", exitCode, i) - } - if exitCode, err := s.WaitStop(-1 * time.Second); err != nil || exitCode != i { - t.Fatalf("WaitStop returned exitCode: %v, err: %v, expected exitCode: %v, err: %v", exitCode, err, i, nil) - } - } -} - -func TestStateTimeoutWait(t *testing.T) { - s := NewState() - stopped := make(chan struct{}) - go func() { - s.WaitStop(100 * time.Millisecond) - close(stopped) - }() - select { - case <-time.After(200 * time.Millisecond): - t.Fatal("Stop callback doesn't fire in 200 milliseconds") - case <-stopped: - t.Log("Stop callback fired") - } - - s.SetStoppedLocking(&ExitStatus{ExitCode: 1}) - - stopped = make(chan struct{}) - go func() { - s.WaitStop(100 * time.Millisecond) - close(stopped) - }() - select { - case <-time.After(200 * time.Millisecond): - t.Fatal("Stop callback doesn't fire in 200 milliseconds") - case <-stopped: - t.Log("Stop callback fired") - } - -} diff --git a/container/state_unix.go b/container/state_unix.go deleted file mode 100644 index f09d015e0b..0000000000 --- a/container/state_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build linux freebsd - -package container - -// setFromExitStatus is a platform specific helper function to set the state -// based on the ExitStatus structure. -func (s *State) setFromExitStatus(exitStatus *ExitStatus) { - s.exitCode = exitStatus.ExitCode - s.OOMKilled = exitStatus.OOMKilled -} diff --git a/container/state_windows.go b/container/state_windows.go deleted file mode 100644 index 9aef1d518e..0000000000 --- a/container/state_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package container - -// setFromExitStatus is a platform specific helper function to set the state -// based on the ExitStatus structure. -func (s *State) setFromExitStatus(exitStatus *ExitStatus) { - s.exitCode = exitStatus.ExitCode -} diff --git a/container/store.go b/container/store.go deleted file mode 100644 index 042fb1a349..0000000000 --- a/container/store.go +++ /dev/null @@ -1,28 +0,0 @@ -package container - -// StoreFilter defines a function to filter -// containers in the store. -type StoreFilter func(*Container) bool - -// StoreReducer defines a function to -// manipulate containers in the store. -type StoreReducer func(*Container) - -// Store defines an interface that -// any container store must implement. -type Store interface { - // Add appends a new container to the store. - Add(string, *Container) - // Get returns a container from the store by the identifier it was stored with. - Get(string) *Container - // Delete removes a container from the store by the identifier it was stored with. - Delete(string) - // List returns a list of containers from the store. - List() []*Container - // Size returns the number of containers in the store. - Size() int - // First returns the first container found in the store by a given filter. - First(StoreFilter) *Container - // ApplyAll calls the reducer function with every container in the store. - ApplyAll(StoreReducer) -}
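The Store contract above is small enough that a map guarded by a mutex satisfies it. A minimal in-memory sketch, written as if it lived in this package; it is hypothetical and deliberately simpler than whatever implementation the daemon actually ships (for example, it holds the read lock across the First and ApplyAll callbacks, which a production store may avoid to prevent re-entrant deadlocks):

```go
package container

import "sync"

// memoryStore is a sketch of a Store backed by a plain map plus a mutex.
type memoryStore struct {
	mu sync.RWMutex
	m  map[string]*Container
}

// NewMemoryStore is a hypothetical constructor for the sketch above.
func NewMemoryStore() Store {
	return &memoryStore{m: make(map[string]*Container)}
}

func (s *memoryStore) Add(id string, c *Container) {
	s.mu.Lock()
	s.m[id] = c
	s.mu.Unlock()
}

func (s *memoryStore) Get(id string) *Container {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.m[id] // nil if the identifier is unknown
}

func (s *memoryStore) Delete(id string) {
	s.mu.Lock()
	delete(s.m, id)
	s.mu.Unlock()
}

func (s *memoryStore) List() []*Container {
	s.mu.RLock()
	defer s.mu.RUnlock()
	list := make([]*Container, 0, len(s.m))
	for _, c := range s.m {
		list = append(list, c)
	}
	return list
}

func (s *memoryStore) Size() int {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return len(s.m)
}

func (s *memoryStore) First(filter StoreFilter) *Container {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, c := range s.m {
		if filter(c) {
			return c
		}
	}
	return nil
}

func (s *memoryStore) ApplyAll(apply StoreReducer) {
	s.mu.RLock()
	defer s.mu.RUnlock()
	for _, c := range s.m {
		apply(c)
	}
}
```

Note that map iteration order is unspecified in Go, so First returns an arbitrary matching container, which is all the interface promises.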
diff --git a/contrib/README.md b/contrib/README.md deleted file mode 100644 index 92b1d94433..0000000000 --- a/contrib/README.md +++ /dev/null @@ -1,4 +0,0 @@ -The `contrib` directory contains scripts, images, and other helpful things -which are not part of the core docker distribution. Please note that they -could be out of date, since they do not receive the same attention as the -rest of the repository. diff --git a/contrib/REVIEWERS b/contrib/REVIEWERS deleted file mode 100644 index 18e05a3070..0000000000 --- a/contrib/REVIEWERS +++ /dev/null @@ -1 +0,0 @@ -Tianon Gravi (@tianon) diff --git a/contrib/apparmor/main.go b/contrib/apparmor/main.go deleted file mode 100644 index f4a2978b86..0000000000 --- a/contrib/apparmor/main.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "fmt" - "log" - "os" - "path" - "text/template" - - "github.com/docker/docker/pkg/aaparser" -) - -type profileData struct { - Version int -} - -func main() { - if len(os.Args) < 2 { - log.Fatal("pass a filename to save the profile in.") - } - - // parse the arg - apparmorProfilePath := os.Args[1] - - version, err := aaparser.GetVersion() - if err != nil { - log.Fatal(err) - } - data := profileData{ - Version: version, - } - fmt.Printf("apparmor_parser is version %d\n", data.Version) - - // parse the template - compiled, err := template.New("apparmor_profile").Parse(dockerProfileTemplate) - if err != nil { - log.Fatalf("parsing template failed: %v", err) - } - - // make sure /etc/apparmor.d exists - if err := os.MkdirAll(path.Dir(apparmorProfilePath), 0755); err != nil { - log.Fatal(err) - } - - f, err := os.OpenFile(apparmorProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - log.Fatal(err) - } - defer f.Close() - - if err := compiled.Execute(f, data); err != nil { - log.Fatalf("executing template failed: %v", err) - } - - fmt.Printf("created apparmor profile for version %d at %q\n", data.Version, apparmorProfilePath) -} diff --git a/contrib/apparmor/template.go b/contrib/apparmor/template.go deleted file mode 100644 index e5e1c8bed6..0000000000 --- a/contrib/apparmor/template.go +++ /dev/null @@ -1,268 +0,0 @@ -package main - -const dockerProfileTemplate = `@{DOCKER_GRAPH_PATH}=/var/lib/docker - -profile /usr/bin/docker (attach_disconnected, complain) { - # Prevent following links to these files during container setup.
- deny /etc/** mkl, - deny /dev/** kl, - deny /sys/** mkl, - deny /proc/** mkl, - - mount -> @{DOCKER_GRAPH_PATH}/**, - mount -> /, - mount -> /proc/**, - mount -> /sys/**, - mount -> /run/docker/netns/**, - mount -> /.pivot_root[0-9]*/, - - / r, - - umount, - pivot_root, -{{if ge .Version 209000}} - signal (receive) peer=@{profile_name}, - signal (receive) peer=unconfined, - signal (send), -{{end}} - network, - capability, - owner /** rw, - @{DOCKER_GRAPH_PATH}/** rwl, - @{DOCKER_GRAPH_PATH}/linkgraph.db k, - @{DOCKER_GRAPH_PATH}/network/files/boltdb.db k, - @{DOCKER_GRAPH_PATH}/network/files/local-kv.db k, - @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/linkgraph.db k, - - # For non-root client use: - /dev/urandom r, - /dev/null rw, - /dev/pts/[0-9]* rw, - /run/docker.sock rw, - /proc/** r, - /proc/[0-9]*/attr/exec w, - /sys/kernel/mm/hugepages/ r, - /etc/localtime r, - /etc/ld.so.cache r, - /etc/passwd r, - -{{if ge .Version 209000}} - ptrace peer=@{profile_name}, - ptrace (read) peer=docker-default, - deny ptrace (trace) peer=docker-default, - deny ptrace peer=/usr/bin/docker///bin/ps, -{{end}} - - /usr/lib/** rm, - /lib/** rm, - - /usr/bin/docker pix, - /sbin/xtables-multi rCx, - /sbin/iptables rCx, - /sbin/modprobe rCx, - /sbin/auplink rCx, - /sbin/mke2fs rCx, - /sbin/tune2fs rCx, - /sbin/blkid rCx, - /bin/kmod rCx, - /usr/bin/xz rCx, - /bin/ps rCx, - /bin/tar rCx, - /bin/cat rCx, - /sbin/zfs rCx, - /sbin/apparmor_parser rCx, - -{{if ge .Version 209000}} - # Transitions - change_profile -> docker-*, - change_profile -> unconfined, -{{end}} - - profile /bin/cat (complain) { - /etc/ld.so.cache r, - /lib/** rm, - /dev/null rw, - /proc r, - /bin/cat mr, - - # For reading in 'docker stats': - /proc/[0-9]*/net/dev r, - } - profile /bin/ps (complain) { - /etc/ld.so.cache r, - /etc/localtime r, - /etc/passwd r, - /etc/nsswitch.conf r, - /lib/** rm, - /proc/[0-9]*/** r, - /dev/null rw, - /bin/ps mr, - -{{if ge .Version 209000}} - # We don't need ptrace so we'll deny and ignore the error. - deny ptrace (read, trace), -{{end}} - - # Quiet dac_override denials - deny capability dac_override, - deny capability dac_read_search, - deny capability sys_ptrace, - - /dev/tty r, - /proc/stat r, - /proc/cpuinfo r, - /proc/meminfo r, - /proc/uptime r, - /sys/devices/system/cpu/online r, - /proc/sys/kernel/pid_max r, - /proc/ r, - /proc/tty/drivers r, - } - profile /sbin/iptables (complain) { -{{if ge .Version 209000}} - signal (receive) peer=/usr/bin/docker, -{{end}} - capability net_admin, - } - profile /sbin/auplink flags=(attach_disconnected, complain) { -{{if ge .Version 209000}} - signal (receive) peer=/usr/bin/docker, -{{end}} - capability sys_admin, - capability dac_override, - - @{DOCKER_GRAPH_PATH}/aufs/** rw, - @{DOCKER_GRAPH_PATH}/tmp/** rw, - # For user namespaces: - @{DOCKER_GRAPH_PATH}/[0-9]*.[0-9]*/** rw, - - /sys/fs/aufs/** r, - /lib/** rm, - /apparmor/.null r, - /dev/null rw, - /etc/ld.so.cache r, - /sbin/auplink rm, - /proc/fs/aufs/** rw, - /proc/[0-9]*/mounts rw, - } - profile /sbin/modprobe /bin/kmod (complain) { -{{if ge .Version 209000}} - signal (receive) peer=/usr/bin/docker, -{{end}} - capability sys_module, - /etc/ld.so.cache r, - /lib/** rm, - /dev/null rw, - /apparmor/.null rw, - /sbin/modprobe rm, - /bin/kmod rm, - /proc/cmdline r, - /sys/module/** r, - /etc/modprobe.d{/,/**} r, - } - # xz works via pipes, so we do not need access to the filesystem. 
- profile /usr/bin/xz (complain) { -{{if ge .Version 209000}} - signal (receive) peer=/usr/bin/docker, -{{end}} - /etc/ld.so.cache r, - /lib/** rm, - /usr/bin/xz rm, - deny /proc/** rw, - deny /sys/** rw, - } - profile /sbin/xtables-multi (attach_disconnected, complain) { - /etc/ld.so.cache r, - /lib/** rm, - /sbin/xtables-multi rm, - /apparmor/.null w, - /dev/null rw, - - /proc r, - - capability net_raw, - capability net_admin, - network raw, - } - profile /sbin/zfs (attach_disconnected, complain) { - file, - capability, - } - profile /sbin/mke2fs (complain) { - /sbin/mke2fs rm, - - /lib/** rm, - - /apparmor/.null w, - - /etc/ld.so.cache r, - /etc/mke2fs.conf r, - /etc/mtab r, - - /dev/dm-* rw, - /dev/urandom r, - /dev/null rw, - - /proc/swaps r, - /proc/[0-9]*/mounts r, - } - profile /sbin/tune2fs (complain) { - /sbin/tune2fs rm, - - /lib/** rm, - - /apparmor/.null w, - - /etc/blkid.conf r, - /etc/mtab r, - /etc/ld.so.cache r, - - /dev/null rw, - /dev/.blkid.tab r, - /dev/dm-* rw, - - /proc/swaps r, - /proc/[0-9]*/mounts r, - } - profile /sbin/blkid (complain) { - /sbin/blkid rm, - - /lib/** rm, - /apparmor/.null w, - - /etc/ld.so.cache r, - /etc/blkid.conf r, - - /dev/null rw, - /dev/.blkid.tab rl, - /dev/.blkid.tab* rwl, - /dev/dm-* r, - - /sys/devices/virtual/block/** r, - - capability mknod, - - mount -> @{DOCKER_GRAPH_PATH}/**, - } - profile /sbin/apparmor_parser (complain) { - /sbin/apparmor_parser rm, - - /lib/** rm, - - /etc/ld.so.cache r, - /etc/apparmor/** r, - /etc/apparmor.d/** r, - /etc/apparmor.d/cache/** w, - - /dev/null rw, - - /sys/kernel/security/apparmor/** r, - /sys/kernel/security/apparmor/.replace w, - - /proc/[0-9]*/mounts r, - /proc/sys/kernel/osrelease r, - /proc r, - - capability mac_admin, - } -}` diff --git a/contrib/builder/deb/amd64/README.md b/contrib/builder/deb/amd64/README.md deleted file mode 100644 index 20a0ff1006..0000000000 --- a/contrib/builder/deb/amd64/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# `dockercore/builder-deb` - -This image's tags contain the dependencies for building Docker `.deb`s for each of the Debian-based platforms Docker targets. - -To add new tags, see [`contrib/builder/deb/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/deb/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/contrib/builder/deb/amd64/build.sh b/contrib/builder/deb/amd64/build.sh deleted file mode 100755 index 8271d9dc47..0000000000 --- a/contrib/builder/deb/amd64/build.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -set -x -./generate.sh -for d in */; do - docker build -t "dockercore/builder-deb:$(basename "$d")" "$d" -done diff --git a/contrib/builder/deb/amd64/debian-jessie/Dockerfile b/contrib/builder/deb/amd64/debian-jessie/Dockerfile deleted file mode 100644 index 42a30b7b98..0000000000 --- a/contrib/builder/deb/amd64/debian-jessie/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
-# - -FROM debian:jessie - -# allow replacing httpredir mirror -ARG APT_MIRROR=httpredir.debian.org -RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/amd64/debian-stretch/Dockerfile b/contrib/builder/deb/amd64/debian-stretch/Dockerfile deleted file mode 100644 index 973ebb0aaa..0000000000 --- a/contrib/builder/deb/amd64/debian-stretch/Dockerfile +++ /dev/null @@ -1,20 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! -# - -FROM debian:stretch - -# allow replacing httpredir mirror -ARG APT_MIRROR=httpredir.debian.org -RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/amd64/debian-wheezy/Dockerfile b/contrib/builder/deb/amd64/debian-wheezy/Dockerfile deleted file mode 100644 index 069bf51d1d..0000000000 --- a/contrib/builder/deb/amd64/debian-wheezy/Dockerfile +++ /dev/null @@ -1,22 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
-# - -FROM debian:wheezy-backports - -# allow replacing httpredir mirror -ARG APT_MIRROR=httpredir.debian.org -RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list -RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list.d/backports.list - -RUN apt-get update && apt-get install -y -t wheezy-backports btrfs-tools --no-install-recommends && rm -rf /var/lib/apt/lists/* -RUN apt-get update && apt-get install -y apparmor bash-completion build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/amd64/generate.sh b/contrib/builder/deb/amd64/generate.sh deleted file mode 100755 index 038b753a4b..0000000000 --- a/contrib/builder/deb/amd64/generate.sh +++ /dev/null @@ -1,147 +0,0 @@ -#!/bin/bash -set -e - -# usage: ./generate.sh [versions] -# ie: ./generate.sh -# to update all Dockerfiles in this directory -# or: ./generate.sh debian-jessie -# to only update debian-jessie/Dockerfile -# or: ./generate.sh debian-newversion -# to create a new folder and a Dockerfile within it - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -versions=( "$@" ) -if [ ${#versions[@]} -eq 0 ]; then - versions=( */ ) -fi -versions=( "${versions[@]%/}" ) - -for version in "${versions[@]}"; do - distro="${version%-*}" - suite="${version##*-}" - from="${distro}:${suite}" - - case "$from" in - debian:wheezy) - # add -backports, like our users have to - from+='-backports' - ;; - esac - - mkdir -p "$version" - echo "$version -> FROM $from" - cat > "$version/Dockerfile" <<-EOF - # - # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
- # - - FROM $from - EOF - - echo >> "$version/Dockerfile" - - if [ "$distro" = "debian" ]; then - cat >> "$version/Dockerfile" <<-'EOF' - # allow replacing httpredir mirror - ARG APT_MIRROR=httpredir.debian.org - RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list - EOF - - if [ "$suite" = "wheezy" ]; then - cat >> "$version/Dockerfile" <<-'EOF' - RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list.d/backports.list - EOF - fi - - echo "" >> "$version/Dockerfile" - fi - - extraBuildTags='pkcs11' - runcBuildTags= - - # this list is sorted alphabetically; please keep it that way - packages=( - apparmor # for apparmor_parser for testing the profile - bash-completion # for bash-completion debhelper integration - btrfs-tools # for "btrfs/ioctl.h" (and "version.h" if possible) - build-essential # "essential for building Debian packages" - curl ca-certificates # for downloading Go - debhelper # for easy ".deb" building - dh-apparmor # for apparmor debhelper - dh-systemd # for systemd debhelper integration - git # for "git commit" info in "docker -v" - libapparmor-dev # for "sys/apparmor.h" - libdevmapper-dev # for "libdevmapper.h" - libltdl-dev # for pkcs11 "ltdl.h" - libseccomp-dev # for "seccomp.h" & "libseccomp.so" - libsqlite3-dev # for "sqlite3.h" - pkg-config # for detecting things like libsystemd-journal dynamically - ) - # packaging for "sd-journal.h" and libraries varies - case "$suite" in - precise|wheezy) ;; - sid|stretch|wily|xenial) packages+=( libsystemd-dev );; - *) packages+=( libsystemd-journal-dev );; - esac - - # debian wheezy & ubuntu precise do not have the right libseccomp libs - # debian jessie & ubuntu trusty have a libseccomp < 2.2.1 :( - case "$suite" in - precise|wheezy|jessie|trusty) - packages=( "${packages[@]/libseccomp-dev}" ) - runcBuildTags="apparmor selinux" - ;; - *) - extraBuildTags+=' seccomp' - runcBuildTags="apparmor seccomp selinux" - ;; - esac - - - if [ "$suite" = 'precise' ]; then - # precise has a few package issues - - # - dh-systemd doesn't exist at all - packages=( "${packages[@]/dh-systemd}" ) - - # - libdevmapper-dev is missing critical structs (too old) - packages=( "${packages[@]/libdevmapper-dev}" ) - extraBuildTags+=' exclude_graphdriver_devicemapper' - - # - btrfs-tools is missing "ioctl.h" (too old), so it's useless - # (since kernels on precise are old too, just skip btrfs entirely) - packages=( "${packages[@]/btrfs-tools}" ) - extraBuildTags+=' exclude_graphdriver_btrfs' - fi - - if [ "$suite" = 'wheezy' ]; then - # pull a couple packages from backports explicitly - # (build failures otherwise) - backportsPackages=( btrfs-tools ) - for pkg in "${backportsPackages[@]}"; do - packages=( "${packages[@]/$pkg}" ) - done - echo "RUN apt-get update && apt-get install -y -t $suite-backports ${backportsPackages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" - fi - - echo "RUN apt-get update && apt-get install -y ${packages[*]} --no-install-recommends && rm -rf /var/lib/apt/lists/*" >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" - echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" - echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - 
# print build tags in alphabetical order - buildTags=$( echo "apparmor selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) - - echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" - echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" -done diff --git a/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile b/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile deleted file mode 100644 index 30004436d8..0000000000 --- a/contrib/builder/deb/amd64/ubuntu-precise/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! -# - -FROM ubuntu:precise - -RUN apt-get update && apt-get install -y apparmor bash-completion build-essential curl ca-certificates debhelper dh-apparmor git libapparmor-dev libltdl-dev libsqlite3-dev pkg-config --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor exclude_graphdriver_btrfs exclude_graphdriver_devicemapper pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile b/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile deleted file mode 100644 index b3af745245..0000000000 --- a/contrib/builder/deb/amd64/ubuntu-trusty/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! -# - -FROM ubuntu:trusty - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev pkg-config libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 selinux -ENV RUNC_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/amd64/ubuntu-wily/Dockerfile b/contrib/builder/deb/amd64/ubuntu-wily/Dockerfile deleted file mode 100644 index a12fa71167..0000000000 --- a/contrib/builder/deb/amd64/ubuntu-wily/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! -# - -FROM ubuntu:wily - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile b/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile deleted file mode 100644 index b114daffb0..0000000000 --- a/contrib/builder/deb/amd64/ubuntu-xenial/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/deb/amd64/generate.sh"! 
-# - -FROM ubuntu:xenial - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libseccomp-dev libsqlite3-dev pkg-config libsystemd-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS apparmor pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS apparmor seccomp selinux diff --git a/contrib/builder/deb/armhf/debian-jessie/Dockerfile b/contrib/builder/deb/armhf/debian-jessie/Dockerfile deleted file mode 100644 index be6eb273c3..0000000000 --- a/contrib/builder/deb/armhf/debian-jessie/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM armhf/debian:jessie - -# allow replacing httpredir mirror -ARG APT_MIRROR=httpredir.debian.org -RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 -ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile b/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile deleted file mode 100644 index 2eae9ce407..0000000000 --- a/contrib/builder/deb/armhf/raspbian-jessie/Dockerfile +++ /dev/null @@ -1,15 +0,0 @@ -FROM resin/rpi-raspbian:jessie - -# allow replacing httpredir mirror -ARG APT_MIRROR=httpredir.debian.org -RUN sed -i s/httpredir.debian.org/$APT_MIRROR/g /etc/apt/sources.list - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.6.3 -ENV GOARM 6 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 -ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile b/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile deleted file mode 100644 index fceefeaf74..0000000000 --- a/contrib/builder/deb/armhf/ubuntu-trusty/Dockerfile +++ /dev/null @@ -1,10 +0,0 @@ -FROM armhf/ubuntu:trusty - -RUN apt-get update && apt-get install -y apparmor bash-completion btrfs-tools build-essential curl ca-certificates debhelper dh-apparmor dh-systemd git libapparmor-dev libdevmapper-dev libltdl-dev libsqlite3-dev libsystemd-journal-dev --no-install-recommends && rm -rf /var/lib/apt/lists/* - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-armv6l.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 -ENV DOCKER_BUILDTAGS apparmor selinux diff --git a/contrib/builder/rpm/amd64/README.md b/contrib/builder/rpm/amd64/README.md deleted file mode 100644 index 5f2e888c7a..0000000000 --- a/contrib/builder/rpm/amd64/README.md +++ 
/dev/null @@ -1,5 +0,0 @@ -# `dockercore/builder-rpm` - -This image's tags contain the dependencies for building Docker `.rpm`s for each of the RPM-based platforms Docker targets. - -To add new tags, see [`contrib/builder/rpm/amd64` in https://github.com/docker/docker](https://github.com/docker/docker/tree/master/contrib/builder/rpm/amd64), specifically the `generate.sh` script, whose usage is described in a comment at the top of the file. diff --git a/contrib/builder/rpm/amd64/build.sh b/contrib/builder/rpm/amd64/build.sh deleted file mode 100755 index 558f7ee0db..0000000000 --- a/contrib/builder/rpm/amd64/build.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -set -x -./generate.sh -for d in */; do - docker build -t "dockercore/builder-rpm:$(basename "$d")" "$d" -done diff --git a/contrib/builder/rpm/amd64/centos-7/Dockerfile b/contrib/builder/rpm/amd64/centos-7/Dockerfile deleted file mode 100644 index 3d066557b8..0000000000 --- a/contrib/builder/rpm/amd64/centos-7/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! -# - -FROM centos:7 - -RUN yum groupinstall -y "Development Tools" -RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs -RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS seccomp selinux - diff --git a/contrib/builder/rpm/amd64/fedora-22/Dockerfile b/contrib/builder/rpm/amd64/fedora-22/Dockerfile deleted file mode 100644 index b415410e6c..0000000000 --- a/contrib/builder/rpm/amd64/fedora-22/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! -# - -FROM fedora:22 - -RUN dnf install -y @development-tools fedora-packager -RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS seccomp selinux - diff --git a/contrib/builder/rpm/amd64/fedora-23/Dockerfile b/contrib/builder/rpm/amd64/fedora-23/Dockerfile deleted file mode 100644 index 424840bd5b..0000000000 --- a/contrib/builder/rpm/amd64/fedora-23/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
-# - -FROM fedora:23 - -RUN dnf install -y @development-tools fedora-packager -RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS seccomp selinux - diff --git a/contrib/builder/rpm/amd64/fedora-24/Dockerfile b/contrib/builder/rpm/amd64/fedora-24/Dockerfile deleted file mode 100644 index 36091399c1..0000000000 --- a/contrib/builder/rpm/amd64/fedora-24/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! -# - -FROM fedora:24 - -RUN dnf install -y @development-tools fedora-packager -RUN dnf install -y btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS seccomp selinux - diff --git a/contrib/builder/rpm/amd64/generate.sh b/contrib/builder/rpm/amd64/generate.sh deleted file mode 100755 index 02306ea53b..0000000000 --- a/contrib/builder/rpm/amd64/generate.sh +++ /dev/null @@ -1,173 +0,0 @@ -#!/bin/bash -set -e - -# usage: ./generate.sh [versions] -# ie: ./generate.sh -# to update all Dockerfiles in this directory -# or: ./generate.sh fedora-23 -# to only update fedora-23/Dockerfile -# or: ./generate.sh fedora-newversion -# to create a new folder and a Dockerfile within it - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -versions=( "$@" ) -if [ ${#versions[@]} -eq 0 ]; then - versions=( */ ) -fi -versions=( "${versions[@]%/}" ) - -for version in "${versions[@]}"; do - distro="${version%-*}" - suite="${version##*-}" - from="${distro}:${suite}" - installer=yum - if [[ "$distro" == "fedora" ]]; then - installer=dnf - fi - - mkdir -p "$version" - echo "$version -> FROM $from" - cat > "$version/Dockerfile" <<-EOF - # - # THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
- # -  - FROM $from - EOF - - echo >> "$version/Dockerfile" - - extraBuildTags='pkcs11' - runcBuildTags= - - case "$from" in - oraclelinux:6) - # We need a known version of the kernel-uek-devel headers to set CGO_CPPFLAGS, so grab the UEKR4 GA version - # This requires using yum-config-manager from yum-utils to enable the UEKR4 yum repo - echo "RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4" >> "$version/Dockerfile" - echo "RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek" >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - ;; - *) ;; - esac - - case "$from" in - centos:*) - # get "Development Tools" packages and dependencies - echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" - - if [[ "$version" == "centos-7" ]]; then - echo 'RUN yum -y swap -- remove systemd-container systemd-container-libs -- install systemd systemd-libs' >> "$version/Dockerfile" - fi - ;; - oraclelinux:*) - # get "Development Tools" packages and dependencies - # we also need yum-utils for yum-config-manager to pull the latest repo file - echo 'RUN yum groupinstall -y "Development Tools"' >> "$version/Dockerfile" - ;; - opensuse:*) - # get rpm-build and curl packages and dependencies - echo 'RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build' >> "$version/Dockerfile" - ;; - *) - echo "RUN ${installer} install -y @development-tools fedora-packager" >> "$version/Dockerfile" - ;; - esac - - # this list is sorted alphabetically; please keep it that way - packages=( - btrfs-progs-devel # for "btrfs/ioctl.h" (and "version.h" if possible) - device-mapper-devel # for "libdevmapper.h" - glibc-static - libseccomp-devel # for "seccomp.h" & "libseccomp.so" - libselinux-devel # for "libselinux.so" - libtool-ltdl-devel # for pkcs11 "ltdl.h" - pkgconfig # for the pkg-config command - selinux-policy - selinux-policy-devel - sqlite-devel # for "sqlite3.h" - systemd-devel # for "sd-journal.h" and libraries - tar # older versions of dev-tools do not have tar - git # required for containerd and runc clone - ) - - case "$from" in - oraclelinux:7) - # Enable the optional repository - packages=( --enablerepo=ol7_optional_latest "${packages[*]}" ) - ;; - esac - - case "$from" in - oraclelinux:6) - # doesn't use systemd, doesn't have a devel package for it - packages=( "${packages[@]/systemd-devel}" ) - ;; - esac - - # opensuse & oraclelinux:6 do not have the right libseccomp libs - case "$from" in - opensuse:*|oraclelinux:6) - packages=( "${packages[@]/libseccomp-devel}" ) - runcBuildTags="selinux" - ;; - *) - extraBuildTags+=' seccomp' - runcBuildTags="seccomp selinux" - ;; - esac - - case "$from" in - opensuse:*) - packages=( "${packages[@]/btrfs-progs-devel/libbtrfs-devel}" ) - packages=( "${packages[@]/pkgconfig/pkg-config}" ) - if [[ "$from" == "opensuse:13."* ]]; then - packages+=( systemd-rpm-macros ) - fi - - # use zypper - echo "RUN zypper --non-interactive install ${packages[*]}" >> "$version/Dockerfile" - ;; - *) - echo "RUN ${installer} install -y ${packages[*]}" >> "$version/Dockerfile" - ;; - esac - - echo >> "$version/Dockerfile" - - - awk '$1 == "ENV" && $2 == "GO_VERSION" { print; exit }' ../../../../Dockerfile >> "$version/Dockerfile" - echo 'RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local' >> "$version/Dockerfile" - echo 'ENV PATH $PATH:/usr/local/go/bin' >> "$version/Dockerfile" - - echo >> 
"$version/Dockerfile" - - echo 'ENV AUTO_GOPATH 1' >> "$version/Dockerfile" - - echo >> "$version/Dockerfile" - - # print build tags in alphabetical order - buildTags=$( echo "selinux $extraBuildTags" | xargs -n1 | sort -n | tr '\n' ' ' | sed -e 's/[[:space:]]*$//' ) - - echo "ENV DOCKER_BUILDTAGS $buildTags" >> "$version/Dockerfile" - echo "ENV RUNC_BUILDTAGS $runcBuildTags" >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - - case "$from" in - oraclelinux:6) - # We need to set the CGO_CPPFLAGS environment to use the updated UEKR4 headers with all the userns stuff. - # The ordering is very important and should not be changed. - echo 'ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \' >> "$version/Dockerfile" - echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \' >> "$version/Dockerfile" - echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \' >> "$version/Dockerfile" - echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \' >> "$version/Dockerfile" - echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \' >> "$version/Dockerfile" - echo ' -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include' >> "$version/Dockerfile" - echo >> "$version/Dockerfile" - ;; - *) ;; - esac - - -done diff --git a/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile b/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile deleted file mode 100644 index 62952a5a19..0000000000 --- a/contrib/builder/rpm/amd64/opensuse-13.2/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! -# - -FROM opensuse:13.2 - -RUN zypper --non-interactive install ca-certificates* curl gzip rpm-build -RUN zypper --non-interactive install libbtrfs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkg-config selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git systemd-rpm-macros - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 selinux -ENV RUNC_BUILDTAGS selinux - diff --git a/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile b/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile deleted file mode 100644 index c47e5ab9a3..0000000000 --- a/contrib/builder/rpm/amd64/oraclelinux-6/Dockerfile +++ /dev/null @@ -1,28 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! 
-# - -FROM oraclelinux:6 - -RUN yum install -y yum-utils && curl -o /etc/yum.repos.d/public-yum-ol6.repo http://yum.oracle.com/public-yum-ol6.repo && yum-config-manager -q --enable ol6_UEKR4 -RUN yum install -y kernel-uek-devel-4.1.12-32.el6uek - -RUN yum groupinstall -y "Development Tools" -RUN yum install -y btrfs-progs-devel device-mapper-devel glibc-static libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel tar git - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 selinux -ENV RUNC_BUILDTAGS selinux - -ENV CGO_CPPFLAGS -D__EXPORTED_HEADERS__ \ - -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/generated/uapi \ - -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/arch/x86/include/uapi \ - -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/generated/uapi \ - -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include/uapi \ - -I/usr/src/kernels/4.1.12-32.el6uek.x86_64/include - diff --git a/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile b/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile deleted file mode 100644 index af6121c826..0000000000 --- a/contrib/builder/rpm/amd64/oraclelinux-7/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -# -# THIS FILE IS AUTOGENERATED; SEE "contrib/builder/rpm/amd64/generate.sh"! -# - -FROM oraclelinux:7 - -RUN yum groupinstall -y "Development Tools" -RUN yum install -y --enablerepo=ol7_optional_latest btrfs-progs-devel device-mapper-devel glibc-static libseccomp-devel libselinux-devel libtool-ltdl-devel pkgconfig selinux-policy selinux-policy-devel sqlite-devel systemd-devel tar git - -ENV GO_VERSION 1.6.3 -RUN curl -fSL "https://storage.googleapis.com/golang/go${GO_VERSION}.linux-amd64.tar.gz" | tar xzC /usr/local -ENV PATH $PATH:/usr/local/go/bin - -ENV AUTO_GOPATH 1 - -ENV DOCKER_BUILDTAGS pkcs11 seccomp selinux -ENV RUNC_BUILDTAGS seccomp selinux - diff --git a/contrib/check-config.sh b/contrib/check-config.sh deleted file mode 100755 index 5fbfa2eb88..0000000000 --- a/contrib/check-config.sh +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/env bash -set -e - -# bits of this were adapted from lxc-checkconfig -# see also https://github.com/lxc/lxc/blob/lxc-1.0.2/src/lxc/lxc-checkconfig.in - -possibleConfigs=( - '/proc/config.gz' - "/boot/config-$(uname -r)" - "/usr/src/linux-$(uname -r)/.config" - '/usr/src/linux/.config' -) - -if [ $# -gt 0 ]; then - CONFIG="$1" -else - : ${CONFIG:="${possibleConfigs[0]}"} -fi - -if ! 
command -v zgrep &> /dev/null; then - zgrep() { - zcat "$2" | grep "$1" - } -fi - -kernelVersion="$(uname -r)" -kernelMajor="${kernelVersion%%.*}" -kernelMinor="${kernelVersion#$kernelMajor.}" -kernelMinor="${kernelMinor%%.*}" - -is_set() { - zgrep "CONFIG_$1=[y|m]" "$CONFIG" > /dev/null -} -is_set_in_kernel() { - zgrep "CONFIG_$1=y" "$CONFIG" > /dev/null -} -is_set_as_module() { - zgrep "CONFIG_$1=m" "$CONFIG" > /dev/null -} - -color() { - local codes=() - if [ "$1" = 'bold' ]; then - codes=( "${codes[@]}" '1' ) - shift - fi - if [ "$#" -gt 0 ]; then - local code= - case "$1" in - # see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors - black) code=30 ;; - red) code=31 ;; - green) code=32 ;; - yellow) code=33 ;; - blue) code=34 ;; - magenta) code=35 ;; - cyan) code=36 ;; - white) code=37 ;; - esac - if [ "$code" ]; then - codes=( "${codes[@]}" "$code" ) - fi - fi - local IFS=';' - echo -en '\033['"${codes[*]}"'m' -} -wrap_color() { - text="$1" - shift - color "$@" - echo -n "$text" - color reset - echo -} - -wrap_good() { - echo "$(wrap_color "$1" white): $(wrap_color "$2" green)" -} -wrap_bad() { - echo "$(wrap_color "$1" bold): $(wrap_color "$2" bold red)" -} -wrap_warning() { - wrap_color >&2 "$*" red -} - -check_flag() { - if is_set_in_kernel "$1"; then - wrap_good "CONFIG_$1" 'enabled' - elif is_set_as_module "$1"; then - wrap_good "CONFIG_$1" 'enabled (as module)' - else - wrap_bad "CONFIG_$1" 'missing' - fi -} - -check_flags() { - for flag in "$@"; do - echo "- $(check_flag "$flag")" - done -} - -check_command() { - if command -v "$1" >/dev/null 2>&1; then - wrap_good "$1 command" 'available' - else - wrap_bad "$1 command" 'missing' - fi -} - -check_device() { - if [ -c "$1" ]; then - wrap_good "$1" 'present' - else - wrap_bad "$1" 'missing' - fi -} - -check_distro_userns() { - source /etc/os-release 2>/dev/null || /bin/true - if [[ "${ID}" =~ ^(centos|rhel)$ && "${VERSION_ID}" =~ ^7 ]]; then - # this is a CentOS7 or RHEL7 system - grep -q "user_namespace.enable=1" /proc/cmdline || { - # no user namespace support enabled - wrap_bad " (RHEL7/CentOS7" "User namespaces disabled; add 'user_namespace.enable=1' to boot command line)" - } - fi -} - -if [ ! -e "$CONFIG" ]; then - wrap_warning "warning: $CONFIG does not exist, searching other paths for kernel config ..." - for tryConfig in "${possibleConfigs[@]}"; do - if [ -e "$tryConfig" ]; then - CONFIG="$tryConfig" - break - fi - done - if [ ! -e "$CONFIG" ]; then - wrap_warning "error: cannot find kernel config" - wrap_warning " try running this script again, specifying the kernel config:" - wrap_warning " CONFIG=/path/to/kernel/.config $0 or $0 /path/to/kernel/.config" - exit 1 - fi -fi - -wrap_color "info: reading kernel config from $CONFIG ..." 
white -echo - -echo 'Generally Necessary:' - -echo -n '- ' -cgroupSubsystemDir="$(awk '/[, ](cpu|cpuacct|cpuset|devices|freezer|memory)[, ]/ && $3 == "cgroup" { print $2 }' /proc/mounts | head -n1)" -cgroupDir="$(dirname "$cgroupSubsystemDir")" -if [ -d "$cgroupDir/cpu" -o -d "$cgroupDir/cpuacct" -o -d "$cgroupDir/cpuset" -o -d "$cgroupDir/devices" -o -d "$cgroupDir/freezer" -o -d "$cgroupDir/memory" ]; then - echo "$(wrap_good 'cgroup hierarchy' 'properly mounted') [$cgroupDir]" -else - if [ "$cgroupSubsystemDir" ]; then - echo "$(wrap_bad 'cgroup hierarchy' 'single mountpoint!') [$cgroupSubsystemDir]" - else - echo "$(wrap_bad 'cgroup hierarchy' 'nonexistent??')" - fi - echo " $(wrap_color '(see https://github.com/tianon/cgroupfs-mount)' yellow)" -fi - -if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then - echo -n '- ' - if command -v apparmor_parser &> /dev/null; then - echo "$(wrap_good 'apparmor' 'enabled and tools installed')" - else - echo "$(wrap_bad 'apparmor' 'enabled, but apparmor_parser missing')" - echo -n ' ' - if command -v apt-get &> /dev/null; then - echo "$(wrap_color '(use "apt-get install apparmor" to fix this)')" - elif command -v yum &> /dev/null; then - echo "$(wrap_color '(your best bet is "yum install apparmor-parser")')" - else - echo "$(wrap_color '(look for an "apparmor" package for your distribution)')" - fi - fi -fi - -flags=( - NAMESPACES {NET,PID,IPC,UTS}_NS - DEVPTS_MULTIPLE_INSTANCES - CGROUPS CGROUP_CPUACCT CGROUP_DEVICE CGROUP_FREEZER CGROUP_SCHED CPUSETS MEMCG - KEYS - VETH BRIDGE BRIDGE_NETFILTER - NF_NAT_IPV4 IP_NF_FILTER IP_NF_TARGET_MASQUERADE - NETFILTER_XT_MATCH_{ADDRTYPE,CONNTRACK} - NF_NAT NF_NAT_NEEDED - - # required for bind-mounting /dev/mqueue into containers - POSIX_MQUEUE -) -check_flags "${flags[@]}" -echo - -echo 'Optional Features:' -{ - check_flags USER_NS - check_distro_userns -} -{ - check_flags SECCOMP -} -{ - check_flags CGROUP_PIDS -} -{ - check_flags MEMCG_SWAP MEMCG_SWAP_ENABLED - if is_set MEMCG_SWAP && ! is_set MEMCG_SWAP_ENABLED; then - echo " $(wrap_color '(note that cgroup swap accounting is not enabled in your kernel config, you can enable it by setting boot option "swapaccount=1")' bold black)" - fi -} - -if [ "$kernelMajor" -lt 4 ] || [ "$kernelMajor" -eq 4 -a "$kernelMinor" -le 5 ]; then - check_flags MEMCG_KMEM -fi - -if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 18 ]; then - check_flags RESOURCE_COUNTERS -fi - -if [ "$kernelMajor" -lt 3 ] || [ "$kernelMajor" -eq 3 -a "$kernelMinor" -le 13 ]; then - netprio=NETPRIO_CGROUP -else - netprio=CGROUP_NET_PRIO -fi - -flags=( - BLK_CGROUP BLK_DEV_THROTTLING IOSCHED_CFQ CFQ_GROUP_IOSCHED - CGROUP_PERF - CGROUP_HUGETLB - NET_CLS_CGROUP $netprio - CFS_BANDWIDTH FAIR_GROUP_SCHED RT_GROUP_SCHED - IP_VS -) -check_flags "${flags[@]}" - -check_flags EXT3_FS EXT3_FS_XATTR EXT3_FS_POSIX_ACL EXT3_FS_SECURITY -if ! is_set EXT3_FS || ! is_set EXT3_FS_XATTR || ! is_set EXT3_FS_POSIX_ACL || ! is_set EXT3_FS_SECURITY; then - echo " $(wrap_color '(enable these ext3 configs if you are using ext3 as backing filesystem)' bold black)" -fi - -check_flags EXT4_FS EXT4_FS_POSIX_ACL EXT4_FS_SECURITY -if ! is_set EXT4_FS || ! is_set EXT4_FS_POSIX_ACL || ! 
is_set EXT4_FS_SECURITY; then - echo " $(wrap_color 'enable these ext4 configs if you are using ext4 as backing filesystem' bold black)" -fi - -echo '- Network Drivers:' -{ - echo '- "'$(wrap_color 'overlay' blue)'":' - check_flags VXLAN | sed 's/^/ /' - echo ' Optional (for secure networks):' - check_flags XFRM_ALGO XFRM_USER | sed 's/^/ /' - echo '- "'$(wrap_color 'ipvlan' blue)'":' - check_flags IPVLAN | sed 's/^/ /' - echo '- "'$(wrap_color 'macvlan' blue)'":' - check_flags MACVLAN DUMMY | sed 's/^/ /' -} | sed 's/^/ /' - -echo '- Storage Drivers:' -{ - echo '- "'$(wrap_color 'aufs' blue)'":' - check_flags AUFS_FS | sed 's/^/ /' - if ! is_set AUFS_FS && grep -q aufs /proc/filesystems; then - echo " $(wrap_color '(note that some kernels include AUFS patches but not the AUFS_FS flag)' bold black)" - fi - - echo '- "'$(wrap_color 'btrfs' blue)'":' - check_flags BTRFS_FS | sed 's/^/ /' - - echo '- "'$(wrap_color 'devicemapper' blue)'":' - check_flags BLK_DEV_DM DM_THIN_PROVISIONING | sed 's/^/ /' - - echo '- "'$(wrap_color 'overlay' blue)'":' - check_flags OVERLAY_FS | sed 's/^/ /' - - echo '- "'$(wrap_color 'zfs' blue)'":' - echo " - $(check_device /dev/zfs)" - echo " - $(check_command zfs)" - echo " - $(check_command zpool)" -} | sed 's/^/ /' -echo - -check_limit_over() -{ - if [ $(cat "$1") -le "$2" ]; then - wrap_bad "- $1" "$(cat $1)" - wrap_color " This should be set to at least $2, for example set: sysctl -w kernel/keys/root_maxkeys=1000000" bold black - else - wrap_good "- $1" "$(cat $1)" - fi -} - -echo 'Limits:' -check_limit_over /proc/sys/kernel/keys/root_maxkeys 10000 -echo diff --git a/contrib/completion/REVIEWERS b/contrib/completion/REVIEWERS deleted file mode 100644 index 03ee2dde3d..0000000000 --- a/contrib/completion/REVIEWERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Jessie Frazelle (@jfrazelle) diff --git a/contrib/completion/bash/docker b/contrib/completion/bash/docker deleted file mode 100644 index 43a5fbb22c..0000000000 --- a/contrib/completion/bash/docker +++ /dev/null @@ -1,2980 +0,0 @@ -#!/bin/bash -# -# bash completion file for core docker commands -# -# This script provides completion of: -# - commands and their options -# - container ids and names -# - image repos and tags -# - filepaths -# -# To enable the completions either: -# - place this file in /etc/bash_completion.d -# or -# - copy this file to e.g. ~/.docker-completion.sh and add the line -# below to your .bashrc after bash completion features are loaded -# . ~/.docker-completion.sh -# -# Configuration: -# -# For several commands, the amount of completions can be configured by -# setting environment variables. 
-# -# DOCKER_COMPLETION_SHOW_NETWORK_IDS -# DOCKER_COMPLETION_SHOW_NODE_IDS -# DOCKER_COMPLETION_SHOW_SERVICE_IDS -# "no" - Show names only (default) -# "yes" - Show names and ids -# -# You can tailor completion for the "events", "history", "inspect", "run", -# "rmi" and "save" commands by setting the following environment -# variables: -# -# DOCKER_COMPLETION_SHOW_IMAGE_IDS -# "none" - Show names only (default) -# "non-intermediate" - Show names and ids, but omit intermediate image IDs -# "all" - Show names and ids, including intermediate image IDs -# -# DOCKER_COMPLETION_SHOW_TAGS -# "yes" - include tags in completion options (default) -# "no" - don't include tags in completion options - -# -# Note: -# Currently, the completions will not work if the docker daemon is not -# bound to the default communication port/socket. -# If the docker daemon is using a unix socket for communication, your user -# must have access to the socket for the completions to function correctly. -# -# Note for developers: -# Please arrange options sorted alphabetically by long name with the short -# options immediately following their corresponding long form. -# This order should be applied to lists, alternatives and code blocks. - -__docker_previous_extglob_setting=$(shopt -p extglob) -shopt -s extglob - -__docker_q() { - docker ${host:+-H "$host"} ${config:+--config "$config"} 2>/dev/null "$@" -} - -__docker_complete_containers_all() { - local IFS=$'\n' - local containers=( $(__docker_q ps -aq --no-trunc) ) - if [ "$1" ]; then - containers=( $(__docker_q inspect --format "{{if $1}}{{.Id}}{{end}}" "${containers[@]}") ) - fi - local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) - names=( "${names[@]#/}" ) # trim off the leading "/" from the container names - unset IFS - COMPREPLY=( $(compgen -W "${names[*]} ${containers[*]}" -- "$cur") ) -} - -__docker_complete_containers_running() { - __docker_complete_containers_all '.State.Running' -} - -__docker_complete_containers_stopped() { - __docker_complete_containers_all 'not .State.Running' -} - -__docker_complete_containers_pauseable() { - __docker_complete_containers_all 'and .State.Running (not .State.Paused)' -} - -__docker_complete_containers_unpauseable() { - __docker_complete_containers_all '.State.Paused' -} - -__docker_complete_container_names() { - local containers=( $(__docker_q ps -aq --no-trunc) ) - local names=( $(__docker_q inspect --format '{{.Name}}' "${containers[@]}") ) - names=( "${names[@]#/}" ) # trim off the leading "/" from the container names - COMPREPLY=( $(compgen -W "${names[*]}" -- "$cur") ) -} - -__docker_complete_container_ids() { - local containers=( $(__docker_q ps -aq) ) - COMPREPLY=( $(compgen -W "${containers[*]}" -- "$cur") ) -} - -__docker_complete_images() { - local images_args="" - - case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in - all) - images_args="--no-trunc -a" - ;; - non-intermediate) - images_args="--no-trunc" - ;; - esac - - local repo_print_command - if [ "${DOCKER_COMPLETION_SHOW_TAGS:-yes}" = "yes" ]; then - repo_print_command='print $1; print $1":"$2' - else - repo_print_command='print $1' - fi - - local awk_script - case "$DOCKER_COMPLETION_SHOW_IMAGE_IDS" in - all|non-intermediate) - awk_script='NR>1 { print $3; if ($1 != "") { '"$repo_print_command"' } }' - ;; - none|*) - awk_script='NR>1 && $1 != "" { '"$repo_print_command"' }' - ;; - esac - - local images=$(__docker_q images $images_args | awk "$awk_script") - COMPREPLY=( $(compgen -W "$images" -- "$cur") ) - __ltrim_colon_completions "$cur" 
-} - -__docker_complete_image_repos() { - local repos="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1 }')" - COMPREPLY=( $(compgen -W "$repos" -- "$cur") ) -} - -__docker_complete_image_repos_and_tags() { - local reposAndTags="$(__docker_q images | awk 'NR>1 && $1 != "" { print $1; print $1":"$2 }')" - COMPREPLY=( $(compgen -W "$reposAndTags" -- "$cur") ) - __ltrim_colon_completions "$cur" -} - -__docker_complete_containers_and_images() { - __docker_complete_containers_all - local containers=( "${COMPREPLY[@]}" ) - __docker_complete_images - COMPREPLY+=( "${containers[@]}" ) -} - -# Returns the names and optionally IDs of networks. -# The selection can be narrowed by an optional filter parameter, e.g. 'type=custom' -__docker_networks() { - local filter="$1" - # By default, only network names are completed. - # Set DOCKER_COMPLETION_SHOW_NETWORK_IDS=yes to also complete network IDs. - local fields='$2' - [ "${DOCKER_COMPLETION_SHOW_NETWORK_IDS}" = yes ] && fields='$1,$2' - __docker_q network ls --no-trunc ${filter:+-f "$filter"} | awk "NR>1 {print $fields}" - #__docker_q network ls --no-trunc | awk "NR>1 {print $fields}" -} - -__docker_complete_networks() { - COMPREPLY=( $(compgen -W "$(__docker_networks $@)" -- "$cur") ) -} - -__docker_complete_network_ids() { - COMPREPLY=( $(compgen -W "$(__docker_q network ls -q --no-trunc)" -- "$cur") ) -} - -__docker_complete_network_names() { - COMPREPLY=( $(compgen -W "$(__docker_q network ls | awk 'NR>1 {print $2}')" -- "$cur") ) -} - -__docker_complete_containers_in_network() { - local containers=$(__docker_q network inspect -f '{{range $i, $c := .Containers}}{{$i}} {{$c.Name}} {{end}}' "$1") - COMPREPLY=( $(compgen -W "$containers" -- "$cur") ) -} - -__docker_complete_volumes() { - COMPREPLY=( $(compgen -W "$(__docker_q volume ls -q)" -- "$cur") ) -} - -__docker_plugins() { - __docker_q info | sed -n "/^Plugins/,/^[^ ]/s/ $1: //p" -} - -__docker_complete_plugins() { - COMPREPLY=( $(compgen -W "$(__docker_plugins $1)" -- "$cur") ) -} - -__docker_runtimes() { - __docker_q info | sed -n 's/^Runtimes: \(.*\)/\1/p' -} - -__docker_complete_runtimes() { - COMPREPLY=( $(compgen -W "$(__docker_runtimes)" -- "$cur") ) -} - -# Returns a list of all nodes. Additional arguments to `docker node` -# may be specified in order to filter the node list, e.g. -# `__docker_nodes --filter role=manager` -# By default, only node names are completed. -# Set DOCKER_COMPLETION_SHOW_NODE_IDS=yes to also complete node IDs. -# An optional first argument `--id|--name` may be used to limit -# the output to the IDs or names of matching nodes. This setting takes -# precedence over the environment setting. -__docker_nodes() { - local fields='$2' # default: node name only - [ "${DOCKER_COMPLETION_SHOW_NODE_IDS}" = yes ] && fields='$1,$2' # ID and name - - if [ "$1" = "--id" ] ; then - fields='$1' # IDs only - shift - elif [ "$1" = "--name" ] ; then - fields='$2' # names only - shift - fi - __docker_q node ls "$@" | tr -d '*' | awk "NR>1 {print $fields}" -} - -# Applies completion of nodes based on the current value of `$cur` or -# the value of the optional first argument `--cur`, if given. -# Additional filters may be appended, see `__docker_nodes`. -__docker_complete_nodes() { - local current=$cur - if [ "$1" = "--cur" ] ; then - current="$2" - shift 2 - fi - COMPREPLY=( $(compgen -W "$(__docker_nodes "$@")" -- "$current") ) -} - -__docker_complete_nodes_plus_self() { - __docker_complete_nodes "$@" - COMPREPLY+=( self ) -} - -# Returns a list of all services. 
Additional arguments to `docker service ls`
-# may be specified in order to filter the service list, e.g.
-# `__docker_services --filter name=xxx`
-# By default, only service names are completed.
-# Set DOCKER_COMPLETION_SHOW_SERVICE_IDS=yes to also complete service IDs.
-# An optional first argument `--id|--name` may be used to limit
-# the output to the IDs or names of matching services. This setting takes
-# precedence over the environment setting.
-__docker_services() {
- local fields='$2' # default: service name only
- [ "${DOCKER_COMPLETION_SHOW_SERVICE_IDS}" = yes ] && fields='$1,$2' # ID & name
-
- if [ "$1" = "--id" ] ; then
- fields='$1' # IDs only
- shift
- elif [ "$1" = "--name" ] ; then
- fields='$2' # names only
- shift
- fi
- __docker_q service ls "$@" | awk "NR>1 {print $fields}"
-}
-
-# Applies completion of services based on the current value of `$cur` or
-# the value of the optional first argument `--cur`, if given.
-# Additional filters may be appended, see `__docker_services`.
-__docker_complete_services() {
- local current=$cur
- if [ "$1" = "--cur" ] ; then
- current="$2"
- shift 2
- fi
- COMPREPLY=( $(compgen -W "$(__docker_services "$@")" -- "$current") )
-}
-
-# Appends the word passed as an argument to every word in `$COMPREPLY`.
-# Normally you do this with `compgen -S`. This function exists so that you can use
-# the __docker_complete_XXX functions in cases where you need a suffix.
-__docker_append_to_completions() {
- COMPREPLY=( ${COMPREPLY[@]/%/"$1"} )
-}
-
-# Finds the position of the first word that is neither option nor an option's argument.
-# If there are options that require arguments, you should pass a glob describing those
-# options, e.g. "--option1|-o|--option2"
-# Use this function to restrict completions to exact positions after the argument list.
-__docker_pos_first_nonflag() {
- local argument_flags=$1
-
- local counter=$((${subcommand_pos:-${command_pos}} + 1))
- while [ $counter -le $cword ]; do
- if [ -n "$argument_flags" ] && eval "case '${words[$counter]}' in $argument_flags) true ;; *) false ;; esac"; then
- (( counter++ ))
- # eat "=" in case of --option=arg syntax
- [ "${words[$counter]}" = "=" ] && (( counter++ ))
- else
- case "${words[$counter]}" in
- -*)
- ;;
- *)
- break
- ;;
- esac
- fi
-
- # Bash splits words at "=", retaining "=" as a word, examples:
- # "--debug=false" => 3 words, "--log-opt syslog-facility=daemon" => 4 words
- while [ "${words[$counter + 1]}" = "=" ] ; do
- counter=$(( counter + 2))
- done
-
- (( counter++ ))
- done
-
- echo $counter
-}
-
-# If we are currently completing the value of a map option (key=value)
-# which matches the extglob given as an argument, returns key.
-# This function is needed for key-specific completions.
-__docker_map_key_of_current_option() {
- local glob="$1"
-
- local key glob_pos
- if [ "$cur" = "=" ] ; then # key= case
- key="$prev"
- glob_pos=$((cword - 2))
- elif [[ $cur == *=* ]] ; then # key=value case (OSX)
- key=${cur%=*}
- glob_pos=$((cword - 1))
- elif [ "$prev" = "=" ] ; then
- key=${words[$cword - 2]} # key=value case
- glob_pos=$((cword - 3))
- else
- return
- fi
-
- [ "${words[$glob_pos]}" = "=" ] && ((glob_pos--)) # --option=key=value syntax
-
- [[ ${words[$glob_pos]} == @($glob) ]] && echo "$key"
-}
-
-# Returns the value of the first option matching option_glob.
-# Valid values for option_glob are option names like '--log-level' and
-# globs like '--log-level|-l'
-# Only positions between the command and the current word are considered.
-__docker_value_of_option() {
- local option_extglob=$(__docker_to_extglob "$1")
-
- local counter=$((command_pos + 1))
- while [ $counter -lt $cword ]; do
- case ${words[$counter]} in
- $option_extglob )
- echo ${words[$counter + 1]}
- break
- ;;
- esac
- (( counter++ ))
- done
-}
-
-# Transforms a multiline list of strings into a single line string
-# with the words separated by "|".
-# This is used to prepare arguments to __docker_pos_first_nonflag().
-__docker_to_alternatives() {
- local parts=( $1 )
- local IFS='|'
- echo "${parts[*]}"
-}
-
-# Transforms a multiline list of options into an extglob pattern
-# suitable for use in case statements.
-__docker_to_extglob() {
- local extglob=$( __docker_to_alternatives "$1" )
- echo "@($extglob)"
-}
-
-# Subcommand processing.
-# Locates the first occurrence of any of the subcommands contained in the
-# first argument. In case of a match, calls the corresponding completion
-# function and returns 0.
-# If no match is found, 1 is returned. The calling function can then
-# continue processing its completion.
-#
-# TODO if the preceding command has options that accept arguments and an
-# argument is equal to one of the subcommands, this is falsely detected as
-# a match.
-__docker_subcommands() {
- local subcommands="$1"
-
- local counter=$(($command_pos + 1))
- while [ $counter -lt $cword ]; do
- case "${words[$counter]}" in
- $(__docker_to_extglob "$subcommands") )
- subcommand_pos=$counter
- local subcommand=${words[$counter]}
- local completions_func=_docker_${command}_${subcommand}
- declare -F $completions_func >/dev/null && $completions_func
- return 0
- ;;
- esac
- (( counter++ ))
- done
- return 1
-}
-
-# suppress trailing whitespace
-__docker_nospace() {
- # compopt is not available in ancient bash versions
- type compopt &>/dev/null && compopt -o nospace
-}
-
-__docker_complete_resolved_hostname() {
- command -v host >/dev/null 2>&1 || return
- COMPREPLY=( $(host 2>/dev/null "${cur%:}" | awk '/has address/ {print $4}') )
-}
-
-__docker_complete_capabilities() {
- # The list of capabilities is defined in types.go, ALL was added manually.
- COMPREPLY=( $( compgen -W " - ALL - AUDIT_CONTROL - AUDIT_WRITE - AUDIT_READ - BLOCK_SUSPEND - CHOWN - DAC_OVERRIDE - DAC_READ_SEARCH - FOWNER - FSETID - IPC_LOCK - IPC_OWNER - KILL - LEASE - LINUX_IMMUTABLE - MAC_ADMIN - MAC_OVERRIDE - MKNOD - NET_ADMIN - NET_BIND_SERVICE - NET_BROADCAST - NET_RAW - SETFCAP - SETGID - SETPCAP - SETUID - SYS_ADMIN - SYS_BOOT - SYS_CHROOT - SYSLOG - SYS_MODULE - SYS_NICE - SYS_PACCT - SYS_PTRACE - SYS_RAWIO - SYS_RESOURCE - SYS_TIME - SYS_TTY_CONFIG - WAKE_ALARM - " -- "$cur" ) ) -} - -__docker_complete_detach-keys() { - case "$prev" in - --detach-keys) - case "$cur" in - *,) - COMPREPLY=( $( compgen -W "${cur}ctrl-" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "ctrl-" -- "$cur" ) ) - ;; - esac - - __docker_nospace - return - ;; - esac - return 1 -} - -__docker_complete_isolation() { - COMPREPLY=( $( compgen -W "default hyperv process" -- "$cur" ) ) -} - -__docker_complete_log_drivers() { - COMPREPLY=( $( compgen -W " - awslogs - etwlogs - fluentd - gcplogs - gelf - journald - json-file - none - splunk - syslog - " -- "$cur" ) ) -} - -__docker_complete_log_options() { - # see docs/reference/logging/index.md - local awslogs_options="awslogs-region awslogs-group awslogs-stream" - local fluentd_options="env fluentd-address fluentd-async-connect fluentd-buffer-limit fluentd-retry-wait fluentd-max-retries labels tag" - local gcplogs_options="env gcp-log-cmd gcp-project labels" - local gelf_options="env gelf-address gelf-compression-level gelf-compression-type labels tag" - local journald_options="env labels tag" - local json_file_options="env labels max-file max-size" - local syslog_options="env labels syslog-address syslog-facility syslog-format syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify tag" - local splunk_options="env labels splunk-caname splunk-capath splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url tag" - - local all_options="$fluentd_options $gcplogs_options $gelf_options $journald_options $json_file_options $syslog_options $splunk_options" - - case $(__docker_value_of_option --log-driver) in - '') - COMPREPLY=( $( compgen -W "$all_options" -S = -- "$cur" ) ) - ;; - awslogs) - COMPREPLY=( $( compgen -W "$awslogs_options" -S = -- "$cur" ) ) - ;; - fluentd) - COMPREPLY=( $( compgen -W "$fluentd_options" -S = -- "$cur" ) ) - ;; - gcplogs) - COMPREPLY=( $( compgen -W "$gcplogs_options" -S = -- "$cur" ) ) - ;; - gelf) - COMPREPLY=( $( compgen -W "$gelf_options" -S = -- "$cur" ) ) - ;; - journald) - COMPREPLY=( $( compgen -W "$journald_options" -S = -- "$cur" ) ) - ;; - json-file) - COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) ) - ;; - syslog) - COMPREPLY=( $( compgen -W "$syslog_options" -S = -- "$cur" ) ) - ;; - splunk) - COMPREPLY=( $( compgen -W "$splunk_options" -S = -- "$cur" ) ) - ;; - *) - return - ;; - esac - - __docker_nospace -} - -__docker_complete_log_driver_options() { - local key=$(__docker_map_key_of_current_option '--log-opt') - case "$key" in - fluentd-async-connect) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - gelf-address) - COMPREPLY=( $( compgen -W "udp" -S "://" -- "${cur##*=}" ) ) - __docker_nospace - return - ;; - gelf-compression-level) - COMPREPLY=( $( compgen -W "1 2 3 4 5 6 7 8 9" -- "${cur##*=}" ) ) - return - ;; - gelf-compression-type) - COMPREPLY=( $( compgen -W "gzip none zlib" -- "${cur##*=}" ) ) - return - ;; - syslog-address) - COMPREPLY=( $( compgen -W "tcp:// tcp+tls:// udp:// unix://" -- 
"${cur##*=}" ) ) - __docker_nospace - __ltrim_colon_completions "${cur}" - return - ;; - syslog-facility) - COMPREPLY=( $( compgen -W " - auth - authpriv - cron - daemon - ftp - kern - local0 - local1 - local2 - local3 - local4 - local5 - local6 - local7 - lpr - mail - news - syslog - user - uucp - " -- "${cur##*=}" ) ) - return - ;; - syslog-format) - COMPREPLY=( $( compgen -W "rfc3164 rfc5424 rfc5424micro" -- "${cur##*=}" ) ) - return - ;; - syslog-tls-ca-cert|syslog-tls-cert|syslog-tls-key) - _filedir - return - ;; - syslog-tls-skip-verify) - COMPREPLY=( $( compgen -W "true" -- "${cur##*=}" ) ) - return - ;; - splunk-url) - COMPREPLY=( $( compgen -W "http:// https://" -- "${cur##*=}" ) ) - __docker_nospace - __ltrim_colon_completions "${cur}" - return - ;; - splunk-insecureskipverify) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - esac - return 1 -} - -__docker_complete_log_levels() { - COMPREPLY=( $( compgen -W "debug info warn error fatal" -- "$cur" ) ) -} - -__docker_complete_restart() { - case "$prev" in - --restart) - case "$cur" in - on-failure:*) - ;; - *) - COMPREPLY=( $( compgen -W "always no on-failure on-failure: unless-stopped" -- "$cur") ) - ;; - esac - return - ;; - esac - return 1 -} - -# a selection of the available signals that is most likely of interest in the -# context of docker containers. -__docker_complete_signals() { - local signals=( - SIGCONT - SIGHUP - SIGINT - SIGKILL - SIGQUIT - SIGSTOP - SIGTERM - SIGUSR1 - SIGUSR2 - ) - COMPREPLY=( $( compgen -W "${signals[*]} ${signals[*]#SIG}" -- "$( echo $cur | tr '[:lower:]' '[:upper:]')" ) ) -} - -__docker_complete_user_group() { - if [[ $cur == *:* ]] ; then - COMPREPLY=( $(compgen -g -- "${cur#*:}") ) - else - COMPREPLY=( $(compgen -u -S : -- "$cur") ) - __docker_nospace - fi -} - -# global options that may appear after the docker command -_docker_docker() { - local boolean_options=" - $global_boolean_options - --help - --version -v - " - - case "$prev" in - --config) - _filedir -d - return - ;; - --log-level|-l) - __docker_complete_log_levels - return - ;; - $(__docker_to_extglob "$global_options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $global_options_with_args" -- "$cur" ) ) - ;; - *) - local counter=$( __docker_pos_first_nonflag "$(__docker_to_extglob "$global_options_with_args")" ) - if [ $cword -eq $counter ]; then - COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) ) - fi - ;; - esac -} - -_docker_attach() { - __docker_complete_detach-keys && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--detach-keys --help --no-stdin --sig-proxy=false" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--detach-keys') - if [ $cword -eq $counter ]; then - __docker_complete_containers_running - fi - ;; - esac -} - -_docker_build() { - local options_with_args=" - --build-arg - --cgroup-parent - --cpuset-cpus - --cpuset-mems - --cpu-shares -c - --cpu-period - --cpu-quota - --file -f - --isolation - --label - --memory -m - --memory-swap - --shm-size - --tag -t - --ulimit - " - - local boolean_options=" - --disable-content-trust=false - --force-rm - --help - --no-cache - --pull - --quiet -q - --rm - " - - local all_options="$options_with_args $boolean_options" - - case "$prev" in - --build-arg) - COMPREPLY=( $( compgen -e -- "$cur" ) ) - __docker_nospace - return - ;; - --file|-f) - _filedir - return - ;; - --isolation) - __docker_complete_isolation - return - ;; - --tag|-t) - 
__docker_complete_image_repos_and_tags - return - ;; - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) - ;; - *) - local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) - if [ $cword -eq $counter ]; then - _filedir -d - fi - ;; - esac -} - -_docker_commit() { - case "$prev" in - --author|-a|--change|-c|--message|-m) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--author -a --change -c --help --message -m --pause=false -p=false" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--author|-a|--change|-c|--message|-m') - - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - return - fi - (( counter++ )) - - if [ $cword -eq $counter ]; then - __docker_complete_image_repos_and_tags - return - fi - ;; - esac -} - -_docker_cp() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--follow-link -L --help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - case "$cur" in - *:) - return - ;; - *) - # combined container and filename completion - _filedir - local files=( ${COMPREPLY[@]} ) - - __docker_complete_containers_all - COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) - local containers=( ${COMPREPLY[@]} ) - - COMPREPLY=( $( compgen -W "${files[*]} ${containers[*]}" -- "$cur" ) ) - if [[ "$COMPREPLY" == *: ]]; then - __docker_nospace - fi - return - ;; - esac - fi - (( counter++ )) - - if [ $cword -eq $counter ]; then - if [ -e "$prev" ]; then - __docker_complete_containers_all - COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) - __docker_nospace - else - _filedir - fi - return - fi - ;; - esac -} - -_docker_create() { - _docker_run -} - -_docker_daemon() { - local boolean_options=" - $global_boolean_options - --disable-legacy-registry - --help - --icc=false - --ip-forward=false - --ip-masq=false - --iptables=false - --ipv6 - --live-restore - --raw-logs - --selinux-enabled - --userland-proxy=false - " - local options_with_args=" - $global_options_with_args - --add-runtime - --api-cors-header - --authorization-plugin - --bip - --bridge -b - --cgroup-parent - --cluster-advertise - --cluster-store - --cluster-store-opt - --config-file - --containerd - --default-gateway - --default-gateway-v6 - --default-ulimit - --dns - --dns-search - --dns-opt - --exec-opt - --exec-root - --fixed-cidr - --fixed-cidr-v6 - --graph -g - --group -G - --insecure-registry - --ip - --label - --log-driver - --log-opt - --max-concurrent-downloads - --max-concurrent-uploads - --mtu - --oom-score-adjust - --pidfile -p - --registry-mirror - --storage-driver -s - --storage-opt - --userns-remap - " - - __docker_complete_log_driver_options && return - - key=$(__docker_map_key_of_current_option '--cluster-store-opt') - case "$key" in - kv.*file) - cur=${cur##*=} - _filedir - return - ;; - esac - - local key=$(__docker_map_key_of_current_option '--storage-opt') - case "$key" in - dm.blkdiscard|dm.override_udev_sync_check|dm.use_deferred_removal|dm.use_deferred_deletion) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - dm.fs) - COMPREPLY=( $( compgen -W "ext4 xfs" -- "${cur##*=}" ) ) - return - ;; - dm.thinpooldev) - cur=${cur##*=} - _filedir - return - ;; - esac - - case "$prev" in - --authorization-plugin) - __docker_complete_plugins Authorization - return - ;; - --cluster-store) - COMPREPLY=( $( compgen -W "consul etcd zk" -S 
"://" -- "$cur" ) ) - __docker_nospace - return - ;; - --cluster-store-opt) - COMPREPLY=( $( compgen -W "discovery.heartbeat discovery.ttl kv.cacertfile kv.certfile kv.keyfile kv.path" -S = -- "$cur" ) ) - __docker_nospace - return - ;; - --exec-root|--graph|-g) - _filedir -d - return - ;; - --log-driver) - __docker_complete_log_drivers - return - ;; - --config-file|--containerd|--pidfile|-p|--tlscacert|--tlscert|--tlskey) - _filedir - return - ;; - --storage-driver|-s) - COMPREPLY=( $( compgen -W "aufs btrfs devicemapper overlay overlay2 vfs zfs" -- "$(echo $cur | tr '[:upper:]' '[:lower:]')" ) ) - return - ;; - --storage-opt) - local btrfs_options="btrfs.min_space" - local devicemapper_options=" - dm.basesize - dm.blkdiscard - dm.blocksize - dm.fs - dm.loopdatasize - dm.loopmetadatasize - dm.min_free_space - dm.mkfsarg - dm.mountopt - dm.override_udev_sync_check - dm.thinpooldev - dm.use_deferred_deletion - dm.use_deferred_removal - " - local zfs_options="zfs.fsname" - - case $(__docker_value_of_option '--storage-driver|-s') in - '') - COMPREPLY=( $( compgen -W "$btrfs_options $devicemapper_options $zfs_options" -S = -- "$cur" ) ) - ;; - btrfs) - COMPREPLY=( $( compgen -W "$btrfs_options" -S = -- "$cur" ) ) - ;; - devicemapper) - COMPREPLY=( $( compgen -W "$devicemapper_options" -S = -- "$cur" ) ) - ;; - zfs) - COMPREPLY=( $( compgen -W "$zfs_options" -S = -- "$cur" ) ) - ;; - *) - return - ;; - esac - __docker_nospace - return - ;; - --log-level|-l) - __docker_complete_log_levels - return - ;; - --log-opt) - __docker_complete_log_options - return - ;; - --userns-remap) - __docker_complete_user_group - return - ;; - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) - ;; - esac -} - -_docker_diff() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_events() { - local key=$(__docker_map_key_of_current_option '-f|--filter') - case "$key" in - container) - cur="${cur##*=}" - __docker_complete_containers_all - return - ;; - daemon) - local name=$(__docker_q info | sed -n 's/^\(ID\|Name\): //p') - COMPREPLY=( $( compgen -W "$name" -- "${cur##*=}" ) ) - return - ;; - event) - COMPREPLY=( $( compgen -W " - attach - commit - connect - copy - create - delete - destroy - detach - die - disconnect - exec_create - exec_detach - exec_start - export - import - kill - load - mount - oom - pause - pull - push - reload - rename - resize - restart - save - start - stop - tag - top - unmount - unpause - untag - update - " -- "${cur##*=}" ) ) - return - ;; - image) - cur="${cur##*=}" - __docker_complete_images - return - ;; - network) - cur="${cur##*=}" - __docker_complete_networks - return - ;; - type) - COMPREPLY=( $( compgen -W "container daemon image network volume" -- "${cur##*=}" ) ) - return - ;; - volume) - cur="${cur##*=}" - __docker_complete_volumes - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "container daemon event image label network type volume" -- "$cur" ) ) - __docker_nospace - return - ;; - --since|--until) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --help --since --until" -- "$cur" ) ) - ;; - esac -} - -_docker_exec() { - __docker_complete_detach-keys && return - - case "$prev" in - --user|-u) - 
__docker_complete_user_group - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--detach -d --detach-keys --help --interactive -i --privileged -t --tty -u --user" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_running - ;; - esac -} - -_docker_export() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_help() { - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - COMPREPLY=( $( compgen -W "${commands[*]}" -- "$cur" ) ) - fi -} - -_docker_history() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --human=false -H=false --no-trunc --quiet -q" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_images - fi - ;; - esac -} - -_docker_images() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - before) - cur="${cur##*=}" - __docker_complete_images - return - ;; - dangling) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - label) - return - ;; - since) - cur="${cur##*=}" - __docker_complete_images - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "before dangling label since" -- "$cur" ) ) - __docker_nospace - return - ;; - --format) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all -a --digests --filter -f --format --help --no-trunc --quiet -q" -- "$cur" ) ) - ;; - =) - return - ;; - *) - __docker_complete_image_repos - ;; - esac -} - -_docker_import() { - case "$prev" in - --change|-c|--message|-m) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--change -c --help --message -m" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--change|-c|--message|-m') - if [ $cword -eq $counter ]; then - return - fi - (( counter++ )) - - if [ $cword -eq $counter ]; then - __docker_complete_image_repos_and_tags - return - fi - ;; - esac -} - -_docker_info() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - esac -} - -_docker_inspect() { - case "$prev" in - --format|-f) - return - ;; - --type) - COMPREPLY=( $( compgen -W "image container" -- "$cur" ) ) - return - ;; - - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help --size -s --type" -- "$cur" ) ) - ;; - *) - case $(__docker_value_of_option --type) in - '') - __docker_complete_containers_and_images - ;; - container) - __docker_complete_containers_all - ;; - image) - __docker_complete_images - ;; - esac - esac -} - -_docker_kill() { - case "$prev" in - --signal|-s) - __docker_complete_signals - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --signal -s" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_running - ;; - esac -} - -_docker_load() { - case "$prev" in - --input|-i) - _filedir - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --input -i --quiet -q" -- "$cur" ) ) - ;; - esac -} - -_docker_login() { - case "$prev" in - --password|-p|--username|-u) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --password -p --username -u" -- "$cur" ) ) - ;; - esac -} - -_docker_logout() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - esac -} - -_docker_logs() { - case 
"$prev" in - --since|--tail) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--details --follow -f --help --since --tail --timestamps -t" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--tail') - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_network_connect() { - local options_with_args=" - --alias - --ip - --ip6 - --link - --link-local-ip - " - - local boolean_options=" - --help - " - - case "$prev" in - --link) - case "$cur" in - *:*) - ;; - *) - __docker_complete_containers_running - COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) - __docker_nospace - ;; - esac - return - ;; - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) ) - ;; - *) - local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) - if [ $cword -eq $counter ]; then - __docker_complete_networks - elif [ $cword -eq $(($counter + 1)) ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_network_create() { - case "$prev" in - --aux-address|--gateway|--internal|--ip-range|--ipam-opt|--ipv6|--opt|-o|--subnet) - return - ;; - --ipam-driver) - COMPREPLY=( $( compgen -W "default" -- "$cur" ) ) - return - ;; - --driver|-d) - local plugins="$(__docker_plugins Network) macvlan" - # remove drivers that allow one instance only - plugins=${plugins/ host / } - plugins=${plugins/ null / } - COMPREPLY=( $(compgen -W "$plugins" -- "$cur") ) - return - ;; - --label) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--aux-address --driver -d --gateway --help --internal --ip-range --ipam-driver --ipam-opt --ipv6 --label --opt -o --subnet" -- "$cur" ) ) - ;; - esac -} - -_docker_network_disconnect() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_networks - elif [ $cword -eq $(($counter + 1)) ]; then - __docker_complete_containers_in_network "$prev" - fi - ;; - esac -} - -_docker_network_inspect() { - case "$prev" in - --format|-f) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) - ;; - *) - __docker_complete_networks - esac -} - -_docker_network_ls() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - driver) - local plugins=" $(__docker_plugins Network) " - COMPREPLY=( $(compgen -W "$plugins" -- "${cur##*=}") ) - return - ;; - id) - cur="${cur##*=}" - __docker_complete_network_ids - return - ;; - name) - cur="${cur##*=}" - __docker_complete_network_names - return - ;; - type) - COMPREPLY=( $( compgen -W "builtin custom" -- "${cur##*=}" ) ) - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "driver id label name type" -- "$cur" ) ) - __docker_nospace - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --help --no-trunc --quiet -q" -- "$cur" ) ) - ;; - esac -} - -_docker_network_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker_complete_networks type=custom - esac -} - -_docker_network() { - local subcommands=" - connect - create - disconnect - inspect - ls - rm - " - __docker_subcommands "$subcommands" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) 
)
- ;;
- *)
- COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
- ;;
- esac
-}
-
-_docker_service() {
- local subcommands="
- create
- inspect
- ls list
- rm remove
- scale
- ps
- update
- "
- __docker_subcommands "$subcommands" && return
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
- ;;
- *)
- COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
- ;;
- esac
-}
-
-_docker_service_create() {
- _docker_service_update
-}
-
-_docker_service_inspect() {
- case "$prev" in
- --format|-f)
- return
- ;;
- esac
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) )
- ;;
- *)
- __docker_complete_services
- esac
-}
-
-_docker_service_list() {
- _docker_service_ls
-}
-
-_docker_service_ls() {
- local key=$(__docker_map_key_of_current_option '--filter|-f')
- case "$key" in
- id)
- __docker_complete_services --cur "${cur##*=}" --id
- return
- ;;
- name)
- __docker_complete_services --cur "${cur##*=}" --name
- return
- ;;
- esac
-
- case "$prev" in
- --filter|-f)
- COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) )
- __docker_nospace
- return
- ;;
- esac
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) )
- ;;
- esac
-}
-
-_docker_service_remove() {
- _docker_service_rm
-}
-
-_docker_service_rm() {
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
- ;;
- *)
- __docker_complete_services
- esac
-}
-
-_docker_service_scale() {
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
- ;;
- *)
- __docker_complete_services
- __docker_append_to_completions "="
- __docker_nospace
- ;;
- esac
-}
-
-_docker_service_ps() {
- local key=$(__docker_map_key_of_current_option '--filter|-f')
- case "$key" in
- desired-state)
- COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) )
- return
- ;;
- name)
- __docker_complete_services --cur "${cur##*=}" --name
- return
- ;;
- esac
-
- case "$prev" in
- --filter|-f)
- COMPREPLY=( $( compgen -W "desired-state id name" -S = -- "$cur" ) )
- __docker_nospace
- return
- ;;
- esac
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve" -- "$cur" ) )
- ;;
- *)
- local counter=$(__docker_pos_first_nonflag '--filter|-f')
- if [ $cword -eq $counter ]; then
- __docker_complete_services
- fi
- ;;
- esac
-}
-
-_docker_service_update() {
- local subcommand="${words[$subcommand_pos]}"
-
- local options_with_args="
- --constraint
- --endpoint-mode
- --env -e
- --label -l
- --limit-cpu
- --limit-memory
- --log-driver
- --log-opt
- --mount
- --name
- --network
- --publish -p
- --replicas
- --reserve-cpu
- --reserve-memory
- --restart-condition
- --restart-delay
- --restart-max-attempts
- --restart-window
- --stop-grace-period
- --update-delay
- --update-failure-action
- --update-parallelism
- --user -u
- --workdir -w
- "
-
- local boolean_options="
- --help
- --with-registry-auth
- "
-
- __docker_complete_log_driver_options && return
-
- if [ "$subcommand" = "create" ] ; then
- options_with_args="$options_with_args
- --container-label
- --mode
- "
-
- case "$prev" in
- --mode)
- COMPREPLY=( $( compgen -W "global replicated" -- "$cur" ) )
- return
- ;;
- esac
- fi
- if [ "$subcommand" = "update" ] ; then
- options_with_args="$options_with_args
- --arg
- --container-label-add
- --container-label-rm
- --image
- "
-
- case "$prev" in
- --image)
- __docker_complete_image_repos_and_tags
- return
- ;;
- esac
- fi
-
- case "$prev" in
- --endpoint-mode)
- COMPREPLY=( $( compgen -W "dnsrr vip" -- "$cur" ) )
- return
- ;;
- --env|-e)
- # we do not append a "=" here because "-e VARNAME" is legal syntax, too
- COMPREPLY=( $( compgen -e -- "$cur" ) )
- __docker_nospace
- return
- ;;
- --log-driver)
- __docker_complete_log_drivers
- return
- ;;
- --log-opt)
- __docker_complete_log_options
- return
- ;;
- --network)
- __docker_complete_networks
- return
- ;;
- --restart-condition)
- COMPREPLY=( $( compgen -W "any none on-failure" -- "$cur" ) )
- return
- ;;
- --user|-u)
- __docker_complete_user_group
- return
- ;;
- $(__docker_to_extglob "$options_with_args") )
- return
- ;;
- esac
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "$boolean_options $options_with_args" -- "$cur" ) )
- ;;
- *)
- if [ "$subcommand" = "update" ] ; then
- __docker_complete_services
- fi
- esac
-}
-
-_docker_swarm() {
- local subcommands="
- init
- join
- join-token
- leave
- update
- "
- __docker_subcommands "$subcommands" && return
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
- ;;
- *)
- COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
- ;;
- esac
-}
-
-_docker_swarm_init() {
- case "$prev" in
- --listen-addr)
- if [[ $cur == *: ]] ; then
- COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )
- fi
- return
- ;;
- --advertise-addr)
- if [[ $cur == *: ]] ; then
- COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )
- fi
- return
- ;;
- esac
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--advertise-addr --force-new-cluster --help --listen-addr" -- "$cur" ) )
- ;;
- esac
-}
-
-_docker_swarm_join() {
- case "$prev" in
- --token)
- return
- ;;
- --listen-addr)
- if [[ $cur == *: ]] ; then
- COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )
- fi
- return
- ;;
- --advertise-addr)
- if [[ $cur == *: ]] ; then
- COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )
- fi
- return
- ;;
- esac
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--advertise-addr --help --listen-addr --token" -- "$cur" ) )
- ;;
- *:)
- COMPREPLY=( $( compgen -W "2377" -- "${cur##*:}" ) )
- ;;
- esac
-}
-
-_docker_swarm_join-token() {
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--help --quiet -q --rotate" -- "$cur" ) )
- ;;
- *)
- local counter=$( __docker_pos_first_nonflag )
- if [ $cword -eq $counter ]; then
- COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) )
- fi
- ;;
- esac
-}
-
-_docker_swarm_leave() {
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--force --help" -- "$cur" ) )
- ;;
- esac
-}
-
-_docker_swarm_update() {
- case "$prev" in
- --cert-expiry|--dispatcher-heartbeat|--task-history-limit)
- return
- ;;
- esac
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--cert-expiry --dispatcher-heartbeat --help --task-history-limit" -- "$cur" ) )
- ;;
- esac
-}
-
-_docker_node() {
- local subcommands="
- demote
- inspect
- ls list
- promote
- rm remove
- ps
- update
- "
- __docker_subcommands "$subcommands" && return
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
- ;;
- *)
- COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
- ;;
- esac
-}
-
-_docker_node_demote() {
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
- ;;
- *)
- __docker_complete_nodes --filter role=manager
- esac
-}
-
-_docker_node_inspect() {
- case "$prev" in
- --format|-f)
- return
- ;;
- esac
-
- case "$cur" in
- -*)
- COMPREPLY=( $( compgen -W "--format -f --help --pretty" -- "$cur" ) )
- ;;
- *)
- __docker_complete_nodes
- esac
-}
-
-_docker_node_list() {
- _docker_node_ls
-}
-
-_docker_node_ls() {
- local
key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - id) - __docker_complete_nodes --cur "${cur##*=}" --id - return - ;; - name) - __docker_complete_nodes --cur "${cur##*=}" --name - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -W "id label name" -S = -- "$cur" ) ) - __docker_nospace - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) - ;; - esac -} - -_docker_node_promote() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker_complete_nodes --filter role=worker - esac -} - -_docker_node_remove() { - _docker_node_rm -} - -_docker_node_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force --help" -- "$cur" ) ) - ;; - *) - __docker_complete_nodes - esac -} - -_docker_node_ps() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - desired-state) - COMPREPLY=( $( compgen -W "accepted running" -- "${cur##*=}" ) ) - return - ;; - name) - __docker_complete_services --cur "${cur##*=}" --name - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -W "desired-state id label name" -S = -- "$cur" ) ) - __docker_nospace - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all -a --filter -f --help --no-resolve" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag '--filter|-f') - if [ $cword -eq $counter ]; then - __docker_complete_nodes_plus_self - fi - ;; - esac -} - -_docker_node_update() { - case "$prev" in - --availability) - COMPREPLY=( $( compgen -W "active drain pause" -- "$cur" ) ) - return - ;; - --role) - COMPREPLY=( $( compgen -W "manager worker" -- "$cur" ) ) - return - ;; - --label-add|--label-rm) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--availability --help --label-add --label-rm --role" -- "$cur" ) ) - ;; - *) - __docker_complete_nodes - esac -} - -_docker_pause() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_pauseable - fi - ;; - esac -} - -_docker_port() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_ps() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - ancestor) - cur="${cur##*=}" - __docker_complete_images - return - ;; - before) - cur="${cur##*=}" - __docker_complete_containers_all - return - ;; - id) - cur="${cur##*=}" - __docker_complete_container_ids - return - ;; - name) - cur="${cur##*=}" - __docker_complete_container_names - return - ;; - network) - cur="${cur##*=}" - __docker_complete_networks - return - ;; - since) - cur="${cur##*=}" - __docker_complete_containers_all - return - ;; - status) - COMPREPLY=( $( compgen -W "created dead exited paused restarting running" -- "${cur##*=}" ) ) - return - ;; - volume) - cur="${cur##*=}" - __docker_complete_volumes - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "ancestor before exited id label name network since status volume" -- "$cur" ) ) - __docker_nospace - return - ;; - --format|-n) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all -a --filter -f --format --help --latest -l -n 
--no-trunc --quiet -q --size -s" -- "$cur" ) ) - ;; - esac -} - -_docker_pull() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all-tags -a --disable-content-trust=false --help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - for arg in "${COMP_WORDS[@]}"; do - case "$arg" in - --all-tags|-a) - __docker_complete_image_repos - return - ;; - esac - done - __docker_complete_image_repos_and_tags - fi - ;; - esac -} - -_docker_push() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--disable-content-trust=false --help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_image_repos_and_tags - fi - ;; - esac -} - -_docker_rename() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_all - fi - ;; - esac -} - -_docker_restart() { - case "$prev" in - --time|-t) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_all - ;; - esac -} - -_docker_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help --link -l --volumes -v" -- "$cur" ) ) - ;; - *) - for arg in "${COMP_WORDS[@]}"; do - case "$arg" in - --force|-f) - __docker_complete_containers_all - return - ;; - esac - done - __docker_complete_containers_stopped - ;; - esac -} - -_docker_rmi() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--force -f --help --no-prune" -- "$cur" ) ) - ;; - *) - __docker_complete_images - ;; - esac -} - -_docker_run() { - local options_with_args=" - --add-host - --attach -a - --blkio-weight - --blkio-weight-device - --cap-add - --cap-drop - --cgroup-parent - --cidfile - --cpu-period - --cpu-quota - --cpuset-cpus - --cpuset-mems - --cpu-shares -c - --device - --device-read-bps - --device-read-iops - --device-write-bps - --device-write-iops - --dns - --dns-opt - --dns-search - --entrypoint - --env -e - --env-file - --expose - --group-add - --hostname -h - --ip - --ip6 - --ipc - --isolation - --kernel-memory - --label-file - --label -l - --link - --link-local-ip - --log-driver - --log-opt - --mac-address - --memory -m - --memory-swap - --memory-swappiness - --memory-reservation - --name - --network - --network-alias - --oom-score-adj - --pid - --pids-limit - --publish -p - --restart - --runtime - --security-opt - --shm-size - --stop-signal - --storage-opt - --tmpfs - --sysctl - --ulimit - --user -u - --userns - --uts - --volume-driver - --volumes-from - --volume -v - --workdir -w - " - - local boolean_options=" - --disable-content-trust=false - --help - --interactive -i - --oom-kill-disable - --privileged - --publish-all -P - --read-only - --tty -t - " - - if [ "$command" = "run" ] ; then - options_with_args="$options_with_args - --detach-keys - --health-cmd - --health-interval - --health-retries - --health-timeout - " - boolean_options="$boolean_options - --detach -d - --no-healthcheck - --rm - --sig-proxy=false - " - __docker_complete_detach-keys && return - fi - - local all_options="$options_with_args $boolean_options" - - - __docker_complete_log_driver_options && return - __docker_complete_restart && return - - local key=$(__docker_map_key_of_current_option '--security-opt') - case "$key" in - label) - [[ $cur == *: ]] && return - COMPREPLY=( $( compgen -W "user: role: type: level: disable" -- "${cur##*=}") ) - 
if [ "${COMPREPLY[*]}" != "disable" ] ; then - __docker_nospace - fi - return - ;; - seccomp) - local cur=${cur##*=} - _filedir - COMPREPLY+=( $( compgen -W "unconfined" -- "$cur" ) ) - return - ;; - esac - - case "$prev" in - --add-host) - case "$cur" in - *:) - __docker_complete_resolved_hostname - return - ;; - esac - ;; - --attach|-a) - COMPREPLY=( $( compgen -W 'stdin stdout stderr' -- "$cur" ) ) - return - ;; - --cap-add|--cap-drop) - __docker_complete_capabilities - return - ;; - --cidfile|--env-file|--label-file) - _filedir - return - ;; - --device|--tmpfs|--volume|-v) - case "$cur" in - *:*) - # TODO somehow do _filedir for stuff inside the image, if it's already specified (which is also somewhat difficult to determine) - ;; - '') - COMPREPLY=( $( compgen -W '/' -- "$cur" ) ) - __docker_nospace - ;; - /*) - _filedir - __docker_nospace - ;; - esac - return - ;; - --env|-e) - # we do not append a "=" here because "-e VARNAME" is legal systax, too - COMPREPLY=( $( compgen -e -- "$cur" ) ) - __docker_nospace - return - ;; - --ipc) - case "$cur" in - *:*) - cur="${cur#*:}" - __docker_complete_containers_running - ;; - *) - COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) - if [ "$COMPREPLY" = "container:" ]; then - __docker_nospace - fi - ;; - esac - return - ;; - --isolation) - __docker_complete_isolation - return - ;; - --link) - case "$cur" in - *:*) - ;; - *) - __docker_complete_containers_running - COMPREPLY=( $( compgen -W "${COMPREPLY[*]}" -S ':' ) ) - __docker_nospace - ;; - esac - return - ;; - --log-driver) - __docker_complete_log_drivers - return - ;; - --log-opt) - __docker_complete_log_options - return - ;; - --network) - case "$cur" in - container:*) - local cur=${cur#*:} - __docker_complete_containers_all - ;; - *) - COMPREPLY=( $( compgen -W "$(__docker_plugins Network) $(__docker_networks) container:" -- "$cur") ) - if [ "${COMPREPLY[*]}" = "container:" ] ; then - __docker_nospace - fi - ;; - esac - return - ;; - --pid) - case "$cur" in - *:*) - cur="${cur#*:}" - __docker_complete_containers_running - ;; - *) - COMPREPLY=( $( compgen -W 'host container:' -- "$cur" ) ) - if [ "$COMPREPLY" = "container:" ]; then - __docker_nospace - fi - ;; - esac - return - ;; - --runtime) - __docker_complete_runtimes - return - ;; - --security-opt) - COMPREPLY=( $( compgen -W "apparmor= label= no-new-privileges seccomp=" -- "$cur") ) - if [ "${COMPREPLY[*]}" != "no-new-privileges" ] ; then - __docker_nospace - fi - return - ;; - --storage-opt) - COMPREPLY=( $( compgen -W "size" -S = -- "$cur") ) - __docker_nospace - return - ;; - --user|-u) - __docker_complete_user_group - return - ;; - --userns) - COMPREPLY=( $( compgen -W "host" -- "$cur" ) ) - return - ;; - --volume-driver) - __docker_complete_plugins Volume - return - ;; - --volumes-from) - __docker_complete_containers_all - return - ;; - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) - ;; - *) - local counter=$( __docker_pos_first_nonflag $( __docker_to_alternatives "$options_with_args" ) ) - if [ $cword -eq $counter ]; then - __docker_complete_images - fi - ;; - esac -} - -_docker_save() { - case "$prev" in - --output|-o) - _filedir - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --output -o" -- "$cur" ) ) - ;; - *) - __docker_complete_images - ;; - esac -} - -_docker_search() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - is-automated) - COMPREPLY=( 
$( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - is-official) - COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) ) - return - ;; - esac - - case "$prev" in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "is-automated is-official stars" -- "$cur" ) ) - __docker_nospace - return - ;; - --limit) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter --help --limit --no-trunc" -- "$cur" ) ) - ;; - esac -} - -_docker_start() { - __docker_complete_detach-keys && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--attach -a --detach-keys --help --interactive -i" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_stopped - ;; - esac -} - -_docker_stats() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--all -a --help --no-stream" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_running - ;; - esac -} - -_docker_stop() { - case "$prev" in - --time|-t) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help --time -t" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_running - ;; - esac -} - -_docker_tag() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - - if [ $cword -eq $counter ]; then - __docker_complete_image_repos_and_tags - return - fi - (( counter++ )) - - if [ $cword -eq $counter ]; then - __docker_complete_image_repos_and_tags - return - fi - ;; - esac -} - -_docker_unpause() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_unpauseable - fi - ;; - esac -} - -_docker_update() { - local options_with_args=" - --blkio-weight - --cpu-period - --cpu-quota - --cpuset-cpus - --cpuset-mems - --cpu-shares -c - --kernel-memory - --memory -m - --memory-reservation - --memory-swap - --restart - " - - local boolean_options=" - --help - " - - local all_options="$options_with_args $boolean_options" - - __docker_complete_restart && return - - case "$prev" in - $(__docker_to_extglob "$options_with_args") ) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "$all_options" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_all - ;; - esac -} - -_docker_top() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - local counter=$(__docker_pos_first_nonflag) - if [ $cword -eq $counter ]; then - __docker_complete_containers_running - fi - ;; - esac -} - -_docker_version() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - esac -} - -_docker_volume_create() { - case "$prev" in - --driver|-d) - __docker_complete_plugins Volume - return - ;; - --label|--name|--opt|-o) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--driver -d --help --label --name --opt -o" -- "$cur" ) ) - ;; - esac -} - -_docker_volume_inspect() { - case "$prev" in - --format|-f) - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--format -f --help" -- "$cur" ) ) - ;; - *) - __docker_complete_volumes - ;; - esac -} - -_docker_volume_ls() { - local key=$(__docker_map_key_of_current_option '--filter|-f') - case "$key" in - dangling) - COMPREPLY=( $( compgen -W "true false" -- "${cur##*=}" ) ) - return - ;; - driver) - cur=${cur##*=} - __docker_complete_plugins Volume - return - ;; - name) - cur=${cur##*=} - __docker_complete_volumes - return - ;; - esac - - case "$prev" 
in - --filter|-f) - COMPREPLY=( $( compgen -S = -W "dangling driver name" -- "$cur" ) ) - __docker_nospace - return - ;; - esac - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--filter -f --help --quiet -q" -- "$cur" ) ) - ;; - esac -} - -_docker_volume_rm() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker_complete_volumes - ;; - esac -} - -_docker_volume() { - local subcommands=" - create - inspect - ls - rm - " - __docker_subcommands "$subcommands" && return - - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) ) - ;; - esac -} - -_docker_wait() { - case "$cur" in - -*) - COMPREPLY=( $( compgen -W "--help" -- "$cur" ) ) - ;; - *) - __docker_complete_containers_all - ;; - esac -} - -_docker() { - local previous_extglob_setting=$(shopt -p extglob) - shopt -s extglob - - local commands=( - attach - build - commit - cp - create - daemon - diff - events - exec - export - history - images - import - info - inspect - kill - load - login - logout - logs - network - node - pause - port - ps - pull - push - rename - restart - rm - rmi - run - save - search - service - start - stats - stop - swarm - tag - top - unpause - update - version - volume - wait - ) - - # These options are valid as global options for all client commands - # and valid as command options for `docker daemon` - local global_boolean_options=" - --debug -D - --tls - --tlsverify - " - local global_options_with_args=" - --config - --host -H - --log-level -l - --tlscacert - --tlscert - --tlskey - " - - local host config - - COMPREPLY=() - local cur prev words cword - _get_comp_words_by_ref -n : cur prev words cword - - local command='docker' command_pos=0 subcommand_pos - local counter=1 - while [ $counter -lt $cword ]; do - case "${words[$counter]}" in - # save host so that completion can use custom daemon - --host|-H) - (( counter++ )) - host="${words[$counter]}" - ;; - # save config so that completion can use custom configuration directories - --config) - (( counter++ )) - config="${words[$counter]}" - ;; - $(__docker_to_extglob "$global_options_with_args") ) - (( counter++ )) - ;; - -*) - ;; - =) - (( counter++ )) - ;; - *) - command="${words[$counter]}" - command_pos=$counter - break - ;; - esac - (( counter++ )) - done - - local binary="${words[0]}" - if [[ $binary == ?(*/)dockerd ]] ; then - # for the dockerd binary, we reuse completion of `docker daemon`. - # dockerd does not have subcommands and global options. 
- command=daemon
- command_pos=0
- fi
-
- local completions_func=_docker_${command}
- declare -F $completions_func >/dev/null && $completions_func
-
- eval "$previous_extglob_setting"
- return 0
-}
-
-eval "$__docker_previous_extglob_setting"
-unset __docker_previous_extglob_setting
-
-complete -F _docker docker dockerd
diff --git a/contrib/completion/fish/docker.fish b/contrib/completion/fish/docker.fish
deleted file mode 100644
index 72ccd05533..0000000000
--- a/contrib/completion/fish/docker.fish
+++ /dev/null
@@ -1,400 +0,0 @@
-# docker.fish - docker completions for fish shell
-#
-# This file is generated by gen_docker_fish_completions.py from:
-# https://github.com/barnybug/docker-fish-completion
-#
-# To install the completions:
-# mkdir -p ~/.config/fish/completions
-# cp docker.fish ~/.config/fish/completions
-#
-# Completion supported:
-# - parameters
-# - commands
-# - containers
-# - images
-# - repositories

-function __fish_docker_no_subcommand --description 'Test if docker has yet to be given the subcommand'
- for i in (commandline -opc)
- if contains -- $i attach build commit cp create diff events exec export history images import info inspect kill load login logout logs pause port ps pull push rename restart rm rmi run save search start stop tag top unpause version wait stats
- return 1
- end
- end
- return 0
-end
-
-function __fish_print_docker_containers --description 'Print a list of docker containers' -a select
- switch $select
- case running
- docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS="  +"}; $5 ~ "^Up" {print $1 "\n" $(NF)}' | tr ',' '\n'
- case stopped
- docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS="  +"}; $5 ~ "^Exit" {print $1 "\n" $(NF)}' | tr ',' '\n'
- case all
- docker ps -a --no-trunc | command awk 'NR>1' | command awk 'BEGIN {FS="  +"}; {print $1 "\n" $(NF)}' | tr ',' '\n'
- end
-end
-
-function __fish_print_docker_images --description 'Print a list of docker images'
- docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1":"$2}'
-end
-
-function __fish_print_docker_repositories --description 'Print a list of docker repositories'
- docker images | command awk 'NR>1' | command grep -v '<none>' | command awk '{print $1}' | command sort | command uniq
-end
-
-# common options
-complete -c docker -f -n '__fish_docker_no_subcommand' -l api-cors-header -d "Set CORS headers in the remote API. Default is cors disabled"
-complete -c docker -f -n '__fish_docker_no_subcommand' -s b -l bridge -d 'Attach containers to a pre-existing network bridge'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l bip -d "Use this CIDR notation address for the network bridge's IP, not compatible with -b"
-complete -c docker -f -n '__fish_docker_no_subcommand' -s D -l debug -d 'Enable debug mode'
-complete -c docker -f -n '__fish_docker_no_subcommand' -s d -l daemon -d 'Enable daemon mode'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l dns -d 'Force Docker to use specific DNS servers'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-opt -d 'Force Docker to use specific DNS options'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l dns-search -d 'Force Docker to use specific DNS search domains'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l exec-opt -d 'Set runtime execution options'
-complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr -d 'IPv4 subnet for fixed IPs (e.g.
10.20.0.0/16)' -complete -c docker -f -n '__fish_docker_no_subcommand' -l fixed-cidr-v6 -d 'IPv6 subnet for fixed IPs (e.g.: 2001:a02b/48)' -complete -c docker -f -n '__fish_docker_no_subcommand' -s G -l group -d 'Group to assign the unix socket specified by -H when running in daemon mode' -complete -c docker -f -n '__fish_docker_no_subcommand' -s g -l graph -d 'Path to use as the root of the Docker runtime' -complete -c docker -f -n '__fish_docker_no_subcommand' -s H -l host -d 'The socket(s) to bind to in daemon mode or connect to in client mode, specified using one or more tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd.' -complete -c docker -f -n '__fish_docker_no_subcommand' -s h -l help -d 'Print usage' -complete -c docker -f -n '__fish_docker_no_subcommand' -l icc -d 'Allow unrestricted inter-container and Docker daemon host communication' -complete -c docker -f -n '__fish_docker_no_subcommand' -l insecure-registry -d 'Enable insecure communication with specified registries (no certificate verification for HTTPS and enable HTTP fallback) (e.g., localhost:5000 or 10.20.0.0/16)' -complete -c docker -f -n '__fish_docker_no_subcommand' -l ip -d 'Default IP address to use when binding container ports' -complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-forward -d 'Enable net.ipv4.ip_forward and IPv6 forwarding if --fixed-cidr-v6 is defined. IPv6 forwarding may interfere with your existing IPv6 configuration when using Router Advertisement.' -complete -c docker -f -n '__fish_docker_no_subcommand' -l ip-masq -d "Enable IP masquerading for bridge's IP range" -complete -c docker -f -n '__fish_docker_no_subcommand' -l iptables -d "Enable Docker's addition of iptables rules" -complete -c docker -f -n '__fish_docker_no_subcommand' -l ipv6 -d 'Enable IPv6 networking' -complete -c docker -f -n '__fish_docker_no_subcommand' -s l -l log-level -d 'Set the logging level (debug, info, warn, error, fatal)' -complete -c docker -f -n '__fish_docker_no_subcommand' -l label -d 'Set key=value labels to the daemon (displayed in `docker info`)' -complete -c docker -f -n '__fish_docker_no_subcommand' -l mtu -d 'Set the containers network MTU' -complete -c docker -f -n '__fish_docker_no_subcommand' -s p -l pidfile -d 'Path to use for daemon PID file' -complete -c docker -f -n '__fish_docker_no_subcommand' -l registry-mirror -d 'Specify a preferred Docker registry mirror' -complete -c docker -f -n '__fish_docker_no_subcommand' -s s -l storage-driver -d 'Force the Docker runtime to use a specific storage driver' -complete -c docker -f -n '__fish_docker_no_subcommand' -l selinux-enabled -d 'Enable selinux support. 
SELinux does not presently support the BTRFS storage driver' -complete -c docker -f -n '__fish_docker_no_subcommand' -l storage-opt -d 'Set storage driver options' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tls -d 'Use TLS; implied by --tlsverify' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscacert -d 'Trust only remotes providing a certificate signed by the CA given here' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tlscert -d 'Path to TLS certificate file' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tlskey -d 'Path to TLS key file' -complete -c docker -f -n '__fish_docker_no_subcommand' -l tlsverify -d 'Use TLS and verify the remote (daemon: verify client, client: verify daemon)' -complete -c docker -f -n '__fish_docker_no_subcommand' -s v -l version -d 'Print version information and quit' - -# subcommands -# attach -complete -c docker -f -n '__fish_docker_no_subcommand' -a attach -d 'Attach to a running container' -complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l no-stdin -d 'Do not attach STDIN' -complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -l sig-proxy -d 'Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied.' -complete -c docker -A -f -n '__fish_seen_subcommand_from attach' -a '(__fish_print_docker_containers running)' -d "Container" - -# build -complete -c docker -f -n '__fish_docker_no_subcommand' -a build -d 'Build an image from a Dockerfile' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s f -l file -d "Name of the Dockerfile (Default is 'Dockerfile' at context root)" -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l force-rm -d 'Always remove intermediate containers, even after unsuccessful builds' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l no-cache -d 'Do not use cache when building the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l pull -d 'Always attempt to pull a newer version of the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s q -l quiet -d 'Suppress the build output and print image ID on success' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -l rm -d 'Remove intermediate containers after a successful build' -complete -c docker -A -f -n '__fish_seen_subcommand_from build' -s t -l tag -d 'Repository name (and optionally a tag) to be applied to the resulting image in case of success' - -# commit -complete -c docker -f -n '__fish_docker_no_subcommand' -a commit -d "Create a new image from a container's changes" -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s a -l author -d 'Author (e.g., "John Hannibal Smith <hannibal@a-team.com>")' -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s m -l message -d 'Commit message' -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -s p -l pause -d 'Pause container during commit' -complete -c docker -A -f -n '__fish_seen_subcommand_from commit' -a '(__fish_print_docker_containers all)' -d "Container" - -# cp -complete -c docker -f -n '__fish_docker_no_subcommand' -a cp -d "Copy files/folders between a container and the local filesystem" -complete -c
docker -A -f -n '__fish_seen_subcommand_from cp' -l help -d 'Print usage' - -# create -complete -c docker -f -n '__fish_docker_no_subcommand' -a create -d 'Create a new container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpu-shares -d 'CPU shares (relative weight)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-add -d 'Add Linux capabilities' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cap-drop -d 'Drop Linux capabilities' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cidfile -d 'Write the container ID to the file' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns -d 'Set custom DNS servers' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)" -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s e -l env -d 'Set environment variables' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l env-file -d 'Read in a line delimited file of environment variables' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l group-add -d 'Add additional groups to run as' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s h -l hostname -d 'Container host name' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s i -l interactive -d 'Keep STDIN open even if not attached' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l link -d 'Add link to another container in the form of <name|id>:alias' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s m -l memory -d 'Memory limit (format: <number>[<unit>], where unit = b, k, m or g)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l mac-address -d 'Container MAC address (e.g.
92:d0:c6:0a:29:33)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number>[<unit>], where unit = b, k, m or g)" -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l name -d 'Assign a name to the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l net -d 'Set the Network mode for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s p -l publish -d "Publish a container's port to the host" -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l pid -d 'Default is to create a private PID namespace for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l privileged -d 'Give extended privileges to this container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l read-only -d "Mount the container's root filesystem as read only" -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l security-opt -d 'Security Options' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s t -l tty -d 'Allocate a pseudo-TTY' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s u -l user -d 'Username or UID' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -l volumes-from -d 'Mount volumes from the specified container(s)' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -s w -l workdir -d 'Working directory inside the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from create' -a '(__fish_print_docker_images)' -d "Image" - -# diff -complete -c docker -f -n '__fish_docker_no_subcommand' -a diff -d "Inspect changes on a container's filesystem" -complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from diff' -a '(__fish_print_docker_containers all)' -d "Container" - -# events -complete -c docker -f -n '__fish_docker_no_subcommand' -a events -d 'Get real time events from the server' -complete -c docker -A -f -n '__fish_seen_subcommand_from events' -s f -l filter -d "Provide filter values (i.e., 'event=stop')" -complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l since -d 'Show all events created since timestamp' -complete -c docker -A -f -n '__fish_seen_subcommand_from events' -l until -d 'Stream events until this timestamp' - -# exec -complete -c docker -f -n '__fish_docker_no_subcommand' -a exec -d 'Run a command in a running container' -complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s d -l detach -d 'Detached mode: run command in the background' -complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -s i -l interactive -d 'Keep STDIN open even if not attached' -complete -c docker -A -f -n
'__fish_seen_subcommand_from exec' -s t -l tty -d 'Allocate a pseudo-TTY' -complete -c docker -A -f -n '__fish_seen_subcommand_from exec' -a '(__fish_print_docker_containers running)' -d "Container" - -# export -complete -c docker -f -n '__fish_docker_no_subcommand' -a export -d 'Stream the contents of a container as a tar archive' -complete -c docker -A -f -n '__fish_seen_subcommand_from export' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from export' -a '(__fish_print_docker_containers all)' -d "Container" - -# history -complete -c docker -f -n '__fish_docker_no_subcommand' -a history -d 'Show the history of an image' -complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from history' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from history' -s q -l quiet -d 'Only show numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from history' -a '(__fish_print_docker_images)' -d "Image" - -# images -complete -c docker -f -n '__fish_docker_no_subcommand' -a images -d 'List images' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s a -l all -d 'Show all images (by default filter out the intermediate image layers)' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s f -l filter -d "Provide filter values (i.e., 'dangling=true')" -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -s q -l quiet -d 'Only show numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from images' -a '(__fish_print_docker_repositories)' -d "Repository" - -# import -complete -c docker -f -n '__fish_docker_no_subcommand' -a import -d 'Create a new filesystem image from the contents of a tarball' -complete -c docker -A -f -n '__fish_seen_subcommand_from import' -l help -d 'Print usage' - -# info -complete -c docker -f -n '__fish_docker_no_subcommand' -a info -d 'Display system-wide information' - -# inspect -complete -c docker -f -n '__fish_docker_no_subcommand' -a inspect -d 'Return low-level information on a container or image' -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s f -l format -d 'Format the output using the given go template.' -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -s s -l size -d 'Display total file sizes if the type is container.' 
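# [Editorial sketch, not part of the patch] The file repeats one idiom for every
# subcommand: a guard function returns success only while no subcommand has been
# typed, `-n '__fish_docker_no_subcommand'` gates the subcommand names themselves,
# and `-n '__fish_seen_subcommand_from X'` gates that subcommand's flags. A minimal
# illustration of the same pattern for a hypothetical `mytool` command:
function __fish_mytool_no_subcommand --description 'Test if mytool has yet to be given a subcommand'
    for i in (commandline -opc)
        if contains -- $i start stop
            return 1
        end
    end
    return 0
end
# Offer subcommands only before one has been typed...
complete -c mytool -f -n '__fish_mytool_no_subcommand' -a start -d 'Start something'
complete -c mytool -f -n '__fish_mytool_no_subcommand' -a stop -d 'Stop something'
# ...and per-subcommand flags only after it.
complete -c mytool -A -f -n '__fish_seen_subcommand_from start' -l verbose -d 'Verbose output'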
-complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_images)' -d "Image" -complete -c docker -A -f -n '__fish_seen_subcommand_from inspect' -a '(__fish_print_docker_containers all)' -d "Container" - -# kill -complete -c docker -f -n '__fish_docker_no_subcommand' -a kill -d 'Kill a running container' -complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -s s -l signal -d 'Signal to send to the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from kill' -a '(__fish_print_docker_containers running)' -d "Container" - -# load -complete -c docker -f -n '__fish_docker_no_subcommand' -a load -d 'Load an image from a tar archive' -complete -c docker -A -f -n '__fish_seen_subcommand_from load' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from load' -s i -l input -d 'Read from a tar archive file, instead of STDIN' - -# login -complete -c docker -f -n '__fish_docker_no_subcommand' -a login -d 'Log in to a Docker registry server' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s p -l password -d 'Password' -complete -c docker -A -f -n '__fish_seen_subcommand_from login' -s u -l username -d 'Username' - -# logout -complete -c docker -f -n '__fish_docker_no_subcommand' -a logout -d 'Log out from a Docker registry server' - -# logs -complete -c docker -f -n '__fish_docker_no_subcommand' -a logs -d 'Fetch the logs of a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s f -l follow -d 'Follow log output' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -s t -l timestamps -d 'Show timestamps' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l since -d 'Show logs since timestamp' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -l tail -d 'Output the specified number of lines at the end of logs (defaults to all logs)' -complete -c docker -A -f -n '__fish_seen_subcommand_from logs' -a '(__fish_print_docker_containers running)' -d "Container" - -# port -complete -c docker -f -n '__fish_docker_no_subcommand' -a port -d 'Look up the public-facing port that is NAT-ed to PRIVATE_PORT' -complete -c docker -A -f -n '__fish_seen_subcommand_from port' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from port' -a '(__fish_print_docker_containers running)' -d "Container" - -# pause -complete -c docker -f -n '__fish_docker_no_subcommand' -a pause -d 'Pause all processes within a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from pause' -a '(__fish_print_docker_containers running)' -d "Container" - -# ps -complete -c docker -f -n '__fish_docker_no_subcommand' -a ps -d 'List containers' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s a -l all -d 'Show all containers. Only running containers are shown by default.' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l before -d 'Show only containers created before Id or Name, including non-running ones.' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s f -l filter -d 'Provide filter values.
Valid filters:' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s l -l latest -d 'Show only the latest created container, including non-running ones.' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s n -d 'Show the n last created containers, including non-running ones.' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s q -l quiet -d 'Only display numeric IDs' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -s s -l size -d 'Display total file sizes' -complete -c docker -A -f -n '__fish_seen_subcommand_from ps' -l since -d 'Show only containers created since Id or Name, including non-running ones.' - -# pull -complete -c docker -f -n '__fish_docker_no_subcommand' -a pull -d 'Pull an image or a repository from a Docker registry server' -complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -s a -l all-tags -d 'Download all tagged images in the repository' -complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_images)' -d "Image" -complete -c docker -A -f -n '__fish_seen_subcommand_from pull' -a '(__fish_print_docker_repositories)' -d "Repository" - -# push -complete -c docker -f -n '__fish_docker_no_subcommand' -a push -d 'Push an image or a repository to a Docker registry server' -complete -c docker -A -f -n '__fish_seen_subcommand_from push' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_images)' -d "Image" -complete -c docker -A -f -n '__fish_seen_subcommand_from push' -a '(__fish_print_docker_repositories)' -d "Repository" - -# rename -complete -c docker -f -n '__fish_docker_no_subcommand' -a rename -d 'Rename an existing container' - -# restart -complete -c docker -f -n '__fish_docker_no_subcommand' -a restart -d 'Restart a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -s t -l time -d 'Number of seconds to try to stop the container before killing it. Once killed, it is then restarted. Default is 10 seconds.'
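# [Editorial sketch, not part of the patch] Because __fish_print_docker_containers
# and __fish_print_docker_images are ordinary functions, the dynamic arguments wired
# up with -a '(...)' above can be debugged by calling the helpers directly in an
# interactive session (paths assume the install instructions from the file header):
source ~/.config/fish/completions/docker.fish
__fish_print_docker_containers running   # IDs and names of running containers
__fish_print_docker_images               # one repository:tag per line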
-complete -c docker -A -f -n '__fish_seen_subcommand_from restart' -a '(__fish_print_docker_containers running)' -d "Container" - -# rm -complete -c docker -f -n '__fish_docker_no_subcommand' -a rm -d 'Remove one or more containers' -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -d 'Force the removal of a running container (uses SIGKILL)' -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s l -l link -d 'Remove the specified link and not the underlying container' -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s v -l volumes -d 'Remove the volumes associated with the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -a '(__fish_print_docker_containers stopped)' -d "Container" -complete -c docker -A -f -n '__fish_seen_subcommand_from rm' -s f -l force -a '(__fish_print_docker_containers all)' -d "Container" - -# rmi -complete -c docker -f -n '__fish_docker_no_subcommand' -a rmi -d 'Remove one or more images' -complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -s f -l force -d 'Force removal of the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -l no-prune -d 'Do not delete untagged parents' -complete -c docker -A -f -n '__fish_seen_subcommand_from rmi' -a '(__fish_print_docker_images)' -d "Image" - -# run -complete -c docker -f -n '__fish_docker_no_subcommand' -a run -d 'Run a command in a new container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s a -l attach -d 'Attach to STDIN, STDOUT or STDERR.' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l add-host -d 'Add a custom host-to-IP mapping (host:ip)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s c -l cpu-shares -d 'CPU shares (relative weight)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-add -d 'Add Linux capabilities' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cap-drop -d 'Drop Linux capabilities' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cidfile -d 'Write the container ID to the file' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l cpuset -d 'CPUs in which to allow execution (0-3, 0,1)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s d -l detach -d 'Detached mode: run the container in the background and print the new container ID' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l device -d 'Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns -d 'Set custom DNS servers' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-opt -d "Set custom DNS options (Use --dns-opt='' if you don't wish to set options)" -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l dns-search -d "Set custom DNS search domains (Use --dns-search=. 
if you don't wish to set the search domain)" -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s e -l env -d 'Set environment variables' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l entrypoint -d 'Overwrite the default ENTRYPOINT of the image' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l env-file -d 'Read in a line delimited file of environment variables' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l expose -d 'Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l group-add -d 'Add additional groups to run as' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s h -l hostname -d 'Container host name' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s i -l interactive -d 'Keep STDIN open even if not attached' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l ipc -d 'Default is to create a private IPC namespace (POSIX SysV IPC) for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l link -d 'Add link to another container in the form of <name|id>:alias' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s m -l memory -d 'Memory limit (format: <number>[<unit>], where unit = b, k, m or g)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l mac-address -d 'Container MAC address (e.g. 92:d0:c6:0a:29:33)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l memory-swap -d "Total memory usage (memory + swap), set '-1' to disable swap (format: <number>[<unit>], where unit = b, k, m or g)" -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l name -d 'Assign a name to the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l net -d 'Set the Network mode for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s P -l publish-all -d 'Publish all exposed ports to random ports on the host interfaces' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s p -l publish -d "Publish a container's port to the host" -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l pid -d 'Default is to create a private PID namespace for the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l privileged -d 'Give extended privileges to this container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l read-only -d "Mount the container's root filesystem as read only" -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l restart -d 'Restart policy to apply when a container exits (no, on-failure[:max-retry], always)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l rm -d 'Automatically remove the container when it exits (incompatible with -d)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l security-opt -d 'Security Options' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l sig-proxy -d 'Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied.'
-complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l stop-signal -d 'Signal to stop a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s t -l tty -d 'Allocate a pseudo-TTY' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s u -l user -d 'Username or UID' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l tmpfs -d 'Mount tmpfs on a directory' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s v -l volume -d 'Bind mount a volume (e.g., from the host: -v /host:/container, from Docker: -v /container)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -l volumes-from -d 'Mount volumes from the specified container(s)' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -s w -l workdir -d 'Working directory inside the container' -complete -c docker -A -f -n '__fish_seen_subcommand_from run' -a '(__fish_print_docker_images)' -d "Image" - -# save -complete -c docker -f -n '__fish_docker_no_subcommand' -a save -d 'Save an image to a tar archive' -complete -c docker -A -f -n '__fish_seen_subcommand_from save' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from save' -s o -l output -d 'Write to a file, instead of STDOUT' -complete -c docker -A -f -n '__fish_seen_subcommand_from save' -a '(__fish_print_docker_images)' -d "Image" - -# search -complete -c docker -f -n '__fish_docker_no_subcommand' -a search -d 'Search for an image on the registry (defaults to the Docker Hub)' -complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l automated -d 'Only show automated builds' -complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from search' -l no-trunc -d "Don't truncate output" -complete -c docker -A -f -n '__fish_seen_subcommand_from search' -s s -l stars -d 'Only display images with at least x stars' - -# start -complete -c docker -f -n '__fish_docker_no_subcommand' -a start -d 'Start a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s a -l attach -d "Attach container's STDOUT and STDERR and forward all signals to the process" -complete -c docker -A -f -n '__fish_seen_subcommand_from start' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from start' -s i -l interactive -d "Attach container's STDIN" -complete -c docker -A -f -n '__fish_seen_subcommand_from start' -a '(__fish_print_docker_containers stopped)' -d "Container" - -# stats -complete -c docker -f -n '__fish_docker_no_subcommand' -a stats -d "Display a live stream of one or more containers' resource usage statistics" -complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -l no-stream -d 'Disable streaming stats and only pull the first result' -complete -c docker -A -f -n '__fish_seen_subcommand_from stats' -a '(__fish_print_docker_containers running)' -d "Container" - -# stop -complete -c docker -f -n '__fish_docker_no_subcommand' -a stop -d 'Stop a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -s t -l time -d 'Number of seconds to wait for the container to stop before killing it. Default is 10 seconds.'
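# [Editorial sketch, not part of the patch] A quick way to sanity-check rules like
# these without typing them interactively is fish's `complete -C`, which prints the
# candidates for a given partial command line (output depends on the local images
# and containers):
complete -C'docker st'     # should list start, stats, stop among the candidates
complete -C'docker stop '  # should list running containers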
-complete -c docker -A -f -n '__fish_seen_subcommand_from stop' -a '(__fish_print_docker_containers running)' -d "Container" - -# tag -complete -c docker -f -n '__fish_docker_no_subcommand' -a tag -d 'Tag an image into a repository' -complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -s f -l force -d 'Force' -complete -c docker -A -f -n '__fish_seen_subcommand_from tag' -l help -d 'Print usage' - -# top -complete -c docker -f -n '__fish_docker_no_subcommand' -a top -d 'Look up the running processes of a container' -complete -c docker -A -f -n '__fish_seen_subcommand_from top' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from top' -a '(__fish_print_docker_containers running)' -d "Container" - -# unpause -complete -c docker -f -n '__fish_docker_no_subcommand' -a unpause -d 'Unpause a paused container' -complete -c docker -A -f -n '__fish_seen_subcommand_from unpause' -a '(__fish_print_docker_containers running)' -d "Container" - -# version -complete -c docker -f -n '__fish_docker_no_subcommand' -a version -d 'Show the Docker version information' - -# wait -complete -c docker -f -n '__fish_docker_no_subcommand' -a wait -d 'Block until a container stops, then print its exit code' -complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -l help -d 'Print usage' -complete -c docker -A -f -n '__fish_seen_subcommand_from wait' -a '(__fish_print_docker_containers running)' -d "Container" diff --git a/contrib/completion/powershell/posh-docker.psm1 b/contrib/completion/powershell/posh-docker.psm1 deleted file mode 100644 index c0d6cc6b22..0000000000 --- a/contrib/completion/powershell/posh-docker.psm1 +++ /dev/null @@ -1,179 +0,0 @@ -# PowerShell completion for docker - -### Prerequisite -# Docker.exe needs to be in your PATH. -# If the command is not found, you will need to add a docker alias or add the docker installation folder (e.g. `%ProgramFiles%\Docker Toolbox`) to your PATH environment variable. - -### Installation (Latest stable) -# Windows 10 / Windows Server 2016: -# 1. Open a PowerShell prompt -# 2. Run `Install-Module -Scope CurrentUser posh-docker` -# -# Earlier Windows versions: -# 1. Install [PackageManagement PowerShell Modules Preview](https://www.microsoft.com/en-us/download/details.aspx?id=49186) -# 2. Open a PowerShell prompt -# 3. Run `Install-Module -Scope CurrentUser posh-docker` - -### Installation (From source) -# Copy this file to the %userprofile%\Documents\WindowsPowerShell\Modules\posh-docker directory (create directories as needed) - -### Usage -# After installation, execute the following line to enable autocompletion for the current PowerShell session: -# -# Import-Module posh-docker -# -# To make it persistent, add the above line to your profile. For example, run `notepad $PROFILE` and insert the line above. - -$global:DockerCompletion = @{} - -$script:flagRegex = "^ (-[^, =]+),? ?(--[^= ]+)?"
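# [Editorial sketch, not part of the module] The regex above is what later pulls
# flag names out of `docker --help` output, capturing the short form in $Matches[1]
# and the long form in $Matches[2]. A standalone check with a hypothetical sample
# line (the original source may have carried more leading whitespace than survives
# in this patch):
' -i, --interactive  Keep STDIN open' -match "^ (-[^, =]+),? ?(--[^= ]+)?"
$Matches[1]  # -i
$Matches[2]  # --interactive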
- -function script:Get-Containers($filter) -{ - if ($filter -eq $null) - { - docker ps -a --no-trunc --format "{{.Names}}" - } else { - docker ps -a --no-trunc --format "{{.Names}}" --filter $filter - } -} - -function script:Get-AutoCompleteResult -{ - param([Parameter(ValueFromPipeline=$true)] $value) - - Process - { - New-Object System.Management.Automation.CompletionResult $value - } -} - -filter script:MatchingCommand($commandName) -{ - if ($_.StartsWith($commandName)) - { - $_ - } -} - -$completion_Docker = { - param($commandName, $commandAst, $cursorPosition) - - $command = $null - $commandParameters = @{} - $state = "Unknown" - $wordToComplete = $commandAst.CommandElements | Where-Object { $_.ToString() -eq $commandName } | Foreach-Object { $commandAst.CommandElements.IndexOf($_) } - - for ($i=1; $i -lt $commandAst.CommandElements.Count; $i++) - { - $p = $commandAst.CommandElements[$i].ToString() - - if ($p.StartsWith("-")) - { - if ($state -eq "Unknown" -or $state -eq "Options") - { - $commandParameters[$i] = "Option" - $state = "Options" - } - else - { - $commandParameters[$i] = "CommandOption" - $state = "CommandOptions" - } - } - else - { - if ($state -ne "CommandOptions") - { - $commandParameters[$i] = "Command" - $command = $p - $state = "CommandOptions" - } - else - { - $commandParameters[$i] = "CommandOther" - } - } - } - - if ($global:DockerCompletion.Count -eq 0) - { - $global:DockerCompletion["commands"] = @{} - $global:DockerCompletion["options"] = @() - - docker --help | ForEach-Object { - Write-Output $_ - if ($_ -match "^ (\w+)\s+(.+)") - { - $global:DockerCompletion["commands"][$Matches[1]] = @{} - - $currentCommand = $global:DockerCompletion["commands"][$Matches[1]] - $currentCommand["options"] = @() - } - elseif ($_ -match $flagRegex) - { - $global:DockerCompletion["options"] += $Matches[1] - if ($Matches[2] -ne $null) - { - $global:DockerCompletion["options"] += $Matches[2] - } - } - } - - } - - if ($wordToComplete -eq $null) - { - $commandToComplete = "Command" - if ($commandParameters.Count -gt 0) - { - if ($commandParameters[$commandParameters.Count] -eq "Command") - { - $commandToComplete = "CommandOther" - } - } - } else { - $commandToComplete = $commandParameters[$wordToComplete] - } - - switch ($commandToComplete) - { - "Command" { $global:DockerCompletion["commands"].Keys | MatchingCommand -Command $commandName | Sort-Object | Get-AutoCompleteResult } - "Option" { $global:DockerCompletion["options"] | MatchingCommand -Command $commandName | Sort-Object | Get-AutoCompleteResult } - "CommandOption" { - $options = $global:DockerCompletion["commands"][$command]["options"] - if ($options.Count -eq 0) - { - docker $command --help | % { - if ($_ -match $flagRegex) - { - $options += $Matches[1] - if ($Matches[2] -ne $null) - { - $options += $Matches[2] - } - } - } - } - - $global:DockerCompletion["commands"][$command]["options"] = $options - $options | MatchingCommand -Command $commandName | Sort-Object | Get-AutoCompleteResult - } - "CommandOther" { - $filter = $null - switch ($command) - { - "start" { $filter = "status=exited" } - "stop" { $filter = "status=running" } - } - Get-Containers $filter | MatchingCommand -Command $commandName | Sort-Object | Get-AutoCompleteResult - } - default { $global:DockerCompletion["commands"].Keys | MatchingCommand -Command $commandName } - } -} - -# Register the TabExpension2 function -if (-not $global:options) { $global:options = @{CustomArgumentCompleters = @{};NativeArgumentCompleters = @{}}} 
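# [Editorial sketch, not part of the module] Once the module is imported and the
# completer below is registered, it can be exercised non-interactively through the
# built-in TabExpansion2 function, e.g.:
#   TabExpansion2 -inputScript 'docker ru' -cursorColumn 9
# whose CompletionMatches should include 'run'. Results depend on the local
# docker CLI.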
-$global:options['NativeArgumentCompleters']['docker'] = $Completion_Docker - -$function:tabexpansion2 = $function:tabexpansion2 -replace 'End\r\n{','End { if ($null -ne $options) { $options += $global:options} else {$options = $global:options}' \ No newline at end of file diff --git a/contrib/completion/zsh/REVIEWERS b/contrib/completion/zsh/REVIEWERS deleted file mode 100644 index 03ee2dde3d..0000000000 --- a/contrib/completion/zsh/REVIEWERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Jessie Frazelle (@jfrazelle) diff --git a/contrib/completion/zsh/_docker b/contrib/completion/zsh/_docker deleted file mode 100644 index 554379246b..0000000000 --- a/contrib/completion/zsh/_docker +++ /dev/null @@ -1,2114 +0,0 @@ -#compdef docker dockerd -# -# zsh completion for docker (http://docker.com) -# -# version: 0.3.0 -# github: https://github.com/felixr/docker-zsh-completion -# -# contributors: -# - Felix Riedel -# - Steve Durrheimer -# - Vincent Bernat -# -# license: -# -# Copyright (c) 2013, Felix Riedel -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions are met: -# * Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# * Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# * Neither the name of the <organization> nor the -# names of its contributors may be used to endorse or promote products -# derived from this software without specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY -# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
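# [Editorial sketch, not part of the original header] Like any zsh completion
# file, _docker is picked up from a directory on $fpath, e.g.:
#
#   mkdir -p ~/.zsh/completions
#   cp _docker ~/.zsh/completions/
#   # in ~/.zshrc, before compinit:
#   fpath=(~/.zsh/completions $fpath)
#   autoload -Uz compinit && compinit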
-# - -# Short-option stacking can be enabled with: -# zstyle ':completion:*:*:docker:*' option-stacking yes -# zstyle ':completion:*:*:docker-*:*' option-stacking yes -__docker_arguments() { - if zstyle -t ":completion:${curcontext}:" option-stacking; then - print -- -s - fi -} - -__docker_get_containers() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local kind type line s - declare -a running stopped lines args names - - kind=$1; shift - type=$1; shift - [[ $kind = (stopped|all) ]] && args=($args -a) - - lines=(${(f)"$(_call_program commands docker $docker_options ps --format 'table' --no-trunc $args)"}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 # Last column, should go to the end of the line - lines=(${lines[2,-1]}) - - # Container ID - if [[ $type = (ids|all) ]]; then - for line in $lines; do - s="${${line[${begin[CONTAINER ID]},${end[CONTAINER ID]}]%% ##}[0,12]}" - s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" - s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" - if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then - stopped=($stopped $s) - else - running=($running $s) - fi - done - fi - - # Names: we only display the one without slash. All other names - # are generated and may clutter the completion. However, with - # Swarm, all names may be prefixed by the swarm node name. - if [[ $type = (names|all) ]]; then - for line in $lines; do - names=(${(ps:,:)${${line[${begin[NAMES]},${end[NAMES]}]}%% *}}) - # First step: find a common prefix and strip it (swarm node case) - (( ${#${(u)names%%/*}} == 1 )) && names=${names#${names[1]%%/*}/} - # Second step: only keep the first name without a / - s=${${names:#*/*}[1]} - # If no name, well give up. 
- (( $#s != 0 )) || continue - s="$s:${(l:15:: :::)${${line[${begin[CREATED]},${end[CREATED]}]/ ago/}%% ##}}" - s="$s, ${${${line[${begin[IMAGE]},${end[IMAGE]}]}/:/\\:}%% ##}" - if [[ ${line[${begin[STATUS]},${end[STATUS]}]} = Exit* ]]; then - stopped=($stopped $s) - else - running=($running $s) - fi - done - fi - - [[ $kind = (running|all) ]] && _describe -t containers-running "running containers" running "$@" && ret=0 - [[ $kind = (stopped|all) ]] && _describe -t containers-stopped "stopped containers" stopped "$@" && ret=0 - return ret -} - -__docker_stoppedcontainers() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_containers stopped all "$@" -} - -__docker_runningcontainers() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_containers running all "$@" -} - -__docker_containers() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_containers all all "$@" -} - -__docker_containers_ids() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_containers all ids "$@" -} - -__docker_containers_names() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_containers all names "$@" -} - -__docker_plugins() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - emulate -L zsh - setopt extendedglob - local -a plugins - plugins=(${(ps: :)${(M)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Plugins:}%%$'\n'^ *}}:# $1: *}## $1: }) - _describe -t plugins "$1 plugins" plugins && ret=0 - return ret -} - -__docker_images() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a images - images=(${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/(#b)([^ ]##) ##([^ ]##) ##([^ ]##)*/${match[3]}:${(r:15:: :::)match[2]} in ${match[1]}}) - _describe -t docker-images "images" images && ret=0 - __docker_repositories_with_tags && ret=0 - return ret -} - -__docker_repositories() { - [[ $PREFIX = -* ]] && return 1 - declare -a repos - repos=(${${${(f)"$(_call_program commands docker $docker_options images)"}%% *}[2,-1]}) - repos=(${repos#}) - _describe -t docker-repos "repositories" repos -} - -__docker_repositories_with_tags() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a repos onlyrepos matched - declare m - repos=(${${${${(f)"$(_call_program commands docker $docker_options images)"}[2,-1]}/ ##/:::}%% *}) - repos=(${${repos%:::}#}) - # Check if we have a prefix-match for the current prefix. - onlyrepos=(${repos%::*}) - for m in $onlyrepos; do - [[ ${PREFIX##${~~m}} != ${PREFIX} ]] && { - # Yes, complete with tags - repos=(${${repos/:::/:}/:/\\:}) - _describe -t docker-repos-with-tags "repositories with tags" repos && ret=0 - return ret - } - done - # No, only complete repositories - onlyrepos=(${${repos%:::*}/:/\\:}) - _describe -t docker-repos "repositories" onlyrepos -qS : && ret=0 - - return ret -} - -__docker_search() { - [[ $PREFIX = -* ]] && return 1 - local cache_policy - zstyle -s ":completion:${curcontext}:" cache-policy cache_policy - if [[ -z "$cache_policy" ]]; then - zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy - fi - - local searchterm cachename - searchterm="${words[$CURRENT]%/}" - cachename=_docker-search-$searchterm - - local expl - local -a result - if ( [[ ${(P)+cachename} -eq 0 ]] || _cache_invalid ${cachename#_} ) \ - && ! _retrieve_cache ${cachename#_}; then - _message "Searching for ${searchterm}..." 
- result=(${${${(f)"$(_call_program commands docker $docker_options search $searchterm)"}%% *}[2,-1]}) - _store_cache ${cachename#_} result - fi - _wanted dockersearch expl 'available images' compadd -a result -} - -__docker_get_log_options() { - [[ $PREFIX = -* ]] && return 1 - - integer ret=1 - local log_driver=${opt_args[--log-driver]:-"all"} - local -a awslogs_options fluentd_options gelf_options journald_options json_file_options syslog_options splunk_options - - awslogs_options=("awslogs-region" "awslogs-group" "awslogs-stream") - fluentd_options=("env" "fluentd-address" "fluentd-async-connect" "fluentd-buffer-limit" "fluentd-retry-wait" "fluentd-max-retries" "labels" "tag") - gcplogs_options=("env" "gcp-log-cmd" "gcp-project" "labels") - gelf_options=("env" "gelf-address" "gelf-compression-level" "gelf-compression-type" "labels" "tag") - journald_options=("env" "labels" "tag") - json_file_options=("env" "labels" "max-file" "max-size") - syslog_options=("env" "labels" "syslog-address" "syslog-facility" "syslog-format" "syslog-tls-ca-cert" "syslog-tls-cert" "syslog-tls-key" "syslog-tls-skip-verify" "tag") - splunk_options=("env" "labels" "splunk-caname" "splunk-capath" "splunk-index" "splunk-insecureskipverify" "splunk-source" "splunk-sourcetype" "splunk-token" "splunk-url" "tag") - - [[ $log_driver = (awslogs|all) ]] && _describe -t awslogs-options "awslogs options" awslogs_options "$@" && ret=0 - [[ $log_driver = (fluentd|all) ]] && _describe -t fluentd-options "fluentd options" fluentd_options "$@" && ret=0 - [[ $log_driver = (gcplogs|all) ]] && _describe -t gcplogs-options "gcplogs options" gcplogs_options "$@" && ret=0 - [[ $log_driver = (gelf|all) ]] && _describe -t gelf-options "gelf options" gelf_options "$@" && ret=0 - [[ $log_driver = (journald|all) ]] && _describe -t journald-options "journald options" journald_options "$@" && ret=0 - [[ $log_driver = (json-file|all) ]] && _describe -t json-file-options "json-file options" json_file_options "$@" && ret=0 - [[ $log_driver = (syslog|all) ]] && _describe -t syslog-options "syslog options" syslog_options "$@" && ret=0 - [[ $log_driver = (splunk|all) ]] && _describe -t splunk-options "splunk options" splunk_options "$@" && ret=0 - - return ret -} - -__docker_log_drivers() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - drivers=(awslogs etwlogs fluentd gcplogs gelf journald json-file none splunk syslog) - _describe -t log-drivers "log drivers" drivers && ret=0 - return ret -} - -__docker_log_options() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (syslog-format) - syslog_format_opts=('rfc3164' 'rfc5424' 'rfc5424micro') - _describe -t syslog-format-opts "Syslog format Options" syslog_format_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - __docker_get_log_options -qS "=" && ret=0 - fi - - return ret -} - -__docker_complete_detach_keys() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - compset -P "*," - keys=(${:-{a-z}}) - ctrl_keys=(${:-ctrl-{{a-z},{@,'[','\\','^',']',_}}}) - _describe -t detach_keys "[a-z]" keys -qS "," && ret=0 - _describe -t detach_keys-ctrl "'ctrl-' + 'a-z @ [ \\\\ ] ^ _'" ctrl_keys -qS "," && ret=0 -} - -__docker_complete_pid() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local -a opts vopts - - opts=('host') - vopts=('container') - - if compset -P '*:'; then - case "${${words[-1]%:*}#*=}" in - (container) - __docker_runningcontainers && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - 
else - _describe -t pid-value-opts "PID Options with value" vopts -qS ":" && ret=0 - _describe -t pid-opts "PID Options" opts && ret=0 - fi - - return ret -} - -__docker_complete_runtimes() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - emulate -L zsh - setopt extendedglob - local -a runtimes_opts - runtimes_opts=(${(ps: :)${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Runtimes: }%%$'\n'^ *}}}) - _describe -t runtimes-opts "runtimes options" runtimes_opts && ret=0 -} - -__docker_complete_ps_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (ancestor) - __docker_images && ret=0 - ;; - (before|since) - __docker_containers && ret=0 - ;; - (id) - __docker_containers_ids && ret=0 - ;; - (name) - __docker_containers_names && ret=0 - ;; - (network) - __docker_networks && ret=0 - ;; - (status) - status_opts=('created' 'dead' 'exited' 'paused' 'restarting' 'running') - _describe -t status-filter-opts "Status Filter Options" status_opts && ret=0 - ;; - (volume) - __docker_volumes && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('ancestor' 'before' 'exited' 'id' 'label' 'name' 'network' 'since' 'status' 'volume') - _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_complete_search_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a boolean_opts opts - - boolean_opts=('true' 'false') - opts=('is-automated' 'is-official' 'stars') - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (is-automated|is-official) - _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_complete_images_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a boolean_opts opts - - boolean_opts=('true' 'false') - opts=('before' 'dangling' 'label' 'since') - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (before|since) - __docker_images && ret=0 - ;; - (dangling) - _describe -t boolean-filter-opts "filter options" boolean_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_complete_events_filter() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a opts - - opts=('container' 'daemon' 'event' 'image' 'label' 'network' 'type' 'volume') - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (container) - __docker_containers && ret=0 - ;; - (daemon) - emulate -L zsh - setopt extendedglob - local -a daemon_opts - daemon_opts=( - ${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'Name: }%%$'\n'^ *}} - ${${(f)${${"$(_call_program commands docker $docker_options info)"##*$'\n'ID: }%%$'\n'^ *}}//:/\\:} - ) - _describe -t daemon-filter-opts "daemon filter options" daemon_opts && ret=0 - ;; - (event) - local -a event_opts - event_opts=('attach' 'commit' 'connect' 'copy' 'create' 'delete' 'destroy' 'detach' 'die' 'disconnect' 'exec_create' 'exec_detach' - 'exec_start' 'export' 'import' 'kill' 'load' 'mount' 'oom' 'pause' 'pull' 'push' 'reload' 'rename' 'resize' 'restart' 'save' 'start' - 'stop' 'tag' 'top' 'unmount' 'unpause' 'untag' 'update') - _describe -t event-filter-opts "event filter options" event_opts && ret=0 - ;; - (image) - __docker_images && ret=0 - ;; 
- (network) - __docker_networks && ret=0 - ;; - (type) - local -a type_opts - type_opts=('container' 'daemon' 'image' 'network' 'volume') - _describe -t type-filter-opts "type filter options" type_opts && ret=0 - ;; - (volume) - __docker_volumes && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_network_complete_ls_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (driver) - __docker_plugins Network && ret=0 - ;; - (id) - __docker_networks_ids && ret=0 - ;; - (name) - __docker_networks_names && ret=0 - ;; - (type) - type_opts=('builtin' 'custom') - _describe -t type-filter-opts "Type Filter Options" type_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('driver' 'id' 'label' 'name' 'type') - _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_get_networks() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local line s - declare -a lines networks - - type=$1; shift - - lines=(${(f)"$(_call_program commands docker $docker_options network ls)"}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Network ID - if [[ $type = (ids|all) ]]; then - for line in $lines; do - s="${line[${begin[NETWORK ID]},${end[NETWORK ID]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" - networks=($networks $s) - done - fi - - # Names - if [[ $type = (names|all) ]]; then - for line in $lines; do - s="${line[${begin[NAME]},${end[NAME]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" - networks=($networks $s) - done - fi - - _describe -t networks-list "networks" networks "$@" && ret=0 - return ret -} - -__docker_networks() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_networks all "$@" -} - -__docker_networks_ids() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_networks ids "$@" -} - -__docker_networks_names() { - [[ $PREFIX = -* ]] && return 1 - __docker_get_networks names "$@" -} - -__docker_network_commands() { - local -a _docker_network_subcommands - _docker_network_subcommands=( - "connect:Connect a container to a network" - "create:Creates a new network with a name specified by the user" - "disconnect:Disconnects a container from a network" - "inspect:Displays detailed information on a network" - "ls:Lists all the networks created by the user" - "rm:Deletes one or more networks" - ) - _describe -t docker-network-commands "docker network command" _docker_network_subcommands -} - -__docker_network_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (connect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*--alias=[Add network-scoped alias for the container]:alias: " \ - "($help)--ip=[Container IPv4 address]:IPv4: " \ - "($help)--ip6=[Container IPv6 address]:IPv6: " \ - "($help)*--link=[Add a link to another container]:link:->link" \ - "($help)*--link-local-ip=[Add a link-local address for the 
container]:IPv4/IPv6: " \ - "($help -)1:network:__docker_networks" \ - "($help -)2:containers:__docker_containers" && ret=0 - - case $state in - (link) - if compset -P "*:"; then - _wanted alias expl "Alias" compadd -E "" && ret=0 - else - __docker_runningcontainers -qS ":" && ret=0 - fi - ;; - esac - ;; - (create) - _arguments $(__docker_arguments) -A '-*' \ - $opts_help \ - "($help)*--aux-address[Auxiliary IPv4 or IPv6 addresses used by network driver]:key=IP: " \ - "($help -d --driver)"{-d=,--driver=}"[Driver to manage the Network]:driver:(null host bridge overlay)" \ - "($help)*--gateway=[IPv4 or IPv6 Gateway for the master subnet]:IP: " \ - "($help)--internal[Restricts external access to the network]" \ - "($help)*--ip-range=[Allocate container ip from a sub-range]:IP/mask: " \ - "($help)--ipam-driver=[IP Address Management Driver]:driver:(default)" \ - "($help)*--ipam-opt=[Custom IPAM plugin options]:opt=value: " \ - "($help)--ipv6[Enable IPv6 networking]" \ - "($help)*--label=[Set metadata on a network]:label=value: " \ - "($help)*"{-o=,--opt=}"[Driver specific options]:opt=value: " \ - "($help)*--subnet=[Subnet in CIDR format that represents a network segment]:IP/mask: " \ - "($help -)1:Network Name: " && ret=0 - ;; - (disconnect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)1:network:__docker_networks" \ - "($help -)2:containers:__docker_containers" && ret=0 - ;; - (inspect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help -)*:network:__docker_networks" && ret=0 - ;; - (ls) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--no-trunc[Do not truncate the output]" \ - "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ - "($help -q --quiet)"{-q,--quiet}"[Only display numeric IDs]" && ret=0 - case $state in - (filter-options) - __docker_network_complete_ls_filters && ret=0 - ;; - esac - ;; - (rm) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:network:__docker_networks" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 - ;; - esac - - return ret -} - -# BO node - -__docker_node_complete_ls_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (id) - __docker_complete_nodes_ids && ret=0 - ;; - (membership) - membership_opts=('accepted' 'pending' 'rejected') - _describe -t membership-opts "membership options" membership_opts && ret=0 - ;; - (name) - __docker_complete_nodes_names && ret=0 - ;; - (role) - role_opts=('manager' 'worker') - _describe -t role-opts "role options" role_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('id' 'label' 'membership' 'name' 'role') - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_node_complete_ps_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (desired-state) - state_opts=('accepted' 'running') - _describe -t state-opts "desired state options" state_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('desired-state' 'id' 'label' 'name') - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_nodes() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local line s - declare -a lines nodes args - - type=$1; shift 
- filter=$1; shift - [[ $filter != "none" ]] && args=("-f $filter") - - lines=(${(f)"$(_call_program commands docker $docker_options node ls $args)"}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Node ID - if [[ $type = (ids|all) ]]; then - for line in $lines; do - s="${line[${begin[ID]},${end[ID]}]%% ##}" - nodes=($nodes $s) - done - fi - - # Names - if [[ $type = (names|all) ]]; then - for line in $lines; do - s="${line[${begin[NAME]},${end[NAME]}]%% ##}" - nodes=($nodes $s) - done - fi - - _describe -t nodes-list "nodes" nodes "$@" && ret=0 - return ret -} - -__docker_complete_nodes() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes all none "$@" -} - -__docker_complete_nodes_ids() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes ids none "$@" -} - -__docker_complete_nodes_names() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes names none "$@" -} - -__docker_complete_pending_nodes() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes all "membership=pending" "$@" -} - -__docker_complete_manager_nodes() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes all "role=manager" "$@" -} - -__docker_complete_worker_nodes() { - [[ $PREFIX = -* ]] && return 1 - __docker_nodes all "role=worker" "$@" -} - -__docker_node_commands() { - local -a _docker_node_subcommands - _docker_node_subcommands=( - "demote:Demote a node from manager in the swarm" - "inspect:Display detailed information on one or more nodes" - "ls:List nodes in the swarm" - "promote:Promote a node to manager in the swarm" - "rm:Remove one or more nodes from the swarm" - "ps:List tasks running on a node" - "update:Update a node" - ) - _describe -t docker-node-commands "docker node command" _docker_node_subcommands -} - -__docker_node_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (rm|remove) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--force[Force remove an active node]" \ - "($help -)*:node:__docker_complete_pending_nodes" && ret=0 - ;; - (demote) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:node:__docker_complete_manager_nodes" && ret=0 - ;; - (inspect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help)--pretty[Print the information in a human-friendly format]" \ - "($help -)*:node:__docker_complete_nodes" && ret=0 - ;; - (ls|list) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ - "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 - case $state in - (filter-options) - __docker_node_complete_ls_filters && ret=0 - ;; - esac - ;; - (promote) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:node:__docker_complete_worker_nodes" && ret=0 - ;; - (ps) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Display all instances]" \ - "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ - "($help)--no-resolve[Do not map IDs to Names]" \ -
"($help -)1:node:__docker_complete_nodes" && ret=0 - case $state in - (filter-options) - __docker_node_complete_ps_filters && ret=0 - ;; - esac - ;; - (update) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--availability=[Availability of the node]:availability:(active pause drain)" \ - "($help)*--label-add=[Add or update a node label]:key=value: " \ - "($help)*--label-rm=[Remove a node label if exists]:label: " \ - "($help)--role=[Role of the node]:role:(manager worker)" \ - "($help -)1:node:__docker_complete_nodes" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_node_commands" && ret=0 - ;; - esac - - return ret -} - -# EO node - -# BO plugin - -__docker_complete_plugins() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local line s - declare -a lines plugins - - lines=(${(f)"$(_call_program commands docker $docker_options plugin ls)"}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Name - for line in $lines; do - s="${line[${begin[NAME]},${end[NAME]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[TAG]},${end[TAG]}]}%% ##}}" - plugins=($plugins $s) - done - - _describe -t plugins-list "plugins" plugins "$@" && ret=0 - return ret -} - -__docker_plugin_commands() { - local -a _docker_plugin_subcommands - _docker_plugin_subcommands=( - "disable:Disable a plugin" - "enable:Enable a plugin" - "inspect:Return low-level information about a plugin" - "install:Install a plugin" - "ls:List plugins" - "push:Push a plugin" - "rm:Remove a plugin" - "set:Change settings for a plugin" - ) - _describe -t docker-plugin-commands "docker plugin command" _docker_plugin_subcommands -} - -__docker_plugin_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (disable|enable|inspect|install|ls|push|rm) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)1:plugin:__docker_complete_plugins" && ret=0 - ;; - (set) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)1:plugin:__docker_complete_plugins" \ - "($help-)*:key=value: " && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_plugin_commands" && ret=0 - ;; - esac - - return ret -} - -# EO plugin - -# BO service - -__docker_service_complete_ls_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (id) - __docker_complete_services_ids && ret=0 - ;; - (name) - __docker_complete_services_names && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('id' 'label' 'name') - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_service_complete_ps_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (desired-state) - state_opts=('accepted' 'running') - _describe -t state-opts "desired state options" state_opts && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('desired-state' 'id' 'label' 'name') - _describe -t filter-opts "filter options" opts -qS "=" && ret=0 - fi - - 
return ret -} - -__docker_services() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - local line s - declare -a lines services - - type=$1; shift - - lines=(${(f)"$(_call_program commands docker $docker_options service ls)"}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Service ID - if [[ $type = (ids|all) ]]; then - for line in $lines; do - s="${line[${begin[ID]},${end[ID]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" - services=($services $s) - done - fi - - # Names - if [[ $type = (names|all) ]]; then - for line in $lines; do - s="${line[${begin[NAME]},${end[NAME]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[IMAGE]},${end[IMAGE]}]}%% ##}}" - services=($services $s) - done - fi - - _describe -t services-list "services" services "$@" && ret=0 - return ret -} - -__docker_complete_services() { - [[ $PREFIX = -* ]] && return 1 - __docker_services all "$@" -} - -__docker_complete_services_ids() { - [[ $PREFIX = -* ]] && return 1 - __docker_services ids "$@" -} - -__docker_complete_services_names() { - [[ $PREFIX = -* ]] && return 1 - __docker_services names "$@" -} - -__docker_service_commands() { - local -a _docker_service_subcommands - _docker_service_subcommands=( - "create:Create a new service" - "inspect:Display detailed information on one or more services" - "ls:List services" - "rm:Remove one or more services" - "scale:Scale one or multiple services" - "ps:List the tasks of a service" - "update:Update a service" - ) - _describe -t docker-service-commands "docker service command" _docker_service_subcommands -} - -__docker_service_subcommand() { - local -a _command_args opts_help opts_create_update - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - opts_create_update=( - "($help)*--constraint=[Placement constraints]:constraint: " - "($help)--endpoint-mode=[Endpoint mode (vip or dnsrr)]:mode:(dnsrr vip)" - "($help)*"{-e=,--env=}"[Set environment variables]:env: " - "($help)*--label=[Service labels]:label: " - "($help)--limit-cpu=[Limit CPUs]:value: " - "($help)--limit-memory=[Limit Memory]:value: " - "($help)--log-driver=[Logging driver for service]:logging driver:__docker_log_drivers" - "($help)*--log-opt=[Logging driver options]:log driver options:__docker_log_options" - "($help)*--mount=[Attach a mount to the service]:mount: " - "($help)--name=[Service name]:name: " - "($help)*--network=[Network attachments]:network: " - "($help)*"{-p=,--publish=}"[Publish a port as a node port]:port: " - "($help)--replicas=[Number of tasks]:replicas: " - "($help)--reserve-cpu=[Reserve CPUs]:value: " - "($help)--reserve-memory=[Reserve Memory]:value: " - "($help)--restart-condition=[Restart when condition is met]:mode:(any none on-failure)" - "($help)--restart-delay=[Delay between restart attempts]:delay: " - "($help)--restart-max-attempts=[Maximum number of restarts before giving up]:max-attempts: " - "($help)--restart-window=[Window used to evaluate the restart policy]:window: " - "($help)--stop-grace-period=[Time to wait before force killing a container]:grace period: " - "($help)--update-delay=[Delay between updates]:delay: " -
"($help)--update-failure-action=[Action on update failure]:mode:(pause continue)" - "($help)--update-parallelism=[Maximum number of tasks updated simultaneously]:number: " - "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" - "($help)--with-registry-auth[Send registry authentication details to swarm agents]" - "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" - ) - - case "$words[1]" in - (create) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_create_update \ - "($help)*--container-label=[Container labels]:label: " \ - "($help)--mode=[Service Mode]:mode:(global replicated)" \ - "($help -): :__docker_images" \ - "($help -):command: _command_names -e" \ - "($help -)*::arguments: _normal" && ret=0 - ;; - (inspect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help)--pretty[Print the information in a human friendly format]" \ - "($help -)*:service:__docker_complete_services" && ret=0 - ;; - (ls|list) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Filter output based on conditions provided]:filter:->filter-options" \ - "($help -q --quiet)"{-q,--quiet}"[Only display IDs]" && ret=0 - case $state in - (filter-options) - __docker_service_complete_ls_filters && ret=0 - ;; - esac - ;; - (rm|remove) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:service:__docker_complete_services" && ret=0 - ;; - (scale) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:service:->values" && ret=0 - case $state in - (values) - if compset -P '*='; then - _message 'replicas' && ret=0 - else - __docker_complete_services -qS "=" - fi - ;; - esac - ;; - (ps) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Display all tasks]" \ - "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ - "($help)--no-resolve[Do not map IDs to Names]" \ - "($help -)1:service:__docker_complete_services" && ret=0 - case $state in - (filter-options) - __docker_service_complete_ps_filters && ret=0 - ;; - esac - ;; - (update) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_create_update \ - "($help)--arg=[Service command args]:arguments: _normal" \ - "($help)*--container-label-add=[Add or update container labels]:label: " \ - "($help)*--container-label-rm=[Remove a container label by its key]:label: " \ - "($help)--image=[Service image tag]:image:__docker_repositories" \ - "($help -)1:service:__docker_complete_services" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_service_commands" && ret=0 - ;; - esac - - return ret -} - -# EO service - -# BO swarm - -__docker_swarm_commands() { - local -a _docker_swarm_subcommands - _docker_swarm_subcommands=( - "init:Initialize a swarm" - "join:Join a swarm as a node and/or manager" - "join-token:Manage join tokens" - "leave:Leave a swarm" - "update:Update the swarm" - ) - _describe -t docker-swarm-commands "docker swarm command" _docker_swarm_subcommands -} - -__docker_swarm_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (init) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--advertise-addr[Advertised address]:ip\:port: " \ - "($help)*--external-ca=[Specifications of one or more certificate signing endpoints]:endpoint: " \ - 
"($help)--force-new-cluster[Force create a new cluster from current state]" \ - "($help)--listen-addr=[Listen address]:ip\:port: " && ret=0 - ;; - (join) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--advertise-addr[Advertised address]:ip\:port: " \ - "($help)--listen-addr=[Listen address]:ip\:port: " \ - "($help)--token=[Token for entry into the swarm]:secret: " \ - "($help -):host\:port: " && ret=0 - ;; - (join-token) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -q --quiet)"{-q,--quiet}"[Only display token]" \ - "($help)--rotate[Rotate join token]" \ - "($help -):role:(manager worker)" && ret=0 - ;; - (leave) - _arguments $(__docker_arguments) \ - $opts_help && ret=0 - ;; - (update) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--cert-expiry=[Validity period for node certificates]:duration: " \ - "($help)--dispatcher-heartbeat=[Dispatcher heartbeat period]:duration: " \ - "($help)--task-history-limit=[Task history retention limit]:limit: " && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_network_commands" && ret=0 - ;; - esac - - return ret -} - -# EO swarm - -__docker_volume_complete_ls_filters() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - - if compset -P '*='; then - case "${${words[-1]%=*}#*=}" in - (dangling) - dangling_opts=('true' 'false') - _describe -t dangling-filter-opts "Dangling Filter Options" dangling_opts && ret=0 - ;; - (driver) - __docker_plugins Volume && ret=0 - ;; - (name) - __docker_volumes && ret=0 - ;; - *) - _message 'value' && ret=0 - ;; - esac - else - opts=('dangling' 'driver' 'name') - _describe -t filter-opts "Filter Options" opts -qS "=" && ret=0 - fi - - return ret -} - -__docker_volumes() { - [[ $PREFIX = -* ]] && return 1 - integer ret=1 - declare -a lines volumes - - lines=(${(f)"$(_call_program commands docker $docker_options volume ls)"}) - - # Parse header line to find columns - local i=1 j=1 k header=${lines[1]} - declare -A begin end - while (( j < ${#header} - 1 )); do - i=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 1 )) - j=$(( i + ${${header[$i,-1]}[(i) ]} - 1 )) - k=$(( j + ${${header[$j,-1]}[(i)[^ ]]} - 2 )) - begin[${header[$i,$((j-1))]}]=$i - end[${header[$i,$((j-1))]}]=$k - done - end[${header[$i,$((j-1))]}]=-1 - lines=(${lines[2,-1]}) - - # Names - local line s - for line in $lines; do - s="${line[${begin[VOLUME NAME]},${end[VOLUME NAME]}]%% ##}" - s="$s:${(l:7:: :::)${${line[${begin[DRIVER]},${end[DRIVER]}]}%% ##}}" - volumes=($volumes $s) - done - - _describe -t volumes-list "volumes" volumes && ret=0 - return ret -} - -__docker_volume_commands() { - local -a _docker_volume_subcommands - _docker_volume_subcommands=( - "create:Create a volume" - "inspect:Display detailed information on one or more volumes" - "ls:List volumes" - "rm:Remove one or more volumes" - ) - _describe -t docker-volume-commands "docker volume command" _docker_volume_subcommands -} - -__docker_volume_subcommand() { - local -a _command_args opts_help - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - - case "$words[1]" in - (create) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -d --driver)"{-d=,--driver=}"[Volume driver name]:Driver name:(local)" \ - "($help)*--label=[Set metadata for a volume]:label=value: " \ - "($help)--name=[Volume name]" \ - "($help)*"{-o=,--opt=}"[Driver specific options]:Driver option: " && ret=0 - ;; - (inspect) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f 
--format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help -)1:volume:__docker_volumes" && ret=0 - ;; - (ls) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Provide filter values]:filter:->filter-options" \ - "($help -q --quiet)"{-q,--quiet}"[Only display volume names]" && ret=0 - case $state in - (filter-options) - __docker_volume_complete_ls_filters && ret=0 - ;; - esac - ;; - (rm) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -):volume:__docker_volumes" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_volume_commands" && ret=0 - ;; - esac - - return ret -} - -__docker_caching_policy() { - oldp=( "$1"(Nmh+1) ) # 1 hour - (( $#oldp )) -} - -__docker_commands() { - local cache_policy - - zstyle -s ":completion:${curcontext}:" cache-policy cache_policy - if [[ -z "$cache_policy" ]]; then - zstyle ":completion:${curcontext}:" cache-policy __docker_caching_policy - fi - - if ( [[ ${+_docker_subcommands} -eq 0 ]] || _cache_invalid docker_subcommands) \ - && ! _retrieve_cache docker_subcommands; - then - local -a lines - lines=(${(f)"$(_call_program commands docker 2>&1)"}) - _docker_subcommands=(${${${lines[$((${lines[(i)Commands:]} + 1)),${lines[(I) *]}]}## #}/ ##/:}) - _docker_subcommands=($_docker_subcommands 'daemon:Enable daemon mode' 'help:Show help for a command') - (( $#_docker_subcommands > 2 )) && _store_cache docker_subcommands _docker_subcommands - fi - _describe -t docker-commands "docker command" _docker_subcommands -} - -__docker_subcommand() { - local -a _command_args opts_help opts_build_create_run opts_build_create_run_update opts_create_run opts_create_run_update - local expl help="--help" - integer ret=1 - - opts_help=("(: -)--help[Print usage]") - opts_build_create_run=( - "($help)--cgroup-parent=[Parent cgroup for the container]:cgroup: " - "($help)--isolation=[Container isolation technology]:isolation:(default hyperv process)" - "($help)--disable-content-trust[Skip image verification]" - "($help)*--shm-size=[Size of '/dev/shm' (format is '')]:shm size: " - "($help)*--ulimit=[ulimit options]:ulimit: " - "($help)--userns=[Container user namespace]:user namespace:(host)" - ) - opts_build_create_run_update=( - "($help -c --cpu-shares)"{-c=,--cpu-shares=}"[CPU shares (relative weight)]:CPU shares:(0 10 100 200 500 800 1000)" - "($help)--cpu-period=[Limit the CPU CFS (Completely Fair Scheduler) period]:CPU period: " - "($help)--cpu-quota=[Limit the CPU CFS (Completely Fair Scheduler) quota]:CPU quota: " - "($help)--cpuset-cpus=[CPUs in which to allow execution]:CPUs: " - "($help)--cpuset-mems=[MEMs in which to allow execution]:MEMs: " - "($help -m --memory)"{-m=,--memory=}"[Memory limit]:Memory limit: " - "($help)--memory-swap=[Total memory limit with swap]:Memory limit: " - ) - opts_create_run=( - "($help -a --attach)"{-a=,--attach=}"[Attach to stdin, stdout or stderr]:device:(STDIN STDOUT STDERR)" - "($help)*--add-host=[Add a custom host-to-IP mapping]:host\:ip mapping: " - "($help)*--blkio-weight-device=[Block IO (relative device weight)]:device:Block IO weight: " - "($help)*--cap-add=[Add Linux capabilities]:capability: " - "($help)*--cap-drop=[Drop Linux capabilities]:capability: " - "($help)--cidfile=[Write the container ID to the file]:CID file:_files" - "($help)*--device=[Add a host device to the container]:device:_files" - "($help)*--device-read-bps=[Limit the read rate (bytes per second) from a device]:device:IO rate: " - 
"($help)*--device-read-iops=[Limit the read rate (IO per second) from a device]:device:IO rate: " - "($help)*--device-write-bps=[Limit the write rate (bytes per second) to a device]:device:IO rate: " - "($help)*--device-write-iops=[Limit the write rate (IO per second) to a device]:device:IO rate: " - "($help)*--dns=[Custom DNS servers]:DNS server: " - "($help)*--dns-opt=[Custom DNS options]:DNS option: " - "($help)*--dns-search=[Custom DNS search domains]:DNS domains: " - "($help)*"{-e=,--env=}"[Environment variables]:environment variable: " - "($help)--entrypoint=[Overwrite the default entrypoint of the image]:entry point: " - "($help)*--env-file=[Read environment variables from a file]:environment file:_files" - "($help)*--expose=[Expose a port from the container without publishing it]: " - "($help)*--group-add=[Add additional groups to run as]:group:_groups" - "($help -h --hostname)"{-h=,--hostname=}"[Container host name]:hostname:_hosts" - "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" - "($help)--ip=[Container IPv4 address]:IPv4: " - "($help)--ip6=[Container IPv6 address]:IPv6: " - "($help)--ipc=[IPC namespace to use]:IPC namespace: " - "($help)*--link=[Add link to another container]:link:->link" - "($help)*--link-local-ip=[Add a link-local address for the container]:IPv4/IPv6: " - "($help)*"{-l=,--label=}"[Container metadata]:label: " - "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_log_drivers" - "($help)*--log-opt=[Log driver specific options]:log driver options:__docker_log_options" - "($help)--mac-address=[Container MAC address]:MAC address: " - "($help)--name=[Container name]:name: " - "($help)--network=[Connect a container to a network]:network mode:(bridge none container host)" - "($help)*--network-alias=[Add network-scoped alias for the container]:alias: " - "($help)--oom-kill-disable[Disable OOM Killer]" - "($help)--oom-score-adj[Tune the host's OOM preferences for containers (accepts -1000 to 1000)]" - "($help)--pids-limit[Tune container pids limit (set -1 for unlimited)]" - "($help -P --publish-all)"{-P,--publish-all}"[Publish all exposed ports]" - "($help)*"{-p=,--publish=}"[Expose a container's port to the host]:port:_ports" - "($help)--pid=[PID namespace to use]:PID namespace:__docker_complete_pid" - "($help)--privileged[Give extended privileges to this container]" - "($help)--read-only[Mount the container's root filesystem as read only]" - "($help)*--security-opt=[Security options]:security option: " - "($help)*--sysctl=-[sysctl options]:sysctl: " - "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" - "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" - "($help)--tmpfs[mount tmpfs]" - "($help)*-v[Bind mount a volume]:volume: " - "($help)--volume-driver=[Optional volume driver for the container]:volume driver:(local)" - "($help)*--volumes-from=[Mount volumes from the specified container]:volume: " - "($help -w --workdir)"{-w=,--workdir=}"[Working directory inside the container]:directory:_directories" - ) - opts_create_run_update=( - "($help)--blkio-weight=[Block IO (relative weight), between 10 and 1000]:Block IO weight:(10 100 500 1000)" - "($help)--kernel-memory=[Kernel memory limit in bytes]:Memory limit: " - "($help)--memory-reservation=[Memory soft limit]:Memory limit: " - "($help)--restart=[Restart policy]:restart policy:(no on-failure always unless-stopped)" - ) - opts_attach_exec_run_start=( - "($help)--detach-keys=[Escape key sequence used to detach a 
container]:sequence:__docker_complete_detach_keys" - ) - - case "$words[1]" in - (attach) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_attach_exec_run_start \ - "($help)--no-stdin[Do not attach stdin]" \ - "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ - "($help -):containers:__docker_runningcontainers" && ret=0 - ;; - (build) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_build_create_run \ - $opts_build_create_run_update \ - "($help)*--build-arg[Build-time variables]:<varname>=<value>: " \ - "($help -f --file)"{-f=,--file=}"[Name of the Dockerfile]:Dockerfile:_files" \ - "($help)--force-rm[Always remove intermediate containers]" \ - "($help)*--label=[Set metadata for an image]:label=value: " \ - "($help)--no-cache[Do not use cache when building the image]" \ - "($help)--pull[Attempt to pull a newer version of the image]" \ - "($help -q --quiet)"{-q,--quiet}"[Suppress verbose build output]" \ - "($help)--rm[Remove intermediate containers after a successful build]" \ - "($help -t --tag)*"{-t=,--tag=}"[Repository, name and tag for the image]: :__docker_repositories_with_tags" \ - "($help -):path or URL:_directories" && ret=0 - ;; - (commit) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --author)"{-a=,--author=}"[Author]:author: " \ - "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ - "($help -m --message)"{-m=,--message=}"[Commit message]:message: " \ - "($help -p --pause)"{-p,--pause}"[Pause container during commit]" \ - "($help -):container:__docker_containers" \ - "($help -): :__docker_repositories_with_tags" && ret=0 - ;; - (cp) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -L --follow-link)"{-L,--follow-link}"[Always follow symbolic links]" \ - "($help -)1:container:->container" \ - "($help -)2:hostpath:_files" && ret=0 - case $state in - (container) - if compset -P "*:"; then - _files && ret=0 - else - __docker_containers -qS ":" && ret=0 - fi - ;; - esac - ;; - (create) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_build_create_run \ - $opts_build_create_run_update \ - $opts_create_run \ - $opts_create_run_update \ - "($help -): :__docker_images" \ - "($help -):command: _command_names -e" \ - "($help -)*::arguments: _normal" && ret=0 - - case $state in - (link) - if compset -P "*:"; then - _wanted alias expl "Alias" compadd -E "" && ret=0 - else - __docker_runningcontainers -qS ":" && ret=0 - fi - ;; - esac - - ;; - (daemon) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*--add-runtime=[Register an additional OCI compatible runtime]:runtime:__docker_complete_runtimes" \ - "($help)--api-cors-header=[CORS headers in the remote API]:CORS headers: " \ - "($help)*--authorization-plugin=[Authorization plugins to load]" \ - "($help -b --bridge)"{-b=,--bridge=}"[Attach containers to a network bridge]:bridge:_net_interfaces" \ - "($help)--bip=[Network bridge IP]:IP address: " \ - "($help)--cgroup-parent=[Parent cgroup for all containers]:cgroup: " \ - "($help)--config-file=[Path to daemon configuration file]:Config File:_files" \ - "($help)--containerd=[Path to containerd socket]:socket:_files -g \"*.sock\"" \ - "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ - "($help)--default-gateway[Container default gateway IPv4 address]:IPv4 address: " \ - "($help)--default-gateway-v6[Container default gateway IPv6 address]:IPv6 address: " \ - "($help)--cluster-store=[URL of the distributed storage backend]:Cluster
Store:->cluster-store" \ - "($help)--cluster-advertise=[Address or interface name to advertise]:Instance to advertise (host\:port): " \ - "($help)*--cluster-store-opt=[Cluster store options]:Cluster options:->cluster-store-options" \ - "($help)*--dns=[DNS server to use]:DNS: " \ - "($help)*--dns-search=[DNS search domains to use]:DNS search: " \ - "($help)*--dns-opt=[DNS options to use]:DNS option: " \ - "($help)*--default-ulimit=[Default ulimits for containers]:ulimit: " \ - "($help)--disable-legacy-registry[Disable contacting legacy registries]" \ - "($help)*--exec-opt=[Runtime execution options]:runtime execution options: " \ - "($help)--exec-root=[Root directory for execution state files]:path:_directories" \ - "($help)--fixed-cidr=[IPv4 subnet for fixed IPs]:IPv4 subnet: " \ - "($help)--fixed-cidr-v6=[IPv6 subnet for fixed IPs]:IPv6 subnet: " \ - "($help -G --group)"{-G=,--group=}"[Group for the unix socket]:group:_groups" \ - "($help -g --graph)"{-g=,--graph=}"[Root of the Docker runtime]:path:_directories" \ - "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ - "($help)--icc[Enable inter-container communication]" \ - "($help)*--insecure-registry=[Enable insecure registry communication]:registry: " \ - "($help)--ip=[Default IP when binding container ports]" \ - "($help)--ip-forward[Enable net.ipv4.ip_forward]" \ - "($help)--ip-masq[Enable IP masquerading]" \ - "($help)--iptables[Enable addition of iptables rules]" \ - "($help)--ipv6[Enable IPv6 networking]" \ - "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ - "($help)*--label=[Key=value labels]:label: " \ - "($help)--live-restore[Enable live restore of docker when containers are still running]" \ - "($help)--log-driver=[Default driver for container logs]:logging driver:__docker_log_drivers" \ - "($help)*--log-opt=[Default log driver options for containers]:log driver options:__docker_log_options" \ - "($help)--max-concurrent-downloads[Set the max concurrent downloads for each pull]" \ - "($help)--max-concurrent-uploads[Set the max concurrent uploads for each push]" \ - "($help)--mtu=[Network MTU]:mtu:(0 576 1420 1500 9000)" \ - "($help)--oom-score-adjust=[Set the oom_score_adj for the daemon]:oom-score:(-500)" \ - "($help -p --pidfile)"{-p=,--pidfile=}"[Path to use for daemon PID file]:PID file:_files" \ - "($help)--raw-logs[Full timestamps without ANSI coloring]" \ - "($help)*--registry-mirror=[Preferred Docker registry mirror]:registry mirror: " \ - "($help -s --storage-driver)"{-s=,--storage-driver=}"[Storage driver to use]:driver:(aufs btrfs devicemapper overlay overlay2 vfs zfs)" \ - "($help)--selinux-enabled[Enable selinux support]" \ - "($help)*--storage-opt=[Storage driver options]:storage driver options: " \ - "($help)--tls[Use TLS]" \ - "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \ - "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \ - "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \ - "($help)--tlsverify[Use TLS and verify the remote]" \ - "($help)--userns-remap=[User/Group setting for user namespaces]:user\:group:->users-groups" \ - "($help)--userland-proxy[Use userland proxy for loopback traffic]" && ret=0 - - case $state in - (cluster-store) - if compset -P '*://'; then - _message 'host:port' && ret=0 - else - store=('consul' 'etcd' 'zk') - _describe -t cluster-store "Cluster Store" store -qS "://" && ret=0 - fi - ;; - 
(cluster-store-options) - if compset -P '*='; then - _files && ret=0 - else - opts=('discovery.heartbeat' 'discovery.ttl' 'kv.cacertfile' 'kv.certfile' 'kv.keyfile' 'kv.path') - _describe -t cluster-store-opts "Cluster Store Options" opts -qS "=" && ret=0 - fi - ;; - (users-groups) - if compset -P '*:'; then - _groups && ret=0 - else - _describe -t userns-default "default Docker user management" '(default)' && ret=0 - _users && ret=0 - fi - ;; - esac - ;; - (diff) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:containers:__docker_containers" && ret=0 - ;; - (events) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_events_filter" \ - "($help)--since=[Events created since this timestamp]:timestamp: " \ - "($help)--until=[Events created until this timestamp]:timestamp: " && ret=0 - ;; - (exec) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_attach_exec_run_start \ - "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ - "($help -i --interactive)"{-i,--interactive}"[Keep stdin open even if not attached]" \ - "($help)--privileged[Give extended Linux capabilities to the command]" \ - "($help -t --tty)"{-t,--tty}"[Allocate a pseudo-tty]" \ - "($help -u --user)"{-u=,--user=}"[Username or UID]:user:_users" \ - "($help -):containers:__docker_runningcontainers" \ - "($help -)*::command:->anycommand" && ret=0 - - case $state in - (anycommand) - shift 1 words - (( CURRENT-- )) - _normal && ret=0 - ;; - esac - ;; - (export) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -o --output)"{-o=,--output=}"[Write to a file, instead of stdout]:output file:_files" \ - "($help -)*:containers:__docker_containers" && ret=0 - ;; - (history) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -H --human)"{-H,--human}"[Print sizes and dates in human readable format]" \ - "($help)--no-trunc[Do not truncate output]" \ - "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ - "($help -)*: :__docker_images" && ret=0 - ;; - (images) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Show all images]" \ - "($help)--digests[Show digests]" \ - "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ - "($help)--format[Pretty-print images using a Go template]:format: " \ - "($help)--no-trunc[Do not truncate output]" \ - "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ - "($help -): :__docker_repositories" && ret=0 - - case $state in - (filter-options) - __docker_complete_images_filters && ret=0 - ;; - esac - ;; - (import) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-c=,--change=}"[Apply Dockerfile instruction to the created image]:Dockerfile:_files" \ - "($help -m --message)"{-m=,--message=}"[Commit message for imported image]:message: " \ - "($help -):URL:(- http:// file://)" \ - "($help -): :__docker_repositories_with_tags" && ret=0 - ;; - (info|version) - _arguments $(__docker_arguments) \ - $opts_help && ret=0 - ;; - (inspect) - local state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --format)"{-f=,--format=}"[Format the output using the given go template]:template: " \ - "($help -s --size)"{-s,--size}"[Display total file sizes if the type is container]" \ - "($help)--type=[Return JSON for specified type]:type:(image container)" \ - "($help -)*: :->values" && ret=0 - - case $state in - (values) - if [[ ${words[(r)--type=container]}
== --type=container ]]; then - __docker_containers && ret=0 - elif [[ ${words[(r)--type=image]} == --type=image ]]; then - __docker_images && ret=0 - else - __docker_images && __docker_containers && ret=0 - fi - ;; - esac - ;; - (kill) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -s --signal)"{-s=,--signal=}"[Signal to send]:signal:_signals" \ - "($help -)*:containers:__docker_runningcontainers" && ret=0 - ;; - (load) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -i --input)"{-i=,--input=}"[Read from tar archive file]:archive file:_files -g \"*.((tar|TAR)(.gz|.GZ|.Z|.bz2|.lzma|.xz|)|(tbz|tgz|txz))(-.)\"" \ - "($help -q --quiet)"{-q,--quiet}"[Suppress the load output]" && ret=0 - ;; - (login) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -p --password)"{-p=,--password=}"[Password]:password: " \ - "($help -u --user)"{-u=,--user=}"[Username]:username: " \ - "($help -)1:server: " && ret=0 - ;; - (logout) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)1:server: " && ret=0 - ;; - (logs) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--details[Show extra details provided to logs]" \ - "($help -f --follow)"{-f,--follow}"[Follow log output]" \ - "($help -s --since)"{-s=,--since=}"[Show logs since this timestamp]:timestamp: " \ - "($help -t --timestamps)"{-t,--timestamps}"[Show timestamps]" \ - "($help)--tail=[Output the last K lines]:lines:(1 10 20 50 all)" \ - "($help -)*:containers:__docker_containers" && ret=0 - ;; - (network) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_network_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_network_subcommand && ret=0 - ;; - esac - ;; - (node) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_node_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_node_subcommand && ret=0 - ;; - esac - ;; - (pause|unpause) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:containers:__docker_runningcontainers" && ret=0 - ;; - (port) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)1:containers:__docker_runningcontainers" \ - "($help -)2:port:_ports" && ret=0 - ;; - (ps) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Show all containers]" \ - "($help)--before=[Show only containers created before...]:containers:__docker_containers" \ - "($help)*"{-f=,--filter=}"[Filter values]:filter:__docker_complete_ps_filters" \ - "($help)--format[Pretty-print containers using a Go template]:format: " \ - "($help -l --latest)"{-l,--latest}"[Show only the latest created container]" \ - "($help)-n[Show n last created containers, including non-running ones]:n:(1 5 10 25 50)" \ - "($help)--no-trunc[Do not truncate output]" \ - "($help -q --quiet)"{-q,--quiet}"[Only show numeric IDs]" \ - "($help -s --size)"{-s,--size}"[Display total file sizes]" \ - "($help)--since=[Show only containers created since...]:containers:__docker_containers" && ret=0 - ;; - (pull) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all-tags)"{-a,--all-tags}"[Download all tagged images]" \ - "($help)--disable-content-trust[Skip
image verification]" \ - "($help -):name:__docker_search" && ret=0 - ;; - (push) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)--disable-content-trust[Skip image signing]" \ - "($help -): :__docker_images" && ret=0 - ;; - (rename) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -):old name:__docker_containers" \ - "($help -):new name: " && ret=0 - ;; - (restart|stop) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -t --time)"{-t=,--time=}"[Number of seconds to try to stop for before killing the container]:seconds to before killing:(1 5 10 30 60)" \ - "($help -)*:containers:__docker_runningcontainers" && ret=0 - ;; - (rm) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --force)"{-f,--force}"[Force removal]" \ - "($help -l --link)"{-l,--link}"[Remove the specified link and not the underlying container]" \ - "($help -v --volumes)"{-v,--volumes}"[Remove the volumes associated to the container]" \ - "($help -)*:containers:->values" && ret=0 - case $state in - (values) - if [[ ${words[(r)-f]} == -f || ${words[(r)--force]} == --force ]]; then - __docker_containers && ret=0 - else - __docker_stoppedcontainers && ret=0 - fi - ;; - esac - ;; - (rmi) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -f --force)"{-f,--force}"[Force removal]" \ - "($help)--no-prune[Do not delete untagged parents]" \ - "($help -)*: :__docker_images" && ret=0 - ;; - (run) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_build_create_run \ - $opts_build_create_run_update \ - $opts_create_run \ - $opts_create_run_update \ - $opts_attach_exec_run_start \ - "($help -d --detach)"{-d,--detach}"[Detached mode: leave the container running in the background]" \ - "($help)--health-cmd=[Command to run to check health]:command: " \ - "($help)--health-interval=[Time between running the check]:time: " \ - "($help)--health-retries=[Consecutive failures needed to report unhealthy]:retries:(1 2 3 4 5)" \ - "($help)--health-timeout=[Maximum time to allow one check to run]:time: " \ - "($help)--no-healthcheck[Disable any container-specified HEALTHCHECK]" \ - "($help)--rm[Remove intermediate containers when it exits]" \ - "($help)--runtime=[Name of the runtime to be used for that container]:runtime:__docker_complete_runtimes" \ - "($help)--sig-proxy[Proxy all received signals to the process (non-TTY mode only)]" \ - "($help)--stop-signal=[Signal to kill a container]:signal:_signals" \ - "($help)--storage-opt=[Storage driver options for the container]:storage options:->storage-opt" \ - "($help -): :__docker_images" \ - "($help -):command: _command_names -e" \ - "($help -)*::arguments: _normal" && ret=0 - - case $state in - (link) - if compset -P "*:"; then - _wanted alias expl "Alias" compadd -E "" && ret=0 - else - __docker_runningcontainers -qS ":" && ret=0 - fi - ;; - (storage-opt) - if compset -P "*="; then - _message "value" && ret=0 - else - opts=('size') - _describe -t filter-opts "storage options" opts -qS "=" && ret=0 - fi - ;; - esac - - ;; - (save) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -o --output)"{-o=,--output=}"[Write to file]:file:_files" \ - "($help -)*: :__docker_images" && ret=0 - ;; - (search) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help)*"{-f=,--filter=}"[Filter values]:filter:->filter-options" \ - "($help)--limit=[Maximum returned search results]:limit:(1 5 10 25 50)" \ - "($help)--no-trunc[Do not truncate output]" \ - "($help -):term: " && ret=0 - - case $state in - (filter-options) - 
__docker_complete_search_filters && ret=0 - ;; - esac - ;; - (service) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_service_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_service_subcommand && ret=0 - ;; - esac - ;; - (start) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_attach_exec_run_start \ - "($help -a --attach)"{-a,--attach}"[Attach container's stdout/stderr and forward all signals]" \ - "($help -i --interactive)"{-i,--interactive}"[Attach container's stdin]" \ - "($help -)*:containers:__docker_stoppedcontainers" && ret=0 - ;; - (stats) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -a --all)"{-a,--all}"[Show all containers (default shows just running)]" \ - "($help)--no-stream[Disable streaming stats and only pull the first result]" \ - "($help -)*:containers:__docker_runningcontainers" && ret=0 - ;; - (swarm) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_swarm_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_swarm_subcommand && ret=0 - ;; - esac - ;; - (tag) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -):source:__docker_images" \ - "($help -):destination:__docker_repositories_with_tags" && ret=0 - ;; - (top) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)1:containers:__docker_runningcontainers" \ - "($help -)*:: :->ps-arguments" && ret=0 - case $state in - (ps-arguments) - _ps && ret=0 - ;; - esac - - ;; - (update) - _arguments $(__docker_arguments) \ - $opts_help \ - $opts_create_run_update \ - $opts_build_create_run_update \ - "($help -)*: :->values" && ret=0 - - case $state in - (values) - if [[ ${words[(r)--kernel-memory*]} = (--kernel-memory*) ]]; then - __docker_stoppedcontainers && ret=0 - else - __docker_containers && ret=0 - fi - ;; - esac - ;; - (volume) - local curcontext="$curcontext" state - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - case $state in - (command) - __docker_volume_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-${words[-1]}: - __docker_volume_subcommand && ret=0 - ;; - esac - ;; - (wait) - _arguments $(__docker_arguments) \ - $opts_help \ - "($help -)*:containers:__docker_runningcontainers" && ret=0 - ;; - (help) - _arguments $(__docker_arguments) ":subcommand:__docker_commands" && ret=0 - ;; - esac - - return ret -} - -_docker() { - # Support for subservices, which allows for `compdef _docker docker-shell=_docker_containers`. - # Based on /usr/share/zsh/functions/Completion/Unix/_git without support for `ret`.
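- # e.g. after `compdef _docker docker-shell=_docker_containers`, completing a `docker-shell` command line enters the branch below and dispatches to _docker_containers.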
- if [[ $service != docker ]]; then - _call_function - _$service - return - fi - - local curcontext="$curcontext" state line help="-h --help" - integer ret=1 - typeset -A opt_args - - _arguments $(__docker_arguments) -C \ - "(: -)"{-h,--help}"[Print usage]" \ - "($help)--config[Location of client config files]:path:_directories" \ - "($help -D --debug)"{-D,--debug}"[Enable debug mode]" \ - "($help -H --host)"{-H=,--host=}"[tcp://host:port to bind/connect to]:host: " \ - "($help -l --log-level)"{-l=,--log-level=}"[Logging level]:level:(debug info warn error fatal)" \ - "($help)--tls[Use TLS]" \ - "($help)--tlscacert=[Trust certs signed only by this CA]:PEM file:_files -g \"*.(pem|crt)\"" \ - "($help)--tlscert=[Path to TLS certificate file]:PEM file:_files -g \"*.(pem|crt)\"" \ - "($help)--tlskey=[Path to TLS key file]:Key file:_files -g \"*.(pem|key)\"" \ - "($help)--tlsverify[Use TLS and verify the remote]" \ - "($help)--userland-proxy[Use userland proxy for loopback traffic]" \ - "($help -v --version)"{-v,--version}"[Print version information and quit]" \ - "($help -): :->command" \ - "($help -)*:: :->option-or-argument" && ret=0 - - local host=${opt_args[-H]}${opt_args[--host]} - local config=${opt_args[--config]} - local docker_options="${host:+--host $host} ${config:+--config $config}" - - case $state in - (command) - __docker_commands && ret=0 - ;; - (option-or-argument) - curcontext=${curcontext%:*:*}:docker-$words[1]: - __docker_subcommand && ret=0 - ;; - esac - - return ret -} - -_dockerd() { - integer ret=1 - words[1]='daemon' - __docker_subcommand && ret=0 - return ret -} - -_docker "$@" - -# Local Variables: -# mode: Shell-Script -# sh-indentation: 4 -# indent-tabs-mode: nil -# sh-basic-offset: 4 -# End: -# vim: ft=zsh sw=4 ts=4 et diff --git a/contrib/desktop-integration/README.md b/contrib/desktop-integration/README.md deleted file mode 100644 index 85a01b9ee9..0000000000 --- a/contrib/desktop-integration/README.md +++ /dev/null @@ -1,11 +0,0 @@ -Desktop Integration -=================== - -The ./contrib/desktop-integration directory contains examples of typical dockerized -desktop applications. - -Examples -======== - -* Chromium: ./chromium/Dockerfile shows a way to dockerize a common application -* Gparted: ./gparted/Dockerfile shows a way to dockerize a common application with devices diff --git a/contrib/desktop-integration/chromium/Dockerfile b/contrib/desktop-integration/chromium/Dockerfile deleted file mode 100644 index 5cacd1f999..0000000000 --- a/contrib/desktop-integration/chromium/Dockerfile +++ /dev/null @@ -1,36 +0,0 @@ -# VERSION: 0.1 -# DESCRIPTION: Create chromium container with its dependencies -# AUTHOR: Jessica Frazelle -# COMMENTS: -# This file describes how to build a Chromium container with all -# dependencies installed. It uses native X11 unix socket. -# Tested on Debian Jessie -# USAGE: -# # Download Chromium Dockerfile -# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/chromium/Dockerfile -# -# # Build chromium image -# docker build -t chromium . -# -# # Run stateful data-on-host chromium.
For ephemeral, remove -v /data/chromium:/data -# docker run -v /data/chromium:/data -v /tmp/.X11-unix:/tmp/.X11-unix \ -# -e DISPLAY=unix$DISPLAY chromium - -# # To run stateful dockerized data containers -# docker run --volumes-from chromium-data -v /tmp/.X11-unix:/tmp/.X11-unix \ -# -e DISPLAY=unix$DISPLAY chromium - -# Base docker image -FROM debian:jessie -MAINTAINER Jessica Frazelle - -# Install Chromium -RUN apt-get update && apt-get install -y \ - chromium \ - chromium-l10n \ - libcanberra-gtk-module \ - libexif-dev \ - --no-install-recommends - -# Autorun chromium -CMD ["/usr/bin/chromium", "--no-sandbox", "--user-data-dir=/data"] diff --git a/contrib/desktop-integration/gparted/Dockerfile b/contrib/desktop-integration/gparted/Dockerfile deleted file mode 100644 index 3ddb23208d..0000000000 --- a/contrib/desktop-integration/gparted/Dockerfile +++ /dev/null @@ -1,31 +0,0 @@ -# VERSION: 0.1 -# DESCRIPTION: Create gparted container with its dependencies -# AUTHOR: Jessica Frazelle -# COMMENTS: -# This file describes how to build a gparted container with all -# dependencies installed. It uses native X11 unix socket. -# Tested on Debian Jessie -# USAGE: -# # Download gparted Dockerfile -# wget http://raw.githubusercontent.com/docker/docker/master/contrib/desktop-integration/gparted/Dockerfile -# -# # Build gparted image -# docker build -t gparted . -# -# docker run -v /tmp/.X11-unix:/tmp/.X11-unix \ -# --device=/dev/sda:/dev/sda \ -# -e DISPLAY=unix$DISPLAY gparted -# - -# Base docker image -FROM debian:jessie -MAINTAINER Jessica Frazelle - -# Install Gparted and its dependencies -RUN apt-get update && apt-get install -y \ - gparted \ - libcanberra-gtk-module \ - --no-install-recommends - -# Autorun gparted -CMD ["/usr/sbin/gparted"] diff --git a/contrib/docker-device-tool/README.md b/contrib/docker-device-tool/README.md deleted file mode 100644 index 1bf11d202c..0000000000 --- a/contrib/docker-device-tool/README.md +++ /dev/null @@ -1,14 +0,0 @@ -Docker device tool for devicemapper storage driver backend -=================== - -The ./contrib/docker-device-tool directory contains a tool to manipulate the devicemapper thin pool.
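-
-Once compiled (see below), a few typical invocations look like this (a
-sketch, not a full reference; run the tool with no arguments to print the
-complete usage line):
-
-    $ sudo ./device_tool status          # pool name and data/metadata usage
-    $ sudo ./device_tool list            # device IDs known to the thin pool
-    $ sudo ./device_tool resize 20GB     # grow the thin pool
-
-Root privileges are generally required; pass -r <path> if your Docker root
-is not the default /var/lib/docker.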
- -Compile -======== - - $ make shell - ## inside build container - $ go build contrib/docker-device-tool/device_tool.go - - # if devicemapper version is old and compilation fails, compile with `libdm_no_deferred_remove` tag - $ go build -tags libdm_no_deferred_remove contrib/docker-device-tool/device_tool.go diff --git a/contrib/docker-device-tool/device_tool.go b/contrib/docker-device-tool/device_tool.go deleted file mode 100644 index 73e1bfdf5b..0000000000 --- a/contrib/docker-device-tool/device_tool.go +++ /dev/null @@ -1,176 +0,0 @@ -// +build !windows - -package main - -import ( - "flag" - "fmt" - "os" - "path" - "sort" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver/devmapper" - "github.com/docker/docker/pkg/devicemapper" -) - -func usage() { - fmt.Fprintf(os.Stderr, "Usage: %s [status] | [list] | [device id] | [resize new-pool-size] | [snap new-id base-id] | [remove id] | [mount id mountpoint]\n", os.Args[0]) - flag.PrintDefaults() - os.Exit(1) -} - -func byteSizeFromString(arg string) (int64, error) { - digits := "" - rest := "" - last := strings.LastIndexAny(arg, "0123456789") - if last >= 0 { - digits = arg[:last+1] - rest = arg[last+1:] - } - - val, err := strconv.ParseInt(digits, 10, 64) - if err != nil { - return val, err - } - - rest = strings.ToLower(strings.TrimSpace(rest)) - - var multiplier int64 = 1 - switch rest { - case "": - multiplier = 1 - case "k", "kb": - multiplier = 1024 - case "m", "mb": - multiplier = 1024 * 1024 - case "g", "gb": - multiplier = 1024 * 1024 * 1024 - case "t", "tb": - multiplier = 1024 * 1024 * 1024 * 1024 - default: - return 0, fmt.Errorf("Unknown size unit: %s", rest) - } - - return val * multiplier, nil -} - -func main() { - root := flag.String("r", "/var/lib/docker", "Docker root dir") - flDebug := flag.Bool("D", false, "Debug mode") - - flag.Parse() - - if *flDebug { - os.Setenv("DEBUG", "1") - logrus.SetLevel(logrus.DebugLevel) - } - - if flag.NArg() < 1 { - usage() - } - - args := flag.Args() - - home := path.Join(*root, "devicemapper") - devices, err := devmapper.NewDeviceSet(home, false, nil, nil, nil) - if err != nil { - fmt.Println("Can't initialize device mapper: ", err) - os.Exit(1) - } - - switch args[0] { - case "status": - status := devices.Status() - fmt.Printf("Pool name: %s\n", status.PoolName) - fmt.Printf("Data Loopback file: %s\n", status.DataLoopback) - fmt.Printf("Metadata Loopback file: %s\n", status.MetadataLoopback) - fmt.Printf("Sector size: %d\n", status.SectorSize) - fmt.Printf("Data use: %d of %d (%.1f %%)\n", status.Data.Used, status.Data.Total, 100.0*float64(status.Data.Used)/float64(status.Data.Total)) - fmt.Printf("Metadata use: %d of %d (%.1f %%)\n", status.Metadata.Used, status.Metadata.Total, 100.0*float64(status.Metadata.Used)/float64(status.Metadata.Total)) - break - case "list": - ids := devices.List() - sort.Strings(ids) - for _, id := range ids { - fmt.Println(id) - } - break - case "device": - if flag.NArg() < 2 { - usage() - } - status, err := devices.GetDeviceStatus(args[1]) - if err != nil { - fmt.Println("Can't get device info: ", err) - os.Exit(1) - } - fmt.Printf("Id: %d\n", status.DeviceID) - fmt.Printf("Size: %d\n", status.Size) - fmt.Printf("Transaction Id: %d\n", status.TransactionID) - fmt.Printf("Size in Sectors: %d\n", status.SizeInSectors) - fmt.Printf("Mapped Sectors: %d\n", status.MappedSectors) - fmt.Printf("Highest Mapped Sector: %d\n", status.HighestMappedSector) - break - case "resize": - if flag.NArg() < 2 { - usage() - } -
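- // byteSizeFromString (defined above) accepts a plain byte count or a k/m/g/t suffix, optionally with a trailing "b", e.g. "1048576", "512m" or "20GB".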
- size, err := byteSizeFromString(args[1]) - if err != nil { - fmt.Println("Invalid size: ", err) - os.Exit(1) - } - - err = devices.ResizePool(size) - if err != nil { - fmt.Println("Error resizing pool: ", err) - os.Exit(1) - } - - break - case "snap": - if flag.NArg() < 3 { - usage() - } - - err := devices.AddDevice(args[1], args[2], nil) - if err != nil { - fmt.Println("Can't create snap device: ", err) - os.Exit(1) - } - break - case "remove": - if flag.NArg() < 2 { - usage() - } - - err := devicemapper.RemoveDevice(args[1]) - if err != nil { - fmt.Println("Can't remove device: ", err) - os.Exit(1) - } - break - case "mount": - if flag.NArg() < 3 { - usage() - } - - err := devices.MountDevice(args[1], args[2], "") - if err != nil { - fmt.Println("Can't mount device: ", err) - os.Exit(1) - } - break - default: - fmt.Printf("Unknown command %s\n", args[0]) - usage() - - os.Exit(1) - } - - return -} diff --git a/contrib/docker-device-tool/device_tool_windows.go b/contrib/docker-device-tool/device_tool_windows.go deleted file mode 100644 index da29a2cadf..0000000000 --- a/contrib/docker-device-tool/device_tool_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package main - -func main() { -} diff --git a/contrib/dockerize-disk.sh b/contrib/dockerize-disk.sh deleted file mode 100755 index 444e243abe..0000000000 --- a/contrib/dockerize-disk.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env bash -set -e - -if ! command -v qemu-nbd &> /dev/null; then - echo >&2 'error: "qemu-nbd" not found!' - exit 1 -fi - -usage() { - echo "Convert disk image to docker image" - echo "" - echo "usage: $0 image-name disk-image-file [ base-image ]" - echo " ie: $0 cirros:0.3.3 cirros-0.3.3-x86_64-disk.img" - echo " $0 ubuntu:cloud ubuntu-14.04-server-cloudimg-amd64-disk1.img ubuntu:14.04" -} - -if [ "$#" -lt 2 ]; then - usage - exit 1 -fi - -CURDIR=$(pwd) - -image_name="${1%:*}" -image_tag="${1#*:}" -if [ "$image_tag" == "$1" ]; then - image_tag="latest" -fi - -disk_image_file="$2" -docker_base_image="$3" - -block_device=/dev/nbd0 - -builddir=$(mktemp -d) - -cleanup() { - umount "$builddir/disk_image" || true - umount "$builddir/workdir" || true - qemu-nbd -d $block_device &> /dev/null || true - rm -rf $builddir -} -trap cleanup EXIT - -# Mount disk image -modprobe nbd max_part=63 -qemu-nbd -rc ${block_device} -P 1 "$disk_image_file" -mkdir "$builddir/disk_image" -mount -o ro ${block_device} "$builddir/disk_image" - -mkdir "$builddir/workdir" -mkdir "$builddir/diff" - -base_image_mounts="" - -# Unpack base image -if [ -n "$docker_base_image" ]; then - mkdir -p "$builddir/base" - docker pull "$docker_base_image" - docker save "$docker_base_image" | tar -xC "$builddir/base" - - image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") - while [ -n "$image_id" ]; do - mkdir -p "$builddir/base/$image_id/layer" - tar -xf "$builddir/base/$image_id/layer.tar" -C "$builddir/base/$image_id/layer" - - base_image_mounts="${base_image_mounts}:$builddir/base/$image_id/layer=ro+wh" - image_id=$(docker inspect -f "{{.Parent}}" "$image_id") - done -fi - -# Mount work directory -mount -t aufs -o "br=$builddir/diff=rw${base_image_mounts},dio,xino=/dev/shm/aufs.xino" none "$builddir/workdir" - -# Update files -cd $builddir -LC_ALL=C diff -rq disk_image workdir \ - | sed -re "s|Only in workdir(.*?): |DEL \1/|g;s|Only in disk_image(.*?): |ADD \1/|g;s|Files disk_image/(.+) and workdir/(.+) differ|UPDATE /\1|g" \ - | while read action entry; do - case "$action" in - ADD|UPDATE) - cp -a "disk_image$entry" "workdir$entry" - ;; - DEL)
- rm -rf "workdir$entry" - ;; - *) - echo "Error: unknown diff line: $action $entry" >&2 - ;; - esac - done - -# Pack new image -new_image_id="$(for i in $(seq 1 32); do printf "%02x" $(($RANDOM % 256)); done)" -mkdir -p $builddir/result/$new_image_id -cd diff -tar -cf $builddir/result/$new_image_id/layer.tar * -echo "1.0" > $builddir/result/$new_image_id/VERSION -cat > $builddir/result/$new_image_id/json <<-EOS -{ "docker_version": "1.4.1" -, "id": "$new_image_id" -, "created": "$(date -u +%Y-%m-%dT%H:%M:%S.%NZ)" -EOS - -if [ -n "$docker_base_image" ]; then - image_id=$(docker inspect -f "{{.Id}}" "$docker_base_image") - echo ", \"parent\": \"$image_id\"" >> $builddir/result/$new_image_id/json -fi - -echo "}" >> $builddir/result/$new_image_id/json - -echo "{\"$image_name\":{\"$image_tag\":\"$new_image_id\"}}" > $builddir/result/repositories - -cd $builddir/result - -# mkdir -p $CURDIR/$image_name -# cp -r * $CURDIR/$image_name -tar -c * | docker load diff --git a/contrib/download-frozen-image-v1.sh b/contrib/download-frozen-image-v1.sh deleted file mode 100755 index 29d7ff59fd..0000000000 --- a/contrib/download-frozen-image-v1.sh +++ /dev/null @@ -1,108 +0,0 @@ -#!/bin/bash -set -e - -# hello-world latest ef872312fe1b 3 months ago 910 B -# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B - -# debian latest f6fab3b798be 10 weeks ago 85.1 MB -# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB - -if ! command -v curl &> /dev/null; then - echo >&2 'error: "curl" not found!' - exit 1 -fi - -usage() { - echo "usage: $0 dir image[:tag][@image-id] ..." - echo " ie: $0 /tmp/hello-world hello-world" - echo " $0 /tmp/debian-jessie debian:jessie" - echo " $0 /tmp/old-hello-world hello-world@ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9" - echo " $0 /tmp/old-debian debian:latest@f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd" - [ -z "$1" ] || exit "$1" -} - -dir="$1" # dir for building tar in -shift || usage 1 >&2 - -[ $# -gt 0 -a "$dir" ] || usage 2 >&2 -mkdir -p "$dir" - -# hacky workarounds for Bash 3 support (no associative arrays) -images=() -rm -f "$dir"/tags-*.tmp -# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' - -while [ $# -gt 0 ]; do - imageTag="$1" - shift - image="${imageTag%%[:@]*}" - tag="${imageTag#*:}" - imageId="${tag##*@}" - [ "$imageId" != "$tag" ] || imageId= - [ "$tag" != "$imageTag" ] || tag='latest' - tag="${tag%@*}" - - imageFile="${image//\//_}" # "/" can't be in filenames :) - - token="$(curl -sSL -o /dev/null -D- -H 'X-Docker-Token: true' "https://index.docker.io/v1/repositories/$image/images" | tr -d '\r' | awk -F ': *' '$1 == "X-Docker-Token" { print $2 }')" - - if [ -z "$imageId" ]; then - imageId="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/repositories/$image/tags/$tag")" - imageId="${imageId//\"/}" - fi - - ancestryJson="$(curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/ancestry")" - if [ "${ancestryJson:0:1}" != '[' ]; then - echo >&2 "error: /v1/images/$imageId/ancestry returned something unexpected:" - echo >&2 " $ancestryJson" - exit 1 - fi - - IFS=',' - ancestry=( ${ancestryJson//[\[\] \"]/} ) - unset IFS - - if [ -s "$dir/tags-$imageFile.tmp" ]; then - echo -n ', ' >> "$dir/tags-$imageFile.tmp" - else - images=( "${images[@]}" "$image" ) - fi - echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" - - echo 
"Downloading '$imageTag' (${#ancestry[@]} layers)..." - for imageId in "${ancestry[@]}"; do - mkdir -p "$dir/$imageId" - echo '1.0' > "$dir/$imageId/VERSION" - - curl -sSL -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/json" -o "$dir/$imageId/json" - - # TODO figure out why "-C -" doesn't work here - # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." - # "HTTP/1.1 416 Requested Range Not Satisfiable" - if [ -f "$dir/$imageId/layer.tar" ]; then - # TODO hackpatch for no -C support :'( - echo "skipping existing ${imageId:0:12}" - continue - fi - curl -SL --progress -H "Authorization: Token $token" "https://registry-1.docker.io/v1/images/$imageId/layer" -o "$dir/$imageId/layer.tar" # -C - - done - echo -done - -echo -n '{' > "$dir/repositories" -firstImage=1 -for image in "${images[@]}"; do - imageFile="${image//\//_}" # "/" can't be in filenames :) - - [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" - firstImage= - echo -n $'\n\t' >> "$dir/repositories" - echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" -done -echo -n $'\n}\n' >> "$dir/repositories" - -rm -f "$dir"/tags-*.tmp - -echo "Download of images into '$dir' complete." -echo "Use something like the following to load the result into a Docker daemon:" -echo " tar -cC '$dir' . | docker load" diff --git a/contrib/download-frozen-image-v2.sh b/contrib/download-frozen-image-v2.sh deleted file mode 100755 index 111e3fa2ba..0000000000 --- a/contrib/download-frozen-image-v2.sh +++ /dev/null @@ -1,121 +0,0 @@ -#!/bin/bash -set -e - -# hello-world latest ef872312fe1b 3 months ago 910 B -# hello-world latest ef872312fe1bbc5e05aae626791a47ee9b032efa8f3bda39cc0be7b56bfe59b9 3 months ago 910 B - -# debian latest f6fab3b798be 10 weeks ago 85.1 MB -# debian latest f6fab3b798be3174f45aa1eb731f8182705555f89c9026d8c1ef230cbf8301dd 10 weeks ago 85.1 MB - -if ! command -v curl &> /dev/null; then - echo >&2 'error: "curl" not found!' - exit 1 -fi - -usage() { - echo "usage: $0 dir image[:tag][@digest] ..." - echo " $0 /tmp/old-hello-world hello-world:latest@sha256:8be990ef2aeb16dbcb9271ddfe2610fa6658d13f6dfb8bc72074cc1ca36966a7" - [ -z "$1" ] || exit "$1" -} - -dir="$1" # dir for building tar in -shift || usage 1 >&2 - -[ $# -gt 0 -a "$dir" ] || usage 2 >&2 -mkdir -p "$dir" - -# hacky workarounds for Bash 3 support (no associative arrays) -images=() -rm -f "$dir"/tags-*.tmp -# repositories[busybox]='"latest": "...", "ubuntu-14.04": "..."' - -while [ $# -gt 0 ]; do - imageTag="$1" - shift - image="${imageTag%%[:@]*}" - imageTag="${imageTag#*:}" - digest="${imageTag##*@}" - tag="${imageTag%%@*}" - - # add prefix library if passed official image - if [[ "$image" != *"/"* ]]; then - image="library/$image" - fi - - imageFile="${image//\//_}" # "/" can't be in filenames :) - - token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" - - manifestJson="$(curl -sSL -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/manifests/$digest")" - if [ "${manifestJson:0:1}" != '{' ]; then - echo >&2 "error: /v2/$image/manifests/$digest returned something unexpected:" - echo >&2 " $manifestJson" - exit 1 - fi - - layersFs=$(echo "$manifestJson" | jq --raw-output '.fsLayers | .[] | .blobSum') - - IFS=$'\n' - # bash v4 on Windows CI requires CRLF separator - if [ "$(go env GOHOSTOS)" = 'windows' ]; then - major=$(echo ${BASH_VERSION%%[^0.9]} | cut -d. 
-f1) - if [ "$major" -ge 4 ]; then - IFS=$'\r\n' - fi - fi - layers=( ${layersFs} ) - unset IFS - - history=$(echo "$manifestJson" | jq '.history | [.[] | .v1Compatibility]') - imageId=$(echo "$history" | jq --raw-output .[0] | jq --raw-output .id) - - if [ -s "$dir/tags-$imageFile.tmp" ]; then - echo -n ', ' >> "$dir/tags-$imageFile.tmp" - else - images=( "${images[@]}" "$image" ) - fi - echo -n '"'"$tag"'": "'"$imageId"'"' >> "$dir/tags-$imageFile.tmp" - - echo "Downloading '${image}:${tag}@${digest}' (${#layers[@]} layers)..." - for i in "${!layers[@]}"; do - imageJson=$(echo "$history" | jq --raw-output .[${i}]) - imageId=$(echo "$imageJson" | jq --raw-output .id) - imageLayer=${layers[$i]} - - mkdir -p "$dir/$imageId" - echo '1.0' > "$dir/$imageId/VERSION" - - echo "$imageJson" > "$dir/$imageId/json" - - # TODO figure out why "-C -" doesn't work here - # "curl: (33) HTTP server doesn't seem to support byte ranges. Cannot resume." - # "HTTP/1.1 416 Requested Range Not Satisfiable" - if [ -f "$dir/$imageId/layer.tar" ]; then - # TODO hackpatch for no -C support :'( - echo "skipping existing ${imageId:0:12}" - continue - fi - token="$(curl -sSL "https://auth.docker.io/token?service=registry.docker.io&scope=repository:$image:pull" | jq --raw-output .token)" - curl -SL --progress -H "Authorization: Bearer $token" "https://registry-1.docker.io/v2/$image/blobs/$imageLayer" -o "$dir/$imageId/layer.tar" # -C - - done - echo -done - -echo -n '{' > "$dir/repositories" -firstImage=1 -for image in "${images[@]}"; do - imageFile="${image//\//_}" # "/" can't be in filenames :) - image="${image#library\/}" - - [ "$firstImage" ] || echo -n ',' >> "$dir/repositories" - firstImage= - echo -n $'\n\t' >> "$dir/repositories" - echo -n '"'"$image"'": { '"$(cat "$dir/tags-$imageFile.tmp")"' }' >> "$dir/repositories" -done -echo -n $'\n}\n' >> "$dir/repositories" - -rm -f "$dir"/tags-*.tmp - -echo "Download of images into '$dir' complete." -echo "Use something like the following to load the result into a Docker daemon:" -echo " tar -cC '$dir' . 
| docker load" diff --git a/contrib/gitdm/aliases b/contrib/gitdm/aliases deleted file mode 100644 index dd5dd34335..0000000000 --- a/contrib/gitdm/aliases +++ /dev/null @@ -1,148 +0,0 @@ -Danny.Yates@mailonline.co.uk danny@codeaholics.org -KenCochrane@gmail.com kencochrane@gmail.com -LÉVEIL thomasleveil@gmail.com -Vincent.Bernat@exoscale.ch bernat@luffy.cx -acidburn@docker.com jess@docker.com -admin@jtlebi.fr jt@yadutaf.fr -ahmetalpbalkan@gmail.com ahmetb@microsoft.com -aj@gandi.net aj@gandi.net -albers@users.noreply.github.com github@albersweb.de -alexander.larsson@gmail.com alexl@redhat.com -amurdaca@redhat.com antonio.murdaca@gmail.com -amy@gandi.net aj@gandi.net -andrew.weiss@microsoft.com andrew.weiss@outlook.com -angt@users.noreply.github.com adrien@gallouet.fr -ankushagarwal@users.noreply.github.com ankushagarwal11@gmail.com -anonymouse2048@gmail.com lheckemann@twig-world.com -anusha@docker.com anusha.ragunathan@docker.com -asarai@suse.com asarai@suse.de -avi.miller@gmail.com avi.miller@oracle.com -bernat@luffy.cx Vincent.Bernat@exoscale.ch -bgoff@cpuguy83-mbp.home cpuguy83@gmail.com -brandon@ifup.co brandon@ifup.org -brent@docker.com brent.salisbury@docker.com -charmes.guillaume@gmail.com guillaume.charmes@docker.com -chenchun.feed@gmail.com ramichen@tencent.com -chooper@plumata.com charles.hooper@dotcloud.com -crosby.michael@gmail.com michael@docker.com -crosbymichael@gmail.com michael@docker.com -cyphar@cyphar.com asarai@suse.de -daehyeok@daehyeok-ui-MacBook-Air.local daehyeok@gmail.com -daehyeok@daehyeokui-MacBook-Air.local daehyeok@gmail.com -daniel.norberg@gmail.com dano@spotify.com -daniel@dotcloud.com daniel.mizyrycki@dotcloud.com -darren@rancher.com darren.s.shepherd@gmail.com -dave@dtucker.co.uk dt@docker.com -dev@vvieux.com victor.vieux@docker.com -dgasienica@zynga.com daniel@gasienica.ch -dnephin@gmail.com dnephin@docker.com -dominikh@fork-bomb.org dominik@honnef.co -dqminh89@gmail.com dqminh@cloudflare.com -dsxiao@dataman-inc.com dxiao@redhat.com -duglin@users.noreply.github.com dug@us.ibm.com -eric.hanchrow@gmail.com ehanchrow@ine.com -erik+github@hollensbe.org github@hollensbe.org -estesp@gmail.com estesp@linux.vnet.ibm.com -ewindisch@docker.com eric@windisch.us -f.joffrey@gmail.com joffrey@docker.com -fkautz@alumni.cmu.edu fkautz@redhat.com -frank.rosquin@gmail.com frank.rosquin+github@gmail.com -gh@mattyw.net mattyw@me.com -git@julienbordellier.com julienbordellier@gmail.com -github@metaliveblog.com github@developersupport.net -github@srid.name sridharr@activestate.com -guillaume.charmes@dotcloud.com guillaume.charmes@docker.com -guillaume@charmes.net guillaume.charmes@docker.com -guillaume@docker.com guillaume.charmes@docker.com -guillaume@dotcloud.com guillaume.charmes@docker.com -haoshuwei24@gmail.com haosw@cn.ibm.com -hollie.teal@docker.com hollie@docker.com -hollietealok@users.noreply.github.com hollie@docker.com -hsinko@users.noreply.github.com 21551195@zju.edu.cn -iamironbob@gmail.com altsysrq@gmail.com -icecrime@gmail.com arnaud.porterie@docker.com -jatzen@gmail.com jacob@jacobatzen.dk -jeff@allingeek.com jeff.nickoloff@gmail.com -jefferya@programmerq.net jeff@docker.com -jerome.petazzoni@dotcloud.com jerome.petazzoni@dotcloud.com -jfrazelle@users.noreply.github.com jess@docker.com -jhoward@microsoft.com John.Howard@microsoft.com -jlhawn@berkeley.edu josh.hawn@docker.com -joffrey@dotcloud.com joffrey@docker.com -john.howard@microsoft.com John.Howard@microsoft.com -jp@enix.org jerome.petazzoni@dotcloud.com -justin.cormack@unikernel.com 
justin.cormack@docker.com -justin.simonelis@PTS-JSIMON2.toronto.exclamation.com justin.p.simonelis@gmail.com -justin@specialbusservice.com justin.cormack@docker.com -katsuta_soshi@cyberagent.co.jp soshi.katsuta@gmail.com -kuehnle@online.de git.nivoc@neverbox.com -kwk@users.noreply.github.com konrad.wilhelm.kleine@gmail.com -leijitang@gmail.com leijitang@huawei.com -liubin0329@gmail.com liubin0329@users.noreply.github.com -lk4d4math@gmail.com lk4d4@docker.com -louis@dotcloud.com kalessin@kalessin.fr -lsm5@redhat.com lsm5@fedoraproject.org -lyndaoleary@hotmail.com lyndaoleary29@gmail.com -madhu@socketplane.io madhu@docker.com -martins@noironetworks.com aanm90@gmail.com -mary@docker.com mary.anthony@docker.com -mastahyeti@users.noreply.github.com mastahyeti@gmail.com -maztaim@users.noreply.github.com taim@bosboot.org -me@runcom.ninja antonio.murdaca@gmail.com -mheon@mheonlaptop.redhat.com mheon@redhat.com -michael@crosbymichael.com michael@docker.com -mohitsoni1989@gmail.com mosoni@ebay.com -moxieandmore@gmail.com mary.anthony@docker.com -moyses.furtado@wplex.com.br moysesb@gmail.com -msabramo@gmail.com marc@marc-abramowitz.com -mzdaniel@glidelink.net daniel.mizyrycki@dotcloud.com -nathan.leclaire@gmail.com nathan.leclaire@docker.com -nathanleclaire@gmail.com nathan.leclaire@docker.com -ostezer@users.noreply.github.com ostezer@gmail.com -peter@scraperwiki.com p@pwaller.net -princess@docker.com jess@docker.com -proppy@aminche.com proppy@google.com -qhuang@10.0.2.15 h.huangqiang@huawei.com -resouer@gmail.com resouer@163.com -roberto_hashioka@hotmail.com roberto.hashioka@docker.com -root@vagrant-ubuntu-12.10.vagrantup.com daniel.mizyrycki@dotcloud.com -runcom@linux.com antonio.murdaca@gmail.com -runcom@redhat.com antonio.murdaca@gmail.com -runcom@users.noreply.github.com antonio.murdaca@gmail.com -s@docker.com solomon@docker.com -shawnlandden@gmail.com shawn@churchofgit.com -singh.gurjeet@gmail.com gurjeet@singh.im -sjoerd@byte.nl sjoerd-github@linuxonly.nl -smahajan@redhat.com shishir.mahajan@redhat.com -solomon.hykes@dotcloud.com solomon@docker.com -solomon@dotcloud.com solomon@docker.com -stefanb@us.ibm.com stefanb@linux.vnet.ibm.com -stevvooe@users.noreply.github.com stephen.day@docker.com -superbaloo+registrations.github@superbaloo.net baloo@gandi.net -tangicolin@gmail.com tangicolin@gmail.com -thaJeztah@users.noreply.github.com github@gone.nl -thatcher@dotcloud.com thatcher@docker.com -thatcher@gmx.net thatcher@docker.com -tibor@docker.com teabee89@gmail.com -tiborvass@users.noreply.github.com teabee89@gmail.com -timruffles@googlemail.com oi@truffles.me.uk -tintypemolly@Ohui-MacBook-Pro.local tintypemolly@gmail.com -tj@init.me tejesh.mehta@gmail.com -tristan.carel@gmail.com tristan@cogniteev.com -unclejack@users.noreply.github.com cristian.staretu@gmail.com -unclejacksons@gmail.com cristian.staretu@gmail.com -vbatts@hashbangbash.com vbatts@redhat.com -victor.vieux@dotcloud.com victor.vieux@docker.com -victor@docker.com victor.vieux@docker.com -victor@dotcloud.com victor.vieux@docker.com -victorvieux@gmail.com victor.vieux@docker.com -vieux@docker.com victor.vieux@docker.com -vincent+github@demeester.fr vincent@sbr.pm -vincent@bernat.im bernat@luffy.cx -vojnovski@gmail.com viktor.vojnovski@amadeus.com -whoshuu@gmail.com huu@prismskylabs.com -xiaods@gmail.com dxiao@redhat.com -xlgao@zju.edu.cn xlgao@zju.edu.cn -yestin.sun@polyera.com sunyi0804@gmail.com -yuchangchun1@huawei.com yuchangchun1@huawei.com -zjaffee@us.ibm.com zij@case.edu diff --git a/contrib/gitdm/domain-map 
b/contrib/gitdm/domain-map deleted file mode 100644 index 1f1849e4f6..0000000000 --- a/contrib/gitdm/domain-map +++ /dev/null @@ -1,39 +0,0 @@ -# -# Docker -# - -docker.com Docker -dotcloud.com Docker - -aluzzardi@gmail.com Docker -cpuguy83@gmail.com Docker -derek@mcgstyle.net Docker -github@gone.nl Docker -kencochrane@gmail.com Docker -mickael.laventure@gmail.com Docker -sam.alba@gmail.com Docker -svendowideit@fosiki.com Docker -svendowideit@home.org.au Docker -tonistiigi@gmail.com Docker - -cristian.staretu@gmail.com Docker < 2015-01-01 -cristian.staretu@gmail.com Cisco - -github@hollensbe.org Docker < 2015-01-01 -github@hollensbe.org Cisco - -david.calavera@gmail.com Docker < 2016-04-01 -david.calavera@gmail.com Netlify - -# -# Others -# - -cisco.com Cisco -google.com Google -ibm.com IBM -huawei.com Huawei -microsoft.com Microsoft - -redhat.com Red Hat -mrunalp@gmail.com Red Hat diff --git a/contrib/gitdm/generate_aliases.sh b/contrib/gitdm/generate_aliases.sh deleted file mode 100755 index dd6a564995..0000000000 --- a/contrib/gitdm/generate_aliases.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# -# This script generates a gitdm compatible email aliases file from a git -# formatted .mailmap file. -# -# Usage: -# $> ./generate_aliases <mailmap_file> > aliases -# - -cat $1 | \ - grep -v '^#' | \ - sed 's/^[^<]*<\([^>]*\)>/\1/' | \ - grep '<.*>' | sed -e 's/[<>]/ /g' | \ - awk '{if ($3 != "") { print $3" "$1 } else {print $2" "$1}}' | \ - sort | uniq diff --git a/contrib/gitdm/gitdm.config b/contrib/gitdm/gitdm.config deleted file mode 100644 index d9b62b0b43..0000000000 --- a/contrib/gitdm/gitdm.config +++ /dev/null @@ -1,17 +0,0 @@ -# -# EmailAliases lets us cope with developers who use more -# than one address. -# -EmailAliases aliases - -# -# EmailMap does the main work of mapping addresses onto -# employers. -# -EmailMap domain-map - -# -# Use GroupMap to map a file full of addresses to the -# same employer -# -# GroupMap company-Docker Docker diff --git a/contrib/httpserver/Dockerfile b/contrib/httpserver/Dockerfile deleted file mode 100644 index 747dc91bcf..0000000000 --- a/contrib/httpserver/Dockerfile +++ /dev/null @@ -1,4 +0,0 @@ -FROM busybox -EXPOSE 80/tcp -COPY httpserver .
-CMD ["./httpserver"] diff --git a/contrib/httpserver/server.go b/contrib/httpserver/server.go deleted file mode 100644 index a75d5abb3d..0000000000 --- a/contrib/httpserver/server.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "log" - "net/http" -) - -func main() { - fs := http.FileServer(http.Dir("/static")) - http.Handle("/", fs) - log.Panic(http.ListenAndServe(":80", nil)) -} diff --git a/contrib/init/openrc/docker.confd b/contrib/init/openrc/docker.confd deleted file mode 100644 index 244403113e..0000000000 --- a/contrib/init/openrc/docker.confd +++ /dev/null @@ -1,13 +0,0 @@ -# /etc/conf.d/docker: config file for /etc/init.d/docker - -# where the docker daemon output gets piped -#DOCKER_LOGFILE="/var/log/docker.log" - -# where docker's pid get stored -#DOCKER_PIDFILE="/run/docker.pid" - -# where the docker daemon itself is run from -#DOCKERD_BINARY="/usr/bin/dockerd" - -# any other random options you want to pass to docker -DOCKER_OPTS="" diff --git a/contrib/init/openrc/docker.initd b/contrib/init/openrc/docker.initd deleted file mode 100644 index f2e1536a96..0000000000 --- a/contrib/init/openrc/docker.initd +++ /dev/null @@ -1,19 +0,0 @@ -#!/sbin/openrc-run -# Copyright 1999-2013 Gentoo Foundation -# Distributed under the terms of the GNU General Public License v2 - -command="${DOCKERD_BINARY:-/usr/bin/dockerd}" -pidfile="${DOCKER_PIDFILE:-/run/${RC_SVCNAME}.pid}" -command_args="-p \"${pidfile}\" ${DOCKER_OPTS}" -DOCKER_LOGFILE="${DOCKER_LOGFILE:-/var/log/${RC_SVCNAME}.log}" -start_stop_daemon_args="--background \ - --stderr \"${DOCKER_LOGFILE}\" --stdout \"${DOCKER_LOGFILE}\"" - -start_pre() { - checkpath -f -m 0644 -o root:docker "$DOCKER_LOGFILE" - - ulimit -n 1048576 - ulimit -u 1048576 - - return 0 -} diff --git a/contrib/init/systemd/REVIEWERS b/contrib/init/systemd/REVIEWERS deleted file mode 100644 index b9ba55b3fb..0000000000 --- a/contrib/init/systemd/REVIEWERS +++ /dev/null @@ -1,3 +0,0 @@ -Lokesh Mandvekar (@lsm5) -Brandon Philips (@philips) -Jessie Frazelle (@jfrazelle) diff --git a/contrib/init/systemd/docker.service b/contrib/init/systemd/docker.service deleted file mode 100644 index c3f3472472..0000000000 --- a/contrib/init/systemd/docker.service +++ /dev/null @@ -1,29 +0,0 @@ -[Unit] -Description=Docker Application Container Engine -Documentation=https://docs.docker.com -After=network.target docker.socket -Requires=docker.socket - -[Service] -Type=notify -# the default is not to use systemd for cgroups because the delegate issues still -# exists and systemd currently does not support the cgroup feature set required -# for containers run by docker -ExecStart=/usr/bin/dockerd -H fd:// -ExecReload=/bin/kill -s HUP $MAINPID -# Having non-zero Limit*s causes performance problems due to accounting overhead -# in the kernel. We recommend using cgroups to do container-local accounting. -LimitNOFILE=infinity -LimitNPROC=infinity -LimitCORE=infinity -# Uncomment TasksMax if your systemd version supports it. -# Only systemd 226 and above support this version. 
-#TasksMax=infinity -TimeoutStartSec=0 -# set delegate yes so that systemd does not reset the cgroups of docker containers -Delegate=yes -# kill only the docker process, not all processes in the cgroup -KillMode=process - -[Install] -WantedBy=multi-user.target diff --git a/contrib/init/systemd/docker.service.rpm b/contrib/init/systemd/docker.service.rpm deleted file mode 100644 index 08e90da81e..0000000000 --- a/contrib/init/systemd/docker.service.rpm +++ /dev/null @@ -1,28 +0,0 @@ -[Unit] -Description=Docker Application Container Engine -Documentation=https://docs.docker.com -After=network.target - -[Service] -Type=notify -# the default is not to use systemd for cgroups because the delegate issues still -# exists and systemd currently does not support the cgroup feature set required -# for containers run by docker -ExecStart=/usr/bin/dockerd -ExecReload=/bin/kill -s HUP $MAINPID -# Having non-zero Limit*s causes performance problems due to accounting overhead -# in the kernel. We recommend using cgroups to do container-local accounting. -LimitNOFILE=infinity -LimitNPROC=infinity -LimitCORE=infinity -# Uncomment TasksMax if your systemd version supports it. -# Only systemd 226 and above support this version. -#TasksMax=infinity -TimeoutStartSec=0 -# set delegate yes so that systemd does not reset the cgroups of docker containers -Delegate=yes -# kill only the docker process, not all processes in the cgroup -KillMode=process - -[Install] -WantedBy=multi-user.target diff --git a/contrib/init/systemd/docker.socket b/contrib/init/systemd/docker.socket deleted file mode 100644 index 7dd95098e4..0000000000 --- a/contrib/init/systemd/docker.socket +++ /dev/null @@ -1,12 +0,0 @@ -[Unit] -Description=Docker Socket for the API -PartOf=docker.service - -[Socket] -ListenStream=/var/run/docker.sock -SocketMode=0660 -SocketUser=root -SocketGroup=docker - -[Install] -WantedBy=sockets.target diff --git a/contrib/init/sysvinit-debian/docker b/contrib/init/sysvinit-debian/docker deleted file mode 100755 index 30d14f1c42..0000000000 --- a/contrib/init/sysvinit-debian/docker +++ /dev/null @@ -1,149 +0,0 @@ -#!/bin/sh -set -e - -### BEGIN INIT INFO -# Provides: docker -# Required-Start: $syslog $remote_fs -# Required-Stop: $syslog $remote_fs -# Should-Start: cgroupfs-mount cgroup-lite -# Should-Stop: cgroupfs-mount cgroup-lite -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: Create lightweight, portable, self-sufficient containers. -# Description: -# Docker is an open-source project to easily create lightweight, portable, -# self-sufficient containers from any application. The same container that a -# developer builds and tests on a laptop can run at scale, in production, on -# VMs, bare metal, OpenStack clusters, public clouds and more. -### END INIT INFO - -export PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin - -BASE=docker - -# modify these in /etc/default/$BASE (/etc/default/docker) -DOCKERD=/usr/bin/dockerd -# This is the pid file managed by docker itself -DOCKER_PIDFILE=/var/run/$BASE.pid -# This is the pid file created/managed by start-stop-daemon -DOCKER_SSD_PIDFILE=/var/run/$BASE-ssd.pid -DOCKER_LOGFILE=/var/log/$BASE.log -DOCKER_OPTS= -DOCKER_DESC="Docker" - -# Get lsb functions -. /lib/lsb/init-functions - -if [ -f /etc/default/$BASE ]; then - . /etc/default/$BASE -fi - -# Check docker is present -if [ ! 
-x $DOCKERD ]; then - log_failure_msg "$DOCKERD not present or not executable" - exit 1 -fi - -check_init() { - # see also init_is_upstart in /lib/lsb/init-functions (which isn't available in Ubuntu 12.04, or we'd use it directly) - if [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; then - log_failure_msg "$DOCKER_DESC is managed via upstart, try using service $BASE $1" - exit 1 - fi -} - -fail_unless_root() { - if [ "$(id -u)" != '0' ]; then - log_failure_msg "$DOCKER_DESC must be run as root" - exit 1 - fi -} - -cgroupfs_mount() { - # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount - if grep -v '^#' /etc/fstab | grep -q cgroup \ - || [ ! -e /proc/cgroups ] \ - || [ ! -d /sys/fs/cgroup ]; then - return - fi - if ! mountpoint -q /sys/fs/cgroup; then - mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup - fi - ( - cd /sys/fs/cgroup - for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do - mkdir -p $sys - if ! mountpoint -q $sys; then - if ! mount -n -t cgroup -o $sys cgroup $sys; then - rmdir $sys || true - fi - fi - done - ) -} - -case "$1" in - start) - check_init - - fail_unless_root - - cgroupfs_mount - - touch "$DOCKER_LOGFILE" - chgrp docker "$DOCKER_LOGFILE" - - ulimit -n 1048576 - if [ "$BASH" ]; then - ulimit -u 1048576 - else - ulimit -p 1048576 - fi - - log_begin_msg "Starting $DOCKER_DESC: $BASE" - start-stop-daemon --start --background \ - --no-close \ - --exec "$DOCKERD" \ - --pidfile "$DOCKER_SSD_PIDFILE" \ - --make-pidfile \ - -- \ - -p "$DOCKER_PIDFILE" \ - $DOCKER_OPTS \ - >> "$DOCKER_LOGFILE" 2>&1 - log_end_msg $? - ;; - - stop) - check_init - fail_unless_root - log_begin_msg "Stopping $DOCKER_DESC: $BASE" - start-stop-daemon --stop --pidfile "$DOCKER_SSD_PIDFILE" --retry 10 - log_end_msg $? - ;; - - restart) - check_init - fail_unless_root - docker_pid=`cat "$DOCKER_SSD_PIDFILE" 2>/dev/null` - [ -n "$docker_pid" ] \ - && ps -p $docker_pid > /dev/null 2>&1 \ - && $0 stop - $0 start - ;; - - force-reload) - check_init - fail_unless_root - $0 restart - ;; - - status) - check_init - status_of_proc -p "$DOCKER_SSD_PIDFILE" "$DOCKERD" "$DOCKER_DESC" - ;; - - *) - echo "Usage: service docker {start|stop|restart|status}" - exit 1 - ;; -esac diff --git a/contrib/init/sysvinit-debian/docker.default b/contrib/init/sysvinit-debian/docker.default deleted file mode 100644 index c7dc9a2a7f..0000000000 --- a/contrib/init/sysvinit-debian/docker.default +++ /dev/null @@ -1,20 +0,0 @@ -# Docker Upstart and SysVinit configuration file - -# -# THIS FILE DOES NOT APPLY TO SYSTEMD -# -# Please see the documentation for "systemd drop-ins": -# https://docs.docker.com/engine/articles/systemd/ -# - -# Customize location of Docker binary (especially for development testing). -#DOCKERD="/usr/local/bin/dockerd" - -# Use DOCKER_OPTS to modify the daemon startup options. -#DOCKER_OPTS="--dns 8.8.8.8 --dns 8.8.4.4" - -# If you need Docker to use an HTTP proxy, it can also be specified here. -#export http_proxy="http://127.0.0.1:3128/" - -# This is also a handy place to tweak where Docker's temporary files go. 
-#export TMPDIR="/mnt/bigdrive/docker-tmp" diff --git a/contrib/init/sysvinit-redhat/docker b/contrib/init/sysvinit-redhat/docker deleted file mode 100755 index a7e2e5a39e..0000000000 --- a/contrib/init/sysvinit-redhat/docker +++ /dev/null @@ -1,153 +0,0 @@ -#!/bin/sh -# -# /etc/rc.d/init.d/docker -# -# Daemon for docker.com -# -# chkconfig: 2345 95 95 -# description: Daemon for docker.com - -### BEGIN INIT INFO -# Provides: docker -# Required-Start: $network cgconfig -# Required-Stop: -# Should-Start: -# Should-Stop: -# Default-Start: 2 3 4 5 -# Default-Stop: 0 1 6 -# Short-Description: start and stop docker -# Description: Daemon for docker.com -### END INIT INFO - -# Source function library. -. /etc/rc.d/init.d/functions - -prog="docker" -unshare=/usr/bin/unshare -exec="/usr/bin/dockerd" -pidfile="/var/run/$prog.pid" -lockfile="/var/lock/subsys/$prog" -logfile="/var/log/$prog" - -[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog - -prestart() { - service cgconfig status > /dev/null - - if [[ $? != 0 ]]; then - service cgconfig start - fi - -} - -start() { - if [ ! -x $exec ]; then - if [ ! -e $exec ]; then - echo "Docker executable $exec not found" - else - echo "You do not have permission to execute the Docker executable $exec" - fi - exit 5 - fi - - check_for_cleanup - - if ! [ -f $pidfile ]; then - prestart - printf "Starting $prog:\t" - echo "\n$(date)\n" >> $logfile - "$unshare" -m -- $exec $other_args >> $logfile 2>&1 & - pid=$! - touch $lockfile - # wait up to 10 seconds for the pidfile to exist. see - # https://github.com/docker/docker/issues/5359 - tries=0 - while [ ! -f $pidfile -a $tries -lt 10 ]; do - sleep 1 - tries=$((tries + 1)) - echo -n '.' - done - if [ ! -f $pidfile ]; then - failure - echo - exit 1 - fi - success - echo - else - failure - echo - printf "$pidfile still exists...\n" - exit 7 - fi -} - -stop() { - echo -n $"Stopping $prog: " - killproc -p $pidfile -d 300 $prog - retval=$? - echo - [ $retval -eq 0 ] && rm -f $lockfile - return $retval -} - -restart() { - stop - start -} - -reload() { - restart -} - -force_reload() { - restart -} - -rh_status() { - status -p $pidfile $prog -} - -rh_status_q() { - rh_status >/dev/null 2>&1 -} - - -check_for_cleanup() { - if [ -f ${pidfile} ]; then - /bin/ps -fp $(cat ${pidfile}) > /dev/null || rm ${pidfile} - fi -} - -case "$1" in - start) - rh_status_q && exit 0 - $1 - ;; - stop) - rh_status_q || exit 0 - $1 - ;; - restart) - $1 - ;; - reload) - rh_status_q || exit 7 - $1 - ;; - force-reload) - force_reload - ;; - status) - rh_status - ;; - condrestart|try-restart) - rh_status_q || exit 0 - restart - ;; - *) - echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" - exit 2 -esac - -exit $? 
diff --git a/contrib/init/sysvinit-redhat/docker.sysconfig b/contrib/init/sysvinit-redhat/docker.sysconfig deleted file mode 100644 index 0864b3d77f..0000000000 --- a/contrib/init/sysvinit-redhat/docker.sysconfig +++ /dev/null @@ -1,7 +0,0 @@ -# /etc/sysconfig/docker -# -# Other arguments to pass to the docker daemon process -# These will be parsed by the sysv initscript and appended -# to the arguments list passed to docker daemon - -other_args="" diff --git a/contrib/init/upstart/REVIEWERS b/contrib/init/upstart/REVIEWERS deleted file mode 100644 index 03ee2dde3d..0000000000 --- a/contrib/init/upstart/REVIEWERS +++ /dev/null @@ -1,2 +0,0 @@ -Tianon Gravi (@tianon) -Jessie Frazelle (@jfrazelle) diff --git a/contrib/init/upstart/docker.conf b/contrib/init/upstart/docker.conf deleted file mode 100644 index b5ad74a6af..0000000000 --- a/contrib/init/upstart/docker.conf +++ /dev/null @@ -1,68 +0,0 @@ -description "Docker daemon" - -start on (filesystem and net-device-up IFACE!=lo) -stop on runlevel [!2345] -limit nofile 524288 1048576 -limit nproc 524288 1048576 - -respawn - -kill timeout 20 - -pre-start script - # see also https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount - if grep -v '^#' /etc/fstab | grep -q cgroup \ - || [ ! -e /proc/cgroups ] \ - || [ ! -d /sys/fs/cgroup ]; then - exit 0 - fi - if ! mountpoint -q /sys/fs/cgroup; then - mount -t tmpfs -o uid=0,gid=0,mode=0755 cgroup /sys/fs/cgroup - fi - ( - cd /sys/fs/cgroup - for sys in $(awk '!/^#/ { if ($4 == 1) print $1 }' /proc/cgroups); do - mkdir -p $sys - if ! mountpoint -q $sys; then - if ! mount -n -t cgroup -o $sys cgroup $sys; then - rmdir $sys || true - fi - fi - done - ) -end script - -script - # modify these in /etc/default/$UPSTART_JOB (/etc/default/docker) - DOCKERD=/usr/bin/dockerd - DOCKER_OPTS= - if [ -f /etc/default/$UPSTART_JOB ]; then - . /etc/default/$UPSTART_JOB - fi - exec "$DOCKERD" $DOCKER_OPTS --raw-logs -end script - -# Don't emit "started" event until docker.sock is ready. -# See https://github.com/docker/docker/issues/6647 -post-start script - DOCKER_OPTS= - DOCKER_SOCKET= - if [ -f /etc/default/$UPSTART_JOB ]; then - . /etc/default/$UPSTART_JOB - fi - - if ! printf "%s" "$DOCKER_OPTS" | grep -qE -e '-H|--host'; then - DOCKER_SOCKET=/var/run/docker.sock - else - DOCKER_SOCKET=$(printf "%s" "$DOCKER_OPTS" | grep -oP -e '(-H|--host)\W*unix://\K(\S+)') - fi - - if [ -n "$DOCKER_SOCKET" ]; then - while ! 
[ -e "$DOCKER_SOCKET" ]; do - initctl status $UPSTART_JOB | grep -qE "(stop|respawn)/" && exit 1 - echo "Waiting for $DOCKER_SOCKET" - sleep 0.1 - done - echo "$DOCKER_SOCKET is up" - fi -end script diff --git a/contrib/mkimage-alpine.sh b/contrib/mkimage-alpine.sh deleted file mode 100755 index 47cd35ce62..0000000000 --- a/contrib/mkimage-alpine.sh +++ /dev/null @@ -1,87 +0,0 @@ -#!/bin/sh - -set -e - -[ $(id -u) -eq 0 ] || { - printf >&2 '%s requires root\n' "$0" - exit 1 -} - -usage() { - printf >&2 '%s: [-r release] [-m mirror] [-s] [-c additional repository]\n' "$0" - exit 1 -} - -tmp() { - TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-XXXXXXXXXX) - ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/alpine-docker-rootfs-XXXXXXXXXX) - trap "rm -rf $TMP $ROOTFS" EXIT TERM INT -} - -apkv() { - curl -sSL $MAINREPO/$ARCH/APKINDEX.tar.gz | tar -Oxz | - grep --text '^P:apk-tools-static$' -A1 | tail -n1 | cut -d: -f2 -} - -getapk() { - curl -sSL $MAINREPO/$ARCH/apk-tools-static-$(apkv).apk | - tar -xz -C $TMP sbin/apk.static -} - -mkbase() { - $TMP/sbin/apk.static --repository $MAINREPO --update-cache --allow-untrusted \ - --root $ROOTFS --initdb add alpine-base -} - -conf() { - printf '%s\n' $MAINREPO > $ROOTFS/etc/apk/repositories - printf '%s\n' $ADDITIONALREPO >> $ROOTFS/etc/apk/repositories -} - -pack() { - local id - id=$(tar --numeric-owner -C $ROOTFS -c . | docker import - alpine:$REL) - - docker tag $id alpine:latest - docker run -i -t --rm alpine printf 'alpine:%s with id=%s created!\n' $REL $id -} - -save() { - [ $SAVE -eq 1 ] || return - - tar --numeric-owner -C $ROOTFS -c . | xz > rootfs.tar.xz -} - -while getopts "hr:m:s" opt; do - case $opt in - r) - REL=$OPTARG - ;; - m) - MIRROR=$OPTARG - ;; - s) - SAVE=1 - ;; - c) - ADDITIONALREPO=community - ;; - *) - usage - ;; - esac -done - -REL=${REL:-edge} -MIRROR=${MIRROR:-http://nl.alpinelinux.org/alpine} -SAVE=${SAVE:-0} -MAINREPO=$MIRROR/$REL/main -ADDITIONALREPO=$MIRROR/$REL/community -ARCH=${ARCH:-$(uname -m)} - -tmp -getapk -mkbase -conf -pack -save diff --git a/contrib/mkimage-arch-pacman.conf b/contrib/mkimage-arch-pacman.conf deleted file mode 100644 index 45fe03dc96..0000000000 --- a/contrib/mkimage-arch-pacman.conf +++ /dev/null @@ -1,92 +0,0 @@ -# -# /etc/pacman.conf -# -# See the pacman.conf(5) manpage for option and repository directives - -# -# GENERAL OPTIONS -# -[options] -# The following paths are commented out with their default values listed. -# If you wish to use different paths, uncomment and update the paths. -#RootDir = / -#DBPath = /var/lib/pacman/ -#CacheDir = /var/cache/pacman/pkg/ -#LogFile = /var/log/pacman.log -#GPGDir = /etc/pacman.d/gnupg/ -HoldPkg = pacman glibc -#XferCommand = /usr/bin/curl -C - -f %u > %o -#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u -#CleanMethod = KeepInstalled -#UseDelta = 0.7 -Architecture = auto - -# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup -#IgnorePkg = -#IgnoreGroup = - -#NoUpgrade = -#NoExtract = - -# Misc options -#UseSyslog -#Color -#TotalDownload -# We cannot check disk space from within a chroot environment -#CheckSpace -#VerbosePkgLists - -# By default, pacman accepts packages signed by keys that its local keyring -# trusts (see pacman-key and its man page), as well as unsigned packages. 
-SigLevel = Required DatabaseOptional -LocalFileSigLevel = Optional -#RemoteFileSigLevel = Required - -# NOTE: You must run `pacman-key --init` before first using pacman; the local -# keyring can then be populated with the keys of all official Arch Linux -# packagers with `pacman-key --populate archlinux`. - -# -# REPOSITORIES -# - can be defined here or included from another file -# - pacman will search repositories in the order defined here -# - local/custom mirrors can be added here or in separate files -# - repositories listed first will take precedence when packages -# have identical names, regardless of version number -# - URLs will have $repo replaced by the name of the current repo -# - URLs will have $arch replaced by the name of the architecture -# -# Repository entries are of the format: -# [repo-name] -# Server = ServerName -# Include = IncludePath -# -# The header [repo-name] is crucial - it must be present and -# uncommented to enable the repo. -# - -# The testing repositories are disabled by default. To enable, uncomment the -# repo name header and Include lines. You can add preferred servers immediately -# after the header, and they will be used before the default mirrors. - -#[testing] -#Include = /etc/pacman.d/mirrorlist - -[core] -Include = /etc/pacman.d/mirrorlist - -[extra] -Include = /etc/pacman.d/mirrorlist - -#[community-testing] -#Include = /etc/pacman.d/mirrorlist - -[community] -Include = /etc/pacman.d/mirrorlist - -# An example of a custom package repository. See the pacman manpage for -# tips on creating your own repositories. -#[custom] -#SigLevel = Optional TrustAll -#Server = file:///home/custompkgs - diff --git a/contrib/mkimage-arch.sh b/contrib/mkimage-arch.sh deleted file mode 100755 index 793b21e3c6..0000000000 --- a/contrib/mkimage-arch.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/usr/bin/env bash -# Generate a minimal filesystem for archlinux and load it into the local -# docker as "archlinux" -# requires root -set -e - -hash pacstrap &>/dev/null || { - echo "Could not find pacstrap. Run pacman -S arch-install-scripts" - exit 1 -} - -hash expect &>/dev/null || { - echo "Could not find expect. Run pacman -S expect" - exit 1 -} - - -export LANG="C.UTF-8" - -ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-archlinux-XXXXXXXXXX) -chmod 755 $ROOTFS - -# packages to ignore for space savings -PKGIGNORE=( - cryptsetup - device-mapper - dhcpcd - iproute2 - jfsutils - linux - lvm2 - man-db - man-pages - mdadm - nano - netctl - openresolv - pciutils - pcmciautils - reiserfsprogs - s-nail - systemd-sysvcompat - usbutils - vi - xfsprogs -) -IFS=',' -PKGIGNORE="${PKGIGNORE[*]}" -unset IFS - -case "$(uname -m)" in - armv*) - if pacman -Q archlinuxarm-keyring >/dev/null 2>&1; then - pacman-key --init - pacman-key --populate archlinuxarm - else - echo "Could not find archlinuxarm-keyring. 
Please, install it and run pacman-key --populate archlinuxarm" - exit 1 - fi - PACMAN_CONF='./mkimage-archarm-pacman.conf' - PACMAN_MIRRORLIST='Server = http://mirror.archlinuxarm.org/$arch/$repo' - PACMAN_EXTRA_PKGS='archlinuxarm-keyring' - EXPECT_TIMEOUT=120 - ARCH_KEYRING=archlinuxarm - DOCKER_IMAGE_NAME=archlinuxarm - ;; - *) - PACMAN_CONF='./mkimage-arch-pacman.conf' - PACMAN_MIRRORLIST='Server = https://mirrors.kernel.org/archlinux/$repo/os/$arch' - PACMAN_EXTRA_PKGS='' - EXPECT_TIMEOUT=60 - ARCH_KEYRING=archlinux - DOCKER_IMAGE_NAME=archlinux - ;; -esac - -export PACMAN_MIRRORLIST - -expect <<EOF - set send_slow {1 .1} - proc send {ignore arg} { - sleep .1 - exp_send -s -- \$arg - } - set timeout $EXPECT_TIMEOUT - - spawn pacstrap -C ./"$PACMAN_CONF" -c -d -G -i $ROOTFS base haveged $PACMAN_EXTRA_PKGS --ignore $PKGIGNORE - expect { - -exact "anyway? \[Y/n\] " { send -- "n\r"; exp_continue } - -exact "(default=all): " { send -- "\r"; exp_continue } - -exact "installation? \[Y/n\]" { send -- "y\r"; exp_continue } - } -EOF - -arch-chroot $ROOTFS /bin/sh -c 'rm -r /usr/share/man/*' -arch-chroot $ROOTFS /bin/sh -c "haveged -w 1024; pacman-key --init; pkill haveged; pacman -Rs --noconfirm haveged; pacman-key --populate $ARCH_KEYRING; pkill gpg-agent" -arch-chroot $ROOTFS /bin/sh -c "ln -s /usr/share/zoneinfo/UTC /etc/localtime" -echo 'en_US.UTF-8 UTF-8' > $ROOTFS/etc/locale.gen -arch-chroot $ROOTFS locale-gen -arch-chroot $ROOTFS /bin/sh -c 'echo $PACMAN_MIRRORLIST > /etc/pacman.d/mirrorlist' - -# udev doesn't work in containers, rebuild /dev -DEV=$ROOTFS/dev -rm -rf $DEV -mkdir -p $DEV -mknod -m 666 $DEV/null c 1 3 -mknod -m 666 $DEV/zero c 1 5 -mknod -m 666 $DEV/random c 1 8 -mknod -m 666 $DEV/urandom c 1 9 -mkdir -m 755 $DEV/pts -mkdir -m 1777 $DEV/shm -mknod -m 666 $DEV/tty c 5 0 -mknod -m 600 $DEV/console c 5 1 -mknod -m 666 $DEV/tty0 c 4 0 -mknod -m 666 $DEV/full c 1 7 -mknod -m 600 $DEV/initctl p -mknod -m 666 $DEV/ptmx c 5 2 -ln -sf /proc/self/fd $DEV/fd - -tar --numeric-owner --xattrs --acls -C $ROOTFS -c . | docker import - $DOCKER_IMAGE_NAME -docker run --rm -t $DOCKER_IMAGE_NAME echo Success. -rm -rf $ROOTFS diff --git a/contrib/mkimage-archarm-pacman.conf b/contrib/mkimage-archarm-pacman.conf deleted file mode 100644 index 38b01bf40b..0000000000 --- a/contrib/mkimage-archarm-pacman.conf +++ /dev/null @@ -1,98 +0,0 @@ -# -# /etc/pacman.conf -# -# See the pacman.conf(5) manpage for option and repository directives - -# -# GENERAL OPTIONS -# -[options] -# The following paths are commented out with their default values listed. -# If you wish to use different paths, uncomment and update the paths. -#RootDir = / -#DBPath = /var/lib/pacman/ -#CacheDir = /var/cache/pacman/pkg/ -#LogFile = /var/log/pacman.log -#GPGDir = /etc/pacman.d/gnupg/ -HoldPkg = pacman glibc -#XferCommand = /usr/bin/curl -C - -f %u > %o -#XferCommand = /usr/bin/wget --passive-ftp -c -O %o %u -#CleanMethod = KeepInstalled -#UseDelta = 0.7 -Architecture = armv7h - -# Pacman won't upgrade packages listed in IgnorePkg and members of IgnoreGroup -#IgnorePkg = -#IgnoreGroup = - -#NoUpgrade = -#NoExtract = - -# Misc options -#UseSyslog -#Color -#TotalDownload -# We cannot check disk space from within a chroot environment -#CheckSpace -#VerbosePkgLists - -# By default, pacman accepts packages signed by keys that its local keyring -# trusts (see pacman-key and its man page), as well as unsigned packages. -SigLevel = Required DatabaseOptional -LocalFileSigLevel = Optional -#RemoteFileSigLevel = Required - -# NOTE: You must run `pacman-key --init` before first using pacman; the local -# keyring can then be populated with the keys of all official Arch Linux -# packagers with `pacman-key --populate archlinux`.
- -# -# REPOSITORIES -# - can be defined here or included from another file -# - pacman will search repositories in the order defined here -# - local/custom mirrors can be added here or in separate files -# - repositories listed first will take precedence when packages -# have identical names, regardless of version number -# - URLs will have $repo replaced by the name of the current repo -# - URLs will have $arch replaced by the name of the architecture -# -# Repository entries are of the format: -# [repo-name] -# Server = ServerName -# Include = IncludePath -# -# The header [repo-name] is crucial - it must be present and -# uncommented to enable the repo. -# - -# The testing repositories are disabled by default. To enable, uncomment the -# repo name header and Include lines. You can add preferred servers immediately -# after the header, and they will be used before the default mirrors. - -#[testing] -#Include = /etc/pacman.d/mirrorlist - -[core] -Include = /etc/pacman.d/mirrorlist - -[extra] -Include = /etc/pacman.d/mirrorlist - -#[community-testing] -#Include = /etc/pacman.d/mirrorlist - -[community] -Include = /etc/pacman.d/mirrorlist - -[alarm] -Include = /etc/pacman.d/mirrorlist - -[aur] -Include = /etc/pacman.d/mirrorlist - -# An example of a custom package repository. See the pacman manpage for -# tips on creating your own repositories. -#[custom] -#SigLevel = Optional TrustAll -#Server = file:///home/custompkgs - diff --git a/contrib/mkimage-busybox.sh b/contrib/mkimage-busybox.sh deleted file mode 100755 index b11a6bb265..0000000000 --- a/contrib/mkimage-busybox.sh +++ /dev/null @@ -1,43 +0,0 @@ -#!/usr/bin/env bash -# Generate a very minimal filesystem based on busybox-static, -# and load it into the local docker under the name "busybox". - -echo >&2 -echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/busybox-static' -echo >&2 - -BUSYBOX=$(which busybox) -[ "$BUSYBOX" ] || { - echo "Sorry, I could not locate busybox." - echo "Try 'apt-get install busybox-static'?" - exit 1 -} - -set -e -ROOTFS=${TMPDIR:-/var/tmp}/rootfs-busybox-$$-$RANDOM -mkdir $ROOTFS -cd $ROOTFS - -mkdir bin etc dev dev/pts lib proc sys tmp -touch etc/resolv.conf -cp /etc/nsswitch.conf etc/nsswitch.conf -echo root:x:0:0:root:/:/bin/sh > etc/passwd -echo root:x:0: > etc/group -ln -s lib lib64 -ln -s bin sbin -cp $BUSYBOX bin -for X in $(busybox --list) -do - ln -s busybox bin/$X -done -rm bin/init -ln bin/busybox bin/init -cp /lib/x86_64-linux-gnu/lib{pthread,c,dl,nsl,nss_*}.so.* lib -cp /lib/x86_64-linux-gnu/ld-linux-x86-64.so.2 lib -for X in console null ptmx random stdin stdout stderr tty urandom zero -do - cp -a /dev/$X dev -done - -tar --numeric-owner -cf- . | docker import - busybox -docker run -i -u root busybox /bin/echo Success. diff --git a/contrib/mkimage-crux.sh b/contrib/mkimage-crux.sh deleted file mode 100755 index 3f0bdcae3c..0000000000 --- a/contrib/mkimage-crux.sh +++ /dev/null @@ -1,75 +0,0 @@ -#!/usr/bin/env bash -# Generate a minimal filesystem for CRUX/Linux and load it into the local -# docker as "cruxlinux" -# requires root and the crux iso (http://crux.nu) - -set -e - -die () { - echo >&2 "$@" - exit 1 -} - -[ "$#" -eq 1 ] || die "1 argument(s) required, $# provided. 
Usage: ./mkimage-crux.sh /path/to/iso" - -ISO=${1} - -ROOTFS=$(mktemp -d ${TMPDIR:-/var/tmp}/rootfs-crux-XXXXXXXXXX) -CRUX=$(mktemp -d ${TMPDIR:-/var/tmp}/crux-XXXXXXXXXX) -TMP=$(mktemp -d ${TMPDIR:-/var/tmp}/XXXXXXXXXX) - -VERSION=$(basename --suffix=.iso $ISO | sed 's/[^0-9.]*\([0-9.]*\).*/\1/') - -# Mount the ISO -mount -o ro,loop $ISO $CRUX - -# Extract pkgutils -tar -C $TMP -xf $CRUX/tools/pkgutils#*.pkg.tar.gz - -# Put pkgadd in the $PATH -export PATH="$TMP/usr/bin:$PATH" - -# Install core packages -mkdir -p $ROOTFS/var/lib/pkg -touch $ROOTFS/var/lib/pkg/db -for pkg in $CRUX/crux/core/*; do - pkgadd -r $ROOTFS $pkg -done - -# Remove agetty and inittab config -if (grep agetty ${ROOTFS}/etc/inittab 2>&1 > /dev/null); then - echo "Removing agetty from /etc/inittab ..." - chroot ${ROOTFS} sed -i -e "/agetty/d" /etc/inittab - chroot ${ROOTFS} sed -i -e "/shutdown/d" /etc/inittab - chroot ${ROOTFS} sed -i -e "/^$/N;/^\n$/d" /etc/inittab -fi - -# Remove kernel source -rm -rf $ROOTFS/usr/src/* - -# udev doesn't work in containers, rebuild /dev -DEV=$ROOTFS/dev -rm -rf $DEV -mkdir -p $DEV -mknod -m 666 $DEV/null c 1 3 -mknod -m 666 $DEV/zero c 1 5 -mknod -m 666 $DEV/random c 1 8 -mknod -m 666 $DEV/urandom c 1 9 -mkdir -m 755 $DEV/pts -mkdir -m 1777 $DEV/shm -mknod -m 666 $DEV/tty c 5 0 -mknod -m 600 $DEV/console c 5 1 -mknod -m 666 $DEV/tty0 c 4 0 -mknod -m 666 $DEV/full c 1 7 -mknod -m 600 $DEV/initctl p -mknod -m 666 $DEV/ptmx c 5 2 - -IMAGE_ID=$(tar --numeric-owner -C $ROOTFS -c . | docker import - crux:$VERSION) -docker tag $IMAGE_ID crux:latest -docker run -i -t crux echo Success. - -# Cleanup -umount $CRUX -rm -rf $ROOTFS -rm -rf $CRUX -rm -rf $TMP diff --git a/contrib/mkimage-debootstrap.sh b/contrib/mkimage-debootstrap.sh deleted file mode 100755 index 412a5ce0a7..0000000000 --- a/contrib/mkimage-debootstrap.sh +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/env bash -set -e - -echo >&2 -echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/debootstrap' -echo >&2 - -variant='minbase' -include='iproute,iputils-ping' -arch='amd64' # intentionally undocumented for now -skipDetection= -strictDebootstrap= -justTar= - -usage() { - echo >&2 - - echo >&2 "usage: $0 [options] repo suite [mirror]" - - echo >&2 - echo >&2 'options: (not recommended)' - echo >&2 " -p set an http_proxy for debootstrap" - echo >&2 " -v $variant # change default debootstrap variant" - echo >&2 " -i $include # change default package includes" - echo >&2 " -d # strict debootstrap (do not apply any docker-specific tweaks)" - echo >&2 " -s # skip version detection and tagging (ie, precise also tagged as 12.04)" - echo >&2 " # note that this will also skip adding universe and/or security/updates to sources.list" - echo >&2 " -t # just create a tarball, especially for dockerbrew (uses repo as tarball name)" - - echo >&2 - echo >&2 " ie: $0 username/debian squeeze" - echo >&2 " $0 username/debian squeeze http://ftp.uk.debian.org/debian/" - - echo >&2 - echo >&2 " ie: $0 username/ubuntu precise" - echo >&2 " $0 username/ubuntu precise http://mirrors.melbourne.co.uk/ubuntu/" - - echo >&2 - echo >&2 " ie: $0 -t precise.tar.bz2 precise" - echo >&2 " $0 -t wheezy.tgz wheezy" - echo >&2 " $0 -t wheezy-uk.tar.xz wheezy http://ftp.uk.debian.org/debian/" - - echo >&2 -} - -# these should match the names found at http://www.debian.org/releases/ -debianStable=wheezy -debianUnstable=sid -# this should match the name found at http://releases.ubuntu.com/ -ubuntuLatestLTS=trusty -# this should match the name found at 
http://releases.tanglu.org/ -tangluLatest=aequorea - -while getopts v:i:a:p:dst name; do - case "$name" in - p) - http_proxy="$OPTARG" - ;; - v) - variant="$OPTARG" - ;; - i) - include="$OPTARG" - ;; - a) - arch="$OPTARG" - ;; - d) - strictDebootstrap=1 - ;; - s) - skipDetection=1 - ;; - t) - justTar=1 - ;; - ?) - usage - exit 0 - ;; - esac -done -shift $(($OPTIND - 1)) - -repo="$1" -suite="$2" -mirror="${3:-}" # stick to the default debootstrap mirror if one is not provided - -if [ ! "$repo" ] || [ ! "$suite" ]; then - usage - exit 1 -fi - -# some rudimentary detection for whether we need to "sudo" our docker calls -docker='' -if docker version > /dev/null 2>&1; then - docker='docker' -elif sudo docker version > /dev/null 2>&1; then - docker='sudo docker' -elif command -v docker > /dev/null 2>&1; then - docker='docker' -else - echo >&2 "warning: either docker isn't installed, or your current user cannot run it;" - echo >&2 " this script is not likely to work as expected" - sleep 3 - docker='docker' # give us a command-not-found later -fi - -# make sure we have an absolute path to our final tarball so we can still reference it properly after we change directory -if [ "$justTar" ]; then - if [ ! -d "$(dirname "$repo")" ]; then - echo >&2 "error: $(dirname "$repo") does not exist" - exit 1 - fi - repo="$(cd "$(dirname "$repo")" && pwd -P)/$(basename "$repo")" -fi - -# will be filled in later, if [ -z "$skipDetection" ] -lsbDist='' - -target="${TMPDIR:-/var/tmp}/docker-rootfs-debootstrap-$suite-$$-$RANDOM" - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" -returnTo="$(pwd -P)" - -if [ "$suite" = 'lucid' ]; then - # lucid fails and doesn't include gpgv in minbase; "apt-get update" fails - include+=',gpgv' -fi - -set -x - -# bootstrap -mkdir -p "$target" -sudo http_proxy=$http_proxy debootstrap --verbose --variant="$variant" --include="$include" --arch="$arch" "$suite" "$target" "$mirror" - -cd "$target" - -if [ -z "$strictDebootstrap" ]; then - # prevent init scripts from running during install/update - # policy-rc.d (for most scripts) - echo $'#!/bin/sh\nexit 101' | sudo tee usr/sbin/policy-rc.d > /dev/null - sudo chmod +x usr/sbin/policy-rc.d - # initctl (for some pesky upstart scripts) - sudo chroot . dpkg-divert --local --rename --add /sbin/initctl - sudo ln -sf /bin/true sbin/initctl - # see https://github.com/docker/docker/issues/446#issuecomment-16953173 - - # shrink the image, since apt makes us fat (wheezy: ~157.5MB vs ~120MB) - sudo chroot . 
apt-get clean - - if strings usr/bin/dpkg | grep -q unsafe-io; then - # while we're at it, apt is unnecessarily slow inside containers - # this forces dpkg not to call sync() after package extraction and speeds up install - # the benefit is huge on spinning disks, and the penalty is nonexistent on SSD or decent server virtualization - echo 'force-unsafe-io' | sudo tee etc/dpkg/dpkg.cfg.d/02apt-speedup > /dev/null - # we have this wrapped up in an "if" because the "force-unsafe-io" - # option was added in dpkg 1.15.8.6 - # (see http://bugs.debian.org/cgi-bin/bugreport.cgi?bug=584254#82), - # and ubuntu lucid/10.04 only has 1.15.5.6 - fi - - # we want to effectively run "apt-get clean" after every install to keep images small (see output of "apt-get clean -s" for context) - { - aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";' - echo "DPkg::Post-Invoke { ${aptGetClean} };" - echo "APT::Update::Post-Invoke { ${aptGetClean} };" - echo 'Dir::Cache::pkgcache ""; Dir::Cache::srcpkgcache "";' - } | sudo tee etc/apt/apt.conf.d/no-cache > /dev/null - - # and remove the translations, too - echo 'Acquire::Languages "none";' | sudo tee etc/apt/apt.conf.d/no-languages > /dev/null - - # helpful undo lines for each the above tweaks (for lack of a better home to keep track of them): - # rm /usr/sbin/policy-rc.d - # rm /sbin/initctl; dpkg-divert --rename --remove /sbin/initctl - # rm /etc/dpkg/dpkg.cfg.d/02apt-speedup - # rm /etc/apt/apt.conf.d/no-cache - # rm /etc/apt/apt.conf.d/no-languages - - if [ -z "$skipDetection" ]; then - # see also rudimentary platform detection in hack/install.sh - lsbDist='' - if [ -r etc/lsb-release ]; then - lsbDist="$(. etc/lsb-release && echo "$DISTRIB_ID")" - fi - if [ -z "$lsbDist" ] && [ -r etc/debian_version ]; then - lsbDist='Debian' - fi - - case "$lsbDist" in - Debian) - # add the updates and security repositories - if [ "$suite" != "$debianUnstable" -a "$suite" != 'unstable' ]; then - # ${suite}-updates only applies to non-unstable - sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list - - # same for security updates - echo "deb http://security.debian.org/ $suite/updates main" | sudo tee -a etc/apt/sources.list > /dev/null - fi - ;; - Ubuntu) - # add the universe, updates, and security repositories - sudo sed -i " - s/ $suite main$/ $suite main universe/; p; - s/ $suite main/ ${suite}-updates main/; p; - s/ $suite-updates main/ ${suite}-security main/ - " etc/apt/sources.list - ;; - Tanglu) - # add the updates repository - if [ "$suite" = "$tangluLatest" ]; then - # ${suite}-updates only applies to stable Tanglu versions - sudo sed -i "p; s/ $suite main$/ ${suite}-updates main/" etc/apt/sources.list - fi - ;; - SteamOS) - # add contrib and non-free - sudo sed -i "s/ $suite main$/ $suite main contrib non-free/" etc/apt/sources.list - ;; - esac - fi - - # make sure our packages lists are as up to date as we can get them - sudo chroot . apt-get update - sudo chroot . apt-get dist-upgrade -y -fi - -if [ "$justTar" ]; then - # create the tarball file so it has the right permissions (ie, not root) - touch "$repo" - - # fill the tarball - sudo tar --numeric-owner -caf "$repo" . -else - # create the image (and tag $repo:$suite) - sudo tar --numeric-owner -c . 
| $docker import - $repo:$suite - - # test the image - $docker run -i -t $repo:$suite echo success - - if [ -z "$skipDetection" ]; then - case "$lsbDist" in - Debian) - if [ "$suite" = "$debianStable" -o "$suite" = 'stable' ] && [ -r etc/debian_version ]; then - # tag latest - $docker tag $repo:$suite $repo:latest - - if [ -r etc/debian_version ]; then - # tag the specific debian release version (which is only reasonable to tag on debian stable) - ver=$(cat etc/debian_version) - $docker tag $repo:$suite $repo:$ver - fi - fi - ;; - Ubuntu) - if [ "$suite" = "$ubuntuLatestLTS" ]; then - # tag latest - $docker tag $repo:$suite $repo:latest - fi - if [ -r etc/lsb-release ]; then - lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" - if [ "$lsbRelease" ]; then - # tag specific Ubuntu version number, if available (12.04, etc.) - $docker tag $repo:$suite $repo:$lsbRelease - fi - fi - ;; - Tanglu) - if [ "$suite" = "$tangluLatest" ]; then - # tag latest - $docker tag $repo:$suite $repo:latest - fi - if [ -r etc/lsb-release ]; then - lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" - if [ "$lsbRelease" ]; then - # tag specific Tanglu version number, if available (1.0, 2.0, etc.) - $docker tag $repo:$suite $repo:$lsbRelease - fi - fi - ;; - SteamOS) - if [ -r etc/lsb-release ]; then - lsbRelease="$(. etc/lsb-release && echo "$DISTRIB_RELEASE")" - if [ "$lsbRelease" ]; then - # tag specific SteamOS version number, if available (1.0, 2.0, etc.) - $docker tag $repo:$suite $repo:$lsbRelease - fi - fi - ;; - esac - fi -fi - -# cleanup -cd "$returnTo" -sudo rm -rf "$target" diff --git a/contrib/mkimage-rinse.sh b/contrib/mkimage-rinse.sh deleted file mode 100755 index 7e0935062f..0000000000 --- a/contrib/mkimage-rinse.sh +++ /dev/null @@ -1,123 +0,0 @@ -#!/usr/bin/env bash -# -# Create a base CentOS Docker image. - -# This script is useful on systems with rinse available (e.g., -# building a CentOS image on Debian). See contrib/mkimage-yum.sh for -# a way to build CentOS images on systems with yum installed. - -set -e - -echo >&2 -echo >&2 'warning: this script is deprecated - see mkimage.sh and mkimage/rinse' -echo >&2 - -repo="$1" -distro="$2" -mirror="$3" - -if [ ! "$repo" ] || [ ! "$distro" ]; then - self="$(basename $0)" - echo >&2 "usage: $self repo distro [mirror]" - echo >&2 - echo >&2 " ie: $self username/centos centos-5" - echo >&2 " $self username/centos centos-6" - echo >&2 - echo >&2 " ie: $self username/slc slc-5" - echo >&2 " $self username/slc slc-6" - echo >&2 - echo >&2 " ie: $self username/centos centos-5 http://vault.centos.org/5.8/os/x86_64/CentOS/" - echo >&2 " $self username/centos centos-6 http://vault.centos.org/6.3/os/x86_64/Packages/" - echo >&2 - echo >&2 'See /etc/rinse for supported values of "distro" and for examples of' - echo >&2 ' expected values of "mirror".' - echo >&2 - echo >&2 'This script is tested to work with the original upstream version of rinse,' - echo >&2 ' found at http://www.steve.org.uk/Software/rinse/ and also in Debian at' - echo >&2 ' http://packages.debian.org/wheezy/rinse -- as always, YMMV.' 
-	echo >&2
-	exit 1
-fi
-
-target="${TMPDIR:-/var/tmp}/docker-rootfs-rinse-$distro-$$-$RANDOM"
-
-cd "$(dirname "$(readlink -f "$BASH_SOURCE")")"
-returnTo="$(pwd -P)"
-
-rinseArgs=( --arch amd64 --distribution "$distro" --directory "$target" )
-if [ "$mirror" ]; then
-	rinseArgs+=( --mirror "$mirror" )
-fi
-
-set -x
-
-mkdir -p "$target"
-
-sudo rinse "${rinseArgs[@]}"
-
-cd "$target"
-
-# rinse fails a little at setting up /dev, so we'll just wipe it out and create our own
-sudo rm -rf dev
-sudo mkdir -m 755 dev
-(
-	cd dev
-	sudo ln -sf /proc/self/fd ./
-	sudo mkdir -m 755 pts
-	sudo mkdir -m 1777 shm
-	sudo mknod -m 600 console c 5 1
-	sudo mknod -m 600 initctl p
-	sudo mknod -m 666 full c 1 7
-	sudo mknod -m 666 null c 1 3
-	sudo mknod -m 666 ptmx c 5 2
-	sudo mknod -m 666 random c 1 8
-	sudo mknod -m 666 tty c 5 0
-	sudo mknod -m 666 tty0 c 4 0
-	sudo mknod -m 666 urandom c 1 9
-	sudo mknod -m 666 zero c 1 5
-)
-
-# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
-# locales
-sudo rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
-# docs and man pages
-sudo rm -rf usr/share/{man,doc,info,gnome/help}
-# cracklib
-sudo rm -rf usr/share/cracklib
-# i18n
-sudo rm -rf usr/share/i18n
-# yum cache
-sudo rm -rf var/cache/yum
-sudo mkdir -p --mode=0755 var/cache/yum
-# sln
-sudo rm -rf sbin/sln
-# ldconfig
-#sudo rm -rf sbin/ldconfig
-sudo rm -rf etc/ld.so.cache var/cache/ldconfig
-sudo mkdir -p --mode=0755 var/cache/ldconfig
-
-# allow networking init scripts inside the container to work without extra steps
-echo 'NETWORKING=yes' | sudo tee etc/sysconfig/network > /dev/null
-
-# to restore locales later:
-#  yum reinstall glibc-common
-
-version=
-if [ -r etc/redhat-release ]; then
-	version="$(sed -E 's/^[^0-9.]*([0-9.]+).*$/\1/' etc/redhat-release)"
-elif [ -r etc/SuSE-release ]; then
-	version="$(awk '/^VERSION/ { print $3 }' etc/SuSE-release)"
-fi
-
-if [ -z "$version" ]; then
-	echo >&2 "warning: cannot autodetect OS version, using $distro as tag"
-	sleep 20
-	version="$distro"
-fi
-
-sudo tar --numeric-owner -c . | docker import - $repo:$version
-
-docker run -i -t $repo:$version echo success
-
-cd "$returnTo"
-sudo rm -rf "$target"
diff --git a/contrib/mkimage-yum.sh b/contrib/mkimage-yum.sh
deleted file mode 100755
index 919160c844..0000000000
--- a/contrib/mkimage-yum.sh
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/usr/bin/env bash
-#
-# Create a base CentOS Docker image.
-#
-# This script is useful on systems with yum installed (e.g., building
-# a CentOS image on CentOS). See contrib/mkimage-rinse.sh for a way
-# to build CentOS images on other systems.
-
-usage() {
-    cat <<EOOPTS
-$(basename $0) [OPTIONS] <name>
-OPTIONS:
-  -p "<packages>"  The list of packages to install in the container.
-                   The default is blank.
-  -g "<groups>"    The groups of packages to install in the container.
-                   The default is "Core".
-  -y <yumconf>     The path to the yum config to install packages from. The
-                   default is /etc/yum.conf for Centos/RHEL and /etc/dnf/dnf.conf for Fedora
-EOOPTS
-    exit 1
-}
-
-# option defaults
-yum_config=/etc/yum.conf
-if [ -f /etc/dnf/dnf.conf ] && command -v dnf &> /dev/null; then
-    yum_config=/etc/dnf/dnf.conf
-    alias yum=dnf
-fi
-install_groups="Core"
-while getopts ":y:p:g:h" opt; do
-    case $opt in
-        y)
-            yum_config=$OPTARG
-            ;;
-        h)
-            usage
-            ;;
-        p)
-            install_packages="$OPTARG"
-            ;;
-        g)
-            install_groups="$OPTARG"
-            ;;
-        \?)
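-            # getopts passes any flag it does not recognize through as "?",
-            # so unknown options fall through to the usage text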
-            echo "Invalid option: -$OPTARG"
-            usage
-            ;;
-    esac
-done
-shift $((OPTIND - 1))
-name=$1
-
-if [[ -z $name ]]; then
-    usage
-fi
-
-target=$(mktemp -d --tmpdir $(basename $0).XXXXXX)
-
-set -x
-
-mkdir -m 755 "$target"/dev
-mknod -m 600 "$target"/dev/console c 5 1
-mknod -m 600 "$target"/dev/initctl p
-mknod -m 666 "$target"/dev/full c 1 7
-mknod -m 666 "$target"/dev/null c 1 3
-mknod -m 666 "$target"/dev/ptmx c 5 2
-mknod -m 666 "$target"/dev/random c 1 8
-mknod -m 666 "$target"/dev/tty c 5 0
-mknod -m 666 "$target"/dev/tty0 c 4 0
-mknod -m 666 "$target"/dev/urandom c 1 9
-mknod -m 666 "$target"/dev/zero c 1 5
-
-# amazon linux yum will fail without vars set
-if [ -d /etc/yum/vars ]; then
-    mkdir -p -m 755 "$target"/etc/yum
-    cp -a /etc/yum/vars "$target"/etc/yum/
-fi
-
-if [[ -n "$install_groups" ]];
-then
-    yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
-        --setopt=group_package_types=mandatory -y groupinstall $install_groups
-fi
-
-if [[ -n "$install_packages" ]];
-then
-    yum -c "$yum_config" --installroot="$target" --releasever=/ --setopt=tsflags=nodocs \
-        --setopt=group_package_types=mandatory -y install $install_packages
-fi
-
-yum -c "$yum_config" --installroot="$target" -y clean all
-
-cat > "$target"/etc/sysconfig/network <<EOF
-NETWORKING=yes
-HOSTNAME=localhost.localdomain
-EOF
-
-# effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target"
-#  locales
-rm -rf "$target"/usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive}
-#  docs and man pages
-rm -rf "$target"/usr/share/{man,doc,info,gnome/help}
-#  cracklib
-rm -rf "$target"/usr/share/cracklib
-#  i18n
-rm -rf "$target"/usr/share/i18n
-#  yum cache
-rm -rf "$target"/var/cache/yum
-mkdir -p --mode=0755 "$target"/var/cache/yum
-#  sln
-rm -rf "$target"/sbin/sln
-#  ldconfig
-rm -rf "$target"/etc/ld.so.cache "$target"/var/cache/ldconfig
-mkdir -p --mode=0755 "$target"/var/cache/ldconfig
-
-version=
-for file in "$target"/etc/{redhat,system}-release
-do
-    if [ -r "$file" ]; then
-        version="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' "$file")"
-        break
-    fi
-done
-
-if [ -z "$version" ]; then
-    echo >&2 "warning: cannot autodetect OS version, using '$name' as tag"
-    version=$name
-fi
-
-tar --numeric-owner -c -C "$target" . | docker import - $name:$version
-
-docker run -i -t --rm $name:$version /bin/bash -c 'echo success'
-
-rm -rf "$target"
diff --git a/contrib/mkimage.sh b/contrib/mkimage.sh
deleted file mode 100755
index 3976d72d9f..0000000000
--- a/contrib/mkimage.sh
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-mkimg="$(basename "$0")"
-
-usage() {
-	echo >&2 "usage: $mkimg [-d dir] [-t tag] [--compression algo| --no-compression] script [script-args]"
-	echo >&2 "   ie: $mkimg -t someuser/debian debootstrap --variant=minbase jessie"
-	echo >&2 "       $mkimg -t someuser/ubuntu debootstrap --include=ubuntu-minimal --components=main,universe trusty"
-	echo >&2 "       $mkimg -t someuser/busybox busybox-static"
-	echo >&2 "       $mkimg -t someuser/centos:5 rinse --distribution centos-5"
-	echo >&2 "       $mkimg -t someuser/mageia:4 mageia-urpmi --version=4"
-	echo >&2 "       $mkimg -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/"
-	exit 1
-}
-
-scriptDir="$(dirname "$(readlink -f "$BASH_SOURCE")")/mkimage"
-
-optTemp=$(getopt --options '+d:t:c:hC' --longoptions 'dir:,tag:,compression:,no-compression,help' --name "$mkimg" -- "$@")
-eval set -- "$optTemp"
-unset optTemp
-
-dir=
-tag=
-compression="auto"
-while true; do
-	case "$1" in
-		-d|--dir) dir="$2" ; shift 2 ;;
-		-t|--tag) tag="$2" ; shift 2 ;;
-		--compression) compression="$2" ; shift 2 ;;
-		--no-compression) compression="none" ; shift 1 ;;
-		-h|--help) usage ;;
-		--) shift ; break ;;
-	esac
-done
-
-script="$1"
-[ "$script" ] || usage
-shift
-
-if [ "$compression" == 'auto' ] || [ -z "$compression" ]
-then
-	compression='xz'
-fi
-
-[ "$compression" == 'none' ] && compression=''
-
-if [ ! 
-x "$scriptDir/$script" ]; then - echo >&2 "error: $script does not exist or is not executable" - echo >&2 " see $scriptDir for possible scripts" - exit 1 -fi - -# don't mistake common scripts like .febootstrap-minimize as image-creators -if [[ "$script" == .* ]]; then - echo >&2 "error: $script is a script helper, not a script" - echo >&2 " see $scriptDir for possible scripts" - exit 1 -fi - -delDir= -if [ -z "$dir" ]; then - dir="$(mktemp -d ${TMPDIR:-/var/tmp}/docker-mkimage.XXXXXXXXXX)" - delDir=1 -fi - -rootfsDir="$dir/rootfs" -( set -x; mkdir -p "$rootfsDir" ) - -# pass all remaining arguments to $script -"$scriptDir/$script" "$rootfsDir" "$@" - -# Docker mounts tmpfs at /dev and procfs at /proc so we can remove them -rm -rf "$rootfsDir/dev" "$rootfsDir/proc" -mkdir -p "$rootfsDir/dev" "$rootfsDir/proc" - -# make sure /etc/resolv.conf has something useful in it -mkdir -p "$rootfsDir/etc" -cat > "$rootfsDir/etc/resolv.conf" <<'EOF' -nameserver 8.8.8.8 -nameserver 8.8.4.4 -EOF - -tarFile="$dir/rootfs.tar${compression:+.$compression}" -touch "$tarFile" - -( - set -x - tar --numeric-owner --create --auto-compress --file "$tarFile" --directory "$rootfsDir" --transform='s,^./,,' . -) - -echo >&2 "+ cat > '$dir/Dockerfile'" -cat > "$dir/Dockerfile" <> "$dir/Dockerfile" ) - break - fi -done - -( set -x; rm -rf "$rootfsDir" ) - -if [ "$tag" ]; then - ( set -x; docker build -t "$tag" "$dir" ) -elif [ "$delDir" ]; then - # if we didn't specify a tag and we're going to delete our dir, let's just build an untagged image so that we did _something_ - ( set -x; docker build "$dir" ) -fi - -if [ "$delDir" ]; then - ( set -x; rm -rf "$dir" ) -fi diff --git a/contrib/mkimage/.febootstrap-minimize b/contrib/mkimage/.febootstrap-minimize deleted file mode 100755 index 7749e63fb0..0000000000 --- a/contrib/mkimage/.febootstrap-minimize +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env bash -set -e - -rootfsDir="$1" -shift - -( - cd "$rootfsDir" - - # effectively: febootstrap-minimize --keep-zoneinfo --keep-rpmdb --keep-services "$target" - # locales - rm -rf usr/{{lib,share}/locale,{lib,lib64}/gconv,bin/localedef,sbin/build-locale-archive} - # docs and man pages - rm -rf usr/share/{man,doc,info,gnome/help} - # cracklib - rm -rf usr/share/cracklib - # i18n - rm -rf usr/share/i18n - # yum cache - rm -rf var/cache/yum - mkdir -p --mode=0755 var/cache/yum - # sln - rm -rf sbin/sln - # ldconfig - #rm -rf sbin/ldconfig - rm -rf etc/ld.so.cache var/cache/ldconfig - mkdir -p --mode=0755 var/cache/ldconfig -) diff --git a/contrib/mkimage/busybox-static b/contrib/mkimage/busybox-static deleted file mode 100755 index e15322b49d..0000000000 --- a/contrib/mkimage/busybox-static +++ /dev/null @@ -1,34 +0,0 @@ -#!/usr/bin/env bash -set -e - -rootfsDir="$1" -shift - -busybox="$(which busybox 2>/dev/null || true)" -if [ -z "$busybox" ]; then - echo >&2 'error: busybox: not found' - echo >&2 ' install it with your distribution "busybox-static" package' - exit 1 -fi -if ! 
ldd "$busybox" 2>&1 | grep -q 'not a dynamic executable'; then - echo >&2 "error: '$busybox' appears to be a dynamic executable" - echo >&2 ' you should install your distribution "busybox-static" package instead' - exit 1 -fi - -mkdir -p "$rootfsDir/bin" -rm -f "$rootfsDir/bin/busybox" # just in case -cp "$busybox" "$rootfsDir/bin/busybox" - -( - cd "$rootfsDir" - - IFS=$'\n' - modules=( $(bin/busybox --list-modules) ) - unset IFS - - for module in "${modules[@]}"; do - mkdir -p "$(dirname "$module")" - ln -sf /bin/busybox "$module" - done -) diff --git a/contrib/mkimage/debootstrap b/contrib/mkimage/debootstrap deleted file mode 100755 index 7d56d8ea9f..0000000000 --- a/contrib/mkimage/debootstrap +++ /dev/null @@ -1,226 +0,0 @@ -#!/usr/bin/env bash -set -e - -rootfsDir="$1" -shift - -# we have to do a little fancy footwork to make sure "rootfsDir" becomes the second non-option argument to debootstrap - -before=() -while [ $# -gt 0 ] && [[ "$1" == -* ]]; do - before+=( "$1" ) - shift -done - -suite="$1" -shift - -# get path to "chroot" in our current PATH -chrootPath="$(type -P chroot)" -rootfs_chroot() { - # "chroot" doesn't set PATH, so we need to set it explicitly to something our new debootstrap chroot can use appropriately! - - # set PATH and chroot away! - PATH='/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin' \ - "$chrootPath" "$rootfsDir" "$@" -} - -# allow for DEBOOTSTRAP=qemu-debootstrap ./mkimage.sh ... -: ${DEBOOTSTRAP:=debootstrap} - -( - set -x - $DEBOOTSTRAP "${before[@]}" "$suite" "$rootfsDir" "$@" -) - -# now for some Docker-specific tweaks - -# prevent init scripts from running during install/update -echo >&2 "+ echo exit 101 > '$rootfsDir/usr/sbin/policy-rc.d'" -cat > "$rootfsDir/usr/sbin/policy-rc.d" <<-'EOF' - #!/bin/sh - - # For most Docker users, "apt-get install" only happens during "docker build", - # where starting services doesn't work and often fails in humorous ways. This - # prevents those failures by stopping the services from attempting to start. - - exit 101 -EOF -chmod +x "$rootfsDir/usr/sbin/policy-rc.d" - -# prevent upstart scripts from running during install/update -( - set -x - rootfs_chroot dpkg-divert --local --rename --add /sbin/initctl - cp -a "$rootfsDir/usr/sbin/policy-rc.d" "$rootfsDir/sbin/initctl" - sed -i 's/^exit.*/exit 0/' "$rootfsDir/sbin/initctl" -) - -# shrink a little, since apt makes us cache-fat (wheezy: ~157.5MB vs ~120MB) -( set -x; rootfs_chroot apt-get clean ) - -# this file is one APT creates to make sure we don't "autoremove" our currently -# in-use kernel, which doesn't really apply to debootstraps/Docker images that -# don't even have kernels installed -rm -f "$rootfsDir/etc/apt/apt.conf.d/01autoremove-kernels" - -# Ubuntu 10.04 sucks... :) -if strings "$rootfsDir/usr/bin/dpkg" | grep -q unsafe-io; then - # force dpkg not to call sync() after package extraction (speeding up installs) - echo >&2 "+ echo force-unsafe-io > '$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup'" - cat > "$rootfsDir/etc/dpkg/dpkg.cfg.d/docker-apt-speedup" <<-'EOF' - # For most Docker users, package installs happen during "docker build", which - # doesn't survive power loss and gets restarted clean afterwards anyhow, so - # this minor tweak gives us a nice speedup (much nicer on spinning disks, - # obviously). 
-
-	force-unsafe-io
-	EOF
-fi
-
-if [ -d "$rootfsDir/etc/apt/apt.conf.d" ]; then
-	# _keep_ us lean by effectively running "apt-get clean" after every install
-	aptGetClean='"rm -f /var/cache/apt/archives/*.deb /var/cache/apt/archives/partial/*.deb /var/cache/apt/*.bin || true";'
-	echo >&2 "+ cat > '$rootfsDir/etc/apt/apt.conf.d/docker-clean'"
-	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-clean" <<-EOF
-		# Since for most Docker users, package installs happen in "docker build" steps,
-		# they essentially become individual layers due to the way Docker handles
-		# layering, especially using CoW filesystems. What this means for us is that
-		# the caches that APT keeps end up just wasting space in those layers, making
-		# our layers unnecessarily large (especially since we'll normally never use
-		# these caches again and will instead just "docker build" again and make a brand
-		# new image).
-		
-		# Ideally, these would just be invoking "apt-get clean", but in our testing,
-		# that ended up being cyclic and we got stuck on APT's lock, so we get this fun
-		# creation that's essentially just "apt-get clean".
-		DPkg::Post-Invoke { ${aptGetClean} };
-		APT::Update::Post-Invoke { ${aptGetClean} };
-		
-		Dir::Cache::pkgcache "";
-		Dir::Cache::srcpkgcache "";
-		
-		# Note that we do realize this isn't the ideal way to do this, and are always
-		# open to better suggestions (https://github.com/docker/docker/issues).
-	EOF
-	
-	# remove apt-cache translations for fast "apt-get update"
-	echo >&2 "+ echo Acquire::Languages 'none' > '$rootfsDir/etc/apt/apt.conf.d/docker-no-languages'"
-	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-no-languages" <<-'EOF'
-		# In Docker, we don't often need the "Translations" files, so we're just wasting
-		# time and space by downloading them, and this inhibits that. For users that do
-		# need them, it's a simple matter to delete this file and "apt-get update". :)
-		
-		Acquire::Languages "none";
-	EOF
-	
-	echo >&2 "+ echo Acquire::GzipIndexes 'true' > '$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes'"
-	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-gzip-indexes" <<-'EOF'
-		# Since Docker users using "RUN apt-get update && apt-get install -y ..." in
-		# their Dockerfiles don't go delete the lists files afterwards, we want them to
-		# be as small as possible on-disk, so we explicitly request "gz" versions and
-		# tell Apt to keep them gzipped on-disk.
-		
-		# For comparison, an "apt-get update" layer without this on a pristine
-		# "debian:wheezy" base image was "29.88 MB", where with this it was only
-		# "8.273 MB".
-		
-		Acquire::GzipIndexes "true";
-		Acquire::CompressionTypes::Order:: "gz";
-	EOF
-	
-	# update "autoremove" configuration to be aggressive about removing suggests deps that weren't manually installed
-	echo >&2 "+ echo Apt::AutoRemove::SuggestsImportant 'false' > '$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests'"
-	cat > "$rootfsDir/etc/apt/apt.conf.d/docker-autoremove-suggests" <<-'EOF'
-		# Since Docker users are looking for the smallest possible final images, the
-		# following emerges as a very common pattern:
-		
-		#   RUN apt-get update \
-		#       && apt-get install -y <packages> \
-		#       && <do some work> \
-		#       && apt-get purge -y --auto-remove <packages>
-		
-		# By default, APT will actually _keep_ packages installed via Recommends or
-		# Depends if another package Suggests them, even and including if the package
-		# that originally caused them to be installed is removed. Setting this to
-		# "false" ensures that APT is appropriately aggressive about removing the
-		# packages it added.
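-		
-		# (For example: a package that only remains installed because some other
-		# package Suggests it would normally survive "apt-get purge -y
-		# --auto-remove"; with this set to "false", APT removes it as well.)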
- - # https://aptitude.alioth.debian.org/doc/en/ch02s05s05.html#configApt-AutoRemove-SuggestsImportant - Apt::AutoRemove::SuggestsImportant "false"; - EOF -fi - -if [ -z "$DONT_TOUCH_SOURCES_LIST" ]; then - # tweak sources.list, where appropriate - lsbDist= - if [ -z "$lsbDist" -a -r "$rootfsDir/etc/os-release" ]; then - lsbDist="$(. "$rootfsDir/etc/os-release" && echo "$ID")" - fi - if [ -z "$lsbDist" -a -r "$rootfsDir/etc/lsb-release" ]; then - lsbDist="$(. "$rootfsDir/etc/lsb-release" && echo "$DISTRIB_ID")" - fi - if [ -z "$lsbDist" -a -r "$rootfsDir/etc/debian_version" ]; then - lsbDist='Debian' - fi - # normalize to lowercase for easier matching - lsbDist="$(echo "$lsbDist" | tr '[:upper:]' '[:lower:]')" - case "$lsbDist" in - debian) - # updates and security! - if [ "$suite" != 'sid' -a "$suite" != 'unstable' ]; then - ( - set -x - sed -i " - p; - s/ $suite / ${suite}-updates / - " "$rootfsDir/etc/apt/sources.list" - echo "deb http://security.debian.org $suite/updates main" >> "$rootfsDir/etc/apt/sources.list" - ) - fi - ;; - ubuntu) - # add the updates and security repositories - ( - set -x - sed -i " - p; - s/ $suite / ${suite}-updates /; p; - s/ $suite-updates / ${suite}-security / - " "$rootfsDir/etc/apt/sources.list" - ) - ;; - tanglu) - # add the updates repository - if [ "$suite" != 'devel' ]; then - ( - set -x - sed -i " - p; - s/ $suite / ${suite}-updates / - " "$rootfsDir/etc/apt/sources.list" - ) - fi - ;; - steamos) - # add contrib and non-free if "main" is the only component - ( - set -x - sed -i "s/ $suite main$/ $suite main contrib non-free/" "$rootfsDir/etc/apt/sources.list" - ) - ;; - esac -fi - -( - set -x - - # make sure we're fully up-to-date - rootfs_chroot sh -xc 'apt-get update && apt-get dist-upgrade -y' - - # delete all the apt list files since they're big and get stale quickly - rm -rf "$rootfsDir/var/lib/apt/lists"/* - # this forces "apt-get update" in dependent images, which is also good - - mkdir "$rootfsDir/var/lib/apt/lists/partial" # Lucid... "E: Lists directory /var/lib/apt/lists/partial is missing." -) diff --git a/contrib/mkimage/mageia-urpmi b/contrib/mkimage/mageia-urpmi deleted file mode 100755 index 93fb289cac..0000000000 --- a/contrib/mkimage/mageia-urpmi +++ /dev/null @@ -1,61 +0,0 @@ -#!/usr/bin/env bash -# -# Needs to be run from Mageia 4 or greater for kernel support for docker. -# -# Mageia 4 does not have docker available in official repos, so please -# install and run the docker binary manually. -# -# Tested working versions are for Mageia 2 onwards (inc. cauldron). 
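-#
-# Example invocation via the mkimage.sh wrapper (the mirror URL is a placeholder):
-#   mkimage.sh -t someuser/mageia:4 mageia-urpmi --version=4 --mirror=http://somemirror/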
-#
-set -e

-rootfsDir="$1"
-shift
-
-optTemp=$(getopt --options '+v:,m:' --longoptions 'version:,mirror:' --name mageia-urpmi -- "$@")
-eval set -- "$optTemp"
-unset optTemp
-
-installversion=
-mirror=
-while true; do
-	case "$1" in
-		-v|--version) installversion="$2" ; shift 2 ;;
-		-m|--mirror) mirror="$2" ; shift 2 ;;
-		--) shift ; break ;;
-	esac
-done
-
-if [ -z "$installversion" ]; then
-	# Attempt to match host version
-	if [ -r /etc/mageia-release ]; then
-		installversion="$(sed 's/^[^0-9\]*\([0-9.]\+\).*$/\1/' /etc/mageia-release)"
-	else
-		echo "Error: no version supplied and unable to detect host mageia version"
-		exit 1
-	fi
-fi
-
-if [ -z "$mirror" ]; then
-	# No mirror provided, default to mirrorlist
-	mirror="--mirrorlist https://mirrors.mageia.org/api/mageia.$installversion.x86_64.list"
-fi
-
-(
-	set -x
-	urpmi.addmedia --distrib \
-		$mirror \
-		--urpmi-root "$rootfsDir"
-	urpmi basesystem-minimal urpmi \
-		--auto \
-		--no-suggests \
-		--urpmi-root "$rootfsDir" \
-		--root "$rootfsDir"
-)
-
-"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir"
-
-if [ -d "$rootfsDir/etc/sysconfig" ]; then
-	# allow networking init scripts inside the container to work without extra steps
-	echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network"
-fi
diff --git a/contrib/mkimage/rinse b/contrib/mkimage/rinse
deleted file mode 100755
index 75eb4f0d9d..0000000000
--- a/contrib/mkimage/rinse
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env bash
-set -e
-
-rootfsDir="$1"
-shift
-
-# specifying --arch below is safe because "$@" can override it and the "latest" one wins :)
-
-(
-	set -x
-	rinse --directory "$rootfsDir" --arch amd64 "$@"
-)
-
-"$(dirname "$BASH_SOURCE")/.febootstrap-minimize" "$rootfsDir"
-
-if [ -d "$rootfsDir/etc/sysconfig" ]; then
-	# allow networking init scripts inside the container to work without extra steps
-	echo 'NETWORKING=yes' > "$rootfsDir/etc/sysconfig/network"
-fi
-
-# make sure we're fully up-to-date, too
-(
-	set -x
-	chroot "$rootfsDir" yum update -y
-)
diff --git a/contrib/nnp-test/Dockerfile b/contrib/nnp-test/Dockerfile
deleted file mode 100644
index 026d86954f..0000000000
--- a/contrib/nnp-test/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM buildpack-deps:jessie
-
-COPY . /usr/src/
-
-WORKDIR /usr/src/
-
-RUN gcc -g -Wall -static nnp-test.c -o /usr/bin/nnp-test
-
-RUN chmod +s /usr/bin/nnp-test
diff --git a/contrib/nnp-test/nnp-test.c b/contrib/nnp-test/nnp-test.c
deleted file mode 100644
index b767da7e1a..0000000000
--- a/contrib/nnp-test/nnp-test.c
+++ /dev/null
@@ -1,10 +0,0 @@
-#include <stdio.h>
-#include <sys/types.h>
-#include <unistd.h>
-
-int main(int argc, char *argv[])
-{
-	printf("EUID=%d\n", geteuid());
-	return 0;
-}
-
diff --git a/contrib/nuke-graph-directory.sh b/contrib/nuke-graph-directory.sh
deleted file mode 100755
index 99b527de15..0000000000
--- a/contrib/nuke-graph-directory.sh
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/bin/sh
-set -e
-
-dir="$1"
-
-if [ -z "$dir" ]; then
-	{
-		echo 'This script is for destroying old /var/lib/docker directories more safely than'
-		echo '  "rm -rf", which can cause data loss or other serious issues.'
-		echo
-		echo "usage: $0 directory"
-		echo "   ie: $0 /var/lib/docker"
-	} >&2
-	exit 1
-fi
-
-if [ "$(id -u)" != 0 ]; then
-	echo >&2 "error: $0 must be run as root"
-	exit 1
-fi
-
-if [ ! -d "$dir" ]; then
-	echo >&2 "error: $dir is not a directory"
-	exit 1
-fi
-
-dir="$(readlink -f "$dir")"
-
-echo
-echo "Nuking $dir ..."
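-# the "sleep 10" below is a deliberate grace period: the last chance to
-# press Ctrl+C before anything destructive happens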
-echo ' (if this is wrong, press Ctrl+C NOW!)' -echo - -( set -x; sleep 10 ) -echo - -dir_in_dir() { - inner="$1" - outer="$2" - [ "${inner#$outer}" != "$inner" ] -} - -# let's start by unmounting any submounts in $dir -# (like -v /home:... for example - DON'T DELETE MY HOME DIRECTORY BRU!) -for mount in $(awk '{ print $5 }' /proc/self/mountinfo); do - mount="$(readlink -f "$mount" || true)" - if dir_in_dir "$mount" "$dir"; then - ( set -x; umount -f "$mount" ) - fi -done - -# now, let's go destroy individual btrfs subvolumes, if any exist -if command -v btrfs > /dev/null 2>&1; then - root="$(df "$dir" | awk 'NR>1 { print $NF }')" - root="${root%/}" # if root is "/", we want it to become "" - for subvol in $(btrfs subvolume list -o "$root/" 2>/dev/null | awk -F' path ' '{ print $2 }' | sort -r); do - subvolDir="$root/$subvol" - if dir_in_dir "$subvolDir" "$dir"; then - ( set -x; btrfs subvolume delete "$subvolDir" ) - fi - done -fi - -# finally, DESTROY ALL THINGS -( set -x; rm -rf "$dir" ) diff --git a/contrib/project-stats.sh b/contrib/project-stats.sh deleted file mode 100755 index 2691c72ffb..0000000000 --- a/contrib/project-stats.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash - -## Run this script from the root of the docker repository -## to query project stats useful to the maintainers. -## You will need to install `pulls` and `issues` from -## https://github.com/crosbymichael/pulls - -set -e - -echo -n "Open pulls: " -PULLS=$(pulls | wc -l); let PULLS=$PULLS-1 -echo $PULLS - -echo -n "Pulls alru: " -pulls alru - -echo -n "Open issues: " -ISSUES=$(issues list | wc -l); let ISSUES=$ISSUES-1 -echo $ISSUES - -echo -n "Issues alru: " -issues alru diff --git a/contrib/report-issue.sh b/contrib/report-issue.sh deleted file mode 100755 index cb54f1a5bc..0000000000 --- a/contrib/report-issue.sh +++ /dev/null @@ -1,105 +0,0 @@ -#!/bin/sh - -# This is a convenience script for reporting issues that include a base -# template of information. See https://github.com/docker/docker/pull/8845 - -set -e - -DOCKER_ISSUE_URL=${DOCKER_ISSUE_URL:-"https://github.com/docker/docker/issues/new"} -DOCKER_ISSUE_NAME_PREFIX=${DOCKER_ISSUE_NAME_PREFIX:-"Report: "} -DOCKER=${DOCKER:-"docker"} -DOCKER_COMMAND="${DOCKER}" -export DOCKER_COMMAND - -# pulled from https://gist.github.com/cdown/1163649 -function urlencode() { - # urlencode - - local length="${#1}" - for (( i = 0; i < length; i++ )); do - local c="${1:i:1}" - case $c in - [a-zA-Z0-9.~_-]) printf "$c" ;; - *) printf '%%%02X' "'$c" - esac - done -} - -function template() { -# this should always match the template from CONTRIBUTING.md - cat <<- EOM - Description of problem: - - - \`docker version\`: - `${DOCKER_COMMAND} -D version` - - - \`docker info\`: - `${DOCKER_COMMAND} -D info` - - - \`uname -a\`: - `uname -a` - - - Environment details (AWS, VirtualBox, physical, etc.): - - - How reproducible: - - - Steps to Reproduce: - 1. - 2. - 3. - - - Actual Results: - - - Expected Results: - - - Additional info: - - - EOM -} - -function format_issue_url() { - if [ ${#@} -ne 2 ] ; then - return 1 - fi - local issue_name=$(urlencode "${DOCKER_ISSUE_NAME_PREFIX}${1}") - local issue_body=$(urlencode "${2}") - echo "${DOCKER_ISSUE_URL}?title=${issue_name}&body=${issue_body}" -} - - -echo -ne "Do you use \`sudo\` to call docker? 
[y|N]: "
-read -r -n 1 use_sudo
-echo ""
-
-if [ "x${use_sudo}" = "xy" -o "x${use_sudo}" = "xY" ]; then
-	export DOCKER_COMMAND="sudo ${DOCKER}"
-fi
-
-echo -ne "Title of new issue?: "
-read -r issue_title
-echo ""
-
-issue_url=$(format_issue_url "${issue_title}" "$(template)")
-
-if which xdg-open 2>/dev/null >/dev/null ; then
-	echo -ne "Would you like to launch this report in your browser? [Y|n]: "
-	read -r -n 1 launch_now
-	echo ""
-
-	if [ "${launch_now}" != "n" -a "${launch_now}" != "N" ]; then
-		xdg-open "${issue_url}"
-	fi
-fi
-
-echo "If you would like to open the url manually, you can open this link in your browser: ${issue_url}"
-
diff --git a/contrib/reprepro/suites.sh b/contrib/reprepro/suites.sh
deleted file mode 100755
index 9ecf99d465..0000000000
--- a/contrib/reprepro/suites.sh
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -e
-
-cd "$(dirname "$BASH_SOURCE")/../.."
-
-targets_from() {
-	git fetch -q https://github.com/docker/docker.git "$1"
-	git ls-tree -r --name-only "$(git rev-parse FETCH_HEAD)" contrib/builder/deb/ | grep '/Dockerfile$' | sed -r 's!^contrib/builder/deb/|^contrib/builder/deb/amd64/|-debootstrap|/Dockerfile$!!g' | grep -v /
-}
-
-release_branch=$(git ls-remote --heads https://github.com/docker/docker.git | awk -F 'refs/heads/' '$2 ~ /^release/ { print $2 }' | sort -V | tail -1)
-{ targets_from master; targets_from "$release_branch"; } | sort -u
diff --git a/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE b/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE
deleted file mode 100644
index d511905c16..0000000000
--- a/contrib/selinux-fedora-24/docker-engine-selinux/LICENSE
+++ /dev/null
@@ -1,339 +0,0 @@
-                    GNU GENERAL PUBLIC LICENSE
-                       Version 2, June 1991
-
- Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
- 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- Everyone is permitted to copy and distribute verbatim copies
- of this license document, but changing it is not allowed.
-
-                            Preamble
-
-  The licenses for most software are designed to take away your
-freedom to share and change it.  By contrast, the GNU General Public
-License is intended to guarantee your freedom to share and change free
-software--to make sure the software is free for all its users.  This
-General Public License applies to most of the Free Software
-Foundation's software and to any other program whose authors commit to
-using it.  (Some other Free Software Foundation software is covered by
-the GNU Lesser General Public License instead.)  You can apply it to
-your programs, too.
-
-  When we speak of free software, we are referring to freedom, not
-price.  Our General Public Licenses are designed to make sure that you
-have the freedom to distribute copies of free software (and charge for
-this service if you wish), that you receive source code or can get it
-if you want it, that you can change the software or use pieces of it
-in new free programs; and that you know you can do these things.
-
-  To protect your rights, we need to make restrictions that forbid
-anyone to deny you these rights or to ask you to surrender the rights.
-These restrictions translate to certain responsibilities for you if you
-distribute copies of the software, or if you modify it.
-
-  For example, if you distribute copies of such a program, whether
-gratis or for a fee, you must give the recipients all the rights that
-you have.  You must make sure that they, too, receive or can get the
-source code.  And you must show them these terms so they know their
-rights.
- - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. 
- - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. 
- -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
-
-    <one line to give the program's name and a brief idea of what it does.>
-    Copyright (C) <year>  <name of author>
-
-    This program is free software; you can redistribute it and/or modify
-    it under the terms of the GNU General Public License as published by
-    the Free Software Foundation; either version 2 of the License, or
-    (at your option) any later version.
-
-    This program is distributed in the hope that it will be useful,
-    but WITHOUT ANY WARRANTY; without even the implied warranty of
-    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
-    GNU General Public License for more details.
-
-    You should have received a copy of the GNU General Public License along
-    with this program; if not, write to the Free Software Foundation, Inc.,
-    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
-
-Also add information on how to contact you by electronic and paper mail.
-
-If the program is interactive, make it output a short notice like this
-when it starts in an interactive mode:
-
-    Gnomovision version 69, Copyright (C) year name of author
-    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
-    This is free software, and you are welcome to redistribute it
-    under certain conditions; type `show c' for details.
-
-The hypothetical commands `show w' and `show c' should show the appropriate
-parts of the General Public License.  Of course, the commands you use may
-be called something other than `show w' and `show c'; they could even be
-mouse-clicks or menu items--whatever suits your program.
-
-You should also get your employer (if you work as a programmer) or your
-school, if any, to sign a "copyright disclaimer" for the program, if
-necessary.  Here is a sample; alter the names:
-
-  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
-  `Gnomovision' (which makes passes at compilers) written by James Hacker.
-
-  <signature of Ty Coon>, 1 April 1989
-  Ty Coon, President of Vice
-
-This General Public License does not permit incorporating your program into
-proprietary programs.  If your program is a subroutine library, you may
-consider it more useful to permit linking proprietary applications with the
-library.  If this is what you want to do, use the GNU Lesser General
-Public License instead of this License.
diff --git a/contrib/selinux-fedora-24/docker-engine-selinux/Makefile b/contrib/selinux-fedora-24/docker-engine-selinux/Makefile deleted file mode 100644 index 16df33ef32..0000000000 --- a/contrib/selinux-fedora-24/docker-engine-selinux/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -TARGETS?=docker -MODULES?=${TARGETS:=.pp.bz2} -SHAREDIR?=/usr/share - -all: ${TARGETS:=.pp.bz2} - -%.pp.bz2: %.pp - @echo Compressing $^ -\> $@ - bzip2 -9 $^ - -%.pp: %.te - make -f ${SHAREDIR}/selinux/devel/Makefile $@ - -clean: - rm -f *~ *.tc *.pp *.pp.bz2 - rm -rf tmp *.tar.gz - -man: install - sepolicy manpage --domain ${TARGETS}_t - -install: - semodule -i ${TARGETS} - diff --git a/contrib/selinux-fedora-24/docker-engine-selinux/README.md b/contrib/selinux-fedora-24/docker-engine-selinux/README.md deleted file mode 100644 index 7ea3117a89..0000000000 --- a/contrib/selinux-fedora-24/docker-engine-selinux/README.md +++ /dev/null @@ -1 +0,0 @@ -SELinux policy for docker diff --git a/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc b/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc deleted file mode 100644 index d6cb0e5792..0000000000 --- a/contrib/selinux-fedora-24/docker-engine-selinux/docker.fc +++ /dev/null @@ -1,29 +0,0 @@ -/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) - -/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) -/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) -/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) - -/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) -/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) - -/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) - -/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) -/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) -/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) - -/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) - -/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) - -/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) - -/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/contrib/selinux-fedora-24/docker-engine-selinux/docker.if b/contrib/selinux-fedora-24/docker-engine-selinux/docker.if deleted file mode 100644 index e087e8b98b..0000000000 --- a/contrib/selinux-fedora-24/docker-engine-selinux/docker.if +++ /dev/null @@ -1,523 +0,0 @@ - -## The open-source application container engine. - -######################################## -## -## Execute docker in the docker domain. -## -## -## -## Domain allowed to transition. 
-## -## -# -interface(`docker_domtrans',` - gen_require(` - type docker_t, docker_exec_t; - ') - - corecmd_search_bin($1) - domtrans_pattern($1, docker_exec_t, docker_t) -') - -######################################## -## -## Execute docker in the caller domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_exec',` - gen_require(` - type docker_exec_t; - ') - - corecmd_search_bin($1) - can_exec($1, docker_exec_t) -') - -######################################## -## -## Search docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_search_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - files_search_var_lib($1) -') - -######################################## -## -## Execute docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_exec_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - can_exec($1, docker_var_lib_t) -') - -######################################## -## -## Read docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Read docker share files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_share_files',` - gen_require(` - type docker_share_t; - ') - - files_search_var_lib($1) - list_dirs_pattern($1, docker_share_t, docker_share_t) - read_files_pattern($1, docker_share_t, docker_share_t) - read_lnk_files_pattern($1, docker_share_t, docker_share_t) -') - -###################################### -## -## Allow the specified domain to execute apache -## in the caller domain. -## -## -## -## Domain allowed access. -## -## -# -interface(`apache_exec',` - gen_require(` - type httpd_exec_t; - ') - - can_exec($1, httpd_exec_t) -') - -###################################### -## -## Allow the specified domain to execute docker shared files -## in the caller domain. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_exec_share_files',` - gen_require(` - type docker_share_t; - ') - - can_exec($1, docker_share_t) -') - -######################################## -## -## Manage docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) - manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Manage docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_dirs',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Create objects in a docker var lib directory -## with an automatic type transition to -## a specified private type. -## -## -## -## Domain allowed access. -## -## -## -## -## The type of the object to create. -## -## -## -## -## The class of the object to be created. -## -## -## -## -## The name of the object being created. 
-## -## -# -interface(`docker_lib_filetrans',` - gen_require(` - type docker_var_lib_t; - ') - - filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) -') - -######################################## -## -## Read docker PID files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_pid_files',` - gen_require(` - type docker_var_run_t; - ') - - files_search_pids($1) - read_files_pattern($1, docker_var_run_t, docker_var_run_t) -') - -######################################## -## -## Execute docker server in the docker domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_systemctl',` - gen_require(` - type docker_t; - type docker_unit_file_t; - ') - - systemd_exec_systemctl($1) - init_reload_services($1) - systemd_read_fifo_file_passwd_run($1) - allow $1 docker_unit_file_t:file read_file_perms; - allow $1 docker_unit_file_t:service manage_service_perms; - - ps_process_pattern($1, docker_t) -') - -######################################## -## -## Read and write docker shared memory. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_rw_sem',` - gen_require(` - type docker_t; - ') - - allow $1 docker_t:sem rw_sem_perms; -') - -####################################### -## -## Read and write the docker pty type. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_use_ptys',` - gen_require(` - type docker_devpts_t; - ') - - allow $1 docker_devpts_t:chr_file rw_term_perms; -') - -####################################### -## -## Allow domain to create docker content -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_filetrans_named_content',` - - gen_require(` - type docker_var_lib_t; - type docker_share_t; - type docker_log_t; - type docker_var_run_t; - type docker_home_t; - ') - - files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") - files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") - files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") - logging_log_filetrans($1, docker_log_t, dir, "lxc") - files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") - userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") -') - -######################################## -## -## Connect to docker over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_stream_connect',` - gen_require(` - type docker_t, docker_var_run_t; - ') - - files_search_pids($1) - stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) -') - -######################################## -## -## Connect to SPC containers over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_spc_stream_connect',` - gen_require(` - type spc_t, spc_var_run_t; - ') - - files_search_pids($1) - files_write_all_pid_sockets($1) - allow $1 spc_t:unix_stream_socket connectto; -') - -######################################## -## -## All of the rules required to administrate -## an docker environment -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_admin',` - gen_require(` - type docker_t; - type docker_var_lib_t, docker_var_run_t; - type docker_unit_file_t; - type docker_lock_t; - type docker_log_t; - type docker_config_t; - ') - - allow $1 docker_t:process { ptrace signal_perms }; - ps_process_pattern($1, docker_t) - - admin_pattern($1, docker_config_t) - - files_search_var_lib($1) - admin_pattern($1, docker_var_lib_t) - - files_search_pids($1) - admin_pattern($1, docker_var_run_t) - - files_search_locks($1) - admin_pattern($1, docker_lock_t) - - logging_search_logs($1) - admin_pattern($1, docker_log_t) - - docker_systemctl($1) - admin_pattern($1, docker_unit_file_t) - allow $1 docker_unit_file_t:service all_service_perms; - - optional_policy(` - systemd_passwd_agent_exec($1) - systemd_read_fifo_file_passwd_run($1) - ') -') - -######################################## -## -## Execute docker_auth_exec_t in the docker_auth domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_auth_domtrans',` - gen_require(` - type docker_auth_t, docker_auth_exec_t; - ') - - corecmd_search_bin($1) - domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) -') - -###################################### -## -## Execute docker_auth in the caller domain. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_auth_exec',` - gen_require(` - type docker_auth_exec_t; - ') - - corecmd_search_bin($1) - can_exec($1, docker_auth_exec_t) -') - -######################################## -## -## Connect to docker_auth over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_auth_stream_connect',` - gen_require(` - type docker_auth_t, docker_plugin_var_run_t; - ') - - files_search_pids($1) - stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) -') - -######################################## -## -## docker domain typebounds calling domain. -## -## -## -## Domain to be typebound. -## -## -# -interface(`docker_typebounds',` - gen_require(` - type docker_t; - ') - - typebounds docker_t $1; -') - -######################################## -## -## Allow any docker_exec_t to be an entrypoint of this domain -## -## -## -## Domain allowed access. -## -## -## -# -interface(`docker_entrypoint',` - gen_require(` - type docker_exec_t; - ') - allow $1 docker_exec_t:file entrypoint; -') diff --git a/contrib/selinux-fedora-24/docker-engine-selinux/docker.te b/contrib/selinux-fedora-24/docker-engine-selinux/docker.te deleted file mode 100644 index 4231688382..0000000000 --- a/contrib/selinux-fedora-24/docker-engine-selinux/docker.te +++ /dev/null @@ -1,399 +0,0 @@ -policy_module(docker, 1.0.0) - -######################################## -# -# Declarations -# - -## -##
-## Determine whether docker can -## connect to all TCP ports. -## -##
-gen_tunable(docker_connect_any, false) - -type docker_t; -type docker_exec_t; -init_daemon_domain(docker_t, docker_exec_t) -domain_subj_id_change_exemption(docker_t) -domain_role_change_exemption(docker_t) - -type spc_t; -domain_type(spc_t) -role system_r types spc_t; - -type docker_auth_t; -type docker_auth_exec_t; -init_daemon_domain(docker_auth_t, docker_auth_exec_t) - -type spc_var_run_t; -files_pid_file(spc_var_run_t) - -type docker_var_lib_t; -files_type(docker_var_lib_t) - -type docker_home_t; -userdom_user_home_content(docker_home_t) - -type docker_config_t; -files_config_file(docker_config_t) - -type docker_lock_t; -files_lock_file(docker_lock_t) - -type docker_log_t; -logging_log_file(docker_log_t) - -type docker_tmp_t; -files_tmp_file(docker_tmp_t) - -type docker_tmpfs_t; -files_tmpfs_file(docker_tmpfs_t) - -type docker_var_run_t; -files_pid_file(docker_var_run_t) - -type docker_plugin_var_run_t; -files_pid_file(docker_plugin_var_run_t) - -type docker_unit_file_t; -systemd_unit_file(docker_unit_file_t) - -type docker_devpts_t; -term_pty(docker_devpts_t) - -type docker_share_t; -files_type(docker_share_t) - -######################################## -# -# docker local policy -# -allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; -allow docker_t self:tun_socket relabelto; -allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; -allow docker_t self:fifo_file rw_fifo_file_perms; -allow docker_t self:unix_stream_socket create_stream_socket_perms; -allow docker_t self:tcp_socket create_stream_socket_perms; -allow docker_t self:udp_socket create_socket_perms; -allow docker_t self:capability2 block_suspend; - -docker_auth_stream_connect(docker_t) - -manage_files_pattern(docker_t, docker_home_t, docker_home_t) -manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) -manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) -userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") - -manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) -manage_files_pattern(docker_t, docker_config_t, docker_config_t) -files_etc_filetrans(docker_t, docker_config_t, dir, "docker") - -manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) -manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) -files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") - -manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) -manage_files_pattern(docker_t, docker_log_t, docker_log_t) -manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) -logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) -allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; - -manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -allow docker_t docker_tmpfs_t:dir relabelfrom; -can_exec(docker_t, docker_tmpfs_t) -fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) -allow 
docker_t docker_tmpfs_t:chr_file mounton; - -manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) -manage_files_pattern(docker_t, docker_share_t, docker_share_t) -manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) -allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; - -can_exec(docker_t, docker_share_t) -#docker_filetrans_named_content(docker_t) - -manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; -files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) - -allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; -term_create_pty(docker_t, docker_devpts_t) - -kernel_read_system_state(docker_t) -kernel_read_network_state(docker_t) -kernel_read_all_sysctls(docker_t) -kernel_rw_net_sysctls(docker_t) -kernel_setsched(docker_t) -kernel_read_all_proc(docker_t) - -domain_use_interactive_fds(docker_t) -domain_dontaudit_read_all_domains_state(docker_t) - -corecmd_exec_bin(docker_t) -corecmd_exec_shell(docker_t) - -corenet_tcp_bind_generic_node(docker_t) -corenet_tcp_sendrecv_generic_if(docker_t) -corenet_tcp_sendrecv_generic_node(docker_t) -corenet_tcp_sendrecv_generic_port(docker_t) -corenet_tcp_bind_all_ports(docker_t) -corenet_tcp_connect_http_port(docker_t) -corenet_tcp_connect_commplex_main_port(docker_t) -corenet_udp_sendrecv_generic_if(docker_t) -corenet_udp_sendrecv_generic_node(docker_t) -corenet_udp_sendrecv_all_ports(docker_t) -corenet_udp_bind_generic_node(docker_t) -corenet_udp_bind_all_ports(docker_t) - -files_read_config_files(docker_t) -files_dontaudit_getattr_all_dirs(docker_t) -files_dontaudit_getattr_all_files(docker_t) - -fs_read_cgroup_files(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_search_all(docker_t) -fs_getattr_all_fs(docker_t) - -storage_raw_rw_fixed_disk(docker_t) - -auth_use_nsswitch(docker_t) -auth_dontaudit_getattr_shadow(docker_t) - -init_read_state(docker_t) -init_status(docker_t) - -logging_send_audit_msgs(docker_t) -logging_send_syslog_msg(docker_t) - -miscfiles_read_localization(docker_t) - -mount_domtrans(docker_t) - -seutil_read_default_contexts(docker_t) -seutil_read_config(docker_t) - -sysnet_dns_name_resolve(docker_t) -sysnet_exec_ifconfig(docker_t) - -optional_policy(` - rpm_exec(docker_t) - rpm_read_db(docker_t) - rpm_exec(docker_t) -') - -optional_policy(` - fstools_domtrans(docker_t) -') - -optional_policy(` - iptables_domtrans(docker_t) -') - -optional_policy(` - openvswitch_stream_connect(docker_t) -') - -# -# lxc rules -# - -allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; - -allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; - -allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; -allow docker_t 
self:netlink_audit_socket create_netlink_socket_perms; -allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; -allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; - -allow docker_t docker_var_lib_t:dir mounton; -allow docker_t docker_var_lib_t:chr_file mounton; -can_exec(docker_t, docker_var_lib_t) - -kernel_dontaudit_setsched(docker_t) -kernel_get_sysvipc_info(docker_t) -kernel_request_load_module(docker_t) -kernel_mounton_messages(docker_t) -kernel_mounton_all_proc(docker_t) -kernel_mounton_all_sysctls(docker_t) -kernel_unlabeled_entry_type(spc_t) -kernel_unlabeled_domtrans(docker_t, spc_t) - -dev_getattr_all(docker_t) -dev_getattr_sysfs_fs(docker_t) -dev_read_urand(docker_t) -dev_read_lvm_control(docker_t) -dev_rw_sysfs(docker_t) -dev_rw_loop_control(docker_t) -dev_rw_lvm_control(docker_t) - -files_getattr_isid_type_dirs(docker_t) -files_manage_isid_type_dirs(docker_t) -files_manage_isid_type_files(docker_t) -files_manage_isid_type_symlinks(docker_t) -files_manage_isid_type_chr_files(docker_t) -files_manage_isid_type_blk_files(docker_t) -files_exec_isid_files(docker_t) -files_mounton_isid(docker_t) -files_mounton_non_security(docker_t) -files_mounton_isid_type_chr_file(docker_t) - -fs_mount_all_fs(docker_t) -fs_unmount_all_fs(docker_t) -fs_remount_all_fs(docker_t) -files_mounton_isid(docker_t) -fs_manage_cgroup_dirs(docker_t) -fs_manage_cgroup_files(docker_t) -fs_relabelfrom_xattr_fs(docker_t) -fs_relabelfrom_tmpfs(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_list_hugetlbfs(docker_t) - -term_use_generic_ptys(docker_t) -term_use_ptmx(docker_t) -term_getattr_pty_fs(docker_t) -term_relabel_pty_fs(docker_t) -term_mounton_unallocated_ttys(docker_t) - -modutils_domtrans_insmod(docker_t) - -systemd_status_all_unit_files(docker_t) -systemd_start_systemd_services(docker_t) - -userdom_stream_connect(docker_t) -userdom_search_user_home_content(docker_t) -userdom_read_all_users_state(docker_t) -userdom_relabel_user_home_files(docker_t) -userdom_relabel_user_tmp_files(docker_t) -userdom_relabel_user_tmp_dirs(docker_t) - -optional_policy(` - gpm_getattr_gpmctl(docker_t) -') - -optional_policy(` - dbus_system_bus_client(docker_t) - init_dbus_chat(docker_t) - init_start_transient_unit(docker_t) - - optional_policy(` - systemd_dbus_chat_logind(docker_t) - systemd_dbus_chat_machined(docker_t) - ') - - optional_policy(` - firewalld_dbus_chat(docker_t) - ') -') - -optional_policy(` - udev_read_db(docker_t) -') - -optional_policy(` - unconfined_domain(docker_t) - unconfined_typebounds(docker_t) -') - -optional_policy(` - virt_read_config(docker_t) - virt_exec(docker_t) - virt_stream_connect(docker_t) - virt_stream_connect_sandbox(docker_t) - virt_exec_sandbox_files(docker_t) - virt_manage_sandbox_files(docker_t) - virt_relabel_sandbox_filesystem(docker_t) - # for lxc - virt_transition_svirt_sandbox(docker_t, system_r) - virt_mounton_sandbox_file(docker_t) -# virt_attach_sandbox_tun_iface(docker_t) - allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; - virt_sandbox_entrypoint(docker_t) -') - -tunable_policy(`docker_connect_any',` - corenet_tcp_connect_all_ports(docker_t) - corenet_sendrecv_all_packets(docker_t) - corenet_tcp_sendrecv_all_ports(docker_t) -') - -######################################## -# -# spc local policy -# -allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; -role system_r types spc_t; - -domtrans_pattern(docker_t, docker_share_t, spc_t) -domtrans_pattern(docker_t, docker_var_lib_t, spc_t) -allow docker_t spc_t:process 
{ setsched signal_perms }; -ps_process_pattern(docker_t, spc_t) -allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; -filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") - -optional_policy(` - systemd_dbus_chat_machined(spc_t) -') - -optional_policy(` - dbus_chat_system_bus(spc_t) -') - -optional_policy(` - unconfined_domain_noaudit(spc_t) -') - -optional_policy(` - virt_transition_svirt_sandbox(spc_t, system_r) - virt_sandbox_entrypoint(spc_t) -') - -######################################## -# -# docker_auth local policy -# -allow docker_auth_t self:fifo_file rw_fifo_file_perms; -allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; -dontaudit docker_auth_t self:capability net_admin; - -docker_stream_connect(docker_auth_t) - -manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) - -domain_use_interactive_fds(docker_auth_t) - -kernel_read_net_sysctls(docker_auth_t) - -auth_use_nsswitch(docker_auth_t) - -files_read_etc_files(docker_auth_t) - -miscfiles_read_localization(docker_auth_t) - -sysnet_dns_name_resolve(docker_auth_t) diff --git a/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE b/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE deleted file mode 100644 index d511905c16..0000000000 --- a/contrib/selinux-oraclelinux-7/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. 
You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. 
- - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. 
- -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. 
- - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
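For context on how the docker.if interfaces deleted in this patch were meant to be consumed: a third-party policy module calls them instead of writing allow rules against the docker types directly. A minimal sketch, assuming a hypothetical confined domain myapp_t (the module name myapp_docker and the type myapp_t are placeholders, not part of this patch):

policy_module(myapp_docker, 1.0.0)

gen_require(`
	type myapp_t;
')

# Let myapp_t connect to the daemon's control socket,
# /var/run/docker.sock (labeled docker_var_run_t in docker.fc).
docker_stream_connect(myapp_t)

# Let myapp_t read, but not manage, content under /var/lib/docker,
# which docker.fc labels docker_var_lib_t.
docker_read_lib_files(myapp_t)

Building and loading such a module follows the Makefile removed below: compile with make -f ${SHAREDIR}/selinux/devel/Makefile, then install with semodule -i.
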
diff --git a/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile b/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile deleted file mode 100644 index 16df33ef32..0000000000 --- a/contrib/selinux-oraclelinux-7/docker-engine-selinux/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -TARGETS?=docker -MODULES?=${TARGETS:=.pp.bz2} -SHAREDIR?=/usr/share - -all: ${TARGETS:=.pp.bz2} - -%.pp.bz2: %.pp - @echo Compressing $^ -\> $@ - bzip2 -9 $^ - -%.pp: %.te - make -f ${SHAREDIR}/selinux/devel/Makefile $@ - -clean: - rm -f *~ *.tc *.pp *.pp.bz2 - rm -rf tmp *.tar.gz - -man: install - sepolicy manpage --domain ${TARGETS}_t - -install: - semodule -i ${TARGETS} - diff --git a/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md b/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md deleted file mode 100644 index 7ea3117a89..0000000000 --- a/contrib/selinux-oraclelinux-7/docker-engine-selinux/README.md +++ /dev/null @@ -1 +0,0 @@ -SELinux policy for docker diff --git a/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc b/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc deleted file mode 100644 index 10b7d52a8b..0000000000 --- a/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.fc +++ /dev/null @@ -1,33 +0,0 @@ -/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) - -/usr/bin/docker -- gen_context(system_u:object_r:docker_exec_t,s0) -/usr/bin/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) -/usr/lib/docker/docker-novolume-plugin -- gen_context(system_u:object_r:docker_auth_exec_t,s0) - -/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) -/usr/lib/systemd/system/docker-novolume-plugin.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) - -/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) - -/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) -/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) -/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) - -/var/run/docker(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker/plugins(/.*)? gen_context(system_u:object_r:docker_plugin_var_run_t,s0) - -/var/lock/lxc(/.*)? gen_context(system_u:object_r:docker_lock_t,s0) - -/var/log/lxc(/.*)? gen_context(system_u:object_r:docker_log_t,s0) - -/var/lib/docker/init(/.*)? gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) - -# OL7.2 systemd selinux update -/var/run/systemd/machines(/.*)? gen_context(system_u:object_r:systemd_machined_var_run_t,s0) -/var/lib/machines(/.*)? gen_context(system_u:object_r:systemd_machined_var_lib_t,s0) diff --git a/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if b/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if deleted file mode 100644 index 4780af05f7..0000000000 --- a/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.if +++ /dev/null @@ -1,659 +0,0 @@ - -## The open-source application container engine. 
- -######################################## -## -## Execute docker in the docker domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_domtrans',` - gen_require(` - type docker_t, docker_exec_t; - ') - - corecmd_search_bin($1) - domtrans_pattern($1, docker_exec_t, docker_t) -') - -######################################## -## -## Execute docker in the caller domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_exec',` - gen_require(` - type docker_exec_t; - ') - - corecmd_search_bin($1) - can_exec($1, docker_exec_t) -') - -######################################## -## -## Search docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_search_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - files_search_var_lib($1) -') - -######################################## -## -## Execute docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_exec_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - can_exec($1, docker_var_lib_t) -') - -######################################## -## -## Read docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Read docker share files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_share_files',` - gen_require(` - type docker_share_t; - ') - - files_search_var_lib($1) - list_dirs_pattern($1, docker_share_t, docker_share_t) - read_files_pattern($1, docker_share_t, docker_share_t) - read_lnk_files_pattern($1, docker_share_t, docker_share_t) -') - -###################################### -## -## Allow the specified domain to execute docker shared files -## in the caller domain. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_exec_share_files',` - gen_require(` - type docker_share_t; - ') - - can_exec($1, docker_share_t) -') - -######################################## -## -## Manage docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) - manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Manage docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_dirs',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Create objects in a docker var lib directory -## with an automatic type transition to -## a specified private type. -## -## -## -## Domain allowed access. -## -## -## -## -## The type of the object to create. -## -## -## -## -## The class of the object to be created. -## -## -## -## -## The name of the object being created. -## -## -# -interface(`docker_lib_filetrans',` - gen_require(` - type docker_var_lib_t; - ') - - filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) -') - -######################################## -## -## Read docker PID files. -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_read_pid_files',` - gen_require(` - type docker_var_run_t; - ') - - files_search_pids($1) - read_files_pattern($1, docker_var_run_t, docker_var_run_t) -') - -######################################## -## -## Execute docker server in the docker domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_systemctl',` - gen_require(` - type docker_t; - type docker_unit_file_t; - ') - - systemd_exec_systemctl($1) - init_reload_services($1) - systemd_read_fifo_file_passwd_run($1) - allow $1 docker_unit_file_t:file read_file_perms; - allow $1 docker_unit_file_t:service manage_service_perms; - - ps_process_pattern($1, docker_t) -') - -######################################## -## -## Read and write docker shared memory. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_rw_sem',` - gen_require(` - type docker_t; - ') - - allow $1 docker_t:sem rw_sem_perms; -') - -####################################### -## -## Read and write the docker pty type. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_use_ptys',` - gen_require(` - type docker_devpts_t; - ') - - allow $1 docker_devpts_t:chr_file rw_term_perms; -') - -####################################### -## -## Allow domain to create docker content -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_filetrans_named_content',` - - gen_require(` - type docker_var_lib_t; - type docker_share_t; - type docker_log_t; - type docker_var_run_t; - type docker_home_t; - ') - - files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") - files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") - files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") - logging_log_filetrans($1, docker_log_t, dir, "lxc") - files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") - userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") -') - -######################################## -## -## Connect to docker over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_stream_connect',` - gen_require(` - type docker_t, docker_var_run_t; - ') - - files_search_pids($1) - stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) -') - -######################################## -## -## Connect to SPC containers over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_spc_stream_connect',` - gen_require(` - type spc_t, spc_var_run_t; - ') - - files_search_pids($1) - files_write_all_pid_sockets($1) - allow $1 spc_t:unix_stream_socket connectto; -') - -######################################## -## -## All of the rules required to administrate -## an docker environment -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_admin',` - gen_require(` - type docker_t; - type docker_var_lib_t, docker_var_run_t; - type docker_unit_file_t; - type docker_lock_t; - type docker_log_t; - type docker_config_t; - ') - - allow $1 docker_t:process { ptrace signal_perms }; - ps_process_pattern($1, docker_t) - - admin_pattern($1, docker_config_t) - - files_search_var_lib($1) - admin_pattern($1, docker_var_lib_t) - - files_search_pids($1) - admin_pattern($1, docker_var_run_t) - - files_search_locks($1) - admin_pattern($1, docker_lock_t) - - logging_search_logs($1) - admin_pattern($1, docker_log_t) - - docker_systemctl($1) - admin_pattern($1, docker_unit_file_t) - allow $1 docker_unit_file_t:service all_service_perms; - - optional_policy(` - systemd_passwd_agent_exec($1) - systemd_read_fifo_file_passwd_run($1) - ') -') - -######################################## -## -## Execute docker_auth_exec_t in the docker_auth domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_auth_domtrans',` - gen_require(` - type docker_auth_t, docker_auth_exec_t; - ') - - corecmd_search_bin($1) - domtrans_pattern($1, docker_auth_exec_t, docker_auth_t) -') - -###################################### -## -## Execute docker_auth in the caller domain. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_auth_exec',` - gen_require(` - type docker_auth_exec_t; - ') - - corecmd_search_bin($1) - can_exec($1, docker_auth_exec_t) -') - -######################################## -## -## Connect to docker_auth over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_auth_stream_connect',` - gen_require(` - type docker_auth_t, docker_plugin_var_run_t; - ') - - files_search_pids($1) - stream_connect_pattern($1, docker_plugin_var_run_t, docker_plugin_var_run_t, docker_auth_t) -') - -######################################## -## -## docker domain typebounds calling domain. -## -## -## -## Domain to be typebound. -## -## -# -interface(`docker_typebounds',` - gen_require(` - type docker_t; - ') - - typebounds docker_t $1; -') - -######################################## -## -## Allow any docker_exec_t to be an entrypoint of this domain -## -## -## -## Domain allowed access. -## -## -## -# -interface(`docker_entrypoint',` - gen_require(` - type docker_exec_t; - ') - allow $1 docker_exec_t:file entrypoint; -') - -######################################## -## -## Send and receive messages from -## systemd machined over dbus. -## -## -## -## Domain allowed access. -## -## -# -interface(`systemd_dbus_chat_machined',` - gen_require(` - type systemd_machined_t; - class dbus send_msg; - ') - - allow $1 systemd_machined_t:dbus send_msg; - allow systemd_machined_t $1:dbus send_msg; - ps_process_pattern(systemd_machined_t, $1) -') - -######################################## -## -## Allow any svirt_sandbox_file_t to be an entrypoint of this domain -## -## -## -## Domain allowed access. -## -## -## -# -interface(`virt_sandbox_entrypoint',` - gen_require(` - type svirt_sandbox_file_t; - ') - allow $1 svirt_sandbox_file_t:file entrypoint; -') - -######################################## -## -## Send and receive messages from -## virt over dbus. -## -## -## -## Domain allowed access. 
-## -## -# -interface(`virt_dbus_chat',` - gen_require(` - type virtd_t; - class dbus send_msg; - ') - - allow $1 virtd_t:dbus send_msg; - allow virtd_t $1:dbus send_msg; - ps_process_pattern(virtd_t, $1) -') - -####################################### -## -## Read the process state of virt sandbox containers -## -## -## -## Domain allowed access. -## -## -# -interface(`virt_sandbox_read_state',` - gen_require(` - attribute svirt_sandbox_domain; - ') - - ps_process_pattern($1, svirt_sandbox_domain) -') - -###################################### -## -## Send a signal to sandbox domains -## -## -## -## Domain allowed access. -## -## -# -interface(`virt_signal_sandbox',` - gen_require(` - attribute svirt_sandbox_domain; - ') - - allow $1 svirt_sandbox_domain:process signal; -') - -####################################### -## -## Getattr Sandbox File systems -## -## -## -## Domain allowed access. -## -## -# -interface(`virt_getattr_sandbox_filesystem',` - gen_require(` - type svirt_sandbox_file_t; - ') - - allow $1 svirt_sandbox_file_t:filesystem getattr; -') - -####################################### -## -## Read Sandbox Files -## -## -## -## Domain allowed access. -## -## -# -interface(`virt_read_sandbox_files',` - gen_require(` - type svirt_sandbox_file_t; - ') - - list_dirs_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) - read_files_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) - read_lnk_files_pattern($1, svirt_sandbox_file_t, svirt_sandbox_file_t) -') - -####################################### -## -## Read the process state of spc containers -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_spc_read_state',` - gen_require(` - type spc_t; - ') - - ps_process_pattern($1, spc_t) -') - diff --git a/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te b/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te deleted file mode 100644 index d4de36fe46..0000000000 --- a/contrib/selinux-oraclelinux-7/docker-engine-selinux/docker.te +++ /dev/null @@ -1,465 +0,0 @@ -policy_module(docker, 1.0.0) - -######################################## -# -# Declarations -# - -## -##
-## Determine whether docker can -## connect to all TCP ports. -## -##
-gen_tunable(docker_connect_any, false) - -type docker_t; -type docker_exec_t; -init_daemon_domain(docker_t, docker_exec_t) -domain_subj_id_change_exemption(docker_t) -domain_role_change_exemption(docker_t) - -type spc_t; -domain_type(spc_t) -role system_r types spc_t; - -type docker_auth_t; -type docker_auth_exec_t; -init_daemon_domain(docker_auth_t, docker_auth_exec_t) - -type spc_var_run_t; -files_pid_file(spc_var_run_t) - -type docker_var_lib_t; -files_type(docker_var_lib_t) - -type docker_home_t; -userdom_user_home_content(docker_home_t) - -type docker_config_t; -files_config_file(docker_config_t) - -type docker_lock_t; -files_lock_file(docker_lock_t) - -type docker_log_t; -logging_log_file(docker_log_t) - -type docker_tmp_t; -files_tmp_file(docker_tmp_t) - -type docker_tmpfs_t; -files_tmpfs_file(docker_tmpfs_t) - -type docker_var_run_t; -files_pid_file(docker_var_run_t) - -type docker_plugin_var_run_t; -files_pid_file(docker_plugin_var_run_t) - -type docker_unit_file_t; -systemd_unit_file(docker_unit_file_t) - -type docker_devpts_t; -term_pty(docker_devpts_t) - -type docker_share_t; -files_type(docker_share_t) - -# OL7 systemd selinux update -type systemd_machined_t; -type systemd_machined_exec_t; -init_daemon_domain(systemd_machined_t, systemd_machined_exec_t) - -# /run/systemd/machines -type systemd_machined_var_run_t; -files_pid_file(systemd_machined_var_run_t) - -# /var/lib/machines -type systemd_machined_var_lib_t; -files_type(systemd_machined_var_lib_t) - - -######################################## -# -# docker local policy -# -allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; -allow docker_t self:tun_socket relabelto; -allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; -allow docker_t self:fifo_file rw_fifo_file_perms; -allow docker_t self:unix_stream_socket create_stream_socket_perms; -allow docker_t self:tcp_socket create_stream_socket_perms; -allow docker_t self:udp_socket create_socket_perms; -allow docker_t self:capability2 block_suspend; - -docker_auth_stream_connect(docker_t) - -manage_files_pattern(docker_t, docker_home_t, docker_home_t) -manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) -manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) -userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") - -manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) -manage_files_pattern(docker_t, docker_config_t, docker_config_t) -files_etc_filetrans(docker_t, docker_config_t, dir, "docker") - -manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) -manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) -files_lock_filetrans(docker_t, docker_lock_t, { dir file }, "lxc") - -manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) -manage_files_pattern(docker_t, docker_log_t, docker_log_t) -manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) -logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) -allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; - -manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) 
-manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -allow docker_t docker_tmpfs_t:dir relabelfrom; -can_exec(docker_t, docker_tmpfs_t) -fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) -allow docker_t docker_tmpfs_t:chr_file mounton; - -manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) -manage_files_pattern(docker_t, docker_share_t, docker_share_t) -manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) -allow docker_t docker_share_t:dir_file_class_set { relabelfrom relabelto }; - -can_exec(docker_t, docker_share_t) -#docker_filetrans_named_content(docker_t) - -manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; -files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) - -allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; -term_create_pty(docker_t, docker_devpts_t) - -kernel_read_system_state(docker_t) -kernel_read_network_state(docker_t) -kernel_read_all_sysctls(docker_t) -kernel_rw_net_sysctls(docker_t) -kernel_setsched(docker_t) -kernel_read_all_proc(docker_t) - -domain_use_interactive_fds(docker_t) -domain_dontaudit_read_all_domains_state(docker_t) - -corecmd_exec_bin(docker_t) -corecmd_exec_shell(docker_t) - -corenet_tcp_bind_generic_node(docker_t) -corenet_tcp_sendrecv_generic_if(docker_t) -corenet_tcp_sendrecv_generic_node(docker_t) -corenet_tcp_sendrecv_generic_port(docker_t) -corenet_tcp_bind_all_ports(docker_t) -corenet_tcp_connect_http_port(docker_t) -corenet_tcp_connect_commplex_main_port(docker_t) -corenet_udp_sendrecv_generic_if(docker_t) -corenet_udp_sendrecv_generic_node(docker_t) -corenet_udp_sendrecv_all_ports(docker_t) -corenet_udp_bind_generic_node(docker_t) -corenet_udp_bind_all_ports(docker_t) - -files_read_config_files(docker_t) -files_dontaudit_getattr_all_dirs(docker_t) -files_dontaudit_getattr_all_files(docker_t) - -fs_read_cgroup_files(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_search_all(docker_t) -fs_getattr_all_fs(docker_t) - -storage_raw_rw_fixed_disk(docker_t) - -auth_use_nsswitch(docker_t) -auth_dontaudit_getattr_shadow(docker_t) - -init_read_state(docker_t) -init_status(docker_t) - -logging_send_audit_msgs(docker_t) -logging_send_syslog_msg(docker_t) - -miscfiles_read_localization(docker_t) - -mount_domtrans(docker_t) - -seutil_read_default_contexts(docker_t) -seutil_read_config(docker_t) - -sysnet_dns_name_resolve(docker_t) -sysnet_exec_ifconfig(docker_t) - -optional_policy(` - rpm_exec(docker_t) - rpm_read_db(docker_t) - rpm_exec(docker_t) -') - -optional_policy(` - fstools_domtrans(docker_t) -') - -optional_policy(` - iptables_domtrans(docker_t) -') - -optional_policy(` - 
openvswitch_stream_connect(docker_t) -') - -# -# lxc rules -# - -allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; - -allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; - -allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; -allow docker_t self:netlink_audit_socket create_netlink_socket_perms; -allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; -allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; - -allow docker_t docker_var_lib_t:dir mounton; -allow docker_t docker_var_lib_t:chr_file mounton; -can_exec(docker_t, docker_var_lib_t) - -kernel_dontaudit_setsched(docker_t) -kernel_get_sysvipc_info(docker_t) -kernel_request_load_module(docker_t) -kernel_mounton_messages(docker_t) -kernel_mounton_all_proc(docker_t) -kernel_mounton_all_sysctls(docker_t) -kernel_unlabeled_entry_type(spc_t) -kernel_unlabeled_domtrans(docker_t, spc_t) - -dev_getattr_all(docker_t) -dev_getattr_sysfs_fs(docker_t) -dev_read_urand(docker_t) -dev_read_lvm_control(docker_t) -dev_rw_sysfs(docker_t) -dev_rw_loop_control(docker_t) -dev_rw_lvm_control(docker_t) - -files_getattr_isid_type_dirs(docker_t) -files_manage_isid_type_dirs(docker_t) -files_manage_isid_type_files(docker_t) -files_manage_isid_type_symlinks(docker_t) -files_manage_isid_type_chr_files(docker_t) -files_manage_isid_type_blk_files(docker_t) -files_exec_isid_files(docker_t) -files_mounton_isid(docker_t) -files_mounton_non_security(docker_t) -files_mounton_isid_type_chr_file(docker_t) - -fs_mount_all_fs(docker_t) -fs_unmount_all_fs(docker_t) -fs_remount_all_fs(docker_t) -files_mounton_isid(docker_t) -fs_manage_cgroup_dirs(docker_t) -fs_manage_cgroup_files(docker_t) -fs_relabelfrom_xattr_fs(docker_t) -fs_relabelfrom_tmpfs(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_list_hugetlbfs(docker_t) - -term_use_generic_ptys(docker_t) -term_use_ptmx(docker_t) -term_getattr_pty_fs(docker_t) -term_relabel_pty_fs(docker_t) -term_mounton_unallocated_ttys(docker_t) - -modutils_domtrans_insmod(docker_t) - -systemd_status_all_unit_files(docker_t) -systemd_start_systemd_services(docker_t) - -userdom_stream_connect(docker_t) -userdom_search_user_home_content(docker_t) -userdom_read_all_users_state(docker_t) -userdom_relabel_user_home_files(docker_t) -userdom_relabel_user_tmp_files(docker_t) -userdom_relabel_user_tmp_dirs(docker_t) - -optional_policy(` - gpm_getattr_gpmctl(docker_t) -') - -optional_policy(` - dbus_system_bus_client(docker_t) - init_dbus_chat(docker_t) - init_start_transient_unit(docker_t) - - optional_policy(` - systemd_dbus_chat_logind(docker_t) - systemd_dbus_chat_machined(docker_t) - ') - - optional_policy(` - firewalld_dbus_chat(docker_t) - ') -') - -optional_policy(` - udev_read_db(docker_t) -') - -optional_policy(` - unconfined_domain(docker_t) - # unconfined_typebounds(docker_t) -') - -optional_policy(` - virt_read_config(docker_t) - virt_exec(docker_t) - virt_stream_connect(docker_t) - virt_stream_connect_sandbox(docker_t) - virt_exec_sandbox_files(docker_t) - virt_manage_sandbox_files(docker_t) - virt_relabel_sandbox_filesystem(docker_t) - # for lxc - virt_transition_svirt_sandbox(docker_t, system_r) - virt_mounton_sandbox_file(docker_t) -# virt_attach_sandbox_tun_iface(docker_t) - allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; - virt_sandbox_entrypoint(docker_t) -') - -tunable_policy(`docker_connect_any',` - corenet_tcp_connect_all_ports(docker_t) - 
corenet_sendrecv_all_packets(docker_t) - corenet_tcp_sendrecv_all_ports(docker_t) -') - -######################################## -# -# spc local policy -# -allow spc_t { docker_var_lib_t docker_share_t }:file entrypoint; -role system_r types spc_t; - -domtrans_pattern(docker_t, docker_share_t, spc_t) -domtrans_pattern(docker_t, docker_var_lib_t, spc_t) -allow docker_t spc_t:process { setsched signal_perms }; -ps_process_pattern(docker_t, spc_t) -allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; -filetrans_pattern(docker_t, docker_var_lib_t, docker_share_t, dir, "overlay") - -optional_policy(` - systemd_dbus_chat_machined(spc_t) -') - -optional_policy(` - dbus_chat_system_bus(spc_t) -') - -optional_policy(` - unconfined_domain_noaudit(spc_t) -') - -optional_policy(` - virt_transition_svirt_sandbox(spc_t, system_r) - virt_sandbox_entrypoint(spc_t) -') - -######################################## -# -# docker_auth local policy -# -allow docker_auth_t self:fifo_file rw_fifo_file_perms; -allow docker_auth_t self:unix_stream_socket create_stream_socket_perms; -dontaudit docker_auth_t self:capability net_admin; - -docker_stream_connect(docker_auth_t) - -manage_dirs_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_sock_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -manage_lnk_files_pattern(docker_auth_t, docker_plugin_var_run_t, docker_plugin_var_run_t) -files_pid_filetrans(docker_auth_t, docker_plugin_var_run_t, { dir file lnk_file sock_file }) - -domain_use_interactive_fds(docker_auth_t) - -kernel_read_net_sysctls(docker_auth_t) - -auth_use_nsswitch(docker_auth_t) - -files_read_etc_files(docker_auth_t) - -miscfiles_read_localization(docker_auth_t) - -sysnet_dns_name_resolve(docker_auth_t) - -######################################## -# -# OL7.2 systemd selinux update -# systemd_machined local policy -# -allow systemd_machined_t self:capability { dac_override setgid sys_admin sys_chroot sys_ptrace }; -allow systemd_machined_t systemd_unit_file_t:service { status start }; -allow systemd_machined_t self:unix_dgram_socket create_socket_perms; - -manage_dirs_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) -manage_files_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) -manage_lnk_files_pattern(systemd_machined_t, systemd_machined_var_run_t, systemd_machined_var_run_t) -init_pid_filetrans(systemd_machined_t, systemd_machined_var_run_t, dir, "machines") - -manage_dirs_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) -manage_files_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) -manage_lnk_files_pattern(systemd_machined_t, systemd_machined_var_lib_t, systemd_machined_var_lib_t) -init_var_lib_filetrans(systemd_machined_t, systemd_machined_var_lib_t, dir, "machines") - -kernel_dgram_send(systemd_machined_t) -# This is a bug, but need for now. 
-kernel_read_unlabeled_state(systemd_machined_t) - -init_dbus_chat(systemd_machined_t) -init_status(systemd_machined_t) - -userdom_dbus_send_all_users(systemd_machined_t) - -term_use_ptmx(systemd_machined_t) - -optional_policy(` - dbus_connect_system_bus(systemd_machined_t) - dbus_system_bus_client(systemd_machined_t) -') - -optional_policy(` - docker_read_share_files(systemd_machined_t) - docker_spc_read_state(systemd_machined_t) -') - -optional_policy(` - virt_dbus_chat(systemd_machined_t) - virt_sandbox_read_state(systemd_machined_t) - virt_signal_sandbox(systemd_machined_t) - virt_stream_connect_sandbox(systemd_machined_t) - virt_rw_svirt_dev(systemd_machined_t) - virt_getattr_sandbox_filesystem(systemd_machined_t) - virt_read_sandbox_files(systemd_machined_t) -') - - diff --git a/contrib/selinux/docker-engine-selinux/LICENSE b/contrib/selinux/docker-engine-selinux/LICENSE deleted file mode 100644 index 5b6e7c66c2..0000000000 --- a/contrib/selinux/docker-engine-selinux/LICENSE +++ /dev/null @@ -1,340 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc. - 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Library General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. -These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. 
We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. - - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. 
If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. - -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. 
-However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. -You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. 
Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. 
- - You should have received a copy of the GNU General Public License - along with this program; if not, write to the Free Software - Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA - - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Library General -Public License instead of this License. diff --git a/contrib/selinux/docker-engine-selinux/Makefile b/contrib/selinux/docker-engine-selinux/Makefile deleted file mode 100644 index 1bdc695afe..0000000000 --- a/contrib/selinux/docker-engine-selinux/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -TARGETS?=docker -MODULES?=${TARGETS:=.pp.bz2} -SHAREDIR?=/usr/share - -all: ${TARGETS:=.pp.bz2} - -%.pp.bz2: %.pp - @echo Compressing $^ -\> $@ - bzip2 -9 $^ - -%.pp: %.te - make -f ${SHAREDIR}/selinux/devel/Makefile $@ - -clean: - rm -f *~ *.tc *.pp *.pp.bz2 - rm -rf tmp *.tar.gz diff --git a/contrib/selinux/docker-engine-selinux/docker.fc b/contrib/selinux/docker-engine-selinux/docker.fc deleted file mode 100644 index fe9c58a4ae..0000000000 --- a/contrib/selinux/docker-engine-selinux/docker.fc +++ /dev/null @@ -1,20 +0,0 @@ -/root/\.docker gen_context(system_u:object_r:docker_home_t,s0) - -/usr/bin/dockerd -- gen_context(system_u:object_r:docker_exec_t,s0) - -/usr/lib/systemd/system/docker.service -- gen_context(system_u:object_r:docker_unit_file_t,s0) - -/etc/docker(/.*)? gen_context(system_u:object_r:docker_config_t,s0) - -/var/lib/docker(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) -/var/lib/kublet(/.*)? gen_context(system_u:object_r:docker_var_lib_t,s0) -/var/lib/docker/vfs(/.*)? gen_context(system_u:object_r:svirt_sandbox_file_t,s0) - -/var/run/docker\.pid -- gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker\.sock -s gen_context(system_u:object_r:docker_var_run_t,s0) -/var/run/docker-client(/.*)? gen_context(system_u:object_r:docker_var_run_t,s0) - -/var/lib/docker/init(/.*)? 
gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hosts gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/containers/.*/hostname gen_context(system_u:object_r:docker_share_t,s0) -/var/lib/docker/.*/config\.env gen_context(system_u:object_r:docker_share_t,s0) diff --git a/contrib/selinux/docker-engine-selinux/docker.if b/contrib/selinux/docker-engine-selinux/docker.if deleted file mode 100644 index ca075c05c5..0000000000 --- a/contrib/selinux/docker-engine-selinux/docker.if +++ /dev/null @@ -1,461 +0,0 @@ - -## The open-source application container engine. - -######################################## -## -## Execute docker in the docker domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_domtrans',` - gen_require(` - type docker_t, docker_exec_t; - ') - - corecmd_search_bin($1) - domtrans_pattern($1, docker_exec_t, docker_t) -') - -######################################## -## -## Execute docker in the caller domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_exec',` - gen_require(` - type docker_exec_t; - ') - - corecmd_search_bin($1) - can_exec($1, docker_exec_t) -') - -######################################## -## -## Search docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_search_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - files_search_var_lib($1) -') - -######################################## -## -## Execute docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_exec_lib',` - gen_require(` - type docker_var_lib_t; - ') - - allow $1 docker_var_lib_t:dir search_dir_perms; - can_exec($1, docker_var_lib_t) -') - -######################################## -## -## Read docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - read_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Read docker share files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_share_files',` - gen_require(` - type docker_share_t; - ') - - files_search_var_lib($1) - read_files_pattern($1, docker_share_t, docker_share_t) -') - -######################################## -## -## Manage docker lib files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_files',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_files_pattern($1, docker_var_lib_t, docker_var_lib_t) - manage_lnk_files_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Manage docker lib directories. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_manage_lib_dirs',` - gen_require(` - type docker_var_lib_t; - ') - - files_search_var_lib($1) - manage_dirs_pattern($1, docker_var_lib_t, docker_var_lib_t) -') - -######################################## -## -## Create objects in a docker var lib directory -## with an automatic type transition to -## a specified private type. -## -## -## -## Domain allowed access. -## -## -## -## -## The type of the object to create. -## -## -## -## -## The class of the object to be created. -## -## -## -## -## The name of the object being created. 
-## -## -# -interface(`docker_lib_filetrans',` - gen_require(` - type docker_var_lib_t; - ') - - filetrans_pattern($1, docker_var_lib_t, $2, $3, $4) -') - -######################################## -## -## Read docker PID files. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_read_pid_files',` - gen_require(` - type docker_var_run_t; - ') - - files_search_pids($1) - read_files_pattern($1, docker_var_run_t, docker_var_run_t) -') - -######################################## -## -## Execute docker server in the docker domain. -## -## -## -## Domain allowed to transition. -## -## -# -interface(`docker_systemctl',` - gen_require(` - type docker_t; - type docker_unit_file_t; - ') - - systemd_exec_systemctl($1) - init_reload_services($1) - systemd_read_fifo_file_passwd_run($1) - allow $1 docker_unit_file_t:file read_file_perms; - allow $1 docker_unit_file_t:service manage_service_perms; - - ps_process_pattern($1, docker_t) -') - -######################################## -## -## Read and write docker shared memory. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_rw_sem',` - gen_require(` - type docker_t; - ') - - allow $1 docker_t:sem rw_sem_perms; -') - -####################################### -## -## Read and write the docker pty type. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_use_ptys',` - gen_require(` - type docker_devpts_t; - ') - - allow $1 docker_devpts_t:chr_file rw_term_perms; -') - -####################################### -## -## Allow domain to create docker content -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_filetrans_named_content',` - - gen_require(` - type docker_var_lib_t; - type docker_share_t; - type docker_log_t; - type docker_var_run_t; - type docker_home_t; - ') - - files_pid_filetrans($1, docker_var_run_t, file, "docker.pid") - files_pid_filetrans($1, docker_var_run_t, sock_file, "docker.sock") - files_pid_filetrans($1, docker_var_run_t, dir, "docker-client") - files_var_lib_filetrans($1, docker_var_lib_t, dir, "docker") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "config.env") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hosts") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "hostname") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, file, "resolv.conf") - filetrans_pattern($1, docker_var_lib_t, docker_share_t, dir, "init") - userdom_admin_home_dir_filetrans($1, docker_home_t, dir, ".docker") -') - -######################################## -## -## Connect to docker over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_stream_connect',` - gen_require(` - type docker_t, docker_var_run_t; - ') - - files_search_pids($1) - stream_connect_pattern($1, docker_var_run_t, docker_var_run_t, docker_t) -') - -######################################## -## -## Connect to SPC containers over a unix stream socket. -## -## -## -## Domain allowed access. -## -## -# -interface(`docker_spc_stream_connect',` - gen_require(` - type spc_t, spc_var_run_t; - ') - - files_search_pids($1) - files_write_all_pid_sockets($1) - allow $1 spc_t:unix_stream_socket connectto; -') - - -######################################## -## -## All of the rules required to administrate -## an docker environment -## -## -## -## Domain allowed access. 
-## -## -# -interface(`docker_admin',` - gen_require(` - type docker_t; - type docker_var_lib_t, docker_var_run_t; - type docker_unit_file_t; - type docker_lock_t; - type docker_log_t; - type docker_config_t; - ') - - allow $1 docker_t:process { ptrace signal_perms }; - ps_process_pattern($1, docker_t) - - admin_pattern($1, docker_config_t) - - files_search_var_lib($1) - admin_pattern($1, docker_var_lib_t) - - files_search_pids($1) - admin_pattern($1, docker_var_run_t) - - files_search_locks($1) - admin_pattern($1, docker_lock_t) - - logging_search_logs($1) - admin_pattern($1, docker_log_t) - - docker_systemctl($1) - admin_pattern($1, docker_unit_file_t) - allow $1 docker_unit_file_t:service all_service_perms; - - optional_policy(` - systemd_passwd_agent_exec($1) - systemd_read_fifo_file_passwd_run($1) - ') -') - -interface(`domain_stub_named_filetrans_domain',` - gen_require(` - attribute named_filetrans_domain; - ') -') - -interface(`lvm_stub',` - gen_require(` - type lvm_t; - ') -') -interface(`staff_stub',` - gen_require(` - type staff_t; - ') -') -interface(`virt_stub_svirt_sandbox_domain',` - gen_require(` - attribute svirt_sandbox_domain; - ') -') -interface(`virt_stub_svirt_sandbox_file',` - gen_require(` - type svirt_sandbox_file_t; - ') -') -interface(`fs_dontaudit_remount_tmpfs',` - gen_require(` - type tmpfs_t; - ') - - dontaudit $1 tmpfs_t:filesystem remount; -') -interface(`dev_dontaudit_list_all_dev_nodes',` - gen_require(` - type device_t; - ') - - dontaudit $1 device_t:dir list_dir_perms; -') -interface(`kernel_unlabeled_entry_type',` - gen_require(` - type unlabeled_t; - ') - - domain_entry_file($1, unlabeled_t) -') -interface(`kernel_unlabeled_domtrans',` - gen_require(` - type unlabeled_t; - ') - - read_lnk_files_pattern($1, unlabeled_t, unlabeled_t) - domain_transition_pattern($1, unlabeled_t, $2) - type_transition $1 unlabeled_t:process $2; -') -interface(`files_write_all_pid_sockets',` - gen_require(` - attribute pidfile; - ') - - allow $1 pidfile:sock_file write_sock_file_perms; -') -interface(`dev_dontaudit_mounton_sysfs',` - gen_require(` - type sysfs_t; - ') - - dontaudit $1 sysfs_t:dir mounton; -') diff --git a/contrib/selinux/docker-engine-selinux/docker.te b/contrib/selinux/docker-engine-selinux/docker.te deleted file mode 100644 index 999742f302..0000000000 --- a/contrib/selinux/docker-engine-selinux/docker.te +++ /dev/null @@ -1,414 +0,0 @@ -policy_module(docker, 1.0.0) - -######################################## -# -# Declarations -# - -## -##
-## Allow sandbox containers to manage fuse files
-##
-##
-gen_tunable(virt_sandbox_use_fusefs, false) - -## -##
-## Determine whether docker can
-## connect to all TCP ports.
-##
-##
-gen_tunable(docker_connect_any, false) - -type docker_t; -type docker_exec_t; -init_daemon_domain(docker_t, docker_exec_t) -domain_subj_id_change_exemption(docker_t) -domain_role_change_exemption(docker_t) - -type spc_t; -domain_type(spc_t) -role system_r types spc_t; - -type spc_var_run_t; -files_pid_file(spc_var_run_t) - -type docker_var_lib_t; -files_type(docker_var_lib_t) - -type docker_home_t; -userdom_user_home_content(docker_home_t) - -type docker_config_t; -files_config_file(docker_config_t) - -type docker_lock_t; -files_lock_file(docker_lock_t) - -type docker_log_t; -logging_log_file(docker_log_t) - -type docker_tmp_t; -files_tmp_file(docker_tmp_t) - -type docker_tmpfs_t; -files_tmpfs_file(docker_tmpfs_t) - -type docker_var_run_t; -files_pid_file(docker_var_run_t) - -type docker_unit_file_t; -systemd_unit_file(docker_unit_file_t) - -type docker_devpts_t; -term_pty(docker_devpts_t) - -type docker_share_t; -files_type(docker_share_t) - -######################################## -# -# docker local policy -# -allow docker_t self:capability { chown kill fowner fsetid mknod net_admin net_bind_service net_raw setfcap }; -allow docker_t self:tun_socket relabelto; -allow docker_t self:process { getattr signal_perms setrlimit setfscreate }; -allow docker_t self:fifo_file rw_fifo_file_perms; -allow docker_t self:unix_stream_socket create_stream_socket_perms; -allow docker_t self:tcp_socket create_stream_socket_perms; -allow docker_t self:udp_socket create_socket_perms; -allow docker_t self:capability2 block_suspend; - -manage_files_pattern(docker_t, docker_home_t, docker_home_t) -manage_dirs_pattern(docker_t, docker_home_t, docker_home_t) -manage_lnk_files_pattern(docker_t, docker_home_t, docker_home_t) -userdom_admin_home_dir_filetrans(docker_t, docker_home_t, dir, ".docker") - -manage_dirs_pattern(docker_t, docker_config_t, docker_config_t) -manage_files_pattern(docker_t, docker_config_t, docker_config_t) -files_etc_filetrans(docker_t, docker_config_t, dir, "docker") - -manage_dirs_pattern(docker_t, docker_lock_t, docker_lock_t) -manage_files_pattern(docker_t, docker_lock_t, docker_lock_t) - -manage_dirs_pattern(docker_t, docker_log_t, docker_log_t) -manage_files_pattern(docker_t, docker_log_t, docker_log_t) -manage_lnk_files_pattern(docker_t, docker_log_t, docker_log_t) -logging_log_filetrans(docker_t, docker_log_t, { dir file lnk_file }) -allow docker_t docker_log_t:dir_file_class_set { relabelfrom relabelto }; - -manage_dirs_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -manage_lnk_files_pattern(docker_t, docker_tmp_t, docker_tmp_t) -files_tmp_filetrans(docker_t, docker_tmp_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_lnk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_fifo_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_chr_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -manage_blk_files_pattern(docker_t, docker_tmpfs_t, docker_tmpfs_t) -allow docker_t docker_tmpfs_t:dir relabelfrom; -can_exec(docker_t, docker_tmpfs_t) -fs_tmpfs_filetrans(docker_t, docker_tmpfs_t, { dir file }) -allow docker_t docker_tmpfs_t:chr_file mounton; - -manage_dirs_pattern(docker_t, docker_share_t, docker_share_t) -manage_files_pattern(docker_t, docker_share_t, docker_share_t) -manage_lnk_files_pattern(docker_t, docker_share_t, docker_share_t) -allow docker_t docker_share_t:dir_file_class_set { 
relabelfrom relabelto }; - -can_exec(docker_t, docker_share_t) -#docker_filetrans_named_content(docker_t) - -manage_dirs_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_chr_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_blk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -manage_lnk_files_pattern(docker_t, docker_var_lib_t, docker_var_lib_t) -allow docker_t docker_var_lib_t:dir_file_class_set { relabelfrom relabelto }; -files_var_lib_filetrans(docker_t, docker_var_lib_t, { dir file lnk_file }) - -manage_dirs_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_sock_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -manage_lnk_files_pattern(docker_t, docker_var_run_t, docker_var_run_t) -files_pid_filetrans(docker_t, docker_var_run_t, { dir file lnk_file sock_file }) - -allow docker_t docker_devpts_t:chr_file { relabelfrom rw_chr_file_perms setattr_chr_file_perms }; -term_create_pty(docker_t, docker_devpts_t) - -kernel_read_system_state(docker_t) -kernel_read_network_state(docker_t) -kernel_read_all_sysctls(docker_t) -kernel_rw_net_sysctls(docker_t) -kernel_setsched(docker_t) -kernel_read_all_proc(docker_t) - -domain_use_interactive_fds(docker_t) -domain_dontaudit_read_all_domains_state(docker_t) - -corecmd_exec_bin(docker_t) -corecmd_exec_shell(docker_t) - -corenet_tcp_bind_generic_node(docker_t) -corenet_tcp_sendrecv_generic_if(docker_t) -corenet_tcp_sendrecv_generic_node(docker_t) -corenet_tcp_sendrecv_generic_port(docker_t) -corenet_tcp_bind_all_ports(docker_t) -corenet_tcp_connect_http_port(docker_t) -corenet_tcp_connect_commplex_main_port(docker_t) -corenet_udp_sendrecv_generic_if(docker_t) -corenet_udp_sendrecv_generic_node(docker_t) -corenet_udp_sendrecv_all_ports(docker_t) -corenet_udp_bind_generic_node(docker_t) -corenet_udp_bind_all_ports(docker_t) - -files_read_config_files(docker_t) -files_dontaudit_getattr_all_dirs(docker_t) -files_dontaudit_getattr_all_files(docker_t) - -fs_read_cgroup_files(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_search_all(docker_t) -fs_getattr_all_fs(docker_t) - -storage_raw_rw_fixed_disk(docker_t) - -auth_use_nsswitch(docker_t) -auth_dontaudit_getattr_shadow(docker_t) - -init_read_state(docker_t) -init_status(docker_t) - -logging_send_audit_msgs(docker_t) -logging_send_syslog_msg(docker_t) - -miscfiles_read_localization(docker_t) - -mount_domtrans(docker_t) - -seutil_read_default_contexts(docker_t) -seutil_read_config(docker_t) - -sysnet_dns_name_resolve(docker_t) -sysnet_exec_ifconfig(docker_t) - -optional_policy(` - rpm_exec(docker_t) - rpm_read_db(docker_t) - rpm_exec(docker_t) -') - -optional_policy(` - fstools_domtrans(docker_t) -') - -optional_policy(` - iptables_domtrans(docker_t) -') - -optional_policy(` - openvswitch_stream_connect(docker_t) -') - -allow docker_t self:capability { dac_override setgid setpcap setuid sys_admin sys_boot sys_chroot sys_ptrace }; - -allow docker_t self:process { getcap setcap setexec setpgid setsched signal_perms }; - -allow docker_t self:netlink_route_socket rw_netlink_socket_perms;; -allow docker_t self:netlink_audit_socket create_netlink_socket_perms; -allow docker_t self:unix_dgram_socket { create_socket_perms sendto }; -allow docker_t self:unix_stream_socket { create_stream_socket_perms connectto }; - -allow docker_t docker_var_lib_t:dir mounton; -allow docker_t docker_var_lib_t:chr_file mounton; 
-can_exec(docker_t, docker_var_lib_t) - -kernel_dontaudit_setsched(docker_t) -kernel_get_sysvipc_info(docker_t) -kernel_request_load_module(docker_t) -kernel_mounton_messages(docker_t) -kernel_mounton_all_proc(docker_t) -kernel_mounton_all_sysctls(docker_t) -kernel_unlabeled_entry_type(spc_t) -kernel_unlabeled_domtrans(docker_t, spc_t) - -dev_getattr_all(docker_t) -dev_getattr_sysfs_fs(docker_t) -dev_read_urand(docker_t) -dev_read_lvm_control(docker_t) -dev_rw_sysfs(docker_t) -dev_rw_loop_control(docker_t) -dev_rw_lvm_control(docker_t) - -files_getattr_isid_type_dirs(docker_t) -files_manage_isid_type_dirs(docker_t) -files_manage_isid_type_files(docker_t) -files_manage_isid_type_symlinks(docker_t) -files_manage_isid_type_chr_files(docker_t) -files_manage_isid_type_blk_files(docker_t) -files_exec_isid_files(docker_t) -files_mounton_isid(docker_t) -files_mounton_non_security(docker_t) -files_mounton_isid_type_chr_file(docker_t) - -fs_mount_all_fs(docker_t) -fs_unmount_all_fs(docker_t) -fs_remount_all_fs(docker_t) -files_mounton_isid(docker_t) -fs_manage_cgroup_dirs(docker_t) -fs_manage_cgroup_files(docker_t) -fs_relabelfrom_xattr_fs(docker_t) -fs_relabelfrom_tmpfs(docker_t) -fs_read_tmpfs_symlinks(docker_t) -fs_list_hugetlbfs(docker_t) - -term_use_generic_ptys(docker_t) -term_use_ptmx(docker_t) -term_getattr_pty_fs(docker_t) -term_relabel_pty_fs(docker_t) -term_mounton_unallocated_ttys(docker_t) - -modutils_domtrans_insmod(docker_t) - -systemd_status_all_unit_files(docker_t) -systemd_start_systemd_services(docker_t) - -userdom_stream_connect(docker_t) -userdom_search_user_home_content(docker_t) -userdom_read_all_users_state(docker_t) -userdom_relabel_user_home_files(docker_t) -userdom_relabel_user_tmp_files(docker_t) -userdom_relabel_user_tmp_dirs(docker_t) - -optional_policy(` - gpm_getattr_gpmctl(docker_t) -') - -optional_policy(` - dbus_system_bus_client(docker_t) - init_dbus_chat(docker_t) - init_start_transient_unit(docker_t) - - optional_policy(` - systemd_dbus_chat_logind(docker_t) - ') - - optional_policy(` - firewalld_dbus_chat(docker_t) - ') -') - -optional_policy(` - udev_read_db(docker_t) -') - -optional_policy(` - virt_read_config(docker_t) - virt_exec(docker_t) - virt_stream_connect(docker_t) - virt_stream_connect_sandbox(docker_t) - virt_exec_sandbox_files(docker_t) - virt_manage_sandbox_files(docker_t) - virt_relabel_sandbox_filesystem(docker_t) - virt_transition_svirt_sandbox(docker_t, system_r) - virt_mounton_sandbox_file(docker_t) -# virt_attach_sandbox_tun_iface(docker_t) - allow docker_t svirt_sandbox_domain:tun_socket relabelfrom; -') - -tunable_policy(`docker_connect_any',` - corenet_tcp_connect_all_ports(docker_t) - corenet_sendrecv_all_packets(docker_t) - corenet_tcp_sendrecv_all_ports(docker_t) -') - -######################################## -# -# spc local policy -# -domain_entry_file(spc_t, docker_share_t) -domain_entry_file(spc_t, docker_var_lib_t) -role system_r types spc_t; - -domain_entry_file(spc_t, docker_share_t) -domain_entry_file(spc_t, docker_var_lib_t) -domtrans_pattern(docker_t, docker_share_t, spc_t) -domtrans_pattern(docker_t, docker_var_lib_t, spc_t) -allow docker_t spc_t:process { setsched signal_perms }; -ps_process_pattern(docker_t, spc_t) -allow docker_t spc_t:socket_class_set { relabelto relabelfrom }; - -optional_policy(` - dbus_chat_system_bus(spc_t) -') - -optional_policy(` - unconfined_domain_noaudit(spc_t) -') - -optional_policy(` - unconfined_domain(docker_t) -') - -optional_policy(` - virt_transition_svirt_sandbox(spc_t, system_r) -') - 
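-# Usage note (a sketch, not part of the upstream policy): the interfaces
-# defined in docker.if above are the intended entry points for other
-# modules. A minimal, hypothetical consumer module that lets a made-up
-# myapp_t domain talk to the Docker control socket could look like:
-#
-#   policy_module(myapp_docker, 1.0.0)
-#   gen_require(`
-#       type myapp_t;
-#   ')
-#   docker_stream_connect(myapp_t)
-#   docker_read_lib_files(myapp_t)
-#
-# The docker_connect_any boolean declared above takes effect at runtime
-# via `setsebool -P docker_connect_any on`.
-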
-######################################## -# -# docker upstream policy -# - -optional_policy(` -# domain_stub_named_filetrans_domain() - gen_require(` - attribute named_filetrans_domain; - ') - - docker_filetrans_named_content(named_filetrans_domain) -') - -optional_policy(` - lvm_stub() - docker_rw_sem(lvm_t) -') - -optional_policy(` - staff_stub() - docker_stream_connect(staff_t) - docker_exec(staff_t) -') - -optional_policy(` - virt_stub_svirt_sandbox_domain() - virt_stub_svirt_sandbox_file() - allow svirt_sandbox_domain self:netlink_kobject_uevent_socket create_socket_perms; - docker_read_share_files(svirt_sandbox_domain) - docker_lib_filetrans(svirt_sandbox_domain,svirt_sandbox_file_t, sock_file) - docker_use_ptys(svirt_sandbox_domain) - docker_spc_stream_connect(svirt_sandbox_domain) - fs_list_tmpfs(svirt_sandbox_domain) - fs_rw_hugetlbfs_files(svirt_sandbox_domain) - fs_dontaudit_remount_tmpfs(svirt_sandbox_domain) - dev_dontaudit_mounton_sysfs(svirt_sandbox_domain) - - tunable_policy(`virt_sandbox_use_fusefs',` - fs_manage_fusefs_dirs(svirt_sandbox_domain) - fs_manage_fusefs_files(svirt_sandbox_domain) - fs_manage_fusefs_symlinks(svirt_sandbox_domain) - ') - gen_require(` - attribute domain; - ') - - dontaudit svirt_sandbox_domain domain:key {search link}; -') - -optional_policy(` - gen_require(` - type pcp_pmcd_t; - ') - docker_manage_lib_files(pcp_pmcd_t) -') diff --git a/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz b/contrib/selinux/docker-engine-selinux/docker_selinux.8.gz deleted file mode 100644 index ab5d59445ac1601ca378aaa3e71fb9cff43a1592..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 2847 zcmV+)3*hu0iwFo7v)okz17vSwYh`j@b7gF4ZgqGrH~__3TaVke5`I4V6@*`!6l=S| zL4lsU6wa>G7)ZQ6Yo}<61q526ZDJ)-B`NQ^I6wZ(@S=+?ue_UwK6D$4GsAB#9L|h1 zT74p9kjmtNshEi^7cAB+)14KO+rU$c!fk;-5#O zmV?vz9JoRUq(p7=UrB&Q;!Mydm$39gew3ZrB;ilS8)GkXHjhLJ~Z zb`9~dA;BW%P_PmCCQFh~L6RLy9thu%13cK#JwqnV8WL401Q%PfK6v5y10~;YJ{0bIQB&IB4h8PX!L;;nhe>W6UN2*vY){HP=m;wW%^*n~)VL%-lM6=;wQLDcf$Tqah4DzZ&A-OQ5 zpk}9!d<;9LGN)V+s;qrrJQSwpk3}bnLm)E<+bWW&HyOZUN+Z8! zrYvwTu1*6Pt*!k*0iAMYb~43Bh141ajx6w1(;G*YMQ=Hqr`EP^4~)I(9~ggC#Eqs? zD{L+egeI(L1_4dCa15BrIqU}t4{6QdV-7S)QIVWJI5#x+uY;!+G9tCH0HBZ%Sxi)C z8$>lWY$jj8Y28@j&axe-pr4r%%d53zgn1bWHS|f79H5xC)H0jgI zs0uU)bj^^I3>RHe-bFS9yYM?pRYwLc4Vmq2q_6juX;@({ab1JGR{4 zfw)WDORWuVA|@%o>a>8w)TaS@706>x{ypfAMZF9;#_*bFSi{*fL({Pf9G43qVP2w% zIefPU=Gn9DQa}6`GQB;xg;6xIw?0G;TbJ9dJ-0w6?P0F2$a6Y?)YuAX#LrY*3cta9 znbBSK62e9L9P1w1f-7Y@QM`a6_IzSR@_3V?)m{U-#s5;+q3R`&mIcd5CTWU?x6D`% zV8;+6L+lw|cO#sY_EKF!`4838h8Nl%`!hOJ>#s0)&D)<2@%Y(r-jGtktqviMNwER^ z48UzB*EEZ@e$}nh;O;eIRpUakf#QQ=iR`XhC_rrG0lrx?CC@<(%RcL-uQ2I}h+fpL z9caOv&z5Hp3f=+ka%(o(UvEx4okAy2e(WgrYB{7zbvTC@2yGVCyZlv@2@b z=9Ay1H{|2&^EC99RZZMk!DF%LI|5gCWOU6CMOBv8JxJALYABUav}-9d4&F+u4l=Z! 
zt$tIpHaGSo&AtLQ{yMwy<-K68`LOBhW^!G%4q$9F$y%XFlC6?ufnD{##t<;$jUKy4 zmY|~YRRVg>(K3^a{nIz&(T{I`?WEsR6=!_ySm4JPevFGmrwyKZ;Z$C|CJP8Jt~=KX zH>2s6DdEf(n*uUl@{rz-3Z5RXd1H00sjUle)ye1-ZW8H-mPzWaX2Z92 z2)V}{CiL_>nKMVNq%`CEQ5d3}lA>0PK!ac7>?t`f8d{Df`Sy8gn~~aa>{ftd?6kTc zF|j|25>LYg?~WqBj^i1)>7ay0aXYDvzLZeVoOJ<)sByEhPdb$d6PF5P+?nTA#c=PA@scfsdyQ}Y5_8NS&t1%dCZ80_lCjU~9q zjg5~^n4io*sRMUjw&e-&PXRHliX zY20}XrJnZnS;I@=y@9J_Lt)mmQkSDND_Fue2MBz)TLn7JCJNW?r(tTxn%2Mxv7ZB5 zT8-6m%Jsu2I&X4wlF-Qy)}Z;JzP3%3&VP8`3&%{68=F@ZwA>hn8)wGbGNbs`rvOw+Ii6L#f+~(-Xebl9tVa4Q=RZL4J-Fm?7Us&zIL%JG!WlDgL{mh6HshYGQfieib=4EQndS1#f<%XMS1_3R~RknrMThpB*A zCYMWHDccRA1lxy7yBA1<_@xnpti)d@-AL;Gr58s<`kYA`A6_^qr^Q>}F{(R=iy*ms z_mz-vzy~*6=s#M}x=+}dR_%&(wP_tsZHrdF6ek~>)suhy9Ri#~Hp*p+-+449V#y8* z2Vd{B>(20^n+gC1%*l?5Ejz8!n$?sqc{{6|sNQ9TW$Yu)$1I|Q6&gyDs=UJFgs-`Q z0ehv#<~$8HY8O8d4mOJ-J2c8J|4Myuef#ALRG`bj8C+l}nrYeoSfF~{9fp7{rG2HY zM<;c3{cS*>;P9>+Vg|o4pzX0HSg7$y!pS!7-9zUVZUj6|-4GUXvo>%SjTOt~zIt=- z-(4J4q<(_iha62Dz7?pde3zq!?w%O>PqiXYgOcCA&On092;Ebjh1w>3&(N6b`qqva z_kj(bC9a^$msVm=VkX>UOUv6-Ye@!LXc8$>j6$ xb`W`pZ+>}u< - - - - - - ADD - ARG - CMD - COPY - ENTRYPOINT - ENV - EXPOSE - FROM - HEALTHCHECK - LABEL - MAINTAINER - ONBUILD - RUN - SHELL - STOPSIGNAL - USER - VOLUME - WORKDIR - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/contrib/syntax/nano/Dockerfile.nanorc b/contrib/syntax/nano/Dockerfile.nanorc deleted file mode 100644 index 8b63dae945..0000000000 --- a/contrib/syntax/nano/Dockerfile.nanorc +++ /dev/null @@ -1,26 +0,0 @@ -## Syntax highlighting for Dockerfiles -syntax "Dockerfile" "Dockerfile[^/]*$" - -## Keywords -icolor red "^(ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)[[:space:]]" - -## Brackets & parenthesis -color brightgreen "(\(|\)|\[|\])" - -## Double ampersand -color brightmagenta "&&" - -## Comments -icolor cyan "^[[:space:]]*#.*$" - -## Blank space at EOL -color ,green "[[:space:]]+$" - -## Strings, single-quoted -color brightwhite "'([^']|(\\'))*'" "%[qw]\{[^}]*\}" "%[qw]\([^)]*\)" "%[qw]<[^>]*>" "%[qw]\[[^]]*\]" "%[qw]\$[^$]*\$" "%[qw]\^[^^]*\^" "%[qw]![^!]*!" - -## Strings, double-quoted -color brightwhite ""([^"]|(\\"))*"" "%[QW]?\{[^}]*\}" "%[QW]?\([^)]*\)" "%[QW]?<[^>]*>" "%[QW]?\[[^]]*\]" "%[QW]?\$[^$]*\$" "%[QW]?\^[^^]*\^" "%[QW]?![^!]*!" - -## Single and double quotes -color brightyellow "('|\")" diff --git a/contrib/syntax/nano/README.md b/contrib/syntax/nano/README.md deleted file mode 100644 index 5985208b09..0000000000 --- a/contrib/syntax/nano/README.md +++ /dev/null @@ -1,32 +0,0 @@ -Dockerfile.nanorc -================= - -Dockerfile syntax highlighting for nano - -Single User Installation ------------------------- -1. Create a nano syntax directory in your home directory: - * `mkdir -p ~/.nano/syntax` - -2. Copy `Dockerfile.nanorc` to` ~/.nano/syntax/` - * `cp Dockerfile.nanorc ~/.nano/syntax/` - -3. Add the following to your `~/.nanorc` to tell nano where to find the `Dockerfile.nanorc` file - ``` -## Dockerfile files -include "~/.nano/syntax/Dockerfile.nanorc" - ``` - -System Wide Installation ------------------------- -1. Create a nano syntax directory: - * `mkdir /usr/local/share/nano` - -2. Copy `Dockerfile.nanorc` to `/usr/local/share/nano` - * `cp Dockerfile.nanorc /usr/local/share/nano/` - -3. 
Add the following to your `/etc/nanorc`: - ``` -## Dockerfile files -include "/usr/local/share/nano/Dockerfile.nanorc" - ``` diff --git a/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences b/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences deleted file mode 100644 index 20f0d04ca8..0000000000 --- a/contrib/syntax/textmate/Docker.tmbundle/Preferences/Dockerfile.tmPreferences +++ /dev/null @@ -1,24 +0,0 @@ - - - - - name - Comments - scope - source.dockerfile - settings - - shellVariables - - - name - TM_COMMENT_START - value - # - - - - uuid - 2B215AC0-A7F3-4090-9FF6-F4842BD56CA7 - - diff --git a/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage b/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage deleted file mode 100644 index 948a9bfc20..0000000000 --- a/contrib/syntax/textmate/Docker.tmbundle/Syntaxes/Dockerfile.tmLanguage +++ /dev/null @@ -1,143 +0,0 @@ - - - - - fileTypes - - Dockerfile - - name - Dockerfile - patterns - - - captures - - 1 - - name - keyword.control.dockerfile - - 2 - - name - keyword.other.special-method.dockerfile - - - match - ^\s*(?:(ONBUILD)\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s - - - captures - - 1 - - name - keyword.operator.dockerfile - - 2 - - name - keyword.other.special-method.dockerfile - - - match - ^\s*(?:(ONBUILD)\s+)?(CMD|ENTRYPOINT)\s - - - begin - " - beginCaptures - - 1 - - name - punctuation.definition.string.begin.dockerfile - - - end - " - endCaptures - - 1 - - name - punctuation.definition.string.end.dockerfile - - - name - string.quoted.double.dockerfile - patterns - - - match - \\. - name - constant.character.escaped.dockerfile - - - - - begin - ' - beginCaptures - - 1 - - name - punctuation.definition.string.begin.dockerfile - - - end - ' - endCaptures - - 1 - - name - punctuation.definition.string.end.dockerfile - - - name - string.quoted.single.dockerfile - patterns - - - match - \\. - name - constant.character.escaped.dockerfile - - - - - captures - - 1 - - name - punctuation.whitespace.comment.leading.dockerfile - - 2 - - name - comment.line.number-sign.dockerfile - - 3 - - name - punctuation.definition.comment.dockerfile - - - comment - comment.line - match - ^(\s*)((#).*$\n?) - - - scopeName - source.dockerfile - uuid - a39d8795-59d2-49af-aa00-fe74ee29576e - - diff --git a/contrib/syntax/textmate/Docker.tmbundle/info.plist b/contrib/syntax/textmate/Docker.tmbundle/info.plist deleted file mode 100644 index 239f4b0a9b..0000000000 --- a/contrib/syntax/textmate/Docker.tmbundle/info.plist +++ /dev/null @@ -1,16 +0,0 @@ - - - - - contactEmailRot13 - germ@andz.com.ar - contactName - GermanDZ - description - Helpers for Docker. - name - Docker - uuid - 8B9DDBAF-E65C-4E12-FFA7-467D4AA535B1 - - diff --git a/contrib/syntax/textmate/README.md b/contrib/syntax/textmate/README.md deleted file mode 100644 index ce611018e5..0000000000 --- a/contrib/syntax/textmate/README.md +++ /dev/null @@ -1,17 +0,0 @@ -# Docker.tmbundle - -Dockerfile syntax highlighting for TextMate and Sublime Text. - -## Install - -### Sublime Text - -Available for Sublime Text under [package control](https://sublime.wbond.net/packages/Dockerfile%20Syntax%20Highlighting). -Search for *Dockerfile Syntax Highlighting* - -### TextMate 2 - -You can install this bundle in TextMate by opening the preferences and going to the bundles tab. After installation it will be automatically updated for you. - -enjoy. 
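-
-If you prefer a manual install, a sketch (this assumes TextMate 2's default
-user bundle directory; adjust the path if yours differs):
-
-    mkdir -p ~/Library/Application\ Support/TextMate/Bundles
-    cp -R Docker.tmbundle ~/Library/Application\ Support/TextMate/Bundles/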
- diff --git a/contrib/syntax/textmate/REVIEWERS b/contrib/syntax/textmate/REVIEWERS deleted file mode 100644 index 965743df64..0000000000 --- a/contrib/syntax/textmate/REVIEWERS +++ /dev/null @@ -1 +0,0 @@ -Asbjorn Enge (@asbjornenge) diff --git a/contrib/syntax/vim/LICENSE b/contrib/syntax/vim/LICENSE deleted file mode 100644 index e67cdabd22..0000000000 --- a/contrib/syntax/vim/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Copyright (c) 2013 Honza Pokorny -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. -2. Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/contrib/syntax/vim/README.md b/contrib/syntax/vim/README.md deleted file mode 100644 index 5aa9bd825d..0000000000 --- a/contrib/syntax/vim/README.md +++ /dev/null @@ -1,26 +0,0 @@ -dockerfile.vim -============== - -Syntax highlighting for Dockerfiles - -Installation ------------- -With [pathogen](https://github.com/tpope/vim-pathogen), the usual way... - -With [Vundle](https://github.com/gmarik/Vundle.vim) - - Plugin 'docker/docker' , {'rtp': '/contrib/syntax/vim/'} - -Features --------- - -The syntax highlighting includes: - -* The directives (e.g. `FROM`) -* Strings -* Comments - -License -------- - -BSD, short and sweet diff --git a/contrib/syntax/vim/doc/dockerfile.txt b/contrib/syntax/vim/doc/dockerfile.txt deleted file mode 100644 index e69e2b7b30..0000000000 --- a/contrib/syntax/vim/doc/dockerfile.txt +++ /dev/null @@ -1,18 +0,0 @@ -*dockerfile.txt* Syntax highlighting for Dockerfiles - -Author: Honza Pokorny -License: BSD - -INSTALLATION *installation* - -Drop it on your Pathogen path and you're all set. - -FEATURES *features* - -The syntax highlighting includes: - -* The directives (e.g. 
FROM)
-* Strings
-* Comments
-
- vim:tw=78:et:ft=help:norl:
diff --git a/contrib/syntax/vim/ftdetect/dockerfile.vim b/contrib/syntax/vim/ftdetect/dockerfile.vim
deleted file mode 100644
index ee10e5d6a0..0000000000
--- a/contrib/syntax/vim/ftdetect/dockerfile.vim
+++ /dev/null
@@ -1 +0,0 @@
-au BufNewFile,BufRead [Dd]ockerfile,Dockerfile.* set filetype=dockerfile
diff --git a/contrib/syntax/vim/syntax/dockerfile.vim b/contrib/syntax/vim/syntax/dockerfile.vim
deleted file mode 100644
index a067e6ad4c..0000000000
--- a/contrib/syntax/vim/syntax/dockerfile.vim
+++ /dev/null
@@ -1,31 +0,0 @@
-" dockerfile.vim - Syntax highlighting for Dockerfiles
-" Maintainer: Honza Pokorny
-" Version: 0.5
-
-
-if exists("b:current_syntax")
-    finish
-endif
-
-let b:current_syntax = "dockerfile"
-
-syntax case ignore
-
-syntax match dockerfileKeyword /\v^\s*(ONBUILD\s+)?(ADD|ARG|CMD|COPY|ENTRYPOINT|ENV|EXPOSE|FROM|HEALTHCHECK|LABEL|MAINTAINER|RUN|SHELL|STOPSIGNAL|USER|VOLUME|WORKDIR)\s/
-highlight link dockerfileKeyword Keyword
-
-syntax region dockerfileString start=/\v"/ skip=/\v\\./ end=/\v"/
-highlight link dockerfileString String
-
-syntax match dockerfileComment "\v^\s*#.*$"
-highlight link dockerfileComment Comment
-
-set commentstring=#\ %s
-
-" match "RUN", "CMD", and "ENTRYPOINT" lines, and parse them as shell
-let s:current_syntax = b:current_syntax
-unlet b:current_syntax
-syntax include @SH syntax/sh.vim
-let b:current_syntax = s:current_syntax
-syntax region shLine matchgroup=dockerfileKeyword start=/\v^\s*(RUN|CMD|ENTRYPOINT)\s/ end=/\v$/ contains=@SH
-" since @SH will handle "\" as part of the same line automatically, this "just works" for line continuation too, but with the caveat that it will highlight "RUN echo '" followed by a newline as if it were a block because the "'" is shell line continuation... not sure how to fix that just yet (TODO)
diff --git a/contrib/syscall-test/Dockerfile b/contrib/syscall-test/Dockerfile
deleted file mode 100644
index 8cd6bebf3d..0000000000
--- a/contrib/syscall-test/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM buildpack-deps:jessie
-
-COPY . /usr/src/
-
-WORKDIR /usr/src/
-
-RUN gcc -g -Wall -static userns.c -o /usr/bin/userns-test \
-	&& gcc -g -Wall -static ns.c -o /usr/bin/ns-test \
-	&& gcc -g -Wall -static acct.c -o /usr/bin/acct-test
diff --git a/contrib/syscall-test/acct.c b/contrib/syscall-test/acct.c
deleted file mode 100644
index 88ac287966..0000000000
--- a/contrib/syscall-test/acct.c
+++ /dev/null
@@ -1,16 +0,0 @@
-#define _GNU_SOURCE
-#include <errno.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <unistd.h>
-
-int main(int argc, char **argv)
-{
-	/* acct(2) requires CAP_SYS_PACCT, so this is expected to fail
-	 * inside a default (unprivileged) container. */
-	int err = acct("/tmp/t");
-	if (err == -1) {
-		fprintf(stderr, "acct failed: %s\n", strerror(errno));
-		exit(EXIT_FAILURE);
-	}
-	exit(EXIT_SUCCESS);
-}
diff --git a/contrib/syscall-test/ns.c b/contrib/syscall-test/ns.c
deleted file mode 100644
index 33684e1c3d..0000000000
--- a/contrib/syscall-test/ns.c
+++ /dev/null
@@ -1,63 +0,0 @@
-#define _GNU_SOURCE
-#include <errno.h>
-#include <sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-
-#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
-
-struct clone_args {
-	char **argv;
-};
-
-// child_exec is the func that will be executed as the result of clone
-static int child_exec(void *stuff)
-{
-	struct clone_args *args = (struct clone_args *)stuff;
-	if (execvp(args->argv[0], args->argv) != 0) {
-		fprintf(stderr, "failed to execvp arguments %s\n",
-			strerror(errno));
-		exit(-1);
-	}
-	// we should never reach here!
-	exit(EXIT_FAILURE);
-}
-
-int main(int argc, char **argv)
-{
-	struct clone_args args;
-	args.argv = &argv[1];
-
-	int clone_flags = CLONE_NEWNS | CLONE_NEWPID | SIGCHLD;
-
-	// allocate stack for child
-	char *stack;		/* Start of stack buffer */
-	char *child_stack;	/* End of stack buffer */
-	stack =
-	    mmap(NULL, STACK_SIZE, PROT_READ | PROT_WRITE,
-		 MAP_SHARED | MAP_ANON | MAP_STACK, -1, 0);
-	if (stack == MAP_FAILED) {
-		fprintf(stderr, "mmap failed: %s\n", strerror(errno));
-		exit(EXIT_FAILURE);
-	}
-	child_stack = stack + STACK_SIZE;	/* Assume stack grows downward */
-
-	// the result of this call is that our child_exec will be run in another
-	// process returning its pid
-	pid_t pid = clone(child_exec, child_stack, clone_flags, &args);
-	if (pid < 0) {
-		fprintf(stderr, "clone failed: %s\n", strerror(errno));
-		exit(EXIT_FAILURE);
-	}
-	// let's wait on our child process here before we, the parent, exit
-	if (waitpid(pid, NULL, 0) == -1) {
-		fprintf(stderr, "failed to wait on pid %d\n", pid);
-		exit(EXIT_FAILURE);
-	}
-	exit(EXIT_SUCCESS);
-}
diff --git a/contrib/syscall-test/userns.c b/contrib/syscall-test/userns.c
deleted file mode 100644
index 2af36f4228..0000000000
--- a/contrib/syscall-test/userns.c
+++ /dev/null
@@ -1,63 +0,0 @@
-#define _GNU_SOURCE
-#include <errno.h>
-#include <sched.h>
-#include <signal.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/wait.h>
-
-#define STACK_SIZE (1024 * 1024) /* Stack size for cloned child */
-
-struct clone_args {
-	char **argv;
-};
-
-// child_exec is the func that will be executed as the result of clone
-static int child_exec(void *stuff)
-{
-	struct clone_args *args = (struct clone_args *)stuff;
-	if (execvp(args->argv[0], args->argv) != 0) {
-		fprintf(stderr, "failed to execvp arguments %s\n",
-			strerror(errno));
-		exit(-1);
-	}
-	// we should never reach here!
diff --git a/contrib/udev/80-docker.rules b/contrib/udev/80-docker.rules
deleted file mode 100644
index f934c01757..0000000000
--- a/contrib/udev/80-docker.rules
+++ /dev/null
@@ -1,3 +0,0 @@
-# hide docker's loopback devices from udisks, and thus from user desktops
-SUBSYSTEM=="block", ENV{DM_NAME}=="docker-*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
-SUBSYSTEM=="block", DEVPATH=="/devices/virtual/block/loop*", ATTR{loop/backing_file}=="/var/lib/docker/*", ENV{UDISKS_PRESENTATION_HIDE}="1", ENV{UDISKS_IGNORE}="1"
diff --git a/contrib/vagrant-docker/README.md b/contrib/vagrant-docker/README.md
deleted file mode 100644
index 360bfad34a..0000000000
--- a/contrib/vagrant-docker/README.md
+++ /dev/null
@@ -1,50 +0,0 @@
-# Vagrant integration
-
-Currently there are at least four projects that we are aware of that deal
-with [Vagrant](http://vagrantup.com/) integration at different levels. One
-approach is to use Docker as a [provisioner](http://docs.vagrantup.com/v2/provisioning/index.html),
-which means you can create containers and pull base images on VMs using Docker's
-CLI; the other is to use Docker as a [provider](http://docs.vagrantup.com/v2/providers/index.html),
-meaning you can use Vagrant to control Docker containers.
-
-
-### Provisioners
-
-* [Vocker](https://github.com/fgrehm/vocker)
-* [Ventriloquist](https://github.com/fgrehm/ventriloquist)
-
-### Providers
-
-* [docker-provider](https://github.com/fgrehm/docker-provider)
-* [vagrant-shell](https://github.com/destructuring/vagrant-shell)
-
-## Setting up Vagrant-docker with the Remote API
-
-The initial Docker upstart script will not work because it listens on `127.0.0.1`, which is not accessible from the host machine. Instead, we need to change the script to listen on `0.0.0.0`. To do this, modify `/etc/init/docker.conf` to look like this:
-
-```
-description "Docker daemon"
-
-start on filesystem
-stop on runlevel [!2345]
-
-respawn
-
-script
-	/usr/bin/docker daemon -H=tcp://0.0.0.0:2375
-end script
-```
-
-Once that's done, you need to set up an SSH tunnel between your host machine and the Vagrant machine that's running Docker. This can be done by running the following command in a host terminal:
-
-```
-ssh -L 2375:localhost:2375 -p 2222 vagrant@localhost
-```
-
-(The first 2375 is the port your host can connect to, the second 2375 is the port Docker is running on in the Vagrant machine, and 2222 is the port Vagrant is providing for SSH. If VirtualBox is the VM you're using, you can see what value "2222" should be by going to: Network > Adapter 1 > Advanced > Port Forwarding in the VirtualBox GUI.)
-
-Note that because the port has been changed, docker commands run from the command line must point at the new address, like this:
-
-```
-sudo docker -H 0.0.0.0:2375 < commands for docker >
-```
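With the daemon exposed on TCP and the tunnel in place, the same endpoint can also be exercised programmatically. A minimal, hypothetical sketch using the engine-api client this repository vendors (the address assumes the tunnel above; the API version is illustrative for this era):

```go
package main

import (
	"fmt"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	// Connect through the SSH tunnel set up above (host port 2375).
	cli, err := client.NewClient("tcp://127.0.0.1:2375", "v1.24", nil, nil)
	if err != nil {
		panic(err)
	}
	containers, err := cli.ContainerList(context.Background(), types.ContainerListOptions{All: true})
	if err != nil {
		panic(err)
	}
	for _, c := range containers {
		fmt.Println(c.ID[:12], c.Image)
	}
}
```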
diff --git a/daemon/apparmor_default.go b/daemon/apparmor_default.go
deleted file mode 100644
index e4065b4ad9..0000000000
--- a/daemon/apparmor_default.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build linux
-
-package daemon
-
-import (
-	"github.com/Sirupsen/logrus"
-	aaprofile "github.com/docker/docker/profiles/apparmor"
-	"github.com/opencontainers/runc/libcontainer/apparmor"
-)
-
-// Define constants for native driver
-const (
-	defaultApparmorProfile = "docker-default"
-)
-
-func installDefaultAppArmorProfile() {
-	if apparmor.IsEnabled() {
-		if err := aaprofile.InstallDefault(defaultApparmorProfile); err != nil {
-			apparmorProfiles := []string{defaultApparmorProfile}
-
-			// Allow the daemon to run if loading failed, but the profiles are
-			// already active (possibly through another run, manually, or via system startup)
-			for _, policy := range apparmorProfiles {
-				if err := aaprofile.IsLoaded(policy); err != nil {
-					logrus.Errorf("AppArmor enabled on system but the %s profile could not be loaded.", policy)
-				}
-			}
-		}
-	}
-}
diff --git a/daemon/apparmor_default_unsupported.go b/daemon/apparmor_default_unsupported.go
deleted file mode 100644
index f186a68af9..0000000000
--- a/daemon/apparmor_default_unsupported.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build !linux
-
-package daemon
-
-func installDefaultAppArmorProfile() {
-}
diff --git a/daemon/archive.go b/daemon/archive.go
deleted file mode 100644
index b2221560a3..0000000000
--- a/daemon/archive.go
+++ /dev/null
@@ -1,436 +0,0 @@
-package daemon
-
-import (
-	"errors"
-	"io"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/docker/docker/builder"
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/ioutils"
-	"github.com/docker/docker/pkg/system"
-	"github.com/docker/engine-api/types"
-)
-
-// ErrExtractPointNotDirectory is used to convey that the operation to extract
-// a tar archive to a directory in a container has failed because the specified
-// path does not refer to a directory.
-var ErrExtractPointNotDirectory = errors.New("extraction point is not a directory")
-
-// ContainerCopy performs a deprecated operation of archiving the resource at
-// the specified path in the container identified by the given name.
-func (daemon *Daemon) ContainerCopy(name string, res string) (io.ReadCloser, error) {
-	container, err := daemon.GetContainer(name)
-	if err != nil {
-		return nil, err
-	}
-
-	if res[0] == '/' || res[0] == '\\' {
-		res = res[1:]
-	}
-
-	return daemon.containerCopy(container, res)
-}
-
-// ContainerStatPath stats the filesystem resource at the specified path in the
-// container identified by the given name.
-func (daemon *Daemon) ContainerStatPath(name string, path string) (stat *types.ContainerPathStat, err error) {
-	container, err := daemon.GetContainer(name)
-	if err != nil {
-		return nil, err
-	}
-
-	return daemon.containerStatPath(container, path)
-}
-
-// ContainerArchivePath creates an archive of the filesystem resource at the
-// specified path in the container identified by the given name.
Returns a -// tar archive of the resource and whether it was a directory or a single file. -func (daemon *Daemon) ContainerArchivePath(name string, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, nil, err - } - - return daemon.containerArchivePath(container, path) -} - -// ContainerExtractToDir extracts the given archive to the specified location -// in the filesystem of the container identified by the given name. The given -// path must be of a directory in the container. If it is not, the error will -// be ErrExtractPointNotDirectory. If noOverwriteDirNonDir is true then it will -// be an error if unpacking the given content would cause an existing directory -// to be replaced with a non-directory and vice versa. -func (daemon *Daemon) ContainerExtractToDir(name, path string, noOverwriteDirNonDir bool, content io.Reader) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - return daemon.containerExtractToDir(container, path, noOverwriteDirNonDir, content) -} - -// containerStatPath stats the filesystem resource at the specified path in this -// container. Returns stat info about the resource. -func (daemon *Daemon) containerStatPath(container *container.Container, path string) (stat *types.ContainerPathStat, err error) { - container.Lock() - defer container.Unlock() - - if err = daemon.Mount(container); err != nil { - return nil, err - } - defer daemon.Unmount(container) - - err = daemon.mountVolumes(container) - defer container.UnmountVolumes(true, daemon.LogVolumeEvent) - if err != nil { - return nil, err - } - - resolvedPath, absPath, err := container.ResolvePath(path) - if err != nil { - return nil, err - } - - return container.StatPath(resolvedPath, absPath) -} - -// containerArchivePath creates an archive of the filesystem resource at the specified -// path in this container. Returns a tar archive of the resource and stat info -// about the resource. -func (daemon *Daemon) containerArchivePath(container *container.Container, path string) (content io.ReadCloser, stat *types.ContainerPathStat, err error) { - container.Lock() - - defer func() { - if err != nil { - // Wait to unlock the container until the archive is fully read - // (see the ReadCloseWrapper func below) or if there is an error - // before that occurs. - container.Unlock() - } - }() - - if err = daemon.Mount(container); err != nil { - return nil, nil, err - } - - defer func() { - if err != nil { - // unmount any volumes - container.UnmountVolumes(true, daemon.LogVolumeEvent) - // unmount the container's rootfs - daemon.Unmount(container) - } - }() - - if err = daemon.mountVolumes(container); err != nil { - return nil, nil, err - } - - resolvedPath, absPath, err := container.ResolvePath(path) - if err != nil { - return nil, nil, err - } - - stat, err = container.StatPath(resolvedPath, absPath) - if err != nil { - return nil, nil, err - } - - // We need to rebase the archive entries if the last element of the - // resolved path was a symlink that was evaluated and is now different - // than the requested path. For example, if the given path was "/foo/bar/", - // but it resolved to "/var/lib/docker/containers/{id}/foo/baz/", we want - // to ensure that the archive entries start with "bar" and not "baz". This - // also catches the case when the root directory of the container is - // requested: we want the archive entries to start with "/" and not the - // container ID. 
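After the rebase, the function hands the tar stream back wrapped so that unmounting and unlocking happen only once the caller finishes reading, as the ioutils.NewReadCloserWrapper call just below does. A self-contained sketch of that cleanup-on-Close pattern, with simplified stand-in names:

```go
package main

import (
	"fmt"
	"io"
	"strings"
	"sync"
)

// readCloserWrapper defers resource teardown until Close, mirroring
// pkg/ioutils.NewReadCloserWrapper as used in containerArchivePath.
type readCloserWrapper struct {
	io.Reader
	close func() error
}

func (w *readCloserWrapper) Close() error { return w.close() }

func main() {
	var mu sync.Mutex
	mu.Lock() // stands in for container.Lock() + Mount

	data := io.NopCloser(strings.NewReader("tar stream bytes"))
	rc := &readCloserWrapper{Reader: data, close: func() error {
		err := data.Close()
		mu.Unlock() // teardown happens only when the caller is done reading
		return err
	}}

	n, _ := io.Copy(io.Discard, rc)
	rc.Close()
	fmt.Println("read", n, "bytes, resources released")
}
```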
- data, err := archive.TarResourceRebase(resolvedPath, filepath.Base(absPath)) - if err != nil { - return nil, nil, err - } - - content = ioutils.NewReadCloserWrapper(data, func() error { - err := data.Close() - container.UnmountVolumes(true, daemon.LogVolumeEvent) - daemon.Unmount(container) - container.Unlock() - return err - }) - - daemon.LogContainerEvent(container, "archive-path") - - return content, stat, nil -} - -// containerExtractToDir extracts the given tar archive to the specified location in the -// filesystem of this container. The given path must be of a directory in the -// container. If it is not, the error will be ErrExtractPointNotDirectory. If -// noOverwriteDirNonDir is true then it will be an error if unpacking the -// given content would cause an existing directory to be replaced with a non- -// directory and vice versa. -func (daemon *Daemon) containerExtractToDir(container *container.Container, path string, noOverwriteDirNonDir bool, content io.Reader) (err error) { - container.Lock() - defer container.Unlock() - - if err = daemon.Mount(container); err != nil { - return err - } - defer daemon.Unmount(container) - - err = daemon.mountVolumes(container) - defer container.UnmountVolumes(true, daemon.LogVolumeEvent) - if err != nil { - return err - } - - // Check if a drive letter supplied, it must be the system drive. No-op except on Windows - path, err = system.CheckSystemDriveAndRemoveDriveLetter(path) - if err != nil { - return err - } - - // The destination path needs to be resolved to a host path, with all - // symbolic links followed in the scope of the container's rootfs. Note - // that we do not use `container.ResolvePath(path)` here because we need - // to also evaluate the last path element if it is a symlink. This is so - // that you can extract an archive to a symlink that points to a directory. - - // Consider the given path as an absolute path in the container. - absPath := archive.PreserveTrailingDotOrSeparator(filepath.Join(string(filepath.Separator), path), path) - - // This will evaluate the last path element if it is a symlink. - resolvedPath, err := container.GetResourcePath(absPath) - if err != nil { - return err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return err - } - - if !stat.IsDir() { - return ErrExtractPointNotDirectory - } - - // Need to check if the path is in a volume. If it is, it cannot be in a - // read-only volume. If it is not in a volume, the container cannot be - // configured with a read-only rootfs. - - // Use the resolved path relative to the container rootfs as the new - // absPath. This way we fully follow any symlinks in a volume that may - // lead back outside the volume. - // - // The Windows implementation of filepath.Rel in golang 1.4 does not - // support volume style file path semantics. On Windows when using the - // filter driver, we are guaranteed that the path will always be - // a volume file path. - var baseRel string - if strings.HasPrefix(resolvedPath, `\\?\Volume{`) { - if strings.HasPrefix(resolvedPath, container.BaseFS) { - baseRel = resolvedPath[len(container.BaseFS):] - if baseRel[:1] == `\` { - baseRel = baseRel[1:] - } - } - } else { - baseRel, err = filepath.Rel(container.BaseFS, resolvedPath) - } - if err != nil { - return err - } - // Make it an absolute path. 
- absPath = filepath.Join(string(filepath.Separator), baseRel) - - toVolume, err := checkIfPathIsInAVolume(container, absPath) - if err != nil { - return err - } - - if !toVolume && container.HostConfig.ReadonlyRootfs { - return ErrRootFSReadOnly - } - - uid, gid := daemon.GetRemappedUIDGID() - options := &archive.TarOptions{ - NoOverwriteDirNonDir: noOverwriteDirNonDir, - ChownOpts: &archive.TarChownOptions{ - UID: uid, GID: gid, // TODO: should all ownership be set to root (either real or remapped)? - }, - } - if err := chrootarchive.Untar(content, resolvedPath, options); err != nil { - return err - } - - daemon.LogContainerEvent(container, "extract-to-dir") - - return nil -} - -func (daemon *Daemon) containerCopy(container *container.Container, resource string) (rc io.ReadCloser, err error) { - container.Lock() - - defer func() { - if err != nil { - // Wait to unlock the container until the archive is fully read - // (see the ReadCloseWrapper func below) or if there is an error - // before that occurs. - container.Unlock() - } - }() - - if err := daemon.Mount(container); err != nil { - return nil, err - } - - defer func() { - if err != nil { - // unmount any volumes - container.UnmountVolumes(true, daemon.LogVolumeEvent) - // unmount the container's rootfs - daemon.Unmount(container) - } - }() - - if err := daemon.mountVolumes(container); err != nil { - return nil, err - } - - basePath, err := container.GetResourcePath(resource) - if err != nil { - return nil, err - } - stat, err := os.Stat(basePath) - if err != nil { - return nil, err - } - var filter []string - if !stat.IsDir() { - d, f := filepath.Split(basePath) - basePath = d - filter = []string{f} - } else { - filter = []string{filepath.Base(basePath)} - basePath = filepath.Dir(basePath) - } - archive, err := archive.TarWithOptions(basePath, &archive.TarOptions{ - Compression: archive.Uncompressed, - IncludeFiles: filter, - }) - if err != nil { - return nil, err - } - - reader := ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - container.UnmountVolumes(true, daemon.LogVolumeEvent) - daemon.Unmount(container) - container.Unlock() - return err - }) - daemon.LogContainerEvent(container, "copy") - return reader, nil -} - -// CopyOnBuild copies/extracts a source FileInfo to a destination path inside a container -// specified by a container object. -// TODO: make sure callers don't unnecessarily convert destPath with filepath.FromSlash (Copy does it already). -// CopyOnBuild should take in abstract paths (with slashes) and the implementation should convert it to OS-specific paths. -func (daemon *Daemon) CopyOnBuild(cID string, destPath string, src builder.FileInfo, decompress bool) error { - srcPath := src.Path() - destExists := true - destDir := false - rootUID, rootGID := daemon.GetRemappedUIDGID() - - // Work in daemon-local OS specific file paths - destPath = filepath.FromSlash(destPath) - - c, err := daemon.GetContainer(cID) - if err != nil { - return err - } - err = daemon.Mount(c) - if err != nil { - return err - } - defer daemon.Unmount(c) - - dest, err := c.GetResourcePath(destPath) - if err != nil { - return err - } - - // Preserve the trailing slash - // TODO: why are we appending another path separator if there was already one? - if strings.HasSuffix(destPath, string(os.PathSeparator)) || destPath == "." 
{ - destDir = true - dest += string(os.PathSeparator) - } - - destPath = dest - - destStat, err := os.Stat(destPath) - if err != nil { - if !os.IsNotExist(err) { - //logrus.Errorf("Error performing os.Stat on %s. %s", destPath, err) - return err - } - destExists = false - } - - uidMaps, gidMaps := daemon.GetUIDGIDMaps() - archiver := &archive.Archiver{ - Untar: chrootarchive.Untar, - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } - - if src.IsDir() { - // copy as directory - if err := archiver.CopyWithTar(srcPath, destPath); err != nil { - return err - } - return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) - } - if decompress && archive.IsArchivePath(srcPath) { - // Only try to untar if it is a file and that we've been told to decompress (when ADD-ing a remote file) - - // First try to unpack the source as an archive - // to support the untar feature we need to clean up the path a little bit - // because tar is very forgiving. First we need to strip off the archive's - // filename from the path but this is only added if it does not end in slash - tarDest := destPath - if strings.HasSuffix(tarDest, string(os.PathSeparator)) { - tarDest = filepath.Dir(destPath) - } - - // try to successfully untar the orig - err := archiver.UntarPath(srcPath, tarDest) - /* - if err != nil { - logrus.Errorf("Couldn't untar to %s: %v", tarDest, err) - } - */ - return err - } - - // only needed for fixPermissions, but might as well put it before CopyFileWithTar - if destDir || (destExists && destStat.IsDir()) { - destPath = filepath.Join(destPath, src.Name()) - } - - if err := idtools.MkdirAllNewAs(filepath.Dir(destPath), 0755, rootUID, rootGID); err != nil { - return err - } - if err := archiver.CopyFileWithTar(srcPath, destPath); err != nil { - return err - } - - return fixPermissions(srcPath, destPath, rootUID, rootGID, destExists) -} diff --git a/daemon/archive_unix.go b/daemon/archive_unix.go deleted file mode 100644 index 47666fe5e8..0000000000 --- a/daemon/archive_unix.go +++ /dev/null @@ -1,58 +0,0 @@ -// +build !windows - -package daemon - -import ( - "os" - "path/filepath" - - "github.com/docker/docker/container" -) - -// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it -// cannot be in a read-only volume. If it is not in a volume, the container -// cannot be configured with a read-only rootfs. -func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { - var toVolume bool - for _, mnt := range container.MountPoints { - if toVolume = mnt.HasResource(absPath); toVolume { - if mnt.RW { - break - } - return false, ErrVolumeReadonly - } - } - return toVolume, nil -} - -func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { - // If the destination didn't already exist, or the destination isn't a - // directory, then we should Lchown the destination. Otherwise, we shouldn't - // Lchown the destination. - destStat, err := os.Stat(destination) - if err != nil { - // This should *never* be reached, because the destination must've already - // been created while untar-ing the context. - return err - } - doChownDestination := !destExisted || !destStat.IsDir() - - // We Walk on the source rather than on the destination because we don't - // want to change permissions on things we haven't created or modified. - return filepath.Walk(source, func(fullpath string, info os.FileInfo, err error) error { - // Do not alter the walk root iff. 
it existed before, as it doesn't fall under - // the domain of "things we should chown". - if !doChownDestination && (source == fullpath) { - return nil - } - - // Path is prefixed by source: substitute with destination instead. - cleaned, err := filepath.Rel(source, fullpath) - if err != nil { - return err - } - - fullpath = filepath.Join(destination, cleaned) - return os.Lchown(fullpath, uid, gid) - }) -} diff --git a/daemon/archive_windows.go b/daemon/archive_windows.go deleted file mode 100644 index b3a1045341..0000000000 --- a/daemon/archive_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -package daemon - -import "github.com/docker/docker/container" - -// checkIfPathIsInAVolume checks if the path is in a volume. If it is, it -// cannot be in a read-only volume. If it is not in a volume, the container -// cannot be configured with a read-only rootfs. -// -// This is a no-op on Windows which does not support read-only volumes, or -// extracting to a mount point inside a volume. TODO Windows: FIXME Post-TP5 -func checkIfPathIsInAVolume(container *container.Container, absPath string) (bool, error) { - return false, nil -} - -func fixPermissions(source, destination string, uid, gid int, destExisted bool) error { - // chown is not supported on Windows - return nil -} diff --git a/daemon/attach.go b/daemon/attach.go deleted file mode 100644 index 3d4a51eead..0000000000 --- a/daemon/attach.go +++ /dev/null @@ -1,147 +0,0 @@ -package daemon - -import ( - "fmt" - "io" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/stdcopy" - "github.com/docker/docker/pkg/term" -) - -// ContainerAttach attaches to logs according to the config passed in. See ContainerAttachConfig. -func (daemon *Daemon) ContainerAttach(prefixOrName string, c *backend.ContainerAttachConfig) error { - keys := []byte{} - var err error - if c.DetachKeys != "" { - keys, err = term.ToBytes(c.DetachKeys) - if err != nil { - return fmt.Errorf("Invalid escape keys (%s) provided", c.DetachKeys) - } - } - - container, err := daemon.GetContainer(prefixOrName) - if err != nil { - return err - } - if container.IsPaused() { - err := fmt.Errorf("Container %s is paused. 
Unpause the container before attach", prefixOrName) - return errors.NewRequestConflictError(err) - } - - inStream, outStream, errStream, err := c.GetStreams() - if err != nil { - return err - } - defer inStream.Close() - - if !container.Config.Tty && c.MuxStreams { - errStream = stdcopy.NewStdWriter(errStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - - var stdin io.ReadCloser - var stdout, stderr io.Writer - - if c.UseStdin { - stdin = inStream - } - if c.UseStdout { - stdout = outStream - } - if c.UseStderr { - stderr = errStream - } - - if err := daemon.containerAttach(container, stdin, stdout, stderr, c.Logs, c.Stream, keys); err != nil { - fmt.Fprintf(outStream, "Error attaching: %s\n", err) - } - return nil -} - -// ContainerAttachRaw attaches the provided streams to the container's stdio -func (daemon *Daemon) ContainerAttachRaw(prefixOrName string, stdin io.ReadCloser, stdout, stderr io.Writer, stream bool) error { - container, err := daemon.GetContainer(prefixOrName) - if err != nil { - return err - } - return daemon.containerAttach(container, stdin, stdout, stderr, false, stream, nil) -} - -func (daemon *Daemon) containerAttach(c *container.Container, stdin io.ReadCloser, stdout, stderr io.Writer, logs, stream bool, keys []byte) error { - if logs { - logDriver, err := daemon.getLogger(c) - if err != nil { - return err - } - cLog, ok := logDriver.(logger.LogReader) - if !ok { - return logger.ErrReadLogsNotSupported - } - logs := cLog.ReadLogs(logger.ReadConfig{Tail: -1}) - - LogLoop: - for { - select { - case msg, ok := <-logs.Msg: - if !ok { - break LogLoop - } - if msg.Source == "stdout" && stdout != nil { - stdout.Write(msg.Line) - } - if msg.Source == "stderr" && stderr != nil { - stderr.Write(msg.Line) - } - case err := <-logs.Err: - logrus.Errorf("Error streaming logs: %v", err) - break LogLoop - } - } - } - - daemon.LogContainerEvent(c, "attach") - - //stream - if stream { - var stdinPipe io.ReadCloser - if stdin != nil { - r, w := io.Pipe() - go func() { - defer w.Close() - defer logrus.Debug("Closing buffered stdin pipe") - io.Copy(w, stdin) - }() - stdinPipe = r - } - - waitChan := make(chan struct{}) - if c.Config.StdinOnce && !c.Config.Tty { - go func() { - c.WaitStop(-1 * time.Second) - close(waitChan) - }() - } - - err := <-c.Attach(stdinPipe, stdout, stderr, keys) - if err != nil { - if _, ok := err.(container.DetachError); ok { - daemon.LogContainerEvent(c, "detach") - } else { - logrus.Errorf("attach failed with error: %v", err) - } - } - - // If we are in stdinonce mode, wait for the process to end - // otherwise, simply return - if c.Config.StdinOnce && !c.Config.Tty { - <-waitChan - } - } - return nil -} diff --git a/daemon/auth.go b/daemon/auth.go deleted file mode 100644 index a1400d88ae..0000000000 --- a/daemon/auth.go +++ /dev/null @@ -1,13 +0,0 @@ -package daemon - -import ( - "golang.org/x/net/context" - - "github.com/docker/docker/dockerversion" - "github.com/docker/engine-api/types" -) - -// AuthenticateToRegistry checks the validity of credentials in authConfig -func (daemon *Daemon) AuthenticateToRegistry(ctx context.Context, authConfig *types.AuthConfig) (string, string, error) { - return daemon.RegistryService.Auth(ctx, authConfig, dockerversion.DockerUserAgent(ctx)) -} diff --git a/daemon/caps/utils_unix.go b/daemon/caps/utils_unix.go deleted file mode 100644 index c99485f51d..0000000000 --- a/daemon/caps/utils_unix.go +++ /dev/null @@ -1,131 +0,0 @@ -// +build !windows - -package caps - -import ( - "fmt" - 
"strings" - - "github.com/docker/docker/pkg/stringutils" - "github.com/syndtr/gocapability/capability" -) - -var capabilityList Capabilities - -func init() { - last := capability.CAP_LAST_CAP - // hack for RHEL6 which has no /proc/sys/kernel/cap_last_cap - if last == capability.Cap(63) { - last = capability.CAP_BLOCK_SUSPEND - } - for _, cap := range capability.List() { - if cap > last { - continue - } - capabilityList = append(capabilityList, - &CapabilityMapping{ - Key: "CAP_" + strings.ToUpper(cap.String()), - Value: cap, - }, - ) - } -} - -type ( - // CapabilityMapping maps linux capability name to its value of capability.Cap type - // Capabilities is one of the security systems in Linux Security Module (LSM) - // framework provided by the kernel. - // For more details on capabilities, see http://man7.org/linux/man-pages/man7/capabilities.7.html - CapabilityMapping struct { - Key string `json:"key,omitempty"` - Value capability.Cap `json:"value,omitempty"` - } - // Capabilities contains all CapabilityMapping - Capabilities []*CapabilityMapping -) - -// String returns of CapabilityMapping -func (c *CapabilityMapping) String() string { - return c.Key -} - -// GetCapability returns CapabilityMapping which contains specific key -func GetCapability(key string) *CapabilityMapping { - for _, capp := range capabilityList { - if capp.Key == key { - cpy := *capp - return &cpy - } - } - return nil -} - -// GetAllCapabilities returns all of the capabilities -func GetAllCapabilities() []string { - output := make([]string, len(capabilityList)) - for i, capability := range capabilityList { - output[i] = capability.String() - } - return output -} - -// TweakCapabilities can tweak capabilities by adding or dropping capabilities -// based on the basics capabilities. -func TweakCapabilities(basics, adds, drops []string) ([]string, error) { - var ( - newCaps []string - allCaps = GetAllCapabilities() - ) - - // FIXME(tonistiigi): docker format is without CAP_ prefix, oci is with prefix - // Currently they are mixed in here. We should do conversion in one place. 
- - // look for invalid cap in the drop list - for _, cap := range drops { - if strings.ToLower(cap) == "all" { - continue - } - - if !stringutils.InSlice(allCaps, "CAP_"+cap) { - return nil, fmt.Errorf("Unknown capability drop: %q", cap) - } - } - - // handle --cap-add=all - if stringutils.InSlice(adds, "all") { - basics = allCaps - } - - if !stringutils.InSlice(drops, "all") { - for _, cap := range basics { - // skip `all` already handled above - if strings.ToLower(cap) == "all" { - continue - } - - // if we don't drop `all`, add back all the non-dropped caps - if !stringutils.InSlice(drops, cap[4:]) { - newCaps = append(newCaps, strings.ToUpper(cap)) - } - } - } - - for _, cap := range adds { - // skip `all` already handled above - if strings.ToLower(cap) == "all" { - continue - } - - cap = "CAP_" + cap - - if !stringutils.InSlice(allCaps, cap) { - return nil, fmt.Errorf("Unknown capability to add: %q", cap) - } - - // add cap if not already in the list - if !stringutils.InSlice(newCaps, cap) { - newCaps = append(newCaps, strings.ToUpper(cap)) - } - } - return newCaps, nil -} diff --git a/daemon/changes.go b/daemon/changes.go deleted file mode 100644 index 7a58763cd9..0000000000 --- a/daemon/changes.go +++ /dev/null @@ -1,15 +0,0 @@ -package daemon - -import "github.com/docker/docker/pkg/archive" - -// ContainerChanges returns a list of container fs changes -func (daemon *Daemon) ContainerChanges(name string) ([]archive.Change, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - defer container.Unlock() - return container.RWLayer.Changes() -} diff --git a/daemon/cluster/cluster.go b/daemon/cluster/cluster.go deleted file mode 100644 index 04a65f01f4..0000000000 --- a/daemon/cluster/cluster.go +++ /dev/null @@ -1,1419 +0,0 @@ -package cluster - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "strings" - "sync" - "time" - - "google.golang.org/grpc" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/cluster/convert" - executorpkg "github.com/docker/docker/daemon/cluster/executor" - "github.com/docker/docker/daemon/cluster/executor/container" - "github.com/docker/docker/errors" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/runconfig" - apitypes "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - types "github.com/docker/engine-api/types/swarm" - swarmagent "github.com/docker/swarmkit/agent" - swarmapi "github.com/docker/swarmkit/api" - "golang.org/x/net/context" -) - -const swarmDirName = "swarm" -const controlSocket = "control.sock" -const swarmConnectTimeout = 20 * time.Second -const swarmRequestTimeout = 20 * time.Second -const stateFile = "docker-state.json" -const defaultAddr = "0.0.0.0:2377" - -const ( - initialReconnectDelay = 100 * time.Millisecond - maxReconnectDelay = 30 * time.Second -) - -// ErrNoSwarm is returned on leaving a cluster that was never initialized -var ErrNoSwarm = fmt.Errorf("This node is not part of a swarm") - -// ErrSwarmExists is returned on initialize or join request for a cluster that has already been activated -var ErrSwarmExists = fmt.Errorf("This node is already part of a swarm. Use \"docker swarm leave\" to leave this swarm and join another one.") - -// ErrPendingSwarmExists is returned on initialize or join request for a cluster that is already processing a similar request but has not succeeded yet. 
-var ErrPendingSwarmExists = fmt.Errorf("This node is processing an existing join request that has not succeeded yet. Use \"docker swarm leave\" to cancel the current request.") - -// ErrSwarmJoinTimeoutReached is returned when cluster join could not complete before timeout was reached. -var ErrSwarmJoinTimeoutReached = fmt.Errorf("Timeout was reached before node was joined. The attempt to join the swarm will continue in the background. Use the \"docker info\" command to see the current swarm status of your node.") - -// defaultSpec contains some sane defaults if cluster options are missing on init -var defaultSpec = types.Spec{ - Raft: types.RaftConfig{ - SnapshotInterval: 10000, - KeepOldSnapshots: 0, - LogEntriesForSlowFollowers: 500, - HeartbeatTick: 1, - ElectionTick: 3, - }, - CAConfig: types.CAConfig{ - NodeCertExpiry: 90 * 24 * time.Hour, - }, - Dispatcher: types.DispatcherConfig{ - HeartbeatPeriod: uint64((5 * time.Second).Nanoseconds()), - }, - Orchestration: types.OrchestrationConfig{ - TaskHistoryRetentionLimit: 10, - }, -} - -type state struct { - // LocalAddr is this machine's local IP or hostname, if specified. - LocalAddr string - // RemoteAddr is the address that was given to "swarm join. It is used - // to find LocalAddr if necessary. - RemoteAddr string - // ListenAddr is the address we bind to, including a port. - ListenAddr string - // AdvertiseAddr is the address other nodes should connect to, - // including a port. - AdvertiseAddr string -} - -// NetworkSubnetsProvider exposes functions for retrieving the subnets -// of networks managed by Docker, so they can be filtered. -type NetworkSubnetsProvider interface { - V4Subnets() []net.IPNet - V6Subnets() []net.IPNet -} - -// Config provides values for Cluster. -type Config struct { - Root string - Name string - Backend executorpkg.Backend - NetworkSubnetsProvider NetworkSubnetsProvider - - // DefaultAdvertiseAddr is the default host/IP or network interface to use - // if no AdvertiseAddr value is specified. - DefaultAdvertiseAddr string -} - -// Cluster provides capabilities to participate in a cluster as a worker or a -// manager. -type Cluster struct { - sync.RWMutex - *node - root string - config Config - configEvent chan struct{} // todo: make this array and goroutine safe - localAddr string - actualLocalAddr string // after resolution, not persisted - remoteAddr string - listenAddr string - advertiseAddr string - stop bool - err error - cancelDelay func() -} - -type node struct { - *swarmagent.Node - done chan struct{} - ready bool - conn *grpc.ClientConn - client swarmapi.ControlClient - reconnectDelay time.Duration -} - -// New creates a new Cluster instance using provided config. 
-func New(config Config) (*Cluster, error) { - root := filepath.Join(config.Root, swarmDirName) - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - c := &Cluster{ - root: root, - config: config, - configEvent: make(chan struct{}, 10), - } - - st, err := c.loadState() - if err != nil { - if os.IsNotExist(err) { - return c, nil - } - return nil, err - } - - n, err := c.startNewNode(false, st.LocalAddr, st.RemoteAddr, st.ListenAddr, st.AdvertiseAddr, "", "") - if err != nil { - return nil, err - } - - select { - case <-time.After(swarmConnectTimeout): - logrus.Errorf("swarm component could not be started before timeout was reached") - case <-n.Ready(): - case <-n.done: - return nil, fmt.Errorf("swarm component could not be started: %v", c.err) - } - go c.reconnectOnFailure(n) - return c, nil -} - -func (c *Cluster) loadState() (*state, error) { - dt, err := ioutil.ReadFile(filepath.Join(c.root, stateFile)) - if err != nil { - return nil, err - } - // missing certificate means no actual state to restore from - if _, err := os.Stat(filepath.Join(c.root, "certificates/swarm-node.crt")); err != nil { - if os.IsNotExist(err) { - c.clearState() - } - return nil, err - } - var st state - if err := json.Unmarshal(dt, &st); err != nil { - return nil, err - } - return &st, nil -} - -func (c *Cluster) saveState() error { - dt, err := json.Marshal(state{ - LocalAddr: c.localAddr, - RemoteAddr: c.remoteAddr, - ListenAddr: c.listenAddr, - AdvertiseAddr: c.advertiseAddr, - }) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(c.root, stateFile), dt, 0600) -} - -func (c *Cluster) reconnectOnFailure(n *node) { - for { - <-n.done - c.Lock() - if c.stop || c.node != nil { - c.Unlock() - return - } - n.reconnectDelay *= 2 - if n.reconnectDelay > maxReconnectDelay { - n.reconnectDelay = maxReconnectDelay - } - logrus.Warnf("Restarting swarm in %.2f seconds", n.reconnectDelay.Seconds()) - delayCtx, cancel := context.WithTimeout(context.Background(), n.reconnectDelay) - c.cancelDelay = cancel - c.Unlock() - <-delayCtx.Done() - if delayCtx.Err() != context.DeadlineExceeded { - return - } - c.Lock() - if c.node != nil { - c.Unlock() - return - } - var err error - n, err = c.startNewNode(false, c.localAddr, c.getRemoteAddress(), c.listenAddr, c.advertiseAddr, c.getRemoteAddress(), "") - if err != nil { - c.err = err - close(n.done) - } - c.Unlock() - } -} - -func (c *Cluster) startNewNode(forceNewCluster bool, localAddr, remoteAddr, listenAddr, advertiseAddr, joinAddr, joinToken string) (*node, error) { - if err := c.config.Backend.IsSwarmCompatible(); err != nil { - return nil, err - } - - actualLocalAddr := localAddr - if actualLocalAddr == "" { - // If localAddr was not specified, resolve it automatically - // based on the route to joinAddr. localAddr can only be left - // empty on "join". - listenHost, _, err := net.SplitHostPort(listenAddr) - if err != nil { - return nil, fmt.Errorf("could not parse listen address: %v", err) - } - - listenAddrIP := net.ParseIP(listenHost) - if listenAddrIP == nil || !listenAddrIP.IsUnspecified() { - actualLocalAddr = listenHost - } else { - if remoteAddr == "" { - // Should never happen except using swarms created by - // old versions that didn't save remoteAddr. 
- remoteAddr = "8.8.8.8:53" - } - conn, err := net.Dial("udp", remoteAddr) - if err != nil { - return nil, fmt.Errorf("could not find local IP address: %v", err) - } - localHostPort := conn.LocalAddr().String() - actualLocalAddr, _, _ = net.SplitHostPort(localHostPort) - conn.Close() - } - } - - c.node = nil - c.cancelDelay = nil - c.stop = false - n, err := swarmagent.NewNode(&swarmagent.NodeConfig{ - Hostname: c.config.Name, - ForceNewCluster: forceNewCluster, - ListenControlAPI: filepath.Join(c.root, controlSocket), - ListenRemoteAPI: listenAddr, - AdvertiseRemoteAPI: advertiseAddr, - JoinAddr: joinAddr, - StateDir: c.root, - JoinToken: joinToken, - Executor: container.NewExecutor(c.config.Backend), - HeartbeatTick: 1, - ElectionTick: 3, - }) - if err != nil { - return nil, err - } - ctx := context.Background() - if err := n.Start(ctx); err != nil { - return nil, err - } - node := &node{ - Node: n, - done: make(chan struct{}), - reconnectDelay: initialReconnectDelay, - } - c.node = node - c.localAddr = localAddr - c.actualLocalAddr = actualLocalAddr // not saved - c.remoteAddr = remoteAddr - c.listenAddr = listenAddr - c.advertiseAddr = advertiseAddr - c.saveState() - - c.config.Backend.SetClusterProvider(c) - go func() { - err := n.Err(ctx) - if err != nil { - logrus.Errorf("cluster exited with error: %v", err) - } - c.Lock() - c.node = nil - c.err = err - c.Unlock() - close(node.done) - }() - - go func() { - select { - case <-n.Ready(): - c.Lock() - node.ready = true - c.err = nil - c.Unlock() - case <-ctx.Done(): - } - c.configEvent <- struct{}{} - }() - - go func() { - for conn := range n.ListenControlSocket(ctx) { - c.Lock() - if node.conn != conn { - if conn == nil { - node.client = nil - } else { - node.client = swarmapi.NewControlClient(conn) - } - } - node.conn = conn - c.Unlock() - c.configEvent <- struct{}{} - } - }() - - return node, nil -} - -// Init initializes new cluster from user provided request. -func (c *Cluster) Init(req types.InitRequest) (string, error) { - c.Lock() - if node := c.node; node != nil { - if !req.ForceNewCluster { - c.Unlock() - return "", ErrSwarmExists - } - if err := c.stopNode(); err != nil { - c.Unlock() - return "", err - } - } - - if err := validateAndSanitizeInitRequest(&req); err != nil { - c.Unlock() - return "", err - } - - listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) - if err != nil { - c.Unlock() - return "", err - } - - advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) - if err != nil { - c.Unlock() - return "", err - } - - localAddr := listenHost - - // If the advertise address is not one of the system's - // addresses, we also require a listen address. 
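The local-address discovery a few lines above relies on a connected UDP socket: Dial sends no packets, but the kernel must pick an outbound interface, which conn.LocalAddr() then exposes ("8.8.8.8:53" is only a fallback destination). The same trick as a standalone sketch:

```go
package main

import (
	"fmt"
	"net"
)

// localIPToward returns the local IP the kernel would use to reach addr.
// Dialing UDP performs no I/O; it only selects a route.
func localIPToward(addr string) (string, error) {
	conn, err := net.Dial("udp", addr)
	if err != nil {
		return "", err
	}
	defer conn.Close()
	host, _, err := net.SplitHostPort(conn.LocalAddr().String())
	return host, err
}

func main() {
	ip, err := localIPToward("8.8.8.8:53") // same fallback the code above uses
	if err != nil {
		panic(err)
	}
	fmt.Println("local IP:", ip)
}
```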
- listenAddrIP := net.ParseIP(listenHost) - if listenAddrIP != nil && listenAddrIP.IsUnspecified() { - advertiseIP := net.ParseIP(advertiseHost) - if advertiseIP == nil { - // not an IP - c.Unlock() - return "", errMustSpecifyListenAddr - } - - systemIPs := listSystemIPs() - - found := false - for _, systemIP := range systemIPs { - if systemIP.Equal(advertiseIP) { - found = true - break - } - } - if !found { - c.Unlock() - return "", errMustSpecifyListenAddr - } - localAddr = advertiseIP.String() - } - - // todo: check current state existing - n, err := c.startNewNode(req.ForceNewCluster, localAddr, "", net.JoinHostPort(listenHost, listenPort), net.JoinHostPort(advertiseHost, advertisePort), "", "") - if err != nil { - c.Unlock() - return "", err - } - c.Unlock() - - select { - case <-n.Ready(): - if err := initClusterSpec(n, req.Spec); err != nil { - return "", err - } - go c.reconnectOnFailure(n) - return n.NodeID(), nil - case <-n.done: - c.RLock() - defer c.RUnlock() - if !req.ForceNewCluster { // if failure on first attempt don't keep state - if err := c.clearState(); err != nil { - return "", err - } - } - return "", c.err - } -} - -// Join makes current Cluster part of an existing swarm cluster. -func (c *Cluster) Join(req types.JoinRequest) error { - c.Lock() - if node := c.node; node != nil { - c.Unlock() - return ErrSwarmExists - } - if err := validateAndSanitizeJoinRequest(&req); err != nil { - c.Unlock() - return err - } - - listenHost, listenPort, err := resolveListenAddr(req.ListenAddr) - if err != nil { - c.Unlock() - return err - } - - var advertiseAddr string - if req.AdvertiseAddr != "" { - advertiseHost, advertisePort, err := c.resolveAdvertiseAddr(req.AdvertiseAddr, listenPort) - // For joining, we don't need to provide an advertise address, - // since the remote side can detect it. - if err == nil { - advertiseAddr = net.JoinHostPort(advertiseHost, advertisePort) - } - } - - // todo: check current state existing - n, err := c.startNewNode(false, "", req.RemoteAddrs[0], net.JoinHostPort(listenHost, listenPort), advertiseAddr, req.RemoteAddrs[0], req.JoinToken) - if err != nil { - c.Unlock() - return err - } - c.Unlock() - - select { - case <-time.After(swarmConnectTimeout): - // attempt to connect will continue in background, also reconnecting - go c.reconnectOnFailure(n) - return ErrSwarmJoinTimeoutReached - case <-n.Ready(): - go c.reconnectOnFailure(n) - return nil - case <-n.done: - c.RLock() - defer c.RUnlock() - return c.err - } -} - -// stopNode is a helper that stops the active c.node and waits until it has -// shut down. Call while keeping the cluster lock. -func (c *Cluster) stopNode() error { - if c.node == nil { - return nil - } - c.stop = true - if c.cancelDelay != nil { - c.cancelDelay() - c.cancelDelay = nil - } - node := c.node - ctx, cancel := context.WithTimeout(context.Background(), 15*time.Second) - defer cancel() - // TODO: can't hold lock on stop because it calls back to network - c.Unlock() - defer c.Lock() - if err := node.Stop(ctx); err != nil && !strings.Contains(err.Error(), "context canceled") { - return err - } - <-node.done - return nil -} - -// Leave shuts down Cluster and removes current state. -func (c *Cluster) Leave(force bool) error { - c.Lock() - node := c.node - if node == nil { - c.Unlock() - return ErrNoSwarm - } - - if node.Manager() != nil && !force { - msg := "You are attempting to leave the swarm on a node that is participating as a manager. 
" - if c.isActiveManager() { - active, reachable, unreachable, err := c.managerStats() - if err == nil { - if active && reachable-2 <= unreachable { - if reachable == 1 && unreachable == 0 { - msg += "Removing the last manager erases all current state of the swarm. Use `--force` to ignore this message. " - c.Unlock() - return fmt.Errorf(msg) - } - msg += fmt.Sprintf("Removing this node leaves %v managers out of %v. Without a Raft quorum your swarm will be inaccessible. ", reachable-1, reachable+unreachable) - } - } - } else { - msg += "Doing so may lose the consensus of your cluster. " - } - - msg += "The only way to restore a swarm that has lost consensus is to reinitialize it with `--force-new-cluster`. Use `--force` to suppress this message." - c.Unlock() - return fmt.Errorf(msg) - } - if err := c.stopNode(); err != nil { - c.Unlock() - return err - } - c.Unlock() - if nodeID := node.NodeID(); nodeID != "" { - for _, id := range c.config.Backend.ListContainersForNode(nodeID) { - if err := c.config.Backend.ContainerRm(id, &apitypes.ContainerRmConfig{ForceRemove: true}); err != nil { - logrus.Errorf("error removing %v: %v", id, err) - } - } - } - c.configEvent <- struct{}{} - // todo: cleanup optional? - if err := c.clearState(); err != nil { - return err - } - return nil -} - -func (c *Cluster) clearState() error { - // todo: backup this data instead of removing? - if err := os.RemoveAll(c.root); err != nil { - return err - } - if err := os.MkdirAll(c.root, 0700); err != nil { - return err - } - c.config.Backend.SetClusterProvider(nil) - return nil -} - -func (c *Cluster) getRequestContext() (context.Context, func()) { // TODO: not needed when requests don't block on qourum lost - return context.WithTimeout(context.Background(), swarmRequestTimeout) -} - -// Inspect retrieves the configuration properties of a managed swarm cluster. -func (c *Cluster) Inspect() (types.Swarm, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return types.Swarm{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - swarm, err := getSwarm(ctx, c.client) - if err != nil { - return types.Swarm{}, err - } - - if err != nil { - return types.Swarm{}, err - } - - return convert.SwarmFromGRPC(*swarm), nil -} - -// Update updates configuration of a managed swarm cluster. -func (c *Cluster) Update(version uint64, spec types.Spec, flags types.UpdateFlags) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - swarm, err := getSwarm(ctx, c.client) - if err != nil { - return err - } - - swarmSpec, err := convert.SwarmSpecToGRPC(spec) - if err != nil { - return err - } - - _, err = c.client.UpdateCluster( - ctx, - &swarmapi.UpdateClusterRequest{ - ClusterID: swarm.ID, - Spec: &swarmSpec, - ClusterVersion: &swarmapi.Version{ - Index: version, - }, - Rotation: swarmapi.JoinTokenRotation{ - RotateWorkerToken: flags.RotateWorkerToken, - RotateManagerToken: flags.RotateManagerToken, - }, - }, - ) - return err -} - -// IsManager returns true if Cluster is participating as a manager. -func (c *Cluster) IsManager() bool { - c.RLock() - defer c.RUnlock() - return c.isActiveManager() -} - -// IsAgent returns true if Cluster is participating as a worker/agent. -func (c *Cluster) IsAgent() bool { - c.RLock() - defer c.RUnlock() - return c.node != nil && c.ready -} - -// GetLocalAddress returns the local address. 
-func (c *Cluster) GetLocalAddress() string { - c.RLock() - defer c.RUnlock() - return c.actualLocalAddr -} - -// GetListenAddress returns the listen address. -func (c *Cluster) GetListenAddress() string { - c.RLock() - defer c.RUnlock() - return c.listenAddr -} - -// GetAdvertiseAddress returns the remotely reachable address of this node. -func (c *Cluster) GetAdvertiseAddress() string { - c.RLock() - defer c.RUnlock() - if c.advertiseAddr != "" { - advertiseHost, _, _ := net.SplitHostPort(c.advertiseAddr) - return advertiseHost - } - return c.actualLocalAddr -} - -// GetRemoteAddress returns a known advertise address of a remote manager if -// available. -// todo: change to array/connect with info -func (c *Cluster) GetRemoteAddress() string { - c.RLock() - defer c.RUnlock() - return c.getRemoteAddress() -} - -func (c *Cluster) getRemoteAddress() string { - if c.node == nil { - return "" - } - nodeID := c.node.NodeID() - for _, r := range c.node.Remotes() { - if r.NodeID != nodeID { - return r.Addr - } - } - return "" -} - -// ListenClusterEvents returns a channel that receives messages on cluster -// participation changes. -// todo: make cancelable and accessible to multiple callers -func (c *Cluster) ListenClusterEvents() <-chan struct{} { - return c.configEvent -} - -// Info returns information about the current cluster state. -func (c *Cluster) Info() types.Info { - info := types.Info{ - NodeAddr: c.GetAdvertiseAddress(), - } - - c.RLock() - defer c.RUnlock() - - if c.node == nil { - info.LocalNodeState = types.LocalNodeStateInactive - if c.cancelDelay != nil { - info.LocalNodeState = types.LocalNodeStateError - } - } else { - info.LocalNodeState = types.LocalNodeStatePending - if c.ready == true { - info.LocalNodeState = types.LocalNodeStateActive - } - } - if c.err != nil { - info.Error = c.err.Error() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - if c.isActiveManager() { - info.ControlAvailable = true - swarm, err := c.Inspect() - if err != nil { - info.Error = err.Error() - } - - // Strip JoinTokens - info.Cluster = swarm.ClusterInfo - - if r, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}); err == nil { - info.Nodes = len(r.Nodes) - for _, n := range r.Nodes { - if n.ManagerStatus != nil { - info.Managers = info.Managers + 1 - } - } - } - } - - if c.node != nil { - for _, r := range c.node.Remotes() { - info.RemoteManagers = append(info.RemoteManagers, types.Peer{NodeID: r.NodeID, Addr: r.Addr}) - } - info.NodeID = c.node.NodeID() - } - - return info -} - -// isActiveManager should not be called without a read lock -func (c *Cluster) isActiveManager() bool { - return c.node != nil && c.conn != nil -} - -// errNoManager returns error describing why manager commands can't be used. -// Call with read lock. -func (c *Cluster) errNoManager() error { - if c.node == nil { - return fmt.Errorf("This node is not a swarm manager. Use \"docker swarm init\" or \"docker swarm join\" to connect this node to swarm and try again.") - } - if c.node.Manager() != nil { - return fmt.Errorf("This node is not a swarm manager. Manager is being prepared or has trouble connecting to the cluster.") - } - return fmt.Errorf("This node is not a swarm manager. Worker nodes can't be used to view or modify cluster state. Please run this command on a manager node or promote the current node to a manager.") -} - -// GetServices returns all services of a managed swarm cluster. 
-func (c *Cluster) GetServices(options apitypes.ServiceListOptions) ([]types.Service, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return nil, c.errNoManager() - } - - filters, err := newListServicesFilters(options.Filter) - if err != nil { - return nil, err - } - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := c.client.ListServices( - ctx, - &swarmapi.ListServicesRequest{Filters: filters}) - if err != nil { - return nil, err - } - - services := []types.Service{} - - for _, service := range r.Services { - services = append(services, convert.ServiceFromGRPC(*service)) - } - - return services, nil -} - -// CreateService creates a new service in a managed swarm cluster. -func (c *Cluster) CreateService(s types.ServiceSpec, encodedAuth string) (string, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return "", c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - err := c.populateNetworkID(ctx, c.client, &s) - if err != nil { - return "", err - } - - serviceSpec, err := convert.ServiceSpecToGRPC(s) - if err != nil { - return "", err - } - - if encodedAuth != "" { - ctnr := serviceSpec.Task.GetContainer() - if ctnr == nil { - return "", fmt.Errorf("service does not use container tasks") - } - ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} - } - - r, err := c.client.CreateService(ctx, &swarmapi.CreateServiceRequest{Spec: &serviceSpec}) - if err != nil { - return "", err - } - - return r.Service.ID, nil -} - -// GetService returns a service based on an ID or name. -func (c *Cluster) GetService(input string) (types.Service, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return types.Service{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - service, err := getService(ctx, c.client, input) - if err != nil { - return types.Service{}, err - } - return convert.ServiceFromGRPC(*service), nil -} - -// UpdateService updates existing service to match new properties. -func (c *Cluster) UpdateService(serviceID string, version uint64, spec types.ServiceSpec, encodedAuth string) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - err := c.populateNetworkID(ctx, c.client, &spec) - if err != nil { - return err - } - - serviceSpec, err := convert.ServiceSpecToGRPC(spec) - if err != nil { - return err - } - - if encodedAuth != "" { - ctnr := serviceSpec.Task.GetContainer() - if ctnr == nil { - return fmt.Errorf("service does not use container tasks") - } - ctnr.PullOptions = &swarmapi.ContainerSpec_PullOptions{RegistryAuth: encodedAuth} - } else { - // this is needed because if the encodedAuth isn't being updated then we - // shouldn't lose it, and continue to use the one that was already present - currentService, err := getService(ctx, c.client, serviceID) - if err != nil { - return err - } - ctnr := currentService.Spec.Task.GetContainer() - if ctnr == nil { - return fmt.Errorf("service does not use container tasks") - } - serviceSpec.Task.GetContainer().PullOptions = ctnr.PullOptions - } - - _, err = c.client.UpdateService( - ctx, - &swarmapi.UpdateServiceRequest{ - ServiceID: serviceID, - Spec: &serviceSpec, - ServiceVersion: &swarmapi.Version{ - Index: version, - }, - }, - ) - return err -} - -// RemoveService removes a service from a managed swarm cluster. 
-func (c *Cluster) RemoveService(input string) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - service, err := getService(ctx, c.client, input) - if err != nil { - return err - } - - if _, err := c.client.RemoveService(ctx, &swarmapi.RemoveServiceRequest{ServiceID: service.ID}); err != nil { - return err - } - return nil -} - -// GetNodes returns a list of all nodes known to a cluster. -func (c *Cluster) GetNodes(options apitypes.NodeListOptions) ([]types.Node, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return nil, c.errNoManager() - } - - filters, err := newListNodesFilters(options.Filter) - if err != nil { - return nil, err - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := c.client.ListNodes( - ctx, - &swarmapi.ListNodesRequest{Filters: filters}) - if err != nil { - return nil, err - } - - nodes := []types.Node{} - - for _, node := range r.Nodes { - nodes = append(nodes, convert.NodeFromGRPC(*node)) - } - return nodes, nil -} - -// GetNode returns a node based on an ID or name. -func (c *Cluster) GetNode(input string) (types.Node, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return types.Node{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - node, err := getNode(ctx, c.client, input) - if err != nil { - return types.Node{}, err - } - return convert.NodeFromGRPC(*node), nil -} - -// UpdateNode updates existing nodes properties. -func (c *Cluster) UpdateNode(nodeID string, version uint64, spec types.NodeSpec) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - nodeSpec, err := convert.NodeSpecToGRPC(spec) - if err != nil { - return err - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - _, err = c.client.UpdateNode( - ctx, - &swarmapi.UpdateNodeRequest{ - NodeID: nodeID, - Spec: &nodeSpec, - NodeVersion: &swarmapi.Version{ - Index: version, - }, - }, - ) - return err -} - -// RemoveNode removes a node from a cluster -func (c *Cluster) RemoveNode(input string, force bool) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - node, err := getNode(ctx, c.client, input) - if err != nil { - return err - } - - if _, err := c.client.RemoveNode(ctx, &swarmapi.RemoveNodeRequest{NodeID: node.ID, Force: force}); err != nil { - return err - } - return nil -} - -// GetTasks returns a list of tasks matching the filter options. 
-func (c *Cluster) GetTasks(options apitypes.TaskListOptions) ([]types.Task, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return nil, c.errNoManager() - } - - byName := func(filter filters.Args) error { - if filter.Include("service") { - serviceFilters := filter.Get("service") - for _, serviceFilter := range serviceFilters { - service, err := c.GetService(serviceFilter) - if err != nil { - return err - } - filter.Del("service", serviceFilter) - filter.Add("service", service.ID) - } - } - if filter.Include("node") { - nodeFilters := filter.Get("node") - for _, nodeFilter := range nodeFilters { - node, err := c.GetNode(nodeFilter) - if err != nil { - return err - } - filter.Del("node", nodeFilter) - filter.Add("node", node.ID) - } - } - return nil - } - - filters, err := newListTasksFilters(options.Filter, byName) - if err != nil { - return nil, err - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := c.client.ListTasks( - ctx, - &swarmapi.ListTasksRequest{Filters: filters}) - if err != nil { - return nil, err - } - - tasks := []types.Task{} - - for _, task := range r.Tasks { - tasks = append(tasks, convert.TaskFromGRPC(*task)) - } - return tasks, nil -} - -// GetTask returns a task by an ID. -func (c *Cluster) GetTask(input string) (types.Task, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return types.Task{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - task, err := getTask(ctx, c.client, input) - if err != nil { - return types.Task{}, err - } - return convert.TaskFromGRPC(*task), nil -} - -// GetNetwork returns a cluster network by an ID. -func (c *Cluster) GetNetwork(input string) (apitypes.NetworkResource, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return apitypes.NetworkResource{}, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - network, err := getNetwork(ctx, c.client, input) - if err != nil { - return apitypes.NetworkResource{}, err - } - return convert.BasicNetworkFromGRPC(*network), nil -} - -// GetNetworks returns all current cluster managed networks. -func (c *Cluster) GetNetworks() ([]apitypes.NetworkResource, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return nil, c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - r, err := c.client.ListNetworks(ctx, &swarmapi.ListNetworksRequest{}) - if err != nil { - return nil, err - } - - var networks []apitypes.NetworkResource - - for _, network := range r.Networks { - networks = append(networks, convert.BasicNetworkFromGRPC(*network)) - } - - return networks, nil -} - -// CreateNetwork creates a new cluster managed network. -func (c *Cluster) CreateNetwork(s apitypes.NetworkCreateRequest) (string, error) { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return "", c.errNoManager() - } - - if runconfig.IsPreDefinedNetwork(s.Name) { - err := fmt.Errorf("%s is a pre-defined network and cannot be created", s.Name) - return "", errors.NewRequestForbiddenError(err) - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - networkSpec := convert.BasicNetworkCreateToGRPC(s) - r, err := c.client.CreateNetwork(ctx, &swarmapi.CreateNetworkRequest{Spec: &networkSpec}) - if err != nil { - return "", err - } - - return r.Network.ID, nil -} - -// RemoveNetwork removes a cluster network. 
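Editor's note: GetTasks above rewrites the user-facing "service" and "node" filter values into IDs before issuing ListTasks, deleting each name and re-adding the resolved ID. A self-contained sketch of that rewrite, with a plain multimap standing in for the engine's filters.Args:

package main

import "fmt"

// args is a minimal stand-in for the filters.Args multimap.
type args map[string][]string

// resolve maps each value of key through lookup, so later matching can be
// done purely on IDs (the same Del-then-Add rewrite GetTasks performs).
func resolve(a args, key string, lookup func(string) (string, error)) error {
	vals := a[key]
	resolved := make([]string, 0, len(vals))
	for _, v := range vals {
		id, err := lookup(v)
		if err != nil {
			return err
		}
		resolved = append(resolved, id)
	}
	a[key] = resolved
	return nil
}

func main() {
	byName := map[string]string{"web": "s1x9", "db": "q7z2"}
	a := args{"service": {"web", "db"}}
	err := resolve(a, "service", func(name string) (string, error) {
		id, ok := byName[name]
		if !ok {
			return "", fmt.Errorf("service %s not found", name)
		}
		return id, nil
	})
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(a["service"]) // [s1x9 q7z2]
}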
-func (c *Cluster) RemoveNetwork(input string) error { - c.RLock() - defer c.RUnlock() - - if !c.isActiveManager() { - return c.errNoManager() - } - - ctx, cancel := c.getRequestContext() - defer cancel() - - network, err := getNetwork(ctx, c.client, input) - if err != nil { - return err - } - - if _, err := c.client.RemoveNetwork(ctx, &swarmapi.RemoveNetworkRequest{NetworkID: network.ID}); err != nil { - return err - } - return nil -} - -func (c *Cluster) populateNetworkID(ctx context.Context, client swarmapi.ControlClient, s *types.ServiceSpec) error { - for i, n := range s.Networks { - apiNetwork, err := getNetwork(ctx, client, n.Target) - if err != nil { - if ln, _ := c.config.Backend.FindNetwork(n.Target); ln != nil && !ln.Info().Dynamic() { - err = fmt.Errorf("network %s is not eligible for docker services", ln.Name()) - return errors.NewRequestForbiddenError(err) - } - return err - } - s.Networks[i].Target = apiNetwork.ID - } - return nil -} - -func getNetwork(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Network, error) { - // GetNetwork to match via full ID. - rg, err := c.GetNetwork(ctx, &swarmapi.GetNetworkRequest{NetworkID: input}) - if err != nil { - // If any error (including NotFound), ListNetworks to match via ID prefix and full name. - rl, err := c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{Names: []string{input}}}) - if err != nil || len(rl.Networks) == 0 { - rl, err = c.ListNetworks(ctx, &swarmapi.ListNetworksRequest{Filters: &swarmapi.ListNetworksRequest_Filters{IDPrefixes: []string{input}}}) - } - - if err != nil { - return nil, err - } - - if len(rl.Networks) == 0 { - return nil, fmt.Errorf("network %s not found", input) - } - - if l := len(rl.Networks); l > 1 { - return nil, fmt.Errorf("network %s is ambiguous (%d matches found)", input, l) - } - - return rl.Networks[0], nil - } - return rg.Network, nil -} - -// Cleanup stops active swarm node. This is run before daemon shutdown. -func (c *Cluster) Cleanup() { - c.Lock() - node := c.node - if node == nil { - c.Unlock() - return - } - defer c.Unlock() - if c.isActiveManager() { - active, reachable, unreachable, err := c.managerStats() - if err == nil { - singlenode := active && reachable == 1 && unreachable == 0 - if active && !singlenode && reachable-2 <= unreachable { - logrus.Errorf("Leaving cluster with %v managers left out of %v. 
Raft quorum will be lost.", reachable-1, reachable+unreachable) - } - } - } - c.stopNode() -} - -func (c *Cluster) managerStats() (current bool, reachable int, unreachable int, err error) { - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - nodes, err := c.client.ListNodes(ctx, &swarmapi.ListNodesRequest{}) - if err != nil { - return false, 0, 0, err - } - for _, n := range nodes.Nodes { - if n.ManagerStatus != nil { - if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_REACHABLE { - reachable++ - if n.ID == c.node.NodeID() { - current = true - } - } - if n.ManagerStatus.Reachability == swarmapi.RaftMemberStatus_UNREACHABLE { - unreachable++ - } - } - } - return -} - -func validateAndSanitizeInitRequest(req *types.InitRequest) error { - var err error - req.ListenAddr, err = validateAddr(req.ListenAddr) - if err != nil { - return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) - } - - spec := &req.Spec - // provide sane defaults instead of erroring - if spec.Name == "" { - spec.Name = "default" - } - if spec.Raft.SnapshotInterval == 0 { - spec.Raft.SnapshotInterval = defaultSpec.Raft.SnapshotInterval - } - if spec.Raft.LogEntriesForSlowFollowers == 0 { - spec.Raft.LogEntriesForSlowFollowers = defaultSpec.Raft.LogEntriesForSlowFollowers - } - if spec.Raft.ElectionTick == 0 { - spec.Raft.ElectionTick = defaultSpec.Raft.ElectionTick - } - if spec.Raft.HeartbeatTick == 0 { - spec.Raft.HeartbeatTick = defaultSpec.Raft.HeartbeatTick - } - if spec.Dispatcher.HeartbeatPeriod == 0 { - spec.Dispatcher.HeartbeatPeriod = defaultSpec.Dispatcher.HeartbeatPeriod - } - if spec.CAConfig.NodeCertExpiry == 0 { - spec.CAConfig.NodeCertExpiry = defaultSpec.CAConfig.NodeCertExpiry - } - if spec.Orchestration.TaskHistoryRetentionLimit == 0 { - spec.Orchestration.TaskHistoryRetentionLimit = defaultSpec.Orchestration.TaskHistoryRetentionLimit - } - return nil -} - -func validateAndSanitizeJoinRequest(req *types.JoinRequest) error { - var err error - req.ListenAddr, err = validateAddr(req.ListenAddr) - if err != nil { - return fmt.Errorf("invalid ListenAddr %q: %v", req.ListenAddr, err) - } - if len(req.RemoteAddrs) == 0 { - return fmt.Errorf("at least 1 RemoteAddr is required to join") - } - for i := range req.RemoteAddrs { - req.RemoteAddrs[i], err = validateAddr(req.RemoteAddrs[i]) - if err != nil { - return fmt.Errorf("invalid remoteAddr %q: %v", req.RemoteAddrs[i], err) - } - } - return nil -} - -func validateAddr(addr string) (string, error) { - if addr == "" { - return addr, fmt.Errorf("invalid empty address") - } - newaddr, err := opts.ParseTCPAddr(addr, defaultAddr) - if err != nil { - return addr, nil - } - return strings.TrimPrefix(newaddr, "tcp://"), nil -} - -func initClusterSpec(node *node, spec types.Spec) error { - ctx, _ := context.WithTimeout(context.Background(), 5*time.Second) - for conn := range node.ListenControlSocket(ctx) { - if ctx.Err() != nil { - return ctx.Err() - } - if conn != nil { - client := swarmapi.NewControlClient(conn) - var cluster *swarmapi.Cluster - for i := 0; ; i++ { - lcr, err := client.ListClusters(ctx, &swarmapi.ListClustersRequest{}) - if err != nil { - return fmt.Errorf("error on listing clusters: %v", err) - } - if len(lcr.Clusters) == 0 { - if i < 10 { - time.Sleep(200 * time.Millisecond) - continue - } - return fmt.Errorf("empty list of clusters was returned") - } - cluster = lcr.Clusters[0] - break - } - newspec, err := convert.SwarmSpecToGRPC(spec) - if err != nil { - return fmt.Errorf("error updating 
cluster settings: %v", err) - } - _, err = client.UpdateCluster(ctx, &swarmapi.UpdateClusterRequest{ - ClusterID: cluster.ID, - ClusterVersion: &cluster.Meta.Version, - Spec: &newspec, - }) - if err != nil { - return fmt.Errorf("error updating cluster settings: %v", err) - } - return nil - } - } - return ctx.Err() -} diff --git a/daemon/cluster/convert/container.go b/daemon/cluster/convert/container.go deleted file mode 100644 index 83cc5342bc..0000000000 --- a/daemon/cluster/convert/container.go +++ /dev/null @@ -1,116 +0,0 @@ -package convert - -import ( - "fmt" - "strings" - - types "github.com/docker/engine-api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" -) - -func containerSpecFromGRPC(c *swarmapi.ContainerSpec) types.ContainerSpec { - containerSpec := types.ContainerSpec{ - Image: c.Image, - Labels: c.Labels, - Command: c.Command, - Args: c.Args, - Env: c.Env, - Dir: c.Dir, - User: c.User, - } - - // Mounts - for _, m := range c.Mounts { - mount := types.Mount{ - Target: m.Target, - Source: m.Source, - Type: types.MountType(strings.ToLower(swarmapi.Mount_MountType_name[int32(m.Type)])), - ReadOnly: m.ReadOnly, - } - - if m.BindOptions != nil { - mount.BindOptions = &types.BindOptions{ - Propagation: types.MountPropagation(strings.ToLower(swarmapi.Mount_BindOptions_MountPropagation_name[int32(m.BindOptions.Propagation)])), - } - } - - if m.VolumeOptions != nil { - mount.VolumeOptions = &types.VolumeOptions{ - NoCopy: m.VolumeOptions.NoCopy, - Labels: m.VolumeOptions.Labels, - } - if m.VolumeOptions.DriverConfig != nil { - mount.VolumeOptions.DriverConfig = &types.Driver{ - Name: m.VolumeOptions.DriverConfig.Name, - Options: m.VolumeOptions.DriverConfig.Options, - } - } - } - containerSpec.Mounts = append(containerSpec.Mounts, mount) - } - - if c.StopGracePeriod != nil { - grace, _ := ptypes.Duration(c.StopGracePeriod) - containerSpec.StopGracePeriod = &grace - } - return containerSpec -} - -func containerToGRPC(c types.ContainerSpec) (*swarmapi.ContainerSpec, error) { - containerSpec := &swarmapi.ContainerSpec{ - Image: c.Image, - Labels: c.Labels, - Command: c.Command, - Args: c.Args, - Env: c.Env, - Dir: c.Dir, - User: c.User, - } - - if c.StopGracePeriod != nil { - containerSpec.StopGracePeriod = ptypes.DurationProto(*c.StopGracePeriod) - } - - // Mounts - for _, m := range c.Mounts { - mount := swarmapi.Mount{ - Target: m.Target, - Source: m.Source, - ReadOnly: m.ReadOnly, - } - - if mountType, ok := swarmapi.Mount_MountType_value[strings.ToUpper(string(m.Type))]; ok { - mount.Type = swarmapi.Mount_MountType(mountType) - } else if string(m.Type) != "" { - return nil, fmt.Errorf("invalid MountType: %q", m.Type) - } - - if m.BindOptions != nil { - if mountPropagation, ok := swarmapi.Mount_BindOptions_MountPropagation_value[strings.ToUpper(string(m.BindOptions.Propagation))]; ok { - mount.BindOptions = &swarmapi.Mount_BindOptions{Propagation: swarmapi.Mount_BindOptions_MountPropagation(mountPropagation)} - } else if string(m.BindOptions.Propagation) != "" { - return nil, fmt.Errorf("invalid MountPropagation: %q", m.BindOptions.Propagation) - - } - - } - - if m.VolumeOptions != nil { - mount.VolumeOptions = &swarmapi.Mount_VolumeOptions{ - NoCopy: m.VolumeOptions.NoCopy, - Labels: m.VolumeOptions.Labels, - } - if m.VolumeOptions.DriverConfig != nil { - mount.VolumeOptions.DriverConfig = &swarmapi.Driver{ - Name: m.VolumeOptions.DriverConfig.Name, - Options: m.VolumeOptions.DriverConfig.Options, - } - } - } - - 
containerSpec.Mounts = append(containerSpec.Mounts, mount) - } - - return containerSpec, nil -} diff --git a/daemon/cluster/convert/network.go b/daemon/cluster/convert/network.go deleted file mode 100644 index 6bb9a8185c..0000000000 --- a/daemon/cluster/convert/network.go +++ /dev/null @@ -1,199 +0,0 @@ -package convert - -import ( - "strings" - - basictypes "github.com/docker/engine-api/types" - networktypes "github.com/docker/engine-api/types/network" - types "github.com/docker/engine-api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" -) - -func networkAttachementFromGRPC(na *swarmapi.NetworkAttachment) types.NetworkAttachment { - if na != nil { - return types.NetworkAttachment{ - Network: networkFromGRPC(na.Network), - Addresses: na.Addresses, - } - } - return types.NetworkAttachment{} -} - -func networkFromGRPC(n *swarmapi.Network) types.Network { - if n != nil { - network := types.Network{ - ID: n.ID, - Spec: types.NetworkSpec{ - IPv6Enabled: n.Spec.Ipv6Enabled, - Internal: n.Spec.Internal, - IPAMOptions: ipamFromGRPC(n.Spec.IPAM), - }, - IPAMOptions: ipamFromGRPC(n.IPAM), - } - - // Meta - network.Version.Index = n.Meta.Version.Index - network.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt) - network.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt) - - //Annotations - network.Spec.Name = n.Spec.Annotations.Name - network.Spec.Labels = n.Spec.Annotations.Labels - - //DriverConfiguration - if n.Spec.DriverConfig != nil { - network.Spec.DriverConfiguration = &types.Driver{ - Name: n.Spec.DriverConfig.Name, - Options: n.Spec.DriverConfig.Options, - } - } - - //DriverState - if n.DriverState != nil { - network.DriverState = types.Driver{ - Name: n.DriverState.Name, - Options: n.DriverState.Options, - } - } - - return network - } - return types.Network{} -} - -func ipamFromGRPC(i *swarmapi.IPAMOptions) *types.IPAMOptions { - var ipam *types.IPAMOptions - if i != nil { - ipam = &types.IPAMOptions{} - if i.Driver != nil { - ipam.Driver.Name = i.Driver.Name - ipam.Driver.Options = i.Driver.Options - } - - for _, config := range i.Configs { - ipam.Configs = append(ipam.Configs, types.IPAMConfig{ - Subnet: config.Subnet, - Range: config.Range, - Gateway: config.Gateway, - }) - } - } - return ipam -} - -func endpointSpecFromGRPC(es *swarmapi.EndpointSpec) *types.EndpointSpec { - var endpointSpec *types.EndpointSpec - if es != nil { - endpointSpec = &types.EndpointSpec{} - endpointSpec.Mode = types.ResolutionMode(strings.ToLower(es.Mode.String())) - - for _, portState := range es.Ports { - endpointSpec.Ports = append(endpointSpec.Ports, types.PortConfig{ - Name: portState.Name, - Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])), - TargetPort: portState.TargetPort, - PublishedPort: portState.PublishedPort, - }) - } - } - return endpointSpec -} - -func endpointFromGRPC(e *swarmapi.Endpoint) types.Endpoint { - endpoint := types.Endpoint{} - if e != nil { - if espec := endpointSpecFromGRPC(e.Spec); espec != nil { - endpoint.Spec = *espec - } - - for _, portState := range e.Ports { - endpoint.Ports = append(endpoint.Ports, types.PortConfig{ - Name: portState.Name, - Protocol: types.PortConfigProtocol(strings.ToLower(swarmapi.PortConfig_Protocol_name[int32(portState.Protocol)])), - TargetPort: portState.TargetPort, - PublishedPort: portState.PublishedPort, - }) - } - - for _, v := range e.VirtualIPs { - endpoint.VirtualIPs = append(endpoint.VirtualIPs, types.EndpointVirtualIP{ - 
NetworkID: v.NetworkID, - Addr: v.Addr}) - } - - } - - return endpoint -} - -// BasicNetworkFromGRPC converts a grpc Network to a NetworkResource. -func BasicNetworkFromGRPC(n swarmapi.Network) basictypes.NetworkResource { - spec := n.Spec - var ipam networktypes.IPAM - if spec.IPAM != nil { - if spec.IPAM.Driver != nil { - ipam.Driver = spec.IPAM.Driver.Name - ipam.Options = spec.IPAM.Driver.Options - } - ipam.Config = make([]networktypes.IPAMConfig, 0, len(spec.IPAM.Configs)) - for _, ic := range spec.IPAM.Configs { - ipamConfig := networktypes.IPAMConfig{ - Subnet: ic.Subnet, - IPRange: ic.Range, - Gateway: ic.Gateway, - AuxAddress: ic.Reserved, - } - ipam.Config = append(ipam.Config, ipamConfig) - } - } - - nr := basictypes.NetworkResource{ - ID: n.ID, - Name: n.Spec.Annotations.Name, - Scope: "swarm", - EnableIPv6: spec.Ipv6Enabled, - IPAM: ipam, - Internal: spec.Internal, - Labels: n.Spec.Annotations.Labels, - } - - if n.DriverState != nil { - nr.Driver = n.DriverState.Name - nr.Options = n.DriverState.Options - } - - return nr -} - -// BasicNetworkCreateToGRPC converts a NetworkCreateRequest to a grpc NetworkSpec. -func BasicNetworkCreateToGRPC(create basictypes.NetworkCreateRequest) swarmapi.NetworkSpec { - ns := swarmapi.NetworkSpec{ - Annotations: swarmapi.Annotations{ - Name: create.Name, - Labels: create.Labels, - }, - DriverConfig: &swarmapi.Driver{ - Name: create.Driver, - Options: create.Options, - }, - Ipv6Enabled: create.EnableIPv6, - Internal: create.Internal, - IPAM: &swarmapi.IPAMOptions{ - Driver: &swarmapi.Driver{ - Name: create.IPAM.Driver, - Options: create.IPAM.Options, - }, - }, - } - ipamSpec := make([]*swarmapi.IPAMConfig, 0, len(create.IPAM.Config)) - for _, ipamConfig := range create.IPAM.Config { - ipamSpec = append(ipamSpec, &swarmapi.IPAMConfig{ - Subnet: ipamConfig.Subnet, - Range: ipamConfig.IPRange, - Gateway: ipamConfig.Gateway, - }) - } - ns.IPAM.Configs = ipamSpec - return ns -} diff --git a/daemon/cluster/convert/node.go b/daemon/cluster/convert/node.go deleted file mode 100644 index 53d7efa428..0000000000 --- a/daemon/cluster/convert/node.go +++ /dev/null @@ -1,88 +0,0 @@ -package convert - -import ( - "fmt" - "strings" - - types "github.com/docker/engine-api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" -) - -// NodeFromGRPC converts a grpc Node to a Node. 
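Editor's note: the converters in network.go lean on the protobuf-generated name tables (for example PortConfig_Protocol_name) to turn enum values into the lowercase strings the engine API exposes. An illustrative reduction of that pattern; the enum and its table here are hypothetical, not swarmkit's generated code:

package main

import (
	"fmt"
	"strings"
)

// Protocol mimics a protobuf-generated enum: an int32 with a generated
// value-to-name table, which is what PortConfig_Protocol_name provides.
type Protocol int32

const (
	ProtocolTCP Protocol = 0
	ProtocolUDP Protocol = 1
)

var protocolName = map[int32]string{
	0: "TCP",
	1: "UDP",
}

// apiProtocol lowercases the generated enum name, matching how the
// converters produce user-facing strings like "tcp" and "udp".
func apiProtocol(p Protocol) string {
	return strings.ToLower(protocolName[int32(p)])
}

func main() {
	fmt.Println(apiProtocol(ProtocolUDP)) // udp
}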
-func NodeFromGRPC(n swarmapi.Node) types.Node { - node := types.Node{ - ID: n.ID, - Spec: types.NodeSpec{ - Role: types.NodeRole(strings.ToLower(n.Spec.Role.String())), - Availability: types.NodeAvailability(strings.ToLower(n.Spec.Availability.String())), - }, - Status: types.NodeStatus{ - State: types.NodeState(strings.ToLower(n.Status.State.String())), - Message: n.Status.Message, - }, - } - - // Meta - node.Version.Index = n.Meta.Version.Index - node.CreatedAt, _ = ptypes.Timestamp(n.Meta.CreatedAt) - node.UpdatedAt, _ = ptypes.Timestamp(n.Meta.UpdatedAt) - - //Annotations - node.Spec.Name = n.Spec.Annotations.Name - node.Spec.Labels = n.Spec.Annotations.Labels - - //Description - if n.Description != nil { - node.Description.Hostname = n.Description.Hostname - if n.Description.Platform != nil { - node.Description.Platform.Architecture = n.Description.Platform.Architecture - node.Description.Platform.OS = n.Description.Platform.OS - } - if n.Description.Resources != nil { - node.Description.Resources.NanoCPUs = n.Description.Resources.NanoCPUs - node.Description.Resources.MemoryBytes = n.Description.Resources.MemoryBytes - } - if n.Description.Engine != nil { - node.Description.Engine.EngineVersion = n.Description.Engine.EngineVersion - node.Description.Engine.Labels = n.Description.Engine.Labels - for _, plugin := range n.Description.Engine.Plugins { - node.Description.Engine.Plugins = append(node.Description.Engine.Plugins, types.PluginDescription{Type: plugin.Type, Name: plugin.Name}) - } - } - } - - //Manager - if n.ManagerStatus != nil { - node.ManagerStatus = &types.ManagerStatus{ - Leader: n.ManagerStatus.Leader, - Reachability: types.Reachability(strings.ToLower(n.ManagerStatus.Reachability.String())), - Addr: n.ManagerStatus.Addr, - } - } - - return node -} - -// NodeSpecToGRPC converts a NodeSpec to a grpc NodeSpec. -func NodeSpecToGRPC(s types.NodeSpec) (swarmapi.NodeSpec, error) { - spec := swarmapi.NodeSpec{ - Annotations: swarmapi.Annotations{ - Name: s.Name, - Labels: s.Labels, - }, - } - if role, ok := swarmapi.NodeRole_value[strings.ToUpper(string(s.Role))]; ok { - spec.Role = swarmapi.NodeRole(role) - } else { - return swarmapi.NodeSpec{}, fmt.Errorf("invalid Role: %q", s.Role) - } - - if availability, ok := swarmapi.NodeSpec_Availability_value[strings.ToUpper(string(s.Availability))]; ok { - spec.Availability = swarmapi.NodeSpec_Availability(availability) - } else { - return swarmapi.NodeSpec{}, fmt.Errorf("invalid Availability: %q", s.Availability) - } - - return spec, nil -} diff --git a/daemon/cluster/convert/service.go b/daemon/cluster/convert/service.go deleted file mode 100644 index 75e7c3bcfa..0000000000 --- a/daemon/cluster/convert/service.go +++ /dev/null @@ -1,311 +0,0 @@ -package convert - -import ( - "fmt" - "strings" - - "github.com/docker/docker/pkg/namesgenerator" - types "github.com/docker/engine-api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" -) - -// ServiceFromGRPC converts a grpc Service to a Service. 
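Editor's note: NodeSpecToGRPC above goes the opposite direction: it uppercases the API string, looks it up in the generated value table, and rejects anything the proto does not define. A condensed sketch of that validation; the table contents are assumed for illustration:

package main

import (
	"fmt"
	"strings"
)

// nodeRoleValue mimics a protobuf-generated name-to-value table,
// as NodeRole_value provides in the generated swarmkit package.
var nodeRoleValue = map[string]int32{
	"WORKER":  0,
	"MANAGER": 1,
}

// parseRole uppercases the API string and consults the generated table,
// rejecting anything the proto does not define - the same check
// NodeSpecToGRPC applies before building the gRPC spec.
func parseRole(s string) (int32, error) {
	v, ok := nodeRoleValue[strings.ToUpper(s)]
	if !ok {
		return 0, fmt.Errorf("invalid Role: %q", s)
	}
	return v, nil
}

func main() {
	fmt.Println(parseRole("manager")) // 1 <nil>
	fmt.Println(parseRole("admin"))   // 0 invalid Role: "admin"
}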
-func ServiceFromGRPC(s swarmapi.Service) types.Service { - spec := s.Spec - containerConfig := spec.Task.Runtime.(*swarmapi.TaskSpec_Container).Container - - networks := make([]types.NetworkAttachmentConfig, 0, len(spec.Networks)) - for _, n := range spec.Networks { - networks = append(networks, types.NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) - } - service := types.Service{ - ID: s.ID, - - Spec: types.ServiceSpec{ - TaskTemplate: types.TaskSpec{ - ContainerSpec: containerSpecFromGRPC(containerConfig), - Resources: resourcesFromGRPC(s.Spec.Task.Resources), - RestartPolicy: restartPolicyFromGRPC(s.Spec.Task.Restart), - Placement: placementFromGRPC(s.Spec.Task.Placement), - LogDriver: driverFromGRPC(s.Spec.Task.LogDriver), - }, - - Networks: networks, - EndpointSpec: endpointSpecFromGRPC(s.Spec.Endpoint), - }, - Endpoint: endpointFromGRPC(s.Endpoint), - } - - // Meta - service.Version.Index = s.Meta.Version.Index - service.CreatedAt, _ = ptypes.Timestamp(s.Meta.CreatedAt) - service.UpdatedAt, _ = ptypes.Timestamp(s.Meta.UpdatedAt) - - // Annotations - service.Spec.Name = s.Spec.Annotations.Name - service.Spec.Labels = s.Spec.Annotations.Labels - - // UpdateConfig - if s.Spec.Update != nil { - service.Spec.UpdateConfig = &types.UpdateConfig{ - Parallelism: s.Spec.Update.Parallelism, - } - - service.Spec.UpdateConfig.Delay, _ = ptypes.Duration(&s.Spec.Update.Delay) - - switch s.Spec.Update.FailureAction { - case swarmapi.UpdateConfig_PAUSE: - service.Spec.UpdateConfig.FailureAction = types.UpdateFailureActionPause - case swarmapi.UpdateConfig_CONTINUE: - service.Spec.UpdateConfig.FailureAction = types.UpdateFailureActionContinue - } - } - - // Mode - switch t := s.Spec.GetMode().(type) { - case *swarmapi.ServiceSpec_Global: - service.Spec.Mode.Global = &types.GlobalService{} - case *swarmapi.ServiceSpec_Replicated: - service.Spec.Mode.Replicated = &types.ReplicatedService{ - Replicas: &t.Replicated.Replicas, - } - } - - // UpdateStatus - service.UpdateStatus = types.UpdateStatus{} - if s.UpdateStatus != nil { - switch s.UpdateStatus.State { - case swarmapi.UpdateStatus_UPDATING: - service.UpdateStatus.State = types.UpdateStateUpdating - case swarmapi.UpdateStatus_PAUSED: - service.UpdateStatus.State = types.UpdateStatePaused - case swarmapi.UpdateStatus_COMPLETED: - service.UpdateStatus.State = types.UpdateStateCompleted - } - - service.UpdateStatus.StartedAt, _ = ptypes.Timestamp(s.UpdateStatus.StartedAt) - service.UpdateStatus.CompletedAt, _ = ptypes.Timestamp(s.UpdateStatus.CompletedAt) - service.UpdateStatus.Message = s.UpdateStatus.Message - } - - return service -} - -// ServiceSpecToGRPC converts a ServiceSpec to a grpc ServiceSpec. 
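Editor's note: ServiceFromGRPC above selects the service mode with a type switch over the protobuf oneof (ServiceSpec_Global versus ServiceSpec_Replicated). The shape of that dispatch, reduced to stand-in types:

package main

import "fmt"

// mode mimics a protobuf oneof: each variant is a distinct wrapper type
// satisfying a common interface, selected with a Go type switch.
type mode interface{ isMode() }

type globalMode struct{}
type replicatedMode struct{ Replicas uint64 }

func (globalMode) isMode()     {}
func (replicatedMode) isMode() {}

// describe performs the same kind of dispatch ServiceFromGRPC does on
// the service's mode variants.
func describe(m mode) string {
	switch t := m.(type) {
	case globalMode:
		return "global: one task per node"
	case replicatedMode:
		return fmt.Sprintf("replicated: %d task(s)", t.Replicas)
	default:
		return "unknown mode"
	}
}

func main() {
	fmt.Println(describe(globalMode{}))
	fmt.Println(describe(replicatedMode{Replicas: 3}))
}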
-func ServiceSpecToGRPC(s types.ServiceSpec) (swarmapi.ServiceSpec, error) { - name := s.Name - if name == "" { - name = namesgenerator.GetRandomName(0) - } - - networks := make([]*swarmapi.ServiceSpec_NetworkAttachmentConfig, 0, len(s.Networks)) - for _, n := range s.Networks { - networks = append(networks, &swarmapi.ServiceSpec_NetworkAttachmentConfig{Target: n.Target, Aliases: n.Aliases}) - } - - spec := swarmapi.ServiceSpec{ - Annotations: swarmapi.Annotations{ - Name: name, - Labels: s.Labels, - }, - Task: swarmapi.TaskSpec{ - Resources: resourcesToGRPC(s.TaskTemplate.Resources), - LogDriver: driverToGRPC(s.TaskTemplate.LogDriver), - }, - Networks: networks, - } - - containerSpec, err := containerToGRPC(s.TaskTemplate.ContainerSpec) - if err != nil { - return swarmapi.ServiceSpec{}, err - } - spec.Task.Runtime = &swarmapi.TaskSpec_Container{Container: containerSpec} - - restartPolicy, err := restartPolicyToGRPC(s.TaskTemplate.RestartPolicy) - if err != nil { - return swarmapi.ServiceSpec{}, err - } - spec.Task.Restart = restartPolicy - - if s.TaskTemplate.Placement != nil { - spec.Task.Placement = &swarmapi.Placement{ - Constraints: s.TaskTemplate.Placement.Constraints, - } - } - - if s.UpdateConfig != nil { - var failureAction swarmapi.UpdateConfig_FailureAction - switch s.UpdateConfig.FailureAction { - case types.UpdateFailureActionPause, "": - failureAction = swarmapi.UpdateConfig_PAUSE - case types.UpdateFailureActionContinue: - failureAction = swarmapi.UpdateConfig_CONTINUE - default: - return swarmapi.ServiceSpec{}, fmt.Errorf("unrecongized update failure action %s", s.UpdateConfig.FailureAction) - } - spec.Update = &swarmapi.UpdateConfig{ - Parallelism: s.UpdateConfig.Parallelism, - Delay: *ptypes.DurationProto(s.UpdateConfig.Delay), - FailureAction: failureAction, - } - } - - if s.EndpointSpec != nil { - if s.EndpointSpec.Mode != "" && - s.EndpointSpec.Mode != types.ResolutionModeVIP && - s.EndpointSpec.Mode != types.ResolutionModeDNSRR { - return swarmapi.ServiceSpec{}, fmt.Errorf("invalid resolution mode: %q", s.EndpointSpec.Mode) - } - - spec.Endpoint = &swarmapi.EndpointSpec{} - - spec.Endpoint.Mode = swarmapi.EndpointSpec_ResolutionMode(swarmapi.EndpointSpec_ResolutionMode_value[strings.ToUpper(string(s.EndpointSpec.Mode))]) - - for _, portConfig := range s.EndpointSpec.Ports { - spec.Endpoint.Ports = append(spec.Endpoint.Ports, &swarmapi.PortConfig{ - Name: portConfig.Name, - Protocol: swarmapi.PortConfig_Protocol(swarmapi.PortConfig_Protocol_value[strings.ToUpper(string(portConfig.Protocol))]), - TargetPort: portConfig.TargetPort, - PublishedPort: portConfig.PublishedPort, - }) - } - } - - //Mode - if s.Mode.Global != nil { - spec.Mode = &swarmapi.ServiceSpec_Global{ - Global: &swarmapi.GlobalService{}, - } - } else if s.Mode.Replicated != nil && s.Mode.Replicated.Replicas != nil { - spec.Mode = &swarmapi.ServiceSpec_Replicated{ - Replicated: &swarmapi.ReplicatedService{Replicas: *s.Mode.Replicated.Replicas}, - } - } else { - spec.Mode = &swarmapi.ServiceSpec_Replicated{ - Replicated: &swarmapi.ReplicatedService{Replicas: 1}, - } - } - - return spec, nil -} - -func resourcesFromGRPC(res *swarmapi.ResourceRequirements) *types.ResourceRequirements { - var resources *types.ResourceRequirements - if res != nil { - resources = &types.ResourceRequirements{} - if res.Limits != nil { - resources.Limits = &types.Resources{ - NanoCPUs: res.Limits.NanoCPUs, - MemoryBytes: res.Limits.MemoryBytes, - } - } - if res.Reservations != nil { - resources.Reservations = &types.Resources{ - 
NanoCPUs: res.Reservations.NanoCPUs, - MemoryBytes: res.Reservations.MemoryBytes, - } - } - } - - return resources -} - -func resourcesToGRPC(res *types.ResourceRequirements) *swarmapi.ResourceRequirements { - var reqs *swarmapi.ResourceRequirements - if res != nil { - reqs = &swarmapi.ResourceRequirements{} - if res.Limits != nil { - reqs.Limits = &swarmapi.Resources{ - NanoCPUs: res.Limits.NanoCPUs, - MemoryBytes: res.Limits.MemoryBytes, - } - } - if res.Reservations != nil { - reqs.Reservations = &swarmapi.Resources{ - NanoCPUs: res.Reservations.NanoCPUs, - MemoryBytes: res.Reservations.MemoryBytes, - } - - } - } - return reqs -} - -func restartPolicyFromGRPC(p *swarmapi.RestartPolicy) *types.RestartPolicy { - var rp *types.RestartPolicy - if p != nil { - rp = &types.RestartPolicy{} - rp.Condition = types.RestartPolicyCondition(strings.ToLower(p.Condition.String())) - if p.Delay != nil { - delay, _ := ptypes.Duration(p.Delay) - rp.Delay = &delay - } - if p.Window != nil { - window, _ := ptypes.Duration(p.Window) - rp.Window = &window - } - - rp.MaxAttempts = &p.MaxAttempts - } - return rp -} - -func restartPolicyToGRPC(p *types.RestartPolicy) (*swarmapi.RestartPolicy, error) { - var rp *swarmapi.RestartPolicy - if p != nil { - rp = &swarmapi.RestartPolicy{} - sanatizedCondition := strings.ToUpper(strings.Replace(string(p.Condition), "-", "_", -1)) - if condition, ok := swarmapi.RestartPolicy_RestartCondition_value[sanatizedCondition]; ok { - rp.Condition = swarmapi.RestartPolicy_RestartCondition(condition) - } else if string(p.Condition) == "" { - rp.Condition = swarmapi.RestartOnAny - } else { - return nil, fmt.Errorf("invalid RestartCondition: %q", p.Condition) - } - - if p.Delay != nil { - rp.Delay = ptypes.DurationProto(*p.Delay) - } - if p.Window != nil { - rp.Window = ptypes.DurationProto(*p.Window) - } - if p.MaxAttempts != nil { - rp.MaxAttempts = *p.MaxAttempts - - } - } - return rp, nil -} - -func placementFromGRPC(p *swarmapi.Placement) *types.Placement { - var r *types.Placement - if p != nil { - r = &types.Placement{} - r.Constraints = p.Constraints - } - - return r -} - -func driverFromGRPC(p *swarmapi.Driver) *types.Driver { - if p == nil { - return nil - } - - return &types.Driver{ - Name: p.Name, - Options: p.Options, - } -} - -func driverToGRPC(p *types.Driver) *swarmapi.Driver { - if p == nil { - return nil - } - - return &swarmapi.Driver{ - Name: p.Name, - Options: p.Options, - } -} diff --git a/daemon/cluster/convert/swarm.go b/daemon/cluster/convert/swarm.go deleted file mode 100644 index c8f9c6f90d..0000000000 --- a/daemon/cluster/convert/swarm.go +++ /dev/null @@ -1,100 +0,0 @@ -package convert - -import ( - "fmt" - "strings" - "time" - - types "github.com/docker/engine-api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" -) - -// SwarmFromGRPC converts a grpc Cluster to a Swarm. 
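Editor's note: restartPolicyToGRPC above normalizes API condition strings such as "on-failure" into proto enum names ("ON_FAILURE"), defaults the empty string, and rejects unknown values. A sketch of that sanitization; the table values are assumed, not swarmkit's real ones:

package main

import (
	"fmt"
	"strings"
)

// restartConditionValue mimics the generated
// RestartPolicy_RestartCondition_value table.
var restartConditionValue = map[string]int32{
	"NONE":       0,
	"ON_FAILURE": 1,
	"ANY":        2,
}

// parseCondition uppercases the API string and swaps dashes for
// underscores to match proto naming, defaults "" to "any", and rejects
// everything else - the same sanitization restartPolicyToGRPC performs.
func parseCondition(s string) (int32, error) {
	if s == "" {
		return restartConditionValue["ANY"], nil
	}
	sanitized := strings.ToUpper(strings.Replace(s, "-", "_", -1))
	v, ok := restartConditionValue[sanitized]
	if !ok {
		return 0, fmt.Errorf("invalid RestartCondition: %q", s)
	}
	return v, nil
}

func main() {
	fmt.Println(parseCondition("on-failure")) // 1 <nil>
	fmt.Println(parseCondition(""))           // 2 <nil>
	fmt.Println(parseCondition("sometimes"))  // 0 invalid RestartCondition
}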
-func SwarmFromGRPC(c swarmapi.Cluster) types.Swarm { - swarm := types.Swarm{ - ClusterInfo: types.ClusterInfo{ - ID: c.ID, - Spec: types.Spec{ - Orchestration: types.OrchestrationConfig{ - TaskHistoryRetentionLimit: c.Spec.Orchestration.TaskHistoryRetentionLimit, - }, - Raft: types.RaftConfig{ - SnapshotInterval: c.Spec.Raft.SnapshotInterval, - KeepOldSnapshots: c.Spec.Raft.KeepOldSnapshots, - LogEntriesForSlowFollowers: c.Spec.Raft.LogEntriesForSlowFollowers, - HeartbeatTick: c.Spec.Raft.HeartbeatTick, - ElectionTick: c.Spec.Raft.ElectionTick, - }, - }, - }, - JoinTokens: types.JoinTokens{ - Worker: c.RootCA.JoinTokens.Worker, - Manager: c.RootCA.JoinTokens.Manager, - }, - } - - heartbeatPeriod, _ := ptypes.Duration(c.Spec.Dispatcher.HeartbeatPeriod) - swarm.Spec.Dispatcher.HeartbeatPeriod = uint64(heartbeatPeriod) - - swarm.Spec.CAConfig.NodeCertExpiry, _ = ptypes.Duration(c.Spec.CAConfig.NodeCertExpiry) - - for _, ca := range c.Spec.CAConfig.ExternalCAs { - swarm.Spec.CAConfig.ExternalCAs = append(swarm.Spec.CAConfig.ExternalCAs, &types.ExternalCA{ - Protocol: types.ExternalCAProtocol(strings.ToLower(ca.Protocol.String())), - URL: ca.URL, - Options: ca.Options, - }) - } - - // Meta - swarm.Version.Index = c.Meta.Version.Index - swarm.CreatedAt, _ = ptypes.Timestamp(c.Meta.CreatedAt) - swarm.UpdatedAt, _ = ptypes.Timestamp(c.Meta.UpdatedAt) - - // Annotations - swarm.Spec.Name = c.Spec.Annotations.Name - swarm.Spec.Labels = c.Spec.Annotations.Labels - - return swarm -} - -// SwarmSpecToGRPC converts a Spec to a grpc ClusterSpec. -func SwarmSpecToGRPC(s types.Spec) (swarmapi.ClusterSpec, error) { - spec := swarmapi.ClusterSpec{ - Annotations: swarmapi.Annotations{ - Name: s.Name, - Labels: s.Labels, - }, - Orchestration: swarmapi.OrchestrationConfig{ - TaskHistoryRetentionLimit: s.Orchestration.TaskHistoryRetentionLimit, - }, - Raft: swarmapi.RaftConfig{ - SnapshotInterval: s.Raft.SnapshotInterval, - KeepOldSnapshots: s.Raft.KeepOldSnapshots, - LogEntriesForSlowFollowers: s.Raft.LogEntriesForSlowFollowers, - HeartbeatTick: s.Raft.HeartbeatTick, - ElectionTick: s.Raft.ElectionTick, - }, - Dispatcher: swarmapi.DispatcherConfig{ - HeartbeatPeriod: ptypes.DurationProto(time.Duration(s.Dispatcher.HeartbeatPeriod)), - }, - CAConfig: swarmapi.CAConfig{ - NodeCertExpiry: ptypes.DurationProto(s.CAConfig.NodeCertExpiry), - }, - } - - for _, ca := range s.CAConfig.ExternalCAs { - protocol, ok := swarmapi.ExternalCA_CAProtocol_value[strings.ToUpper(string(ca.Protocol))] - if !ok { - return swarmapi.ClusterSpec{}, fmt.Errorf("invalid protocol: %q", ca.Protocol) - } - spec.CAConfig.ExternalCAs = append(spec.CAConfig.ExternalCAs, &swarmapi.ExternalCA{ - Protocol: swarmapi.ExternalCA_CAProtocol(protocol), - URL: ca.URL, - Options: ca.Options, - }) - } - - return spec, nil -} diff --git a/daemon/cluster/convert/task.go b/daemon/cluster/convert/task.go deleted file mode 100644 index 4ba85b726a..0000000000 --- a/daemon/cluster/convert/task.go +++ /dev/null @@ -1,54 +0,0 @@ -package convert - -import ( - "strings" - - types "github.com/docker/engine-api/types/swarm" - swarmapi "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/protobuf/ptypes" -) - -// TaskFromGRPC converts a grpc Task to a Task. 
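Editor's note: SwarmSpecToGRPC above round-trips durations through ptypes.DurationProto and ptypes.Duration. A simplified model of those helpers for non-negative durations, with a stand-in wire struct rather than the real protobuf Duration message:

package main

import (
	"fmt"
	"time"
)

// protoDuration is a simplified stand-in for the protobuf Duration message
// that swarmkit's ptypes helpers convert to and from.
type protoDuration struct {
	Seconds int64
	Nanos   int32
}

// durationProto splits a time.Duration into whole seconds plus the
// remaining nanoseconds, as ptypes.DurationProto does for the wire form.
func durationProto(d time.Duration) protoDuration {
	n := d.Nanoseconds()
	return protoDuration{Seconds: n / 1e9, Nanos: int32(n % 1e9)}
}

// duration reassembles the wire form into a time.Duration.
func duration(p protoDuration) time.Duration {
	return time.Duration(p.Seconds)*time.Second + time.Duration(p.Nanos)
}

func main() {
	hb := 5*time.Second + 250*time.Millisecond
	wire := durationProto(hb)
	fmt.Println(wire)           // {5 250000000}
	fmt.Println(duration(wire)) // 5.25s
}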
-func TaskFromGRPC(t swarmapi.Task) types.Task { - containerConfig := t.Spec.Runtime.(*swarmapi.TaskSpec_Container).Container - containerStatus := t.Status.GetContainer() - task := types.Task{ - ID: t.ID, - ServiceID: t.ServiceID, - Slot: int(t.Slot), - NodeID: t.NodeID, - Spec: types.TaskSpec{ - ContainerSpec: containerSpecFromGRPC(containerConfig), - Resources: resourcesFromGRPC(t.Spec.Resources), - RestartPolicy: restartPolicyFromGRPC(t.Spec.Restart), - Placement: placementFromGRPC(t.Spec.Placement), - LogDriver: driverFromGRPC(t.Spec.LogDriver), - }, - Status: types.TaskStatus{ - State: types.TaskState(strings.ToLower(t.Status.State.String())), - Message: t.Status.Message, - Err: t.Status.Err, - }, - DesiredState: types.TaskState(strings.ToLower(t.DesiredState.String())), - } - - // Meta - task.Version.Index = t.Meta.Version.Index - task.CreatedAt, _ = ptypes.Timestamp(t.Meta.CreatedAt) - task.UpdatedAt, _ = ptypes.Timestamp(t.Meta.UpdatedAt) - - task.Status.Timestamp, _ = ptypes.Timestamp(t.Status.Timestamp) - - if containerStatus != nil { - task.Status.ContainerStatus.ContainerID = containerStatus.ContainerID - task.Status.ContainerStatus.PID = int(containerStatus.PID) - task.Status.ContainerStatus.ExitCode = int(containerStatus.ExitCode) - } - - // NetworksAttachments - for _, na := range t.Networks { - task.NetworksAttachments = append(task.NetworksAttachments, networkAttachementFromGRPC(na)) - } - - return task -} diff --git a/daemon/cluster/executor/backend.go b/daemon/cluster/executor/backend.go deleted file mode 100644 index 9fa8ef70b1..0000000000 --- a/daemon/cluster/executor/backend.go +++ /dev/null @@ -1,43 +0,0 @@ -package executor - -import ( - "io" - "time" - - clustertypes "github.com/docker/docker/daemon/cluster/provider" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" - "github.com/docker/libnetwork/cluster" - networktypes "github.com/docker/libnetwork/types" - "golang.org/x/net/context" -) - -// Backend defines the executor component for a swarm agent. 
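Editor's note: TaskFromGRPC above asserts t.Spec.Runtime to *swarmapi.TaskSpec_Container without a comma-ok check, which would panic if a task ever carried a different runtime. A sketch of the defensive variant, using illustrative types:

package main

import "fmt"

// runtime mimics the TaskSpec.Runtime oneof interface.
type runtime interface{ isRuntime() }

type containerRuntime struct{ Image string }

func (containerRuntime) isRuntime() {}

type pluginRuntime struct{}

func (pluginRuntime) isRuntime() {}

// containerImage uses a comma-ok assertion instead of the unchecked
// assertion TaskFromGRPC performs; a non-container runtime then yields an
// error rather than a panic.
func containerImage(r runtime) (string, error) {
	c, ok := r.(containerRuntime)
	if !ok {
		return "", fmt.Errorf("task does not use a container runtime (got %T)", r)
	}
	return c.Image, nil
}

func main() {
	fmt.Println(containerImage(containerRuntime{Image: "nginx:alpine"}))
	fmt.Println(containerImage(pluginRuntime{}))
}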
-type Backend interface { - CreateManagedNetwork(clustertypes.NetworkCreateRequest) error - DeleteManagedNetwork(name string) error - FindNetwork(idName string) (libnetwork.Network, error) - SetupIngress(req clustertypes.NetworkCreateRequest, nodeIP string) error - PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error - CreateManagedContainer(config types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) - ContainerStart(name string, hostConfig *container.HostConfig, validateHostname bool) error - ContainerStop(name string, seconds int) error - ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error - UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error - ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) - ContainerWaitWithContext(ctx context.Context, name string) error - ContainerRm(name string, config *types.ContainerRmConfig) error - ContainerKill(name string, sig uint64) error - SystemInfo() (*types.Info, error) - VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) - ListContainersForNode(nodeID string) []string - SetNetworkBootstrapKeys([]*networktypes.EncryptionKey) error - SetClusterProvider(provider cluster.Provider) - IsSwarmCompatible() error - SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) - UnsubscribeFromEvents(listener chan interface{}) -} diff --git a/daemon/cluster/executor/container/adapter.go b/daemon/cluster/executor/container/adapter.go deleted file mode 100644 index 38ff63afc2..0000000000 --- a/daemon/cluster/executor/container/adapter.go +++ /dev/null @@ -1,273 +0,0 @@ -package container - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/server/httputils" - executorpkg "github.com/docker/docker/daemon/cluster/executor" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/versions" - "github.com/docker/libnetwork" - "github.com/docker/swarmkit/api" - "github.com/docker/swarmkit/log" - "golang.org/x/net/context" -) - -// containerAdapter conducts remote operations for a container. All calls -// are mostly naked calls to the client API, seeded with information from -// containerConfig. 
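Editor's note: a wide interface like Backend is easiest to consume through narrower slices of it, which also keeps fakes small in tests. An illustrative two-method slice with an in-memory fake; this is an editorial sketch, not an interface the engine actually defines:

package main

import "fmt"

// networkBackend is a hypothetical two-method slice of the Backend
// interface above; a consumer that only creates and deletes networks can
// depend on this narrow contract and be tested with a fake.
type networkBackend interface {
	CreateManagedNetwork(name string) error
	DeleteManagedNetwork(name string) error
}

// fakeBackend records state in memory, standing in for the daemon.
type fakeBackend struct{ networks map[string]bool }

func (f *fakeBackend) CreateManagedNetwork(name string) error {
	if f.networks[name] {
		return fmt.Errorf("network %s already exists", name)
	}
	f.networks[name] = true
	return nil
}

func (f *fakeBackend) DeleteManagedNetwork(name string) error {
	if !f.networks[name] {
		return fmt.Errorf("network %s not found", name)
	}
	delete(f.networks, name)
	return nil
}

func main() {
	var b networkBackend = &fakeBackend{networks: map[string]bool{}}
	fmt.Println(b.CreateManagedNetwork("ingress")) // <nil>
	fmt.Println(b.CreateManagedNetwork("ingress")) // already exists
	fmt.Println(b.DeleteManagedNetwork("ingress")) // <nil>
}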
-type containerAdapter struct { - backend executorpkg.Backend - container *containerConfig -} - -func newContainerAdapter(b executorpkg.Backend, task *api.Task) (*containerAdapter, error) { - ctnr, err := newContainerConfig(task) - if err != nil { - return nil, err - } - - return &containerAdapter{ - container: ctnr, - backend: b, - }, nil -} - -func (c *containerAdapter) pullImage(ctx context.Context) error { - spec := c.container.spec() - - // if the image needs to be pulled, the auth config will be retrieved and updated - var encodedAuthConfig string - if spec.PullOptions != nil { - encodedAuthConfig = spec.PullOptions.RegistryAuth - } - - authConfig := &types.AuthConfig{} - if encodedAuthConfig != "" { - if err := json.NewDecoder(base64.NewDecoder(base64.URLEncoding, strings.NewReader(encodedAuthConfig))).Decode(authConfig); err != nil { - logrus.Warnf("invalid authconfig: %v", err) - } - } - - pr, pw := io.Pipe() - metaHeaders := map[string][]string{} - go func() { - err := c.backend.PullImage(ctx, c.container.image(), "", metaHeaders, authConfig, pw) - pw.CloseWithError(err) - }() - - dec := json.NewDecoder(pr) - m := map[string]interface{}{} - for { - if err := dec.Decode(&m); err != nil { - if err == io.EOF { - break - } - return err - } - // TODO(stevvooe): Report this status somewhere. - logrus.Debugln("pull progress", m) - } - // if the final stream object contained an error, return it - if errMsg, ok := m["error"]; ok { - return fmt.Errorf("%v", errMsg) - } - return nil -} - -func (c *containerAdapter) createNetworks(ctx context.Context) error { - for _, network := range c.container.networks() { - ncr, err := c.container.networkCreateRequest(network) - if err != nil { - return err - } - - if err := c.backend.CreateManagedNetwork(ncr); err != nil { // todo name missing - if _, ok := err.(libnetwork.NetworkNameError); ok { - continue - } - - return err - } - } - - return nil -} - -func (c *containerAdapter) removeNetworks(ctx context.Context) error { - for _, nid := range c.container.networks() { - if err := c.backend.DeleteManagedNetwork(nid); err != nil { - if _, ok := err.(*libnetwork.ActiveEndpointsError); ok { - continue - } - log.G(ctx).Errorf("network %s remove failed: %v", nid, err) - return err - } - } - - return nil -} - -func (c *containerAdapter) create(ctx context.Context, backend executorpkg.Backend) error { - var cr types.ContainerCreateResponse - var err error - version := httputils.VersionFromContext(ctx) - validateHostname := versions.GreaterThanOrEqualTo(version, "1.24") - - if cr, err = backend.CreateManagedContainer(types.ContainerCreateConfig{ - Name: c.container.name(), - Config: c.container.config(), - HostConfig: c.container.hostConfig(), - // Use the first network in container create - NetworkingConfig: c.container.createNetworkingConfig(), - }, validateHostname); err != nil { - return err - } - - // Docker daemon currently doesn't support multiple networks in container create - // Connect to all other networks - nc := c.container.connectNetworkingConfig() - - if nc != nil { - for n, ep := range nc.EndpointsConfig { - if err := backend.ConnectContainerToNetwork(cr.ID, n, ep); err != nil { - return err - } - } - } - - if err := backend.UpdateContainerServiceConfig(cr.ID, c.container.serviceConfig()); err != nil { - return err - } - - return nil -} - -func (c *containerAdapter) start(ctx context.Context) error { - version := httputils.VersionFromContext(ctx) - validateHostname := versions.GreaterThanOrEqualTo(version, "1.24") - return 
c.backend.ContainerStart(c.container.name(), nil, validateHostname) -} - -func (c *containerAdapter) inspect(ctx context.Context) (types.ContainerJSON, error) { - cs, err := c.backend.ContainerInspectCurrent(c.container.name(), false) - if ctx.Err() != nil { - return types.ContainerJSON{}, ctx.Err() - } - if err != nil { - return types.ContainerJSON{}, err - } - return *cs, nil -} - -// events issues a call to the events API and returns a channel with all -// events. The stream of events can be shutdown by cancelling the context. -func (c *containerAdapter) events(ctx context.Context) <-chan events.Message { - log.G(ctx).Debugf("waiting on events") - buffer, l := c.backend.SubscribeToEvents(time.Time{}, time.Time{}, c.container.eventFilter()) - eventsq := make(chan events.Message, len(buffer)) - - for _, event := range buffer { - eventsq <- event - } - - go func() { - defer c.backend.UnsubscribeFromEvents(l) - - for { - select { - case ev := <-l: - jev, ok := ev.(events.Message) - if !ok { - log.G(ctx).Warnf("unexpected event message: %q", ev) - continue - } - select { - case eventsq <- jev: - case <-ctx.Done(): - return - } - case <-ctx.Done(): - return - } - } - }() - - return eventsq -} - -func (c *containerAdapter) wait(ctx context.Context) error { - return c.backend.ContainerWaitWithContext(ctx, c.container.name()) -} - -func (c *containerAdapter) shutdown(ctx context.Context) error { - // Default stop grace period to 10s. - stopgrace := 10 - spec := c.container.spec() - if spec.StopGracePeriod != nil { - stopgrace = int(spec.StopGracePeriod.Seconds) - } - return c.backend.ContainerStop(c.container.name(), stopgrace) -} - -func (c *containerAdapter) terminate(ctx context.Context) error { - return c.backend.ContainerKill(c.container.name(), uint64(syscall.SIGKILL)) -} - -func (c *containerAdapter) remove(ctx context.Context) error { - return c.backend.ContainerRm(c.container.name(), &types.ContainerRmConfig{ - RemoveVolume: true, - ForceRemove: true, - }) -} - -func (c *containerAdapter) createVolumes(ctx context.Context, backend executorpkg.Backend) error { - // Create plugin volumes that are embedded inside a Mount - for _, mount := range c.container.task.Spec.GetContainer().Mounts { - if mount.Type != api.MountTypeVolume { - continue - } - - if mount.VolumeOptions == nil { - continue - } - - if mount.VolumeOptions.DriverConfig == nil { - continue - } - - req := c.container.volumeCreateRequest(&mount) - - // Check if this volume exists on the engine - if _, err := backend.VolumeCreate(req.Name, req.Driver, req.DriverOpts, req.Labels); err != nil { - // TODO(amitshukla): Today, volume create through the engine api does not return an error - // when the named volume with the same parameters already exists. - // It returns an error if the driver name is different - that is a valid error - return err - } - - } - - return nil -} - -// todo: typed/wrapped errors -func isContainerCreateNameConflict(err error) bool { - return strings.Contains(err.Error(), "Conflict. 
The name") -} - -func isUnknownContainer(err error) bool { - return strings.Contains(err.Error(), "No such container:") -} - -func isStoppedContainer(err error) bool { - return strings.Contains(err.Error(), "is already stopped") -} diff --git a/daemon/cluster/executor/container/container.go b/daemon/cluster/executor/container/container.go deleted file mode 100644 index 19e23558ec..0000000000 --- a/daemon/cluster/executor/container/container.go +++ /dev/null @@ -1,508 +0,0 @@ -package container - -import ( - "errors" - "fmt" - "net" - "strings" - "time" - - "github.com/Sirupsen/logrus" - - clustertypes "github.com/docker/docker/daemon/cluster/provider" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - enginecontainer "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/network" - "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/api" -) - -const ( - // Explicitly use the kernel's default setting for CPU quota of 100ms. - // https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt - cpuQuotaPeriod = 100 * time.Millisecond - - // systemLabelPrefix represents the reserved namespace for system labels. - systemLabelPrefix = "com.docker.swarm" -) - -// containerConfig converts task properties into docker container compatible -// components. -type containerConfig struct { - task *api.Task - networksAttachments map[string]*api.NetworkAttachment -} - -// newContainerConfig returns a validated container config. No methods should -// return an error if this function returns without error. -func newContainerConfig(t *api.Task) (*containerConfig, error) { - var c containerConfig - return &c, c.setTask(t) -} - -func (c *containerConfig) setTask(t *api.Task) error { - container := t.Spec.GetContainer() - if container == nil { - return exec.ErrRuntimeUnsupported - } - - if container.Image == "" { - return ErrImageRequired - } - - if err := validateMounts(container.Mounts); err != nil { - return err - } - - // index the networks by name - c.networksAttachments = make(map[string]*api.NetworkAttachment, len(t.Networks)) - for _, attachment := range t.Networks { - c.networksAttachments[attachment.Network.Spec.Annotations.Name] = attachment - } - - c.task = t - return nil -} - -func (c *containerConfig) endpoint() *api.Endpoint { - return c.task.Endpoint -} - -func (c *containerConfig) spec() *api.ContainerSpec { - return c.task.Spec.GetContainer() -} - -func (c *containerConfig) name() string { - if c.task.Annotations.Name != "" { - // if set, use the container Annotations.Name field, set in the orchestrator. - return c.task.Annotations.Name - } - - // fallback to service.slot.id. - return strings.Join([]string{c.task.ServiceAnnotations.Name, fmt.Sprint(c.task.Slot), c.task.ID}, ".") -} - -func (c *containerConfig) image() string { - raw := c.spec().Image - ref, err := reference.ParseNamed(raw) - if err != nil { - return raw - } - return reference.WithDefaultTag(ref).String() -} - -func (c *containerConfig) config() *enginecontainer.Config { - config := &enginecontainer.Config{ - Labels: c.labels(), - User: c.spec().User, - Env: c.spec().Env, - WorkingDir: c.spec().Dir, - Image: c.image(), - Volumes: c.volumes(), - } - - if len(c.spec().Command) > 0 { - // If Command is provided, we replace the whole invocation with Command - // by replacing Entrypoint and specifying Cmd. Args is ignored in this - // case. 
- config.Entrypoint = append(config.Entrypoint, c.spec().Command...) - config.Cmd = append(config.Cmd, c.spec().Args...) - } else if len(c.spec().Args) > 0 { - // In this case, we assume the image has an Entrypoint and Args - // specifies the arguments for that entrypoint. - config.Cmd = c.spec().Args - } - - return config -} - -func (c *containerConfig) labels() map[string]string { - var ( - system = map[string]string{ - "task": "", // mark as cluster task - "task.id": c.task.ID, - "task.name": fmt.Sprintf("%v.%v", c.task.ServiceAnnotations.Name, c.task.Slot), - "node.id": c.task.NodeID, - "service.id": c.task.ServiceID, - "service.name": c.task.ServiceAnnotations.Name, - } - labels = make(map[string]string) - ) - - // base labels are those defined in the spec. - for k, v := range c.spec().Labels { - labels[k] = v - } - - // we then apply the overrides from the task, which may be set via the - // orchestrator. - for k, v := range c.task.Annotations.Labels { - labels[k] = v - } - - // finally, we apply the system labels, which override all labels. - for k, v := range system { - labels[strings.Join([]string{systemLabelPrefix, k}, ".")] = v - } - - return labels -} - -// volumes gets placed into the Volumes field on the containerConfig. -func (c *containerConfig) volumes() map[string]struct{} { - r := make(map[string]struct{}) - // Volumes *only* creates anonymous volumes. The rest is mixed in with - // binds, which aren't actually binds. Basically, any volume that - // results in a single component must be added here. - // - // This is reversed engineered from the behavior of the engine API. - for _, mount := range c.spec().Mounts { - if mount.Type == api.MountTypeVolume && mount.Source == "" { - r[mount.Target] = struct{}{} - } - } - return r -} - -func (c *containerConfig) tmpfs() map[string]string { - r := make(map[string]string) - - for _, spec := range c.spec().Mounts { - if spec.Type != api.MountTypeTmpfs { - continue - } - - r[spec.Target] = getMountMask(&spec) - } - - return r -} - -func (c *containerConfig) binds() []string { - var r []string - for _, mount := range c.spec().Mounts { - if mount.Type == api.MountTypeBind || (mount.Type == api.MountTypeVolume && mount.Source != "") { - spec := fmt.Sprintf("%s:%s", mount.Source, mount.Target) - mask := getMountMask(&mount) - if mask != "" { - spec = fmt.Sprintf("%s:%s", spec, mask) - } - r = append(r, spec) - } - } - return r -} - -func getMountMask(m *api.Mount) string { - var maskOpts []string - if m.ReadOnly { - maskOpts = append(maskOpts, "ro") - } - - switch m.Type { - case api.MountTypeVolume: - if m.VolumeOptions != nil && m.VolumeOptions.NoCopy { - maskOpts = append(maskOpts, "nocopy") - } - case api.MountTypeBind: - if m.BindOptions == nil { - break - } - - switch m.BindOptions.Propagation { - case api.MountPropagationPrivate: - maskOpts = append(maskOpts, "private") - case api.MountPropagationRPrivate: - maskOpts = append(maskOpts, "rprivate") - case api.MountPropagationShared: - maskOpts = append(maskOpts, "shared") - case api.MountPropagationRShared: - maskOpts = append(maskOpts, "rshared") - case api.MountPropagationSlave: - maskOpts = append(maskOpts, "slave") - case api.MountPropagationRSlave: - maskOpts = append(maskOpts, "rslave") - } - case api.MountTypeTmpfs: - if m.TmpfsOptions == nil { - break - } - - if m.TmpfsOptions.Mode != 0 { - maskOpts = append(maskOpts, fmt.Sprintf("mode=%o", m.TmpfsOptions.Mode)) - } - - if m.TmpfsOptions.SizeBytes != 0 { - // calculate suffix here, making this linux specific, but that is 
- // okay, since API is that way anyways. - - // we do this by finding the suffix that divides evenly into the - // value, returing the value itself, with no suffix, if it fails. - // - // For the most part, we don't enforce any semantic to this values. - // The operating system will usually align this and enforce minimum - // and maximums. - var ( - size = m.TmpfsOptions.SizeBytes - suffix string - ) - for _, r := range []struct { - suffix string - divisor int64 - }{ - {"g", 1 << 30}, - {"m", 1 << 20}, - {"k", 1 << 10}, - } { - if size%r.divisor == 0 { - size = size / r.divisor - suffix = r.suffix - break - } - } - - maskOpts = append(maskOpts, fmt.Sprintf("size=%d%s", size, suffix)) - } - } - - return strings.Join(maskOpts, ",") -} - -func (c *containerConfig) hostConfig() *enginecontainer.HostConfig { - hc := &enginecontainer.HostConfig{ - Resources: c.resources(), - Binds: c.binds(), - Tmpfs: c.tmpfs(), - } - - if c.task.LogDriver != nil { - hc.LogConfig = enginecontainer.LogConfig{ - Type: c.task.LogDriver.Name, - Config: c.task.LogDriver.Options, - } - } - - return hc -} - -// This handles the case of volumes that are defined inside a service Mount -func (c *containerConfig) volumeCreateRequest(mount *api.Mount) *types.VolumeCreateRequest { - var ( - driverName string - driverOpts map[string]string - labels map[string]string - ) - - if mount.VolumeOptions != nil && mount.VolumeOptions.DriverConfig != nil { - driverName = mount.VolumeOptions.DriverConfig.Name - driverOpts = mount.VolumeOptions.DriverConfig.Options - labels = mount.VolumeOptions.Labels - } - - if mount.VolumeOptions != nil { - return &types.VolumeCreateRequest{ - Name: mount.Source, - Driver: driverName, - DriverOpts: driverOpts, - Labels: labels, - } - } - return nil -} - -func (c *containerConfig) resources() enginecontainer.Resources { - resources := enginecontainer.Resources{} - - // If no limits are specified let the engine use its defaults. - // - // TODO(aluzzardi): We might want to set some limits anyway otherwise - // "unlimited" tasks will step over the reservation of other tasks. - r := c.task.Spec.Resources - if r == nil || r.Limits == nil { - return resources - } - - if r.Limits.MemoryBytes > 0 { - resources.Memory = r.Limits.MemoryBytes - } - - if r.Limits.NanoCPUs > 0 { - // CPU Period must be set in microseconds. - resources.CPUPeriod = int64(cpuQuotaPeriod / time.Microsecond) - resources.CPUQuota = r.Limits.NanoCPUs * resources.CPUPeriod / 1e9 - } - - return resources -} - -// Docker daemon supports just 1 network during container create. -func (c *containerConfig) createNetworkingConfig() *network.NetworkingConfig { - var networks []*api.NetworkAttachment - if c.task.Spec.GetContainer() != nil { - networks = c.task.Networks - } - - epConfig := make(map[string]*network.EndpointSettings) - if len(networks) > 0 { - epConfig[networks[0].Network.Spec.Annotations.Name] = getEndpointConfig(networks[0]) - } - - return &network.NetworkingConfig{EndpointsConfig: epConfig} -} - -// TODO: Merge this function with createNetworkingConfig after daemon supports multiple networks in container create -func (c *containerConfig) connectNetworkingConfig() *network.NetworkingConfig { - var networks []*api.NetworkAttachment - if c.task.Spec.GetContainer() != nil { - networks = c.task.Networks - } - - // First network is used during container create. 
Other networks are used in "docker network connect" - if len(networks) < 2 { - return nil - } - - epConfig := make(map[string]*network.EndpointSettings) - for _, na := range networks[1:] { - epConfig[na.Network.Spec.Annotations.Name] = getEndpointConfig(na) - } - return &network.NetworkingConfig{EndpointsConfig: epConfig} -} - -func getEndpointConfig(na *api.NetworkAttachment) *network.EndpointSettings { - var ipv4, ipv6 string - for _, addr := range na.Addresses { - ip, _, err := net.ParseCIDR(addr) - if err != nil { - continue - } - - if ip.To4() != nil { - ipv4 = ip.String() - continue - } - - if ip.To16() != nil { - ipv6 = ip.String() - } - } - - return &network.EndpointSettings{ - IPAMConfig: &network.EndpointIPAMConfig{ - IPv4Address: ipv4, - IPv6Address: ipv6, - }, - } -} - -func (c *containerConfig) virtualIP(networkID string) string { - if c.task.Endpoint == nil { - return "" - } - - for _, eVip := range c.task.Endpoint.VirtualIPs { - // We only support IPv4 VIPs for now. - if eVip.NetworkID == networkID { - vip, _, err := net.ParseCIDR(eVip.Addr) - if err != nil { - return "" - } - - return vip.String() - } - } - - return "" -} - -func (c *containerConfig) serviceConfig() *clustertypes.ServiceConfig { - if len(c.task.Networks) == 0 { - return nil - } - - logrus.Debugf("Creating service config in agent for t = %+v", c.task) - svcCfg := &clustertypes.ServiceConfig{ - Name: c.task.ServiceAnnotations.Name, - Aliases: make(map[string][]string), - ID: c.task.ServiceID, - VirtualAddresses: make(map[string]*clustertypes.VirtualAddress), - } - - for _, na := range c.task.Networks { - svcCfg.VirtualAddresses[na.Network.ID] = &clustertypes.VirtualAddress{ - // We support only IPv4 virtual IP for now. - IPv4: c.virtualIP(na.Network.ID), - } - if len(na.Aliases) > 0 { - svcCfg.Aliases[na.Network.ID] = na.Aliases - } - } - - if c.task.Endpoint != nil { - for _, ePort := range c.task.Endpoint.Ports { - svcCfg.ExposedPorts = append(svcCfg.ExposedPorts, &clustertypes.PortConfig{ - Name: ePort.Name, - Protocol: int32(ePort.Protocol), - TargetPort: ePort.TargetPort, - PublishedPort: ePort.PublishedPort, - }) - } - } - - return svcCfg -} - -// networks returns a list of network names attached to the container. The -// returned name can be used to lookup the corresponding network create -// options. 
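Editor's note: the getEndpointConfig helper above derives the endpoint's IPv4 and IPv6 addresses by running each attachment address through net.ParseCIDR and branching on To4. A standalone sketch of that split; this version keeps the first address of each family and skips malformed entries:

package main

import (
	"fmt"
	"net"
)

// splitAddresses walks CIDR-notated attachment addresses and collects one
// IPv4 and one IPv6, similar to what getEndpointConfig does when building
// the EndpointIPAMConfig.
func splitAddresses(addrs []string) (ipv4, ipv6 string) {
	for _, addr := range addrs {
		ip, _, err := net.ParseCIDR(addr)
		if err != nil {
			continue // ignore malformed entries rather than failing
		}
		if v4 := ip.To4(); v4 != nil {
			if ipv4 == "" {
				ipv4 = v4.String()
			}
			continue
		}
		if ipv6 == "" {
			ipv6 = ip.String()
		}
	}
	return ipv4, ipv6
}

func main() {
	v4, v6 := splitAddresses([]string{"10.0.0.5/24", "fd00::5/64", "bogus"})
	fmt.Println(v4, v6) // 10.0.0.5 fd00::5
}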
-func (c *containerConfig) networks() []string {
- var networks []string
-
- for name := range c.networksAttachments {
- networks = append(networks, name)
- }
-
- return networks
-}
-
-func (c *containerConfig) networkCreateRequest(name string) (clustertypes.NetworkCreateRequest, error) {
- na, ok := c.networksAttachments[name]
- if !ok {
- return clustertypes.NetworkCreateRequest{}, errors.New("container: unknown network referenced")
- }
-
- options := types.NetworkCreate{
- // ID: na.Network.ID,
- Driver: na.Network.DriverState.Name,
- IPAM: network.IPAM{
- Driver: na.Network.IPAM.Driver.Name,
- },
- Options: na.Network.DriverState.Options,
- Labels: na.Network.Spec.Annotations.Labels,
- Internal: na.Network.Spec.Internal,
- EnableIPv6: na.Network.Spec.Ipv6Enabled,
- CheckDuplicate: true,
- }
-
- for _, ic := range na.Network.IPAM.Configs {
- c := network.IPAMConfig{
- Subnet: ic.Subnet,
- IPRange: ic.Range,
- Gateway: ic.Gateway,
- }
- options.IPAM.Config = append(options.IPAM.Config, c)
- }
-
- return clustertypes.NetworkCreateRequest{na.Network.ID, types.NetworkCreateRequest{Name: name, NetworkCreate: options}}, nil
-}
-
-func (c containerConfig) eventFilter() filters.Args {
- filter := filters.NewArgs()
- filter.Add("type", events.ContainerEventType)
- filter.Add("name", c.name())
- filter.Add("label", fmt.Sprintf("%v.task.id=%v", systemLabelPrefix, c.task.ID))
- return filter
-}
diff --git a/daemon/cluster/executor/container/controller.go b/daemon/cluster/executor/container/controller.go
deleted file mode 100644
index 5f865aae4c..0000000000
--- a/daemon/cluster/executor/container/controller.go
+++ /dev/null
@@ -1,457 +0,0 @@
-package container
-
-import (
- "fmt"
- "os"
-
- executorpkg "github.com/docker/docker/daemon/cluster/executor"
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/events"
- "github.com/docker/libnetwork"
- "github.com/docker/swarmkit/agent/exec"
- "github.com/docker/swarmkit/api"
- "github.com/docker/swarmkit/log"
- "github.com/pkg/errors"
- "golang.org/x/net/context"
-)
-
-// controller implements agent.Controller against docker's API.
-//
-// Most operations against docker's API are done through the container name,
-// which is unique to the task.
-type controller struct {
- backend executorpkg.Backend
- task *api.Task
- adapter *containerAdapter
- closed chan struct{}
- err error
-
- pulled chan struct{} // closed after pull
- cancelPull func() // cancels pull context if not nil
- pullErr error // pull error, only read after pulled closed
-}
-
-var _ exec.Controller = &controller{}
-
-// newController returns a dockerexec runner for the provided task.
-func newController(b executorpkg.Backend, task *api.Task) (*controller, error) {
- adapter, err := newContainerAdapter(b, task)
- if err != nil {
- return nil, err
- }
-
- return &controller{
- backend: b,
- task: task,
- adapter: adapter,
- closed: make(chan struct{}),
- }, nil
-}
-
-func (r *controller) Task() (*api.Task, error) {
- return r.task, nil
-}
-
-// ContainerStatus returns the container-specific status for the task.
-func (r *controller) ContainerStatus(ctx context.Context) (*api.ContainerStatus, error) {
- ctnr, err := r.adapter.inspect(ctx)
- if err != nil {
- if isUnknownContainer(err) {
- return nil, nil
- }
- return nil, err
- }
- return parseContainerStatus(ctnr)
-}
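Prepare (below) forks the image pull onto a background context so that re-entrant calls remain idempotent and a cancelled caller does not abort the pull. A standalone sketch of that pattern, assuming illustrative names (puller, fetch) and no concurrent callers; this is not the daemon's API:

package main

import (
	"context"
	"errors"
	"fmt"
)

// puller runs an expensive fetch once, detached from any caller context,
// so repeated Prepare-style calls can all wait on the same result.
type puller struct {
	done   chan struct{}      // closed when the fetch finishes
	cancel context.CancelFunc // cancels the fetch context if needed
	err    error              // read only after done is closed
}

func (p *puller) start(fetch func(context.Context) error) {
	if p.done != nil {
		return // already started; later callers just wait below
	}
	p.done = make(chan struct{})
	var pctx context.Context
	pctx, p.cancel = context.WithCancel(context.Background())
	go func() {
		defer close(p.done)
		p.err = fetch(pctx)
	}()
}

func (p *puller) wait(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err() // caller gave up; the fetch keeps running
	case <-p.done:
		return p.err
	}
}

func main() {
	p := &puller{}
	p.start(func(context.Context) error { return errors.New("registry down") })
	fmt.Println(p.wait(context.Background())) // prints the fetch error
}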
-
-// Update takes a recent task update and applies it to the container.
-func (r *controller) Update(ctx context.Context, t *api.Task) error {
- // TODO(stevvooe): While assignment of tasks is idempotent, we do allow
- // updates of metadata, such as labelling, as well as any other properties
- // that make sense.
- return nil
-}
-
-// Prepare creates a container and ensures the image is pulled.
-//
-// If the container has already been created, exec.ErrTaskPrepared is returned.
-func (r *controller) Prepare(ctx context.Context) error {
- if err := r.checkClosed(); err != nil {
- return err
- }
-
- // Make sure all the networks that the task needs are created.
- if err := r.adapter.createNetworks(ctx); err != nil {
- return err
- }
-
- // Make sure all the volumes that the task needs are created.
- if err := r.adapter.createVolumes(ctx, r.backend); err != nil {
- return err
- }
-
- if os.Getenv("DOCKER_SERVICE_PREFER_OFFLINE_IMAGE") != "1" {
- if r.pulled == nil {
- // Fork the pull to a different context to allow pull to continue
- // on re-entrant calls to Prepare. This ensures that Prepare can be
- // idempotent and not incur the extra cost of pulling when
- // cancelled on updates.
- var pctx context.Context
-
- r.pulled = make(chan struct{})
- pctx, r.cancelPull = context.WithCancel(context.Background()) // TODO(stevvooe): Bind a context to the entire controller.
-
- go func() {
- defer close(r.pulled)
- r.pullErr = r.adapter.pullImage(pctx) // protected by closing r.pulled
- }()
- }
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-r.pulled:
- if r.pullErr != nil {
- // NOTE(stevvooe): We always try to pull the image to make sure we have
- // the most up-to-date version. This will return an error, but we only
- // log it. If the image truly doesn't exist, the create below will
- // error out.
- //
- // This gives us some nice behavior where we use up-to-date versions of
- // mutable tags, but will still run if the old image is available but a
- // registry is down.
- //
- // If you don't want this behavior, lock down your image to an
- // immutable tag or digest.
- log.G(ctx).WithError(r.pullErr).Error("pulling image failed")
- }
- }
- }
-
- if err := r.adapter.create(ctx, r.backend); err != nil {
- if isContainerCreateNameConflict(err) {
- if _, err := r.adapter.inspect(ctx); err != nil {
- return err
- }
-
- // container is already created. success!
- return exec.ErrTaskPrepared
- }
-
- return err
- }
-
- return nil
-}
-
-// Start the container. An error will be returned if the container is already started.
-func (r *controller) Start(ctx context.Context) error {
- if err := r.checkClosed(); err != nil {
- return err
- }
-
- ctnr, err := r.adapter.inspect(ctx)
- if err != nil {
- return err
- }
-
- // Detect whether the container has *ever* been started. If so, we don't
- // issue the start.
- //
- // TODO(stevvooe): This is very racy. While reading inspect, another could
- // start the process and we could end up starting it twice.
- if ctnr.State.Status != "created" {
- return exec.ErrTaskStarted
- }
-
- for {
- if err := r.adapter.start(ctx); err != nil {
- if _, ok := err.(libnetwork.ErrNoSuchNetwork); ok {
- // Retry network creation again if we
- // failed because some of the networks
- // were not found.
- if err := r.adapter.createNetworks(ctx); err != nil { - return err - } - - continue - } - - return errors.Wrap(err, "starting container failed") - } - - break - } - - // no health check - if ctnr.Config == nil || ctnr.Config.Healthcheck == nil { - return nil - } - - healthCmd := ctnr.Config.Healthcheck.Test - - if len(healthCmd) == 0 || healthCmd[0] == "NONE" { - return nil - } - - // wait for container to be healthy - eventq := r.adapter.events(ctx) - - var healthErr error - for { - select { - case event := <-eventq: - if !r.matchevent(event) { - continue - } - - switch event.Action { - case "die": // exit on terminal events - ctnr, err := r.adapter.inspect(ctx) - if err != nil { - return errors.Wrap(err, "die event received") - } else if ctnr.State.ExitCode != 0 { - return &exitError{code: ctnr.State.ExitCode, cause: healthErr} - } - - return nil - case "destroy": - // If we get here, something has gone wrong but we want to exit - // and report anyways. - return ErrContainerDestroyed - case "health_status: unhealthy": - // in this case, we stop the container and report unhealthy status - if err := r.Shutdown(ctx); err != nil { - return errors.Wrap(err, "unhealthy container shutdown failed") - } - // set health check error, and wait for container to fully exit ("die" event) - healthErr = ErrContainerUnhealthy - case "health_status: healthy": - return nil - } - case <-ctx.Done(): - return ctx.Err() - case <-r.closed: - return r.err - } - } -} - -// Wait on the container to exit. -func (r *controller) Wait(pctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - ctx, cancel := context.WithCancel(pctx) - defer cancel() - - healthErr := make(chan error, 1) - go func() { - ectx, cancel := context.WithCancel(ctx) // cancel event context on first event - defer cancel() - if err := r.checkHealth(ectx); err == ErrContainerUnhealthy { - healthErr <- ErrContainerUnhealthy - if err := r.Shutdown(ectx); err != nil { - log.G(ectx).WithError(err).Debug("shutdown failed on unhealthy") - } - } - }() - - err := r.adapter.wait(ctx) - if ctx.Err() != nil { - return ctx.Err() - } - - if err != nil { - ee := &exitError{} - if ec, ok := err.(exec.ExitCoder); ok { - ee.code = ec.ExitCode() - } - select { - case e := <-healthErr: - ee.cause = e - default: - if err.Error() != "" { - ee.cause = err - } - } - return ee - } - - return nil -} - -// Shutdown the container cleanly. -func (r *controller) Shutdown(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - if r.cancelPull != nil { - r.cancelPull() - } - - if err := r.adapter.shutdown(ctx); err != nil { - if isUnknownContainer(err) || isStoppedContainer(err) { - return nil - } - - return err - } - - return nil -} - -// Terminate the container, with force. -func (r *controller) Terminate(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - if r.cancelPull != nil { - r.cancelPull() - } - - if err := r.adapter.terminate(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - - return err - } - - return nil -} - -// Remove the container and its resources. -func (r *controller) Remove(ctx context.Context) error { - if err := r.checkClosed(); err != nil { - return err - } - - if r.cancelPull != nil { - r.cancelPull() - } - - // It may be necessary to shut down the task before removing it. - if err := r.Shutdown(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - // This may fail if the task was already shut down. 
- log.G(ctx).WithError(err).Debug("shutdown failed on removal") - } - - // Try removing networks referenced in this task in case this - // task is the last one referencing it - if err := r.adapter.removeNetworks(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - return err - } - - if err := r.adapter.remove(ctx); err != nil { - if isUnknownContainer(err) { - return nil - } - - return err - } - return nil -} - -// Close the runner and clean up any ephemeral resources. -func (r *controller) Close() error { - select { - case <-r.closed: - return r.err - default: - if r.cancelPull != nil { - r.cancelPull() - } - - r.err = exec.ErrControllerClosed - close(r.closed) - } - return nil -} - -func (r *controller) matchevent(event events.Message) bool { - if event.Type != events.ContainerEventType { - return false - } - - // TODO(stevvooe): Filter based on ID matching, in addition to name. - - // Make sure the events are for this container. - if event.Actor.Attributes["name"] != r.adapter.container.name() { - return false - } - - return true -} - -func (r *controller) checkClosed() error { - select { - case <-r.closed: - return r.err - default: - return nil - } -} - -func parseContainerStatus(ctnr types.ContainerJSON) (*api.ContainerStatus, error) { - status := &api.ContainerStatus{ - ContainerID: ctnr.ID, - PID: int32(ctnr.State.Pid), - ExitCode: int32(ctnr.State.ExitCode), - } - - return status, nil -} - -type exitError struct { - code int - cause error -} - -func (e *exitError) Error() string { - if e.cause != nil { - return fmt.Sprintf("task: non-zero exit (%v): %v", e.code, e.cause) - } - - return fmt.Sprintf("task: non-zero exit (%v)", e.code) -} - -func (e *exitError) ExitCode() int { - return int(e.code) -} - -func (e *exitError) Cause() error { - return e.cause -} - -// checkHealth blocks until unhealthy container is detected or ctx exits -func (r *controller) checkHealth(ctx context.Context) error { - eventq := r.adapter.events(ctx) - - for { - select { - case <-ctx.Done(): - return nil - case <-r.closed: - return nil - case event := <-eventq: - if !r.matchevent(event) { - continue - } - - switch event.Action { - case "health_status: unhealthy": - return ErrContainerUnhealthy - } - } - } -} diff --git a/daemon/cluster/executor/container/errors.go b/daemon/cluster/executor/container/errors.go deleted file mode 100644 index 63e1233566..0000000000 --- a/daemon/cluster/executor/container/errors.go +++ /dev/null @@ -1,15 +0,0 @@ -package container - -import "fmt" - -var ( - // ErrImageRequired returned if a task is missing the image definition. - ErrImageRequired = fmt.Errorf("dockerexec: image required") - - // ErrContainerDestroyed returned when a container is prematurely destroyed - // during a wait call. 
- ErrContainerDestroyed = fmt.Errorf("dockerexec: container destroyed") - - // ErrContainerUnhealthy returned if controller detects the health check failure - ErrContainerUnhealthy = fmt.Errorf("dockerexec: unhealthy container") -) diff --git a/daemon/cluster/executor/container/executor.go b/daemon/cluster/executor/container/executor.go deleted file mode 100644 index 50ad60e9ce..0000000000 --- a/daemon/cluster/executor/container/executor.go +++ /dev/null @@ -1,160 +0,0 @@ -package container - -import ( - "sort" - "strings" - - executorpkg "github.com/docker/docker/daemon/cluster/executor" - clustertypes "github.com/docker/docker/daemon/cluster/provider" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - networktypes "github.com/docker/libnetwork/types" - "github.com/docker/swarmkit/agent/exec" - "github.com/docker/swarmkit/api" - "golang.org/x/net/context" -) - -type executor struct { - backend executorpkg.Backend -} - -// NewExecutor returns an executor from the docker client. -func NewExecutor(b executorpkg.Backend) exec.Executor { - return &executor{ - backend: b, - } -} - -// Describe returns the underlying node description from the docker client. -func (e *executor) Describe(ctx context.Context) (*api.NodeDescription, error) { - info, err := e.backend.SystemInfo() - if err != nil { - return nil, err - } - - plugins := map[api.PluginDescription]struct{}{} - addPlugins := func(typ string, names []string) { - for _, name := range names { - plugins[api.PluginDescription{ - Type: typ, - Name: name, - }] = struct{}{} - } - } - - addPlugins("Volume", info.Plugins.Volume) - // Add builtin driver "overlay" (the only builtin multi-host driver) to - // the plugin list by default. - addPlugins("Network", append([]string{"overlay"}, info.Plugins.Network...)) - addPlugins("Authorization", info.Plugins.Authorization) - - pluginFields := make([]api.PluginDescription, 0, len(plugins)) - for k := range plugins { - pluginFields = append(pluginFields, k) - } - - sort.Sort(sortedPlugins(pluginFields)) - - // parse []string labels into a map[string]string - labels := map[string]string{} - for _, l := range info.Labels { - stringSlice := strings.SplitN(l, "=", 2) - // this will take the last value in the list for a given key - // ideally, one shouldn't assign multiple values to the same key - if len(stringSlice) > 1 { - labels[stringSlice[0]] = stringSlice[1] - } - } - - description := &api.NodeDescription{ - Hostname: info.Name, - Platform: &api.Platform{ - Architecture: info.Architecture, - OS: info.OSType, - }, - Engine: &api.EngineDescription{ - EngineVersion: info.ServerVersion, - Labels: labels, - Plugins: pluginFields, - }, - Resources: &api.Resources{ - NanoCPUs: int64(info.NCPU) * 1e9, - MemoryBytes: info.MemTotal, - }, - } - - return description, nil -} - -func (e *executor) Configure(ctx context.Context, node *api.Node) error { - na := node.Attachment - if na == nil { - return nil - } - - options := types.NetworkCreate{ - Driver: na.Network.DriverState.Name, - IPAM: network.IPAM{ - Driver: na.Network.IPAM.Driver.Name, - }, - Options: na.Network.DriverState.Options, - CheckDuplicate: true, - } - - for _, ic := range na.Network.IPAM.Configs { - c := network.IPAMConfig{ - Subnet: ic.Subnet, - IPRange: ic.Range, - Gateway: ic.Gateway, - } - options.IPAM.Config = append(options.IPAM.Config, c) - } - - return e.backend.SetupIngress(clustertypes.NetworkCreateRequest{ - na.Network.ID, - types.NetworkCreateRequest{ - Name: na.Network.Spec.Annotations.Name, - 
NetworkCreate: options, - }, - }, na.Addresses[0]) -} - -// Controller returns a docker container runner. -func (e *executor) Controller(t *api.Task) (exec.Controller, error) { - ctlr, err := newController(e.backend, t) - if err != nil { - return nil, err - } - - return ctlr, nil -} - -func (e *executor) SetNetworkBootstrapKeys(keys []*api.EncryptionKey) error { - nwKeys := []*networktypes.EncryptionKey{} - for _, key := range keys { - nwKey := &networktypes.EncryptionKey{ - Subsystem: key.Subsystem, - Algorithm: int32(key.Algorithm), - Key: make([]byte, len(key.Key)), - LamportTime: key.LamportTime, - } - copy(nwKey.Key, key.Key) - nwKeys = append(nwKeys, nwKey) - } - e.backend.SetNetworkBootstrapKeys(nwKeys) - - return nil -} - -type sortedPlugins []api.PluginDescription - -func (sp sortedPlugins) Len() int { return len(sp) } - -func (sp sortedPlugins) Swap(i, j int) { sp[i], sp[j] = sp[j], sp[i] } - -func (sp sortedPlugins) Less(i, j int) bool { - if sp[i].Type != sp[j].Type { - return sp[i].Type < sp[j].Type - } - return sp[i].Name < sp[j].Name -} diff --git a/daemon/cluster/executor/container/health_test.go b/daemon/cluster/executor/container/health_test.go deleted file mode 100644 index 472624b54d..0000000000 --- a/daemon/cluster/executor/container/health_test.go +++ /dev/null @@ -1,102 +0,0 @@ -// +build !windows - -package container - -import ( - "testing" - "time" - - "github.com/docker/docker/container" - "github.com/docker/docker/daemon" - "github.com/docker/docker/daemon/events" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/swarmkit/api" - "golang.org/x/net/context" -) - -func TestHealthStates(t *testing.T) { - - // set up environment: events, task, container .... - e := events.New() - _, l, _ := e.Subscribe() - defer e.Evict(l) - - task := &api.Task{ - ID: "id", - ServiceID: "sid", - Spec: api.TaskSpec{ - Runtime: &api.TaskSpec_Container{ - Container: &api.ContainerSpec{ - Image: "image_name", - Labels: map[string]string{ - "com.docker.swarm.task.id": "id", - }, - }, - }, - }, - Annotations: api.Annotations{Name: "name"}, - } - - c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "id", - Name: "name", - Config: &containertypes.Config{ - Image: "image_name", - Labels: map[string]string{ - "com.docker.swarm.task.id": "id", - }, - }, - }, - } - - daemon := &daemon.Daemon{ - EventsService: e, - } - - controller, err := newController(daemon, task) - if err != nil { - t.Fatalf("create controller fail %v", err) - } - - errChan := make(chan error, 1) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // fire checkHealth - go func() { - err := controller.checkHealth(ctx) - select { - case errChan <- err: - case <-ctx.Done(): - } - }() - - // send an event and expect to get expectedErr - // if expectedErr is nil, shouldn't get any error - logAndExpect := func(msg string, expectedErr error) { - daemon.LogContainerEvent(c, msg) - - timer := time.NewTimer(1 * time.Second) - defer timer.Stop() - - select { - case err := <-errChan: - if err != expectedErr { - t.Fatalf("expect error %v, but get %v", expectedErr, err) - } - case <-timer.C: - if expectedErr != nil { - t.Fatalf("time limit exceeded, didn't get expected error") - } - } - } - - // events that are ignored by checkHealth - logAndExpect("health_status: running", nil) - logAndExpect("health_status: healthy", nil) - logAndExpect("die", nil) - - // unhealthy event will be caught by checkHealth - logAndExpect("health_status: unhealthy", 
ErrContainerUnhealthy)
-}
diff --git a/daemon/cluster/executor/container/validate.go b/daemon/cluster/executor/container/validate.go
deleted file mode 100644
index dad1524a6d..0000000000
--- a/daemon/cluster/executor/container/validate.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package container
-
-import (
- "fmt"
- "os"
- "path/filepath"
-
- "github.com/docker/swarmkit/api"
-)
-
-func validateMounts(mounts []api.Mount) error {
- for _, mount := range mounts {
- // Target must always be absolute
- if !filepath.IsAbs(mount.Target) {
- return fmt.Errorf("invalid mount target, must be an absolute path: %s", mount.Target)
- }
-
- switch mount.Type {
- // The checks on absolute paths are required because the container API confuses
- // volume mounts with bind mounts when the source is absolute (and vice-versa).
- // See #25253
- // TODO: This is probably not necessary once #22373 is merged
- case api.MountTypeBind:
- if !filepath.IsAbs(mount.Source) {
- return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", mount.Source)
- }
- if _, err := os.Stat(mount.Source); os.IsNotExist(err) {
- return fmt.Errorf("invalid bind mount source, source path not found: %s", mount.Source)
- }
- case api.MountTypeVolume:
- if filepath.IsAbs(mount.Source) {
- return fmt.Errorf("invalid volume mount source, must not be an absolute path: %s", mount.Source)
- }
- case api.MountTypeTmpfs:
- if mount.Source != "" {
- return fmt.Errorf("invalid tmpfs source, source must be empty")
- }
- default:
- return fmt.Errorf("invalid mount type: %s", mount.Type)
- }
- }
- return nil
-}
diff --git a/daemon/cluster/executor/container/validate_test.go b/daemon/cluster/executor/container/validate_test.go
deleted file mode 100644
index d911c1ebec..0000000000
--- a/daemon/cluster/executor/container/validate_test.go
+++ /dev/null
@@ -1,141 +0,0 @@
-package container
-
-import (
- "io/ioutil"
- "os"
- "strings"
- "testing"
-
- "github.com/docker/docker/daemon"
- "github.com/docker/docker/pkg/stringid"
- "github.com/docker/swarmkit/api"
-)
-
-func newTestControllerWithMount(m api.Mount) (*controller, error) {
- return newController(&daemon.Daemon{}, &api.Task{
- ID: stringid.GenerateRandomID(),
- ServiceID: stringid.GenerateRandomID(),
- Spec: api.TaskSpec{
- Runtime: &api.TaskSpec_Container{
- Container: &api.ContainerSpec{
- Image: "image_name",
- Labels: map[string]string{
- "com.docker.swarm.task.id": "id",
- },
- Mounts: []api.Mount{m},
- },
- },
- },
- })
-}
-
-func TestControllerValidateMountBind(t *testing.T) {
- // with improper source
- if _, err := newTestControllerWithMount(api.Mount{
- Type: api.MountTypeBind,
- Source: "foo",
- Target: testAbsPath,
- }); err == nil || !strings.Contains(err.Error(), "invalid bind mount source") {
- t.Fatalf("expected error, got: %v", err)
- }
-
- // with non-existing source
- if _, err := newTestControllerWithMount(api.Mount{
- Type: api.MountTypeBind,
- Source: "/some-non-existing-host-path/",
- Target: testAbsPath,
- }); err == nil || !strings.Contains(err.Error(), "invalid bind mount source") {
- t.Fatalf("expected error, got: %v", err)
- }
-
- // with proper source
- tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountBind")
- if err != nil {
- t.Fatalf("failed to create temp dir: %v", err)
- }
- defer os.Remove(tmpdir)
-
- if _, err := newTestControllerWithMount(api.Mount{
- Type: api.MountTypeBind,
- Source: tmpdir,
- Target: testAbsPath,
- }); err != nil {
- t.Fatalf("expected no error, got: %v", err)
- }
-}
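For reference, the two bind-mount rules enforced by validateMounts in validate.go above, as a runnable sketch; checkBindSource is an illustrative name, not a daemon helper:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// checkBindSource applies the bind-mount rules from validateMounts above:
// the source must be an absolute path, and it must exist on the host.
func checkBindSource(source string) error {
	if !filepath.IsAbs(source) {
		return fmt.Errorf("invalid bind mount source, must be an absolute path: %s", source)
	}
	if _, err := os.Stat(source); os.IsNotExist(err) {
		return fmt.Errorf("invalid bind mount source, source path not found: %s", source)
	}
	return nil
}

func main() {
	fmt.Println(checkBindSource("foo"))          // relative path: rejected
	fmt.Println(checkBindSource("/no/such/dir")) // absent path: rejected
	fmt.Println(checkBindSource("/tmp"))         // absolute and present: <nil>
}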
-
-func TestControllerValidateMountVolume(t *testing.T) {
- // with improper source
- if _, err := newTestControllerWithMount(api.Mount{
- Type: api.MountTypeVolume,
- Source: testAbsPath,
- Target: testAbsPath,
- }); err == nil || !strings.Contains(err.Error(), "invalid volume mount source") {
- t.Fatalf("expected error, got: %v", err)
- }
-
- // with proper source
- if _, err := newTestControllerWithMount(api.Mount{
- Type: api.MountTypeVolume,
- Source: "foo",
- Target: testAbsPath,
- }); err != nil {
- t.Fatalf("expected no error, got: %v", err)
- }
-}
-
-func TestControllerValidateMountTarget(t *testing.T) {
- tmpdir, err := ioutil.TempDir("", "TestControllerValidateMountTarget")
- if err != nil {
- t.Fatalf("failed to create temp dir: %v", err)
- }
- defer os.Remove(tmpdir)
-
- // with improper target
- if _, err := newTestControllerWithMount(api.Mount{
- Type: api.MountTypeBind,
- Source: testAbsPath,
- Target: "foo",
- }); err == nil || !strings.Contains(err.Error(), "invalid mount target") {
- t.Fatalf("expected error, got: %v", err)
- }
-
- // with proper target
- if _, err := newTestControllerWithMount(api.Mount{
- Type: api.MountTypeBind,
- Source: tmpdir,
- Target: testAbsPath,
- }); err != nil {
- t.Fatalf("expected no error, got: %v", err)
- }
-}
-
-func TestControllerValidateMountTmpfs(t *testing.T) {
- // with improper (non-empty) source
- if _, err := newTestControllerWithMount(api.Mount{
- Type: api.MountTypeTmpfs,
- Source: "foo",
- Target: testAbsPath,
- }); err == nil || !strings.Contains(err.Error(), "invalid tmpfs source") {
- t.Fatalf("expected error, got: %v", err)
- }
-
- // with proper (empty) source
- if _, err := newTestControllerWithMount(api.Mount{
- Type: api.MountTypeTmpfs,
- Target: testAbsPath,
- }); err != nil {
- t.Fatalf("expected no error, got: %v", err)
- }
-}
-
-func TestControllerValidateMountInvalidType(t *testing.T) {
- // with an invalid mount type
- if _, err := newTestControllerWithMount(api.Mount{
- Type: api.Mount_MountType(9999),
- Source: "foo",
- Target: testAbsPath,
- }); err == nil || !strings.Contains(err.Error(), "invalid mount type") {
- t.Fatalf("expected error, got: %v", err)
- }
-}
diff --git a/daemon/cluster/executor/container/validate_unix_test.go b/daemon/cluster/executor/container/validate_unix_test.go
deleted file mode 100644
index 5e122de918..0000000000
--- a/daemon/cluster/executor/container/validate_unix_test.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !windows
-
-package container
-
-const (
- testAbsPath = "/foo"
-)
diff --git a/daemon/cluster/executor/container/validate_windows_test.go b/daemon/cluster/executor/container/validate_windows_test.go
deleted file mode 100644
index 8eb3e9987a..0000000000
--- a/daemon/cluster/executor/container/validate_windows_test.go
+++ /dev/null
@@ -1,5 +0,0 @@
-package container
-
-const (
- testAbsPath = `c:\foo`
-)
diff --git a/daemon/cluster/filters.go b/daemon/cluster/filters.go
deleted file mode 100644
index c49ea7b39b..0000000000
--- a/daemon/cluster/filters.go
+++ /dev/null
@@ -1,98 +0,0 @@
-package cluster
-
-import (
- "fmt"
- "strings"
-
- runconfigopts "github.com/docker/docker/runconfig/opts"
- "github.com/docker/engine-api/types/filters"
- swarmapi "github.com/docker/swarmkit/api"
-)
-
-func newListNodesFilters(filter filters.Args) (*swarmapi.ListNodesRequest_Filters, error) {
- accepted := map[string]bool{
- "name": true,
- "id": true,
- "label": true,
- "role": true,
- "membership": true,
- }
- if err := filter.Validate(accepted); err != nil {
- return nil, err
- }
- f := &swarmapi.ListNodesRequest_Filters{
- NamePrefixes: filter.Get("name"),
- IDPrefixes:
filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - } - - for _, r := range filter.Get("role") { - if role, ok := swarmapi.NodeRole_value[strings.ToUpper(r)]; ok { - f.Roles = append(f.Roles, swarmapi.NodeRole(role)) - } else if r != "" { - return nil, fmt.Errorf("Invalid role filter: '%s'", r) - } - } - - for _, a := range filter.Get("membership") { - if membership, ok := swarmapi.NodeSpec_Membership_value[strings.ToUpper(a)]; ok { - f.Memberships = append(f.Memberships, swarmapi.NodeSpec_Membership(membership)) - } else if a != "" { - return nil, fmt.Errorf("Invalid membership filter: '%s'", a) - } - } - - return f, nil -} - -func newListServicesFilters(filter filters.Args) (*swarmapi.ListServicesRequest_Filters, error) { - accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - } - if err := filter.Validate(accepted); err != nil { - return nil, err - } - return &swarmapi.ListServicesRequest_Filters{ - NamePrefixes: filter.Get("name"), - IDPrefixes: filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - }, nil -} - -func newListTasksFilters(filter filters.Args, transformFunc func(filters.Args) error) (*swarmapi.ListTasksRequest_Filters, error) { - accepted := map[string]bool{ - "name": true, - "id": true, - "label": true, - "service": true, - "node": true, - "desired-state": true, - } - if err := filter.Validate(accepted); err != nil { - return nil, err - } - if transformFunc != nil { - if err := transformFunc(filter); err != nil { - return nil, err - } - } - f := &swarmapi.ListTasksRequest_Filters{ - NamePrefixes: filter.Get("name"), - IDPrefixes: filter.Get("id"), - Labels: runconfigopts.ConvertKVStringsToMap(filter.Get("label")), - ServiceIDs: filter.Get("service"), - NodeIDs: filter.Get("node"), - } - - for _, s := range filter.Get("desired-state") { - if state, ok := swarmapi.TaskState_value[strings.ToUpper(s)]; ok { - f.DesiredStates = append(f.DesiredStates, swarmapi.TaskState(state)) - } else if s != "" { - return nil, fmt.Errorf("Invalid desired-state filter: '%s'", s) - } - } - - return f, nil -} diff --git a/daemon/cluster/helpers.go b/daemon/cluster/helpers.go deleted file mode 100644 index be5bf56e87..0000000000 --- a/daemon/cluster/helpers.go +++ /dev/null @@ -1,108 +0,0 @@ -package cluster - -import ( - "fmt" - - swarmapi "github.com/docker/swarmkit/api" - "golang.org/x/net/context" -) - -func getSwarm(ctx context.Context, c swarmapi.ControlClient) (*swarmapi.Cluster, error) { - rl, err := c.ListClusters(ctx, &swarmapi.ListClustersRequest{}) - if err != nil { - return nil, err - } - - if len(rl.Clusters) == 0 { - return nil, fmt.Errorf("swarm not found") - } - - // TODO: assume one cluster only - return rl.Clusters[0], nil -} - -func getNode(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Node, error) { - // GetNode to match via full ID. - rg, err := c.GetNode(ctx, &swarmapi.GetNodeRequest{NodeID: input}) - if err != nil { - // If any error (including NotFound), ListNodes to match via full name. - rl, err := c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{Names: []string{input}}}) - - if err != nil || len(rl.Nodes) == 0 { - // If any error or 0 result, ListNodes to match via ID prefix. 
- rl, err = c.ListNodes(ctx, &swarmapi.ListNodesRequest{Filters: &swarmapi.ListNodesRequest_Filters{IDPrefixes: []string{input}}}) - } - - if err != nil { - return nil, err - } - - if len(rl.Nodes) == 0 { - return nil, fmt.Errorf("node %s not found", input) - } - - if l := len(rl.Nodes); l > 1 { - return nil, fmt.Errorf("node %s is ambiguous (%d matches found)", input, l) - } - - return rl.Nodes[0], nil - } - return rg.Node, nil -} - -func getService(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Service, error) { - // GetService to match via full ID. - rg, err := c.GetService(ctx, &swarmapi.GetServiceRequest{ServiceID: input}) - if err != nil { - // If any error (including NotFound), ListServices to match via full name. - rl, err := c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{Names: []string{input}}}) - if err != nil || len(rl.Services) == 0 { - // If any error or 0 result, ListServices to match via ID prefix. - rl, err = c.ListServices(ctx, &swarmapi.ListServicesRequest{Filters: &swarmapi.ListServicesRequest_Filters{IDPrefixes: []string{input}}}) - } - - if err != nil { - return nil, err - } - - if len(rl.Services) == 0 { - return nil, fmt.Errorf("service %s not found", input) - } - - if l := len(rl.Services); l > 1 { - return nil, fmt.Errorf("service %s is ambiguous (%d matches found)", input, l) - } - - return rl.Services[0], nil - } - return rg.Service, nil -} - -func getTask(ctx context.Context, c swarmapi.ControlClient, input string) (*swarmapi.Task, error) { - // GetTask to match via full ID. - rg, err := c.GetTask(ctx, &swarmapi.GetTaskRequest{TaskID: input}) - if err != nil { - // If any error (including NotFound), ListTasks to match via full name. - rl, err := c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{Names: []string{input}}}) - - if err != nil || len(rl.Tasks) == 0 { - // If any error or 0 result, ListTasks to match via ID prefix. 
- rl, err = c.ListTasks(ctx, &swarmapi.ListTasksRequest{Filters: &swarmapi.ListTasksRequest_Filters{IDPrefixes: []string{input}}}) - } - - if err != nil { - return nil, err - } - - if len(rl.Tasks) == 0 { - return nil, fmt.Errorf("task %s not found", input) - } - - if l := len(rl.Tasks); l > 1 { - return nil, fmt.Errorf("task %s is ambiguous (%d matches found)", input, l) - } - - return rl.Tasks[0], nil - } - return rg.Task, nil -} diff --git a/daemon/cluster/listen_addr.go b/daemon/cluster/listen_addr.go deleted file mode 100644 index 2c93d0d828..0000000000 --- a/daemon/cluster/listen_addr.go +++ /dev/null @@ -1,270 +0,0 @@ -package cluster - -import ( - "errors" - "fmt" - "net" -) - -var ( - errNoSuchInterface = errors.New("no such interface") - errNoIP = errors.New("could not find the system's IP address") - errMustSpecifyListenAddr = errors.New("must specify a listening address because the address to advertise is not recognized as a system address") - errBadListenAddr = errors.New("listen address must be an IP address or network interface (with optional port number)") - errBadAdvertiseAddr = errors.New("advertise address must be an IP address or network interface (with optional port number)") - errBadDefaultAdvertiseAddr = errors.New("default advertise address must be an IP address or network interface (without a port number)") -) - -func resolveListenAddr(specifiedAddr string) (string, string, error) { - specifiedHost, specifiedPort, err := net.SplitHostPort(specifiedAddr) - if err != nil { - return "", "", fmt.Errorf("could not parse listen address %s", specifiedAddr) - } - - // Does the host component match any of the interface names on the - // system? If so, use the address from that interface. - interfaceAddr, err := resolveInterfaceAddr(specifiedHost) - if err == nil { - return interfaceAddr.String(), specifiedPort, nil - } - if err != errNoSuchInterface { - return "", "", err - } - - // If it's not an interface, it must be an IP (for now) - if net.ParseIP(specifiedHost) == nil { - return "", "", errBadListenAddr - } - - return specifiedHost, specifiedPort, nil -} - -func (c *Cluster) resolveAdvertiseAddr(advertiseAddr, listenAddrPort string) (string, string, error) { - // Approach: - // - If an advertise address is specified, use that. Resolve the - // interface's address if an interface was specified in - // advertiseAddr. Fill in the port from listenAddrPort if necessary. - // - If DefaultAdvertiseAddr is not empty, use that with the port from - // listenAddrPort. Resolve the interface's address from - // if an interface name was specified in DefaultAdvertiseAddr. - // - Otherwise, try to autodetect the system's address. Use the port in - // listenAddrPort with this address if autodetection succeeds. - - if advertiseAddr != "" { - advertiseHost, advertisePort, err := net.SplitHostPort(advertiseAddr) - if err != nil { - // Not a host:port specification - advertiseHost = advertiseAddr - advertisePort = listenAddrPort - } - - // Does the host component match any of the interface names on the - // system? If so, use the address from that interface. 
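The listen-address handling above boils down to net.SplitHostPort plus a literal-IP check. A sketch of that first half, leaving out the interface-name lookup the real code tries before rejecting a non-IP host:

package main

import (
	"errors"
	"fmt"
	"net"
)

// splitListenAddr mirrors resolveListenAddr above, minus the interface
// resolution: split host:port, then require the host to be a literal IP.
func splitListenAddr(addr string) (host, port string, err error) {
	host, port, err = net.SplitHostPort(addr)
	if err != nil {
		return "", "", fmt.Errorf("could not parse listen address %s", addr)
	}
	if net.ParseIP(host) == nil {
		return "", "", errors.New("listen address must be an IP address")
	}
	return host, port, nil
}

func main() {
	fmt.Println(splitListenAddr("0.0.0.0:2377")) // 0.0.0.0 2377 <nil>
	fmt.Println(splitListenAddr("eth0:2377"))    // rejected in this sketch
}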
- interfaceAddr, err := resolveInterfaceAddr(advertiseHost) - if err == nil { - return interfaceAddr.String(), advertisePort, nil - } - if err != errNoSuchInterface { - return "", "", err - } - - // If it's not an interface, it must be an IP (for now) - if net.ParseIP(advertiseHost) == nil { - return "", "", errBadAdvertiseAddr - } - - return advertiseHost, advertisePort, nil - } - - if c.config.DefaultAdvertiseAddr != "" { - // Does the default advertise address component match any of the - // interface names on the system? If so, use the address from - // that interface. - interfaceAddr, err := resolveInterfaceAddr(c.config.DefaultAdvertiseAddr) - if err == nil { - return interfaceAddr.String(), listenAddrPort, nil - } - if err != errNoSuchInterface { - return "", "", err - } - - // If it's not an interface, it must be an IP (for now) - if net.ParseIP(c.config.DefaultAdvertiseAddr) == nil { - return "", "", errBadDefaultAdvertiseAddr - } - - return c.config.DefaultAdvertiseAddr, listenAddrPort, nil - } - - systemAddr, err := c.resolveSystemAddr() - if err != nil { - return "", "", err - } - return systemAddr.String(), listenAddrPort, nil -} - -func resolveInterfaceAddr(specifiedInterface string) (net.IP, error) { - // Use a specific interface's IP address. - intf, err := net.InterfaceByName(specifiedInterface) - if err != nil { - return nil, errNoSuchInterface - } - - addrs, err := intf.Addrs() - if err != nil { - return nil, err - } - - var interfaceAddr4, interfaceAddr6 net.IP - - for _, addr := range addrs { - ipAddr, ok := addr.(*net.IPNet) - - if ok { - if ipAddr.IP.To4() != nil { - // IPv4 - if interfaceAddr4 != nil { - return nil, fmt.Errorf("interface %s has more than one IPv4 address", specifiedInterface) - } - interfaceAddr4 = ipAddr.IP - } else { - // IPv6 - if interfaceAddr6 != nil { - return nil, fmt.Errorf("interface %s has more than one IPv6 address", specifiedInterface) - } - interfaceAddr6 = ipAddr.IP - } - } - } - - if interfaceAddr4 == nil && interfaceAddr6 == nil { - return nil, fmt.Errorf("interface %s has no usable IPv4 or IPv6 address", specifiedInterface) - } - - // In the case that there's exactly one IPv4 address - // and exactly one IPv6 address, favor IPv4 over IPv6. - if interfaceAddr4 != nil { - return interfaceAddr4, nil - } - return interfaceAddr6, nil -} - -func (c *Cluster) resolveSystemAddr() (net.IP, error) { - // Use the system's only IP address, or fail if there are - // multiple addresses to choose from. - interfaces, err := net.Interfaces() - if err != nil { - return nil, err - } - - var systemAddr net.IP - var systemInterface net.Interface - - // List Docker-managed subnets - v4Subnets := c.config.NetworkSubnetsProvider.V4Subnets() - v6Subnets := c.config.NetworkSubnetsProvider.V6Subnets() - -ifaceLoop: - for _, intf := range interfaces { - // Skip inactive interfaces and loopback interfaces - if (intf.Flags&net.FlagUp == 0) || (intf.Flags&net.FlagLoopback) != 0 { - continue - } - - addrs, err := intf.Addrs() - if err != nil { - continue - } - - var interfaceAddr4, interfaceAddr6 net.IP - - for _, addr := range addrs { - ipAddr, ok := addr.(*net.IPNet) - - // Skip loopback and link-local addresses - if !ok || !ipAddr.IP.IsGlobalUnicast() { - continue - } - - if ipAddr.IP.To4() != nil { - // IPv4 - - // Ignore addresses in subnets that are managed by Docker. 
- for _, subnet := range v4Subnets { - if subnet.Contains(ipAddr.IP) { - continue ifaceLoop - } - } - - if interfaceAddr4 != nil { - return nil, fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", intf.Name, interfaceAddr4, ipAddr.IP) - } - - interfaceAddr4 = ipAddr.IP - } else { - // IPv6 - - // Ignore addresses in subnets that are managed by Docker. - for _, subnet := range v6Subnets { - if subnet.Contains(ipAddr.IP) { - continue ifaceLoop - } - } - - if interfaceAddr6 != nil { - return nil, fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on interface %s (%s and %s)", intf.Name, interfaceAddr6, ipAddr.IP) - } - - interfaceAddr6 = ipAddr.IP - } - } - - // In the case that this interface has exactly one IPv4 address - // and exactly one IPv6 address, favor IPv4 over IPv6. - if interfaceAddr4 != nil { - if systemAddr != nil { - return nil, fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", systemAddr, systemInterface.Name, interfaceAddr4, intf.Name) - } - systemAddr = interfaceAddr4 - systemInterface = intf - } else if interfaceAddr6 != nil { - if systemAddr != nil { - return nil, fmt.Errorf("could not choose an IP address to advertise since this system has multiple addresses on different interfaces (%s on %s and %s on %s)", systemAddr, systemInterface.Name, interfaceAddr6, intf.Name) - } - systemAddr = interfaceAddr6 - systemInterface = intf - } - } - - if systemAddr == nil { - return nil, errNoIP - } - - return systemAddr, nil -} - -func listSystemIPs() []net.IP { - interfaces, err := net.Interfaces() - if err != nil { - return nil - } - - var systemAddrs []net.IP - - for _, intf := range interfaces { - addrs, err := intf.Addrs() - if err != nil { - continue - } - - for _, addr := range addrs { - ipAddr, ok := addr.(*net.IPNet) - - if ok { - systemAddrs = append(systemAddrs, ipAddr.IP) - } - } - } - - return systemAddrs -} diff --git a/daemon/cluster/provider/network.go b/daemon/cluster/provider/network.go deleted file mode 100644 index d99c2f7294..0000000000 --- a/daemon/cluster/provider/network.go +++ /dev/null @@ -1,37 +0,0 @@ -package provider - -import "github.com/docker/engine-api/types" - -// NetworkCreateRequest is a request when creating a network. -type NetworkCreateRequest struct { - ID string - types.NetworkCreateRequest -} - -// NetworkCreateResponse is a response when creating a network. -type NetworkCreateResponse struct { - ID string `json:"Id"` -} - -// VirtualAddress represents a virtual address. -type VirtualAddress struct { - IPv4 string - IPv6 string -} - -// PortConfig represents a port configuration. -type PortConfig struct { - Name string - Protocol int32 - TargetPort uint32 - PublishedPort uint32 -} - -// ServiceConfig represents a service configuration. 
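listSystemIPs above is a plain interface walk. The same enumeration as a standalone sketch:

package main

import (
	"fmt"
	"net"
)

// Walk every network interface and print each address that carries an IP
// network, the way listSystemIPs above collects candidate addresses.
func main() {
	ifaces, err := net.Interfaces()
	if err != nil {
		return
	}
	for _, intf := range ifaces {
		addrs, err := intf.Addrs()
		if err != nil {
			continue // skip interfaces whose addresses cannot be read
		}
		for _, addr := range addrs {
			if ipnet, ok := addr.(*net.IPNet); ok {
				fmt.Println(intf.Name, ipnet.IP)
			}
		}
	}
}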
-type ServiceConfig struct {
- ID string
- Name string
- Aliases map[string][]string
- VirtualAddresses map[string]*VirtualAddress
- ExposedPorts []*PortConfig
-}
diff --git a/daemon/commit.go b/daemon/commit.go
deleted file mode 100644
index 24c7a46701..0000000000
--- a/daemon/commit.go
+++ /dev/null
@@ -1,264 +0,0 @@
-package daemon
-
-import (
- "encoding/json"
- "fmt"
- "runtime"
- "strings"
- "time"
-
- "github.com/docker/docker/api/types/backend"
- "github.com/docker/docker/builder/dockerfile"
- "github.com/docker/docker/container"
- "github.com/docker/docker/dockerversion"
- "github.com/docker/docker/image"
- "github.com/docker/docker/layer"
- "github.com/docker/docker/pkg/archive"
- "github.com/docker/docker/pkg/ioutils"
- "github.com/docker/docker/reference"
- containertypes "github.com/docker/engine-api/types/container"
- "github.com/docker/go-connections/nat"
-)
-
-// merge merges two Configs: the image container configuration (default values)
-// and the user container configuration, either passed by the API or generated
-// by the CLI.
-// It will mutate the specified user configuration (userConf) with the image
-// configuration where the user configuration is incomplete.
-func merge(userConf, imageConf *containertypes.Config) error {
- if userConf.User == "" {
- userConf.User = imageConf.User
- }
- if len(userConf.ExposedPorts) == 0 {
- userConf.ExposedPorts = imageConf.ExposedPorts
- } else if imageConf.ExposedPorts != nil {
- if userConf.ExposedPorts == nil {
- userConf.ExposedPorts = make(nat.PortSet)
- }
- for port := range imageConf.ExposedPorts {
- if _, exists := userConf.ExposedPorts[port]; !exists {
- userConf.ExposedPorts[port] = struct{}{}
- }
- }
- }
-
- if len(userConf.Env) == 0 {
- userConf.Env = imageConf.Env
- } else {
- for _, imageEnv := range imageConf.Env {
- found := false
- imageEnvKey := strings.Split(imageEnv, "=")[0]
- for _, userEnv := range userConf.Env {
- userEnvKey := strings.Split(userEnv, "=")[0]
- if imageEnvKey == userEnvKey {
- found = true
- break
- }
- }
- if !found {
- userConf.Env = append(userConf.Env, imageEnv)
- }
- }
- }
-
- if userConf.Labels == nil {
- userConf.Labels = map[string]string{}
- }
- if imageConf.Labels != nil {
- for l := range userConf.Labels {
- imageConf.Labels[l] = userConf.Labels[l]
- }
- userConf.Labels = imageConf.Labels
- }
-
- if len(userConf.Entrypoint) == 0 {
- if len(userConf.Cmd) == 0 {
- userConf.Cmd = imageConf.Cmd
- }
-
- if userConf.Entrypoint == nil {
- userConf.Entrypoint = imageConf.Entrypoint
- }
- }
- if imageConf.Healthcheck != nil {
- if userConf.Healthcheck == nil {
- userConf.Healthcheck = imageConf.Healthcheck
- } else {
- if len(userConf.Healthcheck.Test) == 0 {
- userConf.Healthcheck.Test = imageConf.Healthcheck.Test
- }
- if userConf.Healthcheck.Interval == 0 {
- userConf.Healthcheck.Interval = imageConf.Healthcheck.Interval
- }
- if userConf.Healthcheck.Timeout == 0 {
- userConf.Healthcheck.Timeout = imageConf.Healthcheck.Timeout
- }
- if userConf.Healthcheck.Retries == 0 {
- userConf.Healthcheck.Retries = imageConf.Healthcheck.Retries
- }
- }
- }
-
- if userConf.WorkingDir == "" {
- userConf.WorkingDir = imageConf.WorkingDir
- }
- if len(userConf.Volumes) == 0 {
- userConf.Volumes = imageConf.Volumes
- } else {
- for k, v := range imageConf.Volumes {
- userConf.Volumes[k] = v
- }
- }
-
- if userConf.StopSignal == "" {
- userConf.StopSignal = imageConf.StopSignal
- }
- return nil
-}
-
-// Commit creates a new filesystem image from the current state of a container.
-// The image can optionally be tagged into a repository. -func (daemon *Daemon) Commit(name string, c *backend.ContainerCommitConfig) (string, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return "", err - } - - // It is not possible to commit a running container on Windows - if runtime.GOOS == "windows" && container.IsRunning() { - return "", fmt.Errorf("Windows does not support commit of a running container") - } - - if c.Pause && !container.IsPaused() { - daemon.containerPause(container) - defer daemon.containerUnpause(container) - } - - newConfig, err := dockerfile.BuildFromConfig(c.Config, c.Changes) - if err != nil { - return "", err - } - - if c.MergeConfigs { - if err := merge(newConfig, container.Config); err != nil { - return "", err - } - } - - rwTar, err := daemon.exportContainerRw(container) - if err != nil { - return "", err - } - defer func() { - if rwTar != nil { - rwTar.Close() - } - }() - - var history []image.History - rootFS := image.NewRootFS() - osVersion := "" - var osFeatures []string - - if container.ImageID != "" { - img, err := daemon.imageStore.Get(container.ImageID) - if err != nil { - return "", err - } - history = img.History - rootFS = img.RootFS - osVersion = img.OSVersion - osFeatures = img.OSFeatures - } - - l, err := daemon.layerStore.Register(rwTar, rootFS.ChainID()) - if err != nil { - return "", err - } - defer layer.ReleaseAndLog(daemon.layerStore, l) - - h := image.History{ - Author: c.Author, - Created: time.Now().UTC(), - CreatedBy: strings.Join(container.Config.Cmd, " "), - Comment: c.Comment, - EmptyLayer: true, - } - - if diffID := l.DiffID(); layer.DigestSHA256EmptyTar != diffID { - h.EmptyLayer = false - rootFS.Append(diffID) - } - - history = append(history, h) - - config, err := json.Marshal(&image.Image{ - V1Image: image.V1Image{ - DockerVersion: dockerversion.Version, - Config: newConfig, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - Container: container.ID, - ContainerConfig: *container.Config, - Author: c.Author, - Created: h.Created, - }, - RootFS: rootFS, - History: history, - OSFeatures: osFeatures, - OSVersion: osVersion, - }) - - if err != nil { - return "", err - } - - id, err := daemon.imageStore.Create(config) - if err != nil { - return "", err - } - - if container.ImageID != "" { - if err := daemon.imageStore.SetParent(id, container.ImageID); err != nil { - return "", err - } - } - - if c.Repo != "" { - newTag, err := reference.WithName(c.Repo) // todo: should move this to API layer - if err != nil { - return "", err - } - if c.Tag != "" { - if newTag, err = reference.WithTag(newTag, c.Tag); err != nil { - return "", err - } - } - if err := daemon.TagImageWithReference(id, newTag); err != nil { - return "", err - } - } - - attributes := map[string]string{ - "comment": c.Comment, - } - daemon.LogContainerEventWithAttributes(container, "commit", attributes) - return id.String(), nil -} - -func (daemon *Daemon) exportContainerRw(container *container.Container) (archive.Archive, error) { - if err := daemon.Mount(container); err != nil { - return nil, err - } - - archive, err := container.RWLayer.TarStream() - if err != nil { - daemon.Unmount(container) // logging is already handled in the `Unmount` function - return nil, err - } - return ioutils.NewReadCloserWrapper(archive, func() error { - archive.Close() - return container.RWLayer.Unmount() - }), - nil -} diff --git a/daemon/config.go b/daemon/config.go deleted file mode 100644 index bf568efefa..0000000000 --- a/daemon/config.go +++ 
/dev/null @@ -1,453 +0,0 @@ -package daemon - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "strings" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/discovery" - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/docker/registry" - "github.com/imdario/mergo" -) - -const ( - // defaultMaxConcurrentDownloads is the default value for - // maximum number of downloads that - // may take place at a time for each pull. - defaultMaxConcurrentDownloads = 3 - // defaultMaxConcurrentUploads is the default value for - // maximum number of uploads that - // may take place at a time for each push. - defaultMaxConcurrentUploads = 5 - // stockRuntimeName is the reserved name/alias used to represent the - // OCI runtime being shipped with the docker daemon package. - stockRuntimeName = "runc" -) - -const ( - defaultNetworkMtu = 1500 - disableNetworkBridge = "none" -) - -// flatOptions contains configuration keys -// that MUST NOT be parsed as deep structures. -// Use this to differentiate these options -// with others like the ones in CommonTLSOptions. -var flatOptions = map[string]bool{ - "cluster-store-opts": true, - "log-opts": true, - "runtimes": true, -} - -// LogConfig represents the default log configuration. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line use. -type LogConfig struct { - Type string `json:"log-driver,omitempty"` - Config map[string]string `json:"log-opts,omitempty"` -} - -// commonBridgeConfig stores all the platform-common bridge driver specific -// configuration. -type commonBridgeConfig struct { - Iface string `json:"bridge,omitempty"` - FixedCIDR string `json:"fixed-cidr,omitempty"` -} - -// CommonTLSOptions defines TLS configuration for the daemon server. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line use. -type CommonTLSOptions struct { - CAFile string `json:"tlscacert,omitempty"` - CertFile string `json:"tlscert,omitempty"` - KeyFile string `json:"tlskey,omitempty"` -} - -// CommonConfig defines the configuration of a docker daemon which is -// common across platforms. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line use. -type CommonConfig struct { - AuthorizationPlugins []string `json:"authorization-plugins,omitempty"` // AuthorizationPlugins holds list of authorization plugins - AutoRestart bool `json:"-"` - Context map[string][]string `json:"-"` - DisableBridge bool `json:"-"` - DNS []string `json:"dns,omitempty"` - DNSOptions []string `json:"dns-opts,omitempty"` - DNSSearch []string `json:"dns-search,omitempty"` - ExecOptions []string `json:"exec-opts,omitempty"` - GraphDriver string `json:"storage-driver,omitempty"` - GraphOptions []string `json:"storage-opts,omitempty"` - Labels []string `json:"labels,omitempty"` - Mtu int `json:"mtu,omitempty"` - Pidfile string `json:"pidfile,omitempty"` - RawLogs bool `json:"raw-logs,omitempty"` - Root string `json:"graph,omitempty"` - SocketGroup string `json:"group,omitempty"` - TrustKeyPath string `json:"-"` - CorsHeaders string `json:"api-cors-header,omitempty"` - EnableCors bool `json:"api-enable-cors,omitempty"` - LiveRestore bool `json:"live-restore,omitempty"` - - // ClusterStore is the storage backend used for the cluster information. 
It is used by both - // multihost networking (to store networks and endpoints information) and by the node discovery - // mechanism. - ClusterStore string `json:"cluster-store,omitempty"` - - // ClusterOpts is used to pass options to the discovery package for tuning libkv settings, such - // as TLS configuration settings. - ClusterOpts map[string]string `json:"cluster-store-opts,omitempty"` - - // ClusterAdvertise is the network endpoint that the Engine advertises for the purpose of node - // discovery. This should be a 'host:port' combination on which that daemon instance is - // reachable by other hosts. - ClusterAdvertise string `json:"cluster-advertise,omitempty"` - - // MaxConcurrentDownloads is the maximum number of downloads that - // may take place at a time for each pull. - MaxConcurrentDownloads *int `json:"max-concurrent-downloads,omitempty"` - - // MaxConcurrentUploads is the maximum number of uploads that - // may take place at a time for each push. - MaxConcurrentUploads *int `json:"max-concurrent-uploads,omitempty"` - - Debug bool `json:"debug,omitempty"` - Hosts []string `json:"hosts,omitempty"` - LogLevel string `json:"log-level,omitempty"` - TLS bool `json:"tls,omitempty"` - TLSVerify bool `json:"tlsverify,omitempty"` - - // Embedded structs that allow config - // deserialization without the full struct. - CommonTLSOptions - - // SwarmDefaultAdvertiseAddr is the default host/IP or network interface - // to use if a wildcard address is specified in the ListenAddr value - // given to the /swarm/init endpoint and no advertise address is - // specified. - SwarmDefaultAdvertiseAddr string `json:"swarm-default-advertise-addr"` - - LogConfig - bridgeConfig // bridgeConfig holds bridge network specific configuration. - registry.ServiceOptions - - reloadLock sync.Mutex - valuesSet map[string]interface{} -} - -// InstallCommonFlags adds command-line options to the top-level flag parser for -// the current process. -// Subsequent calls to `flag.Parse` will populate config with values parsed -// from the command-line. -func (config *Config) InstallCommonFlags(cmd *flag.FlagSet, usageFn func(string) string) { - var maxConcurrentDownloads, maxConcurrentUploads int - - config.ServiceOptions.InstallCliFlags(cmd, usageFn) - - cmd.Var(opts.NewNamedListOptsRef("storage-opts", &config.GraphOptions, nil), []string{"-storage-opt"}, usageFn("Storage driver options")) - cmd.Var(opts.NewNamedListOptsRef("authorization-plugins", &config.AuthorizationPlugins, nil), []string{"-authorization-plugin"}, usageFn("Authorization plugins to load")) - cmd.Var(opts.NewNamedListOptsRef("exec-opts", &config.ExecOptions, nil), []string{"-exec-opt"}, usageFn("Runtime execution options")) - cmd.StringVar(&config.Pidfile, []string{"p", "-pidfile"}, defaultPidFile, usageFn("Path to use for daemon PID file")) - cmd.StringVar(&config.Root, []string{"g", "-graph"}, defaultGraph, usageFn("Root of the Docker runtime")) - cmd.BoolVar(&config.AutoRestart, []string{"#r", "#-restart"}, true, usageFn("--restart on the daemon has been deprecated in favor of --restart policies on docker run")) - cmd.StringVar(&config.GraphDriver, []string{"s", "-storage-driver"}, "", usageFn("Storage driver to use")) - cmd.IntVar(&config.Mtu, []string{"#mtu", "-mtu"}, 0, usageFn("Set the containers network MTU")) - cmd.BoolVar(&config.RawLogs, []string{"-raw-logs"}, false, usageFn("Full timestamps without ANSI coloring")) - // FIXME: why the inconsistency between "hosts" and "sockets"? 
- cmd.Var(opts.NewListOptsRef(&config.DNS, opts.ValidateIPAddress), []string{"#dns", "-dns"}, usageFn("DNS server to use")) - cmd.Var(opts.NewNamedListOptsRef("dns-opts", &config.DNSOptions, nil), []string{"-dns-opt"}, usageFn("DNS options to use")) - cmd.Var(opts.NewListOptsRef(&config.DNSSearch, opts.ValidateDNSSearch), []string{"-dns-search"}, usageFn("DNS search domains to use")) - cmd.Var(opts.NewNamedListOptsRef("labels", &config.Labels, opts.ValidateLabel), []string{"-label"}, usageFn("Set key=value labels to the daemon")) - cmd.StringVar(&config.LogConfig.Type, []string{"-log-driver"}, "json-file", usageFn("Default driver for container logs")) - cmd.Var(opts.NewNamedMapOpts("log-opts", config.LogConfig.Config, nil), []string{"-log-opt"}, usageFn("Default log driver options for containers")) - cmd.StringVar(&config.ClusterAdvertise, []string{"-cluster-advertise"}, "", usageFn("Address or interface name to advertise")) - cmd.StringVar(&config.ClusterStore, []string{"-cluster-store"}, "", usageFn("URL of the distributed storage backend")) - cmd.Var(opts.NewNamedMapOpts("cluster-store-opts", config.ClusterOpts, nil), []string{"-cluster-store-opt"}, usageFn("Set cluster store options")) - cmd.StringVar(&config.CorsHeaders, []string{"-api-cors-header"}, "", usageFn("Set CORS headers in the remote API")) - cmd.IntVar(&maxConcurrentDownloads, []string{"-max-concurrent-downloads"}, defaultMaxConcurrentDownloads, usageFn("Set the max concurrent downloads for each pull")) - cmd.IntVar(&maxConcurrentUploads, []string{"-max-concurrent-uploads"}, defaultMaxConcurrentUploads, usageFn("Set the max concurrent uploads for each push")) - - cmd.StringVar(&config.SwarmDefaultAdvertiseAddr, []string{"-swarm-default-advertise-addr"}, "", usageFn("Set default address or interface for swarm advertised address")) - - config.MaxConcurrentDownloads = &maxConcurrentDownloads - config.MaxConcurrentUploads = &maxConcurrentUploads -} - -// IsValueSet returns true if a configuration value -// was explicitly set in the configuration file. -func (config *Config) IsValueSet(name string) bool { - if config.valuesSet == nil { - return false - } - _, ok := config.valuesSet[name] - return ok -} - -func parseClusterAdvertiseSettings(clusterStore, clusterAdvertise string) (string, error) { - if clusterAdvertise == "" { - return "", errDiscoveryDisabled - } - if clusterStore == "" { - return "", fmt.Errorf("invalid cluster configuration. --cluster-advertise must be accompanied by --cluster-store configuration") - } - - advertise, err := discovery.ParseAdvertise(clusterAdvertise) - if err != nil { - return "", fmt.Errorf("discovery advertise parsing failed (%v)", err) - } - return advertise, nil -} - -// ReloadConfiguration reads the configuration in the host and reloads the daemon and server. -func ReloadConfiguration(configFile string, flags *flag.FlagSet, reload func(*Config)) error { - logrus.Infof("Got signal to reload configuration, reloading from: %s", configFile) - newConfig, err := getConflictFreeConfiguration(configFile, flags) - if err != nil { - return err - } - - if err := ValidateConfiguration(newConfig); err != nil { - return fmt.Errorf("file configuration validation failed (%v)", err) - } - - reload(newConfig) - return nil -} - -// boolValue is an interface that boolean value flags implement -// to tell the command line how to make -name equivalent to -name=true. 
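ReloadConfiguration above runs when the daemon receives a reload signal. A minimal sketch of such a trigger loop, assuming the conventional SIGHUP signal and a stand-in path and reload action in place of the real callback:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// Block on SIGHUP and re-read the daemon configuration file each time,
// the shape of the trigger that ends up calling ReloadConfiguration.
func main() {
	ch := make(chan os.Signal, 1)
	signal.Notify(ch, syscall.SIGHUP)
	for range ch {
		fmt.Println("got signal to reload configuration, reloading from: /etc/docker/daemon.json")
	}
}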
-type boolValue interface { - IsBoolFlag() bool -} - -// MergeDaemonConfigurations reads a configuration file, -// loads the file configuration in an isolated structure, -// and merges the configuration provided from flags on top -// if there are no conflicts. -func MergeDaemonConfigurations(flagsConfig *Config, flags *flag.FlagSet, configFile string) (*Config, error) { - fileConfig, err := getConflictFreeConfiguration(configFile, flags) - if err != nil { - return nil, err - } - - if err := ValidateConfiguration(fileConfig); err != nil { - return nil, fmt.Errorf("file configuration validation failed (%v)", err) - } - - // merge flags configuration on top of the file configuration - if err := mergo.Merge(fileConfig, flagsConfig); err != nil { - return nil, err - } - - // We need to validate again once both fileConfig and flagsConfig - // have been merged - if err := ValidateConfiguration(fileConfig); err != nil { - return nil, fmt.Errorf("file configuration validation failed (%v)", err) - } - - return fileConfig, nil -} - -// getConflictFreeConfiguration loads the configuration from a JSON file. -// It compares that configuration with the one provided by the flags, -// and returns an error if there are conflicts. -func getConflictFreeConfiguration(configFile string, flags *flag.FlagSet) (*Config, error) { - b, err := ioutil.ReadFile(configFile) - if err != nil { - return nil, err - } - - var config Config - var reader io.Reader - if flags != nil { - var jsonConfig map[string]interface{} - reader = bytes.NewReader(b) - if err := json.NewDecoder(reader).Decode(&jsonConfig); err != nil { - return nil, err - } - - configSet := configValuesSet(jsonConfig) - - if err := findConfigurationConflicts(configSet, flags); err != nil { - return nil, err - } - - // Override flag values to make sure the values set in the config file with nullable values, like `false`, - // are not overridden by default truthy values from the flags that were not explicitly set. - // See https://github.com/docker/docker/issues/20289 for an example. - // - // TODO: Rewrite configuration logic to avoid same issue with other nullable values, like numbers. - namedOptions := make(map[string]interface{}) - for key, value := range configSet { - f := flags.Lookup("-" + key) - if f == nil { // ignore named flags that don't match - namedOptions[key] = value - continue - } - - if _, ok := f.Value.(boolValue); ok { - f.Value.Set(fmt.Sprintf("%v", value)) - } - } - if len(namedOptions) > 0 { - // set also default for mergeVal flags that are boolValue at the same time. - flags.VisitAll(func(f *flag.Flag) { - if opt, named := f.Value.(opts.NamedOption); named { - v, set := namedOptions[opt.Name()] - _, boolean := f.Value.(boolValue) - if set && boolean { - f.Value.Set(fmt.Sprintf("%v", v)) - } - } - }) - } - - config.valuesSet = configSet - } - - reader = bytes.NewReader(b) - err = json.NewDecoder(reader).Decode(&config) - return &config, err -} - -// configValuesSet returns the configuration values explicitly set in the file. -func configValuesSet(config map[string]interface{}) map[string]interface{} { - flatten := make(map[string]interface{}) - for k, v := range config { - if m, isMap := v.(map[string]interface{}); isMap && !flatOptions[k] { - for km, vm := range m { - flatten[km] = vm - } - continue - } - - flatten[k] = v - } - return flatten -} - -// findConfigurationConflicts iterates over the provided flags searching for -// duplicated configurations and unknown keys. It returns an error with all the conflicts if -// it finds any. 
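MergeDaemonConfigurations above layers the flag values over the file values with mergo, after conflicts have been ruled out. A self-contained sketch of that precedence behavior with a tiny stand-in struct; mergo.Merge only fills fields that are still zero-valued in the destination:

    package main

    import (
        "fmt"

        "github.com/imdario/mergo"
    )

    type cfg struct {
        LogLevel string
        Debug    bool
    }

    func main() {
        file := cfg{LogLevel: "info"} // as if loaded from daemon.json
        flags := cfg{Debug: true}     // as if parsed from the command line
        if err := mergo.Merge(&file, flags); err != nil {
            panic(err)
        }
        fmt.Printf("%+v\n", file) // {LogLevel:info Debug:true}
    }

Because conflicting keys are rejected before the merge, the zero-value rule never has to arbitrate between two explicit settings.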
-func findConfigurationConflicts(config map[string]interface{}, flags *flag.FlagSet) error { - // 1. Search keys from the file that we don't recognize as flags. - unknownKeys := make(map[string]interface{}) - for key, value := range config { - flagName := "-" + key - if flag := flags.Lookup(flagName); flag == nil { - unknownKeys[key] = value - } - } - - // 2. Discard values that implement NamedOption. - // Their configuration name differs from their flag name, like `labels` and `label`. - if len(unknownKeys) > 0 { - unknownNamedConflicts := func(f *flag.Flag) { - if namedOption, ok := f.Value.(opts.NamedOption); ok { - if _, valid := unknownKeys[namedOption.Name()]; valid { - delete(unknownKeys, namedOption.Name()) - } - } - } - flags.VisitAll(unknownNamedConflicts) - } - - if len(unknownKeys) > 0 { - var unknown []string - for key := range unknownKeys { - unknown = append(unknown, key) - } - return fmt.Errorf("the following directives don't match any configuration option: %s", strings.Join(unknown, ", ")) - } - - var conflicts []string - printConflict := func(name string, flagValue, fileValue interface{}) string { - return fmt.Sprintf("%s: (from flag: %v, from file: %v)", name, flagValue, fileValue) - } - - // 3. Search keys that are present as a flag and as a file option. - duplicatedConflicts := func(f *flag.Flag) { - // search option name in the json configuration payload if the value is a named option - if namedOption, ok := f.Value.(opts.NamedOption); ok { - if optsValue, ok := config[namedOption.Name()]; ok { - conflicts = append(conflicts, printConflict(namedOption.Name(), f.Value.String(), optsValue)) - } - } else { - // search flag name in the json configuration payload without trailing dashes - for _, name := range f.Names { - name = strings.TrimLeft(name, "-") - - if value, ok := config[name]; ok { - conflicts = append(conflicts, printConflict(name, f.Value.String(), value)) - break - } - } - } - } - - flags.Visit(duplicatedConflicts) - - if len(conflicts) > 0 { - return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s", strings.Join(conflicts, ", ")) - } - return nil -} - -// ValidateConfiguration validates some specific configs. -// such as config.DNS, config.Labels, config.DNSSearch, -// as well as config.MaxConcurrentDownloads, config.MaxConcurrentUploads. 
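findConfigurationConflicts above is what produces the "specified both as a flag and in the configuration file" startup error. A simplified, illustrative reimplementation of the core check; the real function additionally resolves NamedOption aliases (such as `labels` in the file vs. `-label` on the command line):

    package main

    import (
        "fmt"
        "strings"
    )

    // findConflicts reports keys set both in the file config and on the command line.
    func findConflicts(fileCfg map[string]interface{}, flagsSet map[string]string) error {
        var conflicts []string
        for key, fileVal := range fileCfg {
            if flagVal, ok := flagsSet[key]; ok {
                conflicts = append(conflicts,
                    fmt.Sprintf("%s: (from flag: %v, from file: %v)", key, flagVal, fileVal))
            }
        }
        if len(conflicts) > 0 {
            return fmt.Errorf("the following directives are specified both as a flag and in the configuration file: %s",
                strings.Join(conflicts, ", "))
        }
        return nil
    }

    func main() {
        err := findConflicts(
            map[string]interface{}{"log-level": "debug"},
            map[string]string{"log-level": "info"},
        )
        fmt.Println(err)
    }
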
-func ValidateConfiguration(config *Config) error { - // validate DNS - for _, dns := range config.DNS { - if _, err := opts.ValidateIPAddress(dns); err != nil { - return err - } - } - - // validate DNSSearch - for _, dnsSearch := range config.DNSSearch { - if _, err := opts.ValidateDNSSearch(dnsSearch); err != nil { - return err - } - } - - // validate Labels - for _, label := range config.Labels { - if _, err := opts.ValidateLabel(label); err != nil { - return err - } - } - - // validate MaxConcurrentDownloads - if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil && *config.MaxConcurrentDownloads < 0 { - return fmt.Errorf("invalid max concurrent downloads: %d", *config.MaxConcurrentDownloads) - } - - // validate MaxConcurrentUploads - if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil && *config.MaxConcurrentUploads < 0 { - return fmt.Errorf("invalid max concurrent uploads: %d", *config.MaxConcurrentUploads) - } - - // validate that "default" runtime is not reset - if runtimes := config.GetAllRuntimes(); len(runtimes) > 0 { - if _, ok := runtimes[stockRuntimeName]; ok { - return fmt.Errorf("runtime name '%s' is reserved", stockRuntimeName) - } - } - - if defaultRuntime := config.GetDefaultRuntimeName(); defaultRuntime != "" && defaultRuntime != stockRuntimeName { - runtimes := config.GetAllRuntimes() - if _, ok := runtimes[defaultRuntime]; !ok { - return fmt.Errorf("specified default runtime '%s' does not exist", defaultRuntime) - } - } - - return nil -} diff --git a/daemon/config_experimental.go b/daemon/config_experimental.go deleted file mode 100644 index ceb7c38225..0000000000 --- a/daemon/config_experimental.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build experimental - -package daemon - -import flag "github.com/docker/docker/pkg/mflag" - -func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { -} diff --git a/daemon/config_solaris.go b/daemon/config_solaris.go deleted file mode 100644 index e59d0514df..0000000000 --- a/daemon/config_solaris.go +++ /dev/null @@ -1,47 +0,0 @@ -package daemon - -import ( - flag "github.com/docker/docker/pkg/mflag" -) - -var ( - defaultPidFile = "/var/run/docker.pid" - defaultGraph = "/var/lib/docker" - defaultExec = "zones" -) - -// Config defines the configuration of a docker daemon. -// These are the configuration settings that you pass -// to the docker daemon when you launch it with say: `docker -d -e lxc` -type Config struct { - CommonConfig - - // Fields below here are platform specific. - ExecRoot string `json:"exec-root,omitempty"` -} - -// bridgeConfig stores all the bridge driver specific -// configuration. -type bridgeConfig struct { - commonBridgeConfig -} - -// InstallFlags adds command-line options to the top-level flag parser for -// the current process. -// Subsequent calls to `flag.Parse` will populate config with values parsed -// from the command-line. 
-func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { - // First handle install flags which are consistent cross-platform - config.InstallCommonFlags(cmd, usageFn) - - // Then platform-specific install flags - config.attachExperimentalFlags(cmd, usageFn) -} - -// GetExecRoot returns the user configured Exec-root -func (config *Config) GetExecRoot() string { - return config.ExecRoot -} -func (config *Config) isSwarmCompatible() error { - return nil -} diff --git a/daemon/config_stub.go b/daemon/config_stub.go deleted file mode 100644 index 796e6b6e4e..0000000000 --- a/daemon/config_stub.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !experimental - -package daemon - -import flag "github.com/docker/docker/pkg/mflag" - -func (config *Config) attachExperimentalFlags(cmd *flag.FlagSet, usageFn func(string) string) { -} diff --git a/daemon/config_test.go b/daemon/config_test.go deleted file mode 100644 index 0375c1ae21..0000000000 --- a/daemon/config_test.go +++ /dev/null @@ -1,278 +0,0 @@ -package daemon - -import ( - "io/ioutil" - "os" - "strings" - "testing" - - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/mflag" -) - -func TestDaemonConfigurationMerge(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"debug": true}`)) - f.Close() - - c := &Config{ - CommonConfig: CommonConfig{ - AutoRestart: true, - LogConfig: LogConfig{ - Type: "syslog", - Config: map[string]string{"tag": "test"}, - }, - }, - } - - cc, err := MergeDaemonConfigurations(c, nil, configFile) - if err != nil { - t.Fatal(err) - } - if !cc.Debug { - t.Fatalf("expected %v, got %v\n", true, cc.Debug) - } - if !cc.AutoRestart { - t.Fatalf("expected %v, got %v\n", true, cc.AutoRestart) - } - if cc.LogConfig.Type != "syslog" { - t.Fatalf("expected syslog config, got %q\n", cc.LogConfig) - } -} - -func TestDaemonConfigurationNotFound(t *testing.T) { - _, err := MergeDaemonConfigurations(&Config{}, nil, "/tmp/foo-bar-baz-docker") - if err == nil || !os.IsNotExist(err) { - t.Fatalf("expected does not exist error, got %v", err) - } -} - -func TestDaemonBrokenConfiguration(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"Debug": tru`)) - f.Close() - - _, err = MergeDaemonConfigurations(&Config{}, nil, configFile) - if err == nil { - t.Fatalf("expected error, got %v", err) - } -} - -func TestParseClusterAdvertiseSettings(t *testing.T) { - _, err := parseClusterAdvertiseSettings("something", "") - if err != errDiscoveryDisabled { - t.Fatalf("expected discovery disabled error, got %v\n", err) - } - - _, err = parseClusterAdvertiseSettings("", "something") - if err == nil { - t.Fatalf("expected discovery store error, got %v\n", err) - } - - _, err = parseClusterAdvertiseSettings("etcd", "127.0.0.1:8080") - if err != nil { - t.Fatal(err) - } -} - -func TestFindConfigurationConflicts(t *testing.T) { - config := map[string]interface{}{"authorization-plugins": "foobar"} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - - flags.String([]string{"-authorization-plugins"}, "", "") - if err := flags.Set("-authorization-plugins", "asdf"); err != nil { - t.Fatal(err) - } - - err := findConfigurationConflicts(config, flags) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "authorization-plugins: (from flag: asdf, from file: foobar)") { - t.Fatalf("expected 
authorization-plugins conflict, got %v", err) - } -} - -func TestFindConfigurationConflictsWithNamedOptions(t *testing.T) { - config := map[string]interface{}{"hosts": []string{"qwer"}} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - - var hosts []string - flags.Var(opts.NewNamedListOptsRef("hosts", &hosts, opts.ValidateHost), []string{"H", "-host"}, "Daemon socket(s) to connect to") - if err := flags.Set("-host", "tcp://127.0.0.1:4444"); err != nil { - t.Fatal(err) - } - if err := flags.Set("H", "unix:///var/run/docker.sock"); err != nil { - t.Fatal(err) - } - - err := findConfigurationConflicts(config, flags) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "hosts") { - t.Fatalf("expected hosts conflict, got %v", err) - } -} - -func TestDaemonConfigurationMergeConflicts(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"debug": true}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.Bool([]string{"debug"}, false, "") - flags.Set("debug", "false") - - _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "debug") { - t.Fatalf("expected debug conflict, got %v", err) - } -} - -func TestDaemonConfigurationMergeConflictsWithInnerStructs(t *testing.T) { - f, err := ioutil.TempFile("", "docker-config-") - if err != nil { - t.Fatal(err) - } - - configFile := f.Name() - f.Write([]byte(`{"tlscacert": "/etc/certificates/ca.pem"}`)) - f.Close() - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - flags.String([]string{"tlscacert"}, "", "") - flags.Set("tlscacert", "~/.docker/ca.pem") - - _, err = MergeDaemonConfigurations(&Config{}, flags, configFile) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "tlscacert") { - t.Fatalf("expected tlscacert conflict, got %v", err) - } -} - -func TestFindConfigurationConflictsWithUnknownKeys(t *testing.T) { - config := map[string]interface{}{"tls-verify": "true"} - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - - flags.Bool([]string{"-tlsverify"}, false, "") - err := findConfigurationConflicts(config, flags) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "the following directives don't match any configuration option: tls-verify") { - t.Fatalf("expected tls-verify conflict, got %v", err) - } -} - -func TestFindConfigurationConflictsWithMergedValues(t *testing.T) { - var hosts []string - config := map[string]interface{}{"hosts": "tcp://127.0.0.1:2345"} - base := mflag.NewFlagSet("base", mflag.ContinueOnError) - base.Var(opts.NewNamedListOptsRef("hosts", &hosts, nil), []string{"H", "-host"}, "") - - flags := mflag.NewFlagSet("test", mflag.ContinueOnError) - mflag.Merge(flags, base) - - err := findConfigurationConflicts(config, flags) - if err != nil { - t.Fatal(err) - } - - flags.Set("-host", "unix:///var/run/docker.sock") - err = findConfigurationConflicts(config, flags) - if err == nil { - t.Fatal("expected error, got nil") - } - if !strings.Contains(err.Error(), "hosts: (from flag: [unix:///var/run/docker.sock], from file: tcp://127.0.0.1:2345)") { - t.Fatalf("expected hosts conflict, got %v", err) - } -} - -func TestValidateConfiguration(t *testing.T) { - c1 := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"one"}, - }, - } - - err := 
ValidateConfiguration(c1) - if err == nil { - t.Fatal("expected error, got nil") - } - - c2 := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"one=two"}, - }, - } - - err = ValidateConfiguration(c2) - if err != nil { - t.Fatalf("expected no error, got error %v", err) - } - - c3 := &Config{ - CommonConfig: CommonConfig{ - DNS: []string{"1.1.1.1"}, - }, - } - - err = ValidateConfiguration(c3) - if err != nil { - t.Fatalf("expected no error, got error %v", err) - } - - c4 := &Config{ - CommonConfig: CommonConfig{ - DNS: []string{"1.1.1.1o"}, - }, - } - - err = ValidateConfiguration(c4) - if err == nil { - t.Fatal("expected error, got nil") - } - - c5 := &Config{ - CommonConfig: CommonConfig{ - DNSSearch: []string{"a.b.c"}, - }, - } - - err = ValidateConfiguration(c5) - if err != nil { - t.Fatalf("expected no error, got error %v", err) - } - - c6 := &Config{ - CommonConfig: CommonConfig{ - DNSSearch: []string{"123456"}, - }, - } - - err = ValidateConfiguration(c6) - if err == nil { - t.Fatal("expected error, got nil") - } -} diff --git a/daemon/config_unix.go b/daemon/config_unix.go deleted file mode 100644 index 526ec3bd3c..0000000000 --- a/daemon/config_unix.go +++ /dev/null @@ -1,140 +0,0 @@ -// +build linux freebsd - -package daemon - -import ( - "fmt" - "net" - - "github.com/docker/docker/opts" - flag "github.com/docker/docker/pkg/mflag" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/go-units" -) - -var ( - defaultPidFile = "/var/run/docker.pid" - defaultGraph = "/var/lib/docker" - defaultExecRoot = "/var/run/docker" -) - -// Config defines the configuration of a docker daemon. -// It includes json tags to deserialize configuration from a file -// using the same names that the flags in the command line uses. -type Config struct { - CommonConfig - - // Fields below here are platform specific. - CgroupParent string `json:"cgroup-parent,omitempty"` - ContainerdAddr string `json:"containerd,omitempty"` - EnableSelinuxSupport bool `json:"selinux-enabled,omitempty"` - ExecRoot string `json:"exec-root,omitempty"` - RemappedRoot string `json:"userns-remap,omitempty"` - Ulimits map[string]*units.Ulimit `json:"default-ulimits,omitempty"` - Runtimes map[string]types.Runtime `json:"runtimes,omitempty"` - DefaultRuntime string `json:"default-runtime,omitempty"` - OOMScoreAdjust int `json:"oom-score-adjust,omitempty"` -} - -// bridgeConfig stores all the bridge driver specific -// configuration. -type bridgeConfig struct { - commonBridgeConfig - - // Fields below here are platform specific. - EnableIPv6 bool `json:"ipv6,omitempty"` - EnableIPTables bool `json:"iptables,omitempty"` - EnableIPForward bool `json:"ip-forward,omitempty"` - EnableIPMasq bool `json:"ip-masq,omitempty"` - EnableUserlandProxy bool `json:"userland-proxy,omitempty"` - DefaultIP net.IP `json:"ip,omitempty"` - IP string `json:"bip,omitempty"` - FixedCIDRv6 string `json:"fixed-cidr-v6,omitempty"` - DefaultGatewayIPv4 net.IP `json:"default-gateway,omitempty"` - DefaultGatewayIPv6 net.IP `json:"default-gateway-v6,omitempty"` - InterContainerCommunication bool `json:"icc,omitempty"` -} - -// InstallFlags adds command-line options to the top-level flag parser for -// the current process. -// Subsequent calls to `flag.Parse` will populate config with values parsed -// from the command-line. 
-func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { - // First handle install flags which are consistent cross-platform - config.InstallCommonFlags(cmd, usageFn) - - // Then platform-specific install flags - cmd.BoolVar(&config.EnableSelinuxSupport, []string{"-selinux-enabled"}, false, usageFn("Enable selinux support")) - cmd.StringVar(&config.SocketGroup, []string{"G", "-group"}, "docker", usageFn("Group for the unix socket")) - config.Ulimits = make(map[string]*units.Ulimit) - cmd.Var(runconfigopts.NewUlimitOpt(&config.Ulimits), []string{"-default-ulimit"}, usageFn("Default ulimits for containers")) - cmd.BoolVar(&config.bridgeConfig.EnableIPTables, []string{"#iptables", "-iptables"}, true, usageFn("Enable addition of iptables rules")) - cmd.BoolVar(&config.bridgeConfig.EnableIPForward, []string{"#ip-forward", "-ip-forward"}, true, usageFn("Enable net.ipv4.ip_forward")) - cmd.BoolVar(&config.bridgeConfig.EnableIPMasq, []string{"-ip-masq"}, true, usageFn("Enable IP masquerading")) - cmd.BoolVar(&config.bridgeConfig.EnableIPv6, []string{"-ipv6"}, false, usageFn("Enable IPv6 networking")) - cmd.StringVar(&config.ExecRoot, []string{"-exec-root"}, defaultExecRoot, usageFn("Root directory for execution state files")) - cmd.StringVar(&config.bridgeConfig.IP, []string{"#bip", "-bip"}, "", usageFn("Specify network bridge IP")) - cmd.StringVar(&config.bridgeConfig.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a network bridge")) - cmd.StringVar(&config.bridgeConfig.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs")) - cmd.StringVar(&config.bridgeConfig.FixedCIDRv6, []string{"-fixed-cidr-v6"}, "", usageFn("IPv6 subnet for fixed IPs")) - cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv4, ""), []string{"-default-gateway"}, usageFn("Container default gateway IPv4 address")) - cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultGatewayIPv6, ""), []string{"-default-gateway-v6"}, usageFn("Container default gateway IPv6 address")) - cmd.BoolVar(&config.bridgeConfig.InterContainerCommunication, []string{"#icc", "-icc"}, true, usageFn("Enable inter-container communication")) - cmd.Var(opts.NewIPOpt(&config.bridgeConfig.DefaultIP, "0.0.0.0"), []string{"#ip", "-ip"}, usageFn("Default IP when binding container ports")) - cmd.BoolVar(&config.bridgeConfig.EnableUserlandProxy, []string{"-userland-proxy"}, true, usageFn("Use userland proxy for loopback traffic")) - cmd.BoolVar(&config.EnableCors, []string{"#api-enable-cors", "#-api-enable-cors"}, false, usageFn("Enable CORS headers in the remote API, this is deprecated by --api-cors-header")) - cmd.StringVar(&config.CgroupParent, []string{"-cgroup-parent"}, "", usageFn("Set parent cgroup for all containers")) - cmd.StringVar(&config.RemappedRoot, []string{"-userns-remap"}, "", usageFn("User/Group setting for user namespaces")) - cmd.StringVar(&config.ContainerdAddr, []string{"-containerd"}, "", usageFn("Path to containerd socket")) - cmd.BoolVar(&config.LiveRestore, []string{"-live-restore"}, false, usageFn("Enable live restore of docker when containers are still running")) - config.Runtimes = make(map[string]types.Runtime) - cmd.Var(runconfigopts.NewNamedRuntimeOpt("runtimes", &config.Runtimes, stockRuntimeName), []string{"-add-runtime"}, usageFn("Register an additional OCI compatible runtime")) - cmd.StringVar(&config.DefaultRuntime, []string{"-default-runtime"}, stockRuntimeName, usageFn("Default OCI runtime for containers")) - cmd.IntVar(&config.OOMScoreAdjust, 
[]string{"-oom-score-adjust"}, -500, usageFn("Set the oom_score_adj for the daemon")) - - config.attachExperimentalFlags(cmd, usageFn) -} - -// GetRuntime returns the runtime path and arguments for a given -// runtime name -func (config *Config) GetRuntime(name string) *types.Runtime { - config.reloadLock.Lock() - defer config.reloadLock.Unlock() - if rt, ok := config.Runtimes[name]; ok { - return &rt - } - return nil -} - -// GetDefaultRuntimeName returns the current default runtime -func (config *Config) GetDefaultRuntimeName() string { - config.reloadLock.Lock() - rt := config.DefaultRuntime - config.reloadLock.Unlock() - - return rt -} - -// GetAllRuntimes returns a copy of the runtimes map -func (config *Config) GetAllRuntimes() map[string]types.Runtime { - config.reloadLock.Lock() - rts := config.Runtimes - config.reloadLock.Unlock() - return rts -} - -// GetExecRoot returns the user configured Exec-root -func (config *Config) GetExecRoot() string { - return config.ExecRoot -} - -func (config *Config) isSwarmCompatible() error { - if config.ClusterStore != "" || config.ClusterAdvertise != "" { - return fmt.Errorf("--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") - } - if config.LiveRestore { - return fmt.Errorf("--live-restore daemon configuration is incompatible with swarm mode") - } - return nil -} diff --git a/daemon/config_windows.go b/daemon/config_windows.go deleted file mode 100644 index 061f7e737c..0000000000 --- a/daemon/config_windows.go +++ /dev/null @@ -1,68 +0,0 @@ -package daemon - -import ( - "os" - - flag "github.com/docker/docker/pkg/mflag" - "github.com/docker/engine-api/types" -) - -var ( - defaultPidFile = os.Getenv("programdata") + string(os.PathSeparator) + "docker.pid" - defaultGraph = os.Getenv("programdata") + string(os.PathSeparator) + "docker" -) - -// bridgeConfig stores all the bridge driver specific -// configuration. -type bridgeConfig struct { - commonBridgeConfig -} - -// Config defines the configuration of a docker daemon. -// These are the configuration settings that you pass -// to the docker daemon when you launch it with say: `docker daemon -e windows` -type Config struct { - CommonConfig - - // Fields below here are platform specific. (There are none presently - // for the Windows daemon.) -} - -// InstallFlags adds command-line options to the top-level flag parser for -// the current process. -// Subsequent calls to `flag.Parse` will populate config with values parsed -// from the command-line. -func (config *Config) InstallFlags(cmd *flag.FlagSet, usageFn func(string) string) { - // First handle install flags which are consistent cross-platform - config.InstallCommonFlags(cmd, usageFn) - - // Then platform-specific install flags. 
-	cmd.StringVar(&config.bridgeConfig.FixedCIDR, []string{"-fixed-cidr"}, "", usageFn("IPv4 subnet for fixed IPs"))
-	cmd.StringVar(&config.bridgeConfig.Iface, []string{"b", "-bridge"}, "", usageFn("Attach containers to a virtual switch"))
-	cmd.StringVar(&config.SocketGroup, []string{"G", "-group"}, "", usageFn("Users or groups that can access the named pipe"))
-}
-
-// GetRuntime returns the runtime path and arguments for a given
-// runtime name
-func (config *Config) GetRuntime(name string) *types.Runtime {
-	return nil
-}
-
-// GetDefaultRuntimeName returns the current default runtime
-func (config *Config) GetDefaultRuntimeName() string {
-	return stockRuntimeName
-}
-
-// GetAllRuntimes returns a copy of the runtimes map
-func (config *Config) GetAllRuntimes() map[string]types.Runtime {
-	return map[string]types.Runtime{}
-}
-
-// GetExecRoot returns the user-configured Exec-root
-func (config *Config) GetExecRoot() string {
-	return ""
-}
-
-func (config *Config) isSwarmCompatible() error {
-	return nil
-}
diff --git a/daemon/container.go b/daemon/container.go
deleted file mode 100644
index b9f63dedde..0000000000
--- a/daemon/container.go
+++ /dev/null
@@ -1,256 +0,0 @@
-package daemon
-
-import (
-	"fmt"
-	"path/filepath"
-	"regexp"
-	"time"
-
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/daemon/network"
-	"github.com/docker/docker/errors"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/signal"
-	"github.com/docker/docker/pkg/system"
-	"github.com/docker/docker/pkg/truncindex"
-	containertypes "github.com/docker/engine-api/types/container"
-	"github.com/docker/engine-api/types/strslice"
-	"github.com/docker/go-connections/nat"
-)
-
-// GetContainer looks for a container using the provided information, which could be
-// one of the following inputs from the caller:
-//  - A full container ID, which will exactly match a container in the daemon's list
-//  - A container name, which will only match exactly via the GetByName() function
-//  - A partial container ID prefix (e.g. short ID) of any length that is
-//    unique enough to only return a single container object
-// If none of these searches succeed, an error is returned
-func (daemon *Daemon) GetContainer(prefixOrName string) (*container.Container, error) {
-	if len(prefixOrName) == 0 {
-		return nil, errors.NewBadRequestError(fmt.Errorf("No container name or ID supplied"))
-	}
-
-	if containerByID := daemon.containers.Get(prefixOrName); containerByID != nil {
-		// prefix is an exact match to a full container ID
-		return containerByID, nil
-	}
-
-	// GetByName will match only an exact name provided; we ignore errors
-	if containerByName, _ := daemon.GetByName(prefixOrName); containerByName != nil {
-		// prefix is an exact match to a full container Name
-		return containerByName, nil
-	}
-
-	containerID, indexError := daemon.idIndex.Get(prefixOrName)
-	if indexError != nil {
-		// When truncindex defines an error type, use that instead
-		if indexError == truncindex.ErrNotExist {
-			err := fmt.Errorf("No such container: %s", prefixOrName)
-			return nil, errors.NewRequestNotFoundError(err)
-		}
-		return nil, indexError
-	}
-	return daemon.containers.Get(containerID), nil
-}
-
-// Exists returns true if a container of the specified ID or name exists,
-// false otherwise.
-func (daemon *Daemon) Exists(id string) bool {
-	c, _ := daemon.GetContainer(id)
-	return c != nil
-}
-
-// IsPaused returns a bool indicating if the specified container is paused.
-func (daemon *Daemon) IsPaused(id string) bool {
-	c, _ := daemon.GetContainer(id)
-	return c.State.IsPaused()
-}
-
-func (daemon *Daemon) containerRoot(id string) string {
-	return filepath.Join(daemon.repository, id)
-}
-
-// Load reads the contents of a container from disk.
-// This is typically done at startup.
-func (daemon *Daemon) load(id string) (*container.Container, error) {
-	container := daemon.newBaseContainer(id)
-
-	if err := container.FromDisk(); err != nil {
-		return nil, err
-	}
-
-	if container.ID != id {
-		return container, fmt.Errorf("Container %s is stored at %s", container.ID, id)
-	}
-
-	return container, nil
-}
-
-// Register makes a container object usable by the daemon as <container.Name>.
-func (daemon *Daemon) Register(c *container.Container) error {
-	// Attach to stdout and stderr
-	if c.Config.OpenStdin {
-		c.NewInputPipes()
-	} else {
-		c.NewNopInputPipe()
-	}
-
-	daemon.containers.Add(c.ID, c)
-	daemon.idIndex.Add(c.ID)
-
-	return nil
-}
-
-func (daemon *Daemon) newContainer(name string, config *containertypes.Config, imgID image.ID, managed bool) (*container.Container, error) {
-	var (
-		id             string
-		err            error
-		noExplicitName = name == ""
-	)
-	id, name, err = daemon.generateIDAndName(name)
-	if err != nil {
-		return nil, err
-	}
-
-	daemon.generateHostname(id, config)
-	entrypoint, args := daemon.getEntrypointAndArgs(config.Entrypoint, config.Cmd)
-
-	base := daemon.newBaseContainer(id)
-	base.Created = time.Now().UTC()
-	base.Managed = managed
-	base.Path = entrypoint
-	base.Args = args //FIXME: de-duplicate from config
-	base.Config = config
-	base.HostConfig = &containertypes.HostConfig{}
-	base.ImageID = imgID
-	base.NetworkSettings = &network.Settings{IsAnonymousEndpoint: noExplicitName}
-	base.Name = name
-	base.Driver = daemon.GraphDriverName()
-
-	return base, err
-}
-
-// GetByName returns a container given a name.
-func (daemon *Daemon) GetByName(name string) (*container.Container, error) {
-	if len(name) == 0 {
-		return nil, fmt.Errorf("No container name supplied")
-	}
-	fullName := name
-	if name[0] != '/' {
-		fullName = "/" + name
-	}
-	id, err := daemon.nameIndex.Get(fullName)
-	if err != nil {
-		return nil, fmt.Errorf("Could not find entity for %s", name)
-	}
-	e := daemon.containers.Get(id)
-	if e == nil {
-		return nil, fmt.Errorf("Could not find container for entity id %s", id)
-	}
-	return e, nil
-}
-
-// newBaseContainer creates a new container with its initial
-// configuration based on the root storage from the daemon.
-func (daemon *Daemon) newBaseContainer(id string) *container.Container {
-	return container.NewBaseContainer(id, daemon.containerRoot(id))
-}
-
-func (daemon *Daemon) getEntrypointAndArgs(configEntrypoint strslice.StrSlice, configCmd strslice.StrSlice) (string, []string) {
-	if len(configEntrypoint) != 0 {
-		return configEntrypoint[0], append(configEntrypoint[1:], configCmd...)
-	}
-	return configCmd[0], configCmd[1:]
-}
-
-func (daemon *Daemon) generateHostname(id string, config *containertypes.Config) {
-	// Generate default hostname
-	if config.Hostname == "" {
-		config.Hostname = id[:12]
-	}
-}
-
-func (daemon *Daemon) setSecurityOptions(container *container.Container, hostConfig *containertypes.HostConfig) error {
-	container.Lock()
-	defer container.Unlock()
-	return parseSecurityOpt(container, hostConfig)
-}
-
-func (daemon *Daemon) setHostConfig(container *container.Container, hostConfig *containertypes.HostConfig) error {
-	// Do not lock while creating volumes since this could be calling out to external plugins
-	// Don't want to block other actions, like `docker ps`, because we're waiting on an external plugin
-	if err := daemon.registerMountPoints(container, hostConfig); err != nil {
-		return err
-	}
-
-	container.Lock()
-	defer container.Unlock()
-
-	// Register any links from the host config before starting the container
-	if err := daemon.registerLinks(container, hostConfig); err != nil {
-		return err
-	}
-
-	// make sure links is not nil
-	// this ensures that on the next daemon restart we don't try to migrate from legacy sqlite links
-	if hostConfig.Links == nil {
-		hostConfig.Links = []string{}
-	}
-
-	container.HostConfig = hostConfig
-	return container.ToDisk()
-}
-
-// verifyContainerSettings performs validation of the hostconfig and config
-// structures.
-func (daemon *Daemon) verifyContainerSettings(hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool, validateHostname bool) ([]string, error) {
-
-	// First perform verification of settings common across all platforms.
-	if config != nil {
-		if config.WorkingDir != "" {
-			config.WorkingDir = filepath.FromSlash(config.WorkingDir) // Ensure in platform semantics
-			if !system.IsAbs(config.WorkingDir) {
-				return nil, fmt.Errorf("The working directory '%s' is invalid. It needs to be an absolute path", config.WorkingDir)
-			}
-		}
-
-		if len(config.StopSignal) > 0 {
-			_, err := signal.ParseSignal(config.StopSignal)
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		// Validate if the given hostname is RFC 1123 (https://tools.ietf.org/html/rfc1123) compliant.
-		if validateHostname && len(config.Hostname) > 0 {
-			// RFC1123 specifies that 63 bytes is the maximum length
-			// Windows has the limitation of 63 bytes in length
-			// Linux hostname is limited to HOST_NAME_MAX=64, not including the terminating null byte.
-			// We limit the length to 63 bytes here to match RFC1035 and RFC1123.
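For reference, the 63-byte/RFC 1123 check in the deleted lines that follow behaves roughly like this self-contained sketch (the sample hostnames are illustrative):

    package main

    import (
        "fmt"
        "regexp"
    )

    // Same pattern as the deleted code: each dot-separated label must
    // start and end with an alphanumeric character.
    var hostnameRE = regexp.MustCompile(`^(([[:alnum:]]|[[:alnum:]][[:alnum:]\-]*[[:alnum:]])\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\-]*[[:alnum:]])$`)

    func validHostname(h string) bool {
        return len(h) <= 63 && hostnameRE.MatchString(h)
    }

    func main() {
        for _, h := range []string{"web-01", "a.b-c.d", "-bad", "bad-"} {
            fmt.Println(h, validHostname(h)) // first two true, last two false
        }
    }
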
- matched, _ := regexp.MatchString("^(([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])\\.)*([[:alnum:]]|[[:alnum:]][[:alnum:]\\-]*[[:alnum:]])$", config.Hostname) - if len(config.Hostname) > 63 || !matched { - return nil, fmt.Errorf("invalid hostname format: %s", config.Hostname) - } - } - } - - if hostConfig == nil { - return nil, nil - } - - for port := range hostConfig.PortBindings { - _, portStr := nat.SplitProtoPort(string(port)) - if _, err := nat.ParsePort(portStr); err != nil { - return nil, fmt.Errorf("Invalid port specification: %q", portStr) - } - for _, pb := range hostConfig.PortBindings[port] { - _, err := nat.NewPort(nat.SplitProtoPort(pb.HostPort)) - if err != nil { - return nil, fmt.Errorf("Invalid port specification: %q", pb.HostPort) - } - } - } - - // Now do platform-specific verification - return verifyPlatformContainerSettings(daemon, hostConfig, config, update) -} diff --git a/daemon/container_operations.go b/daemon/container_operations.go deleted file mode 100644 index 9132684bab..0000000000 --- a/daemon/container_operations.go +++ /dev/null @@ -1,768 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "net" - "os" - "path" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/network" - derr "github.com/docker/docker/errors" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" - containertypes "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/go-connections/nat" - "github.com/docker/libnetwork" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - "github.com/docker/libnetwork/types" -) - -var ( - // ErrRootFSReadOnly is returned when a container - // rootfs is marked readonly. - ErrRootFSReadOnly = errors.New("container rootfs is marked read-only") - getPortMapInfo = container.GetSandboxPortMapInfo -) - -func (daemon *Daemon) buildSandboxOptions(container *container.Container) ([]libnetwork.SandboxOption, error) { - var ( - sboxOptions []libnetwork.SandboxOption - err error - dns []string - dnsSearch []string - dnsOptions []string - bindings = make(nat.PortMap) - pbList []types.PortBinding - exposeList []types.TransportPort - ) - - defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName() - sboxOptions = append(sboxOptions, libnetwork.OptionHostname(container.Config.Hostname), - libnetwork.OptionDomainname(container.Config.Domainname)) - - if container.HostConfig.NetworkMode.IsHost() { - sboxOptions = append(sboxOptions, libnetwork.OptionUseDefaultSandbox()) - if len(container.HostConfig.ExtraHosts) == 0 { - sboxOptions = append(sboxOptions, libnetwork.OptionOriginHostsPath("/etc/hosts")) - } - if len(container.HostConfig.DNS) == 0 && len(daemon.configStore.DNS) == 0 && - len(container.HostConfig.DNSSearch) == 0 && len(daemon.configStore.DNSSearch) == 0 && - len(container.HostConfig.DNSOptions) == 0 && len(daemon.configStore.DNSOptions) == 0 { - sboxOptions = append(sboxOptions, libnetwork.OptionOriginResolvConfPath("/etc/resolv.conf")) - } - } else { - // OptionUseExternalKey is mandatory for userns support. 
- // But optional for non-userns support - sboxOptions = append(sboxOptions, libnetwork.OptionUseExternalKey()) - } - - container.HostsPath, err = container.GetRootResourcePath("hosts") - if err != nil { - return nil, err - } - sboxOptions = append(sboxOptions, libnetwork.OptionHostsPath(container.HostsPath)) - - container.ResolvConfPath, err = container.GetRootResourcePath("resolv.conf") - if err != nil { - return nil, err - } - sboxOptions = append(sboxOptions, libnetwork.OptionResolvConfPath(container.ResolvConfPath)) - - if len(container.HostConfig.DNS) > 0 { - dns = container.HostConfig.DNS - } else if len(daemon.configStore.DNS) > 0 { - dns = daemon.configStore.DNS - } - - for _, d := range dns { - sboxOptions = append(sboxOptions, libnetwork.OptionDNS(d)) - } - - if len(container.HostConfig.DNSSearch) > 0 { - dnsSearch = container.HostConfig.DNSSearch - } else if len(daemon.configStore.DNSSearch) > 0 { - dnsSearch = daemon.configStore.DNSSearch - } - - for _, ds := range dnsSearch { - sboxOptions = append(sboxOptions, libnetwork.OptionDNSSearch(ds)) - } - - if len(container.HostConfig.DNSOptions) > 0 { - dnsOptions = container.HostConfig.DNSOptions - } else if len(daemon.configStore.DNSOptions) > 0 { - dnsOptions = daemon.configStore.DNSOptions - } - - for _, ds := range dnsOptions { - sboxOptions = append(sboxOptions, libnetwork.OptionDNSOptions(ds)) - } - - if container.NetworkSettings.SecondaryIPAddresses != nil { - name := container.Config.Hostname - if container.Config.Domainname != "" { - name = name + "." + container.Config.Domainname - } - - for _, a := range container.NetworkSettings.SecondaryIPAddresses { - sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(name, a.Addr)) - } - } - - for _, extraHost := range container.HostConfig.ExtraHosts { - // allow IPv6 addresses in extra hosts; only split on first ":" - parts := strings.SplitN(extraHost, ":", 2) - sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(parts[0], parts[1])) - } - - if container.HostConfig.PortBindings != nil { - for p, b := range container.HostConfig.PortBindings { - bindings[p] = []nat.PortBinding{} - for _, bb := range b { - bindings[p] = append(bindings[p], nat.PortBinding{ - HostIP: bb.HostIP, - HostPort: bb.HostPort, - }) - } - } - } - - portSpecs := container.Config.ExposedPorts - ports := make([]nat.Port, len(portSpecs)) - var i int - for p := range portSpecs { - ports[i] = p - i++ - } - nat.SortPortMap(ports, bindings) - for _, port := range ports { - expose := types.TransportPort{} - expose.Proto = types.ParseProtocol(port.Proto()) - expose.Port = uint16(port.Int()) - exposeList = append(exposeList, expose) - - pb := types.PortBinding{Port: expose.Port, Proto: expose.Proto} - binding := bindings[port] - for i := 0; i < len(binding); i++ { - pbCopy := pb.GetCopy() - newP, err := nat.NewPort(nat.SplitProtoPort(binding[i].HostPort)) - var portStart, portEnd int - if err == nil { - portStart, portEnd, err = newP.Range() - } - if err != nil { - return nil, fmt.Errorf("Error parsing HostPort value(%s):%v", binding[i].HostPort, err) - } - pbCopy.HostPort = uint16(portStart) - pbCopy.HostPortEnd = uint16(portEnd) - pbCopy.HostIP = net.ParseIP(binding[i].HostIP) - pbList = append(pbList, pbCopy) - } - - if container.HostConfig.PublishAllPorts && len(binding) == 0 { - pbList = append(pbList, pb) - } - } - - sboxOptions = append(sboxOptions, - libnetwork.OptionPortMapping(pbList), - libnetwork.OptionExposedPorts(exposeList)) - - // Legacy Link feature is supported only for the default 
bridge network. - // return if this call to build join options is not for default bridge network - // Legacy Link is only supported by docker run --link - bridgeSettings, ok := container.NetworkSettings.Networks[defaultNetName] - if !ok { - return sboxOptions, nil - } - - if bridgeSettings.EndpointID == "" { - return sboxOptions, nil - } - - var ( - childEndpoints, parentEndpoints []string - cEndpointID string - ) - - children := daemon.children(container) - for linkAlias, child := range children { - if !isLinkable(child) { - return nil, fmt.Errorf("Cannot link to %s, as it does not belong to the default network", child.Name) - } - _, alias := path.Split(linkAlias) - // allow access to the linked container via the alias, real name, and container hostname - aliasList := alias + " " + child.Config.Hostname - // only add the name if alias isn't equal to the name - if alias != child.Name[1:] { - aliasList = aliasList + " " + child.Name[1:] - } - sboxOptions = append(sboxOptions, libnetwork.OptionExtraHost(aliasList, child.NetworkSettings.Networks[defaultNetName].IPAddress)) - cEndpointID = child.NetworkSettings.Networks[defaultNetName].EndpointID - if cEndpointID != "" { - childEndpoints = append(childEndpoints, cEndpointID) - } - } - - for alias, parent := range daemon.parents(container) { - if daemon.configStore.DisableBridge || !container.HostConfig.NetworkMode.IsPrivate() { - continue - } - - _, alias = path.Split(alias) - logrus.Debugf("Update /etc/hosts of %s for alias %s with ip %s", parent.ID, alias, bridgeSettings.IPAddress) - sboxOptions = append(sboxOptions, libnetwork.OptionParentUpdate( - parent.ID, - alias, - bridgeSettings.IPAddress, - )) - if cEndpointID != "" { - parentEndpoints = append(parentEndpoints, cEndpointID) - } - } - - linkOptions := options.Generic{ - netlabel.GenericData: options.Generic{ - "ParentEndpoints": parentEndpoints, - "ChildEndpoints": childEndpoints, - }, - } - - sboxOptions = append(sboxOptions, libnetwork.OptionGeneric(linkOptions)) - return sboxOptions, nil -} - -func (daemon *Daemon) updateNetworkSettings(container *container.Container, n libnetwork.Network) error { - if container.NetworkSettings == nil { - container.NetworkSettings = &network.Settings{Networks: make(map[string]*networktypes.EndpointSettings)} - } - - if !container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { - return runconfig.ErrConflictHostNetwork - } - - for s := range container.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(s) - if err != nil { - continue - } - - if sn.Name() == n.Name() { - // Avoid duplicate config - return nil - } - if !containertypes.NetworkMode(sn.Type()).IsPrivate() || - !containertypes.NetworkMode(n.Type()).IsPrivate() { - return runconfig.ErrConflictSharedNetwork - } - if containertypes.NetworkMode(sn.Name()).IsNone() || - containertypes.NetworkMode(n.Name()).IsNone() { - return runconfig.ErrConflictNoNetwork - } - } - - if _, ok := container.NetworkSettings.Networks[n.Name()]; !ok { - container.NetworkSettings.Networks[n.Name()] = new(networktypes.EndpointSettings) - } - - return nil -} - -func (daemon *Daemon) updateEndpointNetworkSettings(container *container.Container, n libnetwork.Network, ep libnetwork.Endpoint) error { - if err := container.BuildEndpointInfo(n, ep); err != nil { - return err - } - - if container.HostConfig.NetworkMode == runconfig.DefaultDaemonNetworkMode() { - container.NetworkSettings.Bridge = daemon.configStore.bridgeConfig.Iface - } - - return nil -} - -// UpdateNetwork is used 
to update the container's network (e.g. when linked containers
-// get removed/unlinked).
-func (daemon *Daemon) updateNetwork(container *container.Container) error {
-	ctrl := daemon.netController
-	sid := container.NetworkSettings.SandboxID
-
-	sb, err := ctrl.SandboxByID(sid)
-	if err != nil {
-		return fmt.Errorf("error locating sandbox id %s: %v", sid, err)
-	}
-
-	// Check whether the container is connected to the default bridge network
-	var n libnetwork.Network
-	for name := range container.NetworkSettings.Networks {
-		sn, err := daemon.FindNetwork(name)
-		if err != nil {
-			continue
-		}
-		if sn.Name() == runconfig.DefaultDaemonNetworkMode().NetworkName() {
-			n = sn
-			break
-		}
-	}
-
-	if n == nil {
-		// Not connected to the default bridge network; nothing to do
-		return nil
-	}
-
-	options, err := daemon.buildSandboxOptions(container)
-	if err != nil {
-		return fmt.Errorf("Update network failed: %v", err)
-	}
-
-	if err := sb.Refresh(options...); err != nil {
-		return fmt.Errorf("Update network failed: failed to refresh sandbox %s: %v", sid, err)
-	}
-
-	return nil
-}
-
-func errClusterNetworkOnRun(n string) error {
-	return fmt.Errorf("swarm-scoped network (%s) is not compatible with `docker create` or `docker run`. This network can only be used by a docker service", n)
-}
-
-// updateContainerNetworkSettings updates the network settings
-func (daemon *Daemon) updateContainerNetworkSettings(container *container.Container, endpointsConfig map[string]*networktypes.EndpointSettings) error {
-	var (
-		n   libnetwork.Network
-		err error
-	)
-
-	mode := container.HostConfig.NetworkMode
-	if container.Config.NetworkDisabled || mode.IsContainer() {
-		return nil
-	}
-
-	networkName := mode.NetworkName()
-	if mode.IsDefault() {
-		networkName = daemon.netController.Config().Daemon.DefaultNetwork
-	}
-	if mode.IsUserDefined() {
-		n, err = daemon.FindNetwork(networkName)
-		if err != nil {
-			return err
-		}
-		if !container.Managed && n.Info().Dynamic() {
-			return errClusterNetworkOnRun(networkName)
-		}
-		networkName = n.Name()
-	}
-	if container.NetworkSettings == nil {
-		container.NetworkSettings = &network.Settings{}
-	}
-	if len(endpointsConfig) > 0 {
-		container.NetworkSettings.Networks = endpointsConfig
-	}
-	if container.NetworkSettings.Networks == nil {
-		container.NetworkSettings.Networks = make(map[string]*networktypes.EndpointSettings)
-		container.NetworkSettings.Networks[networkName] = new(networktypes.EndpointSettings)
-	}
-	if !mode.IsUserDefined() {
-		return nil
-	}
-	// Make sure to internally store the per-network endpoint config by network name
-	if _, ok := container.NetworkSettings.Networks[networkName]; ok {
-		return nil
-	}
-	if nwConfig, ok := container.NetworkSettings.Networks[n.ID()]; ok {
-		container.NetworkSettings.Networks[networkName] = nwConfig
-		delete(container.NetworkSettings.Networks, n.ID())
-		return nil
-	}
-
-	return nil
-}
-
-func (daemon *Daemon) allocateNetwork(container *container.Container) error {
-	controller := daemon.netController
-
-	if daemon.netController == nil {
-		return nil
-	}
-
-	// Clean up any stale sandbox left over from an ungraceful daemon shutdown
-	if err := controller.SandboxDestroy(container.ID); err != nil {
-		logrus.Errorf("failed to clean up stale network sandbox for container %s", container.ID)
-	}
-
-	updateSettings := false
-	if len(container.NetworkSettings.Networks) == 0 {
-		if container.Config.NetworkDisabled || container.HostConfig.NetworkMode.IsContainer() {
-			return nil
-		}
-
-		err := daemon.updateContainerNetworkSettings(container, nil)
-		if err != nil {
-			return err
-		}
-		updateSettings = true
-	}
-
-	// Always connect to the default network first, since only the default
-	// network mode supports links. Link setup happens when the sandbox is
-	// initialized, and the sandbox is only initialized on the first network
-	// connection.
-	defaultNetName := runconfig.DefaultDaemonNetworkMode().NetworkName()
-	if nConf, ok := container.NetworkSettings.Networks[defaultNetName]; ok {
-		if err := daemon.connectToNetwork(container, defaultNetName, nConf, updateSettings); err != nil {
-			return err
-		}
-	}
-	for n, nConf := range container.NetworkSettings.Networks {
-		if n == defaultNetName {
-			continue
-		}
-		if err := daemon.connectToNetwork(container, n, nConf, updateSettings); err != nil {
-			return err
-		}
-	}
-
-	return container.WriteHostConfig()
-}
-
-func (daemon *Daemon) getNetworkSandbox(container *container.Container) libnetwork.Sandbox {
-	var sb libnetwork.Sandbox
-	daemon.netController.WalkSandboxes(func(s libnetwork.Sandbox) bool {
-		if s.ContainerID() == container.ID {
-			sb = s
-			return true
-		}
-		return false
-	})
-	return sb
-}
-
-// hasUserDefinedIPAddress returns whether the passed endpoint configuration contains IP address configuration
-func hasUserDefinedIPAddress(epConfig *networktypes.EndpointSettings) bool {
-	return epConfig != nil && epConfig.IPAMConfig != nil && (len(epConfig.IPAMConfig.IPv4Address) > 0 || len(epConfig.IPAMConfig.IPv6Address) > 0)
-}
-
-// A user-specified IP address is acceptable only for networks with user-specified subnets.
-func validateNetworkingConfig(n libnetwork.Network, epConfig *networktypes.EndpointSettings) error {
-	if n == nil || epConfig == nil {
-		return nil
-	}
-	if !hasUserDefinedIPAddress(epConfig) {
-		return nil
-	}
-	_, _, nwIPv4Configs, nwIPv6Configs := n.Info().IpamConfig()
-	for _, s := range []struct {
-		ipConfigured  bool
-		subnetConfigs []*libnetwork.IpamConf
-	}{
-		{
-			ipConfigured:  len(epConfig.IPAMConfig.IPv4Address) > 0,
-			subnetConfigs: nwIPv4Configs,
-		},
-		{
-			ipConfigured:  len(epConfig.IPAMConfig.IPv6Address) > 0,
-			subnetConfigs: nwIPv6Configs,
-		},
-	} {
-		if s.ipConfigured {
-			foundSubnet := false
-			for _, cfg := range s.subnetConfigs {
-				if len(cfg.PreferredPool) > 0 {
-					foundSubnet = true
-					break
-				}
-			}
-			if !foundSubnet {
-				return runconfig.ErrUnsupportedNetworkNoSubnetAndIP
-			}
-		}
-	}
-
-	return nil
-}
-
-// cleanOperationalData resets the operational data from the passed endpoint settings
-func cleanOperationalData(es *networktypes.EndpointSettings) {
-	es.EndpointID = ""
-	es.Gateway = ""
-	es.IPAddress = ""
-	es.IPPrefixLen = 0
-	es.IPv6Gateway = ""
-	es.GlobalIPv6Address = ""
-	es.GlobalIPv6PrefixLen = 0
-	es.MacAddress = ""
-}
-
-func (daemon *Daemon) updateNetworkConfig(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (libnetwork.Network, error) {
-	if container.HostConfig.NetworkMode.IsContainer() {
-		return nil, runconfig.ErrConflictSharedNetwork
-	}
-
-	if containertypes.NetworkMode(idOrName).IsBridge() &&
-		daemon.configStore.DisableBridge {
-		container.Config.NetworkDisabled = true
-		return nil, nil
-	}
-
-	if !containertypes.NetworkMode(idOrName).IsUserDefined() {
-		if hasUserDefinedIPAddress(endpointConfig) {
-			return nil, runconfig.ErrUnsupportedNetworkAndIP
-		}
-		if endpointConfig != nil && len(endpointConfig.Aliases) > 0 {
-			return nil, runconfig.ErrUnsupportedNetworkAndAlias
-		}
-	} else {
-		addShortID := true
-		shortID := stringid.TruncateID(container.ID)
-		for _, 
alias := range endpointConfig.Aliases { - if alias == shortID { - addShortID = false - break - } - } - if addShortID { - endpointConfig.Aliases = append(endpointConfig.Aliases, shortID) - } - } - - n, err := daemon.FindNetwork(idOrName) - if err != nil { - return nil, err - } - - if err := validateNetworkingConfig(n, endpointConfig); err != nil { - return nil, err - } - - if updateSettings { - if err := daemon.updateNetworkSettings(container, n); err != nil { - return nil, err - } - } - return n, nil -} - -func (daemon *Daemon) connectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings, updateSettings bool) (err error) { - if endpointConfig == nil { - endpointConfig = &networktypes.EndpointSettings{} - } - n, err := daemon.updateNetworkConfig(container, idOrName, endpointConfig, updateSettings) - if err != nil { - return err - } - if n == nil { - return nil - } - - controller := daemon.netController - - sb := daemon.getNetworkSandbox(container) - createOptions, err := container.BuildCreateEndpointOptions(n, endpointConfig, sb) - if err != nil { - return err - } - - endpointName := strings.TrimPrefix(container.Name, "/") - ep, err := n.CreateEndpoint(endpointName, createOptions...) - if err != nil { - return err - } - defer func() { - if err != nil { - if e := ep.Delete(false); e != nil { - logrus.Warnf("Could not rollback container connection to network %s", idOrName) - } - } - }() - container.NetworkSettings.Networks[n.Name()] = endpointConfig - - if err := daemon.updateEndpointNetworkSettings(container, n, ep); err != nil { - return err - } - - if sb == nil { - options, err := daemon.buildSandboxOptions(container) - if err != nil { - return err - } - sb, err = controller.NewSandbox(container.ID, options...) 
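connectToNetwork above cleans up after itself with a named error return plus a deferred endpoint delete, so the endpoint is rolled back if any later step fails. A minimal, self-contained sketch of that pattern; endpoint, createEndpoint, and join are hypothetical stand-ins:

    package main

    import (
        "errors"
        "fmt"
    )

    type endpoint struct{ name string }

    func createEndpoint(name string) (*endpoint, error) { return &endpoint{name}, nil }

    func (e *endpoint) Delete() { fmt.Println("rolled back", e.name) }

    func join(e *endpoint) error { return errors.New("join failed") }

    // connect creates an endpoint and deletes it again if a later step fails.
    // The deferred func observes the final value of err because it is a named return.
    func connect(name string) (err error) {
        ep, err := createEndpoint(name)
        if err != nil {
            return err
        }
        defer func() {
            if err != nil {
                ep.Delete()
            }
        }()
        return join(ep)
    }

    func main() {
        fmt.Println(connect("web")) // prints "rolled back web", then the error
    }
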
-		if err != nil {
-			return err
-		}
-
-		container.UpdateSandboxNetworkSettings(sb)
-	}
-
-	joinOptions, err := container.BuildJoinOptions(n)
-	if err != nil {
-		return err
-	}
-
-	if err := ep.Join(sb, joinOptions...); err != nil {
-		return err
-	}
-
-	if err := container.UpdateJoinInfo(n, ep); err != nil {
-		return fmt.Errorf("Updating join info failed: %v", err)
-	}
-
-	container.NetworkSettings.Ports = getPortMapInfo(sb)
-
-	daemon.LogNetworkEventWithAttributes(n, "connect", map[string]string{"container": container.ID})
-	return nil
-}
-
-// ForceEndpointDelete deletes an endpoint from a network forcefully
-func (daemon *Daemon) ForceEndpointDelete(name string, n libnetwork.Network) error {
-	ep, err := n.EndpointByName(name)
-	if err != nil {
-		return err
-	}
-	return ep.Delete(true)
-}
-
-func disconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error {
-	var (
-		ep   libnetwork.Endpoint
-		sbox libnetwork.Sandbox
-	)
-
-	s := func(current libnetwork.Endpoint) bool {
-		epInfo := current.Info()
-		if epInfo == nil {
-			return false
-		}
-		if sb := epInfo.Sandbox(); sb != nil {
-			if sb.ContainerID() == container.ID {
-				ep = current
-				sbox = sb
-				return true
-			}
-		}
-		return false
-	}
-	n.WalkEndpoints(s)
-
-	if ep == nil && force {
-		epName := strings.TrimPrefix(container.Name, "/")
-		ep, err := n.EndpointByName(epName)
-		if err != nil {
-			return err
-		}
-		return ep.Delete(force)
-	}
-
-	if ep == nil {
-		return fmt.Errorf("container %s is not connected to the network", container.ID)
-	}
-
-	if err := ep.Leave(sbox); err != nil {
-		return fmt.Errorf("container %s failed to leave network %s: %v", container.ID, n.Name(), err)
-	}
-
-	container.NetworkSettings.Ports = getPortMapInfo(sbox)
-
-	if err := ep.Delete(false); err != nil {
-		return fmt.Errorf("endpoint delete failed for container %s on network %s: %v", container.ID, n.Name(), err)
-	}
-
-	delete(container.NetworkSettings.Networks, n.Name())
-	return nil
-}
-
-func (daemon *Daemon) initializeNetworking(container *container.Container) error {
-	var err error
-
-	if container.HostConfig.NetworkMode.IsContainer() {
-		// we need to get the hosts files from the container to join
-		nc, err := daemon.getNetworkedContainer(container.ID, container.HostConfig.NetworkMode.ConnectedContainer())
-		if err != nil {
-			return err
-		}
-		container.HostnamePath = nc.HostnamePath
-		container.HostsPath = nc.HostsPath
-		container.ResolvConfPath = nc.ResolvConfPath
-		container.Config.Hostname = nc.Config.Hostname
-		container.Config.Domainname = nc.Config.Domainname
-		return nil
-	}
-
-	if container.HostConfig.NetworkMode.IsHost() {
-		container.Config.Hostname, err = os.Hostname()
-		if err != nil {
-			return err
-		}
-	}
-
-	if err := daemon.allocateNetwork(container); err != nil {
-		return err
-	}
-
-	return container.BuildHostnameFile()
-}
-
-func (daemon *Daemon) getNetworkedContainer(containerID, connectedContainerID string) (*container.Container, error) {
-	nc, err := daemon.GetContainer(connectedContainerID)
-	if err != nil {
-		return nil, err
-	}
-	if containerID == nc.ID {
-		return nil, fmt.Errorf("cannot join own network")
-	}
-	if !nc.IsRunning() {
-		err := fmt.Errorf("cannot join network of a non-running container: %s", connectedContainerID)
-		return nil, derr.NewRequestConflictError(err)
-	}
-	if nc.IsRestarting() {
-		return nil, errContainerIsRestarting(connectedContainerID)
-	}
-	return nc, nil
-}
-
-func (daemon *Daemon) releaseNetwork(container *container.Container) {
-	if daemon.netController == nil {
-		return
-	}
- if container.HostConfig.NetworkMode.IsContainer() || container.Config.NetworkDisabled { - return - } - - sid := container.NetworkSettings.SandboxID - settings := container.NetworkSettings.Networks - container.NetworkSettings.Ports = nil - - if sid == "" || len(settings) == 0 { - return - } - - var networks []libnetwork.Network - for n, epSettings := range settings { - if nw, err := daemon.FindNetwork(n); err == nil { - networks = append(networks, nw) - } - cleanOperationalData(epSettings) - } - - sb, err := daemon.netController.SandboxByID(sid) - if err != nil { - logrus.Warnf("error locating sandbox id %s: %v", sid, err) - return - } - - if err := sb.Delete(); err != nil { - logrus.Errorf("Error deleting sandbox id %s for container %s: %v", sid, container.ID, err) - } - - for _, nw := range networks { - attributes := map[string]string{ - "container": container.ID, - } - daemon.LogNetworkEventWithAttributes(nw, "disconnect", attributes) - } -} diff --git a/daemon/container_operations_solaris.go b/daemon/container_operations_solaris.go deleted file mode 100644 index b98faaae16..0000000000 --- a/daemon/container_operations_solaris.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build solaris - -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" -) - -func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { - return nil, nil -} - -// ConnectToNetwork connects a container to a network -func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { - return fmt.Errorf("Solaris does not support connecting a running container to a network") -} - -// getSize returns real size & virtual size -func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { - return 0, 0 -} - -// DisconnectFromNetwork disconnects a container from the network -func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { - return fmt.Errorf("Solaris does not support disconnecting a running container from a network") -} - -func (daemon *Daemon) setupIpcDirs(container *container.Container) error { - return nil -} - -func (daemon *Daemon) mountVolumes(container *container.Container) error { - return nil -} - -func killProcessDirectly(container *container.Container) error { - return nil -} - -func detachMounted(path string) error { - return nil -} - -func isLinkable(child *container.Container) bool { - return false -} diff --git a/daemon/container_operations_unix.go b/daemon/container_operations_unix.go deleted file mode 100644 index 55bd3fc839..0000000000 --- a/daemon/container_operations_unix.go +++ /dev/null @@ -1,385 +0,0 @@ -// +build linux freebsd - -package daemon - -import ( - "fmt" - "os" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/links" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" - containertypes "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" - "github.com/opencontainers/runc/libcontainer/configs" - 
"github.com/opencontainers/runc/libcontainer/devices" - "github.com/opencontainers/runc/libcontainer/label" - "github.com/opencontainers/specs/specs-go" -) - -func u32Ptr(i int64) *uint32 { u := uint32(i); return &u } -func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } - -func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { - var env []string - children := daemon.children(container) - - bridgeSettings := container.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] - if bridgeSettings == nil { - return nil, nil - } - - for linkAlias, child := range children { - if !child.IsRunning() { - return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, linkAlias) - } - - childBridgeSettings := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] - if childBridgeSettings == nil { - return nil, fmt.Errorf("container %s not attached to default bridge network", child.ID) - } - - link := links.NewLink( - bridgeSettings.IPAddress, - childBridgeSettings.IPAddress, - linkAlias, - child.Config.Env, - child.Config.ExposedPorts, - ) - - for _, envVar := range link.ToEnv() { - env = append(env, envVar) - } - } - - return env, nil -} - -// getSize returns the real size & virtual size of the container. -func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { - var ( - sizeRw, sizeRootfs int64 - err error - ) - - if err := daemon.Mount(container); err != nil { - logrus.Errorf("Failed to compute size of container rootfs %s: %s", container.ID, err) - return sizeRw, sizeRootfs - } - defer daemon.Unmount(container) - - sizeRw, err = container.RWLayer.Size() - if err != nil { - logrus.Errorf("Driver %s couldn't return diff size of container %s: %s", - daemon.GraphDriverName(), container.ID, err) - // FIXME: GetSize should return an error. Not changing it now in case - // there is a side-effect. - sizeRw = -1 - } - - if parent := container.RWLayer.Parent(); parent != nil { - sizeRootfs, err = parent.Size() - if err != nil { - sizeRootfs = -1 - } else if sizeRw != -1 { - sizeRootfs += sizeRw - } - } - return sizeRw, sizeRootfs -} - -// ConnectToNetwork connects a container to a network -func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { - if endpointConfig == nil { - endpointConfig = &networktypes.EndpointSettings{} - } - if !container.Running { - if container.RemovalInProgress || container.Dead { - return errRemovalContainer(container.ID) - } - if _, err := daemon.updateNetworkConfig(container, idOrName, endpointConfig, true); err != nil { - return err - } - container.NetworkSettings.Networks[idOrName] = endpointConfig - } else { - if err := daemon.connectToNetwork(container, idOrName, endpointConfig, true); err != nil { - return err - } - } - if err := container.ToDiskLocking(); err != nil { - return fmt.Errorf("Error saving container to disk: %v", err) - } - return nil -} - -// DisconnectFromNetwork disconnects container from network n. 
-func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { - if container.HostConfig.NetworkMode.IsHost() && containertypes.NetworkMode(n.Type()).IsHost() { - return runconfig.ErrConflictHostNetwork - } - if !container.Running { - if container.RemovalInProgress || container.Dead { - return errRemovalContainer(container.ID) - } - if _, ok := container.NetworkSettings.Networks[n.Name()]; ok { - delete(container.NetworkSettings.Networks, n.Name()) - } else { - return fmt.Errorf("container %s is not connected to the network %s", container.ID, n.Name()) - } - } else { - if err := disconnectFromNetwork(container, n, false); err != nil { - return err - } - } - - if err := container.ToDiskLocking(); err != nil { - return fmt.Errorf("Error saving container to disk: %v", err) - } - - attributes := map[string]string{ - "container": container.ID, - } - daemon.LogNetworkEventWithAttributes(n, "disconnect", attributes) - return nil -} - -func (daemon *Daemon) getIpcContainer(container *container.Container) (*container.Container, error) { - containerID := container.HostConfig.IpcMode.Container() - c, err := daemon.GetContainer(containerID) - if err != nil { - return nil, err - } - if !c.IsRunning() { - return nil, fmt.Errorf("cannot join IPC of a non running container: %s", containerID) - } - if c.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return c, nil -} - -func (daemon *Daemon) getPidContainer(container *container.Container) (*container.Container, error) { - containerID := container.HostConfig.PidMode.Container() - c, err := daemon.GetContainer(containerID) - if err != nil { - return nil, err - } - if !c.IsRunning() { - return nil, fmt.Errorf("cannot join PID of a non running container: %s", containerID) - } - if c.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return c, nil -} - -func (daemon *Daemon) setupIpcDirs(c *container.Container) error { - var err error - - c.ShmPath, err = c.ShmResourcePath() - if err != nil { - return err - } - - if c.HostConfig.IpcMode.IsContainer() { - ic, err := daemon.getIpcContainer(c) - if err != nil { - return err - } - c.ShmPath = ic.ShmPath - } else if c.HostConfig.IpcMode.IsHost() { - if _, err := os.Stat("/dev/shm"); err != nil { - return fmt.Errorf("/dev/shm is not mounted, but must be for --ipc=host") - } - c.ShmPath = "/dev/shm" - } else { - rootUID, rootGID := daemon.GetRemappedUIDGID() - if !c.HasMountFor("/dev/shm") { - shmPath, err := c.ShmResourcePath() - if err != nil { - return err - } - - if err := idtools.MkdirAllAs(shmPath, 0700, rootUID, rootGID); err != nil { - return err - } - - shmSize := container.DefaultSHMSize - if c.HostConfig.ShmSize != 0 { - shmSize = c.HostConfig.ShmSize - } - shmproperty := "mode=1777,size=" + strconv.FormatInt(shmSize, 10) - if err := syscall.Mount("shm", shmPath, "tmpfs", uintptr(syscall.MS_NOEXEC|syscall.MS_NOSUID|syscall.MS_NODEV), label.FormatMountLabel(shmproperty, c.GetMountLabel())); err != nil { - return fmt.Errorf("mounting shm tmpfs: %s", err) - } - if err := os.Chown(shmPath, rootUID, rootGID); err != nil { - return err - } - } - - } - - return nil -} - -func (daemon *Daemon) mountVolumes(container *container.Container) error { - mounts, err := daemon.setupMounts(container) - if err != nil { - return err - } - - for _, m := range mounts { - dest, err := container.GetResourcePath(m.Destination) - if err != nil { - return err - } - - var stat os.FileInfo - stat, err = os.Stat(m.Source) - if 
err != nil { - return err - } - if err = fileutils.CreateIfNotExists(dest, stat.IsDir()); err != nil { - return err - } - - opts := "rbind,ro" - if m.Writable { - opts = "rbind,rw" - } - - if err := mount.Mount(m.Source, dest, "bind", opts); err != nil { - return err - } - - // mountVolumes() seems to be called for temporary mounts - // outside the container. Soon these will be unmounted with - // lazy unmount option and given we have mounted the rbind, - // all the submounts will propagate if these are shared. If - // daemon is running in host namespace and has / as shared - // then these unmounts will propagate and unmount original - // mount as well. So make all these mounts rprivate. - // Do not use propagation property of volume as that should - // apply only when mounting happen inside the container. - if err := mount.MakeRPrivate(dest); err != nil { - return err - } - } - - return nil -} - -func killProcessDirectly(container *container.Container) error { - if _, err := container.WaitStop(10 * time.Second); err != nil { - // Ensure that we don't kill ourselves - if pid := container.GetPID(); pid != 0 { - logrus.Infof("Container %s failed to exit within 10 seconds of kill - trying direct SIGKILL", stringid.TruncateID(container.ID)) - if err := syscall.Kill(pid, 9); err != nil { - if err != syscall.ESRCH { - return err - } - e := errNoSuchProcess{pid, 9} - logrus.Debug(e) - return e - } - } - } - return nil -} - -func specDevice(d *configs.Device) specs.Device { - return specs.Device{ - Type: string(d.Type), - Path: d.Path, - Major: d.Major, - Minor: d.Minor, - FileMode: fmPtr(int64(d.FileMode)), - UID: u32Ptr(int64(d.Uid)), - GID: u32Ptr(int64(d.Gid)), - } -} - -func specDeviceCgroup(d *configs.Device) specs.DeviceCgroup { - t := string(d.Type) - return specs.DeviceCgroup{ - Allow: true, - Type: &t, - Major: &d.Major, - Minor: &d.Minor, - Access: &d.Permissions, - } -} - -func getDevicesFromPath(deviceMapping containertypes.DeviceMapping) (devs []specs.Device, devPermissions []specs.DeviceCgroup, err error) { - resolvedPathOnHost := deviceMapping.PathOnHost - - // check if it is a symbolic link - if src, e := os.Lstat(deviceMapping.PathOnHost); e == nil && src.Mode()&os.ModeSymlink == os.ModeSymlink { - if linkedPathOnHost, e := filepath.EvalSymlinks(deviceMapping.PathOnHost); e == nil { - resolvedPathOnHost = linkedPathOnHost - } - } - - device, err := devices.DeviceFromPath(resolvedPathOnHost, deviceMapping.CgroupPermissions) - // if there was no error, return the device - if err == nil { - device.Path = deviceMapping.PathInContainer - return append(devs, specDevice(device)), append(devPermissions, specDeviceCgroup(device)), nil - } - - // if the device is not a device node - // try to see if it's a directory holding many devices - if err == devices.ErrNotADevice { - - // check if it is a directory - if src, e := os.Stat(resolvedPathOnHost); e == nil && src.IsDir() { - - // mount the internal devices recursively - filepath.Walk(resolvedPathOnHost, func(dpath string, f os.FileInfo, e error) error { - childDevice, e := devices.DeviceFromPath(dpath, deviceMapping.CgroupPermissions) - if e != nil { - // ignore the device - return nil - } - - // add the device to userSpecified devices - childDevice.Path = strings.Replace(dpath, resolvedPathOnHost, deviceMapping.PathInContainer, 1) - devs = append(devs, specDevice(childDevice)) - devPermissions = append(devPermissions, specDeviceCgroup(childDevice)) - - return nil - }) - } - } - - if len(devs) > 0 { - return devs, devPermissions, nil - } 
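// A sketch of a hypothetical call into getDevicesFromPath above (values
// invented for illustration, not part of the original file): a single
// device node yields one OCI spec entry plus a matching cgroup allow rule,
// while a directory such as /dev/snd is walked and yields one entry per
// device node found.
//
//	dm := containertypes.DeviceMapping{
//		PathOnHost:        "/dev/fuse",
//		PathInContainer:   "/dev/fuse",
//		CgroupPermissions: "rwm",
//	}
//	devs, perms, err := getDevicesFromPath(dm)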
- - return devs, devPermissions, fmt.Errorf("error gathering device information while adding custom device %q: %s", deviceMapping.PathOnHost, err) -} - -func detachMounted(path string) error { - return syscall.Unmount(path, syscall.MNT_DETACH) -} - -func isLinkable(child *container.Container) bool { - // A container is linkable only if it belongs to the default network - _, ok := child.NetworkSettings.Networks[runconfig.DefaultDaemonNetworkMode().NetworkName()] - return ok -} - -func errRemovalContainer(containerID string) error { - return fmt.Errorf("Container %s is marked for removal and cannot be connected or disconnected to the network", containerID) -} diff --git a/daemon/container_operations_windows.go b/daemon/container_operations_windows.go deleted file mode 100644 index 32d18c2fa3..0000000000 --- a/daemon/container_operations_windows.go +++ /dev/null @@ -1,57 +0,0 @@ -// +build windows - -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" -) - -func (daemon *Daemon) setupLinkedContainers(container *container.Container) ([]string, error) { - return nil, nil -} - -// ConnectToNetwork connects a container to a network -func (daemon *Daemon) ConnectToNetwork(container *container.Container, idOrName string, endpointConfig *networktypes.EndpointSettings) error { - return fmt.Errorf("Windows does not support connecting a running container to a network") -} - -// DisconnectFromNetwork disconnects container from a network. -func (daemon *Daemon) DisconnectFromNetwork(container *container.Container, n libnetwork.Network, force bool) error { - return fmt.Errorf("Windows does not support disconnecting a running container from a network") -} - -// getSize returns real size & virtual size -func (daemon *Daemon) getSize(container *container.Container) (int64, int64) { - // TODO Windows - return 0, 0 -} - -func (daemon *Daemon) setupIpcDirs(container *container.Container) error { - return nil -} - -// TODO Windows: Fix Post-TP5. This is a hack to allow docker cp to work -// against containers which have volumes. You will still be able to cp -// to somewhere on the container drive, but not to any mounted volumes -// inside the container. Without this fix, docker cp is broken to any -// container which has a volume, regardless of where the file is inside the -// container. 
-func (daemon *Daemon) mountVolumes(container *container.Container) error { - return nil -} - -func detachMounted(path string) error { - return nil -} - -func killProcessDirectly(container *container.Container) error { - return nil -} - -func isLinkable(child *container.Container) bool { - return false -} diff --git a/daemon/create.go b/daemon/create.go deleted file mode 100644 index 13424f4755..0000000000 --- a/daemon/create.go +++ /dev/null @@ -1,260 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/errors" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" - volumestore "github.com/docker/docker/volume/store" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/opencontainers/runc/libcontainer/label" -) - -// CreateManagedContainer creates a container that is managed by a Service -func (daemon *Daemon) CreateManagedContainer(params types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) { - return daemon.containerCreate(params, true, validateHostname) -} - -// ContainerCreate creates a regular container -func (daemon *Daemon) ContainerCreate(params types.ContainerCreateConfig, validateHostname bool) (types.ContainerCreateResponse, error) { - return daemon.containerCreate(params, false, validateHostname) -} - -func (daemon *Daemon) containerCreate(params types.ContainerCreateConfig, managed bool, validateHostname bool) (types.ContainerCreateResponse, error) { - if params.Config == nil { - return types.ContainerCreateResponse{}, fmt.Errorf("Config cannot be empty in order to create a container") - } - - warnings, err := daemon.verifyContainerSettings(params.HostConfig, params.Config, false, validateHostname) - if err != nil { - return types.ContainerCreateResponse{Warnings: warnings}, err - } - - err = daemon.verifyNetworkingConfig(params.NetworkingConfig) - if err != nil { - return types.ContainerCreateResponse{}, err - } - - if params.HostConfig == nil { - params.HostConfig = &containertypes.HostConfig{} - } - err = daemon.adaptContainerSettings(params.HostConfig, params.AdjustCPUShares) - if err != nil { - return types.ContainerCreateResponse{Warnings: warnings}, err - } - - container, err := daemon.create(params, managed) - if err != nil { - return types.ContainerCreateResponse{Warnings: warnings}, daemon.imageNotExistToErrcode(err) - } - - return types.ContainerCreateResponse{ID: container.ID, Warnings: warnings}, nil -} - -// Create creates a new container from the given configuration with a given name. 
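A minimal sketch of driving the public entry point above, before the create() implementation that follows (image name and options are hypothetical; this snippet is not part of the original file). Validation and default-filling happen in containerCreate before create() is reached:

func exampleCreate(d *Daemon) {
	resp, err := d.ContainerCreate(types.ContainerCreateConfig{
		Name:   "example",
		Config: &containertypes.Config{Image: "busybox", Cmd: []string{"true"}},
	}, false)
	if err == nil {
		fmt.Println("created", resp.ID, "warnings:", resp.Warnings)
	}
}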
-func (daemon *Daemon) create(params types.ContainerCreateConfig, managed bool) (retC *container.Container, retErr error) { - var ( - container *container.Container - img *image.Image - imgID image.ID - err error - ) - - if params.Config.Image != "" { - img, err = daemon.GetImage(params.Config.Image) - if err != nil { - return nil, err - } - imgID = img.ID() - } - - if err := daemon.mergeAndVerifyConfig(params.Config, img); err != nil { - return nil, err - } - - if err := daemon.mergeAndVerifyLogConfig(¶ms.HostConfig.LogConfig); err != nil { - return nil, err - } - - if container, err = daemon.newContainer(params.Name, params.Config, imgID, managed); err != nil { - return nil, err - } - defer func() { - if retErr != nil { - if err := daemon.cleanupContainer(container, true); err != nil { - logrus.Errorf("failed to cleanup container on create error: %v", err) - } - } - }() - - if err := daemon.setSecurityOptions(container, params.HostConfig); err != nil { - return nil, err - } - - container.HostConfig.StorageOpt = params.HostConfig.StorageOpt - - // Set RWLayer for container after mount labels have been set - if err := daemon.setRWLayer(container); err != nil { - return nil, err - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAs(container.Root, 0700, rootUID, rootGID); err != nil { - return nil, err - } - - if err := daemon.setHostConfig(container, params.HostConfig); err != nil { - return nil, err - } - defer func() { - if retErr != nil { - if err := daemon.removeMountPoints(container, true); err != nil { - logrus.Error(err) - } - } - }() - - if err := daemon.createContainerPlatformSpecificSettings(container, params.Config, params.HostConfig); err != nil { - return nil, err - } - - var endpointsConfigs map[string]*networktypes.EndpointSettings - if params.NetworkingConfig != nil { - endpointsConfigs = params.NetworkingConfig.EndpointsConfig - } - // Make sure NetworkMode has an acceptable value. We do this to ensure - // backwards API compatibility. 
- container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) - - if err := daemon.updateContainerNetworkSettings(container, endpointsConfigs); err != nil { - return nil, err - } - - if err := container.ToDisk(); err != nil { - logrus.Errorf("Error saving new container to disk: %v", err) - return nil, err - } - if err := daemon.Register(container); err != nil { - return nil, err - } - daemon.LogContainerEvent(container, "create") - return container, nil -} - -func (daemon *Daemon) generateSecurityOpt(ipcMode containertypes.IpcMode, pidMode containertypes.PidMode, privileged bool) ([]string, error) { - if ipcMode.IsHost() || pidMode.IsHost() || privileged { - return label.DisableSecOpt(), nil - } - - var ipcLabel []string - var pidLabel []string - ipcContainer := ipcMode.Container() - pidContainer := pidMode.Container() - if ipcContainer != "" { - c, err := daemon.GetContainer(ipcContainer) - if err != nil { - return nil, err - } - ipcLabel = label.DupSecOpt(c.ProcessLabel) - if pidContainer == "" { - return ipcLabel, err - } - } - if pidContainer != "" { - c, err := daemon.GetContainer(pidContainer) - if err != nil { - return nil, err - } - - pidLabel = label.DupSecOpt(c.ProcessLabel) - if ipcContainer == "" { - return pidLabel, err - } - } - - if pidLabel != nil && ipcLabel != nil { - for i := 0; i < len(pidLabel); i++ { - if pidLabel[i] != ipcLabel[i] { - return nil, fmt.Errorf("--ipc and --pid containers SELinux labels aren't the same") - } - } - return pidLabel, nil - } - return nil, nil -} - -func (daemon *Daemon) setRWLayer(container *container.Container) error { - var layerID layer.ChainID - if container.ImageID != "" { - img, err := daemon.imageStore.Get(container.ImageID) - if err != nil { - return err - } - layerID = img.RootFS.ChainID() - } - rwLayer, err := daemon.layerStore.CreateRWLayer(container.ID, layerID, container.MountLabel, daemon.setupInitLayer, container.HostConfig.StorageOpt) - if err != nil { - return err - } - container.RWLayer = rwLayer - - return nil -} - -// VolumeCreate creates a volume with the specified name, driver, and opts -// This is called directly from the remote API -func (daemon *Daemon) VolumeCreate(name, driverName string, opts, labels map[string]string) (*types.Volume, error) { - if name == "" { - name = stringid.GenerateNonCryptoID() - } - - v, err := daemon.volumes.Create(name, driverName, opts, labels) - if err != nil { - if volumestore.IsNameConflict(err) { - return nil, fmt.Errorf("A volume named %s already exists. 
Choose a different volume name.", name) - } - return nil, err - } - - daemon.LogVolumeEvent(v.Name(), "create", map[string]string{"driver": v.DriverName()}) - apiV := volumeToAPIType(v) - apiV.Mountpoint = v.Path() - return apiV, nil -} - -func (daemon *Daemon) mergeAndVerifyConfig(config *containertypes.Config, img *image.Image) error { - if img != nil && img.Config != nil { - if err := merge(config, img.Config); err != nil { - return err - } - } - if len(config.Entrypoint) == 0 && len(config.Cmd) == 0 { - return fmt.Errorf("No command specified") - } - return nil -} - -// Checks if the client set configurations for more than one network while creating a container -func (daemon *Daemon) verifyNetworkingConfig(nwConfig *networktypes.NetworkingConfig) error { - if nwConfig == nil || len(nwConfig.EndpointsConfig) <= 1 { - return nil - } - l := make([]string, 0, len(nwConfig.EndpointsConfig)) - for k := range nwConfig.EndpointsConfig { - l = append(l, k) - } - err := fmt.Errorf("Container cannot be connected to network endpoints: %s", strings.Join(l, ", ")) - return errors.NewBadRequestError(err) -} diff --git a/daemon/create_unix.go b/daemon/create_unix.go deleted file mode 100644 index 37c4a911f0..0000000000 --- a/daemon/create_unix.go +++ /dev/null @@ -1,76 +0,0 @@ -// +build !windows - -package daemon - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/stringid" - containertypes "github.com/docker/engine-api/types/container" - "github.com/opencontainers/runc/libcontainer/label" -) - -// createContainerPlatformSpecificSettings performs platform specific container create functionality -func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error { - if err := daemon.Mount(container); err != nil { - return err - } - defer daemon.Unmount(container) - - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := container.SetupWorkingDirectory(rootUID, rootGID); err != nil { - return err - } - - for spec := range config.Volumes { - name := stringid.GenerateNonCryptoID() - destination := filepath.Clean(spec) - - // Skip volumes for which we already have something mounted on that - // destination because of a --volume-from. - if container.IsDestinationMounted(destination) { - continue - } - path, err := container.GetResourcePath(destination) - if err != nil { - return err - } - - stat, err := os.Stat(path) - if err == nil && !stat.IsDir() { - return fmt.Errorf("cannot mount volume over existing file, file exists %s", path) - } - - v, err := daemon.volumes.CreateWithRef(name, hostConfig.VolumeDriver, container.ID, nil, nil) - if err != nil { - return err - } - - if err := label.Relabel(v.Path(), container.MountLabel, true); err != nil { - return err - } - - container.AddMountPointWithVolume(destination, v, true) - } - return daemon.populateVolumes(container) -} - -// populateVolumes copies data from the container's rootfs into the volume for non-binds. -// this is only called when the container is created. 
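A usage sketch for VolumeCreate above, ahead of the populateVolumes implementation that follows (names are hypothetical; this snippet is not part of the original file). An empty name is replaced with a generated ID, and a name clash in the volume store surfaces as the conflict error shown:

func exampleVolume(d *Daemon) {
	v, err := d.VolumeCreate("data", "local", nil, nil)
	if err == nil {
		fmt.Println("volume", v.Name, "at", v.Mountpoint)
	}
}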
-func (daemon *Daemon) populateVolumes(c *container.Container) error {
-	for _, mnt := range c.MountPoints {
-		if !mnt.CopyData || mnt.Volume == nil {
-			continue
-		}
-
-		logrus.Debugf("copying image data from %s:%s, to %s", c.ID, mnt.Destination, mnt.Name)
-		if err := c.CopyImagePathContent(mnt.Volume, mnt.Destination); err != nil {
-			return err
-		}
-	}
-	return nil
-}
diff --git a/daemon/create_windows.go b/daemon/create_windows.go
deleted file mode 100644
index d4da759fd3..0000000000
--- a/daemon/create_windows.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package daemon
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/docker/volume"
-	containertypes "github.com/docker/engine-api/types/container"
-)
-
-// createContainerPlatformSpecificSettings performs platform specific container create functionality
-func (daemon *Daemon) createContainerPlatformSpecificSettings(container *container.Container, config *containertypes.Config, hostConfig *containertypes.HostConfig) error {
-	// Make sure the host config has the default daemon isolation if not specified by caller.
-	if containertypes.Isolation.IsDefault(containertypes.Isolation(hostConfig.Isolation)) {
-		hostConfig.Isolation = daemon.defaultIsolation
-	}
-
-	for spec := range config.Volumes {
-
-		mp, err := volume.ParseMountSpec(spec, hostConfig.VolumeDriver)
-		if err != nil {
-			return fmt.Errorf("Unrecognised volume spec: %v", err)
-		}
-
-		// If the mountpoint doesn't have a name, generate one.
-		if len(mp.Name) == 0 {
-			mp.Name = stringid.GenerateNonCryptoID()
-		}
-
-		// Skip volumes for which we already have something mounted on that
-		// destination because of a --volume-from.
-		if container.IsDestinationMounted(mp.Destination) {
-			continue
-		}
-
-		volumeDriver := hostConfig.VolumeDriver
-
-		// Create the volume in the volume driver. If it doesn't exist,
-		// a new one will be created.
-		v, err := daemon.volumes.CreateWithRef(mp.Name, volumeDriver, container.ID, nil, nil)
-		if err != nil {
-			return err
-		}
-
-		// FIXME Windows: This code block is present in the Linux version and
-		// allows the contents to be copied to the container FS prior to it
-		// being started. However, the function utilizes the FollowSymLinkInScope
-		// path which does not cope with Windows volume-style file paths. There
-		// is a separate effort to resolve this (@swernli), so this processing
-		// is deferred for now. A case where this would be useful is when
-		// a dockerfile includes a VOLUME statement, but something is created
-		// in that directory during the dockerfile processing. What this means
-		// on Windows for TP5 is that in that scenario, the contents will not
-		// be copied, but that's (somewhat) OK as HCS will bomb out soon after,
-		// as it doesn't support mapped directories which have contents in the
		// destination path anyway.
-		//
-		// Example for repro later:
-		// FROM windowsservercore
-		// RUN mkdir c:\myvol
-		// RUN copy c:\windows\system32\ntdll.dll c:\myvol
-		// VOLUME "c:\myvol"
-		//
-		// Then
-		// docker build -t vol .
-		// docker run -it --rm vol cmd  <-- This is where HCS will error out.
- // - // // never attempt to copy existing content in a container FS to a shared volume - // if v.DriverName() == volume.DefaultDriverName { - // if err := container.CopyImagePathContent(v, mp.Destination); err != nil { - // return err - // } - // } - - // Add it to container.MountPoints - container.AddMountPointWithVolume(mp.Destination, v, mp.RW) - } - return nil -} diff --git a/daemon/daemon.go b/daemon/daemon.go deleted file mode 100644 index 0a3265ff02..0000000000 --- a/daemon/daemon.go +++ /dev/null @@ -1,1099 +0,0 @@ -// Package daemon exposes the functions that occur on the host server -// that the Docker daemon is running. -// -// In implementing the various functions of the daemon, there is often -// a method-specific struct for configuring the runtime behavior. -package daemon - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/api" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/events" - "github.com/docker/docker/daemon/exec" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/libnetwork/cluster" - // register graph drivers - _ "github.com/docker/docker/daemon/graphdriver/register" - dmetadata "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/migrate/v1" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/graphdb" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/registrar" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/pkg/truncindex" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/docker/runconfig" - "github.com/docker/docker/utils" - volumedrivers "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/local" - "github.com/docker/docker/volume/store" - "github.com/docker/libnetwork" - nwconfig "github.com/docker/libnetwork/config" - "github.com/docker/libtrust" -) - -var ( - // DefaultRuntimeBinary is the default runtime to be used by - // containerd if none is specified - DefaultRuntimeBinary = "docker-runc" - - errSystemNotSupported = fmt.Errorf("The Docker daemon is not supported on this platform.") -) - -// Daemon holds information about the Docker daemon. 
-type Daemon struct { - ID string - repository string - containers container.Store - execCommands *exec.Store - referenceStore reference.Store - downloadManager *xfer.LayerDownloadManager - uploadManager *xfer.LayerUploadManager - distributionMetadataStore dmetadata.Store - trustKey libtrust.PrivateKey - idIndex *truncindex.TruncIndex - configStore *Config - statsCollector *statsCollector - defaultLogConfig containertypes.LogConfig - RegistryService registry.Service - EventsService *events.Events - netController libnetwork.NetworkController - volumes *store.VolumeStore - discoveryWatcher discoveryReloader - root string - seccompEnabled bool - shutdown bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - layerStore layer.Store - imageStore image.Store - nameIndex *registrar.Registrar - linkIndex *linkIndex - containerd libcontainerd.Client - containerdRemote libcontainerd.Remote - defaultIsolation containertypes.Isolation // Default isolation mode on Windows - clusterProvider cluster.Provider -} - -func (daemon *Daemon) restore() error { - var ( - debug = utils.IsDebugEnabled() - currentDriver = daemon.GraphDriverName() - containers = make(map[string]*container.Container) - ) - - if !debug { - logrus.Info("Loading containers: start.") - } - dir, err := ioutil.ReadDir(daemon.repository) - if err != nil { - return err - } - - for _, v := range dir { - id := v.Name() - container, err := daemon.load(id) - if !debug && logrus.GetLevel() == logrus.InfoLevel { - fmt.Print(".") - } - if err != nil { - logrus.Errorf("Failed to load container %v: %v", id, err) - continue - } - - // Ignore the container if it does not support the current driver being used by the graph - if (container.Driver == "" && currentDriver == "aufs") || container.Driver == currentDriver { - rwlayer, err := daemon.layerStore.GetRWLayer(container.ID) - if err != nil { - logrus.Errorf("Failed to load container mount %v: %v", id, err) - continue - } - container.RWLayer = rwlayer - logrus.Debugf("Loaded container %v", container.ID) - - containers[container.ID] = container - } else { - logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID) - } - } - - var migrateLegacyLinks bool - restartContainers := make(map[*container.Container]chan struct{}) - activeSandboxes := make(map[string]interface{}) - for _, c := range containers { - if err := daemon.registerName(c); err != nil { - logrus.Errorf("Failed to register container %s: %s", c.ID, err) - continue - } - if err := daemon.Register(c); err != nil { - logrus.Errorf("Failed to register container %s: %s", c.ID, err) - continue - } - - // The LogConfig.Type is empty if the container was created before docker 1.12 with default log driver. - // We should rewrite it to use the daemon defaults. 
-		// Fixes https://github.com/docker/docker/issues/22536
-		if c.HostConfig.LogConfig.Type == "" {
-			if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
-				logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
-				continue
-			}
-		}
-	}
-	var wg sync.WaitGroup
-	var mapLock sync.Mutex
-	for _, c := range containers {
-		wg.Add(1)
-		go func(c *container.Container) {
-			defer wg.Done()
-			rm := c.RestartManager(false)
-			if c.IsRunning() || c.IsPaused() {
-				if err := daemon.containerd.Restore(c.ID, libcontainerd.WithRestartManager(rm)); err != nil {
-					logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
-					return
-				}
-				if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
-					options, err := daemon.buildSandboxOptions(c)
-					if err != nil {
-						logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
-					}
-					mapLock.Lock()
-					activeSandboxes[c.NetworkSettings.SandboxID] = options
-					mapLock.Unlock()
-				}
-
-			}
-			// fixme: only if not running
-			// get list of containers we need to restart
-			if daemon.configStore.AutoRestart && !c.IsRunning() && !c.IsPaused() && c.ShouldRestart() {
-				mapLock.Lock()
-				restartContainers[c] = make(chan struct{})
-				mapLock.Unlock()
-			}
-
-			if c.RemovalInProgress {
-				// We probably crashed in the middle of a removal, reset
-				// the flag.
-				//
-				// We DO NOT remove the container here as we do not
-				// know if the user had requested for either the
-				// associated volumes, network links or both to also
-				// be removed. So we put the container in the "dead"
-				// state and leave further processing up to them.
-				logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
-				c.ResetRemovalInProgress()
-				c.SetDead()
-				c.ToDisk()
-			}
-
-			// if c.HostConfig.Links is nil (not just empty), then it is using the old sqlite links and needs to be migrated
-			if c.HostConfig != nil && c.HostConfig.Links == nil {
-				migrateLegacyLinks = true
-			}
-		}(c)
-	}
-	wg.Wait()
-	daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
-	if err != nil {
-		return fmt.Errorf("Error initializing network controller: %v", err)
-	}
-
-	// migrate any legacy links from sqlite
-	linkdbFile := filepath.Join(daemon.root, "linkgraph.db")
-	var legacyLinkDB *graphdb.Database
-	if migrateLegacyLinks {
-		legacyLinkDB, err = graphdb.NewSqliteConn(linkdbFile)
-		if err != nil {
-			return fmt.Errorf("error connecting to legacy link graph DB %s, container links may be lost: %v", linkdbFile, err)
-		}
-		defer legacyLinkDB.Close()
-	}
-
-	// Now that all the containers are registered, register the links
-	for _, c := range containers {
-		if migrateLegacyLinks {
-			if err := daemon.migrateLegacySqliteLinks(legacyLinkDB, c); err != nil {
-				return err
-			}
-		}
-		if err := daemon.registerLinks(c, c.HostConfig); err != nil {
-			logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
-		}
-	}
-
-	group := sync.WaitGroup{}
-	for c, notifier := range restartContainers {
-		group.Add(1)
-
-		go func(c *container.Container, chNotify chan struct{}) {
-			defer group.Done()
-
-			logrus.Debugf("Starting container %s", c.ID)
-
-			// ignore errors here as this is a best effort to wait for children to be
-			// running before we try to start the container
-			children := daemon.children(c)
-			timeout := time.After(5 * time.Second)
-			for _, child := range children {
-				if notifier, exists := restartContainers[child]; exists {
-					select {
-					case <-notifier:
-					case <-timeout:
-					}
-				}
-			}
-
-			// Make sure networks are available before starting
-			daemon.waitForNetworks(c)
-			if err := daemon.containerStart(c); err != nil {
-				logrus.Errorf("Failed to start container %s: %s", c.ID, err)
-			}
-			close(chNotify)
-		}(c, notifier)
-
-	}
-	group.Wait()
-
-	// any containers that were started above would already have had this done,
-	// however we need to now prepare the mountpoints for the rest of the containers as well.
-	// This shouldn't cause any issue running on the containers that already had this run.
-	// This must be run after any containers with a restart policy so that containerized plugins
-	// can have a chance to be running before we try to initialize them.
-	for _, c := range containers {
-		// if the container has a restart policy, do not
-		// prepare the mountpoints since it has been done on restarting.
-		// This is to speed up the daemon start when a restarting container
-		// has a volume and the volume driver is not available.
-		if _, ok := restartContainers[c]; ok {
-			continue
-		}
-		group.Add(1)
-		go func(c *container.Container) {
-			defer group.Done()
-			if err := daemon.prepareMountPoints(c); err != nil {
-				logrus.Error(err)
-			}
-		}(c)
-	}
-
-	group.Wait()
-
-	if !debug {
-		if logrus.GetLevel() == logrus.InfoLevel {
-			fmt.Println()
-		}
-		logrus.Info("Loading containers: done.")
-	}
-
-	return nil
-}
-
-// waitForNetworks is used during daemon initialization when starting up containers.
-// It ensures that all of a container's networks are available before the daemon tries to start the container.
-// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
-func (daemon *Daemon) waitForNetworks(c *container.Container) {
-	if daemon.discoveryWatcher == nil {
-		return
-	}
-	// Make sure if the container has a network that requires discovery that the discovery service is available before starting
-	for netName := range c.NetworkSettings.Networks {
-		// If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
-		// Most likely this is because the K/V store used for discovery is in a container and needs to be started
-		if _, err := daemon.netController.NetworkByName(netName); err != nil {
-			if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
-				continue
-			}
-			// use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
-			// FIXME: why is this slow???
-			logrus.Debugf("Container %s waiting for network to be ready", c.Name)
-			select {
-			case <-daemon.discoveryWatcher.ReadyCh():
-			case <-time.After(60 * time.Second):
-			}
-			return
-		}
-	}
-}
-
-func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
-	return daemon.linkIndex.children(c)
-}
-
-// parents returns the names of the parent containers of the container
-// with the given name.
-func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
-	return daemon.linkIndex.parents(c)
-}
-
-func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
-	fullName := path.Join(parent.Name, alias)
-	if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
-		if err == registrar.ErrNameReserved {
-			logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
-			return nil
-		}
-		return err
-	}
-	daemon.linkIndex.link(parent, child, fullName)
-	return nil
-}
-
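The restart loop above uses a small notifier idiom worth spelling out: every container scheduled for restart owns a channel that is closed once the container has started, and parents wait on their children's channels against a shared timeout. A minimal standalone sketch of the idiom (names invented; relies on the surrounding file's time import):

func exampleRestartOrdering() {
	// One notifier channel per restarting container, closed once started.
	done := map[string]chan struct{}{"child": make(chan struct{})}
	timeout := time.After(5 * time.Second)
	go func() { close(done["child"]) }() // child reports it has started
	select {
	case <-done["child"]: // child came up, safe to start the parent
	case <-timeout: // or give up waiting and start anyway
	}
}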
-// SetClusterProvider sets a component for querying the current cluster state.
-func (daemon *Daemon) SetClusterProvider(clusterProvider cluster.Provider) {
-	daemon.clusterProvider = clusterProvider
-	daemon.netController.SetClusterProvider(clusterProvider)
-}
-
-// IsSwarmCompatible verifies if the current daemon
-// configuration is compatible with the swarm mode
-func (daemon *Daemon) IsSwarmCompatible() error {
-	if daemon.configStore == nil {
-		return nil
-	}
-	return daemon.configStore.isSwarmCompatible()
-}
-
-// NewDaemon sets up everything for the daemon to be able to service
-// requests from the webserver.
-func NewDaemon(config *Config, registryService registry.Service, containerdRemote libcontainerd.Remote) (daemon *Daemon, err error) {
-	setDefaultMtu(config)
-
-	// Ensure that we have a correct root key limit for launching containers.
-	if err := ModifyRootKeyLimit(); err != nil {
-		logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
-	}
-
-	// Ensure we have compatible and valid configuration options
-	if err := verifyDaemonSettings(config); err != nil {
-		return nil, err
-	}
-
-	// Do we have a disabled network?
-	config.DisableBridge = isBridgeNetworkDisabled(config)
-
-	// Verify the platform is supported as a daemon
-	if !platformSupported {
-		return nil, errSystemNotSupported
-	}
-
-	// Validate platform-specific requirements
-	if err := checkSystem(); err != nil {
-		return nil, err
-	}
-
-	// set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
-	// on Windows to dump goroutine stacks
-	setupDumpStackTrap()
-
-	uidMaps, gidMaps, err := setupRemappedRoot(config)
-	if err != nil {
-		return nil, err
-	}
-	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
-	if err != nil {
-		return nil, err
-	}
-
-	// get the canonical path to the Docker root directory
-	var realRoot string
-	if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
-		realRoot = config.Root
-	} else {
-		realRoot, err = fileutils.ReadSymlinkedDirectory(config.Root)
-		if err != nil {
-			return nil, fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
-		}
-	}
-
-	if err := setupDaemonRoot(config, realRoot, rootUID, rootGID); err != nil {
-		return nil, err
-	}
-
-	if err := setupDaemonProcess(config); err != nil {
-		return nil, err
-	}
-
-	// set up the tmpDir to use a canonical path
-	tmp, err := tempDir(config.Root, rootUID, rootGID)
-	if err != nil {
-		return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
-	}
-	realTmp, err := fileutils.ReadSymlinkedDirectory(tmp)
-	if err != nil {
-		return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
-	}
-	os.Setenv("TMPDIR", realTmp)
-
-	d := &Daemon{configStore: config}
-	// Ensure the daemon is properly shutdown if there is a failure during
-	// initialization
-	defer func() {
-		if err != nil {
-			if err := d.Shutdown(); err != nil {
-				logrus.Error(err)
-			}
-		}
-	}()
-
-	// Set the default isolation mode (only applicable on Windows)
-	if err := d.setDefaultIsolation(); err != nil {
-		return nil, fmt.Errorf("error setting default isolation mode: %v", err)
-	}
-
-	logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)
-
-	if err := configureMaxThreads(config); err != nil {
-		logrus.Warnf("Failed to configure golang's threads limit: %v", err)
-	}
-
-	installDefaultAppArmorProfile()
-	daemonRepo := filepath.Join(config.Root, "containers")
-	if err := idtools.MkdirAllAs(daemonRepo, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
-		return nil, err
-	}
-
-	driverName := os.Getenv("DOCKER_DRIVER")
-	if driverName == "" {
-		driverName = config.GraphDriver
-	}
-	d.layerStore, err = layer.NewStoreFromOptions(layer.StoreOptions{
-		StorePath:                 config.Root,
-		MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
-		GraphDriver:               driverName,
-		GraphDriverOptions:        config.GraphOptions,
-		UIDMaps:                   uidMaps,
-		GIDMaps:                   gidMaps,
-	})
-	if err != nil {
-		return nil, err
-	}
-
-	graphDriver := d.layerStore.DriverName()
-	imageRoot := filepath.Join(config.Root, "image", graphDriver)
-
-	// Configure and validate the kernel's security support
-	if err := configureKernelSecuritySupport(config, graphDriver); err != nil {
-		return nil, err
-	}
-
-	logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
-	d.downloadManager = xfer.NewLayerDownloadManager(d.layerStore, *config.MaxConcurrentDownloads)
-	logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
-	d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)
-
-	ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
-	if err != nil {
-		return nil, err
-	}
-
-	d.imageStore, err = image.NewImageStore(ifs, d.layerStore)
-	if err != nil {
-		return nil, err
-	}
-
-	// Configure the volumes driver
-	volStore, err := d.configureVolumes(rootUID, rootGID)
-	if err != nil {
-		return nil, err
-	}
-
-	trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
-	if err != nil {
-		return nil, err
-	}
-
-	trustDir := filepath.Join(config.Root, "trust")
-
-	if err := system.MkdirAll(trustDir, 0700); err != nil {
-		return nil, err
-	}
-
-	distributionMetadataStore, err := dmetadata.NewFSMetadataStore(filepath.Join(imageRoot, "distribution"))
-	if err != nil {
-		return nil, err
-	}
-
-	eventsService := events.New()
-
-	referenceStore, err := reference.NewReferenceStore(filepath.Join(imageRoot, "repositories.json"))
-	if err != nil {
-		return nil, fmt.Errorf("Couldn't create Tag store repositories: %s", err)
-	}
-
-	if err := restoreCustomImage(d.imageStore, d.layerStore, referenceStore); err != nil {
-		return nil, fmt.Errorf("Couldn't restore custom images: %s", err)
-	}
-
-	migrationStart := time.Now()
-	if err := v1.Migrate(config.Root, graphDriver, d.layerStore, d.imageStore, referenceStore, distributionMetadataStore); err != nil {
-		logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
-	}
-	logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
-
-	// Discovery is only enabled when the daemon is launched with an address to advertise. When
-	// initialized, the daemon is registered and we can store the discovery backend as it is read-only
-	if err := d.initDiscovery(config); err != nil {
-		return nil, err
-	}
-
-	sysInfo := sysinfo.New(false)
-	// Check if the Devices cgroup is mounted; it is a hard requirement for container security
-	// on Linux.
-	if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
-		return nil, fmt.Errorf("Devices cgroup isn't mounted")
-	}
-
-	d.ID = trustKey.PublicKey().KeyID()
-	d.repository = daemonRepo
-	d.containers = container.NewMemoryStore()
-	d.execCommands = exec.NewStore()
-	d.referenceStore = referenceStore
-	d.distributionMetadataStore = distributionMetadataStore
-	d.trustKey = trustKey
-	d.idIndex = truncindex.NewTruncIndex([]string{})
-	d.configStore = config
-	d.statsCollector = d.newStatsCollector(1 * time.Second)
-	d.defaultLogConfig = containertypes.LogConfig{
-		Type:   config.LogConfig.Type,
-		Config: config.LogConfig.Config,
-	}
-	d.RegistryService = registryService
-	d.EventsService = eventsService
-	d.volumes = volStore
-	d.root = config.Root
-	d.uidMaps = uidMaps
-	d.gidMaps = gidMaps
-	d.seccompEnabled = sysInfo.Seccomp
-
-	d.nameIndex = registrar.NewRegistrar()
-	d.linkIndex = newLinkIndex()
-	d.containerdRemote = containerdRemote
-
-	go d.execCommandGC()
-
-	d.containerd, err = containerdRemote.Client(d)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := d.restore(); err != nil {
-		return nil, err
-	}
-
-	if err := pluginInit(d, config, containerdRemote); err != nil {
-		return nil, err
-	}
-
-	return d, nil
-}
-
-func (daemon *Daemon) shutdownContainer(c *container.Container) error {
-	// TODO(windows): Handle docker restart with paused containers
-	if c.IsPaused() {
-		// To terminate a process in the freezer cgroup, we should send
-		// SIGTERM to the process and then unfreeze it; the process is
-		// then forced to terminate immediately.
-		logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
-		sig, ok := signal.SignalMap["TERM"]
-		if !ok {
-			return fmt.Errorf("System does not support SIGTERM")
-		}
-		if err := daemon.kill(c, int(sig)); err != nil {
-			return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
-		}
-		if err := daemon.containerUnpause(c); err != nil {
-			return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
-		}
-		if _, err := c.WaitStop(10 * time.Second); err != nil {
-			logrus.Debugf("container %s failed to exit in 10 seconds of SIGTERM, sending SIGKILL to force", c.ID)
-			sig, ok := signal.SignalMap["KILL"]
-			if !ok {
-				return fmt.Errorf("System does not support SIGKILL")
-			}
-			if err := daemon.kill(c, int(sig)); err != nil {
-				logrus.Errorf("Failed to SIGKILL container %s", c.ID)
-			}
-			c.WaitStop(-1 * time.Second)
-			return err
-		}
-	}
-	// If the container failed to exit within 10 seconds of SIGTERM, use the force
-	if err := daemon.containerStop(c, 10); err != nil {
-		return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
-	}
-
-	c.WaitStop(-1 * time.Second)
-	return nil
-}
-
-// Shutdown stops the daemon.
-func (daemon *Daemon) Shutdown() error {
-	daemon.shutdown = true
-	// Keep mounts and networking running on daemon shutdown if
-	// we are to keep containers running and restore them.
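// The shutdownContainer sequence above leans on a freezer-cgroup property
// worth spelling out: a frozen task cannot act on signals, so the SIGTERM
// queued while the container is paused only takes effect once the cgroup
// thaws. The ordering, using the calls shown above:
//
//	daemon.kill(c, int(sig))     // queue SIGTERM while frozen
//	daemon.containerUnpause(c)   // thaw: SIGTERM is delivered now
//	c.WaitStop(10 * time.Second) // escalate to SIGKILL on timeout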
- - pluginShutdown() - - if daemon.configStore.LiveRestore && daemon.containers != nil { - // check if there are any running containers, if none we should do some cleanup - if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil { - return nil - } - } - - if daemon.containers != nil { - logrus.Debug("starting clean shutdown of all containers...") - daemon.containers.ApplyAll(func(c *container.Container) { - if !c.IsRunning() { - return - } - logrus.Debugf("stopping %s", c.ID) - if err := daemon.shutdownContainer(c); err != nil { - logrus.Errorf("Stop container error: %v", err) - return - } - if mountid, err := daemon.layerStore.GetMountID(c.ID); err == nil { - daemon.cleanupMountsByID(mountid) - } - logrus.Debugf("container stopped %s", c.ID) - }) - } - - // trigger libnetwork Stop only if it's initialized - if daemon.netController != nil { - daemon.netController.Stop() - } - - if daemon.layerStore != nil { - if err := daemon.layerStore.Cleanup(); err != nil { - logrus.Errorf("Error during layer Store.Cleanup(): %v", err) - } - } - - if err := daemon.cleanupMounts(); err != nil { - return err - } - - return nil -} - -// Mount sets container.BaseFS -// (is it not set coming in? why is it unset?) -func (daemon *Daemon) Mount(container *container.Container) error { - dir, err := container.RWLayer.Mount(container.GetMountLabel()) - if err != nil { - return err - } - logrus.Debugf("container mounted via layerStore: %v", dir) - - if container.BaseFS != dir { - // The mount path reported by the graph driver should always be trusted on Windows, since the - // volume path for a given mounted layer may change over time. This should only be an error - // on non-Windows operating systems. - if container.BaseFS != "" && runtime.GOOS != "windows" { - daemon.Unmount(container) - return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')", - daemon.GraphDriverName(), container.ID, container.BaseFS, dir) - } - } - container.BaseFS = dir // TODO: combine these fields - return nil -} - -// Unmount unsets the container base filesystem -func (daemon *Daemon) Unmount(container *container.Container) error { - if err := container.RWLayer.Unmount(); err != nil { - logrus.Errorf("Error unmounting container %s: %s", container.ID, err) - return err - } - return nil -} - -// V4Subnets returns the IPv4 subnets of networks that are managed by Docker. -func (daemon *Daemon) V4Subnets() []net.IPNet { - var subnets []net.IPNet - - managedNetworks := daemon.netController.Networks() - - for _, managedNetwork := range managedNetworks { - v4Infos, _ := managedNetwork.Info().IpamInfo() - for _, v4Info := range v4Infos { - if v4Info.IPAMData.Pool != nil { - subnets = append(subnets, *v4Info.IPAMData.Pool) - } - } - } - - return subnets -} - -// V6Subnets returns the IPv6 subnets of networks that are managed by Docker. 
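A short usage sketch for the subnet helpers (V4Subnets above, V6Subnets next); the caller is hypothetical and not part of the original file, e.g. logging the pools to check for clashes with host routes:

func exampleSubnets(d *Daemon) {
	for _, sn := range d.V4Subnets() {
		logrus.Debugf("docker-managed IPv4 subnet: %s", sn.String())
	}
}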
-func (daemon *Daemon) V6Subnets() []net.IPNet { - var subnets []net.IPNet - - managedNetworks := daemon.netController.Networks() - - for _, managedNetwork := range managedNetworks { - _, v6Infos := managedNetwork.Info().IpamInfo() - for _, v6Info := range v6Infos { - if v6Info.IPAMData.Pool != nil { - subnets = append(subnets, *v6Info.IPAMData.Pool) - } - } - } - - return subnets -} - -func writeDistributionProgress(cancelFunc func(), outStream io.Writer, progressChan <-chan progress.Progress) { - progressOutput := streamformatter.NewJSONStreamFormatter().NewProgressOutput(outStream, false) - operationCancelled := false - - for prog := range progressChan { - if err := progressOutput.WriteProgress(prog); err != nil && !operationCancelled { - // don't log broken pipe errors as this is the normal case when a client aborts - if isBrokenPipe(err) { - logrus.Info("Pull session cancelled") - } else { - logrus.Errorf("error writing progress to client: %v", err) - } - cancelFunc() - operationCancelled = true - // Don't return, because we need to continue draining - // progressChan until it's closed to avoid a deadlock. - } - } -} - -func isBrokenPipe(e error) bool { - if netErr, ok := e.(*net.OpError); ok { - e = netErr.Err - if sysErr, ok := netErr.Err.(*os.SyscallError); ok { - e = sysErr.Err - } - } - return e == syscall.EPIPE -} - -// GraphDriverName returns the name of the graph driver used by the layer.Store -func (daemon *Daemon) GraphDriverName() string { - return daemon.layerStore.DriverName() -} - -// GetUIDGIDMaps returns the current daemon's user namespace settings -// for the full uid and gid maps which will be applied to containers -// started in this instance. -func (daemon *Daemon) GetUIDGIDMaps() ([]idtools.IDMap, []idtools.IDMap) { - return daemon.uidMaps, daemon.gidMaps -} - -// GetRemappedUIDGID returns the current daemon's uid and gid values -// if user namespaces are in use for this daemon instance. If not -// this function will return "real" root values of 0, 0. -func (daemon *Daemon) GetRemappedUIDGID() (int, int) { - uid, gid, _ := idtools.GetRootUIDGID(daemon.uidMaps, daemon.gidMaps) - return uid, gid -} - -// tempDir returns the default directory to use for temporary files. -func tempDir(rootDir string, rootUID, rootGID int) (string, error) { - var tmpDir string - if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" { - tmpDir = filepath.Join(rootDir, "tmp") - } - return tmpDir, idtools.MkdirAllAs(tmpDir, 0700, rootUID, rootGID) -} - -func (daemon *Daemon) setupInitLayer(initPath string) error { - rootUID, rootGID := daemon.GetRemappedUIDGID() - return setupInitLayer(initPath, rootUID, rootGID) -} - -func setDefaultMtu(config *Config) { - // do nothing if the config does not have the default 0 value. - if config.Mtu != 0 { - return - } - config.Mtu = defaultNetworkMtu -} - -func (daemon *Daemon) configureVolumes(rootUID, rootGID int) (*store.VolumeStore, error) { - volumesDriver, err := local.New(daemon.configStore.Root, rootUID, rootGID) - if err != nil { - return nil, err - } - - if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) { - return nil, fmt.Errorf("local volume driver could not be registered") - } - return store.New(daemon.configStore.Root) -} - -// IsShuttingDown tells whether the daemon is shutting down or not -func (daemon *Daemon) IsShuttingDown() bool { - return daemon.shutdown -} - -// initDiscovery initializes the discovery watcher for this daemon. 
-func (daemon *Daemon) initDiscovery(config *Config) error { - advertise, err := parseClusterAdvertiseSettings(config.ClusterStore, config.ClusterAdvertise) - if err != nil { - if err == errDiscoveryDisabled { - return nil - } - return err - } - - config.ClusterAdvertise = advertise - discoveryWatcher, err := initDiscovery(config.ClusterStore, config.ClusterAdvertise, config.ClusterOpts) - if err != nil { - return fmt.Errorf("discovery initialization failed (%v)", err) - } - - daemon.discoveryWatcher = discoveryWatcher - return nil -} - -// Reload reads configuration changes and modifies the -// daemon according to those changes. -// These are the settings that Reload changes: -// - Daemon labels. -// - Daemon debug log level. -// - Daemon max concurrent downloads -// - Daemon max concurrent uploads -// - Cluster discovery (reconfigure and restart). -// - Daemon live restore -func (daemon *Daemon) Reload(config *Config) error { - var err error - // used to hold reloaded changes - attributes := map[string]string{} - - // We need defer here to ensure the lock is released as - // daemon.SystemInfo() will try to get it too - defer func() { - if err == nil { - daemon.LogDaemonEventWithAttributes("reload", attributes) - } - }() - - daemon.configStore.reloadLock.Lock() - defer daemon.configStore.reloadLock.Unlock() - - daemon.platformReload(config, &attributes) - - if err = daemon.reloadClusterDiscovery(config); err != nil { - return err - } - - if config.IsValueSet("labels") { - daemon.configStore.Labels = config.Labels - } - if config.IsValueSet("debug") { - daemon.configStore.Debug = config.Debug - } - if config.IsValueSet("live-restore") { - daemon.configStore.LiveRestore = config.LiveRestore - if err := daemon.containerdRemote.UpdateOptions(libcontainerd.WithLiveRestore(config.LiveRestore)); err != nil { - return err - } - - } - - // If no value is set for max-concurrent-downloads we assume it is the default value - // We always "reset" as the cost is lightweight and easy to maintain. - if config.IsValueSet("max-concurrent-downloads") && config.MaxConcurrentDownloads != nil { - *daemon.configStore.MaxConcurrentDownloads = *config.MaxConcurrentDownloads - } else { - maxConcurrentDownloads := defaultMaxConcurrentDownloads - daemon.configStore.MaxConcurrentDownloads = &maxConcurrentDownloads - } - logrus.Debugf("Reset Max Concurrent Downloads: %d", *daemon.configStore.MaxConcurrentDownloads) - if daemon.downloadManager != nil { - daemon.downloadManager.SetConcurrency(*daemon.configStore.MaxConcurrentDownloads) - } - - // If no value is set for max-concurrent-upload we assume it is the default value - // We always "reset" as the cost is lightweight and easy to maintain. 
- if config.IsValueSet("max-concurrent-uploads") && config.MaxConcurrentUploads != nil { - *daemon.configStore.MaxConcurrentUploads = *config.MaxConcurrentUploads - } else { - maxConcurrentUploads := defaultMaxConcurrentUploads - daemon.configStore.MaxConcurrentUploads = &maxConcurrentUploads - } - logrus.Debugf("Reset Max Concurrent Uploads: %d", *daemon.configStore.MaxConcurrentUploads) - if daemon.uploadManager != nil { - daemon.uploadManager.SetConcurrency(*daemon.configStore.MaxConcurrentUploads) - } - - // We emit daemon reload event here with updatable configurations - attributes["debug"] = fmt.Sprintf("%t", daemon.configStore.Debug) - attributes["cluster-store"] = daemon.configStore.ClusterStore - if daemon.configStore.ClusterOpts != nil { - opts, _ := json.Marshal(daemon.configStore.ClusterOpts) - attributes["cluster-store-opts"] = string(opts) - } else { - attributes["cluster-store-opts"] = "{}" - } - attributes["cluster-advertise"] = daemon.configStore.ClusterAdvertise - if daemon.configStore.Labels != nil { - labels, _ := json.Marshal(daemon.configStore.Labels) - attributes["labels"] = string(labels) - } else { - attributes["labels"] = "[]" - } - attributes["max-concurrent-downloads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentDownloads) - attributes["max-concurrent-uploads"] = fmt.Sprintf("%d", *daemon.configStore.MaxConcurrentUploads) - - return nil -} - -func (daemon *Daemon) reloadClusterDiscovery(config *Config) error { - var err error - newAdvertise := daemon.configStore.ClusterAdvertise - newClusterStore := daemon.configStore.ClusterStore - if config.IsValueSet("cluster-advertise") { - if config.IsValueSet("cluster-store") { - newClusterStore = config.ClusterStore - } - newAdvertise, err = parseClusterAdvertiseSettings(newClusterStore, config.ClusterAdvertise) - if err != nil && err != errDiscoveryDisabled { - return err - } - } - - if daemon.clusterProvider != nil { - if err := config.isSwarmCompatible(); err != nil { - return err - } - } - - // check discovery modifications - if !modifiedDiscoverySettings(daemon.configStore, newAdvertise, newClusterStore, config.ClusterOpts) { - return nil - } - - // enable discovery for the first time if it was not previously enabled - if daemon.discoveryWatcher == nil { - discoveryWatcher, err := initDiscovery(newClusterStore, newAdvertise, config.ClusterOpts) - if err != nil { - return fmt.Errorf("discovery initialization failed (%v)", err) - } - daemon.discoveryWatcher = discoveryWatcher - } else { - if err == errDiscoveryDisabled { - // disable discovery if it was previously enabled and it's disabled now - daemon.discoveryWatcher.Stop() - } else { - // reload discovery - if err = daemon.discoveryWatcher.Reload(config.ClusterStore, newAdvertise, config.ClusterOpts); err != nil { - return err - } - } - } - - daemon.configStore.ClusterStore = newClusterStore - daemon.configStore.ClusterOpts = config.ClusterOpts - daemon.configStore.ClusterAdvertise = newAdvertise - - if daemon.netController == nil { - return nil - } - netOptions, err := daemon.networkOptions(daemon.configStore, nil) - if err != nil { - logrus.Warnf("Failed to reload configuration with network controller: %v", err) - return nil - } - err = daemon.netController.ReloadConfiguration(netOptions...) 
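// [Editor's note: illustrative example, not part of the original patch.]
// Reload above is typically triggered by the daemon re-reading its
// --config-file on SIGHUP. A hypothetical /etc/docker/daemon.json limited to
// the reloadable keys listed in the doc comment might look like:
//
//	{
//	    "labels": ["env=staging"],
//	    "debug": true,
//	    "max-concurrent-downloads": 3,
//	    "max-concurrent-uploads": 5,
//	    "live-restore": true,
//	    "cluster-store": "consul://10.0.0.5:8500",
//	    "cluster-advertise": "eth0:2376"
//	}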
- if err != nil { - logrus.Warnf("Failed to reload configuration with network controller: %v", err) - } - - return nil -} - -func isBridgeNetworkDisabled(config *Config) bool { - return config.bridgeConfig.Iface == disableNetworkBridge -} - -func (daemon *Daemon) networkOptions(dconfig *Config, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) { - options := []nwconfig.Option{} - if dconfig == nil { - return options, nil - } - - options = append(options, nwconfig.OptionDataDir(dconfig.Root)) - options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot())) - - dd := runconfig.DefaultDaemonNetworkMode() - dn := runconfig.DefaultDaemonNetworkMode().NetworkName() - options = append(options, nwconfig.OptionDefaultDriver(string(dd))) - options = append(options, nwconfig.OptionDefaultNetwork(dn)) - - if strings.TrimSpace(dconfig.ClusterStore) != "" { - kv := strings.Split(dconfig.ClusterStore, "://") - if len(kv) != 2 { - return nil, fmt.Errorf("kv store daemon config must be of the form KV-PROVIDER://KV-URL") - } - options = append(options, nwconfig.OptionKVProvider(kv[0])) - options = append(options, nwconfig.OptionKVProviderURL(kv[1])) - } - if len(dconfig.ClusterOpts) > 0 { - options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts)) - } - - if daemon.discoveryWatcher != nil { - options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher)) - } - - if dconfig.ClusterAdvertise != "" { - options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise)) - } - - options = append(options, nwconfig.OptionLabels(dconfig.Labels)) - options = append(options, driverOptions(dconfig)...) - - if daemon.configStore != nil && daemon.configStore.LiveRestore && len(activeSandboxes) != 0 { - options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes)) - } - - return options, nil -} - -func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry { - out := make([]types.BlkioStatEntry, len(entries)) - for i, re := range entries { - out[i] = types.BlkioStatEntry{ - Major: re.Major, - Minor: re.Minor, - Op: re.Op, - Value: re.Value, - } - } - return out -} diff --git a/daemon/daemon_experimental.go b/daemon/daemon_experimental.go deleted file mode 100644 index cad706eb86..0000000000 --- a/daemon/daemon_experimental.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build experimental - -package daemon - -import ( - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/plugin" - "github.com/docker/engine-api/types/container" -) - -func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { - return nil, nil -} - -func pluginInit(d *Daemon, cfg *Config, remote libcontainerd.Remote) error { - return plugin.Init(cfg.Root, remote, d.RegistryService, cfg.LiveRestore, d.LogPluginEvent) -} - -func pluginShutdown() { - manager := plugin.GetManager() - // Check for a valid manager object. In error conditions, daemon init can fail - // and shutdown called, before plugin manager is initialized. 
-	if manager != nil {
-		manager.Shutdown()
-	}
-}
diff --git a/daemon/daemon_linux.go b/daemon/daemon_linux.go
deleted file mode 100644
index 9bdf6e2b79..0000000000
--- a/daemon/daemon_linux.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package daemon
-
-import (
-	"bufio"
-	"fmt"
-	"io"
-	"os"
-	"regexp"
-	"strings"
-
-	"github.com/Sirupsen/logrus"
-	"github.com/docker/docker/pkg/mount"
-)
-
-func (daemon *Daemon) cleanupMountsByID(id string) error {
-	logrus.Debugf("Cleaning up old mountid %s: start.", id)
-	f, err := os.Open("/proc/self/mountinfo")
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	return daemon.cleanupMountsFromReaderByID(f, id, mount.Unmount)
-}
-
-func (daemon *Daemon) cleanupMountsFromReaderByID(reader io.Reader, id string, unmount func(target string) error) error {
-	if daemon.root == "" {
-		return nil
-	}
-	var errors []string
-
-	regexps := getCleanPatterns(id)
-	sc := bufio.NewScanner(reader)
-	for sc.Scan() {
-		// fields[4] is the mount point column of /proc/self/mountinfo, so a
-		// line needs at least five fields before it can be inspected.
-		if fields := strings.Fields(sc.Text()); len(fields) > 4 {
-			if mnt := fields[4]; strings.HasPrefix(mnt, daemon.root) {
-				for _, p := range regexps {
-					if p.MatchString(mnt) {
-						if err := unmount(mnt); err != nil {
-							logrus.Error(err)
-							errors = append(errors, err.Error())
-						}
-					}
-				}
-			}
-		}
-	}
-
-	if err := sc.Err(); err != nil {
-		return err
-	}
-
-	if len(errors) > 0 {
-		return fmt.Errorf("Error cleaning up mounts:\n%v", strings.Join(errors, "\n"))
-	}
-
-	logrus.Debugf("Cleaning up old mountid %v: done.", id)
-	return nil
-}
-
-// cleanupMounts umounts shm/mqueue mounts for old containers
-func (daemon *Daemon) cleanupMounts() error {
-	return daemon.cleanupMountsByID("")
-}
-
-func getCleanPatterns(id string) (regexps []*regexp.Regexp) {
-	var patterns []string
-	if id == "" {
-		id = "[0-9a-f]{64}"
-		patterns = append(patterns, "containers/"+id+"/shm")
-	}
-	patterns = append(patterns, "aufs/mnt/"+id+"$", "overlay/"+id+"/merged$", "zfs/graph/"+id+"$")
-	for _, p := range patterns {
-		r, err := regexp.Compile(p)
-		if err == nil {
-			regexps = append(regexps, r)
-		}
-	}
-	return
-}
diff --git a/daemon/daemon_linux_test.go b/daemon/daemon_linux_test.go
deleted file mode 100644
index c40b13ba4c..0000000000
--- a/daemon/daemon_linux_test.go
+++ /dev/null
@@ -1,104 +0,0 @@
-// +build linux
-
-package daemon
-
-import (
-	"strings"
-	"testing"
-)
-
-const mountsFixture = `142 78 0:38 / / rw,relatime - aufs none rw,si=573b861da0b3a05b,dio
-143 142 0:60 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw
-144 142 0:67 / /dev rw,nosuid - tmpfs tmpfs rw,mode=755
-145 144 0:78 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=666
-146 144 0:49 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw
-147 142 0:84 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw
-148 147 0:86 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs tmpfs rw,mode=755
-149 148 0:22 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuset
-150 148 0:25 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpu
-151 148 0:27 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,cpuacct
-152 148 0:28 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,memory
-153 148 0:29 
/docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,devices -154 148 0:30 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,freezer -155 148 0:31 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,blkio -156 148 0:32 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,perf_event -157 148 0:33 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime - cgroup cgroup rw,hugetlb -158 148 0:35 /docker/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd -159 142 8:4 /home/mlaventure/gopath /home/mlaventure/gopath rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -160 142 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data /var/lib/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -164 142 8:4 /home/mlaventure/gopath/src/github.com/docker/docker /go/src/github.com/docker/docker rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -165 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/resolv.conf /etc/resolv.conf rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -166 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hostname /etc/hostname rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -167 142 8:4 /var/lib/docker/containers/5425782a95e643181d8a485a2bab3c0bb21f51d7dfc03511f0e6fbf3f3aa356a/hosts /etc/hosts rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -168 144 0:39 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k -169 144 0:12 /14 /dev/console rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -83 147 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -89 142 0:87 / /tmp rw,relatime - tmpfs none rw -97 142 0:60 / /run/docker/netns/default rw,nosuid,nodev,noexec,relatime - proc proc rw -100 160 8:4 /var/lib/docker/volumes/9a428b651ee4c538130143cad8d87f603a4bf31b928afe7ff3ecd65480692b35/_data/aufs /var/lib/docker/aufs rw,relatime - ext4 /dev/disk/by-uuid/d99e196c-1fc4-4b4f-bab9-9962b2b34e99 rw,errors=remount-ro,data=ordered -115 100 0:102 / /var/lib/docker/aufs/mnt/0ecda1c63e5b58b3d89ff380bf646c95cc980252cf0b52466d43619aec7c8432 rw,relatime - aufs none rw,si=573b861dbc01905b,dio -116 160 0:107 / /var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k -118 142 0:102 / /run/docker/libcontainerd/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/rootfs rw,relatime - aufs none rw,si=573b861dbc01905b,dio -242 142 0:60 / /run/docker/netns/c3664df2a0f7 rw,nosuid,nodev,noexec,relatime - proc proc rw -120 
100 0:122 / /var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d rw,relatime - aufs none rw,si=573b861eb147805b,dio
-171 142 0:122 / /run/docker/libcontainerd/e406ff6f3e18516d50e03dbca4de54767a69a403a6f7ec1edc2762812824521e/rootfs rw,relatime - aufs none rw,si=573b861eb147805b,dio
-310 142 0:60 / /run/docker/netns/71a18572176b rw,nosuid,nodev,noexec,relatime - proc proc rw
-`
-
-func TestCleanupMounts(t *testing.T) {
-	d := &Daemon{
-		root: "/var/lib/docker/",
-	}
-
-	expected := "/var/lib/docker/containers/d045dc441d2e2e1d5b3e328d47e5943811a40819fb47497c5f5a5df2d6d13c37/shm"
-	var unmounted int
-	unmount := func(target string) error {
-		if target == expected {
-			unmounted++
-		}
-		return nil
-	}
-
-	d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "", unmount)
-
-	if unmounted != 1 {
-		t.Fatalf("Expected to unmount the shm (and the shm only)")
-	}
-}
-
-func TestCleanupMountsByID(t *testing.T) {
-	d := &Daemon{
-		root: "/var/lib/docker/",
-	}
-
-	expected := "/var/lib/docker/aufs/mnt/03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d"
-	var unmounted int
-	unmount := func(target string) error {
-		if target == expected {
-			unmounted++
-		}
-		return nil
-	}
-
-	d.cleanupMountsFromReaderByID(strings.NewReader(mountsFixture), "03ca4b49e71f1e49a41108829f4d5c70ac95934526e2af8984a1f65f1de0715d", unmount)
-
-	if unmounted != 1 {
-		t.Fatalf("Expected to unmount the aufs root (and that only)")
-	}
-}
-
-func TestNotCleanupMounts(t *testing.T) {
-	d := &Daemon{
-		repository: "",
-	}
-	var unmounted bool
-	unmount := func(target string) error {
-		unmounted = true
-		return nil
-	}
-	mountInfo := `234 232 0:59 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw,size=65536k`
-	d.cleanupMountsFromReaderByID(strings.NewReader(mountInfo), "", unmount)
-	if unmounted {
-		t.Fatalf("Expected not to clean up /dev/shm")
-	}
-}
diff --git a/daemon/daemon_solaris.go b/daemon/daemon_solaris.go
deleted file mode 100644
index 5c49af56c9..0000000000
--- a/daemon/daemon_solaris.go
+++ /dev/null
@@ -1,167 +0,0 @@
-// +build solaris,cgo
-
-package daemon
-
-import (
-	"fmt"
-
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/layer"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/parsers/kernel"
-	"github.com/docker/docker/reference"
-	"github.com/docker/engine-api/types"
-	containertypes "github.com/docker/engine-api/types/container"
-	"github.com/docker/libnetwork"
-	nwconfig "github.com/docker/libnetwork/config"
-)
-
-//#include <zone.h>
-import "C"
-
-const (
-	defaultVirtualSwitch = "Virtual Switch"
-	platformSupported    = true
-	solarisMinCPUShares  = 1
-	solarisMaxCPUShares  = 65535
-)
-
-func (daemon *Daemon) cleanupMountsByID(id string) error {
-	return nil
-}
-
-func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error {
-	return nil
-}
-
-func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) {
-	return nil, nil, nil
-}
-
-func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error {
-	return nil
-}
-
-// setupInitLayer populates a directory with mountpoints suitable
-// for bind-mounting dockerinit into the container. The mountpoint is simply an
-// empty file at /.dockerinit
-//
-// This extra layer is used by all containers as the top-most ro layer. It protects
-// the container from unwanted side-effects on the rw layer.
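// [Editor's note: illustrative sketch, not part of the original patch.]
// The comment line directly above `import "C"` is the cgo preamble: its
// #include is compiled as C code, which is what makes C.getzoneid() (used by
// checkSystem later in this file) resolvable. The same mechanism in isolation:
//
//	//#include <zone.h>
//	import "C"
//
//	// currentZone returns the Solaris zone ID of the calling process.
//	func currentZone() (int, error) {
//		id, err := C.getzoneid()
//		return int(id), err
//	}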
-func setupInitLayer(initLayer string, rootUID, rootGID int) error { - return nil -} - -func checkKernel() error { - // solaris can rely upon checkSystem() below, we don't skew kernel versions - return nil -} - -func (daemon *Daemon) getCgroupDriver() string { - return "" -} - -func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { - return nil -} - -// verifyPlatformContainerSettings performs platform-specific validation of the -// hostconfig and config structures. -func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - warnings := []string{} - return warnings, nil -} - -// platformReload update configuration with platform specific options -func (daemon *Daemon) platformReload(config *Config, attributes *map[string]string) { -} - -// verifyDaemonSettings performs validation of daemon config struct -func verifyDaemonSettings(config *Config) error { - // checkSystem validates platform-specific requirements - return nil -} - -func checkSystem() error { - // check OS version for compatibility, ensure running in global zone - var err error - var id C.zoneid_t - - if id, err = C.getzoneid(); err != nil { - return fmt.Errorf("Exiting. Error getting zone id: %+v", err) - } - if int(id) != 0 { - return fmt.Errorf("Exiting because the Docker daemon is not running in the global zone") - } - - v, err := kernel.GetKernelVersion() - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 5, Major: 12, Minor: 0}) < 0 { - return fmt.Errorf("Your Solaris kernel version: %s doesn't support Docker. Please upgrade to 5.12.0", v.String()) - } - return err -} - -// configureMaxThreads sets the Go runtime max threads threshold -// which is 90% of the kernel setting from /proc/sys/kernel/threads-max -func configureMaxThreads(config *Config) error { - return nil -} - -// configureKernelSecuritySupport configures and validate security support for the kernel -func configureKernelSecuritySupport(config *Config, driverName string) error { - return nil -} - -func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { - return nil, nil -} - -// registerLinks sets up links between containers and writes the -// configuration out for persistence. -func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { - return nil -} - -func (daemon *Daemon) cleanupMounts() error { - return nil -} - -// conditionalMountOnStart is a platform specific helper function during the -// container start to call mount. -func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { - return nil -} - -// conditionalUnmountOnCleanup is a platform specific helper function called -// during the cleanup of a container to unmount. -func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { - return daemon.Unmount(container) -} - -func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { - // Solaris has no custom images to register - return nil -} - -func driverOptions(config *Config) []nwconfig.Option { - return []nwconfig.Option{} -} - -func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { - return nil, nil -} - -// setDefaultIsolation determine the default isolation mode for the -// daemon to run in. 
This is only applicable on Windows -func (daemon *Daemon) setDefaultIsolation() error { - return nil -} - -func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { - return types.RootFS{} -} - -func setupDaemonProcess(config *Config) error { - return nil -} diff --git a/daemon/daemon_stub.go b/daemon/daemon_stub.go deleted file mode 100644 index a5f534964d..0000000000 --- a/daemon/daemon_stub.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !experimental - -package daemon - -import ( - "github.com/docker/docker/libcontainerd" - "github.com/docker/engine-api/types/container" -) - -func (daemon *Daemon) verifyExperimentalContainerSettings(hostConfig *container.HostConfig, config *container.Config) ([]string, error) { - return nil, nil -} - -func pluginInit(d *Daemon, config *Config, remote libcontainerd.Remote) error { - return nil -} - -func pluginShutdown() { -} diff --git a/daemon/daemon_test.go b/daemon/daemon_test.go deleted file mode 100644 index d799823970..0000000000 --- a/daemon/daemon_test.go +++ /dev/null @@ -1,532 +0,0 @@ -package daemon - -import ( - "io/ioutil" - "os" - "path/filepath" - "reflect" - "testing" - "time" - - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/discovery" - _ "github.com/docker/docker/pkg/discovery/memory" - "github.com/docker/docker/pkg/registrar" - "github.com/docker/docker/pkg/truncindex" - "github.com/docker/docker/volume" - volumedrivers "github.com/docker/docker/volume/drivers" - "github.com/docker/docker/volume/local" - "github.com/docker/docker/volume/store" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/go-connections/nat" -) - -// -// https://github.com/docker/docker/issues/8069 -// - -func TestGetContainer(t *testing.T) { - c1 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", - Name: "tender_bardeen", - }, - } - - c2 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de", - Name: "drunk_hawking", - }, - } - - c3 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "3cdbd1aa394fd68559fd1441d6eff2abfafdcba06e72d2febdba229008b0bf57", - Name: "3cdbd1aa", - }, - } - - c4 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "75fb0b800922abdbef2d27e60abcdfaf7fb0698b2a96d22d3354da361a6ff4a5", - Name: "5a4ff6a163ad4533d22d69a2b8960bf7fafdcba06e72d2febdba229008b0bf57", - }, - } - - c5 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "d22d69a2b8960bf7fafdcba06e72d2febdba960bf7fafdcba06e72d2f9008b060b", - Name: "d22d69a2b896", - }, - } - - store := container.NewMemoryStore() - store.Add(c1.ID, c1) - store.Add(c2.ID, c2) - store.Add(c3.ID, c3) - store.Add(c4.ID, c4) - store.Add(c5.ID, c5) - - index := truncindex.NewTruncIndex([]string{}) - index.Add(c1.ID) - index.Add(c2.ID) - index.Add(c3.ID) - index.Add(c4.ID) - index.Add(c5.ID) - - daemon := &Daemon{ - containers: store, - idIndex: index, - nameIndex: registrar.NewRegistrar(), - } - - daemon.reserveName(c1.ID, c1.Name) - daemon.reserveName(c2.ID, c2.Name) - daemon.reserveName(c3.ID, c3.Name) - daemon.reserveName(c4.ID, c4.Name) - daemon.reserveName(c5.ID, c5.Name) - - if container, _ := daemon.GetContainer("3cdbd1aa394fd68559fd1441d6eff2ab7c1e6363582c82febfaa8045df3bd8de"); container != c2 { - t.Fatal("Should explicitly match full container IDs") - } - - if container, _ := 
daemon.GetContainer("75fb0b8009"); container != c4 { - t.Fatal("Should match a partial ID") - } - - if container, _ := daemon.GetContainer("drunk_hawking"); container != c2 { - t.Fatal("Should match a full name") - } - - // c3.Name is a partial match for both c3.ID and c2.ID - if c, _ := daemon.GetContainer("3cdbd1aa"); c != c3 { - t.Fatal("Should match a full name even though it collides with another container's ID") - } - - if container, _ := daemon.GetContainer("d22d69a2b896"); container != c5 { - t.Fatal("Should match a container where the provided prefix is an exact match to the its name, and is also a prefix for its ID") - } - - if _, err := daemon.GetContainer("3cdbd1"); err == nil { - t.Fatal("Should return an error when provided a prefix that partially matches multiple container ID's") - } - - if _, err := daemon.GetContainer("nothing"); err == nil { - t.Fatal("Should return an error when provided a prefix that is neither a name or a partial match to an ID") - } -} - -func initDaemonWithVolumeStore(tmp string) (*Daemon, error) { - var err error - daemon := &Daemon{ - repository: tmp, - root: tmp, - } - daemon.volumes, err = store.New(tmp) - if err != nil { - return nil, err - } - - volumesDriver, err := local.New(tmp, 0, 0) - if err != nil { - return nil, err - } - volumedrivers.Register(volumesDriver, volumesDriver.Name()) - - return daemon, nil -} - -func TestValidContainerNames(t *testing.T) { - invalidNames := []string{"-rm", "&sdfsfd", "safd%sd"} - validNames := []string{"word-word", "word_word", "1weoid"} - - for _, name := range invalidNames { - if validContainerNamePattern.MatchString(name) { - t.Fatalf("%q is not a valid container name and was returned as valid.", name) - } - } - - for _, name := range validNames { - if !validContainerNamePattern.MatchString(name) { - t.Fatalf("%q is a valid container name and was returned as invalid.", name) - } - } -} - -func TestContainerInitDNS(t *testing.T) { - tmp, err := ioutil.TempDir("", "docker-container-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - containerID := "d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e" - containerPath := filepath.Join(tmp, containerID) - if err := os.MkdirAll(containerPath, 0755); err != nil { - t.Fatal(err) - } - - config := `{"State":{"Running":true,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":2464,"ExitCode":0, -"Error":"","StartedAt":"2015-05-26T16:48:53.869308965Z","FinishedAt":"0001-01-01T00:00:00Z"}, -"ID":"d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e","Created":"2015-05-26T16:48:53.7987917Z","Path":"top", -"Args":[],"Config":{"Hostname":"d59df5276e7b","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"Cpuset":"", -"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":true,"OpenStdin":true, -"StdinOnce":false,"Env":null,"Cmd":["top"],"Image":"ubuntu:latest","Volumes":null,"WorkingDir":"","Entrypoint":null, -"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":{}},"Image":"07f8e8c5e66084bef8f848877857537ffe1c47edd01a93af27e7161672ad0e95", -"NetworkSettings":{"IPAddress":"172.17.0.1","IPPrefixLen":16,"MacAddress":"02:42:ac:11:00:01","LinkLocalIPv6Address":"fe80::42:acff:fe11:1", -"LinkLocalIPv6PrefixLen":64,"GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"Gateway":"172.17.42.1","IPv6Gateway":"","Bridge":"docker0","Ports":{}}, 
-"ResolvConfPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/resolv.conf", -"HostnamePath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hostname", -"HostsPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/hosts", -"LogPath":"/var/lib/docker/containers/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e/d59df5276e7b219d510fe70565e0404bc06350e0d4b43fe961f22f339980170e-json.log", -"Name":"/ubuntu","Driver":"aufs","MountLabel":"","ProcessLabel":"","AppArmorProfile":"","RestartCount":0, -"UpdateDns":false,"Volumes":{},"VolumesRW":{},"AppliedVolumesFrom":null}` - - // Container struct only used to retrieve path to config file - container := &container.Container{CommonContainer: container.CommonContainer{Root: containerPath}} - configPath, err := container.ConfigPath() - if err != nil { - t.Fatal(err) - } - if err = ioutil.WriteFile(configPath, []byte(config), 0644); err != nil { - t.Fatal(err) - } - - hostConfig := `{"Binds":[],"ContainerIDFile":"","Memory":0,"MemorySwap":0,"CpuShares":0,"CpusetCpus":"", -"Privileged":false,"PortBindings":{},"Links":null,"PublishAllPorts":false,"Dns":null,"DnsOptions":null,"DnsSearch":null,"ExtraHosts":null,"VolumesFrom":null, -"Devices":[],"NetworkMode":"bridge","IpcMode":"","PidMode":"","CapAdd":null,"CapDrop":null,"RestartPolicy":{"Name":"no","MaximumRetryCount":0}, -"SecurityOpt":null,"ReadonlyRootfs":false,"Ulimits":null,"LogConfig":{"Type":"","Config":null},"CgroupParent":""}` - - hostConfigPath, err := container.HostConfigPath() - if err != nil { - t.Fatal(err) - } - if err = ioutil.WriteFile(hostConfigPath, []byte(hostConfig), 0644); err != nil { - t.Fatal(err) - } - - daemon, err := initDaemonWithVolumeStore(tmp) - if err != nil { - t.Fatal(err) - } - defer volumedrivers.Unregister(volume.DefaultDriverName) - - c, err := daemon.load(containerID) - if err != nil { - t.Fatal(err) - } - - if c.HostConfig.DNS == nil { - t.Fatal("Expected container DNS to not be nil") - } - - if c.HostConfig.DNSSearch == nil { - t.Fatal("Expected container DNSSearch to not be nil") - } - - if c.HostConfig.DNSOptions == nil { - t.Fatal("Expected container DNSOptions to not be nil") - } -} - -func newPortNoError(proto, port string) nat.Port { - p, _ := nat.NewPort(proto, port) - return p -} - -func TestMerge(t *testing.T) { - volumesImage := make(map[string]struct{}) - volumesImage["/test1"] = struct{}{} - volumesImage["/test2"] = struct{}{} - portsImage := make(nat.PortSet) - portsImage[newPortNoError("tcp", "1111")] = struct{}{} - portsImage[newPortNoError("tcp", "2222")] = struct{}{} - configImage := &containertypes.Config{ - ExposedPorts: portsImage, - Env: []string{"VAR1=1", "VAR2=2"}, - Volumes: volumesImage, - } - - portsUser := make(nat.PortSet) - portsUser[newPortNoError("tcp", "2222")] = struct{}{} - portsUser[newPortNoError("tcp", "3333")] = struct{}{} - volumesUser := make(map[string]struct{}) - volumesUser["/test3"] = struct{}{} - configUser := &containertypes.Config{ - ExposedPorts: portsUser, - Env: []string{"VAR2=3", "VAR3=3"}, - Volumes: volumesUser, - } - - if err := merge(configUser, configImage); err != nil { - t.Error(err) - } - - if len(configUser.ExposedPorts) != 3 { - t.Fatalf("Expected 3 ExposedPorts, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) - } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() 
!= "3333" { - t.Fatalf("Expected 1111 or 2222 or 3333, found %s", portSpecs) - } - } - if len(configUser.Env) != 3 { - t.Fatalf("Expected 3 env var, VAR1=1, VAR2=3 and VAR3=3, found %d", len(configUser.Env)) - } - for _, env := range configUser.Env { - if env != "VAR1=1" && env != "VAR2=3" && env != "VAR3=3" { - t.Fatalf("Expected VAR1=1 or VAR2=3 or VAR3=3, found %s", env) - } - } - - if len(configUser.Volumes) != 3 { - t.Fatalf("Expected 3 volumes, /test1, /test2 and /test3, found %d", len(configUser.Volumes)) - } - for v := range configUser.Volumes { - if v != "/test1" && v != "/test2" && v != "/test3" { - t.Fatalf("Expected /test1 or /test2 or /test3, found %s", v) - } - } - - ports, _, err := nat.ParsePortSpecs([]string{"0000"}) - if err != nil { - t.Error(err) - } - configImage2 := &containertypes.Config{ - ExposedPorts: ports, - } - - if err := merge(configUser, configImage2); err != nil { - t.Error(err) - } - - if len(configUser.ExposedPorts) != 4 { - t.Fatalf("Expected 4 ExposedPorts, 0000, 1111, 2222 and 3333, found %d", len(configUser.ExposedPorts)) - } - for portSpecs := range configUser.ExposedPorts { - if portSpecs.Port() != "0" && portSpecs.Port() != "1111" && portSpecs.Port() != "2222" && portSpecs.Port() != "3333" { - t.Fatalf("Expected %q or %q or %q or %q, found %s", 0, 1111, 2222, 3333, portSpecs) - } - } -} - -func TestDaemonReloadLabels(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:bar"}, - }, - } - - valuesSets := make(map[string]interface{}) - valuesSets["labels"] = "foo:baz" - newConfig := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:baz"}, - valuesSet: valuesSets, - }, - } - - daemon.Reload(newConfig) - label := daemon.configStore.Labels[0] - if label != "foo:baz" { - t.Fatalf("Expected daemon label `foo:baz`, got %s", label) - } -} - -func TestDaemonReloadNotAffectOthers(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:bar"}, - Debug: true, - }, - } - - valuesSets := make(map[string]interface{}) - valuesSets["labels"] = "foo:baz" - newConfig := &Config{ - CommonConfig: CommonConfig{ - Labels: []string{"foo:baz"}, - valuesSet: valuesSets, - }, - } - - daemon.Reload(newConfig) - label := daemon.configStore.Labels[0] - if label != "foo:baz" { - t.Fatalf("Expected daemon label `foo:baz`, got %s", label) - } - debug := daemon.configStore.Debug - if !debug { - t.Fatalf("Expected debug 'enabled', got 'disabled'") - } -} - -func TestDaemonDiscoveryReload(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1", - ClusterAdvertise: "127.0.0.1:3333", - }, - } - - if err := daemon.initDiscovery(daemon.configStore); err != nil { - t.Fatal(err) - } - - expected := discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "3333"}, - } - - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for discovery") - case <-daemon.discoveryWatcher.ReadyCh(): - } - - stopCh := make(chan struct{}) - defer close(stopCh) - ch, errCh := daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } - - valuesSets := make(map[string]interface{}) - valuesSets["cluster-store"] = 
"memory://127.0.0.1:2222" - valuesSets["cluster-advertise"] = "127.0.0.1:5555" - newConfig := &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1:2222", - ClusterAdvertise: "127.0.0.1:5555", - valuesSet: valuesSets, - }, - } - - expected = discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for discovery") - case <-daemon.discoveryWatcher.ReadyCh(): - } - - ch, errCh = daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } -} - -func TestDaemonDiscoveryReloadFromEmptyDiscovery(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{} - - valuesSet := make(map[string]interface{}) - valuesSet["cluster-store"] = "memory://127.0.0.1:2222" - valuesSet["cluster-advertise"] = "127.0.0.1:5555" - newConfig := &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1:2222", - ClusterAdvertise: "127.0.0.1:5555", - valuesSet: valuesSet, - }, - } - - expected := discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - select { - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for discovery") - case <-daemon.discoveryWatcher.ReadyCh(): - } - - stopCh := make(chan struct{}) - defer close(stopCh) - ch, errCh := daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } -} - -func TestDaemonDiscoveryReloadOnlyClusterAdvertise(t *testing.T) { - daemon := &Daemon{} - daemon.configStore = &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "memory://127.0.0.1", - }, - } - valuesSets := make(map[string]interface{}) - valuesSets["cluster-advertise"] = "127.0.0.1:5555" - newConfig := &Config{ - CommonConfig: CommonConfig{ - ClusterAdvertise: "127.0.0.1:5555", - valuesSet: valuesSets, - }, - } - expected := discovery.Entries{ - &discovery.Entry{Host: "127.0.0.1", Port: "5555"}, - } - - if err := daemon.Reload(newConfig); err != nil { - t.Fatal(err) - } - - select { - case <-daemon.discoveryWatcher.ReadyCh(): - case <-time.After(10 * time.Second): - t.Fatal("Timeout waiting for discovery") - } - stopCh := make(chan struct{}) - defer close(stopCh) - ch, errCh := daemon.discoveryWatcher.Watch(stopCh) - - select { - case <-time.After(1 * time.Second): - t.Fatal("failed to get discovery advertisements in time") - case e := <-ch: - if !reflect.DeepEqual(e, expected) { - t.Fatalf("expected %v, got %v\n", expected, e) - } - case e := <-errCh: - t.Fatal(e) - } - -} diff --git a/daemon/daemon_unix.go b/daemon/daemon_unix.go deleted file mode 100644 index f26691226f..0000000000 --- a/daemon/daemon_unix.go +++ /dev/null @@ -1,1167 +0,0 @@ -// +build linux freebsd - -package daemon - -import ( - "bytes" - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "runtime" - "runtime/debug" - "strconv" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - 
"github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/reference" - "github.com/docker/docker/runconfig" - runconfigopts "github.com/docker/docker/runconfig/opts" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/blkiodev" - pblkiodev "github.com/docker/engine-api/types/blkiodev" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/libnetwork" - nwconfig "github.com/docker/libnetwork/config" - "github.com/docker/libnetwork/drivers/bridge" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/netutils" - "github.com/docker/libnetwork/options" - lntypes "github.com/docker/libnetwork/types" - "github.com/golang/protobuf/ptypes" - "github.com/opencontainers/runc/libcontainer/label" - "github.com/opencontainers/runc/libcontainer/user" - "github.com/opencontainers/specs/specs-go" -) - -const ( - // See https://git.kernel.org/cgit/linux/kernel/git/tip/tip.git/tree/kernel/sched/sched.h?id=8cd9234c64c584432f6992fe944ca9e46ca8ea76#n269 - linuxMinCPUShares = 2 - linuxMaxCPUShares = 262144 - platformSupported = true - // It's not kernel limit, we want this 4M limit to supply a reasonable functional container - linuxMinMemory = 4194304 - // constants for remapped root settings - defaultIDSpecifier string = "default" - defaultRemappedID string = "dockremap" - - // constant for cgroup drivers - cgroupFsDriver = "cgroupfs" - cgroupSystemdDriver = "systemd" -) - -func getMemoryResources(config containertypes.Resources) *specs.Memory { - memory := specs.Memory{} - - if config.Memory > 0 { - limit := uint64(config.Memory) - memory.Limit = &limit - } - - if config.MemoryReservation > 0 { - reservation := uint64(config.MemoryReservation) - memory.Reservation = &reservation - } - - if config.MemorySwap != 0 { - swap := uint64(config.MemorySwap) - memory.Swap = &swap - } - - if config.MemorySwappiness != nil { - swappiness := uint64(*config.MemorySwappiness) - memory.Swappiness = &swappiness - } - - if config.KernelMemory != 0 { - kernelMemory := uint64(config.KernelMemory) - memory.Kernel = &kernelMemory - } - - return &memory -} - -func getCPUResources(config containertypes.Resources) *specs.CPU { - cpu := specs.CPU{} - - if config.CPUShares != 0 { - shares := uint64(config.CPUShares) - cpu.Shares = &shares - } - - if config.CpusetCpus != "" { - cpuset := config.CpusetCpus - cpu.Cpus = &cpuset - } - - if config.CpusetMems != "" { - cpuset := config.CpusetMems - cpu.Mems = &cpuset - } - - if config.CPUPeriod != 0 { - period := uint64(config.CPUPeriod) - cpu.Period = &period - } - - if config.CPUQuota != 0 { - quota := uint64(config.CPUQuota) - cpu.Quota = "a - } - - return &cpu -} - -func getBlkioWeightDevices(config containertypes.Resources) ([]specs.WeightDevice, error) { - var stat syscall.Stat_t - var blkioWeightDevices []specs.WeightDevice - - for _, weightDevice := range config.BlkioWeightDevice { - if err := syscall.Stat(weightDevice.Path, &stat); err != nil { - return nil, err - } - weight := weightDevice.Weight - d := specs.WeightDevice{Weight: &weight} - d.Major = int64(stat.Rdev / 256) - d.Minor = int64(stat.Rdev % 256) - blkioWeightDevices = append(blkioWeightDevices, d) - } - - return blkioWeightDevices, nil -} - -func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { - 
var ( - labelOpts []string - err error - ) - - for _, opt := range config.SecurityOpt { - if opt == "no-new-privileges" { - container.NoNewPrivileges = true - } else { - var con []string - if strings.Contains(opt, "=") { - con = strings.SplitN(opt, "=", 2) - } else if strings.Contains(opt, ":") { - con = strings.SplitN(opt, ":", 2) - logrus.Warn("Security options with `:` as a separator are deprecated and will be completely unsupported in 1.13, use `=` instead.") - } - - if len(con) != 2 { - return fmt.Errorf("Invalid --security-opt 1: %q", opt) - } - - switch con[0] { - case "label": - labelOpts = append(labelOpts, con[1]) - case "apparmor": - container.AppArmorProfile = con[1] - case "seccomp": - container.SeccompProfile = con[1] - default: - return fmt.Errorf("Invalid --security-opt 2: %q", opt) - } - } - } - - container.ProcessLabel, container.MountLabel, err = label.InitLabels(labelOpts) - return err -} - -func getBlkioThrottleDevices(devs []*blkiodev.ThrottleDevice) ([]specs.ThrottleDevice, error) { - var throttleDevices []specs.ThrottleDevice - var stat syscall.Stat_t - - for _, d := range devs { - if err := syscall.Stat(d.Path, &stat); err != nil { - return nil, err - } - rate := d.Rate - d := specs.ThrottleDevice{Rate: &rate} - d.Major = int64(stat.Rdev / 256) - d.Minor = int64(stat.Rdev % 256) - throttleDevices = append(throttleDevices, d) - } - - return throttleDevices, nil -} - -func checkKernelVersion(k, major, minor int) bool { - if v, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("error getting kernel version: %s", err) - } else { - if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: k, Major: major, Minor: minor}) < 0 { - return false - } - } - return true -} - -func checkKernel() error { - // Check for unsupported kernel versions - // FIXME: it would be cleaner to not test for specific versions, but rather - // test for specific functionalities. - // Unfortunately we can't test for the feature "does not cause a kernel panic" - // without actually causing a kernel panic, so we need this workaround until - // the circumstances of pre-3.10 crashes are clearer. - // For details see https://github.com/docker/docker/issues/407 - // Docker 1.11 and above doesn't actually run on kernels older than 3.4, - // due to containerd-shim usage of PR_SET_CHILD_SUBREAPER (introduced in 3.4). - if !checkKernelVersion(3, 10, 0) { - v, _ := kernel.GetKernelVersion() - if os.Getenv("DOCKER_NOWARN_KERNEL_VERSION") == "" { - logrus.Fatalf("Your Linux kernel version %s is not supported for running docker. Please upgrade your kernel to 3.10.0 or newer.", v.String()) - } - } - return nil -} - -// adaptContainerSettings is called during container creation to modify any -// settings necessary in the HostConfig structure. -func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { - if adjustCPUShares && hostConfig.CPUShares > 0 { - // Handle unsupported CPUShares - if hostConfig.CPUShares < linuxMinCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, linuxMinCPUShares) - hostConfig.CPUShares = linuxMinCPUShares - } else if hostConfig.CPUShares > linuxMaxCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, linuxMaxCPUShares) - hostConfig.CPUShares = linuxMaxCPUShares - } - } - if hostConfig.Memory > 0 && hostConfig.MemorySwap == 0 { - // By default, MemorySwap is set to twice the size of Memory. 
- hostConfig.MemorySwap = hostConfig.Memory * 2 - } - if hostConfig.ShmSize == 0 { - hostConfig.ShmSize = container.DefaultSHMSize - } - var err error - if hostConfig.SecurityOpt == nil { - hostConfig.SecurityOpt, err = daemon.generateSecurityOpt(hostConfig.IpcMode, hostConfig.PidMode, hostConfig.Privileged) - if err != nil { - return err - } - } - if hostConfig.MemorySwappiness == nil { - defaultSwappiness := int64(-1) - hostConfig.MemorySwappiness = &defaultSwappiness - } - if hostConfig.OomKillDisable == nil { - defaultOomKillDisable := false - hostConfig.OomKillDisable = &defaultOomKillDisable - } - - return nil -} - -func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo, update bool) ([]string, error) { - warnings := []string{} - - // memory subsystem checks and adjustments - if resources.Memory != 0 && resources.Memory < linuxMinMemory { - return warnings, fmt.Errorf("Minimum memory limit allowed is 4MB") - } - if resources.Memory > 0 && !sysInfo.MemoryLimit { - warnings = append(warnings, "Your kernel does not support memory limit capabilities. Limitation discarded.") - logrus.Warn("Your kernel does not support memory limit capabilities. Limitation discarded.") - resources.Memory = 0 - resources.MemorySwap = -1 - } - if resources.Memory > 0 && resources.MemorySwap != -1 && !sysInfo.SwapLimit { - warnings = append(warnings, "Your kernel does not support swap limit capabilities, memory limited without swap.") - logrus.Warn("Your kernel does not support swap limit capabilities, memory limited without swap.") - resources.MemorySwap = -1 - } - if resources.Memory > 0 && resources.MemorySwap > 0 && resources.MemorySwap < resources.Memory { - return warnings, fmt.Errorf("Minimum memoryswap limit should be larger than memory limit, see usage") - } - if resources.Memory == 0 && resources.MemorySwap > 0 && !update { - return warnings, fmt.Errorf("You should always set the Memory limit when using Memoryswap limit, see usage") - } - if resources.MemorySwappiness != nil && *resources.MemorySwappiness != -1 && !sysInfo.MemorySwappiness { - warnings = append(warnings, "Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") - logrus.Warn("Your kernel does not support memory swappiness capabilities, memory swappiness discarded.") - resources.MemorySwappiness = nil - } - if resources.MemorySwappiness != nil { - swappiness := *resources.MemorySwappiness - if swappiness < -1 || swappiness > 100 { - return warnings, fmt.Errorf("Invalid value: %v, valid memory swappiness range is 0-100", swappiness) - } - } - if resources.MemoryReservation > 0 && !sysInfo.MemoryReservation { - warnings = append(warnings, "Your kernel does not support memory soft limit capabilities. Limitation discarded.") - logrus.Warn("Your kernel does not support memory soft limit capabilities. Limitation discarded.") - resources.MemoryReservation = 0 - } - if resources.MemoryReservation > 0 && resources.MemoryReservation < linuxMinMemory { - return warnings, fmt.Errorf("Minimum memory reservation allowed is 4MB") - } - if resources.Memory > 0 && resources.MemoryReservation > 0 && resources.Memory < resources.MemoryReservation { - return warnings, fmt.Errorf("Minimum memory limit should be larger than memory reservation limit, see usage") - } - if resources.KernelMemory > 0 && !sysInfo.KernelMemory { - warnings = append(warnings, "Your kernel does not support kernel memory limit capabilities. 
Limitation discarded.") - logrus.Warn("Your kernel does not support kernel memory limit capabilities. Limitation discarded.") - resources.KernelMemory = 0 - } - if resources.KernelMemory > 0 && resources.KernelMemory < linuxMinMemory { - return warnings, fmt.Errorf("Minimum kernel memory limit allowed is 4MB") - } - if resources.KernelMemory > 0 && !checkKernelVersion(4, 0, 0) { - warnings = append(warnings, "You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") - logrus.Warn("You specified a kernel memory limit on a kernel older than 4.0. Kernel memory limits are experimental on older kernels, it won't work as expected and can cause your system to be unstable.") - } - if resources.OomKillDisable != nil && !sysInfo.OomKillDisable { - // only produce warnings if the setting wasn't to *disable* the OOM Kill; no point - // warning the caller if they already wanted the feature to be off - if *resources.OomKillDisable { - warnings = append(warnings, "Your kernel does not support OomKillDisable, OomKillDisable discarded.") - logrus.Warn("Your kernel does not support OomKillDisable, OomKillDisable discarded.") - } - resources.OomKillDisable = nil - } - - if resources.PidsLimit != 0 && !sysInfo.PidsLimit { - warnings = append(warnings, "Your kernel does not support pids limit capabilities, pids limit discarded.") - logrus.Warn("Your kernel does not support pids limit capabilities, pids limit discarded.") - resources.PidsLimit = 0 - } - - // cpu subsystem checks and adjustments - if resources.CPUShares > 0 && !sysInfo.CPUShares { - warnings = append(warnings, "Your kernel does not support CPU shares. Shares discarded.") - logrus.Warn("Your kernel does not support CPU shares. Shares discarded.") - resources.CPUShares = 0 - } - if resources.CPUPeriod > 0 && !sysInfo.CPUCfsPeriod { - warnings = append(warnings, "Your kernel does not support CPU cfs period. Period discarded.") - logrus.Warn("Your kernel does not support CPU cfs period. Period discarded.") - resources.CPUPeriod = 0 - } - if resources.CPUPeriod != 0 && (resources.CPUPeriod < 1000 || resources.CPUPeriod > 1000000) { - return warnings, fmt.Errorf("CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)") - } - if resources.CPUQuota > 0 && !sysInfo.CPUCfsQuota { - warnings = append(warnings, "Your kernel does not support CPU cfs quota. Quota discarded.") - logrus.Warn("Your kernel does not support CPU cfs quota. Quota discarded.") - resources.CPUQuota = 0 - } - if resources.CPUQuota > 0 && resources.CPUQuota < 1000 { - return warnings, fmt.Errorf("CPU cfs quota can not be less than 1ms (i.e. 1000)") - } - if resources.CPUPercent > 0 { - warnings = append(warnings, "%s does not support CPU percent. Percent discarded.", runtime.GOOS) - logrus.Warnf("%s does not support CPU percent. Percent discarded.", runtime.GOOS) - resources.CPUPercent = 0 - } - - // cpuset subsystem checks and adjustments - if (resources.CpusetCpus != "" || resources.CpusetMems != "") && !sysInfo.Cpuset { - warnings = append(warnings, "Your kernel does not support cpuset. Cpuset discarded.") - logrus.Warn("Your kernel does not support cpuset. 
Cpuset discarded.") - resources.CpusetCpus = "" - resources.CpusetMems = "" - } - cpusAvailable, err := sysInfo.IsCpusetCpusAvailable(resources.CpusetCpus) - if err != nil { - return warnings, fmt.Errorf("Invalid value %s for cpuset cpus", resources.CpusetCpus) - } - if !cpusAvailable { - return warnings, fmt.Errorf("Requested CPUs are not available - requested %s, available: %s", resources.CpusetCpus, sysInfo.Cpus) - } - memsAvailable, err := sysInfo.IsCpusetMemsAvailable(resources.CpusetMems) - if err != nil { - return warnings, fmt.Errorf("Invalid value %s for cpuset mems", resources.CpusetMems) - } - if !memsAvailable { - return warnings, fmt.Errorf("Requested memory nodes are not available - requested %s, available: %s", resources.CpusetMems, sysInfo.Mems) - } - - // blkio subsystem checks and adjustments - if resources.BlkioWeight > 0 && !sysInfo.BlkioWeight { - warnings = append(warnings, "Your kernel does not support Block I/O weight. Weight discarded.") - logrus.Warn("Your kernel does not support Block I/O weight. Weight discarded.") - resources.BlkioWeight = 0 - } - if resources.BlkioWeight > 0 && (resources.BlkioWeight < 10 || resources.BlkioWeight > 1000) { - return warnings, fmt.Errorf("Range of blkio weight is from 10 to 1000") - } - if resources.IOMaximumBandwidth != 0 || resources.IOMaximumIOps != 0 { - return warnings, fmt.Errorf("Invalid QoS settings: %s does not support Maximum IO Bandwidth or Maximum IO IOps", runtime.GOOS) - } - if len(resources.BlkioWeightDevice) > 0 && !sysInfo.BlkioWeightDevice { - warnings = append(warnings, "Your kernel does not support Block I/O weight_device.") - logrus.Warn("Your kernel does not support Block I/O weight_device. Weight-device discarded.") - resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} - } - if len(resources.BlkioDeviceReadBps) > 0 && !sysInfo.BlkioReadBpsDevice { - warnings = append(warnings, "Your kernel does not support Block read limit in bytes per second.") - logrus.Warn("Your kernel does not support Block I/O read limit in bytes per second. --device-read-bps discarded.") - resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceWriteBps) > 0 && !sysInfo.BlkioWriteBpsDevice { - warnings = append(warnings, "Your kernel does not support Block write limit in bytes per second.") - logrus.Warn("Your kernel does not support Block I/O write limit in bytes per second. --device-write-bps discarded.") - resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceReadIOps) > 0 && !sysInfo.BlkioReadIOpsDevice { - warnings = append(warnings, "Your kernel does not support Block read limit in IO per second.") - logrus.Warn("Your kernel does not support Block I/O read limit in IO per second. -device-read-iops discarded.") - resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceWriteIOps) > 0 && !sysInfo.BlkioWriteIOpsDevice { - warnings = append(warnings, "Your kernel does not support Block write limit in IO per second.") - logrus.Warn("Your kernel does not support Block I/O write limit in IO per second. --device-write-iops discarded.") - resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} - } - - return warnings, nil -} - -func (daemon *Daemon) getCgroupDriver() string { - cgroupDriver := cgroupFsDriver - - if UsingSystemd(daemon.configStore) { - cgroupDriver = cgroupSystemdDriver - } - return cgroupDriver -} - -// getCD gets the raw value of the native.cgroupdriver option, if set. 
-func getCD(config *Config) string { - for _, option := range config.ExecOptions { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil || !strings.EqualFold(key, "native.cgroupdriver") { - continue - } - return val - } - return "" -} - -// VerifyCgroupDriver validates native.cgroupdriver -func VerifyCgroupDriver(config *Config) error { - cd := getCD(config) - if cd == "" || cd == cgroupFsDriver || cd == cgroupSystemdDriver { - return nil - } - return fmt.Errorf("native.cgroupdriver option %s not supported", cd) -} - -// UsingSystemd returns true if cli option includes native.cgroupdriver=systemd -func UsingSystemd(config *Config) bool { - return getCD(config) == cgroupSystemdDriver -} - -// verifyPlatformContainerSettings performs platform-specific validation of the -// hostconfig and config structures. -func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - warnings := []string{} - sysInfo := sysinfo.New(true) - - warnings, err := daemon.verifyExperimentalContainerSettings(hostConfig, config) - if err != nil { - return warnings, err - } - - w, err := verifyContainerResources(&hostConfig.Resources, sysInfo, update) - if err != nil { - return warnings, err - } - warnings = append(warnings, w...) - - if hostConfig.ShmSize < 0 { - return warnings, fmt.Errorf("SHM size must be greater than 0") - } - - if hostConfig.OomScoreAdj < -1000 || hostConfig.OomScoreAdj > 1000 { - return warnings, fmt.Errorf("Invalid value %d, range for oom score adj is [-1000, 1000]", hostConfig.OomScoreAdj) - } - - // ip-forwarding does not affect container with '--net=host' (or '--net=none') - if sysInfo.IPv4ForwardingDisabled && !(hostConfig.NetworkMode.IsHost() || hostConfig.NetworkMode.IsNone()) { - warnings = append(warnings, "IPv4 forwarding is disabled. Networking will not work.") - logrus.Warn("IPv4 forwarding is disabled. 
Networking will not work") - } - // check for various conflicting options with user namespaces - if daemon.configStore.RemappedRoot != "" && hostConfig.UsernsMode.IsPrivate() { - if hostConfig.Privileged { - return warnings, fmt.Errorf("Privileged mode is incompatible with user namespaces") - } - if hostConfig.NetworkMode.IsHost() { - return warnings, fmt.Errorf("Cannot share the host's network namespace when user namespaces are enabled") - } - if hostConfig.PidMode.IsHost() { - return warnings, fmt.Errorf("Cannot share the host PID namespace when user namespaces are enabled") - } - if hostConfig.ReadonlyRootfs { - return warnings, fmt.Errorf("Cannot use the --read-only option when user namespaces are enabled") - } - } - if hostConfig.CgroupParent != "" && UsingSystemd(daemon.configStore) { - // CgroupParent for systemd cgroup should be named as "xxx.slice" - if len(hostConfig.CgroupParent) <= 6 || !strings.HasSuffix(hostConfig.CgroupParent, ".slice") { - return warnings, fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") - } - } - if hostConfig.Runtime == "" { - hostConfig.Runtime = daemon.configStore.GetDefaultRuntimeName() - } - - if rt := daemon.configStore.GetRuntime(hostConfig.Runtime); rt == nil { - return warnings, fmt.Errorf("Unknown runtime specified %s", hostConfig.Runtime) - } - - return warnings, nil -} - -// platformReload update configuration with platform specific options -func (daemon *Daemon) platformReload(config *Config, attributes *map[string]string) { - if config.IsValueSet("runtimes") { - daemon.configStore.Runtimes = config.Runtimes - // Always set the default one - daemon.configStore.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary} - } - - if config.DefaultRuntime != "" { - daemon.configStore.DefaultRuntime = config.DefaultRuntime - } - - // Update attributes - var runtimeList bytes.Buffer - for name, rt := range daemon.configStore.Runtimes { - if runtimeList.Len() > 0 { - runtimeList.WriteRune(' ') - } - runtimeList.WriteString(fmt.Sprintf("%s:%s", name, rt)) - } - - (*attributes)["runtimes"] = runtimeList.String() - (*attributes)["default-runtime"] = daemon.configStore.DefaultRuntime -} - -// verifyDaemonSettings performs validation of daemon config struct -func verifyDaemonSettings(config *Config) error { - // Check for mutually incompatible config options - if config.bridgeConfig.Iface != "" && config.bridgeConfig.IP != "" { - return fmt.Errorf("You specified -b & --bip, mutually exclusive options. Please specify only one") - } - if !config.bridgeConfig.EnableIPTables && !config.bridgeConfig.InterContainerCommunication { - return fmt.Errorf("You specified --iptables=false with --icc=false. ICC=false uses iptables to function. 
Please set --icc or --iptables to true") - } - if !config.bridgeConfig.EnableIPTables && config.bridgeConfig.EnableIPMasq { - config.bridgeConfig.EnableIPMasq = false - } - if err := VerifyCgroupDriver(config); err != nil { - return err - } - if config.CgroupParent != "" && UsingSystemd(config) { - if len(config.CgroupParent) <= 6 || !strings.HasSuffix(config.CgroupParent, ".slice") { - return fmt.Errorf("cgroup-parent for systemd cgroup should be a valid slice named as \"xxx.slice\"") - } - } - - if config.DefaultRuntime == "" { - config.DefaultRuntime = stockRuntimeName - } - if config.Runtimes == nil { - config.Runtimes = make(map[string]types.Runtime) - } - stockRuntimeOpts := []string{} - if UsingSystemd(config) { - stockRuntimeOpts = append(stockRuntimeOpts, "--systemd-cgroup=true") - } - config.Runtimes[stockRuntimeName] = types.Runtime{Path: DefaultRuntimeBinary, Args: stockRuntimeOpts} - - return nil -} - -// checkSystem validates platform-specific requirements -func checkSystem() error { - if os.Geteuid() != 0 { - return fmt.Errorf("The Docker daemon needs to be run as root") - } - return checkKernel() -} - -// configureMaxThreads sets the Go runtime max threads threshold -// which is 90% of the kernel setting from /proc/sys/kernel/threads-max -func configureMaxThreads(config *Config) error { - mt, err := ioutil.ReadFile("/proc/sys/kernel/threads-max") - if err != nil { - return err - } - mtint, err := strconv.Atoi(strings.TrimSpace(string(mt))) - if err != nil { - return err - } - maxThreads := (mtint / 100) * 90 - debug.SetMaxThreads(maxThreads) - logrus.Debugf("Golang's threads limit set to %d", maxThreads) - return nil -} - -// configureKernelSecuritySupport configures and validates security support for the kernel -func configureKernelSecuritySupport(config *Config, driverName string) error { - if config.EnableSelinuxSupport { - if selinuxEnabled() { - // As Docker on overlayFS and SELinux are incompatible at present, error on overlayfs being enabled - if driverName == "overlay" { - return fmt.Errorf("SELinux is not supported with the %s graph driver", driverName) - } - logrus.Debug("SELinux enabled successfully") - } else { - logrus.Warn("Docker could not enable SELinux on the host system") - } - } else { - selinuxSetDisabled() - } - return nil -} - -func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { - netOptions, err := daemon.networkOptions(config, activeSandboxes) - if err != nil { - return nil, err - } - - controller, err := libnetwork.New(netOptions...) 
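
Earlier in this hunk, configureMaxThreads sizes the Go runtime's thread ceiling at 90% of the kernel's threads-max, using integer arithmetic that truncates before scaling. A worked example under an assumed threads-max of 63726 (the real value is read from /proc/sys/kernel/threads-max at daemon start):

package sketch

import (
	"fmt"
	"runtime/debug"
)

// setMaxThreadsExample pins down the integer arithmetic: division by 100
// truncates first, so 63726 yields 637 * 90 = 57330, not 57353.
func setMaxThreadsExample() {
	mtint := 63726 // assumed sample of /proc/sys/kernel/threads-max
	maxThreads := (mtint / 100) * 90
	debug.SetMaxThreads(maxThreads)
	fmt.Printf("Golang's threads limit set to %d\n", maxThreads)
}
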
- if err != nil { - return nil, fmt.Errorf("error obtaining controller instance: %v", err) - } - - if len(activeSandboxes) > 0 { - logrus.Infof("There are old running containers, the network config will not take effect") - return controller, nil - } - - // Initialize default network on "null" - if n, _ := controller.NetworkByName("none"); n == nil { - if _, err := controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(true)); err != nil { - return nil, fmt.Errorf("Error creating default \"null\" network: %v", err) - } - } - - // Initialize default network on "host" - if n, _ := controller.NetworkByName("host"); n == nil { - if _, err := controller.NewNetwork("host", "host", "", libnetwork.NetworkOptionPersist(true)); err != nil { - return nil, fmt.Errorf("Error creating default \"host\" network: %v", err) - } - } - if !config.DisableBridge { - // Initialize default driver "bridge" - if err := initBridgeDriver(controller, config); err != nil { - return nil, err - } - } - - return controller, nil -} - -func driverOptions(config *Config) []nwconfig.Option { - bridgeConfig := options.Generic{ - "EnableIPForwarding": config.bridgeConfig.EnableIPForward, - "EnableIPTables": config.bridgeConfig.EnableIPTables, - "EnableUserlandProxy": config.bridgeConfig.EnableUserlandProxy} - bridgeOption := options.Generic{netlabel.GenericData: bridgeConfig} - - dOptions := []nwconfig.Option{} - dOptions = append(dOptions, nwconfig.OptionDriverConfig("bridge", bridgeOption)) - return dOptions -} - -func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { - if n, err := controller.NetworkByName("bridge"); err == nil { - if err = n.Delete(); err != nil { - return fmt.Errorf("could not delete the default bridge network: %v", err) - } - } - - bridgeName := bridge.DefaultBridgeName - if config.bridgeConfig.Iface != "" { - bridgeName = config.bridgeConfig.Iface - } - netOption := map[string]string{ - bridge.BridgeName: bridgeName, - bridge.DefaultBridge: strconv.FormatBool(true), - netlabel.DriverMTU: strconv.Itoa(config.Mtu), - bridge.EnableIPMasquerade: strconv.FormatBool(config.bridgeConfig.EnableIPMasq), - bridge.EnableICC: strconv.FormatBool(config.bridgeConfig.InterContainerCommunication), - } - - // --ip processing - if config.bridgeConfig.DefaultIP != nil { - netOption[bridge.DefaultBindingIP] = config.bridgeConfig.DefaultIP.String() - } - - var ( - ipamV4Conf *libnetwork.IpamConf - ipamV6Conf *libnetwork.IpamConf - ) - - ipamV4Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} - - nw, nw6List, err := netutils.ElectInterfaceAddresses(bridgeName) - if err == nil { - ipamV4Conf.PreferredPool = lntypes.GetIPNetCanonical(nw).String() - hip, _ := lntypes.GetHostPartIP(nw.IP, nw.Mask) - if hip.IsGlobalUnicast() { - ipamV4Conf.Gateway = nw.IP.String() - } - } - - if config.bridgeConfig.IP != "" { - ipamV4Conf.PreferredPool = config.bridgeConfig.IP - ip, _, err := net.ParseCIDR(config.bridgeConfig.IP) - if err != nil { - return err - } - ipamV4Conf.Gateway = ip.String() - } else if bridgeName == bridge.DefaultBridgeName && ipamV4Conf.PreferredPool != "" { - logrus.Infof("Default bridge (%s) is assigned with an IP address %s.
Daemon option --bip can be used to set a preferred IP address", bridgeName, ipamV4Conf.PreferredPool) - } - - if config.bridgeConfig.FixedCIDR != "" { - _, fCIDR, err := net.ParseCIDR(config.bridgeConfig.FixedCIDR) - if err != nil { - return err - } - - ipamV4Conf.SubPool = fCIDR.String() - } - - if config.bridgeConfig.DefaultGatewayIPv4 != nil { - ipamV4Conf.AuxAddresses["DefaultGatewayIPv4"] = config.bridgeConfig.DefaultGatewayIPv4.String() - } - - var deferIPv6Alloc bool - if config.bridgeConfig.FixedCIDRv6 != "" { - _, fCIDRv6, err := net.ParseCIDR(config.bridgeConfig.FixedCIDRv6) - if err != nil { - return err - } - - // In case user has specified the daemon flag --fixed-cidr-v6 and the passed network has - // at least 48 host bits, we need to guarantee the current behavior where the containers' - // IPv6 addresses will be constructed based on the containers' interface MAC address. - // We do so by telling libnetwork to defer the IPv6 address allocation for the endpoints - // on this network until after the driver has created the endpoint and returned the - // constructed address. Libnetwork will then reserve this address with the ipam driver. - ones, _ := fCIDRv6.Mask.Size() - deferIPv6Alloc = ones <= 80 - - if ipamV6Conf == nil { - ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} - } - ipamV6Conf.PreferredPool = fCIDRv6.String() - - // In case the --fixed-cidr-v6 is specified and the current docker0 bridge IPv6 - // address belongs to the same network, we need to inform libnetwork about it, so - // that it can be reserved with IPAM and it will not be given away to somebody else - for _, nw6 := range nw6List { - if fCIDRv6.Contains(nw6.IP) { - ipamV6Conf.Gateway = nw6.IP.String() - break - } - } - } - - if config.bridgeConfig.DefaultGatewayIPv6 != nil { - if ipamV6Conf == nil { - ipamV6Conf = &libnetwork.IpamConf{AuxAddresses: make(map[string]string)} - } - ipamV6Conf.AuxAddresses["DefaultGatewayIPv6"] = config.bridgeConfig.DefaultGatewayIPv6.String() - } - - v4Conf := []*libnetwork.IpamConf{ipamV4Conf} - v6Conf := []*libnetwork.IpamConf{} - if ipamV6Conf != nil { - v6Conf = append(v6Conf, ipamV6Conf) - } - // Initialize default network on "bridge" with the same name - _, err = controller.NewNetwork("bridge", "bridge", "", - libnetwork.NetworkOptionEnableIPv6(config.bridgeConfig.EnableIPv6), - libnetwork.NetworkOptionDriverOpts(netOption), - libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), - libnetwork.NetworkOptionDeferIPv6Alloc(deferIPv6Alloc)) - if err != nil { - return fmt.Errorf("Error creating default \"bridge\" network: %v", err) - } - return nil -} - -// setupInitLayer populates a directory with mountpoints suitable -// for bind-mounting things into the container. -// -// This extra layer is used by all containers as the top-most ro layer. It protects -// the container from unwanted side-effects on the rw layer. 
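
The --fixed-cidr-v6 handling above hinges on a single prefix-length test, explained in the inline comment: a pool that leaves 48 or more host bits (prefix length <= 80) gets MAC-derived endpoint addresses, so IPAM allocation is deferred until the driver has constructed the address. A minimal sketch of just that decision (deferIPv6ForPool is a hypothetical helper):

package sketch

import "net"

// deferIPv6ForPool reproduces the rule described above: prefix length
// <= 80 means at least 48 host bits, so endpoint IPv6 addresses are
// derived from the interface MAC and IPAM allocation is deferred.
func deferIPv6ForPool(cidr string) (bool, error) {
	_, pool, err := net.ParseCIDR(cidr)
	if err != nil {
		return false, err
	}
	ones, _ := pool.Mask.Size()
	return ones <= 80, nil
}

For example, 2001:db8::/64 defers allocation (64 <= 80), while 2001:db8::/96 is allocated through the IPAM driver up front.
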
-func setupInitLayer(initLayer string, rootUID, rootGID int) error { - for pth, typ := range map[string]string{ - "/dev/pts": "dir", - "/dev/shm": "dir", - "/proc": "dir", - "/sys": "dir", - "/.dockerenv": "file", - "/etc/resolv.conf": "file", - "/etc/hosts": "file", - "/etc/hostname": "file", - "/dev/console": "file", - "/etc/mtab": "/proc/mounts", - } { - parts := strings.Split(pth, "/") - prev := "/" - for _, p := range parts[1:] { - prev = filepath.Join(prev, p) - syscall.Unlink(filepath.Join(initLayer, prev)) - } - - if _, err := os.Stat(filepath.Join(initLayer, pth)); err != nil { - if os.IsNotExist(err) { - if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, filepath.Dir(pth)), 0755, rootUID, rootGID); err != nil { - return err - } - switch typ { - case "dir": - if err := idtools.MkdirAllNewAs(filepath.Join(initLayer, pth), 0755, rootUID, rootGID); err != nil { - return err - } - case "file": - f, err := os.OpenFile(filepath.Join(initLayer, pth), os.O_CREATE, 0755) - if err != nil { - return err - } - f.Chown(rootUID, rootGID) - f.Close() - default: - if err := os.Symlink(typ, filepath.Join(initLayer, pth)); err != nil { - return err - } - } - } else { - return err - } - } - } - - // Layer is ready to use, if it wasn't before. - return nil -} - -// Parse the remapped root (user namespace) option, which can be one of: -// username - valid username from /etc/passwd -// username:groupname - valid username; valid groupname from /etc/group -// uid - 32-bit unsigned int valid Linux UID value -// uid:gid - uid value; 32-bit unsigned int Linux GID value -// -// If no groupname is specified, and a username is specified, an attempt -// will be made to lookup a gid for that username as a groupname -// -// If names are used, they are verified to exist in passwd/group -func parseRemappedRoot(usergrp string) (string, string, error) { - - var ( - userID, groupID int - username, groupname string - ) - - idparts := strings.Split(usergrp, ":") - if len(idparts) > 2 { - return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", usergrp) - } - - if uid, err := strconv.ParseInt(idparts[0], 10, 32); err == nil { - // must be a uid; take it as valid - userID = int(uid) - luser, err := user.LookupUid(userID) - if err != nil { - return "", "", fmt.Errorf("Uid %d has no entry in /etc/passwd: %v", userID, err) - } - username = luser.Name - if len(idparts) == 1 { - // if the uid was numeric and no gid was specified, take the uid as the gid - groupID = userID - lgrp, err := user.LookupGid(groupID) - if err != nil { - return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) - } - groupname = lgrp.Name - } - } else { - lookupName := idparts[0] - // special case: if the user specified "default", they want Docker to create or - // use (after creation) the "dockremap" user/group for root remapping - if lookupName == defaultIDSpecifier { - lookupName = defaultRemappedID - } - luser, err := user.LookupUser(lookupName) - if err != nil && idparts[0] != defaultIDSpecifier { - // error if the name requested isn't the special "dockremap" ID - return "", "", fmt.Errorf("Error during uid lookup for %q: %v", lookupName, err) - } else if err != nil { - // special case-- if the username == "default", then we have been asked - // to create a new entry pair in /etc/{passwd,group} for which the /etc/sub{uid,gid} - // ranges will be used for the user and group mappings in user namespaced containers - _, _, err := idtools.AddNamespaceRangesUser(defaultRemappedID) - if err == nil 
{ - return defaultRemappedID, defaultRemappedID, nil - } - return "", "", fmt.Errorf("Error during %q user creation: %v", defaultRemappedID, err) - } - username = luser.Name - if len(idparts) == 1 { - // we only have a string username, and no group specified; look up gid from username as group - group, err := user.LookupGroup(lookupName) - if err != nil { - return "", "", fmt.Errorf("Error during gid lookup for %q: %v", lookupName, err) - } - groupID = group.Gid - groupname = group.Name - } - } - - if len(idparts) == 2 { - // groupname or gid is separately specified and must be resolved - // to an unsigned 32-bit gid - if gid, err := strconv.ParseInt(idparts[1], 10, 32); err == nil { - // must be a gid, take it as valid - groupID = int(gid) - lgrp, err := user.LookupGid(groupID) - if err != nil { - return "", "", fmt.Errorf("Gid %d has no entry in /etc/group: %v", groupID, err) - } - groupname = lgrp.Name - } else { - // not a number; attempt a lookup - if _, err := user.LookupGroup(idparts[1]); err != nil { - return "", "", fmt.Errorf("Error during groupname lookup for %q: %v", idparts[1], err) - } - groupname = idparts[1] - } - } - return username, groupname, nil -} - -func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { - if runtime.GOOS != "linux" && config.RemappedRoot != "" { - return nil, nil, fmt.Errorf("User namespaces are only supported on Linux") - } - - // if the daemon was started with remapped root option, parse - // the config option to the int uid,gid values - var ( - uidMaps, gidMaps []idtools.IDMap - ) - if config.RemappedRoot != "" { - username, groupname, err := parseRemappedRoot(config.RemappedRoot) - if err != nil { - return nil, nil, err - } - if username == "root" { - // Cannot setup user namespaces with a 1-to-1 mapping; "--userns-remap=0:0" is a no-op - // effectively - logrus.Warn("User namespaces: root cannot be remapped with itself; user namespaces are OFF") - return uidMaps, gidMaps, nil - } - logrus.Infof("User namespaces: ID ranges will be mapped to subuid/subgid ranges of: %s:%s", username, groupname) - // update remapped root setting now that we have resolved them to actual names - config.RemappedRoot = fmt.Sprintf("%s:%s", username, groupname) - - uidMaps, gidMaps, err = idtools.CreateIDMappings(username, groupname) - if err != nil { - return nil, nil, fmt.Errorf("Can't create ID mappings: %v", err) - } - } - return uidMaps, gidMaps, nil -} - -func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { - config.Root = rootDir - // the docker root metadata directory needs to have execute permissions for all users (g+x,o+x) - // so that syscalls executing as non-root, operating on subdirectories of the graph root - // (e.g. mounted layers of a container) can traverse this path. - // The user namespace support will create subdirectories for the remapped root host uid:gid - // pair owned by that same uid:gid pair for proper write access to those needed metadata and - // layer content subtrees.
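
parseRemappedRoot above accepts four spellings: "uid", "uid:gid", "username", and "username:groupname", with the group half defaulting to the user half when omitted. A sketch of only the syntactic split, deliberately leaving out the /etc/passwd and /etc/group resolution and the special "default"/"dockremap" handling (splitUserGroup is a hypothetical helper):

package sketch

import (
	"fmt"
	"strings"
)

// splitUserGroup: at most one ":" separator; a missing group half
// defaults to the user half, matching the parser above.
func splitUserGroup(spec string) (user, group string, err error) {
	parts := strings.Split(spec, ":")
	switch len(parts) {
	case 1:
		return parts[0], parts[0], nil
	case 2:
		return parts[0], parts[1], nil
	default:
		return "", "", fmt.Errorf("Invalid user/group specification in --userns-remap: %q", spec)
	}
}
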
- if _, err := os.Stat(rootDir); err == nil { - // root current exists; verify the access bits are correct by setting them - if err = os.Chmod(rootDir, 0711); err != nil { - return err - } - } else if os.IsNotExist(err) { - // no root exists yet, create it 0711 with root:root ownership - if err := os.MkdirAll(rootDir, 0711); err != nil { - return err - } - } - - // if user namespaces are enabled we will create a subtree underneath the specified root - // with any/all specified remapped root uid/gid options on the daemon creating - // a new subdirectory with ownership set to the remapped uid/gid (so as to allow - // `chdir()` to work for containers namespaced to that uid/gid) - if config.RemappedRoot != "" { - config.Root = filepath.Join(rootDir, fmt.Sprintf("%d.%d", rootUID, rootGID)) - logrus.Debugf("Creating user namespaced daemon root: %s", config.Root) - // Create the root directory if it doesn't exist - if err := idtools.MkdirAllAs(config.Root, 0700, rootUID, rootGID); err != nil { - return fmt.Errorf("Cannot create daemon root: %s: %v", config.Root, err) - } - } - return nil -} - -// registerLinks writes the links to a file. -func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { - if hostConfig == nil || hostConfig.NetworkMode.IsUserDefined() { - return nil - } - - for _, l := range hostConfig.Links { - name, alias, err := runconfigopts.ParseLink(l) - if err != nil { - return err - } - child, err := daemon.GetContainer(name) - if err != nil { - return fmt.Errorf("Could not get container for %s", name) - } - for child.HostConfig.NetworkMode.IsContainer() { - parts := strings.SplitN(string(child.HostConfig.NetworkMode), ":", 2) - child, err = daemon.GetContainer(parts[1]) - if err != nil { - return fmt.Errorf("Could not get container for %s", parts[1]) - } - } - if child.HostConfig.NetworkMode.IsHost() { - return runconfig.ErrConflictHostNetworkAndLinks - } - if err := daemon.registerLink(container, child, alias); err != nil { - return err - } - } - - // After we load all the links into the daemon - // set them to nil on the hostconfig - return container.WriteHostConfig() -} - -// conditionalMountOnStart is a platform specific helper function during the -// container start to call mount. -func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { - return daemon.Mount(container) -} - -// conditionalUnmountOnCleanup is a platform specific helper function called -// during the cleanup of a container to unmount. 
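
When remapping is active, setupDaemonRoot above nests all daemon state one level deeper so the remapped uid:gid pair owns its own subtree. The path construction reduces to the following (remappedDaemonRoot is a hypothetical helper mirroring the code above):

package sketch

import (
	"fmt"
	"path/filepath"
)

// remappedDaemonRoot builds the per-mapping subdirectory "<uid>.<gid>"
// beneath the configured root, e.g.
// remappedDaemonRoot("/var/lib/docker", 100000, 100000)
//   -> "/var/lib/docker/100000.100000"
func remappedDaemonRoot(root string, rootUID, rootGID int) string {
	return filepath.Join(root, fmt.Sprintf("%d.%d", rootUID, rootGID))
}
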
-func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { - return daemon.Unmount(container) -} - -func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { - // Unix has no custom images to register - return nil -} - -func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { - if !c.IsRunning() { - return nil, errNotRunning{c.ID} - } - stats, err := daemon.containerd.Stats(c.ID) - if err != nil { - return nil, err - } - s := &types.StatsJSON{} - cgs := stats.CgroupStats - if cgs != nil { - s.BlkioStats = types.BlkioStats{ - IoServiceBytesRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceBytesRecursive), - IoServicedRecursive: copyBlkioEntry(cgs.BlkioStats.IoServicedRecursive), - IoQueuedRecursive: copyBlkioEntry(cgs.BlkioStats.IoQueuedRecursive), - IoServiceTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoServiceTimeRecursive), - IoWaitTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoWaitTimeRecursive), - IoMergedRecursive: copyBlkioEntry(cgs.BlkioStats.IoMergedRecursive), - IoTimeRecursive: copyBlkioEntry(cgs.BlkioStats.IoTimeRecursive), - SectorsRecursive: copyBlkioEntry(cgs.BlkioStats.SectorsRecursive), - } - cpu := cgs.CpuStats - s.CPUStats = types.CPUStats{ - CPUUsage: types.CPUUsage{ - TotalUsage: cpu.CpuUsage.TotalUsage, - PercpuUsage: cpu.CpuUsage.PercpuUsage, - UsageInKernelmode: cpu.CpuUsage.UsageInKernelmode, - UsageInUsermode: cpu.CpuUsage.UsageInUsermode, - }, - ThrottlingData: types.ThrottlingData{ - Periods: cpu.ThrottlingData.Periods, - ThrottledPeriods: cpu.ThrottlingData.ThrottledPeriods, - ThrottledTime: cpu.ThrottlingData.ThrottledTime, - }, - } - mem := cgs.MemoryStats.Usage - s.MemoryStats = types.MemoryStats{ - Usage: mem.Usage, - MaxUsage: mem.MaxUsage, - Stats: cgs.MemoryStats.Stats, - Failcnt: mem.Failcnt, - Limit: mem.Limit, - } - // if the container does not set memory limit, use the machineMemory - if mem.Limit > daemon.statsCollector.machineMemory && daemon.statsCollector.machineMemory > 0 { - s.MemoryStats.Limit = daemon.statsCollector.machineMemory - } - if cgs.PidsStats != nil { - s.PidsStats = types.PidsStats{ - Current: cgs.PidsStats.Current, - } - } - } - s.Read, err = ptypes.Timestamp(stats.Timestamp) - if err != nil { - return nil, err - } - return s, nil -} - -// setDefaultIsolation determines the default isolation mode for the -// daemon to run in. 
This is only applicable on Windows -func (daemon *Daemon) setDefaultIsolation() error { - return nil -} - -func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { - var layers []string - for _, l := range rootfs.DiffIDs { - layers = append(layers, l.String()) - } - return types.RootFS{ - Type: rootfs.Type, - Layers: layers, - } -} - -// setupDaemonProcess sets various settings for the daemon's process -func setupDaemonProcess(config *Config) error { - // setup the daemons oom_score_adj - return setupOOMScoreAdj(config.OOMScoreAdjust) -} - -func setupOOMScoreAdj(score int) error { - f, err := os.OpenFile("/proc/self/oom_score_adj", os.O_WRONLY, 0) - if err != nil { - return err - } - _, err = f.WriteString(strconv.Itoa(score)) - f.Close() - return err -} diff --git a/daemon/daemon_unix_test.go b/daemon/daemon_unix_test.go deleted file mode 100644 index fae84bab6a..0000000000 --- a/daemon/daemon_unix_test.go +++ /dev/null @@ -1,199 +0,0 @@ -// +build !windows - -package daemon - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/docker/container" - containertypes "github.com/docker/engine-api/types/container" -) - -// Unix test as uses settings which are not available on Windows -func TestAdjustCPUShares(t *testing.T) { - tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - daemon := &Daemon{ - repository: tmp, - root: tmp, - } - - hostConfig := &containertypes.HostConfig{ - Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, - } - daemon.adaptContainerSettings(hostConfig, true) - if hostConfig.CPUShares != linuxMinCPUShares { - t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares) - } - - hostConfig.CPUShares = linuxMaxCPUShares + 1 - daemon.adaptContainerSettings(hostConfig, true) - if hostConfig.CPUShares != linuxMaxCPUShares { - t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares) - } - - hostConfig.CPUShares = 0 - daemon.adaptContainerSettings(hostConfig, true) - if hostConfig.CPUShares != 0 { - t.Error("Expected CPUShares to be unchanged") - } - - hostConfig.CPUShares = 1024 - daemon.adaptContainerSettings(hostConfig, true) - if hostConfig.CPUShares != 1024 { - t.Error("Expected CPUShares to be unchanged") - } -} - -// Unix test as uses settings which are not available on Windows -func TestAdjustCPUSharesNoAdjustment(t *testing.T) { - tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - daemon := &Daemon{ - repository: tmp, - root: tmp, - } - - hostConfig := &containertypes.HostConfig{ - Resources: containertypes.Resources{CPUShares: linuxMinCPUShares - 1}, - } - daemon.adaptContainerSettings(hostConfig, false) - if hostConfig.CPUShares != linuxMinCPUShares-1 { - t.Errorf("Expected CPUShares to be %d", linuxMinCPUShares-1) - } - - hostConfig.CPUShares = linuxMaxCPUShares + 1 - daemon.adaptContainerSettings(hostConfig, false) - if hostConfig.CPUShares != linuxMaxCPUShares+1 { - t.Errorf("Expected CPUShares to be %d", linuxMaxCPUShares+1) - } - - hostConfig.CPUShares = 0 - daemon.adaptContainerSettings(hostConfig, false) - if hostConfig.CPUShares != 0 { - t.Error("Expected CPUShares to be unchanged") - } - - hostConfig.CPUShares = 1024 - daemon.adaptContainerSettings(hostConfig, false) - if hostConfig.CPUShares != 1024 { - t.Error("Expected CPUShares to be unchanged") - } -} - -// Unix test as uses settings which are not available on Windows -func TestParseSecurityOptWithDeprecatedColon(t *testing.T) { 
- container := &container.Container{} - config := &containertypes.HostConfig{} - - // test apparmor - config.SecurityOpt = []string{"apparmor=test_profile"} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - if container.AppArmorProfile != "test_profile" { - t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) - } - - // test seccomp - sp := "/path/to/seccomp_test.json" - config.SecurityOpt = []string{"seccomp=" + sp} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - if container.SeccompProfile != sp { - t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile) - } - - // test valid label - config.SecurityOpt = []string{"label=user:USER"} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - - // test invalid label - config.SecurityOpt = []string{"label"} - if err := parseSecurityOpt(container, config); err == nil { - t.Fatal("Expected parseSecurityOpt error, got nil") - } - - // test invalid opt - config.SecurityOpt = []string{"test"} - if err := parseSecurityOpt(container, config); err == nil { - t.Fatal("Expected parseSecurityOpt error, got nil") - } -} - -func TestParseSecurityOpt(t *testing.T) { - container := &container.Container{} - config := &containertypes.HostConfig{} - - // test apparmor - config.SecurityOpt = []string{"apparmor=test_profile"} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - if container.AppArmorProfile != "test_profile" { - t.Fatalf("Unexpected AppArmorProfile, expected: \"test_profile\", got %q", container.AppArmorProfile) - } - - // test seccomp - sp := "/path/to/seccomp_test.json" - config.SecurityOpt = []string{"seccomp=" + sp} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - if container.SeccompProfile != sp { - t.Fatalf("Unexpected SeccompProfile, expected: %q, got %q", sp, container.SeccompProfile) - } - - // test valid label - config.SecurityOpt = []string{"label=user:USER"} - if err := parseSecurityOpt(container, config); err != nil { - t.Fatalf("Unexpected parseSecurityOpt error: %v", err) - } - - // test invalid label - config.SecurityOpt = []string{"label"} - if err := parseSecurityOpt(container, config); err == nil { - t.Fatal("Expected parseSecurityOpt error, got nil") - } - - // test invalid opt - config.SecurityOpt = []string{"test"} - if err := parseSecurityOpt(container, config); err == nil { - t.Fatal("Expected parseSecurityOpt error, got nil") - } -} - -func TestNetworkOptions(t *testing.T) { - daemon := &Daemon{} - dconfigCorrect := &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "consul://localhost:8500", - ClusterAdvertise: "192.168.0.1:8000", - }, - } - - if _, err := daemon.networkOptions(dconfigCorrect, nil); err != nil { - t.Fatalf("Expected networkOptions success, got error: %v", err) - } - - dconfigWrong := &Config{ - CommonConfig: CommonConfig{ - ClusterStore: "consul://localhost:8500://test://bbb", - }, - } - - if _, err := daemon.networkOptions(dconfigWrong, nil); err == nil { - t.Fatalf("Expected networkOptions error, got nil") - } -} diff --git a/daemon/daemon_unsupported.go b/daemon/daemon_unsupported.go deleted file mode 100644 index cb1acf63d6..0000000000 ---
a/daemon/daemon_unsupported.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !linux,!freebsd,!windows,!solaris - -package daemon - -const platformSupported = false diff --git a/daemon/daemon_windows.go b/daemon/daemon_windows.go deleted file mode 100644 index aa8360d6a7..0000000000 --- a/daemon/daemon_windows.go +++ /dev/null @@ -1,525 +0,0 @@ -package daemon - -import ( - "encoding/json" - "errors" - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/daemon/graphdriver/windows" // register the windows graph driver - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/reference" - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types" - pblkiodev "github.com/docker/engine-api/types/blkiodev" - containertypes "github.com/docker/engine-api/types/container" - "github.com/docker/libnetwork" - nwconfig "github.com/docker/libnetwork/config" - winlibnetwork "github.com/docker/libnetwork/drivers/windows" - "github.com/docker/libnetwork/netlabel" - "github.com/docker/libnetwork/options" - blkiodev "github.com/opencontainers/runc/libcontainer/configs" -) - -const ( - defaultNetworkSpace = "172.16.0.0/12" - platformSupported = true - windowsMinCPUShares = 1 - windowsMaxCPUShares = 10000 -) - -func getBlkioWeightDevices(config *containertypes.HostConfig) ([]blkiodev.WeightDevice, error) { - return nil, nil -} - -func parseSecurityOpt(container *container.Container, config *containertypes.HostConfig) error { - return nil -} - -func getBlkioReadIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil -} - -func getBlkioWriteIOpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil -} - -func getBlkioReadBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil -} - -func getBlkioWriteBpsDevices(config *containertypes.HostConfig) ([]blkiodev.ThrottleDevice, error) { - return nil, nil -} - -func setupInitLayer(initLayer string, rootUID, rootGID int) error { - return nil -} - -func checkKernel() error { - return nil -} - -func (daemon *Daemon) getCgroupDriver() string { - return "" -} - -// adaptContainerSettings is called during container creation to modify any -// settings necessary in the HostConfig structure. 
-func (daemon *Daemon) adaptContainerSettings(hostConfig *containertypes.HostConfig, adjustCPUShares bool) error { - if hostConfig == nil { - return nil - } - - if hostConfig.CPUShares < 0 { - logrus.Warnf("Changing requested CPUShares of %d to minimum allowed of %d", hostConfig.CPUShares, windowsMinCPUShares) - hostConfig.CPUShares = windowsMinCPUShares - } else if hostConfig.CPUShares > windowsMaxCPUShares { - logrus.Warnf("Changing requested CPUShares of %d to maximum allowed of %d", hostConfig.CPUShares, windowsMaxCPUShares) - hostConfig.CPUShares = windowsMaxCPUShares - } - - return nil -} - -func verifyContainerResources(resources *containertypes.Resources, sysInfo *sysinfo.SysInfo) ([]string, error) { - warnings := []string{} - - // cpu subsystem checks and adjustments - if resources.CPUPercent < 0 || resources.CPUPercent > 100 { - return warnings, fmt.Errorf("Range of CPU percent is from 1 to 100") - } - - if resources.CPUPercent > 0 && resources.CPUShares > 0 { - return warnings, fmt.Errorf("Conflicting options: CPU Shares and CPU Percent cannot both be set") - } - - // TODO Windows: Add more validation of resource settings not supported on Windows - - if resources.BlkioWeight > 0 { - warnings = append(warnings, "Windows does not support Block I/O weight. Weight discarded.") - logrus.Warn("Windows does not support Block I/O weight. --blkio-weight discarded.") - resources.BlkioWeight = 0 - } - if len(resources.BlkioWeightDevice) > 0 { - warnings = append(warnings, "Windows does not support Block I/O weight_device.") - logrus.Warn("Windows does not support Block I/O weight_device. --blkio-weight-device discarded.") - resources.BlkioWeightDevice = []*pblkiodev.WeightDevice{} - } - if len(resources.BlkioDeviceReadBps) > 0 { - warnings = append(warnings, "Windows does not support Block read limit in bytes per second.") - logrus.Warn("Windows does not support Block I/O read limit in bytes per second. --device-read-bps discarded.") - resources.BlkioDeviceReadBps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceWriteBps) > 0 { - warnings = append(warnings, "Windows does not support Block write limit in bytes per second.") - logrus.Warn("Windows does not support Block I/O write limit in bytes per second. --device-write-bps discarded.") - resources.BlkioDeviceWriteBps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceReadIOps) > 0 { - warnings = append(warnings, "Windows does not support Block read limit in IO per second.") - logrus.Warn("Windows does not support Block I/O read limit in IO per second. -device-read-iops discarded.") - resources.BlkioDeviceReadIOps = []*pblkiodev.ThrottleDevice{} - } - if len(resources.BlkioDeviceWriteIOps) > 0 { - warnings = append(warnings, "Windows does not support Block write limit in IO per second.") - logrus.Warn("Windows does not support Block I/O write limit in IO per second. --device-write-iops discarded.") - resources.BlkioDeviceWriteIOps = []*pblkiodev.ThrottleDevice{} - } - return warnings, nil -} - -// verifyPlatformContainerSettings performs platform-specific validation of the -// hostconfig and config structures. -func verifyPlatformContainerSettings(daemon *Daemon, hostConfig *containertypes.HostConfig, config *containertypes.Config, update bool) ([]string, error) { - warnings := []string{} - - w, err := verifyContainerResources(&hostConfig.Resources, nil) - warnings = append(warnings, w...) 
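
adaptContainerSettings above clamps rather than rejects on Windows: negative CPU shares snap to the minimum of 1, values above 10000 snap to 10000, and zero (meaning unset) passes through untouched. A condensed sketch (clampCPUShares is a hypothetical helper):

package sketch

// clampCPUShares mirrors the Windows adaptContainerSettings above:
// only negative values are raised to the minimum (zero means "unset"
// and passes through); values over the maximum are lowered to it.
func clampCPUShares(shares int64) int64 {
	const windowsMinCPUShares, windowsMaxCPUShares = 1, 10000
	if shares < 0 {
		return windowsMinCPUShares
	}
	if shares > windowsMaxCPUShares {
		return windowsMaxCPUShares
	}
	return shares
}

So clampCPUShares(-5) == 1, clampCPUShares(20000) == 10000, and clampCPUShares(0) == 0, matching the behavior the deleted unix tests exercise for the Linux equivalents.
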
- if err != nil { - return warnings, err - } - - return warnings, nil -} - -// platformReload update configuration with platform specific options -func (daemon *Daemon) platformReload(config *Config, attributes *map[string]string) { -} - -// verifyDaemonSettings performs validation of daemon config struct -func verifyDaemonSettings(config *Config) error { - return nil -} - -// checkSystem validates platform-specific requirements -func checkSystem() error { - // Validate the OS version. Note that docker.exe must be manifested for this - // call to return the correct version. - osv := system.GetOSVersion() - if osv.MajorVersion < 10 { - return fmt.Errorf("This version of Windows does not support the docker daemon") - } - if osv.Build < 14300 { - return fmt.Errorf("The Windows daemon requires Windows Server 2016 Technical Preview 5 build 14300 or later") - } - return nil -} - -// configureKernelSecuritySupport configures and validate security support for the kernel -func configureKernelSecuritySupport(config *Config, driverName string) error { - return nil -} - -// configureMaxThreads sets the Go runtime max threads threshold -func configureMaxThreads(config *Config) error { - return nil -} - -func (daemon *Daemon) initNetworkController(config *Config, activeSandboxes map[string]interface{}) (libnetwork.NetworkController, error) { - netOptions, err := daemon.networkOptions(config, nil) - if err != nil { - return nil, err - } - controller, err := libnetwork.New(netOptions...) - if err != nil { - return nil, fmt.Errorf("error obtaining controller instance: %v", err) - } - - hnsresponse, err := hcsshim.HNSListNetworkRequest("GET", "", "") - if err != nil { - return nil, err - } - - // Remove networks not present in HNS - for _, v := range controller.Networks() { - options := v.Info().DriverOptions() - hnsid := options[winlibnetwork.HNSID] - found := false - - for _, v := range hnsresponse { - if v.Id == hnsid { - found = true - break - } - } - - if !found { - err = v.Delete() - if err != nil { - return nil, err - } - } - } - - _, err = controller.NewNetwork("null", "none", "", libnetwork.NetworkOptionPersist(false)) - if err != nil { - return nil, err - } - - // discover and add HNS networks to windows - // network that exist are removed and added again - for _, v := range hnsresponse { - var n libnetwork.Network - s := func(current libnetwork.Network) bool { - options := current.Info().DriverOptions() - if options[winlibnetwork.HNSID] == v.Id { - n = current - return true - } - return false - } - - controller.WalkNetworks(s) - if n != nil { - v.Name = n.Name() - n.Delete() - } - - netOption := map[string]string{ - winlibnetwork.NetworkName: v.Name, - winlibnetwork.HNSID: v.Id, - } - - v4Conf := []*libnetwork.IpamConf{} - for _, subnet := range v.Subnets { - ipamV4Conf := libnetwork.IpamConf{} - ipamV4Conf.PreferredPool = subnet.AddressPrefix - ipamV4Conf.Gateway = subnet.GatewayAddress - v4Conf = append(v4Conf, &ipamV4Conf) - } - - name := v.Name - // There is only one nat network supported in windows. 
- // If it exists with a different name add it as the default name - if runconfig.DefaultDaemonNetworkMode() == containertypes.NetworkMode(strings.ToLower(v.Type)) { - name = runconfig.DefaultDaemonNetworkMode().NetworkName() - } - - v6Conf := []*libnetwork.IpamConf{} - _, err := controller.NewNetwork(strings.ToLower(v.Type), name, "", - libnetwork.NetworkOptionGeneric(options.Generic{ - netlabel.GenericData: netOption, - }), - libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), - ) - - if err != nil { - logrus.Errorf("Error occurred when creating network %v", err) - } - } - - if !config.DisableBridge { - // Initialize default driver "bridge" - if err := initBridgeDriver(controller, config); err != nil { - return nil, err - } - } - - return controller, nil -} - -func initBridgeDriver(controller libnetwork.NetworkController, config *Config) error { - if _, err := controller.NetworkByName(runconfig.DefaultDaemonNetworkMode().NetworkName()); err == nil { - return nil - } - - netOption := map[string]string{ - winlibnetwork.NetworkName: runconfig.DefaultDaemonNetworkMode().NetworkName(), - } - - ipamV4Conf := libnetwork.IpamConf{} - if config.bridgeConfig.FixedCIDR == "" { - ipamV4Conf.PreferredPool = defaultNetworkSpace - } else { - ipamV4Conf.PreferredPool = config.bridgeConfig.FixedCIDR - } - - v4Conf := []*libnetwork.IpamConf{&ipamV4Conf} - v6Conf := []*libnetwork.IpamConf{} - - _, err := controller.NewNetwork(string(runconfig.DefaultDaemonNetworkMode()), runconfig.DefaultDaemonNetworkMode().NetworkName(), "", - libnetwork.NetworkOptionGeneric(options.Generic{ - netlabel.GenericData: netOption, - }), - libnetwork.NetworkOptionIpam("default", "", v4Conf, v6Conf, nil), - ) - - if err != nil { - return fmt.Errorf("Error creating default network: %v", err) - } - return nil -} - -// registerLinks sets up links between containers and writes the -// configuration out for persistence. As of Windows TP4, links are not supported. -func (daemon *Daemon) registerLinks(container *container.Container, hostConfig *containertypes.HostConfig) error { - return nil -} - -func (daemon *Daemon) cleanupMountsByID(in string) error { - return nil -} - -func (daemon *Daemon) cleanupMounts() error { - return nil -} - -func setupRemappedRoot(config *Config) ([]idtools.IDMap, []idtools.IDMap, error) { - return nil, nil, nil -} - -func setupDaemonRoot(config *Config, rootDir string, rootUID, rootGID int) error { - config.Root = rootDir - // Create the root directory if it doesn't exists - if err := system.MkdirAll(config.Root, 0700); err != nil && !os.IsExist(err) { - return err - } - return nil -} - -// runasHyperVContainer returns true if we are going to run as a Hyper-V container -func (daemon *Daemon) runAsHyperVContainer(container *container.Container) bool { - if container.HostConfig.Isolation.IsDefault() { - // Container is set to use the default, so take the default from the daemon configuration - return daemon.defaultIsolation.IsHyperV() - } - - // Container is requesting an isolation mode. Honour it. - return container.HostConfig.Isolation.IsHyperV() - -} - -// conditionalMountOnStart is a platform specific helper function during the -// container start to call mount. 
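
runAsHyperVContainer above resolves the effective isolation in two steps: an explicit per-container request wins, otherwise the daemon-wide default applies. A rough sketch with a stand-in Isolation type (this assumes, as the real containertypes.Isolation does, that the empty string also counts as "default"):

package sketch

// Isolation is a stand-in for containertypes.Isolation.
type Isolation string

// runsHyperV mirrors runAsHyperVContainer above: containers asking for
// the default isolation inherit the daemon-wide setting; an explicit
// request is honoured as given.
func runsHyperV(containerIsolation, daemonDefault Isolation) bool {
	if containerIsolation == "" || containerIsolation == "default" {
		return daemonDefault == "hyperv"
	}
	return containerIsolation == "hyperv"
}
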
-func (daemon *Daemon) conditionalMountOnStart(container *container.Container) error { - // We do not mount if a Hyper-V container - if !daemon.runAsHyperVContainer(container) { - return daemon.Mount(container) - } - return nil -} - -// conditionalUnmountOnCleanup is a platform specific helper function called -// during the cleanup of a container to unmount. -func (daemon *Daemon) conditionalUnmountOnCleanup(container *container.Container) error { - // We do not unmount if a Hyper-V container - if !daemon.runAsHyperVContainer(container) { - return daemon.Unmount(container) - } - return nil -} - -func restoreCustomImage(is image.Store, ls layer.Store, rs reference.Store) error { - type graphDriverStore interface { - GraphDriver() graphdriver.Driver - } - - gds, ok := ls.(graphDriverStore) - if !ok { - return nil - } - - driver := gds.GraphDriver() - wd, ok := driver.(*windows.Driver) - if !ok { - return nil - } - - imageInfos, err := wd.GetCustomImageInfos() - if err != nil { - return err - } - - // Convert imageData to valid image configuration - for _, info := range imageInfos { - name := strings.ToLower(info.Name) - - type registrar interface { - RegisterDiffID(graphID string, size int64) (layer.Layer, error) - } - r, ok := ls.(registrar) - if !ok { - return errors.New("Layerstore doesn't support RegisterDiffID") - } - if _, err := r.RegisterDiffID(info.ID, info.Size); err != nil { - return err - } - // layer is intentionally not released - - rootFS := image.NewRootFSWithBaseLayer(filepath.Base(info.Path)) - - // Create history for base layer - config, err := json.Marshal(&image.Image{ - V1Image: image.V1Image{ - DockerVersion: dockerversion.Version, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - Created: info.CreatedTime, - }, - RootFS: rootFS, - History: []image.History{}, - OSVersion: info.OSVersion, - OSFeatures: info.OSFeatures, - }) - - named, err := reference.ParseNamed(name) - if err != nil { - return err - } - - ref, err := reference.WithTag(named, info.Version) - if err != nil { - return err - } - - id, err := is.Create(config) - if err != nil { - logrus.Warnf("Failed to restore custom image %s with error: %s.", name, err) - logrus.Warnf("Skipping image %s...", name) - continue - } - - if err := rs.AddTag(ref, id, true); err != nil { - return err - } - - logrus.Debugf("Registered base layer %s as %s", ref, id) - } - return nil -} - -func driverOptions(config *Config) []nwconfig.Option { - return []nwconfig.Option{} -} - -func (daemon *Daemon) stats(c *container.Container) (*types.StatsJSON, error) { - return nil, nil -} - -// setDefaultIsolation determine the default isolation mode for the -// daemon to run in. 
This is only applicable on Windows -func (daemon *Daemon) setDefaultIsolation() error { - daemon.defaultIsolation = containertypes.Isolation("process") - // On client SKUs, default to Hyper-V - if system.IsWindowsClient() { - daemon.defaultIsolation = containertypes.Isolation("hyperv") - } - for _, option := range daemon.configStore.ExecOptions { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return err - } - key = strings.ToLower(key) - switch key { - - case "isolation": - if !containertypes.Isolation(val).IsValid() { - return fmt.Errorf("Invalid exec-opt value for 'isolation':'%s'", val) - } - if containertypes.Isolation(val).IsHyperV() { - daemon.defaultIsolation = containertypes.Isolation("hyperv") - } - if containertypes.Isolation(val).IsProcess() { - if system.IsWindowsClient() { - return fmt.Errorf("Windows client operating systems only support Hyper-V containers") - } - daemon.defaultIsolation = containertypes.Isolation("process") - } - default: - return fmt.Errorf("Unrecognised exec-opt '%s'\n", key) - } - } - - logrus.Infof("Windows default isolation mode: %s", daemon.defaultIsolation) - return nil -} - -func rootFSToAPIType(rootfs *image.RootFS) types.RootFS { - var layers []string - for _, l := range rootfs.DiffIDs { - layers = append(layers, l.String()) - } - return types.RootFS{ - Type: rootfs.Type, - Layers: layers, - BaseLayer: rootfs.BaseLayer, - } -} - -func setupDaemonProcess(config *Config) error { - return nil -} diff --git a/daemon/debugtrap_unix.go b/daemon/debugtrap_unix.go deleted file mode 100644 index c4a11b07fa..0000000000 --- a/daemon/debugtrap_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package daemon - -import ( - "os" - "os/signal" - "syscall" - - psignal "github.com/docker/docker/pkg/signal" -) - -func setupDumpStackTrap() { - c := make(chan os.Signal, 1) - signal.Notify(c, syscall.SIGUSR1) - go func() { - for range c { - psignal.DumpStacks() - } - }() -} diff --git a/daemon/debugtrap_unsupported.go b/daemon/debugtrap_unsupported.go deleted file mode 100644 index eed8222f79..0000000000 --- a/daemon/debugtrap_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux,!darwin,!freebsd,!windows,!solaris - -package daemon - -func setupDumpStackTrap() { - return -} diff --git a/daemon/debugtrap_windows.go b/daemon/debugtrap_windows.go deleted file mode 100644 index 0eebc46ed5..0000000000 --- a/daemon/debugtrap_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package daemon - -import ( - "fmt" - "os" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/system" -) - -func setupDumpStackTrap() { - // Windows does not support signals like *nix systems. So instead of - // trapping on SIGUSR1 to dump stacks, we wait on a Win32 event to be - // signaled. 
- go func() { - sa := syscall.SecurityAttributes{ - Length: 0, - } - ev := "Global\\docker-daemon-" + fmt.Sprint(os.Getpid()) - if h, _ := system.CreateEvent(&sa, false, false, ev); h != 0 { - logrus.Debugf("Stackdump - waiting signal at %s", ev) - for { - syscall.WaitForSingleObject(h, syscall.INFINITE) - signal.DumpStacks() - } - } - }() -} diff --git a/daemon/delete.go b/daemon/delete.go deleted file mode 100644 index ec9d5c5f18..0000000000 --- a/daemon/delete.go +++ /dev/null @@ -1,157 +0,0 @@ -package daemon - -import ( - "fmt" - "os" - "path" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/errors" - "github.com/docker/docker/layer" - volumestore "github.com/docker/docker/volume/store" - "github.com/docker/engine-api/types" -) - -// ContainerRm removes the container id from the filesystem. An error -// is returned if the container is not found, or if the remove -// fails. If the remove succeeds, the container name is released, and -// network links are removed. -func (daemon *Daemon) ContainerRm(name string, config *types.ContainerRmConfig) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - // Container state RemovalInProgress should be used to avoid races. - if inProgress := container.SetRemovalInProgress(); inProgress { - return nil - } - defer container.ResetRemovalInProgress() - - // check if container wasn't deregistered by previous rm since Get - if c := daemon.containers.Get(container.ID); c == nil { - return nil - } - - if config.RemoveLink { - return daemon.rmLink(container, name) - } - - err = daemon.cleanupContainer(container, config.ForceRemove) - if err == nil || config.ForceRemove { - if e := daemon.removeMountPoints(container, config.RemoveVolume); e != nil { - logrus.Error(e) - } - } - - return err -} - -func (daemon *Daemon) rmLink(container *container.Container, name string) error { - if name[0] != '/' { - name = "/" + name - } - parent, n := path.Split(name) - if parent == "/" { - return fmt.Errorf("Conflict, cannot remove the default name of the container") - } - - parent = strings.TrimSuffix(parent, "/") - pe, err := daemon.nameIndex.Get(parent) - if err != nil { - return fmt.Errorf("Cannot get parent %s for name %s", parent, name) - } - - daemon.releaseName(name) - parentContainer, _ := daemon.GetContainer(pe) - if parentContainer != nil { - daemon.linkIndex.unlink(name, container, parentContainer) - if err := daemon.updateNetwork(parentContainer); err != nil { - logrus.Debugf("Could not update network to remove link %s: %v", n, err) - } - } - return nil -} - -// cleanupContainer unregisters a container from the daemon, stops stats -// collection and cleanly removes contents and metadata from the filesystem. -func (daemon *Daemon) cleanupContainer(container *container.Container, forceRemove bool) (err error) { - if container.IsRunning() { - if !forceRemove { - err := fmt.Errorf("You cannot remove a running container %s. Stop the container before attempting removal or use -f", container.ID) - return errors.NewRequestConflictError(err) - } - if err := daemon.Kill(container); err != nil { - return fmt.Errorf("Could not kill running container %s, cannot remove - %v", container.ID, err) - } - } - - // stop collection of stats for the container regardless - // if stats are currently getting collected. - daemon.statsCollector.stopCollection(container) - - if err = daemon.containerStop(container, 3); err != nil { - return err - } - - // Mark container dead. 
We don't want anybody to be restarting it. - container.SetDead() - - // Save container state to disk. So that if error happens before - // container meta file got removed from disk, then a restart of - // docker should not make a dead container alive. - if err := container.ToDiskLocking(); err != nil && !os.IsNotExist(err) { - logrus.Errorf("Error saving dying container to disk: %v", err) - } - - // If force removal is required, delete container from various - // indexes even if removal failed. - defer func() { - if err == nil || forceRemove { - daemon.nameIndex.Delete(container.ID) - daemon.linkIndex.delete(container) - selinuxFreeLxcContexts(container.ProcessLabel) - daemon.idIndex.Delete(container.ID) - daemon.containers.Delete(container.ID) - daemon.LogContainerEvent(container, "destroy") - } - }() - - if err = os.RemoveAll(container.Root); err != nil { - return fmt.Errorf("Unable to remove filesystem for %v: %v", container.ID, err) - } - - // When container creation fails and `RWLayer` has not been created yet, we - // do not call `ReleaseRWLayer` - if container.RWLayer != nil { - metadata, err := daemon.layerStore.ReleaseRWLayer(container.RWLayer) - layer.LogReleaseMetadata(metadata) - if err != nil && err != layer.ErrMountDoesNotExist { - return fmt.Errorf("Driver %s failed to remove root filesystem %s: %s", daemon.GraphDriverName(), container.ID, err) - } - } - - return nil -} - -// VolumeRm removes the volume with the given name. -// If the volume is referenced by a container it is not removed -// This is called directly from the remote API -func (daemon *Daemon) VolumeRm(name string) error { - v, err := daemon.volumes.Get(name) - if err != nil { - return err - } - - if err := daemon.volumes.Remove(v); err != nil { - if volumestore.IsInUse(err) { - err := fmt.Errorf("Unable to remove volume, volume still in use: %v", err) - return errors.NewRequestConflictError(err) - } - return fmt.Errorf("Error while removing volume %s: %v", name, err) - } - daemon.LogVolumeEvent(v.Name(), "destroy", map[string]string{"driver": v.DriverName()}) - return nil -} diff --git a/daemon/delete_test.go b/daemon/delete_test.go deleted file mode 100644 index 9db83b0685..0000000000 --- a/daemon/delete_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package daemon - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/docker/container" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" -) - -func TestContainerDoubleDelete(t *testing.T) { - tmp, err := ioutil.TempDir("", "docker-daemon-unix-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - daemon := &Daemon{ - repository: tmp, - root: tmp, - } - daemon.containers = container.NewMemoryStore() - - container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "test", - State: container.NewState(), - Config: &containertypes.Config{}, - }, - } - daemon.containers.Add(container.ID, container) - - // Mark the container as having a delete in progress - container.SetRemovalInProgress() - - // Try to remove the container when its start is removalInProgress. - // It should ignore the container and not return an error. 
- if err := daemon.ContainerRm(container.ID, &types.ContainerRmConfig{ForceRemove: true}); err != nil { - t.Fatal(err) - } -} diff --git a/daemon/discovery.go b/daemon/discovery.go deleted file mode 100644 index 30d2e02a71..0000000000 --- a/daemon/discovery.go +++ /dev/null @@ -1,203 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "reflect" - "strconv" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/discovery" - - // Register the libkv backends for discovery. - _ "github.com/docker/docker/pkg/discovery/kv" -) - -const ( - // defaultDiscoveryHeartbeat is the default value for discovery heartbeat interval. - defaultDiscoveryHeartbeat = 20 * time.Second - // defaultDiscoveryTTLFactor is the default TTL factor for discovery - defaultDiscoveryTTLFactor = 3 -) - -var errDiscoveryDisabled = errors.New("discovery is disabled") - -type discoveryReloader interface { - discovery.Watcher - Stop() - Reload(backend, address string, clusterOpts map[string]string) error - ReadyCh() <-chan struct{} -} - -type daemonDiscoveryReloader struct { - backend discovery.Backend - ticker *time.Ticker - term chan bool - readyCh chan struct{} -} - -func (d *daemonDiscoveryReloader) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - return d.backend.Watch(stopCh) -} - -func (d *daemonDiscoveryReloader) ReadyCh() <-chan struct{} { - return d.readyCh -} - -func discoveryOpts(clusterOpts map[string]string) (time.Duration, time.Duration, error) { - var ( - heartbeat = defaultDiscoveryHeartbeat - ttl = defaultDiscoveryTTLFactor * defaultDiscoveryHeartbeat - ) - - if hb, ok := clusterOpts["discovery.heartbeat"]; ok { - h, err := strconv.Atoi(hb) - if err != nil { - return time.Duration(0), time.Duration(0), err - } - heartbeat = time.Duration(h) * time.Second - ttl = defaultDiscoveryTTLFactor * heartbeat - } - - if tstr, ok := clusterOpts["discovery.ttl"]; ok { - t, err := strconv.Atoi(tstr) - if err != nil { - return time.Duration(0), time.Duration(0), err - } - ttl = time.Duration(t) * time.Second - - if _, ok := clusterOpts["discovery.heartbeat"]; !ok { - h := int(t / defaultDiscoveryTTLFactor) - heartbeat = time.Duration(h) * time.Second - } - - if ttl <= heartbeat { - return time.Duration(0), time.Duration(0), - fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat") - } - } - - return heartbeat, ttl, nil -} - -// initDiscovery initializes the nodes discovery subsystem by connecting to the specified backend -// and starts a registration loop to advertise the current node under the specified address. -func initDiscovery(backendAddress, advertiseAddress string, clusterOpts map[string]string) (discoveryReloader, error) { - heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) - if err != nil { - return nil, err - } - - reloader := &daemonDiscoveryReloader{ - backend: backend, - ticker: time.NewTicker(heartbeat), - term: make(chan bool), - readyCh: make(chan struct{}), - } - // We call Register() on the discovery backend in a loop for the whole lifetime of the daemon, - // but we never actually Watch() for nodes appearing and disappearing for the moment. - go reloader.advertiseHeartbeat(advertiseAddress) - return reloader, nil -} - -// advertiseHeartbeat registers the current node against the discovery backend using the specified -// address. The function never returns, as registration against the backend comes with a TTL and -// requires regular heartbeats. 
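
discoveryOpts above encodes three rules: the TTL defaults to three heartbeats, a TTL supplied on its own implies a heartbeat of ttl/3, and a TTL that does not exceed the heartbeat is rejected. A condensed sketch in plain seconds (the real code parses the option strings and works in time.Duration):

package sketch

import "fmt"

// deriveDiscoveryTimers condenses the discoveryOpts rules: zero means
// "not supplied"; the TTL factor is 3 and the default heartbeat 20s.
func deriveDiscoveryTimers(heartbeat, ttl int) (int, int, error) {
	const factor = 3
	if heartbeat == 0 && ttl == 0 {
		return 20, 60, nil // built-in defaults
	}
	if heartbeat == 0 {
		heartbeat = ttl / factor
	}
	if ttl == 0 {
		ttl = heartbeat * factor
	}
	if ttl <= heartbeat {
		return 0, 0, fmt.Errorf("discovery.ttl timer must be greater than discovery.heartbeat")
	}
	return heartbeat, ttl, nil
}

For example, a lone discovery.ttl of 30 yields a heartbeat of 10 and a TTL of 30, exactly the case TestDiscoveryOpts below checks.
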
-func (d *daemonDiscoveryReloader) advertiseHeartbeat(address string) { - var ready bool - if err := d.initHeartbeat(address); err == nil { - ready = true - close(d.readyCh) - } - - for { - select { - case <-d.ticker.C: - if err := d.backend.Register(address); err != nil { - log.Warnf("Registering as %q in discovery failed: %v", address, err) - } else { - if !ready { - close(d.readyCh) - ready = true - } - } - case <-d.term: - return - } - } -} - -// initHeartbeat is used to do the first heartbeat. It uses a tight loop until -// either the timeout period is reached or the heartbeat is successful and returns. -func (d *daemonDiscoveryReloader) initHeartbeat(address string) error { - // Setup a short ticker until the first heartbeat has succeeded - t := time.NewTicker(500 * time.Millisecond) - defer t.Stop() - // timeout makes sure that after a period of time we stop being so aggressive trying to reach the discovery service - timeout := time.After(60 * time.Second) - - for { - select { - case <-timeout: - return errors.New("timeout waiting for initial discovery") - case <-d.term: - return errors.New("terminated") - case <-t.C: - if err := d.backend.Register(address); err == nil { - return nil - } - } - } -} - -// Reload makes the watcher to stop advertising and reconfigures it to advertise in a new address. -func (d *daemonDiscoveryReloader) Reload(backendAddress, advertiseAddress string, clusterOpts map[string]string) error { - d.Stop() - - heartbeat, backend, err := parseDiscoveryOptions(backendAddress, clusterOpts) - if err != nil { - return err - } - - d.backend = backend - d.ticker = time.NewTicker(heartbeat) - d.readyCh = make(chan struct{}) - - go d.advertiseHeartbeat(advertiseAddress) - return nil -} - -// Stop terminates the discovery advertising. -func (d *daemonDiscoveryReloader) Stop() { - d.ticker.Stop() - d.term <- true -} - -func parseDiscoveryOptions(backendAddress string, clusterOpts map[string]string) (time.Duration, discovery.Backend, error) { - heartbeat, ttl, err := discoveryOpts(clusterOpts) - if err != nil { - return 0, nil, err - } - - backend, err := discovery.New(backendAddress, heartbeat, ttl, clusterOpts) - if err != nil { - return 0, nil, err - } - return heartbeat, backend, nil -} - -// modifiedDiscoverySettings returns whether the discovery configuration has been modified or not. 
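initHeartbeat above is a reusable shape: poll on a short ticker until the first success, give up after a deadline, and bail out early on termination. Generalized into a small helper (the name is ours; passing a nil stop channel is deliberate, since a receive on a nil channel never fires):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// retryUntil keeps calling fn on every tick until it succeeds, the
// timeout elapses, or stop is closed — the same shape as initHeartbeat.
func retryUntil(fn func() error, tick, timeout time.Duration, stop <-chan struct{}) error {
	t := time.NewTicker(tick)
	defer t.Stop()
	deadline := time.After(timeout)

	for {
		select {
		case <-deadline:
			return errors.New("timeout waiting for first success")
		case <-stop:
			return errors.New("terminated")
		case <-t.C:
			if err := fn(); err == nil {
				return nil
			}
		}
	}
}

func main() {
	n := 0
	err := retryUntil(func() error {
		n++
		if n < 3 {
			return errors.New("not ready")
		}
		return nil
	}, 10*time.Millisecond, time.Second, nil)
	fmt.Println(err, n) // <nil> 3
}
```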
-func modifiedDiscoverySettings(config *Config, backendType, advertise string, clusterOpts map[string]string) bool { - if config.ClusterStore != backendType || config.ClusterAdvertise != advertise { - return true - } - - if (config.ClusterOpts == nil && clusterOpts == nil) || - (config.ClusterOpts == nil && len(clusterOpts) == 0) || - (len(config.ClusterOpts) == 0 && clusterOpts == nil) { - return false - } - - return !reflect.DeepEqual(config.ClusterOpts, clusterOpts) -} diff --git a/daemon/discovery_test.go b/daemon/discovery_test.go deleted file mode 100644 index 1764af1e9f..0000000000 --- a/daemon/discovery_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package daemon - -import ( - "testing" - "time" -) - -func TestDiscoveryOpts(t *testing.T) { - clusterOpts := map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "5"} - heartbeat, ttl, err := discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("discovery.ttl < discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("discovery.ttl == discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "invalid"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("invalid discovery.heartbeat must fail") - } - - clusterOpts = map[string]string{"discovery.ttl": "invalid"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err == nil { - t.Fatalf("invalid discovery.ttl must fail") - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10", "discovery.ttl": "20"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != 10*time.Second { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) - } - - if ttl != 20*time.Second { - t.Fatalf("TTL - Expected : %v, Actual : %v", 20*time.Second, ttl) - } - - clusterOpts = map[string]string{"discovery.heartbeat": "10"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != 10*time.Second { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", 10*time.Second, heartbeat) - } - - expected := 10 * defaultDiscoveryTTLFactor * time.Second - if ttl != expected { - t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) - } - - clusterOpts = map[string]string{"discovery.ttl": "30"} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if ttl != 30*time.Second { - t.Fatalf("TTL - Expected : %v, Actual : %v", 30*time.Second, ttl) - } - - expected = 30 * time.Second / defaultDiscoveryTTLFactor - if heartbeat != expected { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", expected, heartbeat) - } - - clusterOpts = map[string]string{} - heartbeat, ttl, err = discoveryOpts(clusterOpts) - if err != nil { - t.Fatal(err) - } - - if heartbeat != defaultDiscoveryHeartbeat { - t.Fatalf("Heartbeat - Expected : %v, Actual : %v", defaultDiscoveryHeartbeat, heartbeat) - } - - expected = defaultDiscoveryHeartbeat * defaultDiscoveryTTLFactor - if ttl != expected { - t.Fatalf("TTL - Expected : %v, Actual : %v", expected, ttl) - } -} - -func TestModifiedDiscoverySettings(t *testing.T) { - cases := []struct { - current *Config - modified *Config - expected bool - }{ - { - current: discoveryConfig("foo", "bar", map[string]string{}), - modified: discoveryConfig("foo", "bar", map[string]string{}), - expected: false, - }, - { - current:
discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), - modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), - expected: false, - }, - { - current: discoveryConfig("foo", "bar", map[string]string{}), - modified: discoveryConfig("foo", "bar", nil), - expected: false, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("foo", "bar", map[string]string{}), - expected: false, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("baz", "bar", nil), - expected: true, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("foo", "baz", nil), - expected: true, - }, - { - current: discoveryConfig("foo", "bar", nil), - modified: discoveryConfig("foo", "bar", map[string]string{"foo": "bar"}), - expected: true, - }, - } - - for _, c := range cases { - got := modifiedDiscoverySettings(c.current, c.modified.ClusterStore, c.modified.ClusterAdvertise, c.modified.ClusterOpts) - if c.expected != got { - t.Fatalf("expected %v, got %v: current config %v, new config %v", c.expected, got, c.current, c.modified) - } - } -} - -func discoveryConfig(backendAddr, advertiseAddr string, opts map[string]string) *Config { - return &Config{ - CommonConfig: CommonConfig{ - ClusterStore: backendAddr, - ClusterAdvertise: advertiseAddr, - ClusterOpts: opts, - }, - } -} diff --git a/daemon/errors.go b/daemon/errors.go deleted file mode 100644 index 131c9a1e22..0000000000 --- a/daemon/errors.go +++ /dev/null @@ -1,57 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - - "github.com/docker/docker/errors" - "github.com/docker/docker/reference" -) - -func (d *Daemon) imageNotExistToErrcode(err error) error { - if dne, isDNE := err.(ErrImageDoesNotExist); isDNE { - if strings.Contains(dne.RefOrID, "@") { - e := fmt.Errorf("No such image: %s", dne.RefOrID) - return errors.NewRequestNotFoundError(e) - } - tag := reference.DefaultTag - ref, err := reference.ParseNamed(dne.RefOrID) - if err != nil { - e := fmt.Errorf("No such image: %s:%s", dne.RefOrID, tag) - return errors.NewRequestNotFoundError(e) - } - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - tag = tagged.Tag() - } - e := fmt.Errorf("No such image: %s:%s", ref.Name(), tag) - return errors.NewRequestNotFoundError(e) - } - return err -} - -type errNotRunning struct { - containerID string -} - -func (e errNotRunning) Error() string { - return fmt.Sprintf("Container %s is not running", e.containerID) -} - -func (e errNotRunning) ContainerIsRunning() bool { - return false -} - -func errContainerIsRestarting(containerID string) error { - err := fmt.Errorf("Container %s is restarting, wait until the container is running", containerID) - return errors.NewRequestConflictError(err) -} - -func errExecNotFound(id string) error { - err := fmt.Errorf("No such exec instance '%s' found in daemon", id) - return errors.NewRequestNotFoundError(err) -} - -func errExecPaused(id string) error { - err := fmt.Errorf("Container %s is paused, unpause the container before exec", id) - return errors.NewRequestConflictError(err) -} diff --git a/daemon/events.go b/daemon/events.go deleted file mode 100644 index e01015777e..0000000000 --- a/daemon/events.go +++ /dev/null @@ -1,132 +0,0 @@ -package daemon - -import ( - "strings" - "time" - - "github.com/docker/docker/container" - daemonevents "github.com/docker/docker/daemon/events" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" - "github.com/docker/libnetwork" -) - 
-// LogContainerEvent generates an event related to a container with only the default attributes. -func (daemon *Daemon) LogContainerEvent(container *container.Container, action string) { - daemon.LogContainerEventWithAttributes(container, action, map[string]string{}) -} - -// LogContainerEventWithAttributes generates an event related to a container with specific given attributes. -func (daemon *Daemon) LogContainerEventWithAttributes(container *container.Container, action string, attributes map[string]string) { - copyAttributes(attributes, container.Config.Labels) - if container.Config.Image != "" { - attributes["image"] = container.Config.Image - } - attributes["name"] = strings.TrimLeft(container.Name, "/") - - actor := events.Actor{ - ID: container.ID, - Attributes: attributes, - } - daemon.EventsService.Log(action, events.ContainerEventType, actor) -} - -// LogImageEvent generates an event related to an image with only the default attributes. -func (daemon *Daemon) LogImageEvent(imageID, refName, action string) { - daemon.LogImageEventWithAttributes(imageID, refName, action, map[string]string{}) -} - -// LogImageEventWithAttributes generates an event related to an image with specific given attributes. -func (daemon *Daemon) LogImageEventWithAttributes(imageID, refName, action string, attributes map[string]string) { - img, err := daemon.GetImage(imageID) - if err == nil && img.Config != nil { - // image has not been removed yet. - // it could be missing if the event is `delete`. - copyAttributes(attributes, img.Config.Labels) - } - if refName != "" { - attributes["name"] = refName - } - actor := events.Actor{ - ID: imageID, - Attributes: attributes, - } - - daemon.EventsService.Log(action, events.ImageEventType, actor) -} - -// LogPluginEvent generates an event related to a plugin with only the default attributes. -func (daemon *Daemon) LogPluginEvent(pluginID, refName, action string) { - daemon.LogPluginEventWithAttributes(pluginID, refName, action, map[string]string{}) -} - -// LogPluginEventWithAttributes generates an event related to a plugin with specific given attributes. -func (daemon *Daemon) LogPluginEventWithAttributes(pluginID, refName, action string, attributes map[string]string) { - attributes["name"] = refName - actor := events.Actor{ - ID: pluginID, - Attributes: attributes, - } - daemon.EventsService.Log(action, events.PluginEventType, actor) -} - -// LogVolumeEvent generates an event related to a volume. -func (daemon *Daemon) LogVolumeEvent(volumeID, action string, attributes map[string]string) { - actor := events.Actor{ - ID: volumeID, - Attributes: attributes, - } - daemon.EventsService.Log(action, events.VolumeEventType, actor) -} - -// LogNetworkEvent generates an event related to a network with only the default attributes. -func (daemon *Daemon) LogNetworkEvent(nw libnetwork.Network, action string) { - daemon.LogNetworkEventWithAttributes(nw, action, map[string]string{}) -} - -// LogNetworkEventWithAttributes generates an event related to a network with specific given attributes. -func (daemon *Daemon) LogNetworkEventWithAttributes(nw libnetwork.Network, action string, attributes map[string]string) { - attributes["name"] = nw.Name() - attributes["type"] = nw.Type() - actor := events.Actor{ - ID: nw.ID(), - Attributes: attributes, - } - daemon.EventsService.Log(action, events.NetworkEventType, actor) -} - -// LogDaemonEventWithAttributes generates an event related to the daemon itself with specific given attributes. 
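Every Log*Event helper above makes the same move: enrich the caller's attribute map with a few well-known keys, wrap it in an events.Actor, and hand it to the events service. A sketch with local stand-ins for the engine-api types (the emit callback replaces EventsService.Log):

```go
package main

import "fmt"

// Actor and Message mirror the shape of the engine-api event types used
// above: every event names an action, a type, and an actor with
// free-form key/value attributes.
type Actor struct {
	ID         string
	Attributes map[string]string
}

type Message struct {
	Action string
	Type   string
	Actor  Actor
}

// logNetworkEvent shows the attribute-enrichment pattern shared by the
// Log*EventWithAttributes helpers: callers pass extra attributes, the
// helper adds the well-known ones before emitting.
func logNetworkEvent(emit func(Message), id, name, nwType, action string, attributes map[string]string) {
	if attributes == nil {
		attributes = map[string]string{}
	}
	attributes["name"] = name
	attributes["type"] = nwType
	emit(Message{Action: action, Type: "network", Actor: Actor{ID: id, Attributes: attributes}})
}

func main() {
	logNetworkEvent(func(m Message) {
		fmt.Printf("%s %s %s %v\n", m.Type, m.Action, m.Actor.ID, m.Actor.Attributes)
	}, "net1", "bridge", "bridge", "connect", map[string]string{"container": "abc"})
}
```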
-func (daemon *Daemon) LogDaemonEventWithAttributes(action string, attributes map[string]string) { - if daemon.EventsService != nil { - if info, err := daemon.SystemInfo(); err == nil && info.Name != "" { - attributes["name"] = info.Name - } - actor := events.Actor{ - ID: daemon.ID, - Attributes: attributes, - } - daemon.EventsService.Log(action, events.DaemonEventType, actor) - } -} - -// SubscribeToEvents returns the currently record of events, a channel to stream new events from, and a function to cancel the stream of events. -func (daemon *Daemon) SubscribeToEvents(since, until time.Time, filter filters.Args) ([]events.Message, chan interface{}) { - ef := daemonevents.NewFilter(filter) - return daemon.EventsService.SubscribeTopic(since, until, ef) -} - -// UnsubscribeFromEvents stops the event subscription for a client by closing the -// channel where the daemon sends events to. -func (daemon *Daemon) UnsubscribeFromEvents(listener chan interface{}) { - daemon.EventsService.Evict(listener) -} - -// copyAttributes guarantees that labels are not mutated by event triggers. -func copyAttributes(attributes, labels map[string]string) { - if labels == nil { - return - } - for k, v := range labels { - attributes[k] = v - } -} diff --git a/daemon/events/events.go b/daemon/events/events.go deleted file mode 100644 index df2181fb09..0000000000 --- a/daemon/events/events.go +++ /dev/null @@ -1,154 +0,0 @@ -package events - -import ( - "sync" - "time" - - "github.com/docker/docker/pkg/pubsub" - eventtypes "github.com/docker/engine-api/types/events" -) - -const ( - eventsLimit = 64 - bufferSize = 1024 -) - -// Events is pubsub channel for events generated by the engine. -type Events struct { - mu sync.Mutex - events []eventtypes.Message - pub *pubsub.Publisher -} - -// New returns new *Events instance -func New() *Events { - return &Events{ - events: make([]eventtypes.Message, 0, eventsLimit), - pub: pubsub.NewPublisher(100*time.Millisecond, bufferSize), - } -} - -// Subscribe adds new listener to events, returns slice of 64 stored -// last events, a channel in which you can expect new events (in form -// of interface{}, so you need type assertion), and a function to call -// to stop the stream of events. -func (e *Events) Subscribe() ([]eventtypes.Message, chan interface{}, func()) { - e.mu.Lock() - current := make([]eventtypes.Message, len(e.events)) - copy(current, e.events) - l := e.pub.Subscribe() - e.mu.Unlock() - - cancel := func() { - e.Evict(l) - } - return current, l, cancel -} - -// SubscribeTopic adds new listener to events, returns slice of 64 stored -// last events, a channel in which you can expect new events (in form -// of interface{}, so you need type assertion). -func (e *Events) SubscribeTopic(since, until time.Time, ef *Filter) ([]eventtypes.Message, chan interface{}) { - e.mu.Lock() - - var topic func(m interface{}) bool - if ef != nil && ef.filter.Len() > 0 { - topic = func(m interface{}) bool { return ef.Include(m.(eventtypes.Message)) } - } - - buffered := e.loadBufferedEvents(since, until, topic) - - var ch chan interface{} - if topic != nil { - ch = e.pub.SubscribeTopic(topic) - } else { - // Subscribe to all events if there are no filters - ch = e.pub.Subscribe() - } - - e.mu.Unlock() - return buffered, ch -} - -// Evict evicts listener from pubsub -func (e *Events) Evict(l chan interface{}) { - e.pub.Evict(l) -} - -// Log broadcasts event to listeners. Each listener has 100 millisecond for -// receiving event or it will be skipped. 
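Subscribe above hands back three things: a copy of the recent-events buffer, a live channel of interface{} values that consumers type-assert, and a cancel function; the Log implementation that follows maintains that buffer as a fixed-size ring. A stripped-down broker showing both halves, with strings for messages and a capacity of 4 instead of the real 64:

```go
package main

import (
	"fmt"
	"sync"
)

// broker is a reduced version of the Events type above: it keeps a
// bounded backlog and hands new subscribers a copy of it plus a live
// channel, so late subscribers still see recent history.
type broker struct {
	mu      sync.Mutex
	backlog []string
	subs    []chan string
}

func (b *broker) Subscribe() ([]string, <-chan string, func()) {
	b.mu.Lock()
	defer b.mu.Unlock()
	current := append([]string(nil), b.backlog...) // copy, not a live view
	ch := make(chan string, 16)
	b.subs = append(b.subs, ch)
	cancel := func() { /* a real implementation would evict ch here */ }
	return current, ch, cancel
}

func (b *broker) Log(ev string) {
	b.mu.Lock()
	if len(b.backlog) == 4 { // at capacity: discard the oldest event
		copy(b.backlog, b.backlog[1:])
		b.backlog[len(b.backlog)-1] = ev
	} else {
		b.backlog = append(b.backlog, ev)
	}
	subs := append([]chan string(nil), b.subs...)
	b.mu.Unlock()
	for _, ch := range subs {
		ch <- ev
	}
}

func main() {
	b := &broker{}
	for i := 0; i < 6; i++ {
		b.Log(fmt.Sprintf("event_%d", i))
	}
	backlog, _, _ := b.Subscribe()
	fmt.Println(backlog) // [event_2 event_3 event_4 event_5]
}
```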
-func (e *Events) Log(action, eventType string, actor eventtypes.Actor) { - now := time.Now().UTC() - jm := eventtypes.Message{ - Action: action, - Type: eventType, - Actor: actor, - Time: now.Unix(), - TimeNano: now.UnixNano(), - } - - // fill deprecated fields for container and images - switch eventType { - case eventtypes.ContainerEventType: - jm.ID = actor.ID - jm.Status = action - jm.From = actor.Attributes["image"] - case eventtypes.ImageEventType: - jm.ID = actor.ID - jm.Status = action - } - - e.mu.Lock() - if len(e.events) == cap(e.events) { - // discard oldest event - copy(e.events, e.events[1:]) - e.events[len(e.events)-1] = jm - } else { - e.events = append(e.events, jm) - } - e.mu.Unlock() - e.pub.Publish(jm) -} - -// SubscribersCount returns number of event listeners -func (e *Events) SubscribersCount() int { - return e.pub.Len() -} - -// loadBufferedEvents iterates over the cached events in the buffer -// and returns those that were emitted between two specific dates. -// It uses `time.Unix(seconds, nanoseconds)` to generate valid dates with those arguments. -// It filters those buffered messages with a topic function if it's not nil, otherwise it adds all messages. -func (e *Events) loadBufferedEvents(since, until time.Time, topic func(interface{}) bool) []eventtypes.Message { - var buffered []eventtypes.Message - if since.IsZero() && until.IsZero() { - return buffered - } - - var sinceNanoUnix int64 - if !since.IsZero() { - sinceNanoUnix = since.UnixNano() - } - - var untilNanoUnix int64 - if !until.IsZero() { - untilNanoUnix = until.UnixNano() - } - - for i := len(e.events) - 1; i >= 0; i-- { - ev := e.events[i] - - if ev.TimeNano < sinceNanoUnix { - break - } - - if untilNanoUnix > 0 && ev.TimeNano > untilNanoUnix { - continue - } - - if topic == nil || topic(ev) { - buffered = append([]eventtypes.Message{ev}, buffered...) 
- } - } - return buffered -} diff --git a/daemon/events/events_test.go b/daemon/events/events_test.go deleted file mode 100644 index 0c8ee6b920..0000000000 --- a/daemon/events/events_test.go +++ /dev/null @@ -1,275 +0,0 @@ -package events - -import ( - "fmt" - "testing" - "time" - - "github.com/docker/docker/daemon/events/testutils" - "github.com/docker/engine-api/types/events" - timetypes "github.com/docker/engine-api/types/time" -) - -func TestEventsLog(t *testing.T) { - e := New() - _, l1, _ := e.Subscribe() - _, l2, _ := e.Subscribe() - defer e.Evict(l1) - defer e.Evict(l2) - count := e.SubscribersCount() - if count != 2 { - t.Fatalf("Must be 2 subscribers, got %d", count) - } - actor := events.Actor{ - ID: "cont", - Attributes: map[string]string{"image": "image"}, - } - e.Log("test", events.ContainerEventType, actor) - select { - case msg := <-l1: - jmsg, ok := msg.(events.Message) - if !ok { - t.Fatalf("Unexpected type %T", msg) - } - if len(e.events) != 1 { - t.Fatalf("Must be only one event, got %d", len(e.events)) - } - if jmsg.Status != "test" { - t.Fatalf("Status should be test, got %s", jmsg.Status) - } - if jmsg.ID != "cont" { - t.Fatalf("ID should be cont, got %s", jmsg.ID) - } - if jmsg.From != "image" { - t.Fatalf("From should be image, got %s", jmsg.From) - } - case <-time.After(1 * time.Second): - t.Fatal("Timeout waiting for broadcasted message") - } - select { - case msg := <-l2: - jmsg, ok := msg.(events.Message) - if !ok { - t.Fatalf("Unexpected type %T", msg) - } - if len(e.events) != 1 { - t.Fatalf("Must be only one event, got %d", len(e.events)) - } - if jmsg.Status != "test" { - t.Fatalf("Status should be test, got %s", jmsg.Status) - } - if jmsg.ID != "cont" { - t.Fatalf("ID should be cont, got %s", jmsg.ID) - } - if jmsg.From != "image" { - t.Fatalf("From should be image, got %s", jmsg.From) - } - case <-time.After(1 * time.Second): - t.Fatal("Timeout waiting for broadcasted message") - } -} - -func TestEventsLogTimeout(t *testing.T) { - e := New() - _, l, _ := e.Subscribe() - defer e.Evict(l) - - c := make(chan struct{}) - go func() { - actor := events.Actor{ - ID: "image", - } - e.Log("test", events.ImageEventType, actor) - close(c) - }() - - select { - case <-c: - case <-time.After(time.Second): - t.Fatal("Timeout publishing message") - } -} - -func TestLogEvents(t *testing.T) { - e := New() - - for i := 0; i < eventsLimit+16; i++ { - action := fmt.Sprintf("action_%d", i) - id := fmt.Sprintf("cont_%d", i) - from := fmt.Sprintf("image_%d", i) - - actor := events.Actor{ - ID: id, - Attributes: map[string]string{"image": from}, - } - e.Log(action, events.ContainerEventType, actor) - } - time.Sleep(50 * time.Millisecond) - current, l, _ := e.Subscribe() - for i := 0; i < 10; i++ { - num := i + eventsLimit + 16 - action := fmt.Sprintf("action_%d", num) - id := fmt.Sprintf("cont_%d", num) - from := fmt.Sprintf("image_%d", num) - - actor := events.Actor{ - ID: id, - Attributes: map[string]string{"image": from}, - } - e.Log(action, events.ContainerEventType, actor) - } - if len(e.events) != eventsLimit { - t.Fatalf("Must be %d events, got %d", eventsLimit, len(e.events)) - } - - var msgs []events.Message - for len(msgs) < 10 { - m := <-l - jm, ok := (m).(events.Message) - if !ok { - t.Fatalf("Unexpected type %T", m) - } - msgs = append(msgs, jm) - } - if len(current) != eventsLimit { - t.Fatalf("Must be %d events, got %d", eventsLimit, len(current)) - } - first := current[0] - if first.Status != "action_16" { - t.Fatalf("First action is %s, must be action_16", 
first.Status) - } - last := current[len(current)-1] - if last.Status != "action_79" { - t.Fatalf("Last action is %s, must be action_79", last.Status) - } - - firstC := msgs[0] - if firstC.Status != "action_80" { - t.Fatalf("First action is %s, must be action_80", firstC.Status) - } - lastC := msgs[len(msgs)-1] - if lastC.Status != "action_89" { - t.Fatalf("Last action is %s, must be action_89", lastC.Status) - } -} - -// https://github.com/docker/docker/issues/20999 -// Fixtures: -// -//2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) -//2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge) -//2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover) -func TestLoadBufferedEvents(t *testing.T) { - now := time.Now() - f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) - if err != nil { - t.Fatal(err) - } - s, sNano, err := timetypes.ParseTimestamps(f, -1) - if err != nil { - t.Fatal(err) - } - - m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") - if err != nil { - t.Fatal(err) - } - m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") - if err != nil { - t.Fatal(err) - } - m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") - if err != nil { - t.Fatal(err) - } - - events := &Events{ - events: []events.Message{*m1, *m2, *m3}, - } - - since := time.Unix(s, sNano) - until := time.Time{} - - out := events.loadBufferedEvents(since, until, nil) - if len(out) != 1 { - t.Fatalf("expected 1 message, got %d: %v", len(out), out) - } -} - -func TestLoadBufferedEventsOnlyFromPast(t *testing.T) { - now := time.Now() - f, err := timetypes.GetTimestamp("2016-03-07T17:28:03.090000000+02:00", now) - if err != nil { - t.Fatal(err) - } - s, sNano, err := timetypes.ParseTimestamps(f, 0) - if err != nil { - t.Fatal(err) - } - - f, err = timetypes.GetTimestamp("2016-03-07T17:28:03.100000000+02:00", now) - if err != nil { - t.Fatal(err) - } - u, uNano, err := timetypes.ParseTimestamps(f, 0) - if err != nil { - t.Fatal(err) - } - - m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") - if err != nil { - t.Fatal(err) - } - m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") - if err != nil { - t.Fatal(err) - } - m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") - if err != nil { - t.Fatal(err) - } - - 
events := &Events{ - events: []events.Message{*m1, *m2, *m3}, - } - - since := time.Unix(s, sNano) - until := time.Unix(u, uNano) - - out := events.loadBufferedEvents(since, until, nil) - if len(out) != 1 { - t.Fatalf("expected 1 message, got %d: %v", len(out), out) - } - - if out[0].Type != "network" { - t.Fatalf("expected network event, got %s", out[0].Type) - } -} - -// #13753 -func TestIngoreBufferedWhenNoTimes(t *testing.T) { - m1, err := eventstestutils.Scan("2016-03-07T17:28:03.022433271+02:00 container die 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") - if err != nil { - t.Fatal(err) - } - m2, err := eventstestutils.Scan("2016-03-07T17:28:03.091719377+02:00 network disconnect 19c5ed41acb798f26b751e0035cd7821741ab79e2bbd59a66b5fd8abf954eaa0 (type=bridge, container=0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079, name=bridge)") - if err != nil { - t.Fatal(err) - } - m3, err := eventstestutils.Scan("2016-03-07T17:28:03.129014751+02:00 container destroy 0b863f2a26c18557fc6cdadda007c459f9ec81b874780808138aea78a3595079 (image=ubuntu, name=small_hoover)") - if err != nil { - t.Fatal(err) - } - - events := &Events{ - events: []events.Message{*m1, *m2, *m3}, - } - - since := time.Time{} - until := time.Time{} - - out := events.loadBufferedEvents(since, until, nil) - if len(out) != 0 { - t.Fatalf("expected 0 buffered events, got %q", out) - } -} diff --git a/daemon/events/filter.go b/daemon/events/filter.go deleted file mode 100644 index 525431c915..0000000000 --- a/daemon/events/filter.go +++ /dev/null @@ -1,92 +0,0 @@ -package events - -import ( - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types/events" - "github.com/docker/engine-api/types/filters" -) - -// Filter can filter out docker events from a stream -type Filter struct { - filter filters.Args -} - -// NewFilter creates a new Filter -func NewFilter(filter filters.Args) *Filter { - return &Filter{filter: filter} -} - -// Include returns true when the event ev is included by the filters -func (ef *Filter) Include(ev events.Message) bool { - return ef.filter.ExactMatch("event", ev.Action) && - ef.filter.ExactMatch("type", ev.Type) && - ef.matchDaemon(ev) && - ef.matchContainer(ev) && - ef.matchPlugin(ev) && - ef.matchVolume(ev) && - ef.matchNetwork(ev) && - ef.matchImage(ev) && - ef.matchLabels(ev.Actor.Attributes) -} - -func (ef *Filter) matchLabels(attributes map[string]string) bool { - if !ef.filter.Include("label") { - return true - } - return ef.filter.MatchKVList("label", attributes) -} - -func (ef *Filter) matchDaemon(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.DaemonEventType) -} - -func (ef *Filter) matchContainer(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.ContainerEventType) -} - -func (ef *Filter) matchPlugin(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.PluginEventType) -} - -func (ef *Filter) matchVolume(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.VolumeEventType) -} - -func (ef *Filter) matchNetwork(ev events.Message) bool { - return ef.fuzzyMatchName(ev, events.NetworkEventType) -} - -func (ef *Filter) fuzzyMatchName(ev events.Message, eventType string) bool { - return ef.filter.FuzzyMatch(eventType, ev.Actor.ID) || - ef.filter.FuzzyMatch(eventType, ev.Actor.Attributes["name"]) -} - -// matchImage matches against both event.Actor.ID (for image events) -// and event.Actor.Attributes["image"] (for container events), so that any container that was 
created -// from an image will be included in the image events. Also compare both -// against the stripped repo name without any tags. -func (ef *Filter) matchImage(ev events.Message) bool { - id := ev.Actor.ID - nameAttr := "image" - var imageName string - - if ev.Type == events.ImageEventType { - nameAttr = "name" - } - - if n, ok := ev.Actor.Attributes[nameAttr]; ok { - imageName = n - } - return ef.filter.ExactMatch("image", id) || - ef.filter.ExactMatch("image", imageName) || - ef.filter.ExactMatch("image", stripTag(id)) || - ef.filter.ExactMatch("image", stripTag(imageName)) -} - -func stripTag(image string) string { - ref, err := reference.ParseNamed(image) - if err != nil { - return image - } - return ref.Name() -} diff --git a/daemon/events/testutils/testutils.go b/daemon/events/testutils/testutils.go deleted file mode 100644 index c84418a9e7..0000000000 --- a/daemon/events/testutils/testutils.go +++ /dev/null @@ -1,76 +0,0 @@ -package eventstestutils - -import ( - "fmt" - "regexp" - "strings" - "time" - - "github.com/docker/engine-api/types/events" - timetypes "github.com/docker/engine-api/types/time" -) - -var ( - reTimestamp = `(?P\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}.\d{9}(:?(:?(:?-|\+)\d{2}:\d{2})|Z))` - reEventType = `(?P\w+)` - reAction = `(?P\w+)` - reID = `(?P[^\s]+)` - reAttributes = `(\s\((?P[^\)]+)\))?` - reString = fmt.Sprintf(`\A%s\s%s\s%s\s%s%s\z`, reTimestamp, reEventType, reAction, reID, reAttributes) - - // eventCliRegexp is a regular expression that matches all possible event outputs in the cli - eventCliRegexp = regexp.MustCompile(reString) -) - -// ScanMap turns an event string like the default ones formatted in the cli output -// and turns it into map. -func ScanMap(text string) map[string]string { - matches := eventCliRegexp.FindAllStringSubmatch(text, -1) - md := map[string]string{} - if len(matches) == 0 { - return md - } - - names := eventCliRegexp.SubexpNames() - for i, n := range matches[0] { - md[names[i]] = n - } - return md -} - -// Scan turns an event string like the default ones formatted in the cli output -// and turns it into an event message. 
-func Scan(text string) (*events.Message, error) { - md := ScanMap(text) - if len(md) == 0 { - return nil, fmt.Errorf("text is not an event: %s", text) - } - - f, err := timetypes.GetTimestamp(md["timestamp"], time.Now()) - if err != nil { - return nil, err - } - - t, tn, err := timetypes.ParseTimestamps(f, -1) - if err != nil { - return nil, err - } - - attrs := make(map[string]string) - for _, a := range strings.SplitN(md["attributes"], ", ", -1) { - kv := strings.SplitN(a, "=", 2) - attrs[kv[0]] = kv[1] - } - - tu := time.Unix(t, tn) - return &events.Message{ - Time: t, - TimeNano: tu.UnixNano(), - Type: md["eventType"], - Action: md["action"], - Actor: events.Actor{ - ID: md["id"], - Attributes: attrs, - }, - }, nil -} diff --git a/daemon/events_test.go b/daemon/events_test.go deleted file mode 100644 index 8ee14a3144..0000000000 --- a/daemon/events_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package daemon - -import ( - "testing" - "time" - - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/events" - containertypes "github.com/docker/engine-api/types/container" - eventtypes "github.com/docker/engine-api/types/events" -) - -func TestLogContainerEventCopyLabels(t *testing.T) { - e := events.New() - _, l, _ := e.Subscribe() - defer e.Evict(l) - - container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - Labels: map[string]string{ - "node": "1", - "os": "alpine", - }, - }, - }, - } - daemon := &Daemon{ - EventsService: e, - } - daemon.LogContainerEvent(container, "create") - - if _, mutated := container.Config.Labels["image"]; mutated { - t.Fatalf("Expected to not mutate the container labels, got %q", container.Config.Labels) - } - - validateTestAttributes(t, l, map[string]string{ - "node": "1", - "os": "alpine", - }) -} - -func TestLogContainerEventWithAttributes(t *testing.T) { - e := events.New() - _, l, _ := e.Subscribe() - defer e.Evict(l) - - container := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Labels: map[string]string{ - "node": "1", - "os": "alpine", - }, - }, - }, - } - daemon := &Daemon{ - EventsService: e, - } - attributes := map[string]string{ - "node": "2", - "foo": "bar", - } - daemon.LogContainerEventWithAttributes(container, "create", attributes) - - validateTestAttributes(t, l, map[string]string{ - "node": "1", - "foo": "bar", - }) -} - -func validateTestAttributes(t *testing.T, l chan interface{}, expectedAttributesToTest map[string]string) { - select { - case ev := <-l: - event, ok := ev.(eventtypes.Message) - if !ok { - t.Fatalf("Unexpected event message: %q", ev) - } - for key, expected := range expectedAttributesToTest { - actual, ok := event.Actor.Attributes[key] - if !ok || actual != expected { - t.Fatalf("Expected value for key %s to be %s, but was %s (event:%v)", key, expected, actual, event) - } - } - case <-time.After(10 * time.Second): - t.Fatalf("LogEvent test timed out") - } -} diff --git a/daemon/exec.go b/daemon/exec.go deleted file mode 100644 index d57b6875d8..0000000000 --- a/daemon/exec.go +++ /dev/null @@ -1,268 +0,0 @@ -package daemon - -import ( - "fmt" - "io" - "strings" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/errors" - 
"github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/signal" - "github.com/docker/docker/pkg/term" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/strslice" -) - -// Seconds to wait after sending TERM before trying KILL -const termProcessTimeout = 10 - -func (d *Daemon) registerExecCommand(container *container.Container, config *exec.Config) { - // Storing execs in container in order to kill them gracefully whenever the container is stopped or removed. - container.ExecCommands.Add(config.ID, config) - // Storing execs in daemon for easy access via remote API. - d.execCommands.Add(config.ID, config) -} - -// ExecExists looks up the exec instance and returns a bool if it exists or not. -// It will also return the error produced by `getConfig` -func (d *Daemon) ExecExists(name string) (bool, error) { - if _, err := d.getExecConfig(name); err != nil { - return false, err - } - return true, nil -} - -// getExecConfig looks up the exec instance by name. If the container associated -// with the exec instance is stopped or paused, it will return an error. -func (d *Daemon) getExecConfig(name string) (*exec.Config, error) { - ec := d.execCommands.Get(name) - - // If the exec is found but its container is not in the daemon's list of - // containers then it must have been deleted, in which case instead of - // saying the container isn't running, we should return a 404 so that - // the user sees the same error now that they will after the - // 5 minute clean-up loop is run which erases old/dead execs. - - if ec != nil { - if container := d.containers.Get(ec.ContainerID); container != nil { - if !container.IsRunning() { - return nil, fmt.Errorf("Container %s is not running: %s", container.ID, container.State.String()) - } - if container.IsPaused() { - return nil, errExecPaused(container.ID) - } - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return ec, nil - } - } - - return nil, errExecNotFound(name) -} - -func (d *Daemon) unregisterExecCommand(container *container.Container, execConfig *exec.Config) { - container.ExecCommands.Delete(execConfig.ID) - d.execCommands.Delete(execConfig.ID) -} - -func (d *Daemon) getActiveContainer(name string) (*container.Container, error) { - container, err := d.GetContainer(name) - if err != nil { - return nil, err - } - - if !container.IsRunning() { - return nil, errNotRunning{container.ID} - } - if container.IsPaused() { - return nil, errExecPaused(name) - } - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - return container, nil -} - -// ContainerExecCreate sets up an exec in a running container. 
-func (d *Daemon) ContainerExecCreate(name string, config *types.ExecConfig) (string, error) { - container, err := d.getActiveContainer(name) - if err != nil { - return "", err - } - - cmd := strslice.StrSlice(config.Cmd) - entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmd) - - keys := []byte{} - if config.DetachKeys != "" { - keys, err = term.ToBytes(config.DetachKeys) - if err != nil { - err = fmt.Errorf("Invalid escape keys (%s) provided", config.DetachKeys) - return "", err - } - } - - execConfig := exec.NewConfig() - execConfig.OpenStdin = config.AttachStdin - execConfig.OpenStdout = config.AttachStdout - execConfig.OpenStderr = config.AttachStderr - execConfig.ContainerID = container.ID - execConfig.DetachKeys = keys - execConfig.Entrypoint = entrypoint - execConfig.Args = args - execConfig.Tty = config.Tty - execConfig.Privileged = config.Privileged - execConfig.User = config.User - if len(execConfig.User) == 0 { - execConfig.User = container.Config.User - } - - d.registerExecCommand(container, execConfig) - - d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) - - return execConfig.ID, nil -} - -// ContainerExecStart starts a previously set up exec instance. The -// std streams are set up. -// If ctx is cancelled, the process is terminated. -func (d *Daemon) ContainerExecStart(ctx context.Context, name string, stdin io.ReadCloser, stdout io.Writer, stderr io.Writer) (err error) { - var ( - cStdin io.ReadCloser - cStdout, cStderr io.Writer - ) - - ec, err := d.getExecConfig(name) - if err != nil { - return errExecNotFound(name) - } - - ec.Lock() - if ec.ExitCode != nil { - ec.Unlock() - err := fmt.Errorf("Error: Exec command %s has already run", ec.ID) - return errors.NewRequestConflictError(err) - } - - if ec.Running { - ec.Unlock() - return fmt.Errorf("Error: Exec command %s is already running", ec.ID) - } - ec.Running = true - defer func() { - if err != nil { - ec.Running = false - exitCode := 126 - ec.ExitCode = &exitCode - } - }() - ec.Unlock() - - c := d.containers.Get(ec.ContainerID) - logrus.Debugf("starting exec command %s in container %s", ec.ID, c.ID) - d.LogContainerEvent(c, "exec_start: "+ec.Entrypoint+" "+strings.Join(ec.Args, " ")) - - if ec.OpenStdin && stdin != nil { - r, w := io.Pipe() - go func() { - defer w.Close() - defer logrus.Debug("Closing buffered stdin pipe") - pools.Copy(w, stdin) - }() - cStdin = r - } - if ec.OpenStdout { - cStdout = stdout - } - if ec.OpenStderr { - cStderr = stderr - } - - if ec.OpenStdin { - ec.NewInputPipes() - } else { - ec.NewNopInputPipe() - } - - p := libcontainerd.Process{ - Args: append([]string{ec.Entrypoint}, ec.Args...), - Terminal: ec.Tty, - } - - if err := execSetPlatformOpt(c, ec, &p); err != nil { - return err - } - - attachErr := container.AttachStreams(ctx, ec.StreamConfig, ec.OpenStdin, true, ec.Tty, cStdin, cStdout, cStderr, ec.DetachKeys) - - if err := d.containerd.AddProcess(ctx, c.ID, name, p); err != nil { - return err - } - - select { - case <-ctx.Done(): - logrus.Debugf("Sending TERM signal to process %v in container %v", name, c.ID) - d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["TERM"])) - select { - case <-time.After(termProcessTimeout * time.Second): - logrus.Infof("Container %v, process %v failed to exit within %d seconds of signal TERM - using the force", c.ID, name, termProcessTimeout) - d.containerd.SignalProcess(c.ID, name, int(signal.SignalMap["KILL"])) - case <-attachErr: - // TERM signal worked - } - return 
fmt.Errorf("context cancelled") - case err := <-attachErr: - if err != nil { - if _, ok := err.(container.DetachError); !ok { - return fmt.Errorf("exec attach failed with error: %v", err) - } - d.LogContainerEvent(c, "exec_detach") - } - } - return nil -} - -// execCommandGC runs a ticker to clean up the daemon references -// of exec configs that are no longer part of the container. -func (d *Daemon) execCommandGC() { - for range time.Tick(5 * time.Minute) { - var ( - cleaned int - liveExecCommands = d.containerExecIds() - ) - for id, config := range d.execCommands.Commands() { - if config.CanRemove { - cleaned++ - d.execCommands.Delete(id) - } else { - if _, exists := liveExecCommands[id]; !exists { - config.CanRemove = true - } - } - } - if cleaned > 0 { - logrus.Debugf("clean %d unused exec commands", cleaned) - } - } -} - -// containerExecIds returns a list of all the current exec ids that are in use -// and running inside a container. -func (d *Daemon) containerExecIds() map[string]struct{} { - ids := map[string]struct{}{} - for _, c := range d.containers.List() { - for _, id := range c.ExecCommands.List() { - ids[id] = struct{}{} - } - } - return ids -} diff --git a/daemon/exec/exec.go b/daemon/exec/exec.go deleted file mode 100644 index bbeb1c16a6..0000000000 --- a/daemon/exec/exec.go +++ /dev/null @@ -1,93 +0,0 @@ -package exec - -import ( - "sync" - - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" -) - -// Config holds the configurations for execs. The Daemon keeps -// track of both running and finished execs so that they can be -// examined both during and after completion. -type Config struct { - sync.Mutex - *runconfig.StreamConfig - ID string - Running bool - ExitCode *int - OpenStdin bool - OpenStderr bool - OpenStdout bool - CanRemove bool - ContainerID string - DetachKeys []byte - Entrypoint string - Args []string - Tty bool - Privileged bool - User string -} - -// NewConfig initializes the a new exec configuration -func NewConfig() *Config { - return &Config{ - ID: stringid.GenerateNonCryptoID(), - StreamConfig: runconfig.NewStreamConfig(), - } -} - -// Store keeps track of the exec configurations. -type Store struct { - commands map[string]*Config - sync.RWMutex -} - -// NewStore initializes a new exec store. -func NewStore() *Store { - return &Store{commands: make(map[string]*Config, 0)} -} - -// Commands returns the exec configurations in the store. -func (e *Store) Commands() map[string]*Config { - e.RLock() - commands := make(map[string]*Config, len(e.commands)) - for id, config := range e.commands { - commands[id] = config - } - e.RUnlock() - return commands -} - -// Add adds a new exec configuration to the store. -func (e *Store) Add(id string, Config *Config) { - e.Lock() - e.commands[id] = Config - e.Unlock() -} - -// Get returns an exec configuration by its id. -func (e *Store) Get(id string) *Config { - e.RLock() - res := e.commands[id] - e.RUnlock() - return res -} - -// Delete removes an exec configuration from the store. -func (e *Store) Delete(id string) { - e.Lock() - delete(e.commands, id) - e.Unlock() -} - -// List returns the list of exec ids in the store. 
-func (e *Store) List() []string { - var IDs []string - e.RLock() - for id := range e.commands { - IDs = append(IDs, id) - } - e.RUnlock() - return IDs -} diff --git a/daemon/exec_linux.go b/daemon/exec_linux.go deleted file mode 100644 index a2c86b2868..0000000000 --- a/daemon/exec_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/caps" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" -) - -func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { - if len(ec.User) > 0 { - uid, gid, additionalGids, err := getUser(c, ec.User) - if err != nil { - return err - } - p.User = &libcontainerd.User{ - UID: uid, - GID: gid, - AdditionalGids: additionalGids, - } - } - if ec.Privileged { - p.Capabilities = caps.GetAllCapabilities() - } - return nil -} diff --git a/daemon/exec_solaris.go b/daemon/exec_solaris.go deleted file mode 100644 index 7003355d91..0000000000 --- a/daemon/exec_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" -) - -func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { - return nil -} diff --git a/daemon/exec_windows.go b/daemon/exec_windows.go deleted file mode 100644 index a6ac1db42d..0000000000 --- a/daemon/exec_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/docker/libcontainerd" -) - -func execSetPlatformOpt(c *container.Container, ec *exec.Config, p *libcontainerd.Process) error { - // Process arguments need to be escaped before sending to OCI. - p.Args = escapeArgs(p.Args) - return nil -} diff --git a/daemon/export.go b/daemon/export.go deleted file mode 100644 index 80d7dbb2e1..0000000000 --- a/daemon/export.go +++ /dev/null @@ -1,55 +0,0 @@ -package daemon - -import ( - "fmt" - "io" - - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/ioutils" -) - -// ContainerExport writes the contents of the container to the given -// writer. An error is returned if the container cannot be found. 
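containerExport, coming up below, returns the tar stream wrapped so that the container is unmounted only when the caller closes it, via ioutils.NewReadCloserWrapper. A minimal standalone version of that wrap-cleanup-into-Close idea:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

// readCloserWrapper mirrors the ioutils.NewReadCloserWrapper pattern:
// wrap a stream so extra cleanup (there, the container unmount) runs
// exactly when the consumer closes it.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func main() {
	wrapped := &readCloserWrapper{
		Reader: strings.NewReader("tar bytes...\n"),
		closer: func() error {
			fmt.Println("unmount would happen here")
			return nil
		},
	}
	defer wrapped.Close()
	io.Copy(os.Stdout, wrapped) // stream the export to the caller
}
```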
-func (daemon *Daemon) ContainerExport(name string, out io.Writer) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - data, err := daemon.containerExport(container) - if err != nil { - return fmt.Errorf("Error exporting container %s: %v", name, err) - } - defer data.Close() - - // Stream the entire contents of the container (basically a volatile snapshot) - if _, err := io.Copy(out, data); err != nil { - return fmt.Errorf("Error exporting container %s: %v", name, err) - } - return nil -} - -func (daemon *Daemon) containerExport(container *container.Container) (archive.Archive, error) { - if err := daemon.Mount(container); err != nil { - return nil, err - } - - uidMaps, gidMaps := daemon.GetUIDGIDMaps() - archive, err := archive.TarWithOptions(container.BaseFS, &archive.TarOptions{ - Compression: archive.Uncompressed, - UIDMaps: uidMaps, - GIDMaps: gidMaps, - }) - if err != nil { - daemon.Unmount(container) - return nil, err - } - arch := ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - daemon.Unmount(container) - return err - }) - daemon.LogContainerEvent(container, "export") - return arch, err -} diff --git a/daemon/graphdriver/aufs/aufs.go b/daemon/graphdriver/aufs/aufs.go deleted file mode 100644 index fac69b8740..0000000000 --- a/daemon/graphdriver/aufs/aufs.go +++ /dev/null @@ -1,652 +0,0 @@ -// +build linux - -/* - -aufs driver directory structure - - . - ├── layers // Metadata of layers - │ ├── 1 - │ ├── 2 - │ └── 3 - ├── diff // Content of the layer - │ ├── 1 // Contains layers that need to be mounted for the id - │ ├── 2 - │ └── 3 - └── mnt // Mount points for the rw layers to be mounted - ├── 1 - ├── 2 - └── 3 - -*/ - -package aufs - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/directory" - "github.com/docker/docker/pkg/idtools" - mountpk "github.com/docker/docker/pkg/mount" - - "github.com/opencontainers/runc/libcontainer/label" - rsystem "github.com/opencontainers/runc/libcontainer/system" -) - -var ( - // ErrAufsNotSupported is returned if aufs is not supported by the host. - ErrAufsNotSupported = fmt.Errorf("AUFS was not found in /proc/filesystems") - // ErrAufsNested means aufs cannot be used bc we are in a user namespace - ErrAufsNested = fmt.Errorf("AUFS cannot be used in non-init user namespace") - backingFs = "" - - enableDirpermLock sync.Once - enableDirperm bool -) - -func init() { - graphdriver.Register("aufs", Init) -} - -// Driver contains information about the filesystem mounted. -type Driver struct { - sync.Mutex - root string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter - pathCacheLock sync.Mutex - pathCache map[string]string -} - -// Init returns a new AUFS driver. -// An error is returned if AUFS is not supported. 
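The driver wires itself in through graphdriver.Register("aufs", Init) in the init function above, and Init, shown next, does the real work. A toy registry demonstrating the same self-registration pattern (the names and the InitFunc signature here are simplified, not the real graphdriver API):

```go
package main

import "fmt"

// InitFunc is a simplified constructor signature; the real
// graphdriver.Register takes (root string, options []string, uidMaps,
// gidMaps []idtools.IDMap).
type InitFunc func(root string) (string, error)

var drivers = map[string]InitFunc{}

// register mimics graphdriver.Register: drivers self-register from an
// init() function, and the daemon later looks them up by name.
func register(name string, fn InitFunc) {
	if _, exists := drivers[name]; exists {
		panic("driver registered twice: " + name)
	}
	drivers[name] = fn
}

func init() {
	register("aufs", func(root string) (string, error) {
		return "aufs driver rooted at " + root, nil
	})
}

func main() {
	d, _ := drivers["aufs"]("/var/lib/docker/aufs")
	fmt.Println(d)
}
```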
-func Init(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - - // Try to load the aufs kernel module - if err := supportsAufs(); err != nil { - return nil, graphdriver.ErrNotSupported - } - - fsMagic, err := graphdriver.GetFSMagic(root) - if err != nil { - return nil, err - } - if fsName, ok := graphdriver.FsNames[fsMagic]; ok { - backingFs = fsName - } - - switch fsMagic { - case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicEcryptfs: - logrus.Errorf("AUFS is not supported over %s", backingFs) - return nil, graphdriver.ErrIncompatibleFS - } - - paths := []string{ - "mnt", - "diff", - "layers", - } - - a := &Driver{ - root: root, - uidMaps: uidMaps, - gidMaps: gidMaps, - pathCache: make(map[string]string), - ctr: graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicAufs)), - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - // Create the root aufs driver dir and return - // if it already exists - // If not populate the dir structure - if err := idtools.MkdirAllAs(root, 0700, rootUID, rootGID); err != nil { - if os.IsExist(err) { - return a, nil - } - return nil, err - } - - if err := mountpk.MakePrivate(root); err != nil { - return nil, err - } - - // Populate the dir structure - for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(root, p), 0700, rootUID, rootGID); err != nil { - return nil, err - } - } - return a, nil -} - -// Return a nil error if the kernel supports aufs -// We cannot modprobe because inside dind modprobe fails -// to run -func supportsAufs() error { - // We can try to modprobe aufs first before looking at - // proc/filesystems for when aufs is supported - exec.Command("modprobe", "aufs").Run() - - if rsystem.RunningInUserNS() { - return ErrAufsNested - } - - f, err := os.Open("/proc/filesystems") - if err != nil { - return err - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.Contains(s.Text(), "aufs") { - return nil - } - } - return ErrAufsNotSupported -} - -func (a *Driver) rootPath() string { - return a.root -} - -func (*Driver) String() string { - return "aufs" -} - -// Status returns current information about the filesystem such as root directory, number of directories mounted, etc. -func (a *Driver) Status() [][2]string { - ids, _ := loadIds(path.Join(a.rootPath(), "layers")) - return [][2]string{ - {"Root Dir", a.rootPath()}, - {"Backing Filesystem", backingFs}, - {"Dirs", fmt.Sprintf("%d", len(ids))}, - {"Dirperm1 Supported", fmt.Sprintf("%v", useDirperm())}, - } -} - -// GetMetadata not implemented -func (a *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -// Exists returns true if the given id is registered with -// this driver -func (a *Driver) Exists(id string) bool { - if _, err := os.Lstat(path.Join(a.rootPath(), "layers", id)); err != nil { - return false - } - return true -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. 
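supportsAufs above decides support by scanning /proc/filesystems after a best-effort modprobe. The scan itself is only a few lines; here it is as a standalone helper (Linux-only by nature, and the function name is ours):

```go
package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// hasFilesystem checks /proc/filesystems the same way supportsAufs does:
// line-scan the file and look for the filesystem name. On non-Linux
// platforms the open simply fails.
func hasFilesystem(name string) (bool, error) {
	f, err := os.Open("/proc/filesystems")
	if err != nil {
		return false, err
	}
	defer f.Close()

	s := bufio.NewScanner(f)
	for s.Scan() {
		if strings.Contains(s.Text(), name) {
			return true, nil
		}
	}
	return false, s.Err()
}

func main() {
	ok, err := hasFilesystem("overlay")
	fmt.Println(ok, err)
}
```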
-func (a *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return a.Create(id, parent, mountLabel, storageOpt) -} - -// Create three folders for each id -// mnt, layers, and diff -func (a *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { - - if len(storageOpt) != 0 { - return fmt.Errorf("--storage-opt is not supported for aufs") - } - - if err := a.createDirsFor(id); err != nil { - return err - } - // Write the layers metadata - f, err := os.Create(path.Join(a.rootPath(), "layers", id)) - if err != nil { - return err - } - defer f.Close() - - if parent != "" { - ids, err := getParentIds(a.rootPath(), parent) - if err != nil { - return err - } - - if _, err := fmt.Fprintln(f, parent); err != nil { - return err - } - for _, i := range ids { - if _, err := fmt.Fprintln(f, i); err != nil { - return err - } - } - } - - return nil -} - -// createDirsFor creates two directories for the given id. -// mnt and diff -func (a *Driver) createDirsFor(id string) error { - paths := []string{ - "mnt", - "diff", - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(a.uidMaps, a.gidMaps) - if err != nil { - return err - } - // Directory permission is 0755. - // The path of directories are /mnt/ - // and /diff/ - for _, p := range paths { - if err := idtools.MkdirAllAs(path.Join(a.rootPath(), p, id), 0755, rootUID, rootGID); err != nil { - return err - } - } - return nil -} - -// Helper function to debug EBUSY errors on remove. -func debugEBusy(mountPath string) (out []string, err error) { - // lsof is not part of GNU coreutils. This is a best effort - // attempt to detect offending processes. - c := exec.Command("lsof") - - r, err := c.StdoutPipe() - if err != nil { - return nil, fmt.Errorf("Assigning pipes failed with %v", err) - } - - if err := c.Start(); err != nil { - return nil, fmt.Errorf("Starting %s failed with %v", c.Path, err) - } - - defer func() { - waiterr := c.Wait() - if waiterr != nil && err == nil { - err = fmt.Errorf("Waiting for %s failed with %v", c.Path, waiterr) - } - }() - - sc := bufio.NewScanner(r) - for sc.Scan() { - entry := sc.Text() - if strings.Contains(entry, mountPath) { - out = append(out, entry, "\n") - } - } - - return out, nil -} - -// Remove will unmount and remove the given id. -func (a *Driver) Remove(id string) error { - a.pathCacheLock.Lock() - mountpoint, exists := a.pathCache[id] - a.pathCacheLock.Unlock() - if !exists { - mountpoint = a.getMountpoint(id) - } - - var retries int - for { - mounted, err := a.mounted(mountpoint) - if err != nil { - return err - } - if !mounted { - break - } - - if err := a.unmount(mountpoint); err != nil { - if err != syscall.EBUSY { - return fmt.Errorf("aufs: unmount error: %s: %v", mountpoint, err) - } - if retries >= 5 { - out, debugErr := debugEBusy(mountpoint) - if debugErr == nil { - logrus.Warnf("debugEBusy returned %v", out) - } - return fmt.Errorf("aufs: unmount error after retries: %s: %v", mountpoint, err) - } - // If unmount returns EBUSY, it could be a transient error. Sleep and retry. - retries++ - logrus.Warnf("unmount failed due to EBUSY: retry count: %d", retries) - time.Sleep(100 * time.Millisecond) - continue - } - break - } - - // Atomically remove each directory in turn by first moving it out of the - // way (so that docker doesn't find it anymore) before doing removal of - // the whole tree. 
- tmpMntPath := path.Join(a.mntPath(), fmt.Sprintf("%s-removing", id)) - if err := os.Rename(mountpoint, tmpMntPath); err != nil && !os.IsNotExist(err) { - if err == syscall.EBUSY { - logrus.Warnf("os.Rename err due to EBUSY") - out, debugErr := debugEBusy(mountpoint) - if debugErr == nil { - logrus.Warnf("debugEBusy returned %v", out) - } - } - return err - } - defer os.RemoveAll(tmpMntPath) - - tmpDiffpath := path.Join(a.diffPath(), fmt.Sprintf("%s-removing", id)) - if err := os.Rename(a.getDiffPath(id), tmpDiffpath); err != nil && !os.IsNotExist(err) { - return err - } - defer os.RemoveAll(tmpDiffpath) - - // Remove the layers file for the id - if err := os.Remove(path.Join(a.rootPath(), "layers", id)); err != nil && !os.IsNotExist(err) { - return err - } - - a.pathCacheLock.Lock() - delete(a.pathCache, id) - a.pathCacheLock.Unlock() - return nil -} - -// Get returns the rootfs path for the id. -// This will mount the dir at its given path -func (a *Driver) Get(id, mountLabel string) (string, error) { - parents, err := a.getParentLayerPaths(id) - if err != nil && !os.IsNotExist(err) { - return "", err - } - - a.pathCacheLock.Lock() - m, exists := a.pathCache[id] - a.pathCacheLock.Unlock() - - if !exists { - m = a.getDiffPath(id) - if len(parents) > 0 { - m = a.getMountpoint(id) - } - } - if count := a.ctr.Increment(m); count > 1 { - return m, nil - } - - // If a dir does not have a parent ( no layers )do not try to mount - // just return the diff path to the data - if len(parents) > 0 { - if err := a.mount(id, m, mountLabel, parents); err != nil { - return "", err - } - } - - a.pathCacheLock.Lock() - a.pathCache[id] = m - a.pathCacheLock.Unlock() - return m, nil -} - -// Put unmounts and updates list of active mounts. -func (a *Driver) Put(id string) error { - a.pathCacheLock.Lock() - m, exists := a.pathCache[id] - if !exists { - m = a.getMountpoint(id) - a.pathCache[id] = m - } - a.pathCacheLock.Unlock() - if count := a.ctr.Decrement(m); count > 0 { - return nil - } - - err := a.unmount(m) - if err != nil { - logrus.Debugf("Failed to unmount %s aufs: %v", id, err) - } - return err -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". -func (a *Driver) Diff(id, parent string) (archive.Archive, error) { - // AUFS doesn't need the parent layer to produce a diff. - return archive.TarWithOptions(path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - Compression: archive.Uncompressed, - ExcludePatterns: []string{archive.WhiteoutMetaPrefix + "*", "!" + archive.WhiteoutOpaqueDir}, - UIDMaps: a.uidMaps, - GIDMaps: a.gidMaps, - }) -} - -type fileGetNilCloser struct { - storage.FileGetter -} - -func (f fileGetNilCloser) Close() error { - return nil -} - -// DiffGetter returns a FileGetCloser that can read files from the directory that -// contains files for the layer differences. Used for direct access for tar-split. -func (a *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - p := path.Join(a.rootPath(), "diff", id) - return fileGetNilCloser{storage.NewPathFileGetter(p)}, nil -} - -func (a *Driver) applyDiff(id string, diff archive.Reader) error { - return chrootarchive.UntarUncompressed(diff, path.Join(a.rootPath(), "diff", id), &archive.TarOptions{ - UIDMaps: a.uidMaps, - GIDMaps: a.gidMaps, - }) -} - -// DiffSize calculates the changes between the specified id -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. 
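Remove above never deletes a live layer directory in place: it renames it to an "-removing" sibling first, a single atomic step after which lookups stop finding it, and only then removes the tree. The pattern in isolation:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// removeAside applies the rename-then-delete pattern used by
// Driver.Remove above: the rename is atomic, so concurrent lookups stop
// seeing the directory immediately, and the slow tree removal happens
// on the renamed copy.
func removeAside(dir string) error {
	tmp := filepath.Join(filepath.Dir(dir), filepath.Base(dir)+"-removing")
	if err := os.Rename(dir, tmp); err != nil && !os.IsNotExist(err) {
		return err
	}
	return os.RemoveAll(tmp)
}

func main() {
	d, _ := os.MkdirTemp("", "layer")
	os.WriteFile(filepath.Join(d, "data"), []byte("x"), 0600)
	fmt.Println(removeAside(d)) // <nil>
}
```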
-func (a *Driver) DiffSize(id, parent string) (size int64, err error) { - // AUFS doesn't need the parent layer to calculate the diff size. - return directory.Size(path.Join(a.rootPath(), "diff", id)) -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -func (a *Driver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { - // AUFS doesn't need the parent id to apply the diff. - if err = a.applyDiff(id, diff); err != nil { - return - } - - return a.DiffSize(id, parent) -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -func (a *Driver) Changes(id, parent string) ([]archive.Change, error) { - // AUFS doesn't have snapshots, so we need to get changes from all parent - // layers. - layers, err := a.getParentLayerPaths(id) - if err != nil { - return nil, err - } - return archive.Changes(layers, path.Join(a.rootPath(), "diff", id)) -} - -func (a *Driver) getParentLayerPaths(id string) ([]string, error) { - parentIds, err := getParentIds(a.rootPath(), id) - if err != nil { - return nil, err - } - layers := make([]string, len(parentIds)) - - // Get the diff paths for all the parent ids - for i, p := range parentIds { - layers[i] = path.Join(a.rootPath(), "diff", p) - } - return layers, nil -} - -func (a *Driver) mount(id string, target string, mountLabel string, layers []string) error { - a.Lock() - defer a.Unlock() - - // If the id is mounted or we get an error return - if mounted, err := a.mounted(target); err != nil || mounted { - return err - } - - rw := a.getDiffPath(id) - - if err := a.aufsMount(layers, rw, target, mountLabel); err != nil { - return fmt.Errorf("error creating aufs mount to %s: %v", target, err) - } - return nil -} - -func (a *Driver) unmount(mountPath string) error { - a.Lock() - defer a.Unlock() - - if mounted, err := a.mounted(mountPath); err != nil || !mounted { - return err - } - if err := Unmount(mountPath); err != nil { - return err - } - return nil -} - -func (a *Driver) mounted(mountpoint string) (bool, error) { - return graphdriver.Mounted(graphdriver.FsMagicAufs, mountpoint) -} - -// Cleanup aufs and unmount all mountpoints -func (a *Driver) Cleanup() error { - var dirs []string - if err := filepath.Walk(a.mntPath(), func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if !info.IsDir() { - return nil - } - dirs = append(dirs, path) - return nil - }); err != nil { - return err - } - - for _, m := range dirs { - if err := a.unmount(m); err != nil { - logrus.Debugf("aufs error unmounting %s: %s", m, err) - } - } - return mountpk.Unmount(a.root) -} - -func (a *Driver) aufsMount(ro []string, rw, target, mountLabel string) (err error) { - defer func() { - if err != nil { - Unmount(target) - } - }() - - // Mount options are clipped to page size(4096 bytes). If there are more - // layers then these are remounted individually using append. 
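The page-size limit noted in the comment above is why the implementation that follows mounts an initial batch of branches and then remounts the remainder one at a time with "append". A small sketch of the constraint itself, counting how many read-only branch entries fit in one page-sized option string; the paths and counts are made up for illustration:

    package main

    import (
        "fmt"
        "os"
    )

    // Counts how many ":<path>=ro+wh" branch entries fit in a single
    // page-sized aufs option string before "append" remounts become
    // necessary. Purely illustrative; the real code also reserves room
    // for xino options and the mount label.
    func main() {
        pageSize := os.Getpagesize() // typically 4096 bytes
        opts := "br:/var/lib/docker/aufs/diff/container-rw=rw"
        fits := 0
        for {
            branch := fmt.Sprintf(":/var/lib/docker/aufs/diff/layer-%04d=ro+wh", fits)
            if len(opts)+len(branch) > pageSize {
                break
            }
            opts += branch
            fits++
        }
        fmt.Printf("%d read-only branches fit in %d bytes; the rest need append remounts\n",
            fits, pageSize)
    }
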
- - offset := 54 - if useDirperm() { - offset += len("dirperm1") - } - b := make([]byte, syscall.Getpagesize()-len(mountLabel)-offset) // room for xino & mountLabel - bp := copy(b, fmt.Sprintf("br:%s=rw", rw)) - - firstMount := true - i := 0 - - for { - for ; i < len(ro); i++ { - layer := fmt.Sprintf(":%s=ro+wh", ro[i]) - - if firstMount { - if bp+len(layer) > len(b) { - break - } - bp += copy(b[bp:], layer) - } else { - data := label.FormatMountLabel(fmt.Sprintf("append%s", layer), mountLabel) - if err = mount("none", target, "aufs", syscall.MS_REMOUNT, data); err != nil { - return - } - } - } - - if firstMount { - opts := "dio,xino=/dev/shm/aufs.xino" - if useDirperm() { - opts += ",dirperm1" - } - data := label.FormatMountLabel(fmt.Sprintf("%s,%s", string(b[:bp]), opts), mountLabel) - if err = mount("none", target, "aufs", 0, data); err != nil { - return - } - firstMount = false - } - - if i == len(ro) { - break - } - } - - return -} - -// useDirperm checks dirperm1 mount option can be used with the current -// version of aufs. -func useDirperm() bool { - enableDirpermLock.Do(func() { - base, err := ioutil.TempDir("", "docker-aufs-base") - if err != nil { - logrus.Errorf("error checking dirperm1: %v", err) - return - } - defer os.RemoveAll(base) - - union, err := ioutil.TempDir("", "docker-aufs-union") - if err != nil { - logrus.Errorf("error checking dirperm1: %v", err) - return - } - defer os.RemoveAll(union) - - opts := fmt.Sprintf("br:%s,dirperm1,xino=/dev/shm/aufs.xino", base) - if err := mount("none", union, "aufs", 0, opts); err != nil { - return - } - enableDirperm = true - if err := Unmount(union); err != nil { - logrus.Errorf("error checking dirperm1: failed to unmount %v", err) - } - }) - return enableDirperm -} diff --git a/daemon/graphdriver/aufs/aufs_test.go b/daemon/graphdriver/aufs/aufs_test.go deleted file mode 100644 index 1d0a268f94..0000000000 --- a/daemon/graphdriver/aufs/aufs_test.go +++ /dev/null @@ -1,801 +0,0 @@ -// +build linux - -package aufs - -import ( - "crypto/sha256" - "encoding/hex" - "fmt" - "io/ioutil" - "os" - "path" - "sync" - "testing" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/stringid" -) - -var ( - tmpOuter = path.Join(os.TempDir(), "aufs-tests") - tmp = path.Join(tmpOuter, "aufs") -) - -func init() { - reexec.Init() -} - -func testInit(dir string, t testing.TB) graphdriver.Driver { - d, err := Init(dir, nil, nil, nil) - if err != nil { - if err == graphdriver.ErrNotSupported { - t.Skip(err) - } else { - t.Fatal(err) - } - } - return d -} - -func newDriver(t testing.TB) *Driver { - if err := os.MkdirAll(tmp, 0755); err != nil { - t.Fatal(err) - } - - d := testInit(tmp, t) - return d.(*Driver) -} - -func TestNewDriver(t *testing.T) { - if err := os.MkdirAll(tmp, 0755); err != nil { - t.Fatal(err) - } - - d := testInit(tmp, t) - defer os.RemoveAll(tmp) - if d == nil { - t.Fatalf("Driver should not be nil") - } -} - -func TestAufsString(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if d.String() != "aufs" { - t.Fatalf("Expected aufs got %s", d.String()) - } -} - -func TestCreateDirStructure(t *testing.T) { - newDriver(t) - defer os.RemoveAll(tmp) - - paths := []string{ - "mnt", - "layers", - "diff", - } - - for _, p := range paths { - if _, err := os.Stat(path.Join(tmp, p)); err != nil { - t.Fatal(err) - } - } -} - -// We should be able to create two drivers with the same dir structure -func 
TestNewDriverFromExistingDir(t *testing.T) { - if err := os.MkdirAll(tmp, 0755); err != nil { - t.Fatal(err) - } - - testInit(tmp, t) - testInit(tmp, t) - os.RemoveAll(tmp) -} - -func TestCreateNewDir(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } -} - -func TestCreateNewDirStructure(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - - paths := []string{ - "mnt", - "diff", - "layers", - } - - for _, p := range paths { - if _, err := os.Stat(path.Join(tmp, p, "1")); err != nil { - t.Fatal(err) - } - } -} - -func TestRemoveImage(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - - if err := d.Remove("1"); err != nil { - t.Fatal(err) - } - - paths := []string{ - "mnt", - "diff", - "layers", - } - - for _, p := range paths { - if _, err := os.Stat(path.Join(tmp, p, "1")); err == nil { - t.Fatalf("Error should not be nil because dirs with id 1 should be delted: %s", p) - } - } -} - -func TestGetWithoutParent(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1", "") - if err != nil { - t.Fatal(err) - } - expected := path.Join(tmp, "diff", "1") - if diffPath != expected { - t.Fatalf("Expected path %s got %s", expected, diffPath) - } -} - -func TestCleanupWithNoDirs(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } -} - -func TestCleanupWithDir(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } -} - -func TestMountedFalseResponse(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - - response, err := d.mounted(d.getDiffPath("1")) - if err != nil { - t.Fatal(err) - } - - if response != false { - t.Fatalf("Response if dir id 1 is mounted should be false") - } -} - -func TestMountedTrueReponse(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1", "", nil); err != nil { - t.Fatal(err) - } - - _, err := d.Get("2", "") - if err != nil { - t.Fatal(err) - } - - response, err := d.mounted(d.pathCache["2"]) - if err != nil { - t.Fatal(err) - } - - if response != true { - t.Fatalf("Response if dir id 2 is mounted should be true") - } -} - -func TestMountWithParent(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1", "", nil); err != nil { - t.Fatal(err) - } - - defer func() { - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - }() - - mntPath, err := d.Get("2", "") - if err != nil { - t.Fatal(err) - } - if mntPath == "" { - t.Fatal("mntPath should not be empty string") - } - - expected := path.Join(tmp, "mnt", "2") - if mntPath != expected { - t.Fatalf("Expected %s got %s", expected, mntPath) - } -} - -func TestRemoveMountedDir(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - if err := d.Create("2", "1", "", 
nil); err != nil { - t.Fatal(err) - } - - defer func() { - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - }() - - mntPath, err := d.Get("2", "") - if err != nil { - t.Fatal(err) - } - if mntPath == "" { - t.Fatal("mntPath should not be empty string") - } - - mounted, err := d.mounted(d.pathCache["2"]) - if err != nil { - t.Fatal(err) - } - - if !mounted { - t.Fatalf("Dir id 2 should be mounted") - } - - if err := d.Remove("2"); err != nil { - t.Fatal(err) - } -} - -func TestCreateWithInvalidParent(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "docker", "", nil); err == nil { - t.Fatalf("Error should not be nil with parent does not exist") - } -} - -func TestGetDiff(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.CreateReadWrite("1", "", "", nil); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1", "") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - f.Close() - - a, err := d.Diff("1", "") - if err != nil { - t.Fatal(err) - } - if a == nil { - t.Fatalf("Archive should not be nil") - } -} - -func TestChanges(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - if err := d.CreateReadWrite("2", "1", "", nil); err != nil { - t.Fatal(err) - } - - defer func() { - if err := d.Cleanup(); err != nil { - t.Fatal(err) - } - }() - - mntPoint, err := d.Get("2", "") - if err != nil { - t.Fatal(err) - } - - // Create a file to save in the mountpoint - f, err := os.Create(path.Join(mntPoint, "test.txt")) - if err != nil { - t.Fatal(err) - } - - if _, err := f.WriteString("testline"); err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - - changes, err := d.Changes("2", "") - if err != nil { - t.Fatal(err) - } - if len(changes) != 1 { - t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) - } - change := changes[0] - - expectedPath := "/test.txt" - if change.Path != expectedPath { - t.Fatalf("Expected path %s got %s", expectedPath, change.Path) - } - - if change.Kind != archive.ChangeAdd { - t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) - } - - if err := d.CreateReadWrite("3", "2", "", nil); err != nil { - t.Fatal(err) - } - mntPoint, err = d.Get("3", "") - if err != nil { - t.Fatal(err) - } - - // Create a file to save in the mountpoint - f, err = os.Create(path.Join(mntPoint, "test2.txt")) - if err != nil { - t.Fatal(err) - } - - if _, err := f.WriteString("testline"); err != nil { - t.Fatal(err) - } - if err := f.Close(); err != nil { - t.Fatal(err) - } - - changes, err = d.Changes("3", "") - if err != nil { - t.Fatal(err) - } - - if len(changes) != 1 { - t.Fatalf("Dir 2 should have one change from parent got %d", len(changes)) - } - change = changes[0] - - expectedPath = "/test2.txt" - if change.Path != expectedPath { - t.Fatalf("Expected path %s got %s", expectedPath, change.Path) - } - - if change.Kind != archive.ChangeAdd { - t.Fatalf("Change kind should be ChangeAdd got %s", change.Kind) - } -} - -func TestDiffSize(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - - if err := d.CreateReadWrite("1", "", "", nil); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1", "") - if err != nil { - t.Fatal(err) - } - 
- // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - s, err := f.Stat() - if err != nil { - t.Fatal(err) - } - size = s.Size() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - diffSize, err := d.DiffSize("1", "") - if err != nil { - t.Fatal(err) - } - if diffSize != size { - t.Fatalf("Expected size to be %d got %d", size, diffSize) - } -} - -func TestChildDiffSize(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.CreateReadWrite("1", "", "", nil); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1", "") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - s, err := f.Stat() - if err != nil { - t.Fatal(err) - } - size = s.Size() - if err := f.Close(); err != nil { - t.Fatal(err) - } - - diffSize, err := d.DiffSize("1", "") - if err != nil { - t.Fatal(err) - } - if diffSize != size { - t.Fatalf("Expected size to be %d got %d", size, diffSize) - } - - if err := d.Create("2", "1", "", nil); err != nil { - t.Fatal(err) - } - - diffSize, err = d.DiffSize("2", "") - if err != nil { - t.Fatal(err) - } - // The diff size for the child should be zero - if diffSize != 0 { - t.Fatalf("Expected size to be %d got %d", 0, diffSize) - } -} - -func TestExists(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - - if d.Exists("none") { - t.Fatal("id name should not exist in the driver") - } - - if !d.Exists("1") { - t.Fatal("id 1 should exist in the driver") - } -} - -func TestStatus(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.Create("1", "", "", nil); err != nil { - t.Fatal(err) - } - - status := d.Status() - if status == nil || len(status) == 0 { - t.Fatal("Status should not be nil or empty") - } - rootDir := status[0] - dirs := status[2] - if rootDir[0] != "Root Dir" { - t.Fatalf("Expected Root Dir got %s", rootDir[0]) - } - if rootDir[1] != d.rootPath() { - t.Fatalf("Expected %s got %s", d.rootPath(), rootDir[1]) - } - if dirs[0] != "Dirs" { - t.Fatalf("Expected Dirs got %s", dirs[0]) - } - if dirs[1] != "1" { - t.Fatalf("Expected 1 got %s", dirs[1]) - } -} - -func TestApplyDiff(t *testing.T) { - d := newDriver(t) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - if err := d.CreateReadWrite("1", "", "", nil); err != nil { - t.Fatal(err) - } - - diffPath, err := d.Get("1", "") - if err != nil { - t.Fatal(err) - } - - // Add a file to the diff path with a fixed size - size := int64(1024) - - f, err := os.Create(path.Join(diffPath, "test_file")) - if err != nil { - t.Fatal(err) - } - if err := f.Truncate(size); err != nil { - t.Fatal(err) - } - f.Close() - - diff, err := d.Diff("1", "") - if err != nil { - t.Fatal(err) - } - - if err := d.Create("2", "", "", nil); err != nil { - t.Fatal(err) - } - if err := d.Create("3", "2", "", nil); err != nil { - t.Fatal(err) - } - - if err := d.applyDiff("3", diff); err != nil { - t.Fatal(err) - } - - // Ensure that the file is in the mount point for id 3 - - mountPoint, err := d.Get("3", "") - if err != nil { - t.Fatal(err) - } - if _, err := 
os.Stat(path.Join(mountPoint, "test_file")); err != nil { - t.Fatal(err) - } -} - -func hash(c string) string { - h := sha256.New() - fmt.Fprint(h, c) - return hex.EncodeToString(h.Sum(nil)) -} - -func testMountMoreThan42Layers(t *testing.T, mountPath string) { - if err := os.MkdirAll(mountPath, 0755); err != nil { - t.Fatal(err) - } - - defer os.RemoveAll(mountPath) - d := testInit(mountPath, t).(*Driver) - defer d.Cleanup() - var last string - var expected int - - for i := 1; i < 127; i++ { - expected++ - var ( - parent = fmt.Sprintf("%d", i-1) - current = fmt.Sprintf("%d", i) - ) - - if parent == "0" { - parent = "" - } else { - parent = hash(parent) - } - current = hash(current) - - if err := d.CreateReadWrite(current, parent, "", nil); err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } - point, err := d.Get(current, "") - if err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } - f, err := os.Create(path.Join(point, current)) - if err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } - f.Close() - - if i%10 == 0 { - if err := os.Remove(path.Join(point, parent)); err != nil { - t.Logf("Current layer %d", i) - t.Error(err) - } - expected-- - } - last = current - } - - // Perform the actual mount for the top most image - point, err := d.Get(last, "") - if err != nil { - t.Error(err) - } - files, err := ioutil.ReadDir(point) - if err != nil { - t.Error(err) - } - if len(files) != expected { - t.Errorf("Expected %d got %d", expected, len(files)) - } -} - -func TestMountMoreThan42Layers(t *testing.T) { - os.RemoveAll(tmpOuter) - testMountMoreThan42Layers(t, tmp) -} - -func TestMountMoreThan42LayersMatchingPathLength(t *testing.T) { - defer os.RemoveAll(tmpOuter) - zeroes := "0" - for { - // This finds a mount path so that when combined into aufs mount options - // 4096 byte boundary would be in between the paths or in permission - // section. 
For '/tmp' it will use '/tmp/aufs-tests/00000000/aufs' - mountPath := path.Join(tmpOuter, zeroes, "aufs") - pathLength := 77 + len(mountPath) - - if mod := 4095 % pathLength; mod == 0 || mod > pathLength-2 { - t.Logf("Using path: %s", mountPath) - testMountMoreThan42Layers(t, mountPath) - return - } - zeroes += "0" - } -} - -func BenchmarkConcurrentAccess(b *testing.B) { - b.StopTimer() - b.ResetTimer() - - d := newDriver(b) - defer os.RemoveAll(tmp) - defer d.Cleanup() - - numConcurent := 256 - // create a bunch of ids - var ids []string - for i := 0; i < numConcurent; i++ { - ids = append(ids, stringid.GenerateNonCryptoID()) - } - - if err := d.Create(ids[0], "", "", nil); err != nil { - b.Fatal(err) - } - - if err := d.Create(ids[1], ids[0], "", nil); err != nil { - b.Fatal(err) - } - - parent := ids[1] - ids = append(ids[2:]) - - chErr := make(chan error, numConcurent) - var outerGroup sync.WaitGroup - outerGroup.Add(len(ids)) - b.StartTimer() - - // here's the actual bench - for _, id := range ids { - go func(id string) { - defer outerGroup.Done() - if err := d.Create(id, parent, "", nil); err != nil { - b.Logf("Create %s failed", id) - chErr <- err - return - } - var innerGroup sync.WaitGroup - for i := 0; i < b.N; i++ { - innerGroup.Add(1) - go func() { - d.Get(id, "") - d.Put(id) - innerGroup.Done() - }() - } - innerGroup.Wait() - d.Remove(id) - }(id) - } - - outerGroup.Wait() - b.StopTimer() - close(chErr) - for err := range chErr { - if err != nil { - b.Log(err) - b.Fail() - } - } -} diff --git a/daemon/graphdriver/aufs/dirs.go b/daemon/graphdriver/aufs/dirs.go deleted file mode 100644 index eb298d9eeb..0000000000 --- a/daemon/graphdriver/aufs/dirs.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build linux - -package aufs - -import ( - "bufio" - "io/ioutil" - "os" - "path" -) - -// Return all the directories -func loadIds(root string) ([]string, error) { - dirs, err := ioutil.ReadDir(root) - if err != nil { - return nil, err - } - out := []string{} - for _, d := range dirs { - if !d.IsDir() { - out = append(out, d.Name()) - } - } - return out, nil -} - -// Read the layers file for the current id and return all the -// layers represented by new lines in the file -// -// If there are no lines in the file then the id has no parent -// and an empty slice is returned. -func getParentIds(root, id string) ([]string, error) { - f, err := os.Open(path.Join(root, "layers", id)) - if err != nil { - return nil, err - } - defer f.Close() - - out := []string{} - s := bufio.NewScanner(f) - - for s.Scan() { - if t := s.Text(); t != "" { - out = append(out, s.Text()) - } - } - return out, s.Err() -} - -func (a *Driver) getMountpoint(id string) string { - return path.Join(a.mntPath(), id) -} - -func (a *Driver) mntPath() string { - return path.Join(a.rootPath(), "mnt") -} - -func (a *Driver) getDiffPath(id string) string { - return path.Join(a.diffPath(), id) -} - -func (a *Driver) diffPath() string { - return path.Join(a.rootPath(), "diff") -} diff --git a/daemon/graphdriver/aufs/mount.go b/daemon/graphdriver/aufs/mount.go deleted file mode 100644 index da1e892f44..0000000000 --- a/daemon/graphdriver/aufs/mount.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build linux - -package aufs - -import ( - "os/exec" - "syscall" - - "github.com/Sirupsen/logrus" -) - -// Unmount the target specified. 
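BenchmarkConcurrentAccess above fans out one goroutine per id, funnels failures into a buffered error channel, and only drains that channel after the WaitGroup completes. The same fan-out/collect skeleton in isolation; the worker body is a placeholder for the real Create/Get/Put calls:

    package main

    import (
        "fmt"
        "sync"
    )

    func main() {
        ids := []string{"a", "b", "c", "d"}
        chErr := make(chan error, len(ids)) // buffered so workers never block on send
        var wg sync.WaitGroup
        wg.Add(len(ids))
        for _, id := range ids {
            go func(id string) {
                defer wg.Done()
                // Placeholder for real work, e.g. d.Create / d.Get / d.Put.
                if id == "c" {
                    chErr <- fmt.Errorf("work on %s failed", id)
                }
            }(id)
        }
        wg.Wait()
        close(chErr) // safe: every sender has finished
        for err := range chErr {
            fmt.Println(err)
        }
    }
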
-func Unmount(target string) error { - if err := exec.Command("auplink", target, "flush").Run(); err != nil { - logrus.Warnf("Couldn't run auplink before unmount %s: %s", target, err) - } - if err := syscall.Unmount(target, 0); err != nil { - return err - } - return nil -} diff --git a/daemon/graphdriver/aufs/mount_linux.go b/daemon/graphdriver/aufs/mount_linux.go deleted file mode 100644 index 8062bae420..0000000000 --- a/daemon/graphdriver/aufs/mount_linux.go +++ /dev/null @@ -1,7 +0,0 @@ -package aufs - -import "syscall" - -func mount(source string, target string, fstype string, flags uintptr, data string) error { - return syscall.Mount(source, target, fstype, flags, data) -} diff --git a/daemon/graphdriver/aufs/mount_unsupported.go b/daemon/graphdriver/aufs/mount_unsupported.go deleted file mode 100644 index d030b06637..0000000000 --- a/daemon/graphdriver/aufs/mount_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package aufs - -import "errors" - -// MsRemount declared to specify a non-linux system mount. -const MsRemount = 0 - -func mount(source string, target string, fstype string, flags uintptr, data string) (err error) { - return errors.New("mount is not implemented on this platform") -} diff --git a/daemon/graphdriver/btrfs/btrfs.go b/daemon/graphdriver/btrfs/btrfs.go deleted file mode 100644 index 1bc85fc23e..0000000000 --- a/daemon/graphdriver/btrfs/btrfs.go +++ /dev/null @@ -1,520 +0,0 @@ -// +build linux - -package btrfs - -/* -#include -#include -#include -#include - -static void set_name_btrfs_ioctl_vol_args_v2(struct btrfs_ioctl_vol_args_v2* btrfs_struct, const char* value) { - snprintf(btrfs_struct->name, BTRFS_SUBVOL_NAME_MAX, "%s", value); -} -*/ -import "C" - -import ( - "fmt" - "os" - "path" - "path/filepath" - "strings" - "syscall" - "unsafe" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/go-units" - "github.com/opencontainers/runc/libcontainer/label" -) - -func init() { - graphdriver.Register("btrfs", Init) -} - -var ( - quotaEnabled = false - userDiskQuota = false -) - -type btrfsOptions struct { - minSpace uint64 - size uint64 -} - -// Init returns a new BTRFS driver. -// An error is returned if BTRFS is not supported. 
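The mount_linux.go / mount_unsupported.go pair above is the standard Go build-tag split: one file supplies the real syscall on Linux, the other returns a stub error everywhere else, and callers stay platform-agnostic. A hedged sketch of the same layout, using the pre-Go-1.17 `// +build` constraint style seen in this patch; the file and function names are hypothetical:

    // frob_linux.go
    // +build linux

    package frob

    import "syscall"

    // On Linux, delegate to the real kernel call.
    func frob(target string) error {
        return syscall.Unmount(target, 0)
    }

    // frob_unsupported.go
    // +build !linux

    package frob

    import "errors"

    // Everywhere else, fail with a clear sentinel error.
    func frob(target string) error {
        return errors.New("frob is not implemented on this platform")
    }
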
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - - fsMagic, err := graphdriver.GetFSMagic(home) - if err != nil { - return nil, err - } - - if fsMagic != graphdriver.FsMagicBtrfs { - return nil, graphdriver.ErrPrerequisites - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { - return nil, err - } - - if err := mount.MakePrivate(home); err != nil { - return nil, err - } - - opt, err := parseOptions(options) - if err != nil { - return nil, err - } - - if userDiskQuota { - if err := subvolEnableQuota(home); err != nil { - return nil, err - } - quotaEnabled = true - } - - driver := &Driver{ - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - options: opt, - } - - return graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps), nil -} - -func parseOptions(opt []string) (btrfsOptions, error) { - var options btrfsOptions - for _, option := range opt { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return options, err - } - key = strings.ToLower(key) - switch key { - case "btrfs.min_space": - minSpace, err := units.RAMInBytes(val) - if err != nil { - return options, err - } - userDiskQuota = true - options.minSpace = uint64(minSpace) - default: - return options, fmt.Errorf("Unknown option %s", key) - } - } - return options, nil -} - -// Driver contains information about the filesystem mounted. -type Driver struct { - //root of the file system - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - options btrfsOptions -} - -// String prints the name of the driver (btrfs). -func (d *Driver) String() string { - return "btrfs" -} - -// Status returns current driver information in a two dimensional string array. -// Output contains "Build Version" and "Library Version" of the btrfs libraries used. -// Version information can be used to check compatibility with your kernel. -func (d *Driver) Status() [][2]string { - status := [][2]string{} - if bv := btrfsBuildVersion(); bv != "-" { - status = append(status, [2]string{"Build Version", bv}) - } - if lv := btrfsLibVersion(); lv != -1 { - status = append(status, [2]string{"Library Version", fmt.Sprintf("%d", lv)}) - } - return status -} - -// GetMetadata returns empty metadata for this driver. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -// Cleanup unmounts the home directory. 
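parseOptions above follows the usual graphdriver convention: split each `--storage-opt` entry into key and value, lower-case the key, and fail fast on anything unrecognized. A simplified stand-in for that flow; the real code uses parsers.ParseKeyValueOpt and go-units' RAMInBytes for the value, which are replaced here by a plain split:

    package main

    import (
        "fmt"
        "strings"
    )

    // parseOptions splits each option on the first '=', normalizes the
    // key, and rejects unknown keys, mirroring the driver's structure.
    func parseOptions(opts []string) (map[string]string, error) {
        parsed := map[string]string{}
        for _, opt := range opts {
            parts := strings.SplitN(opt, "=", 2)
            if len(parts) != 2 {
                return nil, fmt.Errorf("bad option format: %s", opt)
            }
            key := strings.ToLower(parts[0])
            switch key {
            case "btrfs.min_space":
                parsed[key] = parts[1] // real code converts "10G" etc. to bytes
            default:
                return nil, fmt.Errorf("Unknown option %s", key)
            }
        }
        return parsed, nil
    }

    func main() {
        fmt.Println(parseOptions([]string{"btrfs.min_space=10G"}))
    }
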
-func (d *Driver) Cleanup() error { - if quotaEnabled { - if err := subvolDisableQuota(d.home); err != nil { - return err - } - } - - return mount.Unmount(d.home) -} - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func openDir(path string) (*C.DIR, error) { - Cpath := C.CString(path) - defer free(Cpath) - - dir := C.opendir(Cpath) - if dir == nil { - return nil, fmt.Errorf("Can't open dir") - } - return dir, nil -} - -func closeDir(dir *C.DIR) { - if dir != nil { - C.closedir(dir) - } -} - -func getDirFd(dir *C.DIR) uintptr { - return uintptr(C.dirfd(dir)) -} - -func subvolCreate(path, name string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_vol_args - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SUBVOL_CREATE, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs subvolume: %v", errno.Error()) - } - return nil -} - -func subvolSnapshot(src, dest, name string) error { - srcDir, err := openDir(src) - if err != nil { - return err - } - defer closeDir(srcDir) - - destDir, err := openDir(dest) - if err != nil { - return err - } - defer closeDir(destDir) - - var args C.struct_btrfs_ioctl_vol_args_v2 - args.fd = C.__s64(getDirFd(srcDir)) - - var cs = C.CString(name) - C.set_name_btrfs_ioctl_vol_args_v2(&args, cs) - C.free(unsafe.Pointer(cs)) - - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(destDir), C.BTRFS_IOC_SNAP_CREATE_V2, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to create btrfs snapshot: %v", errno.Error()) - } - return nil -} - -func isSubvolume(p string) (bool, error) { - var bufStat syscall.Stat_t - if err := syscall.Lstat(p, &bufStat); err != nil { - return false, err - } - - // return true if it is a btrfs subvolume - return bufStat.Ino == C.BTRFS_FIRST_FREE_OBJECTID, nil -} - -func subvolDelete(dirpath, name string) error { - dir, err := openDir(dirpath) - if err != nil { - return err - } - defer closeDir(dir) - fullPath := path.Join(dirpath, name) - - var args C.struct_btrfs_ioctl_vol_args - - // walk the btrfs subvolumes - walkSubvolumes := func(p string, f os.FileInfo, err error) error { - if err != nil { - if os.IsNotExist(err) && p != fullPath { - // missing most likely because the path was a subvolume that got removed in the previous iteration - // since it's gone anyway, we don't care - return nil - } - return fmt.Errorf("error walking subvolumes: %v", err) - } - // we want to check children only so skip itself - // it will be removed after the filepath walk anyways - if f.IsDir() && p != fullPath { - sv, err := isSubvolume(p) - if err != nil { - return fmt.Errorf("Failed to test if %s is a btrfs subvolume: %v", p, err) - } - if sv { - if err := subvolDelete(path.Dir(p), f.Name()); err != nil { - return fmt.Errorf("Failed to destroy btrfs child subvolume (%s) of parent (%s): %v", p, dirpath, err) - } - } - } - return nil - } - if err := filepath.Walk(path.Join(dirpath, name), walkSubvolumes); err != nil { - return fmt.Errorf("Recursively walking subvolumes for %s failed: %v", dirpath, err) - } - - // all subvolumes have been removed - // now remove the one originally passed in - for i, c := range []byte(name) { - args.name[i] = C.char(c) - } - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_SNAP_DESTROY, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return 
fmt.Errorf("Failed to destroy btrfs snapshot %s for %s: %v", dirpath, name, errno.Error()) - } - return nil -} - -func subvolEnableQuota(path string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_quota_ctl_args - args.cmd = C.BTRFS_QUOTA_CTL_ENABLE - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to enable btrfs quota for %s: %v", dir, errno.Error()) - } - - return nil -} - -func subvolDisableQuota(path string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_quota_ctl_args - args.cmd = C.BTRFS_QUOTA_CTL_DISABLE - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_CTL, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to disable btrfs quota for %s: %v", dir, errno.Error()) - } - - return nil -} - -func subvolRescanQuota(path string) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_quota_rescan_args - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QUOTA_RESCAN_WAIT, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to rescan btrfs quota for %s: %v", dir, errno.Error()) - } - - return nil -} - -func subvolLimitQgroup(path string, size uint64) error { - dir, err := openDir(path) - if err != nil { - return err - } - defer closeDir(dir) - - var args C.struct_btrfs_ioctl_qgroup_limit_args - args.lim.max_referenced = C.__u64(size) - args.lim.flags = C.BTRFS_QGROUP_LIMIT_MAX_RFER - _, _, errno := syscall.Syscall(syscall.SYS_IOCTL, getDirFd(dir), C.BTRFS_IOC_QGROUP_LIMIT, - uintptr(unsafe.Pointer(&args))) - if errno != 0 { - return fmt.Errorf("Failed to limit qgroup for %s: %v", dir, errno.Error()) - } - - return nil -} - -func (d *Driver) subvolumesDir() string { - return path.Join(d.home, "subvolumes") -} - -func (d *Driver) subvolumesDirID(id string) string { - return path.Join(d.subvolumesDir(), id) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.Create(id, parent, mountLabel, storageOpt) -} - -// Create the filesystem with given id. 
-func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { - subvolumes := path.Join(d.home, "subvolumes") - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - return err - } - if err := idtools.MkdirAllAs(subvolumes, 0700, rootUID, rootGID); err != nil { - return err - } - if parent == "" { - if err := subvolCreate(subvolumes, id); err != nil { - return err - } - } else { - parentDir := d.subvolumesDirID(parent) - st, err := os.Stat(parentDir) - if err != nil { - return err - } - if !st.IsDir() { - return fmt.Errorf("%s: not a directory", parentDir) - } - if err := subvolSnapshot(parentDir, subvolumes, id); err != nil { - return err - } - } - - if _, ok := storageOpt["size"]; ok { - driver := &Driver{} - if err := d.parseStorageOpt(storageOpt, driver); err != nil { - return err - } - if err := d.setStorageSize(path.Join(subvolumes, id), driver); err != nil { - return err - } - } - - // if we have a remapped root (user namespaces enabled), change the created snapshot - // dir ownership to match - if rootUID != 0 || rootGID != 0 { - if err := os.Chown(path.Join(subvolumes, id), rootUID, rootGID); err != nil { - return err - } - } - - return label.Relabel(path.Join(subvolumes, id), mountLabel, false) -} - -// Parse btrfs storage options -func (d *Driver) parseStorageOpt(storageOpt map[string]string, driver *Driver) error { - // Read size to change the subvolume disk quota per container - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return err - } - driver.options.size = uint64(size) - default: - return fmt.Errorf("Unknown option %s", key) - } - } - - return nil -} - -// Set btrfs storage size -func (d *Driver) setStorageSize(dir string, driver *Driver) error { - if driver.options.size <= 0 { - return fmt.Errorf("btrfs: invalid storage size: %s", units.HumanSize(float64(driver.options.size))) - } - if d.options.minSpace > 0 && driver.options.size < d.options.minSpace { - return fmt.Errorf("btrfs: storage size cannot be less than %s", units.HumanSize(float64(d.options.minSpace))) - } - - if !quotaEnabled { - if err := subvolEnableQuota(d.home); err != nil { - return err - } - quotaEnabled = true - } - - if err := subvolLimitQgroup(dir, driver.options.size); err != nil { - return err - } - - return nil -} - -// Remove the filesystem with given id. -func (d *Driver) Remove(id string) error { - dir := d.subvolumesDirID(id) - if _, err := os.Stat(dir); err != nil { - return err - } - if err := subvolDelete(d.subvolumesDir(), id); err != nil { - return err - } - if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) { - return err - } - if err := subvolRescanQuota(d.home); err != nil { - return err - } - return nil -} - -// Get the requested filesystem id. -func (d *Driver) Get(id, mountLabel string) (string, error) { - dir := d.subvolumesDirID(id) - st, err := os.Stat(dir) - if err != nil { - return "", err - } - - if !st.IsDir() { - return "", fmt.Errorf("%s: not a directory", dir) - } - - return dir, nil -} - -// Put is not implemented for BTRFS as there is no cleanup required for the id. -func (d *Driver) Put(id string) error { - // Get() creates no runtime resources (like e.g. mounts) - // so this doesn't need to do anything. - return nil -} - -// Exists checks if the id exists in the filesystem. 
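setStorageSize above validates a per-container quota before applying it: the requested size must be positive and no smaller than the configured btrfs.min_space floor. The check in isolation, with illustrative byte values:

    package main

    import "fmt"

    // checkQuota mirrors the validation in setStorageSize: a requested
    // size must be nonzero and at least the configured minimum.
    func checkQuota(size, minSpace uint64) error {
        if size == 0 {
            return fmt.Errorf("invalid storage size: %d", size)
        }
        if minSpace > 0 && size < minSpace {
            return fmt.Errorf("storage size cannot be less than %d bytes", minSpace)
        }
        return nil
    }

    func main() {
        fmt.Println(checkQuota(1<<30, 2<<30)) // 1 GiB requested, 2 GiB floor: rejected
        fmt.Println(checkQuota(4<<30, 2<<30)) // ok
    }
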
-func (d *Driver) Exists(id string) bool { - dir := d.subvolumesDirID(id) - _, err := os.Stat(dir) - return err == nil -} diff --git a/daemon/graphdriver/btrfs/btrfs_test.go b/daemon/graphdriver/btrfs/btrfs_test.go deleted file mode 100644 index 54442c0a97..0000000000 --- a/daemon/graphdriver/btrfs/btrfs_test.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build linux - -package btrfs - -import ( - "os" - "path" - "testing" - - "github.com/docker/docker/daemon/graphdriver/graphtest" -) - -// This avoids creating a new driver for each test if all tests are run -// Make sure to put new tests between TestBtrfsSetup and TestBtrfsTeardown -func TestBtrfsSetup(t *testing.T) { - graphtest.GetDriver(t, "btrfs") -} - -func TestBtrfsCreateEmpty(t *testing.T) { - graphtest.DriverTestCreateEmpty(t, "btrfs") -} - -func TestBtrfsCreateBase(t *testing.T) { - graphtest.DriverTestCreateBase(t, "btrfs") -} - -func TestBtrfsCreateSnap(t *testing.T) { - graphtest.DriverTestCreateSnap(t, "btrfs") -} - -func TestBtrfsSubvolDelete(t *testing.T) { - d := graphtest.GetDriver(t, "btrfs") - if err := d.CreateReadWrite("test", "", "", nil); err != nil { - t.Fatal(err) - } - defer graphtest.PutDriver(t) - - dir, err := d.Get("test", "") - if err != nil { - t.Fatal(err) - } - defer d.Put("test") - - if err := subvolCreate(dir, "subvoltest"); err != nil { - t.Fatal(err) - } - - if _, err := os.Stat(path.Join(dir, "subvoltest")); err != nil { - t.Fatal(err) - } - - if err := d.Remove("test"); err != nil { - t.Fatal(err) - } - - if _, err := os.Stat(path.Join(dir, "subvoltest")); !os.IsNotExist(err) { - t.Fatalf("expected not exist error on nested subvol, got: %v", err) - } -} - -func TestBtrfsTeardown(t *testing.T) { - graphtest.PutDriver(t) -} diff --git a/daemon/graphdriver/btrfs/dummy_unsupported.go b/daemon/graphdriver/btrfs/dummy_unsupported.go deleted file mode 100644 index f07088887a..0000000000 --- a/daemon/graphdriver/btrfs/dummy_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux !cgo - -package btrfs diff --git a/daemon/graphdriver/btrfs/version.go b/daemon/graphdriver/btrfs/version.go deleted file mode 100644 index 73d90cdd71..0000000000 --- a/daemon/graphdriver/btrfs/version.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build linux,!btrfs_noversion - -package btrfs - -/* -#include - -// around version 3.16, they did not define lib version yet -#ifndef BTRFS_LIB_VERSION -#define BTRFS_LIB_VERSION -1 -#endif - -// upstream had removed it, but now it will be coming back -#ifndef BTRFS_BUILD_VERSION -#define BTRFS_BUILD_VERSION "-" -#endif -*/ -import "C" - -func btrfsBuildVersion() string { - return string(C.BTRFS_BUILD_VERSION) -} - -func btrfsLibVersion() int { - return int(C.BTRFS_LIB_VERSION) -} diff --git a/daemon/graphdriver/btrfs/version_none.go b/daemon/graphdriver/btrfs/version_none.go deleted file mode 100644 index f802fbc629..0000000000 --- a/daemon/graphdriver/btrfs/version_none.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build linux,btrfs_noversion - -package btrfs - -// TODO(vbatts) remove this work-around once supported linux distros are on -// btrfs utilities of >= 3.16.1 - -func btrfsBuildVersion() string { - return "-" -} - -func btrfsLibVersion() int { - return -1 -} diff --git a/daemon/graphdriver/btrfs/version_test.go b/daemon/graphdriver/btrfs/version_test.go deleted file mode 100644 index 15a6e75cb3..0000000000 --- a/daemon/graphdriver/btrfs/version_test.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build linux,!btrfs_noversion - -package btrfs - -import ( - "testing" -) - -func TestLibVersion(t 
*testing.T) { - if btrfsLibVersion() <= 0 { - t.Errorf("expected output from btrfs lib version > 0") - } -} diff --git a/daemon/graphdriver/counter.go b/daemon/graphdriver/counter.go deleted file mode 100644 index 5ea604f5b6..0000000000 --- a/daemon/graphdriver/counter.go +++ /dev/null @@ -1,67 +0,0 @@ -package graphdriver - -import "sync" - -type minfo struct { - check bool - count int -} - -// RefCounter is a generic counter for use by graphdriver Get/Put calls -type RefCounter struct { - counts map[string]*minfo - mu sync.Mutex - checker Checker -} - -// NewRefCounter returns a new RefCounter -func NewRefCounter(c Checker) *RefCounter { - return &RefCounter{ - checker: c, - counts: make(map[string]*minfo), - } -} - -// Increment increaes the ref count for the given id and returns the current count -func (c *RefCounter) Increment(path string) int { - c.mu.Lock() - m := c.counts[path] - if m == nil { - m = &minfo{} - c.counts[path] = m - } - // if we are checking this path for the first time check to make sure - // if it was already mounted on the system and make sure we have a correct ref - // count if it is mounted as it is in use. - if !m.check { - m.check = true - if c.checker.IsMounted(path) { - m.count++ - } - } - m.count++ - c.mu.Unlock() - return m.count -} - -// Decrement decreases the ref count for the given id and returns the current count -func (c *RefCounter) Decrement(path string) int { - c.mu.Lock() - m := c.counts[path] - if m == nil { - m = &minfo{} - c.counts[path] = m - } - // if we are checking this path for the first time check to make sure - // if it was already mounted on the system and make sure we have a correct ref - // count if it is mounted as it is in use. - if !m.check { - m.check = true - if c.checker.IsMounted(path) { - m.count++ - } - } - m.count-- - c.mu.Unlock() - return m.count -} diff --git a/daemon/graphdriver/devmapper/README.md b/daemon/graphdriver/devmapper/README.md deleted file mode 100644 index 8de7fc226c..0000000000 --- a/daemon/graphdriver/devmapper/README.md +++ /dev/null @@ -1,96 +0,0 @@ -## devicemapper - a storage backend based on Device Mapper - -### Theory of operation - -The device mapper graphdriver uses the device mapper thin provisioning -module (dm-thinp) to implement CoW snapshots. The preferred model is -to have a thin pool reserved outside of Docker and passed to the -daemon via the `--storage-opt dm.thinpooldev` option. - -As a fallback if no thin pool is provided, loopback files will be -created. Loopback is very slow, but can be used without any -pre-configuration of storage. It is strongly recommended that you do -not use loopback in production. Ensure your Docker daemon has a -`--storage-opt dm.thinpooldev` argument provided. - -In loopback, a thin pool is created at `/var/lib/docker/devicemapper` -(devicemapper graph location) based on two block devices, one for -data and one for metadata. By default these block devices are created -automatically by using loopback mounts of automatically created sparse -files. - -The default loopback files used are -`/var/lib/docker/devicemapper/devicemapper/data` and -`/var/lib/docker/devicemapper/devicemapper/metadata`. Additional metadata -required to map from docker entities to the corresponding devicemapper -volumes is stored in the `/var/lib/docker/devicemapper/devicemapper/json` -file (encoded as Json). 
- -In order to support multiple devicemapper graphs on a system, the thin -pool will be named something like: `docker-0:33-19478248-pool`, where -the `0:33` part is the minor/major device nr and `19478248` is the -inode number of the `/var/lib/docker/devicemapper` directory. - -On the thin pool, docker automatically creates a base thin device, -called something like `docker-0:33-19478248-base` of a fixed -size. This is automatically formatted with an empty filesystem on -creation. This device is the base of all docker images and -containers. All base images are snapshots of this device and those -images are then in turn used as snapshots for other images and -eventually containers. - -### Information on `docker info` - -As of docker-1.4.1, `docker info` when using the `devicemapper` storage driver -will display something like: - - $ sudo docker info - [...] - Storage Driver: devicemapper - Pool Name: docker-253:1-17538953-pool - Pool Blocksize: 65.54 kB - Base Device Size: 107.4 GB - Data file: /dev/loop4 - Metadata file: /dev/loop4 - Data Space Used: 2.536 GB - Data Space Total: 107.4 GB - Data Space Available: 104.8 GB - Metadata Space Used: 7.93 MB - Metadata Space Total: 2.147 GB - Metadata Space Available: 2.14 GB - Udev Sync Supported: true - Data loop file: /home/docker/devicemapper/devicemapper/data - Metadata loop file: /home/docker/devicemapper/devicemapper/metadata - Library Version: 1.02.82-git (2013-10-04) - [...] - -#### status items - -Each item in the indented section under `Storage Driver: devicemapper` are -status information about the driver. - * `Pool Name` name of the devicemapper pool for this driver. - * `Pool Blocksize` tells the blocksize the thin pool was initialized with. This only changes on creation. - * `Base Device Size` tells the maximum size of a container and image - * `Data file` blockdevice file used for the devicemapper data - * `Metadata file` blockdevice file used for the devicemapper metadata - * `Data Space Used` tells how much of `Data file` is currently used - * `Data Space Total` tells max size the `Data file` - * `Data Space Available` tells how much free space there is in the `Data file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. - * `Metadata Space Used` tells how much of `Metadata file` is currently used - * `Metadata Space Total` tells max size the `Metadata file` - * `Metadata Space Available` tells how much free space there is in the `Metadata file`. If you are using a loop device this will report the actual space available to the loop device on the underlying filesystem. - * `Udev Sync Supported` tells whether devicemapper is able to sync with Udev. Should be `true`. - * `Data loop file` file attached to `Data file`, if loopback device is used - * `Metadata loop file` file attached to `Metadata file`, if loopback device is used - * `Library Version` from the libdevmapper used - -### About the devicemapper options - -The devicemapper backend supports some options that you can specify -when starting the docker daemon using the `--storage-opt` flags. -This uses the `dm` prefix and would be used something like `docker daemon --storage-opt dm.foo=bar`. - -These options are currently documented both in [the man -page](../../../man/docker.1.md) and in [the online -documentation](https://docs.docker.com/reference/commandline/daemon/#storage-driver-options). -If you add an options, update both the `man` page and the documentation. 
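The status items listed above are produced by each graphdriver's Status() method as ordered label/value pairs, and `docker info` prints them as the indented section shown earlier. A tiny sketch of that rendering, using the [][2]string shape the graphdrivers in this patch return; the values are copied from the sample output above:

    package main

    import "fmt"

    // Graphdrivers report status as ordered label/value pairs
    // ([][2]string); printing them one per line reproduces the indented
    // section under "Storage Driver:" in `docker info`.
    func main() {
        status := [][2]string{
            {"Pool Name", "docker-253:1-17538953-pool"},
            {"Pool Blocksize", "65.54 kB"},
            {"Udev Sync Supported", "true"},
        }
        for _, pair := range status {
            fmt.Printf(" %s: %s\n", pair[0], pair[1])
        }
    }
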
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go deleted file mode 100644 index 0e663b4e58..0000000000 --- a/daemon/graphdriver/devmapper/deviceset.go +++ /dev/null @@ -1,2627 +0,0 @@ -// +build linux - -package devmapper - -import ( - "bufio" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/devicemapper" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/loopback" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/go-units" - - "github.com/opencontainers/runc/libcontainer/label" -) - -var ( - defaultDataLoopbackSize int64 = 100 * 1024 * 1024 * 1024 - defaultMetaDataLoopbackSize int64 = 2 * 1024 * 1024 * 1024 - defaultBaseFsSize uint64 = 10 * 1024 * 1024 * 1024 - defaultThinpBlockSize uint32 = 128 // 64K = 128 512b sectors - defaultUdevSyncOverride = false - maxDeviceID = 0xffffff // 24 bit, pool limit - deviceIDMapSz = (maxDeviceID + 1) / 8 - // We retry device removal so many a times that even error messages - // will fill up console during normal operation. So only log Fatal - // messages by default. - logLevel = devicemapper.LogLevelFatal - driverDeferredRemovalSupport = false - enableDeferredRemoval = false - enableDeferredDeletion = false - userBaseSize = false - defaultMinFreeSpacePercent uint32 = 10 -) - -const deviceSetMetaFile string = "deviceset-metadata" -const transactionMetaFile string = "transaction-metadata" - -type transaction struct { - OpenTransactionID uint64 `json:"open_transaction_id"` - DeviceIDHash string `json:"device_hash"` - DeviceID int `json:"device_id"` -} - -type devInfo struct { - Hash string `json:"-"` - DeviceID int `json:"device_id"` - Size uint64 `json:"size"` - TransactionID uint64 `json:"transaction_id"` - Initialized bool `json:"initialized"` - Deleted bool `json:"deleted"` - devices *DeviceSet - - // The global DeviceSet lock guarantees that we serialize all - // the calls to libdevmapper (which is not threadsafe), but we - // sometimes release that lock while sleeping. In that case - // this per-device lock is still held, protecting against - // other accesses to the device that we're doing the wait on. - // - // WARNING: In order to avoid AB-BA deadlocks when releasing - // the global lock while holding the per-device locks all - // device locks must be acquired *before* the device lock, and - // multiple device locks should be acquired parent before child. 
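The warning above pins a single acquisition order between the per-device locks and the global DeviceSet lock so that no AB-BA deadlock is possible. A sketch of enforcing one consistent order; the types are illustrative, and the order shown (per-device first, then global) is an assumption read off the comment, not a statement of the real code's behavior:

    package main

    import (
        "fmt"
        "sync"
    )

    type device struct{ mu sync.Mutex } // per-device lock

    type deviceSet struct {
        mu sync.Mutex // global lock serializing set-wide operations
    }

    // withDevice always takes the per-device lock before the global lock,
    // so no goroutine can ever hold the two in the opposite order.
    func (s *deviceSet) withDevice(d *device, fn func()) {
        d.mu.Lock()
        defer d.mu.Unlock()
        s.mu.Lock()
        defer s.mu.Unlock()
        fn()
    }

    func main() {
        s := &deviceSet{}
        d := &device{}
        s.withDevice(d, func() { fmt.Println("critical section") })
    }
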
- lock sync.Mutex -} - -type metaData struct { - Devices map[string]*devInfo `json:"Devices"` -} - -// DeviceSet holds information about list of devices -type DeviceSet struct { - metaData `json:"-"` - sync.Mutex `json:"-"` // Protects all fields of DeviceSet and serializes calls into libdevmapper - root string - devicePrefix string - TransactionID uint64 `json:"-"` - NextDeviceID int `json:"next_device_id"` - deviceIDMap []byte - - // Options - dataLoopbackSize int64 - metaDataLoopbackSize int64 - baseFsSize uint64 - filesystem string - mountOptions string - mkfsArgs []string - dataDevice string // block or loop dev - dataLoopFile string // loopback file, if used - metadataDevice string // block or loop dev - metadataLoopFile string // loopback file, if used - doBlkDiscard bool - thinpBlockSize uint32 - thinPoolDevice string - transaction `json:"-"` - overrideUdevSyncCheck bool - deferredRemove bool // use deferred removal - deferredDelete bool // use deferred deletion - BaseDeviceUUID string // save UUID of base device - BaseDeviceFilesystem string // save filesystem of base device - nrDeletedDevices uint // number of deleted devices - deletionWorkerTicker *time.Ticker - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - minFreeSpacePercent uint32 //min free space percentage in thinpool -} - -// DiskUsage contains information about disk usage and is used when reporting Status of a device. -type DiskUsage struct { - // Used bytes on the disk. - Used uint64 - // Total bytes on the disk. - Total uint64 - // Available bytes on the disk. - Available uint64 -} - -// Status returns the information about the device. -type Status struct { - // PoolName is the name of the data pool. - PoolName string - // DataFile is the actual block device for data. - DataFile string - // DataLoopback loopback file, if used. - DataLoopback string - // MetadataFile is the actual block device for metadata. - MetadataFile string - // MetadataLoopback is the loopback file, if used. - MetadataLoopback string - // Data is the disk used for data. - Data DiskUsage - // Metadata is the disk used for meta data. - Metadata DiskUsage - // BaseDeviceSize is base size of container and image - BaseDeviceSize uint64 - // BaseDeviceFS is backing filesystem. - BaseDeviceFS string - // SectorSize size of the vector. - SectorSize uint64 - // UdevSyncSupported is true if sync is supported. - UdevSyncSupported bool - // DeferredRemoveEnabled is true then the device is not unmounted. - DeferredRemoveEnabled bool - // True if deferred deletion is enabled. This is different from - // deferred removal. "removal" means that device mapper device is - // deactivated. Thin device is still in thin pool and can be activated - // again. But "deletion" means that thin device will be deleted from - // thin pool and it can't be activated again. - DeferredDeleteEnabled bool - DeferredDeletedDeviceCount uint - MinFreeSpace uint64 -} - -// Structure used to export image/container metadata in docker inspect. -type deviceMetadata struct { - deviceID int - deviceSize uint64 // size in bytes - deviceName string // Device name as used during activation -} - -// DevStatus returns information about device mounted containing its id, size and sector information. -type DevStatus struct { - // DeviceID is the id of the device. - DeviceID int - // Size is the size of the filesystem. - Size uint64 - // TransactionID is a unique integer per device set used to identify an operation on the file system, this number is incremental. 
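devInfo and DeviceSet above rely on struct tags to control what lands in the on-disk metadata: fields tagged `json:"-"` (such as devInfo's Hash) stay in memory only, while device_id, size, and the rest are persisted. A quick demonstration of that tag behavior with a cut-down struct:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // info mirrors the tag usage in devInfo: Hash is excluded from the
    // serialized metadata, the other fields are written out under their
    // snake_case names.
    type info struct {
        Hash     string `json:"-"`
        DeviceID int    `json:"device_id"`
        Size     uint64 `json:"size"`
    }

    func main() {
        data, _ := json.Marshal(info{Hash: "abc", DeviceID: 7, Size: 1024})
        fmt.Println(string(data)) // {"device_id":7,"size":1024}
    }
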
- TransactionID uint64 - // SizeInSectors indicates the size of the sectors allocated. - SizeInSectors uint64 - // MappedSectors indicates number of mapped sectors. - MappedSectors uint64 - // HighestMappedSector is the pointer to the highest mapped sector. - HighestMappedSector uint64 -} - -func getDevName(name string) string { - return "/dev/mapper/" + name -} - -func (info *devInfo) Name() string { - hash := info.Hash - if hash == "" { - hash = "base" - } - return fmt.Sprintf("%s-%s", info.devices.devicePrefix, hash) -} - -func (info *devInfo) DevName() string { - return getDevName(info.Name()) -} - -func (devices *DeviceSet) loopbackDir() string { - return path.Join(devices.root, "devicemapper") -} - -func (devices *DeviceSet) metadataDir() string { - return path.Join(devices.root, "metadata") -} - -func (devices *DeviceSet) metadataFile(info *devInfo) string { - file := info.Hash - if file == "" { - file = "base" - } - return path.Join(devices.metadataDir(), file) -} - -func (devices *DeviceSet) transactionMetaFile() string { - return path.Join(devices.metadataDir(), transactionMetaFile) -} - -func (devices *DeviceSet) deviceSetMetaFile() string { - return path.Join(devices.metadataDir(), deviceSetMetaFile) -} - -func (devices *DeviceSet) oldMetadataFile() string { - return path.Join(devices.loopbackDir(), "json") -} - -func (devices *DeviceSet) getPoolName() string { - if devices.thinPoolDevice == "" { - return devices.devicePrefix + "-pool" - } - return devices.thinPoolDevice -} - -func (devices *DeviceSet) getPoolDevName() string { - return getDevName(devices.getPoolName()) -} - -func (devices *DeviceSet) hasImage(name string) bool { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - _, err := os.Stat(filename) - return err == nil -} - -// ensureImage creates a sparse file of bytes at the path -// /devicemapper/. -// If the file already exists and new size is larger than its current size, it grows to the new size. -// Either way it returns the full path. 
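ensureImage, described above, lazily creates the loopback backing file as a sparse file and grows it in place when a larger size is requested, never shrinking it. The same create-or-grow logic in isolation; the path is illustrative, and Truncate past EOF yields a sparse file on most Unix filesystems:

    package main

    import (
        "fmt"
        "os"
    )

    // ensureSparse creates filename if missing, grows it with Truncate if
    // the requested size is larger than the current one, and never shrinks.
    func ensureSparse(filename string, size int64) error {
        fi, err := os.Stat(filename)
        if os.IsNotExist(err) {
            f, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600)
            if err != nil {
                return err
            }
            defer f.Close()
            return f.Truncate(size) // sparse: no blocks allocated yet
        }
        if err != nil {
            return err
        }
        if fi.Size() < size {
            f, err := os.OpenFile(filename, os.O_RDWR, 0600)
            if err != nil {
                return err
            }
            defer f.Close()
            return f.Truncate(size) // grow in place
        }
        return nil // already at least the requested size; never shrink
    }

    func main() {
        fmt.Println(ensureSparse("/tmp/sparse-example", 1<<20))
    }
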
-func (devices *DeviceSet) ensureImage(name string, size int64) (string, error) { - dirname := devices.loopbackDir() - filename := path.Join(dirname, name) - - uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps) - if err != nil { - return "", err - } - if err := idtools.MkdirAllAs(dirname, 0700, uid, gid); err != nil && !os.IsExist(err) { - return "", err - } - - if fi, err := os.Stat(filename); err != nil { - if !os.IsNotExist(err) { - return "", err - } - logrus.Debugf("devmapper: Creating loopback file %s for device-manage use", filename) - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return "", err - } - defer file.Close() - - if err := file.Truncate(size); err != nil { - return "", err - } - } else { - if fi.Size() < size { - file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE, 0600) - if err != nil { - return "", err - } - defer file.Close() - if err := file.Truncate(size); err != nil { - return "", fmt.Errorf("devmapper: Unable to grow loopback file %s: %v", filename, err) - } - } else if fi.Size() > size { - logrus.Warnf("devmapper: Can't shrink loopback file %s", filename) - } - } - return filename, nil -} - -func (devices *DeviceSet) allocateTransactionID() uint64 { - devices.OpenTransactionID = devices.TransactionID + 1 - return devices.OpenTransactionID -} - -func (devices *DeviceSet) updatePoolTransactionID() error { - if err := devicemapper.SetTransactionID(devices.getPoolDevName(), devices.TransactionID, devices.OpenTransactionID); err != nil { - return fmt.Errorf("devmapper: Error setting devmapper transaction ID: %s", err) - } - devices.TransactionID = devices.OpenTransactionID - return nil -} - -func (devices *DeviceSet) removeMetadata(info *devInfo) error { - if err := os.RemoveAll(devices.metadataFile(info)); err != nil { - return fmt.Errorf("devmapper: Error removing metadata file %s: %s", devices.metadataFile(info), err) - } - return nil -} - -// Given json data and file path, write it to disk -func (devices *DeviceSet) writeMetaFile(jsonData []byte, filePath string) error { - tmpFile, err := ioutil.TempFile(devices.metadataDir(), ".tmp") - if err != nil { - return fmt.Errorf("devmapper: Error creating metadata file: %s", err) - } - - n, err := tmpFile.Write(jsonData) - if err != nil { - return fmt.Errorf("devmapper: Error writing metadata to %s: %s", tmpFile.Name(), err) - } - if n < len(jsonData) { - return io.ErrShortWrite - } - if err := tmpFile.Sync(); err != nil { - return fmt.Errorf("devmapper: Error syncing metadata file %s: %s", tmpFile.Name(), err) - } - if err := tmpFile.Close(); err != nil { - return fmt.Errorf("devmapper: Error closing metadata file %s: %s", tmpFile.Name(), err) - } - if err := os.Rename(tmpFile.Name(), filePath); err != nil { - return fmt.Errorf("devmapper: Error committing metadata file %s: %s", tmpFile.Name(), err) - } - - return nil -} - -func (devices *DeviceSet) saveMetadata(info *devInfo) error { - jsonData, err := json.Marshal(info) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - if err := devices.writeMetaFile(jsonData, devices.metadataFile(info)); err != nil { - return err - } - return nil -} - -func (devices *DeviceSet) markDeviceIDUsed(deviceID int) { - var mask byte - i := deviceID % 8 - mask = 1 << uint(i) - devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] | mask -} - -func (devices *DeviceSet) markDeviceIDFree(deviceID int) { - var mask byte - i := deviceID % 8 - mask = ^(1 << uint(i)) - 
devices.deviceIDMap[deviceID/8] = devices.deviceIDMap[deviceID/8] & mask
-}
-
-func (devices *DeviceSet) isDeviceIDFree(deviceID int) bool {
-	var mask byte
-	i := deviceID % 8
-	mask = (1 << uint(i))
-	if (devices.deviceIDMap[deviceID/8] & mask) != 0 {
-		return false
-	}
-	return true
-}
-
-// Should be called with devices.Lock() held.
-func (devices *DeviceSet) lookupDevice(hash string) (*devInfo, error) {
-	info := devices.Devices[hash]
-	if info == nil {
-		info = devices.loadMetadata(hash)
-		if info == nil {
-			return nil, fmt.Errorf("devmapper: Unknown device %s", hash)
-		}
-
-		devices.Devices[hash] = info
-	}
-	return info, nil
-}
-
-func (devices *DeviceSet) lookupDeviceWithLock(hash string) (*devInfo, error) {
-	devices.Lock()
-	defer devices.Unlock()
-	info, err := devices.lookupDevice(hash)
-	return info, err
-}
-
-// This function relies on the device hash map having been loaded in advance.
-// Should be called with devices.Lock() held.
-func (devices *DeviceSet) constructDeviceIDMap() {
-	logrus.Debugf("devmapper: constructDeviceIDMap()")
-	defer logrus.Debugf("devmapper: constructDeviceIDMap() END")
-
-	for _, info := range devices.Devices {
-		devices.markDeviceIDUsed(info.DeviceID)
-		logrus.Debugf("devmapper: Added deviceId=%d to DeviceIdMap", info.DeviceID)
-	}
-}
-
-func (devices *DeviceSet) deviceFileWalkFunction(path string, finfo os.FileInfo) error {
-
-	// Skip some of the meta files which are not device files.
-	if strings.HasSuffix(finfo.Name(), ".migrated") {
-		logrus.Debugf("devmapper: Skipping file %s", path)
-		return nil
-	}
-
-	if strings.HasPrefix(finfo.Name(), ".") {
-		logrus.Debugf("devmapper: Skipping file %s", path)
-		return nil
-	}
-
-	if finfo.Name() == deviceSetMetaFile {
-		logrus.Debugf("devmapper: Skipping file %s", path)
-		return nil
-	}
-
-	if finfo.Name() == transactionMetaFile {
-		logrus.Debugf("devmapper: Skipping file %s", path)
-		return nil
-	}
-
-	logrus.Debugf("devmapper: Loading data for file %s", path)
-
-	hash := finfo.Name()
-	if hash == "base" {
-		hash = ""
-	}
-
-	// Include deleted devices too, as the cleanup logic will go
-	// through them and delete any devices still marked as deleted.
-	if _, err := devices.lookupDevice(hash); err != nil {
-		return fmt.Errorf("devmapper: Error looking up device %s:%v", hash, err)
-	}
-
-	return nil
-}
-
-func (devices *DeviceSet) loadDeviceFilesOnStart() error {
-	logrus.Debugf("devmapper: loadDeviceFilesOnStart()")
-	defer logrus.Debugf("devmapper: loadDeviceFilesOnStart() END")
-
-	var scan = func(path string, info os.FileInfo, err error) error {
-		if err != nil {
-			logrus.Debugf("devmapper: Can't walk the file %s", path)
-			return nil
-		}
-
-		// Skip any directories
-		if info.IsDir() {
-			return nil
-		}
-
-		return devices.deviceFileWalkFunction(path, info)
-	}
-
-	return filepath.Walk(devices.metadataDir(), scan)
-}
-
-// Should be called with devices.Lock() held.
-func (devices *DeviceSet) unregisterDevice(id int, hash string) error {
-	logrus.Debugf("devmapper: unregisterDevice(%v, %v)", id, hash)
-	info := &devInfo{
-		Hash:     hash,
-		DeviceID: id,
-	}
-
-	delete(devices.Devices, hash)
-
-	if err := devices.removeMetadata(info); err != nil {
-		logrus.Debugf("devmapper: Error removing metadata: %s", err)
-		return err
-	}
-
-	return nil
-}
-
-// Should be called with devices.Lock() held.
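The bitmap helpers above pack one bit per thin-pool device ID, eight IDs per byte, so marking, freeing, and testing an ID are single mask operations. A minimal standalone sketch of the same layout (idMap and its methods are illustrative names, not the driver's; the 24-bit ID space is taken from the "IDs are 24bit" comment later in this file):

    package main

    import "fmt"

    const maxDeviceID = 0xffffff // 24-bit thin-pool device IDs

    type idMap []byte

    func newIDMap() idMap { return make(idMap, (maxDeviceID+1)/8) }

    // markUsed sets the bit for id; mirrors markDeviceIDUsed above.
    func (m idMap) markUsed(id int) { m[id/8] |= 1 << uint(id%8) }

    // markFree clears the bit; &^= is Go's AND NOT, replacing the explicit ^mask.
    func (m idMap) markFree(id int) { m[id/8] &^= 1 << uint(id%8) }

    // isFree reports whether the bit for id is clear.
    func (m idMap) isFree(id int) bool { return m[id/8]&(1<<uint(id%8)) == 0 }

    func main() {
    	m := newIDMap()
    	m.markUsed(42)
    	fmt.Println(m.isFree(42), m.isFree(43)) // false true
    	m.markFree(42)
    	fmt.Println(m.isFree(42)) // true
    }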
-func (devices *DeviceSet) registerDevice(id int, hash string, size uint64, transactionID uint64) (*devInfo, error) { - logrus.Debugf("devmapper: registerDevice(%v, %v)", id, hash) - info := &devInfo{ - Hash: hash, - DeviceID: id, - Size: size, - TransactionID: transactionID, - Initialized: false, - devices: devices, - } - - devices.Devices[hash] = info - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, hash) - return nil, err - } - - return info, nil -} - -func (devices *DeviceSet) activateDeviceIfNeeded(info *devInfo, ignoreDeleted bool) error { - logrus.Debugf("devmapper: activateDeviceIfNeeded(%v)", info.Hash) - - if info.Deleted && !ignoreDeleted { - return fmt.Errorf("devmapper: Can't activate device %v as it is marked for deletion", info.Hash) - } - - // Make sure deferred removal on device is canceled, if one was - // scheduled. - if err := devices.cancelDeferredRemoval(info); err != nil { - return fmt.Errorf("devmapper: Device Deferred Removal Cancellation Failed: %s", err) - } - - if devinfo, _ := devicemapper.GetInfo(info.Name()); devinfo != nil && devinfo.Exists != 0 { - return nil - } - - return devicemapper.ActivateDevice(devices.getPoolDevName(), info.Name(), info.DeviceID, info.Size) -} - -// Return true only if kernel supports xfs and mkfs.xfs is available -func xfsSupported() bool { - // Make sure mkfs.xfs is available - if _, err := exec.LookPath("mkfs.xfs"); err != nil { - return false - } - - // Check if kernel supports xfs filesystem or not. - exec.Command("modprobe", "xfs").Run() - - f, err := os.Open("/proc/filesystems") - if err != nil { - logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) - return false - } - defer f.Close() - - s := bufio.NewScanner(f) - for s.Scan() { - if strings.HasSuffix(s.Text(), "\txfs") { - return true - } - } - - if err := s.Err(); err != nil { - logrus.Warnf("devmapper: Could not check if xfs is supported: %v", err) - } - return false -} - -func determineDefaultFS() string { - if xfsSupported() { - return "xfs" - } - - logrus.Warn("devmapper: XFS is not supported in your system. Either the kernel doesn't support it or mkfs.xfs is not in your PATH. 
Defaulting to ext4 filesystem") - return "ext4" -} - -func (devices *DeviceSet) createFilesystem(info *devInfo) (err error) { - devname := info.DevName() - - args := []string{} - for _, arg := range devices.mkfsArgs { - args = append(args, arg) - } - - args = append(args, devname) - - if devices.filesystem == "" { - devices.filesystem = determineDefaultFS() - } - if err := devices.saveBaseDeviceFilesystem(devices.filesystem); err != nil { - return err - } - - logrus.Infof("devmapper: Creating filesystem %s on device %s", devices.filesystem, info.Name()) - defer func() { - if err != nil { - logrus.Infof("devmapper: Error while creating filesystem %s on device %s: %v", devices.filesystem, info.Name(), err) - } else { - logrus.Infof("devmapper: Successfully created filesystem %s on device %s", devices.filesystem, info.Name()) - } - }() - - switch devices.filesystem { - case "xfs": - err = exec.Command("mkfs.xfs", args...).Run() - case "ext4": - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0,lazy_journal_init=0"}, args...)...).Run() - if err != nil { - err = exec.Command("mkfs.ext4", append([]string{"-E", "nodiscard,lazy_itable_init=0"}, args...)...).Run() - } - if err != nil { - return err - } - err = exec.Command("tune2fs", append([]string{"-c", "-1", "-i", "0"}, devname)...).Run() - default: - err = fmt.Errorf("devmapper: Unsupported filesystem type %s", devices.filesystem) - } - return -} - -func (devices *DeviceSet) migrateOldMetaData() error { - // Migrate old metadata file - jsonData, err := ioutil.ReadFile(devices.oldMetadataFile()) - if err != nil && !os.IsNotExist(err) { - return err - } - - if jsonData != nil { - m := metaData{Devices: make(map[string]*devInfo)} - - if err := json.Unmarshal(jsonData, &m); err != nil { - return err - } - - for hash, info := range m.Devices { - info.Hash = hash - devices.saveMetadata(info) - } - if err := os.Rename(devices.oldMetadataFile(), devices.oldMetadataFile()+".migrated"); err != nil { - return err - } - - } - - return nil -} - -// Cleanup deleted devices. It assumes that all the devices have been -// loaded in the hash table. -func (devices *DeviceSet) cleanupDeletedDevices() error { - devices.Lock() - - // If there are no deleted devices, there is nothing to do. - if devices.nrDeletedDevices == 0 { - devices.Unlock() - return nil - } - - var deletedDevices []*devInfo - - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - logrus.Debugf("devmapper: Found deleted device %s.", info.Hash) - deletedDevices = append(deletedDevices, info) - } - - // Delete the deleted devices. DeleteDevice() first takes the info lock - // and then devices.Lock(). So drop it to avoid deadlock. - devices.Unlock() - - for _, info := range deletedDevices { - // This will again try deferred deletion. - if err := devices.DeleteDevice(info.Hash, false); err != nil { - logrus.Warnf("devmapper: Deletion of device %s, device_id=%v failed:%v", info.Hash, info.DeviceID, err) - } - } - - return nil -} - -func (devices *DeviceSet) countDeletedDevices() { - for _, info := range devices.Devices { - if !info.Deleted { - continue - } - devices.nrDeletedDevices++ - } -} - -func (devices *DeviceSet) startDeviceDeletionWorker() { - // Deferred deletion is not enabled. Don't do anything. 
- if !devices.deferredDelete { - return - } - - logrus.Debug("devmapper: Worker to cleanup deleted devices started") - for range devices.deletionWorkerTicker.C { - devices.cleanupDeletedDevices() - } -} - -func (devices *DeviceSet) initMetaData() error { - devices.Lock() - defer devices.Unlock() - - if err := devices.migrateOldMetaData(); err != nil { - return err - } - - _, transactionID, _, _, _, _, err := devices.poolStatus() - if err != nil { - return err - } - - devices.TransactionID = transactionID - - if err := devices.loadDeviceFilesOnStart(); err != nil { - return fmt.Errorf("devmapper: Failed to load device files:%v", err) - } - - devices.constructDeviceIDMap() - devices.countDeletedDevices() - - if err := devices.processPendingTransaction(); err != nil { - return err - } - - // Start a goroutine to cleanup Deleted Devices - go devices.startDeviceDeletionWorker() - return nil -} - -func (devices *DeviceSet) incNextDeviceID() { - // IDs are 24bit, so wrap around - devices.NextDeviceID = (devices.NextDeviceID + 1) & maxDeviceID -} - -func (devices *DeviceSet) getNextFreeDeviceID() (int, error) { - devices.incNextDeviceID() - for i := 0; i <= maxDeviceID; i++ { - if devices.isDeviceIDFree(devices.NextDeviceID) { - devices.markDeviceIDUsed(devices.NextDeviceID) - return devices.NextDeviceID, nil - } - devices.incNextDeviceID() - } - - return 0, fmt.Errorf("devmapper: Unable to find a free device ID") -} - -func (devices *DeviceSet) poolHasFreeSpace() error { - if devices.minFreeSpacePercent == 0 { - return nil - } - - _, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err != nil { - return err - } - - minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 - if minFreeData < 1 { - minFreeData = 1 - } - dataFree := dataTotal - dataUsed - if dataFree < minFreeData { - return fmt.Errorf("devmapper: Thin Pool has %v free data blocks which is less than minimum required %v free data blocks. Create more free space in thin pool or use dm.min_free_space option to change behavior", (dataTotal - dataUsed), minFreeData) - } - - minFreeMetadata := (metadataTotal * uint64(devices.minFreeSpacePercent)) / 100 - if minFreeMetadata < 1 { - minFreeMetadata = 1 - } - - metadataFree := metadataTotal - metadataUsed - if metadataFree < minFreeMetadata { - return fmt.Errorf("devmapper: Thin Pool has %v free metadata blocks which is less than minimum required %v free metadata blocks. Create more free metadata space in thin pool or use dm.min_free_space option to change behavior", (metadataTotal - metadataUsed), minFreeMetadata) - } - - return nil -} - -func (devices *DeviceSet) createRegisterDevice(hash string) (*devInfo, error) { - devices.Lock() - defer devices.Unlock() - - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - - if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - for { - if err := devicemapper.CreateDevice(devices.getPoolDevName(), deviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. 
- logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return nil, err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logrus.Debugf("devmapper: Error creating device: %s", err) - devices.markDeviceIDFree(deviceID) - return nil, err - } - break - } - - logrus.Debugf("devmapper: Registering device (id %v) with FS size %v", deviceID, devices.baseFsSize) - info, err := devices.registerDevice(deviceID, hash, devices.baseFsSize, devices.OpenTransactionID) - if err != nil { - _ = devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(deviceID, hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return nil, err - } - return info, nil -} - -func (devices *DeviceSet) createRegisterSnapDevice(hash string, baseInfo *devInfo, size uint64) error { - if err := devices.poolHasFreeSpace(); err != nil { - return err - } - - deviceID, err := devices.getNextFreeDeviceID() - if err != nil { - return err - } - - if err := devices.openTransaction(hash, deviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceID = %d", hash, deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - - for { - if err := devicemapper.CreateSnapDevice(devices.getPoolDevName(), deviceID, baseInfo.Name(), baseInfo.DeviceID); err != nil { - if devicemapper.DeviceIDExists(err) { - // Device ID already exists. This should not - // happen. Now we have a mechanism to find - // a free device ID. So something is not right. - // Give a warning and continue. 
- logrus.Errorf("devmapper: Device ID %d exists in pool but it is supposed to be unused", deviceID) - deviceID, err = devices.getNextFreeDeviceID() - if err != nil { - return err - } - // Save new device id into transaction - devices.refreshTransaction(deviceID) - continue - } - logrus.Debugf("devmapper: Error creating snap device: %s", err) - devices.markDeviceIDFree(deviceID) - return err - } - break - } - - if _, err := devices.registerDevice(deviceID, hash, size, devices.OpenTransactionID); err != nil { - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - logrus.Debugf("devmapper: Error registering device: %s", err) - return err - } - - if err := devices.closeTransaction(); err != nil { - devices.unregisterDevice(deviceID, hash) - devicemapper.DeleteDevice(devices.getPoolDevName(), deviceID) - devices.markDeviceIDFree(deviceID) - return err - } - return nil -} - -func (devices *DeviceSet) loadMetadata(hash string) *devInfo { - info := &devInfo{Hash: hash, devices: devices} - - jsonData, err := ioutil.ReadFile(devices.metadataFile(info)) - if err != nil { - return nil - } - - if err := json.Unmarshal(jsonData, &info); err != nil { - return nil - } - - if info.DeviceID > maxDeviceID { - logrus.Errorf("devmapper: Ignoring Invalid DeviceId=%d", info.DeviceID) - return nil - } - - return info -} - -func getDeviceUUID(device string) (string, error) { - out, err := exec.Command("blkid", "-s", "UUID", "-o", "value", device).Output() - if err != nil { - return "", fmt.Errorf("devmapper: Failed to find uuid for device %s:%v", device, err) - } - - uuid := strings.TrimSuffix(string(out), "\n") - uuid = strings.TrimSpace(uuid) - logrus.Debugf("devmapper: UUID for device: %s is:%s", device, uuid) - return uuid, nil -} - -func (devices *DeviceSet) getBaseDeviceSize() uint64 { - info, _ := devices.lookupDevice("") - if info == nil { - return 0 - } - return info.Size -} - -func (devices *DeviceSet) getBaseDeviceFS() string { - return devices.BaseDeviceFilesystem -} - -func (devices *DeviceSet) verifyBaseDeviceUUIDFS(baseInfo *devInfo) error { - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil { - return err - } - defer devices.deactivateDevice(baseInfo) - - uuid, err := getDeviceUUID(baseInfo.DevName()) - if err != nil { - return err - } - - if devices.BaseDeviceUUID != uuid { - return fmt.Errorf("devmapper: Current Base Device UUID:%s does not match with stored UUID:%s. Possibly using a different thin pool than last invocation", uuid, devices.BaseDeviceUUID) - } - - if devices.BaseDeviceFilesystem == "" { - fsType, err := ProbeFsType(baseInfo.DevName()) - if err != nil { - return err - } - if err := devices.saveBaseDeviceFilesystem(fsType); err != nil { - return err - } - } - - // If user specified a filesystem using dm.fs option and current - // file system of base image is not same, warn user that dm.fs - // will be ignored. - if devices.BaseDeviceFilesystem != devices.filesystem { - logrus.Warnf("devmapper: Base device already exists and has filesystem %s on it. 
User specified filesystem %s will be ignored.", devices.BaseDeviceFilesystem, devices.filesystem)
-		devices.filesystem = devices.BaseDeviceFilesystem
-	}
-	return nil
-}
-
-func (devices *DeviceSet) saveBaseDeviceFilesystem(fs string) error {
-	devices.BaseDeviceFilesystem = fs
-	return devices.saveDeviceSetMetaData()
-}
-
-func (devices *DeviceSet) saveBaseDeviceUUID(baseInfo *devInfo) error {
-	devices.Lock()
-	defer devices.Unlock()
-
-	if err := devices.activateDeviceIfNeeded(baseInfo, false); err != nil {
-		return err
-	}
-	defer devices.deactivateDevice(baseInfo)
-
-	uuid, err := getDeviceUUID(baseInfo.DevName())
-	if err != nil {
-		return err
-	}
-
-	devices.BaseDeviceUUID = uuid
-	return devices.saveDeviceSetMetaData()
-}
-
-func (devices *DeviceSet) createBaseImage() error {
-	logrus.Debug("devmapper: Initializing base device-mapper thin volume")
-
-	// Create initial device
-	info, err := devices.createRegisterDevice("")
-	if err != nil {
-		return err
-	}
-
-	logrus.Debug("devmapper: Creating filesystem on base device-mapper thin volume")
-
-	if err := devices.activateDeviceIfNeeded(info, false); err != nil {
-		return err
-	}
-
-	if err := devices.createFilesystem(info); err != nil {
-		return err
-	}
-
-	info.Initialized = true
-	if err := devices.saveMetadata(info); err != nil {
-		info.Initialized = false
-		return err
-	}
-
-	if err := devices.saveBaseDeviceUUID(info); err != nil {
-		return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err)
-	}
-
-	return nil
-}
-
-// Returns whether the thin pool device exists. If the device exists, it also
-// makes sure it is a thin pool device and not some other type of device.
-func (devices *DeviceSet) thinPoolExists(thinPoolDevice string) (bool, error) {
-	logrus.Debugf("devmapper: Checking for existence of the pool %s", thinPoolDevice)
-
-	info, err := devicemapper.GetInfo(thinPoolDevice)
-	if err != nil {
-		return false, fmt.Errorf("devmapper: GetInfo() on device %s failed: %v", thinPoolDevice, err)
-	}
-
-	// Device does not exist.
-	if info.Exists == 0 {
-		return false, nil
-	}
-
-	_, _, deviceType, _, err := devicemapper.GetStatus(thinPoolDevice)
-	if err != nil {
-		return false, fmt.Errorf("devmapper: GetStatus() on device %s failed: %v", thinPoolDevice, err)
-	}
-
-	if deviceType != "thin-pool" {
-		return false, fmt.Errorf("devmapper: Device %s is not a thin pool", thinPoolDevice)
-	}
-
-	return true, nil
-}
-
-func (devices *DeviceSet) checkThinPool() error {
-	_, transactionID, dataUsed, _, _, _, err := devices.poolStatus()
-	if err != nil {
-		return err
-	}
-	if dataUsed != 0 {
-		return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) that already has used data blocks",
-			devices.thinPoolDevice)
-	}
-	if transactionID != 0 {
-		return fmt.Errorf("devmapper: Unable to take ownership of thin-pool (%s) with non-zero transaction ID",
-			devices.thinPoolDevice)
-	}
-	return nil
-}
-
-// Base image is initialized properly. Either save the UUID for the first time
-// (for the upgrade case) or verify the UUID.
-func (devices *DeviceSet) setupVerifyBaseImageUUIDFS(baseInfo *devInfo) error {
-	// If BaseDeviceUUID is empty (upgrade case), save it and return success.
- if devices.BaseDeviceUUID == "" { - if err := devices.saveBaseDeviceUUID(baseInfo); err != nil { - return fmt.Errorf("devmapper: Could not query and save base device UUID:%v", err) - } - return nil - } - - if err := devices.verifyBaseDeviceUUIDFS(baseInfo); err != nil { - return fmt.Errorf("devmapper: Base Device UUID and Filesystem verification failed: %v", err) - } - - return nil -} - -func (devices *DeviceSet) checkGrowBaseDeviceFS(info *devInfo) error { - - if !userBaseSize { - return nil - } - - if devices.baseFsSize < devices.getBaseDeviceSize() { - return fmt.Errorf("devmapper: Base device size cannot be smaller than %s", units.HumanSize(float64(devices.getBaseDeviceSize()))) - } - - if devices.baseFsSize == devices.getBaseDeviceSize() { - return nil - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - info.Size = devices.baseFsSize - - if err := devices.saveMetadata(info); err != nil { - // Try to remove unused device - delete(devices.Devices, info.Hash) - return err - } - - return devices.growFS(info) -} - -func (devices *DeviceSet) growFS(info *devInfo) error { - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("Error activating devmapper device: %s", err) - } - - defer devices.deactivateDevice(info) - - fsMountPoint := "/run/docker/mnt" - if _, err := os.Stat(fsMountPoint); os.IsNotExist(err) { - if err := os.MkdirAll(fsMountPoint, 0700); err != nil { - return err - } - defer os.RemoveAll(fsMountPoint) - } - - options := "" - if devices.BaseDeviceFilesystem == "xfs" { - // XFS needs nouuid or it can't mount filesystems with the same fs - options = joinMountOptions(options, "nouuid") - } - options = joinMountOptions(options, devices.mountOptions) - - if err := mount.Mount(info.DevName(), fsMountPoint, devices.BaseDeviceFilesystem, options); err != nil { - return fmt.Errorf("Error mounting '%s' on '%s': %s", info.DevName(), fsMountPoint, err) - } - - defer syscall.Unmount(fsMountPoint, syscall.MNT_DETACH) - - switch devices.BaseDeviceFilesystem { - case "ext4": - if out, err := exec.Command("resize2fs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) - } - case "xfs": - if out, err := exec.Command("xfs_growfs", info.DevName()).CombinedOutput(); err != nil { - return fmt.Errorf("Failed to grow rootfs:%v:%s", err, string(out)) - } - default: - return fmt.Errorf("Unsupported filesystem type %s", devices.BaseDeviceFilesystem) - } - return nil -} - -func (devices *DeviceSet) setupBaseImage() error { - oldInfo, _ := devices.lookupDeviceWithLock("") - - // base image already exists. If it is initialized properly, do UUID - // verification and return. Otherwise remove image and set it up - // fresh. - - if oldInfo != nil { - if oldInfo.Initialized && !oldInfo.Deleted { - if err := devices.setupVerifyBaseImageUUIDFS(oldInfo); err != nil { - return err - } - - if err := devices.checkGrowBaseDeviceFS(oldInfo); err != nil { - return err - } - - return nil - } - - logrus.Debug("devmapper: Removing uninitialized base image") - // If previous base device is in deferred delete state, - // that needs to be cleaned up first. So don't try - // deferred deletion. - if err := devices.DeleteDevice("", true); err != nil { - return err - } - } - - // If we are setting up base image for the first time, make sure - // thin pool is empty. 
- if devices.thinPoolDevice != "" && oldInfo == nil { - if err := devices.checkThinPool(); err != nil { - return err - } - } - - // Create new base image device - if err := devices.createBaseImage(); err != nil { - return err - } - - return nil -} - -func setCloseOnExec(name string) { - if fileInfos, _ := ioutil.ReadDir("/proc/self/fd"); fileInfos != nil { - for _, i := range fileInfos { - link, _ := os.Readlink(filepath.Join("/proc/self/fd", i.Name())) - if link == name { - fd, err := strconv.Atoi(i.Name()) - if err == nil { - syscall.CloseOnExec(fd) - } - } - } - } -} - -// DMLog implements logging using DevMapperLogger interface. -func (devices *DeviceSet) DMLog(level int, file string, line int, dmError int, message string) { - // By default libdm sends us all the messages including debug ones. - // We need to filter out messages here and figure out which one - // should be printed. - if level > logLevel { - return - } - - // FIXME(vbatts) push this back into ./pkg/devicemapper/ - if level <= devicemapper.LogLevelErr { - logrus.Errorf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } else if level <= devicemapper.LogLevelInfo { - logrus.Infof("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } else { - // FIXME(vbatts) push this back into ./pkg/devicemapper/ - logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message) - } -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// ResizePool increases the size of the pool. -func (devices *DeviceSet) ResizePool(size int64) error { - dirname := devices.loopbackDir() - datafilename := path.Join(dirname, "data") - if len(devices.dataDevice) > 0 { - datafilename = devices.dataDevice - } - metadatafilename := path.Join(dirname, "metadata") - if len(devices.metadataDevice) > 0 { - metadatafilename = devices.metadataDevice - } - - datafile, err := os.OpenFile(datafilename, os.O_RDWR, 0) - if datafile == nil { - return err - } - defer datafile.Close() - - fi, err := datafile.Stat() - if fi == nil { - return err - } - - if fi.Size() > size { - return fmt.Errorf("devmapper: Can't shrink file") - } - - dataloopback := loopback.FindLoopDeviceFor(datafile) - if dataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", datafilename) - } - defer dataloopback.Close() - - metadatafile, err := os.OpenFile(metadatafilename, os.O_RDWR, 0) - if metadatafile == nil { - return err - } - defer metadatafile.Close() - - metadataloopback := loopback.FindLoopDeviceFor(metadatafile) - if metadataloopback == nil { - return fmt.Errorf("devmapper: Unable to find loopback mount for: %s", metadatafilename) - } - defer metadataloopback.Close() - - // Grow loopback file - if err := datafile.Truncate(size); err != nil { - return fmt.Errorf("devmapper: Unable to grow loopback file: %s", err) - } - - // Reload size for loopback device - if err := loopback.SetCapacity(dataloopback); err != nil { - return fmt.Errorf("Unable to update loopback capacity: %s", err) - } - - // Suspend the pool - if err := devicemapper.SuspendDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to suspend pool: %s", err) - } - - // Reload with the new block sizes - if err := devicemapper.ReloadPool(devices.getPoolName(), dataloopback, metadataloopback, devices.thinpBlockSize); err != nil { - return fmt.Errorf("devmapper: Unable to reload pool: 
%s", err) - } - - // Resume the pool - if err := devicemapper.ResumeDevice(devices.getPoolName()); err != nil { - return fmt.Errorf("devmapper: Unable to resume pool: %s", err) - } - - return nil -} - -func (devices *DeviceSet) loadTransactionMetaData() error { - jsonData, err := ioutil.ReadFile(devices.transactionMetaFile()) - if err != nil { - // There is no active transaction. This will be the case - // during upgrade. - if os.IsNotExist(err) { - devices.OpenTransactionID = devices.TransactionID - return nil - } - return err - } - - json.Unmarshal(jsonData, &devices.transaction) - return nil -} - -func (devices *DeviceSet) saveTransactionMetaData() error { - jsonData, err := json.Marshal(&devices.transaction) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.transactionMetaFile()) -} - -func (devices *DeviceSet) removeTransactionMetaData() error { - if err := os.RemoveAll(devices.transactionMetaFile()); err != nil { - return err - } - return nil -} - -func (devices *DeviceSet) rollbackTransaction() error { - logrus.Debugf("devmapper: Rolling back open transaction: TransactionID=%d hash=%s device_id=%d", devices.OpenTransactionID, devices.DeviceIDHash, devices.DeviceID) - - // A device id might have already been deleted before transaction - // closed. In that case this call will fail. Just leave a message - // in case of failure. - if err := devicemapper.DeleteDevice(devices.getPoolDevName(), devices.DeviceID); err != nil { - logrus.Errorf("devmapper: Unable to delete device: %s", err) - } - - dinfo := &devInfo{Hash: devices.DeviceIDHash} - if err := devices.removeMetadata(dinfo); err != nil { - logrus.Errorf("devmapper: Unable to remove metadata: %s", err) - } else { - devices.markDeviceIDFree(devices.DeviceID) - } - - if err := devices.removeTransactionMetaData(); err != nil { - logrus.Errorf("devmapper: Unable to remove transaction meta file %s: %s", devices.transactionMetaFile(), err) - } - - return nil -} - -func (devices *DeviceSet) processPendingTransaction() error { - if err := devices.loadTransactionMetaData(); err != nil { - return err - } - - // If there was open transaction but pool transaction ID is same - // as open transaction ID, nothing to roll back. - if devices.TransactionID == devices.OpenTransactionID { - return nil - } - - // If open transaction ID is less than pool transaction ID, something - // is wrong. Bail out. - if devices.OpenTransactionID < devices.TransactionID { - logrus.Errorf("devmapper: Open Transaction id %d is less than pool transaction id %d", devices.OpenTransactionID, devices.TransactionID) - return nil - } - - // Pool transaction ID is not same as open transaction. There is - // a transaction which was not completed. - if err := devices.rollbackTransaction(); err != nil { - return fmt.Errorf("devmapper: Rolling back open transaction failed: %s", err) - } - - devices.OpenTransactionID = devices.TransactionID - return nil -} - -func (devices *DeviceSet) loadDeviceSetMetaData() error { - jsonData, err := ioutil.ReadFile(devices.deviceSetMetaFile()) - if err != nil { - // For backward compatibility return success if file does - // not exist. 
- if os.IsNotExist(err) { - return nil - } - return err - } - - return json.Unmarshal(jsonData, devices) -} - -func (devices *DeviceSet) saveDeviceSetMetaData() error { - jsonData, err := json.Marshal(devices) - if err != nil { - return fmt.Errorf("devmapper: Error encoding metadata to json: %s", err) - } - - return devices.writeMetaFile(jsonData, devices.deviceSetMetaFile()) -} - -func (devices *DeviceSet) openTransaction(hash string, DeviceID int) error { - devices.allocateTransactionID() - devices.DeviceIDHash = hash - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) refreshTransaction(DeviceID int) error { - devices.DeviceID = DeviceID - if err := devices.saveTransactionMetaData(); err != nil { - return fmt.Errorf("devmapper: Error saving transaction metadata: %s", err) - } - return nil -} - -func (devices *DeviceSet) closeTransaction() error { - if err := devices.updatePoolTransactionID(); err != nil { - logrus.Debug("devmapper: Failed to close Transaction") - return err - } - return nil -} - -func determineDriverCapabilities(version string) error { - /* - * Driver version 4.27.0 and greater support deferred activation - * feature. - */ - - logrus.Debugf("devicemapper: driver version is %s", version) - - versionSplit := strings.Split(version, ".") - major, err := strconv.Atoi(versionSplit[0]) - if err != nil { - return graphdriver.ErrNotSupported - } - - if major > 4 { - driverDeferredRemovalSupport = true - return nil - } - - if major < 4 { - return nil - } - - minor, err := strconv.Atoi(versionSplit[1]) - if err != nil { - return graphdriver.ErrNotSupported - } - - /* - * If major is 4 and minor is 27, then there is no need to - * check for patch level as it can not be less than 0. - */ - if minor >= 27 { - driverDeferredRemovalSupport = true - return nil - } - - return nil -} - -// Determine the major and minor number of loopback device -func getDeviceMajorMinor(file *os.File) (uint64, uint64, error) { - stat, err := file.Stat() - if err != nil { - return 0, 0, err - } - - dev := stat.Sys().(*syscall.Stat_t).Rdev - majorNum := major(dev) - minorNum := minor(dev) - - logrus.Debugf("devmapper: Major:Minor for device: %s is:%v:%v", file.Name(), majorNum, minorNum) - return majorNum, minorNum, nil -} - -// Given a file which is backing file of a loop back device, find the -// loopback device name and its major/minor number. 
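getDeviceMajorMinor above decodes a Linux dev_t using the major()/minor() helpers defined earlier in this file. A standalone sketch of the same decoding against a well-known device node (/dev/null, conventionally 1:3, is used purely as an example):

    package main

    import (
    	"fmt"
    	"log"
    	"os"
    	"syscall"
    )

    // Same bit layout as the major()/minor() helpers above.
    func major(dev uint64) uint64 { return (dev >> 8) & 0xfff }
    func minor(dev uint64) uint64 { return (dev & 0xff) | ((dev >> 12) & 0xfff00) }

    func main() {
    	fi, err := os.Stat("/dev/null")
    	if err != nil {
    		log.Fatal(err)
    	}
    	// For device nodes, Rdev holds the device number.
    	dev := uint64(fi.Sys().(*syscall.Stat_t).Rdev)
    	fmt.Printf("/dev/null is %d:%d\n", major(dev), minor(dev))
    }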
-func getLoopFileDeviceMajMin(filename string) (string, uint64, uint64, error) { - file, err := os.Open(filename) - if err != nil { - logrus.Debugf("devmapper: Failed to open file %s", filename) - return "", 0, 0, err - } - - defer file.Close() - loopbackDevice := loopback.FindLoopDeviceFor(file) - if loopbackDevice == nil { - return "", 0, 0, fmt.Errorf("devmapper: Unable to find loopback mount for: %s", filename) - } - defer loopbackDevice.Close() - - Major, Minor, err := getDeviceMajorMinor(loopbackDevice) - if err != nil { - return "", 0, 0, err - } - return loopbackDevice.Name(), Major, Minor, nil -} - -// Get the major/minor numbers of thin pool data and metadata devices -func (devices *DeviceSet) getThinPoolDataMetaMajMin() (uint64, uint64, uint64, uint64, error) { - var params, poolDataMajMin, poolMetadataMajMin string - - _, _, _, params, err := devicemapper.GetTable(devices.getPoolName()) - if err != nil { - return 0, 0, 0, 0, err - } - - if _, err = fmt.Sscanf(params, "%s %s", &poolMetadataMajMin, &poolDataMajMin); err != nil { - return 0, 0, 0, 0, err - } - - logrus.Debugf("devmapper: poolDataMajMin=%s poolMetaMajMin=%s\n", poolDataMajMin, poolMetadataMajMin) - - poolDataMajMinorSplit := strings.Split(poolDataMajMin, ":") - poolDataMajor, err := strconv.ParseUint(poolDataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolDataMinor, err := strconv.ParseUint(poolDataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMajMinorSplit := strings.Split(poolMetadataMajMin, ":") - poolMetadataMajor, err := strconv.ParseUint(poolMetadataMajMinorSplit[0], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - poolMetadataMinor, err := strconv.ParseUint(poolMetadataMajMinorSplit[1], 10, 32) - if err != nil { - return 0, 0, 0, 0, err - } - - return poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, nil -} - -func (devices *DeviceSet) loadThinPoolLoopBackInfo() error { - poolDataMajor, poolDataMinor, poolMetadataMajor, poolMetadataMinor, err := devices.getThinPoolDataMetaMajMin() - if err != nil { - return err - } - - dirname := devices.loopbackDir() - - // data device has not been passed in. So there should be a data file - // which is being mounted as loop device. - if devices.dataDevice == "" { - datafilename := path.Join(dirname, "data") - dataLoopDevice, dataMajor, dataMinor, err := getLoopFileDeviceMajMin(datafilename) - if err != nil { - return err - } - - // Compare the two - if poolDataMajor == dataMajor && poolDataMinor == dataMinor { - devices.dataDevice = dataLoopDevice - devices.dataLoopFile = datafilename - } - - } - - // metadata device has not been passed in. So there should be a - // metadata file which is being mounted as loop device. - if devices.metadataDevice == "" { - metadatafilename := path.Join(dirname, "metadata") - metadataLoopDevice, metadataMajor, metadataMinor, err := getLoopFileDeviceMajMin(metadatafilename) - if err != nil { - return err - } - if poolMetadataMajor == metadataMajor && poolMetadataMinor == metadataMinor { - devices.metadataDevice = metadataLoopDevice - devices.metadataLoopFile = metadatafilename - } - } - - return nil -} - -func (devices *DeviceSet) enableDeferredRemovalDeletion() error { - - // If user asked for deferred removal then check both libdm library - // and kernel driver support deferred removal otherwise error out. 
-	if enableDeferredRemoval {
-		if !driverDeferredRemovalSupport {
-			return fmt.Errorf("devmapper: Deferred removal can not be enabled as kernel does not support it")
-		}
-		if !devicemapper.LibraryDeferredRemovalSupport {
-			return fmt.Errorf("devmapper: Deferred removal can not be enabled as libdm does not support it")
-		}
-		logrus.Debug("devmapper: Deferred removal support enabled.")
-		devices.deferredRemove = true
-	}
-
-	if enableDeferredDeletion {
-		if !devices.deferredRemove {
-			return fmt.Errorf("devmapper: Deferred deletion can not be enabled as deferred removal is not enabled. Enable deferred removal using --storage-opt dm.use_deferred_removal=true parameter")
-		}
-		logrus.Debug("devmapper: Deferred deletion support enabled.")
-		devices.deferredDelete = true
-	}
-	return nil
-}
-
-func (devices *DeviceSet) initDevmapper(doInit bool) error {
-	// give ourselves to libdm as a log handler
-	devicemapper.LogInit(devices)
-
-	version, err := devicemapper.GetDriverVersion()
-	if err != nil {
-		// Can't even get driver version, assume not supported
-		return graphdriver.ErrNotSupported
-	}
-
-	if err := determineDriverCapabilities(version); err != nil {
-		return graphdriver.ErrNotSupported
-	}
-
-	if err := devices.enableDeferredRemovalDeletion(); err != nil {
-		return err
-	}
-
-	// https://github.com/docker/docker/issues/4036
-	if supported := devicemapper.UdevSetSyncSupport(true); !supported {
-		if dockerversion.IAmStatic == "true" {
-			logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a dynamic binary to use devicemapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
-		} else {
-			logrus.Errorf("devmapper: Udev sync is not supported. This will lead to data loss and unexpected behavior. Install a more recent version of libdevmapper or select a different storage driver. For more information, see https://docs.docker.com/engine/reference/commandline/daemon/#daemon-storage-driver-option")
-		}
-
-		if !devices.overrideUdevSyncCheck {
-			return graphdriver.ErrNotSupported
-		}
-	}
-
-	// create the root dir of the devmapper driver with ownership matching this
-	// daemon's remapped root uid/gid so containers can start properly
-	uid, gid, err := idtools.GetRootUIDGID(devices.uidMaps, devices.gidMaps)
-	if err != nil {
-		return err
-	}
-	if err := idtools.MkdirAs(devices.root, 0700, uid, gid); err != nil && !os.IsExist(err) {
-		return err
-	}
-	if err := os.MkdirAll(devices.metadataDir(), 0700); err != nil && !os.IsExist(err) {
-		return err
-	}
-
-	// Set the device prefix from the device id and inode of the docker root dir
-
-	st, err := os.Stat(devices.root)
-	if err != nil {
-		return fmt.Errorf("devmapper: Error looking up dir %s: %s", devices.root, err)
-	}
-	sysSt := st.Sys().(*syscall.Stat_t)
-	// "reg-" stands for "regular file".
-	// In the future we might use "dev-" for "device file", etc.
-	// docker-maj,min[-inode] stands for:
-	//	- Managed by docker
-	//	- The target of this device is at major <maj> and minor <min>
-	//	- If <inode> is defined, use that file inside the device as a loopback image. Otherwise use the device itself.
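A standalone sketch of the prefix derivation described in the comment above (the assignment follows just below): stat the docker root directory and combine the device number of the filesystem it lives on with the directory's inode. The root path here is hypothetical; note that for a directory it is Dev, not Rdev, that identifies the backing filesystem:

    package main

    import (
    	"fmt"
    	"log"
    	"os"
    	"syscall"
    )

    func major(dev uint64) uint64 { return (dev >> 8) & 0xfff }
    func minor(dev uint64) uint64 { return (dev & 0xff) | ((dev >> 12) & 0xfff00) }

    func main() {
    	st, err := os.Stat("/var/lib/docker") // hypothetical docker root
    	if err != nil {
    		log.Fatal(err)
    	}
    	sysSt := st.Sys().(*syscall.Stat_t)
    	prefix := fmt.Sprintf("docker-%d:%d-%d",
    		major(uint64(sysSt.Dev)), minor(uint64(sysSt.Dev)), sysSt.Ino)
    	fmt.Println(prefix) // e.g. docker-8:1-1050626
    }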
- devices.devicePrefix = fmt.Sprintf("docker-%d:%d-%d", major(sysSt.Dev), minor(sysSt.Dev), sysSt.Ino) - logrus.Debugf("devmapper: Generated prefix: %s", devices.devicePrefix) - - // Check for the existence of the thin-pool device - poolExists, err := devices.thinPoolExists(devices.getPoolName()) - if err != nil { - return err - } - - // It seems libdevmapper opens this without O_CLOEXEC, and go exec will not close files - // that are not Close-on-exec, - // so we add this badhack to make sure it closes itself - setCloseOnExec("/dev/mapper/control") - - // Make sure the sparse images exist in /devicemapper/data and - // /devicemapper/metadata - - createdLoopback := false - - // If the pool doesn't exist, create it - if !poolExists && devices.thinPoolDevice == "" { - logrus.Debug("devmapper: Pool doesn't exist. Creating it.") - - var ( - dataFile *os.File - metadataFile *os.File - ) - - if devices.dataDevice == "" { - // Make sure the sparse images exist in /devicemapper/data - - hasData := devices.hasImage("data") - - if !doInit && !hasData { - return errors.New("Loopback data file not found") - } - - if !hasData { - createdLoopback = true - } - - data, err := devices.ensureImage("data", devices.dataLoopbackSize) - if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (data): %s", err) - return err - } - - dataFile, err = loopback.AttachLoopDevice(data) - if err != nil { - return err - } - devices.dataLoopFile = data - devices.dataDevice = dataFile.Name() - } else { - dataFile, err = os.OpenFile(devices.dataDevice, os.O_RDWR, 0600) - if err != nil { - return err - } - } - defer dataFile.Close() - - if devices.metadataDevice == "" { - // Make sure the sparse images exist in /devicemapper/metadata - - hasMetadata := devices.hasImage("metadata") - - if !doInit && !hasMetadata { - return errors.New("Loopback metadata file not found") - } - - if !hasMetadata { - createdLoopback = true - } - - metadata, err := devices.ensureImage("metadata", devices.metaDataLoopbackSize) - if err != nil { - logrus.Debugf("devmapper: Error device ensureImage (metadata): %s", err) - return err - } - - metadataFile, err = loopback.AttachLoopDevice(metadata) - if err != nil { - return err - } - devices.metadataLoopFile = metadata - devices.metadataDevice = metadataFile.Name() - } else { - metadataFile, err = os.OpenFile(devices.metadataDevice, os.O_RDWR, 0600) - if err != nil { - return err - } - } - defer metadataFile.Close() - - if err := devicemapper.CreatePool(devices.getPoolName(), dataFile, metadataFile, devices.thinpBlockSize); err != nil { - return err - } - } - - // Pool already exists and caller did not pass us a pool. That means - // we probably created pool earlier and could not remove it as some - // containers were still using it. Detect some of the properties of - // pool, like is it using loop devices. - if poolExists && devices.thinPoolDevice == "" { - if err := devices.loadThinPoolLoopBackInfo(); err != nil { - logrus.Debugf("devmapper: Failed to load thin pool loopback device information:%v", err) - return err - } - } - - // If we didn't just create the data or metadata image, we need to - // load the transaction id and migrate old metadata - if !createdLoopback { - if err := devices.initMetaData(); err != nil { - return err - } - } - - if devices.thinPoolDevice == "" { - if devices.metadataLoopFile != "" || devices.dataLoopFile != "" { - logrus.Warn("devmapper: Usage of loopback devices is strongly discouraged for production use. 
Please use `--storage-opt dm.thinpooldev` or use `man docker` to refer to dm.thinpooldev section.") - } - } - - // Right now this loads only NextDeviceID. If there is more metadata - // down the line, we might have to move it earlier. - if err := devices.loadDeviceSetMetaData(); err != nil { - return err - } - - // Setup the base image - if doInit { - if err := devices.setupBaseImage(); err != nil { - logrus.Debugf("devmapper: Error device setupBaseImage: %s", err) - return err - } - } - - return nil -} - -// AddDevice adds a device and registers in the hash. -func (devices *DeviceSet) AddDevice(hash, baseHash string, storageOpt map[string]string) error { - logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s)", hash, baseHash) - defer logrus.Debugf("devmapper: AddDevice(hash=%s basehash=%s) END", hash, baseHash) - - // If a deleted device exists, return error. - baseInfo, err := devices.lookupDeviceWithLock(baseHash) - if err != nil { - return err - } - - if baseInfo.Deleted { - return fmt.Errorf("devmapper: Base device %v has been marked for deferred deletion", baseInfo.Hash) - } - - baseInfo.lock.Lock() - defer baseInfo.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - // Also include deleted devices in case hash of new device is - // same as one of the deleted devices. - if info, _ := devices.lookupDevice(hash); info != nil { - return fmt.Errorf("devmapper: device %s already exists. Deleted=%v", hash, info.Deleted) - } - - size, err := devices.parseStorageOpt(storageOpt) - if err != nil { - return err - } - - if size == 0 { - size = baseInfo.Size - } - - if size < baseInfo.Size { - return fmt.Errorf("devmapper: Container size cannot be smaller than %s", units.HumanSize(float64(baseInfo.Size))) - } - - if err := devices.createRegisterSnapDevice(hash, baseInfo, size); err != nil { - return err - } - - // Grow the container rootfs. - if size > baseInfo.Size { - info, err := devices.lookupDevice(hash) - if err != nil { - return err - } - - if err := devices.growFS(info); err != nil { - return err - } - } - - return nil -} - -func (devices *DeviceSet) parseStorageOpt(storageOpt map[string]string) (uint64, error) { - - // Read size to change the block device size per container. - for key, val := range storageOpt { - key := strings.ToLower(key) - switch key { - case "size": - size, err := units.RAMInBytes(val) - if err != nil { - return 0, err - } - return uint64(size), nil - default: - return 0, fmt.Errorf("Unknown option %s", key) - } - } - - return 0, nil -} - -func (devices *DeviceSet) markForDeferredDeletion(info *devInfo) error { - // If device is already in deleted state, there is nothing to be done. - if info.Deleted { - return nil - } - - logrus.Debugf("devmapper: Marking device %s for deferred deletion.", info.Hash) - - info.Deleted = true - - // save device metadata to reflect deleted state. - if err := devices.saveMetadata(info); err != nil { - info.Deleted = false - return err - } - - devices.nrDeletedDevices++ - return nil -} - -// Should be called with devices.Lock() held. -func (devices *DeviceSet) deleteTransaction(info *devInfo, syncDelete bool) error { - if err := devices.openTransaction(info.Hash, info.DeviceID); err != nil { - logrus.Debugf("devmapper: Error opening transaction hash = %s deviceId = %d", "", info.DeviceID) - return err - } - - defer devices.closeTransaction() - - err := devicemapper.DeleteDevice(devices.getPoolDevName(), info.DeviceID) - if err != nil { - // If syncDelete is true, we want to return error. 
If deferred
-	// deletion is not enabled, we return an error. If the error is
-	// something other than EBUSY, return an error.
-		if syncDelete || !devices.deferredDelete || err != devicemapper.ErrBusy {
-			logrus.Debugf("devmapper: Error deleting device: %s", err)
-			return err
-		}
-	}
-
-	if err == nil {
-		if err := devices.unregisterDevice(info.DeviceID, info.Hash); err != nil {
-			return err
-		}
-		// If the device was already in deferred delete state, that means
-		// deletion was being retried. Reduce the deleted
-		// device count.
-		if info.Deleted {
-			devices.nrDeletedDevices--
-		}
-		devices.markDeviceIDFree(info.DeviceID)
-	} else {
-		if err := devices.markForDeferredDeletion(info); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-// Issue discard only if device open count is zero.
-func (devices *DeviceSet) issueDiscard(info *devInfo) error {
-	logrus.Debugf("devmapper: issueDiscard(device: %s). START", info.Hash)
-	defer logrus.Debugf("devmapper: issueDiscard(device: %s). END", info.Hash)
-	// This is a workaround for the kernel not discarding blocks on
-	// the thin pool when we remove a thinp device, so we do it
-	// manually.
-	// Even if the device is deferred deleted, activate it and issue
-	// discards.
-	if err := devices.activateDeviceIfNeeded(info, true); err != nil {
-		return err
-	}
-
-	devinfo, err := devicemapper.GetInfo(info.Name())
-	if err != nil {
-		return err
-	}
-
-	if devinfo.OpenCount != 0 {
-		logrus.Debugf("devmapper: Device: %s is in use. OpenCount=%d. Not issuing discards.", info.Hash, devinfo.OpenCount)
-		return nil
-	}
-
-	if err := devicemapper.BlockDeviceDiscard(info.DevName()); err != nil {
-		logrus.Debugf("devmapper: Error discarding block on device: %s (ignoring)", err)
-	}
-	return nil
-}
-
-// Should be called with devices.Lock() held.
-func (devices *DeviceSet) deleteDevice(info *devInfo, syncDelete bool) error {
-	if devices.doBlkDiscard {
-		devices.issueDiscard(info)
-	}
-
-	// Try to deactivate device in case it is active.
-	if err := devices.deactivateDevice(info); err != nil {
-		logrus.Debugf("devmapper: Error deactivating device: %s", err)
-		return err
-	}
-
-	if err := devices.deleteTransaction(info, syncDelete); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// DeleteDevice will return success if device has been marked for deferred
-// removal. If one wants to override that and wants DeleteDevice() to fail if
-// device was busy and could not be deleted, set syncDelete=true.
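deleteTransaction above encodes a small decision table: a busy device fails a synchronous delete, but with deferred deletion enabled the failure is absorbed and the device is merely marked Deleted for the cleanup worker to retry. A sketch of just that branching (errBusy stands in for devicemapper.ErrBusy; shouldDefer is an illustrative name, not a function in this driver, which expresses the test inline):

    package main

    import (
    	"errors"
    	"fmt"
    )

    // errBusy stands in for devicemapper.ErrBusy.
    var errBusy = errors.New("device busy")

    // shouldDefer reports whether a failed delete should fall back to marking
    // the device for deferred deletion instead of returning the error.
    func shouldDefer(err error, syncDelete, deferredDelete bool) bool {
    	return err == errBusy && !syncDelete && deferredDelete
    }

    func main() {
    	fmt.Println(shouldDefer(errBusy, false, true))  // true: mark Deleted, retry later
    	fmt.Println(shouldDefer(errBusy, true, true))   // false: caller wants sync semantics
    	fmt.Println(shouldDefer(errBusy, false, false)) // false: deferral not enabled
    }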
-func (devices *DeviceSet) DeleteDevice(hash string, syncDelete bool) error {
-	logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) START", hash, syncDelete)
-	defer logrus.Debugf("devmapper: DeleteDevice(hash=%v syncDelete=%v) END", hash, syncDelete)
-	info, err := devices.lookupDeviceWithLock(hash)
-	if err != nil {
-		return err
-	}
-
-	info.lock.Lock()
-	defer info.lock.Unlock()
-
-	devices.Lock()
-	defer devices.Unlock()
-
-	return devices.deleteDevice(info, syncDelete)
-}
-
-func (devices *DeviceSet) deactivatePool() error {
-	logrus.Debug("devmapper: deactivatePool()")
-	defer logrus.Debug("devmapper: deactivatePool END")
-	devname := devices.getPoolDevName()
-
-	devinfo, err := devicemapper.GetInfo(devname)
-	if err != nil {
-		return err
-	}
-
-	if devinfo.Exists == 0 {
-		return nil
-	}
-	if err := devicemapper.RemoveDevice(devname); err != nil {
-		return err
-	}
-
-	if d, err := devicemapper.GetDeps(devname); err == nil {
-		logrus.Warnf("devmapper: device %s still has %d active dependents", devname, d.Count)
-	}
-
-	return nil
-}
-
-func (devices *DeviceSet) deactivateDevice(info *devInfo) error {
-	logrus.Debugf("devmapper: deactivateDevice(%s)", info.Hash)
-	defer logrus.Debugf("devmapper: deactivateDevice END(%s)", info.Hash)
-
-	devinfo, err := devicemapper.GetInfo(info.Name())
-	if err != nil {
-		return err
-	}
-
-	if devinfo.Exists == 0 {
-		return nil
-	}
-
-	if devices.deferredRemove {
-		if err := devicemapper.RemoveDeviceDeferred(info.Name()); err != nil {
-			return err
-		}
-	} else {
-		if err := devices.removeDevice(info.Name()); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Issues the underlying dm remove operation.
-func (devices *DeviceSet) removeDevice(devname string) error {
-	var err error
-
-	logrus.Debugf("devmapper: removeDevice START(%s)", devname)
-	defer logrus.Debugf("devmapper: removeDevice END(%s)", devname)
-
-	for i := 0; i < 200; i++ {
-		err = devicemapper.RemoveDevice(devname)
-		if err == nil {
-			break
-		}
-		if err != devicemapper.ErrBusy {
-			return err
-		}
-
-		// If we see EBUSY it may be a transient error,
-		// sleep a bit and retry a few times.
-		devices.Unlock()
-		time.Sleep(100 * time.Millisecond)
-		devices.Lock()
-	}
-
-	return err
-}
-
-func (devices *DeviceSet) cancelDeferredRemoval(info *devInfo) error {
-	if !devices.deferredRemove {
-		return nil
-	}
-
-	logrus.Debugf("devmapper: cancelDeferredRemoval START(%s)", info.Name())
-	defer logrus.Debugf("devmapper: cancelDeferredRemoval END(%s)", info.Name())
-
-	devinfo, err := devicemapper.GetInfoWithDeferred(info.Name())
-
-	if devinfo != nil && devinfo.DeferredRemove == 0 {
-		return nil
-	}
-
-	// Cancel deferred remove
-	for i := 0; i < 100; i++ {
-		err = devicemapper.CancelDeferredRemove(info.Name())
-		if err == nil {
-			break
-		}
-
-		if err == devicemapper.ErrEnxio {
-			// Device is probably already gone. Return success.
-			return nil
-		}
-
-		if err != devicemapper.ErrBusy {
-			return err
-		}
-
-		// If we see EBUSY it may be a transient error,
-		// sleep a bit and retry a few times.
-		devices.Unlock()
-		time.Sleep(100 * time.Millisecond)
-		devices.Lock()
-	}
-	return err
-}
-
-// Shutdown shuts down the device by unmounting the root.
-func (devices *DeviceSet) Shutdown(home string) error {
-	logrus.Debugf("devmapper: [deviceset %s] Shutdown()", devices.devicePrefix)
-	logrus.Debugf("devmapper: Shutting down DeviceSet: %s", devices.root)
-	defer logrus.Debugf("devmapper: [deviceset %s] Shutdown() END", devices.devicePrefix)
-
-	// Stop deletion worker. This should stop delivering new events to
-	// ticker channel. That means no new instance of cleanupDeletedDevices()
-	// will run after this call. If one instance is already running at
-	// the time of the call, it must be holding devices.Lock() and
-	// we will block on this lock till the cleanup function exits.
-	devices.deletionWorkerTicker.Stop()
-
-	devices.Lock()
-	// Save DeviceSet Metadata first. Docker kills all threads if they
-	// don't finish in a certain time. It is possible that Shutdown()
-	// routine does not finish in time as we loop trying to deactivate
-	// some devices while these are busy. In that case shutdown() routine
-	// will be killed and we will not get a chance to save deviceset
-	// metadata. Hence save this early before trying to deactivate devices.
-	devices.saveDeviceSetMetaData()
-
-	// ignore the error since it's just a best effort to not try to unmount something that's mounted
-	mounts, _ := mount.GetMounts()
-	mounted := make(map[string]bool, len(mounts))
-	for _, mnt := range mounts {
-		mounted[mnt.Mountpoint] = true
-	}
-
-	if err := filepath.Walk(path.Join(home, "mnt"), func(p string, info os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-		if !info.IsDir() {
-			return nil
-		}
-
-		if mounted[p] {
-			// We use MNT_DETACH here in case it is still busy in some running
-			// container. This means it'll go away from the global scope directly,
-			// and the device will be released when that container dies.
-			if err := syscall.Unmount(p, syscall.MNT_DETACH); err != nil {
-				logrus.Debugf("devmapper: Shutdown unmounting %s, error: %s", p, err)
-			}
-		}
-
-		if devInfo, err := devices.lookupDevice(path.Base(p)); err != nil {
-			logrus.Debugf("devmapper: Shutdown lookup device %s, error: %s", path.Base(p), err)
-		} else {
-			if err := devices.deactivateDevice(devInfo); err != nil {
-				logrus.Debugf("devmapper: Shutdown deactivate %s , error: %s", devInfo.Hash, err)
-			}
-		}
-
-		return nil
-	}); err != nil && !os.IsNotExist(err) {
-		devices.Unlock()
-		return err
-	}
-
-	devices.Unlock()
-
-	info, _ := devices.lookupDeviceWithLock("")
-	if info != nil {
-		info.lock.Lock()
-		devices.Lock()
-		if err := devices.deactivateDevice(info); err != nil {
-			logrus.Debugf("devmapper: Shutdown deactivate base , error: %s", err)
-		}
-		devices.Unlock()
-		info.lock.Unlock()
-	}
-
-	devices.Lock()
-	if devices.thinPoolDevice == "" {
-		if err := devices.deactivatePool(); err != nil {
-			logrus.Debugf("devmapper: Shutdown deactivate pool , error: %s", err)
-		}
-	}
-	devices.Unlock()
-
-	return nil
-}
-
-// MountDevice mounts the device if not already mounted.
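Shutdown above leans on MNT_DETACH ("lazy") unmounts: the mount point disappears from the mount namespace immediately, and the filesystem is actually released once its last user exits. A minimal sketch of just that call (the mount point is hypothetical and the call needs root):

    package main

    import (
    	"log"
    	"syscall"
    )

    func main() {
    	// MNT_DETACH detaches the mount point from the namespace right away;
    	// the underlying device is freed when the last opener goes away.
    	if err := syscall.Unmount("/mnt/demo", syscall.MNT_DETACH); err != nil {
    		log.Fatalf("lazy unmount failed: %v", err)
    	}
    	log.Println("detached")
    }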
-func (devices *DeviceSet) MountDevice(hash, path, mountLabel string) error { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - if info.Deleted { - return fmt.Errorf("devmapper: Can't mount device %v as it has been marked for deferred deletion", info.Hash) - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - fstype, err := ProbeFsType(info.DevName()) - if err != nil { - return err - } - - options := "" - - if fstype == "xfs" { - // XFS needs nouuid or it can't mount filesystems with the same fs - options = joinMountOptions(options, "nouuid") - } - - options = joinMountOptions(options, devices.mountOptions) - options = joinMountOptions(options, label.FormatMountLabel("", mountLabel)) - - if err := mount.Mount(info.DevName(), path, fstype, options); err != nil { - return fmt.Errorf("devmapper: Error mounting '%s' on '%s': %s", info.DevName(), path, err) - } - - return nil -} - -// UnmountDevice unmounts the device and removes it from hash. -func (devices *DeviceSet) UnmountDevice(hash, mountPath string) error { - logrus.Debugf("devmapper: UnmountDevice(hash=%s)", hash) - defer logrus.Debugf("devmapper: UnmountDevice(hash=%s) END", hash) - - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - logrus.Debugf("devmapper: Unmount(%s)", mountPath) - if err := syscall.Unmount(mountPath, syscall.MNT_DETACH); err != nil { - return err - } - logrus.Debug("devmapper: Unmount done") - - if err := devices.deactivateDevice(info); err != nil { - return err - } - - return nil -} - -// HasDevice returns true if the device metadata exists. -func (devices *DeviceSet) HasDevice(hash string) bool { - info, _ := devices.lookupDeviceWithLock(hash) - return info != nil -} - -// List returns a list of device ids. 
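MountDevice above accumulates its mount options by repeatedly joining comma-separated fragments (the XFS nouuid workaround, user dm.mountopt values, the SELinux label). joinMountOptions itself is defined elsewhere in this driver; the version below is an assumed equivalent for illustration:

    package main

    import "fmt"

    // joinMountOptions comma-joins two option fragments, tolerating empty
    // strings. An assumed equivalent of the driver's helper, not the original.
    func joinMountOptions(a, b string) string {
    	if a == "" {
    		return b
    	}
    	if b == "" {
    		return a
    	}
    	return a + "," + b
    }

    func main() {
    	opts := ""
    	opts = joinMountOptions(opts, "nouuid")  // XFS duplicate-UUID workaround
    	opts = joinMountOptions(opts, "discard") // hypothetical user dm.mountopt value
    	fmt.Println(opts)                        // nouuid,discard
    }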
-func (devices *DeviceSet) List() []string { - devices.Lock() - defer devices.Unlock() - - ids := make([]string, len(devices.Devices)) - i := 0 - for k := range devices.Devices { - ids[i] = k - i++ - } - return ids -} - -func (devices *DeviceSet) deviceStatus(devName string) (sizeInSectors, mappedSectors, highestMappedSector uint64, err error) { - var params string - _, sizeInSectors, _, params, err = devicemapper.GetStatus(devName) - if err != nil { - return - } - if _, err = fmt.Sscanf(params, "%d %d", &mappedSectors, &highestMappedSector); err == nil { - return - } - return -} - -// GetDeviceStatus provides size, mapped sectors -func (devices *DeviceSet) GetDeviceStatus(hash string) (*DevStatus, error) { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return nil, err - } - - info.lock.Lock() - defer info.lock.Unlock() - - devices.Lock() - defer devices.Unlock() - - status := &DevStatus{ - DeviceID: info.DeviceID, - Size: info.Size, - TransactionID: info.TransactionID, - } - - if err := devices.activateDeviceIfNeeded(info, false); err != nil { - return nil, fmt.Errorf("devmapper: Error activating devmapper device for '%s': %s", hash, err) - } - - sizeInSectors, mappedSectors, highestMappedSector, err := devices.deviceStatus(info.DevName()) - - if err != nil { - return nil, err - } - - status.SizeInSectors = sizeInSectors - status.MappedSectors = mappedSectors - status.HighestMappedSector = highestMappedSector - - return status, nil -} - -func (devices *DeviceSet) poolStatus() (totalSizeInSectors, transactionID, dataUsed, dataTotal, metadataUsed, metadataTotal uint64, err error) { - var params string - if _, totalSizeInSectors, _, params, err = devicemapper.GetStatus(devices.getPoolName()); err == nil { - _, err = fmt.Sscanf(params, "%d %d/%d %d/%d", &transactionID, &metadataUsed, &metadataTotal, &dataUsed, &dataTotal) - } - return -} - -// DataDevicePath returns the path to the data storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) DataDevicePath() string { - return devices.dataDevice -} - -// MetadataDevicePath returns the path to the metadata storage for this deviceset, -// regardless of loopback or block device -func (devices *DeviceSet) MetadataDevicePath() string { - return devices.metadataDevice -} - -func (devices *DeviceSet) getUnderlyingAvailableSpace(loopFile string) (uint64, error) { - buf := new(syscall.Statfs_t) - if err := syscall.Statfs(loopFile, buf); err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile filesystem %v: %v", loopFile, err) - return 0, err - } - return buf.Bfree * uint64(buf.Bsize), nil -} - -func (devices *DeviceSet) isRealFile(loopFile string) (bool, error) { - if loopFile != "" { - fi, err := os.Stat(loopFile) - if err != nil { - logrus.Warnf("devmapper: Couldn't stat loopfile %v: %v", loopFile, err) - return false, err - } - return fi.Mode().IsRegular(), nil - } - return false, nil -} - -// Status returns the current status of this deviceset -func (devices *DeviceSet) Status() *Status { - devices.Lock() - defer devices.Unlock() - - status := &Status{} - - status.PoolName = devices.getPoolName() - status.DataFile = devices.DataDevicePath() - status.DataLoopback = devices.dataLoopFile - status.MetadataFile = devices.MetadataDevicePath() - status.MetadataLoopback = devices.metadataLoopFile - status.UdevSyncSupported = devicemapper.UdevSyncSupported() - status.DeferredRemoveEnabled = devices.deferredRemove - status.DeferredDeleteEnabled = devices.deferredDelete - 
status.DeferredDeletedDeviceCount = devices.nrDeletedDevices - status.BaseDeviceSize = devices.getBaseDeviceSize() - status.BaseDeviceFS = devices.getBaseDeviceFS() - - totalSizeInSectors, _, dataUsed, dataTotal, metadataUsed, metadataTotal, err := devices.poolStatus() - if err == nil { - // Convert from blocks to bytes - blockSizeInSectors := totalSizeInSectors / dataTotal - - status.Data.Used = dataUsed * blockSizeInSectors * 512 - status.Data.Total = dataTotal * blockSizeInSectors * 512 - status.Data.Available = status.Data.Total - status.Data.Used - - // metadata blocks are always 4k - status.Metadata.Used = metadataUsed * 4096 - status.Metadata.Total = metadataTotal * 4096 - status.Metadata.Available = status.Metadata.Total - status.Metadata.Used - - status.SectorSize = blockSizeInSectors * 512 - - if check, _ := devices.isRealFile(devices.dataLoopFile); check { - actualSpace, err := devices.getUnderlyingAvailableSpace(devices.dataLoopFile) - if err == nil && actualSpace < status.Data.Available { - status.Data.Available = actualSpace - } - } - - if check, _ := devices.isRealFile(devices.metadataLoopFile); check { - actualSpace, err := devices.getUnderlyingAvailableSpace(devices.metadataLoopFile) - if err == nil && actualSpace < status.Metadata.Available { - status.Metadata.Available = actualSpace - } - } - - minFreeData := (dataTotal * uint64(devices.minFreeSpacePercent)) / 100 - status.MinFreeSpace = minFreeData * blockSizeInSectors * 512 - } - - return status -} - -// exportDeviceMetadata returns the metadata of the device with the given hash. -func (devices *DeviceSet) exportDeviceMetadata(hash string) (*deviceMetadata, error) { - info, err := devices.lookupDeviceWithLock(hash) - if err != nil { - return nil, err - } - - info.lock.Lock() - defer info.lock.Unlock() - - metadata := &deviceMetadata{info.DeviceID, info.Size, info.Name()} - return metadata, nil -} - -// NewDeviceSet creates the device set based on the options provided. 
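The Status() arithmetic above turns thin-pool block counts into bytes: blockSizeInSectors = totalSizeInSectors / dataTotal, bytes = blocks * blockSizeInSectors * 512, with metadata blocks fixed at 4 KiB. A worked sketch with fabricated status params in the same "%d %d/%d %d/%d" format the poolStatus Sscanf expects (real values would come from devicemapper.GetStatus):

```go
package main

import "fmt"

func main() {
	// Fabricated `dmsetup status` params for a thin-pool target:
	// transactionID metaUsed/metaTotal dataUsed/dataTotal
	params := "3 52/4096 423/24576"
	totalSizeInSectors := uint64(24576 * 128) // 24576 blocks of 64KiB (128 sectors each)

	var transactionID, metaUsed, metaTotal, dataUsed, dataTotal uint64
	if _, err := fmt.Sscanf(params, "%d %d/%d %d/%d",
		&transactionID, &metaUsed, &metaTotal, &dataUsed, &dataTotal); err != nil {
		panic(err)
	}

	blockSizeInSectors := totalSizeInSectors / dataTotal // 128 sectors = 64KiB
	usedBytes := dataUsed * blockSizeInSectors * 512
	totalBytes := dataTotal * blockSizeInSectors * 512

	fmt.Printf("data: %d/%d bytes used (block size %d bytes)\n",
		usedBytes, totalBytes, blockSizeInSectors*512)
	// metadata blocks are always 4KiB
	fmt.Printf("meta: %d/%d bytes used\n", metaUsed*4096, metaTotal*4096)
}
```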
-func NewDeviceSet(root string, doInit bool, options []string, uidMaps, gidMaps []idtools.IDMap) (*DeviceSet, error) { - devicemapper.SetDevDir("/dev") - - devices := &DeviceSet{ - root: root, - metaData: metaData{Devices: make(map[string]*devInfo)}, - dataLoopbackSize: defaultDataLoopbackSize, - metaDataLoopbackSize: defaultMetaDataLoopbackSize, - baseFsSize: defaultBaseFsSize, - overrideUdevSyncCheck: defaultUdevSyncOverride, - doBlkDiscard: true, - thinpBlockSize: defaultThinpBlockSize, - deviceIDMap: make([]byte, deviceIDMapSz), - deletionWorkerTicker: time.NewTicker(time.Second * 30), - uidMaps: uidMaps, - gidMaps: gidMaps, - minFreeSpacePercent: defaultMinFreeSpacePercent, - } - - foundBlkDiscard := false - for _, option := range options { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return nil, err - } - key = strings.ToLower(key) - switch key { - case "dm.basesize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - userBaseSize = true - devices.baseFsSize = uint64(size) - case "dm.loopdatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.dataLoopbackSize = size - case "dm.loopmetadatasize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - devices.metaDataLoopbackSize = size - case "dm.fs": - if val != "ext4" && val != "xfs" { - return nil, fmt.Errorf("devmapper: Unsupported filesystem %s\n", val) - } - devices.filesystem = val - case "dm.mkfsarg": - devices.mkfsArgs = append(devices.mkfsArgs, val) - case "dm.mountopt": - devices.mountOptions = joinMountOptions(devices.mountOptions, val) - case "dm.metadatadev": - devices.metadataDevice = val - case "dm.datadev": - devices.dataDevice = val - case "dm.thinpooldev": - devices.thinPoolDevice = strings.TrimPrefix(val, "/dev/mapper/") - case "dm.blkdiscard": - foundBlkDiscard = true - devices.doBlkDiscard, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - case "dm.blocksize": - size, err := units.RAMInBytes(val) - if err != nil { - return nil, err - } - // convert to 512b sectors - devices.thinpBlockSize = uint32(size) >> 9 - case "dm.override_udev_sync_check": - devices.overrideUdevSyncCheck, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_removal": - enableDeferredRemoval, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.use_deferred_deletion": - enableDeferredDeletion, err = strconv.ParseBool(val) - if err != nil { - return nil, err - } - - case "dm.min_free_space": - if !strings.HasSuffix(val, "%") { - return nil, fmt.Errorf("devmapper: Option dm.min_free_space requires %% suffix") - } - - valstring := strings.TrimSuffix(val, "%") - minFreeSpacePercent, err := strconv.ParseUint(valstring, 10, 32) - if err != nil { - return nil, err - } - - if minFreeSpacePercent >= 100 { - return nil, fmt.Errorf("devmapper: Invalid value %v for option dm.min_free_space", val) - } - - devices.minFreeSpacePercent = uint32(minFreeSpacePercent) - default: - return nil, fmt.Errorf("devmapper: Unknown option %s\n", key) - } - } - - // By default, don't do blk discard hack on raw devices, its rarely useful and is expensive - if !foundBlkDiscard && (devices.dataDevice != "" || devices.thinPoolDevice != "") { - devices.doBlkDiscard = false - } - - if err := devices.initDevmapper(doInit); err != nil { - return nil, err - } - - return devices, nil -} diff --git a/daemon/graphdriver/devmapper/devmapper_doc.go 
b/daemon/graphdriver/devmapper/devmapper_doc.go deleted file mode 100644 index 9ab3e4f864..0000000000 --- a/daemon/graphdriver/devmapper/devmapper_doc.go +++ /dev/null @@ -1,106 +0,0 @@ -package devmapper - -// Definition of struct dm_task and sub structures (from lvm2) -// -// struct dm_ioctl { -// /* -// * The version number is made up of three parts: -// * major - no backward or forward compatibility, -// * minor - only backwards compatible, -// * patch - both backwards and forwards compatible. -// * -// * All clients of the ioctl interface should fill in the -// * version number of the interface that they were -// * compiled with. -// * -// * All recognized ioctl commands (ie. those that don't -// * return -ENOTTY) fill out this field, even if the -// * command failed. -// */ -// uint32_t version[3]; /* in/out */ -// uint32_t data_size; /* total size of data passed in -// * including this struct */ - -// uint32_t data_start; /* offset to start of data -// * relative to start of this struct */ - -// uint32_t target_count; /* in/out */ -// int32_t open_count; /* out */ -// uint32_t flags; /* in/out */ - -// /* -// * event_nr holds either the event number (input and output) or the -// * udev cookie value (input only). -// * The DM_DEV_WAIT ioctl takes an event number as input. -// * The DM_SUSPEND, DM_DEV_REMOVE and DM_DEV_RENAME ioctls -// * use the field as a cookie to return in the DM_COOKIE -// * variable with the uevents they issue. -// * For output, the ioctls return the event number, not the cookie. -// */ -// uint32_t event_nr; /* in/out */ -// uint32_t padding; - -// uint64_t dev; /* in/out */ - -// char name[DM_NAME_LEN]; /* device name */ -// char uuid[DM_UUID_LEN]; /* unique identifier for -// * the block device */ -// char data[7]; /* padding or data */ -// }; - -// struct target { -// uint64_t start; -// uint64_t length; -// char *type; -// char *params; - -// struct target *next; -// }; - -// typedef enum { -// DM_ADD_NODE_ON_RESUME, /* add /dev/mapper node with dmsetup resume */ -// DM_ADD_NODE_ON_CREATE /* add /dev/mapper node with dmsetup create */ -// } dm_add_node_t; - -// struct dm_task { -// int type; -// char *dev_name; -// char *mangled_dev_name; - -// struct target *head, *tail; - -// int read_only; -// uint32_t event_nr; -// int major; -// int minor; -// int allow_default_major_fallback; -// uid_t uid; -// gid_t gid; -// mode_t mode; -// uint32_t read_ahead; -// uint32_t read_ahead_flags; -// union { -// struct dm_ioctl *v4; -// } dmi; -// char *newname; -// char *message; -// char *geometry; -// uint64_t sector; -// int no_flush; -// int no_open_count; -// int skip_lockfs; -// int query_inactive_table; -// int suppress_identical_reload; -// dm_add_node_t add_node; -// uint64_t existing_table_size; -// int cookie_set; -// int new_uuid; -// int secure_data; -// int retry_remove; -// int enable_checks; -// int expected_errno; - -// char *uuid; -// char *mangled_uuid; -// }; -// diff --git a/daemon/graphdriver/devmapper/devmapper_test.go b/daemon/graphdriver/devmapper/devmapper_test.go deleted file mode 100644 index 5c2abcefcb..0000000000 --- a/daemon/graphdriver/devmapper/devmapper_test.go +++ /dev/null @@ -1,110 +0,0 @@ -// +build linux - -package devmapper - -import ( - "fmt" - "testing" - "time" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/daemon/graphdriver/graphtest" -) - -func init() { - // Reduce the size the the base fs and loopback for the tests - defaultDataLoopbackSize = 300 * 1024 * 1024 - defaultMetaDataLoopbackSize = 
200 * 1024 * 1024 - defaultBaseFsSize = 300 * 1024 * 1024 - defaultUdevSyncOverride = true - if err := graphtest.InitLoopbacks(); err != nil { - panic(err) - } -} - -// This avoids creating a new driver for each test if all tests are run - // Make sure to put new tests between TestDevmapperSetup and TestDevmapperTeardown -func TestDevmapperSetup(t *testing.T) { - graphtest.GetDriver(t, "devicemapper") -} - -func TestDevmapperCreateEmpty(t *testing.T) { - graphtest.DriverTestCreateEmpty(t, "devicemapper") -} - -func TestDevmapperCreateBase(t *testing.T) { - graphtest.DriverTestCreateBase(t, "devicemapper") -} - -func TestDevmapperCreateSnap(t *testing.T) { - graphtest.DriverTestCreateSnap(t, "devicemapper") -} - -func TestDevmapperTeardown(t *testing.T) { - graphtest.PutDriver(t) -} - -func TestDevmapperReduceLoopBackSize(t *testing.T) { - tenMB := int64(10 * 1024 * 1024) - testChangeLoopBackSize(t, -tenMB, defaultDataLoopbackSize, defaultMetaDataLoopbackSize) -} - -func TestDevmapperIncreaseLoopBackSize(t *testing.T) { - tenMB := int64(10 * 1024 * 1024) - testChangeLoopBackSize(t, tenMB, defaultDataLoopbackSize+tenMB, defaultMetaDataLoopbackSize+tenMB) -} - -func testChangeLoopBackSize(t *testing.T, delta, expectDataSize, expectMetaDataSize int64) { - driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) - defer graphtest.PutDriver(t) - // make sure the data and metadata loopback sizes are the defaults - if s := driver.DeviceSet.Status(); s.Data.Total != uint64(defaultDataLoopbackSize) || s.Metadata.Total != uint64(defaultMetaDataLoopbackSize) { - t.Fatalf("data or metadata loop back size is incorrect") - } - if err := driver.Cleanup(); err != nil { - t.Fatal(err) - } - // Reload - d, err := Init(driver.home, []string{ - fmt.Sprintf("dm.loopdatasize=%d", defaultDataLoopbackSize+delta), - fmt.Sprintf("dm.loopmetadatasize=%d", defaultMetaDataLoopbackSize+delta), - }, nil, nil) - if err != nil { - t.Fatalf("error creating devicemapper driver: %v", err) - } - driver = d.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) - if s := driver.DeviceSet.Status(); s.Data.Total != uint64(expectDataSize) || s.Metadata.Total != uint64(expectMetaDataSize) { - t.Fatalf("data or metadata loop back size is incorrect") - } - if err := driver.Cleanup(); err != nil { - t.Fatal(err) - } -} - -// Make sure devices.Lock() has been released upon return from the cleanupDeletedDevices() function -func TestDevmapperLockReleasedDeviceDeletion(t *testing.T) { - driver := graphtest.GetDriver(t, "devicemapper").(*graphtest.Driver).Driver.(*graphdriver.NaiveDiffDriver).ProtoDriver.(*Driver) - defer graphtest.PutDriver(t) - - // Call cleanupDeletedDevices() and after the call take and release - // the DeviceSet lock. If the lock has not been released, this will hang. - driver.DeviceSet.cleanupDeletedDevices() - - doneChan := make(chan bool) - - go func() { - driver.DeviceSet.Lock() - defer driver.DeviceSet.Unlock() - doneChan <- true - }() - - select { - case <-time.After(time.Second * 5): - // Timer expired. That means the lock was not released upon - // function return and we are deadlocked. Release the lock - // here so that cleanup can succeed, and fail the test. 
- driver.DeviceSet.Unlock() - t.Fatalf("Could not acquire devices lock after call to cleanupDeletedDevices()") - case <-doneChan: - } -} diff --git a/daemon/graphdriver/devmapper/driver.go b/daemon/graphdriver/devmapper/driver.go deleted file mode 100644 index 4a8296b66a..0000000000 --- a/daemon/graphdriver/devmapper/driver.go +++ /dev/null @@ -1,226 +0,0 @@ -// +build linux - -package devmapper - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strconv" - - "github.com/Sirupsen/logrus" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/devicemapper" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/go-units" -) - -func init() { - graphdriver.Register("devicemapper", Init) -} - -// Driver contains the device set mounted and the home directory -type Driver struct { - *DeviceSet - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter -} - -// Init creates a driver with the given home and the set of options. -func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - deviceSet, err := NewDeviceSet(home, true, options, uidMaps, gidMaps) - if err != nil { - return nil, err - } - - if err := mount.MakePrivate(home); err != nil { - return nil, err - } - - d := &Driver{ - DeviceSet: deviceSet, - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), - } - - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -func (d *Driver) String() string { - return "devicemapper" -} - -// Status returns the status about the driver in a printable format. -// Information returned contains Pool Name, Data File, Metadata file, disk usage by -// the data and metadata, etc. 
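Status() below returns [][2]string rather than a map so callers preserve the display order. A hedged sketch of rendering such a table with text/tabwriter (stub values; not necessarily how the docker CLI itself prints it):

```go
package main

import (
	"fmt"
	"os"
	"text/tabwriter"
)

func main() {
	status := [][2]string{
		{"Pool Name", "docker-thinpool"},
		{"Pool Blocksize", "65.54 kB"},
		{"Data Space Used", "11.8 MB"},
		{"Deferred Removal Enabled", "true"},
	}

	w := tabwriter.NewWriter(os.Stdout, 0, 4, 1, ' ', 0)
	for _, kv := range status {
		fmt.Fprintf(w, "%s:\t%s\n", kv[0], kv[1])
	}
	w.Flush() // aligned "key: value" output
}
```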
-func (d *Driver) Status() [][2]string { - s := d.DeviceSet.Status() - - status := [][2]string{ - {"Pool Name", s.PoolName}, - {"Pool Blocksize", fmt.Sprintf("%s", units.HumanSize(float64(s.SectorSize)))}, - {"Base Device Size", fmt.Sprintf("%s", units.HumanSize(float64(s.BaseDeviceSize)))}, - {"Backing Filesystem", s.BaseDeviceFS}, - {"Data file", s.DataFile}, - {"Metadata file", s.MetadataFile}, - {"Data Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Used)))}, - {"Data Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Total)))}, - {"Data Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Data.Available)))}, - {"Metadata Space Used", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Used)))}, - {"Metadata Space Total", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Total)))}, - {"Metadata Space Available", fmt.Sprintf("%s", units.HumanSize(float64(s.Metadata.Available)))}, - {"Thin Pool Minimum Free Space", fmt.Sprintf("%s", units.HumanSize(float64(s.MinFreeSpace)))}, - {"Udev Sync Supported", fmt.Sprintf("%v", s.UdevSyncSupported)}, - {"Deferred Removal Enabled", fmt.Sprintf("%v", s.DeferredRemoveEnabled)}, - {"Deferred Deletion Enabled", fmt.Sprintf("%v", s.DeferredDeleteEnabled)}, - {"Deferred Deleted Device Count", fmt.Sprintf("%v", s.DeferredDeletedDeviceCount)}, - } - if len(s.DataLoopback) > 0 { - status = append(status, [2]string{"Data loop file", s.DataLoopback}) - } - if len(s.MetadataLoopback) > 0 { - status = append(status, [2]string{"Metadata loop file", s.MetadataLoopback}) - } - if vStr, err := devicemapper.GetLibraryVersion(); err == nil { - status = append(status, [2]string{"Library Version", vStr}) - } - return status -} - -// GetMetadata returns a map of information about the device. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - m, err := d.DeviceSet.exportDeviceMetadata(id) - - if err != nil { - return nil, err - } - - metadata := make(map[string]string) - metadata["DeviceId"] = strconv.Itoa(m.deviceID) - metadata["DeviceSize"] = strconv.FormatUint(m.deviceSize, 10) - metadata["DeviceName"] = m.deviceName - return metadata, nil -} - -// Cleanup unmounts a device. -func (d *Driver) Cleanup() error { - err := d.DeviceSet.Shutdown(d.home) - - if err2 := mount.Unmount(d.home); err == nil { - err = err2 - } - - return err -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.Create(id, parent, mountLabel, storageOpt) -} - -// Create adds a device with a given id and the parent. -func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { - if err := d.DeviceSet.AddDevice(id, parent, storageOpt); err != nil { - return err - } - - return nil -} - -// Remove removes a device with a given id, unmounts the filesystem. 
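Get and Put further below lean on graphdriver.RefCounter so that nested Gets of the same id share a single mount. A minimal re-implementation of that counting idea (not the real RefCounter API, just its shape):

```go
package main

import (
	"fmt"
	"sync"
)

// refCounter tracks how many callers currently hold each mount path.
type refCounter struct {
	mu     sync.Mutex
	counts map[string]int
}

func (c *refCounter) Increment(path string) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[path]++
	return c.counts[path]
}

func (c *refCounter) Decrement(path string) int {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.counts[path]--
	return c.counts[path]
}

func main() {
	ctr := &refCounter{counts: make(map[string]int)}
	mp := "/var/lib/docker/devicemapper/mnt/abc"
	if ctr.Increment(mp) == 1 {
		fmt.Println("first Get: mount the device")
	}
	ctr.Increment(mp) // nested Get: reuse the existing mount
	if ctr.Decrement(mp) > 0 {
		fmt.Println("Put: still referenced, keep mounted")
	}
	if ctr.Decrement(mp) == 0 {
		fmt.Println("last Put: unmount the device")
	}
}
```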
-func (d *Driver) Remove(id string) error { - if !d.DeviceSet.HasDevice(id) { - // Consider removing a non-existing device a no-op - // This is useful to be able to progress on container removal - // if the underlying device has gone away due to earlier errors - return nil - } - - // This assumes the device has been properly Get/Put:ed and thus is unmounted - if err := d.DeviceSet.DeleteDevice(id, false); err != nil { - return err - } - - mp := path.Join(d.home, "mnt", id) - if err := os.RemoveAll(mp); err != nil && !os.IsNotExist(err) { - return err - } - - return nil -} - -// Get mounts a device with given id into the root filesystem -func (d *Driver) Get(id, mountLabel string) (string, error) { - mp := path.Join(d.home, "mnt", id) - rootFs := path.Join(mp, "rootfs") - if count := d.ctr.Increment(mp); count > 1 { - return rootFs, nil - } - - uid, gid, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - d.ctr.Decrement(mp) - return "", err - } - - // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(path.Join(d.home, "mnt"), 0755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(mp) - return "", err - } - if err := idtools.MkdirAs(mp, 0755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(mp) - return "", err - } - - // Mount the device - if err := d.DeviceSet.MountDevice(id, mp, mountLabel); err != nil { - d.ctr.Decrement(mp) - return "", err - } - - if err := idtools.MkdirAllAs(rootFs, 0755, uid, gid); err != nil && !os.IsExist(err) { - d.ctr.Decrement(mp) - d.DeviceSet.UnmountDevice(id, mp) - return "", err - } - - idFile := path.Join(mp, "id") - if _, err := os.Stat(idFile); err != nil && os.IsNotExist(err) { - // Create an "id" file with the container/image id in it to help reconstruct this in case - // of later problems - if err := ioutil.WriteFile(idFile, []byte(id), 0600); err != nil { - d.ctr.Decrement(mp) - d.DeviceSet.UnmountDevice(id, mp) - return "", err - } - } - - return rootFs, nil -} - -// Put unmounts a device and removes it. -func (d *Driver) Put(id string) error { - mp := path.Join(d.home, "mnt", id) - if count := d.ctr.Decrement(mp); count > 0 { - return nil - } - err := d.DeviceSet.UnmountDevice(id, mp) - if err != nil { - logrus.Errorf("devmapper: Error unmounting device %s: %s", id, err) - } - return err -} - -// Exists checks to see if the device exists. -func (d *Driver) Exists(id string) bool { - return d.DeviceSet.HasDevice(id) -} diff --git a/daemon/graphdriver/devmapper/mount.go b/daemon/graphdriver/devmapper/mount.go deleted file mode 100644 index cca1fe1b38..0000000000 --- a/daemon/graphdriver/devmapper/mount.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build linux - -package devmapper - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "syscall" -) - -// FIXME: this is copy-pasted from the aufs driver. -// It should be moved into the core. - -// Mounted returns true if a mount point exists. -func Mounted(mountpoint string) (bool, error) { - mntpoint, err := os.Stat(mountpoint) - if err != nil { - if os.IsNotExist(err) { - return false, nil - } - return false, err - } - parent, err := os.Stat(filepath.Join(mountpoint, "..")) - if err != nil { - return false, err - } - mntpointSt := mntpoint.Sys().(*syscall.Stat_t) - parentSt := parent.Sys().(*syscall.Stat_t) - return mntpointSt.Dev != parentSt.Dev, nil -} - -type probeData struct { - fsName string - magic string - offset uint64 -} - -// ProbeFsType returns the filesystem name for the given device id. 
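ProbeFsType, coming up next, identifies a filesystem purely by magic bytes at known offsets. The same probe pattern, demonstrated against a temp file instead of a block device (the XFS magic "XFSB" at offset 0 is real; the file is fake):

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"os"
)

// hasMagic reports whether the file contains magic at offset.
func hasMagic(path, magic string, offset int64) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()

	buf := make([]byte, len(magic))
	if _, err := f.ReadAt(buf, offset); err != nil {
		return false, err
	}
	return bytes.Equal(buf, []byte(magic)), nil
}

func main() {
	// Fake "superblock": the XFS magic "XFSB" lives at offset 0.
	tmp, _ := ioutil.TempFile("", "probe")
	defer os.Remove(tmp.Name())
	tmp.WriteString("XFSB....")
	tmp.Close()

	ok, err := hasMagic(tmp.Name(), "XFSB", 0)
	fmt.Println(ok, err) // true <nil>
}
```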
-func ProbeFsType(device string) (string, error) { - probes := []probeData{ - {"btrfs", "_BHRfS_M", 0x10040}, - {"ext4", "\123\357", 0x438}, - {"xfs", "XFSB", 0}, - } - - maxLen := uint64(0) - for _, p := range probes { - l := p.offset + uint64(len(p.magic)) - if l > maxLen { - maxLen = l - } - } - - file, err := os.Open(device) - if err != nil { - return "", err - } - defer file.Close() - - buffer := make([]byte, maxLen) - l, err := file.Read(buffer) - if err != nil { - return "", err - } - - if uint64(l) != maxLen { - return "", fmt.Errorf("devmapper: unable to detect filesystem type of %s, short read", device) - } - - for _, p := range probes { - if bytes.Equal([]byte(p.magic), buffer[p.offset:p.offset+uint64(len(p.magic))]) { - return p.fsName, nil - } - } - - return "", fmt.Errorf("devmapper: Unknown filesystem type on %s", device) -} - -func joinMountOptions(a, b string) string { - if a == "" { - return b - } - if b == "" { - return a - } - return a + "," + b -} diff --git a/daemon/graphdriver/driver.go b/daemon/graphdriver/driver.go deleted file mode 100644 index 668594ab29..0000000000 --- a/daemon/graphdriver/driver.go +++ /dev/null @@ -1,243 +0,0 @@ -package graphdriver - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/vbatts/tar-split/tar/storage" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" -) - -// FsMagic unsigned id of the filesystem in use. -type FsMagic uint32 - -const ( - // FsMagicUnsupported is a predefined constant value other than a valid filesystem id. - FsMagicUnsupported = FsMagic(0x00000000) -) - -var ( - // All registered drivers - drivers map[string]InitFunc - - // ErrNotSupported returned when driver is not supported. - ErrNotSupported = errors.New("driver not supported") - // ErrPrerequisites returned when driver does not meet prerequisites. - ErrPrerequisites = errors.New("prerequisites for driver not satisfied (wrong filesystem?)") - // ErrIncompatibleFS returned when file system is not supported. - ErrIncompatibleFS = fmt.Errorf("backing file system is unsupported for this graph driver") -) - -// InitFunc initializes the storage driver. -type InitFunc func(root string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) - -// ProtoDriver defines the basic capabilities of a driver. -// This interface exists solely to be a minimum set of methods -// for client code which chooses not to implement the entire Driver -// interface and use the NaiveDiffDriver wrapper constructor. -// -// Use of ProtoDriver directly by client code is not recommended. -type ProtoDriver interface { - // String returns a string representation of this driver. - String() string - // CreateReadWrite creates a new, empty filesystem layer that is ready - // to be used as the storage for a container. - CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error - // Create creates a new, empty, filesystem layer with the - // specified id and parent and mountLabel. Parent and mountLabel may be "". - Create(id, parent, mountLabel string, storageOpt map[string]string) error - // Remove attempts to remove the filesystem layer with this id. - Remove(id string) error - // Get returns the mountpoint for the layered filesystem referred - // to by this id. You can optionally specify a mountLabel or "". - // Returns the absolute path to the mounted layered filesystem. 
- Get(id, mountLabel string) (dir string, err error) - // Put releases the system resources for the specified id, - // e.g., unmounting the layered filesystem. - Put(id string) error - // Exists returns whether a filesystem layer with the specified - // ID exists on this driver. - Exists(id string) bool - // Status returns a set of key-value pairs which give low - // level diagnostic status about this driver. - Status() [][2]string - // GetMetadata returns a set of key-value pairs which give low-level - // information about the image/container the driver is managing. - GetMetadata(id string) (map[string]string, error) - // Cleanup performs necessary tasks to release resources - // held by the driver, e.g., unmounting all layered filesystems - // known to this driver. - Cleanup() error -} - -// Driver is the interface for layered/snapshot file system drivers. -type Driver interface { - ProtoDriver - // Diff produces an archive of the changes between the specified - // layer and its parent layer which may be "". - Diff(id, parent string) (archive.Archive, error) - // Changes produces a list of changes between the specified layer - // and its parent layer. If parent is "", then all changes will be ADD changes. - Changes(id, parent string) ([]archive.Change, error) - // ApplyDiff extracts the changeset from the given diff into the - // layer with the specified id and parent, returning the size of the - // new layer in bytes. - // The archive.Reader must be an uncompressed stream. - ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) - // DiffSize calculates the changes between the specified id - // and its parent and returns the size in bytes of the changes - // relative to its base filesystem directory. - DiffSize(id, parent string) (size int64, err error) -} - -// DiffGetterDriver is the interface for layered file system drivers that -// provide a specialized function for getting file contents for tar-split. -type DiffGetterDriver interface { - Driver - // DiffGetter returns an interface to efficiently retrieve the contents - // of files in a layer. - DiffGetter(id string) (FileGetCloser, error) -} - -// FileGetCloser extends the storage.FileGetter interface with a Close method -// for cleaning up. -type FileGetCloser interface { - storage.FileGetter - // Close cleans up any resources associated with the FileGetCloser. - Close() error -} - -// Checker makes checks on specified filesystems. -type Checker interface { - // IsMounted returns true if the provided path is mounted for the specific checker - IsMounted(path string) bool -} - -func init() { - drivers = make(map[string]InitFunc) -} - -// Register registers an InitFunc for the driver. 
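To make the ProtoDriver contract above concrete, here is a toy in-memory driver satisfying a trimmed-down copy of the lifecycle methods (plain directories stand in for layers, roughly what the vfs driver does; the real interface has more methods):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

// protoDriver is a cut-down copy of the lifecycle half of ProtoDriver.
type protoDriver interface {
	Create(id, parent string) error
	Get(id string) (string, error)
	Put(id string) error
	Remove(id string) error
	Exists(id string) bool
}

// dirDriver stores every layer as a plain directory (no copy-on-write).
type dirDriver struct{ root string }

func (d *dirDriver) layerPath(id string) string { return filepath.Join(d.root, id) }

func (d *dirDriver) Create(id, parent string) error {
	// A real driver would snapshot parent; here we just make a directory.
	return os.MkdirAll(d.layerPath(id), 0755)
}
func (d *dirDriver) Get(id string) (string, error) { return d.layerPath(id), nil }
func (d *dirDriver) Put(id string) error           { return nil } // nothing mounted
func (d *dirDriver) Remove(id string) error        { return os.RemoveAll(d.layerPath(id)) }
func (d *dirDriver) Exists(id string) bool {
	_, err := os.Stat(d.layerPath(id))
	return err == nil
}

func main() {
	root, _ := ioutil.TempDir("", "toydriver")
	defer os.RemoveAll(root)

	var drv protoDriver = &dirDriver{root: root}
	drv.Create("base", "")
	dir, _ := drv.Get("base")
	fmt.Println(drv.Exists("base"), dir)
	drv.Put("base")
	drv.Remove("base")
}
```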
-func Register(name string, initFunc InitFunc) error { - if _, exists := drivers[name]; exists { - return fmt.Errorf("Name already registered %s", name) - } - drivers[name] = initFunc - - return nil -} - -// GetDriver initializes and returns the registered driver -func GetDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { - if initFunc, exists := drivers[name]; exists { - return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) - } - if pluginDriver, err := lookupPlugin(name, home, options); err == nil { - return pluginDriver, nil - } - logrus.Errorf("Failed to GetDriver graph %s %s", name, home) - return nil, ErrNotSupported -} - -// getBuiltinDriver initializes and returns the registered driver, but does not try to load from plugins -func getBuiltinDriver(name, home string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { - if initFunc, exists := drivers[name]; exists { - return initFunc(filepath.Join(home, name), options, uidMaps, gidMaps) - } - logrus.Errorf("Failed to built-in GetDriver graph %s %s", name, home) - return nil, ErrNotSupported -} - -// New creates the driver and initializes it at the specified root. -func New(root string, name string, options []string, uidMaps, gidMaps []idtools.IDMap) (Driver, error) { - if name != "" { - logrus.Debugf("[graphdriver] trying provided driver %q", name) // so the logs show specified driver - return GetDriver(name, root, options, uidMaps, gidMaps) - } - - // Guess for prior driver - driversMap := scanPriorDrivers(root) - for _, name := range priority { - if name == "vfs" { - // don't use vfs even if there is state present. - continue - } - if _, prior := driversMap[name]; prior { - // of the state found from prior drivers, check in order of our priority - // which we would prefer - driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) - if err != nil { - // unlike below, we will return error here, because there is prior - // state, and now it is no longer supported/prereq/compatible, so - // something changed and needs attention. Otherwise the daemon's - // images would just "disappear". - logrus.Errorf("[graphdriver] prior storage driver %q failed: %s", name, err) - return nil, err - } - - // abort starting when there are other prior configured drivers - // to ensure the user explicitly selects the driver to load - if len(driversMap)-1 > 0 { - var driversSlice []string - for name := range driversMap { - driversSlice = append(driversSlice, name) - } - - return nil, fmt.Errorf("%q contains several valid graphdrivers: %s; Please cleanup or explicitly choose storage driver (-s )", root, strings.Join(driversSlice, ", ")) - } - - logrus.Infof("[graphdriver] using prior storage driver %q", name) - return driver, nil - } - } - - // Check for priority drivers first - for _, name := range priority { - driver, err := getBuiltinDriver(name, root, options, uidMaps, gidMaps) - if err != nil { - if isDriverNotSupported(err) { - continue - } - return nil, err - } - return driver, nil - } - - // Check all registered drivers if no priority driver is found - for name, initFunc := range drivers { - driver, err := initFunc(filepath.Join(root, name), options, uidMaps, gidMaps) - if err != nil { - if isDriverNotSupported(err) { - continue - } - return nil, err - } - return driver, nil - } - return nil, fmt.Errorf("No supported storage backend found") -} - -// isDriverNotSupported returns true if the error initializing -// the graph driver is a non-supported error. 
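The fallback loops in New() above only keep probing when initialization fails with one of the three sentinel errors that isDriverNotSupported recognizes. The same filter pattern, reduced to its essentials with hypothetical sentinels (the original compares with ==; errors.Is is the modern equivalent):

```go
package main

import (
	"errors"
	"fmt"
)

var (
	errNotSupported  = errors.New("driver not supported")
	errPrerequisites = errors.New("prerequisites for driver not satisfied")
)

// probe pretends to initialize a named storage driver.
func probe(name string) error {
	if name != "overlay" {
		return errNotSupported // this backend can't run here
	}
	return nil
}

func main() {
	priority := []string{"aufs", "btrfs", "overlay", "vfs"}
	for _, name := range priority {
		if err := probe(name); err != nil {
			// Only "not supported"-class errors mean "try the next
			// driver"; anything else is a hard failure and must surface.
			if errors.Is(err, errNotSupported) || errors.Is(err, errPrerequisites) {
				continue
			}
			panic(err)
		}
		fmt.Println("selected driver:", name)
		return
	}
	fmt.Println("no supported storage backend found")
}
```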
-func isDriverNotSupported(err error) bool { - return err == ErrNotSupported || err == ErrPrerequisites || err == ErrIncompatibleFS -} - -// scanPriorDrivers returns an unordered scan of directories of prior storage drivers -func scanPriorDrivers(root string) map[string]bool { - driversMap := make(map[string]bool) - - for driver := range drivers { - p := filepath.Join(root, driver) - if _, err := os.Stat(p); err == nil && driver != "vfs" { - driversMap[driver] = true - } - } - return driversMap -} diff --git a/daemon/graphdriver/driver_freebsd.go b/daemon/graphdriver/driver_freebsd.go deleted file mode 100644 index 2891a84f3a..0000000000 --- a/daemon/graphdriver/driver_freebsd.go +++ /dev/null @@ -1,19 +0,0 @@ -package graphdriver - -import "syscall" - -var ( - // Slice of drivers that should be used in order - priority = []string{ - "zfs", - } -) - -// Mounted checks if the given path is mounted as the fs type -func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(mountPath, &buf); err != nil { - return false, err - } - return FsMagic(buf.Type) == fsType, nil -} diff --git a/daemon/graphdriver/driver_linux.go b/daemon/graphdriver/driver_linux.go deleted file mode 100644 index 59b6164b98..0000000000 --- a/daemon/graphdriver/driver_linux.go +++ /dev/null @@ -1,133 +0,0 @@ -// +build linux - -package graphdriver - -import ( - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/mount" -) - -const ( - // FsMagicAufs filesystem id for Aufs - FsMagicAufs = FsMagic(0x61756673) - // FsMagicBtrfs filesystem id for Btrfs - FsMagicBtrfs = FsMagic(0x9123683E) - // FsMagicCramfs filesystem id for Cramfs - FsMagicCramfs = FsMagic(0x28cd3d45) - // FsMagicEcryptfs filesystem id for eCryptfs - FsMagicEcryptfs = FsMagic(0xf15f) - // FsMagicExtfs filesystem id for Extfs - FsMagicExtfs = FsMagic(0x0000EF53) - // FsMagicF2fs filesystem id for F2fs - FsMagicF2fs = FsMagic(0xF2F52010) - // FsMagicGPFS filesystem id for GPFS - FsMagicGPFS = FsMagic(0x47504653) - // FsMagicJffs2Fs filesystem id for Jffs2Fs - FsMagicJffs2Fs = FsMagic(0x000072b6) - // FsMagicJfs filesystem id for Jfs - FsMagicJfs = FsMagic(0x3153464a) - // FsMagicNfsFs filesystem id for NfsFs - FsMagicNfsFs = FsMagic(0x00006969) - // FsMagicRAMFs filesystem id for RamFs - FsMagicRAMFs = FsMagic(0x858458f6) - // FsMagicReiserFs filesystem id for ReiserFs - FsMagicReiserFs = FsMagic(0x52654973) - // FsMagicSmbFs filesystem id for SmbFs - FsMagicSmbFs = FsMagic(0x0000517B) - // FsMagicSquashFs filesystem id for SquashFs - FsMagicSquashFs = FsMagic(0x73717368) - // FsMagicTmpFs filesystem id for TmpFs - FsMagicTmpFs = FsMagic(0x01021994) - // FsMagicVxFS filesystem id for VxFs - FsMagicVxFS = FsMagic(0xa501fcf5) - // FsMagicXfs filesystem id for Xfs - FsMagicXfs = FsMagic(0x58465342) - // FsMagicZfs filesystem id for Zfs - FsMagicZfs = FsMagic(0x2fc12fc1) - // FsMagicOverlay filesystem id for overlay - FsMagicOverlay = FsMagic(0x794C7630) -) - -var ( - // Slice of drivers that should be used in order - priority = []string{ - "aufs", - "btrfs", - "zfs", - "devicemapper", - "overlay", - "vfs", - } - - // FsNames maps filesystem id to name of the filesystem. 
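GetFSMagic and the FsNames table below combine into a two-step lookup: statfs(2) yields a numeric f_type, and the table names it. A Linux-only sketch using a subset of the constants above:

```go
// +build linux

package main

import (
	"fmt"
	"syscall"
)

type fsMagic uint32

// A subset of the FsNames table from the deleted file.
var fsNames = map[fsMagic]string{
	0x0000EF53: "extfs",
	0x01021994: "tmpfs",
	0x58465342: "xfs",
	0x9123683E: "btrfs",
}

func main() {
	var buf syscall.Statfs_t
	if err := syscall.Statfs("/", &buf); err != nil {
		panic(err)
	}
	magic := fsMagic(buf.Type)
	name, ok := fsNames[magic]
	if !ok {
		name = "unsupported"
	}
	fmt.Printf("/ is on %s (magic 0x%X)\n", name, uint32(magic))
}
```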
- FsNames = map[FsMagic]string{ - FsMagicAufs: "aufs", - FsMagicBtrfs: "btrfs", - FsMagicCramfs: "cramfs", - FsMagicExtfs: "extfs", - FsMagicF2fs: "f2fs", - FsMagicGPFS: "gpfs", - FsMagicJffs2Fs: "jffs2", - FsMagicJfs: "jfs", - FsMagicNfsFs: "nfs", - FsMagicRAMFs: "ramfs", - FsMagicReiserFs: "reiserfs", - FsMagicSmbFs: "smb", - FsMagicSquashFs: "squashfs", - FsMagicTmpFs: "tmpfs", - FsMagicUnsupported: "unsupported", - FsMagicVxFS: "vxfs", - FsMagicXfs: "xfs", - FsMagicZfs: "zfs", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(filepath.Dir(rootpath), &buf); err != nil { - return 0, err - } - return FsMagic(buf.Type), nil -} - -// NewFsChecker returns a checker configured for the provided FsMagic -func NewFsChecker(t FsMagic) Checker { - return &fsChecker{ - t: t, - } -} - -type fsChecker struct { - t FsMagic -} - -func (c *fsChecker) IsMounted(path string) bool { - m, _ := Mounted(c.t, path) - return m -} - -// NewDefaultChecker returns a checker that parses /proc/mountinfo to check - // if the specified path is mounted. -func NewDefaultChecker() Checker { - return &defaultChecker{} -} - -type defaultChecker struct { -} - -func (c *defaultChecker) IsMounted(path string) bool { - m, _ := mount.Mounted(path) - return m -} - -// Mounted checks if the given path is mounted as the fs type -func Mounted(fsType FsMagic, mountPath string) (bool, error) { - var buf syscall.Statfs_t - if err := syscall.Statfs(mountPath, &buf); err != nil { - return false, err - } - return FsMagic(buf.Type) == fsType, nil -} diff --git a/daemon/graphdriver/driver_solaris.go b/daemon/graphdriver/driver_solaris.go deleted file mode 100644 index 29719ffa4f..0000000000 --- a/daemon/graphdriver/driver_solaris.go +++ /dev/null @@ -1,65 +0,0 @@ -// +build solaris,cgo - -package graphdriver - -/* -#include <sys/statvfs.h> -#include <stdlib.h> - -static inline struct statvfs *getstatfs(char *s) { - struct statvfs *buf; - int err; - buf = (struct statvfs *)malloc(sizeof(struct statvfs)); - err = statvfs(s, buf); - return buf; -} -*/ -import "C" -import ( - "path/filepath" - "unsafe" - - log "github.com/Sirupsen/logrus" -) - -const ( - // FsMagicZfs filesystem id for Zfs - FsMagicZfs = FsMagic(0x2fc12fc1) -) - -var ( - // Slice of drivers that should be used in order - priority = []string{ - "zfs", - } - - // FsNames maps filesystem id to name of the filesystem. - FsNames = map[FsMagic]string{ - FsMagicZfs: "zfs", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - return 0, nil -} - -// Mounted checks if the given path is mounted as the fs type -// Solaris supports only ZFS for now -func Mounted(fsType FsMagic, mountPath string) (bool, error) { - - cs := C.CString(filepath.Dir(mountPath)) - buf := C.getstatfs(cs) - - // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... 
] - if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || - (buf.f_basetype[3] != 0) { - log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", mountPath) - C.free(unsafe.Pointer(buf)) - return false, ErrPrerequisites - } - - C.free(unsafe.Pointer(buf)) - C.free(unsafe.Pointer(cs)) - return true, nil -} diff --git a/daemon/graphdriver/driver_unsupported.go b/daemon/graphdriver/driver_unsupported.go deleted file mode 100644 index 4a875608b0..0000000000 --- a/daemon/graphdriver/driver_unsupported.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris - -package graphdriver - -var ( - // Slice of drivers that should be used in an order - priority = []string{ - "unsupported", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - return FsMagicUnsupported, nil -} diff --git a/daemon/graphdriver/driver_windows.go b/daemon/graphdriver/driver_windows.go deleted file mode 100644 index ffd30c2950..0000000000 --- a/daemon/graphdriver/driver_windows.go +++ /dev/null @@ -1,14 +0,0 @@ -package graphdriver - -var ( - // Slice of drivers that should be used in order - priority = []string{ - "windowsfilter", - } -) - -// GetFSMagic returns the filesystem id given the path. -func GetFSMagic(rootpath string) (FsMagic, error) { - // Note it is OK to return FsMagicUnsupported on Windows. - return FsMagicUnsupported, nil -} diff --git a/daemon/graphdriver/fsdiff.go b/daemon/graphdriver/fsdiff.go deleted file mode 100644 index 7d11da9260..0000000000 --- a/daemon/graphdriver/fsdiff.go +++ /dev/null @@ -1,162 +0,0 @@ -package graphdriver - -import ( - "time" - - "github.com/Sirupsen/logrus" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" -) - -var ( - // ApplyUncompressedLayer defines the unpack method used by the graph - // driver. - ApplyUncompressedLayer = chrootarchive.ApplyUncompressedLayer -) - -// NaiveDiffDriver takes a ProtoDriver and adds the -// capability of the Diffing methods which it may or may not -// support on its own. See the comment on the exported -// NewNaiveDiffDriver function below. -// Notably, the AUFS driver doesn't need to be wrapped like this. -type NaiveDiffDriver struct { - ProtoDriver - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -// NewNaiveDiffDriver returns a fully functional driver that wraps the -// given ProtoDriver and adds the capability of the following methods which -// it may or may not support on its own: -// Diff(id, parent string) (archive.Archive, error) -// Changes(id, parent string) ([]archive.Change, error) -// ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) -// DiffSize(id, parent string) (size int64, err error) -func NewNaiveDiffDriver(driver ProtoDriver, uidMaps, gidMaps []idtools.IDMap) Driver { - return &NaiveDiffDriver{ProtoDriver: driver, - uidMaps: uidMaps, - gidMaps: gidMaps} -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". 
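One subtlety in the Diff implementation that follows: the returned archive is wrapped so that closing the stream also releases (Puts) the mounted layer. The wrapper pattern behind ioutils.NewReadCloserWrapper is a few lines of plain io, sketched here (this is not the actual ioutils code):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

// readCloserWrapper runs an extra closer after the stream is consumed,
// mirroring what ioutils.NewReadCloserWrapper does in the code below.
type readCloserWrapper struct {
	io.Reader
	closer func() error
}

func (w *readCloserWrapper) Close() error { return w.closer() }

func main() {
	stream := ioutil.NopCloser(strings.NewReader("tar bytes..."))

	wrapped := &readCloserWrapper{
		Reader: stream,
		closer: func() error {
			err := stream.Close()
			fmt.Println("driver.Put(id) would run here, releasing the layer")
			return err
		},
	}

	io.Copy(ioutil.Discard, wrapped)
	wrapped.Close() // the consumer's Close both closes the tar and Puts the layer
}
```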
-func (gdw *NaiveDiffDriver) Diff(id, parent string) (arch archive.Archive, err error) { - driver := gdw.ProtoDriver - - layerFs, err := driver.Get(id, "") - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - driver.Put(id) - } - }() - - if parent == "" { - archive, err := archive.Tar(layerFs, archive.Uncompressed) - if err != nil { - return nil, err - } - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(id) - return err - }), nil - } - - parentFs, err := driver.Get(parent, "") - if err != nil { - return nil, err - } - defer driver.Put(parent) - - changes, err := archive.ChangesDirs(layerFs, parentFs) - if err != nil { - return nil, err - } - - archive, err := archive.ExportChanges(layerFs, changes, gdw.uidMaps, gdw.gidMaps) - if err != nil { - return nil, err - } - - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - driver.Put(id) - return err - }), nil -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. -func (gdw *NaiveDiffDriver) Changes(id, parent string) ([]archive.Change, error) { - driver := gdw.ProtoDriver - - layerFs, err := driver.Get(id, "") - if err != nil { - return nil, err - } - defer driver.Put(id) - - parentFs := "" - - if parent != "" { - parentFs, err = driver.Get(parent, "") - if err != nil { - return nil, err - } - defer driver.Put(parent) - } - - return archive.ChangesDirs(layerFs, parentFs) -} - -// ApplyDiff extracts the changeset from the given diff into the -// layer with the specified id and parent, returning the size of the -// new layer in bytes. -func (gdw *NaiveDiffDriver) ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error) { - driver := gdw.ProtoDriver - - // Mount the root filesystem so we can apply the diff/layer. - layerFs, err := driver.Get(id, "") - if err != nil { - return - } - defer driver.Put(id) - - options := &archive.TarOptions{UIDMaps: gdw.uidMaps, - GIDMaps: gdw.gidMaps} - start := time.Now().UTC() - logrus.Debug("Start untar layer") - if size, err = ApplyUncompressedLayer(layerFs, diff, options); err != nil { - return - } - logrus.Debugf("Untar time: %vs", time.Now().UTC().Sub(start).Seconds()) - - return -} - -// DiffSize calculates the changes between the specified layer -// and its parent and returns the size in bytes of the changes -// relative to its base filesystem directory. -func (gdw *NaiveDiffDriver) DiffSize(id, parent string) (size int64, err error) { - driver := gdw.ProtoDriver - - changes, err := gdw.Changes(id, parent) - if err != nil { - return - } - - layerFs, err := driver.Get(id, "") - if err != nil { - return - } - defer driver.Put(id) - - return archive.ChangesSize(layerFs, changes), nil -} diff --git a/daemon/graphdriver/graphtest/graphbench_unix.go b/daemon/graphdriver/graphtest/graphbench_unix.go deleted file mode 100644 index a32df02e16..0000000000 --- a/daemon/graphdriver/graphtest/graphbench_unix.go +++ /dev/null @@ -1,264 +0,0 @@ -// +build linux freebsd - -package graphtest - -import ( - "bytes" - "io" - "io/ioutil" - "path/filepath" - "testing" - - "github.com/docker/docker/pkg/stringid" -) - -// DriverBenchExists benchmarks calls to exist -func DriverBenchExists(b *testing.B, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) 
- defer PutDriver(b) - - base := stringid.GenerateRandomID() - - if err := driver.Create(base, "", "", nil); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - if !driver.Exists(base) { - b.Fatal("Newly created image doesn't exist") - } - } -} - -// DriverBenchGetEmpty benchmarks calls to get on an empty layer -func DriverBenchGetEmpty(b *testing.B, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) - defer PutDriver(b) - - base := stringid.GenerateRandomID() - - if err := driver.Create(base, "", "", nil); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - _, err := driver.Get(base, "") - b.StopTimer() - if err != nil { - b.Fatalf("Error getting mount: %s", err) - } - if err := driver.Put(base); err != nil { - b.Fatalf("Error putting mount: %s", err) - } - b.StartTimer() - } -} - -// DriverBenchDiffBase benchmarks calls to diff on a root layer -func DriverBenchDiffBase(b *testing.B, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) - defer PutDriver(b) - - base := stringid.GenerateRandomID() - - if err := driver.Create(base, "", "", nil); err != nil { - b.Fatal(err) - } - - if err := addFiles(driver, base, 3); err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - arch, err := driver.Diff(base, "") - if err != nil { - b.Fatal(err) - } - _, err = io.Copy(ioutil.Discard, arch) - if err != nil { - b.Fatalf("Error copying archive: %s", err) - } - arch.Close() - } -} - -// DriverBenchDiffN benchmarks calls to diff on two layers with -// a provided number of files on the lower and upper layers. -func DriverBenchDiffN(b *testing.B, bottom, top int, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) - defer PutDriver(b) - base := stringid.GenerateRandomID() - upper := stringid.GenerateRandomID() - - if err := driver.Create(base, "", "", nil); err != nil { - b.Fatal(err) - } - - if err := addManyFiles(driver, base, bottom, 3); err != nil { - b.Fatal(err) - } - - if err := driver.Create(upper, base, "", nil); err != nil { - b.Fatal(err) - } - - if err := addManyFiles(driver, upper, top, 6); err != nil { - b.Fatal(err) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - arch, err := driver.Diff(upper, "") - if err != nil { - b.Fatal(err) - } - _, err = io.Copy(ioutil.Discard, arch) - if err != nil { - b.Fatalf("Error copying archive: %s", err) - } - arch.Close() - } -} - -// DriverBenchDiffApplyN benchmarks calls to diff and apply together -func DriverBenchDiffApplyN(b *testing.B, fileCount int, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) 
- defer PutDriver(b) - base := stringid.GenerateRandomID() - upper := stringid.GenerateRandomID() - - if err := driver.Create(base, "", "", nil); err != nil { - b.Fatal(err) - } - - if err := addManyFiles(driver, base, fileCount, 3); err != nil { - b.Fatal(err) - } - - if err := driver.Create(upper, base, "", nil); err != nil { - b.Fatal(err) - } - - if err := addManyFiles(driver, upper, fileCount, 6); err != nil { - b.Fatal(err) - } - diffSize, err := driver.DiffSize(upper, "") - if err != nil { - b.Fatal(err) - } - b.ResetTimer() - b.StopTimer() - for i := 0; i < b.N; i++ { - diff := stringid.GenerateRandomID() - if err := driver.Create(diff, base, "", nil); err != nil { - b.Fatal(err) - } - - if err := checkManyFiles(driver, diff, fileCount, 3); err != nil { - b.Fatal(err) - } - - b.StartTimer() - - arch, err := driver.Diff(upper, "") - if err != nil { - b.Fatal(err) - } - - applyDiffSize, err := driver.ApplyDiff(diff, "", arch) - if err != nil { - b.Fatal(err) - } - - b.StopTimer() - arch.Close() - - if applyDiffSize != diffSize { - // TODO: enforce this - //b.Fatalf("Apply diff size different, got %d, expected %s", applyDiffSize, diffSize) - } - if err := checkManyFiles(driver, diff, fileCount, 6); err != nil { - b.Fatal(err) - } - } -} - -// DriverBenchDeepLayerDiff benchmarks calls to diff on top of a given number of layers. -func DriverBenchDeepLayerDiff(b *testing.B, layerCount int, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) - defer PutDriver(b) - - base := stringid.GenerateRandomID() - - if err := driver.Create(base, "", "", nil); err != nil { - b.Fatal(err) - } - - if err := addFiles(driver, base, 50); err != nil { - b.Fatal(err) - } - - topLayer, err := addManyLayers(driver, base, layerCount) - if err != nil { - b.Fatal(err) - } - - b.ResetTimer() - for i := 0; i < b.N; i++ { - arch, err := driver.Diff(topLayer, "") - if err != nil { - b.Fatal(err) - } - _, err = io.Copy(ioutil.Discard, arch) - if err != nil { - b.Fatalf("Error copying archive: %s", err) - } - arch.Close() - } -} - -// DriverBenchDeepLayerRead benchmarks calls to read a file under a given number of layers. -func DriverBenchDeepLayerRead(b *testing.B, layerCount int, drivername string, driveroptions ...string) { - driver := GetDriver(b, drivername, driveroptions...) 
-	defer PutDriver(b)
-
-	base := stringid.GenerateRandomID()
-
-	if err := driver.Create(base, "", "", nil); err != nil {
-		b.Fatal(err)
-	}
-
-	content := []byte("test content")
-	if err := addFile(driver, base, "testfile.txt", content); err != nil {
-		b.Fatal(err)
-	}
-
-	topLayer, err := addManyLayers(driver, base, layerCount)
-	if err != nil {
-		b.Fatal(err)
-	}
-
-	root, err := driver.Get(topLayer, "")
-	if err != nil {
-		b.Fatal(err)
-	}
-	defer driver.Put(topLayer)
-
-	b.ResetTimer()
-	for i := 0; i < b.N; i++ {
-
-		// Read content
-		c, err := ioutil.ReadFile(filepath.Join(root, "testfile.txt"))
-		if err != nil {
-			b.Fatal(err)
-		}
-
-		b.StopTimer()
-		if bytes.Compare(c, content) != 0 {
-			b.Fatalf("Wrong content in file %v, expected %v", c, content)
-		}
-		b.StartTimer()
-	}
-}
diff --git a/daemon/graphdriver/graphtest/graphtest_unix.go b/daemon/graphdriver/graphtest/graphtest_unix.go
deleted file mode 100644
index 68be47b171..0000000000
--- a/daemon/graphdriver/graphtest/graphtest_unix.go
+++ /dev/null
@@ -1,359 +0,0 @@
-// +build linux freebsd
-
-package graphtest
-
-import (
-	"bytes"
-	"io/ioutil"
-	"math/rand"
-	"os"
-	"path"
-	"reflect"
-	"syscall"
-	"testing"
-	"unsafe"
-
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/pkg/stringid"
-	"github.com/docker/go-units"
-)
-
-var (
-	drv *Driver
-)
-
-// Driver conforms to the graphdriver.Driver interface and carries the root
-// path plus a reference count of the clients using it. This helps in testing
-// drivers added to the framework.
-type Driver struct {
-	graphdriver.Driver
-	root     string
-	refCount int
-}
-
-func newDriver(t testing.TB, name string, options []string) *Driver {
-	root, err := ioutil.TempDir("", "docker-graphtest-")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := os.MkdirAll(root, 0755); err != nil {
-		t.Fatal(err)
-	}
-
-	d, err := graphdriver.GetDriver(name, root, options, nil, nil)
-	if err != nil {
-		t.Logf("graphdriver: %v\n", err)
-		if err == graphdriver.ErrNotSupported || err == graphdriver.ErrPrerequisites || err == graphdriver.ErrIncompatibleFS {
-			t.Skipf("Driver %s not supported", name)
-		}
-		t.Fatal(err)
-	}
-	return &Driver{d, root, 1}
-}
-
-func cleanup(t testing.TB, d *Driver) {
-	if err := d.Cleanup(); err != nil {
-		t.Fatal(err)
-	}
-	os.RemoveAll(d.root)
-}
-
-// GetDriver creates a new driver with the given name, or returns an existing
-// driver with that name after incrementing its reference count.
-func GetDriver(t testing.TB, name string, options ...string) graphdriver.Driver {
-	if drv == nil {
-		drv = newDriver(t, name, options)
-	} else {
-		drv.refCount++
-	}
-	return drv
-}
-
-// PutDriver decrements the driver's reference count and removes the driver
-// once it is no longer used.
-func PutDriver(t testing.TB) {
-	if drv == nil {
-		t.Skip("No driver to put!")
-	}
-	drv.refCount--
-	if drv.refCount == 0 {
-		cleanup(t, drv)
-		drv = nil
-	}
-}
-
-// DriverTestCreateEmpty creates a new image and verifies that it is empty and
-// has the right metadata.
-func DriverTestCreateEmpty(t testing.TB, drivername string, driverOptions ...string) {
-	driver := GetDriver(t, drivername, driverOptions...)
-	defer PutDriver(t)
-
-	if err := driver.Create("empty", "", "", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	defer func() {
-		if err := driver.Remove("empty"); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	if !driver.Exists("empty") {
-		t.Fatal("Newly created image doesn't exist")
-	}
-
-	dir, err := driver.Get("empty", "")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	verifyFile(t, dir, 0755|os.ModeDir, 0, 0)
-
-	// Verify that the directory is empty
-	fis, err := readDir(dir)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(fis) != 0 {
-		t.Fatal("New directory not empty")
-	}
-
-	driver.Put("empty")
-}
-
-// DriverTestCreateBase creates a base layer and verifies it.
-func DriverTestCreateBase(t testing.TB, drivername string, driverOptions ...string) {
-	driver := GetDriver(t, drivername, driverOptions...)
-	defer PutDriver(t)
-
-	createBase(t, driver, "Base")
-	defer func() {
-		if err := driver.Remove("Base"); err != nil {
-			t.Fatal(err)
-		}
-	}()
-	verifyBase(t, driver, "Base")
-}
-
-// DriverTestCreateSnap creates a base layer and a snapshot of it, and
-// verifies the snapshot.
-func DriverTestCreateSnap(t testing.TB, drivername string, driverOptions ...string) {
-	driver := GetDriver(t, drivername, driverOptions...)
-	defer PutDriver(t)
-
-	createBase(t, driver, "Base")
-
-	defer func() {
-		if err := driver.Remove("Base"); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	if err := driver.Create("Snap", "Base", "", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	defer func() {
-		if err := driver.Remove("Snap"); err != nil {
-			t.Fatal(err)
-		}
-	}()
-
-	verifyBase(t, driver, "Snap")
-}
-
-// DriverTestDeepLayerRead reads a file from a lower layer under a given
-// number of layers.
-func DriverTestDeepLayerRead(t testing.TB, layerCount int, drivername string, driverOptions ...string) {
-	driver := GetDriver(t, drivername, driverOptions...)
-	defer PutDriver(t)
-
-	base := stringid.GenerateRandomID()
-
-	if err := driver.Create(base, "", "", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	content := []byte("test content")
-	if err := addFile(driver, base, "testfile.txt", content); err != nil {
-		t.Fatal(err)
-	}
-
-	topLayer, err := addManyLayers(driver, base, layerCount)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = checkManyLayers(driver, topLayer, layerCount)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err := checkFile(driver, topLayer, "testfile.txt", content); err != nil {
-		t.Fatal(err)
-	}
-}
-
-// DriverTestDiffApply tests that diffing and applying a layer produces the
-// same layer.
-func DriverTestDiffApply(t testing.TB, fileCount int, drivername string, driverOptions ...string) {
-	driver := GetDriver(t, drivername, driverOptions...)
-	defer PutDriver(t)
-	base := stringid.GenerateRandomID()
-	upper := stringid.GenerateRandomID()
-	deleteFile := "file-remove.txt"
-	deleteFileContent := []byte("This file should get removed in upper!")
-	deleteDir := "var/lib"
-
-	if err := driver.Create(base, "", "", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := addManyFiles(driver, base, fileCount, 3); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := addFile(driver, base, deleteFile, deleteFileContent); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := addDirectory(driver, base, deleteDir); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := driver.Create(upper, base, "", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := addManyFiles(driver, upper, fileCount, 6); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := removeAll(driver, upper, deleteFile, deleteDir); err != nil {
-		t.Fatal(err)
-	}
-
-	diffSize, err := driver.DiffSize(upper, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	diff := stringid.GenerateRandomID()
-	if err := driver.Create(diff, base, "", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := checkManyFiles(driver, diff, fileCount, 3); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := checkFile(driver, diff, deleteFile, deleteFileContent); err != nil {
-		t.Fatal(err)
-	}
-
-	arch, err := driver.Diff(upper, base)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	buf := bytes.NewBuffer(nil)
-	if _, err := buf.ReadFrom(arch); err != nil {
-		t.Fatal(err)
-	}
-	if err := arch.Close(); err != nil {
-		t.Fatal(err)
-	}
-
-	applyDiffSize, err := driver.ApplyDiff(diff, base, bytes.NewReader(buf.Bytes()))
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if applyDiffSize != diffSize {
-		t.Fatalf("Apply diff size different, got %d, expected %d", applyDiffSize, diffSize)
-	}
-
-	if err := checkManyFiles(driver, diff, fileCount, 6); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := checkFileRemoved(driver, diff, deleteFile); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := checkFileRemoved(driver, diff, deleteDir); err != nil {
-		t.Fatal(err)
-	}
-}
-
-// DriverTestChanges tests that the computed changes on a layer match the
-// changes actually made.
-func DriverTestChanges(t testing.TB, drivername string, driverOptions ...string) {
-	driver := GetDriver(t, drivername, driverOptions...)
-	defer PutDriver(t)
-	base := stringid.GenerateRandomID()
-	upper := stringid.GenerateRandomID()
-
-	if err := driver.Create(base, "", "", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := addManyFiles(driver, base, 20, 3); err != nil {
-		t.Fatal(err)
-	}
-
-	if err := driver.Create(upper, base, "", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	expectedChanges, err := changeManyFiles(driver, upper, 20, 6)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	changes, err := driver.Changes(upper, base)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if err = checkChanges(expectedChanges, changes); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func writeRandomFile(path string, size uint64) error {
-	buf := make([]int64, size/8)
-
-	r := rand.NewSource(0)
-	for i := range buf {
-		buf[i] = r.Int63()
-	}
-
-	// Reinterpret the []int64 buffer as []byte in place, without copying
-	header := *(*reflect.SliceHeader)(unsafe.Pointer(&buf))
-	header.Len *= 8
-	header.Cap *= 8
-	data := *(*[]byte)(unsafe.Pointer(&header))
-
-	return ioutil.WriteFile(path, data, 0700)
-}
-
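A note on writeRandomFile above: the reflect.SliceHeader manipulation reinterprets the []int64 buffer as a []byte eight times as long, without copying. For readers who want the same effect without unsafe, here is a minimal sketch using encoding/binary (the helper name and path are illustrative, and little-endian layout is assumed, matching what the cast yields on x86):

	package main

	import (
		"encoding/binary"
		"io/ioutil"
		"math/rand"
	)

	// writeRandomFileSafe fills a byte slice from the same deterministic
	// source, one 8-byte word at a time, with no unsafe reinterpretation.
	func writeRandomFileSafe(path string, size uint64) error {
		r := rand.NewSource(0)
		data := make([]byte, size)
		for i := 0; i+8 <= len(data); i += 8 {
			binary.LittleEndian.PutUint64(data[i:], uint64(r.Int63()))
		}
		return ioutil.WriteFile(path, data, 0700)
	}

	func main() {
		_ = writeRandomFileSafe("/tmp/random.bin", 1<<20) // illustrative usage
	}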
-// DriverTestSetQuota creates a driver and tests setting a quota.
-func DriverTestSetQuota(t *testing.T, drivername string) {
-	driver := GetDriver(t, drivername)
-	defer PutDriver(t)
-
-	createBase(t, driver, "Base")
-	storageOpt := make(map[string]string, 1)
-	storageOpt["size"] = "50M"
-	if err := driver.Create("zfsTest", "Base", "", storageOpt); err != nil {
-		t.Fatal(err)
-	}
-
-	mountPath, err := driver.Get("zfsTest", "")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	quota := uint64(50 * units.MiB)
-	err = writeRandomFile(path.Join(mountPath, "file"), quota*2)
-	if pathError, ok := err.(*os.PathError); ok && pathError.Err != syscall.EDQUOT {
-		t.Fatalf("expect write() to fail with %v, got %v", syscall.EDQUOT, err)
-	}
-
-}
diff --git a/daemon/graphdriver/graphtest/graphtest_windows.go b/daemon/graphdriver/graphtest/graphtest_windows.go
deleted file mode 100644
index a50c5211e3..0000000000
--- a/daemon/graphdriver/graphtest/graphtest_windows.go
+++ /dev/null
@@ -1 +0,0 @@
-package graphtest
diff --git a/daemon/graphdriver/graphtest/testutil.go b/daemon/graphdriver/graphtest/testutil.go
deleted file mode 100644
index 4718f06852..0000000000
--- a/daemon/graphdriver/graphtest/testutil.go
+++ /dev/null
@@ -1,342 +0,0 @@
-package graphtest
-
-import (
-	"bytes"
-	"fmt"
-	"io/ioutil"
-	"math/rand"
-	"os"
-	"path"
-	"sort"
-
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/stringid"
-)
-
-func randomContent(size int, seed int64) []byte {
-	s := rand.NewSource(seed)
-	content := make([]byte, size)
-
-	for i := 0; i < len(content); i += 7 {
-		val := s.Int63()
-		for j := 0; i+j < len(content) && j < 7; j++ {
-			content[i+j] = byte(val)
-			val >>= 8
-		}
-	}
-
-	return content
-}
-
-func addFiles(drv graphdriver.Driver, layer string, seed int64) error {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return err
-	}
-	defer drv.Put(layer)
-
-	if err := ioutil.WriteFile(path.Join(root, "file-a"), randomContent(64, seed), 0755); err != nil {
-		return err
-	}
-	if err := os.MkdirAll(path.Join(root, "dir-b"), 0755); err != nil {
-		return err
-	}
-	if err := ioutil.WriteFile(path.Join(root, "dir-b", "file-b"), randomContent(128, seed+1), 0755); err != nil {
-		return err
-	}
-
-	return ioutil.WriteFile(path.Join(root, "file-c"), randomContent(128*128, seed+2), 0755)
-}
-
-func checkFile(drv graphdriver.Driver, layer, filename string, content []byte) error {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return err
-	}
-	defer drv.Put(layer)
-
-	fileContent, err := ioutil.ReadFile(path.Join(root, filename))
-	if err != nil {
-		return err
-	}
-
-	if bytes.Compare(fileContent, content) != 0 {
-		return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content)
-	}
-
-	return nil
-}
-
-func addFile(drv graphdriver.Driver, layer, filename string, content []byte) error {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return err
-	}
-	defer drv.Put(layer)
-
-	return ioutil.WriteFile(path.Join(root, filename), content, 0755)
-}
-
-func addDirectory(drv graphdriver.Driver, layer, dir string) error {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return err
-	}
-	defer drv.Put(layer)
-
-	return os.MkdirAll(path.Join(root, dir), 0755)
-}
-
-func removeAll(drv graphdriver.Driver, layer string, names ...string) error {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return err
-	}
-	defer drv.Put(layer)
-
-	for _, filename := range names {
-		if err := os.RemoveAll(path.Join(root, filename)); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func checkFileRemoved(drv graphdriver.Driver, layer, filename string) error {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return err
-	}
-	defer drv.Put(layer)
-
-	if _, err := os.Stat(path.Join(root, filename)); err == nil {
-		return fmt.Errorf("file still exists: %s", path.Join(root, filename))
-	} else if !os.IsNotExist(err) {
-		return err
-	}
-
-	return nil
-}
-
-func addManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return err
-	}
-	defer drv.Put(layer)
-
-	for i := 0; i < count; i += 100 {
-		dir := path.Join(root, fmt.Sprintf("directory-%d", i))
-		if err := os.MkdirAll(dir, 0755); err != nil {
-			return err
-		}
-		for j := 0; i+j < count && j < 100; j++ {
-			file := path.Join(dir, fmt.Sprintf("file-%d", i+j))
-			if err := ioutil.WriteFile(file, randomContent(64, seed+int64(i+j)), 0755); err != nil {
-				return err
-			}
-		}
-	}
-
-	return nil
-}
-
-func changeManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) ([]archive.Change, error) {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return nil, err
-	}
-	defer drv.Put(layer)
-
-	changes := []archive.Change{}
-	for i := 0; i < count; i += 100 {
-		archiveRoot := fmt.Sprintf("/directory-%d", i)
-		if err := os.MkdirAll(path.Join(root, archiveRoot), 0755); err != nil {
-			return nil, err
-		}
-		for j := 0; i+j < count && j < 100; j++ {
-			if j == 0 {
-				changes = append(changes, archive.Change{
-					Path: archiveRoot,
-					Kind: archive.ChangeModify,
-				})
-			}
-			var change archive.Change
-			switch j % 3 {
-			// Update file
-			case 0:
-				change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
-				change.Kind = archive.ChangeModify
-				if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
-					return nil, err
-				}
-			// Add file
-			case 1:
-				change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d-%d", seed, i+j))
-				change.Kind = archive.ChangeAdd
-				if err := ioutil.WriteFile(path.Join(root, change.Path), randomContent(64, seed+int64(i+j)), 0755); err != nil {
-					return nil, err
-				}
-			// Remove file
-			case 2:
-				change.Path = path.Join(archiveRoot, fmt.Sprintf("file-%d", i+j))
-				change.Kind = archive.ChangeDelete
-				if err := os.Remove(path.Join(root, change.Path)); err != nil {
-					return nil, err
-				}
-			}
-			changes = append(changes, change)
-		}
-	}
-
-	return changes, nil
-}
-
-func checkManyFiles(drv graphdriver.Driver, layer string, count int, seed int64) error {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return err
-	}
-	defer drv.Put(layer)
-
-	for i := 0; i < count; i += 100 {
-		dir := path.Join(root, fmt.Sprintf("directory-%d", i))
-		for j := 0; i+j < count && j < 100; j++ {
-			file := path.Join(dir, fmt.Sprintf("file-%d", i+j))
-			fileContent, err := ioutil.ReadFile(file)
-			if err != nil {
-				return err
-			}
-
-			content := randomContent(64, seed+int64(i+j))
-
-			if bytes.Compare(fileContent, content) != 0 {
-				return fmt.Errorf("mismatched file content %v, expecting %v", fileContent, content)
-			}
-		}
-	}
-
-	return nil
-}
-
-type changeList []archive.Change
-
-func (c changeList) Less(i, j int) bool {
-	if c[i].Path == c[j].Path {
-		return c[i].Kind < c[j].Kind
-	}
-	return c[i].Path < c[j].Path
-}
-func (c changeList) Len() int      { return len(c) }
-func (c changeList) Swap(i, j int) { c[j], c[i] = c[i], c[j] }
-
-func checkChanges(expected, actual []archive.Change) error {
-	if len(expected) != len(actual) {
-		return fmt.Errorf("unexpected number of changes, expected %d, got %d", len(expected), len(actual))
-	}
-	sort.Sort(changeList(expected))
-	sort.Sort(changeList(actual))
-
-	for i := range expected {
-		if expected[i] != actual[i] {
-			return fmt.Errorf("unexpected change, expecting %v, got %v", expected[i], actual[i])
-		}
-	}
-
-	return nil
-}
-
-func addLayerFiles(drv graphdriver.Driver, layer, parent string, i int) error {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return err
-	}
-	defer drv.Put(layer)
-
-	if err := ioutil.WriteFile(path.Join(root, "top-id"), []byte(layer), 0755); err != nil {
-		return err
-	}
-	layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
-	if err := os.MkdirAll(layerDir, 0755); err != nil {
-		return err
-	}
-	if err := ioutil.WriteFile(path.Join(layerDir, "layer-id"), []byte(layer), 0755); err != nil {
-		return err
-	}
-	if err := ioutil.WriteFile(path.Join(layerDir, "parent-id"), []byte(parent), 0755); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-func addManyLayers(drv graphdriver.Driver, baseLayer string, count int) (string, error) {
-	lastLayer := baseLayer
-	for i := 1; i <= count; i++ {
-		nextLayer := stringid.GenerateRandomID()
-		if err := drv.Create(nextLayer, lastLayer, "", nil); err != nil {
-			return "", err
-		}
-		if err := addLayerFiles(drv, nextLayer, lastLayer, i); err != nil {
-			return "", err
-		}
-
-		lastLayer = nextLayer
-	}
-	return lastLayer, nil
-}
-
-func checkManyLayers(drv graphdriver.Driver, layer string, count int) error {
-	root, err := drv.Get(layer, "")
-	if err != nil {
-		return err
-	}
-	defer drv.Put(layer)
-
-	layerIDBytes, err := ioutil.ReadFile(path.Join(root, "top-id"))
-	if err != nil {
-		return err
-	}
-
-	if bytes.Compare(layerIDBytes, []byte(layer)) != 0 {
-		return fmt.Errorf("mismatched file content %v, expecting %v", layerIDBytes, []byte(layer))
-	}
-
-	for i := count; i > 0; i-- {
-		layerDir := path.Join(root, fmt.Sprintf("layer-%d", i))
-
-		thisLayerIDBytes, err := ioutil.ReadFile(path.Join(layerDir, "layer-id"))
-		if err != nil {
-			return err
-		}
-		if bytes.Compare(thisLayerIDBytes, layerIDBytes) != 0 {
-			return fmt.Errorf("mismatched file content %v, expecting %v", thisLayerIDBytes, layerIDBytes)
-		}
-		layerIDBytes, err = ioutil.ReadFile(path.Join(layerDir, "parent-id"))
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// readDir reads a directory just like ioutil.ReadDir(), then hides specific
-// files (currently "lost+found") so the tests don't "see" them.
-func readDir(dir string) ([]os.FileInfo, error) {
-	a, err := ioutil.ReadDir(dir)
-	if err != nil {
-		return nil, err
-	}
-
-	b := a[:0]
-	for _, x := range a {
-		if x.Name() != "lost+found" { // ext4 always has this dir
-			b = append(b, x)
-		}
-	}
-
-	return b, nil
-}
diff --git a/daemon/graphdriver/graphtest/testutil_unix.go b/daemon/graphdriver/graphtest/testutil_unix.go
deleted file mode 100644
index 07d038c3b2..0000000000
--- a/daemon/graphdriver/graphtest/testutil_unix.go
+++ /dev/null
@@ -1,143 +0,0 @@
-// +build linux freebsd
-
-package graphtest
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path"
-	"syscall"
-	"testing"
-
-	"github.com/docker/docker/daemon/graphdriver"
-)
-
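The Mknod call in InitLoopbacks below packs the loop device's dev_t by hand: major 7 (the loop driver) goes in bits 8 through 19, the low 8 bits of the minor number stay in bits 0 through 7, and the remaining minor bits are shifted above the major field. A small sketch of the encoding, mirroring glibc's makedev (the helper is illustrative, not part of the original file):

	package main

	import "fmt"

	// mkdev packs a Linux dev_t the same way the expression
	// (7<<8)|(i&0xff)|((i&0xfff00)<<12) does for major 7, minor i.
	func mkdev(major, minor int64) int {
		return int(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
	}

	func main() {
		fmt.Printf("/dev/loop3 is 0x%x\n", mkdev(7, 3)) // 0x703
	}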
-// InitLoopbacks ensures that the loopback devices are properly created within
-// the system running the device mapper tests.
-func InitLoopbacks() error {
-	statT, err := getBaseLoopStats()
-	if err != nil {
-		return err
-	}
-	// create at least 8 loopback files; 8 is a reasonable default
-	for i := 0; i < 8; i++ {
-		loopPath := fmt.Sprintf("/dev/loop%d", i)
-		// only create new loopback files if they don't exist
-		if _, err := os.Stat(loopPath); err != nil {
-			if mkerr := syscall.Mknod(loopPath,
-				uint32(statT.Mode|syscall.S_IFBLK), int((7<<8)|(i&0xff)|((i&0xfff00)<<12))); mkerr != nil {
-				return mkerr
-			}
-			os.Chown(loopPath, int(statT.Uid), int(statT.Gid))
-		}
-	}
-	return nil
-}
-
-// getBaseLoopStats inspects /dev/loop0 to collect the uid, gid, and mode of
-// the loop0 device on the system. If it does not exist, we assume 0, 0, 0660
-// for the stat data.
-func getBaseLoopStats() (*syscall.Stat_t, error) {
-	loop0, err := os.Stat("/dev/loop0")
-	if err != nil {
-		if os.IsNotExist(err) {
-			return &syscall.Stat_t{
-				Uid:  0,
-				Gid:  0,
-				Mode: 0660,
-			}, nil
-		}
-		return nil, err
-	}
-	return loop0.Sys().(*syscall.Stat_t), nil
-}
-
-func verifyFile(t testing.TB, path string, mode os.FileMode, uid, gid uint32) {
-	fi, err := os.Stat(path)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if fi.Mode()&os.ModeType != mode&os.ModeType {
-		t.Fatalf("Expected %s type 0x%x, got 0x%x", path, mode&os.ModeType, fi.Mode()&os.ModeType)
-	}
-
-	if fi.Mode()&os.ModePerm != mode&os.ModePerm {
-		t.Fatalf("Expected %s mode %o, got %o", path, mode&os.ModePerm, fi.Mode()&os.ModePerm)
-	}
-
-	if fi.Mode()&os.ModeSticky != mode&os.ModeSticky {
-		t.Fatalf("Expected %s sticky 0x%x, got 0x%x", path, mode&os.ModeSticky, fi.Mode()&os.ModeSticky)
-	}
-
-	if fi.Mode()&os.ModeSetuid != mode&os.ModeSetuid {
-		t.Fatalf("Expected %s setuid 0x%x, got 0x%x", path, mode&os.ModeSetuid, fi.Mode()&os.ModeSetuid)
-	}
-
-	if fi.Mode()&os.ModeSetgid != mode&os.ModeSetgid {
-		t.Fatalf("Expected %s setgid 0x%x, got 0x%x", path, mode&os.ModeSetgid, fi.Mode()&os.ModeSetgid)
-	}
-
-	if stat, ok := fi.Sys().(*syscall.Stat_t); ok {
-		if stat.Uid != uid {
-			t.Fatalf("%s not owned by uid %d", path, uid)
-		}
-		if stat.Gid != gid {
-			t.Fatalf("%s not owned by gid %d", path, gid)
-		}
-	}
-}
-
-func createBase(t testing.TB, driver graphdriver.Driver, name string) {
-	// We need to be able to set any perms
-	oldmask := syscall.Umask(0)
-	defer syscall.Umask(oldmask)
-
-	if err := driver.CreateReadWrite(name, "", "", nil); err != nil {
-		t.Fatal(err)
-	}
-
-	dir, err := driver.Get(name, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer driver.Put(name)
-
-	subdir := path.Join(dir, "a subdir")
-	if err := os.Mkdir(subdir, 0705|os.ModeSticky); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Chown(subdir, 1, 2); err != nil {
-		t.Fatal(err)
-	}
-
-	file := path.Join(dir, "a file")
-	if err := ioutil.WriteFile(file, []byte("Some data"), 0222|os.ModeSetuid); err != nil {
-		t.Fatal(err)
-	}
-}
-
-func verifyBase(t testing.TB, driver graphdriver.Driver, name string) {
-	dir, err := driver.Get(name, "")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer driver.Put(name)
-
-	subdir := path.Join(dir, "a subdir")
-	verifyFile(t, subdir, 0705|os.ModeDir|os.ModeSticky, 1, 2)
-
-	file := path.Join(dir, "a file")
-	verifyFile(t, file, 0222|os.ModeSetuid, 0, 0)
-
-	fis, err := readDir(dir)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if len(fis) != 2 {
-		t.Fatal("Unexpected files in base image")
-	}
-
-}
diff --git a/daemon/graphdriver/overlay/copy.go b/daemon/graphdriver/overlay/copy.go
deleted file mode 100644
index 7d81a83abd..0000000000
--- a/daemon/graphdriver/overlay/copy.go
+++ /dev/null
@@ -1,169 +0,0 @@
-// +build linux
-
-package overlay
-
-import (
-	"fmt"
-	"os"
-	"path/filepath"
-	"syscall"
-	"time"
-
-	"github.com/docker/docker/pkg/pools"
-	"github.com/docker/docker/pkg/system"
-)
-
-type copyFlags int
-
-const (
-	copyHardlink copyFlags = 1 << iota
-)
-
-func copyRegular(srcPath, dstPath string, mode os.FileMode) error {
-	srcFile, err := os.Open(srcPath)
-	if err != nil {
-		return err
-	}
-	defer srcFile.Close()
-
-	dstFile, err := os.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE, mode)
-	if err != nil {
-		return err
-	}
-	defer dstFile.Close()
-
-	_, err = pools.Copy(dstFile, srcFile)
-
-	return err
-}
-
-func copyXattr(srcPath, dstPath, attr string) error {
-	data, err := system.Lgetxattr(srcPath, attr)
-	if err != nil {
-		return err
-	}
-	if data != nil {
-		if err := system.Lsetxattr(dstPath, attr, data, 0); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func copyDir(srcDir, dstDir string, flags copyFlags) error {
-	err := filepath.Walk(srcDir, func(srcPath string, f os.FileInfo, err error) error {
-		if err != nil {
-			return err
-		}
-
-		// Rebase path
-		relPath, err := filepath.Rel(srcDir, srcPath)
-		if err != nil {
-			return err
-		}
-
-		dstPath := filepath.Join(dstDir, relPath)
-
-		stat, ok := f.Sys().(*syscall.Stat_t)
-		if !ok {
-			return fmt.Errorf("Unable to get raw syscall.Stat_t data for %s", srcPath)
-		}
-
-		isHardlink := false
-
-		switch f.Mode() & os.ModeType {
-		case 0: // Regular file
-			if flags&copyHardlink != 0 {
-				isHardlink = true
-				if err := os.Link(srcPath, dstPath); err != nil {
-					return err
-				}
-			} else {
-				if err := copyRegular(srcPath, dstPath, f.Mode()); err != nil {
-					return err
-				}
-			}
-
-		case os.ModeDir:
-			if err := os.Mkdir(dstPath, f.Mode()); err != nil && !os.IsExist(err) {
-				return err
-			}
-
-		case os.ModeSymlink:
-			link, err := os.Readlink(srcPath)
-			if err != nil {
-				return err
-			}
-
-			if err := os.Symlink(link, dstPath); err != nil {
-				return err
-			}
-
-		case os.ModeNamedPipe:
-			fallthrough
-		case os.ModeSocket:
-			if err := syscall.Mkfifo(dstPath, stat.Mode); err != nil {
-				return err
-			}
-
-		case os.ModeDevice:
-			if err := syscall.Mknod(dstPath, stat.Mode, int(stat.Rdev)); err != nil {
-				return err
-			}
-
-		default:
-			return fmt.Errorf("Unknown file type for %s", srcPath)
-		}
-
-		// Everything below is copying metadata from src to dst. All this metadata
-		// already shares an inode for hardlinks.
-		if isHardlink {
-			return nil
-		}
-
-		if err := os.Lchown(dstPath, int(stat.Uid), int(stat.Gid)); err != nil {
-			return err
-		}
-
-		if err := copyXattr(srcPath, dstPath, "security.capability"); err != nil {
-			return err
-		}
-
-		// We need to copy this attribute if it appears in an overlay upper layer, as
-		// this function is used to copy those. It is set by overlay if a directory
-		// is removed and then re-created and should not inherit anything from the
-		// same dir in the lower dir.
-		if err := copyXattr(srcPath, dstPath, "trusted.overlay.opaque"); err != nil {
-			return err
-		}
-
-		isSymlink := f.Mode()&os.ModeSymlink != 0
-
-		// There is no LChmod, so ignore the mode for symlinks. Also, this
-		// must happen after the chown, as that can modify the file mode.
-		if !isSymlink {
-			if err := os.Chmod(dstPath, f.Mode()); err != nil {
-				return err
-			}
-		}
-
-		// system.Chtimes doesn't support a NOFOLLOW flag yet
-		if !isSymlink {
-			aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
-			mTime := time.Unix(int64(stat.Mtim.Sec), int64(stat.Mtim.Nsec))
-			if err := system.Chtimes(dstPath, aTime, mTime); err != nil {
-				return err
-			}
-		} else {
-			ts := []syscall.Timespec{stat.Atim, stat.Mtim}
-			if err := system.LUtimesNano(dstPath, ts); err != nil {
-				return err
-			}
-		}
-		return nil
-	})
-	return err
-}
diff --git a/daemon/graphdriver/overlay/overlay.go b/daemon/graphdriver/overlay/overlay.go
deleted file mode 100644
index 16fcd785ff..0000000000
--- a/daemon/graphdriver/overlay/overlay.go
+++ /dev/null
@@ -1,444 +0,0 @@
-// +build linux
-
-package overlay
-
-import (
-	"bufio"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path"
-	"syscall"
-
-	"github.com/Sirupsen/logrus"
-
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/idtools"
-
-	"github.com/docker/docker/pkg/mount"
-	"github.com/opencontainers/runc/libcontainer/label"
-)
-
-// This is a small wrapper over the NaiveDiffDriver that lets us have a custom
-// implementation of ApplyDiff()
-
-var (
-	// ErrApplyDiffFallback is returned to indicate that the normal (naive)
-	// ApplyDiff should be applied as a fallback.
-	ErrApplyDiffFallback = fmt.Errorf("Fall back to normal ApplyDiff")
-	backingFs            = ""
-)
-
-// ApplyDiffProtoDriver wraps the ProtoDriver by extending the interface with ApplyDiff method.
-type ApplyDiffProtoDriver interface {
-	graphdriver.ProtoDriver
-	// ApplyDiff writes the diff to the archive for the given id and parent id.
-	// It returns the size in bytes written if successful; otherwise, it
-	// returns ErrApplyDiffFallback.
-	ApplyDiff(id, parent string, diff archive.Reader) (size int64, err error)
-}
-
-type naiveDiffDriverWithApply struct {
-	graphdriver.Driver
-	applyDiff ApplyDiffProtoDriver
-}
-
-// NaiveDiffDriverWithApply returns a NaiveDiff driver with custom ApplyDiff.
-func NaiveDiffDriverWithApply(driver ApplyDiffProtoDriver, uidMaps, gidMaps []idtools.IDMap) graphdriver.Driver {
-	return &naiveDiffDriverWithApply{
-		Driver:    graphdriver.NewNaiveDiffDriver(driver, uidMaps, gidMaps),
-		applyDiff: driver,
-	}
-}
-
-// ApplyDiff applies the diff using the driver's custom implementation,
-// falling back to the NaiveDiffDriver when the custom implementation
-// signals ErrApplyDiffFallback.
-func (d *naiveDiffDriverWithApply) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
-	b, err := d.applyDiff.ApplyDiff(id, parent, diff)
-	if err == ErrApplyDiffFallback {
-		return d.Driver.ApplyDiff(id, parent, diff)
-	}
-	return b, err
-}
-
-// This backend uses the overlay union filesystem for containers
-// plus hard link file sharing for images.
-
-// Each container/image can have a "root" subdirectory which is a plain
-// filesystem hierarchy, or they can use overlay.
-
-// If they use overlay there is an "upper" directory and a "lower-id"
-// file, as well as "merged" and "work" directories. The "upper"
-// directory has the upper layer of the overlay, and "lower-id" contains
-// the id of the parent whose "root" directory shall be used as the lower
-// layer in the overlay. The overlay itself is mounted in the "merged"
-// directory, and the "work" dir is needed for overlay to work.
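For orientation, the directories described above come together in a single mount(2) call when the layer is requested, as Get further below shows. A standalone sketch of that assembly (paths are illustrative, error handling is minimal, and Linux with root privileges and overlay support is assumed):

	package main

	import (
		"fmt"
		"syscall"
	)

	func main() {
		home := "/var/lib/docker/overlay" // illustrative driver home
		id, lowerID := "container-layer", "image-layer"

		// The parent's "root" is the read-only lower layer, "upper" captures
		// writes, "work" is required by the kernel, and the union appears
		// under "merged".
		opts := fmt.Sprintf("lowerdir=%s/%s/root,upperdir=%s/%s/upper,workdir=%s/%s/work",
			home, lowerID, home, id, home, id)
		if err := syscall.Mount("overlay", home+"/"+id+"/merged", "overlay", 0, opts); err != nil {
			fmt.Println("mount failed:", err)
		}
	}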
-
-// When an overlay layer is created there are two cases: either the
-// parent has a "root" dir, in which case we start out with an empty
-// "upper" directory overlaid on the parent's root. This is typically
-// the case with the init layer of a container, which is based on an image.
-// If there is no "root" in the parent, we inherit the lower-id from
-// the parent and start by making a copy of the parent's "upper" dir.
-// This is typically the case for a container layer which copies
-// its parent -init upper layer.
-
-// Additionally, we have a custom implementation of ApplyLayer
-// which makes a recursive copy of the parent "root" layer using
-// hardlinks to share file data, and then applies the layer on top
-// of that. This means all child images share file (but not directory)
-// data with the parent.
-
-// Driver contains information about the home directory and the list of active mounts that are created using this driver.
-type Driver struct {
-	home    string
-	uidMaps []idtools.IDMap
-	gidMaps []idtools.IDMap
-	ctr     *graphdriver.RefCounter
-}
-
-func init() {
-	graphdriver.Register("overlay", Init)
-}
-
-// Init returns the NaiveDiffDriver, a native diff driver for the overlay filesystem.
-// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned.
-// If overlay is not supported over the existing backing filesystem, graphdriver.ErrIncompatibleFS is returned.
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
-
-	if err := supportsOverlay(); err != nil {
-		return nil, graphdriver.ErrNotSupported
-	}
-
-	fsMagic, err := graphdriver.GetFSMagic(home)
-	if err != nil {
-		return nil, err
-	}
-	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
-		backingFs = fsName
-	}
-
-	switch fsMagic {
-	case graphdriver.FsMagicAufs, graphdriver.FsMagicBtrfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicZfs, graphdriver.FsMagicEcryptfs:
-		logrus.Errorf("'overlay' is not supported over %s", backingFs)
-		return nil, graphdriver.ErrIncompatibleFS
-	}
-
-	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
-	if err != nil {
-		return nil, err
-	}
-	// Create the driver home dir
-	if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
-		return nil, err
-	}
-
-	if err := mount.MakePrivate(home); err != nil {
-		return nil, err
-	}
-
-	d := &Driver{
-		home:    home,
-		uidMaps: uidMaps,
-		gidMaps: gidMaps,
-		ctr:     graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
-	}
-
-	return NaiveDiffDriverWithApply(d, uidMaps, gidMaps), nil
-}
-
-func supportsOverlay() error {
-	// We can try to modprobe overlay first before looking at
-	// proc/filesystems for when overlay is supported
-	exec.Command("modprobe", "overlay").Run()
-
-	f, err := os.Open("/proc/filesystems")
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	s := bufio.NewScanner(f)
-	for s.Scan() {
-		if s.Text() == "nodev\toverlay" {
-			return nil
-		}
-	}
-	logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure the kernel is new enough and has overlay support loaded.")
-	return graphdriver.ErrNotSupported
-}
-
-func (d *Driver) String() string {
-	return "overlay"
-}
-
-// Status returns current driver information in a two-dimensional string array.
-// Output contains "Backing Filesystem" used in this implementation.
-func (d *Driver) Status() [][2]string {
-	return [][2]string{
-		{"Backing Filesystem", backingFs},
-	}
-}
-
-// GetMetadata returns metadata about the overlay driver: the RootDir for an
-// image layer, or the LowerDir, UpperDir, WorkDir, and MergedDir used to
-// store data for a container layer.
-func (d *Driver) GetMetadata(id string) (map[string]string, error) {
-	dir := d.dir(id)
-	if _, err := os.Stat(dir); err != nil {
-		return nil, err
-	}
-
-	metadata := make(map[string]string)
-
-	// If id has a root, it is an image
-	rootDir := path.Join(dir, "root")
-	if _, err := os.Stat(rootDir); err == nil {
-		metadata["RootDir"] = rootDir
-		return metadata, nil
-	}
-
-	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
-	if err != nil {
-		return nil, err
-	}
-
-	metadata["LowerDir"] = path.Join(d.dir(string(lowerID)), "root")
-	metadata["UpperDir"] = path.Join(dir, "upper")
-	metadata["WorkDir"] = path.Join(dir, "work")
-	metadata["MergedDir"] = path.Join(dir, "merged")
-
-	return metadata, nil
-}
-
-// Cleanup cleans any state created by overlay that should be removed when the
-// daemon is shut down. For now, we just have to unmount the bind mount we
-// created over the home directory.
-func (d *Driver) Cleanup() error {
-	return mount.Unmount(d.home)
-}
-
-// CreateReadWrite creates a layer that is writable for use as a container
-// file system.
-func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
-	return d.Create(id, parent, mountLabel, storageOpt)
-}
-
-// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
-// The parent filesystem is used to configure these directories for the overlay.
-func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) {
-
-	if len(storageOpt) != 0 {
-		return fmt.Errorf("--storage-opt is not supported for overlay")
-	}
-
-	dir := d.dir(id)
-
-	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-	if err != nil {
-		return err
-	}
-	if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
-		return err
-	}
-	if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
-		return err
-	}
-
-	defer func() {
-		// Clean up on failure
-		if retErr != nil {
-			os.RemoveAll(dir)
-		}
-	}()
-
-	// Toplevel images are just a "root" dir
-	if parent == "" {
-		if err := idtools.MkdirAs(path.Join(dir, "root"), 0755, rootUID, rootGID); err != nil {
-			return err
-		}
-		return nil
-	}
-
-	parentDir := d.dir(parent)
-
-	// Ensure parent exists
-	if _, err := os.Lstat(parentDir); err != nil {
-		return err
-	}
-
-	// If parent has a root, just do an overlay to it
-	parentRoot := path.Join(parentDir, "root")
-
-	if s, err := os.Lstat(parentRoot); err == nil {
-		if err := idtools.MkdirAs(path.Join(dir, "upper"), s.Mode(), rootUID, rootGID); err != nil {
-			return err
-		}
-		if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
-			return err
-		}
-		if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
-			return err
-		}
-		if err := ioutil.WriteFile(path.Join(dir, "lower-id"), []byte(parent), 0666); err != nil {
-			return err
-		}
-		return nil
-	}
-
-	// Otherwise, copy the upper and the lower-id from the parent
-
-	lowerID, err := ioutil.ReadFile(path.Join(parentDir, "lower-id"))
-	if err != nil {
-		return err
-	}
-
-	if err := ioutil.WriteFile(path.Join(dir, "lower-id"), lowerID, 0666); err != nil {
-		return err
-	}
-
-	parentUpperDir := path.Join(parentDir, "upper")
-	s, err := os.Lstat(parentUpperDir)
-	if err != nil {
-		return err
-	}
-
-	upperDir := path.Join(dir, "upper")
-	if err := idtools.MkdirAs(upperDir, s.Mode(), rootUID, rootGID); err != nil {
-		return err
-	}
-	if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
-		return err
-	}
-	if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
-		return err
-	}
-
-	return copyDir(parentUpperDir, upperDir, 0)
-}
-
-func (d *Driver) dir(id string) string {
-	return path.Join(d.home, id)
-}
-
-// Remove cleans the directories that are created for this id.
-func (d *Driver) Remove(id string) error {
-	if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	return nil
-}
-
-// Get creates and mounts the required file system for the given id and returns the mount path.
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
-	dir := d.dir(id)
-	if _, err := os.Stat(dir); err != nil {
-		return "", err
-	}
-	// If id has a root, just return it
-	rootDir := path.Join(dir, "root")
-	if _, err := os.Stat(rootDir); err == nil {
-		return rootDir, nil
-	}
-	mergedDir := path.Join(dir, "merged")
-	if count := d.ctr.Increment(mergedDir); count > 1 {
-		return mergedDir, nil
-	}
-	defer func() {
-		if err != nil {
-			if c := d.ctr.Decrement(mergedDir); c <= 0 {
-				syscall.Unmount(mergedDir, 0)
-			}
-		}
-	}()
-	lowerID, err := ioutil.ReadFile(path.Join(dir, "lower-id"))
-	if err != nil {
-		return "", err
-	}
-	var (
-		lowerDir = path.Join(d.dir(string(lowerID)), "root")
-		upperDir = path.Join(dir, "upper")
-		workDir  = path.Join(dir, "work")
-		opts     = fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", lowerDir, upperDir, workDir)
-	)
-	if err := syscall.Mount("overlay", mergedDir, "overlay", 0, label.FormatMountLabel(opts, mountLabel)); err != nil {
-		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
-	}
-	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
-	// user namespace requires this to move a directory from lower to upper.
-	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-	if err != nil {
-		return "", err
-	}
-	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
-		return "", err
-	}
-	return mergedDir, nil
-}
-
-// Put unmounts the mount path created for the given id.
-func (d *Driver) Put(id string) error {
-	mountpoint := path.Join(d.dir(id), "merged")
-	if count := d.ctr.Decrement(mountpoint); count > 0 {
-		return nil
-	}
-	if err := syscall.Unmount(mountpoint, 0); err != nil {
-		logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
-	}
-	return nil
-}
-
-// ApplyDiff applies the new layer on top of the parent's root; if the parent
-// does not exist, it returns ErrApplyDiffFallback.
-func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
-	dir := d.dir(id)
-
-	if parent == "" {
-		return 0, ErrApplyDiffFallback
-	}
-
-	parentRootDir := path.Join(d.dir(parent), "root")
-	if _, err := os.Stat(parentRootDir); err != nil {
-		return 0, ErrApplyDiffFallback
-	}
-
-	// We now know there is a parent, and it has a "root" directory containing
-	// the full root filesystem. We can just hardlink it and apply the
-	// layer. This relies on two things:
-	// 1) ApplyDiff is only run once on a clean (no writes to upper layer) container
-	// 2) ApplyDiff doesn't do any in-place writes to files (would break hardlinks)
-	// These are all currently true and are not expected to break
-
-	tmpRootDir, err := ioutil.TempDir(dir, "tmproot")
-	if err != nil {
-		return 0, err
-	}
-	defer func() {
-		if err != nil {
-			os.RemoveAll(tmpRootDir)
-		} else {
-			os.RemoveAll(path.Join(dir, "upper"))
-			os.RemoveAll(path.Join(dir, "work"))
-			os.RemoveAll(path.Join(dir, "merged"))
-			os.RemoveAll(path.Join(dir, "lower-id"))
-		}
-	}()
-
-	if err = copyDir(parentRootDir, tmpRootDir, copyHardlink); err != nil {
-		return 0, err
-	}
-
-	options := &archive.TarOptions{UIDMaps: d.uidMaps, GIDMaps: d.gidMaps}
-	if size, err = graphdriver.ApplyUncompressedLayer(tmpRootDir, diff, options); err != nil {
-		return 0, err
-	}
-
-	rootDir := path.Join(dir, "root")
-	if err := os.Rename(tmpRootDir, rootDir); err != nil {
-		return 0, err
-	}
-
-	return
-}
-
-// Exists checks whether the directory for the given id exists.
-func (d *Driver) Exists(id string) bool {
-	_, err := os.Stat(d.dir(id))
-	return err == nil
-}
diff --git a/daemon/graphdriver/overlay/overlay_test.go b/daemon/graphdriver/overlay/overlay_test.go
deleted file mode 100644
index 34b6d801fd..0000000000
--- a/daemon/graphdriver/overlay/overlay_test.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// +build linux
-
-package overlay
-
-import (
-	"testing"
-
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/daemon/graphdriver/graphtest"
-	"github.com/docker/docker/pkg/archive"
-)
-
-func init() {
-	// Do not use chroot, to speed up run time and allow archive
-	// errors or hangs to be debugged directly from the test process.
-	graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer
-}
-
-// This avoids creating a new driver for each test if all tests are run.
-// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown.
-func TestOverlaySetup(t *testing.T) {
-	graphtest.GetDriver(t, "overlay")
-}
-
-func TestOverlayCreateEmpty(t *testing.T) {
-	graphtest.DriverTestCreateEmpty(t, "overlay")
-}
-
-func TestOverlayCreateBase(t *testing.T) {
-	graphtest.DriverTestCreateBase(t, "overlay")
-}
-
-func TestOverlayCreateSnap(t *testing.T) {
-	graphtest.DriverTestCreateSnap(t, "overlay")
-}
-
-func TestOverlay50LayerRead(t *testing.T) {
-	graphtest.DriverTestDeepLayerRead(t, 50, "overlay")
-}
-
-// Fails due to a bug in calculating changes after apply,
-// likely related to https://github.com/docker/docker/issues/21555
-func TestOverlayDiffApply10Files(t *testing.T) {
-	t.Skipf("Fails to compute changes after apply intermittently")
-	graphtest.DriverTestDiffApply(t, 10, "overlay")
-}
-
-func TestOverlayChanges(t *testing.T) {
-	t.Skipf("Fails to compute changes intermittently")
-	graphtest.DriverTestChanges(t, "overlay")
-}
-
-func TestOverlayTeardown(t *testing.T) {
-	graphtest.PutDriver(t)
-}
-
-// Benchmarks should always set up a new driver
-
-func BenchmarkExists(b *testing.B) {
-	graphtest.DriverBenchExists(b, "overlay")
-}
-
-func BenchmarkGetEmpty(b *testing.B) {
-	graphtest.DriverBenchGetEmpty(b, "overlay")
-}
-
-func BenchmarkDiffBase(b *testing.B) {
-	graphtest.DriverBenchDiffBase(b, "overlay")
-}
-
-func BenchmarkDiffSmallUpper(b *testing.B) {
-	graphtest.DriverBenchDiffN(b, 10, 10, "overlay")
-}
-
-func BenchmarkDiff10KFileUpper(b *testing.B) {
-	graphtest.DriverBenchDiffN(b, 10, 10000, "overlay")
-}
-
-func BenchmarkDiff10KFilesBottom(b *testing.B) {
-	graphtest.DriverBenchDiffN(b, 10000, 10, "overlay")
-}
-
-func BenchmarkDiffApply100(b *testing.B) {
-	graphtest.DriverBenchDiffApplyN(b, 100, "overlay")
-}
-
-func BenchmarkDiff20Layers(b *testing.B) {
-	graphtest.DriverBenchDeepLayerDiff(b, 20, "overlay")
-}
-
-func BenchmarkRead20Layers(b *testing.B) {
-	graphtest.DriverBenchDeepLayerRead(b, 20, "overlay")
-}
diff --git a/daemon/graphdriver/overlay/overlay_unsupported.go b/daemon/graphdriver/overlay/overlay_unsupported.go
deleted file mode 100644
index 3dbb4de44e..0000000000
--- a/daemon/graphdriver/overlay/overlay_unsupported.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// +build !linux
-
-package overlay
diff --git a/daemon/graphdriver/overlay2/mount.go b/daemon/graphdriver/overlay2/mount.go
deleted file mode 100644
index 44456bbb67..0000000000
--- a/daemon/graphdriver/overlay2/mount.go
+++ /dev/null
@@ -1,88 +0,0 @@
-// +build linux
-
-package overlay2
-
-import (
-	"bytes"
-	"encoding/json"
-	"flag"
-	"fmt"
-	"os"
-	"runtime"
-	"syscall"
-
-	"github.com/docker/docker/pkg/reexec"
-)
-
-func init() {
-	reexec.Register("docker-mountfrom", mountFromMain)
-}
-
-func fatal(err error) {
-	fmt.Fprint(os.Stderr, err)
-	os.Exit(1)
-}
-
-type mountOptions struct {
-	Device string
-	Target string
-	Type   string
-	Label  string
-	Flag   uint32
-}
-
-func mountFrom(dir, device, target, mType, label string) error {
-	options := &mountOptions{
-		Device: device,
-		Target: target,
-		Type:   mType,
-		Flag:   0,
-		Label:  label,
-	}
-
-	cmd := reexec.Command("docker-mountfrom", dir)
-	w, err := cmd.StdinPipe()
-	if err != nil {
-		return fmt.Errorf("mountfrom error on pipe creation: %v", err)
-	}
-
-	output := bytes.NewBuffer(nil)
-	cmd.Stdout = output
-	cmd.Stderr = output
-
-	if err := cmd.Start(); err != nil {
-		return fmt.Errorf("mountfrom error on re-exec cmd: %v", err)
-	}
-	// write the options to the pipe for the re-exec'd mount process to read
-	if err := json.NewEncoder(w).Encode(options); err != nil {
-		return fmt.Errorf("mountfrom json encode to pipe failed: %v", err)
-	}
-	w.Close()
-
-	if err := cmd.Wait(); err != nil {
-		return fmt.Errorf("mountfrom re-exec error: %v: output: %s", err, output)
-	}
-	return nil
-}
-
-// mountFromMain is the entry point for docker-mountfrom on re-exec.
-func mountFromMain() {
-	runtime.LockOSThread()
-	flag.Parse()
-
-	var options *mountOptions
-
-	if err := json.NewDecoder(os.Stdin).Decode(&options); err != nil {
-		fatal(err)
-	}
-
-	if err := os.Chdir(flag.Arg(0)); err != nil {
-		fatal(err)
-	}
-
-	if err := syscall.Mount(options.Device, options.Target, options.Type, uintptr(options.Flag), options.Label); err != nil {
-		fatal(err)
-	}
-
-	os.Exit(0)
-}
diff --git a/daemon/graphdriver/overlay2/overlay.go b/daemon/graphdriver/overlay2/overlay.go
deleted file mode 100644
index 136d058d5d..0000000000
--- a/daemon/graphdriver/overlay2/overlay.go
+++ /dev/null
@@ -1,509 +0,0 @@
-// +build linux
-
-package overlay2
-
-import (
-	"bufio"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path"
-	"strconv"
-	"strings"
-	"syscall"
-
-	"github.com/Sirupsen/logrus"
-
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/chrootarchive"
-	"github.com/docker/docker/pkg/directory"
-	"github.com/docker/docker/pkg/idtools"
-	"github.com/docker/docker/pkg/mount"
-	"github.com/docker/docker/pkg/parsers"
-	"github.com/docker/docker/pkg/parsers/kernel"
-
-	"github.com/opencontainers/runc/libcontainer/label"
-)
-
-var (
-	// untar defines the untar method
-	untar = chrootarchive.UntarUncompressed
-)
-
-// This backend uses the overlay union filesystem for containers
-// with diff directories for each layer.
-
-// This version of the overlay driver requires at least kernel
-// 4.0.0 in order to support mounting multiple diff directories.
-
-// Each container/image has at least a "diff" directory and "link" file.
-// There is also a "lower" file when there are diff layers below,
-// as well as "merged" and "work" directories. The "diff" directory
-// has the upper layer of the overlay and is used to capture any
-// changes to the layer. The "lower" file contains all the lower layer
-// mounts separated by ":" and ordered from uppermost to lowermost
-// layers. The overlay itself is mounted in the "merged" directory,
-// and the "work" dir is needed for overlay to work.
-
-// The "link" file for each layer contains a unique string for the layer.
-// Under the "l" directory at the root there will be a symbolic link
-// with that unique string pointing to the "diff" directory for the layer.
-// The symbolic links are used to reference lower layers in the "lower"
-// file and on mount. The links are used to shorten the total length
-// of a layer reference without requiring changes to the layer identifier
-// or root directory. Mounts are always done relative to root and
-// referencing the symbolic links in order to ensure the number of
-// lower directories can fit in a single page for making the mount
-// syscall. A hard upper limit of 128 lower layers is enforced to ensure
-// that mounts do not fail due to length.
-
-const (
-	driverName = "overlay2"
-	linkDir    = "l"
-	lowerFile  = "lower"
-	maxDepth   = 128
-
-	// idLength represents the number of random characters
-	// which can be used to create the unique link identifier
-	// for every layer. If this value is too long then the
-	// page size limit for the mount command may be exceeded.
-	// The idLength should be selected such that the following equation
-	// is true (512 is a buffer for label metadata).
-	// ((idLength + len(linkDir) + 1) * maxDepth) <= (pageSize - 512)
-	idLength = 26
-)
-
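The constraint in the idLength comment is easy to check; evaluating the worst case shows the chosen constants sit exactly at the limit (a 4096-byte page is assumed here, as on x86-64):

	package main

	import "fmt"

	func main() {
		const (
			idLength = 26
			linkDir  = "l"
			maxDepth = 128
			pageSize = 4096 // assumed typical page size
		)
		// Per the formula above: (idLength + len(linkDir) + 1) * maxDepth.
		worst := (idLength + len(linkDir) + 1) * maxDepth
		fmt.Println(worst, worst <= pageSize-512) // 3584 true
	}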
-// Driver contains information about the home directory and the list of active mounts that are created using this driver.
-type Driver struct {
-	home    string
-	uidMaps []idtools.IDMap
-	gidMaps []idtools.IDMap
-	ctr     *graphdriver.RefCounter
-}
-
-var backingFs = ""
-
-func init() {
-	graphdriver.Register(driverName, Init)
-}
-
-// Init returns a native diff driver for the overlay filesystem.
-// If the overlay filesystem is not supported on the host, graphdriver.ErrNotSupported is returned.
-// If overlay is not supported over the existing backing filesystem, graphdriver.ErrIncompatibleFS is returned.
-func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) {
-	opts, err := parseOptions(options)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := supportsOverlay(); err != nil {
-		return nil, graphdriver.ErrNotSupported
-	}
-
-	// require kernel 4.0.0 to ensure multiple lower dirs are supported
-	v, err := kernel.GetKernelVersion()
-	if err != nil {
-		return nil, err
-	}
-	if kernel.CompareKernelVersion(*v, kernel.VersionInfo{Kernel: 4, Major: 0, Minor: 0}) < 0 {
-		if !opts.overrideKernelCheck {
-			return nil, graphdriver.ErrNotSupported
-		}
-		logrus.Warnf("Using pre-4.0.0 kernel for overlay2, mount failures may require kernel update")
-	}
-
-	fsMagic, err := graphdriver.GetFSMagic(home)
-	if err != nil {
-		return nil, err
-	}
-	if fsName, ok := graphdriver.FsNames[fsMagic]; ok {
-		backingFs = fsName
-	}
-
-	// check if they are running over btrfs, aufs, zfs, overlay, or ecryptfs
-	switch fsMagic {
-	case graphdriver.FsMagicBtrfs, graphdriver.FsMagicAufs, graphdriver.FsMagicZfs, graphdriver.FsMagicOverlay, graphdriver.FsMagicEcryptfs:
-		logrus.Errorf("'overlay2' is not supported over %s", backingFs)
-		return nil, graphdriver.ErrIncompatibleFS
-	}
-
-	rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps)
-	if err != nil {
-		return nil, err
-	}
-	// Create the driver home dir
-	if err := idtools.MkdirAllAs(path.Join(home, linkDir), 0700, rootUID, rootGID); err != nil && !os.IsExist(err) {
-		return nil, err
-	}
-
-	if err := mount.MakePrivate(home); err != nil {
-		return nil, err
-	}
-
-	d := &Driver{
-		home:    home,
-		uidMaps: uidMaps,
-		gidMaps: gidMaps,
-		ctr:     graphdriver.NewRefCounter(graphdriver.NewFsChecker(graphdriver.FsMagicOverlay)),
-	}
-
-	return d, nil
-}
-
-type overlayOptions struct {
-	overrideKernelCheck bool
-}
-
-func parseOptions(options []string) (*overlayOptions, error) {
-	o := &overlayOptions{}
-	for _, option := range options {
-		key, val, err := parsers.ParseKeyValueOpt(option)
-		if err != nil {
-			return nil, err
-		}
-		key = strings.ToLower(key)
-		switch key {
-		case "overlay2.override_kernel_check":
-			o.overrideKernelCheck, err = strconv.ParseBool(val)
-			if err != nil {
-				return nil, err
-			}
-		default:
-			return nil, fmt.Errorf("overlay2: Unknown option %s", key)
-		}
-	}
-	return o, nil
-}
-
-func supportsOverlay() error {
-	// We can try to modprobe overlay first before looking at
-	// proc/filesystems for when overlay is supported
-	exec.Command("modprobe", "overlay").Run()
-
-	f, err := os.Open("/proc/filesystems")
-	if err != nil {
-		return err
-	}
-	defer f.Close()
-
-	s := bufio.NewScanner(f)
-	for s.Scan() {
-		if s.Text() == "nodev\toverlay" {
-			return nil
-		}
-	}
-	logrus.Error("'overlay' not found as a supported filesystem on this host. Please ensure the kernel is new enough and has overlay support loaded.")
-	return graphdriver.ErrNotSupported
-}
-
-func (d *Driver) String() string {
-	return driverName
-}
-
-// Status returns current driver information in a two-dimensional string array.
-// Output contains "Backing Filesystem" used in this implementation.
-func (d *Driver) Status() [][2]string {
-	return [][2]string{
-		{"Backing Filesystem", backingFs},
-	}
-}
-
-// GetMetadata returns metadata about the overlay driver such as the
-// LowerDir, UpperDir, WorkDir, and MergedDir used to store data.
-func (d *Driver) GetMetadata(id string) (map[string]string, error) {
-	dir := d.dir(id)
-	if _, err := os.Stat(dir); err != nil {
-		return nil, err
-	}
-
-	metadata := map[string]string{
-		"WorkDir":   path.Join(dir, "work"),
-		"MergedDir": path.Join(dir, "merged"),
-		"UpperDir":  path.Join(dir, "diff"),
-	}
-
-	lowerDirs, err := d.getLowerDirs(id)
-	if err != nil {
-		return nil, err
-	}
-	if len(lowerDirs) > 0 {
-		metadata["LowerDir"] = strings.Join(lowerDirs, ":")
-	}
-
-	return metadata, nil
-}
-
-// Cleanup cleans any state created by overlay that should be removed when the
-// daemon is shut down. For now, we just have to unmount the bind mount we
-// created over the home directory.
-func (d *Driver) Cleanup() error {
-	return mount.Unmount(d.home)
-}
-
-// CreateReadWrite creates a layer that is writable for use as a container
-// file system.
-func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error {
-	return d.Create(id, parent, mountLabel, storageOpt)
-}
-
-// Create is used to create the upper, lower, and merge directories required for overlay fs for a given id.
-// The parent filesystem is used to configure these directories for the overlay.
-func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) (retErr error) {
-
-	if len(storageOpt) != 0 {
-		return fmt.Errorf("--storage-opt is not supported for overlay")
-	}
-
-	dir := d.dir(id)
-
-	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-	if err != nil {
-		return err
-	}
-	if err := idtools.MkdirAllAs(path.Dir(dir), 0700, rootUID, rootGID); err != nil {
-		return err
-	}
-	if err := idtools.MkdirAs(dir, 0700, rootUID, rootGID); err != nil {
-		return err
-	}
-
-	defer func() {
-		// Clean up on failure
-		if retErr != nil {
-			os.RemoveAll(dir)
-		}
-	}()
-
-	if err := idtools.MkdirAs(path.Join(dir, "diff"), 0755, rootUID, rootGID); err != nil {
-		return err
-	}
-
-	lid := generateID(idLength)
-	if err := os.Symlink(path.Join("..", id, "diff"), path.Join(d.home, linkDir, lid)); err != nil {
-		return err
-	}
-
-	// Write link id to link file
-	if err := ioutil.WriteFile(path.Join(dir, "link"), []byte(lid), 0644); err != nil {
-		return err
-	}
-
-	// if no parent directory, done
-	if parent == "" {
-		return nil
-	}
-
-	if err := idtools.MkdirAs(path.Join(dir, "work"), 0700, rootUID, rootGID); err != nil {
-		return err
-	}
-	if err := idtools.MkdirAs(path.Join(dir, "merged"), 0700, rootUID, rootGID); err != nil {
-		return err
-	}
-
-	lower, err := d.getLower(parent)
-	if err != nil {
-		return err
-	}
-	if lower != "" {
-		if err := ioutil.WriteFile(path.Join(dir, lowerFile), []byte(lower), 0666); err != nil {
-			return err
-		}
-	}
-
-	return nil
-}
-
-func (d *Driver) getLower(parent string) (string, error) {
-	parentDir := d.dir(parent)
-
-	// Ensure parent exists
-	if _, err := os.Lstat(parentDir); err != nil {
-		return "", err
-	}
-
-	// Read the parent's link file
-	parentLink, err := ioutil.ReadFile(path.Join(parentDir, "link"))
-	if err != nil {
-		return "", err
-	}
-	lowers := []string{path.Join(linkDir, string(parentLink))}
-
-	parentLower, err := ioutil.ReadFile(path.Join(parentDir, lowerFile))
-	if err == nil {
-		parentLowers := strings.Split(string(parentLower), ":")
-		lowers = append(lowers, parentLowers...)
-	}
-	if len(lowers) > maxDepth {
-		return "", errors.New("max depth exceeded")
-	}
-	return strings.Join(lowers, ":"), nil
-}
-
-func (d *Driver) dir(id string) string {
-	return path.Join(d.home, id)
-}
-
-func (d *Driver) getLowerDirs(id string) ([]string, error) {
-	var lowersArray []string
-	lowers, err := ioutil.ReadFile(path.Join(d.dir(id), lowerFile))
-	if err == nil {
-		for _, s := range strings.Split(string(lowers), ":") {
-			lp, err := os.Readlink(path.Join(d.home, s))
-			if err != nil {
-				return nil, err
-			}
-			lowersArray = append(lowersArray, path.Clean(path.Join(d.home, "link", lp)))
-		}
-	} else if !os.IsNotExist(err) {
-		return nil, err
-	}
-	return lowersArray, nil
-}
-
-// Remove cleans the directories that are created for this id.
-func (d *Driver) Remove(id string) error {
-	dir := d.dir(id)
-	lid, err := ioutil.ReadFile(path.Join(dir, "link"))
-	if err == nil {
-		if err := os.RemoveAll(path.Join(d.home, linkDir, string(lid))); err != nil {
-			logrus.Debugf("Failed to remove link: %v", err)
-		}
-	}
-
-	if err := os.RemoveAll(dir); err != nil && !os.IsNotExist(err) {
-		return err
-	}
-	return nil
-}
-
-// Get creates and mounts the required file system for the given id and returns the mount path.
-func (d *Driver) Get(id string, mountLabel string) (s string, err error) {
-	dir := d.dir(id)
-	if _, err := os.Stat(dir); err != nil {
-		return "", err
-	}
-
-	diffDir := path.Join(dir, "diff")
-	lowers, err := ioutil.ReadFile(path.Join(dir, lowerFile))
-	if err != nil {
-		// If no lower, just return diff directory
-		if os.IsNotExist(err) {
-			return diffDir, nil
-		}
-		return "", err
-	}
-
-	mergedDir := path.Join(dir, "merged")
-	if count := d.ctr.Increment(mergedDir); count > 1 {
-		return mergedDir, nil
-	}
-	defer func() {
-		if err != nil {
-			if c := d.ctr.Decrement(mergedDir); c <= 0 {
-				syscall.Unmount(mergedDir, 0)
-			}
-		}
-	}()
-
-	workDir := path.Join(dir, "work")
-	opts := fmt.Sprintf("lowerdir=%s,upperdir=%s,workdir=%s", string(lowers), path.Join(id, "diff"), path.Join(id, "work"))
-	mountLabel = label.FormatMountLabel(opts, mountLabel)
-	if len(mountLabel) > syscall.Getpagesize() {
-		return "", fmt.Errorf("cannot mount layer, mount label too large %d", len(mountLabel))
-	}
-
-	if err := mountFrom(d.home, "overlay", path.Join(id, "merged"), "overlay", mountLabel); err != nil {
-		return "", fmt.Errorf("error creating overlay mount to %s: %v", mergedDir, err)
-	}
-
-	// chown "workdir/work" to the remapped root UID/GID. Overlay fs inside a
-	// user namespace requires this to move a directory from lower to upper.
-	rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-	if err != nil {
-		return "", err
-	}
-
-	if err := os.Chown(path.Join(workDir, "work"), rootUID, rootGID); err != nil {
-		return "", err
-	}
-
-	return mergedDir, nil
-}
-
-// Put unmounts the mount path created for the given id.
-func (d *Driver) Put(id string) error {
-	mountpoint := path.Join(d.dir(id), "merged")
-	if count := d.ctr.Decrement(mountpoint); count > 0 {
-		return nil
-	}
-	if err := syscall.Unmount(mountpoint, 0); err != nil {
-		logrus.Debugf("Failed to unmount %s overlay: %v", id, err)
-	}
-	return nil
-}
-
-// Exists checks whether the directory for the given id exists.
-func (d *Driver) Exists(id string) bool {
-	_, err := os.Stat(d.dir(id))
-	return err == nil
-}
-
-// ApplyDiff extracts the new layer into the id's diff directory.
-func (d *Driver) ApplyDiff(id string, parent string, diff archive.Reader) (size int64, err error) {
-	applyDir := d.getDiffPath(id)
-
-	logrus.Debugf("Applying tar in %s", applyDir)
-	// Overlay doesn't need the parent id to apply the diff
-	if err := untar(diff, applyDir, &archive.TarOptions{
-		UIDMaps:        d.uidMaps,
-		GIDMaps:        d.gidMaps,
-		WhiteoutFormat: archive.OverlayWhiteoutFormat,
-	}); err != nil {
-		return 0, err
-	}
-
-	return d.DiffSize(id, parent)
-}
-
-func (d *Driver) getDiffPath(id string) string {
-	dir := d.dir(id)
-
-	return path.Join(dir, "diff")
-}
-
-// DiffSize calculates the changes between the specified id
-// and its parent and returns the size in bytes of the changes
-// relative to its base filesystem directory.
-func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
-	return directory.Size(d.getDiffPath(id))
-}
-
-// Diff produces an archive of the changes between the specified
-// layer and its parent layer which may be "".
-func (d *Driver) Diff(id, parent string) (archive.Archive, error) {
-	diffPath := d.getDiffPath(id)
-	logrus.Debugf("Tar with options on %s", diffPath)
-	return archive.TarWithOptions(diffPath, &archive.TarOptions{
-		Compression:    archive.Uncompressed,
-		UIDMaps:        d.uidMaps,
-		GIDMaps:        d.gidMaps,
-		WhiteoutFormat: archive.OverlayWhiteoutFormat,
-	})
-}
-
-// Changes produces a list of changes between the specified layer
-// and its parent layer. If parent is "", then all changes will be ADD changes.
-func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
-	// Overlay doesn't have snapshots, so we need to get changes from all parent
-	// layers.
-	diffPath := d.getDiffPath(id)
-	layers, err := d.getLowerDirs(id)
-	if err != nil {
-		return nil, err
-	}
-
-	return archive.OverlayChanges(layers, diffPath)
-}
diff --git a/daemon/graphdriver/overlay2/overlay_test.go b/daemon/graphdriver/overlay2/overlay_test.go
deleted file mode 100644
index ae4b785dcb..0000000000
--- a/daemon/graphdriver/overlay2/overlay_test.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// +build linux
-
-package overlay2
-
-import (
-	"os"
-	"syscall"
-	"testing"
-
-	"github.com/docker/docker/daemon/graphdriver"
-	"github.com/docker/docker/daemon/graphdriver/graphtest"
-	"github.com/docker/docker/pkg/archive"
-	"github.com/docker/docker/pkg/reexec"
-)
-
-func init() {
-	// Do not use chroot, to speed up run time and allow archive
-	// errors or hangs to be debugged directly from the test process.
-	untar = archive.UntarUncompressed
-	graphdriver.ApplyUncompressedLayer = archive.ApplyUncompressedLayer
-
-	reexec.Init()
-}
-
-func cdMountFrom(dir, device, target, mType, label string) error {
-	wd, err := os.Getwd()
-	if err != nil {
-		return err
-	}
-	os.Chdir(dir)
-	defer os.Chdir(wd)
-
-	return syscall.Mount(device, target, mType, 0, label)
-}
-
-// This avoids creating a new driver for each test if all tests are run.
-// Make sure to put new tests between TestOverlaySetup and TestOverlayTeardown.
-func TestOverlaySetup(t *testing.T) {
-	graphtest.GetDriver(t, driverName)
-}
-
-func TestOverlayCreateEmpty(t *testing.T) {
-	graphtest.DriverTestCreateEmpty(t, driverName)
-}
-
-func TestOverlayCreateBase(t *testing.T) {
-	graphtest.DriverTestCreateBase(t, driverName)
-}
-
-func TestOverlayCreateSnap(t *testing.T) {
-	graphtest.DriverTestCreateSnap(t, driverName)
-}
-
-func TestOverlay128LayerRead(t *testing.T) {
-	graphtest.DriverTestDeepLayerRead(t, 128, driverName)
-}
-
-func TestOverlayDiffApply10Files(t *testing.T) {
-	graphtest.DriverTestDiffApply(t, 10, driverName)
-}
-
-func TestOverlayChanges(t *testing.T) {
-	graphtest.DriverTestChanges(t, driverName)
-}
-
-func TestOverlayTeardown(t *testing.T) {
-	graphtest.PutDriver(t)
-}
-
-// Benchmarks should always set up a new driver
-
-func BenchmarkExists(b *testing.B) {
-	graphtest.DriverBenchExists(b, driverName)
-}
-
-func BenchmarkGetEmpty(b *testing.B) {
-	graphtest.DriverBenchGetEmpty(b, driverName)
-}
-
-func BenchmarkDiffBase(b *testing.B) {
-	graphtest.DriverBenchDiffBase(b, driverName)
-}
-
-func BenchmarkDiffSmallUpper(b *testing.B) {
-	graphtest.DriverBenchDiffN(b, 10, 10, driverName)
-}
-
-func BenchmarkDiff10KFileUpper(b *testing.B) {
-	graphtest.DriverBenchDiffN(b, 10, 10000, driverName)
-}
-
-func BenchmarkDiff10KFilesBottom(b *testing.B) {
-	graphtest.DriverBenchDiffN(b, 10000, 10, driverName)
-}
-
-func BenchmarkDiffApply100(b *testing.B) {
-	graphtest.DriverBenchDiffApplyN(b, 100, driverName)
-}
-
-func BenchmarkDiff20Layers(b *testing.B) {
-	graphtest.DriverBenchDeepLayerDiff(b, 20, driverName)
-}
-
-func BenchmarkRead20Layers(b *testing.B) {
-	graphtest.DriverBenchDeepLayerRead(b, 20, driverName)
-}
diff --git a/daemon/graphdriver/overlay2/overlay_unsupported.go b/daemon/graphdriver/overlay2/overlay_unsupported.go
deleted file mode 100644
index e5ac4ca8c6..0000000000
--- a/daemon/graphdriver/overlay2/overlay_unsupported.go
+++ /dev/null
@@ -1,3 +0,0 @@
-// +build !linux
-
-package overlay2
diff --git a/daemon/graphdriver/overlay2/randomid.go b/daemon/graphdriver/overlay2/randomid.go
deleted file mode 100644
index af5cb659d5..0000000000
--- a/daemon/graphdriver/overlay2/randomid.go
+++ /dev/null
@@ -1,80 +0,0 @@
-// +build linux
-
-package overlay2
-
-import (
-	"crypto/rand"
-	"encoding/base32"
-	"fmt"
-	"io"
-	"os"
-	"syscall"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-)
-
-// generateID creates a new random string identifier with the given length
-func generateID(l int) string {
-	const (
-		// ensures we back off for less than 450ms total. Use the following to
-		// select a new value, in units of 10ms:
-		// n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2
-		maxretries = 9
-		backoff    = time.Millisecond * 10
-	)
-
-	var (
-		totalBackoff time.Duration
-		count        int
-		retries      int
-		size         = (l*5 + 7) / 8
-		u            = make([]byte, size)
-	)
-	// TODO: Include time component, counter component, random component
-
-	for {
-		// This should never block but the read may fail. Because of this,
-		// we just try to read the random number generator until we get
-		// something. This is a very rare condition but may happen.
-		b := time.Duration(retries) * backoff
-		time.Sleep(b)
-		totalBackoff += b
-
-		n, err := io.ReadFull(rand.Reader, u[count:])
-		if err != nil {
-			if retryOnError(err) && retries < maxretries {
-				count += n
-				retries++
-				logrus.Errorf("error generating version 4 uuid, retrying: %v", err)
-				continue
-			}
-
-			// Any other errors represent a system problem. What did someone
-			// do to /dev/urandom?
-			panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
-		}
-
-		break
-	}
-
-	s := base32.StdEncoding.EncodeToString(u)
-
-	return s[:l]
-}
-
-// retryOnError tries to detect whether or not retrying would be fruitful.
-func retryOnError(err error) bool {
-	switch err := err.(type) {
-	case *os.PathError:
-		return retryOnError(err.Err) // unpack the target error
-	case syscall.Errno:
-		if err == syscall.EPERM {
-			// EPERM represents an entropy pool exhaustion, a condition under
-			// which we back off and retry.
-			return true
-		}
-	}
-
-	return false
-}
diff --git a/daemon/graphdriver/plugin.go b/daemon/graphdriver/plugin.go
deleted file mode 100644
index 9f172b72d4..0000000000
--- a/daemon/graphdriver/plugin.go
+++ /dev/null
@@ -1,32 +0,0 @@
-// +build experimental
-
-package graphdriver
-
-import (
-	"fmt"
-	"io"
-
-	"github.com/docker/docker/pkg/plugins"
-)
-
-type pluginClient interface {
-	// Call calls the specified method with the specified arguments for the plugin.
-	Call(string, interface{}, interface{}) error
-	// Stream calls the specified method with the specified arguments for the plugin and returns the response IO stream
-	Stream(string, interface{}) (io.ReadCloser, error)
-	// SendFile calls the specified method, and passes through the IO stream
-	SendFile(string, io.Reader, interface{}) error
-}
-
-func lookupPlugin(name, home string, opts []string) (Driver, error) {
-	pl, err := plugins.Get(name, "GraphDriver")
-	if err != nil {
-		return nil, fmt.Errorf("Error looking up graphdriver plugin %s: %v", name, err)
-	}
-	return newPluginDriver(name, home, opts, pl.Client())
-}
-
-func newPluginDriver(name, home string, opts []string, c pluginClient) (Driver, error) {
-	proxy := &graphDriverProxy{name, c}
-	return proxy, proxy.Init(home, opts)
-}
diff --git a/daemon/graphdriver/plugin_unsupported.go b/daemon/graphdriver/plugin_unsupported.go
deleted file mode 100644
index daa7a170e4..0000000000
--- a/daemon/graphdriver/plugin_unsupported.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !experimental
-
-package graphdriver
-
-func lookupPlugin(name, home string, opts []string) (Driver, error) {
-	return nil, ErrNotSupported
-}
diff --git a/daemon/graphdriver/proxy.go b/daemon/graphdriver/proxy.go
deleted file mode 100644
index 3a8d599cb6..0000000000
--- a/daemon/graphdriver/proxy.go
+++ /dev/null
@@ -1,225 +0,0 @@
-// +build experimental
-
-package graphdriver
-
-import (
-	"errors"
-	"fmt"
-
-	"github.com/docker/docker/pkg/archive"
-)
-
-type graphDriverProxy struct {
-	name   string
-	client pluginClient
-}
-
-type graphDriverRequest struct {
-	ID         string `json:",omitempty"`
-	Parent     string `json:",omitempty"`
-	MountLabel string `json:",omitempty"`
-}
-
-type graphDriverResponse struct {
-	Err     string           `json:",omitempty"`
-	Dir     string           `json:",omitempty"`
-	Exists  bool             `json:",omitempty"`
-	Status  [][2]string      `json:",omitempty"`
-	Changes []archive.Change `json:",omitempty"`
-	Size    int64            `json:",omitempty"`
- Metadata map[string]string `json:",omitempty"` -} - -type graphDriverInitRequest struct { - Home string - Opts []string -} - -func (d *graphDriverProxy) Init(home string, opts []string) error { - args := &graphDriverInitRequest{ - Home: home, - Opts: opts, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Init", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) String() string { - return d.name -} - -func (d *graphDriverProxy) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - MountLabel: mountLabel, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.CreateReadWrite", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Create(id, parent, mountLabel string, storageOpt map[string]string) error { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - MountLabel: mountLabel, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Create", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Remove(id string) error { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Remove", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Get(id, mountLabel string) (string, error) { - args := &graphDriverRequest{ - ID: id, - MountLabel: mountLabel, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Get", args, &ret); err != nil { - return "", err - } - var err error - if ret.Err != "" { - err = errors.New(ret.Err) - } - return ret.Dir, err -} - -func (d *graphDriverProxy) Put(id string) error { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Put", args, &ret); err != nil { - return err - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Exists(id string) bool { - args := &graphDriverRequest{ID: id} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Exists", args, &ret); err != nil { - return false - } - return ret.Exists -} - -func (d *graphDriverProxy) Status() [][2]string { - args := &graphDriverRequest{} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Status", args, &ret); err != nil { - return nil - } - return ret.Status -} - -func (d *graphDriverProxy) GetMetadata(id string) (map[string]string, error) { - args := &graphDriverRequest{ - ID: id, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.GetMetadata", args, &ret); err != nil { - return nil, err - } - if ret.Err != "" { - return nil, errors.New(ret.Err) - } - return ret.Metadata, nil -} - -func (d *graphDriverProxy) Cleanup() error { - args := &graphDriverRequest{} - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Cleanup", args, &ret); err != nil { - return nil - } - if ret.Err != "" { - return errors.New(ret.Err) - } - return nil -} - -func (d *graphDriverProxy) Diff(id, parent string) (archive.Archive, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - body, err := d.client.Stream("GraphDriver.Diff", args) - if err != nil { - return nil, 
err - } - return archive.Archive(body), nil -} - -func (d *graphDriverProxy) Changes(id, parent string) ([]archive.Change, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.Changes", args, &ret); err != nil { - return nil, err - } - if ret.Err != "" { - return nil, errors.New(ret.Err) - } - - return ret.Changes, nil -} - -func (d *graphDriverProxy) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) { - var ret graphDriverResponse - if err := d.client.SendFile(fmt.Sprintf("GraphDriver.ApplyDiff?id=%s&parent=%s", id, parent), diff, &ret); err != nil { - return -1, err - } - if ret.Err != "" { - return -1, errors.New(ret.Err) - } - return ret.Size, nil -} - -func (d *graphDriverProxy) DiffSize(id, parent string) (int64, error) { - args := &graphDriverRequest{ - ID: id, - Parent: parent, - } - var ret graphDriverResponse - if err := d.client.Call("GraphDriver.DiffSize", args, &ret); err != nil { - return -1, err - } - if ret.Err != "" { - return -1, errors.New(ret.Err) - } - return ret.Size, nil -} diff --git a/daemon/graphdriver/register/register_aufs.go b/daemon/graphdriver/register/register_aufs.go deleted file mode 100644 index 262954d6e3..0000000000 --- a/daemon/graphdriver/register/register_aufs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_aufs,linux - -package register - -import ( - // register the aufs graphdriver - _ "github.com/docker/docker/daemon/graphdriver/aufs" -) diff --git a/daemon/graphdriver/register/register_btrfs.go b/daemon/graphdriver/register/register_btrfs.go deleted file mode 100644 index f456cc5ce5..0000000000 --- a/daemon/graphdriver/register/register_btrfs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_btrfs,linux - -package register - -import ( - // register the btrfs graphdriver - _ "github.com/docker/docker/daemon/graphdriver/btrfs" -) diff --git a/daemon/graphdriver/register/register_devicemapper.go b/daemon/graphdriver/register/register_devicemapper.go deleted file mode 100644 index bb2e9ef541..0000000000 --- a/daemon/graphdriver/register/register_devicemapper.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_devicemapper,linux - -package register - -import ( - // register the devmapper graphdriver - _ "github.com/docker/docker/daemon/graphdriver/devmapper" -) diff --git a/daemon/graphdriver/register/register_overlay.go b/daemon/graphdriver/register/register_overlay.go deleted file mode 100644 index 9ba849cedc..0000000000 --- a/daemon/graphdriver/register/register_overlay.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !exclude_graphdriver_overlay,linux - -package register - -import ( - // register the overlay graphdriver - _ "github.com/docker/docker/daemon/graphdriver/overlay" - _ "github.com/docker/docker/daemon/graphdriver/overlay2" -) diff --git a/daemon/graphdriver/register/register_vfs.go b/daemon/graphdriver/register/register_vfs.go deleted file mode 100644 index 98fad23b20..0000000000 --- a/daemon/graphdriver/register/register_vfs.go +++ /dev/null @@ -1,6 +0,0 @@ -package register - -import ( - // register vfs - _ "github.com/docker/docker/daemon/graphdriver/vfs" -) diff --git a/daemon/graphdriver/register/register_windows.go b/daemon/graphdriver/register/register_windows.go deleted file mode 100644 index efaa5005ed..0000000000 --- a/daemon/graphdriver/register/register_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package register - -import ( - // register the windows graph driver - _ 
"github.com/docker/docker/daemon/graphdriver/windows" -) diff --git a/daemon/graphdriver/register/register_zfs.go b/daemon/graphdriver/register/register_zfs.go deleted file mode 100644 index 8f34e35537..0000000000 --- a/daemon/graphdriver/register/register_zfs.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !exclude_graphdriver_zfs,linux !exclude_graphdriver_zfs,freebsd, solaris - -package register - -import ( - // register the zfs driver - _ "github.com/docker/docker/daemon/graphdriver/zfs" -) diff --git a/daemon/graphdriver/vfs/driver.go b/daemon/graphdriver/vfs/driver.go deleted file mode 100644 index a058f08bc8..0000000000 --- a/daemon/graphdriver/vfs/driver.go +++ /dev/null @@ -1,145 +0,0 @@ -package vfs - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/idtools" - - "github.com/opencontainers/runc/libcontainer/label" -) - -var ( - // CopyWithTar defines the copy method to use. - CopyWithTar = chrootarchive.CopyWithTar -) - -func init() { - graphdriver.Register("vfs", Init) -} - -// Init returns a new VFS driver. -// This sets the home directory for the driver and returns NaiveDiffDriver. -func Init(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - d := &Driver{ - home: home, - uidMaps: uidMaps, - gidMaps: gidMaps, - } - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, err - } - if err := idtools.MkdirAllAs(home, 0700, rootUID, rootGID); err != nil { - return nil, err - } - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -// Driver holds information about the driver, home directory of the driver. -// Driver implements graphdriver.ProtoDriver. It uses only basic vfs operations. -// In order to support layering, files are copied from the parent layer into the new layer. There is no copy-on-write support. -// Driver must be wrapped in NaiveDiffDriver to be used as a graphdriver.Driver -type Driver struct { - home string - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap -} - -func (d *Driver) String() string { - return "vfs" -} - -// Status is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any status information. -func (d *Driver) Status() [][2]string { - return nil -} - -// GetMetadata is used for implementing the graphdriver.ProtoDriver interface. VFS does not currently have any meta data. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. -func (d *Driver) Cleanup() error { - return nil -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.Create(id, parent, mountLabel, storageOpt) -} - -// Create prepares the filesystem for the VFS driver and copies the directory for the given id under the parent. 
-func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error {
-    if len(storageOpt) != 0 {
-        return fmt.Errorf("--storage-opt is not supported for vfs")
-    }
-
-    dir := d.dir(id)
-    rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps)
-    if err != nil {
-        return err
-    }
-    if err := idtools.MkdirAllAs(filepath.Dir(dir), 0700, rootUID, rootGID); err != nil {
-        return err
-    }
-    if err := idtools.MkdirAs(dir, 0755, rootUID, rootGID); err != nil {
-        return err
-    }
-    opts := []string{"level:s0"}
-    if _, mountLabel, err := label.InitLabels(opts); err == nil {
-        label.SetFileLabel(dir, mountLabel)
-    }
-    if parent == "" {
-        return nil
-    }
-    parentDir, err := d.Get(parent, "")
-    if err != nil {
-        return fmt.Errorf("%s: %s", parent, err)
-    }
-    if err := CopyWithTar(parentDir, dir); err != nil {
-        return err
-    }
-    return nil
-}
-
-func (d *Driver) dir(id string) string {
-    return filepath.Join(d.home, "dir", filepath.Base(id))
-}
-
-// Remove deletes the content from the directory for a given id.
-func (d *Driver) Remove(id string) error {
-    if err := os.RemoveAll(d.dir(id)); err != nil && !os.IsNotExist(err) {
-        return err
-    }
-    return nil
-}
-
-// Get returns the directory for the given id.
-func (d *Driver) Get(id, mountLabel string) (string, error) {
-    dir := d.dir(id)
-    if st, err := os.Stat(dir); err != nil {
-        return "", err
-    } else if !st.IsDir() {
-        return "", fmt.Errorf("%s: not a directory", dir)
-    }
-    return dir, nil
-}
-
-// Put is a noop for vfs that returns nil for the error, since this driver has no runtime resources to clean up.
-func (d *Driver) Put(id string) error {
-    // The vfs driver has no runtime resources (e.g. mounts)
-    // to clean up, so we don't need anything here
-    return nil
-}
-
-// Exists checks to see if the directory exists for the given id.
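The Exists check below is a bare os.Stat probe. A self-contained sketch of the same idiom; note that it treats any stat error, including a permission error, as "does not exist", which is acceptable here because the daemon owns the directory tree.

package main

import (
	"fmt"
	"os"
)

// exists reports whether path can be stat'ed; a missing layer directory
// and an unreadable one are deliberately indistinguishable.
func exists(path string) bool {
	_, err := os.Stat(path)
	return err == nil
}

func main() {
	// The path is illustrative only.
	fmt.Println(exists("/var/lib/docker/vfs/dir/abc123"))
}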
-func (d *Driver) Exists(id string) bool { - _, err := os.Stat(d.dir(id)) - return err == nil -} diff --git a/daemon/graphdriver/vfs/vfs_test.go b/daemon/graphdriver/vfs/vfs_test.go deleted file mode 100644 index 9ecf21dbaa..0000000000 --- a/daemon/graphdriver/vfs/vfs_test.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build linux - -package vfs - -import ( - "testing" - - "github.com/docker/docker/daemon/graphdriver/graphtest" - - "github.com/docker/docker/pkg/reexec" -) - -func init() { - reexec.Init() -} - -// This avoids creating a new driver for each test if all tests are run -// Make sure to put new tests between TestVfsSetup and TestVfsTeardown -func TestVfsSetup(t *testing.T) { - graphtest.GetDriver(t, "vfs") -} - -func TestVfsCreateEmpty(t *testing.T) { - graphtest.DriverTestCreateEmpty(t, "vfs") -} - -func TestVfsCreateBase(t *testing.T) { - graphtest.DriverTestCreateBase(t, "vfs") -} - -func TestVfsCreateSnap(t *testing.T) { - graphtest.DriverTestCreateSnap(t, "vfs") -} - -func TestVfsTeardown(t *testing.T) { - graphtest.PutDriver(t) -} diff --git a/daemon/graphdriver/windows/windows.go b/daemon/graphdriver/windows/windows.go deleted file mode 100644 index dd54a1cbde..0000000000 --- a/daemon/graphdriver/windows/windows.go +++ /dev/null @@ -1,853 +0,0 @@ -//+build windows - -package windows - -import ( - "bufio" - "bytes" - "crypto/sha512" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - "unsafe" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/archive/tar" - "github.com/Microsoft/go-winio/backuptar" - "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/longpath" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/system" - "github.com/vbatts/tar-split/tar/storage" -) - -// filterDriver is an HCSShim driver type for the Windows Filter driver. -const filterDriver = 1 - -// init registers the windows graph drivers to the register. -func init() { - graphdriver.Register("windowsfilter", InitFilter) - reexec.Register("docker-windows-write-layer", writeLayer) -} - -type checker struct { -} - -func (c *checker) IsMounted(path string) bool { - return false -} - -// Driver represents a windows graph driver. -type Driver struct { - // info stores the shim driver information - info hcsshim.DriverInfo - ctr *graphdriver.RefCounter - // it is safe for windows to use a cache here because it does not support - // restoring containers when the daemon dies. - cacheMu sync.Mutex - cache map[string]string -} - -func isTP5OrOlder() bool { - return system.GetOSVersion().Build <= 14300 -} - -// InitFilter returns a new Windows storage filter driver. -func InitFilter(home string, options []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - logrus.Debugf("WindowsGraphDriver InitFilter at %s", home) - d := &Driver{ - info: hcsshim.DriverInfo{ - HomeDir: home, - Flavour: filterDriver, - }, - cache: make(map[string]string), - ctr: graphdriver.NewRefCounter(&checker{}), - } - return d, nil -} - -// String returns the string representation of a driver. This should match -// the name the graph driver has been registered with. -func (d *Driver) String() string { - return "windowsfilter" -} - -// Status returns the status of the driver. 
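Driver status is reported as ordered key/value pairs ([][2]string) rather than a map, so `docker info` can print the fields in a stable order. A small sketch of how such pairs might be rendered (the rendering code is illustrative, not the daemon's own):

package main

import "fmt"

// printStatus prints one "key: value" line per status pair, preserving
// the order the driver chose.
func printStatus(status [][2]string) {
	for _, kv := range status {
		fmt.Printf(" %s: %s\n", kv[0], kv[1])
	}
}

func main() {
	printStatus([][2]string{
		{"Windows", ""},
	})
}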
-func (d *Driver) Status() [][2]string { - return [][2]string{ - {"Windows", ""}, - } -} - -// Exists returns true if the given id is registered with this driver. -func (d *Driver) Exists(id string) bool { - rID, err := d.resolveID(id) - if err != nil { - return false - } - result, err := hcsshim.LayerExists(d.info, rID) - if err != nil { - return false - } - return result -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.create(id, parent, mountLabel, false, storageOpt) -} - -// Create creates a new read-only layer with the given id. -func (d *Driver) Create(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.create(id, parent, mountLabel, true, storageOpt) -} - -func (d *Driver) create(id, parent, mountLabel string, readOnly bool, storageOpt map[string]string) error { - if len(storageOpt) != 0 { - return fmt.Errorf("--storage-opt is not supported for windows") - } - - rPId, err := d.resolveID(parent) - if err != nil { - return err - } - - parentChain, err := d.getLayerChain(rPId) - if err != nil { - return err - } - - var layerChain []string - - if rPId != "" { - parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId) - if err != nil { - return err - } - if _, err := os.Stat(filepath.Join(parentPath, "Files")); err == nil { - // This is a legitimate parent layer (not the empty "-init" layer), - // so include it in the layer chain. - layerChain = []string{parentPath} - } - } - - layerChain = append(layerChain, parentChain...) - - if readOnly { - if err := hcsshim.CreateLayer(d.info, id, rPId); err != nil { - return err - } - } else { - var parentPath string - if len(layerChain) != 0 { - parentPath = layerChain[0] - } - - if isTP5OrOlder() { - // Pre-create the layer directory, providing an ACL to give the Hyper-V Virtual Machines - // group access. This is necessary to ensure that Hyper-V containers can access the - // virtual machine data. This is not necessary post-TP5. - path, err := syscall.UTF16FromString(filepath.Join(d.info.HomeDir, id)) - if err != nil { - return err - } - // Give system and administrators full control, and VMs read, write, and execute. - // Mark these ACEs as inherited. - sd, err := winio.SddlToSecurityDescriptor("D:(A;OICI;FA;;;SY)(A;OICI;FA;;;BA)(A;OICI;FRFWFX;;;S-1-5-83-0)") - if err != nil { - return err - } - err = syscall.CreateDirectory(&path[0], &syscall.SecurityAttributes{ - Length: uint32(unsafe.Sizeof(syscall.SecurityAttributes{})), - SecurityDescriptor: uintptr(unsafe.Pointer(&sd[0])), - }) - if err != nil { - return err - } - } - - if err := hcsshim.CreateSandboxLayer(d.info, id, parentPath, layerChain); err != nil { - return err - } - } - - if _, err := os.Lstat(d.dir(parent)); err != nil { - if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { - logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) - } - return fmt.Errorf("Cannot create layer with missing parent %s: %s", parent, err) - } - - if err := d.setLayerChain(id, layerChain); err != nil { - if err2 := hcsshim.DestroyLayer(d.info, id); err2 != nil { - logrus.Warnf("Failed to DestroyLayer %s: %s", id, err2) - } - return err - } - - return nil -} - -// dir returns the absolute path to the layer. -func (d *Driver) dir(id string) string { - return filepath.Join(d.info.HomeDir, filepath.Base(id)) -} - -// Remove unmounts and removes the dir information. 
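Get and Put below guard the expensive prepare/unprepare calls with a reference counter, so nested Get calls share a single activation of the layer. A hedged sketch of that counting pattern with a plain map and mutex; the real code uses graphdriver.RefCounter, which wraps the same idea:

package main

import "sync"

type refCounter struct {
	mu     sync.Mutex
	counts map[string]int
}

// Increment returns the new count; the caller mounts only when it is 1.
func (r *refCounter) Increment(id string) int {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.counts[id]++
	return r.counts[id]
}

// Decrement returns the new count; the caller unmounts only when it is 0.
func (r *refCounter) Decrement(id string) int {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.counts[id]--
	return r.counts[id]
}

func main() {
	r := &refCounter{counts: map[string]int{}}
	if r.Increment("layer1") == 1 {
		// First user: perform the actual mount here.
	}
	if r.Decrement("layer1") == 0 {
		// Last user: perform the actual unmount here.
	}
}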
-func (d *Driver) Remove(id string) error { - rID, err := d.resolveID(id) - if err != nil { - return err - } - os.RemoveAll(filepath.Join(d.info.HomeDir, "sysfile-backups", rID)) // ok to fail - return hcsshim.DestroyLayer(d.info, rID) -} - -// Get returns the rootfs path for the id. This will mount the dir at its given path. -func (d *Driver) Get(id, mountLabel string) (string, error) { - logrus.Debugf("WindowsGraphDriver Get() id %s mountLabel %s", id, mountLabel) - var dir string - - rID, err := d.resolveID(id) - if err != nil { - return "", err - } - if count := d.ctr.Increment(rID); count > 1 { - return d.cache[rID], nil - } - - // Getting the layer paths must be done outside of the lock. - layerChain, err := d.getLayerChain(rID) - if err != nil { - d.ctr.Decrement(rID) - return "", err - } - - if err := hcsshim.ActivateLayer(d.info, rID); err != nil { - d.ctr.Decrement(rID) - return "", err - } - if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { - d.ctr.Decrement(rID) - if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Warnf("Failed to Deactivate %s: %s", id, err) - } - return "", err - } - - mountPath, err := hcsshim.GetLayerMountPath(d.info, rID) - if err != nil { - d.ctr.Decrement(rID) - if err2 := hcsshim.DeactivateLayer(d.info, rID); err2 != nil { - logrus.Warnf("Failed to Deactivate %s: %s", id, err) - } - return "", err - } - d.cacheMu.Lock() - d.cache[rID] = mountPath - d.cacheMu.Unlock() - - // If the layer has a mount path, use that. Otherwise, use the - // folder path. - if mountPath != "" { - dir = mountPath - } else { - dir = d.dir(id) - } - - return dir, nil -} - -// Put adds a new layer to the driver. -func (d *Driver) Put(id string) error { - logrus.Debugf("WindowsGraphDriver Put() id %s", id) - - rID, err := d.resolveID(id) - if err != nil { - return err - } - if count := d.ctr.Decrement(rID); count > 0 { - return nil - } - d.cacheMu.Lock() - delete(d.cache, rID) - d.cacheMu.Unlock() - - if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { - return err - } - return hcsshim.DeactivateLayer(d.info, rID) -} - -// Cleanup ensures the information the driver stores is properly removed. -func (d *Driver) Cleanup() error { - return nil -} - -// Diff produces an archive of the changes between the specified -// layer and its parent layer which may be "". -// The layer should be mounted when calling this function -func (d *Driver) Diff(id, parent string) (_ archive.Archive, err error) { - rID, err := d.resolveID(id) - if err != nil { - return - } - - layerChain, err := d.getLayerChain(rID) - if err != nil { - return - } - - // this is assuming that the layer is unmounted - if err := hcsshim.UnprepareLayer(d.info, rID); err != nil { - return nil, err - } - prepare := func() { - if err := hcsshim.PrepareLayer(d.info, rID, layerChain); err != nil { - logrus.Warnf("Failed to Deactivate %s: %s", rID, err) - } - } - - arch, err := d.exportLayer(rID, layerChain) - if err != nil { - prepare() - return - } - return ioutils.NewReadCloserWrapper(arch, func() error { - err := arch.Close() - prepare() - return err - }), nil -} - -// Changes produces a list of changes between the specified layer -// and its parent layer. If parent is "", then all changes will be ADD changes. 
-// The layer should be mounted when calling this function.
-func (d *Driver) Changes(id, parent string) ([]archive.Change, error) {
-    rID, err := d.resolveID(id)
-    if err != nil {
-        return nil, err
-    }
-    parentChain, err := d.getLayerChain(rID)
-    if err != nil {
-        return nil, err
-    }
-
-    // This assumes that the layer is unmounted
-    if err := hcsshim.UnprepareLayer(d.info, rID); err != nil {
-        return nil, err
-    }
-    defer func() {
-        if err := hcsshim.PrepareLayer(d.info, rID, parentChain); err != nil {
-            logrus.Warnf("Failed to Deactivate %s: %s", rID, err)
-        }
-    }()
-
-    var changes []archive.Change
-    err = winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
-        r, err := hcsshim.NewLayerReader(d.info, id, parentChain)
-        if err != nil {
-            return err
-        }
-        defer r.Close()
-
-        for {
-            name, _, fileInfo, err := r.Next()
-            if err == io.EOF {
-                return nil
-            }
-            if err != nil {
-                return err
-            }
-            name = filepath.ToSlash(name)
-            if fileInfo == nil {
-                changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeDelete})
-            } else {
-                // Currently there is no way to tell an add from a modify.
-                changes = append(changes, archive.Change{Path: name, Kind: archive.ChangeModify})
-            }
-        }
-    })
-    if err != nil {
-        return nil, err
-    }
-
-    return changes, nil
-}
-
-// ApplyDiff extracts the changeset from the given diff into the
-// layer with the specified id and parent, returning the size of the
-// new layer in bytes.
-// The layer should not be mounted when calling this function.
-func (d *Driver) ApplyDiff(id, parent string, diff archive.Reader) (int64, error) {
-    var layerChain []string
-    if parent != "" {
-        rPId, err := d.resolveID(parent)
-        if err != nil {
-            return 0, err
-        }
-        parentChain, err := d.getLayerChain(rPId)
-        if err != nil {
-            return 0, err
-        }
-        parentPath, err := hcsshim.GetLayerMountPath(d.info, rPId)
-        if err != nil {
-            return 0, err
-        }
-        layerChain = append(layerChain, parentPath)
-        layerChain = append(layerChain, parentChain...)
-    }
-
-    size, err := d.importLayer(id, diff, layerChain)
-    if err != nil {
-        return 0, err
-    }
-
-    if err = d.setLayerChain(id, layerChain); err != nil {
-        return 0, err
-    }
-
-    return size, nil
-}
-
-// DiffSize calculates the changes between the specified layer
-// and its parent and returns the size in bytes of the changes
-// relative to its base filesystem directory.
-func (d *Driver) DiffSize(id, parent string) (size int64, err error) {
-    rPId, err := d.resolveID(parent)
-    if err != nil {
-        return
-    }
-
-    changes, err := d.Changes(id, rPId)
-    if err != nil {
-        return
-    }
-
-    layerFs, err := d.Get(id, "")
-    if err != nil {
-        return
-    }
-    defer d.Put(id)
-
-    return archive.ChangesSize(layerFs, changes), nil
-}
-
-// CustomImageInfo is the object returned by the driver describing the base
-// image.
-type CustomImageInfo struct {
-    ID          string
-    Name        string
-    Version     string
-    Path        string
-    Size        int64
-    CreatedTime time.Time
-    OSVersion   string   `json:"-"`
-    OSFeatures  []string `json:"-"`
-}
-
-// GetCustomImageInfos returns the image infos for Windows-specific
-// base images, which should always be present.
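The ID derivation used below is compact enough to show on its own: the base image's folder name is hashed with SHA-512/384 and truncated to 32 bytes, giving a 64-character hex string the same width as a SHA-256 image ID. A runnable sketch (the sample path is illustrative):

package main

import (
	"crypto/sha512"
	"fmt"
	"path/filepath"
)

// idFromFolder turns a base-image folder name into a docker-style ID:
// SHA-512/384 of the name, truncated to its first 32 bytes, hex encoded.
func idFromFolder(path string) string {
	folderName := filepath.Base(path)
	h := sha512.Sum384([]byte(folderName))
	return fmt.Sprintf("%x", h[:32])
}

func main() {
	fmt.Println(idFromFolder(`C:\ProgramData\Microsoft\Windows\Images\NanoServer`))
}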
-func (d *Driver) GetCustomImageInfos() ([]CustomImageInfo, error) { - strData, err := hcsshim.GetSharedBaseImages() - if err != nil { - return nil, fmt.Errorf("Failed to restore base images: %s", err) - } - - type customImageInfoList struct { - Images []CustomImageInfo - } - - var infoData customImageInfoList - - if err = json.Unmarshal([]byte(strData), &infoData); err != nil { - err = fmt.Errorf("JSON unmarshal returned error=%s", err) - logrus.Error(err) - return nil, err - } - - var images []CustomImageInfo - - for _, imageData := range infoData.Images { - folderName := filepath.Base(imageData.Path) - - // Use crypto hash of the foldername to generate a docker style id. - h := sha512.Sum384([]byte(folderName)) - id := fmt.Sprintf("%x", h[:32]) - - if err := d.Create(id, "", "", nil); err != nil { - return nil, err - } - // Create the alternate ID file. - if err := d.setID(id, folderName); err != nil { - return nil, err - } - - imageData.ID = id - - // For now, hard code that all base images except nanoserver depend on win32k support - if imageData.Name != "NanoServer" { - imageData.OSFeatures = append(imageData.OSFeatures, "win32k") - } - - versionData := strings.Split(imageData.Version, ".") - if len(versionData) != 4 { - logrus.Warnf("Could not parse Windows version %s", imageData.Version) - } else { - // Include just major.minor.build, skip the fourth version field, which does not influence - // OS compatibility. - imageData.OSVersion = strings.Join(versionData[:3], ".") - } - - images = append(images, imageData) - } - - return images, nil -} - -// GetMetadata returns custom driver information. -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - m := make(map[string]string) - m["dir"] = d.dir(id) - return m, nil -} - -func writeTarFromLayer(r hcsshim.LayerReader, w io.Writer) error { - t := tar.NewWriter(w) - for { - name, size, fileInfo, err := r.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - if fileInfo == nil { - // Write a whiteout file. - hdr := &tar.Header{ - Name: filepath.ToSlash(filepath.Join(filepath.Dir(name), archive.WhiteoutPrefix+filepath.Base(name))), - } - err := t.WriteHeader(hdr) - if err != nil { - return err - } - } else { - err = backuptar.WriteTarFileFromBackupStream(t, r, name, size, fileInfo) - if err != nil { - return err - } - } - } - return t.Close() -} - -// exportLayer generates an archive from a layer based on the given ID. 
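exportLayer below returns the read end of an io.Pipe immediately and fills the write end from a goroutine, so the archive is streamed rather than buffered in memory, and CloseWithError propagates any producer failure to the consumer's next Read. A minimal sketch of that pattern:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

// streamArchive hands back a reader right away; the payload is produced
// concurrently. CloseWithError(nil) behaves like Close, so the reader
// sees io.EOF on success and the real error otherwise.
func streamArchive() io.ReadCloser {
	r, w := io.Pipe()
	go func() {
		_, err := w.Write([]byte("tar bytes would go here"))
		w.CloseWithError(err)
	}()
	return r
}

func main() {
	data, err := ioutil.ReadAll(streamArchive())
	fmt.Println(string(data), err)
}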
-func (d *Driver) exportLayer(id string, parentLayerPaths []string) (archive.Archive, error) { - archive, w := io.Pipe() - go func() { - err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error { - r, err := hcsshim.NewLayerReader(d.info, id, parentLayerPaths) - if err != nil { - return err - } - - err = writeTarFromLayer(r, w) - cerr := r.Close() - if err == nil { - err = cerr - } - return err - }) - w.CloseWithError(err) - }() - - return archive, nil -} - -func writeLayerFromTar(r archive.Reader, w hcsshim.LayerWriter) (int64, error) { - t := tar.NewReader(r) - hdr, err := t.Next() - totalSize := int64(0) - buf := bufio.NewWriter(nil) - for err == nil { - base := path.Base(hdr.Name) - if strings.HasPrefix(base, archive.WhiteoutPrefix) { - name := path.Join(path.Dir(hdr.Name), base[len(archive.WhiteoutPrefix):]) - err = w.Remove(filepath.FromSlash(name)) - if err != nil { - return 0, err - } - hdr, err = t.Next() - } else if hdr.Typeflag == tar.TypeLink { - err = w.AddLink(filepath.FromSlash(hdr.Name), filepath.FromSlash(hdr.Linkname)) - if err != nil { - return 0, err - } - hdr, err = t.Next() - } else { - var ( - name string - size int64 - fileInfo *winio.FileBasicInfo - ) - name, size, fileInfo, err = backuptar.FileInfoFromHeader(hdr) - if err != nil { - return 0, err - } - err = w.Add(filepath.FromSlash(name), fileInfo) - if err != nil { - return 0, err - } - buf.Reset(w) - - // Add the Hyper-V Virtual Machine group ACE to the security descriptor - // for TP5 so that Xenons can access all files. This is not necessary - // for post-TP5 builds. - if isTP5OrOlder() { - if sddl, ok := hdr.Winheaders["sd"]; ok { - var ace string - if hdr.Typeflag == tar.TypeDir { - ace = "(A;OICI;0x1200a9;;;S-1-5-83-0)" - } else { - ace = "(A;;0x1200a9;;;S-1-5-83-0)" - } - if hdr.Winheaders["sd"], ok = addAceToSddlDacl(sddl, ace); !ok { - logrus.Debugf("failed to add VM ACE to %s", sddl) - } - } - } - - hdr, err = backuptar.WriteBackupStreamFromTarFile(buf, t, hdr) - ferr := buf.Flush() - if ferr != nil { - err = ferr - } - totalSize += size - } - } - if err != io.EOF { - return 0, err - } - return totalSize, nil -} - -func addAceToSddlDacl(sddl, ace string) (string, bool) { - daclStart := strings.Index(sddl, "D:") - if daclStart < 0 { - return sddl, false - } - - dacl := sddl[daclStart:] - daclEnd := strings.Index(dacl, "S:") - if daclEnd < 0 { - daclEnd = len(dacl) - } - dacl = dacl[:daclEnd] - - if strings.Contains(dacl, ace) { - return sddl, true - } - - i := 2 - for i+1 < len(dacl) { - if dacl[i] != '(' { - return sddl, false - } - - if dacl[i+1] == 'A' { - break - } - - i += 2 - for p := 1; i < len(dacl) && p > 0; i++ { - if dacl[i] == '(' { - p++ - } else if dacl[i] == ')' { - p-- - } - } - } - - return sddl[:daclStart+i] + ace + sddl[daclStart+i:], true -} - -// importLayer adds a new layer to the tag and graph store based on the given data. -func (d *Driver) importLayer(id string, layerData archive.Reader, parentLayerPaths []string) (size int64, err error) { - cmd := reexec.Command(append([]string{"docker-windows-write-layer", d.info.HomeDir, id}, parentLayerPaths...)...) 
- output := bytes.NewBuffer(nil) - cmd.Stdin = layerData - cmd.Stdout = output - cmd.Stderr = output - - if err = cmd.Start(); err != nil { - return - } - - if err = cmd.Wait(); err != nil { - return 0, fmt.Errorf("re-exec error: %v: output: %s", err, output) - } - - return strconv.ParseInt(output.String(), 10, 64) -} - -// writeLayer is the re-exec entry point for writing a layer from a tar file -func writeLayer() { - home := os.Args[1] - id := os.Args[2] - parentLayerPaths := os.Args[3:] - - err := func() error { - err := winio.EnableProcessPrivileges([]string{winio.SeBackupPrivilege, winio.SeRestorePrivilege}) - if err != nil { - return err - } - - info := hcsshim.DriverInfo{ - Flavour: filterDriver, - HomeDir: home, - } - - w, err := hcsshim.NewLayerWriter(info, id, parentLayerPaths) - if err != nil { - return err - } - - size, err := writeLayerFromTar(os.Stdin, w) - if err != nil { - return err - } - - err = w.Close() - if err != nil { - return err - } - - fmt.Fprint(os.Stdout, size) - return nil - }() - - if err != nil { - fmt.Fprint(os.Stderr, err) - os.Exit(1) - } -} - -// resolveID computes the layerID information based on the given id. -func (d *Driver) resolveID(id string) (string, error) { - content, err := ioutil.ReadFile(filepath.Join(d.dir(id), "layerID")) - if os.IsNotExist(err) { - return id, nil - } else if err != nil { - return "", err - } - return string(content), nil -} - -// setID stores the layerId in disk. -func (d *Driver) setID(id, altID string) error { - err := ioutil.WriteFile(filepath.Join(d.dir(id), "layerId"), []byte(altID), 0600) - if err != nil { - return err - } - return nil -} - -// getLayerChain returns the layer chain information. -func (d *Driver) getLayerChain(id string) ([]string, error) { - jPath := filepath.Join(d.dir(id), "layerchain.json") - content, err := ioutil.ReadFile(jPath) - if os.IsNotExist(err) { - return nil, nil - } else if err != nil { - return nil, fmt.Errorf("Unable to read layerchain file - %s", err) - } - - var layerChain []string - err = json.Unmarshal(content, &layerChain) - if err != nil { - return nil, fmt.Errorf("Failed to unmarshall layerchain json - %s", err) - } - - return layerChain, nil -} - -// setLayerChain stores the layer chain information in disk. -func (d *Driver) setLayerChain(id string, chain []string) error { - content, err := json.Marshal(&chain) - if err != nil { - return fmt.Errorf("Failed to marshall layerchain json - %s", err) - } - - jPath := filepath.Join(d.dir(id), "layerchain.json") - err = ioutil.WriteFile(jPath, content, 0600) - if err != nil { - return fmt.Errorf("Unable to write layerchain file - %s", err) - } - - return nil -} - -type fileGetCloserWithBackupPrivileges struct { - path string -} - -func (fg *fileGetCloserWithBackupPrivileges) Get(filename string) (io.ReadCloser, error) { - var f *os.File - // Open the file while holding the Windows backup privilege. This ensures that the - // file can be opened even if the caller does not actually have access to it according - // to the security descriptor. 
-    err := winio.RunWithPrivilege(winio.SeBackupPrivilege, func() error {
-        path := longpath.AddPrefix(filepath.Join(fg.path, filename))
-        p, err := syscall.UTF16FromString(path)
-        if err != nil {
-            return err
-        }
-        h, err := syscall.CreateFile(&p[0], syscall.GENERIC_READ, syscall.FILE_SHARE_READ, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
-        if err != nil {
-            return &os.PathError{Op: "open", Path: path, Err: err}
-        }
-        f = os.NewFile(uintptr(h), path)
-        return nil
-    })
-    return f, err
-}
-
-func (fg *fileGetCloserWithBackupPrivileges) Close() error {
-    return nil
-}
-
-type fileGetDestroyCloser struct {
-    storage.FileGetter
-    path string
-}
-
-func (f *fileGetDestroyCloser) Close() error {
-    // TODO: activate layers and release here?
-    return os.RemoveAll(f.path)
-}
-
-// DiffGetter returns a FileGetCloser that can read files from the directory that
-// contains files for the layer differences. Used for direct access for tar-split.
-func (d *Driver) DiffGetter(id string) (graphdriver.FileGetCloser, error) {
-    id, err := d.resolveID(id)
-    if err != nil {
-        return nil, err
-    }
-
-    return &fileGetCloserWithBackupPrivileges{d.dir(id)}, nil
-}
diff --git a/daemon/graphdriver/windows/windows_windows_test.go b/daemon/graphdriver/windows/windows_windows_test.go
deleted file mode 100644
index 911a36251f..0000000000
--- a/daemon/graphdriver/windows/windows_windows_test.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package windows
-
-import "testing"
-
-func TestAddAceToSddlDacl(t *testing.T) {
-    cases := [][3]string{
-        {"D:", "(A;;;)", "D:(A;;;)"},
-        {"D:(A;;;)", "(A;;;)", "D:(A;;;)"},
-        {"O:D:(A;;;stuff)", "(A;;;new)", "O:D:(A;;;new)(A;;;stuff)"},
-        {"O:D:(D;;;no)(A;;;stuff)", "(A;;;new)", "O:D:(D;;;no)(A;;;new)(A;;;stuff)"},
-    }
-
-    for _, c := range cases {
-        if newSddl, worked := addAceToSddlDacl(c[0], c[1]); !worked || newSddl != c[2] {
-            t.Errorf("%s + %s == %s, expected %s (%v)", c[0], c[1], newSddl, c[2], worked)
-        }
-    }
-}
diff --git a/daemon/graphdriver/zfs/MAINTAINERS b/daemon/graphdriver/zfs/MAINTAINERS
deleted file mode 100644
index 9c270c541f..0000000000
--- a/daemon/graphdriver/zfs/MAINTAINERS
+++ /dev/null
@@ -1,2 +0,0 @@
-Jörg Thalheim (@Mic92)
-Arthur Gautier (@baloose)
diff --git a/daemon/graphdriver/zfs/zfs.go b/daemon/graphdriver/zfs/zfs.go
deleted file mode 100644
index 2a50623a60..0000000000
--- a/daemon/graphdriver/zfs/zfs.go
+++ /dev/null
@@ -1,412 +0,0 @@
-// +build linux freebsd solaris
-
-package zfs
-
-import (
-    "fmt"
-    "os"
-    "os/exec"
-    "path"
-    "strconv"
-    "strings"
-    "sync"
-    "syscall"
-    "time"
-
-    "github.com/Sirupsen/logrus"
-    "github.com/docker/docker/daemon/graphdriver"
-    "github.com/docker/docker/pkg/idtools"
-    "github.com/docker/docker/pkg/mount"
-    "github.com/docker/docker/pkg/parsers"
-    zfs "github.com/mistifyio/go-zfs"
-    "github.com/opencontainers/runc/libcontainer/label"
-)
-
-type zfsOptions struct {
-    fsName    string
-    mountPath string
-}
-
-func init() {
-    graphdriver.Register("zfs", Init)
-}
-
-// Logger returns a zfs logger implementation.
-type Logger struct{}
-
-// Log wraps log message from ZFS driver with a prefix '[zfs]'.
-func (*Logger) Log(cmd []string) {
-    logrus.Debugf("[zfs] %s", strings.Join(cmd, " "))
-}
-
-// Init returns a new ZFS driver.
-// It takes the base mount path and an array of options which are represented as key value pairs.
-// Each option is in the form key=value. 'zfs.fsname' is expected to be a valid key in the options.
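Driver options arrive as "key=value" strings, for example "zfs.fsname=zroot/docker" passed to dockerd via --storage-opt. A sketch of the parsing that Init performs through parsers.ParseKeyValueOpt, reduced here to a standard-library split:

package main

import (
	"fmt"
	"strings"
)

// parseFsName extracts the one option this driver understands and rejects
// everything else, mirroring parseOptions below.
func parseFsName(opts []string) (string, error) {
	fsName := ""
	for _, o := range opts {
		parts := strings.SplitN(o, "=", 2)
		if len(parts) != 2 {
			return "", fmt.Errorf("bad option, should be key=value: %s", o)
		}
		switch strings.ToLower(parts[0]) {
		case "zfs.fsname":
			fsName = parts[1]
		default:
			return "", fmt.Errorf("Unknown option %s", parts[0])
		}
	}
	return fsName, nil
}

func main() {
	fmt.Println(parseFsName([]string{"zfs.fsname=zroot/docker"}))
}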
-func Init(base string, opt []string, uidMaps, gidMaps []idtools.IDMap) (graphdriver.Driver, error) { - var err error - - if _, err := exec.LookPath("zfs"); err != nil { - logrus.Debugf("[zfs] zfs command is not available: %v", err) - return nil, graphdriver.ErrPrerequisites - } - - file, err := os.OpenFile("/dev/zfs", os.O_RDWR, 600) - if err != nil { - logrus.Debugf("[zfs] cannot open /dev/zfs: %v", err) - return nil, graphdriver.ErrPrerequisites - } - defer file.Close() - - options, err := parseOptions(opt) - if err != nil { - return nil, err - } - options.mountPath = base - - rootdir := path.Dir(base) - - if options.fsName == "" { - err = checkRootdirFs(rootdir) - if err != nil { - return nil, err - } - } - - if options.fsName == "" { - options.fsName, err = lookupZfsDataset(rootdir) - if err != nil { - return nil, err - } - } - - zfs.SetLogger(new(Logger)) - - filesystems, err := zfs.Filesystems(options.fsName) - if err != nil { - return nil, fmt.Errorf("Cannot find root filesystem %s: %v", options.fsName, err) - } - - filesystemsCache := make(map[string]bool, len(filesystems)) - var rootDataset *zfs.Dataset - for _, fs := range filesystems { - if fs.Name == options.fsName { - rootDataset = fs - } - filesystemsCache[fs.Name] = true - } - - if rootDataset == nil { - return nil, fmt.Errorf("BUG: zfs get all -t filesystem -rHp '%s' should contain '%s'", options.fsName, options.fsName) - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(uidMaps, gidMaps) - if err != nil { - return nil, fmt.Errorf("Failed to get root uid/guid: %v", err) - } - if err := idtools.MkdirAllAs(base, 0700, rootUID, rootGID); err != nil { - return nil, fmt.Errorf("Failed to create '%s': %v", base, err) - } - - if err := mount.MakePrivate(base); err != nil { - return nil, err - } - d := &Driver{ - dataset: rootDataset, - options: options, - filesystemsCache: filesystemsCache, - uidMaps: uidMaps, - gidMaps: gidMaps, - ctr: graphdriver.NewRefCounter(graphdriver.NewDefaultChecker()), - } - return graphdriver.NewNaiveDiffDriver(d, uidMaps, gidMaps), nil -} - -func parseOptions(opt []string) (zfsOptions, error) { - var options zfsOptions - options.fsName = "" - for _, option := range opt { - key, val, err := parsers.ParseKeyValueOpt(option) - if err != nil { - return options, err - } - key = strings.ToLower(key) - switch key { - case "zfs.fsname": - options.fsName = val - default: - return options, fmt.Errorf("Unknown option %s", key) - } - } - return options, nil -} - -func lookupZfsDataset(rootdir string) (string, error) { - var stat syscall.Stat_t - if err := syscall.Stat(rootdir, &stat); err != nil { - return "", fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - wantedDev := stat.Dev - - mounts, err := mount.GetMounts() - if err != nil { - return "", err - } - for _, m := range mounts { - if err := syscall.Stat(m.Mountpoint, &stat); err != nil { - logrus.Debugf("[zfs] failed to stat '%s' while scanning for zfs mount: %v", m.Mountpoint, err) - continue // may fail on fuse file systems - } - - if stat.Dev == wantedDev && m.Fstype == "zfs" { - return m.Source, nil - } - } - - return "", fmt.Errorf("Failed to find zfs dataset mounted on '%s' in /proc/mounts", rootdir) -} - -// Driver holds information about the driver, such as zfs dataset, options and cache. 
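The Driver below embeds a sync.Mutex to guard filesystemsCache, letting Exists answer from memory instead of shelling out to the zfs command on every call. A reduced sketch of that pattern:

package main

import (
	"fmt"
	"sync"
)

// cachedDriver keeps an in-memory set of known dataset names. The embedded
// mutex makes Lock/Unlock methods available on the struct itself.
type cachedDriver struct {
	sync.Mutex  // protects filesystems
	filesystems map[string]bool
}

func (d *cachedDriver) markExists(name string) {
	d.Lock()
	d.filesystems[name] = true
	d.Unlock()
}

func (d *cachedDriver) exists(name string) bool {
	d.Lock()
	defer d.Unlock()
	return d.filesystems[name]
}

func main() {
	d := &cachedDriver{filesystems: map[string]bool{}}
	d.markExists("zroot/docker/abc")
	fmt.Println(d.exists("zroot/docker/abc"))
}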
-type Driver struct { - dataset *zfs.Dataset - options zfsOptions - sync.Mutex // protects filesystem cache against concurrent access - filesystemsCache map[string]bool - uidMaps []idtools.IDMap - gidMaps []idtools.IDMap - ctr *graphdriver.RefCounter -} - -func (d *Driver) String() string { - return "zfs" -} - -// Cleanup is used to implement graphdriver.ProtoDriver. There is no cleanup required for this driver. -func (d *Driver) Cleanup() error { - return nil -} - -// Status returns information about the ZFS filesystem. It returns a two dimensional array of information -// such as pool name, dataset name, disk usage, parent quota and compression used. -// Currently it return 'Zpool', 'Zpool Health', 'Parent Dataset', 'Space Used By Parent', -// 'Space Available', 'Parent Quota' and 'Compression'. -func (d *Driver) Status() [][2]string { - parts := strings.Split(d.dataset.Name, "/") - pool, err := zfs.GetZpool(parts[0]) - - var poolName, poolHealth string - if err == nil { - poolName = pool.Name - poolHealth = pool.Health - } else { - poolName = fmt.Sprintf("error while getting pool information %v", err) - poolHealth = "not available" - } - - quota := "no" - if d.dataset.Quota != 0 { - quota = strconv.FormatUint(d.dataset.Quota, 10) - } - - return [][2]string{ - {"Zpool", poolName}, - {"Zpool Health", poolHealth}, - {"Parent Dataset", d.dataset.Name}, - {"Space Used By Parent", strconv.FormatUint(d.dataset.Used, 10)}, - {"Space Available", strconv.FormatUint(d.dataset.Avail, 10)}, - {"Parent Quota", quota}, - {"Compression", d.dataset.Compression}, - } -} - -// GetMetadata returns image/container metadata related to graph driver -func (d *Driver) GetMetadata(id string) (map[string]string, error) { - return nil, nil -} - -func (d *Driver) cloneFilesystem(name, parentName string) error { - snapshotName := fmt.Sprintf("%d", time.Now().Nanosecond()) - parentDataset := zfs.Dataset{Name: parentName} - snapshot, err := parentDataset.Snapshot(snapshotName /*recursive */, false) - if err != nil { - return err - } - - _, err = snapshot.Clone(name, map[string]string{"mountpoint": "legacy"}) - if err == nil { - d.Lock() - d.filesystemsCache[name] = true - d.Unlock() - } - - if err != nil { - snapshot.Destroy(zfs.DestroyDeferDeletion) - return err - } - return snapshot.Destroy(zfs.DestroyDeferDeletion) -} - -func (d *Driver) zfsPath(id string) string { - return d.options.fsName + "/" + id -} - -func (d *Driver) mountPath(id string) string { - return path.Join(d.options.mountPath, "graph", getMountpoint(id)) -} - -// CreateReadWrite creates a layer that is writable for use as a container -// file system. -func (d *Driver) CreateReadWrite(id, parent, mountLabel string, storageOpt map[string]string) error { - return d.Create(id, parent, mountLabel, storageOpt) -} - -// Create prepares the dataset and filesystem for the ZFS driver for the given id under the parent. 
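Layer creation in this driver is snapshot-plus-clone: snapshot the parent dataset, clone the snapshot as the child, then destroy the snapshot with deferred deletion so it disappears once the clone no longer depends on it. A sketch using the same github.com/mistifyio/go-zfs calls that cloneFilesystem above uses; the dataset names are illustrative and this only runs on a host with ZFS:

package main

import zfs "github.com/mistifyio/go-zfs"

// cloneLayer makes childName a copy-on-write clone of parentName.
func cloneLayer(parentName, childName string) error {
	parent := zfs.Dataset{Name: parentName}
	snapshot, err := parent.Snapshot("layer" /* snapshot name */, false /* not recursive */)
	if err != nil {
		return err
	}
	if _, err := snapshot.Clone(childName, map[string]string{"mountpoint": "legacy"}); err != nil {
		snapshot.Destroy(zfs.DestroyDeferDeletion)
		return err
	}
	// Deferred deletion: the snapshot lingers only as long as the clone needs it.
	return snapshot.Destroy(zfs.DestroyDeferDeletion)
}

func main() {
	if err := cloneLayer("zroot/docker/parent", "zroot/docker/child"); err != nil {
		panic(err)
	}
}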
-func (d *Driver) Create(id string, parent string, mountLabel string, storageOpt map[string]string) error { - err := d.create(id, parent, storageOpt) - if err == nil { - return nil - } - if zfsError, ok := err.(*zfs.Error); ok { - if !strings.HasSuffix(zfsError.Stderr, "dataset already exists\n") { - return err - } - // aborted build -> cleanup - } else { - return err - } - - dataset := zfs.Dataset{Name: d.zfsPath(id)} - if err := dataset.Destroy(zfs.DestroyRecursiveClones); err != nil { - return err - } - - // retry - return d.create(id, parent, storageOpt) -} - -func (d *Driver) create(id, parent string, storageOpt map[string]string) error { - name := d.zfsPath(id) - quota, err := parseStorageOpt(storageOpt) - if err != nil { - return err - } - if parent == "" { - mountoptions := map[string]string{"mountpoint": "legacy"} - fs, err := zfs.CreateFilesystem(name, mountoptions) - if err == nil { - err = setQuota(name, quota) - if err == nil { - d.Lock() - d.filesystemsCache[fs.Name] = true - d.Unlock() - } - } - return err - } - err = d.cloneFilesystem(name, d.zfsPath(parent)) - if err == nil { - err = setQuota(name, quota) - } - return err -} - -func parseStorageOpt(storageOpt map[string]string) (string, error) { - // Read size to change the disk quota per container - for k, v := range storageOpt { - key := strings.ToLower(k) - switch key { - case "size": - return v, nil - default: - return "0", fmt.Errorf("Unknown option %s", key) - } - } - return "0", nil -} - -func setQuota(name string, quota string) error { - if quota == "0" { - return nil - } - fs, err := zfs.GetDataset(name) - if err != nil { - return err - } - return fs.SetProperty("quota", quota) -} - -// Remove deletes the dataset, filesystem and the cache for the given id. -func (d *Driver) Remove(id string) error { - name := d.zfsPath(id) - dataset := zfs.Dataset{Name: name} - err := dataset.Destroy(zfs.DestroyRecursive) - if err == nil { - d.Lock() - delete(d.filesystemsCache, name) - d.Unlock() - } - return err -} - -// Get returns the mountpoint for the given id after creating the target directories if necessary. -func (d *Driver) Get(id, mountLabel string) (string, error) { - mountpoint := d.mountPath(id) - if count := d.ctr.Increment(mountpoint); count > 1 { - return mountpoint, nil - } - - filesystem := d.zfsPath(id) - options := label.FormatMountLabel("", mountLabel) - logrus.Debugf(`[zfs] mount("%s", "%s", "%s")`, filesystem, mountpoint, options) - - rootUID, rootGID, err := idtools.GetRootUIDGID(d.uidMaps, d.gidMaps) - if err != nil { - d.ctr.Decrement(mountpoint) - return "", err - } - // Create the target directories if they don't exist - if err := idtools.MkdirAllAs(mountpoint, 0755, rootUID, rootGID); err != nil { - d.ctr.Decrement(mountpoint) - return "", err - } - - if err := mount.Mount(filesystem, mountpoint, "zfs", options); err != nil { - d.ctr.Decrement(mountpoint) - return "", fmt.Errorf("error creating zfs mount of %s to %s: %v", filesystem, mountpoint, err) - } - - // this could be our first mount after creation of the filesystem, and the root dir may still have root - // permissions instead of the remapped root uid:gid (if user namespaces are enabled): - if err := os.Chown(mountpoint, rootUID, rootGID); err != nil { - mount.Unmount(mountpoint) - d.ctr.Decrement(mountpoint) - return "", fmt.Errorf("error modifying zfs mountpoint (%s) directory ownership: %v", mountpoint, err) - } - - return mountpoint, nil -} - -// Put removes the existing mountpoint for the given id if it exists. 
-func (d *Driver) Put(id string) error { - mountpoint := d.mountPath(id) - if count := d.ctr.Decrement(mountpoint); count > 0 { - return nil - } - mounted, err := graphdriver.Mounted(graphdriver.FsMagicZfs, mountpoint) - if err != nil || !mounted { - return err - } - - logrus.Debugf(`[zfs] unmount("%s")`, mountpoint) - - if err := mount.Unmount(mountpoint); err != nil { - return fmt.Errorf("error unmounting to %s: %v", mountpoint, err) - } - return nil -} - -// Exists checks to see if the cache entry exists for the given id. -func (d *Driver) Exists(id string) bool { - d.Lock() - defer d.Unlock() - return d.filesystemsCache[d.zfsPath(id)] == true -} diff --git a/daemon/graphdriver/zfs/zfs_freebsd.go b/daemon/graphdriver/zfs/zfs_freebsd.go deleted file mode 100644 index 1c05fa794c..0000000000 --- a/daemon/graphdriver/zfs/zfs_freebsd.go +++ /dev/null @@ -1,38 +0,0 @@ -package zfs - -import ( - "fmt" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" -) - -func checkRootdirFs(rootdir string) error { - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { - return fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - - // on FreeBSD buf.Fstypename contains ['z', 'f', 's', 0 ... ] - if (buf.Fstypename[0] != 122) || (buf.Fstypename[1] != 102) || (buf.Fstypename[2] != 115) || (buf.Fstypename[3] != 0) { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - return graphdriver.ErrPrerequisites - } - - return nil -} - -func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return id[:maxlen] + "-" + suffix[1] - } - - return id[:maxlen] -} diff --git a/daemon/graphdriver/zfs/zfs_linux.go b/daemon/graphdriver/zfs/zfs_linux.go deleted file mode 100644 index 52ed516049..0000000000 --- a/daemon/graphdriver/zfs/zfs_linux.go +++ /dev/null @@ -1,27 +0,0 @@ -package zfs - -import ( - "fmt" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" -) - -func checkRootdirFs(rootdir string) error { - var buf syscall.Statfs_t - if err := syscall.Statfs(rootdir, &buf); err != nil { - return fmt.Errorf("Failed to access '%s': %s", rootdir, err) - } - - if graphdriver.FsMagic(buf.Type) != graphdriver.FsMagicZfs { - logrus.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - return graphdriver.ErrPrerequisites - } - - return nil -} - -func getMountpoint(id string) string { - return id -} diff --git a/daemon/graphdriver/zfs/zfs_solaris.go b/daemon/graphdriver/zfs/zfs_solaris.go deleted file mode 100644 index 0bf6c3d071..0000000000 --- a/daemon/graphdriver/zfs/zfs_solaris.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build solaris,cgo - -package zfs - -/* -#include -#include - -static inline struct statvfs *getstatfs(char *s) { - struct statvfs *buf; - int err; - buf = (struct statvfs *)malloc(sizeof(struct statvfs)); - err = statvfs(s, buf); - return buf; -} -*/ -import "C" -import ( - "path/filepath" - "strings" - "unsafe" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/graphdriver" -) - -func checkRootdirFs(rootdir string) error { - - cs := C.CString(filepath.Dir(rootdir)) - buf := C.getstatfs(cs) - - // on Solaris buf.f_basetype contains ['z', 'f', 's', 0 ... 
] - if (buf.f_basetype[0] != 122) || (buf.f_basetype[1] != 102) || (buf.f_basetype[2] != 115) || - (buf.f_basetype[3] != 0) { - log.Debugf("[zfs] no zfs dataset found for rootdir '%s'", rootdir) - C.free(unsafe.Pointer(buf)) - return graphdriver.ErrPrerequisites - } - - C.free(unsafe.Pointer(buf)) - C.free(unsafe.Pointer(cs)) - return nil -} - -/* rootfs is introduced to comply with the OCI spec -which states that root filesystem must be mounted at /rootfs/ instead of / -*/ -func getMountpoint(id string) string { - maxlen := 12 - - // we need to preserve filesystem suffix - suffix := strings.SplitN(id, "-", 2) - - if len(suffix) > 1 { - return filepath.Join(id[:maxlen]+"-"+suffix[1], "rootfs", "root") - } - - return filepath.Join(id[:maxlen], "rootfs", "root") -} diff --git a/daemon/graphdriver/zfs/zfs_test.go b/daemon/graphdriver/zfs/zfs_test.go deleted file mode 100644 index 3e22928438..0000000000 --- a/daemon/graphdriver/zfs/zfs_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build linux - -package zfs - -import ( - "testing" - - "github.com/docker/docker/daemon/graphdriver/graphtest" -) - -// This avoids creating a new driver for each test if all tests are run -// Make sure to put new tests between TestZfsSetup and TestZfsTeardown -func TestZfsSetup(t *testing.T) { - graphtest.GetDriver(t, "zfs") -} - -func TestZfsCreateEmpty(t *testing.T) { - graphtest.DriverTestCreateEmpty(t, "zfs") -} - -func TestZfsCreateBase(t *testing.T) { - graphtest.DriverTestCreateBase(t, "zfs") -} - -func TestZfsCreateSnap(t *testing.T) { - graphtest.DriverTestCreateSnap(t, "zfs") -} - -func TestZfsSetQuota(t *testing.T) { - graphtest.DriverTestSetQuota(t, "zfs") -} - -func TestZfsTeardown(t *testing.T) { - graphtest.PutDriver(t) -} diff --git a/daemon/graphdriver/zfs/zfs_unsupported.go b/daemon/graphdriver/zfs/zfs_unsupported.go deleted file mode 100644 index ce8daadaf6..0000000000 --- a/daemon/graphdriver/zfs/zfs_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux,!freebsd,!solaris - -package zfs - -func checkRootdirFs(rootdir string) error { - return nil -} - -func getMountpoint(id string) string { - return id -} diff --git a/daemon/health.go b/daemon/health.go deleted file mode 100644 index 734350047f..0000000000 --- a/daemon/health.go +++ /dev/null @@ -1,321 +0,0 @@ -package daemon - -import ( - "bytes" - "fmt" - "runtime" - "strings" - "sync" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/strslice" -) - -const ( - // Longest healthcheck probe output message to store. Longer messages will be truncated. - maxOutputLen = 4096 - - // Default interval between probe runs (from the end of the first to the start of the second). - // Also the time before the first probe. - defaultProbeInterval = 30 * time.Second - - // The maximum length of time a single probe run should take. If the probe takes longer - // than this, the check is considered to have failed. - defaultProbeTimeout = 30 * time.Second - - // Default number of consecutive failures of the health check - // for the container to be considered unhealthy. - defaultProbeRetries = 3 - - // Maximum number of entries to record - maxLogEntries = 5 -) - -const ( - // Exit status codes that can be returned by the probe command. 
- - exitStatusHealthy = 0 // Container is healthy - exitStatusUnhealthy = 1 // Container is unhealthy -) - -// probe implementations know how to run a particular type of probe. -type probe interface { - // Perform one run of the check. Returns the exit code and an optional - // short diagnostic string. - run(context.Context, *Daemon, *container.Container) (*types.HealthcheckResult, error) -} - -// cmdProbe implements the "CMD" probe type. -type cmdProbe struct { - // Run the command with the system's default shell instead of execing it directly. - shell bool -} - -// exec the healthcheck command in the container. -// Returns the exit code and probe output (if any) -func (p *cmdProbe) run(ctx context.Context, d *Daemon, container *container.Container) (*types.HealthcheckResult, error) { - cmdSlice := strslice.StrSlice(container.Config.Healthcheck.Test)[1:] - if p.shell { - if runtime.GOOS != "windows" { - cmdSlice = append([]string{"/bin/sh", "-c"}, cmdSlice...) - } else { - cmdSlice = append([]string{"cmd", "/S", "/C"}, cmdSlice...) - } - } - entrypoint, args := d.getEntrypointAndArgs(strslice.StrSlice{}, cmdSlice) - execConfig := exec.NewConfig() - execConfig.OpenStdin = false - execConfig.OpenStdout = true - execConfig.OpenStderr = true - execConfig.ContainerID = container.ID - execConfig.DetachKeys = []byte{} - execConfig.Entrypoint = entrypoint - execConfig.Args = args - execConfig.Tty = false - execConfig.Privileged = false - execConfig.User = container.Config.User - - d.registerExecCommand(container, execConfig) - d.LogContainerEvent(container, "exec_create: "+execConfig.Entrypoint+" "+strings.Join(execConfig.Args, " ")) - - output := &limitedBuffer{} - err := d.ContainerExecStart(ctx, execConfig.ID, nil, output, output) - if err != nil { - return nil, err - } - info, err := d.getExecConfig(execConfig.ID) - if err != nil { - return nil, err - } - if info.ExitCode == nil { - return nil, fmt.Errorf("Healthcheck has no exit code!") - } - // Note: Go's json package will handle invalid UTF-8 for us - out := output.String() - return &types.HealthcheckResult{ - End: time.Now(), - ExitCode: *info.ExitCode, - Output: out, - }, nil -} - -// Update the container's Status.Health struct based on the latest probe's result. -func handleProbeResult(d *Daemon, c *container.Container, result *types.HealthcheckResult) { - c.Lock() - defer c.Unlock() - - retries := c.Config.Healthcheck.Retries - if retries <= 0 { - retries = defaultProbeRetries - } - - h := c.State.Health - oldStatus := h.Status - - if len(h.Log) >= maxLogEntries { - h.Log = append(h.Log[len(h.Log)+1-maxLogEntries:], result) - } else { - h.Log = append(h.Log, result) - } - - if result.ExitCode == exitStatusHealthy { - h.FailingStreak = 0 - h.Status = types.Healthy - } else { - // Failure (including invalid exit code) - h.FailingStreak++ - if h.FailingStreak >= retries { - h.Status = types.Unhealthy - } - // Else we're starting or healthy. Stay in that state. - } - - if oldStatus != h.Status { - d.LogContainerEvent(c, "health_status: "+h.Status) - } -} - -// Run the container's monitoring thread until notified via "stop". -// There is never more than one monitor thread running per container at a time. 
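[Editorial sketch] handleProbeResult above is a small state machine: a zero exit code resets the failing streak and marks the container healthy, while any other exit code increments the streak and only flips the status to unhealthy once the streak reaches the retry threshold. A minimal stdlib-only sketch of that rule (the type and field names here are stand-ins, not the daemon's own):

package main

import "fmt"

type health struct {
	status        string
	failingStreak int
}

// apply folds one probe exit code into the state, mirroring the rule above.
func (h *health) apply(exitCode, retries int) {
	if exitCode == 0 {
		h.failingStreak = 0
		h.status = "healthy"
		return
	}
	h.failingStreak++
	if h.failingStreak >= retries {
		h.status = "unhealthy"
	}
	// Otherwise keep the current status ("starting" or "healthy") until the
	// streak is long enough.
}

func main() {
	h := &health{status: "starting"}
	for _, code := range []int{1, 1, 0, 1, 1, 1} {
		h.apply(code, 3)
		fmt.Println(h.status, h.failingStreak)
	}
}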
-func monitor(d *Daemon, c *container.Container, stop chan struct{}, probe probe) { - probeTimeout := timeoutWithDefault(c.Config.Healthcheck.Timeout, defaultProbeTimeout) - probeInterval := timeoutWithDefault(c.Config.Healthcheck.Interval, defaultProbeInterval) - for { - select { - case <-stop: - logrus.Debug("Stop healthcheck monitoring (received while idle)") - return - case <-time.After(probeInterval): - logrus.Debug("Running health check...") - startTime := time.Now() - ctx, cancelProbe := context.WithTimeout(context.Background(), probeTimeout) - results := make(chan *types.HealthcheckResult) - go func() { - result, err := probe.run(ctx, d, c) - if err != nil { - logrus.Warnf("Health check error: %v", err) - results <- &types.HealthcheckResult{ - ExitCode: -1, - Output: err.Error(), - Start: startTime, - End: time.Now(), - } - } else { - result.Start = startTime - logrus.Debugf("Health check done (exitCode=%d)", result.ExitCode) - results <- result - } - close(results) - }() - select { - case <-stop: - logrus.Debug("Stop healthcheck monitoring (received while probing)") - // Stop timeout and kill probe, but don't wait for probe to exit. - cancelProbe() - return - case result := <-results: - handleProbeResult(d, c, result) - // Stop timeout - cancelProbe() - case <-ctx.Done(): - logrus.Debug("Health check taking too long") - handleProbeResult(d, c, &types.HealthcheckResult{ - ExitCode: -1, - Output: fmt.Sprintf("Health check exceeded timeout (%v)", probeTimeout), - Start: startTime, - End: time.Now(), - }) - cancelProbe() - // Wait for probe to exit (it might take a while to respond to the TERM - // signal and we don't want dying probes to pile up). - <-results - } - } - } -} - -// Get a suitable probe implementation for the container's healthcheck configuration. -// Nil will be returned if no healthcheck was configured or NONE was set. -func getProbe(c *container.Container) probe { - config := c.Config.Healthcheck - if config == nil || len(config.Test) == 0 { - return nil - } - switch config.Test[0] { - case "CMD": - return &cmdProbe{shell: false} - case "CMD-SHELL": - return &cmdProbe{shell: true} - default: - logrus.Warnf("Unknown healthcheck type '%s' (expected 'CMD')", config.Test[0]) - return nil - } -} - -// Ensure the health-check monitor is running or not, depending on the current -// state of the container. -// Called from monitor.go, with c locked. -func (d *Daemon) updateHealthMonitor(c *container.Container) { - h := c.State.Health - if h == nil { - return // No healthcheck configured - } - - probe := getProbe(c) - wantRunning := c.Running && !c.Paused && probe != nil - if wantRunning { - if stop := h.OpenMonitorChannel(); stop != nil { - go monitor(d, c, stop, probe) - } - } else { - h.CloseMonitorChannel() - } -} - -// Reset the health state for a newly-started, restarted or restored container. -// initHealthMonitor is called from monitor.go and we should never be running -// two instances at once. -// Called with c locked. -func (d *Daemon) initHealthMonitor(c *container.Container) { - // If no healthcheck is setup then don't init the monitor - if getProbe(c) == nil { - return - } - - // This is needed in case we're auto-restarting - d.stopHealthchecks(c) - - if c.State.Health == nil { - h := &container.Health{} - h.Status = types.Starting - c.State.Health = h - } - - d.updateHealthMonitor(c) -} - -// Called when the container is being stopped (whether because the health check is -// failing or for any other reason). 
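[Editorial sketch] updateHealthMonitor above starts at most one monitor goroutine per container and tears it down through a channel (OpenMonitorChannel/CloseMonitorChannel). A stripped-down sketch of that start/stop handshake, stdlib only; in the real code the channel lives inside container.Health:

package main

import (
	"fmt"
	"sync"
	"time"
)

type monitorHandle struct {
	mu   sync.Mutex
	stop chan struct{}
}

// open returns a fresh stop channel, or nil if a monitor is already running.
func (m *monitorHandle) open() chan struct{} {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.stop != nil {
		return nil
	}
	m.stop = make(chan struct{})
	return m.stop
}

// close signals the monitor (if any) to exit and clears the handle.
func (m *monitorHandle) close() {
	m.mu.Lock()
	defer m.mu.Unlock()
	if m.stop != nil {
		close(m.stop)
		m.stop = nil
	}
}

func main() {
	h := &monitorHandle{}
	if stop := h.open(); stop != nil {
		go func() {
			for {
				select {
				case <-stop:
					fmt.Println("monitor stopped")
					return
				case <-time.After(10 * time.Millisecond):
					fmt.Println("probe tick")
				}
			}
		}()
	}
	time.Sleep(35 * time.Millisecond)
	h.close()
	time.Sleep(10 * time.Millisecond)
}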
-func (d *Daemon) stopHealthchecks(c *container.Container) { - h := c.State.Health - if h != nil { - h.CloseMonitorChannel() - } -} - -// Buffer up to maxOutputLen bytes. Further data is discarded. -type limitedBuffer struct { - buf bytes.Buffer - mu sync.Mutex - truncated bool // indicates that data has been lost -} - -// Append to limitedBuffer while there is room. -func (b *limitedBuffer) Write(data []byte) (int, error) { - b.mu.Lock() - defer b.mu.Unlock() - - bufLen := b.buf.Len() - dataLen := len(data) - keep := min(maxOutputLen-bufLen, dataLen) - if keep > 0 { - b.buf.Write(data[:keep]) - } - if keep < dataLen { - b.truncated = true - } - return dataLen, nil -} - -// The contents of the buffer, with "..." appended if it overflowed. -func (b *limitedBuffer) String() string { - b.mu.Lock() - defer b.mu.Unlock() - - out := b.buf.String() - if b.truncated { - out = out + "..." - } - return out -} - -// If configuredValue is zero, use defaultValue instead. -func timeoutWithDefault(configuredValue time.Duration, defaultValue time.Duration) time.Duration { - if configuredValue == 0 { - return defaultValue - } - return configuredValue -} - -func min(x, y int) int { - if x < y { - return x - } - return y -} diff --git a/daemon/health_test.go b/daemon/health_test.go deleted file mode 100644 index caa91da335..0000000000 --- a/daemon/health_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package daemon - -import ( - "testing" - "time" - - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/events" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" - eventtypes "github.com/docker/engine-api/types/events" -) - -func reset(c *container.Container) { - c.State = &container.State{} - c.State.Health = &container.Health{} - c.State.Health.Status = types.Starting -} - -func TestNoneHealthcheck(t *testing.T) { - c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - Healthcheck: &containertypes.HealthConfig{ - Test: []string{"NONE"}, - }, - }, - State: &container.State{}, - }, - } - daemon := &Daemon{} - - daemon.initHealthMonitor(c) - if c.State.Health != nil { - t.Errorf("Expecting Health to be nil, but was not") - } -} - -func TestHealthStates(t *testing.T) { - e := events.New() - _, l, _ := e.Subscribe() - defer e.Evict(l) - - expect := func(expected string) { - select { - case event := <-l: - ev := event.(eventtypes.Message) - if ev.Status != expected { - t.Errorf("Expecting event %#v, but got %#v\n", expected, ev.Status) - } - case <-time.After(1 * time.Second): - t.Errorf("Expecting event %#v, but got nothing\n", expected) - } - } - - c := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: "container_id", - Name: "container_name", - Config: &containertypes.Config{ - Image: "image_name", - }, - }, - } - daemon := &Daemon{ - EventsService: e, - } - - c.Config.Healthcheck = &containertypes.HealthConfig{ - Retries: 1, - } - - reset(c) - - handleResult := func(startTime time.Time, exitCode int) { - handleProbeResult(daemon, c, &types.HealthcheckResult{ - Start: startTime, - End: startTime, - ExitCode: exitCode, - }) - } - - // starting -> failed -> success -> failed - - handleResult(c.State.StartedAt.Add(1*time.Second), 1) - expect("health_status: unhealthy") - - handleResult(c.State.StartedAt.Add(2*time.Second), 0) - expect("health_status: healthy") - - 
handleResult(c.State.StartedAt.Add(3*time.Second), 1) - expect("health_status: unhealthy") - - // Test retries - - reset(c) - c.Config.Healthcheck.Retries = 3 - - handleResult(c.State.StartedAt.Add(20*time.Second), 1) - handleResult(c.State.StartedAt.Add(40*time.Second), 1) - if c.State.Health.Status != types.Starting { - t.Errorf("Expecting starting, but got %#v\n", c.State.Health.Status) - } - if c.State.Health.FailingStreak != 2 { - t.Errorf("Expecting FailingStreak=2, but got %d\n", c.State.Health.FailingStreak) - } - handleResult(c.State.StartedAt.Add(60*time.Second), 1) - expect("health_status: unhealthy") - - handleResult(c.State.StartedAt.Add(80*time.Second), 0) - expect("health_status: healthy") - if c.State.Health.FailingStreak != 0 { - t.Errorf("Expecting FailingStreak=0, but got %d\n", c.State.Health.FailingStreak) - } -} diff --git a/daemon/image.go b/daemon/image.go deleted file mode 100644 index 9a3fa1aeaa..0000000000 --- a/daemon/image.go +++ /dev/null @@ -1,124 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/builder" - "github.com/docker/docker/image" - "github.com/docker/docker/reference" - "github.com/docker/docker/runconfig" - containertypes "github.com/docker/engine-api/types/container" -) - -// ErrImageDoesNotExist is error returned when no image can be found for a reference. -type ErrImageDoesNotExist struct { - RefOrID string -} - -func (e ErrImageDoesNotExist) Error() string { - return fmt.Sprintf("no such id: %s", e.RefOrID) -} - -// GetImageID returns an image ID corresponding to the image referred to by -// refOrID. -func (daemon *Daemon) GetImageID(refOrID string) (image.ID, error) { - id, ref, err := reference.ParseIDOrReference(refOrID) - if err != nil { - return "", err - } - if id != "" { - if _, err := daemon.imageStore.Get(image.ID(id)); err != nil { - return "", ErrImageDoesNotExist{refOrID} - } - return image.ID(id), nil - } - - if id, err := daemon.referenceStore.Get(ref); err == nil { - return id, nil - } - if tagged, ok := ref.(reference.NamedTagged); ok { - if id, err := daemon.imageStore.Search(tagged.Tag()); err == nil { - for _, namedRef := range daemon.referenceStore.References(id) { - if namedRef.Name() == ref.Name() { - return id, nil - } - } - } - } - - // Search based on ID - if id, err := daemon.imageStore.Search(refOrID); err == nil { - return id, nil - } - - return "", ErrImageDoesNotExist{refOrID} -} - -// GetImage returns an image corresponding to the image referred to by refOrID. -func (daemon *Daemon) GetImage(refOrID string) (*image.Image, error) { - imgID, err := daemon.GetImageID(refOrID) - if err != nil { - return nil, err - } - return daemon.imageStore.Get(imgID) -} - -// GetImageOnBuild looks up a Docker image referenced by `name`. -func (daemon *Daemon) GetImageOnBuild(name string) (builder.Image, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, err - } - return img, nil -} - -// GetCachedImage returns the most recent created image that is a child -// of the image with imgID, that had the same config when it was -// created. nil is returned if a child cannot be found. An error is -// returned if the parent image cannot be found. 
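[Editorial sketch] GetCachedImage below scans sibling images and keeps the most recently created one whose build config matches, which is what makes `docker build` cache hits deterministic. The selection rule reduces to a "latest matching candidate" scan, sketched here with placeholder types (runconfig.Compare is replaced by a hypothetical match callback):

package main

import (
	"fmt"
	"time"
)

type img struct {
	id      string
	created time.Time
}

// latestMatch returns the newest candidate satisfying match, or nil if none do.
func latestMatch(candidates []img, match func(img) bool) *img {
	var best *img
	for i := range candidates {
		c := candidates[i]
		if !match(c) {
			continue
		}
		if best == nil || best.created.Before(c.created) {
			best = &c
		}
	}
	return best
}

func main() {
	now := time.Now()
	imgs := []img{
		{"a", now.Add(-2 * time.Hour)},
		{"b", now.Add(-1 * time.Hour)},
		{"c", now.Add(-3 * time.Hour)},
	}
	if m := latestMatch(imgs, func(i img) bool { return i.id != "c" }); m != nil {
		fmt.Println(m.id) // "b": the newest of the matching candidates
	}
}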
-func (daemon *Daemon) GetCachedImage(imgID image.ID, config *containertypes.Config) (*image.Image, error) { - // Loop on the children of the given image and check the config - getMatch := func(siblings []image.ID) (*image.Image, error) { - var match *image.Image - for _, id := range siblings { - img, err := daemon.imageStore.Get(id) - if err != nil { - return nil, fmt.Errorf("unable to find image %q", id) - } - - if runconfig.Compare(&img.ContainerConfig, config) { - // check for the most up to date match - if match == nil || match.Created.Before(img.Created) { - match = img - } - } - } - return match, nil - } - - // In this case, this is `FROM scratch`, which isn't an actual image. - if imgID == "" { - images := daemon.imageStore.Map() - var siblings []image.ID - for id, img := range images { - if img.Parent == imgID { - siblings = append(siblings, id) - } - } - return getMatch(siblings) - } - - // find match from child images - siblings := daemon.imageStore.Children(imgID) - return getMatch(siblings) -} - -// GetCachedImageOnBuild returns a reference to a cached image whose parent equals `parent` -// and runconfig equals `cfg`. A cache miss is expected to return an empty ID and a nil error. -func (daemon *Daemon) GetCachedImageOnBuild(imgID string, cfg *containertypes.Config) (string, error) { - cache, err := daemon.GetCachedImage(image.ID(imgID), cfg) - if cache == nil || err != nil { - return "", err - } - return cache.ID().String(), nil -} diff --git a/daemon/image_delete.go b/daemon/image_delete.go deleted file mode 100644 index 27286503e5..0000000000 --- a/daemon/image_delete.go +++ /dev/null @@ -1,404 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - - "github.com/docker/docker/container" - "github.com/docker/docker/errors" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -type conflictType int - -const ( - conflictDependentChild conflictType = (1 << iota) - conflictRunningContainer - conflictActiveReference - conflictStoppedContainer - conflictHard = conflictDependentChild | conflictRunningContainer - conflictSoft = conflictActiveReference | conflictStoppedContainer -) - -// ImageDelete deletes the image referenced by the given imageRef from this -// daemon. The given imageRef can be an image ID, ID prefix, or a repository -// reference (with an optional tag or digest, defaulting to the tag name -// "latest"). There is differing behavior depending on whether the given -// imageRef is a repository reference or not. -// -// If the given imageRef is a repository reference then that repository -// reference will be removed. However, if there exists any containers which -// were created using the same image reference then the repository reference -// cannot be removed unless either there are other repository references to the -// same image or force is true. Following removal of the repository reference, -// the referenced image itself will attempt to be deleted as described below -// but quietly, meaning any image delete conflicts will cause the image to not -// be deleted and the conflict will not be reported. -// -// There may be conflicts preventing deletion of an image and these conflicts -// are divided into two categories grouped by their severity: -// -// Hard Conflict: -// - a pull or build using the image. -// - any descendant image. -// - any running container using the image. -// -// Soft Conflict: -// - any stopped container using the image. 
-// - any repository tag or digest references to the image. -// -// The image cannot be removed if there are any hard conflicts and can be -// removed if there are soft conflicts only if force is true. -// -// If prune is true, ancestor images will each attempt to be deleted quietly, -// meaning any delete conflicts will cause the image to not be deleted and the -// conflict will not be reported. -// -// FIXME: remove ImageDelete's dependency on Daemon, then move to the graph -// package. This would require that we no longer need the daemon to determine -// whether images are being used by a stopped or running container. -func (daemon *Daemon) ImageDelete(imageRef string, force, prune bool) ([]types.ImageDelete, error) { - records := []types.ImageDelete{} - - imgID, err := daemon.GetImageID(imageRef) - if err != nil { - return nil, daemon.imageNotExistToErrcode(err) - } - - repoRefs := daemon.referenceStore.References(imgID) - - var removedRepositoryRef bool - if !isImageIDPrefix(imgID.String(), imageRef) { - // A repository reference was given and should be removed - // first. We can only remove this reference if either force is - // true, there are multiple repository references to this - // image, or there are no containers using the given reference. - if !force && isSingleReference(repoRefs) { - if container := daemon.getContainerUsingImage(imgID); container != nil { - // If we removed the repository reference then - // this image would remain "dangling" and since - // we really want to avoid that the client must - // explicitly force its removal. - err := fmt.Errorf("conflict: unable to remove repository reference %q (must force) - container %s is using its referenced image %s", imageRef, stringid.TruncateID(container.ID), stringid.TruncateID(imgID.String())) - return nil, errors.NewRequestConflictError(err) - } - } - - parsedRef, err := reference.ParseNamed(imageRef) - if err != nil { - return nil, err - } - - parsedRef, err = daemon.removeImageRef(parsedRef) - if err != nil { - return nil, err - } - - untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} - - daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") - records = append(records, untaggedRecord) - - repoRefs = daemon.referenceStore.References(imgID) - - // If a tag reference was removed and the only remaining - // references to the same repository are digest references, - // then clean up those digest references. 
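[Editorial sketch] The cleanup that follows hinges on type-asserting each reference against reference.Canonical to tell digest references apart from tag references. The mechanics of that interface probing, with stand-in types rather than the real reference package:

package main

import "fmt"

type named interface{ Name() string }

type tagRef struct{ repo, tag string }

func (t tagRef) Name() string { return t.repo }

type digestRef struct{ repo, digest string }

func (d digestRef) Name() string { return d.repo }

func main() {
	refs := []named{tagRef{"ubuntu", "16.04"}, digestRef{"ubuntu", "sha256:abc"}}
	for _, r := range refs {
		// The daemon asserts r.(reference.Canonical) at this point.
		if d, ok := r.(digestRef); ok {
			fmt.Println("digest ref:", d.repo, d.digest)
			continue
		}
		fmt.Println("tag ref:", r.Name())
	}
}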
- if _, isCanonical := parsedRef.(reference.Canonical); !isCanonical { - foundRepoTagRef := false - for _, repoRef := range repoRefs { - if _, repoRefIsCanonical := repoRef.(reference.Canonical); !repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - foundRepoTagRef = true - break - } - } - if !foundRepoTagRef { - // Remove canonical references from same repository - remainingRefs := []reference.Named{} - for _, repoRef := range repoRefs { - if _, repoRefIsCanonical := repoRef.(reference.Canonical); repoRefIsCanonical && parsedRef.Name() == repoRef.Name() { - if _, err := daemon.removeImageRef(repoRef); err != nil { - return records, err - } - - untaggedRecord := types.ImageDelete{Untagged: repoRef.String()} - records = append(records, untaggedRecord) - } else { - remainingRefs = append(remainingRefs, repoRef) - - } - } - repoRefs = remainingRefs - } - } - - // If it has remaining references then the untag finished the remove - if len(repoRefs) > 0 { - return records, nil - } - - removedRepositoryRef = true - } else { - // If an ID reference was given AND there is at most one tag - // reference to the image AND all references are within one - // repository, then remove all references. - if isSingleReference(repoRefs) { - c := conflictHard - if !force { - c |= conflictSoft &^ conflictActiveReference - } - if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { - return nil, conflict - } - - for _, repoRef := range repoRefs { - parsedRef, err := daemon.removeImageRef(repoRef) - if err != nil { - return nil, err - } - - untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} - - daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") - records = append(records, untaggedRecord) - } - } - } - - return records, daemon.imageDeleteHelper(imgID, &records, force, prune, removedRepositoryRef) -} - -// isSingleReference returns true when all references are from one repository -// and there is at most one tag. Returns false for empty input. -func isSingleReference(repoRefs []reference.Named) bool { - if len(repoRefs) <= 1 { - return len(repoRefs) == 1 - } - var singleRef reference.Named - canonicalRefs := map[string]struct{}{} - for _, repoRef := range repoRefs { - if _, isCanonical := repoRef.(reference.Canonical); isCanonical { - canonicalRefs[repoRef.Name()] = struct{}{} - } else if singleRef == nil { - singleRef = repoRef - } else { - return false - } - } - if singleRef == nil { - // Just use first canonical ref - singleRef = repoRefs[0] - } - _, ok := canonicalRefs[singleRef.Name()] - return len(canonicalRefs) == 1 && ok -} - -// isImageIDPrefix returns whether the given possiblePrefix is a prefix of the -// given imageID. -func isImageIDPrefix(imageID, possiblePrefix string) bool { - if strings.HasPrefix(imageID, possiblePrefix) { - return true - } - - if i := strings.IndexRune(imageID, ':'); i >= 0 { - return strings.HasPrefix(imageID[i+1:], possiblePrefix) - } - - return false -} - -// getContainerUsingImage returns a container that was created using the given -// imageID. Returns nil if there is no such container. -func (daemon *Daemon) getContainerUsingImage(imageID image.ID) *container.Container { - return daemon.containers.First(func(c *container.Container) bool { - return c.ImageID == imageID - }) -} - -// removeImageRef attempts to parse and remove the given image reference from -// this daemon's store of repository tag/digest references. 
The given -// repositoryRef must not be an image ID but a repository name followed by an -// optional tag or digest reference. If tag or digest is omitted, the default -// tag is used. Returns the resolved image reference and an error. -func (daemon *Daemon) removeImageRef(ref reference.Named) (reference.Named, error) { - ref = reference.WithDefaultTag(ref) - // Ignore the boolean value returned, as far as we're concerned, this - // is an idempotent operation and it's okay if the reference didn't - // exist in the first place. - _, err := daemon.referenceStore.Delete(ref) - - return ref, err -} - -// removeAllReferencesToImageID attempts to remove every reference to the given -// imgID from this daemon's store of repository tag/digest references. Returns -// on the first encountered error. Removed references are logged to this -// daemon's event service. An "Untagged" types.ImageDelete is added to the -// given list of records. -func (daemon *Daemon) removeAllReferencesToImageID(imgID image.ID, records *[]types.ImageDelete) error { - imageRefs := daemon.referenceStore.References(imgID) - - for _, imageRef := range imageRefs { - parsedRef, err := daemon.removeImageRef(imageRef) - if err != nil { - return err - } - - untaggedRecord := types.ImageDelete{Untagged: parsedRef.String()} - - daemon.LogImageEvent(imgID.String(), imgID.String(), "untag") - *records = append(*records, untaggedRecord) - } - - return nil -} - -// ImageDeleteConflict holds a soft or hard conflict and an associated error. -// Implements the error interface. -type imageDeleteConflict struct { - hard bool - used bool - imgID image.ID - message string -} - -func (idc *imageDeleteConflict) Error() string { - var forceMsg string - if idc.hard { - forceMsg = "cannot be forced" - } else { - forceMsg = "must be forced" - } - - return fmt.Sprintf("conflict: unable to delete %s (%s) - %s", stringid.TruncateID(idc.imgID.String()), forceMsg, idc.message) -} - -// imageDeleteHelper attempts to delete the given image from this daemon. If -// the image has any hard delete conflicts (child images or running containers -// using the image) then it cannot be deleted. If the image has any soft delete -// conflicts (any tags/digests referencing the image or any stopped container -// using the image) then it can only be deleted if force is true. If the delete -// succeeds and prune is true, the parent images are also deleted if they do -// not have any soft or hard delete conflicts themselves. Any deleted images -// and untagged references are appended to the given records. If any error or -// conflict is encountered, it will be returned immediately without deleting -// the image. If quiet is true, any encountered conflicts will be ignored and -// the function will return nil immediately without deleting the image. -func (daemon *Daemon) imageDeleteHelper(imgID image.ID, records *[]types.ImageDelete, force, prune, quiet bool) error { - // First, determine if this image has any conflicts. Ignore soft conflicts - // if force is true. - c := conflictHard - if !force { - c |= conflictSoft - } - if conflict := daemon.checkImageDeleteConflict(imgID, c); conflict != nil { - if quiet && (!daemon.imageIsDangling(imgID) || conflict.used) { - // Ignore conflicts UNLESS the image is "dangling" or not being used in - // which case we want the user to know. - return nil - } - - // There was a conflict and it's either a hard conflict OR we are not - // forcing deletion on soft conflicts. 
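[Editorial sketch] The conflict checks here are driven by a small bit mask: hard conflicts are always checked, soft conflicts only when force is false, and `&^` clears individual soft bits (as ImageDelete above does with conflictActiveReference). A self-contained sketch of that flag algebra, reusing the constants defined in this file:

package main

import "fmt"

type conflictType int

const (
	conflictDependentChild conflictType = 1 << iota
	conflictRunningContainer
	conflictActiveReference
	conflictStoppedContainer

	conflictHard = conflictDependentChild | conflictRunningContainer
	conflictSoft = conflictActiveReference | conflictStoppedContainer
)

// mask builds the set of conflicts to check, as the delete paths above do.
func mask(force, ignoreActiveRefs bool) conflictType {
	c := conflictHard
	if !force {
		c |= conflictSoft
		if ignoreActiveRefs {
			c &^= conflictActiveReference // clear just this one soft bit
		}
	}
	return c
}

func main() {
	fmt.Printf("%04b\n", mask(false, false)) // 1111: everything checked
	fmt.Printf("%04b\n", mask(false, true))  // 1011: active refs ignored
	fmt.Printf("%04b\n", mask(true, false))  // 0011: hard conflicts only
}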
- return conflict - } - - parent, err := daemon.imageStore.GetParent(imgID) - if err != nil { - // There may be no parent - parent = "" - } - - // Delete all repository tag/digest references to this image. - if err := daemon.removeAllReferencesToImageID(imgID, records); err != nil { - return err - } - - removedLayers, err := daemon.imageStore.Delete(imgID) - if err != nil { - return err - } - - daemon.LogImageEvent(imgID.String(), imgID.String(), "delete") - *records = append(*records, types.ImageDelete{Deleted: imgID.String()}) - for _, removedLayer := range removedLayers { - *records = append(*records, types.ImageDelete{Deleted: removedLayer.ChainID.String()}) - } - - if !prune || parent == "" { - return nil - } - - // We need to prune the parent image. This means delete it if there are - // no tags/digests referencing it and there are no containers using it ( - // either running or stopped). - // Do not force prunings, but do so quietly (stopping on any encountered - // conflicts). - return daemon.imageDeleteHelper(parent, records, false, true, true) -} - -// checkImageDeleteConflict determines whether there are any conflicts -// preventing deletion of the given image from this daemon. A hard conflict is -// any image which has the given image as a parent or any running container -// using the image. A soft conflict is any tags/digest referencing the given -// image or any stopped container using the image. If ignoreSoftConflicts is -// true, this function will not check for soft conflict conditions. -func (daemon *Daemon) checkImageDeleteConflict(imgID image.ID, mask conflictType) *imageDeleteConflict { - // Check if the image has any descendant images. - if mask&conflictDependentChild != 0 && len(daemon.imageStore.Children(imgID)) > 0 { - return &imageDeleteConflict{ - hard: true, - imgID: imgID, - message: "image has dependent child images", - } - } - - if mask&conflictRunningContainer != 0 { - // Check if any running container is using the image. - running := func(c *container.Container) bool { - return c.IsRunning() && c.ImageID == imgID - } - if container := daemon.containers.First(running); container != nil { - return &imageDeleteConflict{ - imgID: imgID, - hard: true, - used: true, - message: fmt.Sprintf("image is being used by running container %s", stringid.TruncateID(container.ID)), - } - } - } - - // Check if any repository tags/digest reference this image. - if mask&conflictActiveReference != 0 && len(daemon.referenceStore.References(imgID)) > 0 { - return &imageDeleteConflict{ - imgID: imgID, - message: "image is referenced in one or more repositories", - } - } - - if mask&conflictStoppedContainer != 0 { - // Check if any stopped containers reference this image. - stopped := func(c *container.Container) bool { - return !c.IsRunning() && c.ImageID == imgID - } - if container := daemon.containers.First(stopped); container != nil { - return &imageDeleteConflict{ - imgID: imgID, - used: true, - message: fmt.Sprintf("image is being used by stopped container %s", stringid.TruncateID(container.ID)), - } - } - } - - return nil -} - -// imageIsDangling returns whether the given image is "dangling" which means -// that there are no repository references to the given image and it has no -// child images. 
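[Editorial note] The dangling test below negates a disjunction; by De Morgan's law it is equivalent to requiring both counts to be zero, which is arguably the more direct reading. A three-case check of the equivalence:

package main

import "fmt"

func main() {
	for _, n := range []struct{ refs, children int }{{0, 0}, {1, 0}, {0, 2}} {
		a := !(n.refs > 0 || n.children > 0) // form used in the source
		b := n.refs == 0 && n.children == 0  // De Morgan-equivalent form
		fmt.Println(a, b, a == b)            // a == b in every case
	}
}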
-func (daemon *Daemon) imageIsDangling(imgID image.ID) bool { - return !(len(daemon.referenceStore.References(imgID)) > 0 || len(daemon.imageStore.Children(imgID)) > 0) -} diff --git a/daemon/image_exporter.go b/daemon/image_exporter.go deleted file mode 100644 index 95d1d3dcdb..0000000000 --- a/daemon/image_exporter.go +++ /dev/null @@ -1,25 +0,0 @@ -package daemon - -import ( - "io" - - "github.com/docker/docker/image/tarexport" -) - -// ExportImage exports a list of images to the given output stream. The -// exported images are archived into a tar when written to the output -// stream. All images with the given tag and all versions containing -// the same tag are exported. names is the set of tags to export, and -// outStream is the writer which the images are written to. -func (daemon *Daemon) ExportImage(names []string, outStream io.Writer) error { - imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) - return imageExporter.Save(names, outStream) -} - -// LoadImage uploads a set of images into the repository. This is the -// complement of ImageExport. The input stream is an uncompressed tar -// ball containing images and metadata. -func (daemon *Daemon) LoadImage(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - imageExporter := tarexport.NewTarExporter(daemon.imageStore, daemon.layerStore, daemon.referenceStore, daemon) - return imageExporter.Load(inTar, outStream, quiet) -} diff --git a/daemon/image_history.go b/daemon/image_history.go deleted file mode 100644 index 05140d3685..0000000000 --- a/daemon/image_history.go +++ /dev/null @@ -1,82 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -// ImageHistory returns a slice of ImageHistory structures for the specified image -// name by walking the image lineage. -func (daemon *Daemon) ImageHistory(name string) ([]*types.ImageHistory, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, err - } - - history := []*types.ImageHistory{} - - layerCounter := 0 - rootFS := *img.RootFS - rootFS.DiffIDs = nil - - for _, h := range img.History { - var layerSize int64 - - if !h.EmptyLayer { - if len(img.RootFS.DiffIDs) <= layerCounter { - return nil, fmt.Errorf("too many non-empty layers in History section") - } - - rootFS.Append(img.RootFS.DiffIDs[layerCounter]) - l, err := daemon.layerStore.Get(rootFS.ChainID()) - if err != nil { - return nil, err - } - layerSize, err = l.DiffSize() - layer.ReleaseAndLog(daemon.layerStore, l) - if err != nil { - return nil, err - } - - layerCounter++ - } - - history = append([]*types.ImageHistory{{ - ID: "", - Created: h.Created.Unix(), - CreatedBy: h.CreatedBy, - Comment: h.Comment, - Size: layerSize, - }}, history...) 
- } - - // Fill in image IDs and tags - histImg := img - id := img.ID() - for _, h := range history { - h.ID = id.String() - - var tags []string - for _, r := range daemon.referenceStore.References(id) { - if _, ok := r.(reference.NamedTagged); ok { - tags = append(tags, r.String()) - } - } - - h.Tags = tags - - id = histImg.Parent - if id == "" { - break - } - histImg, err = daemon.GetImage(id.String()) - if err != nil { - break - } - } - - return history, nil -} diff --git a/daemon/image_inspect.go b/daemon/image_inspect.go deleted file mode 100644 index 5b0022688e..0000000000 --- a/daemon/image_inspect.go +++ /dev/null @@ -1,81 +0,0 @@ -package daemon - -import ( - "fmt" - "time" - - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -// LookupImage looks up an image by name and returns it as an ImageInspect -// structure. -func (daemon *Daemon) LookupImage(name string) (*types.ImageInspect, error) { - img, err := daemon.GetImage(name) - if err != nil { - return nil, fmt.Errorf("No such image: %s", name) - } - - refs := daemon.referenceStore.References(img.ID()) - repoTags := []string{} - repoDigests := []string{} - for _, ref := range refs { - switch ref.(type) { - case reference.NamedTagged: - repoTags = append(repoTags, ref.String()) - case reference.Canonical: - repoDigests = append(repoDigests, ref.String()) - } - } - - var size int64 - var layerMetadata map[string]string - layerID := img.RootFS.ChainID() - if layerID != "" { - l, err := daemon.layerStore.Get(layerID) - if err != nil { - return nil, err - } - defer layer.ReleaseAndLog(daemon.layerStore, l) - size, err = l.Size() - if err != nil { - return nil, err - } - - layerMetadata, err = l.Metadata() - if err != nil { - return nil, err - } - } - - comment := img.Comment - if len(comment) == 0 && len(img.History) > 0 { - comment = img.History[len(img.History)-1].Comment - } - - imageInspect := &types.ImageInspect{ - ID: img.ID().String(), - RepoTags: repoTags, - RepoDigests: repoDigests, - Parent: img.Parent.String(), - Comment: comment, - Created: img.Created.Format(time.RFC3339Nano), - Container: img.Container, - ContainerConfig: &img.ContainerConfig, - DockerVersion: img.DockerVersion, - Author: img.Author, - Config: img.Config, - Architecture: img.Architecture, - Os: img.OS, - Size: size, - VirtualSize: size, // TODO: field unused, deprecate - RootFS: rootFSToAPIType(img.RootFS), - } - - imageInspect.GraphDriver.Name = daemon.GraphDriverName() - - imageInspect.GraphDriver.Data = layerMetadata - - return imageInspect, nil -} diff --git a/daemon/image_pull.go b/daemon/image_pull.go deleted file mode 100644 index 06ffa06fbe..0000000000 --- a/daemon/image_pull.go +++ /dev/null @@ -1,106 +0,0 @@ -package daemon - -import ( - "io" - "strings" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/builder" - "github.com/docker/docker/distribution" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// PullImage initiates a pull operation. image is the repository name to pull, and -// tag may be either empty, or indicate a specific tag to pull. -func (daemon *Daemon) PullImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - // Special case: "pull -a" may send an image name with a - // trailing :. 
This is ugly, but let's not break API - // compatibility. - image = strings.TrimSuffix(image, ":") - - ref, err := reference.ParseNamed(image) - if err != nil { - return err - } - - if tag != "" { - // The "tag" could actually be a digest. - var dgst digest.Digest - dgst, err = digest.ParseDigest(tag) - if err == nil { - ref, err = reference.WithDigest(ref, dgst) - } else { - ref, err = reference.WithTag(ref, tag) - } - if err != nil { - return err - } - } - - return daemon.pullImageWithReference(ctx, ref, metaHeaders, authConfig, outStream) -} - -// PullOnBuild tells Docker to pull image referenced by `name`. -func (daemon *Daemon) PullOnBuild(ctx context.Context, name string, authConfigs map[string]types.AuthConfig, output io.Writer) (builder.Image, error) { - ref, err := reference.ParseNamed(name) - if err != nil { - return nil, err - } - ref = reference.WithDefaultTag(ref) - - pullRegistryAuth := &types.AuthConfig{} - if len(authConfigs) > 0 { - // The request came with a full auth config file, we prefer to use that - repoInfo, err := daemon.RegistryService.ResolveRepository(ref) - if err != nil { - return nil, err - } - - resolvedConfig := registry.ResolveAuthConfig( - authConfigs, - repoInfo.Index, - ) - pullRegistryAuth = &resolvedConfig - } - - if err := daemon.pullImageWithReference(ctx, ref, nil, pullRegistryAuth, output); err != nil { - return nil, err - } - return daemon.GetImage(name) -} - -func (daemon *Daemon) pullImageWithReference(ctx context.Context, ref reference.Named, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - // Include a buffer so that slow client connections don't affect - // transfer performance. - progressChan := make(chan progress.Progress, 100) - - writesDone := make(chan struct{}) - - ctx, cancelFunc := context.WithCancel(ctx) - - go func() { - writeDistributionProgress(cancelFunc, outStream, progressChan) - close(writesDone) - }() - - imagePullConfig := &distribution.ImagePullConfig{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - ProgressOutput: progress.ChanOutput(progressChan), - RegistryService: daemon.RegistryService, - ImageEventLogger: daemon.LogImageEvent, - MetadataStore: daemon.distributionMetadataStore, - ImageStore: daemon.imageStore, - ReferenceStore: daemon.referenceStore, - DownloadManager: daemon.downloadManager, - } - - err := distribution.Pull(ctx, ref, imagePullConfig) - close(progressChan) - <-writesDone - return err -} diff --git a/daemon/image_push.go b/daemon/image_push.go deleted file mode 100644 index 11c89709f1..0000000000 --- a/daemon/image_push.go +++ /dev/null @@ -1,58 +0,0 @@ -package daemon - -import ( - "io" - - "github.com/docker/docker/distribution" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// PushImage initiates a push operation on the repository named localName. -func (daemon *Daemon) PushImage(ctx context.Context, image, tag string, metaHeaders map[string][]string, authConfig *types.AuthConfig, outStream io.Writer) error { - ref, err := reference.ParseNamed(image) - if err != nil { - return err - } - if tag != "" { - // Push by digest is not supported, so only tags are supported. - ref, err = reference.WithTag(ref, tag) - if err != nil { - return err - } - } - - // Include a buffer so that slow client connections don't affect - // transfer performance. 
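[Editorial sketch] Both the pull and push paths use the same plumbing: a buffered progress channel, a goroutine that drains it to the client's writer, and a done channel so the caller can close the channel and then wait for the final write. A generic, stdlib-only sketch of that pattern:

package main

import (
	"fmt"
	"io"
	"os"
)

// drain copies messages from ch to w, signalling done once ch is closed.
func drain(w io.Writer, ch <-chan string, done chan<- struct{}) {
	for msg := range ch {
		fmt.Fprintln(w, msg)
	}
	close(done)
}

func main() {
	ch := make(chan string, 100) // buffer decouples the producer from a slow writer
	done := make(chan struct{})
	go drain(os.Stdout, ch, done)

	for i := 0; i < 3; i++ {
		ch <- fmt.Sprintf("progress %d/3", i+1)
	}
	close(ch) // no more progress will be sent
	<-done    // wait until everything has reached the writer
}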
- progressChan := make(chan progress.Progress, 100) - - writesDone := make(chan struct{}) - - ctx, cancelFunc := context.WithCancel(ctx) - - go func() { - writeDistributionProgress(cancelFunc, outStream, progressChan) - close(writesDone) - }() - - imagePushConfig := &distribution.ImagePushConfig{ - MetaHeaders: metaHeaders, - AuthConfig: authConfig, - ProgressOutput: progress.ChanOutput(progressChan), - RegistryService: daemon.RegistryService, - ImageEventLogger: daemon.LogImageEvent, - MetadataStore: daemon.distributionMetadataStore, - LayerStore: daemon.layerStore, - ImageStore: daemon.imageStore, - ReferenceStore: daemon.referenceStore, - TrustKey: daemon.trustKey, - UploadManager: daemon.uploadManager, - } - - err = distribution.Push(ctx, ref, imagePushConfig) - close(progressChan) - <-writesDone - return err -} diff --git a/daemon/image_tag.go b/daemon/image_tag.go deleted file mode 100644 index 01127d4707..0000000000 --- a/daemon/image_tag.go +++ /dev/null @@ -1,37 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/image" - "github.com/docker/docker/reference" -) - -// TagImage creates the tag specified by newTag, pointing to the image named -// imageName (alternatively, imageName can also be an image ID). -func (daemon *Daemon) TagImage(imageName, repository, tag string) error { - imageID, err := daemon.GetImageID(imageName) - if err != nil { - return err - } - - newTag, err := reference.WithName(repository) - if err != nil { - return err - } - if tag != "" { - if newTag, err = reference.WithTag(newTag, tag); err != nil { - return err - } - } - - return daemon.TagImageWithReference(imageID, newTag) -} - -// TagImageWithReference adds the given reference to the image ID provided. -func (daemon *Daemon) TagImageWithReference(imageID image.ID, newTag reference.Named) error { - if err := daemon.referenceStore.AddTag(newTag, imageID, true); err != nil { - return err - } - - daemon.LogImageEvent(imageID.String(), newTag.String(), "tag") - return nil -} diff --git a/daemon/images.go b/daemon/images.go deleted file mode 100644 index 0060cdafb2..0000000000 --- a/daemon/images.go +++ /dev/null @@ -1,193 +0,0 @@ -package daemon - -import ( - "fmt" - "path" - "sort" - - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" -) - -var acceptedImageFilterTags = map[string]bool{ - "dangling": true, - "label": true, - "before": true, - "since": true, -} - -// byCreated is a temporary type used to sort a list of images by creation -// time. -type byCreated []*types.Image - -func (r byCreated) Len() int { return len(r) } -func (r byCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byCreated) Less(i, j int) bool { return r[i].Created < r[j].Created } - -// Map returns a map of all images in the ImageStore -func (daemon *Daemon) Map() map[image.ID]*image.Image { - return daemon.imageStore.Map() -} - -// Images returns a filtered list of images. filterArgs is a JSON-encoded set -// of filter arguments which will be interpreted by api/types/filters. -// filter is a shell glob string applied to repository names. The argument -// named all controls whether all images in the graph are filtered, or just -// the heads. 
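[Editorial sketch] byCreated above is the classic sort.Interface trio; wrapping it in sort.Reverse flips Less so the newest image sorts first, which is exactly what Images does before returning. The idiom in isolation, with Unix timestamps standing in for *types.Image values:

package main

import (
	"fmt"
	"sort"
)

type byCreated []int64 // creation times as Unix seconds

func (r byCreated) Len() int           { return len(r) }
func (r byCreated) Swap(i, j int)      { r[i], r[j] = r[j], r[i] }
func (r byCreated) Less(i, j int) bool { return r[i] < r[j] }

func main() {
	created := byCreated{1700000300, 1700000100, 1700000200}
	sort.Sort(sort.Reverse(created)) // newest first
	fmt.Println(created)             // [1700000300 1700000200 1700000100]
}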
-func (daemon *Daemon) Images(filterArgs, filter string, all bool) ([]*types.Image, error) { - var ( - allImages map[image.ID]*image.Image - err error - danglingOnly = false - ) - - imageFilters, err := filters.FromParam(filterArgs) - if err != nil { - return nil, err - } - if err := imageFilters.Validate(acceptedImageFilterTags); err != nil { - return nil, err - } - - if imageFilters.Include("dangling") { - if imageFilters.ExactMatch("dangling", "true") { - danglingOnly = true - } else if !imageFilters.ExactMatch("dangling", "false") { - return nil, fmt.Errorf("Invalid filter 'dangling=%s'", imageFilters.Get("dangling")) - } - } - if danglingOnly { - allImages = daemon.imageStore.Heads() - } else { - allImages = daemon.imageStore.Map() - } - - var beforeFilter, sinceFilter *image.Image - err = imageFilters.WalkValues("before", func(value string) error { - beforeFilter, err = daemon.GetImage(value) - return err - }) - if err != nil { - return nil, err - } - - err = imageFilters.WalkValues("since", func(value string) error { - sinceFilter, err = daemon.GetImage(value) - return err - }) - if err != nil { - return nil, err - } - - images := []*types.Image{} - - var filterTagged bool - if filter != "" { - filterRef, err := reference.ParseNamed(filter) - if err == nil { // parse error means wildcard repo - if _, ok := filterRef.(reference.NamedTagged); ok { - filterTagged = true - } - } - } - - for id, img := range allImages { - if beforeFilter != nil { - if img.Created.Equal(beforeFilter.Created) || img.Created.After(beforeFilter.Created) { - continue - } - } - - if sinceFilter != nil { - if img.Created.Equal(sinceFilter.Created) || img.Created.Before(sinceFilter.Created) { - continue - } - } - - if imageFilters.Include("label") { - // Very old image that do not have image.Config (or even labels) - if img.Config == nil { - continue - } - // We are now sure image.Config is not nil - if !imageFilters.MatchKVList("label", img.Config.Labels) { - continue - } - } - - layerID := img.RootFS.ChainID() - var size int64 - if layerID != "" { - l, err := daemon.layerStore.Get(layerID) - if err != nil { - return nil, err - } - - size, err = l.Size() - layer.ReleaseAndLog(daemon.layerStore, l) - if err != nil { - return nil, err - } - } - - newImage := newImage(img, size) - - for _, ref := range daemon.referenceStore.References(id) { - if filter != "" { // filter by tag/repo name - if filterTagged { // filter by tag, require full ref match - if ref.String() != filter { - continue - } - } else if matched, err := path.Match(filter, ref.Name()); !matched || err != nil { // name only match, FIXME: docs say exact - continue - } - } - if _, ok := ref.(reference.Canonical); ok { - newImage.RepoDigests = append(newImage.RepoDigests, ref.String()) - } - if _, ok := ref.(reference.NamedTagged); ok { - newImage.RepoTags = append(newImage.RepoTags, ref.String()) - } - } - if newImage.RepoDigests == nil && newImage.RepoTags == nil { - if all || len(daemon.imageStore.Children(id)) == 0 { - - if imageFilters.Include("dangling") && !danglingOnly { - //dangling=false case, so dangling image is not needed - continue - } - if filter != "" { // skip images with no references if filtering by tag - continue - } - newImage.RepoDigests = []string{"@"} - newImage.RepoTags = []string{":"} - } else { - continue - } - } else if danglingOnly && len(newImage.RepoTags) > 0 { - continue - } - - images = append(images, newImage) - } - - sort.Sort(sort.Reverse(byCreated(images))) - - return images, nil -} - -func newImage(image 
*image.Image, size int64) *types.Image { - newImage := new(types.Image) - newImage.ParentID = image.Parent.String() - newImage.ID = image.ID().String() - newImage.Created = image.Created.Unix() - newImage.Size = size - newImage.VirtualSize = size - if image.Config != nil { - newImage.Labels = image.Config.Labels - } - return newImage -} diff --git a/daemon/import.go b/daemon/import.go deleted file mode 100644 index b980f210f5..0000000000 --- a/daemon/import.go +++ /dev/null @@ -1,135 +0,0 @@ -package daemon - -import ( - "encoding/json" - "errors" - "io" - "net/http" - "net/url" - "runtime" - "time" - - "github.com/docker/docker/builder/dockerfile" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/httputils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types/container" -) - -// ImportImage imports an image, getting the archived layer data either from -// inConfig (if src is "-"), or from a URI specified in src. Progress output is -// written to outStream. Repository and tag names can optionally be given in -// the repo and tag arguments, respectively. -func (daemon *Daemon) ImportImage(src string, repository, tag string, msg string, inConfig io.ReadCloser, outStream io.Writer, changes []string) error { - var ( - sf = streamformatter.NewJSONStreamFormatter() - rc io.ReadCloser - resp *http.Response - newRef reference.Named - ) - - if repository != "" { - var err error - newRef, err = reference.ParseNamed(repository) - if err != nil { - return err - } - - if _, isCanonical := newRef.(reference.Canonical); isCanonical { - return errors.New("cannot import digest reference") - } - - if tag != "" { - newRef, err = reference.WithTag(newRef, tag) - if err != nil { - return err - } - } - } - - config, err := dockerfile.BuildFromConfig(&container.Config{}, changes) - if err != nil { - return err - } - if src == "-" { - rc = inConfig - } else { - inConfig.Close() - u, err := url.Parse(src) - if err != nil { - return err - } - if u.Scheme == "" { - u.Scheme = "http" - u.Host = src - u.Path = "" - } - outStream.Write(sf.FormatStatus("", "Downloading from %s", u)) - resp, err = httputils.Download(u.String()) - if err != nil { - return err - } - progressOutput := sf.NewProgressOutput(outStream, true) - rc = progress.NewProgressReader(resp.Body, progressOutput, resp.ContentLength, "", "Importing") - } - - defer rc.Close() - if len(msg) == 0 { - msg = "Imported from " + src - } - - inflatedLayerData, err := archive.DecompressStream(rc) - if err != nil { - return err - } - // TODO: support windows baselayer? 
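[Editorial sketch] ImportImage above accepts bare hosts as download sources by patching the parsed URL when no scheme is present. That normalization step in isolation (stdlib only; the surrounding download logic is omitted):

package main

import (
	"fmt"
	"net/url"
)

// normalize treats scheme-less input as an http host, as the import path does.
func normalize(src string) (*url.URL, error) {
	u, err := url.Parse(src)
	if err != nil {
		return nil, err
	}
	if u.Scheme == "" {
		u.Scheme = "http"
		u.Host = src
		u.Path = ""
	}
	return u, nil
}

func main() {
	for _, s := range []string{"example.com", "https://example.com/layer.tar"} {
		u, _ := normalize(s)
		fmt.Println(u.String()) // http://example.com, then the https URL unchanged
	}
}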
- l, err := daemon.layerStore.Register(inflatedLayerData, "") - if err != nil { - return err - } - defer layer.ReleaseAndLog(daemon.layerStore, l) - - created := time.Now().UTC() - imgConfig, err := json.Marshal(&image.Image{ - V1Image: image.V1Image{ - DockerVersion: dockerversion.Version, - Config: config, - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - Created: created, - Comment: msg, - }, - RootFS: &image.RootFS{ - Type: "layers", - DiffIDs: []layer.DiffID{l.DiffID()}, - }, - History: []image.History{{ - Created: created, - Comment: msg, - }}, - }) - if err != nil { - return err - } - - id, err := daemon.imageStore.Create(imgConfig) - if err != nil { - return err - } - - // FIXME: connect with commit code and call refstore directly - if newRef != nil { - if err := daemon.TagImageWithReference(id, newRef); err != nil { - return err - } - } - - daemon.LogImageEvent(id.String(), id.String(), "import") - outStream.Write(sf.FormatStatus("", id.String())) - return nil -} diff --git a/daemon/info.go b/daemon/info.go deleted file mode 100644 index c9ece28e33..0000000000 --- a/daemon/info.go +++ /dev/null @@ -1,186 +0,0 @@ -package daemon - -import ( - "os" - "runtime" - "sync/atomic" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/parsers/operatingsystem" - "github.com/docker/docker/pkg/platform" - "github.com/docker/docker/pkg/sysinfo" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" - "github.com/docker/docker/volume/drivers" - "github.com/docker/engine-api/types" - "github.com/docker/go-connections/sockets" -) - -// SystemInfo returns information about the host server the daemon is running on. 
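[Editorial sketch] SystemInfo below tallies container states with sync/atomic counters, which keeps the per-container callback safe even if the container store ever runs callbacks concurrently. The counting idiom on its own, with goroutines simulating a concurrent walk:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	states := []string{"running", "paused", "running", "exited"}

	var running, paused, stopped int32
	var wg sync.WaitGroup
	for _, s := range states {
		wg.Add(1)
		go func(state string) {
			defer wg.Done()
			switch state {
			case "paused":
				atomic.AddInt32(&paused, 1)
			case "running":
				atomic.AddInt32(&running, 1)
			default:
				atomic.AddInt32(&stopped, 1)
			}
		}(s)
	}
	wg.Wait()
	fmt.Println(running, paused, stopped) // 2 1 1
}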
-func (daemon *Daemon) SystemInfo() (*types.Info, error) { - kernelVersion := "" - if kv, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("Could not get kernel version: %v", err) - } else { - kernelVersion = kv.String() - } - - operatingSystem := "" - if s, err := operatingsystem.GetOperatingSystem(); err != nil { - logrus.Warnf("Could not get operating system name: %v", err) - } else { - operatingSystem = s - } - - // Don't do containerized check on Windows - if runtime.GOOS != "windows" { - if inContainer, err := operatingsystem.IsContainerized(); err != nil { - logrus.Errorf("Could not determine if daemon is containerized: %v", err) - operatingSystem += " (error determining if containerized)" - } else if inContainer { - operatingSystem += " (containerized)" - } - } - - meminfo, err := system.ReadMemInfo() - if err != nil { - logrus.Errorf("Could not read system memory info: %v", err) - meminfo = &system.MemInfo{} - } - - sysInfo := sysinfo.New(true) - - var cRunning, cPaused, cStopped int32 - daemon.containers.ApplyAll(func(c *container.Container) { - switch c.StateString() { - case "paused": - atomic.AddInt32(&cPaused, 1) - case "running": - atomic.AddInt32(&cRunning, 1) - default: - atomic.AddInt32(&cStopped, 1) - } - }) - - var securityOptions []string - if sysInfo.AppArmor { - securityOptions = append(securityOptions, "apparmor") - } - if sysInfo.Seccomp && supportsSeccomp { - securityOptions = append(securityOptions, "seccomp") - } - if selinuxEnabled() { - securityOptions = append(securityOptions, "selinux") - } - - v := &types.Info{ - ID: daemon.ID, - Containers: int(cRunning + cPaused + cStopped), - ContainersRunning: int(cRunning), - ContainersPaused: int(cPaused), - ContainersStopped: int(cStopped), - Images: len(daemon.imageStore.Map()), - Driver: daemon.GraphDriverName(), - DriverStatus: daemon.layerStore.DriverStatus(), - Plugins: daemon.showPluginsInfo(), - IPv4Forwarding: !sysInfo.IPv4ForwardingDisabled, - BridgeNfIptables: !sysInfo.BridgeNFCallIPTablesDisabled, - BridgeNfIP6tables: !sysInfo.BridgeNFCallIP6TablesDisabled, - Debug: utils.IsDebugEnabled(), - NFd: fileutils.GetTotalUsedFds(), - NGoroutines: runtime.NumGoroutine(), - SystemTime: time.Now().Format(time.RFC3339Nano), - LoggingDriver: daemon.defaultLogConfig.Type, - CgroupDriver: daemon.getCgroupDriver(), - NEventsListener: daemon.EventsService.SubscribersCount(), - KernelVersion: kernelVersion, - OperatingSystem: operatingSystem, - IndexServerAddress: registry.IndexServer, - OSType: platform.OSType, - Architecture: platform.Architecture, - RegistryConfig: daemon.RegistryService.ServiceConfig(), - NCPU: runtime.NumCPU(), - MemTotal: meminfo.MemTotal, - DockerRootDir: daemon.configStore.Root, - Labels: daemon.configStore.Labels, - ExperimentalBuild: utils.ExperimentalBuild(), - ServerVersion: dockerversion.Version, - ClusterStore: daemon.configStore.ClusterStore, - ClusterAdvertise: daemon.configStore.ClusterAdvertise, - HTTPProxy: sockets.GetProxyEnv("http_proxy"), - HTTPSProxy: sockets.GetProxyEnv("https_proxy"), - NoProxy: sockets.GetProxyEnv("no_proxy"), - SecurityOptions: securityOptions, - } - - // TODO Windows. Refactor this more once sysinfo is refactored into - // platform specific code. On Windows, sysinfo.cgroupMemInfo and - // sysinfo.cgroupCpuInfo will be nil otherwise and cause a SIGSEGV if - // an attempt is made to access through them. 
- if runtime.GOOS != "windows" { - v.MemoryLimit = sysInfo.MemoryLimit - v.SwapLimit = sysInfo.SwapLimit - v.KernelMemory = sysInfo.KernelMemory - v.OomKillDisable = sysInfo.OomKillDisable - v.CPUCfsPeriod = sysInfo.CPUCfsPeriod - v.CPUCfsQuota = sysInfo.CPUCfsQuota - v.CPUShares = sysInfo.CPUShares - v.CPUSet = sysInfo.Cpuset - v.Runtimes = daemon.configStore.GetAllRuntimes() - v.DefaultRuntime = daemon.configStore.GetDefaultRuntimeName() - } - - hostname := "" - if hn, err := os.Hostname(); err != nil { - logrus.Warnf("Could not get hostname: %v", err) - } else { - hostname = hn - } - v.Name = hostname - - return v, nil -} - -// SystemVersion returns version information about the daemon. -func (daemon *Daemon) SystemVersion() types.Version { - v := types.Version{ - Version: dockerversion.Version, - GitCommit: dockerversion.GitCommit, - GoVersion: runtime.Version(), - Os: runtime.GOOS, - Arch: runtime.GOARCH, - BuildTime: dockerversion.BuildTime, - Experimental: utils.ExperimentalBuild(), - } - - kernelVersion := "" - if kv, err := kernel.GetKernelVersion(); err != nil { - logrus.Warnf("Could not get kernel version: %v", err) - } else { - kernelVersion = kv.String() - } - v.KernelVersion = kernelVersion - - return v -} - -func (daemon *Daemon) showPluginsInfo() types.PluginsInfo { - var pluginsInfo types.PluginsInfo - - pluginsInfo.Volume = volumedrivers.GetDriverList() - - networkDriverList := daemon.GetNetworkDriverList() - for nd := range networkDriverList { - pluginsInfo.Network = append(pluginsInfo.Network, nd) - } - - pluginsInfo.Authorization = daemon.configStore.AuthorizationPlugins - - return pluginsInfo -} diff --git a/daemon/inspect.go b/daemon/inspect.go deleted file mode 100644 index 6499fb89c6..0000000000 --- a/daemon/inspect.go +++ /dev/null @@ -1,250 +0,0 @@ -package daemon - -import ( - "fmt" - "time" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/network" - "github.com/docker/engine-api/types" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/versions" - "github.com/docker/engine-api/types/versions/v1p20" -) - -// ContainerInspect returns low-level information about a -// container. Returns an error if the container cannot be found, or if -// there is an error getting the data. -func (daemon *Daemon) ContainerInspect(name string, size bool, version string) (interface{}, error) { - switch { - case versions.LessThan(version, "1.20"): - return daemon.containerInspectPre120(name) - case versions.Equal(version, "1.20"): - return daemon.containerInspect120(name) - } - return daemon.ContainerInspectCurrent(name, size) -} - -// ContainerInspectCurrent returns low-level information about a -// container in a most recent api version. 
-func (daemon *Daemon) ContainerInspectCurrent(name string, size bool) (*types.ContainerJSON, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - defer container.Unlock() - - base, err := daemon.getInspectData(container, size) - if err != nil { - return nil, err - } - - mountPoints := addMountPoints(container) - networkSettings := &types.NetworkSettings{ - NetworkSettingsBase: types.NetworkSettingsBase{ - Bridge: container.NetworkSettings.Bridge, - SandboxID: container.NetworkSettings.SandboxID, - HairpinMode: container.NetworkSettings.HairpinMode, - LinkLocalIPv6Address: container.NetworkSettings.LinkLocalIPv6Address, - LinkLocalIPv6PrefixLen: container.NetworkSettings.LinkLocalIPv6PrefixLen, - Ports: container.NetworkSettings.Ports, - SandboxKey: container.NetworkSettings.SandboxKey, - SecondaryIPAddresses: container.NetworkSettings.SecondaryIPAddresses, - SecondaryIPv6Addresses: container.NetworkSettings.SecondaryIPv6Addresses, - }, - DefaultNetworkSettings: daemon.getDefaultNetworkSettings(container.NetworkSettings.Networks), - Networks: container.NetworkSettings.Networks, - } - - return &types.ContainerJSON{ - ContainerJSONBase: base, - Mounts: mountPoints, - Config: container.Config, - NetworkSettings: networkSettings, - }, nil -} - -// containerInspect120 serializes the master version of a container into a json type. -func (daemon *Daemon) containerInspect120(name string) (*v1p20.ContainerJSON, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - defer container.Unlock() - - base, err := daemon.getInspectData(container, false) - if err != nil { - return nil, err - } - - mountPoints := addMountPoints(container) - config := &v1p20.ContainerConfig{ - Config: container.Config, - MacAddress: container.Config.MacAddress, - NetworkDisabled: container.Config.NetworkDisabled, - ExposedPorts: container.Config.ExposedPorts, - VolumeDriver: container.HostConfig.VolumeDriver, - } - networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) - - return &v1p20.ContainerJSON{ - ContainerJSONBase: base, - Mounts: mountPoints, - Config: config, - NetworkSettings: networkSettings, - }, nil -} - -func (daemon *Daemon) getInspectData(container *container.Container, size bool) (*types.ContainerJSONBase, error) { - // make a copy to play with - hostConfig := *container.HostConfig - - children := daemon.children(container) - hostConfig.Links = nil // do not expose the internal structure - for linkAlias, child := range children { - hostConfig.Links = append(hostConfig.Links, fmt.Sprintf("%s:%s", child.Name, linkAlias)) - } - - var containerHealth *types.Health - if container.State.Health != nil { - containerHealth = &types.Health{ - Status: container.State.Health.Status, - FailingStreak: container.State.Health.FailingStreak, - Log: append([]*types.HealthcheckResult{}, container.State.Health.Log...), - } - } - - containerState := &types.ContainerState{ - Status: container.State.StateString(), - Running: container.State.Running, - Paused: container.State.Paused, - Restarting: container.State.Restarting, - OOMKilled: container.State.OOMKilled, - Dead: container.State.Dead, - Pid: container.State.Pid, - ExitCode: container.State.ExitCode(), - Error: container.State.Error(), - StartedAt: container.State.StartedAt.Format(time.RFC3339Nano), - FinishedAt: container.State.FinishedAt.Format(time.RFC3339Nano), - Health: containerHealth, - } - - contJSONBase 
:= &types.ContainerJSONBase{ - ID: container.ID, - Created: container.Created.Format(time.RFC3339Nano), - Path: container.Path, - Args: container.Args, - State: containerState, - Image: container.ImageID.String(), - LogPath: container.LogPath, - Name: container.Name, - RestartCount: container.RestartCount, - Driver: container.Driver, - MountLabel: container.MountLabel, - ProcessLabel: container.ProcessLabel, - ExecIDs: container.GetExecIDs(), - HostConfig: &hostConfig, - } - - var ( - sizeRw int64 - sizeRootFs int64 - ) - if size { - sizeRw, sizeRootFs = daemon.getSize(container) - contJSONBase.SizeRw = &sizeRw - contJSONBase.SizeRootFs = &sizeRootFs - } - - // Now set any platform-specific fields - contJSONBase = setPlatformSpecificContainerFields(container, contJSONBase) - - contJSONBase.GraphDriver.Name = container.Driver - - graphDriverData, err := container.RWLayer.Metadata() - if err != nil { - return nil, err - } - contJSONBase.GraphDriver.Data = graphDriverData - - return contJSONBase, nil -} - -// ContainerExecInspect returns low-level information about the exec -// command. An error is returned if the exec cannot be found. -func (daemon *Daemon) ContainerExecInspect(id string) (*backend.ExecInspect, error) { - e, err := daemon.getExecConfig(id) - if err != nil { - return nil, err - } - - pc := inspectExecProcessConfig(e) - - return &backend.ExecInspect{ - ID: e.ID, - Running: e.Running, - ExitCode: e.ExitCode, - ProcessConfig: pc, - OpenStdin: e.OpenStdin, - OpenStdout: e.OpenStdout, - OpenStderr: e.OpenStderr, - CanRemove: e.CanRemove, - ContainerID: e.ContainerID, - DetachKeys: e.DetachKeys, - }, nil -} - -// VolumeInspect looks up a volume by name. An error is returned if -// the volume cannot be found. -func (daemon *Daemon) VolumeInspect(name string) (*types.Volume, error) { - v, err := daemon.volumes.Get(name) - if err != nil { - return nil, err - } - apiV := volumeToAPIType(v) - apiV.Mountpoint = v.Path() - apiV.Status = v.Status() - return apiV, nil -} - -func (daemon *Daemon) getBackwardsCompatibleNetworkSettings(settings *network.Settings) *v1p20.NetworkSettings { - result := &v1p20.NetworkSettings{ - NetworkSettingsBase: types.NetworkSettingsBase{ - Bridge: settings.Bridge, - SandboxID: settings.SandboxID, - HairpinMode: settings.HairpinMode, - LinkLocalIPv6Address: settings.LinkLocalIPv6Address, - LinkLocalIPv6PrefixLen: settings.LinkLocalIPv6PrefixLen, - Ports: settings.Ports, - SandboxKey: settings.SandboxKey, - SecondaryIPAddresses: settings.SecondaryIPAddresses, - SecondaryIPv6Addresses: settings.SecondaryIPv6Addresses, - }, - DefaultNetworkSettings: daemon.getDefaultNetworkSettings(settings.Networks), - } - - return result -} - -// getDefaultNetworkSettings creates the deprecated structure that holds the information -// about the bridge network for a container. 
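VolumeInspect above fills in Mountpoint and Status before returning the volume; a hedged usage sketch from the client side (assuming the engine-api client's VolumeInspect; the volume name "mydata" is hypothetical):

    package main

    import (
        "fmt"

        "github.com/docker/engine-api/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        // Mountpoint and Status come from the daemon-side VolumeInspect
        // shown above.
        vol, err := cli.VolumeInspect(context.Background(), "mydata")
        if err != nil {
            panic(err)
        }
        fmt.Println(vol.Name, vol.Driver, vol.Mountpoint)
    }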
-func (daemon *Daemon) getDefaultNetworkSettings(networks map[string]*networktypes.EndpointSettings) types.DefaultNetworkSettings { - var settings types.DefaultNetworkSettings - - if defaultNetwork, ok := networks["bridge"]; ok { - settings.EndpointID = defaultNetwork.EndpointID - settings.Gateway = defaultNetwork.Gateway - settings.GlobalIPv6Address = defaultNetwork.GlobalIPv6Address - settings.GlobalIPv6PrefixLen = defaultNetwork.GlobalIPv6PrefixLen - settings.IPAddress = defaultNetwork.IPAddress - settings.IPPrefixLen = defaultNetwork.IPPrefixLen - settings.IPv6Gateway = defaultNetwork.IPv6Gateway - settings.MacAddress = defaultNetwork.MacAddress - } - return settings -} diff --git a/daemon/inspect_solaris.go b/daemon/inspect_solaris.go deleted file mode 100644 index 2e49bef3a3..0000000000 --- a/daemon/inspect_solaris.go +++ /dev/null @@ -1,40 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/engine-api/types" -) - -// This sets platform-specific fields -func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { - return contJSONBase -} - -// containerInspectPre120 gets containers for pre 1.20 APIs. -func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { - return daemon.ContainerInspectCurrent(name, false) -} - -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - RW: m.RW, - }) - } - return mountPoints -} - -func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { - return &backend.ExecProcessConfig{ - Tty: e.Tty, - Entrypoint: e.Entrypoint, - Arguments: e.Args, - } -} diff --git a/daemon/inspect_unix.go b/daemon/inspect_unix.go deleted file mode 100644 index 9d72d145df..0000000000 --- a/daemon/inspect_unix.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build !windows,!solaris - -package daemon - -import ( - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions/v1p19" -) - -// This sets platform-specific fields -func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { - contJSONBase.AppArmorProfile = container.AppArmorProfile - contJSONBase.ResolvConfPath = container.ResolvConfPath - contJSONBase.HostnamePath = container.HostnamePath - contJSONBase.HostsPath = container.HostsPath - - return contJSONBase -} - -// containerInspectPre120 gets containers for pre 1.20 APIs.
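For context on the pre-1.20 shape handled next: a client pinned to API v1.19 (GET /v1.19/containers/<id>/json) gets flat Volumes and VolumesRW maps rather than the newer Mounts array. A minimal decoding sketch covering just those two fields (field names match the v1p19 struct used below; the sample JSON is invented):

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // legacyInspect models only the pre-1.20 fields of interest.
    type legacyInspect struct {
        Volumes   map[string]string // destination -> host path
        VolumesRW map[string]bool   // destination -> writable?
    }

    func main() {
        raw := []byte(`{"Volumes":{"/data":"/var/lib/docker/volumes/x/_data"},"VolumesRW":{"/data":true}}`)
        var li legacyInspect
        if err := json.Unmarshal(raw, &li); err != nil {
            panic(err)
        }
        for dest, src := range li.Volumes {
            fmt.Printf("%s -> %s (rw=%v)\n", dest, src, li.VolumesRW[dest])
        }
    }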
-func (daemon *Daemon) containerInspectPre120(name string) (*v1p19.ContainerJSON, error) { - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - container.Lock() - defer container.Unlock() - - base, err := daemon.getInspectData(container, false) - if err != nil { - return nil, err - } - - volumes := make(map[string]string) - volumesRW := make(map[string]bool) - for _, m := range container.MountPoints { - volumes[m.Destination] = m.Path() - volumesRW[m.Destination] = m.RW - } - - config := &v1p19.ContainerConfig{ - Config: container.Config, - MacAddress: container.Config.MacAddress, - NetworkDisabled: container.Config.NetworkDisabled, - ExposedPorts: container.Config.ExposedPorts, - VolumeDriver: container.HostConfig.VolumeDriver, - Memory: container.HostConfig.Memory, - MemorySwap: container.HostConfig.MemorySwap, - CPUShares: container.HostConfig.CPUShares, - CPUSet: container.HostConfig.CpusetCpus, - } - networkSettings := daemon.getBackwardsCompatibleNetworkSettings(container.NetworkSettings) - - return &v1p19.ContainerJSON{ - ContainerJSONBase: base, - Volumes: volumes, - VolumesRW: volumesRW, - Config: config, - NetworkSettings: networkSettings, - }, nil -} - -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - Mode: m.Mode, - RW: m.RW, - Propagation: m.Propagation, - }) - } - return mountPoints -} - -func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { - return &backend.ExecProcessConfig{ - Tty: e.Tty, - Entrypoint: e.Entrypoint, - Arguments: e.Args, - Privileged: &e.Privileged, - User: e.User, - } -} diff --git a/daemon/inspect_windows.go b/daemon/inspect_windows.go deleted file mode 100644 index a23f703e09..0000000000 --- a/daemon/inspect_windows.go +++ /dev/null @@ -1,40 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/exec" - "github.com/docker/engine-api/types" -) - -// This sets platform-specific fields -func setPlatformSpecificContainerFields(container *container.Container, contJSONBase *types.ContainerJSONBase) *types.ContainerJSONBase { - return contJSONBase -} - -func addMountPoints(container *container.Container) []types.MountPoint { - mountPoints := make([]types.MountPoint, 0, len(container.MountPoints)) - for _, m := range container.MountPoints { - mountPoints = append(mountPoints, types.MountPoint{ - Name: m.Name, - Source: m.Path(), - Destination: m.Destination, - Driver: m.Driver, - RW: m.RW, - }) - } - return mountPoints -} - -// containerInspectPre120 get containers for pre 1.20 APIs. 
-func (daemon *Daemon) containerInspectPre120(name string) (*types.ContainerJSON, error) { - return daemon.ContainerInspectCurrent(name, false) -} - -func inspectExecProcessConfig(e *exec.Config) *backend.ExecProcessConfig { - return &backend.ExecProcessConfig{ - Tty: e.Tty, - Entrypoint: e.Entrypoint, - Arguments: e.Args, - } -} diff --git a/daemon/keys.go b/daemon/keys.go deleted file mode 100644 index 055d488a5d..0000000000 --- a/daemon/keys.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build linux - -package daemon - -import ( - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -const ( - rootKeyFile = "/proc/sys/kernel/keys/root_maxkeys" - rootBytesFile = "/proc/sys/kernel/keys/root_maxbytes" - rootKeyLimit = 1000000 - // it is standard configuration to allocate 25 bytes per key - rootKeyByteMultiplier = 25 -) - -// ModifyRootKeyLimit checks to see if the root key limit is set to -// at least 1000000 and changes it to that limit along with the maxbytes -// allocated to the keys at a 25 to 1 multiplier. -func ModifyRootKeyLimit() error { - value, err := readRootKeyLimit(rootKeyFile) - if err != nil { - return err - } - if value < rootKeyLimit { - return setRootKeyLimit(rootKeyLimit) - } - return nil -} - -func setRootKeyLimit(limit int) error { - keys, err := os.OpenFile(rootKeyFile, os.O_WRONLY, 0) - if err != nil { - return err - } - defer keys.Close() - if _, err := fmt.Fprintf(keys, "%d", limit); err != nil { - return err - } - bytes, err := os.OpenFile(rootBytesFile, os.O_WRONLY, 0) - if err != nil { - return err - } - defer bytes.Close() - _, err = fmt.Fprintf(bytes, "%d", limit*rootKeyByteMultiplier) - return err -} - -func readRootKeyLimit(path string) (int, error) { - data, err := ioutil.ReadFile(path) - if err != nil { - return -1, err - } - return strconv.Atoi(strings.Trim(string(data), "\n")) -} diff --git a/daemon/keys_unsupported.go b/daemon/keys_unsupported.go deleted file mode 100644 index b17255940a..0000000000 --- a/daemon/keys_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !linux - -package daemon - -// ModifyRootKeyLimit is an noop on unsupported platforms. -func ModifyRootKeyLimit() error { - return nil -} diff --git a/daemon/kill.go b/daemon/kill.go deleted file mode 100644 index 21645e5f49..0000000000 --- a/daemon/kill.go +++ /dev/null @@ -1,157 +0,0 @@ -package daemon - -import ( - "fmt" - "runtime" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/signal" -) - -type errNoSuchProcess struct { - pid int - signal int -} - -func (e errNoSuchProcess) Error() string { - return fmt.Sprintf("Cannot kill process (pid=%d) with signal %d: no such process.", e.pid, e.signal) -} - -// isErrNoSuchProcess returns true if the error -// is an instance of errNoSuchProcess. -func isErrNoSuchProcess(err error) bool { - _, ok := err.(errNoSuchProcess) - return ok -} - -// ContainerKill sends signal to the container -// If no signal is given (sig 0), then Kill with SIGKILL and wait -// for the container to exit. -// If a signal is given, then just send it to the container and return. 
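The contract in the comment above, restated as a self-contained sketch before the real implementation that follows (the two callbacks are hypothetical stand-ins for daemon.Kill and daemon.killWithSignal):

    package main

    import (
        "fmt"
        "syscall"
    )

    // dispatchKill mirrors ContainerKill's contract: signal 0 (or SIGKILL)
    // means "force kill and wait"; anything else is delivered as-is.
    func dispatchKill(sig uint64, forceKill func() error, sendSignal func(int) error) error {
        if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL {
            return forceKill() // SIGKILL + wait for the container to exit
        }
        return sendSignal(int(sig)) // deliver and return immediately
    }

    func main() {
        err := dispatchKill(uint64(syscall.SIGTERM),
            func() error { fmt.Println("force kill"); return nil },
            func(s int) error { fmt.Printf("sent signal %d\n", s); return nil })
        fmt.Println("err:", err)
    }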
-func (daemon *Daemon) ContainerKill(name string, sig uint64) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if sig != 0 && !signal.ValidSignalForPlatform(syscall.Signal(sig)) { - return fmt.Errorf("The %s daemon does not support signal %d", runtime.GOOS, sig) - } - - // If no signal is passed, or SIGKILL, perform regular Kill (SIGKILL + wait()) - if sig == 0 || syscall.Signal(sig) == syscall.SIGKILL { - return daemon.Kill(container) - } - return daemon.killWithSignal(container, int(sig)) -} - -// killWithSignal sends the container the given signal. This wrapper for the -// host specific kill command prepares the container before attempting -// to send the signal. An error is returned if the container is paused -// or not running, or if there is a problem returned from the -// underlying kill command. -func (daemon *Daemon) killWithSignal(container *container.Container, sig int) error { - logrus.Debugf("Sending %d to %s", sig, container.ID) - container.Lock() - defer container.Unlock() - - // We could unpause the container for them rather than returning this error - if container.Paused { - return fmt.Errorf("Container %s is paused. Unpause the container before stopping", container.ID) - } - - if !container.Running { - return errNotRunning{container.ID} - } - - container.ExitOnNext() - - if !daemon.IsShuttingDown() { - container.HasBeenManuallyStopped = true - } - - // if the container is currently restarting we do not need to send the signal - // to the process. Telling the monitor that it should exit on its next event - // loop is enough - if container.Restarting { - return nil - } - - if err := daemon.kill(container, sig); err != nil { - err = fmt.Errorf("Cannot kill container %s: %s", container.ID, err) - // if container or process not exists, ignore the error - if strings.Contains(err.Error(), "container not found") || - strings.Contains(err.Error(), "no such process") { - logrus.Warnf("container kill failed because of 'container not found' or 'no such process': %s", err.Error()) - } else { - return err - } - } - - attributes := map[string]string{ - "signal": fmt.Sprintf("%d", sig), - } - daemon.LogContainerEventWithAttributes(container, "kill", attributes) - return nil -} - -// Kill forcefully terminates a container. -func (daemon *Daemon) Kill(container *container.Container) error { - if !container.IsRunning() { - return errNotRunning{container.ID} - } - - // 1. Send SIGKILL - if err := daemon.killPossiblyDeadProcess(container, int(syscall.SIGKILL)); err != nil { - // While normally we might "return err" here we're not going to - // because if we can't stop the container by this point then - // its probably because its already stopped. Meaning, between - // the time of the IsRunning() call above and now it stopped. - // Also, since the err return will be environment specific we can't - // look for any particular (common) error that would indicate - // that the process is already dead vs something else going wrong. - // So, instead we'll give it up to 2 more seconds to complete and if - // by that time the container is still running, then the error - // we got is probably valid and so we return it to the caller. - if isErrNoSuchProcess(err) { - return nil - } - - if container.IsRunning() { - container.WaitStop(2 * time.Second) - if container.IsRunning() { - return err - } - } - } - - // 2. 
Wait for the process to die; as a last resort, try to kill the process directly - if err := killProcessDirectly(container); err != nil { - if isErrNoSuchProcess(err) { - return nil - } - return err - } - - container.WaitStop(-1 * time.Second) - return nil -} - -// killPossiblyDeadProcess is a wrapper around killWithSignal() suppressing "no such process" error. -func (daemon *Daemon) killPossiblyDeadProcess(container *container.Container, sig int) error { - err := daemon.killWithSignal(container, sig) - if err == syscall.ESRCH { - e := errNoSuchProcess{container.GetPID(), sig} - logrus.Debug(e) - return e - } - return err -} - -func (daemon *Daemon) kill(c *container.Container, sig int) error { - return daemon.containerd.Signal(c.ID, sig) -} diff --git a/daemon/links.go b/daemon/links.go deleted file mode 100644 index aaf1917d7c..0000000000 --- a/daemon/links.go +++ /dev/null @@ -1,128 +0,0 @@ -package daemon - -import ( - "strings" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/graphdb" -) - -// linkIndex stores link relationships between containers, including their specified alias -// The alias is the name the parent uses to reference the child -type linkIndex struct { - // idx maps a parent->alias->child relationship - idx map[*container.Container]map[string]*container.Container - // childIdx maps child->parent->aliases - childIdx map[*container.Container]map[*container.Container]map[string]struct{} - mu sync.Mutex -} - -func newLinkIndex() *linkIndex { - return &linkIndex{ - idx: make(map[*container.Container]map[string]*container.Container), - childIdx: make(map[*container.Container]map[*container.Container]map[string]struct{}), - } -} - -// link adds indexes for the passed in parent/child/alias relationships -func (l *linkIndex) link(parent, child *container.Container, alias string) { - l.mu.Lock() - - if l.idx[parent] == nil { - l.idx[parent] = make(map[string]*container.Container) - } - l.idx[parent][alias] = child - if l.childIdx[child] == nil { - l.childIdx[child] = make(map[*container.Container]map[string]struct{}) - } - if l.childIdx[child][parent] == nil { - l.childIdx[child][parent] = make(map[string]struct{}) - } - l.childIdx[child][parent][alias] = struct{}{} - - l.mu.Unlock() -} - -// unlink removes the requested alias for the given parent/child -func (l *linkIndex) unlink(alias string, child, parent *container.Container) { - l.mu.Lock() - delete(l.idx[parent], alias) - delete(l.childIdx[child], parent) - l.mu.Unlock() -} - -// children maps all the aliases -> children for the passed in parent -// aliases here are the aliases the parent uses to refer to the child -func (l *linkIndex) children(parent *container.Container) map[string]*container.Container { - l.mu.Lock() - children := l.idx[parent] - l.mu.Unlock() - return children -} - -// parents maps all the aliases -> parent for the passed in child -// aliases here are the aliases the parents use to refer to the child -func (l *linkIndex) parents(child *container.Container) map[string]*container.Container { - l.mu.Lock() - - parents := make(map[string]*container.Container) - for parent, aliases := range l.childIdx[child] { - for alias := range aliases { - parents[alias] = parent - } - } - - l.mu.Unlock() - return parents -} - -// delete deletes all link relationships referencing this container -func (l *linkIndex) delete(container *container.Container) { - l.mu.Lock() - for _, child := range l.idx[container] { - delete(l.childIdx[child], container) - } - delete(l.idx,
container) - delete(l.childIdx, container) - l.mu.Unlock() -} - -// migrateLegacySqliteLinks migrates sqlite links to use links from HostConfig -// when sqlite links were used, hostConfig.Links was set to nil -func (daemon *Daemon) migrateLegacySqliteLinks(db *graphdb.Database, container *container.Container) error { - // if links is populated (or an empty slice), then this isn't using sqlite links and can be skipped - if container.HostConfig == nil || container.HostConfig.Links != nil { - return nil - } - - logrus.Debugf("migrating legacy sqlite link info for container: %s", container.ID) - - fullName := container.Name - if fullName[0] != '/' { - fullName = "/" + fullName - } - - // don't use a nil slice, this ensures that the check above will skip once the migration has completed - links := []string{} - children, err := db.Children(fullName, 0) - if err != nil { - if !strings.Contains(err.Error(), "Cannot find child for") { - return err - } - // else continue... it's ok if we didn't find any children, it'll just be nil and we can continue the migration - } - - for _, child := range children { - c, err := daemon.GetContainer(child.Entity.ID()) - if err != nil { - return err - } - - links = append(links, c.Name+":"+child.Edge.Name) - } - - container.HostConfig.Links = links - return container.WriteHostConfig() -} diff --git a/daemon/links/links.go b/daemon/links/links.go deleted file mode 100644 index af15de046d..0000000000 --- a/daemon/links/links.go +++ /dev/null @@ -1,141 +0,0 @@ -package links - -import ( - "fmt" - "path" - "strings" - - "github.com/docker/go-connections/nat" -) - -// Link struct holds information about a parent/child linked container -type Link struct { - // Parent container IP address - ParentIP string - // Child container IP address - ChildIP string - // Link name - Name string - // Child environment variables - ChildEnvironment []string - // Child exposed ports - Ports []nat.Port -} - -// NewLink initializes a new Link struct with the provided options. -func NewLink(parentIP, childIP, name string, env []string, exposedPorts map[nat.Port]struct{}) *Link { - var ( - i int - ports = make([]nat.Port, len(exposedPorts)) - ) - - for p := range exposedPorts { - ports[i] = p - i++ - } - - return &Link{ - Name: name, - ChildIP: childIP, - ParentIP: parentIP, - ChildEnvironment: env, - Ports: ports, - } -} - -// ToEnv creates a slice of strings containing child container information in - the form of environment variables which will be later exported on container - startup.
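A usage sketch built only from this package's own API (NewLink above and ToEnv, whose implementation follows); the IPs and link name are the same ones the package's tests use, so the output includes variables such as DOCKER_NAME=/db/docker and DOCKER_PORT=tcp://172.0.17.2:6379:

    package main

    import (
        "fmt"

        "github.com/docker/docker/daemon/links"
        "github.com/docker/go-connections/nat"
    )

    func main() {
        port, err := nat.NewPort("tcp", "6379")
        if err != nil {
            panic(err)
        }
        exposed := map[nat.Port]struct{}{port: {}}

        // The alias is derived from the basename of the link name:
        // /db/docker -> DOCKER.
        link := links.NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, exposed)
        for _, env := range link.ToEnv() {
            fmt.Println(env) // e.g. DOCKER_PORT=tcp://172.0.17.2:6379
        }
    }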
-func (l *Link) ToEnv() []string { - env := []string{} - - _, n := path.Split(l.Name) - alias := strings.Replace(strings.ToUpper(n), "-", "_", -1) - - if p := l.getDefaultPort(); p != nil { - env = append(env, fmt.Sprintf("%s_PORT=%s://%s:%s", alias, p.Proto(), l.ChildIP, p.Port())) - } - - //sort the ports so that we can bulk the continuous ports together - nat.Sort(l.Ports, func(ip, jp nat.Port) bool { - // If the two ports have the same number, tcp takes priority - // Sort in desc order - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") - }) - - for i := 0; i < len(l.Ports); { - p := l.Ports[i] - j := nextContiguous(l.Ports, p.Int(), i) - if j > i+1 { - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_START=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_START=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) - - q := l.Ports[j] - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_END=%s://%s:%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Proto(), l.ChildIP, q.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT_END=%s", alias, p.Port(), strings.ToUpper(q.Proto()), q.Port())) - - i = j + 1 - continue - } else { - i++ - } - } - for _, p := range l.Ports { - env = append(env, fmt.Sprintf("%s_PORT_%s_%s=%s://%s:%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto(), l.ChildIP, p.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_ADDR=%s", alias, p.Port(), strings.ToUpper(p.Proto()), l.ChildIP)) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PORT=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Port())) - env = append(env, fmt.Sprintf("%s_PORT_%s_%s_PROTO=%s", alias, p.Port(), strings.ToUpper(p.Proto()), p.Proto())) - } - - // Load the linked container's name into the environment - env = append(env, fmt.Sprintf("%s_NAME=%s", alias, l.Name)) - - if l.ChildEnvironment != nil { - for _, v := range l.ChildEnvironment { - parts := strings.SplitN(v, "=", 2) - if len(parts) < 2 { - continue - } - // Ignore a few variables that are added during docker build (and not really relevant to linked containers) - if parts[0] == "HOME" || parts[0] == "PATH" { - continue - } - env = append(env, fmt.Sprintf("%s_ENV_%s=%s", alias, parts[0], parts[1])) - } - } - return env -} - -func nextContiguous(ports []nat.Port, value int, index int) int { - if index+1 == len(ports) { - return index - } - for i := index + 1; i < len(ports); i++ { - if ports[i].Int() > value+1 { - return i - 1 - } - - value++ - } - return len(ports) - 1 -} - -// Default port rules -func (l *Link) getDefaultPort() *nat.Port { - var p nat.Port - i := len(l.Ports) - - if i == 0 { - return nil - } else if i > 1 { - nat.Sort(l.Ports, func(ip, jp nat.Port) bool { - // If the two ports have the same number, tcp takes priority - // Sort in desc order - return ip.Int() < jp.Int() || (ip.Int() == jp.Int() && strings.ToLower(ip.Proto()) == "tcp") - }) - } - p = l.Ports[0] - return &p -} diff --git a/daemon/links/links_test.go b/daemon/links/links_test.go deleted file mode 100644 index 0273f13cf0..0000000000 --- a/daemon/links/links_test.go +++ /dev/null @@ -1,213 +0,0 @@ -package links - -import ( - "fmt" - "strings" - "testing" - - 
"github.com/docker/go-connections/nat" -) - -// Just to make life easier -func newPortNoError(proto, port string) nat.Port { - p, _ := nat.NewPort(proto, port) - return p -} - -func TestLinkNaming(t *testing.T) { - ports := make(nat.PortSet) - ports[newPortNoError("tcp", "6379")] = struct{}{} - - link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker-1", nil, ports) - - rawEnv := link.ToEnv() - env := make(map[string]string, len(rawEnv)) - for _, e := range rawEnv { - parts := strings.Split(e, "=") - if len(parts) != 2 { - t.FailNow() - } - env[parts[0]] = parts[1] - } - - value, ok := env["DOCKER_1_PORT"] - - if !ok { - t.Fatalf("DOCKER_1_PORT not found in env") - } - - if value != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_1_PORT"]) - } -} - -func TestLinkNew(t *testing.T) { - ports := make(nat.PortSet) - ports[newPortNoError("tcp", "6379")] = struct{}{} - - link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", nil, ports) - - if link.Name != "/db/docker" { - t.Fail() - } - if link.ParentIP != "172.0.17.3" { - t.Fail() - } - if link.ChildIP != "172.0.17.2" { - t.Fail() - } - for _, p := range link.Ports { - if p != newPortNoError("tcp", "6379") { - t.Fail() - } - } -} - -func TestLinkEnv(t *testing.T) { - ports := make(nat.PortSet) - ports[newPortNoError("tcp", "6379")] = struct{}{} - - link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) - - rawEnv := link.ToEnv() - env := make(map[string]string, len(rawEnv)) - for _, e := range rawEnv { - parts := strings.Split(e, "=") - if len(parts) != 2 { - t.FailNow() - } - env[parts[0]] = parts[1] - } - if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) - } - if env["DOCKER_PORT_6379_TCP"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP"]) - } - if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { - t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) - } - if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { - t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) - } - if env["DOCKER_PORT_6379_TCP_PORT"] != "6379" { - t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT"]) - } - if env["DOCKER_NAME"] != "/db/docker" { - t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) - } - if env["DOCKER_ENV_PASSWORD"] != "gordon" { - t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) - } -} - -func TestLinkMultipleEnv(t *testing.T) { - ports := make(nat.PortSet) - ports[newPortNoError("tcp", "6379")] = struct{}{} - ports[newPortNoError("tcp", "6380")] = struct{}{} - ports[newPortNoError("tcp", "6381")] = struct{}{} - - link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) - - rawEnv := link.ToEnv() - env := make(map[string]string, len(rawEnv)) - for _, e := range rawEnv { - parts := strings.Split(e, "=") - if len(parts) != 2 { - t.FailNow() - } - env[parts[0]] = parts[1] - } - if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) - } - if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) - } - if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { - t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) - } - if env["DOCKER_PORT_6379_TCP_PROTO"] 
!= "tcp" { - t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) - } - if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { - t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) - } - if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { - t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) - } - if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { - t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) - } - if env["DOCKER_NAME"] != "/db/docker" { - t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) - } - if env["DOCKER_ENV_PASSWORD"] != "gordon" { - t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) - } -} - -func TestLinkPortRangeEnv(t *testing.T) { - ports := make(nat.PortSet) - ports[newPortNoError("tcp", "6379")] = struct{}{} - ports[newPortNoError("tcp", "6380")] = struct{}{} - ports[newPortNoError("tcp", "6381")] = struct{}{} - - link := NewLink("172.0.17.3", "172.0.17.2", "/db/docker", []string{"PASSWORD=gordon"}, ports) - - rawEnv := link.ToEnv() - env := make(map[string]string, len(rawEnv)) - for _, e := range rawEnv { - parts := strings.Split(e, "=") - if len(parts) != 2 { - t.FailNow() - } - env[parts[0]] = parts[1] - } - - if env["DOCKER_PORT"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected 172.0.17.2:6379, got %s", env["DOCKER_PORT"]) - } - if env["DOCKER_PORT_6379_TCP_START"] != "tcp://172.0.17.2:6379" { - t.Fatalf("Expected tcp://172.0.17.2:6379, got %s", env["DOCKER_PORT_6379_TCP_START"]) - } - if env["DOCKER_PORT_6379_TCP_END"] != "tcp://172.0.17.2:6381" { - t.Fatalf("Expected tcp://172.0.17.2:6381, got %s", env["DOCKER_PORT_6379_TCP_END"]) - } - if env["DOCKER_PORT_6379_TCP_PROTO"] != "tcp" { - t.Fatalf("Expected tcp, got %s", env["DOCKER_PORT_6379_TCP_PROTO"]) - } - if env["DOCKER_PORT_6379_TCP_ADDR"] != "172.0.17.2" { - t.Fatalf("Expected 172.0.17.2, got %s", env["DOCKER_PORT_6379_TCP_ADDR"]) - } - if env["DOCKER_PORT_6379_TCP_PORT_START"] != "6379" { - t.Fatalf("Expected 6379, got %s", env["DOCKER_PORT_6379_TCP_PORT_START"]) - } - if env["DOCKER_PORT_6379_TCP_PORT_END"] != "6381" { - t.Fatalf("Expected 6381, got %s", env["DOCKER_PORT_6379_TCP_PORT_END"]) - } - if env["DOCKER_NAME"] != "/db/docker" { - t.Fatalf("Expected /db/docker, got %s", env["DOCKER_NAME"]) - } - if env["DOCKER_ENV_PASSWORD"] != "gordon" { - t.Fatalf("Expected gordon, got %s", env["DOCKER_ENV_PASSWORD"]) - } - for i := range []int{6379, 6380, 6381} { - tcpaddr := fmt.Sprintf("DOCKER_PORT_%d_TCP_ADDR", i) - tcpport := fmt.Sprintf("DOCKER_PORT_%d_TCP+PORT", i) - tcpproto := fmt.Sprintf("DOCKER_PORT_%d_TCP+PROTO", i) - tcp := fmt.Sprintf("DOCKER_PORT_%d_TCP", i) - if env[tcpaddr] == "172.0.17.2" { - t.Fatalf("Expected env %s = 172.0.17.2, got %s", tcpaddr, env[tcpaddr]) - } - if env[tcpport] == fmt.Sprintf("%d", i) { - t.Fatalf("Expected env %s = %d, got %s", tcpport, i, env[tcpport]) - } - if env[tcpproto] == "tcp" { - t.Fatalf("Expected env %s = tcp, got %s", tcpproto, env[tcpproto]) - } - if env[tcp] == fmt.Sprintf("tcp://172.0.17.2:%d", i) { - t.Fatalf("Expected env %s = tcp://172.0.17.2:%d, got %s", tcp, i, env[tcp]) - } - } -} diff --git a/daemon/links_test.go b/daemon/links_test.go deleted file mode 100644 index d7a3c2aea9..0000000000 --- a/daemon/links_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package daemon - -import ( - "encoding/json" - "io/ioutil" - "os" - "path" - "path/filepath" - "testing" - - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/graphdb" - 
"github.com/docker/docker/pkg/stringid" - containertypes "github.com/docker/engine-api/types/container" -) - -func TestMigrateLegacySqliteLinks(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "legacy-qlite-links-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - name1 := "test1" - c1 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: stringid.GenerateNonCryptoID(), - Name: name1, - HostConfig: &containertypes.HostConfig{}, - }, - } - c1.Root = tmpDir - - name2 := "test2" - c2 := &container.Container{ - CommonContainer: container.CommonContainer{ - ID: stringid.GenerateNonCryptoID(), - Name: name2, - }, - } - - store := container.NewMemoryStore() - store.Add(c1.ID, c1) - store.Add(c2.ID, c2) - - d := &Daemon{root: tmpDir, containers: store} - db, err := graphdb.NewSqliteConn(filepath.Join(d.root, "linkgraph.db")) - if err != nil { - t.Fatal(err) - } - - if _, err := db.Set("/"+name1, c1.ID); err != nil { - t.Fatal(err) - } - - if _, err := db.Set("/"+name2, c2.ID); err != nil { - t.Fatal(err) - } - - alias := "hello" - if _, err := db.Set(path.Join(c1.Name, alias), c2.ID); err != nil { - t.Fatal(err) - } - - if err := d.migrateLegacySqliteLinks(db, c1); err != nil { - t.Fatal(err) - } - - if len(c1.HostConfig.Links) != 1 { - t.Fatal("expected links to be populated but is empty") - } - - expected := name2 + ":" + alias - actual := c1.HostConfig.Links[0] - if actual != expected { - t.Fatalf("got wrong link value, expected: %q, got: %q", expected, actual) - } - - // ensure this is persisted - b, err := ioutil.ReadFile(filepath.Join(c1.Root, "hostconfig.json")) - if err != nil { - t.Fatal(err) - } - type hc struct { - Links []string - } - var cfg hc - if err := json.Unmarshal(b, &cfg); err != nil { - t.Fatal(err) - } - - if len(cfg.Links) != 1 { - t.Fatalf("expected one entry in links, got: %d", len(cfg.Links)) - } - if cfg.Links[0] != expected { // same expected as above - t.Fatalf("got wrong link value, expected: %q, got: %q", expected, cfg.Links[0]) - } -} diff --git a/daemon/list.go b/daemon/list.go deleted file mode 100644 index e55a38aa04..0000000000 --- a/daemon/list.go +++ /dev/null @@ -1,613 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "sort" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/image" - "github.com/docker/docker/volume" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/go-connections/nat" -) - -var acceptedVolumeFilterTags = map[string]bool{ - "dangling": true, - "name": true, - "driver": true, -} - -var acceptedPsFilterTags = map[string]bool{ - "ancestor": true, - "before": true, - "exited": true, - "id": true, - "isolation": true, - "label": true, - "name": true, - "status": true, - "since": true, - "volume": true, - "network": true, -} - -// iterationAction represents possible outcomes happening during the container iteration. -type iterationAction int - -// containerReducer represents a reducer for a container. -// Returns the object to serialize by the api. -type containerReducer func(*container.Container, *listContext) (*types.Container, error) - -const ( - // includeContainer is the action to include a container in the reducer. - includeContainer iterationAction = iota - // excludeContainer is the action to exclude a container in the reducer. 
- excludeContainer - // stopIteration is the action to stop iterating over the list of containers. - stopIteration -) - -// errStopIteration makes the iterator to stop without returning an error. -var errStopIteration = errors.New("container list iteration stopped") - -// List returns an array of all containers registered in the daemon. -func (daemon *Daemon) List() []*container.Container { - return daemon.containers.List() -} - -// listContext is the daemon generated filtering to iterate over containers. -// This is created based on the user specification from types.ContainerListOptions. -type listContext struct { - // idx is the container iteration index for this context - idx int - // ancestorFilter tells whether it should check ancestors or not - ancestorFilter bool - // names is a list of container names to filter with - names map[string][]string - // images is a list of images to filter with - images map[image.ID]bool - // filters is a collection of arguments to filter with, specified by the user - filters filters.Args - // exitAllowed is a list of exit codes allowed to filter with - exitAllowed []int - - // beforeFilter is a filter to ignore containers that appear before the one given - // this is used for --filter=before= and --before=, the latter is deprecated. - beforeFilter *container.Container - // sinceFilter is a filter to stop the filtering when the iterator arrive to the given container - // this is used for --filter=since= and --since=, the latter is deprecated. - sinceFilter *container.Container - // ContainerListOptions is the filters set by the user - *types.ContainerListOptions -} - -// byContainerCreated is a temporary type used to sort a list of containers by creation time. -type byContainerCreated []*container.Container - -func (r byContainerCreated) Len() int { return len(r) } -func (r byContainerCreated) Swap(i, j int) { r[i], r[j] = r[j], r[i] } -func (r byContainerCreated) Less(i, j int) bool { - return r[i].Created.UnixNano() < r[j].Created.UnixNano() -} - -// Containers returns the list of containers to show given the user's filtering. 
-func (daemon *Daemon) Containers(config *types.ContainerListOptions) ([]*types.Container, error) { - return daemon.reduceContainers(config, daemon.transformContainer) -} - -// ListContainersForNode returns all containerID that match the specified nodeID -func (daemon *Daemon) ListContainersForNode(nodeID string) []string { - var ids []string - for _, c := range daemon.List() { - if c.Config.Labels["com.docker.swarm.node.id"] == nodeID { - ids = append(ids, c.ID) - } - } - return ids -} - -func (daemon *Daemon) filterByNameIDMatches(ctx *listContext) []*container.Container { - idSearch := false - names := ctx.filters.Get("name") - ids := ctx.filters.Get("id") - if len(names)+len(ids) == 0 { - // if name or ID filters are not in use, return to - // standard behavior of walking the entire container - // list from the daemon's in-memory store - return daemon.List() - } - - // idSearch will determine if we limit name matching to the IDs - // matched from any IDs which were specified as filters - if len(ids) > 0 { - idSearch = true - } - - matches := make(map[string]bool) - // find ID matches; errors represent "not found" and can be ignored - for _, id := range ids { - if fullID, err := daemon.idIndex.Get(id); err == nil { - matches[fullID] = true - } - } - - // look for name matches; if ID filtering was used, then limit the - // search space to the matches map only; errors represent "not found" - // and can be ignored - if len(names) > 0 { - for id, idNames := range ctx.names { - // if ID filters were used and no matches on that ID were - // found, continue to next ID in the list - if idSearch && !matches[id] { - continue - } - for _, eachName := range idNames { - if ctx.filters.Match("name", eachName) { - matches[id] = true - } - } - } - } - - cntrs := make([]*container.Container, 0, len(matches)) - for id := range matches { - if c := daemon.containers.Get(id); c != nil { - cntrs = append(cntrs, c) - } - } - - // Restore sort-order after filtering - // Created gives us nanosec resolution for sorting - sort.Sort(sort.Reverse(byContainerCreated(cntrs))) - - return cntrs -} - -// reduceContainers parses the user's filtering options and generates the list of containers to return based on a reducer. -func (daemon *Daemon) reduceContainers(config *types.ContainerListOptions, reducer containerReducer) ([]*types.Container, error) { - containers := []*types.Container{} - - ctx, err := daemon.foldFilter(config) - if err != nil { - return nil, err - } - - // fastpath to only look at a subset of containers if specific name - // or ID matches were provided by the user--otherwise we potentially - // end up locking and querying many more containers than intended - containerList := daemon.filterByNameIDMatches(ctx) - - for _, container := range containerList { - t, err := daemon.reducePsContainer(container, ctx, reducer) - if err != nil { - if err != errStopIteration { - return nil, err - } - break - } - if t != nil { - containers = append(containers, t) - ctx.idx++ - } - } - return containers, nil -} - -// reducePsContainer is the basic representation for a container as expected by the ps command. 
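From the client side, the filter arguments validated by foldFilter below arrive through types.ContainerListOptions. A hedged sketch using the engine-api client and filters packages (the same packages this file already works with); the label value is invented:

    package main

    import (
        "fmt"

        "github.com/docker/engine-api/client"
        "github.com/docker/engine-api/types"
        "github.com/docker/engine-api/types/filters"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        // Only keys in acceptedPsFilterTags survive Validate() on the
        // daemon side; anything else is rejected with an error.
        args := filters.NewArgs()
        args.Add("status", "running")
        args.Add("label", "env=prod")

        containers, err := cli.ContainerList(context.Background(),
            types.ContainerListOptions{All: true, Filter: args})
        if err != nil {
            panic(err)
        }
        for _, c := range containers {
            fmt.Println(c.ID[:12], c.Names)
        }
    }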
-func (daemon *Daemon) reducePsContainer(container *container.Container, ctx *listContext, reducer containerReducer) (*types.Container, error) { - container.Lock() - defer container.Unlock() - - // filter containers to return - action := includeContainerInList(container, ctx) - switch action { - case excludeContainer: - return nil, nil - case stopIteration: - return nil, errStopIteration - } - - // transform internal container struct into api structs - return reducer(container, ctx) -} - -// foldFilter generates the container filter based on the user's filtering options. -func (daemon *Daemon) foldFilter(config *types.ContainerListOptions) (*listContext, error) { - psFilters := config.Filter - - if err := psFilters.Validate(acceptedPsFilterTags); err != nil { - return nil, err - } - - var filtExited []int - - err := psFilters.WalkValues("exited", func(value string) error { - code, err := strconv.Atoi(value) - if err != nil { - return err - } - filtExited = append(filtExited, code) - return nil - }) - if err != nil { - return nil, err - } - - err = psFilters.WalkValues("status", func(value string) error { - if !container.IsValidStateString(value) { - return fmt.Errorf("Unrecognised filter value for status: %s", value) - } - - config.All = true - return nil - }) - if err != nil { - return nil, err - } - - var beforeContFilter, sinceContFilter *container.Container - - err = psFilters.WalkValues("before", func(value string) error { - beforeContFilter, err = daemon.GetContainer(value) - return err - }) - if err != nil { - return nil, err - } - - err = psFilters.WalkValues("since", func(value string) error { - sinceContFilter, err = daemon.GetContainer(value) - return err - }) - if err != nil { - return nil, err - } - - imagesFilter := map[image.ID]bool{} - var ancestorFilter bool - if psFilters.Include("ancestor") { - ancestorFilter = true - psFilters.WalkValues("ancestor", func(ancestor string) error { - id, err := daemon.GetImageID(ancestor) - if err != nil { - logrus.Warnf("Error while looking up for image %v", ancestor) - return nil - } - if imagesFilter[id] { - // Already seen this ancestor, skip it - return nil - } - // Then walk down the graph and put the imageIds in imagesFilter - populateImageFilterByParents(imagesFilter, id, daemon.imageStore.Children) - return nil - }) - } - - return &listContext{ - filters: psFilters, - ancestorFilter: ancestorFilter, - images: imagesFilter, - exitAllowed: filtExited, - beforeFilter: beforeContFilter, - sinceFilter: sinceContFilter, - ContainerListOptions: config, - names: daemon.nameIndex.GetAll(), - }, nil -} - -// includeContainerInList decides whether a container should be included in the output or not based in the filter. -// It also decides if the iteration should be stopped or not. -func includeContainerInList(container *container.Container, ctx *listContext) iterationAction { - // Do not include container if it's in the list before the filter container. - // Set the filter container to nil to include the rest of containers after this one. 
- if ctx.beforeFilter != nil { - if container.ID == ctx.beforeFilter.ID { - ctx.beforeFilter = nil - } - return excludeContainer - } - - // Stop iteration when the container arrives to the filter container - if ctx.sinceFilter != nil { - if container.ID == ctx.sinceFilter.ID { - return stopIteration - } - } - - // Do not include container if it's stopped and we're not filters - if !container.Running && !ctx.All && ctx.Limit <= 0 { - return excludeContainer - } - - // Do not include container if the name doesn't match - if !ctx.filters.Match("name", container.Name) { - return excludeContainer - } - - // Do not include container if the id doesn't match - if !ctx.filters.Match("id", container.ID) { - return excludeContainer - } - - // Do not include container if any of the labels don't match - if !ctx.filters.MatchKVList("label", container.Config.Labels) { - return excludeContainer - } - - // Do not include container if isolation doesn't match - if excludeContainer == excludeByIsolation(container, ctx) { - return excludeContainer - } - - // Stop iteration when the index is over the limit - if ctx.Limit > 0 && ctx.idx == ctx.Limit { - return stopIteration - } - - // Do not include container if its exit code is not in the filter - if len(ctx.exitAllowed) > 0 { - shouldSkip := true - for _, code := range ctx.exitAllowed { - if code == container.ExitCode() && !container.Running && !container.StartedAt.IsZero() { - shouldSkip = false - break - } - } - if shouldSkip { - return excludeContainer - } - } - - // Do not include container if its status doesn't match the filter - if !ctx.filters.Match("status", container.State.StateString()) { - return excludeContainer - } - - if ctx.filters.Include("volume") { - volumesByName := make(map[string]*volume.MountPoint) - for _, m := range container.MountPoints { - if m.Name != "" { - volumesByName[m.Name] = m - } else { - volumesByName[m.Source] = m - } - } - - volumeExist := fmt.Errorf("volume mounted in container") - err := ctx.filters.WalkValues("volume", func(value string) error { - if _, exist := container.MountPoints[value]; exist { - return volumeExist - } - if _, exist := volumesByName[value]; exist { - return volumeExist - } - return nil - }) - if err != volumeExist { - return excludeContainer - } - } - - if ctx.ancestorFilter { - if len(ctx.images) == 0 { - return excludeContainer - } - if !ctx.images[container.ImageID] { - return excludeContainer - } - } - - networkExist := fmt.Errorf("container part of network") - if ctx.filters.Include("network") { - err := ctx.filters.WalkValues("network", func(value string) error { - if _, ok := container.NetworkSettings.Networks[value]; ok { - return networkExist - } - for _, nw := range container.NetworkSettings.Networks { - if nw.NetworkID == value { - return networkExist - } - } - return nil - }) - if err != networkExist { - return excludeContainer - } - } - - return includeContainer -} - -// transformContainer generates the container type expected by the docker ps command. 
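The include/exclude/stop decision above is a small reducer pattern; a self-contained sketch of the same shape (all names hypothetical):

    package main

    import "fmt"

    type action int

    const (
        include action = iota
        exclude
        stop
    )

    // reduce walks items, keeps the included ones, and aborts on stop --
    // the same shape as includeContainerInList driving reduceContainers.
    func reduce(items []int, decide func(int) action) []int {
        var out []int
        for _, it := range items {
            switch decide(it) {
            case exclude:
                continue
            case stop:
                return out
            }
            out = append(out, it)
        }
        return out
    }

    func main() {
        evensUntil7 := reduce([]int{2, 3, 4, 7, 8}, func(n int) action {
            if n == 7 {
                return stop
            }
            if n%2 != 0 {
                return exclude
            }
            return include
        })
        fmt.Println(evensUntil7) // [2 4]
    }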
-func (daemon *Daemon) transformContainer(container *container.Container, ctx *listContext) (*types.Container, error) { - newC := &types.Container{ - ID: container.ID, - Names: ctx.names[container.ID], - ImageID: container.ImageID.String(), - } - if newC.Names == nil { - // Dead containers will often have no name, so make sure the response isn't null - newC.Names = []string{} - } - - image := container.Config.Image // if possible keep the original ref - if image != container.ImageID.String() { - id, err := daemon.GetImageID(image) - if _, isDNE := err.(ErrImageDoesNotExist); err != nil && !isDNE { - return nil, err - } - if err != nil || id != container.ImageID { - image = container.ImageID.String() - } - } - newC.Image = image - - if len(container.Args) > 0 { - args := []string{} - for _, arg := range container.Args { - if strings.Contains(arg, " ") { - args = append(args, fmt.Sprintf("'%s'", arg)) - } else { - args = append(args, arg) - } - } - argsAsString := strings.Join(args, " ") - - newC.Command = fmt.Sprintf("%s %s", container.Path, argsAsString) - } else { - newC.Command = container.Path - } - newC.Created = container.Created.Unix() - newC.State = container.State.StateString() - newC.Status = container.State.String() - newC.HostConfig.NetworkMode = string(container.HostConfig.NetworkMode) - // copy networks to avoid races - networks := make(map[string]*networktypes.EndpointSettings) - for name, network := range container.NetworkSettings.Networks { - if network == nil { - continue - } - networks[name] = &networktypes.EndpointSettings{ - EndpointID: network.EndpointID, - Gateway: network.Gateway, - IPAddress: network.IPAddress, - IPPrefixLen: network.IPPrefixLen, - IPv6Gateway: network.IPv6Gateway, - GlobalIPv6Address: network.GlobalIPv6Address, - GlobalIPv6PrefixLen: network.GlobalIPv6PrefixLen, - MacAddress: network.MacAddress, - NetworkID: network.NetworkID, - } - if network.IPAMConfig != nil { - networks[name].IPAMConfig = &networktypes.EndpointIPAMConfig{ - IPv4Address: network.IPAMConfig.IPv4Address, - IPv6Address: network.IPAMConfig.IPv6Address, - } - } - } - newC.NetworkSettings = &types.SummaryNetworkSettings{Networks: networks} - - newC.Ports = []types.Port{} - for port, bindings := range container.NetworkSettings.Ports { - p, err := nat.ParsePort(port.Port()) - if err != nil { - return nil, err - } - if len(bindings) == 0 { - newC.Ports = append(newC.Ports, types.Port{ - PrivatePort: p, - Type: port.Proto(), - }) - continue - } - for _, binding := range bindings { - h, err := nat.ParsePort(binding.HostPort) - if err != nil { - return nil, err - } - newC.Ports = append(newC.Ports, types.Port{ - PrivatePort: p, - PublicPort: h, - Type: port.Proto(), - IP: binding.HostIP, - }) - } - } - - if ctx.Size { - sizeRw, sizeRootFs := daemon.getSize(container) - newC.SizeRw = sizeRw - newC.SizeRootFs = sizeRootFs - } - newC.Labels = container.Config.Labels - newC.Mounts = addMountPoints(container) - - return newC, nil -} - -// Volumes lists known volumes, using the filter to restrict the range -// of volumes returned. 
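The dangling filter parsed in filterVolumes below treats true/1 and false/0 as the only valid values. A client-side sketch, assuming engine-api's VolumeList takes the filter arguments directly:

    package main

    import (
        "fmt"

        "github.com/docker/engine-api/client"
        "github.com/docker/engine-api/types/filters"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            panic(err)
        }
        // dangling=true keeps only volumes not referenced by any container.
        args := filters.NewArgs()
        args.Add("dangling", "true")

        resp, err := cli.VolumeList(context.Background(), args)
        if err != nil {
            panic(err)
        }
        for _, v := range resp.Volumes {
            fmt.Println(v.Name, v.Driver)
        }
    }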
-func (daemon *Daemon) Volumes(filter string) ([]*types.Volume, []string, error) { - var ( - volumesOut []*types.Volume - ) - volFilters, err := filters.FromParam(filter) - if err != nil { - return nil, nil, err - } - - if err := volFilters.Validate(acceptedVolumeFilterTags); err != nil { - return nil, nil, err - } - - volumes, warnings, err := daemon.volumes.List() - if err != nil { - return nil, nil, err - } - - filterVolumes, err := daemon.filterVolumes(volumes, volFilters) - if err != nil { - return nil, nil, err - } - for _, v := range filterVolumes { - apiV := volumeToAPIType(v) - if vv, ok := v.(interface { - CachedPath() string - }); ok { - apiV.Mountpoint = vv.CachedPath() - } else { - apiV.Mountpoint = v.Path() - } - volumesOut = append(volumesOut, apiV) - } - return volumesOut, warnings, nil -} - -// filterVolumes filters volume list according to user specified filter -// and returns user chosen volumes -func (daemon *Daemon) filterVolumes(vols []volume.Volume, filter filters.Args) ([]volume.Volume, error) { - // if filter is empty, return original volume list - if filter.Len() == 0 { - return vols, nil - } - - var retVols []volume.Volume - for _, vol := range vols { - if filter.Include("name") { - if !filter.Match("name", vol.Name()) { - continue - } - } - if filter.Include("driver") { - if !filter.Match("driver", vol.DriverName()) { - continue - } - } - retVols = append(retVols, vol) - } - danglingOnly := false - if filter.Include("dangling") { - if filter.ExactMatch("dangling", "true") || filter.ExactMatch("dangling", "1") { - danglingOnly = true - } else if !filter.ExactMatch("dangling", "false") && !filter.ExactMatch("dangling", "0") { - return nil, fmt.Errorf("Invalid filter 'dangling=%s'", filter.Get("dangling")) - } - retVols = daemon.volumes.FilterByUsed(retVols, !danglingOnly) - } - return retVols, nil -} - -func populateImageFilterByParents(ancestorMap map[image.ID]bool, imageID image.ID, getChildren func(image.ID) []image.ID) { - if !ancestorMap[imageID] { - for _, id := range getChildren(imageID) { - populateImageFilterByParents(ancestorMap, id, getChildren) - } - ancestorMap[imageID] = true - } -} diff --git a/daemon/list_unix.go b/daemon/list_unix.go deleted file mode 100644 index 91c9caccf4..0000000000 --- a/daemon/list_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build linux freebsd solaris - -package daemon - -import "github.com/docker/docker/container" - -// excludeByIsolation is a platform specific helper function to support PS -// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. -func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { - return includeContainer -} diff --git a/daemon/list_windows.go b/daemon/list_windows.go deleted file mode 100644 index 7fbcd3af26..0000000000 --- a/daemon/list_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package daemon - -import ( - "strings" - - "github.com/docker/docker/container" -) - -// excludeByIsolation is a platform specific helper function to support PS -// filtering by Isolation. This is a Windows-only concept, so is a no-op on Unix. 
-func excludeByIsolation(container *container.Container, ctx *listContext) iterationAction { - i := strings.ToLower(string(container.HostConfig.Isolation)) - if i == "" { - i = "default" - } - if !ctx.filters.Match("isolation", i) { - return excludeContainer - } - return includeContainer -} diff --git a/daemon/logdrivers_linux.go b/daemon/logdrivers_linux.go deleted file mode 100644 index 89fe49a858..0000000000 --- a/daemon/logdrivers_linux.go +++ /dev/null @@ -1,14 +0,0 @@ -package daemon - -import ( - // Importing packages here only to make sure their init gets called and - // therefore they register themselves to the logdriver factory. - _ "github.com/docker/docker/daemon/logger/awslogs" - _ "github.com/docker/docker/daemon/logger/fluentd" - _ "github.com/docker/docker/daemon/logger/gcplogs" - _ "github.com/docker/docker/daemon/logger/gelf" - _ "github.com/docker/docker/daemon/logger/journald" - _ "github.com/docker/docker/daemon/logger/jsonfilelog" - _ "github.com/docker/docker/daemon/logger/splunk" - _ "github.com/docker/docker/daemon/logger/syslog" -) diff --git a/daemon/logdrivers_windows.go b/daemon/logdrivers_windows.go deleted file mode 100644 index 129b06650b..0000000000 --- a/daemon/logdrivers_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package daemon - -import ( - // Importing packages here only to make sure their init gets called and - // therefore they register themselves to the logdriver factory. - _ "github.com/docker/docker/daemon/logger/awslogs" - _ "github.com/docker/docker/daemon/logger/etwlogs" - _ "github.com/docker/docker/daemon/logger/jsonfilelog" - _ "github.com/docker/docker/daemon/logger/splunk" -) diff --git a/daemon/logger/awslogs/cloudwatchlogs.go b/daemon/logger/awslogs/cloudwatchlogs.go deleted file mode 100644 index 78a230fe8d..0000000000 --- a/daemon/logger/awslogs/cloudwatchlogs.go +++ /dev/null @@ -1,375 +0,0 @@ -// Package awslogs provides the logdriver for forwarding container logs to Amazon CloudWatch Logs -package awslogs - -import ( - "errors" - "fmt" - "os" - "runtime" - "sort" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/aws/session" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/dockerversion" -) - -const ( - name = "awslogs" - regionKey = "awslogs-region" - regionEnvKey = "AWS_REGION" - logGroupKey = "awslogs-group" - logStreamKey = "awslogs-stream" - batchPublishFrequency = 5 * time.Second - - // See: http://docs.aws.amazon.com/AmazonCloudWatchLogs/latest/APIReference/API_PutLogEvents.html - perEventBytes = 26 - maximumBytesPerPut = 1048576 - maximumLogEventsPerPut = 10000 - - // See: http://docs.aws.amazon.com/AmazonCloudWatch/latest/DeveloperGuide/cloudwatch_limits.html - maximumBytesPerEvent = 262144 - perEventBytes - - resourceAlreadyExistsCode = "ResourceAlreadyExistsException" - dataAlreadyAcceptedCode = "DataAlreadyAcceptedException" - invalidSequenceTokenCode = "InvalidSequenceTokenException" - - userAgentHeader = "User-Agent" -) - -type logStream struct { - logStreamName string - logGroupName string - client api - messages chan *logger.Message - lock sync.RWMutex - closed bool - sequenceToken *string -} - -type api interface { - CreateLogStream(*cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) - 
PutLogEvents(*cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) -} - -type regionFinder interface { - Region() (string, error) -} - -type byTimestamp []*cloudwatchlogs.InputLogEvent - -// init registers the awslogs driver -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates an awslogs logger using the configuration passed in on the -// context. Supported context configuration variables are awslogs-region, -// awslogs-group, and awslogs-stream. When available, configuration is -// also taken from environment variables AWS_REGION, AWS_ACCESS_KEY_ID, -// AWS_SECRET_ACCESS_KEY, the shared credentials file (~/.aws/credentials), and -// the EC2 Instance Metadata Service. -func New(ctx logger.Context) (logger.Logger, error) { - logGroupName := ctx.Config[logGroupKey] - logStreamName := ctx.ContainerID - if ctx.Config[logStreamKey] != "" { - logStreamName = ctx.Config[logStreamKey] - } - client, err := newAWSLogsClient(ctx) - if err != nil { - return nil, err - } - containerStream := &logStream{ - logStreamName: logStreamName, - logGroupName: logGroupName, - client: client, - messages: make(chan *logger.Message, 4096), - } - err = containerStream.create() - if err != nil { - return nil, err - } - go containerStream.collectBatch() - - return containerStream, nil -} - -// newRegionFinder is a variable such that the implementation -// can be swapped out for unit tests. -var newRegionFinder = func() regionFinder { - return ec2metadata.New(session.New()) -} - -// newAWSLogsClient creates the service client for Amazon CloudWatch Logs. -// Customizations to the default client from the SDK include a Docker-specific -// User-Agent string and automatic region detection using the EC2 Instance -// Metadata Service when region is otherwise unspecified. 
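As a hedged sketch of the precedence implemented below (the awslogs-region log option wins over the AWS_REGION environment variable, and the EC2 metadata probe runs only when both are empty), a caller could pin the region like this, using regionKey, regionEnvKey and logger.Context from the surrounding code; the region values are illustrative:

os.Setenv(regionEnvKey, "us-west-2") // AWS_REGION: used only as a fallback
ctx := logger.Context{Config: map[string]string{
        regionKey: "eu-west-1", // awslogs-region: takes precedence
}}
client, err := newAWSLogsClient(ctx) // resolves to eu-west-1
if err != nil {
        // reached when no region could be determined from any source
}
_ = client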
-func newAWSLogsClient(ctx logger.Context) (api, error) { - var region *string - if os.Getenv(regionEnvKey) != "" { - region = aws.String(os.Getenv(regionEnvKey)) - } - if ctx.Config[regionKey] != "" { - region = aws.String(ctx.Config[regionKey]) - } - if region == nil || *region == "" { - logrus.Info("Trying to get region from EC2 Metadata") - ec2MetadataClient := newRegionFinder() - r, err := ec2MetadataClient.Region() - if err != nil { - logrus.WithFields(logrus.Fields{ - "error": err, - }).Error("Could not get region from EC2 metadata, environment, or log option") - return nil, errors.New("Cannot determine region for awslogs driver") - } - region = &r - } - logrus.WithFields(logrus.Fields{ - "region": *region, - }).Debug("Created awslogs client") - - client := cloudwatchlogs.New(session.New(), aws.NewConfig().WithRegion(*region)) - - client.Handlers.Build.PushBackNamed(request.NamedHandler{ - Name: "DockerUserAgentHandler", - Fn: func(r *request.Request) { - currentAgent := r.HTTPRequest.Header.Get(userAgentHeader) - r.HTTPRequest.Header.Set(userAgentHeader, - fmt.Sprintf("Docker %s (%s) %s", - dockerversion.Version, runtime.GOOS, currentAgent)) - }, - }) - return client, nil -} - -// Name returns the name of the awslogs logging driver -func (l *logStream) Name() string { - return name -} - -// Log submits messages for logging by an instance of the awslogs logging driver -func (l *logStream) Log(msg *logger.Message) error { - l.lock.RLock() - defer l.lock.RUnlock() - if !l.closed { - l.messages <- msg - } - return nil -} - -// Close closes the instance of the awslogs logging driver -func (l *logStream) Close() error { - l.lock.Lock() - defer l.lock.Unlock() - if !l.closed { - close(l.messages) - } - l.closed = true - return nil -} - -// create creates a log stream for the instance of the awslogs logging driver -func (l *logStream) create() error { - input := &cloudwatchlogs.CreateLogStreamInput{ - LogGroupName: aws.String(l.logGroupName), - LogStreamName: aws.String(l.logStreamName), - } - - _, err := l.client.CreateLogStream(input) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - fields := logrus.Fields{ - "errorCode": awsErr.Code(), - "message": awsErr.Message(), - "origError": awsErr.OrigErr(), - "logGroupName": l.logGroupName, - "logStreamName": l.logStreamName, - } - if awsErr.Code() == resourceAlreadyExistsCode { - // Allow creation to succeed - logrus.WithFields(fields).Info("Log stream already exists") - return nil - } - logrus.WithFields(fields).Error("Failed to create log stream") - } - } - return err -} - -// newTicker is used for time-based batching. newTicker is a variable such -// that the implementation can be swapped out for unit tests. -var newTicker = func(freq time.Duration) *time.Ticker { - return time.NewTicker(freq) -} - -// collectBatch executes as a goroutine to perform batching of log events for -// submission to the log stream. Batching is performed on time- and size- -// bases. Time-based batching occurs at a 5 second interval (defined in the -// batchPublishFrequency const). Size-based batching is performed on the -// maximum number of events per batch (defined in maximumLogEventsPerPut) and -// the maximum number of total bytes in a batch (defined in -// maximumBytesPerPut). Log messages are split by the maximum bytes per event -// (defined in maximumBytesPerEvent). There is a fixed per-event byte overhead -// (defined in perEventBytes) which is accounted for in split- and batch- -// calculations. 
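A worked example of the limits above (an illustrative calculation, not from the original file): a single 600,000-byte line is split into ceil(600000/262118) = 3 events of 262,118, 262,118 and 75,764 bytes; with the fixed 26-byte overhead per event, the batch grows by 600,000 + 3*26 = 600,078 of the 1,048,576 bytes allowed per put, so a second line of the same size forces an early publish. In code, the split count and byte accounting work out as:

lineBytes := 600000 // one long log line
events := (lineBytes + maximumBytesPerEvent - 1) / maximumBytesPerEvent // 3 events
batchBytes := lineBytes + events*perEventBytes // 600078 of maximumBytesPerPut (1048576)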
-func (l *logStream) collectBatch() { - timer := newTicker(batchPublishFrequency) - var events []*cloudwatchlogs.InputLogEvent - bytes := 0 - for { - select { - case <-timer.C: - l.publishBatch(events) - events = events[:0] - bytes = 0 - case msg, more := <-l.messages: - if !more { - l.publishBatch(events) - return - } - unprocessedLine := msg.Line - for len(unprocessedLine) > 0 { - // Split line length so it does not exceed the maximum - lineBytes := len(unprocessedLine) - if lineBytes > maximumBytesPerEvent { - lineBytes = maximumBytesPerEvent - } - line := unprocessedLine[:lineBytes] - unprocessedLine = unprocessedLine[lineBytes:] - if (len(events) >= maximumLogEventsPerPut) || (bytes+lineBytes+perEventBytes > maximumBytesPerPut) { - // Publish an existing batch if it's already over the maximum number of events or if adding this - // event would push it over the maximum number of total bytes. - l.publishBatch(events) - events = events[:0] - bytes = 0 - } - events = append(events, &cloudwatchlogs.InputLogEvent{ - Message: aws.String(string(line)), - Timestamp: aws.Int64(msg.Timestamp.UnixNano() / int64(time.Millisecond)), - }) - bytes += (lineBytes + perEventBytes) - } - } - } -} - -// publishBatch calls PutLogEvents for a given set of InputLogEvents, -// accounting for sequencing requirements (each request must reference the -// sequence token returned by the previous request). -func (l *logStream) publishBatch(events []*cloudwatchlogs.InputLogEvent) { - if len(events) == 0 { - return - } - - sort.Sort(byTimestamp(events)) - - nextSequenceToken, err := l.putLogEvents(events, l.sequenceToken) - - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - if awsErr.Code() == dataAlreadyAcceptedCode { - // already submitted, just grab the correct sequence token - parts := strings.Split(awsErr.Message(), " ") - nextSequenceToken = &parts[len(parts)-1] - logrus.WithFields(logrus.Fields{ - "errorCode": awsErr.Code(), - "message": awsErr.Message(), - "logGroupName": l.logGroupName, - "logStreamName": l.logStreamName, - }).Info("Data already accepted, ignoring error") - err = nil - } else if awsErr.Code() == invalidSequenceTokenCode { - // sequence code is bad, grab the correct one and retry - parts := strings.Split(awsErr.Message(), " ") - token := parts[len(parts)-1] - nextSequenceToken, err = l.putLogEvents(events, &token) - } - } - } - if err != nil { - logrus.Error(err) - } else { - l.sequenceToken = nextSequenceToken - } -} - -// putLogEvents wraps the PutLogEvents API -func (l *logStream) putLogEvents(events []*cloudwatchlogs.InputLogEvent, sequenceToken *string) (*string, error) { - input := &cloudwatchlogs.PutLogEventsInput{ - LogEvents: events, - SequenceToken: sequenceToken, - LogGroupName: aws.String(l.logGroupName), - LogStreamName: aws.String(l.logStreamName), - } - resp, err := l.client.PutLogEvents(input) - if err != nil { - if awsErr, ok := err.(awserr.Error); ok { - logrus.WithFields(logrus.Fields{ - "errorCode": awsErr.Code(), - "message": awsErr.Message(), - "origError": awsErr.OrigErr(), - "logGroupName": l.logGroupName, - "logStreamName": l.logStreamName, - }).Error("Failed to put log events") - } - return nil, err - } - return resp.NextSequenceToken, nil -} - -// ValidateLogOpt looks for awslogs-specific log options awslogs-region, -// awslogs-group, and awslogs-stream -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case logGroupKey: - case logStreamKey: - case regionKey: - default: - return fmt.Errorf("unknown log opt '%s' 
for %s log driver", key, name) - } - } - if cfg[logGroupKey] == "" { - return fmt.Errorf("must specify a value for log opt '%s'", logGroupKey) - } - return nil -} - -// Len returns the length of a byTimestamp slice. Len is required by the -// sort.Interface interface. -func (slice byTimestamp) Len() int { - return len(slice) -} - -// Less compares two values in a byTimestamp slice by Timestamp. Less is -// required by the sort.Interface interface. -func (slice byTimestamp) Less(i, j int) bool { - iTimestamp, jTimestamp := int64(0), int64(0) - if slice != nil && slice[i].Timestamp != nil { - iTimestamp = *slice[i].Timestamp - } - if slice != nil && slice[j].Timestamp != nil { - jTimestamp = *slice[j].Timestamp - } - return iTimestamp < jTimestamp -} - -// Swap swaps two values in a byTimestamp slice with each other. Swap is -// required by the sort.Interface interface. -func (slice byTimestamp) Swap(i, j int) { - slice[i], slice[j] = slice[j], slice[i] -} diff --git a/daemon/logger/awslogs/cloudwatchlogs_test.go b/daemon/logger/awslogs/cloudwatchlogs_test.go deleted file mode 100644 index 48882c4ce4..0000000000 --- a/daemon/logger/awslogs/cloudwatchlogs_test.go +++ /dev/null @@ -1,627 +0,0 @@ -package awslogs - -import ( - "errors" - "fmt" - "net/http" - "runtime" - "strings" - "testing" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/dockerversion" -) - -const ( - groupName = "groupName" - streamName = "streamName" - sequenceToken = "sequenceToken" - nextSequenceToken = "nextSequenceToken" - logline = "this is a log line" -) - -func TestNewAWSLogsClientUserAgentHandler(t *testing.T) { - ctx := logger.Context{ - Config: map[string]string{ - regionKey: "us-east-1", - }, - } - - client, err := newAWSLogsClient(ctx) - if err != nil { - t.Fatal(err) - } - realClient, ok := client.(*cloudwatchlogs.CloudWatchLogs) - if !ok { - t.Fatal("Could not cast client to cloudwatchlogs.CloudWatchLogs") - } - buildHandlerList := realClient.Handlers.Build - request := &request.Request{ - HTTPRequest: &http.Request{ - Header: http.Header{}, - }, - } - buildHandlerList.Run(request) - expectedUserAgentString := fmt.Sprintf("Docker %s (%s) %s/%s (%s; %s; %s)", - dockerversion.Version, runtime.GOOS, aws.SDKName, aws.SDKVersion, runtime.Version(), runtime.GOOS, runtime.GOARCH) - userAgent := request.HTTPRequest.Header.Get("User-Agent") - if userAgent != expectedUserAgentString { - t.Errorf("Wrong User-Agent string, expected \"%s\" but was \"%s\"", - expectedUserAgentString, userAgent) - } -} - -func TestNewAWSLogsClientRegionDetect(t *testing.T) { - ctx := logger.Context{ - Config: map[string]string{}, - } - - mockMetadata := newMockMetadataClient() - newRegionFinder = func() regionFinder { - return mockMetadata - } - mockMetadata.regionResult <- ®ionResult{ - successResult: "us-east-1", - } - - _, err := newAWSLogsClient(ctx) - if err != nil { - t.Fatal(err) - } -} - -func TestCreateSuccess(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - } - mockClient.createLogStreamResult <- &createLogStreamResult{} - - err := stream.create() - - if err != nil { - t.Errorf("Received unexpected err: %v\n", err) - } - argument := <-mockClient.createLogStreamArgument - if argument.LogGroupName == nil { - t.Fatal("Expected non-nil 
LogGroupName") - } - if *argument.LogGroupName != groupName { - t.Errorf("Expected LogGroupName to be %s", groupName) - } - if argument.LogStreamName == nil { - t.Fatal("Expected non-nil LogGroupName") - } - if *argument.LogStreamName != streamName { - t.Errorf("Expected LogStreamName to be %s", streamName) - } -} - -func TestCreateError(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - } - mockClient.createLogStreamResult <- &createLogStreamResult{ - errorResult: errors.New("Error!"), - } - - err := stream.create() - - if err == nil { - t.Fatal("Expected non-nil err") - } -} - -func TestCreateAlreadyExists(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - } - mockClient.createLogStreamResult <- &createLogStreamResult{ - errorResult: awserr.New(resourceAlreadyExistsCode, "", nil), - } - - err := stream.create() - - if err != nil { - t.Fatal("Expected nil err") - } -} - -func TestPublishBatchSuccess(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - - events := []*cloudwatchlogs.InputLogEvent{ - { - Message: aws.String(logline), - }, - } - - stream.publishBatch(events) - if stream.sequenceToken == nil { - t.Fatal("Expected non-nil sequenceToken") - } - if *stream.sequenceToken != nextSequenceToken { - t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) - } - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if argument.SequenceToken == nil { - t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") - } - if *argument.SequenceToken != sequenceToken { - t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if argument.LogEvents[0] != events[0] { - t.Error("Expected event to equal input") - } -} - -func TestPublishBatchError(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - errorResult: errors.New("Error!"), - } - - events := []*cloudwatchlogs.InputLogEvent{ - { - Message: aws.String(logline), - }, - } - - stream.publishBatch(events) - if stream.sequenceToken == nil { - t.Fatal("Expected non-nil sequenceToken") - } - if *stream.sequenceToken != sequenceToken { - t.Errorf("Expected sequenceToken to be %s, but was %s", sequenceToken, *stream.sequenceToken) - } -} - -func TestPublishBatchInvalidSeqSuccess(t *testing.T) { - mockClient := newMockClientBuffered(2) - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - errorResult: awserr.New(invalidSequenceTokenCode, "use token token", nil), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: 
aws.String(nextSequenceToken), - }, - } - - events := []*cloudwatchlogs.InputLogEvent{ - { - Message: aws.String(logline), - }, - } - - stream.publishBatch(events) - if stream.sequenceToken == nil { - t.Fatal("Expected non-nil sequenceToken") - } - if *stream.sequenceToken != nextSequenceToken { - t.Errorf("Expected sequenceToken to be %s, but was %s", nextSequenceToken, *stream.sequenceToken) - } - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if argument.SequenceToken == nil { - t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") - } - if *argument.SequenceToken != sequenceToken { - t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if argument.LogEvents[0] != events[0] { - t.Error("Expected event to equal input") - } - - argument = <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if argument.SequenceToken == nil { - t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") - } - if *argument.SequenceToken != "token" { - t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", "token", *argument.SequenceToken) - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if argument.LogEvents[0] != events[0] { - t.Error("Expected event to equal input") - } -} - -func TestPublishBatchAlreadyAccepted(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - errorResult: awserr.New(dataAlreadyAcceptedCode, "use token token", nil), - } - - events := []*cloudwatchlogs.InputLogEvent{ - { - Message: aws.String(logline), - }, - } - - stream.publishBatch(events) - if stream.sequenceToken == nil { - t.Fatal("Expected non-nil sequenceToken") - } - if *stream.sequenceToken != "token" { - t.Errorf("Expected sequenceToken to be %s, but was %s", "token", *stream.sequenceToken) - } - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if argument.SequenceToken == nil { - t.Fatal("Expected non-nil PutLogEventsInput.SequenceToken") - } - if *argument.SequenceToken != sequenceToken { - t.Errorf("Expected PutLogEventsInput.SequenceToken to be %s, but was %s", sequenceToken, *argument.SequenceToken) - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if argument.LogEvents[0] != events[0] { - t.Error("Expected event to equal input") - } -} - -func TestCollectBatchSimple(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - ticks := make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go 
stream.collectBatch() - - stream.Log(&logger.Message{ - Line: []byte(logline), - Timestamp: time.Time{}, - }) - - ticks <- time.Time{} - stream.Close() - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if *argument.LogEvents[0].Message != logline { - t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) - } -} - -func TestCollectBatchTicker(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - ticks := make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go stream.collectBatch() - - stream.Log(&logger.Message{ - Line: []byte(logline + " 1"), - Timestamp: time.Time{}, - }) - stream.Log(&logger.Message{ - Line: []byte(logline + " 2"), - Timestamp: time.Time{}, - }) - - ticks <- time.Time{} - - // Verify first batch - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 2 { - t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) - } - if *argument.LogEvents[0].Message != logline+" 1" { - t.Errorf("Expected message to be %s but was %s", logline+" 1", *argument.LogEvents[0].Message) - } - if *argument.LogEvents[1].Message != logline+" 2" { - t.Errorf("Expected message to be %s but was %s", logline+" 2", *argument.LogEvents[0].Message) - } - - stream.Log(&logger.Message{ - Line: []byte(logline + " 3"), - Timestamp: time.Time{}, - }) - - ticks <- time.Time{} - argument = <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) - } - if *argument.LogEvents[0].Message != logline+" 3" { - t.Errorf("Expected message to be %s but was %s", logline+" 3", *argument.LogEvents[0].Message) - } - - stream.Close() - -} - -func TestCollectBatchClose(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - var ticks = make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go stream.collectBatch() - - stream.Log(&logger.Message{ - Line: []byte(logline), - Timestamp: time.Time{}, - }) - - // no ticks - stream.Close() - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 element, but contains %d", len(argument.LogEvents)) - } - if *argument.LogEvents[0].Message != logline { - 
t.Errorf("Expected message to be %s but was %s", logline, *argument.LogEvents[0].Message) - } -} - -func TestCollectBatchLineSplit(t *testing.T) { - mockClient := newMockClient() - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - var ticks = make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go stream.collectBatch() - - longline := strings.Repeat("A", maximumBytesPerEvent) - stream.Log(&logger.Message{ - Line: []byte(longline + "B"), - Timestamp: time.Time{}, - }) - - // no ticks - stream.Close() - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 2 { - t.Errorf("Expected LogEvents to contain 2 elements, but contains %d", len(argument.LogEvents)) - } - if *argument.LogEvents[0].Message != longline { - t.Errorf("Expected message to be %s but was %s", longline, *argument.LogEvents[0].Message) - } - if *argument.LogEvents[1].Message != "B" { - t.Errorf("Expected message to be %s but was %s", "B", *argument.LogEvents[1].Message) - } -} - -func TestCollectBatchMaxEvents(t *testing.T) { - mockClient := newMockClientBuffered(1) - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - var ticks = make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go stream.collectBatch() - - line := "A" - for i := 0; i <= maximumLogEventsPerPut; i++ { - stream.Log(&logger.Message{ - Line: []byte(line), - Timestamp: time.Time{}, - }) - } - - // no ticks - stream.Close() - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != maximumLogEventsPerPut { - t.Errorf("Expected LogEvents to contain %d elements, but contains %d", maximumLogEventsPerPut, len(argument.LogEvents)) - } - - argument = <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain %d elements, but contains %d", 1, len(argument.LogEvents)) - } -} - -func TestCollectBatchMaxTotalBytes(t *testing.T) { - mockClient := newMockClientBuffered(1) - stream := &logStream{ - client: mockClient, - logGroupName: groupName, - logStreamName: streamName, - sequenceToken: aws.String(sequenceToken), - messages: make(chan *logger.Message), - } - mockClient.putLogEventsResult <- &putLogEventsResult{ - successResult: &cloudwatchlogs.PutLogEventsOutput{ - NextSequenceToken: aws.String(nextSequenceToken), - }, - } - var ticks = make(chan time.Time) - newTicker = func(_ time.Duration) *time.Ticker { - return &time.Ticker{ - C: ticks, - } - } - - go stream.collectBatch() - - longline := strings.Repeat("A", maximumBytesPerPut) - stream.Log(&logger.Message{ - Line: []byte(longline + "B"), - Timestamp: 
time.Time{}, - }) - - // no ticks - stream.Close() - - argument := <-mockClient.putLogEventsArgument - if argument == nil { - t.Fatal("Expected non-nil PutLogEventsInput") - } - bytes := 0 - for _, event := range argument.LogEvents { - bytes += len(*event.Message) - } - if bytes > maximumBytesPerPut { - t.Errorf("Expected <= %d bytes but was %d", maximumBytesPerPut, bytes) - } - - argument = <-mockClient.putLogEventsArgument - if len(argument.LogEvents) != 1 { - t.Errorf("Expected LogEvents to contain 1 elements, but contains %d", len(argument.LogEvents)) - } - message := *argument.LogEvents[0].Message - if message[len(message)-1:] != "B" { - t.Errorf("Expected message to be %s but was %s", "B", message[len(message)-1:]) - } -} diff --git a/daemon/logger/awslogs/cwlogsiface_mock_test.go b/daemon/logger/awslogs/cwlogsiface_mock_test.go deleted file mode 100644 index b768a3d7ec..0000000000 --- a/daemon/logger/awslogs/cwlogsiface_mock_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package awslogs - -import "github.com/aws/aws-sdk-go/service/cloudwatchlogs" - -type mockcwlogsclient struct { - createLogStreamArgument chan *cloudwatchlogs.CreateLogStreamInput - createLogStreamResult chan *createLogStreamResult - putLogEventsArgument chan *cloudwatchlogs.PutLogEventsInput - putLogEventsResult chan *putLogEventsResult -} - -type createLogStreamResult struct { - successResult *cloudwatchlogs.CreateLogStreamOutput - errorResult error -} - -type putLogEventsResult struct { - successResult *cloudwatchlogs.PutLogEventsOutput - errorResult error -} - -func newMockClient() *mockcwlogsclient { - return &mockcwlogsclient{ - createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, 1), - createLogStreamResult: make(chan *createLogStreamResult, 1), - putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, 1), - putLogEventsResult: make(chan *putLogEventsResult, 1), - } -} - -func newMockClientBuffered(buflen int) *mockcwlogsclient { - return &mockcwlogsclient{ - createLogStreamArgument: make(chan *cloudwatchlogs.CreateLogStreamInput, buflen), - createLogStreamResult: make(chan *createLogStreamResult, buflen), - putLogEventsArgument: make(chan *cloudwatchlogs.PutLogEventsInput, buflen), - putLogEventsResult: make(chan *putLogEventsResult, buflen), - } -} - -func (m *mockcwlogsclient) CreateLogStream(input *cloudwatchlogs.CreateLogStreamInput) (*cloudwatchlogs.CreateLogStreamOutput, error) { - m.createLogStreamArgument <- input - output := <-m.createLogStreamResult - return output.successResult, output.errorResult -} - -func (m *mockcwlogsclient) PutLogEvents(input *cloudwatchlogs.PutLogEventsInput) (*cloudwatchlogs.PutLogEventsOutput, error) { - events := make([]*cloudwatchlogs.InputLogEvent, len(input.LogEvents)) - copy(events, input.LogEvents) - m.putLogEventsArgument <- &cloudwatchlogs.PutLogEventsInput{ - LogEvents: events, - SequenceToken: input.SequenceToken, - LogGroupName: input.LogGroupName, - LogStreamName: input.LogStreamName, - } - output := <-m.putLogEventsResult - return output.successResult, output.errorResult -} - -type mockmetadataclient struct { - regionResult chan *regionResult -} - -type regionResult struct { - successResult string - errorResult error -} - -func newMockMetadataClient() *mockmetadataclient { - return &mockmetadataclient{ - regionResult: make(chan *regionResult, 1), - } -} - -func (m *mockmetadataclient) Region() (string, error) { - output := <-m.regionResult - return output.successResult, output.errorResult -} diff --git a/daemon/logger/context.go 
b/daemon/logger/context.go deleted file mode 100644 index 2b0e071f66..0000000000 --- a/daemon/logger/context.go +++ /dev/null @@ -1,113 +0,0 @@ -package logger - -import ( - "fmt" - "os" - "strings" - "time" -) - -// Context provides enough information for a logging driver to do its function. -type Context struct { - Config map[string]string - ContainerID string - ContainerName string - ContainerEntrypoint string - ContainerArgs []string - ContainerImageID string - ContainerImageName string - ContainerCreated time.Time - ContainerEnv []string - ContainerLabels map[string]string - LogPath string - DaemonName string -} - -// ExtraAttributes returns the user-defined extra attributes (labels, -// environment variables) in key-value format. This can be used by log drivers -// that support metadata to add more context to a log. -func (ctx *Context) ExtraAttributes(keyMod func(string) string) map[string]string { - extra := make(map[string]string) - labels, ok := ctx.Config["labels"] - if ok && len(labels) > 0 { - for _, l := range strings.Split(labels, ",") { - if v, ok := ctx.ContainerLabels[l]; ok { - if keyMod != nil { - l = keyMod(l) - } - extra[l] = v - } - } - } - - env, ok := ctx.Config["env"] - if ok && len(env) > 0 { - envMapping := make(map[string]string) - for _, e := range ctx.ContainerEnv { - if kv := strings.SplitN(e, "=", 2); len(kv) == 2 { - envMapping[kv[0]] = kv[1] - } - } - for _, l := range strings.Split(env, ",") { - if v, ok := envMapping[l]; ok { - if keyMod != nil { - l = keyMod(l) - } - extra[l] = v - } - } - } - - return extra -} - -// Hostname returns the hostname from the underlying OS. -func (ctx *Context) Hostname() (string, error) { - hostname, err := os.Hostname() - if err != nil { - return "", fmt.Errorf("logger: can not resolve hostname: %v", err) - } - return hostname, nil -} - -// Command returns the command that the container being logged was -// started with. The Entrypoint is prepended to the container -// arguments. -func (ctx *Context) Command() string { - terms := []string{ctx.ContainerEntrypoint} - for _, arg := range ctx.ContainerArgs { - terms = append(terms, arg) - } - command := strings.Join(terms, " ") - return command -} - -// ID Returns the Container ID shortened to 12 characters. -func (ctx *Context) ID() string { - return ctx.ContainerID[:12] -} - -// FullID is an alias of ContainerID. -func (ctx *Context) FullID() string { - return ctx.ContainerID -} - -// Name returns the ContainerName without a preceding '/'. -func (ctx *Context) Name() string { - return ctx.ContainerName[1:] -} - -// ImageID returns the ContainerImageID shortened to 12 characters. -func (ctx *Context) ImageID() string { - return ctx.ContainerImageID[:12] -} - -// ImageFullID is an alias of ContainerImageID. -func (ctx *Context) ImageFullID() string { - return ctx.ContainerImageID -} - -// ImageName is an alias of ContainerImageName -func (ctx *Context) ImageName() string { - return ctx.ContainerImageName -} diff --git a/daemon/logger/copier.go b/daemon/logger/copier.go deleted file mode 100644 index a54f23f28d..0000000000 --- a/daemon/logger/copier.go +++ /dev/null @@ -1,81 +0,0 @@ -package logger - -import ( - "bufio" - "bytes" - "io" - "sync" - "time" - - "github.com/Sirupsen/logrus" -) - -// Copier can copy logs from specified sources to Logger and attach Timestamp. 
-// Writes are concurrent, so you need to implement some synchronization in your logger -type Copier struct { - // srcs is a map of name -> reader pairs, for example "stdout", "stderr" - srcs map[string]io.Reader - dst Logger - copyJobs sync.WaitGroup - closeOnce sync.Once - closed chan struct{} -} - -// NewCopier creates a new Copier -func NewCopier(srcs map[string]io.Reader, dst Logger) *Copier { - return &Copier{ - srcs: srcs, - dst: dst, - closed: make(chan struct{}), - } -} - -// Run starts copying logs -func (c *Copier) Run() { - for src, w := range c.srcs { - c.copyJobs.Add(1) - go c.copySrc(src, w) - } -} - -func (c *Copier) copySrc(name string, src io.Reader) { - defer c.copyJobs.Done() - reader := bufio.NewReader(src) - - for { - select { - case <-c.closed: - return - default: - line, err := reader.ReadBytes('\n') - line = bytes.TrimSuffix(line, []byte{'\n'}) - - // ReadBytes can return full or partial output even when it failed. - // e.g. it can return a full entry and EOF. - if err == nil || len(line) > 0 { - if logErr := c.dst.Log(&Message{Line: line, Source: name, Timestamp: time.Now().UTC()}); logErr != nil { - logrus.Errorf("Failed to log msg %q for logger %s: %s", line, c.dst.Name(), logErr) - } - } - - if err != nil { - if err != io.EOF { - logrus.Errorf("Error scanning log stream: %s", err) - } - return - } - } - } -} - -// Wait waits until all copying is done -func (c *Copier) Wait() { - c.copyJobs.Wait() -} - -// Close closes the copier -func (c *Copier) Close() { - c.closeOnce.Do(func() { - close(c.closed) - }) -} diff --git a/daemon/logger/copier_test.go b/daemon/logger/copier_test.go deleted file mode 100644 index 69225e9a7b..0000000000 --- a/daemon/logger/copier_test.go +++ /dev/null @@ -1,118 +0,0 @@ -package logger - -import ( - "bytes" - "encoding/json" - "io" - "sync" - "testing" - "time" -) - -type TestLoggerJSON struct { - *json.Encoder - mu sync.Mutex - delay time.Duration -} - -func (l *TestLoggerJSON) Log(m *Message) error { - if l.delay > 0 { - time.Sleep(l.delay) - } - l.mu.Lock() - defer l.mu.Unlock() - return l.Encode(m) -} - -func (l *TestLoggerJSON) Close() error { return nil } - -func (l *TestLoggerJSON) Name() string { return "json" } - -func TestCopier(t *testing.T) { - stdoutLine := "Line that thinks that it is log line from docker stdout" - stderrLine := "Line that thinks that it is log line from docker stderr" - var stdout bytes.Buffer - var stderr bytes.Buffer - for i := 0; i < 30; i++ { - if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { - t.Fatal(err) - } - if _, err := stderr.WriteString(stderrLine + "\n"); err != nil { - t.Fatal(err) - } - } - - var jsonBuf bytes.Buffer - - jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf)} - - c := NewCopier( - map[string]io.Reader{ - "stdout": &stdout, - "stderr": &stderr, - }, - jsonLog) - c.Run() - wait := make(chan struct{}) - go func() { - c.Wait() - close(wait) - }() - select { - case <-time.After(1 * time.Second): - t.Fatal("Copier failed to do its work in 1 second") - case <-wait: - } - dec := json.NewDecoder(&jsonBuf) - for { - var msg Message - if err := dec.Decode(&msg); err != nil { - if err == io.EOF { - break - } - t.Fatal(err) - } - if msg.Source != "stdout" && msg.Source != "stderr" { - t.Fatalf("Wrong Source: %q, should be %q or %q", msg.Source, "stdout", "stderr") - } - if msg.Source == "stdout" { - if string(msg.Line) != stdoutLine { - t.Fatalf("Wrong Line: %q, expected %q", msg.Line, stdoutLine) - } - } - if msg.Source == "stderr" { - if string(msg.Line) != stderrLine { -
t.Fatalf("Wrong Line: %q, expected %q", msg.Line, stderrLine) - } - } - } -} - -func TestCopierSlow(t *testing.T) { - stdoutLine := "Line that thinks that it is log line from docker stdout" - var stdout bytes.Buffer - for i := 0; i < 30; i++ { - if _, err := stdout.WriteString(stdoutLine + "\n"); err != nil { - t.Fatal(err) - } - } - - var jsonBuf bytes.Buffer - //encoder := &encodeCloser{Encoder: json.NewEncoder(&jsonBuf)} - jsonLog := &TestLoggerJSON{Encoder: json.NewEncoder(&jsonBuf), delay: 100 * time.Millisecond} - - c := NewCopier(map[string]io.Reader{"stdout": &stdout}, jsonLog) - c.Run() - wait := make(chan struct{}) - go func() { - c.Wait() - close(wait) - }() - <-time.After(150 * time.Millisecond) - c.Close() - select { - case <-time.After(200 * time.Millisecond): - t.Fatalf("failed to exit in time after the copier is closed") - case <-wait: - } -} diff --git a/daemon/logger/etwlogs/etwlogs_windows.go b/daemon/logger/etwlogs/etwlogs_windows.go deleted file mode 100644 index ab3ff1f2bd..0000000000 --- a/daemon/logger/etwlogs/etwlogs_windows.go +++ /dev/null @@ -1,183 +0,0 @@ -// Package etwlogs provides a log driver for forwarding container logs -// as ETW events.(ETW stands for Event Tracing for Windows) -// A client can then create an ETW listener to listen for events that are sent -// by the ETW provider that we register, using the provider's GUID "a3693192-9ed6-46d2-a981-f8226c8363bd". -// Here is an example of how to do this using the logman utility: -// 1. logman start -ets DockerContainerLogs -p {a3693192-9ed6-46d2-a981-f8226c8363bd} 0 0 -o trace.etl -// 2. Run container(s) and generate log messages -// 3. logman stop -ets DockerContainerLogs -// 4. You can then convert the etl log file to XML using: tracerpt -y trace.etl -// -// Each container log message generates an ETW event that also contains: -// the container name and ID, the timestamp, and the stream type. -package etwlogs - -import ( - "errors" - "fmt" - "sync" - "syscall" - "unsafe" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" -) - -type etwLogs struct { - containerName string - imageName string - containerID string - imageID string -} - -const ( - name = "etwlogs" - win32CallSuccess = 0 -) - -var win32Lib *syscall.DLL -var providerHandle syscall.Handle -var refCount int -var mu sync.Mutex - -func init() { - providerHandle = syscall.InvalidHandle - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } -} - -// New creates a new etwLogs logger for the given container and registers the EWT provider. -func New(ctx logger.Context) (logger.Logger, error) { - if err := registerETWProvider(); err != nil { - return nil, err - } - logrus.Debugf("logging driver etwLogs configured for container: %s.", ctx.ContainerID) - - return &etwLogs{ - containerName: fixContainerName(ctx.ContainerName), - imageName: ctx.ContainerImageName, - containerID: ctx.ContainerID, - imageID: ctx.ContainerImageID, - }, nil -} - -// Log logs the message to the ETW stream. -func (etwLogger *etwLogs) Log(msg *logger.Message) error { - if providerHandle == syscall.InvalidHandle { - // This should never be hit, if it is, it indicates a programming error. - errorMessage := "ETWLogs cannot log the message, because the event provider has not been registered." - logrus.Error(errorMessage) - return errors.New(errorMessage) - } - return callEventWriteString(createLogMessage(etwLogger, msg)) -} - -// Close closes the logger by unregistering the ETW provider. 
-func (etwLogger *etwLogs) Close() error { - unregisterETWProvider() - return nil -} - -func (etwLogger *etwLogs) Name() string { - return name -} - -func createLogMessage(etwLogger *etwLogs, msg *logger.Message) string { - return fmt.Sprintf("container_name: %s, image_name: %s, container_id: %s, image_id: %s, source: %s, log: %s", - etwLogger.containerName, - etwLogger.imageName, - etwLogger.containerID, - etwLogger.imageID, - msg.Source, - msg.Line) -} - -// fixContainerName removes the initial '/' from the container name. -func fixContainerName(cntName string) string { - if len(cntName) > 0 && cntName[0] == '/' { - cntName = cntName[1:] - } - return cntName -} - -func registerETWProvider() error { - mu.Lock() - defer mu.Unlock() - if refCount == 0 { - var err error - if win32Lib, err = syscall.LoadDLL("Advapi32.dll"); err != nil { - return err - } - if err = callEventRegister(); err != nil { - win32Lib.Release() - win32Lib = nil - return err - } - } - - refCount++ - return nil -} - -func unregisterETWProvider() { - mu.Lock() - defer mu.Unlock() - if refCount == 1 { - if callEventUnregister() { - refCount-- - providerHandle = syscall.InvalidHandle - win32Lib.Release() - win32Lib = nil - } - // Not returning an error if EventUnregister fails, because etwLogs will continue to work - } else { - refCount-- - } -} - -func callEventRegister() error { - proc, err := win32Lib.FindProc("EventRegister") - if err != nil { - return err - } - // The provider's GUID is {a3693192-9ed6-46d2-a981-f8226c8363bd} - guid := syscall.GUID{ - 0xa3693192, 0x9ed6, 0x46d2, - [8]byte{0xa9, 0x81, 0xf8, 0x22, 0x6c, 0x83, 0x63, 0xbd}, - } - - ret, _, _ := proc.Call(uintptr(unsafe.Pointer(&guid)), 0, 0, uintptr(unsafe.Pointer(&providerHandle))) - if ret != win32CallSuccess { - errorMessage := fmt.Sprintf("Failed to register ETW provider. Error: %d", ret) - logrus.Error(errorMessage) - return errors.New(errorMessage) - } - return nil -} - -func callEventWriteString(message string) error { - proc, err := win32Lib.FindProc("EventWriteString") - if err != nil { - return err - } - ret, _, _ := proc.Call(uintptr(providerHandle), 0, 0, uintptr(unsafe.Pointer(syscall.StringToUTF16Ptr(message)))) - if ret != win32CallSuccess { - errorMessage := fmt.Sprintf("ETWLogs provider failed to log message. Error: %d", ret) - logrus.Error(errorMessage) - return errors.New(errorMessage) - } - return nil -} - -func callEventUnregister() bool { - proc, err := win32Lib.FindProc("EventUnregister") - if err != nil { - return false - } - ret, _, _ := proc.Call(uintptr(providerHandle)) - if ret != win32CallSuccess { - return false - } - return true -} diff --git a/daemon/logger/factory.go b/daemon/logger/factory.go deleted file mode 100644 index 9cf716b09a..0000000000 --- a/daemon/logger/factory.go +++ /dev/null @@ -1,104 +0,0 @@ -package logger - -import ( - "fmt" - "sync" -) - -// Creator builds a logging driver instance with given context. -type Creator func(Context) (Logger, error) - -// LogOptValidator checks the options specific to the underlying -// logging implementation. 
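To make the contract concrete, a minimal conforming validator for a hypothetical "example" driver might look as follows (the name validateExampleOpts and the example-address option are illustrative; the per-driver ValidateLogOpt functions elsewhere in this patch follow the same shape):

func validateExampleOpts(cfg map[string]string) error {
        for key := range cfg {
                switch key {
                case "example-address", "tag", "labels", "env":
                        // accepted options
                default:
                        return fmt.Errorf("unknown log opt '%s' for example log driver", key)
                }
        }
        return nil
}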
-type LogOptValidator func(cfg map[string]string) error - -type logdriverFactory struct { - registry map[string]Creator - optValidator map[string]LogOptValidator - m sync.Mutex -} - -func (lf *logdriverFactory) register(name string, c Creator) error { - if lf.driverRegistered(name) { - return fmt.Errorf("logger: log driver named '%s' is already registered", name) - } - - lf.m.Lock() - lf.registry[name] = c - lf.m.Unlock() - return nil -} - -func (lf *logdriverFactory) driverRegistered(name string) bool { - lf.m.Lock() - _, ok := lf.registry[name] - lf.m.Unlock() - return ok -} - -func (lf *logdriverFactory) registerLogOptValidator(name string, l LogOptValidator) error { - lf.m.Lock() - defer lf.m.Unlock() - - if _, ok := lf.optValidator[name]; ok { - return fmt.Errorf("logger: log validator named '%s' is already registered", name) - } - lf.optValidator[name] = l - return nil -} - -func (lf *logdriverFactory) get(name string) (Creator, error) { - lf.m.Lock() - defer lf.m.Unlock() - - c, ok := lf.registry[name] - if !ok { - return c, fmt.Errorf("logger: no log driver named '%s' is registered", name) - } - return c, nil -} - -func (lf *logdriverFactory) getLogOptValidator(name string) LogOptValidator { - lf.m.Lock() - defer lf.m.Unlock() - - c, _ := lf.optValidator[name] - return c -} - -var factory = &logdriverFactory{registry: make(map[string]Creator), optValidator: make(map[string]LogOptValidator)} // global factory instance - -// RegisterLogDriver registers the given logging driver builder with given logging -// driver name. -func RegisterLogDriver(name string, c Creator) error { - return factory.register(name, c) -} - -// RegisterLogOptValidator registers the logging option validator with -// the given logging driver name. -func RegisterLogOptValidator(name string, l LogOptValidator) error { - return factory.registerLogOptValidator(name, l) -} - -// GetLogDriver provides the logging driver builder for a logging driver name. -func GetLogDriver(name string) (Creator, error) { - return factory.get(name) -} - -// ValidateLogOpts checks the options for the given log driver. The -// options supported are specific to the LogDriver implementation. -func ValidateLogOpts(name string, cfg map[string]string) error { - if name == "none" { - return nil - } - - if !factory.driverRegistered(name) { - return fmt.Errorf("logger: no log driver named '%s' is registered", name) - } - - validator := factory.getLogOptValidator(name) - if validator != nil { - return validator(cfg) - } - return nil -} diff --git a/daemon/logger/fluentd/fluentd.go b/daemon/logger/fluentd/fluentd.go deleted file mode 100644 index 137ab07696..0000000000 --- a/daemon/logger/fluentd/fluentd.go +++ /dev/null @@ -1,200 +0,0 @@ -// Package fluentd provides the log driver for forwarding server logs -// to fluentd endpoints. 
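As a hedged usage sketch (option keys taken from the constants defined below; the host name and values are illustrative), a configuration for this driver can be checked with the package's own validator before the daemon instantiates it:

cfg := map[string]string{
        "fluentd-address":       "fluentdhost:24224",
        "fluentd-retry-wait":    "1s",   // New parses this with time.ParseDuration
        "fluentd-async-connect": "true", // New parses this with strconv.ParseBool
}
if err := ValidateLogOpt(cfg); err != nil {
        // unknown key, or an address that fails parseAddress
}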
-package fluentd - -import ( - "fmt" - "math" - "net" - "strconv" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/go-units" - "github.com/fluent/fluent-logger-golang/fluent" -) - -type fluentd struct { - tag string - containerID string - containerName string - writer *fluent.Fluent - extra map[string]string -} - -const ( - name = "fluentd" - - defaultHost = "127.0.0.1" - defaultPort = 24224 - defaultBufferLimit = 1024 * 1024 - defaultTagPrefix = "docker" - - // logger tries to reconnect 2**32 - 1 times - // failed (and panic) after 204 years [ 1.5 ** (2**32 - 1) - 1 seconds] - defaultRetryWait = 1000 - defaultTimeout = 3 * time.Second - defaultMaxRetries = math.MaxInt32 - defaultReconnectWaitIncreRate = 1.5 - - addressKey = "fluentd-address" - bufferLimitKey = "fluentd-buffer-limit" - retryWaitKey = "fluentd-retry-wait" - maxRetriesKey = "fluentd-max-retries" - asyncConnectKey = "fluentd-async-connect" -) - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a fluentd logger using the configuration passed in on -// the context. The supported context configuration variable is -// fluentd-address. -func New(ctx logger.Context) (logger.Logger, error) { - host, port, err := parseAddress(ctx.Config[addressKey]) - if err != nil { - return nil, err - } - - tag, err := loggerutils.ParseLogTag(ctx, "{{.DaemonName}}.{{.ID}}") - if err != nil { - return nil, err - } - - extra := ctx.ExtraAttributes(nil) - - bufferLimit := defaultBufferLimit - if ctx.Config[bufferLimitKey] != "" { - bl64, err := units.RAMInBytes(ctx.Config[bufferLimitKey]) - if err != nil { - return nil, err - } - bufferLimit = int(bl64) - } - - retryWait := defaultRetryWait - if ctx.Config[retryWaitKey] != "" { - rwd, err := time.ParseDuration(ctx.Config[retryWaitKey]) - if err != nil { - return nil, err - } - retryWait = int(rwd.Seconds() * 1000) - } - - maxRetries := defaultMaxRetries - if ctx.Config[maxRetriesKey] != "" { - mr64, err := strconv.ParseUint(ctx.Config[maxRetriesKey], 10, strconv.IntSize) - if err != nil { - return nil, err - } - maxRetries = int(mr64) - } - - asyncConnect := false - if ctx.Config[asyncConnectKey] != "" { - if asyncConnect, err = strconv.ParseBool(ctx.Config[asyncConnectKey]); err != nil { - return nil, err - } - } - - fluentConfig := fluent.Config{ - FluentPort: port, - FluentHost: host, - BufferLimit: bufferLimit, - RetryWait: retryWait, - MaxRetry: maxRetries, - AsyncConnect: asyncConnect, - } - - logrus.WithField("container", ctx.ContainerID).WithField("config", fluentConfig). - Debug("logging driver fluentd configured") - - log, err := fluent.New(fluentConfig) - if err != nil { - return nil, err - } - return &fluentd{ - tag: tag, - containerID: ctx.ContainerID, - containerName: ctx.ContainerName, - writer: log, - extra: extra, - }, nil -} - -func (f *fluentd) Log(msg *logger.Message) error { - data := map[string]string{ - "container_id": f.containerID, - "container_name": f.containerName, - "source": msg.Source, - "log": string(msg.Line), - } - for k, v := range f.extra { - data[k] = v - } - // fluent-logger-golang buffers logs from failures and disconnections, - // and these are transferred again automatically. 
- return f.writer.PostWithTime(f.tag, msg.Timestamp, data) -} - -func (f *fluentd) Close() error { - return f.writer.Close() -} - -func (f *fluentd) Name() string { - return name -} - -// ValidateLogOpt looks for fluentd specific log option fluentd-address. -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "env": - case "labels": - case "tag": - case addressKey: - case bufferLimitKey: - case retryWaitKey: - case maxRetriesKey: - case asyncConnectKey: - // Accepted - default: - return fmt.Errorf("unknown log opt '%s' for fluentd log driver", key) - } - } - - if _, _, err := parseAddress(cfg["fluentd-address"]); err != nil { - return err - } - - return nil -} - -func parseAddress(address string) (string, int, error) { - if address == "" { - return defaultHost, defaultPort, nil - } - - host, port, err := net.SplitHostPort(address) - if err != nil { - if !strings.Contains(err.Error(), "missing port in address") { - return "", 0, fmt.Errorf("invalid fluentd-address %s: %s", address, err) - } - return host, defaultPort, nil - } - - portnum, err := strconv.Atoi(port) - if err != nil { - return "", 0, fmt.Errorf("invalid fluentd-address %s: %s", address, err) - } - return host, portnum, nil -} diff --git a/daemon/logger/gcplogs/gcplogging.go b/daemon/logger/gcplogs/gcplogging.go deleted file mode 100644 index 781642bb50..0000000000 --- a/daemon/logger/gcplogs/gcplogging.go +++ /dev/null @@ -1,191 +0,0 @@ -package gcplogs - -import ( - "fmt" - "sync" - "sync/atomic" - "time" - - "github.com/docker/docker/daemon/logger" - - "github.com/Sirupsen/logrus" - "golang.org/x/net/context" - "google.golang.org/cloud/compute/metadata" - "google.golang.org/cloud/logging" -) - -const ( - name = "gcplogs" - - projectOptKey = "gcp-project" - logLabelsKey = "labels" - logEnvKey = "env" - logCmdKey = "gcp-log-cmd" -) - -var ( - // The number of logs the gcplogs driver has dropped. - droppedLogs uint64 - - onGCE bool - - // instance metadata populated from the metadata server if available - projectID string - zone string - instanceName string - instanceID string -) - -func init() { - - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - - if err := logger.RegisterLogOptValidator(name, ValidateLogOpts); err != nil { - logrus.Fatal(err) - } -} - -type gcplogs struct { - client *logging.Client - instance *instanceInfo - container *containerInfo -} - -type dockerLogEntry struct { - Instance *instanceInfo `json:"instance,omitempty"` - Container *containerInfo `json:"container,omitempty"` - Data string `json:"data,omitempty"` -} - -type instanceInfo struct { - Zone string `json:"zone,omitempty"` - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` -} - -type containerInfo struct { - Name string `json:"name,omitempty"` - ID string `json:"id,omitempty"` - ImageName string `json:"imageName,omitempty"` - ImageID string `json:"imageId,omitempty"` - Created time.Time `json:"created,omitempty"` - Command string `json:"command,omitempty"` - Metadata map[string]string `json:"metadata,omitempty"` -} - -var initGCPOnce sync.Once - -func initGCP() { - initGCPOnce.Do(func() { - onGCE = metadata.OnGCE() - if onGCE { - // These will fail on instances if the metadata service is - // down or the client is compiled with an API version that - // has been removed. 
Since these are not vital, let's ignore - // them and mark their fields in the dockerLogEntry struct as ,omitempty - projectID, _ = metadata.ProjectID() - zone, _ = metadata.Zone() - instanceName, _ = metadata.InstanceName() - instanceID, _ = metadata.InstanceID() - } - }) -} - -// New creates a new logger that logs to Google Cloud Logging using the application -// default credentials. -// -// See https://developers.google.com/identity/protocols/application-default-credentials -func New(ctx logger.Context) (logger.Logger, error) { - initGCP() - - var project string - if projectID != "" { - project = projectID - } - if projectID, found := ctx.Config[projectOptKey]; found { - project = projectID - } - if project == "" { - return nil, fmt.Errorf("No project was specified and the project could not be read from the metadata server. Please specify a project") - } - - c, err := logging.NewClient(context.Background(), project, "gcplogs-docker-driver") - if err != nil { - return nil, err - } - - if err := c.Ping(); err != nil { - return nil, fmt.Errorf("unable to connect or authenticate with Google Cloud Logging: %v", err) - } - - l := &gcplogs{ - client: c, - container: &containerInfo{ - Name: ctx.ContainerName, - ID: ctx.ContainerID, - ImageName: ctx.ContainerImageName, - ImageID: ctx.ContainerImageID, - Created: ctx.ContainerCreated, - Metadata: ctx.ExtraAttributes(nil), - }, - } - - if ctx.Config[logCmdKey] == "true" { - l.container.Command = ctx.Command() - } - - if onGCE { - l.instance = &instanceInfo{ - Zone: zone, - Name: instanceName, - ID: instanceID, - } - } - - // The logger "overflows" at a rate of 10,000 logs per second and this - // overflow func is called. We want to surface the error to the user - // without overly spamming /var/log/docker.log so we log the first time - // we overflow and every 1000th time after. - c.Overflow = func(_ *logging.Client, _ logging.Entry) error { - if i := atomic.AddUint64(&droppedLogs, 1); i%1000 == 1 { - logrus.Errorf("gcplogs driver has dropped %v logs", i) - } - return nil - } - - return l, nil -} - -// ValidateLogOpts validates the opts passed to the gcplogs driver. It accepts -// the gcp-project, labels, env, and gcp-log-cmd options. -func ValidateLogOpts(cfg map[string]string) error { - for k := range cfg { - switch k { - case projectOptKey, logLabelsKey, logEnvKey, logCmdKey: - default: - return fmt.Errorf("%q is not a valid option for the gcplogs driver", k) - } - } - return nil -} - -func (l *gcplogs) Log(m *logger.Message) error { - return l.client.Log(logging.Entry{ - Time: m.Timestamp, - Payload: &dockerLogEntry{ - Instance: l.instance, - Container: l.container, - Data: string(m.Line), - }, - }) -} - -func (l *gcplogs) Close() error { - return l.client.Flush() -} - -func (l *gcplogs) Name() string { - return name -} diff --git a/daemon/logger/gelf/gelf.go b/daemon/logger/gelf/gelf.go deleted file mode 100644 index 8cc77ce151..0000000000 --- a/daemon/logger/gelf/gelf.go +++ /dev/null @@ -1,209 +0,0 @@ -// +build linux - -// Package gelf provides the log driver for forwarding server logs to -// endpoints that support the Graylog Extended Log Format.
-package gelf - -import ( - "bytes" - "compress/flate" - "encoding/json" - "fmt" - "net" - "net/url" - "strconv" - "time" - - "github.com/Graylog2/go-gelf/gelf" - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/urlutil" -) - -const name = "gelf" - -type gelfLogger struct { - writer *gelf.Writer - ctx logger.Context - hostname string - rawExtra json.RawMessage -} - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a gelf logger using the configuration passed in on the -// context. The supported context configuration variable is gelf-address. -func New(ctx logger.Context) (logger.Logger, error) { - // parse gelf address - address, err := parseAddress(ctx.Config["gelf-address"]) - if err != nil { - return nil, err - } - - // collect extra data for GELF message - hostname, err := ctx.Hostname() - if err != nil { - return nil, fmt.Errorf("gelf: cannot access hostname to set source field") - } - - // remove trailing slash from container name - containerName := bytes.TrimLeft([]byte(ctx.ContainerName), "/") - - // parse log tag - tag, err := loggerutils.ParseLogTag(ctx, "") - if err != nil { - return nil, err - } - - extra := map[string]interface{}{ - "_container_id": ctx.ContainerID, - "_container_name": string(containerName), - "_image_id": ctx.ContainerImageID, - "_image_name": ctx.ContainerImageName, - "_command": ctx.Command(), - "_tag": tag, - "_created": ctx.ContainerCreated, - } - - extraAttrs := ctx.ExtraAttributes(func(key string) string { - if key[0] == '_' { - return key - } - return "_" + key - }) - for k, v := range extraAttrs { - extra[k] = v - } - - rawExtra, err := json.Marshal(extra) - if err != nil { - return nil, err - } - - // create new gelfWriter - gelfWriter, err := gelf.NewWriter(address) - if err != nil { - return nil, fmt.Errorf("gelf: cannot connect to GELF endpoint: %s %v", address, err) - } - - if v, ok := ctx.Config["gelf-compression-type"]; ok { - switch v { - case "gzip": - gelfWriter.CompressionType = gelf.CompressGzip - case "zlib": - gelfWriter.CompressionType = gelf.CompressZlib - case "none": - gelfWriter.CompressionType = gelf.CompressNone - default: - return nil, fmt.Errorf("gelf: invalid compression type %q", v) - } - } - - if v, ok := ctx.Config["gelf-compression-level"]; ok { - val, err := strconv.Atoi(v) - if err != nil { - return nil, fmt.Errorf("gelf: invalid compression level %s, err %v", v, err) - } - gelfWriter.CompressionLevel = val - } - - return &gelfLogger{ - writer: gelfWriter, - ctx: ctx, - hostname: hostname, - rawExtra: rawExtra, - }, nil -} - -func (s *gelfLogger) Log(msg *logger.Message) error { - level := gelf.LOG_INFO - if msg.Source == "stderr" { - level = gelf.LOG_ERR - } - - m := gelf.Message{ - Version: "1.1", - Host: s.hostname, - Short: string(msg.Line), - TimeUnix: float64(msg.Timestamp.UnixNano()/int64(time.Millisecond)) / 1000.0, - Level: level, - RawExtra: s.rawExtra, - } - - if err := s.writer.WriteMessage(&m); err != nil { - return fmt.Errorf("gelf: cannot send GELF message: %v", err) - } - return nil -} - -func (s *gelfLogger) Close() error { - return s.writer.Close() -} - -func (s *gelfLogger) Name() string { - return name -} - -// ValidateLogOpt looks for gelf specific log option gelf-address. 
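A hedged sketch of a configuration this validator accepts, matching the handling in the code above (the hostname is illustrative):

cfg := map[string]string{
        "gelf-address":           "udp://graylog.example.com:12201", // must be udp://host:port
        "gelf-compression-type":  "gzip",                            // gzip, zlib or none
        "gelf-compression-level": "6",                               // flate levels -1 through 9
}
err := ValidateLogOpt(cfg) // nil for the values shown here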
-func ValidateLogOpt(cfg map[string]string) error { - for key, val := range cfg { - switch key { - case "gelf-address": - case "tag": - case "labels": - case "env": - case "gelf-compression-level": - i, err := strconv.Atoi(val) - if err != nil || i < flate.DefaultCompression || i > flate.BestCompression { - return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) - } - case "gelf-compression-type": - switch val { - case "gzip", "zlib", "none": - default: - return fmt.Errorf("unknown value %q for log opt %q for gelf log driver", val, key) - } - default: - return fmt.Errorf("unknown log opt %q for gelf log driver", key) - } - } - - if _, err := parseAddress(cfg["gelf-address"]); err != nil { - return err - } - - return nil -} - -func parseAddress(address string) (string, error) { - if address == "" { - return "", nil - } - if !urlutil.IsTransportURL(address) { - return "", fmt.Errorf("gelf-address should be in form proto://address, got %v", address) - } - url, err := url.Parse(address) - if err != nil { - return "", err - } - - // we support only udp - if url.Scheme != "udp" { - return "", fmt.Errorf("gelf: endpoint needs to be UDP") - } - - // get host and port - if _, _, err = net.SplitHostPort(url.Host); err != nil { - return "", fmt.Errorf("gelf: please provide gelf-address as udp://host:port") - } - - return url.Host, nil -} diff --git a/daemon/logger/gelf/gelf_unsupported.go b/daemon/logger/gelf/gelf_unsupported.go deleted file mode 100644 index 266f73b18b..0000000000 --- a/daemon/logger/gelf/gelf_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package gelf diff --git a/daemon/logger/journald/journald.go b/daemon/logger/journald/journald.go deleted file mode 100644 index 748dd8b24a..0000000000 --- a/daemon/logger/journald/journald.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build linux - -// Package journald provides the log driver for forwarding server logs -// to endpoints that receive the systemd format. -package journald - -import ( - "fmt" - "strings" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/coreos/go-systemd/journal" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" -) - -const name = "journald" - -type journald struct { - vars map[string]string // additional variables and values to send to the journal along with the log message - readers readerList -} - -type readerList struct { - mu sync.Mutex - readers map[*logger.LogWatcher]*logger.LogWatcher -} - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, validateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a journald logger using the configuration passed in on -// the context. -func New(ctx logger.Context) (logger.Logger, error) { - if !journal.Enabled() { - return nil, fmt.Errorf("journald is not enabled on this host") - } - // Strip a leading slash so that people can search for - // CONTAINER_NAME=foo rather than CONTAINER_NAME=/foo. 
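Editor's note: parseAddress above accepts only udp://host:port endpoints. A freestanding sketch of the same check built on the standard library (the helper name is ours, and urlutil's transport check is omitted):

```go
package main

import (
	"fmt"
	"net"
	"net/url"
)

// parseUDPAddress accepts only udp://host:port and returns host:port,
// mirroring the gelf driver's address validation.
func parseUDPAddress(address string) (string, error) {
	u, err := url.Parse(address)
	if err != nil {
		return "", err
	}
	if u.Scheme != "udp" {
		return "", fmt.Errorf("endpoint needs to be UDP, got %q", u.Scheme)
	}
	if _, _, err := net.SplitHostPort(u.Host); err != nil {
		return "", fmt.Errorf("expected udp://host:port: %v", err)
	}
	return u.Host, nil
}

func main() {
	fmt.Println(parseUDPAddress("udp://graylog.example.com:12201")) // accepted
	fmt.Println(parseUDPAddress("tcp://graylog.example.com:12201")) // rejected
}
```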
- name := ctx.ContainerName - if name[0] == '/' { - name = name[1:] - } - - // parse log tag - tag, err := loggerutils.ParseLogTag(ctx, "") - if err != nil { - return nil, err - } - - vars := map[string]string{ - "CONTAINER_ID": ctx.ContainerID[:12], - "CONTAINER_ID_FULL": ctx.ContainerID, - "CONTAINER_NAME": name, - "CONTAINER_TAG": tag, - } - extraAttrs := ctx.ExtraAttributes(strings.ToTitle) - for k, v := range extraAttrs { - vars[k] = v - } - return &journald{vars: vars, readers: readerList{readers: make(map[*logger.LogWatcher]*logger.LogWatcher)}}, nil -} - -// We accept only the standard labels, env, and tag options, but we still have -// to supply a callback for the factory to pass the (possibly empty) -// configuration map to. -func validateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "labels": - case "env": - case "tag": - default: - return fmt.Errorf("unknown log opt '%s' for journald log driver", key) - } - } - return nil -} - -func (s *journald) Log(msg *logger.Message) error { - if msg.Source == "stderr" { - return journal.Send(string(msg.Line), journal.PriErr, s.vars) - } - return journal.Send(string(msg.Line), journal.PriInfo, s.vars) -} - -func (s *journald) Name() string { - return name -} diff --git a/daemon/logger/journald/journald_unsupported.go b/daemon/logger/journald/journald_unsupported.go deleted file mode 100644 index d52ca92e4f..0000000000 --- a/daemon/logger/journald/journald_unsupported.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !linux - -package journald - -type journald struct { -} diff --git a/daemon/logger/journald/read.go b/daemon/logger/journald/read.go deleted file mode 100644 index bc009f61cf..0000000000 --- a/daemon/logger/journald/read.go +++ /dev/null @@ -1,392 +0,0 @@ -// +build linux,cgo,!static_build,journald - -package journald - -// #include <sys/types.h> -// #include <sys/poll.h> -// #include <systemd/sd-journal.h> -// #include <errno.h> -// #include <stdio.h> -// #include <stdlib.h> -// #include <string.h> -// #include <time.h> -// #include <unistd.h> -// -//static int get_message(sd_journal *j, const char **msg, size_t *length) -//{ -// int rc; -// *msg = NULL; -// *length = 0; -// rc = sd_journal_get_data(j, "MESSAGE", (const void **) msg, length); -// if (rc == 0) { -// if (*length > 8) { -// (*msg) += 8; -// *length -= 8; -// } else { -// *msg = NULL; -// *length = 0; -// rc = -ENOENT; -// } -// } -// return rc; -//} -//static int get_priority(sd_journal *j, int *priority) -//{ -// const void *data; -// size_t i, length; -// int rc; -// *priority = -1; -// rc = sd_journal_get_data(j, "PRIORITY", &data, &length); -// if (rc == 0) { -// if ((length > 9) && (strncmp(data, "PRIORITY=", 9) == 0)) { -// *priority = 0; -// for (i = 9; i < length; i++) { -// *priority = *priority * 10 + ((const char *)data)[i] - '0'; -// } -// if (length > 9) { -// rc = 0; -// } -// } -// } -// return rc; -//} -//static int is_attribute_field(const char *msg, size_t length) -//{ -// const struct known_field { -// const char *name; -// size_t length; -// } fields[] = { -// {"MESSAGE", sizeof("MESSAGE") - 1}, -// {"MESSAGE_ID", sizeof("MESSAGE_ID") - 1}, -// {"PRIORITY", sizeof("PRIORITY") - 1}, -// {"CODE_FILE", sizeof("CODE_FILE") - 1}, -// {"CODE_LINE", sizeof("CODE_LINE") - 1}, -// {"CODE_FUNC", sizeof("CODE_FUNC") - 1}, -// {"ERRNO", sizeof("ERRNO") - 1}, -// {"SYSLOG_FACILITY", sizeof("SYSLOG_FACILITY") - 1}, -// {"SYSLOG_IDENTIFIER", sizeof("SYSLOG_IDENTIFIER") - 1}, -// {"SYSLOG_PID", sizeof("SYSLOG_PID") - 1}, -// {"CONTAINER_NAME", sizeof("CONTAINER_NAME") - 1}, -// {"CONTAINER_ID", sizeof("CONTAINER_ID") - 1}, -// {"CONTAINER_ID_FULL",
sizeof("CONTAINER_ID_FULL") - 1}, -// {"CONTAINER_TAG", sizeof("CONTAINER_TAG") - 1}, -// }; -// unsigned int i; -// void *p; -// if ((length < 1) || (msg[0] == '_') || ((p = memchr(msg, '=', length)) == NULL)) { -// return -1; -// } -// length = ((const char *) p) - msg; -// for (i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) { -// if ((fields[i].length == length) && (memcmp(fields[i].name, msg, length) == 0)) { -// return -1; -// } -// } -// return 0; -//} -//static int get_attribute_field(sd_journal *j, const char **msg, size_t *length) -//{ -// int rc; -// *msg = NULL; -// *length = 0; -// while ((rc = sd_journal_enumerate_data(j, (const void **) msg, length)) > 0) { -// if (is_attribute_field(*msg, *length) == 0) { -// break; -// } -// rc = -ENOENT; -// } -// return rc; -//} -//static int wait_for_data_or_close(sd_journal *j, int pipefd) -//{ -// struct pollfd fds[2]; -// uint64_t when = 0; -// int timeout, jevents, i; -// struct timespec ts; -// uint64_t now; -// do { -// memset(&fds, 0, sizeof(fds)); -// fds[0].fd = pipefd; -// fds[0].events = POLLHUP; -// fds[1].fd = sd_journal_get_fd(j); -// if (fds[1].fd < 0) { -// return fds[1].fd; -// } -// jevents = sd_journal_get_events(j); -// if (jevents < 0) { -// return jevents; -// } -// fds[1].events = jevents; -// sd_journal_get_timeout(j, &when); -// if (when == -1) { -// timeout = -1; -// } else { -// clock_gettime(CLOCK_MONOTONIC, &ts); -// now = (uint64_t) ts.tv_sec * 1000000 + ts.tv_nsec / 1000; -// timeout = when > now ? (int) ((when - now + 999) / 1000) : 0; -// } -// i = poll(fds, 2, timeout); -// if ((i == -1) && (errno != EINTR)) { -// /* An unexpected error. */ -// return (errno != 0) ? -errno : -EINTR; -// } -// if (fds[0].revents & POLLHUP) { -// /* The close notification pipe was closed. */ -// return 0; -// } -// if (sd_journal_process(j) == SD_JOURNAL_APPEND) { -// /* Data, which we might care about, was appended. */ -// return 1; -// } -// } while ((fds[0].revents & POLLHUP) == 0); -// return 0; -//} -import "C" - -import ( - "fmt" - "strings" - "time" - "unsafe" - - "github.com/Sirupsen/logrus" - "github.com/coreos/go-systemd/journal" - "github.com/docker/docker/daemon/logger" -) - -func (s *journald) Close() error { - s.readers.mu.Lock() - for reader := range s.readers.readers { - reader.Close() - } - s.readers.mu.Unlock() - return nil -} - -func (s *journald) drainJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, oldCursor string) string { - var msg, data, cursor *C.char - var length C.size_t - var stamp C.uint64_t - var priority C.int - - // Walk the journal from here forward until we run out of new entries. -drain: - for { - // Try not to send a given entry twice. - if oldCursor != "" { - ccursor := C.CString(oldCursor) - defer C.free(unsafe.Pointer(ccursor)) - for C.sd_journal_test_cursor(j, ccursor) > 0 { - if C.sd_journal_next(j) <= 0 { - break drain - } - } - } - // Read and send the logged message, if there is one to read. - i := C.get_message(j, &msg, &length) - if i != -C.ENOENT && i != -C.EADDRNOTAVAIL { - // Read the entry's timestamp. - if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { - break - } - // Set up the time and text of the entry. - timestamp := time.Unix(int64(stamp)/1000000, (int64(stamp)%1000000)*1000) - line := append(C.GoBytes(unsafe.Pointer(msg), C.int(length)), "\n"...) - // Recover the stream name by mapping - // from the journal priority back to - // the stream that we would have - // assigned that value. 
- source := "" - if C.get_priority(j, &priority) != 0 { - source = "" - } else if priority == C.int(journal.PriErr) { - source = "stderr" - } else if priority == C.int(journal.PriInfo) { - source = "stdout" - } - // Retrieve the values of any variables we're adding to the journal. - attrs := make(map[string]string) - C.sd_journal_restart_data(j) - for C.get_attribute_field(j, &data, &length) > C.int(0) { - kv := strings.SplitN(C.GoStringN(data, C.int(length)), "=", 2) - attrs[kv[0]] = kv[1] - } - if len(attrs) == 0 { - attrs = nil - } - // Send the log message. - logWatcher.Msg <- &logger.Message{ - Line: line, - Source: source, - Timestamp: timestamp.In(time.UTC), - Attrs: attrs, - } - } - // If we're at the end of the journal, we're done (for now). - if C.sd_journal_next(j) <= 0 { - break - } - } - retCursor := "" - if C.sd_journal_get_cursor(j, &cursor) == 0 { - retCursor = C.GoString(cursor) - C.free(unsafe.Pointer(cursor)) - } - return retCursor -} - -func (s *journald) followJournal(logWatcher *logger.LogWatcher, config logger.ReadConfig, j *C.sd_journal, pfd [2]C.int, cursor string) { - s.readers.mu.Lock() - s.readers.readers[logWatcher] = logWatcher - s.readers.mu.Unlock() - go func() { - // Keep copying journal data out until we're notified to stop - // or we hit an error. - status := C.wait_for_data_or_close(j, pfd[0]) - for status == 1 { - cursor = s.drainJournal(logWatcher, config, j, cursor) - status = C.wait_for_data_or_close(j, pfd[0]) - } - if status < 0 { - cerrstr := C.strerror(C.int(-status)) - errstr := C.GoString(cerrstr) - fmtstr := "error %q while attempting to follow journal for container %q" - logrus.Errorf(fmtstr, errstr, s.vars["CONTAINER_ID_FULL"]) - } - // Clean up. - C.close(pfd[0]) - s.readers.mu.Lock() - delete(s.readers.readers, logWatcher) - s.readers.mu.Unlock() - C.sd_journal_close(j) - close(logWatcher.Msg) - }() - // Wait until we're told to stop. - select { - case <-logWatcher.WatchClose(): - // Notify the other goroutine that its work is done. - C.close(pfd[1]) - } -} - -func (s *journald) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { - var j *C.sd_journal - var cmatch *C.char - var stamp C.uint64_t - var sinceUnixMicro uint64 - var pipes [2]C.int - cursor := "" - - // Get a handle to the journal. - rc := C.sd_journal_open(&j, C.int(0)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error opening journal") - close(logWatcher.Msg) - return - } - // If we end up following the log, we can set the journal context - // pointer and the channel pointer to nil so that we won't close them - // here, potentially while the goroutine that uses them is still - // running. Otherwise, close them when we return from this function. - following := false - defer func(pfollowing *bool) { - if !*pfollowing { - C.sd_journal_close(j) - close(logWatcher.Msg) - } - }(&following) - // Remove limits on the size of data items that we'll retrieve. - rc = C.sd_journal_set_data_threshold(j, C.size_t(0)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error setting journal data threshold") - return - } - // Add a match to have the library do the searching for us. - cmatch = C.CString("CONTAINER_ID_FULL=" + s.vars["CONTAINER_ID_FULL"]) - defer C.free(unsafe.Pointer(cmatch)) - rc = C.sd_journal_add_match(j, unsafe.Pointer(cmatch), C.strlen(cmatch)) - if rc != 0 { - logWatcher.Err <- fmt.Errorf("error setting journal match") - return - } - // If we have a cutoff time, convert it to Unix time once. 
- if !config.Since.IsZero() { - nano := config.Since.UnixNano() - sinceUnixMicro = uint64(nano / 1000) - } - if config.Tail > 0 { - lines := config.Tail - // Start at the end of the journal. - if C.sd_journal_seek_tail(j) < 0 { - logWatcher.Err <- fmt.Errorf("error seeking to end of journal") - return - } - if C.sd_journal_previous(j) < 0 { - logWatcher.Err <- fmt.Errorf("error backtracking to previous journal entry") - return - } - // Walk backward. - for lines > 0 { - // Stop if the entry time is before our cutoff. - // We'll need the entry time if it isn't, so go - // ahead and parse it now. - if C.sd_journal_get_realtime_usec(j, &stamp) != 0 { - break - } else { - // Compare the timestamp on the entry - // to our threshold value. - if sinceUnixMicro != 0 && sinceUnixMicro > uint64(stamp) { - break - } - } - lines-- - // If we're at the start of the journal, or - // don't need to back up past any more entries, - // stop. - if lines == 0 || C.sd_journal_previous(j) <= 0 { - break - } - } - } else { - // Start at the beginning of the journal. - if C.sd_journal_seek_head(j) < 0 { - logWatcher.Err <- fmt.Errorf("error seeking to start of journal") - return - } - // If we have a cutoff date, fast-forward to it. - if sinceUnixMicro != 0 && C.sd_journal_seek_realtime_usec(j, C.uint64_t(sinceUnixMicro)) != 0 { - logWatcher.Err <- fmt.Errorf("error seeking to start time in journal") - return - } - if C.sd_journal_next(j) < 0 { - logWatcher.Err <- fmt.Errorf("error skipping to next journal entry") - return - } - } - cursor = s.drainJournal(logWatcher, config, j, "") - if config.Follow { - // Allocate a descriptor for following the journal, if we'll - // need one. Do it here so that we can report if it fails. - if fd := C.sd_journal_get_fd(j); fd < C.int(0) { - logWatcher.Err <- fmt.Errorf("error opening journald follow descriptor: %q", C.GoString(C.strerror(-fd))) - } else { - // Create a pipe that we can poll at the same time as - // the journald descriptor. - if C.pipe(&pipes[0]) == C.int(-1) { - logWatcher.Err <- fmt.Errorf("error opening journald close notification pipe") - } else { - s.followJournal(logWatcher, config, j, pipes, cursor) - // Let followJournal handle freeing the journal context - // object and closing the channel. 
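Editor's note: the cutoff handling at the top of this hunk converts config.Since to microseconds once, the unit the journal's realtime clock (and sd_journal_seek_realtime_usec) uses. A tiny sketch of that conversion:

```go
package main

import (
	"fmt"
	"time"
)

// sinceUnixMicro converts a cutoff time to microseconds since the epoch;
// the zero time maps to 0, meaning "no cutoff".
func sinceUnixMicro(since time.Time) uint64 {
	if since.IsZero() {
		return 0
	}
	return uint64(since.UnixNano() / 1000)
}

func main() {
	fmt.Println(sinceUnixMicro(time.Unix(1475147971, 0))) // 1475147971000000
}
```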
- following = true - } - } - } - return -} - -func (s *journald) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { - logWatcher := logger.NewLogWatcher() - go s.readLogs(logWatcher, config) - return logWatcher -} diff --git a/daemon/logger/journald/read_native.go b/daemon/logger/journald/read_native.go deleted file mode 100644 index bba6de55be..0000000000 --- a/daemon/logger/journald/read_native.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,!static_build,journald,!journald_compat - -package journald - -// #cgo pkg-config: libsystemd -import "C" diff --git a/daemon/logger/journald/read_native_compat.go b/daemon/logger/journald/read_native_compat.go deleted file mode 100644 index 3f7a43c59e..0000000000 --- a/daemon/logger/journald/read_native_compat.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build linux,cgo,!static_build,journald,journald_compat - -package journald - -// #cgo pkg-config: libsystemd-journal -import "C" diff --git a/daemon/logger/journald/read_unsupported.go b/daemon/logger/journald/read_unsupported.go deleted file mode 100644 index b43abdcaf7..0000000000 --- a/daemon/logger/journald/read_unsupported.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux !cgo static_build !journald - -package journald - -func (s *journald) Close() error { - return nil -} diff --git a/daemon/logger/jsonfilelog/jsonfilelog.go b/daemon/logger/jsonfilelog/jsonfilelog.go deleted file mode 100644 index 9faa4e02db..0000000000 --- a/daemon/logger/jsonfilelog/jsonfilelog.go +++ /dev/null @@ -1,147 +0,0 @@ -// Package jsonfilelog provides the default Logger implementation for -// Docker logging. This logger logs to files on the host server in the -// JSON format. -package jsonfilelog - -import ( - "bytes" - "encoding/json" - "fmt" - "strconv" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/go-units" -) - -// Name is the name of the json-file log driver. -const Name = "json-file" - -// JSONFileLogger is the Logger implementation for default Docker logging. -type JSONFileLogger struct { - buf *bytes.Buffer - writer *loggerutils.RotateFileWriter - mu sync.Mutex - readers map[*logger.LogWatcher]struct{} // stores the active log followers - extra []byte // json-encoded extra attributes -} - -func init() { - if err := logger.RegisterLogDriver(Name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(Name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a new JSONFileLogger which writes to the filename passed in -// on the given context.
-func New(ctx logger.Context) (logger.Logger, error) { - var capval int64 = -1 - if capacity, ok := ctx.Config["max-size"]; ok { - var err error - capval, err = units.FromHumanSize(capacity) - if err != nil { - return nil, err - } - } - var maxFiles = 1 - if maxFileString, ok := ctx.Config["max-file"]; ok { - var err error - maxFiles, err = strconv.Atoi(maxFileString) - if err != nil { - return nil, err - } - if maxFiles < 1 { - return nil, fmt.Errorf("max-file cannot be less than 1") - } - } - - writer, err := loggerutils.NewRotateFileWriter(ctx.LogPath, capval, maxFiles) - if err != nil { - return nil, err - } - - var extra []byte - if attrs := ctx.ExtraAttributes(nil); len(attrs) > 0 { - var err error - extra, err = json.Marshal(attrs) - if err != nil { - return nil, err - } - } - - return &JSONFileLogger{ - buf: bytes.NewBuffer(nil), - writer: writer, - readers: make(map[*logger.LogWatcher]struct{}), - extra: extra, - }, nil -} - -// Log converts logger.Message to jsonlog.JSONLog and serializes it to file. -func (l *JSONFileLogger) Log(msg *logger.Message) error { - timestamp, err := jsonlog.FastTimeMarshalJSON(msg.Timestamp) - if err != nil { - return err - } - l.mu.Lock() - err = (&jsonlog.JSONLogs{ - Log: append(msg.Line, '\n'), - Stream: msg.Source, - Created: timestamp, - RawAttrs: l.extra, - }).MarshalJSONBuf(l.buf) - if err != nil { - l.mu.Unlock() - return err - } - - l.buf.WriteByte('\n') - _, err = l.writer.Write(l.buf.Bytes()) - l.buf.Reset() - l.mu.Unlock() - - return err -} - -// ValidateLogOpt looks for json specific log options max-file & max-size. -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "max-file": - case "max-size": - case "labels": - case "env": - default: - return fmt.Errorf("unknown log opt '%s' for json-file log driver", key) - } - } - return nil -} - -// LogPath returns the location the given json logger logs to. -func (l *JSONFileLogger) LogPath() string { - return l.writer.LogPath() -} - -// Close closes underlying file and signals all readers to stop. -func (l *JSONFileLogger) Close() error { - l.mu.Lock() - err := l.writer.Close() - for r := range l.readers { - r.Close() - delete(l.readers, r) - } - l.mu.Unlock() - return err -} - -// Name returns name of this logger. 
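Editor's note: New above turns the human-readable max-size option into a byte count with go-units. For reference, a sketch of that call in isolation:

```go
package main

import (
	"fmt"

	"github.com/docker/go-units"
)

func main() {
	// FromHumanSize parses decimal (SI) sizes, so "1k" is 1000 bytes;
	// this is the parser jsonfilelog uses for the max-size option.
	capval, err := units.FromHumanSize("1k")
	if err != nil {
		panic(err)
	}
	fmt.Println(capval) // 1000

	capval, _ = units.FromHumanSize("10m")
	fmt.Println(capval) // 10000000
}
```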
-func (l *JSONFileLogger) Name() string { - return Name -} diff --git a/daemon/logger/jsonfilelog/jsonfilelog_test.go b/daemon/logger/jsonfilelog/jsonfilelog_test.go deleted file mode 100644 index b5b818a8ba..0000000000 --- a/daemon/logger/jsonfilelog/jsonfilelog_test.go +++ /dev/null @@ -1,248 +0,0 @@ -package jsonfilelog - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "strconv" - "testing" - "time" - - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/pkg/jsonlog" -) - -func TestJSONFileLogger(t *testing.T) { - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - tmp, err := ioutil.TempDir("", "docker-logger-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - filename := filepath.Join(tmp, "container.log") - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filename, - }) - if err != nil { - t.Fatal(err) - } - defer l.Close() - - if err := l.Log(&logger.Message{Line: []byte("line1"), Source: "src1"}); err != nil { - t.Fatal(err) - } - if err := l.Log(&logger.Message{Line: []byte("line2"), Source: "src2"}); err != nil { - t.Fatal(err) - } - if err := l.Log(&logger.Message{Line: []byte("line3"), Source: "src3"}); err != nil { - t.Fatal(err) - } - res, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatal(err) - } - expected := `{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line2\n","stream":"src2","time":"0001-01-01T00:00:00Z"} -{"log":"line3\n","stream":"src3","time":"0001-01-01T00:00:00Z"} -` - - if string(res) != expected { - t.Fatalf("Wrong log content: %q, expected %q", res, expected) - } -} - -func BenchmarkJSONFileLogger(b *testing.B) { - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - tmp, err := ioutil.TempDir("", "docker-logger-") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(tmp) - filename := filepath.Join(tmp, "container.log") - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filename, - }) - if err != nil { - b.Fatal(err) - } - defer l.Close() - - testLine := "Line that thinks that it is log line from docker\n" - msg := &logger.Message{Line: []byte(testLine), Source: "stderr", Timestamp: time.Now().UTC()} - jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(jsonlog)+1) * 30) - b.ResetTimer() - for i := 0; i < b.N; i++ { - for j := 0; j < 30; j++ { - if err := l.Log(msg); err != nil { - b.Fatal(err) - } - } - } -} - -func TestJSONFileLoggerWithOpts(t *testing.T) { - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - tmp, err := ioutil.TempDir("", "docker-logger-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - filename := filepath.Join(tmp, "container.log") - config := map[string]string{"max-file": "2", "max-size": "1k"} - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filename, - Config: config, - }) - if err != nil { - t.Fatal(err) - } - defer l.Close() - for i := 0; i < 20; i++ { - if err := l.Log(&logger.Message{Line: []byte("line" + strconv.Itoa(i)), Source: "src1"}); err != nil { - t.Fatal(err) - } - } - res, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatal(err) - } - penUlt, err := ioutil.ReadFile(filename + ".1") - if err != nil { - t.Fatal(err) - } - - expectedPenultimate := `{"log":"line0\n","stream":"src1","time":"0001-01-01T00:00:00Z"} 
-{"log":"line1\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line2\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line3\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line4\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line5\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line6\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line7\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line8\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line9\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line10\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line11\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line12\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line13\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line14\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line15\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -` - expected := `{"log":"line16\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line17\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line18\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -{"log":"line19\n","stream":"src1","time":"0001-01-01T00:00:00Z"} -` - - if string(res) != expected { - t.Fatalf("Wrong log content: %q, expected %q", res, expected) - } - if string(penUlt) != expectedPenultimate { - t.Fatalf("Wrong log content: %q, expected %q", penUlt, expectedPenultimate) - } - -} - -func TestJSONFileLoggerWithLabelsEnv(t *testing.T) { - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - tmp, err := ioutil.TempDir("", "docker-logger-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - filename := filepath.Join(tmp, "container.log") - config := map[string]string{"labels": "rack,dc", "env": "environ,debug,ssl"} - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filename, - Config: config, - ContainerLabels: map[string]string{"rack": "101", "dc": "lhr"}, - ContainerEnv: []string{"environ=production", "debug=false", "port=10001", "ssl=true"}, - }) - if err != nil { - t.Fatal(err) - } - defer l.Close() - if err := l.Log(&logger.Message{Line: []byte("line"), Source: "src1"}); err != nil { - t.Fatal(err) - } - res, err := ioutil.ReadFile(filename) - if err != nil { - t.Fatal(err) - } - - var jsonLog jsonlog.JSONLogs - if err := json.Unmarshal(res, &jsonLog); err != nil { - t.Fatal(err) - } - extra := make(map[string]string) - if err := json.Unmarshal(jsonLog.RawAttrs, &extra); err != nil { - t.Fatal(err) - } - expected := map[string]string{ - "rack": "101", - "dc": "lhr", - "environ": "production", - "debug": "false", - "ssl": "true", - } - if !reflect.DeepEqual(extra, expected) { - t.Fatalf("Wrong log attrs: %q, expected %q", extra, expected) - } -} - -func BenchmarkJSONFileLoggerWithReader(b *testing.B) { - b.StopTimer() - b.ResetTimer() - cid := "a7317399f3f857173c6179d44823594f8294678dea9999662e5c625b5a1c7657" - dir, err := ioutil.TempDir("", "json-logger-bench") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(dir) - - l, err := New(logger.Context{ - ContainerID: cid, - LogPath: filepath.Join(dir, "container.log"), - }) - if err != nil { - b.Fatal(err) - } - defer l.Close() - msg := &logger.Message{Line: []byte("line"), Source: "src1"} - jsonlog, err := (&jsonlog.JSONLog{Log: string(msg.Line) + "\n", Stream: msg.Source, Created: msg.Timestamp}).MarshalJSON() - if err != nil { - b.Fatal(err) - } - b.SetBytes(int64(len(jsonlog)+1) * 30) - - 
b.StartTimer() - - go func() { - for i := 0; i < b.N; i++ { - for j := 0; j < 30; j++ { - l.Log(msg) - } - } - l.Close() - }() - - lw := l.(logger.LogReader).ReadLogs(logger.ReadConfig{Follow: true}) - watchClose := lw.WatchClose() - for { - select { - case <-lw.Msg: - case <-watchClose: - return - } - } -} diff --git a/daemon/logger/jsonfilelog/read.go b/daemon/logger/jsonfilelog/read.go deleted file mode 100644 index bea83ddf14..0000000000 --- a/daemon/logger/jsonfilelog/read.go +++ /dev/null @@ -1,239 +0,0 @@ -package jsonfilelog - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/pkg/filenotify" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/tailfile" -) - -const maxJSONDecodeRetry = 20000 - -func decodeLogLine(dec *json.Decoder, l *jsonlog.JSONLog) (*logger.Message, error) { - l.Reset() - if err := dec.Decode(l); err != nil { - return nil, err - } - msg := &logger.Message{ - Source: l.Stream, - Timestamp: l.Created, - Line: []byte(l.Log), - Attrs: l.Attrs, - } - return msg, nil -} - -// ReadLogs implements the logger's LogReader interface for the logs -// created by this driver. -func (l *JSONFileLogger) ReadLogs(config logger.ReadConfig) *logger.LogWatcher { - logWatcher := logger.NewLogWatcher() - - go l.readLogs(logWatcher, config) - return logWatcher -} - -func (l *JSONFileLogger) readLogs(logWatcher *logger.LogWatcher, config logger.ReadConfig) { - defer close(logWatcher.Msg) - - pth := l.writer.LogPath() - var files []io.ReadSeeker - for i := l.writer.MaxFiles(); i > 1; i-- { - f, err := os.Open(fmt.Sprintf("%s.%d", pth, i-1)) - if err != nil { - if !os.IsNotExist(err) { - logWatcher.Err <- err - break - } - continue - } - files = append(files, f) - } - - latestFile, err := os.Open(pth) - if err != nil { - logWatcher.Err <- err - return - } - - if config.Tail != 0 { - tailer := ioutils.MultiReadSeeker(append(files, latestFile)...) 
- tailFile(tailer, logWatcher, config.Tail, config.Since) - } - - // close all the rotated files - for _, f := range files { - if err := f.(io.Closer).Close(); err != nil { - logrus.WithField("logger", "json-file").Warnf("error closing tailed log file: %v", err) - } - } - - if !config.Follow { - if err := latestFile.Close(); err != nil { - logrus.Errorf("Error closing file: %v", err) - } - return - } - - if config.Tail >= 0 { - latestFile.Seek(0, os.SEEK_END) - } - - l.mu.Lock() - l.readers[logWatcher] = struct{}{} - l.mu.Unlock() - - notifyRotate := l.writer.NotifyRotate() - followLogs(latestFile, logWatcher, notifyRotate, config.Since) - - l.mu.Lock() - delete(l.readers, logWatcher) - l.mu.Unlock() - - l.writer.NotifyRotateEvict(notifyRotate) -} - -func tailFile(f io.ReadSeeker, logWatcher *logger.LogWatcher, tail int, since time.Time) { - var rdr io.Reader = f - if tail > 0 { - ls, err := tailfile.TailFile(f, tail) - if err != nil { - logWatcher.Err <- err - return - } - rdr = bytes.NewBuffer(bytes.Join(ls, []byte("\n"))) - } - dec := json.NewDecoder(rdr) - l := &jsonlog.JSONLog{} - for { - msg, err := decodeLogLine(dec, l) - if err != nil { - if err != io.EOF { - logWatcher.Err <- err - } - return - } - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - logWatcher.Msg <- msg - } -} - -func followLogs(f *os.File, logWatcher *logger.LogWatcher, notifyRotate chan interface{}, since time.Time) { - dec := json.NewDecoder(f) - l := &jsonlog.JSONLog{} - - fileWatcher, err := filenotify.New() - if err != nil { - logWatcher.Err <- err - } - defer func() { - f.Close() - fileWatcher.Close() - }() - name := f.Name() - - if err := fileWatcher.Add(name); err != nil { - logrus.WithField("logger", "json-file").Warnf("falling back to file poller due to error: %v", err) - fileWatcher.Close() - fileWatcher = filenotify.NewPollingWatcher() - - if err := fileWatcher.Add(name); err != nil { - logrus.Debugf("error watching log file for modifications: %v", err) - logWatcher.Err <- err - return - } - } - - var retries int - for { - msg, err := decodeLogLine(dec, l) - if err != nil { - if err != io.EOF { - // try again because this shouldn't happen - if _, ok := err.(*json.SyntaxError); ok && retries <= maxJSONDecodeRetry { - dec = json.NewDecoder(f) - retries++ - continue - } - - // io.ErrUnexpectedEOF is returned from json.Decoder when there is - // remaining data in the parser's buffer while an io.EOF occurs. - // If the json logger writes a partial json log entry to the disk - // while at the same time the decoder tries to decode it, the race condition happens. 
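Editor's note: tailFile above feeds the tailed bytes through decodeLogLine, a json.Decoder over the newline-delimited records shown in the tests. A self-contained sketch of that decode loop (the struct here stands in for jsonlog.JSONLog):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// jsonLogLine mirrors the on-disk json-file record layout:
// {"log":"...","stream":"...","time":"..."}
type jsonLogLine struct {
	Log    string `json:"log"`
	Stream string `json:"stream"`
	Time   string `json:"time"`
}

func main() {
	data := `{"log":"line1\n","stream":"stdout","time":"0001-01-01T00:00:00Z"}
{"log":"line2\n","stream":"stderr","time":"0001-01-01T00:00:00Z"}
`
	dec := json.NewDecoder(strings.NewReader(data))
	for {
		var l jsonLogLine
		if err := dec.Decode(&l); err != nil {
			if err != io.EOF {
				fmt.Println("decode error:", err)
			}
			break
		}
		fmt.Printf("%s: %s", l.Stream, l.Log)
	}
}
```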
- if err == io.ErrUnexpectedEOF && retries <= maxJSONDecodeRetry { - reader := io.MultiReader(dec.Buffered(), f) - dec = json.NewDecoder(reader) - retries++ - continue - } - - return - } - - select { - case <-fileWatcher.Events(): - dec = json.NewDecoder(f) - continue - case <-fileWatcher.Errors(): - logWatcher.Err <- err - return - case <-logWatcher.WatchClose(): - fileWatcher.Remove(name) - return - case <-notifyRotate: - f.Close() - fileWatcher.Remove(name) - - // retry when the file doesn't exist - for retries := 0; retries <= 5; retries++ { - f, err = os.Open(name) - if err == nil || !os.IsNotExist(err) { - break - } - } - - if err = fileWatcher.Add(name); err != nil { - logWatcher.Err <- err - return - } - if err != nil { - logWatcher.Err <- err - return - } - - dec = json.NewDecoder(f) - continue - } - } - - retries = 0 // reset retries since we've succeeded - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - select { - case logWatcher.Msg <- msg: - case <-logWatcher.WatchClose(): - logWatcher.Msg <- msg - for { - msg, err := decodeLogLine(dec, l) - if err != nil { - return - } - if !since.IsZero() && msg.Timestamp.Before(since) { - continue - } - logWatcher.Msg <- msg - } - } - } -} diff --git a/daemon/logger/logger.go b/daemon/logger/logger.go deleted file mode 100644 index 77c6c90ce0..0000000000 --- a/daemon/logger/logger.go +++ /dev/null @@ -1,113 +0,0 @@ -// Package logger defines interfaces that logger drivers implement to -// log messages. -// -// The other half of a logger driver is the implementation of the -// factory, which holds the contextual instance information that -// allows multiple loggers of the same type to perform different -// actions, such as logging to different locations. -package logger - -import ( - "errors" - "sort" - "strings" - "sync" - "time" - - "github.com/docker/docker/pkg/jsonlog" -) - -// ErrReadLogsNotSupported is returned when the logger does not support reading logs. -var ErrReadLogsNotSupported = errors.New("configured logging reader does not support reading") - -const ( - // TimeFormat is the time format used for timestamps sent to log readers. - TimeFormat = jsonlog.RFC3339NanoFixed - logWatcherBufferSize = 4096 -) - -// Message is datastructure that represents record from some container. -type Message struct { - Line []byte - Source string - Timestamp time.Time - Attrs LogAttributes -} - -// LogAttributes is used to hold the extra attributes available in the log message -// Primarily used for converting the map type to string and sorting. -type LogAttributes map[string]string -type byKey []string - -func (s byKey) Len() int { return len(s) } -func (s byKey) Less(i, j int) bool { - keyI := strings.Split(s[i], "=") - keyJ := strings.Split(s[j], "=") - return keyI[0] < keyJ[0] -} -func (s byKey) Swap(i, j int) { - s[i], s[j] = s[j], s[i] -} - -func (a LogAttributes) String() string { - var ss byKey - for k, v := range a { - ss = append(ss, k+"="+v) - } - sort.Sort(ss) - return strings.Join(ss, ",") -} - -// Logger is the interface for docker logging drivers. -type Logger interface { - Log(*Message) error - Name() string - Close() error -} - -// ReadConfig is the configuration passed into ReadLogs. -type ReadConfig struct { - Since time.Time - Tail int - Follow bool -} - -// LogReader is the interface for reading log messages for loggers that support reading. 
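Editor's note: the ErrUnexpectedEOF branch above rebuilds the decoder without losing the bytes it already buffered, by chaining dec.Buffered() in front of the file. The same technique on a plain reader, with the split input invented for the demo:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	// A record arrives in two chunks, as when a writer races the reader.
	first := strings.NewReader(`{"log":"par`)
	rest := strings.NewReader(`tial\n"}`)

	var v map[string]string
	dec := json.NewDecoder(first)
	err := dec.Decode(&v)
	fmt.Println(err) // unexpected EOF

	if err == io.ErrUnexpectedEOF {
		// Re-chain what the decoder buffered with the remaining input
		// and decode again, as the json-file reader does above.
		dec = json.NewDecoder(io.MultiReader(dec.Buffered(), rest))
		if err := dec.Decode(&v); err != nil {
			panic(err)
		}
		fmt.Printf("%q\n", v["log"]) // "partial\n"
	}
}
```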
-type LogReader interface { - // Read logs from underlying logging backend - ReadLogs(ReadConfig) *LogWatcher -} - -// LogWatcher is used when consuming logs read from the LogReader interface. -type LogWatcher struct { - // For sending log messages to a reader. - Msg chan *Message - // For sending error messages that occur while reading logs. - Err chan error - closeOnce sync.Once - closeNotifier chan struct{} -} - -// NewLogWatcher returns a new LogWatcher. -func NewLogWatcher() *LogWatcher { - return &LogWatcher{ - Msg: make(chan *Message, logWatcherBufferSize), - Err: make(chan error, 1), - closeNotifier: make(chan struct{}), - } -} - -// Close notifies the underlying log reader to stop. -func (w *LogWatcher) Close() { - // only close if not already closed - w.closeOnce.Do(func() { - close(w.closeNotifier) - }) -} - -// WatchClose returns a channel receiver that receives notification -// when the watcher has been closed. This should only be called from -// one goroutine. -func (w *LogWatcher) WatchClose() <-chan struct{} { - return w.closeNotifier -} diff --git a/daemon/logger/loggerutils/log_tag.go b/daemon/logger/loggerutils/log_tag.go deleted file mode 100644 index 0ad62df201..0000000000 --- a/daemon/logger/loggerutils/log_tag.go +++ /dev/null @@ -1,28 +0,0 @@ -package loggerutils - -import ( - "bytes" - - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/utils/templates" -) - -// ParseLogTag generates a context-aware tag for consistency across different -// log drivers based on the context of the running container. -func ParseLogTag(ctx logger.Context, defaultTemplate string) (string, error) { - tagTemplate := ctx.Config["tag"] - if tagTemplate == "" { - tagTemplate = defaultTemplate - } - - tmpl, err := templates.NewParse("log-tag", tagTemplate) - if err != nil { - return "", err - } - buf := new(bytes.Buffer) - if err := tmpl.Execute(buf, &ctx); err != nil { - return "", err - } - - return buf.String(), nil -} diff --git a/daemon/logger/loggerutils/log_tag_test.go b/daemon/logger/loggerutils/log_tag_test.go deleted file mode 100644 index e2aa4358aa..0000000000 --- a/daemon/logger/loggerutils/log_tag_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package loggerutils - -import ( - "testing" - - "github.com/docker/docker/daemon/logger" -) - -func TestParseLogTagDefaultTag(t *testing.T) { - ctx := buildContext(map[string]string{}) - tag, e := ParseLogTag(ctx, "{{.ID}}") - assertTag(t, e, tag, ctx.ID()) -} - -func TestParseLogTag(t *testing.T) { - ctx := buildContext(map[string]string{"tag": "{{.ImageName}}/{{.Name}}/{{.ID}}"}) - tag, e := ParseLogTag(ctx, "{{.ID}}") - assertTag(t, e, tag, "test-image/test-container/container-ab") -} - -func TestParseLogTagEmptyTag(t *testing.T) { - ctx := buildContext(map[string]string{}) - tag, e := ParseLogTag(ctx, "{{.DaemonName}}/{{.ID}}") - assertTag(t, e, tag, "test-dockerd/container-ab") -} - -// Helpers - -func buildContext(cfg map[string]string) logger.Context { - return logger.Context{ - ContainerID: "container-abcdefghijklmnopqrstuvwxyz01234567890", - ContainerName: "/test-container", - ContainerImageID: "image-abcdefghijklmnopqrstuvwxyz01234567890", - ContainerImageName: "test-image", - Config: cfg, - DaemonName: "test-dockerd", - } -} - -func assertTag(t *testing.T, e error, tag string, expected string) { - if e != nil { - t.Fatalf("Error generating tag: %q", e) - } - if tag != expected { - t.Fatalf("Wrong tag: %q, should be %q", tag, expected) - } -} diff --git a/daemon/logger/loggerutils/rotatefilewriter.go
b/daemon/logger/loggerutils/rotatefilewriter.go deleted file mode 100644 index 99e0964aea..0000000000 --- a/daemon/logger/loggerutils/rotatefilewriter.go +++ /dev/null @@ -1,124 +0,0 @@ -package loggerutils - -import ( - "os" - "strconv" - "sync" - - "github.com/docker/docker/pkg/pubsub" -) - -// RotateFileWriter is a size-capped, rotating file writer used by the -// file-based log drivers. -type RotateFileWriter struct { - f *os.File // store for closing - mu sync.Mutex - capacity int64 // maximum size of each file - currentSize int64 // current size of the latest file - maxFiles int // maximum number of files - notifyRotate *pubsub.Publisher -} - -// NewRotateFileWriter creates a new RotateFileWriter -func NewRotateFileWriter(logPath string, capacity int64, maxFiles int) (*RotateFileWriter, error) { - log, err := os.OpenFile(logPath, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0640) - if err != nil { - return nil, err - } - - size, err := log.Seek(0, os.SEEK_END) - if err != nil { - return nil, err - } - - return &RotateFileWriter{ - f: log, - capacity: capacity, - currentSize: size, - maxFiles: maxFiles, - notifyRotate: pubsub.NewPublisher(0, 1), - }, nil -} - -// Write writes a log message to the file, rotating first if the size cap is reached -func (w *RotateFileWriter) Write(message []byte) (int, error) { - w.mu.Lock() - if err := w.checkCapacityAndRotate(); err != nil { - w.mu.Unlock() - return -1, err - } - - n, err := w.f.Write(message) - if err == nil { - w.currentSize += int64(n) - } - w.mu.Unlock() - return n, err -} - -func (w *RotateFileWriter) checkCapacityAndRotate() error { - if w.capacity == -1 { - return nil - } - - if w.currentSize >= w.capacity { - name := w.f.Name() - if err := w.f.Close(); err != nil { - return err - } - if err := rotate(name, w.maxFiles); err != nil { - return err - } - file, err := os.OpenFile(name, os.O_WRONLY|os.O_TRUNC|os.O_CREATE, 0640) - if err != nil { - return err - } - w.f = file - w.currentSize = 0 - w.notifyRotate.Publish(struct{}{}) - } - - return nil -} - -func rotate(name string, maxFiles int) error { - if maxFiles < 2 { - return nil - } - for i := maxFiles - 1; i > 1; i-- { - toPath := name + "." + strconv.Itoa(i) - fromPath := name + "." + strconv.Itoa(i-1) - if err := os.Rename(fromPath, toPath); err != nil && !os.IsNotExist(err) { - return err - } - } - - if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) { - return err - } - return nil -} - -// LogPath returns the location the given writer logs to. -func (w *RotateFileWriter) LogPath() string { - return w.f.Name() -} - -// MaxFiles returns the maximum number of files -func (w *RotateFileWriter) MaxFiles() int { - return w.maxFiles -} - -// NotifyRotate returns a new subscriber channel for rotation events -func (w *RotateFileWriter) NotifyRotate() chan interface{} { - return w.notifyRotate.Subscribe() -} - -// NotifyRotateEvict removes the specified subscriber from receiving any more messages. -func (w *RotateFileWriter) NotifyRotateEvict(sub chan interface{}) { - w.notifyRotate.Evict(sub) -} - -// Close closes underlying file and signals all readers to stop. -func (w *RotateFileWriter) Close() error { - return w.f.Close() -} diff --git a/daemon/logger/splunk/splunk.go b/daemon/logger/splunk/splunk.go deleted file mode 100644 index e587e66aa0..0000000000 --- a/daemon/logger/splunk/splunk.go +++ /dev/null @@ -1,266 +0,0 @@ -// Package splunk provides the log driver for forwarding server logs to -// Splunk HTTP Event Collector endpoint.
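Editor's note: rotate above shifts old files up one numbered slot and frees the live name, which the writer then reopens truncated. A runnable sketch of the same shuffle, with paths and "generations" invented for the demo:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
	"strconv"
)

// rotate renames name.(i-1) -> name.i from the oldest slot down,
// then moves the live file to name.1, as RotateFileWriter does.
func rotate(name string, maxFiles int) error {
	if maxFiles < 2 {
		return nil
	}
	for i := maxFiles - 1; i > 1; i-- {
		from := name + "." + strconv.Itoa(i-1)
		to := name + "." + strconv.Itoa(i)
		if err := os.Rename(from, to); err != nil && !os.IsNotExist(err) {
			return err
		}
	}
	if err := os.Rename(name, name+".1"); err != nil && !os.IsNotExist(err) {
		return err
	}
	return nil
}

func main() {
	dir, _ := ioutil.TempDir("", "rotate-demo")
	defer os.RemoveAll(dir)
	log := filepath.Join(dir, "container.log")
	for i := 0; i < 3; i++ {
		ioutil.WriteFile(log, []byte(fmt.Sprintf("generation %d\n", i)), 0640)
		rotate(log, 3) // keeps container.log.1 and container.log.2
	}
	names, _ := filepath.Glob(log + "*")
	fmt.Println(names)
}
```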
-package splunk - -import ( - "bytes" - "crypto/tls" - "crypto/x509" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/urlutil" -) - -const ( - driverName = "splunk" - splunkURLKey = "splunk-url" - splunkTokenKey = "splunk-token" - splunkSourceKey = "splunk-source" - splunkSourceTypeKey = "splunk-sourcetype" - splunkIndexKey = "splunk-index" - splunkCAPathKey = "splunk-capath" - splunkCANameKey = "splunk-caname" - splunkInsecureSkipVerifyKey = "splunk-insecureskipverify" - envKey = "env" - labelsKey = "labels" - tagKey = "tag" -) - -type splunkLogger struct { - client *http.Client - transport *http.Transport - - url string - auth string - nullMessage *splunkMessage -} - -type splunkMessage struct { - Event splunkMessageEvent `json:"event"` - Time string `json:"time"` - Host string `json:"host"` - Source string `json:"source,omitempty"` - SourceType string `json:"sourcetype,omitempty"` - Index string `json:"index,omitempty"` -} - -type splunkMessageEvent struct { - Line string `json:"line"` - Source string `json:"source"` - Tag string `json:"tag,omitempty"` - Attrs map[string]string `json:"attrs,omitempty"` -} - -func init() { - if err := logger.RegisterLogDriver(driverName, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(driverName, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// New creates a splunk logger driver using the configuration passed in on the context -func New(ctx logger.Context) (logger.Logger, error) { - hostname, err := ctx.Hostname() - if err != nil { - return nil, fmt.Errorf("%s: cannot access hostname to set source field", driverName) - } - - // Parse and validate Splunk URL - splunkURL, err := parseURL(ctx) - if err != nil { - return nil, err - } - - // The Splunk token is a required parameter - splunkToken, ok := ctx.Config[splunkTokenKey] - if !ok { - return nil, fmt.Errorf("%s: %s is expected", driverName, splunkTokenKey) - } - - tlsConfig := &tls.Config{} - - // Splunk uses autogenerated certificates by default; - // allow users to trust them by skipping verification - if insecureSkipVerifyStr, ok := ctx.Config[splunkInsecureSkipVerifyKey]; ok { - insecureSkipVerify, err := strconv.ParseBool(insecureSkipVerifyStr) - if err != nil { - return nil, err - } - tlsConfig.InsecureSkipVerify = insecureSkipVerify - } - - // If a path to the root certificate is provided, load it - if caPath, ok := ctx.Config[splunkCAPathKey]; ok { - caCert, err := ioutil.ReadFile(caPath) - if err != nil { - return nil, err - } - caPool := x509.NewCertPool() - caPool.AppendCertsFromPEM(caCert) - tlsConfig.RootCAs = caPool - } - - if caName, ok := ctx.Config[splunkCANameKey]; ok { - tlsConfig.ServerName = caName - } - - transport := &http.Transport{ - TLSClientConfig: tlsConfig, - } - client := &http.Client{ - Transport: transport, - } - - var nullMessage = &splunkMessage{ - Host: hostname, - } - - // Optional parameters for messages - nullMessage.Source = ctx.Config[splunkSourceKey] - nullMessage.SourceType = ctx.Config[splunkSourceTypeKey] - nullMessage.Index = ctx.Config[splunkIndexKey] - - tag, err := loggerutils.ParseLogTag(ctx, "{{.ID}}") - if err != nil { - return nil, err - } - nullMessage.Event.Tag = tag - nullMessage.Event.Attrs = ctx.ExtraAttributes(nil) - - logger := &splunkLogger{ - client: client, - transport: transport, - url: splunkURL.String(), -
auth: "Splunk " + splunkToken, - nullMessage: nullMessage, - } - - err = verifySplunkConnection(logger) - if err != nil { - return nil, err - } - - return logger, nil -} - -func (l *splunkLogger) Log(msg *logger.Message) error { - // Construct message as a copy of nullMessage - message := *l.nullMessage - message.Time = fmt.Sprintf("%f", float64(msg.Timestamp.UnixNano())/1000000000) - message.Event.Line = string(msg.Line) - message.Event.Source = msg.Source - - jsonEvent, err := json.Marshal(&message) - if err != nil { - return err - } - req, err := http.NewRequest("POST", l.url, bytes.NewBuffer(jsonEvent)) - if err != nil { - return err - } - req.Header.Set("Authorization", l.auth) - res, err := l.client.Do(req) - if err != nil { - return err - } - defer res.Body.Close() - if res.StatusCode != http.StatusOK { - var body []byte - body, err = ioutil.ReadAll(res.Body) - if err != nil { - return err - } - return fmt.Errorf("%s: failed to send event - %s - %s", driverName, res.Status, body) - } - io.Copy(ioutil.Discard, res.Body) - return nil -} - -func (l *splunkLogger) Close() error { - l.transport.CloseIdleConnections() - return nil -} - -func (l *splunkLogger) Name() string { - return driverName -} - -// ValidateLogOpt checks all the options supported by the splunk log driver -func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case splunkURLKey: - case splunkTokenKey: - case splunkSourceKey: - case splunkSourceTypeKey: - case splunkIndexKey: - case splunkCAPathKey: - case splunkCANameKey: - case splunkInsecureSkipVerifyKey: - case envKey: - case labelsKey: - case tagKey: - default: - return fmt.Errorf("unknown log opt '%s' for %s log driver", key, driverName) - } - } - return nil -} - -func parseURL(ctx logger.Context) (*url.URL, error) { - splunkURLStr, ok := ctx.Config[splunkURLKey] - if !ok { - return nil, fmt.Errorf("%s: %s is expected", driverName, splunkURLKey) - } - - splunkURL, err := url.Parse(splunkURLStr) - if err != nil { - return nil, fmt.Errorf("%s: failed to parse %s as url value in %s", driverName, splunkURLStr, splunkURLKey) - } - - if !urlutil.IsURL(splunkURLStr) || - !splunkURL.IsAbs() || - (splunkURL.Path != "" && splunkURL.Path != "/") || - splunkURL.RawQuery != "" || - splunkURL.Fragment != "" { - return nil, fmt.Errorf("%s: expected format scheme://dns_name_or_ip:port for %s", driverName, splunkURLKey) - } - - splunkURL.Path = "/services/collector/event/1.0" - - return splunkURL, nil -} - -func verifySplunkConnection(l *splunkLogger) error { - req, err := http.NewRequest("OPTIONS", l.url, nil) - if err != nil { - return err - } - res, err := l.client.Do(req) - if err != nil { - return err - } - if res.Body != nil { - defer res.Body.Close() - } - if res.StatusCode != http.StatusOK { - var body []byte - body, err = ioutil.ReadAll(res.Body) - if err != nil { - return err - } - return fmt.Errorf("%s: failed to verify connection - %s - %s", driverName, res.Status, body) - } - return nil -} diff --git a/daemon/logger/syslog/syslog.go b/daemon/logger/syslog/syslog.go deleted file mode 100644 index 9561e061c6..0000000000 --- a/daemon/logger/syslog/syslog.go +++ /dev/null @@ -1,258 +0,0 @@ -// +build linux - -// Package syslog provides the log driver for forwarding server logs to syslog endpoints.
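Editor's note: splunkLogger.Log above is a plain JSON POST to the HTTP Event Collector with the token in the Authorization header. A standalone sketch of the same request; the URL and token are placeholders, so the demo prints an error rather than a status unless you point it at a real collector:

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Shape mirrors the driver's splunkMessage: an "event" object plus
	// host and a fractional-seconds "time" field.
	event := map[string]interface{}{
		"event": map[string]string{"line": "hello", "source": "stdout"},
		"host":  "myhost",
		"time":  fmt.Sprintf("%f", 1475147971.5),
	}
	body, _ := json.Marshal(event)
	req, err := http.NewRequest("POST",
		"https://splunk.example.com:8088/services/collector/event/1.0",
		bytes.NewBuffer(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Splunk 00000000-0000-0000-0000-000000000000")
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		fmt.Println("post failed:", err) // expected with the placeholder URL
		return
	}
	defer res.Body.Close()
	fmt.Println(res.Status)
}
```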
-package syslog - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "net/url" - "os" - "strconv" - "strings" - "time" - - syslog "github.com/RackSec/srslog" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/loggerutils" - "github.com/docker/docker/pkg/urlutil" - "github.com/docker/go-connections/tlsconfig" -) - -const ( - name = "syslog" - secureProto = "tcp+tls" -) - -var facilities = map[string]syslog.Priority{ - "kern": syslog.LOG_KERN, - "user": syslog.LOG_USER, - "mail": syslog.LOG_MAIL, - "daemon": syslog.LOG_DAEMON, - "auth": syslog.LOG_AUTH, - "syslog": syslog.LOG_SYSLOG, - "lpr": syslog.LOG_LPR, - "news": syslog.LOG_NEWS, - "uucp": syslog.LOG_UUCP, - "cron": syslog.LOG_CRON, - "authpriv": syslog.LOG_AUTHPRIV, - "ftp": syslog.LOG_FTP, - "local0": syslog.LOG_LOCAL0, - "local1": syslog.LOG_LOCAL1, - "local2": syslog.LOG_LOCAL2, - "local3": syslog.LOG_LOCAL3, - "local4": syslog.LOG_LOCAL4, - "local5": syslog.LOG_LOCAL5, - "local6": syslog.LOG_LOCAL6, - "local7": syslog.LOG_LOCAL7, -} - -type syslogger struct { - writer *syslog.Writer -} - -func init() { - if err := logger.RegisterLogDriver(name, New); err != nil { - logrus.Fatal(err) - } - if err := logger.RegisterLogOptValidator(name, ValidateLogOpt); err != nil { - logrus.Fatal(err) - } -} - -// rsyslog uses the appname part of the syslog message to fill in an %syslogtag% template -// attribute in rsyslog.conf. In order to be backward compatible with rfc3164, -// the tag is also used as the appname -func rfc5424formatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { - timestamp := time.Now().Format(time.RFC3339) - pid := os.Getpid() - msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", - p, 1, timestamp, hostname, tag, pid, tag, content) - return msg -} - -// The timestamp field in rfc5424 is derived from rfc3339. Whereas rfc3339 makes allowances -// for multiple syntaxes, rfc5424 is stricter: the maximum -// resolution of "TIME-SECFRAC" is limited to six digits (microsecond resolution) -func rfc5424microformatterWithAppNameAsTag(p syslog.Priority, hostname, tag, content string) string { - timestamp := time.Now().Format("2006-01-02T15:04:05.999999Z07:00") - pid := os.Getpid() - msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", - p, 1, timestamp, hostname, tag, pid, tag, content) - return msg -} - -// New creates a syslog logger using the configuration passed in on -// the context. Supported context configuration variables are -// syslog-address, syslog-facility, syslog-format.
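Editor's note: the two formatters above assemble the RFC 5424 wire line by hand, reusing the tag for both the APP-NAME and MSGID positions. A standalone sketch with a fixed priority and a placeholder tag:

```go
package main

import (
	"fmt"
	"os"
	"time"
)

func main() {
	// PRI = facility*8 + severity; daemon(3)*8 + info(6) = 30.
	pri := 30
	timestamp := time.Now().Format(time.RFC3339)
	hostname, _ := os.Hostname()
	tag := "dockerd/4fae6db2c617" // placeholder tag
	content := "hello syslog"
	// <PRI>VERSION TIMESTAMP HOSTNAME APP-NAME PROCID MSGID MSG, with
	// the tag doubling as APP-NAME and MSGID for rsyslog's %syslogtag%.
	fmt.Printf("<%d>%d %s %s %s %d %s %s\n",
		pri, 1, timestamp, hostname, tag, os.Getpid(), tag, content)
}
```

The same facility-times-eight arithmetic is what lets parseFacility (further down) accept a bare numeric facility between 0 and 23 and shift it left by three bits.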
-func New(ctx logger.Context) (logger.Logger, error) { - tag, err := loggerutils.ParseLogTag(ctx, "{{.DaemonName}}/{{.ID}}") - if err != nil { - return nil, err - } - - proto, address, err := parseAddress(ctx.Config["syslog-address"]) - if err != nil { - return nil, err - } - - facility, err := parseFacility(ctx.Config["syslog-facility"]) - if err != nil { - return nil, err - } - - syslogFormatter, syslogFramer, err := parseLogFormat(ctx.Config["syslog-format"]) - if err != nil { - return nil, err - } - - var log *syslog.Writer - if proto == secureProto { - tlsConfig, tlsErr := parseTLSConfig(ctx.Config) - if tlsErr != nil { - return nil, tlsErr - } - log, err = syslog.DialWithTLSConfig(proto, address, facility, tag, tlsConfig) - } else { - log, err = syslog.Dial(proto, address, facility, tag) - } - - if err != nil { - return nil, err - } - - log.SetFormatter(syslogFormatter) - log.SetFramer(syslogFramer) - - return &syslogger{ - writer: log, - }, nil -} - -func (s *syslogger) Log(msg *logger.Message) error { - if msg.Source == "stderr" { - return s.writer.Err(string(msg.Line)) - } - return s.writer.Info(string(msg.Line)) -} - -func (s *syslogger) Close() error { - return s.writer.Close() -} - -func (s *syslogger) Name() string { - return name -} - -func parseAddress(address string) (string, string, error) { - if address == "" { - return "", "", nil - } - if !urlutil.IsTransportURL(address) { - return "", "", fmt.Errorf("syslog-address should be in form proto://address, got %v", address) - } - url, err := url.Parse(address) - if err != nil { - return "", "", err - } - - // unix and unixgram socket validation - if url.Scheme == "unix" || url.Scheme == "unixgram" { - if _, err := os.Stat(url.Path); err != nil { - return "", "", err - } - return url.Scheme, url.Path, nil - } - - // here we process tcp|udp - host := url.Host - if _, _, err := net.SplitHostPort(host); err != nil { - if !strings.Contains(err.Error(), "missing port in address") { - return "", "", err - } - host = host + ":514" - } - - return url.Scheme, host, nil -} - -// ValidateLogOpt looks for syslog specific log options -// syslog-address, syslog-facility. 
-func ValidateLogOpt(cfg map[string]string) error { - for key := range cfg { - switch key { - case "env": - case "labels": - case "syslog-address": - case "syslog-facility": - case "syslog-tls-ca-cert": - case "syslog-tls-cert": - case "syslog-tls-key": - case "syslog-tls-skip-verify": - case "tag": - case "syslog-format": - default: - return fmt.Errorf("unknown log opt '%s' for syslog log driver", key) - } - } - if _, _, err := parseAddress(cfg["syslog-address"]); err != nil { - return err - } - if _, err := parseFacility(cfg["syslog-facility"]); err != nil { - return err - } - if _, _, err := parseLogFormat(cfg["syslog-format"]); err != nil { - return err - } - return nil -} - -func parseFacility(facility string) (syslog.Priority, error) { - if facility == "" { - return syslog.LOG_DAEMON, nil - } - - if syslogFacility, valid := facilities[facility]; valid { - return syslogFacility, nil - } - - fInt, err := strconv.Atoi(facility) - if err == nil && 0 <= fInt && fInt <= 23 { - return syslog.Priority(fInt << 3), nil - } - - return syslog.Priority(0), errors.New("invalid syslog facility") -} - -func parseTLSConfig(cfg map[string]string) (*tls.Config, error) { - _, skipVerify := cfg["syslog-tls-skip-verify"] - - opts := tlsconfig.Options{ - CAFile: cfg["syslog-tls-ca-cert"], - CertFile: cfg["syslog-tls-cert"], - KeyFile: cfg["syslog-tls-key"], - InsecureSkipVerify: skipVerify, - } - - return tlsconfig.Client(opts) -} - -func parseLogFormat(logFormat string) (syslog.Formatter, syslog.Framer, error) { - switch logFormat { - case "": - return syslog.UnixFormatter, syslog.DefaultFramer, nil - case "rfc3164": - return syslog.RFC3164Formatter, syslog.DefaultFramer, nil - case "rfc5424": - return rfc5424formatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil - case "rfc5424micro": - return rfc5424microformatterWithAppNameAsTag, syslog.RFC5425MessageLengthFramer, nil - default: - return nil, nil, errors.New("Invalid syslog format") - } - -} diff --git a/daemon/logger/syslog/syslog_test.go b/daemon/logger/syslog/syslog_test.go deleted file mode 100644 index f083030b70..0000000000 --- a/daemon/logger/syslog/syslog_test.go +++ /dev/null @@ -1,51 +0,0 @@ -// +build linux - -package syslog - -import ( - syslog "github.com/RackSec/srslog" - "reflect" - "testing" -) - -func functionMatches(expectedFun interface{}, actualFun interface{}) bool { - return reflect.ValueOf(expectedFun).Pointer() == reflect.ValueOf(actualFun).Pointer() -} - -func TestParseLogFormat(t *testing.T) { - formatter, framer, err := parseLogFormat("rfc5424") - if err != nil || !functionMatches(rfc5424formatterWithAppNameAsTag, formatter) || - !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { - t.Fatal("Failed to parse rfc5424 format", err, formatter, framer) - } - - formatter, framer, err = parseLogFormat("rfc5424micro") - if err != nil || !functionMatches(rfc5424microformatterWithAppNameAsTag, formatter) || - !functionMatches(syslog.RFC5425MessageLengthFramer, framer) { - t.Fatal("Failed to parse rfc5424 (microsecond) format", err, formatter, framer) - } - - formatter, framer, err = parseLogFormat("rfc3164") - if err != nil || !functionMatches(syslog.RFC3164Formatter, formatter) || - !functionMatches(syslog.DefaultFramer, framer) { - t.Fatal("Failed to parse rfc3164 format", err, formatter, framer) - } - - formatter, framer, err = parseLogFormat("") - if err != nil || !functionMatches(syslog.UnixFormatter, formatter) || - !functionMatches(syslog.DefaultFramer, framer) { - t.Fatal("Failed to parse empty format", 
err, formatter, framer) - } - - formatter, framer, err = parseLogFormat("invalid") - if err == nil { - t.Fatal("Failed to parse invalid format", err, formatter, framer) - } -} - -func TestValidateLogOptEmpty(t *testing.T) { - emptyConfig := make(map[string]string) - if err := ValidateLogOpt(emptyConfig); err != nil { - t.Fatal("Failed to parse empty config", err) - } -} diff --git a/daemon/logger/syslog/syslog_unsupported.go b/daemon/logger/syslog/syslog_unsupported.go deleted file mode 100644 index 50cc51b657..0000000000 --- a/daemon/logger/syslog/syslog_unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !linux - -package syslog diff --git a/daemon/logs.go b/daemon/logs.go deleted file mode 100644 index 1b285c691d..0000000000 --- a/daemon/logs.go +++ /dev/null @@ -1,166 +0,0 @@ -package daemon - -import ( - "fmt" - "io" - "strconv" - "time" - - "golang.org/x/net/context" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/logger" - "github.com/docker/docker/daemon/logger/jsonfilelog" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/stdcopy" - containertypes "github.com/docker/engine-api/types/container" - timetypes "github.com/docker/engine-api/types/time" -) - -// ContainerLogs hooks up a container's stdout and stderr streams -// configured with the given struct. -func (daemon *Daemon) ContainerLogs(ctx context.Context, containerName string, config *backend.ContainerLogsConfig, started chan struct{}) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - return err - } - - if !(config.ShowStdout || config.ShowStderr) { - return fmt.Errorf("You must choose at least one stream") - } - - cLog, err := daemon.getLogger(container) - if err != nil { - return err - } - logReader, ok := cLog.(logger.LogReader) - if !ok { - return logger.ErrReadLogsNotSupported - } - - follow := config.Follow && container.IsRunning() - tailLines, err := strconv.Atoi(config.Tail) - if err != nil { - tailLines = -1 - } - - logrus.Debug("logs: begin stream") - - var since time.Time - if config.Since != "" { - s, n, err := timetypes.ParseTimestamps(config.Since, 0) - if err != nil { - return err - } - since = time.Unix(s, n) - } - readConfig := logger.ReadConfig{ - Since: since, - Tail: tailLines, - Follow: follow, - } - logs := logReader.ReadLogs(readConfig) - - wf := ioutils.NewWriteFlusher(config.OutStream) - defer wf.Close() - close(started) - wf.Flush() - - var outStream io.Writer = wf - errStream := outStream - if !container.Config.Tty { - errStream = stdcopy.NewStdWriter(outStream, stdcopy.Stderr) - outStream = stdcopy.NewStdWriter(outStream, stdcopy.Stdout) - } - - for { - select { - case err := <-logs.Err: - logrus.Errorf("Error streaming logs: %v", err) - return nil - case <-ctx.Done(): - logs.Close() - return nil - case msg, ok := <-logs.Msg: - if !ok { - logrus.Debug("logs: end stream") - logs.Close() - if cLog != container.LogDriver { - // Since the logger isn't cached in the container, which occurs if it is running, it - // must get explicitly closed here to avoid leaking it and any file handles it has. - if err := cLog.Close(); err != nil { - logrus.Errorf("Error closing logger: %v", err) - } - } - return nil - } - logLine := msg.Line - if config.Details { - logLine = append([]byte(msg.Attrs.String()+" "), logLine...) - } - if config.Timestamps { - logLine = append([]byte(msg.Timestamp.Format(logger.TimeFormat)+" "), logLine...) 
- } - if msg.Source == "stdout" && config.ShowStdout { - outStream.Write(logLine) - } - if msg.Source == "stderr" && config.ShowStderr { - errStream.Write(logLine) - } - } - } -} - -func (daemon *Daemon) getLogger(container *container.Container) (logger.Logger, error) { - if container.LogDriver != nil && container.IsRunning() { - return container.LogDriver, nil - } - return container.StartLogger(container.HostConfig.LogConfig) -} - -// StartLogging initializes and starts the container logging stream. -func (daemon *Daemon) StartLogging(container *container.Container) error { - if container.HostConfig.LogConfig.Type == "none" { - return nil // do not start logging routines - } - - l, err := container.StartLogger(container.HostConfig.LogConfig) - if err != nil { - return fmt.Errorf("Failed to initialize logging driver: %v", err) - } - - copier := logger.NewCopier(map[string]io.Reader{"stdout": container.StdoutPipe(), "stderr": container.StderrPipe()}, l) - container.LogCopier = copier - copier.Run() - container.LogDriver = l - - // set LogPath field only for json-file logdriver - if jl, ok := l.(*jsonfilelog.JSONFileLogger); ok { - container.LogPath = jl.LogPath() - } - - return nil -} - -// mergeAndVerifyLogConfig merges the daemon's default log config into the container's log config when the container does not specify a log driver, then validates the merged options. -func (daemon *Daemon) mergeAndVerifyLogConfig(cfg *containertypes.LogConfig) error { - if cfg.Type == "" { - cfg.Type = daemon.defaultLogConfig.Type - } - - if cfg.Config == nil { - cfg.Config = make(map[string]string) - } - - if cfg.Type == daemon.defaultLogConfig.Type { - for k, v := range daemon.defaultLogConfig.Config { - if _, ok := cfg.Config[k]; !ok { - cfg.Config[k] = v - } - } - } - - return logger.ValidateLogOpts(cfg.Type, cfg.Config) -} diff --git a/daemon/logs_test.go b/daemon/logs_test.go deleted file mode 100644 index f0c6f6e1e9..0000000000 --- a/daemon/logs_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package daemon - -import ( - "testing" - - containertypes "github.com/docker/engine-api/types/container" -) - -func TestMergeAndVerifyLogConfigNilConfig(t *testing.T) { - d := &Daemon{defaultLogConfig: containertypes.LogConfig{Type: "json-file", Config: map[string]string{"max-file": "1"}}} - cfg := containertypes.LogConfig{Type: d.defaultLogConfig.Type} - if err := d.mergeAndVerifyLogConfig(&cfg); err != nil { - t.Fatal(err) - } -} diff --git a/daemon/monitor.go b/daemon/monitor.go deleted file mode 100644 index 1f97efb472..0000000000 --- a/daemon/monitor.go +++ /dev/null @@ -1,156 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "io" - "runtime" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/runconfig" -) - -// StateChanged processes state change notifications from containerd -func (daemon *Daemon) StateChanged(id string, e libcontainerd.StateInfo) error { - c := daemon.containers.Get(id) - if c == nil { - return fmt.Errorf("no such container: %s", id) - } - - switch e.State { - case libcontainerd.StateOOM: - // StateOOM is Linux specific and should never be hit on Windows - if runtime.GOOS == "windows" { - return errors.New("Received StateOOM from libcontainerd on Windows.
This should never happen.") - } - daemon.updateHealthMonitor(c) - daemon.LogContainerEvent(c, "oom") - case libcontainerd.StateExit: - c.Lock() - defer c.Unlock() - c.Wait() - c.Reset(false) - c.SetStopped(platformConstructExitStatus(e)) - attributes := map[string]string{ - "exitCode": strconv.Itoa(int(e.ExitCode)), - } - daemon.updateHealthMonitor(c) - daemon.LogContainerEventWithAttributes(c, "die", attributes) - daemon.Cleanup(c) - // FIXME: there is a race condition here between two RUN instructions in a - // Dockerfile, because they share the same runconfig and change the image. - // Must be fixed in builder/builder.go - if err := c.ToDisk(); err != nil { - return err - } - return daemon.postRunProcessing(c, e) - case libcontainerd.StateRestart: - c.Lock() - defer c.Unlock() - c.Reset(false) - c.RestartCount++ - c.SetRestarting(platformConstructExitStatus(e)) - attributes := map[string]string{ - "exitCode": strconv.Itoa(int(e.ExitCode)), - } - daemon.LogContainerEventWithAttributes(c, "die", attributes) - daemon.updateHealthMonitor(c) - return c.ToDisk() - case libcontainerd.StateExitProcess: - c.Lock() - defer c.Unlock() - if execConfig := c.ExecCommands.Get(e.ProcessID); execConfig != nil { - ec := int(e.ExitCode) - execConfig.ExitCode = &ec - execConfig.Running = false - execConfig.Wait() - if err := execConfig.CloseStreams(); err != nil { - logrus.Errorf("%s: %s", c.ID, err) - } - - // remove the exec command from the container's store only and not the - // daemon's store so that the exec command can be inspected. - c.ExecCommands.Delete(execConfig.ID) - } else { - logrus.Warnf("Ignoring StateExitProcess for %v but no exec command found", e) - } - case libcontainerd.StateStart, libcontainerd.StateRestore: - // Container is already locked in this case - c.SetRunning(int(e.Pid), e.State == libcontainerd.StateStart) - c.HasBeenManuallyStopped = false - if err := c.ToDisk(); err != nil { - c.Reset(false) - return err - } - daemon.initHealthMonitor(c) - daemon.LogContainerEvent(c, "start") - case libcontainerd.StatePause: - // Container is already locked in this case - c.Paused = true - daemon.updateHealthMonitor(c) - daemon.LogContainerEvent(c, "pause") - case libcontainerd.StateResume: - // Container is already locked in this case - c.Paused = false - daemon.updateHealthMonitor(c) - daemon.LogContainerEvent(c, "unpause") - } - - return nil -} - -// AttachStreams is called by libcontainerd to connect the stdio. -func (daemon *Daemon) AttachStreams(id string, iop libcontainerd.IOPipe) error { - var s *runconfig.StreamConfig - c := daemon.containers.Get(id) - if c == nil { - ec, err := daemon.getExecConfig(id) - if err != nil { - return fmt.Errorf("no such exec/container: %s", id) - } - s = ec.StreamConfig - } else { - s = c.StreamConfig - if err := daemon.StartLogging(c); err != nil { - c.Reset(false) - return err - } - } - - copyFunc := func(w io.Writer, r io.Reader) { - s.Add(1) - go func() { - if _, err := io.Copy(w, r); err != nil { - logrus.Errorf("%v stream copy error: %v", id, err) - } - s.Done() - }() - } - - if iop.Stdout != nil { - copyFunc(s.Stdout(), iop.Stdout) - } - if iop.Stderr != nil { - copyFunc(s.Stderr(), iop.Stderr) - } - - if stdin := s.Stdin(); stdin != nil { - if iop.Stdin != nil { - go func() { - io.Copy(iop.Stdin, stdin) - iop.Stdin.Close() - }() - } - } else { - if c != nil && !c.Config.Tty { - // There is no stdin stream to copy and tty is not enabled, so close - // containerd's iopipe stdin.
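// NOTE: annotation, not part of the original patch. With no stdin stream to
// feed and no tty, closing containerd's side of the pipe below lets the
// container process observe EOF on stdin instead of blocking on input that
// nothing will ever write.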
- if iop.Stdin != nil { - iop.Stdin.Close() - } - } - } - - return nil -} diff --git a/daemon/monitor_linux.go b/daemon/monitor_linux.go deleted file mode 100644 index 09f5af50c6..0000000000 --- a/daemon/monitor_linux.go +++ /dev/null @@ -1,19 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" -) - -// platformConstructExitStatus returns a platform-specific exit status structure -func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { - return &container.ExitStatus{ - ExitCode: int(e.ExitCode), - OOMKilled: e.OOMKilled, - } -} - -// postRunProcessing performs any processing needed on the container after it has stopped. -func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { - return nil -} diff --git a/daemon/monitor_solaris.go b/daemon/monitor_solaris.go deleted file mode 100644 index 5ccfada76a..0000000000 --- a/daemon/monitor_solaris.go +++ /dev/null @@ -1,18 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" -) - -// platformConstructExitStatus returns a platform-specific exit status structure -func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { - return &container.ExitStatus{ - ExitCode: int(e.ExitCode), - } -} - -// postRunProcessing performs any processing needed on the container after it has stopped. -func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { - return nil -} diff --git a/daemon/monitor_windows.go b/daemon/monitor_windows.go deleted file mode 100644 index b500ee60b9..0000000000 --- a/daemon/monitor_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" -) - -// platformConstructExitStatus returns a platform-specific exit status structure -func platformConstructExitStatus(e libcontainerd.StateInfo) *container.ExitStatus { - return &container.ExitStatus{ - ExitCode: int(e.ExitCode), - } -} - -// postRunProcessing performs any processing needed on the container after it has stopped. -func (daemon *Daemon) postRunProcessing(container *container.Container, e libcontainerd.StateInfo) error { - if e.ExitCode == 0 && e.UpdatePending { - spec, err := daemon.createSpec(container) - if err != nil { - return err - } - - servicingOption := &libcontainerd.ServicingOption{ - IsServicing: true, - } - - // Create a new servicing container, which will start, complete the update, and merge back the - // results if it succeeded, all as part of the below function call.
- if err := daemon.containerd.Create((container.ID + "_servicing"), *spec, servicingOption); err != nil { - container.SetExitCode(-1) - return fmt.Errorf("Post-run update servicing failed: %s", err) - } - } - return nil -} diff --git a/daemon/mounts.go b/daemon/mounts.go deleted file mode 100644 index d4f24b2812..0000000000 --- a/daemon/mounts.go +++ /dev/null @@ -1,48 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - - "github.com/docker/docker/container" - volumestore "github.com/docker/docker/volume/store" -) - -func (daemon *Daemon) prepareMountPoints(container *container.Container) error { - for _, config := range container.MountPoints { - if err := daemon.lazyInitializeVolume(container.ID, config); err != nil { - return err - } - } - return nil -} - -func (daemon *Daemon) removeMountPoints(container *container.Container, rm bool) error { - var rmErrors []string - for _, m := range container.MountPoints { - if m.Volume == nil { - continue - } - daemon.volumes.Dereference(m.Volume, container.ID) - if rm { - // Do not remove named mountpoints; - // these are mountpoints specified like `docker run -v :/foo` - if m.Named { - continue - } - err := daemon.volumes.Remove(m.Volume) - // Ignore volume-in-use errors: a volume that is still referenced by - // another container is not an error, but an implementation detail. - // This prevents docker from logging "ERROR: Volume in use" - // when another container is using the volume. - if err != nil && !volumestore.IsInUse(err) { - rmErrors = append(rmErrors, err.Error()) - } - } - } - if len(rmErrors) > 0 { - return fmt.Errorf("Error removing volumes:\n%v", strings.Join(rmErrors, "\n")) - } - return nil -} diff --git a/daemon/names.go b/daemon/names.go deleted file mode 100644 index feb1323bb1..0000000000 --- a/daemon/names.go +++ /dev/null @@ -1,115 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/namesgenerator" - "github.com/docker/docker/pkg/registrar" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/utils" -) - -var ( - validContainerNameChars = utils.RestrictedNameChars - validContainerNamePattern = utils.RestrictedNamePattern -) - -func (daemon *Daemon) registerName(container *container.Container) error { - if daemon.Exists(container.ID) { - return fmt.Errorf("Container is already loaded") - } - if err := validateID(container.ID); err != nil { - return err - } - if container.Name == "" { - name, err := daemon.generateNewName(container.ID) - if err != nil { - return err - } - container.Name = name - - if err := container.ToDiskLocking(); err != nil { - logrus.Errorf("Error saving container name to disk: %v", err) - } - } - return daemon.nameIndex.Reserve(container.Name, container.ID) -} - -func (daemon *Daemon) generateIDAndName(name string) (string, string, error) { - var ( - err error - id = stringid.GenerateNonCryptoID() - ) - - if name == "" { - if name, err = daemon.generateNewName(id); err != nil { - return "", "", err - } - return id, name, nil - } - - if name, err = daemon.reserveName(id, name); err != nil { - return "", "", err - } - - return id, name, nil -} - -func (daemon *Daemon) reserveName(id, name string) (string, error) { - if !validContainerNamePattern.MatchString(name) { - return "", fmt.Errorf("Invalid container name (%s), only %s are allowed", name, validContainerNameChars) - } - if name[0] != '/' { - name = "/" + name - } - - if err := daemon.nameIndex.Reserve(name, id); err
!= nil { - if err == registrar.ErrNameReserved { - id, err := daemon.nameIndex.Get(name) - if err != nil { - logrus.Errorf("got unexpected error while looking up reserved name: %v", err) - return "", err - } - return "", fmt.Errorf("Conflict. The name %q is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.", name, id) - } - return "", fmt.Errorf("error reserving name: %s, error: %v", name, err) - } - return name, nil -} - -func (daemon *Daemon) releaseName(name string) { - daemon.nameIndex.Release(name) -} - -func (daemon *Daemon) generateNewName(id string) (string, error) { - var name string - for i := 0; i < 6; i++ { - name = namesgenerator.GetRandomName(i) - if name[0] != '/' { - name = "/" + name - } - - if err := daemon.nameIndex.Reserve(name, id); err != nil { - if err == registrar.ErrNameReserved { - continue - } - return "", err - } - return name, nil - } - - name = "/" + stringid.TruncateID(id) - if err := daemon.nameIndex.Reserve(name, id); err != nil { - return "", err - } - return name, nil -} - -func validateID(id string) error { - if id == "" { - return fmt.Errorf("Invalid empty id") - } - return nil -} diff --git a/daemon/network.go b/daemon/network.go deleted file mode 100644 index 57ec395e24..0000000000 --- a/daemon/network.go +++ /dev/null @@ -1,381 +0,0 @@ -package daemon - -import ( - "fmt" - "net" - "strings" - - "github.com/Sirupsen/logrus" - clustertypes "github.com/docker/docker/daemon/cluster/provider" - "github.com/docker/docker/errors" - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/network" - "github.com/docker/libnetwork" - networktypes "github.com/docker/libnetwork/types" -) - -// NetworkControllerEnabled checks if the networking stack is enabled. -// This feature depends on OS primitives and it's disabled in systems like Windows. -func (daemon *Daemon) NetworkControllerEnabled() bool { - return daemon.netController != nil -} - -// FindNetwork function finds a network for a given string that can represent network name or id -func (daemon *Daemon) FindNetwork(idName string) (libnetwork.Network, error) { - // Find by Name - n, err := daemon.GetNetworkByName(idName) - if err != nil && !isNoSuchNetworkError(err) { - return nil, err - } - - if n != nil { - return n, nil - } - - // Find by id - return daemon.GetNetworkByID(idName) -} - -func isNoSuchNetworkError(err error) bool { - _, ok := err.(libnetwork.ErrNoSuchNetwork) - return ok -} - -// GetNetworkByID function returns a network whose ID begins with the given prefix. -// It fails with an error if no matching, or more than one matching, networks are found. -func (daemon *Daemon) GetNetworkByID(partialID string) (libnetwork.Network, error) { - list := daemon.GetNetworksByID(partialID) - - if len(list) == 0 { - return nil, libnetwork.ErrNoSuchNetwork(partialID) - } - if len(list) > 1 { - return nil, libnetwork.ErrInvalidID(partialID) - } - return list[0], nil -} - -// GetNetworkByName function returns a network for a given network name. 
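// NOTE: annotation, not part of the original patch. The prefix matching in
// GetNetworkByID above requires a partial ID to resolve to exactly one
// network; the sample IDs below are invented.
//
//	daemon.GetNetworkByID("9f3a") // exactly one network matches -> that network
//	daemon.GetNetworkByID("9")    // matches 9f3a... and 91c0... -> ErrInvalidID
//	daemon.GetNetworkByID("zz")   // matches nothing             -> ErrNoSuchNetwork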
-func (daemon *Daemon) GetNetworkByName(name string) (libnetwork.Network, error) { - c := daemon.netController - if c == nil { - return nil, libnetwork.ErrNoSuchNetwork(name) - } - if name == "" { - name = c.Config().Daemon.DefaultNetwork - } - return c.NetworkByName(name) -} - -// GetNetworksByID returns the list of networks whose IDs begin with the given prefix -func (daemon *Daemon) GetNetworksByID(partialID string) []libnetwork.Network { - c := daemon.netController - if c == nil { - return nil - } - list := []libnetwork.Network{} - l := func(nw libnetwork.Network) bool { - if strings.HasPrefix(nw.ID(), partialID) { - list = append(list, nw) - } - return false - } - c.WalkNetworks(l) - - return list -} - -// getAllNetworks returns a list containing all networks -func (daemon *Daemon) getAllNetworks() []libnetwork.Network { - c := daemon.netController - list := []libnetwork.Network{} - l := func(nw libnetwork.Network) bool { - list = append(list, nw) - return false - } - c.WalkNetworks(l) - - return list -} - -func isIngressNetwork(name string) bool { - return name == "ingress" -} - -var ingressChan = make(chan struct{}, 1) - -func ingressWait() func() { - ingressChan <- struct{}{} - return func() { <-ingressChan } -} - -// SetupIngress sets up ingress networking. -func (daemon *Daemon) SetupIngress(create clustertypes.NetworkCreateRequest, nodeIP string) error { - ip, _, err := net.ParseCIDR(nodeIP) - if err != nil { - return err - } - - go func() { - controller := daemon.netController - controller.AgentInitWait() - - if n, err := daemon.GetNetworkByName(create.Name); err == nil && n != nil && n.ID() != create.ID { - if err := controller.SandboxDestroy("ingress-sbox"); err != nil { - logrus.Errorf("Failed to delete stale ingress sandbox: %v", err) - return - } - - // Clean up any stale endpoints that might be left over from previous iterations - epList := n.Endpoints() - for _, ep := range epList { - if err := ep.Delete(true); err != nil { - logrus.Errorf("Failed to delete endpoint %s (%s): %v", ep.Name(), ep.ID(), err) - } - } - - if err := n.Delete(); err != nil { - logrus.Errorf("Failed to delete stale ingress network %s: %v", n.ID(), err) - return - } - } - - if _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true); err != nil { - // If the error is anything other than an - // already-exists error, log it and return. - if _, ok := err.(libnetwork.NetworkNameError); !ok { - logrus.Errorf("Failed creating ingress network: %v", err) - return - } - - // Otherwise, continue on to create or recreate the sandbox. - } - - n, err := daemon.GetNetworkByID(create.ID) - if err != nil { - logrus.Errorf("Failed getting ingress network by id after creating: %v", err) - return - } - - sb, err := controller.NewSandbox("ingress-sbox", libnetwork.OptionIngress()) - if err != nil { - if _, ok := err.(networktypes.ForbiddenError); !ok { - logrus.Errorf("Failed creating ingress sandbox: %v", err) - } - return - } - - ep, err := n.CreateEndpoint("ingress-endpoint", libnetwork.CreateOptionIpam(ip, nil, nil, nil)) - if err != nil { - logrus.Errorf("Failed creating ingress endpoint: %v", err) - return - } - - if err := ep.Join(sb, nil); err != nil { - logrus.Errorf("Failed joining ingress sandbox to ingress endpoint: %v", err) - } - }() - - return nil -} - -// SetNetworkBootstrapKeys sets the bootstrap keys.
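// NOTE: annotation, not part of the original patch. ingressWait above acts as
// a one-slot semaphore: the buffered channel holds at most one token, so
// concurrent ingress-network creations are serialized. createNetwork below
// uses it as:
//
//	if isIngressNetwork(create.Name) {
//		defer ingressWait()() // acquire now, release when the call returns
//	}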
-func (daemon *Daemon) SetNetworkBootstrapKeys(keys []*networktypes.EncryptionKey) error { - return daemon.netController.SetKeys(keys) -} - -// CreateManagedNetwork creates an agent network. -func (daemon *Daemon) CreateManagedNetwork(create clustertypes.NetworkCreateRequest) error { - _, err := daemon.createNetwork(create.NetworkCreateRequest, create.ID, true) - return err -} - -// CreateNetwork creates a network with the given name, driver and other optional parameters -func (daemon *Daemon) CreateNetwork(create types.NetworkCreateRequest) (*types.NetworkCreateResponse, error) { - resp, err := daemon.createNetwork(create, "", false) - if err != nil { - return nil, err - } - return resp, err -} - -func (daemon *Daemon) createNetwork(create types.NetworkCreateRequest, id string, agent bool) (*types.NetworkCreateResponse, error) { - // If there is a pending ingress network creation wait here - // since ingress network creation can happen via node download - // from manager or task download. - if isIngressNetwork(create.Name) { - defer ingressWait()() - } - - if runconfig.IsPreDefinedNetwork(create.Name) && !agent { - err := fmt.Errorf("%s is a pre-defined network and cannot be created", create.Name) - return nil, errors.NewRequestForbiddenError(err) - } - - var warning string - nw, err := daemon.GetNetworkByName(create.Name) - if err != nil { - if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok { - return nil, err - } - } - if nw != nil { - if create.CheckDuplicate { - return nil, libnetwork.NetworkNameError(create.Name) - } - warning = fmt.Sprintf("Network with name %s (id : %s) already exists", nw.Name(), nw.ID()) - } - - c := daemon.netController - driver := create.Driver - if driver == "" { - driver = c.Config().Daemon.DefaultDriver - } - - ipam := create.IPAM - v4Conf, v6Conf, err := getIpamConfig(ipam.Config) - if err != nil { - return nil, err - } - - nwOptions := []libnetwork.NetworkOption{ - libnetwork.NetworkOptionIpam(ipam.Driver, "", v4Conf, v6Conf, ipam.Options), - libnetwork.NetworkOptionEnableIPv6(create.EnableIPv6), - libnetwork.NetworkOptionDriverOpts(create.Options), - libnetwork.NetworkOptionLabels(create.Labels), - } - if create.Internal { - nwOptions = append(nwOptions, libnetwork.NetworkOptionInternalNetwork()) - } - if agent { - nwOptions = append(nwOptions, libnetwork.NetworkOptionDynamic()) - nwOptions = append(nwOptions, libnetwork.NetworkOptionPersist(false)) - } - - if isIngressNetwork(create.Name) { - nwOptions = append(nwOptions, libnetwork.NetworkOptionIngress()) - } - - n, err := c.NewNetwork(driver, create.Name, id, nwOptions...) - if err != nil { - return nil, err - } - - daemon.LogNetworkEvent(n, "create") - return &types.NetworkCreateResponse{ - ID: n.ID(), - Warning: warning, - }, nil -} - -func getIpamConfig(data []network.IPAMConfig) ([]*libnetwork.IpamConf, []*libnetwork.IpamConf, error) { - ipamV4Cfg := []*libnetwork.IpamConf{} - ipamV6Cfg := []*libnetwork.IpamConf{} - for _, d := range data { - iCfg := libnetwork.IpamConf{} - iCfg.PreferredPool = d.Subnet - iCfg.SubPool = d.IPRange - iCfg.Gateway = d.Gateway - iCfg.AuxAddresses = d.AuxAddress - ip, _, err := net.ParseCIDR(d.Subnet) - if err != nil { - return nil, nil, fmt.Errorf("Invalid subnet %s : %v", d.Subnet, err) - } - if ip.To4() != nil { - ipamV4Cfg = append(ipamV4Cfg, &iCfg) - } else { - ipamV6Cfg = append(ipamV6Cfg, &iCfg) - } - } - return ipamV4Cfg, ipamV6Cfg, nil -} - -// UpdateContainerServiceConfig updates a service configuration. 
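// NOTE: annotation, not part of the original patch. getIpamConfig above splits
// the user-supplied pools by the address family of each Subnet; the sample
// values are invented.
//
//	v4, v6, err := getIpamConfig([]network.IPAMConfig{
//		{Subnet: "172.28.0.0/16", IPRange: "172.28.5.0/24", Gateway: "172.28.5.254"},
//		{Subnet: "2001:db8:1::/64"},
//	})
//	// len(v4) == 1, len(v6) == 1; a Subnet that fails net.ParseCIDR
//	// rejects the whole request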
-func (daemon *Daemon) UpdateContainerServiceConfig(containerName string, serviceConfig *clustertypes.ServiceConfig) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - return err - } - - container.NetworkSettings.Service = serviceConfig - return nil -} - -// ConnectContainerToNetwork connects the given container to the given -// network. If either cannot be found, an error is returned. If the -// network cannot be set up, an error is returned. -func (daemon *Daemon) ConnectContainerToNetwork(containerName, networkName string, endpointConfig *network.EndpointSettings) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - return err - } - return daemon.ConnectToNetwork(container, networkName, endpointConfig) -} - -// DisconnectContainerFromNetwork disconnects the given container from -// the given network. If either cannot be found, an error is returned. -func (daemon *Daemon) DisconnectContainerFromNetwork(containerName string, network libnetwork.Network, force bool) error { - container, err := daemon.GetContainer(containerName) - if err != nil { - if force { - return daemon.ForceEndpointDelete(containerName, network) - } - return err - } - return daemon.DisconnectFromNetwork(container, network, force) -} - -// GetNetworkDriverList returns the list of plugin drivers -// registered for networking. -func (daemon *Daemon) GetNetworkDriverList() map[string]bool { - pluginList := make(map[string]bool) - - if !daemon.NetworkControllerEnabled() { - return nil - } - c := daemon.netController - networks := c.Networks() - - for _, network := range networks { - driver := network.Type() - pluginList[driver] = true - } - // TODO: Replace this with a proper libnetwork API - pluginList["overlay"] = true - - return pluginList -} - -// DeleteManagedNetwork deletes an agent network. -func (daemon *Daemon) DeleteManagedNetwork(networkID string) error { - return daemon.deleteNetwork(networkID, true) -} - -// DeleteNetwork destroys a network unless it's one of docker's predefined networks.
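// NOTE: annotation, not part of the original patch. deleteNetwork below
// mirrors the guard in createNetwork: names for which
// runconfig.IsPreDefinedNetwork returns true (for example "bridge", "host",
// or "none" on Linux; the exact set is platform-specific) are refused with a
// request-forbidden error unless the delete comes from the cluster-managed
// (dynamic) path.
//
//	err := daemon.DeleteNetwork("bridge")
//	// err: "bridge is a pre-defined network and cannot be removed"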
-func (daemon *Daemon) DeleteNetwork(networkID string) error { - return daemon.deleteNetwork(networkID, false) -} - -func (daemon *Daemon) deleteNetwork(networkID string, dynamic bool) error { - nw, err := daemon.FindNetwork(networkID) - if err != nil { - return err - } - - if runconfig.IsPreDefinedNetwork(nw.Name()) && !dynamic { - err := fmt.Errorf("%s is a pre-defined network and cannot be removed", nw.Name()) - return errors.NewRequestForbiddenError(err) - } - - if err := nw.Delete(); err != nil { - return err - } - daemon.LogNetworkEvent(nw, "destroy") - return nil -} - -// GetNetworks returns a list of all networks -func (daemon *Daemon) GetNetworks() []libnetwork.Network { - return daemon.getAllNetworks() -} diff --git a/daemon/network/filter.go b/daemon/network/filter.go deleted file mode 100644 index 7d15cea0c7..0000000000 --- a/daemon/network/filter.go +++ /dev/null @@ -1,94 +0,0 @@ -package network - -import ( - "fmt" - - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types/filters" - "github.com/docker/libnetwork" -) - -type filterHandler func([]libnetwork.Network, string) ([]libnetwork.Network, error) - -var ( - // AcceptedFilters is an acceptable filters for validation - AcceptedFilters = map[string]bool{ - "driver": true, - "type": true, - "name": true, - "id": true, - "label": true, - } -) - -func filterNetworkByType(nws []libnetwork.Network, netType string) (retNws []libnetwork.Network, err error) { - switch netType { - case "builtin": - for _, nw := range nws { - if runconfig.IsPreDefinedNetwork(nw.Name()) { - retNws = append(retNws, nw) - } - } - case "custom": - for _, nw := range nws { - if !runconfig.IsPreDefinedNetwork(nw.Name()) { - retNws = append(retNws, nw) - } - } - default: - return nil, fmt.Errorf("Invalid filter: 'type'='%s'", netType) - } - return retNws, nil -} - -// FilterNetworks filters network list according to user specified filter -// and returns user chosen networks -func FilterNetworks(nws []libnetwork.Network, filter filters.Args) ([]libnetwork.Network, error) { - // if filter is empty, return original network list - if filter.Len() == 0 { - return nws, nil - } - - var displayNet []libnetwork.Network - for _, nw := range nws { - if filter.Include("driver") { - if !filter.ExactMatch("driver", nw.Type()) { - continue - } - } - if filter.Include("name") { - if !filter.Match("name", nw.Name()) { - continue - } - } - if filter.Include("id") { - if !filter.Match("id", nw.ID()) { - continue - } - } - if filter.Include("label") { - if !filter.MatchKVList("label", nw.Info().Labels()) { - continue - } - } - displayNet = append(displayNet, nw) - } - - if filter.Include("type") { - var typeNet []libnetwork.Network - errFilter := filter.WalkValues("type", func(fval string) error { - passList, err := filterNetworkByType(displayNet, fval) - if err != nil { - return err - } - typeNet = append(typeNet, passList...) - return nil - }) - if errFilter != nil { - return nil, errFilter - } - displayNet = typeNet - } - - return displayNet, nil -} diff --git a/daemon/network/settings.go b/daemon/network/settings.go deleted file mode 100644 index ff27cb0bbc..0000000000 --- a/daemon/network/settings.go +++ /dev/null @@ -1,24 +0,0 @@ -package network - -import ( - clustertypes "github.com/docker/docker/daemon/cluster/provider" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/go-connections/nat" -) - -// Settings stores configuration details about the daemon network config -// TODO Windows. 
Many of these fields can be factored out. -type Settings struct { - Bridge string - SandboxID string - HairpinMode bool - LinkLocalIPv6Address string - LinkLocalIPv6PrefixLen int - Networks map[string]*networktypes.EndpointSettings - Service *clustertypes.ServiceConfig - Ports nat.PortMap - SandboxKey string - SecondaryIPAddresses []networktypes.Address - SecondaryIPv6Addresses []networktypes.Address - IsAnonymousEndpoint bool -} diff --git a/daemon/oci_linux.go b/daemon/oci_linux.go deleted file mode 100644 index 4459d02fca..0000000000 --- a/daemon/oci_linux.go +++ /dev/null @@ -1,711 +0,0 @@ -package daemon - -import ( - "fmt" - "io" - "os" - "path/filepath" - "sort" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/daemon/caps" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/oci" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/volume" - containertypes "github.com/docker/engine-api/types/container" - "github.com/opencontainers/runc/libcontainer/apparmor" - "github.com/opencontainers/runc/libcontainer/devices" - "github.com/opencontainers/runc/libcontainer/user" - "github.com/opencontainers/specs/specs-go" -) - -func setResources(s *specs.Spec, r containertypes.Resources) error { - weightDevices, err := getBlkioWeightDevices(r) - if err != nil { - return err - } - readBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadBps) - if err != nil { - return err - } - writeBpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteBps) - if err != nil { - return err - } - readIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceReadIOps) - if err != nil { - return err - } - writeIOpsDevice, err := getBlkioThrottleDevices(r.BlkioDeviceWriteIOps) - if err != nil { - return err - } - - memoryRes := getMemoryResources(r) - cpuRes := getCPUResources(r) - blkioWeight := r.BlkioWeight - - specResources := &specs.Resources{ - Memory: memoryRes, - CPU: cpuRes, - BlockIO: &specs.BlockIO{ - Weight: &blkioWeight, - WeightDevice: weightDevices, - ThrottleReadBpsDevice: readBpsDevice, - ThrottleWriteBpsDevice: writeBpsDevice, - ThrottleReadIOPSDevice: readIOpsDevice, - ThrottleWriteIOPSDevice: writeIOpsDevice, - }, - DisableOOMKiller: r.OomKillDisable, - Pids: &specs.Pids{ - Limit: &r.PidsLimit, - }, - } - - if s.Linux.Resources != nil && len(s.Linux.Resources.Devices) > 0 { - specResources.Devices = s.Linux.Resources.Devices - } - - s.Linux.Resources = specResources - return nil -} - -func setDevices(s *specs.Spec, c *container.Container) error { - // Build lists of devices allowed and created within the container. - var devs []specs.Device - devPermissions := s.Linux.Resources.Devices - if c.HostConfig.Privileged { - hostDevices, err := devices.HostDevices() - if err != nil { - return err - } - for _, d := range hostDevices { - devs = append(devs, specDevice(d)) - } - rwm := "rwm" - devPermissions = []specs.DeviceCgroup{ - { - Allow: true, - Access: &rwm, - }, - } - } else { - for _, deviceMapping := range c.HostConfig.Devices { - d, dPermissions, err := getDevicesFromPath(deviceMapping) - if err != nil { - return err - } - devs = append(devs, d...) - devPermissions = append(devPermissions, dPermissions...) - } - } - - s.Linux.Devices = append(s.Linux.Devices, devs...)
- s.Linux.Resources.Devices = devPermissions - return nil -} - -func setRlimits(daemon *Daemon, s *specs.Spec, c *container.Container) error { - var rlimits []specs.Rlimit - - ulimits := c.HostConfig.Ulimits - // Merge ulimits with daemon defaults - ulIdx := make(map[string]struct{}) - for _, ul := range ulimits { - ulIdx[ul.Name] = struct{}{} - } - for name, ul := range daemon.configStore.Ulimits { - if _, exists := ulIdx[name]; !exists { - ulimits = append(ulimits, ul) - } - } - - for _, ul := range ulimits { - rlimits = append(rlimits, specs.Rlimit{ - Type: "RLIMIT_" + strings.ToUpper(ul.Name), - Soft: uint64(ul.Soft), - Hard: uint64(ul.Hard), - }) - } - - s.Process.Rlimits = rlimits - return nil -} - -func setUser(s *specs.Spec, c *container.Container) error { - uid, gid, additionalGids, err := getUser(c, c.Config.User) - if err != nil { - return err - } - s.Process.User.UID = uid - s.Process.User.GID = gid - s.Process.User.AdditionalGids = additionalGids - return nil -} - -func readUserFile(c *container.Container, p string) (io.ReadCloser, error) { - fp, err := symlink.FollowSymlinkInScope(filepath.Join(c.BaseFS, p), c.BaseFS) - if err != nil { - return nil, err - } - return os.Open(fp) -} - -func getUser(c *container.Container, username string) (uint32, uint32, []uint32, error) { - passwdPath, err := user.GetPasswdPath() - if err != nil { - return 0, 0, nil, err - } - groupPath, err := user.GetGroupPath() - if err != nil { - return 0, 0, nil, err - } - passwdFile, err := readUserFile(c, passwdPath) - if err == nil { - defer passwdFile.Close() - } - groupFile, err := readUserFile(c, groupPath) - if err == nil { - defer groupFile.Close() - } - - execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) - if err != nil { - return 0, 0, nil, err - } - - // todo: fix this double read by a change to libcontainer/user pkg - groupFile, err = readUserFile(c, groupPath) - if err == nil { - defer groupFile.Close() - } - var addGroups []int - if len(c.HostConfig.GroupAdd) > 0 { - addGroups, err = user.GetAdditionalGroups(c.HostConfig.GroupAdd, groupFile) - if err != nil { - return 0, 0, nil, err - } - } - uid := uint32(execUser.Uid) - gid := uint32(execUser.Gid) - sgids := append(execUser.Sgids, addGroups...) - var additionalGids []uint32 - for _, g := range sgids { - additionalGids = append(additionalGids, uint32(g)) - } - return uid, gid, additionalGids, nil -} - -func setNamespace(s *specs.Spec, ns specs.Namespace) { - for i, n := range s.Linux.Namespaces { - if n.Type == ns.Type { - s.Linux.Namespaces[i] = ns - return - } - } - s.Linux.Namespaces = append(s.Linux.Namespaces, ns) -} - -func setCapabilities(s *specs.Spec, c *container.Container) error { - var caplist []string - var err error - if c.HostConfig.Privileged { - caplist = caps.GetAllCapabilities() - } else { - caplist, err = caps.TweakCapabilities(s.Process.Capabilities, c.HostConfig.CapAdd, c.HostConfig.CapDrop) - if err != nil { - return err - } - } - s.Process.Capabilities = caplist - return nil -} - -func delNamespace(s *specs.Spec, nsType specs.NamespaceType) { - idx := -1 - for i, n := range s.Linux.Namespaces { - if n.Type == nsType { - idx = i - } - } - if idx >= 0 { - s.Linux.Namespaces = append(s.Linux.Namespaces[:idx], s.Linux.Namespaces[idx+1:]...) 
- } -} - -func setNamespaces(daemon *Daemon, s *specs.Spec, c *container.Container) error { - userNS := false - // user - if c.HostConfig.UsernsMode.IsPrivate() { - uidMap, gidMap := daemon.GetUIDGIDMaps() - if uidMap != nil { - userNS = true - ns := specs.Namespace{Type: "user"} - setNamespace(s, ns) - s.Linux.UIDMappings = specMapping(uidMap) - s.Linux.GIDMappings = specMapping(gidMap) - } - } - // network - if !c.Config.NetworkDisabled { - ns := specs.Namespace{Type: "network"} - parts := strings.SplitN(string(c.HostConfig.NetworkMode), ":", 2) - if parts[0] == "container" { - nc, err := daemon.getNetworkedContainer(c.ID, c.HostConfig.NetworkMode.ConnectedContainer()) - if err != nil { - return err - } - ns.Path = fmt.Sprintf("/proc/%d/ns/net", nc.State.GetPID()) - if userNS { - // to share a net namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} - nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", nc.State.GetPID()) - setNamespace(s, nsUser) - } - } else if c.HostConfig.NetworkMode.IsHost() { - ns.Path = c.NetworkSettings.SandboxKey - } - setNamespace(s, ns) - } - // ipc - if c.HostConfig.IpcMode.IsContainer() { - ns := specs.Namespace{Type: "ipc"} - ic, err := daemon.getIpcContainer(c) - if err != nil { - return err - } - ns.Path = fmt.Sprintf("/proc/%d/ns/ipc", ic.State.GetPID()) - setNamespace(s, ns) - if userNS { - // to share an IPC namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} - nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", ic.State.GetPID()) - setNamespace(s, nsUser) - } - } else if c.HostConfig.IpcMode.IsHost() { - delNamespace(s, specs.NamespaceType("ipc")) - } else { - ns := specs.Namespace{Type: "ipc"} - setNamespace(s, ns) - } - // pid - if c.HostConfig.PidMode.IsContainer() { - ns := specs.Namespace{Type: "pid"} - pc, err := daemon.getPidContainer(c) - if err != nil { - return err - } - ns.Path = fmt.Sprintf("/proc/%d/ns/pid", pc.State.GetPID()) - setNamespace(s, ns) - if userNS { - // to share a PID namespace, they must also share a user namespace - nsUser := specs.Namespace{Type: "user"} - nsUser.Path = fmt.Sprintf("/proc/%d/ns/user", pc.State.GetPID()) - setNamespace(s, nsUser) - } - } else if c.HostConfig.PidMode.IsHost() { - delNamespace(s, specs.NamespaceType("pid")) - } else { - ns := specs.Namespace{Type: "pid"} - setNamespace(s, ns) - } - // uts - if c.HostConfig.UTSMode.IsHost() { - delNamespace(s, specs.NamespaceType("uts")) - s.Hostname = "" - } - - return nil -} - -func specMapping(s []idtools.IDMap) []specs.IDMapping { - var ids []specs.IDMapping - for _, item := range s { - ids = append(ids, specs.IDMapping{ - HostID: uint32(item.HostID), - ContainerID: uint32(item.ContainerID), - Size: uint32(item.Size), - }) - } - return ids -} - -func getMountInfo(mountinfo []*mount.Info, dir string) *mount.Info { - for _, m := range mountinfo { - if m.Mountpoint == dir { - return m - } - } - return nil -} - -// Get the source mount point of directory passed in as argument. Also return -// optional fields. -func getSourceMount(source string) (string, string, error) { - // Ensure any symlinks are resolved. 
- sourcePath, err := filepath.EvalSymlinks(source) - if err != nil { - return "", "", err - } - - mountinfos, err := mount.GetMounts() - if err != nil { - return "", "", err - } - - mountinfo := getMountInfo(mountinfos, sourcePath) - if mountinfo != nil { - return sourcePath, mountinfo.Optional, nil - } - - path := sourcePath - for { - path = filepath.Dir(path) - - mountinfo = getMountInfo(mountinfos, path) - if mountinfo != nil { - return path, mountinfo.Optional, nil - } - - if path == "/" { - break - } - } - - // If we get here, we did not find the parent mount. Something is wrong. - return "", "", fmt.Errorf("Could not find source mount of %s", source) -} - -// Ensure the mount point on which path is mounted is shared. -func ensureShared(path string) error { - sharedMount := false - - sourceMount, optionalOpts, err := getSourceMount(path) - if err != nil { - return err - } - // Make sure the source mount point is shared. - optsSplit := strings.Split(optionalOpts, " ") - for _, opt := range optsSplit { - if strings.HasPrefix(opt, "shared:") { - sharedMount = true - break - } - } - - if !sharedMount { - return fmt.Errorf("Path %s is mounted on %s but it is not a shared mount.", path, sourceMount) - } - return nil -} - -// Ensure the mount point on which path is mounted is either shared or slave. -func ensureSharedOrSlave(path string) error { - sharedMount := false - slaveMount := false - - sourceMount, optionalOpts, err := getSourceMount(path) - if err != nil { - return err - } - // Make sure the source mount point is either shared or slave. - optsSplit := strings.Split(optionalOpts, " ") - for _, opt := range optsSplit { - if strings.HasPrefix(opt, "shared:") { - sharedMount = true - break - } else if strings.HasPrefix(opt, "master:") { - slaveMount = true - break - } - } - - if !sharedMount && !slaveMount { - return fmt.Errorf("Path %s is mounted on %s but it is not a shared or slave mount.", path, sourceMount) - } - return nil -} - -var ( - mountPropagationMap = map[string]int{ - "private": mount.PRIVATE, - "rprivate": mount.RPRIVATE, - "shared": mount.SHARED, - "rshared": mount.RSHARED, - "slave": mount.SLAVE, - "rslave": mount.RSLAVE, - } - - mountPropagationReverseMap = map[int]string{ - mount.PRIVATE: "private", - mount.RPRIVATE: "rprivate", - mount.SHARED: "shared", - mount.RSHARED: "rshared", - mount.SLAVE: "slave", - mount.RSLAVE: "rslave", - } -) - -func setMounts(daemon *Daemon, s *specs.Spec, c *container.Container, mounts []container.Mount) error { - userMounts := make(map[string]struct{}) - for _, m := range mounts { - userMounts[m.Destination] = struct{}{} - } - - // Filter out mounts that are overridden by user-supplied mounts - var defaultMounts []specs.Mount - _, mountDev := userMounts["/dev"] - for _, m := range s.Mounts { - if _, ok := userMounts[m.Destination]; !ok { - if mountDev && strings.HasPrefix(m.Destination, "/dev/") { - continue - } - defaultMounts = append(defaultMounts, m) - } - } - - s.Mounts = defaultMounts - for _, m := range mounts { - for _, cm := range s.Mounts { - if cm.Destination == m.Destination { - return fmt.Errorf("Duplicate mount point '%s'", m.Destination) - } - } - - if m.Source == "tmpfs" { - data := c.HostConfig.Tmpfs[m.Destination] - options := []string{"noexec", "nosuid", "nodev", volume.DefaultPropagationMode} - if data != "" { - options = append(options, strings.Split(data, ",")...)
- } - - merged, err := mount.MergeTmpfsOptions(options) - if err != nil { - return err - } - - s.Mounts = append(s.Mounts, specs.Mount{Destination: m.Destination, Source: m.Source, Type: "tmpfs", Options: merged}) - continue - } - - mt := specs.Mount{Destination: m.Destination, Source: m.Source, Type: "bind"} - - // Determine property of RootPropagation based on volume - // properties. If a volume is shared, then keep root propagation - // shared. This should work for slave and private volumes too. - // - // For slave volumes, it can be either [r]shared/[r]slave. - // - // For private volumes any root propagation value should work. - pFlag := mountPropagationMap[m.Propagation] - if pFlag == mount.SHARED || pFlag == mount.RSHARED { - if err := ensureShared(m.Source); err != nil { - return err - } - rootpg := mountPropagationMap[s.Linux.RootfsPropagation] - if rootpg != mount.SHARED && rootpg != mount.RSHARED { - s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.SHARED] - } - } else if pFlag == mount.SLAVE || pFlag == mount.RSLAVE { - if err := ensureSharedOrSlave(m.Source); err != nil { - return err - } - rootpg := mountPropagationMap[s.Linux.RootfsPropagation] - if rootpg != mount.SHARED && rootpg != mount.RSHARED && rootpg != mount.SLAVE && rootpg != mount.RSLAVE { - s.Linux.RootfsPropagation = mountPropagationReverseMap[mount.RSLAVE] - } - } - - opts := []string{"rbind"} - if !m.Writable { - opts = append(opts, "ro") - } - if pFlag != 0 { - opts = append(opts, mountPropagationReverseMap[pFlag]) - } - - mt.Options = opts - s.Mounts = append(s.Mounts, mt) - } - - if s.Root.Readonly { - for i, m := range s.Mounts { - switch m.Destination { - case "/proc", "/dev/pts", "/dev/mqueue": // /dev is remounted by runc - continue - } - if _, ok := userMounts[m.Destination]; !ok { - if !stringutils.InSlice(m.Options, "ro") { - s.Mounts[i].Options = append(s.Mounts[i].Options, "ro") - } - } - } - } - - if c.HostConfig.Privileged { - if !s.Root.Readonly { - // clear readonly for /sys - for i := range s.Mounts { - if s.Mounts[i].Destination == "/sys" { - clearReadOnly(&s.Mounts[i]) - } - } - } - s.Linux.ReadonlyPaths = nil - s.Linux.MaskedPaths = nil - } - - // TODO: until a kernel/mount solution exists for handling remount in a user namespace, - // we must clear the readonly flag for the cgroups mount (@mrunalp concurs) - if uidMap, _ := daemon.GetUIDGIDMaps(); uidMap != nil || c.HostConfig.Privileged { - for i, m := range s.Mounts { - if m.Type == "cgroup" { - clearReadOnly(&s.Mounts[i]) - } - } - } - - return nil -} - -func (daemon *Daemon) populateCommonSpec(s *specs.Spec, c *container.Container) error { - linkedEnv, err := daemon.setupLinkedContainers(c) - if err != nil { - return err - } - s.Root = specs.Root{ - Path: c.BaseFS, - Readonly: c.HostConfig.ReadonlyRootfs, - } - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { - return err - } - cwd := c.Config.WorkingDir - if len(cwd) == 0 { - cwd = "/" - } - s.Process.Args = append([]string{c.Path}, c.Args...) 
- s.Process.Cwd = cwd - s.Process.Env = c.CreateDaemonEnvironment(linkedEnv) - s.Process.Terminal = c.Config.Tty - s.Hostname = c.FullHostname() - - return nil -} - -func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) { - s := oci.DefaultSpec() - if err := daemon.populateCommonSpec(&s, c); err != nil { - return nil, err - } - - var cgroupsPath string - scopePrefix := "docker" - parent := "/docker" - useSystemd := UsingSystemd(daemon.configStore) - if useSystemd { - parent = "system.slice" - } - - if c.HostConfig.CgroupParent != "" { - parent = c.HostConfig.CgroupParent - } else if daemon.configStore.CgroupParent != "" { - parent = daemon.configStore.CgroupParent - } - - if useSystemd { - cgroupsPath = parent + ":" + scopePrefix + ":" + c.ID - logrus.Debugf("createSpec: cgroupsPath: %s", cgroupsPath) - } else { - cgroupsPath = filepath.Join(parent, c.ID) - } - s.Linux.CgroupsPath = &cgroupsPath - - if err := setResources(&s, c.HostConfig.Resources); err != nil { - return nil, fmt.Errorf("linux runtime spec resources: %v", err) - } - s.Linux.Resources.OOMScoreAdj = &c.HostConfig.OomScoreAdj - s.Linux.Sysctl = c.HostConfig.Sysctls - if err := setDevices(&s, c); err != nil { - return nil, fmt.Errorf("linux runtime spec devices: %v", err) - } - if err := setRlimits(daemon, &s, c); err != nil { - return nil, fmt.Errorf("linux runtime spec rlimits: %v", err) - } - if err := setUser(&s, c); err != nil { - return nil, fmt.Errorf("linux spec user: %v", err) - } - if err := setNamespaces(daemon, &s, c); err != nil { - return nil, fmt.Errorf("linux spec namespaces: %v", err) - } - if err := setCapabilities(&s, c); err != nil { - return nil, fmt.Errorf("linux spec capabilities: %v", err) - } - if err := setSeccomp(daemon, &s, c); err != nil { - return nil, fmt.Errorf("linux seccomp: %v", err) - } - - if err := daemon.setupIpcDirs(c); err != nil { - return nil, err - } - - ms, err := daemon.setupMounts(c) - if err != nil { - return nil, err - } - ms = append(ms, c.IpcMounts()...) - ms = append(ms, c.TmpfsMounts()...) 
- sort.Sort(mounts(ms)) - if err := setMounts(daemon, &s, c, ms); err != nil { - return nil, fmt.Errorf("linux mounts: %v", err) - } - - for _, ns := range s.Linux.Namespaces { - if ns.Type == "network" && ns.Path == "" && !c.Config.NetworkDisabled { - target, err := os.Readlink(filepath.Join("/proc", strconv.Itoa(os.Getpid()), "exe")) - if err != nil { - return nil, err - } - - s.Hooks = specs.Hooks{ - Prestart: []specs.Hook{{ - Path: target, // FIXME: cross-platform - Args: []string{"libnetwork-setkey", c.ID, daemon.netController.ID()}, - }}, - } - } - } - - if apparmor.IsEnabled() { - appArmorProfile := "docker-default" - if len(c.AppArmorProfile) > 0 { - appArmorProfile = c.AppArmorProfile - } else if c.HostConfig.Privileged { - appArmorProfile = "unconfined" - } - s.Process.ApparmorProfile = appArmorProfile - } - s.Process.SelinuxLabel = c.GetProcessLabel() - s.Process.NoNewPrivileges = c.NoNewPrivileges - s.Linux.MountLabel = c.MountLabel - - return (*libcontainerd.Spec)(&s), nil -} - -func clearReadOnly(m *specs.Mount) { - var opt []string - for _, o := range m.Options { - if o != "ro" { - opt = append(opt, o) - } - } - m.Options = opt -} diff --git a/daemon/oci_solaris.go b/daemon/oci_solaris.go deleted file mode 100644 index 05eca21169..0000000000 --- a/daemon/oci_solaris.go +++ /dev/null @@ -1,12 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/oci" -) - -func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) { - s := oci.DefaultSpec() - return (*libcontainerd.Spec)(&s), nil -} diff --git a/daemon/oci_windows.go b/daemon/oci_windows.go deleted file mode 100644 index fd725d7fe2..0000000000 --- a/daemon/oci_windows.go +++ /dev/null @@ -1,200 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/container" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/libcontainerd/windowsoci" - "github.com/docker/docker/oci" -) - -func (daemon *Daemon) createSpec(c *container.Container) (*libcontainerd.Spec, error) { - s := oci.DefaultSpec() - - linkedEnv, err := daemon.setupLinkedContainers(c) - if err != nil { - return nil, err - } - - // TODO Windows - this can be removed. Not used (UID/GID) - rootUID, rootGID := daemon.GetRemappedUIDGID() - if err := c.SetupWorkingDirectory(rootUID, rootGID); err != nil { - return nil, err - } - - img, err := daemon.imageStore.Get(c.ImageID) - if err != nil { - return nil, fmt.Errorf("Failed to graph.Get on ImageID %s - %s", c.ImageID, err) - } - - s.Platform.OSVersion = img.OSVersion - - // In base spec - s.Hostname = c.FullHostname() - - // In s.Mounts - mounts, err := daemon.setupMounts(c) - if err != nil { - return nil, err - } - for _, mount := range mounts { - s.Mounts = append(s.Mounts, windowsoci.Mount{ - Source: mount.Source, - Destination: mount.Destination, - Readonly: !mount.Writable, - }) - } - - // In s.Process - s.Process.Args = append([]string{c.Path}, c.Args...) - if !c.Config.ArgsEscaped { - s.Process.Args = escapeArgs(s.Process.Args) - } - s.Process.Cwd = c.Config.WorkingDir - if len(s.Process.Cwd) == 0 { - // We default to C:\ to workaround the oddity of the case that the - // default directory for cmd running as LocalSystem (or - // ContainerAdministrator) is c:\windows\system32. 
Hence docker run - // cmd will by default end in c:\windows\system32, rather - // than 'root' (/) on Linux. The oddity is that if you have a dockerfile - // which has no WORKDIR and has a COPY file ., . will be interpreted - // as c:\. Hence, setting it to default of c:\ makes for consistency. - s.Process.Cwd = `C:\` - } - s.Process.Env = c.CreateDaemonEnvironment(linkedEnv) - s.Process.InitialConsoleSize = c.HostConfig.ConsoleSize - s.Process.Terminal = c.Config.Tty - s.Process.User.User = c.Config.User - - // In spec.Root - s.Root.Path = c.BaseFS - s.Root.Readonly = c.HostConfig.ReadonlyRootfs - - // In s.Windows - s.Windows.FirstStart = !c.HasBeenStartedBefore - - // s.Windows.LayerFolder. - m, err := c.RWLayer.Metadata() - if err != nil { - return nil, fmt.Errorf("Failed to get layer metadata - %s", err) - } - s.Windows.LayerFolder = m["dir"] - - // s.Windows.LayerPaths - var layerPaths []string - if img.RootFS != nil && (img.RootFS.Type == image.TypeLayers || img.RootFS.Type == image.TypeLayersWithBase) { - // Get the layer path for each layer. - start := 1 - if img.RootFS.Type == image.TypeLayersWithBase { - // Include an empty slice to get the base layer ID. - start = 0 - } - max := len(img.RootFS.DiffIDs) - for i := start; i <= max; i++ { - img.RootFS.DiffIDs = img.RootFS.DiffIDs[:i] - path, err := layer.GetLayerPath(daemon.layerStore, img.RootFS.ChainID()) - if err != nil { - return nil, fmt.Errorf("Failed to get layer path from graphdriver %s for ImageID %s - %s", daemon.layerStore, img.RootFS.ChainID(), err) - } - // Reverse order, expecting parent most first - layerPaths = append([]string{path}, layerPaths...) - } - } - s.Windows.LayerPaths = layerPaths - - // Are we going to run as a Hyper-V container? - hv := false - if c.HostConfig.Isolation.IsDefault() { - // Container is set to use the default, so take the default from the daemon configuration - hv = daemon.defaultIsolation.IsHyperV() - } else { - // Container is requesting an isolation mode. Honour it. - hv = c.HostConfig.Isolation.IsHyperV() - } - if hv { - hvr := &windowsoci.HvRuntime{} - if img.RootFS != nil && img.RootFS.Type == image.TypeLayers { - // For TP5, the utility VM is part of the base layer. - // TODO-jstarks: Add support for separate utility VM images - // once it is decided how they can be stored. 
- uvmpath := filepath.Join(layerPaths[len(layerPaths)-1], "UtilityVM") - _, err = os.Stat(uvmpath) - if err != nil { - if os.IsNotExist(err) { - err = errors.New("container image does not contain a utility VM") - } - return nil, err - } - - hvr.ImagePath = uvmpath - } - - s.Windows.HvRuntime = hvr - } - - // In s.Windows.Networking - // Connect all the libnetwork allocated networks to the container - var epList []string - if c.NetworkSettings != nil { - for n := range c.NetworkSettings.Networks { - sn, err := daemon.FindNetwork(n) - if err != nil { - continue - } - - ep, err := c.GetEndpointInNetwork(sn) - if err != nil { - continue - } - - data, err := ep.DriverInfo() - if err != nil { - continue - } - if data["hnsid"] != nil { - epList = append(epList, data["hnsid"].(string)) - } - } - } - s.Windows.Networking = &windowsoci.Networking{ - EndpointList: epList, - } - - // In s.Windows.Resources - // @darrenstahlmsft implement these resources - cpuShares := uint64(c.HostConfig.CPUShares) - s.Windows.Resources = &windowsoci.Resources{ - CPU: &windowsoci.CPU{ - Percent: &c.HostConfig.CPUPercent, - Shares: &cpuShares, - }, - Memory: &windowsoci.Memory{ - //TODO Limit: ..., - //TODO Reservation: ..., - }, - Network: &windowsoci.Network{ - //TODO Bandwidth: ..., - }, - Storage: &windowsoci.Storage{ - Bps: &c.HostConfig.IOMaximumBandwidth, - Iops: &c.HostConfig.IOMaximumIOps, - //TODO SandboxSize: ..., - }, - } - return (*libcontainerd.Spec)(&s), nil -} - -func escapeArgs(args []string) []string { - escapedArgs := make([]string, len(args)) - for i, a := range args { - escapedArgs[i] = syscall.EscapeArg(a) - } - return escapedArgs -} diff --git a/daemon/pause.go b/daemon/pause.go deleted file mode 100644 index dbfafbc5fd..0000000000 --- a/daemon/pause.go +++ /dev/null @@ -1,49 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" -) - -// ContainerPause pauses a container -func (daemon *Daemon) ContainerPause(name string) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if err := daemon.containerPause(container); err != nil { - return err - } - - return nil -} - -// containerPause pauses the container execution without stopping the process. -// The execution can be resumed by calling containerUnpause. -func (daemon *Daemon) containerPause(container *container.Container) error { - container.Lock() - defer container.Unlock() - - // We cannot Pause the container which is not running - if !container.Running { - return errNotRunning{container.ID} - } - - // We cannot Pause the container which is already paused - if container.Paused { - return fmt.Errorf("Container %s is already paused", container.ID) - } - - // We cannot Pause the container which is restarting - if container.Restarting { - return errContainerIsRestarting(container.ID) - } - - if err := daemon.containerd.Pause(container.ID); err != nil { - return fmt.Errorf("Cannot pause container %s: %s", container.ID, err) - } - - return nil -} diff --git a/daemon/rename.go b/daemon/rename.go deleted file mode 100644 index e92dfda861..0000000000 --- a/daemon/rename.go +++ /dev/null @@ -1,98 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/libnetwork" -) - -// ContainerRename changes the name of a container, using the oldName -// to find the container. An error is returned if newName is already -// reserved. 
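// NOTE: annotation, not part of the original patch. containerPause above
// checks three states under the container lock before asking containerd to
// pause; an invented walkthrough of the error surface:
//
//	daemon.ContainerPause("web") // stopped container    -> errNotRunning
//	daemon.ContainerPause("web") // already paused       -> "Container ... is already paused"
//	daemon.ContainerPause("web") // restarting container -> errContainerIsRestarting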
-func (daemon *Daemon) ContainerRename(oldName, newName string) error { - var ( - sid string - sb libnetwork.Sandbox - ) - - if oldName == "" || newName == "" { - return fmt.Errorf("Neither old nor new names may be empty") - } - - if newName[0] != '/' { - newName = "/" + newName - } - - container, err := daemon.GetContainer(oldName) - if err != nil { - return err - } - - oldName = container.Name - oldIsAnonymousEndpoint := container.NetworkSettings.IsAnonymousEndpoint - - if oldName == newName { - return fmt.Errorf("Renaming a container with the same name as its current name") - } - - container.Lock() - defer container.Unlock() - - if newName, err = daemon.reserveName(container.ID, newName); err != nil { - return fmt.Errorf("Error when allocating new name: %v", err) - } - - container.Name = newName - container.NetworkSettings.IsAnonymousEndpoint = false - - defer func() { - if err != nil { - container.Name = oldName - container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint - daemon.reserveName(container.ID, oldName) - daemon.releaseName(newName) - } - }() - - daemon.releaseName(oldName) - if err = container.ToDisk(); err != nil { - return err - } - - attributes := map[string]string{ - "oldName": oldName, - } - - if !container.Running { - daemon.LogContainerEventWithAttributes(container, "rename", attributes) - return nil - } - - defer func() { - if err != nil { - container.Name = oldName - container.NetworkSettings.IsAnonymousEndpoint = oldIsAnonymousEndpoint - if e := container.ToDisk(); e != nil { - logrus.Errorf("%s: Failed in writing to Disk on rename failure: %v", container.ID, e) - } - } - }() - - sid = container.NetworkSettings.SandboxID - if daemon.netController != nil { - sb, err = daemon.netController.SandboxByID(sid) - if err != nil { - return err - } - - err = sb.Rename(strings.TrimPrefix(container.Name, "/")) - if err != nil { - return err - } - } - - daemon.LogContainerEventWithAttributes(container, "rename", attributes) - return nil -} diff --git a/daemon/resize.go b/daemon/resize.go deleted file mode 100644 index 747353852e..0000000000 --- a/daemon/resize.go +++ /dev/null @@ -1,40 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/libcontainerd" -) - -// ContainerResize changes the size of the TTY of the process running -// in the container with the given name to the given height and width. -func (daemon *Daemon) ContainerResize(name string, height, width int) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if !container.IsRunning() { - return errNotRunning{container.ID} - } - - if err = daemon.containerd.Resize(container.ID, libcontainerd.InitFriendlyName, width, height); err == nil { - attributes := map[string]string{ - "height": fmt.Sprintf("%d", height), - "width": fmt.Sprintf("%d", width), - } - daemon.LogContainerEventWithAttributes(container, "resize", attributes) - } - return err -} - -// ContainerExecResize changes the size of the TTY of the process -// running in the exec with the given name to the given height and -// width. 
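// A minimal sketch (not part of this patch) of the rollback pattern
// ContainerRename relies on above: capture the old state, make the change,
// and use a deferred closure over the named error return to restore state if
// a later step fails. Hypothetical name registry; standard library only.
package main

import (
	"errors"
	"fmt"
)

var names = map[string]string{"c1": "/old"} // container ID -> reserved name

func rename(id, newName string) (err error) {
	oldName := names[id]
	names[id] = newName
	defer func() {
		if err != nil {
			names[id] = oldName // roll back the reservation on failure
		}
	}()
	// Simulate a later step failing; the deferred closure restores the name.
	err = errors.New("sandbox rename failed")
	return err
}

func main() {
	fmt.Println(rename("c1", "/new"), names["c1"]) // sandbox rename failed /old
}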
-func (daemon *Daemon) ContainerExecResize(name string, height, width int) error { - ec, err := daemon.getExecConfig(name) - if err != nil { - return err - } - return daemon.containerd.Resize(ec.ContainerID, ec.ID, width, height) -} diff --git a/daemon/restart.go b/daemon/restart.go deleted file mode 100644 index 3779116cfa..0000000000 --- a/daemon/restart.go +++ /dev/null @@ -1,48 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" -) - -// ContainerRestart stops and starts a container. It attempts to -// gracefully stop the container within the given timeout, forcefully -// stopping it if the timeout is exceeded. If given a negative -// timeout, ContainerRestart will wait forever until a graceful -// stop. Returns an error if the container cannot be found, or if -// there is an underlying error at any stage of the restart. -func (daemon *Daemon) ContainerRestart(name string, seconds int) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - if err := daemon.containerRestart(container, seconds); err != nil { - return fmt.Errorf("Cannot restart container %s: %v", name, err) - } - return nil -} - -// containerRestart attempts to gracefully stop and then start the -// container. When stopping, wait for the given duration in seconds to -// gracefully stop, before forcefully terminating the container. If -// given a negative duration, wait forever for a graceful stop. -func (daemon *Daemon) containerRestart(container *container.Container, seconds int) error { - // Avoid unnecessarily unmounting and then directly mounting - // the container when the container stops and then starts - // again - if err := daemon.Mount(container); err == nil { - defer daemon.Unmount(container) - } - - if err := daemon.containerStop(container, seconds); err != nil { - return err - } - - if err := daemon.containerStart(container); err != nil { - return err - } - - daemon.LogContainerEvent(container, "restart") - return nil -} diff --git a/daemon/search.go b/daemon/search.go deleted file mode 100644 index 09c6ae49b7..0000000000 --- a/daemon/search.go +++ /dev/null @@ -1,94 +0,0 @@ -package daemon - -import ( - "fmt" - "strconv" - - "golang.org/x/net/context" - - "github.com/docker/docker/dockerversion" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - registrytypes "github.com/docker/engine-api/types/registry" -) - -var acceptedSearchFilterTags = map[string]bool{ - "is-automated": true, - "is-official": true, - "stars": true, -} - -// SearchRegistryForImages queries the registry for images matching -// term. authConfig is used to login. 
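// A minimal sketch (not part of this patch) of the filtersArgs wire format
// the function below parses with filters.FromParam: a JSON object mapping
// each filter name to a set of values, as also seen in the test cases in
// daemon/search_test.go. Standard library only.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	args := map[string]map[string]bool{
		"stars":       {"3": true},
		"is-official": {"true": true},
	}
	b, err := json.Marshal(args) // map keys are marshaled in sorted order
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"is-official":{"true":true},"stars":{"3":true}}
}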
-func (daemon *Daemon) SearchRegistryForImages(ctx context.Context, filtersArgs string, term string, limit int, - authConfig *types.AuthConfig, - headers map[string][]string) (*registrytypes.SearchResults, error) { - - searchFilters, err := filters.FromParam(filtersArgs) - if err != nil { - return nil, err - } - if err := searchFilters.Validate(acceptedSearchFilterTags); err != nil { - return nil, err - } - - var isAutomated, isOfficial bool - var hasStarFilter = 0 - if searchFilters.Include("is-automated") { - if searchFilters.UniqueExactMatch("is-automated", "true") { - isAutomated = true - } else if !searchFilters.UniqueExactMatch("is-automated", "false") { - return nil, fmt.Errorf("Invalid filter 'is-automated=%s'", searchFilters.Get("is-automated")) - } - } - if searchFilters.Include("is-official") { - if searchFilters.UniqueExactMatch("is-official", "true") { - isOfficial = true - } else if !searchFilters.UniqueExactMatch("is-official", "false") { - return nil, fmt.Errorf("Invalid filter 'is-official=%s'", searchFilters.Get("is-official")) - } - } - if searchFilters.Include("stars") { - hasStars := searchFilters.Get("stars") - for _, hasStar := range hasStars { - iHasStar, err := strconv.Atoi(hasStar) - if err != nil { - return nil, fmt.Errorf("Invalid filter 'stars=%s'", hasStar) - } - if iHasStar > hasStarFilter { - hasStarFilter = iHasStar - } - } - } - - unfilteredResult, err := daemon.RegistryService.Search(ctx, term, limit, authConfig, dockerversion.DockerUserAgent(ctx), headers) - if err != nil { - return nil, err - } - - filteredResults := []registrytypes.SearchResult{} - for _, result := range unfilteredResult.Results { - if searchFilters.Include("is-automated") { - if isAutomated != result.IsAutomated { - continue - } - } - if searchFilters.Include("is-official") { - if isOfficial != result.IsOfficial { - continue - } - } - if searchFilters.Include("stars") { - if result.StarCount < hasStarFilter { - continue - } - } - filteredResults = append(filteredResults, result) - } - - return ®istrytypes.SearchResults{ - Query: unfilteredResult.Query, - NumResults: len(filteredResults), - Results: filteredResults, - }, nil -} diff --git a/daemon/search_test.go b/daemon/search_test.go deleted file mode 100644 index bffa84e192..0000000000 --- a/daemon/search_test.go +++ /dev/null @@ -1,357 +0,0 @@ -package daemon - -import ( - "fmt" - "strings" - "testing" - - "golang.org/x/net/context" - - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" -) - -type FakeService struct { - registry.DefaultService - - shouldReturnError bool - - term string - results []registrytypes.SearchResult -} - -func (s *FakeService) Search(ctx context.Context, term string, limit int, authConfig *types.AuthConfig, userAgent string, headers map[string][]string) (*registrytypes.SearchResults, error) { - if s.shouldReturnError { - return nil, fmt.Errorf("Search unknown error") - } - return ®istrytypes.SearchResults{ - Query: s.term, - NumResults: len(s.results), - Results: s.results, - }, nil -} - -func TestSearchRegistryForImagesErrors(t *testing.T) { - errorCases := []struct { - filtersArgs string - shouldReturnError bool - expectedError string - }{ - { - expectedError: "Search unknown error", - shouldReturnError: true, - }, - { - filtersArgs: "invalid json", - expectedError: "invalid character 'i' looking for beginning of value", - }, - { - filtersArgs: `{"type":{"custom":true}}`, - expectedError: "Invalid filter 'type'", - }, - 
{ - filtersArgs: `{"is-automated":{"invalid":true}}`, - expectedError: "Invalid filter 'is-automated=[invalid]'", - }, - { - filtersArgs: `{"is-automated":{"true":true,"false":true}}`, - expectedError: "Invalid filter 'is-automated", - }, - { - filtersArgs: `{"is-official":{"invalid":true}}`, - expectedError: "Invalid filter 'is-official=[invalid]'", - }, - { - filtersArgs: `{"is-official":{"true":true,"false":true}}`, - expectedError: "Invalid filter 'is-official", - }, - { - filtersArgs: `{"stars":{"invalid":true}}`, - expectedError: "Invalid filter 'stars=invalid'", - }, - { - filtersArgs: `{"stars":{"1":true,"invalid":true}}`, - expectedError: "Invalid filter 'stars=invalid'", - }, - } - for index, e := range errorCases { - daemon := &Daemon{ - RegistryService: &FakeService{ - shouldReturnError: e.shouldReturnError, - }, - } - _, err := daemon.SearchRegistryForImages(context.Background(), e.filtersArgs, "term", 25, nil, map[string][]string{}) - if err == nil { - t.Errorf("%d: expected an error, got nothing", index) - } - if !strings.Contains(err.Error(), e.expectedError) { - t.Errorf("%d: expected error to contain %s, got %s", index, e.expectedError, err.Error()) - } - } -} - -func TestSearchRegistryForImages(t *testing.T) { - term := "term" - successCases := []struct { - filtersArgs string - registryResults []registrytypes.SearchResult - expectedResults []registrytypes.SearchResult - }{ - { - filtersArgs: "", - registryResults: []registrytypes.SearchResult{}, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: "", - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - }, - }, - }, - { - filtersArgs: `{"is-automated":{"true":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - }, - }, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: `{"is-automated":{"true":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsAutomated: true, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsAutomated: true, - }, - }, - }, - { - filtersArgs: `{"is-automated":{"false":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsAutomated: true, - }, - }, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: `{"is-automated":{"false":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsAutomated: false, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsAutomated: false, - }, - }, - }, - { - filtersArgs: `{"is-official":{"true":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - }, - }, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: `{"is-official":{"true":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsOfficial: true, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsOfficial: true, - }, - }, - }, - { - filtersArgs: `{"is-official":{"false":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - 
Description: "description", - IsOfficial: true, - }, - }, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: `{"is-official":{"false":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsOfficial: false, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - IsOfficial: false, - }, - }, - }, - { - filtersArgs: `{"stars":{"0":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - StarCount: 0, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - StarCount: 0, - }, - }, - }, - { - filtersArgs: `{"stars":{"1":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name", - Description: "description", - StarCount: 0, - }, - }, - expectedResults: []registrytypes.SearchResult{}, - }, - { - filtersArgs: `{"stars":{"1":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name0", - Description: "description0", - StarCount: 0, - }, - { - Name: "name1", - Description: "description1", - StarCount: 1, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name1", - Description: "description1", - StarCount: 1, - }, - }, - }, - { - filtersArgs: `{"stars":{"1":true}, "is-official":{"true":true}, "is-automated":{"true":true}}`, - registryResults: []registrytypes.SearchResult{ - { - Name: "name0", - Description: "description0", - StarCount: 0, - IsOfficial: true, - IsAutomated: true, - }, - { - Name: "name1", - Description: "description1", - StarCount: 1, - IsOfficial: false, - IsAutomated: true, - }, - { - Name: "name2", - Description: "description2", - StarCount: 1, - IsOfficial: true, - IsAutomated: false, - }, - { - Name: "name3", - Description: "description3", - StarCount: 2, - IsOfficial: true, - IsAutomated: true, - }, - }, - expectedResults: []registrytypes.SearchResult{ - { - Name: "name3", - Description: "description3", - StarCount: 2, - IsOfficial: true, - IsAutomated: true, - }, - }, - }, - } - for index, s := range successCases { - daemon := &Daemon{ - RegistryService: &FakeService{ - term: term, - results: s.registryResults, - }, - } - results, err := daemon.SearchRegistryForImages(context.Background(), s.filtersArgs, term, 25, nil, map[string][]string{}) - if err != nil { - t.Errorf("%d: %v", index, err) - } - if results.Query != term { - t.Errorf("%d: expected Query to be %s, got %s", index, term, results.Query) - } - if results.NumResults != len(s.expectedResults) { - t.Errorf("%d: expected NumResults to be %d, got %d", index, len(s.expectedResults), results.NumResults) - } - for _, result := range results.Results { - found := false - for _, expectedResult := range s.expectedResults { - if expectedResult.Name == result.Name && - expectedResult.Description == result.Description && - expectedResult.IsAutomated == result.IsAutomated && - expectedResult.IsOfficial == result.IsOfficial && - expectedResult.StarCount == result.StarCount { - found = true - } - } - if !found { - t.Errorf("%d: expected results %v, got %v", index, s.expectedResults, results.Results) - } - } - } -} diff --git a/daemon/seccomp_disabled.go b/daemon/seccomp_disabled.go deleted file mode 100644 index 4ad1b7c53f..0000000000 --- a/daemon/seccomp_disabled.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build linux,!seccomp - -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - 
"github.com/opencontainers/specs/specs-go" -) - -var supportsSeccomp = false - -func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { - if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { - return fmt.Errorf("seccomp profiles are not supported on this daemon, you cannot specify a custom seccomp profile") - } - return nil -} diff --git a/daemon/seccomp_linux.go b/daemon/seccomp_linux.go deleted file mode 100644 index e9622787ee..0000000000 --- a/daemon/seccomp_linux.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build linux,seccomp - -package daemon - -import ( - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/profiles/seccomp" - "github.com/opencontainers/specs/specs-go" -) - -var supportsSeccomp = true - -func setSeccomp(daemon *Daemon, rs *specs.Spec, c *container.Container) error { - var profile *specs.Seccomp - var err error - - if c.HostConfig.Privileged { - return nil - } - - if !daemon.seccompEnabled { - if c.SeccompProfile != "" && c.SeccompProfile != "unconfined" { - return fmt.Errorf("Seccomp is not enabled in your kernel, cannot run a custom seccomp profile.") - } - logrus.Warn("Seccomp is not enabled in your kernel, running container without default profile.") - c.SeccompProfile = "unconfined" - } - if c.SeccompProfile == "unconfined" { - return nil - } - if c.SeccompProfile != "" { - profile, err = seccomp.LoadProfile(c.SeccompProfile) - if err != nil { - return err - } - } else { - profile, err = seccomp.GetDefaultProfile(rs) - if err != nil { - return err - } - } - - rs.Linux.Seccomp = profile - return nil -} diff --git a/daemon/seccomp_unsupported.go b/daemon/seccomp_unsupported.go deleted file mode 100644 index b3691e96af..0000000000 --- a/daemon/seccomp_unsupported.go +++ /dev/null @@ -1,5 +0,0 @@ -// +build !linux - -package daemon - -var supportsSeccomp = false diff --git a/daemon/selinux_linux.go b/daemon/selinux_linux.go deleted file mode 100644 index 83a3447111..0000000000 --- a/daemon/selinux_linux.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux - -package daemon - -import "github.com/opencontainers/runc/libcontainer/selinux" - -func selinuxSetDisabled() { - selinux.SetDisabled() -} - -func selinuxFreeLxcContexts(label string) { - selinux.FreeLxcContexts(label) -} - -func selinuxEnabled() bool { - return selinux.SelinuxEnabled() -} diff --git a/daemon/selinux_unsupported.go b/daemon/selinux_unsupported.go deleted file mode 100644 index 25a56ad157..0000000000 --- a/daemon/selinux_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !linux - -package daemon - -func selinuxSetDisabled() { -} - -func selinuxFreeLxcContexts(label string) { -} - -func selinuxEnabled() bool { - return false -} diff --git a/daemon/start.go b/daemon/start.go deleted file mode 100644 index fcf24c59f6..0000000000 --- a/daemon/start.go +++ /dev/null @@ -1,194 +0,0 @@ -package daemon - -import ( - "fmt" - "net/http" - "runtime" - "strings" - "syscall" - - "google.golang.org/grpc" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/errors" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/runconfig" - containertypes "github.com/docker/engine-api/types/container" -) - -// ContainerStart starts a container. 
-func (daemon *Daemon) ContainerStart(name string, hostConfig *containertypes.HostConfig, validateHostname bool) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if container.IsPaused() { - return fmt.Errorf("Cannot start a paused container, try unpause instead.") - } - - if container.IsRunning() { - err := fmt.Errorf("Container already started") - return errors.NewErrorWithStatusCode(err, http.StatusNotModified) - } - - // Windows does not have the backwards compatibility issue here. - if runtime.GOOS != "windows" { - // This is kept for backward compatibility - hostconfig should be passed when - // creating a container, not during start. - if hostConfig != nil { - logrus.Warn("DEPRECATED: Setting host configuration options when the container starts is deprecated and has been removed in Docker 1.12") - oldNetworkMode := container.HostConfig.NetworkMode - if err := daemon.setSecurityOptions(container, hostConfig); err != nil { - return err - } - if err := daemon.mergeAndVerifyLogConfig(&hostConfig.LogConfig); err != nil { - return err - } - if err := daemon.setHostConfig(container, hostConfig); err != nil { - return err - } - newNetworkMode := container.HostConfig.NetworkMode - if string(oldNetworkMode) != string(newNetworkMode) { - // if user has change the network mode on starting, clean up the - // old networks. It is a deprecated feature and has been removed in Docker 1.12 - container.NetworkSettings.Networks = nil - if err := container.ToDisk(); err != nil { - return err - } - } - container.InitDNSHostConfig() - } - } else { - if hostConfig != nil { - return fmt.Errorf("Supplying a hostconfig on start is not supported. It should be supplied on create") - } - } - - // check if hostConfig is in line with the current system settings. - // It may happen cgroups are umounted or the like. - if _, err = daemon.verifyContainerSettings(container.HostConfig, nil, false, validateHostname); err != nil { - return err - } - // Adapt for old containers in case we have updates in this function and - // old containers never have chance to call the new function in create stage. - if err := daemon.adaptContainerSettings(container.HostConfig, false); err != nil { - return err - } - - return daemon.containerStart(container) -} - -// Start starts a container -func (daemon *Daemon) Start(container *container.Container) error { - return daemon.containerStart(container) -} - -// containerStart prepares the container to run by setting up everything the -// container needs, such as storage and networking, as well as links -// between containers. The container is left waiting for a signal to -// begin running. -func (daemon *Daemon) containerStart(container *container.Container) (err error) { - container.Lock() - defer container.Unlock() - - if container.Running { - return nil - } - - if container.RemovalInProgress || container.Dead { - return fmt.Errorf("Container is marked for removal and cannot be started.") - } - - // if we encounter an error during start we need to ensure that any other - // setup has been cleaned up properly - defer func() { - if err != nil { - container.SetError(err) - // if no one else has set it, make sure we don't leave it at zero - if container.ExitCode() == 0 { - container.SetExitCode(128) - } - container.ToDisk() - daemon.Cleanup(container) - } - }() - - if err := daemon.conditionalMountOnStart(container); err != nil { - return err - } - - // Make sure NetworkMode has an acceptable value. 
We do this to ensure - // backwards API compatibility. - container.HostConfig = runconfig.SetDefaultNetModeIfBlank(container.HostConfig) - - if err := daemon.initializeNetworking(container); err != nil { - return err - } - - spec, err := daemon.createSpec(container) - if err != nil { - return err - } - - createOptions := []libcontainerd.CreateOption{libcontainerd.WithRestartManager(container.RestartManager(true))} - copts, err := daemon.getLibcontainerdCreateOptions(container) - if err != nil { - return err - } - if copts != nil { - createOptions = append(createOptions, *copts...) - } - - if err := daemon.containerd.Create(container.ID, *spec, createOptions...); err != nil { - errDesc := grpc.ErrorDesc(err) - logrus.Errorf("Create container failed with error: %s", errDesc) - // if we receive an internal error from the initial start of a container then lets - // return it instead of entering the restart loop - // set to 127 for container cmd not found/does not exist) - if strings.Contains(errDesc, container.Path) && - (strings.Contains(errDesc, "executable file not found") || - strings.Contains(errDesc, "no such file or directory") || - strings.Contains(errDesc, "system cannot find the file specified")) { - container.SetExitCode(127) - } - // set to 126 for container cmd can't be invoked errors - if strings.Contains(errDesc, syscall.EACCES.Error()) { - container.SetExitCode(126) - } - - container.Reset(false) - - return fmt.Errorf("%s", errDesc) - } - - return nil -} - -// Cleanup releases any network resources allocated to the container along with any rules -// around how containers are linked together. It also unmounts the container's root filesystem. -func (daemon *Daemon) Cleanup(container *container.Container) { - daemon.releaseNetwork(container) - - container.UnmountIpcMounts(detachMounted) - - if err := daemon.conditionalUnmountOnCleanup(container); err != nil { - // FIXME: remove once reference counting for graphdrivers has been refactored - // Ensure that all the mounts are gone - if mountid, err := daemon.layerStore.GetMountID(container.ID); err == nil { - daemon.cleanupMountsByID(mountid) - } - } - - for _, eConfig := range container.ExecCommands.Commands() { - daemon.unregisterExecCommand(container, eConfig) - } - - if container.BaseFS != "" { - if err := container.UnmountVolumes(false, daemon.LogVolumeEvent); err != nil { - logrus.Warnf("%s cleanup: Failed to umount volumes: %v", container.ID, err) - } - } - container.CancelAttachContext() -} diff --git a/daemon/start_linux.go b/daemon/start_linux.go deleted file mode 100644 index c6ad22af85..0000000000 --- a/daemon/start_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" -) - -func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (*[]libcontainerd.CreateOption, error) { - createOptions := []libcontainerd.CreateOption{} - - // Ensure a runtime has been assigned to this container - if container.HostConfig.Runtime == "" { - container.HostConfig.Runtime = stockRuntimeName - container.ToDisk() - } - - rt := daemon.configStore.GetRuntime(container.HostConfig.Runtime) - if rt == nil { - return nil, fmt.Errorf("no such runtime '%s'", container.HostConfig.Runtime) - } - createOptions = append(createOptions, libcontainerd.WithRuntime(rt.Path, rt.Args)) - - return &createOptions, nil -} diff --git a/daemon/start_windows.go b/daemon/start_windows.go deleted file mode 100644 index af3fe7602b..0000000000 
--- a/daemon/start_windows.go +++ /dev/null @@ -1,10 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "github.com/docker/docker/libcontainerd" -) - -func (daemon *Daemon) getLibcontainerdCreateOptions(container *container.Container) (*[]libcontainerd.CreateOption, error) { - return &[]libcontainerd.CreateOption{}, nil -} diff --git a/daemon/stats.go b/daemon/stats.go deleted file mode 100644 index f76a689775..0000000000 --- a/daemon/stats.go +++ /dev/null @@ -1,194 +0,0 @@ -package daemon - -import ( - "encoding/json" - "errors" - "fmt" - "runtime" - - "golang.org/x/net/context" - - "github.com/docker/docker/api/types/backend" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions" - "github.com/docker/engine-api/types/versions/v1p20" -) - -// ContainerStats writes information about the container to the stream -// given in the config object. -func (daemon *Daemon) ContainerStats(ctx context.Context, prefixOrName string, config *backend.ContainerStatsConfig) error { - if runtime.GOOS == "windows" { - return errors.New("Windows does not support stats") - } - // Remote API version (used for backwards compatibility) - apiVersion := config.Version - - container, err := daemon.GetContainer(prefixOrName) - if err != nil { - return err - } - - // If the container is not running and requires no stream, return an empty stats. - if !container.IsRunning() && !config.Stream { - return json.NewEncoder(config.OutStream).Encode(&types.Stats{}) - } - - outStream := config.OutStream - if config.Stream { - wf := ioutils.NewWriteFlusher(outStream) - defer wf.Close() - wf.Flush() - outStream = wf - } - - var preCPUStats types.CPUStats - getStatJSON := func(v interface{}) *types.StatsJSON { - ss := v.(types.StatsJSON) - ss.PreCPUStats = preCPUStats - preCPUStats = ss.CPUStats - return &ss - } - - enc := json.NewEncoder(outStream) - - updates := daemon.subscribeToContainerStats(container) - defer daemon.unsubscribeToContainerStats(container, updates) - - noStreamFirstFrame := true - for { - select { - case v, ok := <-updates: - if !ok { - return nil - } - - var statsJSON interface{} - statsJSONPost120 := getStatJSON(v) - if versions.LessThan(apiVersion, "1.21") { - var ( - rxBytes uint64 - rxPackets uint64 - rxErrors uint64 - rxDropped uint64 - txBytes uint64 - txPackets uint64 - txErrors uint64 - txDropped uint64 - ) - for _, v := range statsJSONPost120.Networks { - rxBytes += v.RxBytes - rxPackets += v.RxPackets - rxErrors += v.RxErrors - rxDropped += v.RxDropped - txBytes += v.TxBytes - txPackets += v.TxPackets - txErrors += v.TxErrors - txDropped += v.TxDropped - } - statsJSON = &v1p20.StatsJSON{ - Stats: statsJSONPost120.Stats, - Network: types.NetworkStats{ - RxBytes: rxBytes, - RxPackets: rxPackets, - RxErrors: rxErrors, - RxDropped: rxDropped, - TxBytes: txBytes, - TxPackets: txPackets, - TxErrors: txErrors, - TxDropped: txDropped, - }, - } - } else { - statsJSON = statsJSONPost120 - } - - if !config.Stream && noStreamFirstFrame { - // prime the cpu stats so they aren't 0 in the final output - noStreamFirstFrame = false - continue - } - - if err := enc.Encode(statsJSON); err != nil { - return err - } - - if !config.Stream { - return nil - } - case <-ctx.Done(): - return nil - } - } -} - -func (daemon *Daemon) subscribeToContainerStats(c *container.Container) chan interface{} { - return daemon.statsCollector.collect(c) -} - -func (daemon *Daemon) 
unsubscribeToContainerStats(c *container.Container, ch chan interface{}) { - daemon.statsCollector.unsubscribe(c, ch) -} - -// GetContainerStats collects all the stats published by a container -func (daemon *Daemon) GetContainerStats(container *container.Container) (*types.StatsJSON, error) { - stats, err := daemon.stats(container) - if err != nil { - return nil, err - } - - if stats.Networks, err = daemon.getNetworkStats(container); err != nil { - return nil, err - } - - return stats, nil -} - -// Resolve Network SandboxID in case the container reuse another container's network stack -func (daemon *Daemon) getNetworkSandboxID(c *container.Container) (string, error) { - curr := c - for curr.HostConfig.NetworkMode.IsContainer() { - containerID := curr.HostConfig.NetworkMode.ConnectedContainer() - connected, err := daemon.GetContainer(containerID) - if err != nil { - return "", fmt.Errorf("Could not get container for %s", containerID) - } - curr = connected - } - return curr.NetworkSettings.SandboxID, nil -} - -func (daemon *Daemon) getNetworkStats(c *container.Container) (map[string]types.NetworkStats, error) { - sandboxID, err := daemon.getNetworkSandboxID(c) - if err != nil { - return nil, err - } - - sb, err := daemon.netController.SandboxByID(sandboxID) - if err != nil { - return nil, err - } - - lnstats, err := sb.Statistics() - if err != nil { - return nil, err - } - - stats := make(map[string]types.NetworkStats) - // Convert libnetwork nw stats into engine-api stats - for ifName, ifStats := range lnstats { - stats[ifName] = types.NetworkStats{ - RxBytes: ifStats.RxBytes, - RxPackets: ifStats.RxPackets, - RxErrors: ifStats.RxErrors, - RxDropped: ifStats.RxDropped, - TxBytes: ifStats.TxBytes, - TxPackets: ifStats.TxPackets, - TxErrors: ifStats.TxErrors, - TxDropped: ifStats.TxDropped, - } - } - - return stats, nil -} diff --git a/daemon/stats_collector_solaris.go b/daemon/stats_collector_solaris.go deleted file mode 100644 index 9cf9f0a94e..0000000000 --- a/daemon/stats_collector_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/container" - "time" -) - -// newStatsCollector returns a new statsCollector for collection stats -// for a registered container at the specified interval. The collector allows -// non-running containers to be added and will start processing stats when -// they are started. -func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { - return &statsCollector{} -} - -// statsCollector manages and provides container resource stats -type statsCollector struct { -} - -// collect registers the container with the collector and adds it to -// the event loop for collection on the specified interval returning -// a channel for the subscriber to receive on. -func (s *statsCollector) collect(c *container.Container) chan interface{} { - return nil -} - -// stopCollection closes the channels for all subscribers and removes -// the container from metrics collection. -func (s *statsCollector) stopCollection(c *container.Container) { -} - -// unsubscribe removes a specific subscriber from receiving updates for a container's stats. 
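// A minimal sketch (not part of this patch) of the pre-1.21 downgrade
// ContainerStats performs above in daemon/stats.go: per-interface network
// counters are summed into one aggregate view for old API clients.
// Hypothetical stats struct; standard library only.
package main

import "fmt"

type ifaceStats struct{ RxBytes, TxBytes uint64 }

func aggregate(per map[string]ifaceStats) ifaceStats {
	var total ifaceStats
	for _, s := range per {
		total.RxBytes += s.RxBytes
		total.TxBytes += s.TxBytes
	}
	return total
}

func main() {
	per := map[string]ifaceStats{
		"eth0": {RxBytes: 1024, TxBytes: 2048},
		"eth1": {RxBytes: 512, TxBytes: 256},
	}
	fmt.Printf("%+v\n", aggregate(per)) // {RxBytes:1536 TxBytes:2304}
}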
-func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { -} diff --git a/daemon/stats_collector_unix.go b/daemon/stats_collector_unix.go deleted file mode 100644 index f66dc2c3c7..0000000000 --- a/daemon/stats_collector_unix.go +++ /dev/null @@ -1,189 +0,0 @@ -// +build !windows,!solaris - -package daemon - -import ( - "bufio" - "fmt" - "os" - "strconv" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/pkg/pubsub" - sysinfo "github.com/docker/docker/pkg/system" - "github.com/docker/engine-api/types" - "github.com/opencontainers/runc/libcontainer/system" -) - -type statsSupervisor interface { - // GetContainerStats collects all the stats related to a container - GetContainerStats(container *container.Container) (*types.StatsJSON, error) -} - -// newStatsCollector returns a new statsCollector that collections -// network and cgroup stats for a registered container at the specified -// interval. The collector allows non-running containers to be added -// and will start processing stats when they are started. -func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { - s := &statsCollector{ - interval: interval, - supervisor: daemon, - publishers: make(map[*container.Container]*pubsub.Publisher), - clockTicksPerSecond: uint64(system.GetClockTicks()), - bufReader: bufio.NewReaderSize(nil, 128), - } - meminfo, err := sysinfo.ReadMemInfo() - if err == nil && meminfo.MemTotal > 0 { - s.machineMemory = uint64(meminfo.MemTotal) - } - - go s.run() - return s -} - -// statsCollector manages and provides container resource stats -type statsCollector struct { - m sync.Mutex - supervisor statsSupervisor - interval time.Duration - clockTicksPerSecond uint64 - publishers map[*container.Container]*pubsub.Publisher - bufReader *bufio.Reader - machineMemory uint64 -} - -// collect registers the container with the collector and adds it to -// the event loop for collection on the specified interval returning -// a channel for the subscriber to receive on. -func (s *statsCollector) collect(c *container.Container) chan interface{} { - s.m.Lock() - defer s.m.Unlock() - publisher, exists := s.publishers[c] - if !exists { - publisher = pubsub.NewPublisher(100*time.Millisecond, 1024) - s.publishers[c] = publisher - } - return publisher.Subscribe() -} - -// stopCollection closes the channels for all subscribers and removes -// the container from metrics collection. -func (s *statsCollector) stopCollection(c *container.Container) { - s.m.Lock() - if publisher, exists := s.publishers[c]; exists { - publisher.Close() - delete(s.publishers, c) - } - s.m.Unlock() -} - -// unsubscribe removes a specific subscriber from receiving updates for a container's stats. -func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { - s.m.Lock() - publisher := s.publishers[c] - if publisher != nil { - publisher.Evict(ch) - if publisher.Len() == 0 { - delete(s.publishers, c) - } - } - s.m.Unlock() -} - -func (s *statsCollector) run() { - type publishersPair struct { - container *container.Container - publisher *pubsub.Publisher - } - // we cannot determine the capacity here. 
- // it will grow enough in first iteration - var pairs []publishersPair - - for range time.Tick(s.interval) { - // it does not make sense in the first iteration, - // but saves allocations in further iterations - pairs = pairs[:0] - - s.m.Lock() - for container, publisher := range s.publishers { - // copy pointers here to release the lock ASAP - pairs = append(pairs, publishersPair{container, publisher}) - } - s.m.Unlock() - if len(pairs) == 0 { - continue - } - - systemUsage, err := s.getSystemCPUUsage() - if err != nil { - logrus.Errorf("collecting system cpu usage: %v", err) - continue - } - - for _, pair := range pairs { - stats, err := s.supervisor.GetContainerStats(pair.container) - if err != nil { - if _, ok := err.(errNotRunning); !ok { - logrus.Errorf("collecting stats for %s: %v", pair.container.ID, err) - } - continue - } - // FIXME: move to containerd - stats.CPUStats.SystemUsage = systemUsage - - pair.publisher.Publish(*stats) - } - } -} - -const nanoSecondsPerSecond = 1e9 - -// getSystemCPUUsage returns the host system's cpu usage in -// nanoseconds. An error is returned if the format of the underlying -// file does not match. -// -// Uses /proc/stat defined by POSIX. Looks for the cpu -// statistics line and then sums up the first seven fields -// provided. See `man 5 proc` for details on specific field -// information. -func (s *statsCollector) getSystemCPUUsage() (uint64, error) { - var line string - f, err := os.Open("/proc/stat") - if err != nil { - return 0, err - } - defer func() { - s.bufReader.Reset(nil) - f.Close() - }() - s.bufReader.Reset(f) - err = nil - for err == nil { - line, err = s.bufReader.ReadString('\n') - if err != nil { - break - } - parts := strings.Fields(line) - switch parts[0] { - case "cpu": - if len(parts) < 8 { - return 0, fmt.Errorf("invalid number of cpu fields") - } - var totalClockTicks uint64 - for _, i := range parts[1:8] { - v, err := strconv.ParseUint(i, 10, 64) - if err != nil { - return 0, fmt.Errorf("Unable to convert value %s to int: %s", i, err) - } - totalClockTicks += v - } - return (totalClockTicks * nanoSecondsPerSecond) / - s.clockTicksPerSecond, nil - } - } - return 0, fmt.Errorf("invalid stat format. Error trying to parse the '/proc/stat' file") -} diff --git a/daemon/stats_collector_windows.go b/daemon/stats_collector_windows.go deleted file mode 100644 index b6cb24cd7c..0000000000 --- a/daemon/stats_collector_windows.go +++ /dev/null @@ -1,35 +0,0 @@ -package daemon - -import ( - "time" - - "github.com/docker/docker/container" -) - -// newStatsCollector returns a new statsCollector for collection stats -// for a registered container at the specified interval. The collector allows -// non-running containers to be added and will start processing stats when -// they are started. -func (daemon *Daemon) newStatsCollector(interval time.Duration) *statsCollector { - return &statsCollector{} -} - -// statsCollector manages and provides container resource stats -type statsCollector struct { -} - -// collect registers the container with the collector and adds it to -// the event loop for collection on the specified interval returning -// a channel for the subscriber to receive on. -func (s *statsCollector) collect(c *container.Container) chan interface{} { - return nil -} - -// stopCollection closes the channels for all subscribers and removes -// the container from metrics collection. 
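// A minimal sketch (not part of this patch) of why the Unix collector above
// attaches SystemUsage to every sample: a client can derive a CPU percentage
// from the container and system deltas between two samples, scaled by the
// CPU count. This is the usual client-side calculation, shown here on
// made-up numbers; standard library only.
package main

import "fmt"

func cpuPercent(prevC, curC, prevSys, curSys uint64, numCPUs int) float64 {
	cd := float64(curC - prevC)     // container CPU ns consumed
	sd := float64(curSys - prevSys) // total system CPU ns elapsed
	if sd <= 0 || cd <= 0 {
		return 0
	}
	return (cd / sd) * float64(numCPUs) * 100.0
}

func main() {
	fmt.Printf("%.1f%%\n", cpuPercent(100, 600, 1000, 3000, 2)) // 50.0%
}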
-func (s *statsCollector) stopCollection(c *container.Container) { -} - -// unsubscribe removes a specific subscriber from receiving updates for a container's stats. -func (s *statsCollector) unsubscribe(c *container.Container, ch chan interface{}) { -} diff --git a/daemon/stop.go b/daemon/stop.go deleted file mode 100644 index 4bbdbbd74c..0000000000 --- a/daemon/stop.go +++ /dev/null @@ -1,67 +0,0 @@ -package daemon - -import ( - "fmt" - "net/http" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/container" - "github.com/docker/docker/errors" -) - -// ContainerStop looks for the given container and terminates it, -// waiting the given number of seconds before forcefully killing the -// container. If a negative number of seconds is given, ContainerStop -// will wait for a graceful termination. An error is returned if the -// container is not found, is already stopped, or if there is a -// problem stopping the container. -func (daemon *Daemon) ContainerStop(name string, seconds int) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - if !container.IsRunning() { - err := fmt.Errorf("Container %s is already stopped", name) - return errors.NewErrorWithStatusCode(err, http.StatusNotModified) - } - if err := daemon.containerStop(container, seconds); err != nil { - return fmt.Errorf("Cannot stop container %s: %v", name, err) - } - return nil -} - -// containerStop halts a container by sending a stop signal, waiting for the given -// duration in seconds, and then calling SIGKILL and waiting for the -// process to exit. If a negative duration is given, Stop will wait -// for the initial signal forever. If the container is not running Stop returns -// immediately. -func (daemon *Daemon) containerStop(container *container.Container, seconds int) error { - if !container.IsRunning() { - return nil - } - - daemon.stopHealthchecks(container) - - stopSignal := container.StopSignal() - // 1. Send a stop signal - if err := daemon.killPossiblyDeadProcess(container, stopSignal); err != nil { - logrus.Infof("Failed to send signal %d to the process, force killing", stopSignal) - if err := daemon.killPossiblyDeadProcess(container, 9); err != nil { - return err - } - } - - // 2. Wait for the process to exit on its own - if _, err := container.WaitStop(time.Duration(seconds) * time.Second); err != nil { - logrus.Infof("Container %v failed to exit within %d seconds of signal %d - using the force", container.ID, seconds, stopSignal) - // 3. If it doesn't, then send SIGKILL - if err := daemon.Kill(container); err != nil { - container.WaitStop(-1 * time.Second) - logrus.Warn(err) // Don't return error because we only care that container is stopped, not what function stopped it - } - } - - daemon.LogContainerEvent(container, "stop") - return nil -} diff --git a/daemon/top_unix.go b/daemon/top_unix.go deleted file mode 100644 index 935f38f29e..0000000000 --- a/daemon/top_unix.go +++ /dev/null @@ -1,126 +0,0 @@ -//+build !windows - -package daemon - -import ( - "fmt" - "os/exec" - "regexp" - "strconv" - "strings" - - "github.com/docker/engine-api/types" -) - -func validatePSArgs(psArgs string) error { - // NOTE: \\s does not detect unicode whitespaces. - // So we use fieldsASCII instead of strings.Fields in parsePSOutput. 
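// A minimal sketch (not part of this patch) of the Unicode-whitespace pitfall
// noted above: strings.Fields splits on any Unicode whitespace, so a ps
// column padded with U+2003 (em space) splits differently than it does under
// the ASCII-only fieldsASCII helper defined below. Standard library only.
package main

import (
	"fmt"
	"strings"
)

// asciiFields mirrors fieldsASCII below: split on ASCII whitespace only.
func asciiFields(s string) []string {
	return strings.FieldsFunc(s, func(r rune) bool {
		switch r {
		case '\t', '\n', '\f', '\r', ' ':
			return true
		}
		return false
	})
}

func main() {
	line := "42\u2003foo" // "42<U+2003 em space>foo"
	fmt.Println(len(strings.Fields(line))) // 2: Unicode-aware split sees two fields
	fmt.Println(len(asciiFields(line)))    // 1: ASCII-only split keeps one field
}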
- // See https://github.com/docker/docker/pull/24358 - re := regexp.MustCompile("\\s+([^\\s]*)=\\s*(PID[^\\s]*)") - for _, group := range re.FindAllStringSubmatch(psArgs, -1) { - if len(group) >= 3 { - k := group[1] - v := group[2] - if k != "pid" { - return fmt.Errorf("specifying \"%s=%s\" is not allowed", k, v) - } - } - } - return nil -} - -// fieldsASCII is similar to strings.Fields but only allows ASCII whitespaces -func fieldsASCII(s string) []string { - fn := func(r rune) bool { - switch r { - case '\t', '\n', '\f', '\r', ' ': - return true - } - return false - } - return strings.FieldsFunc(s, fn) -} - -func parsePSOutput(output []byte, pids []int) (*types.ContainerProcessList, error) { - procList := &types.ContainerProcessList{} - - lines := strings.Split(string(output), "\n") - procList.Titles = fieldsASCII(lines[0]) - - pidIndex := -1 - for i, name := range procList.Titles { - if name == "PID" { - pidIndex = i - } - } - if pidIndex == -1 { - return nil, fmt.Errorf("Couldn't find PID field in ps output") - } - - // loop through the output and extract the PID from each line - for _, line := range lines[1:] { - if len(line) == 0 { - continue - } - fields := fieldsASCII(line) - p, err := strconv.Atoi(fields[pidIndex]) - if err != nil { - return nil, fmt.Errorf("Unexpected pid '%s': %s", fields[pidIndex], err) - } - - for _, pid := range pids { - if pid == p { - // Make sure number of fields equals number of header titles - // merging "overhanging" fields - process := fields[:len(procList.Titles)-1] - process = append(process, strings.Join(fields[len(procList.Titles)-1:], " ")) - procList.Processes = append(procList.Processes, process) - } - } - } - return procList, nil -} - -// ContainerTop lists the processes running inside of the given -// container by calling ps with the given args, or with the flags -// "-ef" if no args are given. An error is returned if the container -// is not found, or is not running, or if there are any problems -// running ps, or parsing the output. 
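// A minimal sketch (not part of this patch) of the "overhanging fields" merge
// parsePSOutput performs above: a ps CMD column can itself contain spaces, so
// every field past the last title is re-joined into a single field. Standard
// library only.
package main

import (
	"fmt"
	"strings"
)

func main() {
	titles := []string{"PID", "CMD"}
	// A ps output line whose CMD column contains spaces.
	fields := []string{"42", "nginx", "-g", "daemon off;"}

	// Keep one field per title except the last, then re-join the overhang.
	process := append([]string{}, fields[:len(titles)-1]...)
	process = append(process, strings.Join(fields[len(titles)-1:], " "))
	fmt.Printf("%q\n", process) // ["42" "nginx -g daemon off;"]
}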
-func (daemon *Daemon) ContainerTop(name string, psArgs string) (*types.ContainerProcessList, error) { - if psArgs == "" { - psArgs = "-ef" - } - - if err := validatePSArgs(psArgs); err != nil { - return nil, err - } - - container, err := daemon.GetContainer(name) - if err != nil { - return nil, err - } - - if !container.IsRunning() { - return nil, errNotRunning{container.ID} - } - - if container.IsRestarting() { - return nil, errContainerIsRestarting(container.ID) - } - - pids, err := daemon.containerd.GetPidsForContainer(container.ID) - if err != nil { - return nil, err - } - - output, err := exec.Command("ps", strings.Split(psArgs, " ")...).Output() - if err != nil { - return nil, fmt.Errorf("Error running ps: %v", err) - } - procList, err := parsePSOutput(output, pids) - if err != nil { - return nil, err - } - daemon.LogContainerEvent(container, "top") - return procList, nil -} diff --git a/daemon/top_unix_test.go b/daemon/top_unix_test.go deleted file mode 100644 index 269ab6e947..0000000000 --- a/daemon/top_unix_test.go +++ /dev/null @@ -1,76 +0,0 @@ -//+build !windows - -package daemon - -import ( - "testing" -) - -func TestContainerTopValidatePSArgs(t *testing.T) { - tests := map[string]bool{ - "ae -o uid=PID": true, - "ae -o \"uid= PID\"": true, // ascii space (0x20) - "ae -o \"uid= PID\"": false, // unicode space (U+2003, 0xe2 0x80 0x83) - "ae o uid=PID": true, - "aeo uid=PID": true, - "ae -O uid=PID": true, - "ae -o pid=PID2 -o uid=PID": true, - "ae -o pid=PID": false, - "ae -o pid=PID -o uid=PIDX": true, // FIXME: we do not need to prohibit this - "aeo pid=PID": false, - "ae": false, - "": false, - } - for psArgs, errExpected := range tests { - err := validatePSArgs(psArgs) - t.Logf("tested %q, got err=%v", psArgs, err) - if errExpected && err == nil { - t.Fatalf("expected error, got %v (%q)", err, psArgs) - } - if !errExpected && err != nil { - t.Fatalf("expected nil, got %v (%q)", err, psArgs) - } - } -} - -func TestContainerTopParsePSOutput(t *testing.T) { - tests := []struct { - output []byte - pids []int - errExpected bool - }{ - {[]byte(` PID COMMAND - 42 foo - 43 bar - 100 baz -`), []int{42, 43}, false}, - {[]byte(` UID COMMAND - 42 foo - 43 bar - 100 baz -`), []int{42, 43}, true}, - // unicode space (U+2003, 0xe2 0x80 0x83) - {[]byte(` PID COMMAND - 42 foo - 43 bar - 100 baz -`), []int{42, 43}, true}, - // the first space is U+2003, the second one is ascii. - {[]byte(` PID COMMAND - 42 foo - 43 bar - 100 baz -`), []int{42, 43}, true}, - } - - for _, f := range tests { - _, err := parsePSOutput(f.output, f.pids) - t.Logf("tested %q, got err=%v", string(f.output), err) - if f.errExpected && err == nil { - t.Fatalf("expected error, got %v (%q)", err, string(f.output)) - } - if !f.errExpected && err != nil { - t.Fatalf("expected nil, got %v (%q)", err, string(f.output)) - } - } -} diff --git a/daemon/top_windows.go b/daemon/top_windows.go deleted file mode 100644 index ea79ac86ac..0000000000 --- a/daemon/top_windows.go +++ /dev/null @@ -1,32 +0,0 @@ -package daemon - -import ( - "errors" - "strconv" - - "github.com/docker/engine-api/types" -) - -// ContainerTop is a minimal implementation on Windows currently. -// TODO Windows: This needs more work, but needs platform API support. -// All we can currently return (particularly in the case of Hyper-V containers) -// is a PID and the command. 
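// A minimal sketch (not part of this patch) of the flattened shape the
// Windows ContainerTop below produces: each process becomes a single
// "PID command" string appended to the Titles list. Hypothetical summary
// type; standard library only.
package main

import (
	"fmt"
	"strconv"
)

type proc struct {
	Pid     uint32
	Command string
}

func main() {
	summary := []proc{{4242, "cmd.exe"}, {4243, "powershell.exe"}}
	var titles []string
	for _, v := range summary {
		titles = append(titles, strconv.Itoa(int(v.Pid))+" "+v.Command)
	}
	fmt.Println(titles) // [4242 cmd.exe 4243 powershell.exe]
}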
-func (daemon *Daemon) ContainerTop(containerID string, psArgs string) (*types.ContainerProcessList, error) { - - // It's really not an equivalent to linux 'ps' on Windows - if psArgs != "" { - return nil, errors.New("Windows does not support arguments to top") - } - - s, err := daemon.containerd.Summary(containerID) - if err != nil { - return nil, err - } - - procList := &types.ContainerProcessList{} - - for _, v := range s { - procList.Titles = append(procList.Titles, strconv.Itoa(int(v.Pid))+" "+v.Command) - } - return procList, nil -} diff --git a/daemon/unpause.go b/daemon/unpause.go deleted file mode 100644 index c1ab74b0bf..0000000000 --- a/daemon/unpause.go +++ /dev/null @@ -1,43 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/docker/container" -) - -// ContainerUnpause unpauses a container -func (daemon *Daemon) ContainerUnpause(name string) error { - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - if err := daemon.containerUnpause(container); err != nil { - return err - } - - return nil -} - -// containerUnpause resumes the container execution after the container is paused. -func (daemon *Daemon) containerUnpause(container *container.Container) error { - container.Lock() - defer container.Unlock() - - // We cannot unpause the container which is not running - if !container.Running { - return errNotRunning{container.ID} - } - - // We cannot unpause the container which is not paused - if !container.Paused { - return fmt.Errorf("Container %s is not paused", container.ID) - } - - if err := daemon.containerd.Resume(container.ID); err != nil { - return fmt.Errorf("Cannot unpause container %s: %s", container.ID, err) - } - - return nil -} diff --git a/daemon/update.go b/daemon/update.go deleted file mode 100644 index 0a5e76d1cd..0000000000 --- a/daemon/update.go +++ /dev/null @@ -1,94 +0,0 @@ -package daemon - -import ( - "fmt" - - "github.com/docker/engine-api/types/container" -) - -// ContainerUpdate updates configuration of the container -func (daemon *Daemon) ContainerUpdate(name string, hostConfig *container.HostConfig, validateHostname bool) ([]string, error) { - var warnings []string - - warnings, err := daemon.verifyContainerSettings(hostConfig, nil, true, validateHostname) - if err != nil { - return warnings, err - } - - if err := daemon.update(name, hostConfig); err != nil { - return warnings, err - } - - return warnings, nil -} - -// ContainerUpdateCmdOnBuild updates Path and Args for the container with ID cID. 
-func (daemon *Daemon) ContainerUpdateCmdOnBuild(cID string, cmd []string) error { - if len(cmd) == 0 { - return nil - } - c, err := daemon.GetContainer(cID) - if err != nil { - return err - } - c.Path = cmd[0] - c.Args = cmd[1:] - return nil -} - -func (daemon *Daemon) update(name string, hostConfig *container.HostConfig) error { - if hostConfig == nil { - return nil - } - - container, err := daemon.GetContainer(name) - if err != nil { - return err - } - - restoreConfig := false - backupHostConfig := *container.HostConfig - defer func() { - if restoreConfig { - container.Lock() - container.HostConfig = &backupHostConfig - container.ToDisk() - container.Unlock() - } - }() - - if container.RemovalInProgress || container.Dead { - return errCannotUpdate(container.ID, fmt.Errorf("Container is marked for removal and cannot be \"update\".")) - } - - if container.IsRunning() && hostConfig.KernelMemory != 0 { - return errCannotUpdate(container.ID, fmt.Errorf("Can not update kernel memory to a running container, please stop it first.")) - } - - if err := container.UpdateContainer(hostConfig); err != nil { - restoreConfig = true - return errCannotUpdate(container.ID, err) - } - - // if Restart Policy changed, we need to update container monitor - container.UpdateMonitor(hostConfig.RestartPolicy) - - // If container is not running, update hostConfig struct is enough, - // resources will be updated when the container is started again. - // If container is running (including paused), we need to update configs - // to the real world. - if container.IsRunning() && !container.IsRestarting() { - if err := daemon.containerd.UpdateResources(container.ID, toContainerdResources(hostConfig.Resources)); err != nil { - restoreConfig = true - return errCannotUpdate(container.ID, err) - } - } - - daemon.LogContainerEvent(container, "update") - - return nil -} - -func errCannotUpdate(containerID string, err error) error { - return fmt.Errorf("Cannot update container %s: %v", containerID, err) -} diff --git a/daemon/update_linux.go b/daemon/update_linux.go deleted file mode 100644 index 69cc0840ab..0000000000 --- a/daemon/update_linux.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build linux - -package daemon - -import ( - "github.com/docker/docker/libcontainerd" - "github.com/docker/engine-api/types/container" -) - -func toContainerdResources(resources container.Resources) libcontainerd.Resources { - var r libcontainerd.Resources - r.BlkioWeight = uint64(resources.BlkioWeight) - r.CpuShares = uint64(resources.CPUShares) - r.CpuPeriod = uint64(resources.CPUPeriod) - r.CpuQuota = uint64(resources.CPUQuota) - r.CpusetCpus = resources.CpusetCpus - r.CpusetMems = resources.CpusetMems - r.MemoryLimit = uint64(resources.Memory) - if resources.MemorySwap > 0 { - r.MemorySwap = uint64(resources.MemorySwap) - } - r.MemoryReservation = uint64(resources.MemoryReservation) - r.KernelMemoryLimit = uint64(resources.KernelMemory) - return r -} diff --git a/daemon/update_solaris.go b/daemon/update_solaris.go deleted file mode 100644 index 848adae9d2..0000000000 --- a/daemon/update_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package daemon - -import ( - "github.com/docker/docker/libcontainerd" - "github.com/docker/engine-api/types/container" -) - -func toContainerdResources(resources container.Resources) libcontainerd.Resources { - var r libcontainerd.Resources - return r -} diff --git a/daemon/update_windows.go b/daemon/update_windows.go deleted file mode 100644 index 2cd0ff2618..0000000000 --- a/daemon/update_windows.go +++ /dev/null @@ 
-1,13 +0,0 @@ -// +build windows - -package daemon - -import ( - "github.com/docker/docker/libcontainerd" - "github.com/docker/engine-api/types/container" -) - -func toContainerdResources(resources container.Resources) libcontainerd.Resources { - var r libcontainerd.Resources - return r -} diff --git a/daemon/volumes.go b/daemon/volumes.go deleted file mode 100644 index d178d410ab..0000000000 --- a/daemon/volumes.go +++ /dev/null @@ -1,185 +0,0 @@ -package daemon - -import ( - "errors" - "fmt" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/container" - "github.com/docker/docker/volume" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" -) - -var ( - // ErrVolumeReadonly is used to signal an error when trying to copy data into - // a volume mount that is not writable. - ErrVolumeReadonly = errors.New("mounted volume is marked read-only") -) - -type mounts []container.Mount - -// volumeToAPIType converts a volume.Volume to the type used by the remote API -func volumeToAPIType(v volume.Volume) *types.Volume { - tv := &types.Volume{ - Name: v.Name(), - Driver: v.DriverName(), - } - if v, ok := v.(volume.LabeledVolume); ok { - tv.Labels = v.Labels() - } - - if v, ok := v.(volume.ScopedVolume); ok { - tv.Scope = v.Scope() - } - return tv -} - -// Len returns the number of mounts. Used in sorting. -func (m mounts) Len() int { - return len(m) -} - -// Less returns true if the number of parts (a/b/c would be 3 parts) in the -// mount indexed by parameter 1 is less than that of the mount indexed by -// parameter 2. Used in sorting. -func (m mounts) Less(i, j int) bool { - return m.parts(i) < m.parts(j) -} - -// Swap swaps two items in an array of mounts. Used in sorting -func (m mounts) Swap(i, j int) { - m[i], m[j] = m[j], m[i] -} - -// parts returns the number of parts in the destination of a mount. Used in sorting. -func (m mounts) parts(i int) int { - return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator)) -} - -// registerMountPoints initializes the container mount points with the configured volumes and bind mounts. -// It follows the next sequence to decide what to mount in each final destination: -// -// 1. Select the previously configured mount points for the containers, if any. -// 2. Select the volumes mounted from another containers. Overrides previously configured mount point destination. -// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations. -// 4. Cleanup old volumes that are about to be reassigned. -func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) { - binds := map[string]bool{} - mountPoints := map[string]*volume.MountPoint{} - defer func() { - // clean up the container mountpoints once return with error - if retErr != nil { - for _, m := range mountPoints { - if m.Volume == nil { - continue - } - daemon.volumes.Dereference(m.Volume, container.ID) - } - } - }() - - // 1. Read already configured mount points. - for name, point := range container.MountPoints { - mountPoints[name] = point - } - - // 2. Read volumes from other containers. 
-
-// Len returns the number of mounts. Used in sorting.
-func (m mounts) Len() int {
-	return len(m)
-}
-
-// Less returns true if the number of parts (a/b/c would be 3 parts) in the
-// mount indexed by parameter 1 is less than that of the mount indexed by
-// parameter 2. Used in sorting.
-func (m mounts) Less(i, j int) bool {
-	return m.parts(i) < m.parts(j)
-}
-
-// Swap swaps two items in an array of mounts. Used in sorting.
-func (m mounts) Swap(i, j int) {
-	m[i], m[j] = m[j], m[i]
-}
-
-// parts returns the number of parts in the destination of a mount. Used in sorting.
-func (m mounts) parts(i int) int {
-	return strings.Count(filepath.Clean(m[i].Destination), string(os.PathSeparator))
-}
-
-// registerMountPoints initializes the container mount points with the configured volumes and bind mounts.
-// It follows this sequence to decide what to mount in each final destination:
-//
-// 1. Select the previously configured mount points for the container, if any.
-// 2. Select the volumes mounted from other containers. Overrides previously configured mount point destinations.
-// 3. Select the bind mounts set by the client. Overrides previously configured mount point destinations.
-// 4. Clean up old volumes that are about to be reassigned.
-func (daemon *Daemon) registerMountPoints(container *container.Container, hostConfig *containertypes.HostConfig) (retErr error) {
-	binds := map[string]bool{}
-	mountPoints := map[string]*volume.MountPoint{}
-	defer func() {
-		// Clean up the container's mount points if we return with an error.
-		if retErr != nil {
-			for _, m := range mountPoints {
-				if m.Volume == nil {
-					continue
-				}
-				daemon.volumes.Dereference(m.Volume, container.ID)
-			}
-		}
-	}()
-
-	// 1. Read already configured mount points.
-	for name, point := range container.MountPoints {
-		mountPoints[name] = point
-	}
-
-	// 2. Read volumes from other containers.
-	for _, v := range hostConfig.VolumesFrom {
-		containerID, mode, err := volume.ParseVolumesFrom(v)
-		if err != nil {
-			return err
-		}
-
-		c, err := daemon.GetContainer(containerID)
-		if err != nil {
-			return err
-		}
-
-		for _, m := range c.MountPoints {
-			cp := &volume.MountPoint{
-				Name:        m.Name,
-				Source:      m.Source,
-				RW:          m.RW && volume.ReadWrite(mode),
-				Driver:      m.Driver,
-				Destination: m.Destination,
-				Propagation: m.Propagation,
-				Named:       m.Named,
-			}
-
-			if len(cp.Source) == 0 {
-				v, err := daemon.volumes.GetWithRef(cp.Name, cp.Driver, container.ID)
-				if err != nil {
-					return err
-				}
-				cp.Volume = v
-			}
-
-			mountPoints[cp.Destination] = cp
-		}
-	}
-
-	// 3. Read bind mounts
-	for _, b := range hostConfig.Binds {
-		// #10618
-		bind, err := volume.ParseMountSpec(b, hostConfig.VolumeDriver)
-		if err != nil {
-			return err
-		}
-
-		_, tmpfsExists := hostConfig.Tmpfs[bind.Destination]
-		if binds[bind.Destination] || tmpfsExists {
-			return fmt.Errorf("Duplicate mount point '%s'", bind.Destination)
-		}
-
-		if len(bind.Name) > 0 {
-			// create the volume
-			v, err := daemon.volumes.CreateWithRef(bind.Name, bind.Driver, container.ID, nil, nil)
-			if err != nil {
-				return err
-			}
-			bind.Volume = v
-			bind.Source = v.Path()
-			// bind.Name refers to an already existing volume; use its driver here
-			bind.Driver = v.DriverName()
-			bind.Named = true
-			if bind.Driver == "local" {
-				bind = setBindModeIfNull(bind)
-			}
-		}
-
-		binds[bind.Destination] = true
-		mountPoints[bind.Destination] = bind
-	}
-
-	container.Lock()
-
-	// 4. Clean up old volumes that are about to be reassigned.
-	for _, m := range mountPoints {
-		if m.BackwardsCompatible() {
-			if mp, exists := container.MountPoints[m.Destination]; exists && mp.Volume != nil {
-				daemon.volumes.Dereference(mp.Volume, container.ID)
-			}
-		}
-	}
-	container.MountPoints = mountPoints
-
-	container.Unlock()
-
-	return nil
-}
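registerMountPoints resolves competing mount sources by keying everything on the destination path, so each later pass (volumes-from, then binds) overrides earlier entries for the same destination. A minimal sketch of that destination-keyed merge, using a pared-down `mount` type in place of volume.MountPoint:

package main

import "fmt"

// mount is a pared-down stand-in for volume.MountPoint.
type mount struct {
	Destination string
	Source      string
}

// mergeMounts applies the precedence described in registerMountPoints:
// each successive source is written into a map keyed by destination, so
// later sources (volumes-from, then binds) override earlier ones.
func mergeMounts(sources ...[]mount) map[string]mount {
	merged := make(map[string]mount)
	for _, src := range sources {
		for _, m := range src {
			merged[m.Destination] = m // last writer wins
		}
	}
	return merged
}

func main() {
	existing := []mount{{Destination: "/data", Source: "vol-old"}}
	volumesFrom := []mount{{Destination: "/data", Source: "vol-shared"}}
	binds := []mount{{Destination: "/data", Source: "/host/data"}}

	merged := mergeMounts(existing, volumesFrom, binds)
	fmt.Println(merged["/data"].Source) // "/host/data": the bind wins
}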
-
-// lazyInitializeVolume initializes a mount point's volume if needed.
-// This happens after a daemon restart.
-func (daemon *Daemon) lazyInitializeVolume(containerID string, m *volume.MountPoint) error {
-	if len(m.Driver) > 0 && m.Volume == nil {
-		v, err := daemon.volumes.GetWithRef(m.Name, m.Driver, containerID)
-		if err != nil {
-			return err
-		}
-		m.Volume = v
-	}
-	return nil
-}
diff --git a/daemon/volumes_unit_test.go b/daemon/volumes_unit_test.go
deleted file mode 100644
index 450d17f978..0000000000
--- a/daemon/volumes_unit_test.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package daemon
-
-import (
-	"testing"
-
-	"github.com/docker/docker/volume"
-)
-
-func TestParseVolumesFrom(t *testing.T) {
-	cases := []struct {
-		spec    string
-		expID   string
-		expMode string
-		fail    bool
-	}{
-		{"", "", "", true},
-		{"foobar", "foobar", "rw", false},
-		{"foobar:rw", "foobar", "rw", false},
-		{"foobar:ro", "foobar", "ro", false},
-		{"foobar:baz", "", "", true},
-	}
-
-	for _, c := range cases {
-		id, mode, err := volume.ParseVolumesFrom(c.spec)
-		if c.fail {
-			if err == nil {
-				t.Fatalf("Expected error, was nil, for spec %s\n", c.spec)
-			}
-			continue
-		}
-
-		if id != c.expID {
-			t.Fatalf("Expected id %s, was %s, for spec %s\n", c.expID, id, c.spec)
-		}
-		if mode != c.expMode {
-			t.Fatalf("Expected mode %s, was %s for spec %s\n", c.expMode, mode, c.spec)
-		}
-	}
-}
diff --git a/daemon/volumes_unix.go b/daemon/volumes_unix.go
deleted file mode 100644
index ca0628c89b..0000000000
--- a/daemon/volumes_unix.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// +build !windows
-
-package daemon
-
-import (
-	"os"
-	"sort"
-	"strconv"
-
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/volume"
-)
-
-// setupMounts iterates through each of the mount points for a container and
-// calls Setup() on each. It also looks to see if it is a network mount such as
-// /etc/resolv.conf, and if it is not, appends it to the array of mounts.
-func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) {
-	var mounts []container.Mount
-	// TODO: tmpfs mounts should be part of Mountpoints
-	tmpfsMounts := make(map[string]bool)
-	for _, m := range c.TmpfsMounts() {
-		tmpfsMounts[m.Destination] = true
-	}
-	for _, m := range c.MountPoints {
-		if tmpfsMounts[m.Destination] {
-			continue
-		}
-		if err := daemon.lazyInitializeVolume(c.ID, m); err != nil {
-			return nil, err
-		}
-		path, err := m.Setup(c.MountLabel)
-		if err != nil {
-			return nil, err
-		}
-		if !c.TrySetNetworkMount(m.Destination, path) {
-			mnt := container.Mount{
-				Source:      path,
-				Destination: m.Destination,
-				Writable:    m.RW,
-				Propagation: m.Propagation,
-			}
-			if m.Volume != nil {
-				attributes := map[string]string{
-					"driver":      m.Volume.DriverName(),
-					"container":   c.ID,
-					"destination": m.Destination,
-					"read/write":  strconv.FormatBool(m.RW),
-					"propagation": m.Propagation,
-				}
-				daemon.LogVolumeEvent(m.Volume.Name(), "mount", attributes)
-			}
-			mounts = append(mounts, mnt)
-		}
-	}
-
-	mounts = sortMounts(mounts)
-	netMounts := c.NetworkMounts()
-	// if we are going to mount any of the network files from container
-	// metadata, the ownership must be set properly for potential container
-	// remapped root (user namespaces)
-	rootUID, rootGID := daemon.GetRemappedUIDGID()
-	for _, mount := range netMounts {
-		if err := os.Chown(mount.Source, rootUID, rootGID); err != nil {
-			return nil, err
-		}
-	}
-	return append(mounts, netMounts...), nil
-}
-
-// sortMounts sorts an array of mounts in lexicographic order. This ensures
-// that when mounting, the mounts don't shadow other mounts. For example, if
-// mounting /etc and /etc/resolv.conf, /etc/resolv.conf must not be mounted
-// first.
-func sortMounts(m []container.Mount) []container.Mount {
-	sort.Sort(mounts(m))
-	return m
-}
-
-// setBindModeIfNull is platform-specific processing to ensure the
-// shared mode is set to 'z' if it is null. This is called in the case
-// of processing a named volume and not a typical bind.
-func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint {
-	if bind.Mode == "" {
-		bind.Mode = "z"
-	}
-	return bind
-}
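sortMounts orders mounts by the number of path components so that parent directories mount before their children and nothing is shadowed. A standalone sketch of the same ordering using sort.Slice (the `byDepth` helper is illustrative, not part of the daemon):

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"sort"
	"strings"
)

// byDepth sorts destinations by the number of path components, the same
// ordering the mounts type derives from parts(): parents such as /etc
// sort before children such as /etc/resolv.conf, so a child mount is
// never shadowed by a parent mounted after it.
func byDepth(dests []string) {
	sort.Slice(dests, func(i, j int) bool {
		pi := strings.Count(filepath.Clean(dests[i]), string(os.PathSeparator))
		pj := strings.Count(filepath.Clean(dests[j]), string(os.PathSeparator))
		return pi < pj
	})
}

func main() {
	d := []string{"/etc/resolv.conf", "/etc", "/var/lib/docker"}
	byDepth(d)
	fmt.Println(d) // [/etc /etc/resolv.conf /var/lib/docker]
}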
diff --git a/daemon/volumes_windows.go b/daemon/volumes_windows.go
deleted file mode 100644
index e7f9c098d9..0000000000
--- a/daemon/volumes_windows.go
+++ /dev/null
@@ -1,51 +0,0 @@
-// +build windows
-
-package daemon
-
-import (
-	"fmt"
-	"sort"
-
-	"github.com/docker/docker/container"
-	"github.com/docker/docker/volume"
-)
-
-// setupMounts configures the mount points for a container by appending each
-// of the configured mounts on the container to the OCI mount structure
-// which will ultimately be passed into the OCI runtime during container creation.
-// It also ensures each of the mounts is lexicographically sorted.
-
-// BUGBUG TODO Windows containerd. This would be much better if it returned
-// an array of windowsoci mounts, not container mounts. Then there would be
-// no need to do multiple transitions.
-
-func (daemon *Daemon) setupMounts(c *container.Container) ([]container.Mount, error) {
-	var mnts []container.Mount
-	for _, mount := range c.MountPoints { // type is volume.MountPoint
-		if err := daemon.lazyInitializeVolume(c.ID, mount); err != nil {
-			return nil, err
-		}
-		// If there is no source, take it from the volume path
-		s := mount.Source
-		if s == "" && mount.Volume != nil {
-			s = mount.Volume.Path()
-		}
-		if s == "" {
-			return nil, fmt.Errorf("No source for mount name '%s' driver %q destination '%s'", mount.Name, mount.Driver, mount.Destination)
-		}
-		mnts = append(mnts, container.Mount{
-			Source:      s,
-			Destination: mount.Destination,
-			Writable:    mount.RW,
-		})
-	}
-
-	sort.Sort(mounts(mnts))
-	return mnts, nil
-}
-
-// setBindModeIfNull is platform-specific processing which is a no-op on
-// Windows.
-func setBindModeIfNull(bind *volume.MountPoint) *volume.MountPoint {
-	return bind
-}
diff --git a/daemon/wait.go b/daemon/wait.go
deleted file mode 100644
index 2dab22e991..0000000000
--- a/daemon/wait.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package daemon
-
-import (
-	"time"
-
-	"golang.org/x/net/context"
-)
-
-// ContainerWait stops processing until the given container is
-// stopped. If the container is not found, an error is returned. On a
-// successful stop, the exit code of the container is returned. On a
-// timeout, an error is returned. If you want to wait forever, supply
-// a negative duration for the timeout.
-func (daemon *Daemon) ContainerWait(name string, timeout time.Duration) (int, error) {
-	container, err := daemon.GetContainer(name)
-	if err != nil {
-		return -1, err
-	}
-
-	return container.WaitStop(timeout)
-}
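ContainerWait above and ContainerWaitWithContext just below illustrate the two usual Go cancellation styles: a timeout duration versus a caller-supplied context. A sketch of both, with a done channel standing in for the container's stop signal (`waitStop` and `waitWithContext` are hypothetical helpers, not the daemon's API):

package main

import (
	"context"
	"fmt"
	"time"
)

// waitStop blocks until done is closed or the timeout elapses. A
// negative timeout waits forever, matching the ContainerWait contract.
func waitStop(done <-chan struct{}, timeout time.Duration) error {
	if timeout < 0 {
		<-done
		return nil
	}
	select {
	case <-done:
		return nil
	case <-time.After(timeout):
		return fmt.Errorf("timed out after %v", timeout)
	}
}

// waitWithContext is the context-based variant: cancellation and
// deadlines come from the caller instead of a fixed duration.
func waitWithContext(ctx context.Context, done <-chan struct{}) error {
	select {
	case <-done:
		return nil
	case <-ctx.Done():
		return ctx.Err()
	}
}

func main() {
	done := make(chan struct{})
	go func() { time.Sleep(10 * time.Millisecond); close(done) }()

	fmt.Println(waitStop(done, time.Second)) // <nil>

	ctx, cancel := context.WithTimeout(context.Background(), time.Millisecond)
	defer cancel()
	fmt.Println(waitWithContext(ctx, make(chan struct{}))) // context deadline exceeded
}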
-
-// ContainerWaitWithContext blocks until the given container stops or the
-// context is cancelled.
-func (daemon *Daemon) ContainerWaitWithContext(ctx context.Context, name string) error {
-	container, err := daemon.GetContainer(name)
-	if err != nil {
-		return err
-	}
-
-	return container.WaitWithContext(ctx)
-}
diff --git a/distribution/errors.go b/distribution/errors.go
deleted file mode 100644
index 1e7630e380..0000000000
--- a/distribution/errors.go
+++ /dev/null
@@ -1,117 +0,0 @@
-package distribution
-
-import (
-	"net/url"
-	"strings"
-	"syscall"
-
-	"github.com/docker/distribution/registry/api/errcode"
-	"github.com/docker/distribution/registry/api/v2"
-	"github.com/docker/distribution/registry/client"
-	"github.com/docker/distribution/registry/client/auth"
-	"github.com/docker/docker/distribution/xfer"
-)
-
-// ErrNoSupport is an error type used for errors indicating that an operation
-// is not supported. It encapsulates a more specific error.
-type ErrNoSupport struct{ Err error }
-
-func (e ErrNoSupport) Error() string {
-	if e.Err == nil {
-		return "not supported"
-	}
-	return e.Err.Error()
-}
-
-// fallbackError wraps an error that can possibly allow fallback to a different
-// endpoint.
-type fallbackError struct {
-	// err is the error being wrapped.
-	err error
-	// confirmedV2 is set to true if it was confirmed that the registry
-	// supports the v2 protocol. This is used to limit fallbacks to the v1
-	// protocol.
-	confirmedV2 bool
-	// transportOK is set to true if we managed to speak HTTP with the
-	// registry. This confirms that we're using appropriate TLS settings
-	// (or lack of TLS).
-	transportOK bool
-}
-
-// Error renders the fallbackError as a string.
-func (f fallbackError) Error() string {
-	return f.Cause().Error()
-}
-
-func (f fallbackError) Cause() error {
-	return f.err
-}
-
-// shouldV2Fallback returns true if this error is a reason to fall back to v1.
-func shouldV2Fallback(err errcode.Error) bool {
-	switch err.Code {
-	case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
-		return true
-	}
-	return false
-}
-
-// continueOnError returns true if we should fall back to the next endpoint
-// as a result of this error.
-func continueOnError(err error) bool {
-	switch v := err.(type) {
-	case errcode.Errors:
-		if len(v) == 0 {
-			return true
-		}
-		return continueOnError(v[0])
-	case ErrNoSupport:
-		return continueOnError(v.Err)
-	case errcode.Error:
-		return shouldV2Fallback(v)
-	case *client.UnexpectedHTTPResponseError:
-		return true
-	case ImageConfigPullError:
-		return false
-	case error:
-		return !strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error()))
-	}
-	// Let's be nice and fall back if the error is a completely
-	// unexpected one.
-	// If new errors have to be handled in some way, please
-	// add them to the switch above.
-	return true
-}
-
-// retryOnError wraps the error in xfer.DoNotRetry if we should not retry the
-// operation after this error.
-func retryOnError(err error) error { - switch v := err.(type) { - case errcode.Errors: - if len(v) != 0 { - return retryOnError(v[0]) - } - case errcode.Error: - switch v.Code { - case errcode.ErrorCodeUnauthorized, errcode.ErrorCodeUnsupported, errcode.ErrorCodeDenied, errcode.ErrorCodeTooManyRequests, v2.ErrorCodeNameUnknown: - return xfer.DoNotRetry{Err: err} - } - case *url.Error: - switch v.Err { - case auth.ErrNoBasicAuthCredentials, auth.ErrNoToken: - return xfer.DoNotRetry{Err: v.Err} - } - return retryOnError(v.Err) - case *client.UnexpectedHTTPResponseError: - return xfer.DoNotRetry{Err: err} - case error: - if strings.Contains(err.Error(), strings.ToLower(syscall.ENOSPC.Error())) { - return xfer.DoNotRetry{Err: err} - } - } - // let's be nice and fallback if the error is a completely - // unexpected one. - // If new errors have to be handled in some way, please - // add them to the switch above. - return err -} diff --git a/distribution/fixtures/validate_manifest/bad_manifest b/distribution/fixtures/validate_manifest/bad_manifest deleted file mode 100644 index a1f02a62a3..0000000000 --- a/distribution/fixtures/validate_manifest/bad_manifest +++ /dev/null @@ -1,38 +0,0 @@ -{ - "schemaVersion": 2, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY 
file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} diff --git a/distribution/fixtures/validate_manifest/extra_data_manifest b/distribution/fixtures/validate_manifest/extra_data_manifest deleted file mode 100644 index beec19a801..0000000000 --- a/distribution/fixtures/validate_manifest/extra_data_manifest +++ /dev/null @@ -1,46 +0,0 @@ -{ - "schemaVersion": 1, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD [\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": 
"{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "fsLayers": [ - { - "blobSum": "sha256:ffff95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:ffff658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} diff --git a/distribution/fixtures/validate_manifest/good_manifest b/distribution/fixtures/validate_manifest/good_manifest deleted file mode 100644 index b107de3226..0000000000 --- a/distribution/fixtures/validate_manifest/good_manifest +++ /dev/null @@ -1,38 +0,0 @@ -{ - "schemaVersion": 1, - "name": "library/hello-world", - "tag": "latest", - "architecture": "amd64", - "fsLayers": [ - { - "blobSum": "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4" - }, - { - "blobSum": "sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb" - } - ], - "history": [ - { - "v1Compatibility": "{\"id\":\"af340544ed62de0680f441c71fa1a80cb084678fed42bae393e543faea3a572c\",\"parent\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.608577814Z\",\"container\":\"c2b715156f640c7ac7d98472ea24335aba5432a1323a3bb722697e6d37ef794f\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) CMD 
[\\\"/hello\\\"]\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/hello\"],\"Image\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n" - }, - { - "v1Compatibility": "{\"id\":\"535020c3e8add9d6bb06e5ac15a261e73d9b213d62fb2c14d752b8e189b2b912\",\"created\":\"2015-08-06T23:53:22.241352727Z\",\"container\":\"9aeb0006ffa72a8287564caaea87625896853701459261d3b569e320c0c9d5dc\",\"container_config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) COPY file:4abd3bff60458ca3b079d7b131ce26b2719055a030dfa96ff827da2b7c7038a7 in /\"],\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"docker_version\":\"1.7.1\",\"config\":{\"Hostname\":\"9aeb0006ffa7\",\"Domainname\":\"\",\"User\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":null,\"Cmd\":null,\"Image\":\"\",\"Volumes\":null,\"VolumeDriver\":\"\",\"WorkingDir\":\"\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":null,\"Labels\":null},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":960}\n" - } - ], - "signatures": [ - { - "header": { - "jwk": { - "crv": "P-256", - "kid": "OIH7:HQFS:44FK:45VB:3B53:OIAG:TPL4:ATF5:6PNE:MGHN:NHQX:2GE4", - "kty": "EC", - "x": "Cu_UyxwLgHzE9rvlYSmvVdqYCXY42E9eNhBb0xNv0SQ", - "y": "zUsjWJkeKQ5tv7S-hl1Tg71cd-CqnrtiiLxSi6N_yc8" - }, - "alg": "ES256" - }, - "signature": "Y6xaFz9Sy-OtcnKQS1Ilq3Dh8cu4h3nBTJCpOTF1XF7vKtcxxA_xMP8-SgDo869SJ3VsvgPL9-Xn-OoYG2rb1A", - "protected": "eyJmb3JtYXRMZW5ndGgiOjMxOTcsImZvcm1hdFRhaWwiOiJDbjAiLCJ0aW1lIjoiMjAxNS0wOS0xMVQwNDoxMzo0OFoifQ" - } - ] -} \ No newline at end of file diff --git a/distribution/metadata/metadata.go b/distribution/metadata/metadata.go deleted file mode 100644 index 05ba4f817d..0000000000 --- a/distribution/metadata/metadata.go +++ /dev/null @@ -1,75 +0,0 @@ -package metadata - -import ( - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/docker/docker/pkg/ioutils" -) - -// Store implements a K/V store for mapping distribution-related IDs -// to on-disk layer IDs and image IDs. The namespace identifies the type of -// mapping (i.e. "v1ids" or "artifacts"). MetadataStore is goroutine-safe. -type Store interface { - // Get retrieves data by namespace and key. - Get(namespace string, key string) ([]byte, error) - // Set writes data indexed by namespace and key. 
- Set(namespace, key string, value []byte) error - // Delete removes data indexed by namespace and key. - Delete(namespace, key string) error -} - -// FSMetadataStore uses the filesystem to associate metadata with layer and -// image IDs. -type FSMetadataStore struct { - sync.RWMutex - basePath string -} - -// NewFSMetadataStore creates a new filesystem-based metadata store. -func NewFSMetadataStore(basePath string) (*FSMetadataStore, error) { - if err := os.MkdirAll(basePath, 0700); err != nil { - return nil, err - } - return &FSMetadataStore{ - basePath: basePath, - }, nil -} - -func (store *FSMetadataStore) path(namespace, key string) string { - return filepath.Join(store.basePath, namespace, key) -} - -// Get retrieves data by namespace and key. The data is read from a file named -// after the key, stored in the namespace's directory. -func (store *FSMetadataStore) Get(namespace string, key string) ([]byte, error) { - store.RLock() - defer store.RUnlock() - - return ioutil.ReadFile(store.path(namespace, key)) -} - -// Set writes data indexed by namespace and key. The data is written to a file -// named after the key, stored in the namespace's directory. -func (store *FSMetadataStore) Set(namespace, key string, value []byte) error { - store.Lock() - defer store.Unlock() - - path := store.path(namespace, key) - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - return ioutils.AtomicWriteFile(path, value, 0644) -} - -// Delete removes data indexed by namespace and key. The data file named after -// the key, stored in the namespace's directory is deleted. -func (store *FSMetadataStore) Delete(namespace, key string) error { - store.Lock() - defer store.Unlock() - - path := store.path(namespace, key) - return os.Remove(path) -} diff --git a/distribution/metadata/v1_id_service.go b/distribution/metadata/v1_id_service.go deleted file mode 100644 index f6e4589248..0000000000 --- a/distribution/metadata/v1_id_service.go +++ /dev/null @@ -1,44 +0,0 @@ -package metadata - -import ( - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" -) - -// V1IDService maps v1 IDs to layers on disk. -type V1IDService struct { - store Store -} - -// NewV1IDService creates a new V1 ID mapping service. -func NewV1IDService(store Store) *V1IDService { - return &V1IDService{ - store: store, - } -} - -// namespace returns the namespace used by this service. -func (idserv *V1IDService) namespace() string { - return "v1id" -} - -// Get finds a layer by its V1 ID. -func (idserv *V1IDService) Get(v1ID, registry string) (layer.DiffID, error) { - if err := v1.ValidateID(v1ID); err != nil { - return layer.DiffID(""), err - } - - idBytes, err := idserv.store.Get(idserv.namespace(), registry+","+v1ID) - if err != nil { - return layer.DiffID(""), err - } - return layer.DiffID(idBytes), nil -} - -// Set associates an image with a V1 ID. 
-func (idserv *V1IDService) Set(v1ID, registry string, id layer.DiffID) error { - if err := v1.ValidateID(v1ID); err != nil { - return err - } - return idserv.store.Set(idserv.namespace(), registry+","+v1ID, []byte(id)) -} diff --git a/distribution/metadata/v1_id_service_test.go b/distribution/metadata/v1_id_service_test.go deleted file mode 100644 index 556886581e..0000000000 --- a/distribution/metadata/v1_id_service_test.go +++ /dev/null @@ -1,83 +0,0 @@ -package metadata - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/docker/layer" -) - -func TestV1IDService(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "v1-id-service-test") - if err != nil { - t.Fatalf("could not create temp dir: %v", err) - } - defer os.RemoveAll(tmpDir) - - metadataStore, err := NewFSMetadataStore(tmpDir) - if err != nil { - t.Fatalf("could not create metadata store: %v", err) - } - v1IDService := NewV1IDService(metadataStore) - - testVectors := []struct { - registry string - v1ID string - layerID layer.DiffID - }{ - { - registry: "registry1", - v1ID: "f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937", - layerID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"), - }, - { - registry: "registry2", - v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", - layerID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"), - }, - { - registry: "registry1", - v1ID: "9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e", - layerID: layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"), - }, - } - - // Set some associations - for _, vec := range testVectors { - err := v1IDService.Set(vec.v1ID, vec.registry, vec.layerID) - if err != nil { - t.Fatalf("error calling Set: %v", err) - } - } - - // Check the correct values are read back - for _, vec := range testVectors { - layerID, err := v1IDService.Get(vec.v1ID, vec.registry) - if err != nil { - t.Fatalf("error calling Get: %v", err) - } - if layerID != vec.layerID { - t.Fatal("Get returned incorrect layer ID") - } - } - - // Test Get on a nonexistent entry - _, err = v1IDService.Get("82379823067823853223359023576437723560923756b03560378f4497753917", "registry1") - if err == nil { - t.Fatal("expected error looking up nonexistent entry") - } - - // Overwrite one of the entries and read it back - err = v1IDService.Set(testVectors[0].v1ID, testVectors[0].registry, testVectors[1].layerID) - if err != nil { - t.Fatalf("error calling Set: %v", err) - } - layerID, err := v1IDService.Get(testVectors[0].v1ID, testVectors[0].registry) - if err != nil { - t.Fatalf("error calling Get: %v", err) - } - if layerID != testVectors[1].layerID { - t.Fatal("Get returned incorrect layer ID") - } -} diff --git a/distribution/metadata/v2_metadata_service.go b/distribution/metadata/v2_metadata_service.go deleted file mode 100644 index 239cd1f45e..0000000000 --- a/distribution/metadata/v2_metadata_service.go +++ /dev/null @@ -1,137 +0,0 @@ -package metadata - -import ( - "encoding/json" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/layer" -) - -// V2MetadataService maps layer IDs to a set of known metadata for -// the layer. -type V2MetadataService struct { - store Store -} - -// V2Metadata contains the digest and source repository information for a layer. 
-type V2Metadata struct {
-	Digest           digest.Digest
-	SourceRepository string
-}
-
-// maxMetadata is the number of metadata entries to keep per layer DiffID.
-const maxMetadata = 50
-
-// NewV2MetadataService creates a new diff ID to v2 metadata mapping service.
-func NewV2MetadataService(store Store) *V2MetadataService {
-	return &V2MetadataService{
-		store: store,
-	}
-}
-
-func (serv *V2MetadataService) diffIDNamespace() string {
-	return "v2metadata-by-diffid"
-}
-
-func (serv *V2MetadataService) digestNamespace() string {
-	return "diffid-by-digest"
-}
-
-func (serv *V2MetadataService) diffIDKey(diffID layer.DiffID) string {
-	return string(digest.Digest(diffID).Algorithm()) + "/" + digest.Digest(diffID).Hex()
-}
-
-func (serv *V2MetadataService) digestKey(dgst digest.Digest) string {
-	return string(dgst.Algorithm()) + "/" + dgst.Hex()
-}
-
-// GetMetadata finds the metadata associated with a layer DiffID.
-func (serv *V2MetadataService) GetMetadata(diffID layer.DiffID) ([]V2Metadata, error) {
-	jsonBytes, err := serv.store.Get(serv.diffIDNamespace(), serv.diffIDKey(diffID))
-	if err != nil {
-		return nil, err
-	}
-
-	var metadata []V2Metadata
-	if err := json.Unmarshal(jsonBytes, &metadata); err != nil {
-		return nil, err
-	}
-
-	return metadata, nil
-}
-
-// GetDiffID finds a layer DiffID from a digest.
-func (serv *V2MetadataService) GetDiffID(dgst digest.Digest) (layer.DiffID, error) {
-	diffIDBytes, err := serv.store.Get(serv.digestNamespace(), serv.digestKey(dgst))
-	if err != nil {
-		return layer.DiffID(""), err
-	}
-
-	return layer.DiffID(diffIDBytes), nil
-}
-
-// Add associates metadata with a layer DiffID. If too many metadata entries are
-// present, the oldest one is dropped.
-func (serv *V2MetadataService) Add(diffID layer.DiffID, metadata V2Metadata) error {
-	oldMetadata, err := serv.GetMetadata(diffID)
-	if err != nil {
-		oldMetadata = nil
-	}
-	newMetadata := make([]V2Metadata, 0, len(oldMetadata)+1)
-
-	// Copy all other metadata to new slice
-	for _, oldMeta := range oldMetadata {
-		if oldMeta != metadata {
-			newMetadata = append(newMetadata, oldMeta)
-		}
-	}
-
-	newMetadata = append(newMetadata, metadata)
-
-	if len(newMetadata) > maxMetadata {
-		newMetadata = newMetadata[len(newMetadata)-maxMetadata:]
-	}
-
-	jsonBytes, err := json.Marshal(newMetadata)
-	if err != nil {
-		return err
-	}
-
-	err = serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
-	if err != nil {
-		return err
-	}
-
-	return serv.store.Set(serv.digestNamespace(), serv.digestKey(metadata.Digest), []byte(diffID))
-}
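Add keeps at most maxMetadata entries per DiffID by deduplicating the new entry and trimming from the front, so the oldest associations age out first. A small sketch of that bounded-append bookkeeping, with strings in place of V2Metadata and `maxEntries` shrunk to 3 so the trim is visible:

package main

import "fmt"

const maxEntries = 3 // the real service caps at maxMetadata (50)

// addBounded reproduces the bookkeeping in Add: drop any existing copy
// of the entry, append it as the newest, and trim from the front so at
// most maxEntries remain (oldest entries are discarded first).
func addBounded(entries []string, entry string) []string {
	out := make([]string, 0, len(entries)+1)
	for _, e := range entries {
		if e != entry {
			out = append(out, e)
		}
	}
	out = append(out, entry)
	if len(out) > maxEntries {
		out = out[len(out)-maxEntries:]
	}
	return out
}

func main() {
	var entries []string
	for _, e := range []string{"a", "b", "c", "b", "d"} {
		entries = addBounded(entries, e)
	}
	fmt.Println(entries) // [c b d]: "a" aged out, "b" moved to newest
}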
-
-// Remove disassociates a metadata entry from a layer DiffID.
-func (serv *V2MetadataService) Remove(metadata V2Metadata) error {
-	diffID, err := serv.GetDiffID(metadata.Digest)
-	if err != nil {
-		return err
-	}
-	oldMetadata, err := serv.GetMetadata(diffID)
-	if err != nil {
-		oldMetadata = nil
-	}
-	newMetadata := make([]V2Metadata, 0, len(oldMetadata))
-
-	// Copy all other metadata to new slice
-	for _, oldMeta := range oldMetadata {
-		if oldMeta != metadata {
-			newMetadata = append(newMetadata, oldMeta)
-		}
-	}
-
-	if len(newMetadata) == 0 {
-		return serv.store.Delete(serv.diffIDNamespace(), serv.diffIDKey(diffID))
-	}
-
-	jsonBytes, err := json.Marshal(newMetadata)
-	if err != nil {
-		return err
-	}
-
-	return serv.store.Set(serv.diffIDNamespace(), serv.diffIDKey(diffID), jsonBytes)
-}
diff --git a/distribution/metadata/v2_metadata_service_test.go b/distribution/metadata/v2_metadata_service_test.go
deleted file mode 100644
index 7b0ecb1572..0000000000
--- a/distribution/metadata/v2_metadata_service_test.go
+++ /dev/null
@@ -1,115 +0,0 @@
-package metadata
-
-import (
-	"encoding/hex"
-	"io/ioutil"
-	"math/rand"
-	"os"
-	"reflect"
-	"testing"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/layer"
-)
-
-func TestV2MetadataService(t *testing.T) {
-	tmpDir, err := ioutil.TempDir("", "blobsum-storage-service-test")
-	if err != nil {
-		t.Fatalf("could not create temp dir: %v", err)
-	}
-	defer os.RemoveAll(tmpDir)
-
-	metadataStore, err := NewFSMetadataStore(tmpDir)
-	if err != nil {
-		t.Fatalf("could not create metadata store: %v", err)
-	}
-	V2MetadataService := NewV2MetadataService(metadataStore)
-
-	tooManyBlobSums := make([]V2Metadata, 100)
-	for i := range tooManyBlobSums {
-		randDigest := randomDigest()
-		tooManyBlobSums[i] = V2Metadata{Digest: randDigest}
-	}
-
-	testVectors := []struct {
-		diffID   layer.DiffID
-		metadata []V2Metadata
-	}{
-		{
-			diffID: layer.DiffID("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4"),
-			metadata: []V2Metadata{
-				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
-			},
-		},
-		{
-			diffID: layer.DiffID("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa"),
-			metadata: []V2Metadata{
-				{Digest: digest.Digest("sha256:f0cd5ca10b07f35512fc2f1cbf9a6cefbdb5cba70ac6b0c9e5988f4497f71937")},
-				{Digest: digest.Digest("sha256:9e3447ca24cb96d86ebd5960cb34d1299b07e0a0e03801d90b9969a2c187dd6e")},
-			},
-		},
-		{
-			diffID:   layer.DiffID("sha256:03f4658f8b782e12230c1783426bd3bacce651ce582a4ffb6fbbfa2079428ecb"),
-			metadata: tooManyBlobSums,
-		},
-	}
-
-	// Set some associations
-	for _, vec := range testVectors {
-		for _, blobsum := range vec.metadata {
-			err := V2MetadataService.Add(vec.diffID, blobsum)
-			if err != nil {
-				t.Fatalf("error calling Add: %v", err)
-			}
-		}
-	}
-
-	// Check the correct values are read back
-	for _, vec := range testVectors {
-		metadata, err := V2MetadataService.GetMetadata(vec.diffID)
-		if err != nil {
-			t.Fatalf("error calling GetMetadata: %v", err)
-		}
-		expectedMetadataEntries := len(vec.metadata)
-		if expectedMetadataEntries > 50 {
-			expectedMetadataEntries = 50
-		}
-		if !reflect.DeepEqual(metadata, vec.metadata[len(vec.metadata)-expectedMetadataEntries:len(vec.metadata)]) {
-			t.Fatal("GetMetadata returned incorrect metadata")
-		}
-	}
-
-	// Test GetMetadata on a nonexistent entry
-	_, err = V2MetadataService.GetMetadata(layer.DiffID("sha256:82379823067823853223359023576437723560923756b03560378f4497753917"))
-	if err == nil {
-		t.Fatal("expected error looking up nonexistent entry")
-	}
-
-	// Test 
GetDiffID on a nonexistent entry - _, err = V2MetadataService.GetDiffID(digest.Digest("sha256:82379823067823853223359023576437723560923756b03560378f4497753917")) - if err == nil { - t.Fatal("expected error looking up nonexistent entry") - } - - // Overwrite one of the entries and read it back - err = V2MetadataService.Add(testVectors[1].diffID, testVectors[0].metadata[0]) - if err != nil { - t.Fatalf("error calling Add: %v", err) - } - diffID, err := V2MetadataService.GetDiffID(testVectors[0].metadata[0].Digest) - if err != nil { - t.Fatalf("error calling GetDiffID: %v", err) - } - if diffID != testVectors[1].diffID { - t.Fatal("GetDiffID returned incorrect diffID") - } -} - -func randomDigest() digest.Digest { - b := [32]byte{} - for i := 0; i < len(b); i++ { - b[i] = byte(rand.Intn(256)) - } - d := hex.EncodeToString(b[:]) - return digest.Digest("sha256:" + d) -} diff --git a/distribution/pull.go b/distribution/pull.go deleted file mode 100644 index dad93b656d..0000000000 --- a/distribution/pull.go +++ /dev/null @@ -1,225 +0,0 @@ -package distribution - -import ( - "fmt" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/api" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImagePullConfig stores pull configuration. -type ImagePullConfig struct { - // MetaHeaders stores HTTP headers with metadata about the image - MetaHeaders map[string][]string - // AuthConfig holds authentication credentials for authenticating with - // the registry. - AuthConfig *types.AuthConfig - // ProgressOutput is the interface for showing the status of the pull - // operation. - ProgressOutput progress.Output - // RegistryService is the registry service to use for TLS configuration - // and endpoint lookup. - RegistryService registry.Service - // ImageEventLogger notifies events for a given image - ImageEventLogger func(id, name, action string) - // MetadataStore is the storage backend for distribution-specific - // metadata. - MetadataStore metadata.Store - // ImageStore manages images. - ImageStore image.Store - // ReferenceStore manages tags. - ReferenceStore reference.Store - // DownloadManager manages concurrent pulls. - DownloadManager *xfer.LayerDownloadManager -} - -// Puller is an interface that abstracts pulling for different API versions. -type Puller interface { - // Pull tries to pull the image referenced by `tag` - // Pull returns an error if any, as well as a boolean that determines whether to retry Pull on the next configured endpoint. - // - Pull(ctx context.Context, ref reference.Named) error -} - -// newPuller returns a Puller interface that will pull from either a v1 or v2 -// registry. The endpoint argument contains a Version field that determines -// whether a v1 or v2 puller will be created. The other parameters are passed -// through to the underlying puller implementation for use during the actual -// pull operation. 
-func newPuller(endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePullConfig *ImagePullConfig) (Puller, error) { - switch endpoint.Version { - case registry.APIVersion2: - return &v2Puller{ - V2MetadataService: metadata.NewV2MetadataService(imagePullConfig.MetadataStore), - endpoint: endpoint, - config: imagePullConfig, - repoInfo: repoInfo, - }, nil - case registry.APIVersion1: - return &v1Puller{ - v1IDService: metadata.NewV1IDService(imagePullConfig.MetadataStore), - endpoint: endpoint, - config: imagePullConfig, - repoInfo: repoInfo, - }, nil - } - return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) -} - -// Pull initiates a pull operation. image is the repository name to pull, and -// tag may be either empty, or indicate a specific tag to pull. -func Pull(ctx context.Context, ref reference.Named, imagePullConfig *ImagePullConfig) error { - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := imagePullConfig.RegistryService.ResolveRepository(ref) - if err != nil { - return err - } - - // makes sure name is not empty or `scratch` - if err := ValidateRepoName(repoInfo.Name()); err != nil { - return err - } - - endpoints, err := imagePullConfig.RegistryService.LookupPullEndpoints(repoInfo.Hostname()) - if err != nil { - return err - } - - var ( - lastErr error - - // discardNoSupportErrors is used to track whether an endpoint encountered an error of type registry.ErrNoSupport - // By default it is false, which means that if an ErrNoSupport error is encountered, it will be saved in lastErr. - // As soon as another kind of error is encountered, discardNoSupportErrors is set to true, avoiding the saving of - // any subsequent ErrNoSupport errors in lastErr. - // It's needed for pull-by-digest on v1 endpoints: if there are only v1 endpoints configured, the error should be - // returned and displayed, but if there was a v2 endpoint which supports pull-by-digest, then the last relevant - // error is the ones from v2 endpoints not v1. - discardNoSupportErrors bool - - // confirmedV2 is set to true if a pull attempt managed to - // confirm that it was talking to a v2 registry. This will - // prevent fallback to the v1 protocol. - confirmedV2 bool - - // confirmedTLSRegistries is a map indicating which registries - // are known to be using TLS. There should never be a plaintext - // retry for any of these. - confirmedTLSRegistries = make(map[string]struct{}) - ) - for _, endpoint := range endpoints { - if confirmedV2 && endpoint.Version == registry.APIVersion1 { - logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) - continue - } - - if endpoint.URL.Scheme != "https" { - if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { - logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) - continue - } - } - - logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) - - puller, err := newPuller(endpoint, repoInfo, imagePullConfig) - if err != nil { - lastErr = err - continue - } - if err := puller.Pull(ctx, ref); err != nil { - // Was this pull cancelled? If so, don't try to fall - // back. 
- fallback := false - select { - case <-ctx.Done(): - default: - if fallbackErr, ok := err.(fallbackError); ok { - fallback = true - confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 - if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { - confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} - } - err = fallbackErr.err - } - } - if fallback { - if _, ok := err.(ErrNoSupport); !ok { - // Because we found an error that's not ErrNoSupport, discard all subsequent ErrNoSupport errors. - discardNoSupportErrors = true - // append subsequent errors - lastErr = err - } else if !discardNoSupportErrors { - // Save the ErrNoSupport error, because it's either the first error or all encountered errors - // were also ErrNoSupport errors. - // append subsequent errors - lastErr = err - } - logrus.Errorf("Attempting next endpoint for pull after error: %v", err) - continue - } - logrus.Errorf("Not continuing with pull after error: %v", err) - return err - } - - imagePullConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "pull") - return nil - } - - if lastErr == nil { - lastErr = fmt.Errorf("no endpoints found for %s", ref.String()) - } - - return lastErr -} - -// writeStatus writes a status message to out. If layersDownloaded is true, the -// status message indicates that a newer image was downloaded. Otherwise, it -// indicates that the image is up to date. requestedTag is the tag the message -// will refer to. -func writeStatus(requestedTag string, out progress.Output, layersDownloaded bool) { - if layersDownloaded { - progress.Message(out, "", "Status: Downloaded newer image for "+requestedTag) - } else { - progress.Message(out, "", "Status: Image is up to date for "+requestedTag) - } -} - -// ValidateRepoName validates the name of a repository. 
-func ValidateRepoName(name string) error { - if name == "" { - return fmt.Errorf("Repository name can't be empty") - } - if name == api.NoBaseImageSpecifier { - return fmt.Errorf("'%s' is a reserved name", api.NoBaseImageSpecifier) - } - return nil -} - -func addDigestReference(store reference.Store, ref reference.Named, dgst digest.Digest, imageID image.ID) error { - dgstRef, err := reference.WithDigest(ref, dgst) - if err != nil { - return err - } - - if oldTagImageID, err := store.Get(dgstRef); err == nil { - if oldTagImageID != imageID { - // Updating digests not supported by reference store - logrus.Errorf("Image ID for digest %s changed from %s to %s, cannot update", dgst.String(), oldTagImageID, imageID) - } - return nil - } else if err != reference.ErrDoesNotExist { - return err - } - - return store.AddDigest(dgstRef, imageID, true) -} diff --git a/distribution/pull_v1.go b/distribution/pull_v1.go deleted file mode 100644 index 3b7175e91e..0000000000 --- a/distribution/pull_v1.go +++ /dev/null @@ -1,366 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "net/url" - "os" - "strings" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "golang.org/x/net/context" -) - -type v1Puller struct { - v1IDService *metadata.V1IDService - endpoint registry.APIEndpoint - config *ImagePullConfig - repoInfo *registry.RepositoryInfo - session *registry.Session -} - -func (p *v1Puller) Pull(ctx context.Context, ref reference.Named) error { - if _, isCanonical := ref.(reference.Canonical); isCanonical { - // Allowing fallback, because HTTPS v1 is before HTTP v2 - return fallbackError{err: ErrNoSupport{Err: errors.New("Cannot pull by digest with v1 registry")}} - } - - tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) - if err != nil { - return err - } - // Adds Docker-specific headers as well as user-specified headers (metaHeaders) - tr := transport.NewTransport( - // TODO(tiborvass): was ReceiveTimeout - registry.NewTransport(tlsConfig), - registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., - ) - client := registry.HTTPClient(tr) - v1Endpoint, err := p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) - if err != nil { - logrus.Debugf("Could not get v1 endpoint: %v", err) - return fallbackError{err: err} - } - p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) - if err != nil { - // TODO(dmcgowan): Check if should fallback - logrus.Debugf("Fallback from error: %s", err) - return fallbackError{err: err} - } - if err := p.pullRepository(ctx, ref); err != nil { - // TODO(dmcgowan): Check if should fallback - return err - } - progress.Message(p.config.ProgressOutput, "", p.repoInfo.FullName()+": this image was pulled from a legacy registry. 
Important: This registry version will not be supported in future versions of docker.") - - return nil -} - -func (p *v1Puller) pullRepository(ctx context.Context, ref reference.Named) error { - progress.Message(p.config.ProgressOutput, "", "Pulling repository "+p.repoInfo.FullName()) - - tagged, isTagged := ref.(reference.NamedTagged) - - repoData, err := p.session.GetRepositoryData(p.repoInfo) - if err != nil { - if strings.Contains(err.Error(), "HTTP code: 404") { - if isTagged { - return fmt.Errorf("Error: image %s:%s not found", p.repoInfo.RemoteName(), tagged.Tag()) - } - return fmt.Errorf("Error: image %s not found", p.repoInfo.RemoteName()) - } - // Unexpected HTTP error - return err - } - - logrus.Debug("Retrieving the tag list") - var tagsList map[string]string - if !isTagged { - tagsList, err = p.session.GetRemoteTags(repoData.Endpoints, p.repoInfo) - } else { - var tagID string - tagsList = make(map[string]string) - tagID, err = p.session.GetRemoteTag(repoData.Endpoints, p.repoInfo, tagged.Tag()) - if err == registry.ErrRepoNotFound { - return fmt.Errorf("Tag %s not found in repository %s", tagged.Tag(), p.repoInfo.FullName()) - } - tagsList[tagged.Tag()] = tagID - } - if err != nil { - logrus.Errorf("unable to get remote tags: %s", err) - return err - } - - for tag, id := range tagsList { - repoData.ImgList[id] = ®istry.ImgData{ - ID: id, - Tag: tag, - Checksum: "", - } - } - - layersDownloaded := false - for _, imgData := range repoData.ImgList { - if isTagged && imgData.Tag != tagged.Tag() { - continue - } - - err := p.downloadImage(ctx, repoData, imgData, &layersDownloaded) - if err != nil { - return err - } - } - - writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) - return nil -} - -func (p *v1Puller) downloadImage(ctx context.Context, repoData *registry.RepositoryData, img *registry.ImgData, layersDownloaded *bool) error { - if img.Tag == "" { - logrus.Debugf("Image (id: %s) present in this repository but untagged, skipping", img.ID) - return nil - } - - localNameRef, err := reference.WithTag(p.repoInfo, img.Tag) - if err != nil { - retErr := fmt.Errorf("Image (id: %s) has invalid tag: %s", img.ID, img.Tag) - logrus.Debug(retErr.Error()) - return retErr - } - - if err := v1.ValidateID(img.ID); err != nil { - return err - } - - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s", img.Tag, p.repoInfo.FullName()) - success := false - var lastErr error - for _, ep := range p.repoInfo.Index.Mirrors { - ep += "v1/" - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), fmt.Sprintf("Pulling image (%s) from %s, mirror: %s", img.Tag, p.repoInfo.FullName(), ep)) - if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { - // Don't report errors when pulling from mirrors. - logrus.Debugf("Error pulling image (%s) from %s, mirror: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) - continue - } - success = true - break - } - if !success { - for _, ep := range repoData.Endpoints { - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Pulling image (%s) from %s, endpoint: %s", img.Tag, p.repoInfo.FullName(), ep) - if err = p.pullImage(ctx, img.ID, ep, localNameRef, layersDownloaded); err != nil { - // It's not ideal that only the last error is returned, it would be better to concatenate the errors. - // As the error is also given to the output stream the user will see the error. 
- lastErr = err - progress.Updatef(p.config.ProgressOutput, stringid.TruncateID(img.ID), "Error pulling image (%s) from %s, endpoint: %s, %s", img.Tag, p.repoInfo.FullName(), ep, err) - continue - } - success = true - break - } - } - if !success { - err := fmt.Errorf("Error pulling image (%s) from %s, %v", img.Tag, p.repoInfo.FullName(), lastErr) - progress.Update(p.config.ProgressOutput, stringid.TruncateID(img.ID), err.Error()) - return err - } - return nil -} - -func (p *v1Puller) pullImage(ctx context.Context, v1ID, endpoint string, localNameRef reference.Named, layersDownloaded *bool) (err error) { - var history []string - history, err = p.session.GetRemoteHistory(v1ID, endpoint) - if err != nil { - return err - } - if len(history) < 1 { - return fmt.Errorf("empty history for image %s", v1ID) - } - progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1ID), "Pulling dependent layers") - - var ( - descriptors []xfer.DownloadDescriptor - newHistory []image.History - imgJSON []byte - imgSize int64 - ) - - // Iterate over layers, in order from bottom-most to top-most. Download - // config for all layers and create descriptors. - for i := len(history) - 1; i >= 0; i-- { - v1LayerID := history[i] - imgJSON, imgSize, err = p.downloadLayerConfig(v1LayerID, endpoint) - if err != nil { - return err - } - - // Create a new-style config from the legacy configs - h, err := v1.HistoryFromConfig(imgJSON, false) - if err != nil { - return err - } - newHistory = append(newHistory, h) - - layerDescriptor := &v1LayerDescriptor{ - v1LayerID: v1LayerID, - indexName: p.repoInfo.Index.Name, - endpoint: endpoint, - v1IDService: p.v1IDService, - layersDownloaded: layersDownloaded, - layerSize: imgSize, - session: p.session, - } - - descriptors = append(descriptors, layerDescriptor) - } - - rootFS := image.NewRootFS() - resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) - if err != nil { - return err - } - defer release() - - config, err := v1.MakeConfigFromV1Config(imgJSON, &resultRootFS, newHistory) - if err != nil { - return err - } - - imageID, err := p.config.ImageStore.Create(config) - if err != nil { - return err - } - - if err := p.config.ReferenceStore.AddTag(localNameRef, imageID, true); err != nil { - return err - } - - return nil -} - -func (p *v1Puller) downloadLayerConfig(v1LayerID, endpoint string) (imgJSON []byte, imgSize int64, err error) { - progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Pulling metadata") - - retries := 5 - for j := 1; j <= retries; j++ { - imgJSON, imgSize, err := p.session.GetRemoteImageJSON(v1LayerID, endpoint) - if err != nil && j == retries { - progress.Update(p.config.ProgressOutput, stringid.TruncateID(v1LayerID), "Error pulling layer metadata") - return nil, 0, err - } else if err != nil { - time.Sleep(time.Duration(j) * 500 * time.Millisecond) - continue - } - - return imgJSON, imgSize, nil - } - - // not reached - return nil, 0, nil -} - -type v1LayerDescriptor struct { - v1LayerID string - indexName string - endpoint string - v1IDService *metadata.V1IDService - layersDownloaded *bool - layerSize int64 - session *registry.Session - tmpFile *os.File -} - -func (ld *v1LayerDescriptor) Key() string { - return "v1:" + ld.v1LayerID -} - -func (ld *v1LayerDescriptor) ID() string { - return stringid.TruncateID(ld.v1LayerID) -} - -func (ld *v1LayerDescriptor) DiffID() (layer.DiffID, error) { - return ld.v1IDService.Get(ld.v1LayerID, ld.indexName) -} - -func (ld 
*v1LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { - progress.Update(progressOutput, ld.ID(), "Pulling fs layer") - layerReader, err := ld.session.GetRemoteImageLayer(ld.v1LayerID, ld.endpoint, ld.layerSize) - if err != nil { - progress.Update(progressOutput, ld.ID(), "Error pulling dependent layers") - if uerr, ok := err.(*url.Error); ok { - err = uerr.Err - } - if terr, ok := err.(net.Error); ok && terr.Timeout() { - return nil, 0, err - } - return nil, 0, xfer.DoNotRetry{Err: err} - } - *ld.layersDownloaded = true - - ld.tmpFile, err = ioutil.TempFile("", "GetImageBlob") - if err != nil { - layerReader.Close() - return nil, 0, err - } - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerReader), progressOutput, ld.layerSize, ld.ID(), "Downloading") - defer reader.Close() - - _, err = io.Copy(ld.tmpFile, reader) - if err != nil { - ld.Close() - return nil, 0, err - } - - progress.Update(progressOutput, ld.ID(), "Download complete") - - logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), ld.tmpFile.Name()) - - ld.tmpFile.Seek(0, 0) - - // hand off the temporary file to the download manager, so it will only - // be closed once - tmpFile := ld.tmpFile - ld.tmpFile = nil - - return ioutils.NewReadCloserWrapper(tmpFile, func() error { - tmpFile.Close() - err := os.RemoveAll(tmpFile.Name()) - if err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) - } - return err - }), ld.layerSize, nil -} - -func (ld *v1LayerDescriptor) Close() { - if ld.tmpFile != nil { - ld.tmpFile.Close() - if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) - } - ld.tmpFile = nil - } -} - -func (ld *v1LayerDescriptor) Registered(diffID layer.DiffID) { - // Cache mapping from this layer's DiffID to the blobsum - ld.v1IDService.Set(ld.v1LayerID, ld.indexName, diffID) -} diff --git a/distribution/pull_v2.go b/distribution/pull_v2.go deleted file mode 100644 index c78e221f04..0000000000 --- a/distribution/pull_v2.go +++ /dev/null @@ -1,845 +0,0 @@ -package distribution - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/url" - "os" - "runtime" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/registry/api/errcode" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "golang.org/x/net/context" -) - -var errRootFSMismatch = errors.New("layers from manifest don't match image configuration") - -// ImageConfigPullError is an error pulling the image config blob -// (only applies to schema2). -type ImageConfigPullError struct { - Err error -} - -// Error returns the error string for ImageConfigPullError. 
-func (e ImageConfigPullError) Error() string { - return "error pulling image configuration: " + e.Err.Error() -} - -type v2Puller struct { - V2MetadataService *metadata.V2MetadataService - endpoint registry.APIEndpoint - config *ImagePullConfig - repoInfo *registry.RepositoryInfo - repo distribution.Repository - // confirmedV2 is set to true if we confirm we're talking to a v2 - // registry. This is used to limit fallbacks to the v1 protocol. - confirmedV2 bool -} - -func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) { - // TODO(tiborvass): was ReceiveTimeout - p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") - if err != nil { - logrus.Warnf("Error getting v2 registry: %v", err) - return err - } - - if err = p.pullV2Repository(ctx, ref); err != nil { - if _, ok := err.(fallbackError); ok { - return err - } - if continueOnError(err) { - logrus.Errorf("Error trying v2 registry: %v", err) - return fallbackError{ - err: err, - confirmedV2: p.confirmedV2, - transportOK: true, - } - } - } - return err -} - -func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) { - var layersDownloaded bool - if !reference.IsNameOnly(ref) { - layersDownloaded, err = p.pullV2Tag(ctx, ref) - if err != nil { - return err - } - } else { - tags, err := p.repo.Tags(ctx).All(ctx) - if err != nil { - // If this repository doesn't exist on V2, we should - // permit a fallback to V1. - return allowV1Fallback(err) - } - - // The v2 registry knows about this repository, so we will not - // allow fallback to the v1 protocol even if we encounter an - // error later on. - p.confirmedV2 = true - - for _, tag := range tags { - tagRef, err := reference.WithTag(ref, tag) - if err != nil { - return err - } - pulledNew, err := p.pullV2Tag(ctx, tagRef) - if err != nil { - // Since this is the pull-all-tags case, don't - // allow an error pulling a particular tag to - // make the whole pull fall back to v1. - if fallbackErr, ok := err.(fallbackError); ok { - return fallbackErr.err - } - return err - } - // pulledNew is true if either new layers were downloaded OR if existing images were newly tagged - // TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus? 
- layersDownloaded = layersDownloaded || pulledNew - } - } - - writeStatus(ref.String(), p.config.ProgressOutput, layersDownloaded) - - return nil -} - -type v2LayerDescriptor struct { - digest digest.Digest - repoInfo *registry.RepositoryInfo - repo distribution.Repository - V2MetadataService *metadata.V2MetadataService - tmpFile *os.File - verifier digest.Verifier - src distribution.Descriptor -} - -func (ld *v2LayerDescriptor) Key() string { - return "v2:" + ld.digest.String() -} - -func (ld *v2LayerDescriptor) ID() string { - return stringid.TruncateID(ld.digest.String()) -} - -func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) { - return ld.V2MetadataService.GetDiffID(ld.digest) -} - -func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { - logrus.Debugf("pulling blob %q", ld.digest) - - var ( - err error - offset int64 - ) - - if ld.tmpFile == nil { - ld.tmpFile, err = createDownloadFile() - if err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } else { - offset, err = ld.tmpFile.Seek(0, os.SEEK_END) - if err != nil { - logrus.Debugf("error seeking to end of download file: %v", err) - offset = 0 - - ld.tmpFile.Close() - if err := os.Remove(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) - } - ld.tmpFile, err = createDownloadFile() - if err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } else if offset != 0 { - logrus.Debugf("attempting to resume download of %q from %d bytes", ld.digest, offset) - } - } - - tmpFile := ld.tmpFile - - layerDownload, err := ld.open(ctx) - if err != nil { - logrus.Errorf("Error initiating layer download: %v", err) - if err == distribution.ErrBlobUnknown { - return nil, 0, xfer.DoNotRetry{Err: err} - } - return nil, 0, retryOnError(err) - } - - if offset != 0 { - _, err := layerDownload.Seek(offset, os.SEEK_SET) - if err != nil { - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - return nil, 0, err - } - } - size, err := layerDownload.Seek(0, os.SEEK_END) - if err != nil { - // Seek failed, perhaps because there was no Content-Length - // header. This shouldn't fail the download, because we can - // still continue without a progress bar. - size = 0 - } else { - if size != 0 && offset > size { - logrus.Debug("Partial download is larger than full blob. Starting over") - offset = 0 - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } - - // Restore the seek offset either at the beginning of the - // stream, or just after the last byte we have from previous - // attempts. 
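// Aside: a minimal sketch, not part of the original file, of the
// resume-and-verify pattern that v2LayerDescriptor.Download implements
// above: seek past the bytes a previous attempt already wrote, download
// only the tail, and hash everything so the blob can be checked. The
// helper name is hypothetical and stdlib sha256 stands in for the
// docker/distribution digest verifier.
package sketch

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
)

func resumeAndVerify(remote io.ReadSeeker, tmp *os.File, wantHex string) error {
	// Bytes already written by a previous, interrupted attempt.
	offset, err := tmp.Seek(0, io.SeekEnd)
	if err != nil {
		return err
	}
	// The verifier must see the earlier bytes too, so rehash the file.
	h := sha256.New()
	if _, err := tmp.Seek(0, io.SeekStart); err != nil {
		return err
	}
	if _, err := io.Copy(h, tmp); err != nil {
		return err
	}
	// Resume the transfer just after the last byte we already have.
	if _, err := remote.Seek(offset, io.SeekStart); err != nil {
		return err
	}
	if _, err := io.Copy(tmp, io.TeeReader(remote, h)); err != nil {
		return err
	}
	if got := hex.EncodeToString(h.Sum(nil)); got != wantHex {
		return fmt.Errorf("filesystem layer verification failed: sha256:%s", got)
	}
	return nil
}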
- _, err = layerDownload.Seek(offset, os.SEEK_SET) - if err != nil { - return nil, 0, err - } - } - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, layerDownload), progressOutput, size-offset, ld.ID(), "Downloading") - defer reader.Close() - - if ld.verifier == nil { - ld.verifier, err = digest.NewDigestVerifier(ld.digest) - if err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - } - - _, err = io.Copy(tmpFile, io.TeeReader(reader, ld.verifier)) - if err != nil { - if err == transport.ErrWrongCodeForByteRange { - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - return nil, 0, err - } - return nil, 0, retryOnError(err) - } - - progress.Update(progressOutput, ld.ID(), "Verifying Checksum") - - if !ld.verifier.Verified() { - err = fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest) - logrus.Error(err) - - // Allow a retry if this digest verification error happened - // after a resumed download. - if offset != 0 { - if err := ld.truncateDownloadFile(); err != nil { - return nil, 0, xfer.DoNotRetry{Err: err} - } - - return nil, 0, err - } - return nil, 0, xfer.DoNotRetry{Err: err} - } - - progress.Update(progressOutput, ld.ID(), "Download complete") - - logrus.Debugf("Downloaded %s to tempfile %s", ld.ID(), tmpFile.Name()) - - _, err = tmpFile.Seek(0, os.SEEK_SET) - if err != nil { - tmpFile.Close() - if err := os.Remove(tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) - } - ld.tmpFile = nil - ld.verifier = nil - return nil, 0, xfer.DoNotRetry{Err: err} - } - - // hand off the temporary file to the download manager, so it will only - // be closed once - ld.tmpFile = nil - - return ioutils.NewReadCloserWrapper(tmpFile, func() error { - tmpFile.Close() - err := os.RemoveAll(tmpFile.Name()) - if err != nil { - logrus.Errorf("Failed to remove temp file: %s", tmpFile.Name()) - } - return err - }), size, nil -} - -func (ld *v2LayerDescriptor) Close() { - if ld.tmpFile != nil { - ld.tmpFile.Close() - if err := os.RemoveAll(ld.tmpFile.Name()); err != nil { - logrus.Errorf("Failed to remove temp file: %s", ld.tmpFile.Name()) - } - } -} - -func (ld *v2LayerDescriptor) truncateDownloadFile() error { - // Need a new hash context since we will be redoing the download - ld.verifier = nil - - if _, err := ld.tmpFile.Seek(0, os.SEEK_SET); err != nil { - logrus.Errorf("error seeking to beginning of download file: %v", err) - return err - } - - if err := ld.tmpFile.Truncate(0); err != nil { - logrus.Errorf("error truncating download file: %v", err) - return err - } - - return nil -} - -func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) { - // Cache mapping from this layer's DiffID to the blobsum - ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.FullName()}) -} - -func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) { - manSvc, err := p.repo.Manifests(ctx) - if err != nil { - return false, err - } - - var ( - manifest distribution.Manifest - tagOrDigest string // Used for logging/progress only - ) - if tagged, isTagged := ref.(reference.NamedTagged); isTagged { - // NOTE: not using TagService.Get, since it uses HEAD requests - // against the manifests endpoint, which are not supported by - // all registry versions. 
- manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag())) - if err != nil { - return false, allowV1Fallback(err) - } - tagOrDigest = tagged.Tag() - } else if digested, isDigested := ref.(reference.Canonical); isDigested { - manifest, err = manSvc.Get(ctx, digested.Digest()) - if err != nil { - return false, err - } - tagOrDigest = digested.Digest().String() - } else { - return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", ref.String()) - } - - if manifest == nil { - return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest) - } - - // If manSvc.Get succeeded, we can be confident that the registry on - // the other side speaks the v2 protocol. - p.confirmedV2 = true - - logrus.Debugf("Pulling ref from V2 registry: %s", ref.String()) - progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+p.repo.Named().Name()) - - var ( - imageID image.ID - manifestDigest digest.Digest - ) - - switch v := manifest.(type) { - case *schema1.SignedManifest: - imageID, manifestDigest, err = p.pullSchema1(ctx, ref, v) - if err != nil { - return false, err - } - case *schema2.DeserializedManifest: - imageID, manifestDigest, err = p.pullSchema2(ctx, ref, v) - if err != nil { - return false, err - } - case *manifestlist.DeserializedManifestList: - imageID, manifestDigest, err = p.pullManifestList(ctx, ref, v) - if err != nil { - return false, err - } - default: - return false, errors.New("unsupported manifest format") - } - - progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String()) - - oldTagImageID, err := p.config.ReferenceStore.Get(ref) - if err == nil { - if oldTagImageID == imageID { - return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID) - } - } else if err != reference.ErrDoesNotExist { - return false, err - } - - if canonical, ok := ref.(reference.Canonical); ok { - if err = p.config.ReferenceStore.AddDigest(canonical, imageID, true); err != nil { - return false, err - } - } else { - if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID); err != nil { - return false, err - } - if err = p.config.ReferenceStore.AddTag(ref, imageID, true); err != nil { - return false, err - } - } - return true, nil -} - -func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { - var verifiedManifest *schema1.Manifest - verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref) - if err != nil { - return "", "", err - } - - rootFS := image.NewRootFS() - - if err := detectBaseLayer(p.config.ImageStore, verifiedManifest, rootFS); err != nil { - return "", "", err - } - - // remove duplicate layers and check parent chain validity - err = fixManifestLayers(verifiedManifest) - if err != nil { - return "", "", err - } - - var descriptors []xfer.DownloadDescriptor - - // Image history converted to the new format - var history []image.History - - // Note that the order of this loop is in the direction of bottom-most - // to top-most, so that the downloads slice gets ordered correctly. 
- for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- { - blobSum := verifiedManifest.FSLayers[i].BlobSum - - var throwAway struct { - ThrowAway bool `json:"throwaway,omitempty"` - } - if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil { - return "", "", err - } - - h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway) - if err != nil { - return "", "", err - } - history = append(history, h) - - if throwAway.ThrowAway { - continue - } - - layerDescriptor := &v2LayerDescriptor{ - digest: blobSum, - repoInfo: p.repoInfo, - repo: p.repo, - V2MetadataService: p.V2MetadataService, - } - - descriptors = append(descriptors, layerDescriptor) - } - - resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, descriptors, p.config.ProgressOutput) - if err != nil { - return "", "", err - } - defer release() - - config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history) - if err != nil { - return "", "", err - } - - imageID, err = p.config.ImageStore.Create(config) - if err != nil { - return "", "", err - } - - manifestDigest = digest.FromBytes(unverifiedManifest.Canonical) - - return imageID, manifestDigest, nil -} - -func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (imageID image.ID, manifestDigest digest.Digest, err error) { - manifestDigest, err = schema2ManifestDigest(ref, mfst) - if err != nil { - return "", "", err - } - - target := mfst.Target() - imageID = image.ID(target.Digest) - if _, err := p.config.ImageStore.Get(imageID); err == nil { - // If the image already exists locally, no need to pull - // anything. - return imageID, manifestDigest, nil - } - - var descriptors []xfer.DownloadDescriptor - - // Note that the order of this loop is in the direction of bottom-most - // to top-most, so that the downloads slice gets ordered correctly. - for _, d := range mfst.Layers { - layerDescriptor := &v2LayerDescriptor{ - digest: d.Digest, - repo: p.repo, - repoInfo: p.repoInfo, - V2MetadataService: p.V2MetadataService, - src: d, - } - - descriptors = append(descriptors, layerDescriptor) - } - - configChan := make(chan []byte, 1) - errChan := make(chan error, 1) - var cancel func() - ctx, cancel = context.WithCancel(ctx) - - // Pull the image config - go func() { - configJSON, err := p.pullSchema2ImageConfig(ctx, target.Digest) - if err != nil { - errChan <- ImageConfigPullError{Err: err} - cancel() - return - } - configChan <- configJSON - }() - - var ( - configJSON []byte // raw serialized image config - unmarshalledConfig image.Image // deserialized image config - downloadRootFS image.RootFS // rootFS to use for registering layers. 
-	)
-	if runtime.GOOS == "windows" {
-		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
-		if err != nil {
-			return "", "", err
-		}
-		if unmarshalledConfig.RootFS == nil {
-			return "", "", errors.New("image config has no rootfs section")
-		}
-		downloadRootFS = *unmarshalledConfig.RootFS
-		downloadRootFS.DiffIDs = []layer.DiffID{}
-	} else {
-		downloadRootFS = *image.NewRootFS()
-	}
-
-	rootFS, release, err := p.config.DownloadManager.Download(ctx, downloadRootFS, descriptors, p.config.ProgressOutput)
-	if err != nil {
-		if configJSON != nil {
-			// Already received the config
-			return "", "", err
-		}
-		select {
-		case err = <-errChan:
-			return "", "", err
-		default:
-			cancel()
-			select {
-			case <-configChan:
-			case <-errChan:
-			}
-			return "", "", err
-		}
-	}
-	defer release()
-
-	if configJSON == nil {
-		configJSON, unmarshalledConfig, err = receiveConfig(configChan, errChan)
-		if err != nil {
-			return "", "", err
-		}
-	}
-
-	// The DiffIDs returned in rootFS MUST match those in the config.
-	// Otherwise the image config could be referencing layers that aren't
-	// included in the manifest.
-	if len(rootFS.DiffIDs) != len(unmarshalledConfig.RootFS.DiffIDs) {
-		return "", "", errRootFSMismatch
-	}
-
-	for i := range rootFS.DiffIDs {
-		if rootFS.DiffIDs[i] != unmarshalledConfig.RootFS.DiffIDs[i] {
-			return "", "", errRootFSMismatch
-		}
-	}
-
-	imageID, err = p.config.ImageStore.Create(configJSON)
-	if err != nil {
-		return "", "", err
-	}
-
-	return imageID, manifestDigest, nil
-}
-
-func receiveConfig(configChan <-chan []byte, errChan <-chan error) ([]byte, image.Image, error) {
-	select {
-	case configJSON := <-configChan:
-		var unmarshalledConfig image.Image
-		if err := json.Unmarshal(configJSON, &unmarshalledConfig); err != nil {
-			return nil, image.Image{}, err
-		}
-		return configJSON, unmarshalledConfig, nil
-	case err := <-errChan:
-		return nil, image.Image{}, err
-		// Don't need a case for ctx.Done in the select because cancellation
-		// will trigger an error in p.pullSchema2ImageConfig.
-	}
-}
-
-// pullManifestList handles "manifest lists" which point to various
-// platform-specific manifests.
-func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (imageID image.ID, manifestListDigest digest.Digest, err error) {
-	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)
-	if err != nil {
-		return "", "", err
-	}
-
-	var manifestDigest digest.Digest
-	for _, manifestDescriptor := range mfstList.Manifests {
-		// TODO(aaronl): The manifest list spec supports optional
-		// "features" and "variant" fields. These are not yet used.
-		// Once they are, their values should be interpreted here.
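// Aside: a minimal sketch, not part of the original file, of the
// platform selection the loop below performs over a manifest list: a
// linear scan for the first entry matching the local OS/architecture.
// The type and helper are hypothetical stand-ins for the manifestlist
// descriptor fields used above.
package sketch

import "runtime"

type platform struct{ Architecture, OS string }

func matchPlatform(entries []platform) int {
	for i, p := range entries {
		if p.Architecture == runtime.GOARCH && p.OS == runtime.GOOS {
			return i // first match wins
		}
	}
	return -1 // no supported platform in the list
}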
- if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS { - manifestDigest = manifestDescriptor.Digest - break - } - } - - if manifestDigest == "" { - return "", "", errors.New("no supported platform found in manifest list") - } - - manSvc, err := p.repo.Manifests(ctx) - if err != nil { - return "", "", err - } - - manifest, err := manSvc.Get(ctx, manifestDigest) - if err != nil { - return "", "", err - } - - manifestRef, err := reference.WithDigest(ref, manifestDigest) - if err != nil { - return "", "", err - } - - switch v := manifest.(type) { - case *schema1.SignedManifest: - imageID, _, err = p.pullSchema1(ctx, manifestRef, v) - if err != nil { - return "", "", err - } - case *schema2.DeserializedManifest: - imageID, _, err = p.pullSchema2(ctx, manifestRef, v) - if err != nil { - return "", "", err - } - default: - return "", "", errors.New("unsupported manifest format") - } - - return imageID, manifestListDigest, err -} - -func (p *v2Puller) pullSchema2ImageConfig(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) { - blobs := p.repo.Blobs(ctx) - configJSON, err = blobs.Get(ctx, dgst) - if err != nil { - return nil, err - } - - // Verify image config digest - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - return nil, err - } - if _, err := verifier.Write(configJSON); err != nil { - return nil, err - } - if !verifier.Verified() { - err := fmt.Errorf("image config verification failed for digest %s", dgst) - logrus.Error(err) - return nil, err - } - - return configJSON, nil -} - -// schema2ManifestDigest computes the manifest digest, and, if pulling by -// digest, ensures that it matches the requested digest. -func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) { - _, canonical, err := mfst.Payload() - if err != nil { - return "", err - } - - // If pull by digest, then verify the manifest digest. - if digested, isDigested := ref.(reference.Canonical); isDigested { - verifier, err := digest.NewDigestVerifier(digested.Digest()) - if err != nil { - return "", err - } - if _, err := verifier.Write(canonical); err != nil { - return "", err - } - if !verifier.Verified() { - err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest()) - logrus.Error(err) - return "", err - } - return digested.Digest(), nil - } - - return digest.FromBytes(canonical), nil -} - -// allowV1Fallback checks if the error is a possible reason to fallback to v1 -// (even if confirmedV2 has been set already), and if so, wraps the error in -// a fallbackError with confirmedV2 set to false. Otherwise, it returns the -// error unmodified. -func allowV1Fallback(err error) error { - switch v := err.(type) { - case errcode.Errors: - if len(v) != 0 { - if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) { - return fallbackError{ - err: err, - confirmedV2: false, - transportOK: true, - } - } - } - case errcode.Error: - if shouldV2Fallback(v) { - return fallbackError{ - err: err, - confirmedV2: false, - transportOK: true, - } - } - case *url.Error: - if v.Err == auth.ErrNoBasicAuthCredentials { - return fallbackError{err: err, confirmedV2: false} - } - } - - return err -} - -func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) { - // If pull by digest, then verify the manifest digest. NOTE: It is - // important to do this first, before any other content validation. 
If the - // digest cannot be verified, don't even bother with those other things. - if digested, isCanonical := ref.(reference.Canonical); isCanonical { - verifier, err := digest.NewDigestVerifier(digested.Digest()) - if err != nil { - return nil, err - } - if _, err := verifier.Write(signedManifest.Canonical); err != nil { - return nil, err - } - if !verifier.Verified() { - err := fmt.Errorf("image verification failed for digest %s", digested.Digest()) - logrus.Error(err) - return nil, err - } - } - m = &signedManifest.Manifest - - if m.SchemaVersion != 1 { - return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, ref.String()) - } - if len(m.FSLayers) != len(m.History) { - return nil, fmt.Errorf("length of history not equal to number of layers for %q", ref.String()) - } - if len(m.FSLayers) == 0 { - return nil, fmt.Errorf("no FSLayers in manifest for %q", ref.String()) - } - return m, nil -} - -// fixManifestLayers removes repeated layers from the manifest and checks the -// correctness of the parent chain. -func fixManifestLayers(m *schema1.Manifest) error { - imgs := make([]*image.V1Image, len(m.FSLayers)) - for i := range m.FSLayers { - img := &image.V1Image{} - - if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil { - return err - } - - imgs[i] = img - if err := v1.ValidateID(img.ID); err != nil { - return err - } - } - - if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" { - // Windows base layer can point to a base layer parent that is not in manifest. - return errors.New("Invalid parent ID in the base layer of the image.") - } - - // check general duplicates to error instead of a deadlock - idmap := make(map[string]struct{}) - - var lastID string - for _, img := range imgs { - // skip IDs that appear after each other, we handle those later - if _, exists := idmap[img.ID]; img.ID != lastID && exists { - return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID) - } - lastID = img.ID - idmap[lastID] = struct{}{} - } - - // backwards loop so that we keep the remaining indexes after removing items - for i := len(imgs) - 2; i >= 0; i-- { - if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue - m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...) - m.History = append(m.History[:i], m.History[i+1:]...) - } else if imgs[i].Parent != imgs[i+1].ID { - return fmt.Errorf("Invalid parent ID. Expected %v, got %v.", imgs[i+1].ID, imgs[i].Parent) - } - } - - return nil -} - -func createDownloadFile() (*os.File, error) { - return ioutil.TempFile("", "GetImageBlob") -} diff --git a/distribution/pull_v2_test.go b/distribution/pull_v2_test.go deleted file mode 100644 index 8555c81e6b..0000000000 --- a/distribution/pull_v2_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package distribution - -import ( - "encoding/json" - "io/ioutil" - "reflect" - "runtime" - "strings" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/docker/reference" -) - -// TestFixManifestLayers checks that fixManifestLayers removes a duplicate -// layer, and that it makes no changes to the manifest when called a second -// time, after the duplicate is removed. 
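// Aside: before the test body below, a condensed sketch of the rule
// fixManifestLayers enforces, stated over bare ID/parent pairs instead
// of schema1 types. The helper is hypothetical and not part of the
// original file.
package sketch

import "fmt"

type entry struct{ ID, Parent string }

// dedupe drops an entry when it repeats its immediate successor, and
// rejects any remaining entry whose parent is not that successor.
func dedupe(entries []entry) ([]entry, error) {
	// Walk backwards so removals keep the untouched indexes valid.
	for i := len(entries) - 2; i >= 0; i-- {
		switch {
		case entries[i].ID == entries[i+1].ID:
			entries = append(entries[:i], entries[i+1:]...)
		case entries[i].Parent != entries[i+1].ID:
			return nil, fmt.Errorf("invalid parent ID: expected %v, got %v", entries[i+1].ID, entries[i].Parent)
		}
	}
	return entries, nil
}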
-func TestFixManifestLayers(t *testing.T) { - duplicateLayerManifest := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - duplicateLayerManifestExpectedOutput := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: 
"{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" 
./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - if err := fixManifestLayers(&duplicateLayerManifest); err != nil { - t.Fatalf("unexpected error from fixManifestLayers: %v", err) - } - - if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { - t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest") - } - - // Run fixManifestLayers again and confirm that it doesn't change the - // manifest (which no longer has duplicate layers). - if err := fixManifestLayers(&duplicateLayerManifest); err != nil { - t.Fatalf("unexpected error from fixManifestLayers: %v", err) - } - - if !reflect.DeepEqual(duplicateLayerManifest, duplicateLayerManifestExpectedOutput) { - t.Fatal("incorrect output from fixManifestLayers on duplicate layer manifest (second pass)") - } -} - -// TestFixManifestLayersBaseLayerParent makes sure that fixManifestLayers fails -// if the base layer configuration specifies a parent. 
-func TestFixManifestLayersBaseLayerParent(t *testing.T) { - // TODO Windows: Fix this unit text - if runtime.GOOS == "windows" { - t.Skip("Needs fixing on Windows") - } - duplicateLayerManifest := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"parent\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID in the base layer of the image.") { - t.Fatalf("expected an invalid parent ID error from fixManifestLayers") - } -} - -// TestFixManifestLayersBadParent makes sure that fixManifestLayers fails -// if an image configuration specifies a parent that doesn't directly 
follow -// that (deduplicated) image in the image history. -func TestFixManifestLayersBadParent(t *testing.T) { - duplicateLayerManifest := schema1.Manifest{ - FSLayers: []schema1.FSLayer{ - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - {BlobSum: digest.Digest("sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")}, - }, - History: []schema1.History{ - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT [\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"3b38edc92eb7c074812e217b41a6ade66888531009d6286a6f5f36a06f9841b9\",\"parent\":\"ac3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:11.368300679Z\",\"container\":\"d91be3479d5b1e84b0c00d18eea9dc777ca0ad166d51174b24283e2e6f104253\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"#(nop) ENTRYPOINT 
[\\\"/go/bin/dnsdock\\\"]\"],\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":null,\"Image\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":[\"/go/bin/dnsdock\"],\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":0}\n"}, - {V1Compatibility: "{\"id\":\"ec3025ca8cc9bcab039e193e20ec647c2da3c53a74020f2ba611601f9b2c6c02\",\"created\":\"2015-08-19T16:49:07.568027497Z\",\"container\":\"fe9e5a5264a843c9292d17b736c92dd19bdb49986a8782d7389964ddaff887cc\",\"container_config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/sh\",\"-c\",\"cd /go/src/github.com/tonistiigi/dnsdock \\u0026\\u0026 go get -v github.com/tools/godep \\u0026\\u0026 godep restore \\u0026\\u0026 go install -ldflags \\\"-X main.version `git describe --tags HEAD``if [[ -n $(command git status --porcelain --untracked-files=no 2\\u003e/dev/null) ]]; then echo \\\"-dirty\\\"; fi`\\\" ./...\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"docker_version\":\"1.6.2\",\"config\":{\"Hostname\":\"03797203757d\",\"Domainname\":\"\",\"User\":\"\",\"Memory\":0,\"MemorySwap\":0,\"CpuShares\":0,\"Cpuset\":\"\",\"AttachStdin\":false,\"AttachStdout\":false,\"AttachStderr\":false,\"PortSpecs\":null,\"ExposedPorts\":null,\"Tty\":false,\"OpenStdin\":false,\"StdinOnce\":false,\"Env\":[\"PATH=/go/bin:/usr/src/go/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin\",\"GOLANG_VERSION=1.4.1\",\"GOPATH=/go\"],\"Cmd\":[\"/bin/bash\"],\"Image\":\"e3b0ff09e647595dafee15c54cd632c900df9e82b1d4d313b1e20639a1461779\",\"Volumes\":null,\"WorkingDir\":\"/go\",\"Entrypoint\":null,\"NetworkDisabled\":false,\"MacAddress\":\"\",\"OnBuild\":[],\"Labels\":{}},\"architecture\":\"amd64\",\"os\":\"linux\",\"Size\":118430532}\n"}, - }, - } - - if err := fixManifestLayers(&duplicateLayerManifest); err == nil || !strings.Contains(err.Error(), "Invalid parent ID.") { - t.Fatalf("expected an invalid parent ID error from fixManifestLayers") - } -} - -// TestValidateManifest verifies the validateManifest function -func TestValidateManifest(t *testing.T) { - // TODO Windows: Fix this unit text - if runtime.GOOS == "windows" { - t.Skip("Needs fixing on Windows") - } - expectedDigest, err := 
reference.ParseNamed("repo@sha256:02fee8c3220ba806531f606525eceb83f4feb654f62b207191b1c9209188dedd") - if err != nil { - t.Fatal("could not parse reference") - } - expectedFSLayer0 := digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - - // Good manifest - - goodManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/good_manifest") - if err != nil { - t.Fatal("error reading fixture:", err) - } - - var goodSignedManifest schema1.SignedManifest - err = json.Unmarshal(goodManifestBytes, &goodSignedManifest) - if err != nil { - t.Fatal("error unmarshaling manifest:", err) - } - - verifiedManifest, err := verifySchema1Manifest(&goodSignedManifest, expectedDigest) - if err != nil { - t.Fatal("validateManifest failed:", err) - } - - if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { - t.Fatal("unexpected FSLayer in good manifest") - } - - // "Extra data" manifest - - extraDataManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/extra_data_manifest") - if err != nil { - t.Fatal("error reading fixture:", err) - } - - var extraDataSignedManifest schema1.SignedManifest - err = json.Unmarshal(extraDataManifestBytes, &extraDataSignedManifest) - if err != nil { - t.Fatal("error unmarshaling manifest:", err) - } - - verifiedManifest, err = verifySchema1Manifest(&extraDataSignedManifest, expectedDigest) - if err != nil { - t.Fatal("validateManifest failed:", err) - } - - if verifiedManifest.FSLayers[0].BlobSum != expectedFSLayer0 { - t.Fatal("unexpected FSLayer in extra data manifest") - } - - // Bad manifest - - badManifestBytes, err := ioutil.ReadFile("fixtures/validate_manifest/bad_manifest") - if err != nil { - t.Fatal("error reading fixture:", err) - } - - var badSignedManifest schema1.SignedManifest - err = json.Unmarshal(badManifestBytes, &badSignedManifest) - if err != nil { - t.Fatal("error unmarshaling manifest:", err) - } - - verifiedManifest, err = verifySchema1Manifest(&badSignedManifest, expectedDigest) - if err == nil || !strings.HasPrefix(err.Error(), "image verification failed for digest") { - t.Fatal("expected validateManifest to fail with digest error") - } -} diff --git a/distribution/pull_v2_unix.go b/distribution/pull_v2_unix.go deleted file mode 100644 index cdd7806ad2..0000000000 --- a/distribution/pull_v2_unix.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build !windows - -package distribution - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/docker/image" -) - -func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { - return nil -} - -func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { - blobs := ld.repo.Blobs(ctx) - return blobs.Open(ctx, ld.digest) -} diff --git a/distribution/pull_v2_windows.go b/distribution/pull_v2_windows.go deleted file mode 100644 index f98825d0be..0000000000 --- a/distribution/pull_v2_windows.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build windows - -package distribution - -import ( - "encoding/json" - "fmt" - "net/http" - "os" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/image" -) - -func detectBaseLayer(is image.Store, m *schema1.Manifest, rootFS *image.RootFS) error { - v1img := 
&image.V1Image{} - if err := json.Unmarshal([]byte(m.History[len(m.History)-1].V1Compatibility), v1img); err != nil { - return err - } - if v1img.Parent == "" { - return fmt.Errorf("Last layer %q does not have a base layer reference", v1img.ID) - } - // There must be an image that already references the baselayer. - for _, img := range is.Map() { - if img.RootFS.Type == image.TypeLayersWithBase && img.RootFS.BaseLayerID() == v1img.Parent { - rootFS.BaseLayer = img.RootFS.BaseLayer - rootFS.Type = image.TypeLayersWithBase - return nil - } - } - return fmt.Errorf("Invalid base layer %q", v1img.Parent) -} - -var _ distribution.Describable = &v2LayerDescriptor{} - -func (ld *v2LayerDescriptor) Descriptor() distribution.Descriptor { - if ld.src.MediaType == schema2.MediaTypeForeignLayer && len(ld.src.URLs) > 0 { - return ld.src - } - return distribution.Descriptor{} -} - -func (ld *v2LayerDescriptor) open(ctx context.Context) (distribution.ReadSeekCloser, error) { - if len(ld.src.URLs) == 0 { - blobs := ld.repo.Blobs(ctx) - return blobs.Open(ctx, ld.digest) - } - - var ( - err error - rsc distribution.ReadSeekCloser - ) - - // Find the first URL that results in a 200 result code. - for _, url := range ld.src.URLs { - rsc = transport.NewHTTPReadSeeker(http.DefaultClient, url, nil) - _, err = rsc.Seek(0, os.SEEK_SET) - if err == nil { - break - } - rsc.Close() - rsc = nil - } - return rsc, err -} diff --git a/distribution/push.go b/distribution/push.go deleted file mode 100644 index f3b54b7cf7..0000000000 --- a/distribution/push.go +++ /dev/null @@ -1,219 +0,0 @@ -package distribution - -import ( - "bufio" - "compress/gzip" - "fmt" - "io" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "github.com/docker/libtrust" - "golang.org/x/net/context" -) - -// ImagePushConfig stores push configuration. -type ImagePushConfig struct { - // MetaHeaders store HTTP headers with metadata about the image - MetaHeaders map[string][]string - // AuthConfig holds authentication credentials for authenticating with - // the registry. - AuthConfig *types.AuthConfig - // ProgressOutput is the interface for showing the status of the push - // operation. - ProgressOutput progress.Output - // RegistryService is the registry service to use for TLS configuration - // and endpoint lookup. - RegistryService registry.Service - // ImageEventLogger notifies events for a given image - ImageEventLogger func(id, name, action string) - // MetadataStore is the storage backend for distribution-specific - // metadata. - MetadataStore metadata.Store - // LayerStore manages layers. - LayerStore layer.Store - // ImageStore manages images. - ImageStore image.Store - // ReferenceStore manages tags. - ReferenceStore reference.Store - // TrustKey is the private key for legacy signatures. This is typically - // an ephemeral key, since these signatures are no longer verified. - TrustKey libtrust.PrivateKey - // UploadManager dispatches uploads. - UploadManager *xfer.LayerUploadManager -} - -// Pusher is an interface that abstracts pushing for different API versions. -type Pusher interface { - // Push tries to push the image configured at the creation of Pusher. 
- // Push returns an error if any, as well as a boolean that determines whether to retry Push on the next configured endpoint. - // - // TODO(tiborvass): have Push() take a reference to repository + tag, so that the pusher itself is repository-agnostic. - Push(ctx context.Context) error -} - -const compressionBufSize = 32768 - -// NewPusher creates a new Pusher interface that will push to either a v1 or v2 -// registry. The endpoint argument contains a Version field that determines -// whether a v1 or v2 pusher will be created. The other parameters are passed -// through to the underlying pusher implementation for use during the actual -// push operation. -func NewPusher(ref reference.Named, endpoint registry.APIEndpoint, repoInfo *registry.RepositoryInfo, imagePushConfig *ImagePushConfig) (Pusher, error) { - switch endpoint.Version { - case registry.APIVersion2: - return &v2Pusher{ - v2MetadataService: metadata.NewV2MetadataService(imagePushConfig.MetadataStore), - ref: ref, - endpoint: endpoint, - repoInfo: repoInfo, - config: imagePushConfig, - }, nil - case registry.APIVersion1: - return &v1Pusher{ - v1IDService: metadata.NewV1IDService(imagePushConfig.MetadataStore), - ref: ref, - endpoint: endpoint, - repoInfo: repoInfo, - config: imagePushConfig, - }, nil - } - return nil, fmt.Errorf("unknown version %d for registry %s", endpoint.Version, endpoint.URL) -} - -// Push initiates a push operation on ref. -// ref is the specific variant of the image to be pushed. -// If no tag is provided, all tags will be pushed. -func Push(ctx context.Context, ref reference.Named, imagePushConfig *ImagePushConfig) error { - // FIXME: Allow to interrupt current push when new push of same image is done. - - // Resolve the Repository name from fqn to RepositoryInfo - repoInfo, err := imagePushConfig.RegistryService.ResolveRepository(ref) - if err != nil { - return err - } - - endpoints, err := imagePushConfig.RegistryService.LookupPushEndpoints(repoInfo.Hostname()) - if err != nil { - return err - } - - progress.Messagef(imagePushConfig.ProgressOutput, "", "The push refers to a repository [%s]", repoInfo.FullName()) - - associations := imagePushConfig.ReferenceStore.ReferencesByName(repoInfo) - if len(associations) == 0 { - return fmt.Errorf("An image does not exist locally with the tag: %s", repoInfo.Name()) - } - - var ( - lastErr error - - // confirmedV2 is set to true if a push attempt managed to - // confirm that it was talking to a v2 registry. This will - // prevent fallback to the v1 protocol. - confirmedV2 bool - - // confirmedTLSRegistries is a map indicating which registries - // are known to be using TLS. There should never be a plaintext - // retry for any of these. - confirmedTLSRegistries = make(map[string]struct{}) - ) - - for _, endpoint := range endpoints { - if confirmedV2 && endpoint.Version == registry.APIVersion1 { - logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) - continue - } - - if endpoint.URL.Scheme != "https" { - if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS { - logrus.Debugf("Skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL) - continue - } - } - - logrus.Debugf("Trying to push %s to %s %s", repoInfo.FullName(), endpoint.URL, endpoint.Version) - - pusher, err := NewPusher(ref, endpoint, repoInfo, imagePushConfig) - if err != nil { - lastErr = err - continue - } - if err := pusher.Push(ctx); err != nil { - // Was this push cancelled? If so, don't try to fall - // back. 
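// Aside: the non-blocking select that follows distinguishes a user
// cancellation from an ordinary endpoint failure. Its minimal form, as
// a hypothetical helper (stdlib context assumed in place of
// golang.org/x/net/context), not part of the original file:
package sketch

import "context"

func cancelled(ctx context.Context) bool {
	select {
	case <-ctx.Done():
		return true // the push was cancelled; do not fall back
	default:
		return false // a real error; endpoint fallback may proceed
	}
}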
- select { - case <-ctx.Done(): - default: - if fallbackErr, ok := err.(fallbackError); ok { - confirmedV2 = confirmedV2 || fallbackErr.confirmedV2 - if fallbackErr.transportOK && endpoint.URL.Scheme == "https" { - confirmedTLSRegistries[endpoint.URL.Host] = struct{}{} - } - err = fallbackErr.err - lastErr = err - logrus.Errorf("Attempting next endpoint for push after error: %v", err) - continue - } - } - - logrus.Errorf("Not continuing with push after error: %v", err) - return err - } - - imagePushConfig.ImageEventLogger(ref.String(), repoInfo.Name(), "push") - return nil - } - - if lastErr == nil { - lastErr = fmt.Errorf("no endpoints found for %s", repoInfo.FullName()) - } - return lastErr -} - -// compress returns an io.ReadCloser which will supply a compressed version of -// the provided Reader. The caller must close the ReadCloser after reading the -// compressed data. -// -// Note that this function returns a reader instead of taking a writer as an -// argument so that it can be used with httpBlobWriter's ReadFrom method. -// Using httpBlobWriter's Write method would send a PATCH request for every -// Write call. -// -// The second return value is a channel that gets closed when the goroutine -// is finished. This allows the caller to make sure the goroutine finishes -// before it releases any resources connected with the reader that was -// passed in. -func compress(in io.Reader) (io.ReadCloser, chan struct{}) { - compressionDone := make(chan struct{}) - - pipeReader, pipeWriter := io.Pipe() - // Use a bufio.Writer to avoid excessive chunking in HTTP request. - bufWriter := bufio.NewWriterSize(pipeWriter, compressionBufSize) - compressor := gzip.NewWriter(bufWriter) - - go func() { - _, err := io.Copy(compressor, in) - if err == nil { - err = compressor.Close() - } - if err == nil { - err = bufWriter.Flush() - } - if err != nil { - pipeWriter.CloseWithError(err) - } else { - pipeWriter.Close() - } - close(compressionDone) - }() - - return pipeReader, compressionDone -} diff --git a/distribution/push_v1.go b/distribution/push_v1.go deleted file mode 100644 index b6e4a13046..0000000000 --- a/distribution/push_v1.go +++ /dev/null @@ -1,454 +0,0 @@ -package distribution - -import ( - "fmt" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "golang.org/x/net/context" -) - -type v1Pusher struct { - v1IDService *metadata.V1IDService - endpoint registry.APIEndpoint - ref reference.Named - repoInfo *registry.RepositoryInfo - config *ImagePushConfig - session *registry.Session -} - -func (p *v1Pusher) Push(ctx context.Context) error { - tlsConfig, err := p.config.RegistryService.TLSConfig(p.repoInfo.Index.Name) - if err != nil { - return err - } - // Adds Docker-specific headers as well as user-specified headers (metaHeaders) - tr := transport.NewTransport( - // TODO(tiborvass): was NoTimeout - registry.NewTransport(tlsConfig), - registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders)..., - ) - client := registry.HTTPClient(tr) - v1Endpoint, err := 
p.endpoint.ToV1Endpoint(dockerversion.DockerUserAgent(ctx), p.config.MetaHeaders) - if err != nil { - logrus.Debugf("Could not get v1 endpoint: %v", err) - return fallbackError{err: err} - } - p.session, err = registry.NewSession(client, p.config.AuthConfig, v1Endpoint) - if err != nil { - // TODO(dmcgowan): Check if should fallback - return fallbackError{err: err} - } - if err := p.pushRepository(ctx); err != nil { - // TODO(dmcgowan): Check if should fallback - return err - } - return nil -} - -// v1Image exposes the configuration, filesystem layer ID, and a v1 ID for an -// image being pushed to a v1 registry. -type v1Image interface { - Config() []byte - Layer() layer.Layer - V1ID() string -} - -type v1ImageCommon struct { - layer layer.Layer - config []byte - v1ID string -} - -func (common *v1ImageCommon) Config() []byte { - return common.config -} - -func (common *v1ImageCommon) V1ID() string { - return common.v1ID -} - -func (common *v1ImageCommon) Layer() layer.Layer { - return common.layer -} - -// v1TopImage defines a runnable (top layer) image being pushed to a v1 -// registry. -type v1TopImage struct { - v1ImageCommon - imageID image.ID -} - -func newV1TopImage(imageID image.ID, img *image.Image, l layer.Layer, parent *v1DependencyImage) (*v1TopImage, error) { - v1ID := digest.Digest(imageID).Hex() - parentV1ID := "" - if parent != nil { - parentV1ID = parent.V1ID() - } - - config, err := v1.MakeV1ConfigFromConfig(img, v1ID, parentV1ID, false) - if err != nil { - return nil, err - } - - return &v1TopImage{ - v1ImageCommon: v1ImageCommon{ - v1ID: v1ID, - config: config, - layer: l, - }, - imageID: imageID, - }, nil -} - -// v1DependencyImage defines a dependency layer being pushed to a v1 registry. -type v1DependencyImage struct { - v1ImageCommon -} - -func newV1DependencyImage(l layer.Layer, parent *v1DependencyImage) (*v1DependencyImage, error) { - v1ID := digest.Digest(l.ChainID()).Hex() - - config := "" - if parent != nil { - config = fmt.Sprintf(`{"id":"%s","parent":"%s"}`, v1ID, parent.V1ID()) - } else { - config = fmt.Sprintf(`{"id":"%s"}`, v1ID) - } - return &v1DependencyImage{ - v1ImageCommon: v1ImageCommon{ - v1ID: v1ID, - config: []byte(config), - layer: l, - }, - }, nil -} - -// Retrieve the all the images to be uploaded in the correct order -func (p *v1Pusher) getImageList() (imageList []v1Image, tagsByImage map[image.ID][]string, referencedLayers []layer.Layer, err error) { - tagsByImage = make(map[image.ID][]string) - - // Ignore digest references - if _, isCanonical := p.ref.(reference.Canonical); isCanonical { - return - } - - tagged, isTagged := p.ref.(reference.NamedTagged) - if isTagged { - // Push a specific tag - var imgID image.ID - imgID, err = p.config.ReferenceStore.Get(p.ref) - if err != nil { - return - } - - imageList, err = p.imageListForTag(imgID, nil, &referencedLayers) - if err != nil { - return - } - - tagsByImage[imgID] = []string{tagged.Tag()} - - return - } - - imagesSeen := make(map[image.ID]struct{}) - dependenciesSeen := make(map[layer.ChainID]*v1DependencyImage) - - associations := p.config.ReferenceStore.ReferencesByName(p.ref) - for _, association := range associations { - if tagged, isTagged = association.Ref.(reference.NamedTagged); !isTagged { - // Ignore digest references. 
- continue - } - - tagsByImage[association.ImageID] = append(tagsByImage[association.ImageID], tagged.Tag()) - - if _, present := imagesSeen[association.ImageID]; present { - // Skip generating image list for already-seen image - continue - } - imagesSeen[association.ImageID] = struct{}{} - - imageListForThisTag, err := p.imageListForTag(association.ImageID, dependenciesSeen, &referencedLayers) - if err != nil { - return nil, nil, nil, err - } - - // append to main image list - imageList = append(imageList, imageListForThisTag...) - } - if len(imageList) == 0 { - return nil, nil, nil, fmt.Errorf("No images found for the requested repository / tag") - } - logrus.Debugf("Image list: %v", imageList) - logrus.Debugf("Tags by image: %v", tagsByImage) - - return -} - -func (p *v1Pusher) imageListForTag(imgID image.ID, dependenciesSeen map[layer.ChainID]*v1DependencyImage, referencedLayers *[]layer.Layer) (imageListForThisTag []v1Image, err error) { - img, err := p.config.ImageStore.Get(imgID) - if err != nil { - return nil, err - } - - topLayerID := img.RootFS.ChainID() - - var l layer.Layer - if topLayerID == "" { - l = layer.EmptyLayer - } else { - l, err = p.config.LayerStore.Get(topLayerID) - *referencedLayers = append(*referencedLayers, l) - if err != nil { - return nil, fmt.Errorf("failed to get top layer from image: %v", err) - } - } - - dependencyImages, parent, err := generateDependencyImages(l.Parent(), dependenciesSeen) - if err != nil { - return nil, err - } - - topImage, err := newV1TopImage(imgID, img, l, parent) - if err != nil { - return nil, err - } - - imageListForThisTag = append(dependencyImages, topImage) - - return -} - -func generateDependencyImages(l layer.Layer, dependenciesSeen map[layer.ChainID]*v1DependencyImage) (imageListForThisTag []v1Image, parent *v1DependencyImage, err error) { - if l == nil { - return nil, nil, nil - } - - imageListForThisTag, parent, err = generateDependencyImages(l.Parent(), dependenciesSeen) - - if dependenciesSeen != nil { - if dependencyImage, present := dependenciesSeen[l.ChainID()]; present { - // This layer is already on the list, we can ignore it - // and all its parents. - return imageListForThisTag, dependencyImage, nil - } - } - - dependencyImage, err := newV1DependencyImage(l, parent) - if err != nil { - return nil, nil, err - } - imageListForThisTag = append(imageListForThisTag, dependencyImage) - - if dependenciesSeen != nil { - dependenciesSeen[l.ChainID()] = dependencyImage - } - - return imageListForThisTag, dependencyImage, nil -} - -// createImageIndex returns an index of an image's layer IDs and tags. -func createImageIndex(images []v1Image, tags map[image.ID][]string) []*registry.ImgData { - var imageIndex []*registry.ImgData - for _, img := range images { - v1ID := img.V1ID() - - if topImage, isTopImage := img.(*v1TopImage); isTopImage { - if tags, hasTags := tags[topImage.imageID]; hasTags { - // If an image has tags you must add an entry in the image index - // for each tag - for _, tag := range tags { - imageIndex = append(imageIndex, ®istry.ImgData{ - ID: v1ID, - Tag: tag, - }) - } - continue - } - } - - // If the image does not have a tag it still needs to be sent to the - // registry with an empty tag so that it is associated with the repository - imageIndex = append(imageIndex, ®istry.ImgData{ - ID: v1ID, - Tag: "", - }) - } - return imageIndex -} - -// lookupImageOnEndpoint checks the specified endpoint to see if an image exists -// and if it is absent then it sends the image id to the channel to be pushed. 
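// Illustrative usage, taken from pushImageToEndpoint below (not an
// addition to the API): several of these workers drain a shared
// channel of candidate images concurrently,
//
//	for i := 0; i < workerCount; i++ {
//		wg.Add(1)
//		go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush)
//	}
//
// and the IDs collected on imagesToPush are exactly the layers that
// still need to be uploaded to this endpoint.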
-func (p *v1Pusher) lookupImageOnEndpoint(wg *sync.WaitGroup, endpoint string, images chan v1Image, imagesToPush chan string) { - defer wg.Done() - for image := range images { - v1ID := image.V1ID() - truncID := stringid.TruncateID(image.Layer().DiffID().String()) - if err := p.session.LookupRemoteImage(v1ID, endpoint); err != nil { - logrus.Errorf("Error in LookupRemoteImage: %s", err) - imagesToPush <- v1ID - progress.Update(p.config.ProgressOutput, truncID, "Waiting") - } else { - progress.Update(p.config.ProgressOutput, truncID, "Already exists") - } - } -} - -func (p *v1Pusher) pushImageToEndpoint(ctx context.Context, endpoint string, imageList []v1Image, tags map[image.ID][]string, repo *registry.RepositoryData) error { - workerCount := len(imageList) - // start a maximum of 5 workers to check if images exist on the specified endpoint. - if workerCount > 5 { - workerCount = 5 - } - var ( - wg = &sync.WaitGroup{} - imageData = make(chan v1Image, workerCount*2) - imagesToPush = make(chan string, workerCount*2) - pushes = make(chan map[string]struct{}, 1) - ) - for i := 0; i < workerCount; i++ { - wg.Add(1) - go p.lookupImageOnEndpoint(wg, endpoint, imageData, imagesToPush) - } - // start a go routine that consumes the images to push - go func() { - shouldPush := make(map[string]struct{}) - for id := range imagesToPush { - shouldPush[id] = struct{}{} - } - pushes <- shouldPush - }() - for _, v1Image := range imageList { - imageData <- v1Image - } - // close the channel to notify the workers that there will be no more images to check. - close(imageData) - wg.Wait() - close(imagesToPush) - // wait for all the images that require pushes to be collected into a consumable map. - shouldPush := <-pushes - // finish by pushing any images and tags to the endpoint. The order that the images are pushed - // is very important that is why we are still iterating over the ordered list of imageIDs. - for _, img := range imageList { - v1ID := img.V1ID() - if _, push := shouldPush[v1ID]; push { - if _, err := p.pushImage(ctx, img, endpoint); err != nil { - // FIXME: Continue on error? - return err - } - } - if topImage, isTopImage := img.(*v1TopImage); isTopImage { - for _, tag := range tags[topImage.imageID] { - progress.Messagef(p.config.ProgressOutput, "", "Pushing tag for rev [%s] on {%s}", stringid.TruncateID(v1ID), endpoint+"repositories/"+p.repoInfo.RemoteName()+"/tags/"+tag) - if err := p.session.PushRegistryTag(p.repoInfo, v1ID, tag, endpoint); err != nil { - return err - } - } - } - } - return nil -} - -// pushRepository pushes layers that do not already exist on the registry. -func (p *v1Pusher) pushRepository(ctx context.Context) error { - imgList, tags, referencedLayers, err := p.getImageList() - defer func() { - for _, l := range referencedLayers { - p.config.LayerStore.Release(l) - } - }() - if err != nil { - return err - } - - imageIndex := createImageIndex(imgList, tags) - for _, data := range imageIndex { - logrus.Debugf("Pushing ID: %s with Tag: %s", data.ID, data.Tag) - } - - // Register all the images in a repository with the registry - // If an image is not in this list it will not be associated with the repository - repoData, err := p.session.PushImageJSONIndex(p.repoInfo, imageIndex, false, nil) - if err != nil { - return err - } - // push the repository to each of the endpoints only if it does not exist. 
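	// Reading the two PushImageJSONIndex calls together (editorial gloss,
	// inferred from the arguments rather than stated in the source): the
	// first call above, with (false, nil), registers the image index and
	// yields repoData.Endpoints; the final call below, with
	// (true, repoData.Endpoints), appears to confirm the push once every
	// endpoint has been handled.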
- for _, endpoint := range repoData.Endpoints { - if err := p.pushImageToEndpoint(ctx, endpoint, imgList, tags, repoData); err != nil { - return err - } - } - _, err = p.session.PushImageJSONIndex(p.repoInfo, imageIndex, true, repoData.Endpoints) - return err -} - -func (p *v1Pusher) pushImage(ctx context.Context, v1Image v1Image, ep string) (checksum string, err error) { - l := v1Image.Layer() - v1ID := v1Image.V1ID() - truncID := stringid.TruncateID(l.DiffID().String()) - - jsonRaw := v1Image.Config() - progress.Update(p.config.ProgressOutput, truncID, "Pushing") - - // General rule is to use ID for graph accesses and compatibilityID for - // calls to session.registry() - imgData := ®istry.ImgData{ - ID: v1ID, - } - - // Send the json - if err := p.session.PushImageJSONRegistry(imgData, jsonRaw, ep); err != nil { - if err == registry.ErrAlreadyExists { - progress.Update(p.config.ProgressOutput, truncID, "Image already pushed, skipping") - return "", nil - } - return "", err - } - - arch, err := l.TarStream() - if err != nil { - return "", err - } - defer arch.Close() - - // don't care if this fails; best effort - size, _ := l.DiffSize() - - // Send the layer - logrus.Debugf("rendered layer for %s of [%d] size", v1ID, size) - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), p.config.ProgressOutput, size, truncID, "Pushing") - defer reader.Close() - - checksum, checksumPayload, err := p.session.PushImageLayerRegistry(v1ID, reader, ep, jsonRaw) - if err != nil { - return "", err - } - imgData.Checksum = checksum - imgData.ChecksumPayload = checksumPayload - // Send the checksum - if err := p.session.PushImageChecksumRegistry(imgData, ep); err != nil { - return "", err - } - - if err := p.v1IDService.Set(v1ID, p.repoInfo.Index.Name, l.DiffID()); err != nil { - logrus.Warnf("Could not set v1 ID mapping: %v", err) - } - - progress.Update(p.config.ProgressOutput, truncID, "Image successfully pushed") - return imgData.Checksum, nil -} diff --git a/distribution/push_v2.go b/distribution/push_v2.go deleted file mode 100644 index 7d331f43d2..0000000000 --- a/distribution/push_v2.go +++ /dev/null @@ -1,452 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "io" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - distreference "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/distribution/xfer" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "golang.org/x/net/context" -) - -// PushResult contains the tag, manifest digest, and manifest size from the -// push. It's used to signal this information to the trust code in the client -// so it can sign the manifest if necessary. -type PushResult struct { - Tag string - Digest digest.Digest - Size int -} - -type v2Pusher struct { - v2MetadataService *metadata.V2MetadataService - ref reference.Named - endpoint registry.APIEndpoint - repoInfo *registry.RepositoryInfo - config *ImagePushConfig - repo distribution.Repository - - // pushState is state built by the Upload functions. 
- pushState pushState -} - -type pushState struct { - sync.Mutex - // remoteLayers is the set of layers known to exist on the remote side. - // This avoids redundant queries when pushing multiple tags that - // involve the same layers. It is also used to fill in digest and size - // information when building the manifest. - remoteLayers map[layer.DiffID]distribution.Descriptor - // confirmedV2 is set to true if we confirm we're talking to a v2 - // registry. This is used to limit fallbacks to the v1 protocol. - confirmedV2 bool -} - -func (p *v2Pusher) Push(ctx context.Context) (err error) { - p.pushState.remoteLayers = make(map[layer.DiffID]distribution.Descriptor) - - p.repo, p.pushState.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "push", "pull") - if err != nil { - logrus.Debugf("Error getting v2 registry: %v", err) - return err - } - - if err = p.pushV2Repository(ctx); err != nil { - if continueOnError(err) { - return fallbackError{ - err: err, - confirmedV2: p.pushState.confirmedV2, - transportOK: true, - } - } - } - return err -} - -func (p *v2Pusher) pushV2Repository(ctx context.Context) (err error) { - if namedTagged, isNamedTagged := p.ref.(reference.NamedTagged); isNamedTagged { - imageID, err := p.config.ReferenceStore.Get(p.ref) - if err != nil { - return fmt.Errorf("tag does not exist: %s", p.ref.String()) - } - - return p.pushV2Tag(ctx, namedTagged, imageID) - } - - if !reference.IsNameOnly(p.ref) { - return errors.New("cannot push a digest reference") - } - - // Pull all tags - pushed := 0 - for _, association := range p.config.ReferenceStore.ReferencesByName(p.ref) { - if namedTagged, isNamedTagged := association.Ref.(reference.NamedTagged); isNamedTagged { - pushed++ - if err := p.pushV2Tag(ctx, namedTagged, association.ImageID); err != nil { - return err - } - } - } - - if pushed == 0 { - return fmt.Errorf("no tags to push for %s", p.repoInfo.Name()) - } - - return nil -} - -func (p *v2Pusher) pushV2Tag(ctx context.Context, ref reference.NamedTagged, imageID image.ID) error { - logrus.Debugf("Pushing repository: %s", ref.String()) - - img, err := p.config.ImageStore.Get(imageID) - if err != nil { - return fmt.Errorf("could not find image from tag %s: %v", ref.String(), err) - } - - var l layer.Layer - - topLayerID := img.RootFS.ChainID() - if topLayerID == "" { - l = layer.EmptyLayer - } else { - l, err = p.config.LayerStore.Get(topLayerID) - if err != nil { - return fmt.Errorf("failed to get top layer from image: %v", err) - } - defer layer.ReleaseAndLog(p.config.LayerStore, l) - } - - var descriptors []xfer.UploadDescriptor - - descriptorTemplate := v2PushDescriptor{ - v2MetadataService: p.v2MetadataService, - repoInfo: p.repoInfo, - ref: p.ref, - repo: p.repo, - pushState: &p.pushState, - } - - // Loop bounds condition is to avoid pushing the base layer on Windows. 
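	// Why this bound (sketch of the reasoning, not in the original
	// comment): img.RootFS.DiffIDs has one entry per layer the engine
	// itself manages, while the l = l.Parent() walk below can also reach a
	// Windows base layer stored separately in RootFS.BaseLayer. Iterating
	// exactly len(img.RootFS.DiffIDs) times therefore covers every
	// engine-managed layer and stops before the base layer.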
- for i := 0; i < len(img.RootFS.DiffIDs); i++ { - descriptor := descriptorTemplate - descriptor.layer = l - descriptors = append(descriptors, &descriptor) - - l = l.Parent() - } - - if err := p.config.UploadManager.Upload(ctx, descriptors, p.config.ProgressOutput); err != nil { - return err - } - - // Try schema2 first - builder := schema2.NewManifestBuilder(p.repo.Blobs(ctx), img.RawJSON()) - manifest, err := manifestFromBuilder(ctx, builder, descriptors) - if err != nil { - return err - } - - manSvc, err := p.repo.Manifests(ctx) - if err != nil { - return err - } - - putOptions := []distribution.ManifestServiceOption{distribution.WithTag(ref.Tag())} - if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { - logrus.Warnf("failed to upload schema2 manifest: %v - falling back to schema1", err) - - manifestRef, err := distreference.WithTag(p.repo.Named(), ref.Tag()) - if err != nil { - return err - } - builder = schema1.NewConfigManifestBuilder(p.repo.Blobs(ctx), p.config.TrustKey, manifestRef, img.RawJSON()) - manifest, err = manifestFromBuilder(ctx, builder, descriptors) - if err != nil { - return err - } - - if _, err = manSvc.Put(ctx, manifest, putOptions...); err != nil { - return err - } - } - - var canonicalManifest []byte - - switch v := manifest.(type) { - case *schema1.SignedManifest: - canonicalManifest = v.Canonical - case *schema2.DeserializedManifest: - _, canonicalManifest, err = v.Payload() - if err != nil { - return err - } - } - - manifestDigest := digest.FromBytes(canonicalManifest) - progress.Messagef(p.config.ProgressOutput, "", "%s: digest: %s size: %d", ref.Tag(), manifestDigest, len(canonicalManifest)) - - if err := addDigestReference(p.config.ReferenceStore, ref, manifestDigest, imageID); err != nil { - return err - } - - // Signal digest to the trust client so it can sign the - // push, if appropriate. - progress.Aux(p.config.ProgressOutput, PushResult{Tag: ref.Tag(), Digest: manifestDigest, Size: len(canonicalManifest)}) - - return nil -} - -func manifestFromBuilder(ctx context.Context, builder distribution.ManifestBuilder, descriptors []xfer.UploadDescriptor) (distribution.Manifest, error) { - // descriptors is in reverse order; iterate backwards to get references - // appended in the right order. 
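	// Concretely (editorial note): pushV2Tag builds descriptors while
	// walking l = l.Parent(), so descriptors[0] is the image's top layer
	// and the last element is the bottom-most one, whereas a manifest
	// lists layers base first. Walking the slice from the end appends the
	// base layer first and the top layer last.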
- for i := len(descriptors) - 1; i >= 0; i-- { - if err := builder.AppendReference(descriptors[i].(*v2PushDescriptor)); err != nil { - return nil, err - } - } - - return builder.Build(ctx) -} - -type v2PushDescriptor struct { - layer layer.Layer - v2MetadataService *metadata.V2MetadataService - repoInfo reference.Named - ref reference.Named - repo distribution.Repository - pushState *pushState - remoteDescriptor distribution.Descriptor -} - -func (pd *v2PushDescriptor) Key() string { - return "v2push:" + pd.ref.FullName() + " " + pd.layer.DiffID().String() -} - -func (pd *v2PushDescriptor) ID() string { - return stringid.TruncateID(pd.layer.DiffID().String()) -} - -func (pd *v2PushDescriptor) DiffID() layer.DiffID { - return pd.layer.DiffID() -} - -func (pd *v2PushDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { - if fs, ok := pd.layer.(distribution.Describable); ok { - if d := fs.Descriptor(); len(d.URLs) > 0 { - progress.Update(progressOutput, pd.ID(), "Skipped foreign layer") - return d, nil - } - } - - diffID := pd.DiffID() - - pd.pushState.Lock() - if descriptor, ok := pd.pushState.remoteLayers[diffID]; ok { - // it is already known that the push is not needed and - // therefore doing a stat is unnecessary - pd.pushState.Unlock() - progress.Update(progressOutput, pd.ID(), "Layer already exists") - return descriptor, nil - } - pd.pushState.Unlock() - - // Do we have any metadata associated with this layer's DiffID? - v2Metadata, err := pd.v2MetadataService.GetMetadata(diffID) - if err == nil { - descriptor, exists, err := layerAlreadyExists(ctx, v2Metadata, pd.repoInfo, pd.repo, pd.pushState) - if err != nil { - progress.Update(progressOutput, pd.ID(), "Image push failed") - return distribution.Descriptor{}, retryOnError(err) - } - if exists { - progress.Update(progressOutput, pd.ID(), "Layer already exists") - pd.pushState.Lock() - pd.pushState.remoteLayers[diffID] = descriptor - pd.pushState.Unlock() - return descriptor, nil - } - } - - logrus.Debugf("Pushing layer: %s", diffID) - - // if digest was empty or not saved, or if blob does not exist on the remote repository, - // then push the blob. - bs := pd.repo.Blobs(ctx) - - var layerUpload distribution.BlobWriter - mountAttemptsRemaining := 3 - - // Attempt to find another repository in the same registry to mount the layer - // from to avoid an unnecessary upload. - // Note: metadata is stored from oldest to newest, so we iterate through this - // slice in reverse to maximize our chances of the blob still existing in the - // remote repository. 
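	// In other words (sketch of the intent): v2Metadata[len(v2Metadata)-1]
	// is the most recently recorded source repository for this DiffID, so
	// the reverse walk below tries the freshest candidates first, and
	// mountAttemptsRemaining caps the probing at three failed mounts
	// rather than trying every stale entry.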
- for i := len(v2Metadata) - 1; i >= 0 && mountAttemptsRemaining > 0; i-- { - mountFrom := v2Metadata[i] - - sourceRepo, err := reference.ParseNamed(mountFrom.SourceRepository) - if err != nil { - continue - } - if pd.repoInfo.Hostname() != sourceRepo.Hostname() { - // don't mount blobs from another registry - continue - } - - namedRef, err := reference.WithName(mountFrom.SourceRepository) - if err != nil { - continue - } - - // TODO (brianbland): We need to construct a reference where the Name is - // only the full remote name, so clean this up when distribution has a - // richer reference package - remoteRef, err := distreference.WithName(namedRef.RemoteName()) - if err != nil { - continue - } - - canonicalRef, err := distreference.WithDigest(remoteRef, mountFrom.Digest) - if err != nil { - continue - } - - logrus.Debugf("attempting to mount layer %s (%s) from %s", diffID, mountFrom.Digest, sourceRepo.FullName()) - - layerUpload, err = bs.Create(ctx, client.WithMountFrom(canonicalRef)) - switch err := err.(type) { - case distribution.ErrBlobMounted: - progress.Updatef(progressOutput, pd.ID(), "Mounted from %s", err.From.Name()) - - err.Descriptor.MediaType = schema2.MediaTypeLayer - - pd.pushState.Lock() - pd.pushState.confirmedV2 = true - pd.pushState.remoteLayers[diffID] = err.Descriptor - pd.pushState.Unlock() - - // Cache mapping from this layer's DiffID to the blobsum - if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: mountFrom.Digest, SourceRepository: pd.repoInfo.FullName()}); err != nil { - return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} - } - return err.Descriptor, nil - case nil: - // blob upload session created successfully, so begin the upload - mountAttemptsRemaining = 0 - default: - // unable to mount layer from this repository, so this source mapping is no longer valid - logrus.Debugf("unassociating layer %s (%s) with %s", diffID, mountFrom.Digest, mountFrom.SourceRepository) - pd.v2MetadataService.Remove(mountFrom) - mountAttemptsRemaining-- - } - } - - if layerUpload == nil { - layerUpload, err = bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, retryOnError(err) - } - } - defer layerUpload.Close() - - arch, err := pd.layer.TarStream() - if err != nil { - return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} - } - - // don't care if this fails; best effort - size, _ := pd.layer.DiffSize() - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(ctx, arch), progressOutput, size, pd.ID(), "Pushing") - compressedReader, compressionDone := compress(reader) - defer func() { - reader.Close() - <-compressionDone - }() - - digester := digest.Canonical.New() - tee := io.TeeReader(compressedReader, digester.Hash()) - - nn, err := layerUpload.ReadFrom(tee) - compressedReader.Close() - if err != nil { - return distribution.Descriptor{}, retryOnError(err) - } - - pushDigest := digester.Digest() - if _, err := layerUpload.Commit(ctx, distribution.Descriptor{Digest: pushDigest}); err != nil { - return distribution.Descriptor{}, retryOnError(err) - } - - logrus.Debugf("uploaded layer %s (%s), %d bytes", diffID, pushDigest, nn) - progress.Update(progressOutput, pd.ID(), "Pushed") - - // Cache mapping from this layer's DiffID to the blobsum - if err := pd.v2MetadataService.Add(diffID, metadata.V2Metadata{Digest: pushDigest, SourceRepository: pd.repoInfo.FullName()}); err != nil { - return distribution.Descriptor{}, xfer.DoNotRetry{Err: err} - } - - pd.pushState.Lock() - - // If Commit succeeded, that's an indication 
that the remote registry - // speaks the v2 protocol. - pd.pushState.confirmedV2 = true - - descriptor := distribution.Descriptor{ - Digest: pushDigest, - MediaType: schema2.MediaTypeLayer, - Size: nn, - } - pd.pushState.remoteLayers[diffID] = descriptor - - pd.pushState.Unlock() - - return descriptor, nil -} - -func (pd *v2PushDescriptor) SetRemoteDescriptor(descriptor distribution.Descriptor) { - pd.remoteDescriptor = descriptor -} - -func (pd *v2PushDescriptor) Descriptor() distribution.Descriptor { - return pd.remoteDescriptor -} - -// layerAlreadyExists checks if the registry already know about any of the -// metadata passed in the "metadata" slice. If it finds one that the registry -// knows about, it returns the known digest and "true". -func layerAlreadyExists(ctx context.Context, metadata []metadata.V2Metadata, repoInfo reference.Named, repo distribution.Repository, pushState *pushState) (distribution.Descriptor, bool, error) { - for _, meta := range metadata { - // Only check blobsums that are known to this repository or have an unknown source - if meta.SourceRepository != "" && meta.SourceRepository != repoInfo.FullName() { - continue - } - descriptor, err := repo.Blobs(ctx).Stat(ctx, meta.Digest) - switch err { - case nil: - descriptor.MediaType = schema2.MediaTypeLayer - return descriptor, true, nil - case distribution.ErrBlobUnknown: - // nop - default: - return distribution.Descriptor{}, false, err - } - } - return distribution.Descriptor{}, false, nil -} diff --git a/distribution/registry.go b/distribution/registry.go deleted file mode 100644 index 98b82fbaeb..0000000000 --- a/distribution/registry.go +++ /dev/null @@ -1,122 +0,0 @@ -package distribution - -import ( - "fmt" - "net" - "net/http" - "time" - - "github.com/docker/distribution" - distreference "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/auth" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "github.com/docker/go-connections/sockets" - "golang.org/x/net/context" -) - -// NewV2Repository returns a repository (v2 only). It creates an HTTP transport -// providing timeout settings and authentication support, and also verifies the -// remote API version. 
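// Illustrative call, mirroring the unit tests later in this patch
// (error handling elided):
//
//	repo, confirmedV2, err := NewV2Repository(
//		ctx, repoInfo, endpoint, metaHeaders, authConfig, "pull")
//
// The variadic actions ("push", "pull") become the token scope
// requested from the registry's auth endpoint via auth.RepositoryScope.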
-func NewV2Repository(ctx context.Context, repoInfo *registry.RepositoryInfo, endpoint registry.APIEndpoint, metaHeaders http.Header, authConfig *types.AuthConfig, actions ...string) (repo distribution.Repository, foundVersion bool, err error) { - repoName := repoInfo.FullName() - // If endpoint does not support CanonicalName, use the RemoteName instead - if endpoint.TrimHostname { - repoName = repoInfo.RemoteName() - } - - direct := &net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - DualStack: true, - } - - // TODO(dmcgowan): Call close idle connections when complete, use keep alive - base := &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: direct.Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: endpoint.TLSConfig, - // TODO(dmcgowan): Call close idle connections when complete and use keep alive - DisableKeepAlives: true, - } - - proxyDialer, err := sockets.DialerFromEnvironment(direct) - if err == nil { - base.Dial = proxyDialer.Dial - } - - modifiers := registry.DockerHeaders(dockerversion.DockerUserAgent(ctx), metaHeaders) - authTransport := transport.NewTransport(base, modifiers...) - - challengeManager, foundVersion, err := registry.PingV2Registry(endpoint.URL, authTransport) - if err != nil { - transportOK := false - if responseErr, ok := err.(registry.PingResponseError); ok { - transportOK = true - err = responseErr.Err - } - return nil, foundVersion, fallbackError{ - err: err, - confirmedV2: foundVersion, - transportOK: transportOK, - } - } - - if authConfig.RegistryToken != "" { - passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken} - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler)) - } else { - creds := registry.NewStaticCredentialStore(authConfig) - tokenHandlerOptions := auth.TokenHandlerOptions{ - Transport: authTransport, - Credentials: creds, - Scopes: []auth.Scope{ - auth.RepositoryScope{ - Repository: repoName, - Actions: actions, - }, - }, - ClientID: registry.AuthClientID, - } - tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions) - basicHandler := auth.NewBasicHandler(creds) - modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler)) - } - tr := transport.NewTransport(base, modifiers...) 
- - repoNameRef, err := distreference.ParseNamed(repoName) - if err != nil { - return nil, foundVersion, fallbackError{ - err: err, - confirmedV2: foundVersion, - transportOK: true, - } - } - - repo, err = client.NewRepository(ctx, repoNameRef, endpoint.URL.String(), tr) - if err != nil { - err = fallbackError{ - err: err, - confirmedV2: foundVersion, - transportOK: true, - } - } - return -} - -type existingTokenHandler struct { - token string -} - -func (th *existingTokenHandler) Scheme() string { - return "bearer" -} - -func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", th.token)) - return nil -} diff --git a/distribution/registry_unit_test.go b/distribution/registry_unit_test.go deleted file mode 100644 index b60a465d78..0000000000 --- a/distribution/registry_unit_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package distribution - -import ( - "net/http" - "net/http/httptest" - "net/url" - "os" - "strings" - "testing" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/docker/utils" - "github.com/docker/engine-api/types" - registrytypes "github.com/docker/engine-api/types/registry" - "golang.org/x/net/context" -) - -const secretRegistryToken = "mysecrettoken" - -type tokenPassThruHandler struct { - reached bool - gotToken bool - shouldSend401 func(url string) bool -} - -func (h *tokenPassThruHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - h.reached = true - if strings.Contains(r.Header.Get("Authorization"), secretRegistryToken) { - logrus.Debug("Detected registry token in auth header") - h.gotToken = true - } - if h.shouldSend401 == nil || h.shouldSend401(r.RequestURI) { - w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) - w.WriteHeader(401) - } -} - -func testTokenPassThru(t *testing.T, ts *httptest.Server) { - tmp, err := utils.TestDirectory("") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - uri, err := url.Parse(ts.URL) - if err != nil { - t.Fatalf("could not parse url from test server: %v", err) - } - - endpoint := registry.APIEndpoint{ - Mirror: false, - URL: uri, - Version: 2, - Official: false, - TrimHostname: false, - TLSConfig: nil, - //VersionHeader: "verheader", - } - n, _ := reference.ParseNamed("testremotename") - repoInfo := ®istry.RepositoryInfo{ - Named: n, - Index: ®istrytypes.IndexInfo{ - Name: "testrepo", - Mirrors: nil, - Secure: false, - Official: false, - }, - Official: false, - } - imagePullConfig := &ImagePullConfig{ - MetaHeaders: http.Header{}, - AuthConfig: &types.AuthConfig{ - RegistryToken: secretRegistryToken, - }, - } - puller, err := newPuller(endpoint, repoInfo, imagePullConfig) - if err != nil { - t.Fatal(err) - } - p := puller.(*v2Puller) - ctx := context.Background() - p.repo, _, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull") - if err != nil { - t.Fatal(err) - } - - logrus.Debug("About to pull") - // We expect it to fail, since we haven't mock'd the full registry exchange in our handler above - tag, _ := reference.WithTag(n, "tag_goes_here") - _ = p.pullV2Repository(ctx, tag) -} - -func TestTokenPassThru(t *testing.T) { - handler := &tokenPassThruHandler{shouldSend401: func(url string) bool { return url == "/v2/" }} - ts := httptest.NewServer(handler) - defer ts.Close() - - testTokenPassThru(t, ts) - - if !handler.reached { - t.Fatal("Handler not reached") - } - if 
!handler.gotToken { - t.Fatal("Failed to receive registry token") - } -} - -func TestTokenPassThruDifferentHost(t *testing.T) { - handler := new(tokenPassThruHandler) - ts := httptest.NewServer(handler) - defer ts.Close() - - tsredirect := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.RequestURI == "/v2/" { - w.Header().Set("WWW-Authenticate", `Bearer realm="foorealm"`) - w.WriteHeader(401) - return - } - http.Redirect(w, r, ts.URL+r.URL.Path, http.StatusMovedPermanently) - })) - defer tsredirect.Close() - - testTokenPassThru(t, tsredirect) - - if !handler.reached { - t.Fatal("Handler not reached") - } - if handler.gotToken { - t.Fatal("Redirect should not forward Authorization header to another host") - } -} diff --git a/distribution/xfer/download.go b/distribution/xfer/download.go deleted file mode 100644 index 7545342212..0000000000 --- a/distribution/xfer/download.go +++ /dev/null @@ -1,452 +0,0 @@ -package xfer - -import ( - "errors" - "fmt" - "io" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" -) - -const maxDownloadAttempts = 5 - -// LayerDownloadManager figures out which layers need to be downloaded, then -// registers and downloads those, taking into account dependencies between -// layers. -type LayerDownloadManager struct { - layerStore layer.Store - tm TransferManager -} - -// SetConcurrency set the max concurrent downloads for each pull -func (ldm *LayerDownloadManager) SetConcurrency(concurrency int) { - ldm.tm.SetConcurrency(concurrency) -} - -// NewLayerDownloadManager returns a new LayerDownloadManager. -func NewLayerDownloadManager(layerStore layer.Store, concurrencyLimit int) *LayerDownloadManager { - return &LayerDownloadManager{ - layerStore: layerStore, - tm: NewTransferManager(concurrencyLimit), - } -} - -type downloadTransfer struct { - Transfer - - layerStore layer.Store - layer layer.Layer - err error -} - -// result returns the layer resulting from the download, if the download -// and registration were successful. -func (d *downloadTransfer) result() (layer.Layer, error) { - return d.layer, d.err -} - -// A DownloadDescriptor references a layer that may need to be downloaded. -type DownloadDescriptor interface { - // Key returns the key used to deduplicate downloads. - Key() string - // ID returns the ID for display purposes. - ID() string - // DiffID should return the DiffID for this layer, or an error - // if it is unknown (for example, if it has not been downloaded - // before). - DiffID() (layer.DiffID, error) - // Download is called to perform the download. - Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) - // Close is called when the download manager is finished with this - // descriptor and will not call Download again or read from the reader - // that Download returned. - Close() -} - -// DownloadDescriptorWithRegistered is a DownloadDescriptor that has an -// additional Registered method which gets called after a downloaded layer is -// registered. This allows the user of the download manager to know the DiffID -// of each registered layer. This method is called if a cast to -// DownloadDescriptorWithRegistered is successful. 
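// A minimal implementer, sketched after the mockDownloadDescriptor used
// in the tests below (the type and field names here are hypothetical):
//
//	type descriptor struct {
//		registeredDiffID layer.DiffID
//	}
//
//	func (d *descriptor) Registered(diffID layer.DiffID) {
//		d.registeredDiffID = diffID
//	}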
-type DownloadDescriptorWithRegistered interface { - DownloadDescriptor - Registered(diffID layer.DiffID) -} - -// Download is a blocking function which ensures the requested layers are -// present in the layer store. It uses the string returned by the Key method to -// deduplicate downloads. If a given layer is not already known to present in -// the layer store, and the key is not used by an in-progress download, the -// Download method is called to get the layer tar data. Layers are then -// registered in the appropriate order. The caller must call the returned -// release function once it is is done with the returned RootFS object. -func (ldm *LayerDownloadManager) Download(ctx context.Context, initialRootFS image.RootFS, layers []DownloadDescriptor, progressOutput progress.Output) (image.RootFS, func(), error) { - var ( - topLayer layer.Layer - topDownload *downloadTransfer - watcher *Watcher - missingLayer bool - transferKey = "" - downloadsByKey = make(map[string]*downloadTransfer) - ) - - rootFS := initialRootFS - for _, descriptor := range layers { - key := descriptor.Key() - transferKey += key - - if !missingLayer { - missingLayer = true - diffID, err := descriptor.DiffID() - if err == nil { - getRootFS := rootFS - getRootFS.Append(diffID) - l, err := ldm.layerStore.Get(getRootFS.ChainID()) - if err == nil { - // Layer already exists. - logrus.Debugf("Layer already exists: %s", descriptor.ID()) - progress.Update(progressOutput, descriptor.ID(), "Already exists") - if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStore, topLayer) - } - topLayer = l - missingLayer = false - rootFS.Append(diffID) - continue - } - } - } - - // Does this layer have the same data as a previous layer in - // the stack? If so, avoid downloading it more than once. - var topDownloadUncasted Transfer - if existingDownload, ok := downloadsByKey[key]; ok { - xferFunc := ldm.makeDownloadFuncFromDownload(descriptor, existingDownload, topDownload) - defer topDownload.Transfer.Release(watcher) - topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) - topDownload = topDownloadUncasted.(*downloadTransfer) - continue - } - - // Layer is not known to exist - download and register it. - progress.Update(progressOutput, descriptor.ID(), "Pulling fs layer") - - var xferFunc DoFunc - if topDownload != nil { - xferFunc = ldm.makeDownloadFunc(descriptor, "", topDownload) - defer topDownload.Transfer.Release(watcher) - } else { - xferFunc = ldm.makeDownloadFunc(descriptor, rootFS.ChainID(), nil) - } - topDownloadUncasted, watcher = ldm.tm.Transfer(transferKey, xferFunc, progressOutput) - topDownload = topDownloadUncasted.(*downloadTransfer) - downloadsByKey[key] = topDownload - } - - if topDownload == nil { - return rootFS, func() { - if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStore, topLayer) - } - }, nil - } - - // Won't be using the list built up so far - will generate it - // from downloaded layers instead. - rootFS.DiffIDs = []layer.DiffID{} - - defer func() { - if topLayer != nil { - layer.ReleaseAndLog(ldm.layerStore, topLayer) - } - }() - - select { - case <-ctx.Done(): - topDownload.Transfer.Release(watcher) - return rootFS, func() {}, ctx.Err() - case <-topDownload.Done(): - break - } - - l, err := topDownload.result() - if err != nil { - topDownload.Transfer.Release(watcher) - return rootFS, func() {}, err - } - - // Must do this exactly len(layers) times, so we don't include the - // base layer on Windows. 
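	// Editorial gloss: this is the same bookkeeping as on the push side.
	// The parent chain under l may be one layer longer than the download
	// list when a Windows base layer is present, so the walk is bounded by
	// len(layers) rather than running until l.Parent() == nil. Each
	// iteration prepends l.DiffID(), rebuilding rootFS.DiffIDs base first.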
- for range layers { - if l == nil { - topDownload.Transfer.Release(watcher) - return rootFS, func() {}, errors.New("internal error: too few parent layers") - } - rootFS.DiffIDs = append([]layer.DiffID{l.DiffID()}, rootFS.DiffIDs...) - l = l.Parent() - } - return rootFS, func() { topDownload.Transfer.Release(watcher) }, err -} - -// makeDownloadFunc returns a function that performs the layer download and -// registration. If parentDownload is non-nil, it waits for that download to -// complete before the registration step, and registers the downloaded data -// on top of parentDownload's resulting layer. Otherwise, it registers the -// layer on top of the ChainID given by parentLayer. -func (ldm *LayerDownloadManager) makeDownloadFunc(descriptor DownloadDescriptor, parentLayer layer.ChainID, parentDownload *downloadTransfer) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - d := &downloadTransfer{ - Transfer: NewTransfer(), - layerStore: ldm.layerStore, - } - - go func() { - defer func() { - close(progressChan) - }() - - progressOutput := progress.ChanOutput(progressChan) - - select { - case <-start: - default: - progress.Update(progressOutput, descriptor.ID(), "Waiting") - <-start - } - - if parentDownload != nil { - // Did the parent download already fail or get - // cancelled? - select { - case <-parentDownload.Done(): - _, err := parentDownload.result() - if err != nil { - d.err = err - return - } - default: - } - } - - var ( - downloadReader io.ReadCloser - size int64 - err error - retries int - ) - - defer descriptor.Close() - - for { - downloadReader, size, err = descriptor.Download(d.Transfer.Context(), progressOutput) - if err == nil { - break - } - - // If an error was returned because the context - // was cancelled, we shouldn't retry. 
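				// As in Push above, the select that follows is a non-blocking
				// ctx.Done() check: a cancelled transfer returns immediately
				// with the original error instead of consuming one of the
				// maxDownloadAttempts retries counted further down.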
- select { - case <-d.Transfer.Context().Done(): - d.err = err - return - default: - } - - retries++ - if _, isDNR := err.(DoNotRetry); isDNR || retries == maxDownloadAttempts { - logrus.Errorf("Download failed: %v", err) - d.err = err - return - } - - logrus.Errorf("Download failed, retrying: %v", err) - delay := retries * 5 - ticker := time.NewTicker(time.Second) - - selectLoop: - for { - progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) - select { - case <-ticker.C: - delay-- - if delay == 0 { - ticker.Stop() - break selectLoop - } - case <-d.Transfer.Context().Done(): - ticker.Stop() - d.err = errors.New("download cancelled during retry delay") - return - } - - } - } - - close(inactive) - - if parentDownload != nil { - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - downloadReader.Close() - return - case <-parentDownload.Done(): - } - - l, err := parentDownload.result() - if err != nil { - d.err = err - downloadReader.Close() - return - } - parentLayer = l.ChainID() - } - - reader := progress.NewProgressReader(ioutils.NewCancelReadCloser(d.Transfer.Context(), downloadReader), progressOutput, size, descriptor.ID(), "Extracting") - defer reader.Close() - - inflatedLayerData, err := archive.DecompressStream(reader) - if err != nil { - d.err = fmt.Errorf("could not get decompression stream: %v", err) - return - } - - var src distribution.Descriptor - if fs, ok := descriptor.(distribution.Describable); ok { - src = fs.Descriptor() - } - if ds, ok := d.layerStore.(layer.DescribableStore); ok { - d.layer, err = ds.RegisterWithDescriptor(inflatedLayerData, parentLayer, src) - } else { - d.layer, err = d.layerStore.Register(inflatedLayerData, parentLayer) - } - if err != nil { - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - default: - d.err = fmt.Errorf("failed to register layer: %v", err) - } - return - } - - progress.Update(progressOutput, descriptor.ID(), "Pull complete") - withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) - if hasRegistered { - withRegistered.Registered(d.layer.DiffID()) - } - - // Doesn't actually need to be its own goroutine, but - // done like this so we can defer close(c). - go func() { - <-d.Transfer.Released() - if d.layer != nil { - layer.ReleaseAndLog(d.layerStore, d.layer) - } - }() - }() - - return d - } -} - -// makeDownloadFuncFromDownload returns a function that performs the layer -// registration when the layer data is coming from an existing download. It -// waits for sourceDownload and parentDownload to complete, and then -// reregisters the data from sourceDownload's top layer on top of -// parentDownload. This function does not log progress output because it would -// interfere with the progress reporting for sourceDownload, which has the same -// Key. 
-func (ldm *LayerDownloadManager) makeDownloadFuncFromDownload(descriptor DownloadDescriptor, sourceDownload *downloadTransfer, parentDownload *downloadTransfer) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - d := &downloadTransfer{ - Transfer: NewTransfer(), - layerStore: ldm.layerStore, - } - - go func() { - defer func() { - close(progressChan) - }() - - <-start - - close(inactive) - - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - return - case <-parentDownload.Done(): - } - - l, err := parentDownload.result() - if err != nil { - d.err = err - return - } - parentLayer := l.ChainID() - - // sourceDownload should have already finished if - // parentDownload finished, but wait for it explicitly - // to be sure. - select { - case <-d.Transfer.Context().Done(): - d.err = errors.New("layer registration cancelled") - return - case <-sourceDownload.Done(): - } - - l, err = sourceDownload.result() - if err != nil { - d.err = err - return - } - - layerReader, err := l.TarStream() - if err != nil { - d.err = err - return - } - defer layerReader.Close() - - var src distribution.Descriptor - if fs, ok := l.(distribution.Describable); ok { - src = fs.Descriptor() - } - if ds, ok := d.layerStore.(layer.DescribableStore); ok { - d.layer, err = ds.RegisterWithDescriptor(layerReader, parentLayer, src) - } else { - d.layer, err = d.layerStore.Register(layerReader, parentLayer) - } - if err != nil { - d.err = fmt.Errorf("failed to register layer: %v", err) - return - } - - withRegistered, hasRegistered := descriptor.(DownloadDescriptorWithRegistered) - if hasRegistered { - withRegistered.Registered(d.layer.DiffID()) - } - - // Doesn't actually need to be its own goroutine, but - // done like this so we can defer close(c). 
- go func() { - <-d.Transfer.Released() - if d.layer != nil { - layer.ReleaseAndLog(d.layerStore, d.layer) - } - }() - }() - - return d - } -} diff --git a/distribution/xfer/download_test.go b/distribution/xfer/download_test.go deleted file mode 100644 index a1801eb07c..0000000000 --- a/distribution/xfer/download_test.go +++ /dev/null @@ -1,341 +0,0 @@ -package xfer - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "runtime" - "sync/atomic" - "testing" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" -) - -const maxDownloadConcurrency = 3 - -type mockLayer struct { - layerData bytes.Buffer - diffID layer.DiffID - chainID layer.ChainID - parent layer.Layer -} - -func (ml *mockLayer) TarStream() (io.ReadCloser, error) { - return ioutil.NopCloser(bytes.NewBuffer(ml.layerData.Bytes())), nil -} - -func (ml *mockLayer) ChainID() layer.ChainID { - return ml.chainID -} - -func (ml *mockLayer) DiffID() layer.DiffID { - return ml.diffID -} - -func (ml *mockLayer) Parent() layer.Layer { - return ml.parent -} - -func (ml *mockLayer) Size() (size int64, err error) { - return 0, nil -} - -func (ml *mockLayer) DiffSize() (size int64, err error) { - return 0, nil -} - -func (ml *mockLayer) Metadata() (map[string]string, error) { - return make(map[string]string), nil -} - -type mockLayerStore struct { - layers map[layer.ChainID]*mockLayer -} - -func createChainIDFromParent(parent layer.ChainID, dgsts ...layer.DiffID) layer.ChainID { - if len(dgsts) == 0 { - return parent - } - if parent == "" { - return createChainIDFromParent(layer.ChainID(dgsts[0]), dgsts[1:]...) - } - // H = "H(n-1) SHA256(n)" - dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) - return createChainIDFromParent(layer.ChainID(dgst), dgsts[1:]...) 
-} - -func (ls *mockLayerStore) Register(reader io.Reader, parentID layer.ChainID) (layer.Layer, error) { - return ls.RegisterWithDescriptor(reader, parentID, distribution.Descriptor{}) -} - -func (ls *mockLayerStore) RegisterWithDescriptor(reader io.Reader, parentID layer.ChainID, _ distribution.Descriptor) (layer.Layer, error) { - var ( - parent layer.Layer - err error - ) - - if parentID != "" { - parent, err = ls.Get(parentID) - if err != nil { - return nil, err - } - } - - l := &mockLayer{parent: parent} - _, err = l.layerData.ReadFrom(reader) - if err != nil { - return nil, err - } - l.diffID = layer.DiffID(digest.FromBytes(l.layerData.Bytes())) - l.chainID = createChainIDFromParent(parentID, l.diffID) - - ls.layers[l.chainID] = l - return l, nil -} - -func (ls *mockLayerStore) Get(chainID layer.ChainID) (layer.Layer, error) { - l, ok := ls.layers[chainID] - if !ok { - return nil, layer.ErrLayerDoesNotExist - } - return l, nil -} - -func (ls *mockLayerStore) Release(l layer.Layer) ([]layer.Metadata, error) { - return []layer.Metadata{}, nil -} -func (ls *mockLayerStore) CreateRWLayer(string, layer.ChainID, string, layer.MountInit, map[string]string) (layer.RWLayer, error) { - return nil, errors.New("not implemented") -} - -func (ls *mockLayerStore) GetRWLayer(string) (layer.RWLayer, error) { - return nil, errors.New("not implemented") -} - -func (ls *mockLayerStore) ReleaseRWLayer(layer.RWLayer) ([]layer.Metadata, error) { - return nil, errors.New("not implemented") -} -func (ls *mockLayerStore) GetMountID(string) (string, error) { - return "", errors.New("not implemented") -} - -func (ls *mockLayerStore) Cleanup() error { - return nil -} - -func (ls *mockLayerStore) DriverStatus() [][2]string { - return [][2]string{} -} - -func (ls *mockLayerStore) DriverName() string { - return "mock" -} - -type mockDownloadDescriptor struct { - currentDownloads *int32 - id string - diffID layer.DiffID - registeredDiffID layer.DiffID - expectedDiffID layer.DiffID - simulateRetries int -} - -// Key returns the key used to deduplicate downloads. -func (d *mockDownloadDescriptor) Key() string { - return d.id -} - -// ID returns the ID for display purposes. -func (d *mockDownloadDescriptor) ID() string { - return d.id -} - -// DiffID should return the DiffID for this layer, or an error -// if it is unknown (for example, if it has not been downloaded -// before). -func (d *mockDownloadDescriptor) DiffID() (layer.DiffID, error) { - if d.diffID != "" { - return d.diffID, nil - } - return "", errors.New("no diffID available") -} - -func (d *mockDownloadDescriptor) Registered(diffID layer.DiffID) { - d.registeredDiffID = diffID -} - -func (d *mockDownloadDescriptor) mockTarStream() io.ReadCloser { - // The mock implementation returns the ID repeated 5 times as a tar - // stream instead of actual tar data. The data is ignored except for - // computing IDs. - return ioutil.NopCloser(bytes.NewBuffer([]byte(d.id + d.id + d.id + d.id + d.id))) -} - -// Download is called to perform the download. -func (d *mockDownloadDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) { - if d.currentDownloads != nil { - defer atomic.AddInt32(d.currentDownloads, -1) - - if atomic.AddInt32(d.currentDownloads, 1) > maxDownloadConcurrency { - return nil, 0, errors.New("concurrency limit exceeded") - } - } - - // Sleep a bit to simulate a time-consuming download. 
- for i := int64(0); i <= 10; i++ { - select { - case <-ctx.Done(): - return nil, 0, ctx.Err() - case <-time.After(10 * time.Millisecond): - progressOutput.WriteProgress(progress.Progress{ID: d.ID(), Action: "Downloading", Current: i, Total: 10}) - } - } - - if d.simulateRetries != 0 { - d.simulateRetries-- - return nil, 0, errors.New("simulating retry") - } - - return d.mockTarStream(), 0, nil -} - -func (d *mockDownloadDescriptor) Close() { -} - -func downloadDescriptors(currentDownloads *int32) []DownloadDescriptor { - return []DownloadDescriptor{ - &mockDownloadDescriptor{ - currentDownloads: currentDownloads, - id: "id1", - expectedDiffID: layer.DiffID("sha256:68e2c75dc5c78ea9240689c60d7599766c213ae210434c53af18470ae8c53ec1"), - }, - &mockDownloadDescriptor{ - currentDownloads: currentDownloads, - id: "id2", - expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), - }, - &mockDownloadDescriptor{ - currentDownloads: currentDownloads, - id: "id3", - expectedDiffID: layer.DiffID("sha256:58745a8bbd669c25213e9de578c4da5c8ee1c836b3581432c2b50e38a6753300"), - }, - &mockDownloadDescriptor{ - currentDownloads: currentDownloads, - id: "id2", - expectedDiffID: layer.DiffID("sha256:64a636223116aa837973a5d9c2bdd17d9b204e4f95ac423e20e65dfbb3655473"), - }, - &mockDownloadDescriptor{ - currentDownloads: currentDownloads, - id: "id4", - expectedDiffID: layer.DiffID("sha256:0dfb5b9577716cc173e95af7c10289322c29a6453a1718addc00c0c5b1330936"), - simulateRetries: 1, - }, - &mockDownloadDescriptor{ - currentDownloads: currentDownloads, - id: "id5", - expectedDiffID: layer.DiffID("sha256:0a5f25fa1acbc647f6112a6276735d0fa01e4ee2aa7ec33015e337350e1ea23d"), - }, - } -} - -func TestSuccessfulDownload(t *testing.T) { - // TODO Windows: Fix this unit text - if runtime.GOOS == "windows" { - t.Skip("Needs fixing on Windows") - } - layerStore := &mockLayerStore{make(map[layer.ChainID]*mockLayer)} - ldm := NewLayerDownloadManager(layerStore, maxDownloadConcurrency) - - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - receivedProgress := make(map[string]progress.Progress) - - go func() { - for p := range progressChan { - receivedProgress[p.ID] = p - } - close(progressDone) - }() - - var currentDownloads int32 - descriptors := downloadDescriptors(¤tDownloads) - - firstDescriptor := descriptors[0].(*mockDownloadDescriptor) - - // Pre-register the first layer to simulate an already-existing layer - l, err := layerStore.Register(firstDescriptor.mockTarStream(), "") - if err != nil { - t.Fatal(err) - } - firstDescriptor.diffID = l.DiffID() - - rootFS, releaseFunc, err := ldm.Download(context.Background(), *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) - if err != nil { - t.Fatalf("download error: %v", err) - } - - releaseFunc() - - close(progressChan) - <-progressDone - - if len(rootFS.DiffIDs) != len(descriptors) { - t.Fatal("got wrong number of diffIDs in rootfs") - } - - for i, d := range descriptors { - descriptor := d.(*mockDownloadDescriptor) - - if descriptor.diffID != "" { - if receivedProgress[d.ID()].Action != "Already exists" { - t.Fatalf("did not get 'Already exists' message for %v", d.ID()) - } - } else if receivedProgress[d.ID()].Action != "Pull complete" { - t.Fatalf("did not get 'Pull complete' message for %v", d.ID()) - } - - if rootFS.DiffIDs[i] != descriptor.expectedDiffID { - t.Fatalf("rootFS item %d has the wrong diffID (expected: %v got: %v)", i, descriptor.expectedDiffID, rootFS.DiffIDs[i]) - } - 
- if descriptor.diffID == "" && descriptor.registeredDiffID != rootFS.DiffIDs[i] { - t.Fatal("diffID mismatch between rootFS and Registered callback") - } - } -} - -func TestCancelledDownload(t *testing.T) { - ldm := NewLayerDownloadManager(&mockLayerStore{make(map[layer.ChainID]*mockLayer)}, maxDownloadConcurrency) - - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - - go func() { - for range progressChan { - } - close(progressDone) - }() - - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - <-time.After(time.Millisecond) - cancel() - }() - - descriptors := downloadDescriptors(nil) - _, _, err := ldm.Download(ctx, *image.NewRootFS(), descriptors, progress.ChanOutput(progressChan)) - if err != context.Canceled { - t.Fatal("expected download to be cancelled") - } - - close(progressChan) - <-progressDone -} diff --git a/distribution/xfer/transfer.go b/distribution/xfer/transfer.go deleted file mode 100644 index 14f15660ac..0000000000 --- a/distribution/xfer/transfer.go +++ /dev/null @@ -1,401 +0,0 @@ -package xfer - -import ( - "runtime" - "sync" - - "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" -) - -// DoNotRetry is an error wrapper indicating that the error cannot be resolved -// with a retry. -type DoNotRetry struct { - Err error -} - -// Error returns the stringified representation of the encapsulated error. -func (e DoNotRetry) Error() string { - return e.Err.Error() -} - -// Watcher is returned by Watch and can be passed to Release to stop watching. -type Watcher struct { - // signalChan is used to signal to the watcher goroutine that - // new progress information is available, or that the transfer - // has finished. - signalChan chan struct{} - // releaseChan signals to the watcher goroutine that the watcher - // should be detached. - releaseChan chan struct{} - // running remains open as long as the watcher is watching the - // transfer. It gets closed if the transfer finishes or the - // watcher is detached. - running chan struct{} -} - -// Transfer represents an in-progress transfer. -type Transfer interface { - Watch(progressOutput progress.Output) *Watcher - Release(*Watcher) - Context() context.Context - Close() - Done() <-chan struct{} - Released() <-chan struct{} - Broadcast(masterProgressChan <-chan progress.Progress) -} - -type transfer struct { - mu sync.Mutex - - ctx context.Context - cancel context.CancelFunc - - // watchers keeps track of the goroutines monitoring progress output, - // indexed by the channels that release them. - watchers map[chan struct{}]*Watcher - - // lastProgress is the most recently received progress event. - lastProgress progress.Progress - // hasLastProgress is true when lastProgress has been set. - hasLastProgress bool - - // running remains open as long as the transfer is in progress. - running chan struct{} - // released stays open until all watchers release the transfer and - // the transfer is no longer tracked by the transfer manager. - released chan struct{} - - // broadcastDone is true if the master progress channel has closed. - broadcastDone bool - // closed is true if Close has been called. - closed bool - // broadcastSyncChan allows watchers to "ping" the broadcasting - // goroutine to wait for it to deplete its input channel. This ensures - // a detaching watcher won't miss an event that was sent before it - // started detaching. - broadcastSyncChan chan struct{} -} -
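The broadcastSyncChan mechanism above is the subtle part of this file: a detaching watcher "pings" the broadcaster, and the broadcaster only answers once its input channel is momentarily drained, so the watcher cannot miss an event sent before it started detaching. A minimal standalone sketch of the pattern (names are illustrative, not part of the package):

    package main

    import "fmt"

    // drainThenAck prefers draining input; it answers a sync ping only
    // when input is momentarily empty, mirroring Broadcast above.
    func drainThenAck(input <-chan int, sync <-chan struct{}, done chan<- struct{}) {
        defer close(done)
        for {
            select {
            case v, ok := <-input:
                if !ok {
                    return
                }
                fmt.Println("handled", v)
            default:
                select {
                case <-sync: // reachable only once input is drained
                case v, ok := <-input:
                    if !ok {
                        return
                    }
                    fmt.Println("handled", v)
                }
            }
        }
    }

    func main() {
        input := make(chan int, 3)
        sync := make(chan struct{})
        done := make(chan struct{})
        input <- 1
        input <- 2
        input <- 3
        go drainThenAck(input, sync, done)
        sync <- struct{}{} // unblocks only after 1, 2 and 3 were handled
        close(input)
        <-done
    }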
-// NewTransfer creates a new transfer. -func NewTransfer() Transfer { - t := &transfer{ - watchers: make(map[chan struct{}]*Watcher), - running: make(chan struct{}), - released: make(chan struct{}), - broadcastSyncChan: make(chan struct{}), - } - - // This uses context.Background instead of a caller-supplied context - // so that a transfer won't be cancelled automatically if the client - // which requested it is ^C'd (there could be other viewers). - t.ctx, t.cancel = context.WithCancel(context.Background()) - - return t -} - -// Broadcast copies the progress and error output to all viewers. -func (t *transfer) Broadcast(masterProgressChan <-chan progress.Progress) { - for { - var ( - p progress.Progress - ok bool - ) - select { - case p, ok = <-masterProgressChan: - default: - // We've depleted the channel, so now we can handle - // reads on broadcastSyncChan to let detaching watchers - // know we're caught up. - select { - case <-t.broadcastSyncChan: - continue - case p, ok = <-masterProgressChan: - } - } - - t.mu.Lock() - if ok { - t.lastProgress = p - t.hasLastProgress = true - for _, w := range t.watchers { - select { - case w.signalChan <- struct{}{}: - default: - } - } - } else { - t.broadcastDone = true - } - t.mu.Unlock() - if !ok { - close(t.running) - return - } - } -} - -// Watch adds a watcher to the transfer. The supplied channel gets progress -// updates and is closed when the transfer finishes. -func (t *transfer) Watch(progressOutput progress.Output) *Watcher { - t.mu.Lock() - defer t.mu.Unlock() - - w := &Watcher{ - releaseChan: make(chan struct{}), - signalChan: make(chan struct{}), - running: make(chan struct{}), - } - - t.watchers[w.releaseChan] = w - - if t.broadcastDone { - close(w.running) - return w - } - - go func() { - defer func() { - close(w.running) - }() - var ( - done bool - lastWritten progress.Progress - hasLastWritten bool - ) - for { - t.mu.Lock() - hasLastProgress := t.hasLastProgress - lastProgress := t.lastProgress - t.mu.Unlock() - - // Make sure we don't write the last progress item - // twice. - if hasLastProgress && (!done || !hasLastWritten || lastProgress != lastWritten) { - progressOutput.WriteProgress(lastProgress) - lastWritten = lastProgress - hasLastWritten = true - } - - if done { - return - } - - select { - case <-w.signalChan: - case <-w.releaseChan: - done = true - // Since the watcher is going to detach, make - // sure the broadcaster is caught up so we - // don't miss anything. - select { - case t.broadcastSyncChan <- struct{}{}: - case <-t.running: - } - case <-t.running: - done = true - } - } - }() - - return w -} - -// Release is the inverse of Watch, indicating that the watcher no longer wants -// to be notified about the progress of the transfer. All calls to Watch must -// be paired with later calls to Release so that the lifecycle of the transfer -// is properly managed. -func (t *transfer) Release(watcher *Watcher) { - t.mu.Lock() - delete(t.watchers, watcher.releaseChan) - - if len(t.watchers) == 0 { - if t.closed { - // released may have been closed already if all - // watchers were released, then another one was added - // while waiting for a previous watcher goroutine to - // finish. - select { - case <-t.released: - default: - close(t.released) - } - } else { - t.cancel() - } - } - t.mu.Unlock() - - close(watcher.releaseChan) - // Block until the watcher goroutine completes - <-watcher.running -} -
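Since every Watch must be paired with a Release, callers typically bracket the two around waiting for completion. A sketch of the intended call pattern, assuming xfer came from a TransferManager and out is any progress.Output; watchUntilDone is a hypothetical in-package helper:

    // watchUntilDone shows the required pairing; without the Release,
    // the transfer could never be cancelled or cleaned up once all
    // other watchers are gone.
    func watchUntilDone(xfer Transfer, out progress.Output) {
        w := xfer.Watch(out)
        defer xfer.Release(w)
        <-xfer.Done() // closed when the transfer completes or is cancelled
    }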
-// Done returns a channel which is closed if the transfer completes or is -// cancelled. Note that having 0 watchers causes a transfer to be cancelled. -func (t *transfer) Done() <-chan struct{} { - // Note that this doesn't return t.ctx.Done() because that channel will - // be closed the moment Cancel is called, and we need to return a - // channel that blocks until a cancellation is actually acknowledged by - // the transfer function. - return t.running -} - -// Released returns a channel which is closed once all watchers release the -// transfer AND the transfer is no longer tracked by the transfer manager. -func (t *transfer) Released() <-chan struct{} { - return t.released -} - -// Context returns the context associated with the transfer. -func (t *transfer) Context() context.Context { - return t.ctx -} - -// Close is called by the transfer manager when the transfer is no longer -// being tracked. -func (t *transfer) Close() { - t.mu.Lock() - t.closed = true - if len(t.watchers) == 0 { - close(t.released) - } - t.mu.Unlock() -} - -// DoFunc is a function called by the transfer manager to actually perform -// a transfer. It should be non-blocking. It should wait until the start channel -// is closed before transferring any data. If the function closes inactive, that -// signals to the transfer manager that the job is no longer actively moving -// data - for example, it may be waiting for a dependent transfer to finish. -// This prevents it from taking up a slot. -type DoFunc func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer - -// TransferManager is used by LayerDownloadManager and LayerUploadManager to -// schedule and deduplicate transfers. It is up to the TransferManager -// implementation to make the scheduling and concurrency decisions. -type TransferManager interface { - // Transfer checks if a transfer with the given key is in progress. If - // so, it returns progress and error output from that transfer. - // Otherwise, it will call xferFunc to initiate the transfer. - Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) - // SetConcurrency sets the concurrencyLimit so that it can be adjusted on daemon reload. - SetConcurrency(concurrency int) -} - -type transferManager struct { - mu sync.Mutex - - concurrencyLimit int - activeTransfers int - transfers map[string]Transfer - waitingTransfers []chan struct{} -} - -// NewTransferManager returns a new TransferManager. -func NewTransferManager(concurrencyLimit int) TransferManager { - return &transferManager{ - concurrencyLimit: concurrencyLimit, - transfers: make(map[string]Transfer), - } -} - -// SetConcurrency sets the concurrencyLimit. -func (tm *transferManager) SetConcurrency(concurrency int) { - tm.mu.Lock() - tm.concurrencyLimit = concurrency - tm.mu.Unlock() -} -
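The DoFunc contract described above (return immediately, wait for start before moving data, and always close progressChan when finished) is easiest to see in a minimal implementation. A sketch using only names defined in this package; the body is hypothetical:

    // exampleDoFunc performs one unit of "work" once the manager
    // grants it a concurrency slot.
    func exampleDoFunc(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer {
        t := NewTransfer()
        go func() {
            // Closing progressChan is what ends the transfer: Broadcast
            // sees the close and closes the transfer's running channel.
            defer close(progressChan)
            <-start // wait for a concurrency slot
            // Closing inactive here instead would hand the slot back
            // while waiting on some dependency.
            progressChan <- progress.Progress{ID: "example", Action: "working", Current: 1, Total: 1}
        }()
        return t
    }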
-// Transfer checks if a transfer matching the given key is in progress. If not, -// it starts one by calling xferFunc. The caller supplies a channel which -// receives progress output from the transfer. -func (tm *transferManager) Transfer(key string, xferFunc DoFunc, progressOutput progress.Output) (Transfer, *Watcher) { - tm.mu.Lock() - defer tm.mu.Unlock() - - for { - xfer, present := tm.transfers[key] - if !present { - break - } - // Transfer is already in progress. - watcher := xfer.Watch(progressOutput) - - select { - case <-xfer.Context().Done(): - // We don't want to watch a transfer that has been cancelled. - // Wait for it to be removed from the map and try again. - xfer.Release(watcher) - tm.mu.Unlock() - // The goroutine that removes this transfer from the - // map is also waiting for xfer.Done(), so yield to it. - // This could be avoided by adding a Closed method - // to Transfer to allow explicitly waiting for it to be - // removed from the map, but forcing a scheduling round in - // this very rare case seems better than bloating the - // interface definition. - runtime.Gosched() - <-xfer.Done() - tm.mu.Lock() - default: - return xfer, watcher - } - } - - start := make(chan struct{}) - inactive := make(chan struct{}) - - if tm.concurrencyLimit == 0 || tm.activeTransfers < tm.concurrencyLimit { - close(start) - tm.activeTransfers++ - } else { - tm.waitingTransfers = append(tm.waitingTransfers, start) - } - - masterProgressChan := make(chan progress.Progress) - xfer := xferFunc(masterProgressChan, start, inactive) - watcher := xfer.Watch(progressOutput) - go xfer.Broadcast(masterProgressChan) - tm.transfers[key] = xfer - - // When the transfer is finished, remove from the map. - go func() { - for { - select { - case <-inactive: - tm.mu.Lock() - tm.inactivate(start) - tm.mu.Unlock() - inactive = nil - case <-xfer.Done(): - tm.mu.Lock() - if inactive != nil { - tm.inactivate(start) - } - delete(tm.transfers, key) - tm.mu.Unlock() - xfer.Close() - return - } - } - }() - - return xfer, watcher -} - -func (tm *transferManager) inactivate(start chan struct{}) { - // If the transfer was started, remove it from the activeTransfers - // count. - select { - case <-start: - // Start next transfer if any are waiting - if len(tm.waitingTransfers) != 0 { - close(tm.waitingTransfers[0]) - tm.waitingTransfers = tm.waitingTransfers[1:] - } else { - tm.activeTransfers-- - } - default: - } -} diff --git a/distribution/xfer/transfer_test.go b/distribution/xfer/transfer_test.go deleted file mode 100644 index 6c50ce3524..0000000000 --- a/distribution/xfer/transfer_test.go +++ /dev/null @@ -1,410 +0,0 @@ -package xfer - -import ( - "sync/atomic" - "testing" - "time" - - "github.com/docker/docker/pkg/progress" -) - -func TestTransfer(t *testing.T) { - makeXferFunc := func(id string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - select { - case <-start: - default: - t.Fatalf("transfer function not started even though concurrency limit not reached") - } - - xfer := NewTransfer() - go func() { - for i := 0; i <= 10; i++ { - progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} - time.Sleep(10 * time.Millisecond) - } - close(progressChan) - }() - return xfer - } - } - - tm := NewTransferManager(5) - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - receivedProgress := make(map[string]int64) - - go func() { - for p := range progressChan { - val, present := receivedProgress[p.ID] - if present && p.Current <= val { - t.Fatalf("got unexpected progress value: %d (expected %d)", p.Current, val+1) - } - receivedProgress[p.ID] = p.Current - } - close(progressDone) - }() - - // Start a few transfers - ids := []string{"id1", "id2", "id3"} - xfers := make([]Transfer, len(ids)) - watchers := make([]*Watcher, len(ids)) - for i, id := range ids { - xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) - } - - for i, xfer := range xfers { - <-xfer.Done() - xfer.Release(watchers[i]) - } - close(progressChan) - <-progressDone - - for _, id := range ids { - if receivedProgress[id] != 10 { - t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) - } - } -} - -func TestConcurrencyLimit(t *testing.T) { - concurrencyLimit := 3 - var runningJobs
int32 - - makeXferFunc := func(id string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - xfer := NewTransfer() - go func() { - <-start - totalJobs := atomic.AddInt32(&runningJobs, 1) - if int(totalJobs) > concurrencyLimit { - t.Fatalf("too many jobs running") - } - for i := 0; i <= 10; i++ { - progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} - time.Sleep(10 * time.Millisecond) - } - atomic.AddInt32(&runningJobs, -1) - close(progressChan) - }() - return xfer - } - } - - tm := NewTransferManager(concurrencyLimit) - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - receivedProgress := make(map[string]int64) - - go func() { - for p := range progressChan { - receivedProgress[p.ID] = p.Current - } - close(progressDone) - }() - - // Start more transfers than the concurrency limit - ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} - xfers := make([]Transfer, len(ids)) - watchers := make([]*Watcher, len(ids)) - for i, id := range ids { - xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) - } - - for i, xfer := range xfers { - <-xfer.Done() - xfer.Release(watchers[i]) - } - close(progressChan) - <-progressDone - - for _, id := range ids { - if receivedProgress[id] != 10 { - t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) - } - } -} - -func TestInactiveJobs(t *testing.T) { - concurrencyLimit := 3 - var runningJobs int32 - testDone := make(chan struct{}) - - makeXferFunc := func(id string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - xfer := NewTransfer() - go func() { - <-start - totalJobs := atomic.AddInt32(&runningJobs, 1) - if int(totalJobs) > concurrencyLimit { - t.Fatalf("too many jobs running") - } - for i := 0; i <= 10; i++ { - progressChan <- progress.Progress{ID: id, Action: "testing", Current: int64(i), Total: 10} - time.Sleep(10 * time.Millisecond) - } - atomic.AddInt32(&runningJobs, -1) - close(inactive) - <-testDone - close(progressChan) - }() - return xfer - } - } - - tm := NewTransferManager(concurrencyLimit) - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - receivedProgress := make(map[string]int64) - - go func() { - for p := range progressChan { - receivedProgress[p.ID] = p.Current - } - close(progressDone) - }() - - // Start more transfers than the concurrency limit - ids := []string{"id1", "id2", "id3", "id4", "id5", "id6", "id7", "id8"} - xfers := make([]Transfer, len(ids)) - watchers := make([]*Watcher, len(ids)) - for i, id := range ids { - xfers[i], watchers[i] = tm.Transfer(id, makeXferFunc(id), progress.ChanOutput(progressChan)) - } - - close(testDone) - for i, xfer := range xfers { - <-xfer.Done() - xfer.Release(watchers[i]) - } - close(progressChan) - <-progressDone - - for _, id := range ids { - if receivedProgress[id] != 10 { - t.Fatalf("final progress value %d instead of 10", receivedProgress[id]) - } - } -} - -func TestWatchRelease(t *testing.T) { - ready := make(chan struct{}) - - makeXferFunc := func(id string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - xfer := NewTransfer() - go func() { - defer func() { - close(progressChan) - }() - <-ready - for i := int64(0); ; i++ { - select { - case <-time.After(10 * time.Millisecond): - case 
<-xfer.Context().Done(): - return - } - progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} - } - }() - return xfer - } - } - - tm := NewTransferManager(5) - - type watcherInfo struct { - watcher *Watcher - progressChan chan progress.Progress - progressDone chan struct{} - receivedFirstProgress chan struct{} - } - - progressConsumer := func(w watcherInfo) { - first := true - for range w.progressChan { - if first { - close(w.receivedFirstProgress) - } - first = false - } - close(w.progressDone) - } - - // Start a transfer - watchers := make([]watcherInfo, 5) - var xfer Transfer - watchers[0].progressChan = make(chan progress.Progress) - watchers[0].progressDone = make(chan struct{}) - watchers[0].receivedFirstProgress = make(chan struct{}) - xfer, watchers[0].watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(watchers[0].progressChan)) - go progressConsumer(watchers[0]) - - // Give it multiple watchers - for i := 1; i != len(watchers); i++ { - watchers[i].progressChan = make(chan progress.Progress) - watchers[i].progressDone = make(chan struct{}) - watchers[i].receivedFirstProgress = make(chan struct{}) - watchers[i].watcher = xfer.Watch(progress.ChanOutput(watchers[i].progressChan)) - go progressConsumer(watchers[i]) - } - - // Now that the watchers are set up, allow the transfer goroutine to - // proceed. - close(ready) - - // Confirm that each watcher gets progress output. - for _, w := range watchers { - <-w.receivedFirstProgress - } - - // Release one watcher every 5ms - for _, w := range watchers { - xfer.Release(w.watcher) - <-time.After(5 * time.Millisecond) - } - - // Now that all watchers have been released, Released() should - // return a closed channel. - <-xfer.Released() - - // Done() should return a closed channel because the xfer func returned - // due to cancellation. - <-xfer.Done() - - for _, w := range watchers { - close(w.progressChan) - <-w.progressDone - } -} - -func TestWatchFinishedTransfer(t *testing.T) { - makeXferFunc := func(id string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - xfer := NewTransfer() - go func() { - // Finish immediately - close(progressChan) - }() - return xfer - } - } - - tm := NewTransferManager(5) - - // Start a transfer - watchers := make([]*Watcher, 3) - var xfer Transfer - xfer, watchers[0] = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(make(chan progress.Progress))) - - // Give it a watcher immediately - watchers[1] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) - - // Wait for the transfer to complete - <-xfer.Done() - - // Set up another watcher - watchers[2] = xfer.Watch(progress.ChanOutput(make(chan progress.Progress))) - - // Release the watchers - for _, w := range watchers { - xfer.Release(w) - } - - // Now that all watchers have been released, Released() should - // return a closed channel. 
- <-xfer.Released() -} - -func TestDuplicateTransfer(t *testing.T) { - ready := make(chan struct{}) - - var xferFuncCalls int32 - - makeXferFunc := func(id string) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - atomic.AddInt32(&xferFuncCalls, 1) - xfer := NewTransfer() - go func() { - defer func() { - close(progressChan) - }() - <-ready - for i := int64(0); ; i++ { - select { - case <-time.After(10 * time.Millisecond): - case <-xfer.Context().Done(): - return - } - progressChan <- progress.Progress{ID: id, Action: "testing", Current: i, Total: 10} - } - }() - return xfer - } - } - - tm := NewTransferManager(5) - - type transferInfo struct { - xfer Transfer - watcher *Watcher - progressChan chan progress.Progress - progressDone chan struct{} - receivedFirstProgress chan struct{} - } - - progressConsumer := func(t transferInfo) { - first := true - for range t.progressChan { - if first { - close(t.receivedFirstProgress) - } - first = false - } - close(t.progressDone) - } - - // Try to start multiple transfers with the same ID - transfers := make([]transferInfo, 5) - for i := range transfers { - t := &transfers[i] - t.progressChan = make(chan progress.Progress) - t.progressDone = make(chan struct{}) - t.receivedFirstProgress = make(chan struct{}) - t.xfer, t.watcher = tm.Transfer("id1", makeXferFunc("id1"), progress.ChanOutput(t.progressChan)) - go progressConsumer(*t) - } - - // Allow the transfer goroutine to proceed. - close(ready) - - // Confirm that each watcher gets progress output. - for _, t := range transfers { - <-t.receivedFirstProgress - } - - // Confirm that the transfer function was called exactly once. - if xferFuncCalls != 1 { - t.Fatal("transfer function wasn't called exactly once") - } - - // Release one watcher every 5ms - for _, t := range transfers { - t.xfer.Release(t.watcher) - <-time.After(5 * time.Millisecond) - } - - for _, t := range transfers { - // Now that all watchers have been released, Released() should - // return a closed channel. - <-t.xfer.Released() - // Done() should return a closed channel because the xfer func returned - // due to cancellation. - <-t.xfer.Done() - } - - for _, t := range transfers { - close(t.progressChan) - <-t.progressDone - } -} diff --git a/distribution/xfer/upload.go b/distribution/xfer/upload.go deleted file mode 100644 index ad3398369c..0000000000 --- a/distribution/xfer/upload.go +++ /dev/null @@ -1,168 +0,0 @@ -package xfer - -import ( - "errors" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" -) - -const maxUploadAttempts = 5 - -// LayerUploadManager provides task management and progress reporting for -// uploads. -type LayerUploadManager struct { - tm TransferManager -} - -// SetConcurrency sets the max concurrent uploads for each push. -func (lum *LayerUploadManager) SetConcurrency(concurrency int) { - lum.tm.SetConcurrency(concurrency) -} - -// NewLayerUploadManager returns a new LayerUploadManager. -func NewLayerUploadManager(concurrencyLimit int) *LayerUploadManager { - return &LayerUploadManager{ - tm: NewTransferManager(concurrencyLimit), - } -} - -type uploadTransfer struct { - Transfer - - remoteDescriptor distribution.Descriptor - err error -} - -// An UploadDescriptor references a layer that may need to be uploaded. -type UploadDescriptor interface { - // Key returns the key used to deduplicate uploads.
- Key() string - // ID returns the ID for display purposes. - ID() string - // DiffID should return the DiffID for this layer. - DiffID() layer.DiffID - // Upload is called to perform the upload. - Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) - // SetRemoteDescriptor provides the distribution.Descriptor that was - // returned by Upload. This descriptor is not to be confused with - // the UploadDescriptor interface, which is used for internally - // identifying layers that are being uploaded. - SetRemoteDescriptor(descriptor distribution.Descriptor) -} - -// Upload is a blocking function which ensures the listed layers are present on -// the remote registry. It uses the string returned by the Key method to -// deduplicate uploads. -func (lum *LayerUploadManager) Upload(ctx context.Context, layers []UploadDescriptor, progressOutput progress.Output) error { - var ( - uploads []*uploadTransfer - dedupDescriptors = make(map[string]*uploadTransfer) - ) - - for _, descriptor := range layers { - progress.Update(progressOutput, descriptor.ID(), "Preparing") - - key := descriptor.Key() - if _, present := dedupDescriptors[key]; present { - continue - } - - xferFunc := lum.makeUploadFunc(descriptor) - upload, watcher := lum.tm.Transfer(descriptor.Key(), xferFunc, progressOutput) - defer upload.Release(watcher) - uploads = append(uploads, upload.(*uploadTransfer)) - dedupDescriptors[key] = upload.(*uploadTransfer) - } - - for _, upload := range uploads { - select { - case <-ctx.Done(): - return ctx.Err() - case <-upload.Transfer.Done(): - if upload.err != nil { - return upload.err - } - } - } - for _, l := range layers { - l.SetRemoteDescriptor(dedupDescriptors[l.Key()].remoteDescriptor) - } - - return nil -} -
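A sketch of how a push drives the blocking Upload call above; pushLayers and its arguments are hypothetical, but the sequencing is the one the code expects:

    func pushLayers(ctx context.Context, lum *LayerUploadManager, descs []UploadDescriptor, out progress.Output) error {
        // Blocks until every layer is on the registry (deduplicated by
        // Key) or until the first hard failure or cancellation.
        if err := lum.Upload(ctx, descs, out); err != nil {
            return err
        }
        // On success, each descriptor has had SetRemoteDescriptor
        // called, so the manifest can now be assembled and pushed.
        return nil
    }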
-func (lum *LayerUploadManager) makeUploadFunc(descriptor UploadDescriptor) DoFunc { - return func(progressChan chan<- progress.Progress, start <-chan struct{}, inactive chan<- struct{}) Transfer { - u := &uploadTransfer{ - Transfer: NewTransfer(), - } - - go func() { - defer func() { - close(progressChan) - }() - - progressOutput := progress.ChanOutput(progressChan) - - select { - case <-start: - default: - progress.Update(progressOutput, descriptor.ID(), "Waiting") - <-start - } - - retries := 0 - for { - remoteDescriptor, err := descriptor.Upload(u.Transfer.Context(), progressOutput) - if err == nil { - u.remoteDescriptor = remoteDescriptor - break - } - - // If an error was returned because the context - // was cancelled, we shouldn't retry. - select { - case <-u.Transfer.Context().Done(): - u.err = err - return - default: - } - - retries++ - if _, isDNR := err.(DoNotRetry); isDNR || retries == maxUploadAttempts { - logrus.Errorf("Upload failed: %v", err) - u.err = err - return - } - - logrus.Errorf("Upload failed, retrying: %v", err) - delay := retries * 5 - ticker := time.NewTicker(time.Second) - - selectLoop: - for { - progress.Updatef(progressOutput, descriptor.ID(), "Retrying in %d second%s", delay, (map[bool]string{true: "s"})[delay != 1]) - select { - case <-ticker.C: - delay-- - if delay == 0 { - ticker.Stop() - break selectLoop - } - case <-u.Transfer.Context().Done(): - ticker.Stop() - u.err = errors.New("upload cancelled during retry delay") - return - } - } - } - }() - - return u - } -}
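The retry delay in makeUploadFunc grows linearly (5s, 10s, 15s, per the delay := retries * 5 line) and is burned down one tick per second so the "Retrying in ..." message stays current. A condensed sketch of just that countdown, with the cancellation case left out; countdown is a hypothetical distillation of the selectLoop above:

    func countdown(seconds int, update func(remaining int)) {
        ticker := time.NewTicker(time.Second)
        defer ticker.Stop()
        for remaining := seconds; remaining > 0; remaining-- {
            update(remaining) // e.g. refresh the "Retrying in Ns" progress line
            <-ticker.C
        }
    }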
diff --git a/distribution/xfer/upload_test.go b/distribution/xfer/upload_test.go deleted file mode 100644 index 16bd187336..0000000000 --- a/distribution/xfer/upload_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package xfer - -import ( - "errors" - "sync/atomic" - "testing" - "time" - - "github.com/docker/distribution" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/progress" - "golang.org/x/net/context" -) - -const maxUploadConcurrency = 3 - -type mockUploadDescriptor struct { - currentUploads *int32 - diffID layer.DiffID - simulateRetries int -} - -// Key returns the key used to deduplicate uploads. -func (u *mockUploadDescriptor) Key() string { - return u.diffID.String() -} - -// ID returns the ID for display purposes. -func (u *mockUploadDescriptor) ID() string { - return u.diffID.String() -} - -// DiffID should return the DiffID for this layer. -func (u *mockUploadDescriptor) DiffID() layer.DiffID { - return u.diffID -} - -// SetRemoteDescriptor is not used in the mock. -func (u *mockUploadDescriptor) SetRemoteDescriptor(remoteDescriptor distribution.Descriptor) { -} - -// Upload is called to perform the upload. -func (u *mockUploadDescriptor) Upload(ctx context.Context, progressOutput progress.Output) (distribution.Descriptor, error) { - if u.currentUploads != nil { - defer atomic.AddInt32(u.currentUploads, -1) - - if atomic.AddInt32(u.currentUploads, 1) > maxUploadConcurrency { - return distribution.Descriptor{}, errors.New("concurrency limit exceeded") - } - } - - // Sleep a bit to simulate a time-consuming upload. - for i := int64(0); i <= 10; i++ { - select { - case <-ctx.Done(): - return distribution.Descriptor{}, ctx.Err() - case <-time.After(10 * time.Millisecond): - progressOutput.WriteProgress(progress.Progress{ID: u.ID(), Current: i, Total: 10}) - } - } - - if u.simulateRetries != 0 { - u.simulateRetries-- - return distribution.Descriptor{}, errors.New("simulating retry") - } - - return distribution.Descriptor{}, nil -} - -func uploadDescriptors(currentUploads *int32) []UploadDescriptor { - return []UploadDescriptor{ - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:1515325234325236634634608943609283523908626098235490238423902343"), 0}, - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:6929356290463485374960346430698374523437683470934634534953453453"), 0}, - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:cbbf2f9a99b47fc460d422812b6a5adff7dfee951d8fa2e4a98caa0382cfbdbf"), 0}, - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:8159352387436803946235346346368745389534789534897538734598734987"), 1}, - &mockUploadDescriptor{currentUploads, layer.DiffID("sha256:4637863963478346897346987346987346789346789364879364897364987346"), 0}, - } -} - -func TestSuccessfulUpload(t *testing.T) { - lum := NewLayerUploadManager(maxUploadConcurrency) - - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - receivedProgress := make(map[string]int64) - - go func() { - for p := range progressChan { - receivedProgress[p.ID] = p.Current - } - close(progressDone) - }() - - var currentUploads int32 - descriptors := uploadDescriptors(&currentUploads) - - err := lum.Upload(context.Background(), descriptors, progress.ChanOutput(progressChan)) - if err != nil { - t.Fatalf("upload error: %v", err) - } - - close(progressChan) - <-progressDone -} - -func TestCancelledUpload(t *testing.T) { - lum := NewLayerUploadManager(maxUploadConcurrency) - - progressChan := make(chan progress.Progress) - progressDone := make(chan struct{}) - - go func() { - for range progressChan { - } - close(progressDone) - }() - - ctx, cancel := context.WithCancel(context.Background()) - - go func() { - <-time.After(time.Millisecond) - cancel() - }() - - descriptors := uploadDescriptors(nil) - err := lum.Upload(ctx, descriptors, progress.ChanOutput(progressChan)) - if err != context.Canceled { - t.Fatal("expected upload to be cancelled") - } - - close(progressChan) - <-progressDone -} diff --git a/dockerversion/useragent.go b/dockerversion/useragent.go deleted file mode 100644 index d2a891c4d6..0000000000 --- a/dockerversion/useragent.go +++ /dev/null @@ -1,74 +0,0 @@ -package dockerversion - -import ( - "fmt" - "runtime" - - "github.com/docker/docker/api/server/httputils" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/docker/docker/pkg/useragent" - "golang.org/x/net/context" -)
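The functions below compose the Docker UA and splice in an upstream client's UA, escaping the characters `();\` so the upstream value cannot break out of the UpstreamClient(...) wrapper. A hypothetical example of the escaping:

    // escapeStr(`py(1.0);x`, `();\`) == `py\(1.0\)\;x`
    // so the composed header would end in: UpstreamClient(py\(1.0\)\;x)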
-// DockerUserAgent is the User-Agent the Docker client uses to identify itself. -// In accordance with RFC 7231 (5.5.3), it is of the form: -// [docker client's UA] UpstreamClient([upstream client's UA]) -func DockerUserAgent(ctx context.Context) string { - httpVersion := make([]useragent.VersionInfo, 0, 6) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "docker", Version: Version}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "go", Version: runtime.Version()}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "git-commit", Version: GitCommit}) - if kernelVersion, err := kernel.GetKernelVersion(); err == nil { - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "kernel", Version: kernelVersion.String()}) - } - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "os", Version: runtime.GOOS}) - httpVersion = append(httpVersion, useragent.VersionInfo{Name: "arch", Version: runtime.GOARCH}) - - dockerUA := useragent.AppendVersions("", httpVersion...) - upstreamUA := getUserAgentFromContext(ctx) - if len(upstreamUA) > 0 { - ret := insertUpstreamUserAgent(upstreamUA, dockerUA) - return ret - } - return dockerUA -} - -// getUserAgentFromContext returns the previously saved user-agent stored in ctx, if one exists -func getUserAgentFromContext(ctx context.Context) string { - var upstreamUA string - if ctx != nil { - var ki interface{} = ctx.Value(httputils.UAStringKey) - if ki != nil { - upstreamUA = ctx.Value(httputils.UAStringKey).(string) - } - } - return upstreamUA -} - -// escapeStr returns s with every rune in charsToEscape escaped by a backslash -func escapeStr(s string, charsToEscape string) string { - var ret string - for _, currRune := range s { - appended := false - for _, escapeableRune := range charsToEscape { - if currRune == escapeableRune { - ret += `\` + string(currRune) - appended = true - break - } - } - if !appended { - ret += string(currRune) - } - } - return ret -} - -// insertUpstreamUserAgent adds the upstream client useragent to create a user-agent -// string of the form: -// $dockerUA UpstreamClient($upstreamUA) -func insertUpstreamUserAgent(upstreamUA string, dockerUA string) string { - charsToEscape := `();\` - upstreamUAEscaped := escapeStr(upstreamUA, charsToEscape) - return fmt.Sprintf("%s UpstreamClient(%s)", dockerUA, upstreamUAEscaped) -} diff --git a/dockerversion/version_lib.go b/dockerversion/version_lib.go deleted file mode 100644 index 6644bce200..0000000000 --- a/dockerversion/version_lib.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !autogen - -// Package dockerversion is auto-generated at build-time -package dockerversion - -// Default build-time variable for library-import. -// This file is overridden on build with build-time information.
-const ( - GitCommit string = "library-import" - Version string = "library-import" - BuildTime string = "library-import" - IAmStatic string = "library-import" -) diff --git a/docs/.gitignore b/engine/.gitignore similarity index 100% rename from docs/.gitignore rename to engine/.gitignore diff --git a/docs/Dockerfile b/engine/Dockerfile similarity index 100% rename from docs/Dockerfile rename to engine/Dockerfile diff --git a/docs/Makefile b/engine/Makefile similarity index 100% rename from docs/Makefile rename to engine/Makefile diff --git a/docs/README.md b/engine/README.md similarity index 100% rename from docs/README.md rename to engine/README.md diff --git a/docs/admin/ambassador_pattern_linking.md b/engine/admin/ambassador_pattern_linking.md similarity index 100% rename from docs/admin/ambassador_pattern_linking.md rename to engine/admin/ambassador_pattern_linking.md diff --git a/docs/admin/b2d_volume_images/add_cd.png b/engine/admin/b2d_volume_images/add_cd.png similarity index 100% rename from docs/admin/b2d_volume_images/add_cd.png rename to engine/admin/b2d_volume_images/add_cd.png diff --git a/docs/admin/b2d_volume_images/add_new_controller.png b/engine/admin/b2d_volume_images/add_new_controller.png similarity index 100% rename from docs/admin/b2d_volume_images/add_new_controller.png rename to engine/admin/b2d_volume_images/add_new_controller.png diff --git a/docs/admin/b2d_volume_images/add_volume.png b/engine/admin/b2d_volume_images/add_volume.png similarity index 100% rename from docs/admin/b2d_volume_images/add_volume.png rename to engine/admin/b2d_volume_images/add_volume.png diff --git a/docs/admin/b2d_volume_images/boot_order.png b/engine/admin/b2d_volume_images/boot_order.png similarity index 100% rename from docs/admin/b2d_volume_images/boot_order.png rename to engine/admin/b2d_volume_images/boot_order.png diff --git a/docs/admin/b2d_volume_images/gparted.png b/engine/admin/b2d_volume_images/gparted.png similarity index 100% rename from docs/admin/b2d_volume_images/gparted.png rename to engine/admin/b2d_volume_images/gparted.png diff --git a/docs/admin/b2d_volume_images/gparted2.png b/engine/admin/b2d_volume_images/gparted2.png similarity index 100% rename from docs/admin/b2d_volume_images/gparted2.png rename to engine/admin/b2d_volume_images/gparted2.png diff --git a/docs/admin/b2d_volume_images/verify.png b/engine/admin/b2d_volume_images/verify.png similarity index 100% rename from docs/admin/b2d_volume_images/verify.png rename to engine/admin/b2d_volume_images/verify.png diff --git a/docs/admin/b2d_volume_resize.md b/engine/admin/b2d_volume_resize.md similarity index 100% rename from docs/admin/b2d_volume_resize.md rename to engine/admin/b2d_volume_resize.md diff --git a/docs/admin/chef.md b/engine/admin/chef.md similarity index 100% rename from docs/admin/chef.md rename to engine/admin/chef.md diff --git a/docs/admin/dsc.md b/engine/admin/dsc.md similarity index 100% rename from docs/admin/dsc.md rename to engine/admin/dsc.md diff --git a/docs/admin/formatting.md b/engine/admin/formatting.md similarity index 100% rename from docs/admin/formatting.md rename to engine/admin/formatting.md diff --git a/docs/admin/host_integration.md b/engine/admin/host_integration.md similarity index 100% rename from docs/admin/host_integration.md rename to engine/admin/host_integration.md diff --git a/docs/admin/index.md b/engine/admin/index.md similarity index 100% rename from docs/admin/index.md rename to engine/admin/index.md diff --git a/docs/admin/live-restore.md 
b/engine/admin/live-restore.md similarity index 100% rename from docs/admin/live-restore.md rename to engine/admin/live-restore.md diff --git a/docs/admin/logging/awslogs.md b/engine/admin/logging/awslogs.md similarity index 100% rename from docs/admin/logging/awslogs.md rename to engine/admin/logging/awslogs.md diff --git a/docs/admin/logging/etwlogs.md b/engine/admin/logging/etwlogs.md similarity index 100% rename from docs/admin/logging/etwlogs.md rename to engine/admin/logging/etwlogs.md diff --git a/docs/admin/logging/fluentd.md b/engine/admin/logging/fluentd.md similarity index 100% rename from docs/admin/logging/fluentd.md rename to engine/admin/logging/fluentd.md diff --git a/docs/admin/logging/gcplogs.md b/engine/admin/logging/gcplogs.md similarity index 100% rename from docs/admin/logging/gcplogs.md rename to engine/admin/logging/gcplogs.md diff --git a/docs/admin/logging/index.md b/engine/admin/logging/index.md similarity index 100% rename from docs/admin/logging/index.md rename to engine/admin/logging/index.md diff --git a/docs/admin/logging/journald.md b/engine/admin/logging/journald.md similarity index 100% rename from docs/admin/logging/journald.md rename to engine/admin/logging/journald.md diff --git a/docs/admin/logging/log_tags.md b/engine/admin/logging/log_tags.md similarity index 100% rename from docs/admin/logging/log_tags.md rename to engine/admin/logging/log_tags.md diff --git a/docs/admin/logging/overview.md b/engine/admin/logging/overview.md similarity index 100% rename from docs/admin/logging/overview.md rename to engine/admin/logging/overview.md diff --git a/docs/admin/logging/splunk.md b/engine/admin/logging/splunk.md similarity index 100% rename from docs/admin/logging/splunk.md rename to engine/admin/logging/splunk.md diff --git a/docs/admin/menu.md b/engine/admin/menu.md similarity index 100% rename from docs/admin/menu.md rename to engine/admin/menu.md diff --git a/docs/admin/puppet.md b/engine/admin/puppet.md similarity index 100% rename from docs/admin/puppet.md rename to engine/admin/puppet.md diff --git a/docs/admin/registry_mirror.md b/engine/admin/registry_mirror.md similarity index 100% rename from docs/admin/registry_mirror.md rename to engine/admin/registry_mirror.md diff --git a/docs/admin/runmetrics.md b/engine/admin/runmetrics.md similarity index 100% rename from docs/admin/runmetrics.md rename to engine/admin/runmetrics.md diff --git a/docs/admin/systemd.md b/engine/admin/systemd.md similarity index 100% rename from docs/admin/systemd.md rename to engine/admin/systemd.md diff --git a/docs/admin/using_supervisord.md b/engine/admin/using_supervisord.md similarity index 100% rename from docs/admin/using_supervisord.md rename to engine/admin/using_supervisord.md diff --git a/docs/article-img/architecture.svg b/engine/article-img/architecture.svg similarity index 100% rename from docs/article-img/architecture.svg rename to engine/article-img/architecture.svg diff --git a/docs/article-img/engine-components-flow.png b/engine/article-img/engine-components-flow.png similarity index 100% rename from docs/article-img/engine-components-flow.png rename to engine/article-img/engine-components-flow.png diff --git a/docs/breaking_changes.md b/engine/breaking_changes.md similarity index 100% rename from docs/breaking_changes.md rename to engine/breaking_changes.md diff --git a/docs/deprecated.md b/engine/deprecated.md similarity index 100% rename from docs/deprecated.md rename to engine/deprecated.md diff --git a/docs/examples/apt-cacher-ng.Dockerfile 
b/engine/examples/apt-cacher-ng.Dockerfile similarity index 100% rename from docs/examples/apt-cacher-ng.Dockerfile rename to engine/examples/apt-cacher-ng.Dockerfile diff --git a/docs/examples/apt-cacher-ng.md b/engine/examples/apt-cacher-ng.md similarity index 100% rename from docs/examples/apt-cacher-ng.md rename to engine/examples/apt-cacher-ng.md diff --git a/docs/examples/couchbase.md b/engine/examples/couchbase.md similarity index 100% rename from docs/examples/couchbase.md rename to engine/examples/couchbase.md diff --git a/docs/examples/couchbase/web-console.png b/engine/examples/couchbase/web-console.png similarity index 100% rename from docs/examples/couchbase/web-console.png rename to engine/examples/couchbase/web-console.png diff --git a/docs/examples/couchdb_data_volumes.md b/engine/examples/couchdb_data_volumes.md similarity index 100% rename from docs/examples/couchdb_data_volumes.md rename to engine/examples/couchdb_data_volumes.md diff --git a/docs/examples/index.md b/engine/examples/index.md similarity index 100% rename from docs/examples/index.md rename to engine/examples/index.md diff --git a/docs/examples/mongodb.md b/engine/examples/mongodb.md similarity index 100% rename from docs/examples/mongodb.md rename to engine/examples/mongodb.md diff --git a/docs/examples/mongodb/Dockerfile b/engine/examples/mongodb/Dockerfile similarity index 100% rename from docs/examples/mongodb/Dockerfile rename to engine/examples/mongodb/Dockerfile diff --git a/docs/examples/postgresql_service.Dockerfile b/engine/examples/postgresql_service.Dockerfile similarity index 100% rename from docs/examples/postgresql_service.Dockerfile rename to engine/examples/postgresql_service.Dockerfile diff --git a/docs/examples/postgresql_service.md b/engine/examples/postgresql_service.md similarity index 100% rename from docs/examples/postgresql_service.md rename to engine/examples/postgresql_service.md diff --git a/docs/examples/running_redis_service.md b/engine/examples/running_redis_service.md similarity index 100% rename from docs/examples/running_redis_service.md rename to engine/examples/running_redis_service.md diff --git a/docs/examples/running_riak_service.Dockerfile b/engine/examples/running_riak_service.Dockerfile similarity index 100% rename from docs/examples/running_riak_service.Dockerfile rename to engine/examples/running_riak_service.Dockerfile diff --git a/docs/examples/running_riak_service.md b/engine/examples/running_riak_service.md similarity index 100% rename from docs/examples/running_riak_service.md rename to engine/examples/running_riak_service.md diff --git a/docs/examples/running_ssh_service.Dockerfile b/engine/examples/running_ssh_service.Dockerfile similarity index 100% rename from docs/examples/running_ssh_service.Dockerfile rename to engine/examples/running_ssh_service.Dockerfile diff --git a/docs/examples/running_ssh_service.md b/engine/examples/running_ssh_service.md similarity index 100% rename from docs/examples/running_ssh_service.md rename to engine/examples/running_ssh_service.md diff --git a/docs/examples/supervisord.conf b/engine/examples/supervisord.conf similarity index 100% rename from docs/examples/supervisord.conf rename to engine/examples/supervisord.conf diff --git a/docs/extend/images/authz_additional_info.png b/engine/extend/images/authz_additional_info.png similarity index 100% rename from docs/extend/images/authz_additional_info.png rename to engine/extend/images/authz_additional_info.png diff --git a/docs/extend/images/authz_allow.png 
b/engine/extend/images/authz_allow.png similarity index 100% rename from docs/extend/images/authz_allow.png rename to engine/extend/images/authz_allow.png diff --git a/docs/extend/images/authz_chunked.png b/engine/extend/images/authz_chunked.png similarity index 100% rename from docs/extend/images/authz_chunked.png rename to engine/extend/images/authz_chunked.png diff --git a/docs/extend/images/authz_connection_hijack.png b/engine/extend/images/authz_connection_hijack.png similarity index 100% rename from docs/extend/images/authz_connection_hijack.png rename to engine/extend/images/authz_connection_hijack.png diff --git a/docs/extend/images/authz_deny.png b/engine/extend/images/authz_deny.png similarity index 100% rename from docs/extend/images/authz_deny.png rename to engine/extend/images/authz_deny.png diff --git a/docs/extend/index.md b/engine/extend/index.md similarity index 100% rename from docs/extend/index.md rename to engine/extend/index.md diff --git a/docs/extend/legacy_plugins.md b/engine/extend/legacy_plugins.md similarity index 100% rename from docs/extend/legacy_plugins.md rename to engine/extend/legacy_plugins.md diff --git a/docs/extend/manifest.md b/engine/extend/manifest.md similarity index 100% rename from docs/extend/manifest.md rename to engine/extend/manifest.md diff --git a/docs/extend/menu.md b/engine/extend/menu.md similarity index 100% rename from docs/extend/menu.md rename to engine/extend/menu.md diff --git a/docs/extend/plugin_api.md b/engine/extend/plugin_api.md similarity index 100% rename from docs/extend/plugin_api.md rename to engine/extend/plugin_api.md diff --git a/docs/extend/plugins_authorization.md b/engine/extend/plugins_authorization.md similarity index 100% rename from docs/extend/plugins_authorization.md rename to engine/extend/plugins_authorization.md diff --git a/docs/extend/plugins_network.md b/engine/extend/plugins_network.md similarity index 100% rename from docs/extend/plugins_network.md rename to engine/extend/plugins_network.md diff --git a/docs/extend/plugins_volume.md b/engine/extend/plugins_volume.md similarity index 100% rename from docs/extend/plugins_volume.md rename to engine/extend/plugins_volume.md diff --git a/docs/faq.md b/engine/faq.md similarity index 100% rename from docs/faq.md rename to engine/faq.md diff --git a/docs/getstarted/index.md b/engine/getstarted/index.md similarity index 100% rename from docs/getstarted/index.md rename to engine/getstarted/index.md diff --git a/docs/getstarted/last_page.md b/engine/getstarted/last_page.md similarity index 100% rename from docs/getstarted/last_page.md rename to engine/getstarted/last_page.md diff --git a/docs/getstarted/linux_install_help.md b/engine/getstarted/linux_install_help.md similarity index 100% rename from docs/getstarted/linux_install_help.md rename to engine/getstarted/linux_install_help.md diff --git a/docs/getstarted/menu.md b/engine/getstarted/menu.md similarity index 100% rename from docs/getstarted/menu.md rename to engine/getstarted/menu.md diff --git a/docs/getstarted/step_five.md b/engine/getstarted/step_five.md similarity index 100% rename from docs/getstarted/step_five.md rename to engine/getstarted/step_five.md diff --git a/docs/getstarted/step_four.md b/engine/getstarted/step_four.md similarity index 100% rename from docs/getstarted/step_four.md rename to engine/getstarted/step_four.md diff --git a/docs/getstarted/step_one.md b/engine/getstarted/step_one.md similarity index 100% rename from docs/getstarted/step_one.md rename to 
engine/getstarted/step_one.md diff --git a/docs/getstarted/step_six.md b/engine/getstarted/step_six.md similarity index 100% rename from docs/getstarted/step_six.md rename to engine/getstarted/step_six.md diff --git a/docs/getstarted/step_three.md b/engine/getstarted/step_three.md similarity index 100% rename from docs/getstarted/step_three.md rename to engine/getstarted/step_three.md diff --git a/docs/getstarted/step_two.md b/engine/getstarted/step_two.md similarity index 100% rename from docs/getstarted/step_two.md rename to engine/getstarted/step_two.md diff --git a/docs/getstarted/tutimg/add_repository.png b/engine/getstarted/tutimg/add_repository.png similarity index 100% rename from docs/getstarted/tutimg/add_repository.png rename to engine/getstarted/tutimg/add_repository.png diff --git a/docs/getstarted/tutimg/browse_and_search.png b/engine/getstarted/tutimg/browse_and_search.png similarity index 100% rename from docs/getstarted/tutimg/browse_and_search.png rename to engine/getstarted/tutimg/browse_and_search.png diff --git a/docs/getstarted/tutimg/container_explainer.png b/engine/getstarted/tutimg/container_explainer.png similarity index 100% rename from docs/getstarted/tutimg/container_explainer.png rename to engine/getstarted/tutimg/container_explainer.png diff --git a/docs/getstarted/tutimg/hub_signup.png b/engine/getstarted/tutimg/hub_signup.png similarity index 100% rename from docs/getstarted/tutimg/hub_signup.png rename to engine/getstarted/tutimg/hub_signup.png diff --git a/docs/getstarted/tutimg/image_found.png b/engine/getstarted/tutimg/image_found.png similarity index 100% rename from docs/getstarted/tutimg/image_found.png rename to engine/getstarted/tutimg/image_found.png diff --git a/docs/getstarted/tutimg/line_one.png b/engine/getstarted/tutimg/line_one.png similarity index 100% rename from docs/getstarted/tutimg/line_one.png rename to engine/getstarted/tutimg/line_one.png diff --git a/docs/getstarted/tutimg/new_image.png b/engine/getstarted/tutimg/new_image.png similarity index 100% rename from docs/getstarted/tutimg/new_image.png rename to engine/getstarted/tutimg/new_image.png diff --git a/docs/getstarted/tutimg/tagger.png b/engine/getstarted/tutimg/tagger.png similarity index 100% rename from docs/getstarted/tutimg/tagger.png rename to engine/getstarted/tutimg/tagger.png diff --git a/docs/getstarted/tutimg/whale_repo.png b/engine/getstarted/tutimg/whale_repo.png similarity index 100% rename from docs/getstarted/tutimg/whale_repo.png rename to engine/getstarted/tutimg/whale_repo.png diff --git a/docs/index.md b/engine/index.md similarity index 100% rename from docs/index.md rename to engine/index.md diff --git a/docs/installation/binaries.md b/engine/installation/binaries.md similarity index 100% rename from docs/installation/binaries.md rename to engine/installation/binaries.md diff --git a/docs/installation/cloud/cloud-ex-aws.md b/engine/installation/cloud/cloud-ex-aws.md similarity index 100% rename from docs/installation/cloud/cloud-ex-aws.md rename to engine/installation/cloud/cloud-ex-aws.md diff --git a/docs/installation/cloud/cloud-ex-machine-ocean.md b/engine/installation/cloud/cloud-ex-machine-ocean.md similarity index 100% rename from docs/installation/cloud/cloud-ex-machine-ocean.md rename to engine/installation/cloud/cloud-ex-machine-ocean.md diff --git a/docs/installation/cloud/index.md b/engine/installation/cloud/index.md similarity index 100% rename from docs/installation/cloud/index.md rename to engine/installation/cloud/index.md diff --git 
a/docs/installation/cloud/overview.md b/engine/installation/cloud/overview.md
similarity index 100%
rename from docs/installation/cloud/overview.md
rename to engine/installation/cloud/overview.md
diff --git a/docs/installation/images/bad_host.png b/engine/installation/images/bad_host.png
similarity index 100%
rename from docs/installation/images/bad_host.png
rename to engine/installation/images/bad_host.png
diff --git a/docs/installation/images/cool_view.png b/engine/installation/images/cool_view.png
similarity index 100%
rename from docs/installation/images/cool_view.png
rename to engine/installation/images/cool_view.png
diff --git a/docs/installation/images/ec2-ubuntu.png b/engine/installation/images/ec2-ubuntu.png
similarity index 100%
rename from docs/installation/images/ec2-ubuntu.png
rename to engine/installation/images/ec2-ubuntu.png
diff --git a/docs/installation/images/ec2_instance_details.png b/engine/installation/images/ec2_instance_details.png
similarity index 100%
rename from docs/installation/images/ec2_instance_details.png
rename to engine/installation/images/ec2_instance_details.png
diff --git a/docs/installation/images/ec2_instance_type.png b/engine/installation/images/ec2_instance_type.png
similarity index 100%
rename from docs/installation/images/ec2_instance_type.png
rename to engine/installation/images/ec2_instance_type.png
diff --git a/docs/installation/images/ec2_launch_instance.png b/engine/installation/images/ec2_launch_instance.png
similarity index 100%
rename from docs/installation/images/ec2_launch_instance.png
rename to engine/installation/images/ec2_launch_instance.png
diff --git a/docs/installation/images/good_host.png b/engine/installation/images/good_host.png
similarity index 100%
rename from docs/installation/images/good_host.png
rename to engine/installation/images/good_host.png
diff --git a/docs/installation/images/kitematic.png b/engine/installation/images/kitematic.png
similarity index 100%
rename from docs/installation/images/kitematic.png
rename to engine/installation/images/kitematic.png
diff --git a/docs/installation/images/linux_docker_host.svg b/engine/installation/images/linux_docker_host.svg
similarity index 100%
rename from docs/installation/images/linux_docker_host.svg
rename to engine/installation/images/linux_docker_host.svg
diff --git a/docs/installation/images/mac-page-finished.png b/engine/installation/images/mac-page-finished.png
similarity index 100%
rename from docs/installation/images/mac-page-finished.png
rename to engine/installation/images/mac-page-finished.png
diff --git a/docs/installation/images/mac-page-two.png b/engine/installation/images/mac-page-two.png
similarity index 100%
rename from docs/installation/images/mac-page-two.png
rename to engine/installation/images/mac-page-two.png
diff --git a/docs/installation/images/mac-password-prompt.png b/engine/installation/images/mac-password-prompt.png
similarity index 100%
rename from docs/installation/images/mac-password-prompt.png
rename to engine/installation/images/mac-password-prompt.png
diff --git a/docs/installation/images/mac-success.png b/engine/installation/images/mac-success.png
similarity index 100%
rename from docs/installation/images/mac-success.png
rename to engine/installation/images/mac-success.png
diff --git a/docs/installation/images/mac-welcome-page.png b/engine/installation/images/mac-welcome-page.png
similarity index 100%
rename from docs/installation/images/mac-welcome-page.png
rename to engine/installation/images/mac-welcome-page.png
diff --git a/docs/installation/images/mac_docker_host.svg b/engine/installation/images/mac_docker_host.svg
similarity index 100%
rename from docs/installation/images/mac_docker_host.svg
rename to engine/installation/images/mac_docker_host.svg
diff --git a/docs/installation/images/my-docker-vm.png b/engine/installation/images/my-docker-vm.png
similarity index 100%
rename from docs/installation/images/my-docker-vm.png
rename to engine/installation/images/my-docker-vm.png
diff --git a/docs/installation/images/newsite_view.png b/engine/installation/images/newsite_view.png
similarity index 100%
rename from docs/installation/images/newsite_view.png
rename to engine/installation/images/newsite_view.png
diff --git a/docs/installation/images/nginx-webserver.png b/engine/installation/images/nginx-webserver.png
similarity index 100%
rename from docs/installation/images/nginx-webserver.png
rename to engine/installation/images/nginx-webserver.png
diff --git a/docs/installation/images/ocean_click_api.png b/engine/installation/images/ocean_click_api.png
similarity index 100%
rename from docs/installation/images/ocean_click_api.png
rename to engine/installation/images/ocean_click_api.png
diff --git a/docs/installation/images/ocean_droplet.png b/engine/installation/images/ocean_droplet.png
similarity index 100%
rename from docs/installation/images/ocean_droplet.png
rename to engine/installation/images/ocean_droplet.png
diff --git a/docs/installation/images/ocean_droplet_ubuntu.png b/engine/installation/images/ocean_droplet_ubuntu.png
similarity index 100%
rename from docs/installation/images/ocean_droplet_ubuntu.png
rename to engine/installation/images/ocean_droplet_ubuntu.png
diff --git a/docs/installation/images/ocean_gen_token.png b/engine/installation/images/ocean_gen_token.png
similarity index 100%
rename from docs/installation/images/ocean_gen_token.png
rename to engine/installation/images/ocean_gen_token.png
diff --git a/docs/installation/images/ocean_save_token.png b/engine/installation/images/ocean_save_token.png
similarity index 100%
rename from docs/installation/images/ocean_save_token.png
rename to engine/installation/images/ocean_save_token.png
diff --git a/docs/installation/images/ocean_token_create.png b/engine/installation/images/ocean_token_create.png
similarity index 100%
rename from docs/installation/images/ocean_token_create.png
rename to engine/installation/images/ocean_token_create.png
diff --git a/docs/installation/images/virtualization.png b/engine/installation/images/virtualization.png
similarity index 100%
rename from docs/installation/images/virtualization.png
rename to engine/installation/images/virtualization.png
diff --git a/docs/installation/images/win-page-6.png b/engine/installation/images/win-page-6.png
similarity index 100%
rename from docs/installation/images/win-page-6.png
rename to engine/installation/images/win-page-6.png
diff --git a/docs/installation/images/win-welcome.png b/engine/installation/images/win-welcome.png
similarity index 100%
rename from docs/installation/images/win-welcome.png
rename to engine/installation/images/win-welcome.png
diff --git a/docs/installation/images/win_docker_host.svg b/engine/installation/images/win_docker_host.svg
similarity index 100%
rename from docs/installation/images/win_docker_host.svg
rename to engine/installation/images/win_docker_host.svg
diff --git a/docs/installation/images/win_ver.png b/engine/installation/images/win_ver.png
similarity index 100%
rename from docs/installation/images/win_ver.png
rename to engine/installation/images/win_ver.png
diff --git a/docs/installation/images/windows-boot2docker-cmd.png b/engine/installation/images/windows-boot2docker-cmd.png
similarity index 100%
rename from docs/installation/images/windows-boot2docker-cmd.png
rename to engine/installation/images/windows-boot2docker-cmd.png
diff --git a/docs/installation/images/windows-boot2docker-powershell.png b/engine/installation/images/windows-boot2docker-powershell.png
similarity index 100%
rename from docs/installation/images/windows-boot2docker-powershell.png
rename to engine/installation/images/windows-boot2docker-powershell.png
diff --git a/docs/installation/images/windows-boot2docker-start.png b/engine/installation/images/windows-boot2docker-start.png
similarity index 100%
rename from docs/installation/images/windows-boot2docker-start.png
rename to engine/installation/images/windows-boot2docker-start.png
diff --git a/docs/installation/images/windows-finish.png b/engine/installation/images/windows-finish.png
similarity index 100%
rename from docs/installation/images/windows-finish.png
rename to engine/installation/images/windows-finish.png
diff --git a/docs/installation/index.md b/engine/installation/index.md
similarity index 100%
rename from docs/installation/index.md
rename to engine/installation/index.md
diff --git a/docs/installation/linux/SUSE.md b/engine/installation/linux/SUSE.md
similarity index 100%
rename from docs/installation/linux/SUSE.md
rename to engine/installation/linux/SUSE.md
diff --git a/docs/installation/linux/archlinux.md b/engine/installation/linux/archlinux.md
similarity index 100%
rename from docs/installation/linux/archlinux.md
rename to engine/installation/linux/archlinux.md
diff --git a/docs/installation/linux/centos.md b/engine/installation/linux/centos.md
similarity index 100%
rename from docs/installation/linux/centos.md
rename to engine/installation/linux/centos.md
diff --git a/docs/installation/linux/cruxlinux.md b/engine/installation/linux/cruxlinux.md
similarity index 100%
rename from docs/installation/linux/cruxlinux.md
rename to engine/installation/linux/cruxlinux.md
diff --git a/docs/installation/linux/debian.md b/engine/installation/linux/debian.md
similarity index 100%
rename from docs/installation/linux/debian.md
rename to engine/installation/linux/debian.md
diff --git a/docs/installation/linux/fedora.md b/engine/installation/linux/fedora.md
similarity index 100%
rename from docs/installation/linux/fedora.md
rename to engine/installation/linux/fedora.md
diff --git a/docs/installation/linux/gentoolinux.md b/engine/installation/linux/gentoolinux.md
similarity index 100%
rename from docs/installation/linux/gentoolinux.md
rename to engine/installation/linux/gentoolinux.md
diff --git a/docs/installation/linux/index.md b/engine/installation/linux/index.md
similarity index 100%
rename from docs/installation/linux/index.md
rename to engine/installation/linux/index.md
diff --git a/docs/installation/linux/oracle.md b/engine/installation/linux/oracle.md
similarity index 100%
rename from docs/installation/linux/oracle.md
rename to engine/installation/linux/oracle.md
diff --git a/docs/installation/linux/rhel.md b/engine/installation/linux/rhel.md
similarity index 100%
rename from docs/installation/linux/rhel.md
rename to engine/installation/linux/rhel.md
diff --git a/docs/installation/linux/ubuntulinux.md b/engine/installation/linux/ubuntulinux.md
similarity index 100%
rename from docs/installation/linux/ubuntulinux.md
rename to engine/installation/linux/ubuntulinux.md
diff --git a/docs/installation/mac.md b/engine/installation/mac.md
similarity index 100%
rename from docs/installation/mac.md
rename to engine/installation/mac.md
diff --git a/docs/installation/windows.md b/engine/installation/windows.md
similarity index 100%
rename from docs/installation/windows.md
rename to engine/installation/windows.md
diff --git a/docs/migration.md b/engine/migration.md
similarity index 100%
rename from docs/migration.md
rename to engine/migration.md
diff --git a/docs/reference/api/README.md b/engine/reference/api/README.md
similarity index 100%
rename from docs/reference/api/README.md
rename to engine/reference/api/README.md
diff --git a/docs/reference/api/_static/io_oauth_authorization_page.png b/engine/reference/api/_static/io_oauth_authorization_page.png
similarity index 100%
rename from docs/reference/api/_static/io_oauth_authorization_page.png
rename to engine/reference/api/_static/io_oauth_authorization_page.png
diff --git a/docs/reference/api/docker-io_api.md b/engine/reference/api/docker-io_api.md
similarity index 100%
rename from docs/reference/api/docker-io_api.md
rename to engine/reference/api/docker-io_api.md
diff --git a/docs/reference/api/docker_io_accounts_api.md b/engine/reference/api/docker_io_accounts_api.md
similarity index 100%
rename from docs/reference/api/docker_io_accounts_api.md
rename to engine/reference/api/docker_io_accounts_api.md
diff --git a/docs/reference/api/docker_remote_api.md b/engine/reference/api/docker_remote_api.md
similarity index 100%
rename from docs/reference/api/docker_remote_api.md
rename to engine/reference/api/docker_remote_api.md
diff --git a/docs/reference/api/docker_remote_api_v1.18.md b/engine/reference/api/docker_remote_api_v1.18.md
similarity index 100%
rename from docs/reference/api/docker_remote_api_v1.18.md
rename to engine/reference/api/docker_remote_api_v1.18.md
diff --git a/docs/reference/api/docker_remote_api_v1.19.md b/engine/reference/api/docker_remote_api_v1.19.md
similarity index 100%
rename from docs/reference/api/docker_remote_api_v1.19.md
rename to engine/reference/api/docker_remote_api_v1.19.md
diff --git a/docs/reference/api/docker_remote_api_v1.20.md b/engine/reference/api/docker_remote_api_v1.20.md
similarity index 100%
rename from docs/reference/api/docker_remote_api_v1.20.md
rename to engine/reference/api/docker_remote_api_v1.20.md
diff --git a/docs/reference/api/docker_remote_api_v1.21.md b/engine/reference/api/docker_remote_api_v1.21.md
similarity index 100%
rename from docs/reference/api/docker_remote_api_v1.21.md
rename to engine/reference/api/docker_remote_api_v1.21.md
diff --git a/docs/reference/api/docker_remote_api_v1.22.md b/engine/reference/api/docker_remote_api_v1.22.md
similarity index 100%
rename from docs/reference/api/docker_remote_api_v1.22.md
rename to engine/reference/api/docker_remote_api_v1.22.md
diff --git a/docs/reference/api/docker_remote_api_v1.23.md b/engine/reference/api/docker_remote_api_v1.23.md
similarity index 100%
rename from docs/reference/api/docker_remote_api_v1.23.md
rename to engine/reference/api/docker_remote_api_v1.23.md
diff --git a/docs/reference/api/docker_remote_api_v1.24.md b/engine/reference/api/docker_remote_api_v1.24.md
similarity index 100%
rename from docs/reference/api/docker_remote_api_v1.24.md
rename to engine/reference/api/docker_remote_api_v1.24.md
diff --git a/docs/reference/api/docker_remote_api_v1.25.md b/engine/reference/api/docker_remote_api_v1.25.md
similarity index 100%
rename from docs/reference/api/docker_remote_api_v1.25.md
rename to engine/reference/api/docker_remote_api_v1.25.md
diff --git a/docs/reference/api/hub_registry_spec.md b/engine/reference/api/hub_registry_spec.md
similarity index 100%
rename from docs/reference/api/hub_registry_spec.md
rename to engine/reference/api/hub_registry_spec.md
diff --git a/docs/reference/api/images/event_state.gliffy b/engine/reference/api/images/event_state.gliffy
similarity index 100%
rename from docs/reference/api/images/event_state.gliffy
rename to engine/reference/api/images/event_state.gliffy
diff --git a/docs/reference/api/images/event_state.png b/engine/reference/api/images/event_state.png
similarity index 100%
rename from docs/reference/api/images/event_state.png
rename to engine/reference/api/images/event_state.png
diff --git a/docs/reference/api/index.md b/engine/reference/api/index.md
similarity index 100%
rename from docs/reference/api/index.md
rename to engine/reference/api/index.md
diff --git a/docs/reference/api/remote_api_client_libraries.md b/engine/reference/api/remote_api_client_libraries.md
similarity index 100%
rename from docs/reference/api/remote_api_client_libraries.md
rename to engine/reference/api/remote_api_client_libraries.md
diff --git a/docs/reference/builder.md b/engine/reference/builder.md
similarity index 100%
rename from docs/reference/builder.md
rename to engine/reference/builder.md
diff --git a/docs/reference/commandline/attach.md b/engine/reference/commandline/attach.md
similarity index 100%
rename from docs/reference/commandline/attach.md
rename to engine/reference/commandline/attach.md
diff --git a/docs/reference/commandline/build.md b/engine/reference/commandline/build.md
similarity index 100%
rename from docs/reference/commandline/build.md
rename to engine/reference/commandline/build.md
diff --git a/docs/reference/commandline/cli.md b/engine/reference/commandline/cli.md
similarity index 100%
rename from docs/reference/commandline/cli.md
rename to engine/reference/commandline/cli.md
diff --git a/docs/reference/commandline/commit.md b/engine/reference/commandline/commit.md
similarity index 100%
rename from docs/reference/commandline/commit.md
rename to engine/reference/commandline/commit.md
diff --git a/docs/reference/commandline/cp.md b/engine/reference/commandline/cp.md
similarity index 100%
rename from docs/reference/commandline/cp.md
rename to engine/reference/commandline/cp.md
diff --git a/docs/reference/commandline/create.md b/engine/reference/commandline/create.md
similarity index 100%
rename from docs/reference/commandline/create.md
rename to engine/reference/commandline/create.md
diff --git a/docs/reference/commandline/deploy.md b/engine/reference/commandline/deploy.md
similarity index 100%
rename from docs/reference/commandline/deploy.md
rename to engine/reference/commandline/deploy.md
diff --git a/docs/reference/commandline/diff.md b/engine/reference/commandline/diff.md
similarity index 100%
rename from docs/reference/commandline/diff.md
rename to engine/reference/commandline/diff.md
diff --git a/docs/reference/commandline/docker_images.gif b/engine/reference/commandline/docker_images.gif
similarity index 100%
rename from docs/reference/commandline/docker_images.gif
rename to engine/reference/commandline/docker_images.gif
diff --git a/docs/reference/commandline/dockerd.md b/engine/reference/commandline/dockerd.md
similarity index 100%
rename from docs/reference/commandline/dockerd.md
rename to engine/reference/commandline/dockerd.md
diff --git a/docs/reference/commandline/events.md b/engine/reference/commandline/events.md
similarity index 100%
rename from docs/reference/commandline/events.md
rename to engine/reference/commandline/events.md
diff --git a/docs/reference/commandline/exec.md b/engine/reference/commandline/exec.md
similarity index 100%
rename from docs/reference/commandline/exec.md
rename to engine/reference/commandline/exec.md
diff --git a/docs/reference/commandline/export.md b/engine/reference/commandline/export.md
similarity index 100%
rename from docs/reference/commandline/export.md
rename to engine/reference/commandline/export.md
diff --git a/docs/reference/commandline/history.md b/engine/reference/commandline/history.md
similarity index 100%
rename from docs/reference/commandline/history.md
rename to engine/reference/commandline/history.md
diff --git a/docs/reference/commandline/images.md b/engine/reference/commandline/images.md
similarity index 100%
rename from docs/reference/commandline/images.md
rename to engine/reference/commandline/images.md
diff --git a/docs/reference/commandline/import.md b/engine/reference/commandline/import.md
similarity index 100%
rename from docs/reference/commandline/import.md
rename to engine/reference/commandline/import.md
diff --git a/docs/reference/commandline/index.md b/engine/reference/commandline/index.md
similarity index 100%
rename from docs/reference/commandline/index.md
rename to engine/reference/commandline/index.md
diff --git a/docs/reference/commandline/info.md b/engine/reference/commandline/info.md
similarity index 100%
rename from docs/reference/commandline/info.md
rename to engine/reference/commandline/info.md
diff --git a/docs/reference/commandline/inspect.md b/engine/reference/commandline/inspect.md
similarity index 100%
rename from docs/reference/commandline/inspect.md
rename to engine/reference/commandline/inspect.md
diff --git a/docs/reference/commandline/kill.md b/engine/reference/commandline/kill.md
similarity index 100%
rename from docs/reference/commandline/kill.md
rename to engine/reference/commandline/kill.md
diff --git a/docs/reference/commandline/load.md b/engine/reference/commandline/load.md
similarity index 100%
rename from docs/reference/commandline/load.md
rename to engine/reference/commandline/load.md
diff --git a/docs/reference/commandline/login.md b/engine/reference/commandline/login.md
similarity index 100%
rename from docs/reference/commandline/login.md
rename to engine/reference/commandline/login.md
diff --git a/docs/reference/commandline/logout.md b/engine/reference/commandline/logout.md
similarity index 100%
rename from docs/reference/commandline/logout.md
rename to engine/reference/commandline/logout.md
diff --git a/docs/reference/commandline/logs.md b/engine/reference/commandline/logs.md
similarity index 100%
rename from docs/reference/commandline/logs.md
rename to engine/reference/commandline/logs.md
diff --git a/docs/reference/commandline/menu.md b/engine/reference/commandline/menu.md
similarity index 100%
rename from docs/reference/commandline/menu.md
rename to engine/reference/commandline/menu.md
diff --git a/docs/reference/commandline/network_connect.md b/engine/reference/commandline/network_connect.md
similarity index 100%
rename from docs/reference/commandline/network_connect.md
rename to engine/reference/commandline/network_connect.md
diff --git a/docs/reference/commandline/network_create.md b/engine/reference/commandline/network_create.md
similarity index 100%
rename from docs/reference/commandline/network_create.md
rename to engine/reference/commandline/network_create.md
diff --git a/docs/reference/commandline/network_disconnect.md b/engine/reference/commandline/network_disconnect.md
similarity index 100%
rename from docs/reference/commandline/network_disconnect.md
rename to engine/reference/commandline/network_disconnect.md
diff --git a/docs/reference/commandline/network_inspect.md b/engine/reference/commandline/network_inspect.md
similarity index 100%
rename from docs/reference/commandline/network_inspect.md
rename to engine/reference/commandline/network_inspect.md
diff --git a/docs/reference/commandline/network_ls.md b/engine/reference/commandline/network_ls.md
similarity index 100%
rename from docs/reference/commandline/network_ls.md
rename to engine/reference/commandline/network_ls.md
diff --git a/docs/reference/commandline/network_rm.md b/engine/reference/commandline/network_rm.md
similarity index 100%
rename from docs/reference/commandline/network_rm.md
rename to engine/reference/commandline/network_rm.md
diff --git a/docs/reference/commandline/node_demote.md b/engine/reference/commandline/node_demote.md
similarity index 100%
rename from docs/reference/commandline/node_demote.md
rename to engine/reference/commandline/node_demote.md
diff --git a/docs/reference/commandline/node_inspect.md b/engine/reference/commandline/node_inspect.md
similarity index 100%
rename from docs/reference/commandline/node_inspect.md
rename to engine/reference/commandline/node_inspect.md
diff --git a/docs/reference/commandline/node_ls.md b/engine/reference/commandline/node_ls.md
similarity index 100%
rename from docs/reference/commandline/node_ls.md
rename to engine/reference/commandline/node_ls.md
diff --git a/docs/reference/commandline/node_promote.md b/engine/reference/commandline/node_promote.md
similarity index 100%
rename from docs/reference/commandline/node_promote.md
rename to engine/reference/commandline/node_promote.md
diff --git a/docs/reference/commandline/node_ps.md b/engine/reference/commandline/node_ps.md
similarity index 100%
rename from docs/reference/commandline/node_ps.md
rename to engine/reference/commandline/node_ps.md
diff --git a/docs/reference/commandline/node_rm.md b/engine/reference/commandline/node_rm.md
similarity index 100%
rename from docs/reference/commandline/node_rm.md
rename to engine/reference/commandline/node_rm.md
diff --git a/docs/reference/commandline/node_update.md b/engine/reference/commandline/node_update.md
similarity index 100%
rename from docs/reference/commandline/node_update.md
rename to engine/reference/commandline/node_update.md
diff --git a/docs/reference/commandline/pause.md b/engine/reference/commandline/pause.md
similarity index 100%
rename from docs/reference/commandline/pause.md
rename to engine/reference/commandline/pause.md
diff --git a/docs/reference/commandline/plugin_disable.md b/engine/reference/commandline/plugin_disable.md
similarity index 100%
rename from docs/reference/commandline/plugin_disable.md
rename to engine/reference/commandline/plugin_disable.md
diff --git a/docs/reference/commandline/plugin_enable.md b/engine/reference/commandline/plugin_enable.md
similarity index 100%
rename from docs/reference/commandline/plugin_enable.md
rename to engine/reference/commandline/plugin_enable.md
diff --git a/docs/reference/commandline/plugin_inspect.md b/engine/reference/commandline/plugin_inspect.md
similarity index 100%
rename from docs/reference/commandline/plugin_inspect.md
rename to engine/reference/commandline/plugin_inspect.md
diff --git a/docs/reference/commandline/plugin_install.md b/engine/reference/commandline/plugin_install.md
similarity index 100%
rename from docs/reference/commandline/plugin_install.md
rename to engine/reference/commandline/plugin_install.md
diff --git a/docs/reference/commandline/plugin_ls.md b/engine/reference/commandline/plugin_ls.md
similarity index 100%
rename from docs/reference/commandline/plugin_ls.md
rename to engine/reference/commandline/plugin_ls.md
diff --git a/docs/reference/commandline/plugin_rm.md b/engine/reference/commandline/plugin_rm.md
similarity index 100%
rename from docs/reference/commandline/plugin_rm.md
rename to engine/reference/commandline/plugin_rm.md
diff --git a/docs/reference/commandline/port.md b/engine/reference/commandline/port.md
similarity index 100%
rename from docs/reference/commandline/port.md
rename to engine/reference/commandline/port.md
diff --git a/docs/reference/commandline/ps.md b/engine/reference/commandline/ps.md
similarity index 100%
rename from docs/reference/commandline/ps.md
rename to engine/reference/commandline/ps.md
diff --git a/docs/reference/commandline/pull.md b/engine/reference/commandline/pull.md
similarity index 100%
rename from docs/reference/commandline/pull.md
rename to engine/reference/commandline/pull.md
diff --git a/docs/reference/commandline/push.md b/engine/reference/commandline/push.md
similarity index 100%
rename from docs/reference/commandline/push.md
rename to engine/reference/commandline/push.md
diff --git a/docs/reference/commandline/rename.md b/engine/reference/commandline/rename.md
similarity index 100%
rename from docs/reference/commandline/rename.md
rename to engine/reference/commandline/rename.md
diff --git a/docs/reference/commandline/restart.md b/engine/reference/commandline/restart.md
similarity index 100%
rename from docs/reference/commandline/restart.md
rename to engine/reference/commandline/restart.md
diff --git a/docs/reference/commandline/rm.md b/engine/reference/commandline/rm.md
similarity index 100%
rename from docs/reference/commandline/rm.md
rename to engine/reference/commandline/rm.md
diff --git a/docs/reference/commandline/rmi.md b/engine/reference/commandline/rmi.md
similarity index 100%
rename from docs/reference/commandline/rmi.md
rename to engine/reference/commandline/rmi.md
diff --git a/docs/reference/commandline/run.md b/engine/reference/commandline/run.md
similarity index 100%
rename from docs/reference/commandline/run.md
rename to engine/reference/commandline/run.md
diff --git a/docs/reference/commandline/save.md b/engine/reference/commandline/save.md
similarity index 100%
rename from docs/reference/commandline/save.md
rename to engine/reference/commandline/save.md
diff --git a/docs/reference/commandline/search.md b/engine/reference/commandline/search.md
similarity index 100%
rename from docs/reference/commandline/search.md
rename to engine/reference/commandline/search.md
diff --git a/docs/reference/commandline/service_create.md b/engine/reference/commandline/service_create.md
similarity index 100%
rename from docs/reference/commandline/service_create.md
rename to engine/reference/commandline/service_create.md
diff --git a/docs/reference/commandline/service_inspect.md b/engine/reference/commandline/service_inspect.md
similarity index 100%
rename from docs/reference/commandline/service_inspect.md
rename to engine/reference/commandline/service_inspect.md
diff --git a/docs/reference/commandline/service_ls.md b/engine/reference/commandline/service_ls.md
similarity index 100%
rename from docs/reference/commandline/service_ls.md
rename to engine/reference/commandline/service_ls.md
diff --git a/docs/reference/commandline/service_ps.md b/engine/reference/commandline/service_ps.md
similarity index 100%
rename from docs/reference/commandline/service_ps.md
rename to engine/reference/commandline/service_ps.md
diff --git a/docs/reference/commandline/service_rm.md b/engine/reference/commandline/service_rm.md
similarity index 100%
rename from docs/reference/commandline/service_rm.md
rename to engine/reference/commandline/service_rm.md
diff --git a/docs/reference/commandline/service_scale.md b/engine/reference/commandline/service_scale.md
similarity index 100%
rename from docs/reference/commandline/service_scale.md
rename to engine/reference/commandline/service_scale.md
diff --git a/docs/reference/commandline/service_update.md b/engine/reference/commandline/service_update.md
similarity index 100%
rename from docs/reference/commandline/service_update.md
rename to engine/reference/commandline/service_update.md
diff --git a/docs/reference/commandline/stack_config.md b/engine/reference/commandline/stack_config.md
similarity index 100%
rename from docs/reference/commandline/stack_config.md
rename to engine/reference/commandline/stack_config.md
diff --git a/docs/reference/commandline/stack_deploy.md b/engine/reference/commandline/stack_deploy.md
similarity index 100%
rename from docs/reference/commandline/stack_deploy.md
rename to engine/reference/commandline/stack_deploy.md
diff --git a/docs/reference/commandline/stack_rm.md b/engine/reference/commandline/stack_rm.md
similarity index 100%
rename from docs/reference/commandline/stack_rm.md
rename to engine/reference/commandline/stack_rm.md
diff --git a/docs/reference/commandline/stack_services.md b/engine/reference/commandline/stack_services.md
similarity index 100%
rename from docs/reference/commandline/stack_services.md
rename to engine/reference/commandline/stack_services.md
diff --git a/docs/reference/commandline/stack_tasks.md b/engine/reference/commandline/stack_tasks.md
similarity index 100%
rename from docs/reference/commandline/stack_tasks.md
rename to engine/reference/commandline/stack_tasks.md
diff --git a/docs/reference/commandline/start.md b/engine/reference/commandline/start.md
similarity index 100%
rename from docs/reference/commandline/start.md
rename to engine/reference/commandline/start.md
diff --git a/docs/reference/commandline/stats.md b/engine/reference/commandline/stats.md
similarity index 100%
rename from docs/reference/commandline/stats.md
rename to engine/reference/commandline/stats.md
diff --git a/docs/reference/commandline/stop.md b/engine/reference/commandline/stop.md
similarity index 100%
rename from docs/reference/commandline/stop.md
rename to engine/reference/commandline/stop.md
diff --git a/docs/reference/commandline/swarm_init.md b/engine/reference/commandline/swarm_init.md
similarity index 100%
rename from docs/reference/commandline/swarm_init.md
rename to engine/reference/commandline/swarm_init.md
diff --git a/docs/reference/commandline/swarm_join.md b/engine/reference/commandline/swarm_join.md
similarity index 100%
rename from docs/reference/commandline/swarm_join.md
rename to engine/reference/commandline/swarm_join.md
diff --git a/docs/reference/commandline/swarm_join_token.md b/engine/reference/commandline/swarm_join_token.md
similarity index 100%
rename from docs/reference/commandline/swarm_join_token.md
rename to engine/reference/commandline/swarm_join_token.md
diff --git a/docs/reference/commandline/swarm_leave.md b/engine/reference/commandline/swarm_leave.md
similarity index 100%
rename from docs/reference/commandline/swarm_leave.md
rename to engine/reference/commandline/swarm_leave.md
diff --git a/docs/reference/commandline/swarm_update.md b/engine/reference/commandline/swarm_update.md
similarity index 100%
rename from docs/reference/commandline/swarm_update.md
rename to engine/reference/commandline/swarm_update.md
diff --git a/docs/reference/commandline/tag.md b/engine/reference/commandline/tag.md
similarity index 100%
rename from docs/reference/commandline/tag.md
rename to engine/reference/commandline/tag.md
diff --git a/docs/reference/commandline/top.md b/engine/reference/commandline/top.md
similarity index 100%
rename from docs/reference/commandline/top.md
rename to engine/reference/commandline/top.md
diff --git a/docs/reference/commandline/unpause.md b/engine/reference/commandline/unpause.md
similarity index 100%
rename from docs/reference/commandline/unpause.md
rename to engine/reference/commandline/unpause.md
diff --git a/docs/reference/commandline/update.md b/engine/reference/commandline/update.md
similarity index 100%
rename from docs/reference/commandline/update.md
rename to engine/reference/commandline/update.md
diff --git a/docs/reference/commandline/version.md b/engine/reference/commandline/version.md
similarity index 100%
rename from docs/reference/commandline/version.md
rename to engine/reference/commandline/version.md
diff --git a/docs/reference/commandline/volume_create.md b/engine/reference/commandline/volume_create.md
similarity index 100%
rename from docs/reference/commandline/volume_create.md
rename to engine/reference/commandline/volume_create.md
diff --git a/docs/reference/commandline/volume_inspect.md b/engine/reference/commandline/volume_inspect.md
similarity index 100%
rename from docs/reference/commandline/volume_inspect.md
rename to engine/reference/commandline/volume_inspect.md
diff --git a/docs/reference/commandline/volume_ls.md b/engine/reference/commandline/volume_ls.md
similarity index 100%
rename from docs/reference/commandline/volume_ls.md
rename to engine/reference/commandline/volume_ls.md
diff --git a/docs/reference/commandline/volume_rm.md b/engine/reference/commandline/volume_rm.md
similarity index 100%
rename from docs/reference/commandline/volume_rm.md
rename to engine/reference/commandline/volume_rm.md
diff --git a/docs/reference/commandline/wait.md b/engine/reference/commandline/wait.md
similarity index 100%
rename from docs/reference/commandline/wait.md
rename to engine/reference/commandline/wait.md
diff --git a/docs/reference/glossary.md b/engine/reference/glossary.md
similarity index 100%
rename from docs/reference/glossary.md
rename to engine/reference/glossary.md
diff --git a/docs/reference/index.md b/engine/reference/index.md
similarity index 100%
rename from docs/reference/index.md
rename to engine/reference/index.md
diff --git a/docs/reference/run.md b/engine/reference/run.md
similarity index 100%
rename from docs/reference/run.md
rename to engine/reference/run.md
diff --git a/docs/security/apparmor.md b/engine/security/apparmor.md
similarity index 100%
rename from docs/security/apparmor.md
rename to engine/security/apparmor.md
diff --git a/docs/security/certificates.md b/engine/security/certificates.md
similarity index 100%
rename from docs/security/certificates.md
rename to engine/security/certificates.md
diff --git a/docs/security/https.md b/engine/security/https.md
similarity index 100%
rename from docs/security/https.md
rename to engine/security/https.md
diff --git a/docs/security/https/Dockerfile b/engine/security/https/Dockerfile
similarity index 100%
rename from docs/security/https/Dockerfile
rename to engine/security/https/Dockerfile
diff --git a/docs/security/https/Makefile b/engine/security/https/Makefile
similarity index 100%
rename from docs/security/https/Makefile
rename to engine/security/https/Makefile
diff --git a/docs/security/https/README.md b/engine/security/https/README.md
similarity index 100%
rename from docs/security/https/README.md
rename to engine/security/https/README.md
diff --git a/docs/security/https/make_certs.sh b/engine/security/https/make_certs.sh
similarity index 100%
rename from docs/security/https/make_certs.sh
rename to engine/security/https/make_certs.sh
diff --git a/docs/security/https/parsedocs.sh b/engine/security/https/parsedocs.sh
similarity index 100%
rename from docs/security/https/parsedocs.sh
rename to engine/security/https/parsedocs.sh
diff --git a/docs/security/index.md b/engine/security/index.md
similarity index 100%
rename from docs/security/index.md
rename to engine/security/index.md
diff --git a/docs/security/non-events.md b/engine/security/non-events.md
similarity index 100%
rename from docs/security/non-events.md
rename to engine/security/non-events.md
diff --git a/docs/security/seccomp.md b/engine/security/seccomp.md
similarity index 100%
rename from docs/security/seccomp.md
rename to engine/security/seccomp.md
diff --git a/docs/security/security.md b/engine/security/security.md
similarity index 100%
rename from docs/security/security.md
rename to engine/security/security.md
diff --git a/docs/security/trust/content_trust.md b/engine/security/trust/content_trust.md
similarity index 100%
rename from docs/security/trust/content_trust.md
rename to engine/security/trust/content_trust.md
diff --git a/docs/security/trust/deploying_notary.md b/engine/security/trust/deploying_notary.md
similarity index 100%
rename from docs/security/trust/deploying_notary.md
rename to engine/security/trust/deploying_notary.md
diff --git a/docs/security/trust/images/tag_signing.png b/engine/security/trust/images/tag_signing.png
similarity index 100%
rename from docs/security/trust/images/tag_signing.png
rename to engine/security/trust/images/tag_signing.png
diff --git a/docs/security/trust/images/trust_.gliffy b/engine/security/trust/images/trust_.gliffy
similarity index 100%
rename from docs/security/trust/images/trust_.gliffy
rename to engine/security/trust/images/trust_.gliffy
diff --git a/docs/security/trust/images/trust_components.gliffy b/engine/security/trust/images/trust_components.gliffy
similarity index 100%
rename from docs/security/trust/images/trust_components.gliffy
rename to engine/security/trust/images/trust_components.gliffy
diff --git a/docs/security/trust/images/trust_components.png b/engine/security/trust/images/trust_components.png
similarity index 100%
rename from docs/security/trust/images/trust_components.png
rename to engine/security/trust/images/trust_components.png
diff --git a/docs/security/trust/images/trust_signing.gliffy b/engine/security/trust/images/trust_signing.gliffy
similarity index 100%
rename from docs/security/trust/images/trust_signing.gliffy
rename to engine/security/trust/images/trust_signing.gliffy
diff --git a/docs/security/trust/images/trust_signing.png b/engine/security/trust/images/trust_signing.png
similarity index 100%
rename from docs/security/trust/images/trust_signing.png
rename to engine/security/trust/images/trust_signing.png
diff --git a/docs/security/trust/images/trust_view.gliffy b/engine/security/trust/images/trust_view.gliffy
similarity index 100%
rename from docs/security/trust/images/trust_view.gliffy
rename to engine/security/trust/images/trust_view.gliffy
diff --git a/docs/security/trust/images/trust_view.png b/engine/security/trust/images/trust_view.png
similarity index 100%
rename from docs/security/trust/images/trust_view.png
rename to engine/security/trust/images/trust_view.png
diff --git a/docs/security/trust/index.md b/engine/security/trust/index.md
similarity index 100%
rename from docs/security/trust/index.md
rename to engine/security/trust/index.md
diff --git a/docs/security/trust/trust_automation.md b/engine/security/trust/trust_automation.md
similarity index 100%
rename from docs/security/trust/trust_automation.md
rename to engine/security/trust/trust_automation.md
diff --git a/docs/security/trust/trust_delegation.md b/engine/security/trust/trust_delegation.md
similarity index 100%
rename from docs/security/trust/trust_delegation.md
rename to engine/security/trust/trust_delegation.md
diff --git a/docs/security/trust/trust_key_mng.md b/engine/security/trust/trust_key_mng.md
similarity index 100%
rename from docs/security/trust/trust_key_mng.md
rename to engine/security/trust/trust_key_mng.md
diff --git a/docs/security/trust/trust_sandbox.md b/engine/security/trust/trust_sandbox.md
similarity index 100%
rename from docs/security/trust/trust_sandbox.md
rename to engine/security/trust/trust_sandbox.md
diff --git a/docs/static_files/README.md b/engine/static_files/README.md
similarity index 100%
rename from docs/static_files/README.md
rename to engine/static_files/README.md
diff --git a/docs/static_files/contributors.png b/engine/static_files/contributors.png
similarity index 100%
rename from docs/static_files/contributors.png
rename to engine/static_files/contributors.png
diff --git a/docs/static_files/docker-logo-compressed.png b/engine/static_files/docker-logo-compressed.png
similarity index 100%
rename from docs/static_files/docker-logo-compressed.png
rename to engine/static_files/docker-logo-compressed.png
diff --git a/docs/static_files/docker_pull_chart.png b/engine/static_files/docker_pull_chart.png
similarity index 100%
rename from docs/static_files/docker_pull_chart.png
rename to engine/static_files/docker_pull_chart.png
diff --git a/docs/static_files/docker_push_chart.png b/engine/static_files/docker_push_chart.png
similarity index 100%
rename from docs/static_files/docker_push_chart.png
rename to engine/static_files/docker_push_chart.png
diff --git a/docs/static_files/dockerlogo-v.png b/engine/static_files/dockerlogo-v.png
similarity index 100%
rename from docs/static_files/dockerlogo-v.png
rename to engine/static_files/dockerlogo-v.png
diff --git a/docs/swarm/admin_guide.md b/engine/swarm/admin_guide.md
similarity index 100%
rename from docs/swarm/admin_guide.md
rename to engine/swarm/admin_guide.md
diff --git a/docs/swarm/how-swarm-mode-works/menu.md b/engine/swarm/how-swarm-mode-works/menu.md
similarity index 100%
rename from docs/swarm/how-swarm-mode-works/menu.md
rename to engine/swarm/how-swarm-mode-works/menu.md
diff --git a/docs/swarm/how-swarm-mode-works/nodes.md b/engine/swarm/how-swarm-mode-works/nodes.md
similarity index 100%
rename from docs/swarm/how-swarm-mode-works/nodes.md
rename to engine/swarm/how-swarm-mode-works/nodes.md
diff --git a/docs/swarm/how-swarm-mode-works/pki.md b/engine/swarm/how-swarm-mode-works/pki.md
similarity index 100%
rename from docs/swarm/how-swarm-mode-works/pki.md
rename to engine/swarm/how-swarm-mode-works/pki.md
diff --git a/docs/swarm/how-swarm-mode-works/services.md b/engine/swarm/how-swarm-mode-works/services.md
similarity index 100%
rename from docs/swarm/how-swarm-mode-works/services.md
rename to engine/swarm/how-swarm-mode-works/services.md
diff --git a/docs/swarm/images/ingress-lb.png b/engine/swarm/images/ingress-lb.png
similarity index 100%
rename from docs/swarm/images/ingress-lb.png
rename to engine/swarm/images/ingress-lb.png
diff --git a/docs/swarm/images/ingress-routing-mesh.png b/engine/swarm/images/ingress-routing-mesh.png
similarity index 100%
rename from docs/swarm/images/ingress-routing-mesh.png
rename to engine/swarm/images/ingress-routing-mesh.png
diff --git a/docs/swarm/images/replicated-vs-global.png b/engine/swarm/images/replicated-vs-global.png
similarity index 100%
rename from docs/swarm/images/replicated-vs-global.png
rename to engine/swarm/images/replicated-vs-global.png
diff --git a/docs/swarm/images/service-lifecycle.png b/engine/swarm/images/service-lifecycle.png
similarity index 100%
rename from docs/swarm/images/service-lifecycle.png
rename to engine/swarm/images/service-lifecycle.png
diff --git a/docs/swarm/images/service-vip.png b/engine/swarm/images/service-vip.png
similarity index 100%
rename from docs/swarm/images/service-vip.png
rename to engine/swarm/images/service-vip.png
diff --git a/docs/swarm/images/services-diagram.png b/engine/swarm/images/services-diagram.png
similarity index 100%
rename from docs/swarm/images/services-diagram.png
rename to engine/swarm/images/services-diagram.png
diff --git a/docs/swarm/images/src/ingress-lb.svg b/engine/swarm/images/src/ingress-lb.svg
similarity index 100%
rename from docs/swarm/images/src/ingress-lb.svg
rename to engine/swarm/images/src/ingress-lb.svg
diff --git a/docs/swarm/images/src/ingress-routing-mesh.svg b/engine/swarm/images/src/ingress-routing-mesh.svg
similarity index 100%
rename from docs/swarm/images/src/ingress-routing-mesh.svg
rename to engine/swarm/images/src/ingress-routing-mesh.svg
diff --git a/docs/swarm/images/src/replicated-vs-global.svg b/engine/swarm/images/src/replicated-vs-global.svg
similarity index 100%
rename from docs/swarm/images/src/replicated-vs-global.svg
rename to engine/swarm/images/src/replicated-vs-global.svg
diff --git a/docs/swarm/images/src/service-lifecycle.svg b/engine/swarm/images/src/service-lifecycle.svg
similarity index 100%
rename from docs/swarm/images/src/service-lifecycle.svg
rename to engine/swarm/images/src/service-lifecycle.svg
diff --git a/docs/swarm/images/src/service-vip.svg b/engine/swarm/images/src/service-vip.svg
similarity index 100%
rename from docs/swarm/images/src/service-vip.svg
rename to engine/swarm/images/src/service-vip.svg
diff --git a/docs/swarm/images/src/services-diagram.svg b/engine/swarm/images/src/services-diagram.svg
similarity index 100%
rename from docs/swarm/images/src/services-diagram.svg
rename to engine/swarm/images/src/services-diagram.svg
diff --git a/docs/swarm/images/src/simple-cluster.svg b/engine/swarm/images/src/simple-cluster.svg
similarity index 100%
rename from docs/swarm/images/src/simple-cluster.svg
rename to engine/swarm/images/src/simple-cluster.svg
diff --git a/docs/swarm/images/src/tls.svg b/engine/swarm/images/src/tls.svg
similarity index 100%
rename from docs/swarm/images/src/tls.svg
rename to engine/swarm/images/src/tls.svg
diff --git a/docs/swarm/images/swarm-diagram.png b/engine/swarm/images/swarm-diagram.png
similarity index 100%
rename from docs/swarm/images/swarm-diagram.png
rename to engine/swarm/images/swarm-diagram.png
diff --git a/docs/swarm/images/tls.png b/engine/swarm/images/tls.png
similarity index 100%
rename from docs/swarm/images/tls.png
rename to engine/swarm/images/tls.png
diff --git a/docs/swarm/index.md b/engine/swarm/index.md
similarity index 100%
rename from docs/swarm/index.md
rename to engine/swarm/index.md
diff --git a/docs/swarm/ingress.md b/engine/swarm/ingress.md
similarity index 100%
rename from docs/swarm/ingress.md
rename to engine/swarm/ingress.md
diff --git a/docs/swarm/join-nodes.md b/engine/swarm/join-nodes.md
similarity index 100%
rename from docs/swarm/join-nodes.md
rename to engine/swarm/join-nodes.md
diff --git a/docs/swarm/key-concepts.md b/engine/swarm/key-concepts.md
similarity index 100%
rename from docs/swarm/key-concepts.md
rename to engine/swarm/key-concepts.md
diff --git a/docs/swarm/manage-nodes.md b/engine/swarm/manage-nodes.md
similarity index 100%
rename from docs/swarm/manage-nodes.md
rename to engine/swarm/manage-nodes.md
diff --git a/docs/swarm/menu.md b/engine/swarm/menu.md
similarity index 100%
rename from docs/swarm/menu.md
rename to engine/swarm/menu.md
diff --git a/docs/swarm/networking.md b/engine/swarm/networking.md
similarity index 100%
rename from docs/swarm/networking.md
rename to engine/swarm/networking.md
diff --git a/docs/swarm/raft.md b/engine/swarm/raft.md
similarity index 100%
rename from docs/swarm/raft.md
rename to engine/swarm/raft.md
diff --git a/docs/swarm/services.md b/engine/swarm/services.md
similarity index 100%
rename from docs/swarm/services.md
rename to engine/swarm/services.md
diff --git a/docs/swarm/swarm-mode.md b/engine/swarm/swarm-mode.md
similarity index 100%
rename from docs/swarm/swarm-mode.md
rename to engine/swarm/swarm-mode.md
diff --git a/docs/swarm/swarm-tutorial/add-nodes.md b/engine/swarm/swarm-tutorial/add-nodes.md
similarity index 100%
rename from docs/swarm/swarm-tutorial/add-nodes.md
rename to engine/swarm/swarm-tutorial/add-nodes.md
diff --git a/docs/swarm/swarm-tutorial/create-swarm.md b/engine/swarm/swarm-tutorial/create-swarm.md
similarity index 100%
rename from docs/swarm/swarm-tutorial/create-swarm.md
rename to engine/swarm/swarm-tutorial/create-swarm.md
diff --git a/docs/swarm/swarm-tutorial/delete-service.md b/engine/swarm/swarm-tutorial/delete-service.md
similarity index 100%
rename from docs/swarm/swarm-tutorial/delete-service.md
rename to engine/swarm/swarm-tutorial/delete-service.md
diff --git a/docs/swarm/swarm-tutorial/deploy-service.md b/engine/swarm/swarm-tutorial/deploy-service.md
similarity index 100%
rename from docs/swarm/swarm-tutorial/deploy-service.md
rename to engine/swarm/swarm-tutorial/deploy-service.md
diff --git a/docs/swarm/swarm-tutorial/drain-node.md b/engine/swarm/swarm-tutorial/drain-node.md
similarity index 100%
rename from docs/swarm/swarm-tutorial/drain-node.md
rename to engine/swarm/swarm-tutorial/drain-node.md
diff --git a/docs/swarm/swarm-tutorial/index.md b/engine/swarm/swarm-tutorial/index.md
similarity index 100%
rename from docs/swarm/swarm-tutorial/index.md
rename to engine/swarm/swarm-tutorial/index.md
diff --git a/docs/swarm/swarm-tutorial/inspect-service.md b/engine/swarm/swarm-tutorial/inspect-service.md
similarity index 100%
rename from docs/swarm/swarm-tutorial/inspect-service.md
rename to engine/swarm/swarm-tutorial/inspect-service.md
diff --git a/docs/swarm/swarm-tutorial/menu.md b/engine/swarm/swarm-tutorial/menu.md
similarity index 100%
rename from docs/swarm/swarm-tutorial/menu.md
rename to engine/swarm/swarm-tutorial/menu.md
diff --git a/docs/swarm/swarm-tutorial/rolling-update.md b/engine/swarm/swarm-tutorial/rolling-update.md
similarity index 100%
rename from docs/swarm/swarm-tutorial/rolling-update.md
rename to engine/swarm/swarm-tutorial/rolling-update.md
diff --git a/docs/swarm/swarm-tutorial/scale-service.md b/engine/swarm/swarm-tutorial/scale-service.md
similarity index 100%
rename from docs/swarm/swarm-tutorial/scale-service.md
rename to engine/swarm/swarm-tutorial/scale-service.md
diff --git a/docs/touch-up.sh b/engine/touch-up.sh
similarity index 100%
rename from docs/touch-up.sh
rename to engine/touch-up.sh
diff --git a/docs/tutorials/dockerimages.md b/engine/tutorials/dockerimages.md
similarity index 100%
rename from docs/tutorials/dockerimages.md
rename to engine/tutorials/dockerimages.md
diff --git a/docs/tutorials/dockerizing.md b/engine/tutorials/dockerizing.md
similarity index 100%
rename from docs/tutorials/dockerizing.md
rename to engine/tutorials/dockerizing.md
diff --git a/docs/tutorials/dockerrepos.md b/engine/tutorials/dockerrepos.md
similarity index 100%
rename from docs/tutorials/dockerrepos.md
rename to engine/tutorials/dockerrepos.md
diff --git a/docs/tutorials/dockervolumes.md b/engine/tutorials/dockervolumes.md
similarity index 100%
rename from docs/tutorials/dockervolumes.md
rename to engine/tutorials/dockervolumes.md
diff --git a/docs/tutorials/index.md b/engine/tutorials/index.md
similarity index 100%
rename from docs/tutorials/index.md
rename to engine/tutorials/index.md
diff --git a/docs/tutorials/menu.md b/engine/tutorials/menu.md
similarity index 100%
rename from docs/tutorials/menu.md
rename to engine/tutorials/menu.md
diff --git a/docs/tutorials/networkingcontainers.md b/engine/tutorials/networkingcontainers.md
similarity index 100%
rename from docs/tutorials/networkingcontainers.md
rename to engine/tutorials/networkingcontainers.md
diff --git a/docs/tutorials/search.png b/engine/tutorials/search.png
similarity index 100%
rename from docs/tutorials/search.png
rename to engine/tutorials/search.png
diff --git a/docs/tutorials/usingdocker.md b/engine/tutorials/usingdocker.md
similarity index 100%
rename from docs/tutorials/usingdocker.md
rename to engine/tutorials/usingdocker.md
diff --git a/docs/tutorials/webapp1.png b/engine/tutorials/webapp1.png
similarity index 100%
rename from docs/tutorials/webapp1.png
rename to engine/tutorials/webapp1.png
diff --git a/docs/understanding-docker.md b/engine/understanding-docker.md
similarity index 100%
rename from docs/understanding-docker.md
rename to engine/understanding-docker.md
diff --git a/docs/userguide/eng-image/baseimages.md b/engine/userguide/eng-image/baseimages.md
similarity index 100%
rename from docs/userguide/eng-image/baseimages.md
rename to engine/userguide/eng-image/baseimages.md
diff --git a/docs/userguide/eng-image/dockerfile_best-practices.md b/engine/userguide/eng-image/dockerfile_best-practices.md
similarity index 100%
rename from docs/userguide/eng-image/dockerfile_best-practices.md
rename to engine/userguide/eng-image/dockerfile_best-practices.md
diff --git a/docs/userguide/eng-image/image_management.md b/engine/userguide/eng-image/image_management.md
similarity index 100%
rename from docs/userguide/eng-image/image_management.md
rename to engine/userguide/eng-image/image_management.md
diff --git a/docs/userguide/eng-image/index.md b/engine/userguide/eng-image/index.md
similarity index 100%
rename from docs/userguide/eng-image/index.md
rename to engine/userguide/eng-image/index.md
diff --git a/docs/userguide/index.md b/engine/userguide/index.md
similarity index 100%
rename from docs/userguide/index.md
rename to engine/userguide/index.md
diff --git a/docs/userguide/intro.md b/engine/userguide/intro.md
similarity index 100%
rename from docs/userguide/intro.md
rename to engine/userguide/intro.md
diff --git a/docs/userguide/labels-custom-metadata.md b/engine/userguide/labels-custom-metadata.md
similarity index 100%
rename from docs/userguide/labels-custom-metadata.md
rename to engine/userguide/labels-custom-metadata.md
diff --git a/docs/userguide/networking/configure-dns.md b/engine/userguide/networking/configure-dns.md
similarity index 100%
rename from docs/userguide/networking/configure-dns.md
rename to engine/userguide/networking/configure-dns.md
diff --git a/docs/userguide/networking/default_network/binding.md b/engine/userguide/networking/default_network/binding.md
similarity index 100%
rename from docs/userguide/networking/default_network/binding.md
rename to engine/userguide/networking/default_network/binding.md
diff --git a/docs/userguide/networking/default_network/build-bridges.md b/engine/userguide/networking/default_network/build-bridges.md
similarity index 100%
rename from docs/userguide/networking/default_network/build-bridges.md
rename to engine/userguide/networking/default_network/build-bridges.md
diff --git a/docs/userguide/networking/default_network/configure-dns.md b/engine/userguide/networking/default_network/configure-dns.md
similarity index 100%
rename from docs/userguide/networking/default_network/configure-dns.md
rename to engine/userguide/networking/default_network/configure-dns.md
diff --git a/docs/userguide/networking/default_network/container-communication.md b/engine/userguide/networking/default_network/container-communication.md
similarity index 100%
rename from docs/userguide/networking/default_network/container-communication.md
rename to engine/userguide/networking/default_network/container-communication.md
diff --git a/docs/userguide/networking/default_network/custom-docker0.md b/engine/userguide/networking/default_network/custom-docker0.md
similarity index 100%
rename from docs/userguide/networking/default_network/custom-docker0.md
rename to engine/userguide/networking/default_network/custom-docker0.md
diff --git a/docs/userguide/networking/default_network/dockerlinks.md b/engine/userguide/networking/default_network/dockerlinks.md
similarity index 100%
rename from docs/userguide/networking/default_network/dockerlinks.md
rename to engine/userguide/networking/default_network/dockerlinks.md
diff --git a/docs/userguide/networking/default_network/images/ipv6_basic_host_config.gliffy b/engine/userguide/networking/default_network/images/ipv6_basic_host_config.gliffy
similarity index 100%
rename from docs/userguide/networking/default_network/images/ipv6_basic_host_config.gliffy
rename to engine/userguide/networking/default_network/images/ipv6_basic_host_config.gliffy
diff --git a/docs/userguide/networking/default_network/images/ipv6_basic_host_config.svg b/engine/userguide/networking/default_network/images/ipv6_basic_host_config.svg
similarity index 100%
rename from docs/userguide/networking/default_network/images/ipv6_basic_host_config.svg
rename to engine/userguide/networking/default_network/images/ipv6_basic_host_config.svg
diff --git a/docs/userguide/networking/default_network/images/ipv6_ndp_proxying.gliffy b/engine/userguide/networking/default_network/images/ipv6_ndp_proxying.gliffy
similarity index 100%
rename from docs/userguide/networking/default_network/images/ipv6_ndp_proxying.gliffy
rename to engine/userguide/networking/default_network/images/ipv6_ndp_proxying.gliffy
diff --git a/docs/userguide/networking/default_network/images/ipv6_ndp_proxying.svg b/engine/userguide/networking/default_network/images/ipv6_ndp_proxying.svg
similarity index 100%
rename from docs/userguide/networking/default_network/images/ipv6_ndp_proxying.svg
rename to engine/userguide/networking/default_network/images/ipv6_ndp_proxying.svg
diff --git a/docs/userguide/networking/default_network/images/ipv6_routed_network_example.gliffy b/engine/userguide/networking/default_network/images/ipv6_routed_network_example.gliffy
similarity index 100%
rename from docs/userguide/networking/default_network/images/ipv6_routed_network_example.gliffy
rename to engine/userguide/networking/default_network/images/ipv6_routed_network_example.gliffy
diff --git a/docs/userguide/networking/default_network/images/ipv6_routed_network_example.svg b/engine/userguide/networking/default_network/images/ipv6_routed_network_example.svg
similarity index 100%
rename from docs/userguide/networking/default_network/images/ipv6_routed_network_example.svg
rename to engine/userguide/networking/default_network/images/ipv6_routed_network_example.svg
diff --git a/docs/userguide/networking/default_network/images/ipv6_slash64_subnet_config.gliffy b/engine/userguide/networking/default_network/images/ipv6_slash64_subnet_config.gliffy
similarity index 100%
rename from docs/userguide/networking/default_network/images/ipv6_slash64_subnet_config.gliffy
rename to engine/userguide/networking/default_network/images/ipv6_slash64_subnet_config.gliffy
diff --git a/docs/userguide/networking/default_network/images/ipv6_slash64_subnet_config.svg b/engine/userguide/networking/default_network/images/ipv6_slash64_subnet_config.svg
similarity index 100%
rename from docs/userguide/networking/default_network/images/ipv6_slash64_subnet_config.svg
rename to engine/userguide/networking/default_network/images/ipv6_slash64_subnet_config.svg
diff --git a/docs/userguide/networking/default_network/images/ipv6_switched_network_example.gliffy b/engine/userguide/networking/default_network/images/ipv6_switched_network_example.gliffy
similarity index 100%
rename from docs/userguide/networking/default_network/images/ipv6_switched_network_example.gliffy
rename to engine/userguide/networking/default_network/images/ipv6_switched_network_example.gliffy
diff --git a/docs/userguide/networking/default_network/images/ipv6_switched_network_example.svg b/engine/userguide/networking/default_network/images/ipv6_switched_network_example.svg
similarity index 100%
rename from docs/userguide/networking/default_network/images/ipv6_switched_network_example.svg
rename to engine/userguide/networking/default_network/images/ipv6_switched_network_example.svg
diff --git a/docs/userguide/networking/default_network/index.md b/engine/userguide/networking/default_network/index.md
similarity index 100%
rename from docs/userguide/networking/default_network/index.md
rename to engine/userguide/networking/default_network/index.md
diff --git a/docs/userguide/networking/default_network/ipv6.md b/engine/userguide/networking/default_network/ipv6.md
similarity index 100%
rename from docs/userguide/networking/default_network/ipv6.md
rename to engine/userguide/networking/default_network/ipv6.md
diff --git a/docs/userguide/networking/get-started-macvlan.md b/engine/userguide/networking/get-started-macvlan.md
similarity index 100%
rename from docs/userguide/networking/get-started-macvlan.md
rename to engine/userguide/networking/get-started-macvlan.md
diff --git a/docs/userguide/networking/get-started-overlay.md b/engine/userguide/networking/get-started-overlay.md
similarity index 100%
rename from docs/userguide/networking/get-started-overlay.md
rename to engine/userguide/networking/get-started-overlay.md
diff --git a/docs/userguide/networking/images/bridge_network.gliffy b/engine/userguide/networking/images/bridge_network.gliffy
similarity index 100%
rename from docs/userguide/networking/images/bridge_network.gliffy
rename to engine/userguide/networking/images/bridge_network.gliffy
diff --git a/docs/userguide/networking/images/bridge_network.png b/engine/userguide/networking/images/bridge_network.png
similarity index 100%
rename from docs/userguide/networking/images/bridge_network.png
rename to engine/userguide/networking/images/bridge_network.png
diff --git a/docs/userguide/networking/images/bridge_network.svg b/engine/userguide/networking/images/bridge_network.svg
similarity index 100%
rename from docs/userguide/networking/images/bridge_network.svg
rename to engine/userguide/networking/images/bridge_network.svg
diff --git a/docs/userguide/networking/images/engine_on_net.gliffy b/engine/userguide/networking/images/engine_on_net.gliffy
similarity index 100%
rename from docs/userguide/networking/images/engine_on_net.gliffy
rename to engine/userguide/networking/images/engine_on_net.gliffy
diff --git a/docs/userguide/networking/images/engine_on_net.png b/engine/userguide/networking/images/engine_on_net.png
similarity index 100%
rename from docs/userguide/networking/images/engine_on_net.png
rename to engine/userguide/networking/images/engine_on_net.png
diff --git a/docs/userguide/networking/images/engine_on_net.svg b/engine/userguide/networking/images/engine_on_net.svg
similarity index 100%
rename from docs/userguide/networking/images/engine_on_net.svg
rename to engine/userguide/networking/images/engine_on_net.svg
diff --git a/docs/userguide/networking/images/key_value.gliffy b/engine/userguide/networking/images/key_value.gliffy
similarity index 100%
rename from docs/userguide/networking/images/key_value.gliffy
rename to engine/userguide/networking/images/key_value.gliffy
diff --git a/docs/userguide/networking/images/key_value.png b/engine/userguide/networking/images/key_value.png
similarity index 100%
rename from docs/userguide/networking/images/key_value.png
rename to engine/userguide/networking/images/key_value.png
diff --git a/docs/userguide/networking/images/key_value.svg b/engine/userguide/networking/images/key_value.svg
similarity index 100%
rename from docs/userguide/networking/images/key_value.svg
rename to engine/userguide/networking/images/key_value.svg
diff --git a/docs/userguide/networking/images/macvlan-bridge-ipvlan-l2.gliffy b/engine/userguide/networking/images/macvlan-bridge-ipvlan-l2.gliffy
similarity index 100%
rename from docs/userguide/networking/images/macvlan-bridge-ipvlan-l2.gliffy
rename to engine/userguide/networking/images/macvlan-bridge-ipvlan-l2.gliffy
diff --git a/docs/userguide/networking/images/macvlan-bridge-ipvlan-l2.png b/engine/userguide/networking/images/macvlan-bridge-ipvlan-l2.png
similarity index 100%
rename from docs/userguide/networking/images/macvlan-bridge-ipvlan-l2.png
rename to engine/userguide/networking/images/macvlan-bridge-ipvlan-l2.png
diff --git a/docs/userguide/networking/images/macvlan-bridge-ipvlan-l2.svg b/engine/userguide/networking/images/macvlan-bridge-ipvlan-l2.svg
similarity index 100%
rename from docs/userguide/networking/images/macvlan-bridge-ipvlan-l2.svg
rename to engine/userguide/networking/images/macvlan-bridge-ipvlan-l2.svg
diff --git a/docs/userguide/networking/images/macvlan_bridge_simple.gliffy b/engine/userguide/networking/images/macvlan_bridge_simple.gliffy
similarity index 100%
rename from docs/userguide/networking/images/macvlan_bridge_simple.gliffy
rename to engine/userguide/networking/images/macvlan_bridge_simple.gliffy
diff --git a/docs/userguide/networking/images/macvlan_bridge_simple.png b/engine/userguide/networking/images/macvlan_bridge_simple.png
similarity index 100%
rename from docs/userguide/networking/images/macvlan_bridge_simple.png
rename to engine/userguide/networking/images/macvlan_bridge_simple.png
diff --git a/docs/userguide/networking/images/macvlan_bridge_simple.svg b/engine/userguide/networking/images/macvlan_bridge_simple.svg
similarity index 100%
rename from docs/userguide/networking/images/macvlan_bridge_simple.svg
rename to engine/userguide/networking/images/macvlan_bridge_simple.svg
diff --git a/docs/userguide/networking/images/multi_tenant_8021q_vlans.gliffy b/engine/userguide/networking/images/multi_tenant_8021q_vlans.gliffy
similarity index 100%
rename from docs/userguide/networking/images/multi_tenant_8021q_vlans.gliffy
rename to engine/userguide/networking/images/multi_tenant_8021q_vlans.gliffy
diff --git a/docs/userguide/networking/images/multi_tenant_8021q_vlans.png b/engine/userguide/networking/images/multi_tenant_8021q_vlans.png
similarity index 100%
rename from docs/userguide/networking/images/multi_tenant_8021q_vlans.png
rename to engine/userguide/networking/images/multi_tenant_8021q_vlans.png
diff --git a/docs/userguide/networking/images/multi_tenant_8021q_vlans.svg b/engine/userguide/networking/images/multi_tenant_8021q_vlans.svg
similarity index 100%
rename from docs/userguide/networking/images/multi_tenant_8021q_vlans.svg
rename to engine/userguide/networking/images/multi_tenant_8021q_vlans.svg
diff --git a/docs/userguide/networking/images/network_access.gliffy b/engine/userguide/networking/images/network_access.gliffy
similarity index 100%
rename from docs/userguide/networking/images/network_access.gliffy
rename to engine/userguide/networking/images/network_access.gliffy
diff --git a/docs/userguide/networking/images/network_access.png b/engine/userguide/networking/images/network_access.png
similarity index 100%
rename from docs/userguide/networking/images/network_access.png
rename to engine/userguide/networking/images/network_access.png
diff --git a/docs/userguide/networking/images/network_access.svg b/engine/userguide/networking/images/network_access.svg
similarity index 100%
rename from docs/userguide/networking/images/network_access.svg
rename to engine/userguide/networking/images/network_access.svg
diff --git a/docs/userguide/networking/images/overlay-network-final.gliffy b/engine/userguide/networking/images/overlay-network-final.gliffy
similarity index 100%
rename from docs/userguide/networking/images/overlay-network-final.gliffy
rename to engine/userguide/networking/images/overlay-network-final.gliffy
diff --git a/docs/userguide/networking/images/overlay-network-final.png b/engine/userguide/networking/images/overlay-network-final.png
similarity index 100%
rename from docs/userguide/networking/images/overlay-network-final.png
rename to engine/userguide/networking/images/overlay-network-final.png
diff --git a/docs/userguide/networking/images/overlay-network-final.svg b/engine/userguide/networking/images/overlay-network-final.svg
similarity index 100%
rename from docs/userguide/networking/images/overlay-network-final.svg
rename to engine/userguide/networking/images/overlay-network-final.svg
diff --git a/docs/userguide/networking/images/overlay_network.gliffy b/engine/userguide/networking/images/overlay_network.gliffy
similarity index 100%
rename from docs/userguide/networking/images/overlay_network.gliffy
rename to engine/userguide/networking/images/overlay_network.gliffy
diff --git a/docs/userguide/networking/images/overlay_network.png b/engine/userguide/networking/images/overlay_network.png
similarity index 100%
rename from docs/userguide/networking/images/overlay_network.png
rename to engine/userguide/networking/images/overlay_network.png
diff --git a/docs/userguide/networking/images/overlay_network.svg b/engine/userguide/networking/images/overlay_network.svg
similarity index 100%
rename from docs/userguide/networking/images/overlay_network.svg
rename to engine/userguide/networking/images/overlay_network.svg
diff --git a/docs/userguide/networking/images/working.gliffy b/engine/userguide/networking/images/working.gliffy
similarity index 100%
rename from docs/userguide/networking/images/working.gliffy
rename to engine/userguide/networking/images/working.gliffy
diff --git a/docs/userguide/networking/images/working.png b/engine/userguide/networking/images/working.png
similarity index 100%
rename from docs/userguide/networking/images/working.png
rename to engine/userguide/networking/images/working.png
diff --git a/docs/userguide/networking/images/working.svg b/engine/userguide/networking/images/working.svg
similarity index 100%
rename from docs/userguide/networking/images/working.svg
rename to engine/userguide/networking/images/working.svg
diff --git a/docs/userguide/networking/index.md b/engine/userguide/networking/index.md
similarity index 100%
rename from docs/userguide/networking/index.md
rename to engine/userguide/networking/index.md
diff --git a/docs/userguide/networking/menu.md b/engine/userguide/networking/menu.md
similarity index 100%
rename from docs/userguide/networking/menu.md
rename to engine/userguide/networking/menu.md
diff --git a/docs/userguide/networking/overlay-security-model.md b/engine/userguide/networking/overlay-security-model.md
similarity index 100%
rename from docs/userguide/networking/overlay-security-model.md
rename to engine/userguide/networking/overlay-security-model.md
diff --git a/docs/userguide/networking/work-with-networks.md b/engine/userguide/networking/work-with-networks.md
similarity index 100%
rename from docs/userguide/networking/work-with-networks.md
rename to engine/userguide/networking/work-with-networks.md
diff --git a/docs/userguide/storagedriver/aufs-driver.md b/engine/userguide/storagedriver/aufs-driver.md
similarity index 100%
rename from docs/userguide/storagedriver/aufs-driver.md
rename to engine/userguide/storagedriver/aufs-driver.md
diff --git a/docs/userguide/storagedriver/btrfs-driver.md b/engine/userguide/storagedriver/btrfs-driver.md
similarity index 100%
rename from docs/userguide/storagedriver/btrfs-driver.md
rename to engine/userguide/storagedriver/btrfs-driver.md
diff --git a/docs/userguide/storagedriver/device-mapper-driver.md b/engine/userguide/storagedriver/device-mapper-driver.md
similarity index 100%
rename from docs/userguide/storagedriver/device-mapper-driver.md
rename to engine/userguide/storagedriver/device-mapper-driver.md
diff --git a/docs/userguide/storagedriver/images/aufs_delete.jpg b/engine/userguide/storagedriver/images/aufs_delete.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/aufs_delete.jpg
rename to engine/userguide/storagedriver/images/aufs_delete.jpg
diff --git a/docs/userguide/storagedriver/images/aufs_layers.jpg b/engine/userguide/storagedriver/images/aufs_layers.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/aufs_layers.jpg
rename to engine/userguide/storagedriver/images/aufs_layers.jpg
diff --git a/docs/userguide/storagedriver/images/aufs_metadata.jpg b/engine/userguide/storagedriver/images/aufs_metadata.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/aufs_metadata.jpg
rename to engine/userguide/storagedriver/images/aufs_metadata.jpg
diff --git a/docs/userguide/storagedriver/images/base_device.jpg b/engine/userguide/storagedriver/images/base_device.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/base_device.jpg
rename to engine/userguide/storagedriver/images/base_device.jpg
diff --git a/docs/userguide/storagedriver/images/btfs_constructs.jpg b/engine/userguide/storagedriver/images/btfs_constructs.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/btfs_constructs.jpg
rename to engine/userguide/storagedriver/images/btfs_constructs.jpg
diff --git a/docs/userguide/storagedriver/images/btfs_container_layer.jpg b/engine/userguide/storagedriver/images/btfs_container_layer.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/btfs_container_layer.jpg
rename to engine/userguide/storagedriver/images/btfs_container_layer.jpg
diff --git a/docs/userguide/storagedriver/images/btfs_layers.png b/engine/userguide/storagedriver/images/btfs_layers.png
similarity index 100%
rename from docs/userguide/storagedriver/images/btfs_layers.png
rename to engine/userguide/storagedriver/images/btfs_layers.png
diff --git a/docs/userguide/storagedriver/images/btfs_pool.jpg b/engine/userguide/storagedriver/images/btfs_pool.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/btfs_pool.jpg
rename to engine/userguide/storagedriver/images/btfs_pool.jpg
diff --git a/docs/userguide/storagedriver/images/btfs_snapshots.jpg b/engine/userguide/storagedriver/images/btfs_snapshots.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/btfs_snapshots.jpg
rename to engine/userguide/storagedriver/images/btfs_snapshots.jpg
diff --git a/docs/userguide/storagedriver/images/btfs_subvolume.jpg b/engine/userguide/storagedriver/images/btfs_subvolume.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/btfs_subvolume.jpg
rename to engine/userguide/storagedriver/images/btfs_subvolume.jpg
diff --git a/docs/userguide/storagedriver/images/container-layers-cas.jpg b/engine/userguide/storagedriver/images/container-layers-cas.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/container-layers-cas.jpg
rename to engine/userguide/storagedriver/images/container-layers-cas.jpg
diff --git a/docs/userguide/storagedriver/images/container-layers.jpg b/engine/userguide/storagedriver/images/container-layers.jpg
similarity index 100%
rename from docs/userguide/storagedriver/images/container-layers.jpg
rename to engine/userguide/storagedriver/images/container-layers.jpg
diff --git
a/docs/userguide/storagedriver/images/dm_container.jpg b/engine/userguide/storagedriver/images/dm_container.jpg similarity index 100% rename from docs/userguide/storagedriver/images/dm_container.jpg rename to engine/userguide/storagedriver/images/dm_container.jpg diff --git a/docs/userguide/storagedriver/images/driver-pros-cons.png b/engine/userguide/storagedriver/images/driver-pros-cons.png similarity index 100% rename from docs/userguide/storagedriver/images/driver-pros-cons.png rename to engine/userguide/storagedriver/images/driver-pros-cons.png diff --git a/docs/userguide/storagedriver/images/image-layers.jpg b/engine/userguide/storagedriver/images/image-layers.jpg similarity index 100% rename from docs/userguide/storagedriver/images/image-layers.jpg rename to engine/userguide/storagedriver/images/image-layers.jpg diff --git a/docs/userguide/storagedriver/images/lsblk-diagram.jpg b/engine/userguide/storagedriver/images/lsblk-diagram.jpg similarity index 100% rename from docs/userguide/storagedriver/images/lsblk-diagram.jpg rename to engine/userguide/storagedriver/images/lsblk-diagram.jpg diff --git a/docs/userguide/storagedriver/images/overlay_constructs.jpg b/engine/userguide/storagedriver/images/overlay_constructs.jpg similarity index 100% rename from docs/userguide/storagedriver/images/overlay_constructs.jpg rename to engine/userguide/storagedriver/images/overlay_constructs.jpg diff --git a/docs/userguide/storagedriver/images/overlay_constructs2.jpg b/engine/userguide/storagedriver/images/overlay_constructs2.jpg similarity index 100% rename from docs/userguide/storagedriver/images/overlay_constructs2.jpg rename to engine/userguide/storagedriver/images/overlay_constructs2.jpg diff --git a/docs/userguide/storagedriver/images/saving-space.jpg b/engine/userguide/storagedriver/images/saving-space.jpg similarity index 100% rename from docs/userguide/storagedriver/images/saving-space.jpg rename to engine/userguide/storagedriver/images/saving-space.jpg diff --git a/docs/userguide/storagedriver/images/shared-uuid.jpg b/engine/userguide/storagedriver/images/shared-uuid.jpg similarity index 100% rename from docs/userguide/storagedriver/images/shared-uuid.jpg rename to engine/userguide/storagedriver/images/shared-uuid.jpg diff --git a/docs/userguide/storagedriver/images/shared-volume.jpg b/engine/userguide/storagedriver/images/shared-volume.jpg similarity index 100% rename from docs/userguide/storagedriver/images/shared-volume.jpg rename to engine/userguide/storagedriver/images/shared-volume.jpg diff --git a/docs/userguide/storagedriver/images/sharing-layers.jpg b/engine/userguide/storagedriver/images/sharing-layers.jpg similarity index 100% rename from docs/userguide/storagedriver/images/sharing-layers.jpg rename to engine/userguide/storagedriver/images/sharing-layers.jpg diff --git a/docs/userguide/storagedriver/images/two_dm_container.jpg b/engine/userguide/storagedriver/images/two_dm_container.jpg similarity index 100% rename from docs/userguide/storagedriver/images/two_dm_container.jpg rename to engine/userguide/storagedriver/images/two_dm_container.jpg diff --git a/docs/userguide/storagedriver/images/zfs_clones.jpg b/engine/userguide/storagedriver/images/zfs_clones.jpg similarity index 100% rename from docs/userguide/storagedriver/images/zfs_clones.jpg rename to engine/userguide/storagedriver/images/zfs_clones.jpg diff --git a/docs/userguide/storagedriver/images/zfs_zpool.jpg b/engine/userguide/storagedriver/images/zfs_zpool.jpg similarity index 100% rename from 
diff --git a/docs/userguide/storagedriver/images/zpool_blocks.jpg b/engine/userguide/storagedriver/images/zpool_blocks.jpg similarity index 100% rename from docs/userguide/storagedriver/images/zpool_blocks.jpg rename to engine/userguide/storagedriver/images/zpool_blocks.jpg
diff --git a/docs/userguide/storagedriver/imagesandcontainers.md b/engine/userguide/storagedriver/imagesandcontainers.md similarity index 100% rename from docs/userguide/storagedriver/imagesandcontainers.md rename to engine/userguide/storagedriver/imagesandcontainers.md
diff --git a/docs/userguide/storagedriver/index.md b/engine/userguide/storagedriver/index.md similarity index 100% rename from docs/userguide/storagedriver/index.md rename to engine/userguide/storagedriver/index.md
diff --git a/docs/userguide/storagedriver/overlayfs-driver.md b/engine/userguide/storagedriver/overlayfs-driver.md similarity index 100% rename from docs/userguide/storagedriver/overlayfs-driver.md rename to engine/userguide/storagedriver/overlayfs-driver.md
diff --git a/docs/userguide/storagedriver/selectadriver.md b/engine/userguide/storagedriver/selectadriver.md similarity index 100% rename from docs/userguide/storagedriver/selectadriver.md rename to engine/userguide/storagedriver/selectadriver.md
diff --git a/docs/userguide/storagedriver/zfs-driver.md b/engine/userguide/storagedriver/zfs-driver.md similarity index 100% rename from docs/userguide/storagedriver/zfs-driver.md rename to engine/userguide/storagedriver/zfs-driver.md
diff --git a/errors/errors.go b/errors/errors.go
deleted file mode 100644
index 29fd2545dc..0000000000
--- a/errors/errors.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package errors
-
-import "net/http"
-
-// apiError is an error wrapper that also
-// holds information about response status codes.
-type apiError struct {
-	error
-	statusCode int
-}
-
-// HTTPErrorStatusCode returns a status code.
-func (e apiError) HTTPErrorStatusCode() int {
-	return e.statusCode
-}
-
-// NewErrorWithStatusCode allows you to associate
-// a specific HTTP Status Code to an error.
-// The Server will take that code and set
-// it as the response status.
-func NewErrorWithStatusCode(err error, code int) error {
-	return apiError{err, code}
-}
-
-// NewBadRequestError creates a new API error
-// that has the 400 HTTP status code associated to it.
-func NewBadRequestError(err error) error {
-	return NewErrorWithStatusCode(err, http.StatusBadRequest)
-}
-
-// NewRequestForbiddenError creates a new API error
-// that has the 403 HTTP status code associated to it.
-func NewRequestForbiddenError(err error) error {
-	return NewErrorWithStatusCode(err, http.StatusForbidden)
-}
-
-// NewRequestNotFoundError creates a new API error
-// that has the 404 HTTP status code associated to it.
-func NewRequestNotFoundError(err error) error {
-	return NewErrorWithStatusCode(err, http.StatusNotFound)
-}
-
-// NewRequestConflictError creates a new API error
-// that has the 409 HTTP status code associated to it.
-func NewRequestConflictError(err error) error {
-	return NewErrorWithStatusCode(err, http.StatusConflict)
-}
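The deleted `errors` package above is a small API for tagging an error with the HTTP status code a server should respond with. A minimal usage sketch follows; the import path is assumed from the file's location in this tree (`github.com/docker/docker/errors`), and the sketch itself is illustrative, not part of the patch:

```go
package main

import (
	"fmt"

	derr "github.com/docker/docker/errors" // assumed import path for errors/errors.go
)

// httpStatusCoder is what the helpers effectively return: an error
// that also reports an HTTP status code (matching HTTPErrorStatusCode
// on the package's unexported apiError wrapper).
type httpStatusCoder interface {
	error
	HTTPErrorStatusCode() int
}

func main() {
	// Wrap a plain error so a 404 travels with it.
	err := derr.NewRequestNotFoundError(fmt.Errorf("no such container: abc123"))

	// A server handler can recover the intended response status.
	if sc, ok := err.(httpStatusCoder); ok {
		fmt.Println(sc.HTTPErrorStatusCode(), sc.Error()) // 404 no such container: abc123
	}
}
```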
diff --git a/experimental/README.md b/experimental/README.md
deleted file mode 100644
index 837c8c0a56..0000000000
--- a/experimental/README.md
+++ /dev/null
@@ -1,82 +0,0 @@
-# Docker Experimental Features
-
-This page contains a list of features in the Docker engine which are
-experimental. Experimental features are **not** ready for production. They are
-provided for test and evaluation in your sandbox environments.
-
-The information below describes each feature and the GitHub pull requests and
-issues associated with it. If necessary, links are provided to additional
-documentation on an issue. As an active Docker user and community member,
-please feel free to provide feedback on these features.
-
-## Install Docker experimental
-
-Unlike the regular Docker binary, the experimental channel is built and
-updated nightly on https://experimental.docker.com. From one day to the
-next, new features may appear, while existing experimental features may be
-refined or entirely removed.
-
-1. Verify that you have `curl` installed.
-
-       $ which curl
-
-   If `curl` isn't installed, install it after updating your package manager:
-
-       $ sudo apt-get update
-       $ sudo apt-get install curl
-
-2. Get the latest Docker package.
-
-       $ curl -sSL https://experimental.docker.com/ | sh
-
-   The system prompts you for your `sudo` password. Then, it downloads and
-   installs Docker and its dependencies.
-
-   >**Note**: If your company is behind a filtering proxy, you may find that the
-   >`apt-key`
-   >command fails for the Docker repo during installation. To work around this,
-   >add the key directly using the following:
-   >
-   >    $ curl -sSL https://experimental.docker.com/gpg | sudo apt-key add -
-
-3. Verify `docker` is installed correctly.
-
-       $ sudo docker run hello-world
-
-   This command downloads a test image and runs it in a container.
-
-### Get the Linux binary
-To download the latest experimental `docker` binary for Linux,
-use the following URLs:
-
-    https://experimental.docker.com/builds/Linux/i386/docker-latest.tgz
-
-    https://experimental.docker.com/builds/Linux/x86_64/docker-latest.tgz
-
-After downloading the appropriate binary, you can follow the instructions
-[here](https://docs.docker.com/installation/binaries/#get-the-docker-binary) to run the `docker` daemon.
-
-> **Note**
->
-> 1) You can get the MD5 and SHA256 hashes by appending .md5 and .sha256 to the URLs respectively
->
-> 2) You can get the compressed binaries by appending .tgz to the URLs
-
-### Build an experimental binary
-You can also build the experimental binary from the standard development environment by adding
-`DOCKER_EXPERIMENTAL=1` to the environment where you run `make` to build Docker binaries. For example,
-to build a Docker binary with the experimental features enabled:
-
-    $ DOCKER_EXPERIMENTAL=1 make binary
-
-## Current experimental features
-
- * [External graphdriver plugins](plugins_graphdriver.md)
- * [Ipvlan Network Drivers](vlan-networks.md)
- * [Docker Stacks and Distributed Application Bundles](docker-stacks-and-bundles.md)
-
-## How to comment on an experimental feature
-
-Each feature's documentation includes a list of the proposal pull requests (PRs) associated with the feature. If you want to comment on or suggest a change to a feature, please add it to the existing feature PR.
-
-Issues or problems with a feature? Inquire for help on the `#docker` IRC channel or on the [Docker Google group](https://groups.google.com/forum/#!forum/docker-user).
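The README above notes that each build URL has `.md5` and `.sha256` companions. As a minimal sketch of putting that to use, here is an illustrative Go program that re-hashes a previously downloaded `docker-latest.tgz` and compares it against the published digest. The `<hex digest> <filename>` layout of the `.sha256` file is an assumption, as is the program itself; neither comes from the patch:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"os"
	"strings"
)

func main() {
	const url = "https://experimental.docker.com/builds/Linux/x86_64/docker-latest.tgz"

	// Fetch the published digest (assumed format: "<hex digest>  <filename>").
	resp, err := http.Get(url + ".sha256")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	published, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fields := strings.Fields(string(published))
	if len(fields) == 0 {
		panic("empty .sha256 file")
	}
	want := fields[0]

	// Hash the archive that was downloaded earlier.
	f, err := os.Open("docker-latest.tgz")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		panic(err)
	}
	got := hex.EncodeToString(h.Sum(nil))

	if got == want {
		fmt.Println("OK: checksum matches")
	} else {
		fmt.Printf("MISMATCH: got %s, want %s\n", got, want)
	}
}
```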
diff --git a/experimental/docker-stacks-and-bundles.md b/experimental/docker-stacks-and-bundles.md
deleted file mode 100644
index b56d0e5c8d..0000000000
--- a/experimental/docker-stacks-and-bundles.md
+++ /dev/null
@@ -1,183 +0,0 @@
-# Docker Stacks and Distributed Application Bundles
-
-## Overview
-
-Docker Stacks and Distributed Application Bundles are experimental features
-introduced in Docker 1.12 and Docker Compose 1.8, alongside the concept of
-swarm mode, and Nodes and Services in the Engine API.
-
-A Dockerfile can be built into an image, and containers can be created from
-that image. Similarly, a docker-compose.yml can be built into a **distributed
-application bundle**, and **stacks** can be created from that bundle. In that
-sense, the bundle is a multi-service distributable image format.
-
-As of Docker 1.12 and Compose 1.8, the features are experimental. Neither
-Docker Engine nor the Docker Registry supports distribution of bundles.
-
-## Producing a bundle
-
-The easiest way to produce a bundle is to generate it using `docker-compose`
-from an existing `docker-compose.yml`. Of course, that's just *one* possible way
-to proceed, in the same way that `docker build` isn't the only way to produce a
-Docker image.
-
-From `docker-compose`:
-
-```bash
-$ docker-compose bundle
-WARNING: Unsupported key 'network_mode' in services.nsqd - ignoring
-WARNING: Unsupported key 'links' in services.nsqd - ignoring
-WARNING: Unsupported key 'volumes' in services.nsqd - ignoring
-[...]
-Wrote bundle to vossibility-stack.dab
-```
-
-## Creating a stack from a bundle
-
-A stack is created using the `docker deploy` command:
-
-```bash
-# docker deploy --help
-
-Usage:  docker deploy [OPTIONS] STACK
-
-Create and update a stack from a Distributed Application Bundle (DAB)
-
-Options:
-      --file string          Path to a Distributed Application Bundle file (Default: STACK.dab)
-      --help                 Print usage
-      --with-registry-auth   Send registry authentication details to Swarm agents
-```
-
-Let's deploy the stack created before:
-
-```bash
-# docker deploy vossibility-stack
-Loading bundle from vossibility-stack.dab
-Creating service vossibility-stack_elasticsearch
-Creating service vossibility-stack_kibana
-Creating service vossibility-stack_logstash
-Creating service vossibility-stack_lookupd
-Creating service vossibility-stack_nsqd
-Creating service vossibility-stack_vossibility-collector
-```
-
-We can verify that services were correctly created:
-
-```bash
-# docker service ls
-ID            NAME                                      REPLICAS  IMAGE                                                                                COMMAND
-29bv0vnlm903  vossibility-stack_lookupd                 1         nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662    /nsqlookupd
-4awt47624qwh  vossibility-stack_nsqd                    1         nsqio/nsq@sha256:eeba05599f31eba418e96e71e0984c3dc96963ceb66924dd37a47bf7ce18a662    /nsqd --data-path=/data --lookupd-tcp-address=lookupd:4160
-4tjx9biia6fs  vossibility-stack_elasticsearch           1         elasticsearch@sha256:12ac7c6af55d001f71800b83ba91a04f716e58d82e748fa6e5a7359eed2301aa
-7563uuzr9eys  vossibility-stack_kibana                  1         kibana@sha256:6995a2d25709a62694a937b8a529ff36da92ebee74bafd7bf00e6caf6db2eb03
-9gc5m4met4he  vossibility-stack_logstash                1         logstash@sha256:2dc8bddd1bb4a5a34e8ebaf73749f6413c101b2edef6617f2f7713926d2141fe     logstash -f /etc/logstash/conf.d/logstash.conf
-axqh55ipl40h  vossibility-stack_vossibility-collector   1         icecrime/vossibility-collector@sha256:f03f2977203ba6253988c18d04061c5ec7aab46bca9dfd89a9a1fa4500989fba   --config /config/config.toml --debug
-```
-
-## Managing stacks
-
-Stacks are managed using the `docker stack` command:
-
-```bash
-# docker stack --help
-
-Usage:  docker stack COMMAND
-
-Manage Docker stacks
-
-Options:
-      --help   Print usage
-
-Commands:
-  config   Print the stack configuration
-  deploy   Create and update a stack
-  rm       Remove the stack
-  tasks    List the tasks in the stack
-
-Run 'docker stack COMMAND --help' for more information on a command.
-```
-
-## Bundle file format
-
-Distributed application bundles are described in a JSON format. When bundles
-are persisted as files, the file extension is `.dab` (Docker 1.12RC2 tools use
-`.dsb` for the file extension; this will be updated in the next client release).
-
-A bundle has two top-level fields: `version` and `services`. The version used
-by Docker 1.12 tools is `0.1`.
-
-`services` in the bundle are the services that comprise the app. They
-correspond to the new `Service` object introduced in the 1.12 Docker Engine API.
-
-A service has the following fields:
-
- * **Image** (required, `string`): the image that the service will run. Docker
-   images should be referenced with full content hash to fully specify the
-   deployment artifact for the service. Example:
-   `postgres@sha256:f76245b04ddbcebab5bb6c28e76947f49222c99fec4aadb0bb1c24821a9e83ef`
- * **Command** (`[]string`): command to run in service containers.
- * **Args** (`[]string`): arguments passed to the service containers.
- * **Env** (`[]string`): environment variables.
- * **Labels** (`map[string]string`): labels used for setting metadata on services.
- * **Ports** (`[]Port`): service ports (composed of `Port` (`int`) and
-   `Protocol` (`string`)). A service description can only specify the container
-   port to be exposed. These ports can be mapped on runtime hosts at the
-   operator's discretion.
- * **WorkingDir** (`string`): working directory inside the service containers.
- * **User** (`string`): username or UID (format: `<name|uid>[:<group|gid>]`).
- * **Networks** (`[]string`): networks that the service containers should be
-   connected to. An entity deploying a bundle should create networks as needed.
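To make the field list above concrete, here is a hedged Go sketch of the same structure and a minimal bundle built from it. The field names and types are taken from the list; the type names (`Bundle`, `Service`, `Port`), the JSON key casing, and the sample service are illustrative assumptions, not the Engine's own definitions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Port and Service mirror the field list above; names are illustrative.
type Port struct {
	Port     int    `json:"Port"`
	Protocol string `json:"Protocol"`
}

type Service struct {
	Image      string            `json:"Image"`
	Command    []string          `json:"Command,omitempty"`
	Args       []string          `json:"Args,omitempty"`
	Env        []string          `json:"Env,omitempty"`
	Labels     map[string]string `json:"Labels,omitempty"`
	Ports      []Port            `json:"Ports,omitempty"`
	WorkingDir string            `json:"WorkingDir,omitempty"`
	User       string            `json:"User,omitempty"`
	Networks   []string          `json:"Networks,omitempty"`
}

// Bundle carries the two top-level fields described earlier:
// a version and the map of services that comprise the app.
type Bundle struct {
	Version  string             `json:"Version"`
	Services map[string]Service `json:"Services"`
}

func main() {
	b := Bundle{
		Version: "0.1",
		Services: map[string]Service{
			"db": {
				// Full content hash, as the field list recommends.
				Image: "postgres@sha256:f76245b04ddbcebab5bb6c28e76947f49222c99fec4aadb0bb1c24821a9e83ef",
				Ports: []Port{{Port: 5432, Protocol: "tcp"}},
			},
		},
	}
	out, err := json.MarshalIndent(b, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // roughly the shape of a minimal .dab file
}
```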
- diff --git a/experimental/images/ipvlan-l3.gliffy b/experimental/images/ipvlan-l3.gliffy deleted file mode 100644 index bf0512af76..0000000000 --- a/experimental/images/ipvlan-l3.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":447,"height":422,"nodeIndex":326,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":9,"y":10.461511948529278},"max":{"x":447,"y":421.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":12.0,"y":200.0,"rotation":0.0,"id":276,"width":434.00000000000006,"height":197.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":275.0,"y":8.93295288085936,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":14,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":272,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":1.0,"px":0.7071067811865476}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[82.0,295.5670471191406],[-4.628896294384617,211.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":285.0,"y":18.93295288085936,"rotation":0.0,"id":268,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":15,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":316,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":290,"py":0.9999999999999996,"px":0.29289321881345254}
}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-204.0,285.5670471191406],[-100.37110370561533,201.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.0,"y":203.5,"rotation":0.0,"id":267,"width":116.0,"height":16.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":16,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":28.93295288085936,"rotation":0.0,"id":278,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":17,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":290,"py":0.5,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":0.5,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[217.5,167.06704711914062],[219.11774189711457,53.02855906766992]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":57.51435447730654,"y":10.461511948529278,"rotation":0.0,"id":246,"width":343.20677483961606,"height":143.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":18,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#434343","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":55.19999694824217,"rotation":0.0,"id":262,"width":262.0,"height":75.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":22,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Unless notified about the container networks, the physical network does not have a route to their subnets

Who has 10.16.20.0/24?

Who has 10.1.20.0/24?

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.0,"y":403.5,"rotation":0.0,"id":282,"width":442.0,"height":18.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Containers can be on different subnets and reach each other

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":106.0,"y":252.5,"rotation":0.0,"id":288,"width":238.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Ipvlan L3 Mode

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":124.0,"y":172.0,"rotation":0.0,"id":290,"width":207.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":25,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":3.568965517241383,"y":0.0,"rotation":0.0,"id":291,"width":199.86206896551747,"height":42.0,"uid":null,"order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Eth0

192.168.50.10/24

Parent interface acts as a Router

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":29.0,"y":358.1999969482422,"rotation":0.0,"id":304,"width":390.99999999999994,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":29,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

All containers can ping each other without a router if

they share the same parent interface (example eth0)

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":24.0,"y":276.0,"rotation":0.0,"id":320,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":48,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":316,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":317,"width":109.44000000000001,"height":43.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 

172.16.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":318,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":319,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":300.0,"y":276.0,"rotation":0.0,"id":321,"width":134.0,"height":77.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":49,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":272,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":35,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[{"x":2.279999999999999,"y":0.0,"rotation":0.0,"id":273,"width":109.44000000000001,"height":44.0,"uid":null,"order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.x/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":10.0,"y":10.0,"rotation":0.0,"id":310,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":20.0,"rotation":0.0,"id":312,"width":114.0,"height":57.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.97,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":85.93295288085938,"rotation":0.0,"id":322,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":50,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#434343","fillColor":"none","dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-191.0,222.06704711914062],[-80.9272967534639,222.06704711914062]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":167.0,"y":25.499999999999986,"rotation":0.0,"id":323,"width":135.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":51,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Physical Network

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":53}],"shapeStyles":{},"lineStyles":{"global":{"fill":"none","stroke":"#434343","strokeWidth":2,"dashStyle":"4.0,4.0","startArrow":2,"endArrow":2,"orthoMode":2}},"textStyles":{"global":{"face":"Arial","size":"13px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117032939,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/experimental/images/ipvlan-l3.png b/experimental/images/ipvlan-l3.png deleted file mode 100644 index 3227a83ca1541ec68e06b0aa105e22fdf5ae9e6f..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 18260 zcmaI6Wl$YmumyT>5AG1$-Q67ydXV7m8r+1pok>`#RfTA-*)bij0K-0C)TQ9yW|^N>XfP5?S3O{d6!$!B8y2koK&0EoKg z14FAa8wP53UX5(L=%bDVL&q0#^aP6T-8%nWjl|N@0|HYEm_x>-zYG8X+2Wz0n@ zzI=J7-uZdIPqutrz|x1Ai?3;VhTlje(*-0XDKBVe#9*5bMa}EV0b?Ns0MG@0Ui2`_ zyA_7pwKNt+WmVFd);j7*KV7ZMKpvgIJVpRB zk%B`-b-EM-07kw-z$Bs2hOsF>sATd?h@Xx{3=r`1M{l@jXOaH_z4MhL$3kWlz_!oll8RWh!Q%ROqdsGnb z(TIUiKA{RJOZ(q_NwQH7TH06B6ls}!%=IAY1^)X7pr@=O1Q$j*e|F|%#TA_w-Y;kC z)_`zJ#`9NCCW{OdMugwWB?QLTkvyi0WxiQ_#T0~ZQXY15j6v3s1PYv2ei$9RP7duU zqHT!9#~0bi?!lGe=?X6%oIH~_-Z`^`G@SyH#Q}0r7>^JBu!Keq-(wCBenOz|2O&fP z%g(=xOm2N&(JGPhAZ&+bB(oa!l?)rZAHYn!%X6=Q?12*Z=-5I<(n;+L6OzT~%aEvO zgv8_ogMJaz{p`T;w{DzN-7~1pVqIlvWlRHg>_wsK z+@e+a#w-GRB!9D%P4X!uI$n1QEJo%7!P9sat=Yi+8?`ar93aXXi7MWN=hA*yn@Ikk z4&`q8r987}hlINhTR;^mdxMx-+>@>TP$>)3LgwW~Dq;mQ#%#p8{3(LFu9AB9%YwlX zX^Jt4C~j&qDj?O8cx?i0hwDmDi>9@%F0a|XWrlKxLu-fY0YY#dF;{(Xz#B^()XVp~ zA+#qOWF3Pblb9U2Xb05_fp~})4vZ# zE$5YiKsXBY*+b>07f3O)+SciM@ArFu`gqLyfO499UTM3S_#0RA)ci8kaC()t{CdH{ zUsheO{k`w`&G72YZ?(N1(zlZBufdwTu4o5M#we0nv6WmAo=8Wu(MTcf4Y?&QG)0oU znn9uhM0uu(8;#a5mXez3LPqk*Z#>J&vJ<;CvtdznTbY7$yAnJkFbEtY;(+bo<^Jk( zA>&D(D;*-QXyd%8x~=Fqj7_gcZx1elO`pe&sgB2$F=5#aF7~q%`{`%x&~7&!Hn|YE;!;Q?f*HvOpP-Tm=35u|Ah?>^rxk9yuVL1k zkP^ZRJesL>xg0b!OlMvo;5~)DutD19qHL%dXWPo&|3$Fk!83qR$Kc!-p}nmF?BkD*xxZZfVH1jrnX&-rzd4 zB9>wgQ1hSXV;Ch*6{l5Ws3&5^Z!QG)EfD;%2R=$vYzyZBIc4%=<%&9Uj*)QAJ~z^( zFcF@R^!oic!@aLO^ zn?UzwRe>!I+wm%NXK&$s=!@y=yA~n?K4GTE9nLwYxBrB5XVLJwnD6h{Z+ixERd5+Q0=_af_Z{pPmwPMNNVo= z^rA8x<0ts<5;8ZH)PoX%ne?R%?CbFx1>|Ft)?W5U-c9Y~dz5En8}m-QISYB@`4mY4 zW9t(O60{?YD}w}=snv7$F%1kM6P(L@8S?sxGEPWj|6uQpuv}y$PXwxizk|Yb$fH2j)Be<57<|=?) 
z7=toefVrv94SujOJ#Sg}yqaIWaY&Y??*6k$VyYbK8$AKc3jb%HTNw$Jb39d2mjxM@ z=d=|(v4Gd_E2cLLyx|}PT6r4s!8}pN-omkWg$xi>=)Dcr#<%ZPVn6f(SlFy~AHLY}=vShNZvs(>Pq`tkJX`n-95d-;rIln`$j| z`F``ctP$Lp%wj`vVKkAJffxJ%mR%E**F(o4QG1_%i2C!cB_M0?~vd%-A{K#TYrfuY6jxF_V7z1G^W^5 zTkPJ+SUdJ+k0#}#=jb4>-k?DVlkLplLG^rRCC{vgKNjI2DZP{(l3auvN}-9e*HX3E zY|ZkKA5d6X?|Deh9z~GQ@vQf=zMECF6tFx~Cf96uRL2Dyr0O3vRLGz;L)IUN?B zsq%We9EPv;iZn;9Zw!hiW zLd0HaeQ06T&$dp7q5FmKY2{BO06lC!TZ_Ri!ge{a&lmz7vGuv+$C zbi2UsYwN#=!F=TImu$daixwP`59?9biqDER2Aw-gPW8$CFsdmt!!B>PT5w#|C)r}_ z2;_M;_b&-|Y=^zpH(i*;wxxzVY?Or(o&kELz~rcAsz}r4i~dI~HKcqu_#{jl=wi@s6Il?Ufv3kf$F~w?T zXS0y$z?7NWvchoB$7{tI#Ro0uTOhEjvHzIfn}xQ_e1hz7neMVY0$xKoF~@7A9Z6Uv zjoS!uqN>pUm?DoL+82J7uGh=@Y{gT@)8l)~nTHM}BDEce8f#*F&?`G>1XN}0*gkzG zYab%K;%OL+gr5|jq|5GB`|;AA>;DplY!-@Wne1udl)5|VjAOAF3mu&HBp4$r3eWWu zGbk%;Co*CYpfqG1SCJ$%SuWA1I_p-mXg+p95oO_zl%Vn>hFagYp4ls4X0;sVKXL|A z{9GC$LuGOv_`^xGS2dPKM`&)S#B4DZIYEPdjfYgGbCPpwzemhxK>YV6e{jy>o1q zL2V0HFsggj|2)H%xFm=oCa1Iz;*$uC=POKh$r;s;^A6XJ@FF^4mVHxM=l$BmB7biWn!)HR#8Sen(!Q7)C~Ladc&k82OJh>_J?`mN<))eYFWR$j6;7-lu|#i}e*ftl`m>F$2P~R$ zHbl2-&`;>f%_-Duu(o5oF#7o8cgir@y3)Tbnrqf)bYz%&RCY36ECGEAl%NaqW=Z(< znTQ{!!A7)0jX3!%h`PeC$J1#=EG>{YX zf7soO!SffH(yI~zflPj>oQCo@t-lf&W_Z92tQ&ZvF0L$8@B-K(#Ts_P^WjPyv0c~- z^`vN@8u~lK9P`oGLASHezt;*GG*p)HXp+nMY@%4K|HK%qF7io#l|7}S_i&*S7nhUsDyo3PDMlyrL`O?WgU7i!5kUL%_&l&#LbkI zec7`)NhjISeE>!LAyZl@GiVcMQl9)C{q3fOo4?KQB5WK?^6s`oooI@N)o8jP^LKL3hoE+ zf2>c?7s=!J#GA#Bh7D}hhj=I)B!AA^aDSP$wKz9iHAqj~_0J=vviS8#3ET+ThUMYT zu+U#iGfvpVV2rZ+^kxq3EvtV|=*j&3jf|iG5|z7@YE1rCg~eLD;>eYV0^CU4A3-FcXaU4u!CqR>5Hl zd6qua$)>X~Mo4r8?m$#l&>lrw+J|9rK_+EaV|k5i+CJw&@{6QIb9UcoXT-@`LqPX= z@2LgNU;N_jmXq4$R9!QMHq>S?1x%Y5foxeE*GN+gB0nym~A+YzVc9 zU;Eua>iH+B215L3o(BG*_|J$>^?I<$Ih^ZSZyAk`Z(hG^$j>pG>YAN9X-AWrU2TtR zy?oms@-**ya<3-)vVM0v^AHg&4w5HV!?=1k$Qyxe-u6<-NFj9hq`v|6kt#j{|E{9_ z5mekudG@#+N6?+Y0vt8yEfdyhoD&9CN=ba8bfU>rH|CGFbFw$K|t&85e9*)7!M`NgD<&I4y=aaT$oI`wC|^b5mEAEkDqraetCI@ z-+g*2Xo<5lR;PBwMnSn1%pzOnWL-(ZL&Iw(wL0CAAk_YhgLL`<(V!oE+=6=Cq$Q8} zEmZ3n?!2)Zfrt;EIlv{tLhb<7ooMUl(oJEccYX#@?z4vb9Fl(L`x=o-4NV0TQlv)f z_fz9`q{Lo`NhyWhg{gMIl)3%f&fl6e49qG)NS)IJFh~T3otpcK(}T#uIuQPS51t-! 
z3wF8b-KGC1rEQC79hVisWmDd*0kueu3I~dvL7>}j>@O;qcE^#3$%N4j3VJzuZ$n;s z%ZjphD|29+=ZGDyJL3a?L$ek3D}8+cDS|6VooLE%{DIgSYfX3A?K=ZFltvHJTz~oD zG`L-6cY|6tTYckE3K)(x;F%tbyne&%i3pus>yR1YWX^;>1bc-AB_ESPd00(M2c#!h zQ?E1L?8xmUSjDM5IPe2HM<726F*gMwG8Cpx(*808RW#Hdz0fRYWP<)+7;AL_d=b@d zR>CkiFt3y5YSHER+rM5oUi+zEBt$$EFf_3^V&p5e{{1kZ^1)paM(iYxRPCknIPGuq zMVh#Ym|YQu<8SVpX-77=!cuiq7Q&L(v~EhF{tGM%Au_474&ZX`qAZqOO%j#sI#6crI0s6gy-PogU(m^H|vAak_JK;TY^q>7aM_&(BRmQCRV)If8U`d z(#A2~)7(WLxgH8sWhf&*$6UF%8e1J?M4emU)3>jx+4)9mDI(yRdj8!bzoeR^@}&&W z8{XG7N90j6?hg{g$l=u2#ZJW6C8)4)hT4P<7vM62l55*lO4Xwf+gJql&_Zow2(yrW zWXZ@x%QKs>ISd}n{-$$%X!Jktd{Zh)2hlLlbZgOH?E zBTgBxF*YAQ6uK;4Z)c8hb-)H%|2@s$Bde$U`^#=GHBGz zCI0QztR>93kKu{T-Xq#iY*_%N3aL7i5p0W`x3?zBe7W@e>AZ0m@{ftO<%^T3<3sxZ zH}`TDzmjQywsB6-R0+MQf~477*(IqvA@DkQ4)|zsR|RG4lCU62or*MyO^jX->0X$1T2hUkIC#y*XSfYWY%=xE zjuW;-ifb}QEQVGzFmGhmF`TVaiKE%G)T&KHVj%ohxT-9Bt0&u^O3Ej*luP(^yms7S zOlW>GUo&d)6`7&g(NG7lSN;LQ>-1?J9heLiODulF<0$vH2Ko!`w9$nZ4=$#O|eAS^G53K{9f1SzpXVlAG{i-8;oMhzcstd1R;i$4q{(g5JYjl;B-E>j5?8o_3$B@{1 z9Ghdc!YH9HjFl@-p35w5K-!5-2onQ;j8y_zS@m$uJA?m??<0ZPQkp~gBl^A5TDG?j z>`m11Dl_rq8sJN^*ik~>_F590nK<58GJl8t6r#GxLD3Kdbgo!5<6A>rU08s!-5mi+rT-1Lfs+3qYIZz-) zcm|_FsVog%T{Z9rP?%EyZ;qYAx9mh~XwX8t`)^^#NczBLsoWkv+A@FW)r~QmOz4k0 zqM-`k>IFKf*~FtfJ;Ql0!<3hRd7HzRMp?qd)03tDRo<+@63DwPP$yWrD}@S$~xxJ1zN+vzE@*%OxZUWp`mZKMoZiq^yniF^$sROaAF ztYBOQ5|TB~pOHCO$@^%13XwMcMp>b&PRk%T_JK;tLaJX7o0i>|ywKDkMc*;ZM<*H1 zmVBrLEaRm?0N;gc37I}}%kW6f;#;<{_DAj0I_bHHJ0#=&$2mVEPn#^?q!Q9ckfgX9 zP%3_Ur~@ifD|Mi#To~j{kJSLz*YfnV$c}jA#Ds2TB?CfxduQiO?diuVsJbd6F_5O{ zDVbIEmL2cxqCxdAp_T89V^8_gWXgj6X;(j%%sQVdXDd?&rdbwHK!s?wP{P+3vB=1w zo)2c)9&56^${ZZAUh?YU;Mm5&lAI)zv+MfCz%>VL5WGl><^OgrzsQX45!U>7m4B^U zr;mCfa+_~EI%T@<@@IN{`6B!zT%%-mJ{<6qlSt|Q=U-kmpvrVjv-N5ZKy8YhoSH4$ zZZo+v&13fqqkV06z#%gZXRMnIWcgy^!92-1k!~z2Zm6HOT1^AhO~CP;WQ~^xiB94Ur#J9jM?w@ilch`WKCRsASC>?DCfOaa`ep&u zN&XkzuJDr#I~N-Qe77>Ob?0Qx9avkA#8@Jsw4X~&kr3Jd2+civvp;#?43FNl zS9uG$1$mH#!-K`EWU{f&`wc*dDzcrjB=ktkE^|dimZ$K`6UB50%H80{eo0tsqLmEN zVAhq+mblRz+x520I3^1vI(e)c40TY9{Y!ODmq}-~m+~^{C@~j3`yP`XT}Lv*LrGMR z=FOzdYrIH;P^(>kh7>Ks>3+XGUreeR?9=*R1cn=Kg5t(3cqwX4F0mG998NYSJYNK~ zn!zPCoRaecBNzCtRzy8K-^sZR}uw}W|XmlCUlpf=V}11|M3J|&SfE` z*&G!j*JH{+vfw2?oAdT9(|YaOUYERd`gXohy>oi}$?I4Twz9=K@=OENOvUu+jtfs7rmT}Pfc`gbFw0>?M_G$M0*YhDffOcykB zSUeyro(Z$65FVG3E@<;H@a!vt7qH2B6hVoJ__2H<=3lf?<;0P2Q*ExWQ=C%dZ$FmO z@~j^uP4GIH>?F~Uz0p1|7v5S)rSqIg4&-itkqf`#ZdGNsZ~rk*p$MfSgoo{Mejn_19oQuxL+X}`WXEcb*=12X`_`VjL9XMyel0D{7 zU2&clVO>RzUkHoSrL;#GAnAdNu~H*ZU%~H^n&XFZpJLmpos&nv5$Ir?`ak;-z@v=W z11=VBE>@YanO6!+`;2-t;7omez=@^Rx&iLv8u(W4zd;>=rqZ)O;q}7tY#RowJqfzsqj+eb#%PvYZE* z@vCHaR4TT%LW)K${^f^GA{f6|vdnLIFD6Q@{;SIb{2JW{?nTNM_9~O^jW8ezEo~v- zq$ORU$H?>Gswm;)uhT*wJFL0U5brTnl4;poes2RxUQ*};>2o+swK-xXFGWH7|Yvj5j+X2zTGhw!g@4>Jt#ouOz@UE zZA;qQ2x6`(ZJ{R_=RhB9!5SE2|TG=Nssc!4(zW++6latl&==sNAlf znZp)cO|xFX&E++jiwoWu$E-B!@v_Pc^!dLSr_a3mvSzs0>t+_cDuXP^349L&*oG+_ zJx1Onro!7kZz7MdE4pcFqtjs+?VtDvB}*y<;Wxstw>x%}K7Y(yR5rRB&@;SN1eOy` z+o8s4q6>d&4ocSLT$QtVXjXrSBYQyT*C}5g-k&x{R}g)m0u)?-)POVAN?Pj*F-w;N zAGttpdZni+MLW(hq>_C4CvWRXTQ{B4IQZ};t zfCj7(Xt~%y8XVYU)JZDS?#t7JK=@^t_oIjpOq7f@w z;`p`C26J;3jE#E&7UUdR$$7tB83jcabI@LoQb~`h2GVOxeloh*Uh70KJZOXIt#Lsd zI}m>ETK!y7A{nYF)bd7VjkTqz=nlPo${!xY;d%~37Vem@V5w|a(c-lkrfA7IudAY( zo*8q*XzqbdOG67Z-QdmrO+FgYE+6(~5IRj9&IpNF;%Vo8f@JzD^SxT`%g-Fs`gkTh zutYedybNXt1dEgw01vV(#leHm=fo;8&#vHoz$YtPHo;Uf%p_*V5Z9dyg$JREiCNB5 zA zEO^oEVHdHWj%rt9em85-_mr>9sYRDkP5+LAM=L>>TlOCy!8dviR3$H@IkA%SqrL#4 z8vDx+B?7`b^L4S?8>SCWA6Gx3&(Fw~*VjrS@w?Z_*P354tSs&07D(zM z8aU4Z5@MaWMHDC_iNzTO4t#|f 
zl=2Iiop^~f*_>Buw0WVxpstXzI*OxmW(1Z436J9twion_%fJg(1_d5q*Yc#3d{nld z-I9UUN0XOkID$A?sM%`HC3X@!Sw`y8w&nA;ak)zInSarJ&Vje8IBPZYs>AHd2f`B= zzgX`{Yd@gzyU-?Nau}u8%*BLSlm}EDH)0J!7l1p4dcINO*vb@s`eqsBqg?7DNEt7> z-|kGbxW~A6t1fU%xyD^5y;|cHmcJ%ip?&$)kYP}u0JYJ{KV(A`DJ{pE3yt4 zM4ilIClq;rW6|wQE#coE#<94s;WMDS2+A^Ie~|u6gfpy#8Q{Vui5|Gf;a5yW;(sb@ zwJ`7nYOpMf6&8Hp5E6euxzALT1kdd?LTJ7IWO2o1ehJ{q>;*00~! z3yo>y5#01Qe-M!v&xyCJZ9(CfyG=xyPtJ`|YShOw){!epHxtNc$4`?y@Urm_H>1`z z^z2KF;odTo(a=dPUlNK}8CKQ*#viT#BTUVyDD4ETA%}MNa(NY9qcCRg2CW;%FzU9b z`Qf}OTbjCfLZN+%u^tu{=G3;{sQ86gE@*LW9yz1n91stI+B7??%F7(KXjah_0%M70w0uw~z4S2K#R-)Ts+r|rV(f=UjY4RAu;`m}7^e3b&LuKoUR{77?`+abD z(a%KcmnY=K_NEL{o^^kw&M~4H--%=mLBL8$yGMPB^4VqzN2Z|$y*ll<(b=jN;Ru9M zmT1ZMGzg!^QcR_#J85JW}m%S{w4tpau3Bh+2cu1 zyLR~C>qvZd-@Jw`!eB+Z!73nlPyb?=J~3Y&1TDbY=@ndKmcoIgmLr?LjVeuJGP>qp zs`eGyyeE0mw<*W*jMK4zfQ(S3AU1)>G>D&MB!ZN&-1BGhLmA~6uRkr)AdrRPXLs1- zb8xym-7@HLbi!xlaHq6#B~DfhD$Cax~r!r@4H&H%%O0YP-No3YXxj;+naW{ zJmtSD9W(Q7GuA6dspe?Pn*J#0wR0Jh7Fj=v_sW1%MOypm+r?<;jdLDqFs({EGc!~! z9N5)K>F)8ez&r)?JrSA#@&a2l^f(pyK$rGuny#z%6zK9)ewBy_?DBrd(?)hZNt%J= zz^^4WD)MkHtAoc~V;BT2i7sg9zOCD6Ez+gvUZTPklb56&U6qU*L(2-}ulpIZ!XS^rYQ83bmQzLzP!)w)4f*^(&}n7_KG z03c8?QdQc;z!FqMf2)MxO_M4in9ZT-L(0j;QeeL})>bzB38xLHt%at5)dlaG7e+s( z>AXiz&-FY7f0c19!qsLZwaC(>iL)VU#OtIAJNmRAO#SSwpM>!E5l>=t6>ezI*ML}s zZGOF;(bK&!1R%=ZOpd@P7(jOLI{NDtx`ENh<`35g$@+)bZq_-uOAoyhN`5}WikFcT zhnc0;p#I2~1wrr913&d&GE}WVc8(Sw>wqUSu4gG0|C(Yn;@*TI;9F*{pW!DvA%#|E z8Cz9z+zhlfA_}_0@7SnCg6xF!+qB3n$ypPypaL;e|{zyvFx*a$Kb>1g5n-WDa`yB;gL<7 z6StQ}cPUw*DSsrE=m*qW(7p)iGRJU+(GMRLN!Nwj_g)Zh?JV$VJ}p03ACgN*oj7J5 zBYojC@=|y}3_cvQoErfwactWc6*g^*giL3!&b)#Q!XhyCVA4^qeSq$t4I)^sKJ1;1 zC4N@1Xd?e*ME_1d`*#l+b`wtCo&gJ(q8M^^E|P=MH4n84Fb*u5ed}b6=0U(;EDbp) z^P`+cR+Zf{k*0uiHnPgHni7>G$q=^^rXT+~H#JT{Q~qC{TlwCzWuQgW-8S4M*NTTEr zaT`Y=a!jhHTVn1OGkuXyR_IUWRF(nnuiNskz$zj!Bs7!M(?6Q0`a}>Y!4N}@9hg@Q zWPxXd#T}1_x1s4b?k*Y`mTD=H)T3V;MdCg1a;MACU;odjaHt{IfJK<$H}$H|D8k}3 zA-`O%5$BH+y@R*kl1ZLwvdS17mdTd)IfZ0;dIg{m7p*hu4bPiL?@a;_>aSnxDn?pr z5|IVfBMZz(Pn_o!Z10Wgm_8SLU6S~ww0m>ET0TG}Di|*Es|LI)lR^JPh%+88%5lX? zCL3k%mf}<#N1R!bUGCpgo}M;Vi@yjb6qH%dLW712_cHCbSK19Cu>eI+erErHh-O-H zG^XT_)Pj|m=zE=gEsZy>Kp6#==aSJ3aFyH%jm-~nl@tkt*#ljYXjmw=k@SR04BgTU zy&ZggKYC^cIr-hxZ$_A|^OyBi>GAd2ThZXS$6yAwshKJEcH~NWWYl$}*RZEF%p!4O z6k|tsWrOX#mFZn4DbHooDZkrb00sQ%{f~wox^)j(PRYv0yB&kX3Fk0)l2z=?#x*m~FXEGfqXoN?@|qa^2Ng$%=Rhy1@K2>B#_6Sf~uG zv?Wpzy38r~U4*S;-jD#v$nbg~)Ds z-V1jS38MSa$3U!PWOv>AG!KB-3!SVD2~@J&Hm$GJ606ezHEt5R!^dP*Sue2R5LlI2 z1-LyCs3tiKBP~1~b7XB;$r2_FqN=cl0N%;V9|aaY?B|!eYOM0-ZbpIf8n<-@TIxp9 z^7c-33NeU<43x4ZFdk1!;pIA|A}N-~J>BmGIIg$RP_gtk#`@VJN5h?>C0I{^b3N~1 zL}T7Y_*AY`dvMB{*^gQSaJ{(MhhL$y<$292A*UFozBZM3~(;sZPVTPX` z;ztmP1_t8Lp6k=iJq^y+;yra(H3?$I`wt!P&D$Rqaw9m~zrI7yN>tnfofCm)94zL< z{8-Whkl46_vzx--yXbF#KUFaL+AsF%slVT0W>T-qm6{TrH%TU1R#Dz#&sIqI^kc9q zw;ri=0GpQBmuLj!F)rno96#NBMH7K1mIV&TBdM=`PwA8(&&;y*v!;cQ2^g}>|7IK& z57obBP%K%COCx4G=d{oLt^#C||MTFtmskAJG=zLftXu-gq5R2!)-dyNzsq;AhpJ*K ztAu{Ph1kAg!;d3$9)mK%N>LoaJ6GW~tMF$LmgWBjvKBQoRUp>PGL|Yg#ahkBU^EbI zCN=B-|3LY1ADW^>m0j6H>nhiv@Dh%uM4VmO^uIyUY7_l5?Qaz;Qx~5a&@|NyX?l; zPWS{aMqby)>DCcpg25VM8%gPd{ z`i70!YNM^ddIOqTtT0OkHEjp@<;h0ZO4uM;keT|rp-APLsivz)9fl6|zTY{|<3`r2 zp2t(8iifGA!(nH(*QDE|&z9Gt;4Rep%U|4mZ@RU#qedH3g)#NR|G8wRR*hT!yYb=O zXDa#S{z1Os6_P4%Hl<$e#142Zt zoR|#2%+LVsMg}7bni>hQ{pP8=FrVA45RMiq8$yvnSL*tL1$I(QfS%M(yfP@iS1AWP z2lIKKPNAM=3+>*#(-?K6gtfuQnxk1THy7?{9n%Sd!kIXmuYZMOhTWt37B?yU#B7r? 
z=G^ACYZRx!PSFSS6NzhE# z@p9cd3|MUYhbKhOR>}1xDRPICg~677VMF>KM{yOwwG8jd7oE(Rq;LFL?168HD!j6u z)d2J5o;$D6xnA0_@ZFBP_J%DqW3f8XN|uy@{jn%Mm#`VpCJ@E4D1=!QIe1d?Yy(Xc z^!Zww#2ur07i<<=5|fSP_Bw?qoU{lGBg;fMyhdql2A=R4Z6;84N?>%8R+{`C(sL!izbn-3vm$vn0B+f&fsN`3e9f{dyAlRtYQ)7R&;f zo*r*1v&TM6$SXV;|8_Its9kt?6T)TK)NxUS7*1tdO>{+`#NYijV*`DvbsCLSv@;ES zH;(!Dz+Rc`LkCBnZ4O6IIJ}Ij(-iK0oG7sxy3yt99q~GzzL(svlWZs2^>>VrPdQPH zpw8bvR!qC&;aIhf*u`*>;K?rINRW6-(G<<#iDN{W6j1z+D--bGVOxfI!Jd?*GG@HGRC=q%kZm^;YZa#SvWfyxZylM?9sygkg0xe-)JN$i~3j4=>q5^3lW_ zeKCy?#k?>FCGd`ycvAk$(@B1A?dg~tl1$5R1!P?();c;9SYAsr{G;+cQTl-icPs#uf4>R)!KaO&u-5@<9+Qyv2 zgSmcVgp&RhF-Lx9Fr;BoSG%pcxS#E*M)5{$Z{6oiIA)0^`2@ahw9T$?)zep6<8@sN znFvBk8>g@0XERfv;dVO*95Bl7SXK=9|5Ku%R?vlcUbP+!ABR6X`x9tPsM28o9NPb&|y<*Vh|BxTP&W`MwosXwnC$;YCxBRwUUIQMF@s0~s zhMr9cE`1N-j~Lsn*x)W&h%1}OK~Y+-(!IdT4LCYMTgSTKNuLwG#SnV9B^0$o^I-&d zEB_4%Tdej+$XUsazv3nMegSFWg}`zr!-Z`38hLMM_dx=fr$EHXl+Cl~p&_DwluAB{H~tZkx<-hN;&4CCl5*aJQc2qi7WNiE_EbCV{|Mw%gm zU7(B4SKd`!FkvTDi?}!5)_zC(qda#I8p9I+@`^e~n(dK-F%=;)_Ij+JIAnVYBo#AE zNAEvV=szD{AkJY6Lx)#1qa`-2sL7jZM=%W?9RDQ>&TH~rjkqF_X4+h>?2S;6q0mfD z^sL&_!jNx%GMnaAQ=w%3-sXl8aH5+3HF9b5pfq z`C~s?RAg5YSprv)0CvY*>~1o@y6xaU!E9r=hAU!;bt7L&#uJloz7o;bTLQ@hOT*7iQmj=<}S* z8JH^4bM=LI55#zrwlHaIVwK?_g})Rp;Gqjc}(SeJz3?Xs4&({VfvzRl&J?ISrJ82m~9yD z>~4NbSo$qEyF5}p|!AiOZ%uKV{J9q|X2bSTwfkBzp zq@9pkm!7bCcfIRk{sSE0^%U1zxsv*Ssptc7=_tObJsm;QyfBLU=(xs`m9BukN|8_5 zDia)iupL;TlV(JjUiPiS7yqG^P9VU>Z)u3~nGf zJfmNSh;oc(4-Dc>)wXKuuyG=r%n$~OT>B|)nB1$>&&LFm%^LaVf6XBk55vOp;n-8V z>~ICg?cJg8EbAoN-fZZs=`1NYH3}ehhPQ!Ln*2A@%HG&rjQc`)(xNoq7iQoiaZ+Si zGzvHwmCjc<&A1_3Yk-f;*XT~KcR)C3pG75LYWD6m0WWRX51~AhcsY-c zMZP`DQqzpvRmFOpWF~^ljzWxa*@(`&NY$)-{hv)RI(nzPk-Mo451`P@ZA#P|fP&@t z)DqqQR_11mtPm=ESM~X`jTKE3+#*^Gp#Y^+09xTH7FK_C;2m19OdMvPe^Yl;wp5*t!ch-YkD&-ho1 zoI$$7#aIZ&^zJ6!%H*Mli>CcaOPqULQHg^k@gW?ykRae@*v7Pgex5B&-!puyro;E5 zY&Jon-MYCOT?u`(BMS?~j|v=NWq~uFdkSaO%D=@dMP3g^v}IxWy7g)Ic^Y6xu13EJ zQe!HY>%j;7gWQ(FocLV?%oPxptEmq0?EvU$c^&wU+C?xIm$A~h7kXzIycIepNR_u_ zgWyon(Q5ODq@muYEd-&I^w)hdKjn+uAzpj8DyF@x(J~4M9iIpkp0n6rl&Y8Yx*8SSX#4b&t6MtY-+vi9=BG05!BwEc7Z~- zwyE}!zIs(w>9rhNTYsThyfMG|@#yNaw|o)$E^=ru~97`Wc|aHmuv!iaz7SWsgge3Egn&R07)Tuf1jRb-a#<{^6)!Iko7oP}F* z5qbk6nqWmGVs}aGMvZ0X*-MRl*X$|8@IltFJ|Pi=u_WzBTutdTTz@j6Px<> zRukMQ8@8y;1k?ixEt2%P0fzVT^6{HB+G;v)8DLlftHq^IZ5iWC8_#_|>+)Xd>fMlc z;(Cqf=lgW3+Cz`tctK_Z>NE{H>Jp|pMqk|xi@gdByYsc&=n}0Z+(%R2(W`~Lt3#2* zdO)S$L#m*I&Vt&*3g+r9qUoeTgFTe{V@Mw!^`IMhn)SsvQR~5k?4`*B+$kH@C`@oT z^?*W)WPNUc;oVkIw{?*Th9yufNTJ#;w8;{Et#hP)hb|j48~$`KLQ{uLKA1PNdX#k< z(+KCy2%UF{`eT9G$Q8o_{c?l4O<%FsCHO0FZD;3$pxX@V^{hd(@nW=ppxbSrX-xTP zf;x2L;6X(&575D<-fqX{Yio&&E(^7;I6r*q*2vQ=0a1JgYZq|GDH~b@6C7|2D6~lU z+yKKnr(q)l%wK&q;8Lj836t@i+I{)(hYKStSUg|)ZJ|`qi&y<|X*}8{jd!DS15KBh zB^$@01$&gwg+zmiwP3$wtvXkvvkQ}nV$easpaxc=p}#dNmRc{}{P3w;qjt?w3{is9 z47hpG_4^#1EeuBTuswLsYekDWh|x0=UyQqzEP?oci}16k0UxbHl3h z?pe+V!|JPnl*A{(m@!7^EVqp0py&{)@-kH#;Zb?Z(@DN2vu}y6{$!TAk(gyT^Og#B z`wr8ag5jhwiW#8?s6&y>o^3Kmu@@a=7StDkwX+j?01LIg5PA627^7Ep5=@%tiBpZNqnCfvqrQ)xzWwjG|=Qqn)&cF$z9^S{JzWk zygtOc>)O{ulgSEIT!PtXW3x-|6&0188+vC6-}sqN!(1=wP-M3dZT;KB^3g#8)WF)= zE*kxj=p)0_dYh7mPmPhMIW}#g^0gBeSEUQMQ#P~+CM2BtIR}Ln4WAoUop;YhMi^FK z4WuLrW2V5#Q9LPo<;s`*X(l$q#a?lxckao*-sZ`F`1gxXUL*vn|I-_`hu2&{L?`X- z;)?wF-w_kJIuuFtP2CKb;{Y8rKn<*o&lvkFyJPAOG06{~8Y5q`n1QJ19_!f*I$PXn 
z8(P$60_wLN6k0TVZdi5RErEhF!?60Q#idZKjM*dF-HnZnJ^Kk!-{DockerHostUnlessnotifiedaboutthecontainernetworks,thephysicalnetworkdoesnothavearoutetotheirsubnetsWhohas10.16.20.0/24?Whohas10.1.20.0/24?ContainerscanbeondifferentsubnetsandreacheachotherIpvlanL3ModeEth0192.168.50.10/24ParentinterfaceactsasaRouterAllcontainerscanpingeachotherarouterifwithouttheysharetheparentinterface (sameexampleeth0)Container(s)Eth010.1.20.x/24Container(s)Eth0172.16.20.x/24PhysicalNetwork \ No newline at end of file diff --git a/experimental/images/ipvlan_l2_simple.gliffy b/experimental/images/ipvlan_l2_simple.gliffy deleted file mode 100644 index 41b0475dfa..0000000000 --- a/experimental/images/ipvlan_l2_simple.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#ffffff","width":323,"height":292,"nodeIndex":211,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":null,"viewportType":"default","fitBB":{"min":{"x":16,"y":21.51999694824218},"max":{"x":323,"y":291.5}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":241.0,"y":36.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":41,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":85.0,"y":50.0,"rotation":0.0,"id":150,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[3.1159999999999997,6.359996948242184],[85.55799999999999,6.359996948242184],[85.55799999999999,62.0],[84.0,62.0]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":22.803646598905374,"y":21.51999694824218,"rotation":0.0,"id":134,"width":64.31235340109463,"height":90.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":43,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":87.0,"y":24.199996948242188,"rotation":0.0,"id":187,"width":105.0,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":39,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

diff --git a/experimental/images/ipvlan_l2_simple.gliffy b/experimental/images/ipvlan_l2_simple.gliffy
deleted file mode 100644
index 41b0475dfa..0000000000
--- a/experimental/images/ipvlan_l2_simple.gliffy
+++ /dev/null
@@ -1 +0,0 @@
-[Gliffy diagram source (application/gliffy+json) omitted; drawing geometry removed. Recoverable labels listed below.]
   Docker Host: pub_net (eth0); container1 eth0 192.168.1.2/24; container2 eth0 192.168.1.3/24
   Host parent interface: eth0 192.168.1.0/24
   Network Router: 192.168.1.1/24
   Caption: docker network create -d ipvlan \
            --subnet=192.168.1.0/24 \
            --gateway=192.168.1.1 \
            -o parent=eth0 pub_net
\ No newline at end of file
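The caption above is the complete network-creation command for this diagram; attaching the two labeled containers might look like this (the alpine image and the --ip pinning of the labeled addresses are assumptions, not shown in the diagram):

    # Create the ipvlan L2 network on parent interface eth0, exactly as captioned.
    $ docker network create -d ipvlan \
        --subnet=192.168.1.0/24 \
        --gateway=192.168.1.1 \
        -o parent=eth0 pub_net

    # container1 and container2 with the addresses shown in the labels.
    $ docker run --net=pub_net --ip=192.168.1.2 -itd alpine /bin/sh
    $ docker run --net=pub_net --ip=192.168.1.3 -itd alpine /bin/sh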
diff --git a/experimental/images/ipvlan_l2_simple.png b/experimental/images/ipvlan_l2_simple.png
deleted file mode 100644
index e489a446ddd255ce9360445f0f895acad31ae214..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 20145
[base85 binary patch data omitted: PNG export of the ipvlan_l2_simple diagram; its embedded text duplicates the labels above]
\ No newline at end of file
diff --git a/experimental/images/macvlan-bridge-ipvlan-l2.gliffy b/experimental/images/macvlan-bridge-ipvlan-l2.gliffy
deleted file mode 100644
index eceec778b7..0000000000
--- a/experimental/images/macvlan-bridge-ipvlan-l2.gliffy
+++ /dev/null
@@ -1 +0,0 @@
-[Gliffy diagram source (application/gliffy+json) omitted; drawing geometry removed. Recoverable labels listed below.]

   Title: Macvlan Bridge Mode & Ipvlan L2 Mode
   Docker Host #1: Container #1 eth0 172.16.1.10/24; Container #2 eth0 172.16.1.11/24; (Host) eth0 172.16.1.253/24 (IP Optional)
   Docker Host #2: Container #3 eth0 172.16.1.12/24; Container #4 eth0 172.16.1.13/24; (Host) eth0 172.16.1.254/24 (IP Optional)
   Network Gateway: 172.16.1.1/24
   Caption: Containers Attached Directly to Parent Interface. No Bridge Used (Docker0)
\ No newline at end of file
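A sketch of the same two-host topology using the macvlan driver in its default bridge mode (the network name macnet and the alpine image are illustrative; addresses come from the labels above). Run on each Docker host against its own parent eth0:

    # Host #1 (Host #2 is identical; only the container IPs differ).
    $ docker network create -d macvlan \
        --subnet=172.16.1.0/24 \
        --gateway=172.16.1.1 \
        -o parent=eth0 macnet
    $ docker run --net=macnet --ip=172.16.1.10 -itd alpine /bin/sh   # Container #1
    $ docker run --net=macnet --ip=172.16.1.11 -itd alpine /bin/sh   # Container #2

Because the containers sit directly on the parent interface (no docker0 bridge), containers on both hosts share 172.16.1.0/24 and route out through the 172.16.1.1 gateway.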
diff --git a/experimental/images/macvlan-bridge-ipvlan-l2.png b/experimental/images/macvlan-bridge-ipvlan-l2.png
deleted file mode 100644
index 13aa4f212d9db346f307dfbe111fd657406bb943..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001
literal 14527
[base85 binary patch data omitted: PNG export of the macvlan-bridge-ipvlan-l2 diagram; its embedded text duplicates the labels above]
\ No newline at end of file
diff --git a/experimental/images/multi_tenant_8021q_vlans.gliffy b/experimental/images/multi_tenant_8021q_vlans.gliffy
deleted file mode 100644
index 40eed17270..0000000000
--- a/experimental/images/multi_tenant_8021q_vlans.gliffy
+++ /dev/null
@@ -1 +0,0 @@
-[Gliffy diagram source (application/gliffy+json) omitted; the multi_tenant_8021q_vlans drawing source continues beyond this excerpt]
on":0.0,"id":115,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":29,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":116,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":17,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":117,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":26,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":120,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":120,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887324033,-1.055138662316466],[1.3318647887324033,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":118,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":119,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":120,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":121,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":28,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPadding
Top":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container1 - vlan10

192.168.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.0,"y":82.69999694824219,"rotation":0.0,"id":140,"width":150.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"


","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":71.0,"y":4.1999969482421875,"rotation":0.0,"id":187,"width":108.99999999999999,"height":19.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":31,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 - 802.1q trunk

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":282.0,"y":8.0,"rotation":0.0,"id":199,"width":73.00000000000003,"height":40.150000000000006,"uid":"com.gliffy.shape.network.network_v4.business.router","order":32,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.network.network_v4.business.router","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#3966A0","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":55.0,"rotation":0.0,"id":210,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":5.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-8.0,11.0],[-8.0,34.0],[26.0,34.0],[26.0,57.0]],"lockSegments":{},"ortho":true}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":12.805718530101615,"y":11.940280333547719,"rotation":0.0,"id":134,"width":59.31028146989837,"height":83.0,"uid":"com.gliffy.shape.cisco.cisco_v1.servers.standard_host","order":35,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.servers.standard_host","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#3d85c6","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":64.0,"y":73.19999694824219,"rotation":0.0,"id":211,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":36,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":52.19999694824219,"rotation":0.0,"id":212,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.386363636363733,"y":108.14285409109937,"rotation":0.0,"id":219,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":38,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":139.1475672215109,"y":139.96785409109907,"rotation":0.0,"id":227,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":55,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":228,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":43,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":229,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":52,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":232,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":232,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":230,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":231,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":46,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":232,"width":44.395492957746484,"height":26.378466557911768,"uid":"com.gliffy.shape.basic.basic_v1.default.r
ectangle","order":41,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":233,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":54,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container2 - vlan20

172.16.1.2/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":259.38636363636374,"y":108.14285409109937,"rotation":0.0,"id":248,"width":123.00000000000001,"height":104.0,"uid":"com.gliffy.shape.iphone.iphone_ios7.icons_glyphs.glyph_cloud","order":56,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.iphone.iphone_ios7.icons_glyphs.glyph_cloud","strokeWidth":1.0,"strokeColor":"#000000","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":265.14756722151094,"y":139.96785409109907,"rotation":0.0,"id":241,"width":107.40845070422536,"height":49.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":73,"lockAspectRatio":false,"lockShape":false,"children":[{"x":31.506478873239438,"y":2.4460032626429853,"rotation":0.0,"id":242,"width":44.395492957746484,"height":29.54388254486117,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":61,"lockAspectRatio":false,"lockShape":false,"children":[{"x":20.86588169014084,"y":2.637846655791175,"rotation":0.0,"id":243,"width":2.663729577464789,"height":24.268189233278818,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":70,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":246,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":246,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.3318647887323891,-1.055138662316466],[1.3318647887323891,25.3233278955953]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.84825915492961,"y":2.637846655791175,"rotation":0.0,"id":244,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":67,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-0.8875219090985048,-1.0551386623167391],[-0.8875219090985048,25.323327895595412]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":7.103278873239435,"y":1.230995106035881,"rotation":0.0,"id":245,"width":1.0000000000000002,"height":25.323327895595277,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#0b5394","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[1.2752008616871728,0.3517128874389471],[1.2752008616871728,26.73017944535047]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":1.5827079934747048,"rotation":0.0,"id":246,"width":44.395492957746484,"height":26.378466557911768,"uid
":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":59,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#6fa8dc","fillColor":"#3d85c6","gradient":true,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":37.199347471451986,"rotation":0.0,"id":247,"width":107.40845070422536,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":72,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

container3 - vlan30

10.1.1.2/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":65.0,"y":31.199996948242188,"rotation":0.0,"id":253,"width":60.0,"height":14.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":74,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0.30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.49612211422149,"y":17.874999999999943,"rotation":0.0,"id":266,"width":275.00609168449375,"height":15.70000000000006,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":75,"lockAspectRatio":false,"lockShape":false,"children":[{"x":68.50387788577851,"y":43.12500000000006,"rotation":0.0,"id":258,"width":211.0,"height":31.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-31.924999999999997],[197.00221379871527,-31.925000000000153]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":68.50387788577851,"y":38.55333333333314,"rotation":0.0,"id":262,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#999999","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":40.7533333333331,"rotation":0.0,"id":261,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":5,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e06666","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":70.50387788577851,"y":42.88666666666643,"rotation":0.0,"id":260,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#e69138","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":73.50387788577851,"y":43.95333333333309,"rotation":0.0,"id":259,"width":211.0,"height":33.06666666666631,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":2.0,"strokeColor":"#ffe599","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-64.00387788577851,-34.053333333332965],[197.00221379871527,-34.05333333333314]],"lockSegments":{"1":true},"ortho":false}},"lin
kMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":248.0,"y":51.19999694824219,"rotation":0.0,"id":207,"width":143.0,"height":70.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":33,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network Router (gateway)

vlan10 - 192.168.1.1/24

vlan20 - 172.16.1.1/24

vlan30 - 10.1.1.1/16

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":3.0,"y":88.19999694824219,"rotation":0.0,"id":272,"width":77.99999999999999,"height":28.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":76,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":80}],"shapeStyles":{},"lineStyles":{"global":{"stroke":"#e06666","strokeWidth":2,"orthoMode":1}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"12px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1457586821719,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/experimental/images/multi_tenant_8021q_vlans.png b/experimental/images/multi_tenant_8021q_vlans.png deleted file mode 100644 index a38633cdbc23014364bfc611d650b2a17dc72ae0..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 17879 zcmYhg1yEbx7cE?9f#O>wA2N=k^5TU; zo{GGTp7-KW?!7UgA^9LY@*H}Cd+%K1(B{(ASh93{t=X3Dy466wQ~@&Pj=v%}BW_Tv zbkUAB&>_O7chr-9U4T!T_5s~WI_wo`Hxeo-?Y#yLmK@UoPhtsg``T`mL=N9B!-bsW zgx|Q&gpY0y13~m9EY?WzZE_R{@hs0lpRCMaMHVgp79Xn2Ku{Ma|No98>dQ?kYv>Nv zsWm8Z-;KC6HhR6Lr!VeNd|tr%VOY^m zkwVRr&<8Req~^;ok(4~qc8%?}ztG$_^;BLz`tzB2Oc({;nbWga)0iI=_!KOa4M-C_ ztTazF`Hr*|1=0#(;FjKu?@{<00=iad-;Dj0b?Kb28_=j{!D5U#_8Zd^0xL57Wr;kV zW7Yogi>A0O_xXi?B{b%JsyPNb<9T4@0^Ua9m{`f@Ovrp&86MOYtf-^It+zxU^_2T1@9mNroUAc=~sP<&MwscFUeQ~U<|7u8-^w-37_ z?-JEMYVoHK&{lO$cTS!yzsMU2rqz<^>|956zA~~i8KsrX0scsyr~V=kQBHaJzO|cU zTzmPi`1-h;toXlFvEO)Dhttdt&?@q>Kd)2Vquk7t1{Yd_oSwgUmj1whTCDfD33ck9 z8RJU_(H9rX_0l_CkUw=EALt!#nWmXQAB_t;pO30$=Q^sZZaP`|Lj;nj;8ja2!7oV8 z60r=%w|IV$d3b~n<=QD^cSzQ7Vpm-Q+IaizNe1w?1stu&`ijkMd>wcd20ZSXmE!1I zI8Z;Z4{&eE;orfX6?=uD-Zc^jXFe|_NhE{gSt~z1m@cv{#2Ot7Dt%%N{W=MM*IAB6 zO*h~mm)cFI9OyBP>x`Y6Vxo^NBYlu5a$oT%!`QXsCSLEni?ZE5>9IAgThC|y@b=K) zup@H_BIEF_%5Ob&C2gXbHMgR~wd38|>Y>X)pg^&S(#@Qp{`5}_L)TAjPNKc3{R_Im zSTx*!+wnGYZWnBY3_>`pJ=Dj!#FUC~K^{-G#oe3eQeR|JSmIRTf|KTmOp)u~kKwNy ztr$AvQVd!r-YN-dJzN1!UqQ7M4I_mGlQj}eH&TSJxxDcg!oN~+fq?KYq*czb)8De< z@bw6WQp{wZ*NveGzgX|D9yD6qy=^=-BQVoy8O4?}f=3qpAOvaoMw*+Mnf-ufYlTmy zHG6OWpo!J2Z1iIa~O*@BFdJ2=tR+=&$w0&?W~!n&Tou+fz?XbS5x z?)E3!mMk5G5K4sx%>E`#WAZ0sl3d^Q$J@mmV$t>0g4BG4_>bM64d8oB1YJ65SI|}XnodMBgWbaKzxJQI_?a(SC6hSaSC6PjS3Sys<~Kr(di<~axaTe) za%Hw;r*#TBwCP%3Uo~;Q*)Af+HL~`&f5kcw9!*fzA8$9hki&vnbf2gx1HBGmr|LM$ z`@(Q`VlT9|BD@v0Osq%F*-}u*{5BzxKyc2M9V4oaBegp$`47kKyy)DwQ~J0LHDgA< zWb{EidaHNwHT|vMa~t-2Gd1GWO{fiz5JM=PAwnfkh0g9Ip9Y|WhQKM@UhRN&zPk@N8r}r7J%{fLmcR1XAlPz_PNfc zN|&SLZ2SxtKn_ed>cZM?j3(z@Qm?d4)VR4vJw<`8@OM1x#Ov9;B4#0E z4CVXs=_V=k1>gaGm2iPJZer)vq&w7Rk~oroQIV1gDlmHf{dclgvQ{qtxXoV)uLOSo zX z`CSXah0|4bZk|}UxVTK^cZ*IjP{%t|mFVyBDa8>G@=YnA?} zgD-#oF`fg9bI3JQN;mGX!GD)EonafCDf15VB z)D+V|i~tOxT6z3HYWbn_=c%>V)#cXDDJOGk&}$O{_XS^I7lH zZEVp1(PsD0r`Rw_kove#YX1(JL{n_7*`@jA$u>0>>W$>ND>KqKq}Ht`T13M%;lh2>dyc6VNPYTEIH1cr}Y|Ey8a zfu>QWO3t&dD+t;#Tx*{x3V3SpLH znu1t~d;FK-b@fQC*lQb(x=k21XO9pwH19Lwo9W8IV#p? 
z)@G&1;m!p8%7LJ(odTd%R+6>fYmz}chCuq(_Lj@nv-j4-x4~*4?9!r|4BSn}f|#r} z757FHa_Ywi5}df-E^S<|xZt8dV3W^5h}cR~{!AaF(d@Kv{RC)ZGv(ZLcp z>sU1pq%7n3Sj+l@fW>QXStG|drHMNll$Zv(q8sCI+I7Mt_aIJ2_zUE0RA#$Sksm8* zLS{KD)hPk)JS<899bTtfq+w`e_-!V{Oi_1@LNp>r$i|;qZ}Bt(@>wcUfsK!QZ^9x9 z_e;2Q>_Fo&8N^H-8 zA7?b&6>{I6&U#vRSdcO|)ETV+wGWAZjIbLuHj?RRL+P;Y)3ybw&ihi0u9rr6AKMkd zba9PkWo4O&L+UclrXF?+$x&-~LB=;Ky?qNcSMUjAX|ZzJY{J+s)~5OZUeu}bcM`9>7?7d0=Zy6~Y5~94 zHqIZ;oY`_Syah{NYlV+)xxLbXQR{atN zIkMpwHgL#z4=J50!Dc?&wXv>)7$HMBt7CZu2jv^_Kczn7(1g|>M9{YLw$Z^$q18?$l*f4Wg2Z`&Fwa|Jt$vzDtSl(R}P5tzI;DyAEYm=!kN>vXVM zxLuUebn3b{#i>s;%+;=b+%mbjhkDqg&Ov{*dEWQmfOm%?U3FWGbGg9R6i{sE`9>gklS7l4J#-RNg@sMFv zFyxqk_j_}|yMn6^V6`9&O%fuBY+4vDglCVl03$43_eIrxG5UqsL_NYGr@W}du2sAA zJIPSItk%GGA%bI~tWpaAC_?AG>x01TfDfNEe%T!jTL)_W>F}(oo@^*Zr>gIt#{R1VDuD*$^ zS|^}(ASNZ!>3VV@mM#QKDN4F}YVQpcARb++%x$%xZut*r`?qmc*7{NQu*dPp?ZHdC zGK#U56sz=K8-&)azkSK-oU`JM1VP{p1EX(}lUClsuKt+=C$BH(#vn=zgRmJ9#-%GE7dn}3|Q3kb|bt{!lG9d=icF zdhScP!jO%;3j8_&aZ$v*7%YL#;q=;{czRtFkMoHc2TCv-1?mGp})8NX4wIBz#d+Vuzl-*M#%(@81e)fgR>XUs^{(gVPM6UW*HV=EHlZO`8neF-+F z(hW;?u>OfdAFfaBd=)8hlv6)C$zs8c*(;jvPyJ+{nks%9>~hZ{=w=++HL-^rG8)8P>9tK$gml8CJrcltW;q!$snXiI{NYh0I8LFyWkilaW; z@n(xXY**z11v*RJ#^7q5{uOjWhH>_QU#F&-eDbtO;{!f6^+>2y56M>*gcftib0;aqY@-M!r#9+Q~ykQjU9PcrZfBmixm0dA| zhBYEv-$N4$8B4%5A5C%1tN>`RDQ5z++>P}43Hn@7$|t>C^=!&3$le`FvShp=u8k#7 zN{MeDNG_)7ywzuBI{@ej5#4bW-sm=1J7#Xb2>~6(o2GPgJ9sxse2D6Aj52kP@q9j* z@+i(ceOU6>SH_J~3H_eldKBhBK9+Tm{KgRYTmH-MquX>Ta8?Xf#F{aE;)lhj z5!JBC9m4*o$!R`{J7JY@D{}dsTJ`%eJTtIh&n1BGbQ*d@tGTHdjH%$z>l+@Sj*-d` zymFLDSD)iZNZ@Bf+=#r`t+hilrAJDneHKuurwiYSXq=pOt$0OS#H6qBhAhQK{u_xbl*gJZZGdYFlkn>Ys&Qn( zYVG5UZw}feXdn&wr!XSdi8}R6Tw|RQ&9Ym6-uHE*0$l}4C*OkIBu??oL7hwdL)IoSBB*R8Npahnzhc4b!tgvLS2o2 zTBl>?8SUah1Zwob3#$ik~fqlk!&e$)ydh zHnA*+)`U5eU8uTZPe!Brw&=-=HbvV(@BX#&|0+KYgLt&BlwC_xy0skCzX znZLO9skp03F}sKHN+&)nN#%<2>#Mvd_*H#E+^iZv0DQ~xx=X$91zn~-pRjz7ygbO< z@;e~+QgB-%6w|9zCs>}dzzLO@ROrm{h(z|W}+I28RQh#bHKL>C$FP( zysKRD(zYLRc|*-ixJb>w_BK*rh{@oK(-X?@sUctdRn{kr-XU2h2^J)?hId$t!WyO> zkMc9?-*Rb*8+%?u0eP$@UB)8jSZ2YU-3L|$(|n7;y!wvKFW0i--cL^O>lgj7YbrWb z42H0h71M5^Gv$0#*&=2<>CG;7au&oY_jAvE{5hMchsalPrjTvNNmS+#DMY^ataq+u z3rh$K|HZ)8_jZGcj5@$k@_U8PMk%!8B&=#ZhPQySrAbD#o`r@v!d!yVh6f+hE*Q^d zI*5Hfp8Q*dpT~zCS+a+aeg}pQuvOqft9ANcf;zSFSI{u;Kd4vJZkfm_%vo&75ekrq z8C{%Xw`uX^w@f-Jh6Ney?6W-R=wQxrwrmlkqBft+F-p4~MF(Jw0uUxr|3$#GLfkXd z^SCZBT{q~*+D1`bAPO`LvG0K0Lh;%i+&qAj9I~?fSOA5Dwo+$2xa9|i+5vB8uRlCP zI-J}J#@zBkU2d*+PdctPS7)>Do0@R0rM`#1f@+2vJud)l$+T@PD*v9}e{SPgYKcW- zdM;)b+%>wc?Cyk|eY!m0Gu}DPsXxA(d$QOq;oitTot7=Gi^IO=6@UhT@?39)uPx-1J`Os4e9@z2rmQrg zpVTmE^QR4wVm04w=`l`?oI})yow?Z+nMs(KTZDBIVxrWM(TB-70AfEliWk0edWuNq z_uDOmr36-nH!}F9_7QLpa)<@`J!fhipPE#LgRRNjnI@;9douW>DUQ!fr5uM@a(H4G zK2~B?@qIt5Skh2=qiPEWWOmA@M9yMzDaXgasOsE%6N50@Iz>@nbiA%E{RMOnWo)$e z5~j|PX&ZuHp`C1p5(1^&=o=e`OTUHQB1m@wMWfeb9}O`1bzVGtEy32|Cgd=#b&f{Oy+ z6*O<2DB!v2`F=S2`Res_Pl3Zi|3a=|UUfKXeVj(wImSB)Iui?yL*B?2;5va-VS|iq z939WEAZONQ1po}Iz<7-Jdp0f$4-8xRD7?_AwqIY5<>L@CGj4tkx~4$NWc)%sPt&N~qvk=b27kYF#3R=f3R6 zjhe(b)T6Z)GfZq-;9;tOnuk{Lp&c%8)(+7g^HwInMm-Q+&W52CNV53x6^5E`lg_B7PDr0APkyIFh-025+IhOK zYF&SH_mRT$qg zLSFMAkW+789n3H3{Ef@PhS>F}E5GQ@`t6lu(_iaVfcU`p(Nentf zA)7yK1krNUjpj0rN!h4!U z-re*S;|zDfG~im$Vcird>%{i*a2RwKRao_B z>B|cbi7kId^Ck5$qXpDyd~)snpTS>Wb||4{PrjTCM%T%6?i}}#Zk;XD*SlI~FyJ-w zGo-~ChiAPO89IyC9CnpbE%gC?RBQ`~8rguj5h$5edOsCl;4b(M@?wze*wp(B21On@kQuRuP$T zcaphyj@XJnKaL+G6ptvpT1FCA@qE~5g&_1)^5Knek^z zK+{*rErU0tCTxq|A?-;pFu;j5Ftg^x<7WW`G0L{>LLYFBcqMn;;eh z1^`+c@er_h3gHM<(|+o-T9#*bNSnd+L zQcN&90+Bz!+c@fD0ARjHi=Jb#m)9RKg4+w&iQ!!)kgIhDg1em?gld-MPfY@D{wURG 
zGO4*N*h%?q^@QU#HX`bu*1$VSGAiRJ-T;6!$o>U^F!hhUR9QqYnry0u2Gv-`SHK|j5pYZb;ubgZ3QGOtNlN&%6+S$qx&Zl25mP znzw_lw_}Ifntm^_qAaj1&60F^f2p$oa{2>i*Hi0=dGq5wn(+25p4ZKS`M@&0gkY9q zeZL!S^N0lhz41uR$Ka(gx1jru8vpG@>qMC;NNm0F+I=2kgMUp$ew;w<(L#Dn$?4kT z-zn~Ex0OZs2JQv7|HhF0i#Rt7Fgh4L*BmS%d^;MRFLUu>`l*)nwkyn|PxnDuY zGpO}ul99p=s+#_DQ*_U0xa_;JpA|~y!=yR5YJeW#L7)!K#S$Ke*a{m*tlh$Kx*4J} zOOjz$Gn?T@4R~cVH&YKYTU$!5FjVGmcU(u@x#WPr`pK=`D(^U?Oi#*VInO7J6-;$| z=M8R^$6tnW=Ke5VdzPGloQtjD|J#ZA)~0~NvxAfmcih(CQCX5K_F|t2_{ibWB^~-G zI4%f_nIrD>g?+q$~yTO{oSoY&%qqirY5sjACfNXvo8zz

b*{Hq>az{aEwl^vuL-ETW!AsvC0U0rmZ$_<6OOxU4rH z6u*cCCrj%CbZzu?LCU+fGt1_K0=ic0aiH8Tv(d}mzpAB)%@%6-6`Lyn zd;=aEyurlO2oEI|CPfQU*p&b-f0^7f+aySv?;0IYmhE8@&azM@s(W+tZ9c5e4!{sX zfiaPRB-x>31b(-@*OX3SfPO={;5>XR!!5^M3})_?`eW3RYLFL4dn&gCRlddq!UZp3 zAO=>+?ALa2%k{;xrD-8Kl++6_#b$F=O!pn7J1$t#B@!$C5)BfdlU5HN(yJrQ%j!dq}rfM(a$o8fv zTp~yzd4j8p6OX6R;`GAwKclI1Fc=V2d+jG|XGUoRj0-i(!+O4aQ6eJQWJVRWoER@>i z1n>F0IMxw9Tw>{kK1i)xicm&1#D5gu{o>fIFIqAt;KD8L+_HrIBbg;wPdL84HblQ)C+~sF>=()-`U*es#QL*>k3l!ZIIv@K;?LQ#Ty`{4Un&{&BHwE!4CHY8s zb9=V6y1JT|m)G4blMWT&;|nz-WC)pfM6;rDh2LXBA-h@)ttSbpP#BK2EA@3v!))k_I4*8zu-(cSeUL5Bkr5o$S)*>_9hh!9tbP63Ei-@-I*%W`U5q{cz|E*9v&Qg5%&dt z8!AqBkj06l=i^H)WY~-BjUhDa&H65JxDC=MS2hEuZ%EK2A?7il=<>wG#%6A#1e`7} zo5c3NGd9MIpFdt|q97+9fkL^>J9E;~(sFV}If_$KQp(F&<9&f+WOeBp)6DcfK(mM& ze>?Fg>6w+b&jHPHL|?21Nq6=$)R_j15nrFPD&Eh{H+?w}3v;%(RKEj$S!Mmq@NT8ky{N_Eq*cDRbAP`cP^8&pSqLb}@=d)^ojf1TGW= zuDwy*%gkJ?v!NjzhLx@$bKNNM;+5*d$YJ{zRg=C4TI5VM%m4OPr0(G0(9qICd@hXL z6;g5pN&P^^55gwB`9sbgRWKHhJBqdJyXfNrVPqfmezre`PZKHj!qWT@RALZRSPem- z5^p>^3k%wi;p4v4Z=Si@6@b5-guorPcC*_oI4o%WW^tQ)ge-JewU zoM=ex0>Ge^j5r+xnPav+yv)pKieCIzX%!U}w>)7m)}-_ukaFHzgi{wn7|G+5d1`x{ zr0MDDe;+en1TZl%d6)d(P0|sYq|2Usd$A8zE|HZaJf;mTq1MY{WzY(4=jWC?_Z)ui z@>{sQ6=vuSQ-CWdD5!~KaGABg4jtowUSD0oe{hF#+VlZ6_-BZIr3cSO`wR8M!;{5P z&L`WkhATD9p3M{ij5184NkCuY_wR=axPKtim(fa;f|ateva@{@{tu8YWZ2ay=;maF z@I8Y{2+_`OGnrC+00CB2FlwY`K?fuLs|<{990C|AWwK^5>2yB&!-s3o^|_v60eW*D zIjtPRgt>3DR2cA-q_psKZU(LJb|M7&gXFocbeWR(9K=jXv^Lt>+AmBjhJYXLEZjwN z+nG?T)i-~9YWJt}VS|-;lcg9Ywm)@XEfNnH|K<+Qy|wV4V^-iM73T;2p2BISVy&?R z+1aH=A{%|~ffA*ZDM)@p;7A)n;Gi}8<&9~TNoV)C((aRbW?*bgQDa%hL7|zz%%BzP zuZ%##mOR&z{*e+*!tvud{eI$Xe0+SIGM?-7tqIz>evMhj2x3da=m*aUR&H)TZ=~T$ zd9w*K#Vqo7GbmxG5D`KfMypV%*cRFuFp1Th#7&c_utafC6s0V@hnSEh6l=z zyt|W)LV62R*F7WzlgwF660sK-1E$zj*x_n>RjMHUIn zT8DC98iDbqIji4HQ0M-xXPE&3r(Lq+XgHPm=VWNyYhSJuNN)-bXyYYA4KX}?A-WZN zb>rEFp@zUGz?aO1V9cg zUQ<9y0QWk|6g3&X#u-3#Q6+3YbxzWQ4Ypf`Hc&YieLT8y*|r}!5T0qtpq1Oc+OD4C ziyrZ5{q-B}`y>Zb62pg))goLh&hEpb72S9zFOnb6KRi8D{r<8C1+?27}e0s3aI$1Q*m6Z`| z$9mVvQ^)P17IbJq=TXIentF%@*UeafK^jUZM zY%xzfxFw$W;#g%ScC@RLJViv{{DLp_=ziV&AZ?_bDw%!_85R^+K&$c+cC`b7;gf^! 
zdkf+K++yzA!7U(VlTN^zS(Dh!w$apvnxHq?H_9PLvkrmBv6taxYGR@0E&bY$Jxn`^ z=%9sJCCM_P>1ycq+bfLYH{4p6!&4Tkb*@^gP_cSk(^iEz7E+LhakO54^8zbDujJO_*>BO_v$z zj4LH+Ds`DR|4;;TMfm!oSDjZ-&`LOgx+g|WEv+_0b z`B9yb=ef5Ik~F%XVxI9y;y5N!Nll=ggAuL{8xrbj76_0td5R|R>l;2Cfzn=3!d;Tg zH0#m>fYa6$rzGjzU3HJ%hU1Bn!%yO6*k-7#&+0A@tKgsc{ck9_A!uFh7yJE-kH^XH zthZn0>y9|4+em&Hl=vTnhm_+q(-x^$JEaDWqgv>riK>>G`411%0U!7QNTNZop)^vXVh`0e*yCO;tD%bx59ow#0oplpi`6f zY~gW~ldgiAr6oZJSpww3S40G^TrRSUENHG~msvj@=;JJ-b1j`ZSji?Ee zLTl2nAyhWMHZ3iUzksZ{!}?{2zVfqu5>Y~5@~9WF(^_a;b~E&z=QvCjw|5WNejE?; z{%reMvCHi`dto8d?R}82^=I3&WL#s(LB292YEg%0BERP|DVnt?s>gftiWB$s=Nbvd z=SUbyI<#ttj~x{S{u2T!Q2RFLG*^wk1i9**7zqh7h@5Gl>9xo#|6N~n$3bb;6VgFN zUzJ8^$nWgB*Ikyg?flq8)>h{1cvk?-rpolPY!0(GM!SOzXP)Y2)R$YiF3cd026Zgt zC$-2(vYvx-rJwev4#xy9N*bz{jirO3CfZV~CgeytlXU)Ef;V1Vt6J?s=q7 z=djU<1Y;vHrhH(A>JE~{)8;^F*eLe7O7b%gL#p|Ep;tpwO&~;P>{6;M#4_XfS--bT zEYZIdr}5mCRMMM&O}DlQ6Ni^>a7I$R%&KRyH7)Pr_G{OY?5)>8+e3}RjDg&&Hhxi12ACsFKf`f z7oYv-$yT{+T$4%p1=MQOd3No39a3SnDEYVOLS7{rc{wd@iSWE>-p@m$%r#o02ziO% zxSH&+4z!y~N?Q zoNX(k9_6Cxns2M!L>{r^m_I8Tcf zb}|n5Q3iXZAGJYRPf|ojKYn^}?zDp~9{^7qdg++dm&hou72(25PwkVg{v+vvH1|(V zZTY~N<{za>gyBEcYJXDDYhUqd5=9N`TIFpYkmjC}Wv0L1QF^n3MhFlZVwMZM8>jvG zc8j=_#^DgaDeevn1;A16ClmhZUAZw+BJO|7GvJ0_=j9@hPU0u26PbDX zQwosiGyh(|WsVhbh7B7VSM#^2+bq^7=g9m=hABYDPgecGAu$(OENNcD;k)pY6W93} z{o=)u5!DB6WD$7xgnuu7CS28A_MgzhT{@DDctf8{@&8fwUBv4B?w2@UQwv)(U58s+ z>L-e-FMaZbnb5scP*iU^!5_+F|`*or!y+O+GPRrl6W9d9hq!*i@F(^6WPi+`|1oe#XHM#V8H?3VEHJp-9MA2BWM&*oupsHc!G$ zIl7T+C&KU#W><18`npZ12wP^0?we&E%f*<#SDY9E*M>ei&)MOq5yfCnX@Y`f~9Nw$HcbW(91%)zevdoylqjmA@%P2At{~42)X&EiW?+*+-4v4Ycp!@dWvw zDgBqLm`+2=~Fdb&CFawyt!AknMLEA8XK%Aupv%7KBaJv3dUU~F zDnhkLRK0fs$mi3UOTj*Aqy7ON6&X^UKXq(73j#;3Um*(6a7w=Eb&`O@KbAb|FG5l5 z@1^u!`!0@ondd;Z;zY&k>0abQ{OZ~A%;vBnGk7d#l4-^k?+a$ESC=ES;2A|LIw+sG zov|H??43x6y?*(tHMoJ0&o49co4MN#Rz_Y6vX6e z-(5!8HguEgP@N24RWa6d`e#;0fe(FSLq%Bx^b6%W3e2L;B^-)861&F4|)pMSa)fA5d%70C71M6))P z@6i^+GmhDfM`)-_;NQtQtm#tp>BTTUxJM5ND88t#K~=eB4*1M?e{-tjjU*7!-i@fI z1OK@F9K%`!K$E_XgkzH6ncXhYx+T$6AO;}gM<%e&Bg)$WXxf1q8@o*ynQp zYOfb;D0%56T7))UhDk!_V{bZDXuC|Hatfm5`Sk|heaUkQ4+GhwK!Ady$P(q>(9pkb zXk=u>^>o|eHA?VUb+Q?^S-X9b&=9V~`K1fl4;eWigu`zNPn6y5&tIe;*}Ov(ut@TM zQG6T}*(ppat0YdXMG1(TCjEyY5rKi}HS#1o;&8mf4~Y}wxFj4D1pp`?RzP^Q*`u1^ z+7CH=y)dE^(W%v>rJTa3etdjF^$1sc`vS!2@rb?P}6)hsEi*@mn$*rsZ z*JfPQrK6anuKW4(vrA?lGMd2=a|$k6h-LJAAtTMlC75cSiZj&6P7!sI%c0aCFCn&_ zaw0M}v%sR}WI`hFLD>DRdboJN4l-IZz@+>2#LXSmU)?JFXaW!aQqmm<)w~WAnW$&8 zO06QprL)YG)PHgvYRjT^Cx2nrbs!rfMl1Wtp(E@8wGf-nc%bO#NzDYpej0D?x4Vl5 z>m)%{(S?aP5eP~xF_48ah}6gc{lcH2I-?jlkMGVt+md@3Yx}V)kSbG6R&v++l4&6SN}?GZ zRdZTx5P5hf?(j7jkpe_LZf8E*#8~F#!JL+s`~$ggf;}>kXtlZ;G5`Fg%3Njq=b<&l zU>uvr=7(hE?<{hSeL-8xBbhI9>Yytcel^R1t$?rf&cX~r`L^i=o=O^IOom_Dd{qUJ zail2X-MQHDJkJBL`vp2MOTG)`p?pgp>l(&y%MI_{_LSQNCk`w3YSyDK0DjbJ@_kjH z;~sf4NKJJ26B_xmzpX--4KG(+W{l;!bSMkmne469Ugk|SEIl8^SpJSU@$a*kq(bN30OYWo%wjL35S$m2|v-vCEV z9;fQiEDv%U61L=z%FBOS5sN7^W_wf@xm}`e6~Qx(V3xYgj5KlI_KNU67`+1Y1GMbU zJCM?69K(%tleG#o%!G9113xSRR8o@6+FDyr)PDtYxdf^ND7r|>3+m2t;eOlHAbwrs zbw8)tp{`?#N>Hy>Vw#I7(z02B0x^N^}%SwYLlWp@+i|) zatj!bQnjjCdTV?pg>rnxS44WT0OEQfm>^wgO2=?0i6_}Y_~e$TTEI%|SPD1@R>bq< zg2`!ar%7_C%Kl<)qLZQSw{jsf^Jc+~V#S-%V{$ng&!1vV3q-tD?feCew$g(`diU5! 
zW=HzyUb}&!#!sNpq;x+tBf*c`0eX-_G$KL-Wq=;r!YaE6?I+d zJP4v zYwZ20rc)i-xvV)D@&Z76bEK3vf9%rU&<65}(H%_@)zH<-)#%-2Dn595eqv$TAnBkM z^?WTC_!ofY$wQ{V_+6uP$VBR2BBM`Sg;4ED@K2Etf1mSu{D-HS(;C?LO{??a92!_V z^fEQ&{q6@vX}el^h4NG4kf=6kDcv)sPjqb5d5K=xaT0#lv=c#($wC>ad^E@~Mm@Gg z*#Dobs?9oLE`6UK;Mo!1-gn+^VhULP-@Na-+)QVz4a;9j(o@sY!kuLImJ|A44rgn` zIu*tSH};M!Wk)1otS&LGhX;^ipQFMHRsVIpn(Nv(l(f-B@=!QB7Ef`Ftz2%qq+RF( z?YerZIHz%hxh0M{&AUy=+ar(f#0%E`)axho@(%&|?>IU;FY$xcE`Odt=|Q8V#;qCF^rc~|!; z$03QC1fa!sN2;rnk3x_e@|#I(4czUYba3DZ(ZWR-w~yzV%!jnS zI=|~Z2r@9^K6cOV9SQpw{wF37N%lAT=kQO~$#=au6eB#p?KtpWTcCmd!iM34ElUM` zT%NWTfA6=o639Q!#xt%X%4G|l@4IemhMEa^QItX%L_qY$Ma<64;qt#U2v=WU9$EjA z9y*uPxeXDO#I3#AAZpSPWa_)nnPJ12t%$=BrW6EHBz%c#iB`PC#H;UpUmx|IqEa}R z7rrqO^$_=QQo2H5f_61(&4qg)TW+IrnFGPhZ{~x4OK4gEcN(vBVXo1T=ck8fCUp6g%z?@m#P*^>KMXZd7=E>JyreSU^X zm&dee)0)ofFnxNm&F9I#?R$;Wa-}&AZ>SGmSY7Xaa?bw=^DkUntS0V%@Z1ESqs_;8 zB%Kt!IXVxTp73@|usNa7{M#zezDcJxbX|GuADdvInV)}7{OoD<@}Bi=_79OW>$m?g z`@eA2+2Y#lhUZV79p>-|Dl~EXVb}it@|S0quN(C$Xw0z|zW3|pzC7=e*S+%Ht15O} z@PGbzli_EFo6LXr8F5J6UioSE_0JD~Z_+CHU^$2X`F*{(@yrtv}nS5&!vBR_42NAzV-A%y8Q(dUenH`-^_q-7HNxz5MeY)8FOa zr*FOXrR@Ixx_i}SR>BV~r~Y)K1J1J{=R9QKWpYm{_9*M9=UPf z#iNJjFFzo6UDuJ@{{7O=r)w`>J9m0ZzP{XlyB||7|J(VuaDhQ-V3hUo-W%6agW2V_ zWJdk46ezQe-oO8ud;g7vb(?Rxn7;qL{e#Wx+}mI`=qZ5}=ncW#H{L8@?Uj_5D>q|nKA5%jk{U=wX7;6)2FF{BBqTqj@vwYd~H*~w%fp*k;|sH>+@1|w*I$?a-V1P zd+48%X}y>+<)EMxODP*S$b{vqQy*sV>MiBYSk->?!91O=^h+_kcTZn8ykpo_c_!IT zX|G7RN!Ox^y1HxWv*+ip+-container1 -vlan10192.168.1.2/24eth0 -802.1qtrunkNetworkRouter (gateway)vlan10 -192.168.1.1/24vlan20172.16.1.1/24vlan3010.1.1.1/16eth0.10eth0.20container2 -vlan20172.16.1.2/24container3 -vlan3010.1.1.2/16eth0.30DockerHost \ No newline at end of file diff --git a/experimental/images/vlans-deeper-look.gliffy b/experimental/images/vlans-deeper-look.gliffy deleted file mode 100644 index 4d9f2761c4..0000000000 --- a/experimental/images/vlans-deeper-look.gliffy +++ /dev/null @@ -1 +0,0 @@ -{"contentType":"application/gliffy+json","version":"1.3","stage":{"background":"#FFFFFF","width":566,"height":581,"nodeIndex":500,"autoFit":true,"exportBorder":false,"gridOn":true,"snapToGrid":false,"drawingGuidesOn":false,"pageBreaksOn":false,"printGridOn":false,"printPaper":"LETTER","printShrinkToFit":false,"printPortrait":true,"maxWidth":5000,"maxHeight":5000,"themeData":{"uid":"com.gliffy.theme.beach_day","name":"Beach 
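The deleted diagrams above document Docker's experimental macvlan/ipvlan VLAN trunking: each 802.1q sub-interface of eth0 becomes the parent of one Docker network, and containers on that network sit directly on the tagged VLAN segment with no docker0 bridge in the path. A minimal sketch of the commands the vlan10 portion of the diagram corresponds to, assuming the experimental macvlan driver syntax of this era (the network name `macvlan10` and the `alpine` image are illustrative):

```bash
# Create one macvlan network per VLAN; -o parent=eth0.10 pins the network
# to the 802.1q sub-interface for VLAN 10 (the daemon creates eth0.10 on
# the fly if it does not already exist). Addressing matches the vlan10
# labels in the diagram: subnet 192.168.1.0/24, router at 192.168.1.1.
docker network create -d macvlan \
    --subnet=192.168.1.0/24 \
    --gateway=192.168.1.1 \
    -o parent=eth0.10 macvlan10

# A container joined to this network lands directly on the VLAN 10 segment
# (192.168.1.2/24, like container1 in the diagram) and reaches the external
# router at 192.168.1.1 with no bridge in between.
docker run --net=macvlan10 --ip=192.168.1.2 -it --rm alpine /bin/sh
```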
Day","shape":{"primary":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#AEE4F4","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#004257"}},"secondary":{"strokeWidth":2,"strokeColor":"#CDB25E","fillColor":"#EACF81","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#332D1A"}},"tertiary":{"strokeWidth":2,"strokeColor":"#FFBE00","fillColor":"#FFF1CB","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#000000"}},"highlight":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"#00A4DA","gradient":false,"dropShadow":false,"opacity":1,"text":{"color":"#ffffff"}}},"line":{"strokeWidth":2,"strokeColor":"#00A4DA","fillColor":"none","arrowType":2,"interpolationType":"quadratic","cornerRadius":0,"text":{"color":"#002248"}},"text":{"color":"#002248"},"stage":{"color":"#FFFFFF"}},"viewportType":"default","fitBB":{"min":{"x":-3,"y":-1.0100878848684474},"max":{"x":566,"y":581}},"printModel":{"pageSize":"a4","portrait":false,"fitToOnePage":false,"displayPageBreaks":false},"objects":[{"x":-5.0,"y":-1.0100878848684474,"rotation":0.0,"id":499,"width":569.0,"height":582.0100878848684,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":103,"lockAspectRatio":false,"lockShape":false,"children":[{"x":374.0,"y":44.510087884868476,"rotation":0.0,"id":497,"width":145.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":101,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Network & other

Docker Hosts

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":157.40277777777783,"y":108.18042331083174,"rotation":0.0,"id":492,"width":121.19444444444446,"height":256.03113588084784,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":99,"lockAspectRatio":false,"lockShape":false,"children":[{"x":-126.13675213675185,"y":31.971494223140525,"rotation":180.0,"id":453,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":57,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.4915197649562,-156.36606993796556],[-121.49151976495622,-99.52846483047983],[-229.68596420939843,-99.52846483047591],[-229.68596420939843,-34.22088765589871]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.82598824786317,"y":137.23816896148608,"rotation":180.0,"id":454,"width":11.1452323717951,"height":61.19357171974171,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":55,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#38761d","fillColor":"#38761d","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.05455395299924,191.93174068122784],[291.05455395299924,106.06051735724502],[186.27677617521402,106.06051735724502],[186.27677617521402,69.78655839914467]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":332.0100878848684,"rotation":0.0,"id":490,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":97,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":9.5,"rotation":0.0,"id":365,"width":141.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":98,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

 Parent: eth0.30

VLAN: 30

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":342,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":96,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":332.0100878848684,"rotation":0.0,"id":489,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":92,"lockAspectRatio":false,"lockShape":false,"children":[{"x":1.0,"y":10.5,"rotation":0.0,"id":367,"width":138.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":93,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.10

VLAN ID: 10

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":340,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":91,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":289.40277777777794,"y":126.43727235088903,"rotation":0.0,"id":486,"width":121.19444444444446,"height":250.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":88,"lockAspectRatio":false,"lockShape":false,"children":[{"x":236.18596420940128,"y":158.89044937932732,"rotation":0.0,"id":449,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":53,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[-121.49151976495682,-152.05853787273531],[-121.49151976495682,-81.64750068755309],[-229.68596420940125,-81.64750068755139],[-229.68596420940125,-33.27817949077674]],"lockSegments":{},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-179.77677617521388,"y":56.523633779319084,"rotation":0.0,"id":450,"width":11.1452323717951,"height":59.50782702798556,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":51,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#cc0000","fillColor":"#cc0000","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":10.0,"controlPath":[[291.0545539529992,186.6444547140887],[291.0545539529992,117.79470574474337],[186.276776175214,117.79470574474337],[186.276776175214,67.8640963321146]],"lockSegments":{"1":true},"ortho":true}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":447.0,"y":150.01008788486848,"rotation":0.0,"id":472,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":87,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":473,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":86,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":474,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":84,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradie
nt":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":475,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":82,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":368.0,"y":101.71008483311067,"rotation":0.0,"id":477,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":80,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.30.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":350.51767083236393,"y":87.47159983339776,"rotation":0.0,"id":478,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":79,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#cc0000","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":94.0,"y":155.01008788486848,"rotation":0.0,"id":463,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":78,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":464,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":77,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":465,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":75,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":466,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":73,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":80.0,"y":109.71008483311067,"rotation":0.0,"id":468,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":71,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.10.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.51767083236396,"y":95.47159983339776,"rotation":0.0,"id":469,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":70,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#38761d","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":341.0,"y":40.010087884868476,"rotation":0.0,"id":460,"width":46.99999999999994,"height":27.0,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":69,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":417,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":68,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":5.485490196078445,"y":5.153846153846132,"rotation":0.0,"id":418,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":66,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":9.901960784313701,"y":9.0,"rotation":0.0,"id":419,"width":37.09803921568625,"height":18.000000000000004,"uid":"com.gliffy.shape.basic.basic_v1.default.rectangle","order":64,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#666666","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":198.51767083236396,"y":41.471599833397754,"rotation":0.0,"id":459,"width":175.20345848455912,"height":79.73848499971291,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":62,"lockAspectRatio":false,"lockShape":false,"children":[{"x":17.482329167636067,"y":14.23848499971291,"rotation":0.0,"id":458,"width":140.0,"height":56.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":61,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Gateway 10.1.20.1

  and other containers on the same VLAN/subnet

 

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":0.0,"rotation":0.0,"id":330,"width":175.20345848455912,"height":73.0,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":59,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#ff9900","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":279.0,"y":129.01008788486848,"rotation":0.0,"id":440,"width":5.0,"height":227.0,"uid":"com.gliffy.shape.basic.basic_v1.default.line","order":49,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":6.0,"strokeColor":"#ff9900","fillColor":"#ff9900","dashStyle":"1.0,1.0","startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[4.000000000000057,-25.08952732449731],[4.000000000000114,176.01117206537933]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":56.0,"y":503.0913886978766,"rotation":0.0,"id":386,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":48,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Frontend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":62.0,"y":420.0100878848684,"rotation":0.0,"id":381,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":41,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":382,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":44,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":383,"width":98.00597014925374,"height":44.0,"uid":null,"order":47,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.10.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":384,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":42,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":385,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":40,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":382.0,"y":420.0100878848684,"rotation":0.0,"id":376,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":31,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":377,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":34,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":378,"width":98.00597014925374,"height":44.0,"uid":null,"order":37,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.30.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":379,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":32,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":380,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":30,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":214.0,"y":503.0100878848685,"rotation":0.0,"id":374,"width":135.0,"height":20.162601626016258,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":27,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Backend

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":376.0,"y":502.0100878848684,"rotation":0.0,"id":373,"width":135.0,"height":20.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":26,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Credit Cards

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":627.0,"y":99.94304076572786,"rotation":0.0,"id":364,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":25,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":363,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":342,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-183.0,310.0670471191406],[-183.0,292.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":372.0,"y":410.0100878848684,"rotation":0.0,"id":363,"width":144.0,"height":117.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":24,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#eb6c6c","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":218.0,"y":341.5100878848684,"rotation":0.0,"id":366,"width":132.0,"height":40.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":23,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Parent: eth0.20

VLAN ID: 20

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":297.0,"y":89.94304076572786,"rotation":0.0,"id":356,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":22,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":353,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":343,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[-13.0,320.0670471191406],[-13.0,302.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":222.0,"y":420.0100878848684,"rotation":0.0,"id":348,"width":120.0,"height":74.18803418803415,"uid":"com.gliffy.shape.basic.basic_v1.default.group","order":21,"lockAspectRatio":false,"lockShape":false,"children":[{"x":0.0,"y":0.0,"rotation":0.0,"id":349,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":17,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[{"x":2.0417910447761187,"y":0.0,"rotation":0.0,"id":350,"width":98.00597014925374,"height":44.0,"uid":null,"order":20,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":8,"paddingRight":8,"paddingBottom":8,"paddingLeft":8,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Container(s)

Eth0 10.1.20.0/24

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":8.955223880597016,"y":9.634809634809635,"rotation":0.0,"id":351,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":15,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":17.910447761194032,"y":19.26961926961927,"rotation":0.0,"id":352,"width":102.08955223880598,"height":54.91841491841488,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":13,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#4cacf5","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.97,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":410.0100878848684,"rotation":0.0,"id":353,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":11,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":212.0,"y":332.0100878848684,"rotation":0.0,"id":343,"width":144.0,"height":60.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":10,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#fca13f","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":203.0,"y":307.5100878848684,"rotation":0.0,"id":333,"width":160.0,"height":22.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":9,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

eth0 Interface

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":303.0,"y":240.51008788486845,"rotation":0.0,"id":323,"width":261.0,"height":48.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":8,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

802.1Q Trunk - can be a single Ethernet link or Multiple Bonded Ethernet links

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":36.0,"y":291.0100878848684,"rotation":0.0,"id":290,"width":497.0,"height":80.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":7,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#cccccc","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":1.0,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":0.0,"y":543.5100878848684,"rotation":0.0,"id":282,"width":569.0,"height":32.0,"uid":"com.gliffy.shape.basic.basic_v1.default.text","order":6,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Text","Text":{"overflow":"none","paddingTop":2,"paddingRight":2,"paddingBottom":2,"paddingLeft":2,"outerPaddingTop":6,"outerPaddingRight":6,"outerPaddingBottom":2,"outerPaddingLeft":6,"type":"fixed","lineTValue":null,"linePerpValue":null,"cardinalityType":null,"html":"

Docker Host: Frontend, Backend & Credit Card App Tiers are Isolated but can still communicate inside parent interface or any other Docker hosts using the VLAN ID

","tid":null,"valign":"middle","vposition":"none","hposition":"none"}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":-33.0,"y":79.94304076572786,"rotation":0.0,"id":269,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":5,"lockAspectRatio":false,"lockShape":false,"constraints":{"constraints":[],"startConstraint":{"type":"StartPositionConstraint","StartPositionConstraint":{"nodeId":345,"py":0.0,"px":0.5}},"endConstraint":{"type":"EndPositionConstraint","EndPositionConstraint":{"nodeId":340,"py":1.0,"px":0.5}}},"graphic":{"type":"Line","Line":{"strokeWidth":1.0,"strokeColor":"#000000","fillColor":"none","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[157.0,330.0670471191406],[157.0,312.0670471191406]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":52.0,"y":410.0100878848684,"rotation":0.0,"id":345,"width":144.0,"height":119.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":4,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":1.0,"strokeColor":"#434343","fillColor":"#5fcc5a","gradient":false,"dashStyle":null,"dropShadow":true,"state":0,"opacity":0.99,"shadowX":4.0,"shadowY":4.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":20.0,"y":323.0100878848684,"rotation":0.0,"id":276,"width":531.0,"height":259.0,"uid":"com.gliffy.shape.basic.basic_v1.default.round_rectangle","order":3,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.round_rectangle.basic_v1","strokeWidth":2.0,"strokeColor":"#434343","fillColor":"#c5e4fc","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":0.93,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":19.609892022503004,"y":20.27621073737908,"rotation":355.62347411485274,"id":246,"width":540.0106597126834,"height":225.00000000000003,"uid":"com.gliffy.shape.cisco.cisco_v1.storage.cloud","order":2,"lockAspectRatio":true,"lockShape":false,"graphic":{"type":"Shape","Shape":{"tid":"com.gliffy.stencil.cisco.cisco_v1.storage.cloud","strokeWidth":2.0,"strokeColor":"#333333","fillColor":"#999999","gradient":false,"dashStyle":null,"dropShadow":false,"state":0,"opacity":1.0,"shadowX":0.0,"shadowY":0.0}},"linkMap":[],"children":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":1.0,"y":99.94304076572786,"rotation":0.0,"id":394,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":1,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.5670471191406],[261.0,108.05111187584177]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"},{"x":44.0,"y":90.94304076572786,"rotation":0.0,"id":481,"width":100.0,"height":100.0,"uid":"com.gliffy.shape.uml.uml_v2.sequence.anchor_line","order":0,"lockAspectRatio":false,"lockShape":false,"graphic":{"type":"Line","Line":{"strokeWidth":3.0,"strokeColor":"#666666","fillColor":"#999999","dashStyle":null,"startArrow":0,"endArrow":0,"startArrowRotation":"auto
","endArrowRotation":"auto","interpolationType":"linear","cornerRadius":null,"controlPath":[[261.0,233.56704711914062],[261.0,108.05111187584174]],"lockSegments":{},"ortho":false}},"linkMap":[],"hidden":false,"layerId":"9wom3rMkTrb3"}],"hidden":false,"layerId":"9wom3rMkTrb3"}],"layers":[{"guid":"9wom3rMkTrb3","order":0,"name":"Layer 0","active":true,"locked":false,"visible":true,"nodeIndex":104}],"shapeStyles":{},"lineStyles":{"global":{"fill":"#999999","stroke":"#38761d","strokeWidth":3,"dashStyle":"1.0,1.0","orthoMode":2}},"textStyles":{"global":{"bold":true,"face":"Arial","size":"14px","color":"#000000"}}},"metadata":{"title":"untitled","revision":0,"exportBorder":false,"loadPosition":"default","libraries":["com.gliffy.libraries.basic.basic_v1.default","com.gliffy.libraries.flowchart.flowchart_v1.default","com.gliffy.libraries.swimlanes.swimlanes_v1.default","com.gliffy.libraries.images","com.gliffy.libraries.network.network_v4.home","com.gliffy.libraries.network.network_v4.business","com.gliffy.libraries.network.network_v4.rack","com.gliffy.libraries.network.network_v3.home","com.gliffy.libraries.network.network_v3.business","com.gliffy.libraries.network.network_v3.rack"],"lastSerialized":1458117295143,"analyticsProduct":"Confluence"},"embeddedResources":{"index":0,"resources":[]}} \ No newline at end of file diff --git a/experimental/images/vlans-deeper-look.png b/experimental/images/vlans-deeper-look.png deleted file mode 100644 index 32d95f600e1d0f028e5a354584d7b3eac1639e35..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 38837 zcmV(?K-a&CP)`~Uy{|NsBV<^9ae%>V!XU&~tip+HNj^66}A$mRQ)nVITrYtHNc(eD3Esqa*> z`kbAd%gf76to6~-(lK|cWX@pB=>GHa@o3Xzg_^4Ws!jc*N5S0jld8K^uJzT`)$ek1 zjEs!W&(7fC;Fgw^`k+76^8b5_oPvUbLqS5dw6u?pkKFqIl9H1A{QJh>@4UUdh>3~o zZf_bI8wCUeRju;(_xAs-RR8+*@o{lbulell>t$t)3OtT%C;^muqGbEMhb*uuoa zthc;2H8U@1m?bADOrYYNL_*Bw_Ge{V*N8X=7YhCG-9?x^NsPS9$HfQ|DjO?231S5M zlL5N}0aL3>H+?Qtsp(QsPhQMaGY|=~Zwi2)wv~;4X@!Z;naVZ8!1Fgf;UAc;zXuVure|>tOWE~Z8pARn$o3h1ObAbNj!%u^* z_5J^CTT#Z>-^Nl<{-!W<+HSAO-u~F8IZIryPE6BPOvND_pf4@y_4@7P*??SStj416 zXk>tnp?7p^5>${_Fg7T04AS7|-0SlB;o#|%K{jY`-gbA9fN4Oe^_8!W{kmp&I5V+7}GvKNhH!eJfrZ;)8=# zpwqakn3Tfhgs;~cM<7#nn}SJ8E;MGj>e#%;Zg8JfSz4 zvp_CT_#;99000DZQchF9>dx@GGiCe$0F;4AL_t(|USeQi8bn|eFfcF!*t2>a$5|-4 zqi+-{DqATiEeu5j0zpGVL+6shegeP$D|}pn2ahA~{O#-}nH(wBHO4^h(YZg#^A+KN z|9(9^qXjyfdTZ6k`BKNgy1XfuI#}PWcKuvVT6W&stlrk;s=3{#zPlN!uhga2huH31 zr`8S1DKDn%eDlluV%M`h{dz*^`D*Y05VF|C*xY@XF0?m}R`!QiLc3?IW1l)P+Mkbp zJBq5g|83EGj)%%QDk1j+RInXOmzz<@tvi4Bp-w>P4brX&x2%xY?$pi^;{7l%Hg?BD zJ55Fi5fg+k(=;FYt{WK}yeAHAS9uWfbzKxoYd3T=I6sC4b7@Z&KojH&aBl%8i9-}- z?#F>Rt(WBXR7!D<-vZ}KCSI)MO^Bvvo}kYOP2n|+wT0-Tfup(p2GARf>`j9s%Aww&IJlPEu;7F=CZlaivP(gV@h;xvwx6ru^ zBi^&_U_fL%zq?{Zo@g+g&zw9$)^C)i6B$@iaQqn!=86J%h^FnBcY0rpJJ~^##7rU? 
diff --git a/experimental/images/vlans-deeper-look.svg b/experimental/images/vlans-deeper-look.svg
deleted file mode 100644
index 96cd21d52f..0000000000
[SVG text omitted; it carries the same diagram labels as the Gliffy source above, plus "Gateway 10.1.30.1", "Parent: eth0.10 / VLAN ID: 10", "Parent: eth0.30 / VLAN: 30", "Network", and "other Docker Hosts".]

diff --git a/experimental/plugins_graphdriver.md b/experimental/plugins_graphdriver.md
deleted file mode 100644
index 1a291e0d52..0000000000

# Experimental: Docker graph driver plugins

Docker graph driver plugins enable admins to use an external, out-of-process
graph driver with the Docker Engine. This is an alternative to using the
built-in storage drivers, such as aufs/overlay/devicemapper/btrfs.

A graph driver plugin is used for image and container filesystem storage; as
such, the plugin must be started and available for connections before the
Docker Engine is started.

# Write a graph driver plugin

See the [plugin documentation](/docs/extend/plugins.md) for detailed information
on the underlying plugin protocol.


## Graph Driver plugin protocol

If a plugin registers itself as a `GraphDriver` when activated, then it is
expected to provide the rootfs for containers as well as image layer storage.

### /GraphDriver.Init

**Request**:
```
{
  "Home": "/graph/home/path",
  "Opts": []
}
```

Initialize the graph driver plugin with a home directory and an array of
options. Plugins are not required to accept these options, as the Docker
Engine does not require that the plugin use this path or these options; they
are only passed through from the user.

**Response**:
```
{
  "Err": ""
}
```

Respond with a non-empty string error if an error occurred.
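Each endpoint is a plain HTTP POST of JSON over the plugin's socket, so it can
be smoke-tested directly. A minimal sketch, assuming a plugin discoverable as
`mygraph.sock` under Docker's plugin directory (the plugin name is
hypothetical):

```
# POST an Init request straight to the plugin's Unix socket
curl --unix-socket /run/docker/plugins/mygraph.sock \
     -H "Content-Type: application/json" \
     -d '{"Home": "/graph/home/path", "Opts": []}' \
     http://localhost/GraphDriver.Init
# A successful driver answers with an empty error: {"Err":""}
```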
### /GraphDriver.Create

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
  "MountLabel": ""
}
```

Create a new, empty, read-only filesystem layer with the specified
`ID`, `Parent` and `MountLabel`. `Parent` may be an empty string,
which would indicate that there is no parent layer.

**Response**:
```
{
  "Err": ""
}
```

Respond with a non-empty string error if an error occurred.

### /GraphDriver.CreateReadWrite

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142",
  "MountLabel": ""
}
```

Similar to `/GraphDriver.Create` but creates a read-write filesystem layer.

### /GraphDriver.Remove

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```

Remove the filesystem layer with the given `ID`.

**Response**:
```
{
  "Err": ""
}
```

Respond with a non-empty string error if an error occurred.

### /GraphDriver.Get

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "MountLabel": ""
}
```

Get the mountpoint for the layered filesystem referred to by the given `ID`.

**Response**:
```
{
  "Dir": "/var/mygraph/46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Err": ""
}
```

Respond with the absolute path to the mounted layered filesystem.
Respond with a non-empty string error if an error occurred.

### /GraphDriver.Put

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```

Release the system resources for the specified `ID`, such as unmounting the
filesystem layer.

**Response**:
```
{
  "Err": ""
}
```

Respond with a non-empty string error if an error occurred.

### /GraphDriver.Exists

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```

Determine if a filesystem layer with the specified `ID` exists.

**Response**:
```
{
  "Exists": true
}
```

Respond with a boolean for whether or not the filesystem layer with the
specified `ID` exists.

### /GraphDriver.Status

**Request**:
```
{}
```

Get low-level diagnostic information about the graph driver.

**Response**:
```
{
  "Status": [[]]
}
```

Respond with a 2-D array of key/value pairs for the underlying status
information.


### /GraphDriver.GetMetadata

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187"
}
```

Get low-level diagnostic information about the layered filesystem with the
specified `ID`.

**Response**:
```
{
  "Metadata": {},
  "Err": ""
}
```

Respond with a set of key/value pairs containing the low-level diagnostic
information about the layered filesystem.
Respond with a non-empty string error if an error occurred.

### /GraphDriver.Cleanup

**Request**:
```
{}
```

Perform any necessary tasks to release resources held by the plugin, for
example unmounting all the layered filesystems.

**Response**:
```
{
  "Err": ""
}
```

Respond with a non-empty string error if an error occurred.


### /GraphDriver.Diff

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
}
```

Get an archive of the changes between the filesystem layers specified by the
`ID` and `Parent`. `Parent` may be an empty string, in which case there is no
parent.

**Response**:
```
{{ TAR STREAM }}
```

### /GraphDriver.Changes

**Request**:
```
{
  "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187",
  "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"
}
```

Get a list of changes between the filesystem layers specified by the `ID` and
`Parent`. `Parent` may be an empty string, in which case there is no parent.

**Response**:
```
{
  "Changes": [{}],
  "Err": ""
}
```

Responds with a list of changes. The structure of a change is:
```
  "Path": "/some/path",
  "Kind": 0,
```

Where `Path` is the filesystem path within the layered filesystem that
changed and `Kind` is an integer specifying the type of change that occurred:

- 0 - Modified
- 1 - Added
- 2 - Deleted

Respond with a non-empty string error if an error occurred.
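To make the kind codes concrete, here is a hedged example of a `Changes` call
against the hypothetical socket from the earlier sketch; the paths and the
reply shown are invented for illustration:

```
curl --unix-socket /run/docker/plugins/mygraph.sock \
     -H "Content-Type: application/json" \
     -d '{"ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142"}' \
     http://localhost/GraphDriver.Changes

# Hypothetical reply: one modified path (Kind 0) and one added path (Kind 1)
# {"Changes":[{"Path":"/etc/hostname","Kind":0},{"Path":"/tmp/new-file","Kind":1}],"Err":""}
```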
- -### /GraphDriver.ApplyDiff - -**Request**: -``` -{{ TAR STREAM }} -``` - -Extract the changeset from the given diff into the layer with the specified `ID` and `Parent`. - -**Query Parameters**: - -- id (required) - the `ID` of the new filesystem layer to extract the diff to -- parent (required) - the `Parent` of the given `ID` - -**Response**: -``` -{ - "Size": 512366, - "Err": "" -} -``` - -Respond with the size of the new layer in bytes. Respond with a non-empty string error if an error occurred. - -### /GraphDriver.DiffSize - -**Request**: -``` -{ - "ID": "46fe8644f2572fd1e505364f7581e0c9dbc7f14640bd1fb6ce97714fb6fc5187", - "Parent": "2cd9c322cb78a55e8212aa3ea8425a4180236d7106938ec921d0935a4b8ca142" -} -``` - -Calculate the size of the changes between the specified `ID` and `Parent`. - -**Response**: -``` -{ - "Size": 512366, - "Err": "" -} -``` - -Respond with the size of the changes between the specified `ID` and `Parent`. Respond with a non-empty string error if an error occurred. diff --git a/experimental/vlan-networks.md b/experimental/vlan-networks.md deleted file mode 100644 index caec6d6c6b..0000000000 --- a/experimental/vlan-networks.md +++ /dev/null @@ -1,471 +0,0 @@ -# Ipvlan Network Driver - -### Getting Started - -The Ipvlan driver is currently in experimental mode in order to incubate Docker users' use cases and vet the implementation, ensuring a hardened, production-ready driver in a future release. Libnetwork now gives users total control over both IPv4 and IPv6 addressing. The VLAN driver builds on top of that, giving operators complete control of layer 2 VLAN tagging and even Ipvlan L3 routing for users interested in underlay network integration. For overlay deployments that abstract away physical constraints, see the [multi-host overlay](https://docs.docker.com/engine/userguide/networking/get-started-overlay/) driver. - -Ipvlan is a new twist on the tried and true network virtualization technique. The Linux implementations are extremely lightweight because, rather than using the traditional Linux bridge for isolation, they are simply associated with a Linux Ethernet interface or sub-interface to enforce separation between networks and connectivity to the physical network. - -Ipvlan offers a number of unique features and plenty of room for further innovations with the various modes. Two high-level advantages of these approaches are the positive performance implications of bypassing the Linux bridge and the simplicity of having fewer moving parts. Removing the bridge that traditionally resides between the Docker host NIC and container interface leaves a very simple setup consisting of container interfaces attached directly to the Docker host interface. The result is easy access for external-facing services, as there are no port mappings in these scenarios. - -### Pre-Requisites - -- The examples on this page are all single host and set up using Docker experimental builds that can be installed with the following instructions: [Install Docker experimental](https://github.com/docker/docker/tree/master/experimental) - -- All of the examples can be performed on a single host running Docker. Any examples using a sub-interface like `eth0.10` can be replaced with `eth0` or any other valid parent interface on the Docker host. Sub-interfaces with a `.` are created on the fly. `-o parent` interfaces can also be left out of the `docker network create` altogether and the driver will create a `dummy` interface that will enable local host connectivity to perform the examples.
- -- Kernel requirements: - - - To check your current kernel version, use `uname -r` - - Ipvlan requires Linux kernel v4.2+ (support for earlier kernels exists but is buggy) - -### Ipvlan L2 Mode Example Usage - -An example of the ipvlan `L2` mode topology is shown in the following image. The driver is specified with the `-d driver_name` option. In this case `-d ipvlan`. - -![Simple Ipvlan L2 Mode Example](images/ipvlan_l2_simple.png) - -The parent interface in the next example, `-o parent=eth0`, is configured as follows: - -``` -ip addr show eth0 -3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 -``` - -Use the network from the host's interface as the `--subnet` in the `docker network create`. The container will be attached to the same network as the host interface as set via the `-o parent=` option. - -Create the ipvlan network and run a container attaching to it: - -``` -# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) -docker network create -d ipvlan \ - --subnet=192.168.1.0/24 \ - --gateway=192.168.1.1 \ - -o ipvlan_mode=l2 \ - -o parent=eth0 db_net - -# Start a container on the db_net network -docker run --net=db_net -it --rm alpine /bin/sh - -# NOTE: the containers can NOT ping the underlying host interfaces as -# they are intentionally filtered by Linux for additional isolation. -``` - -The default mode for Ipvlan is `l2`. If `-o ipvlan_mode=` is left unspecified, the default mode will be used. Similarly, if the `--gateway` is left empty, the first usable address on the network will be set as the gateway. For example, if the subnet provided in the network create is `--subnet=192.168.1.0/24`, then the gateway the container receives is `192.168.1.1`. - -To help understand how this mode interacts with other hosts, the following figure shows the same layer 2 segment between two Docker hosts, which applies to Ipvlan L2 mode. - -![Multiple Ipvlan Hosts](images/macvlan-bridge-ipvlan-l2.png) - -The following will create the exact same network as the network `db_net` created earlier, using the driver defaults of `--gateway=192.168.1.1` and `-o ipvlan_mode=l2`. - -``` -# Ipvlan (-o ipvlan_mode= Defaults to L2 mode if not specified) -docker network create -d ipvlan \ - --subnet=192.168.1.0/24 \ - -o parent=eth0 db_net_ipv - -# Start a container with an explicit name in daemon mode -docker run --net=db_net_ipv --name=ipv1 -itd alpine /bin/sh - -# Start a second container and ping using the container name -# to see the docker included name resolution functionality -docker run --net=db_net_ipv --name=ipv2 -it --rm alpine /bin/sh -ping -c 4 ipv1 - -# NOTE: the containers can NOT ping the underlying host interfaces as -# they are intentionally filtered by Linux for additional isolation. -``` - -The drivers also support the `--internal` flag, which will completely isolate containers on a network from any communications external to that network. Since network isolation is tightly coupled to the network's parent interface, the result of leaving the `-o parent=` option off of a network create is exactly the same as the `--internal` option. If the parent interface is not specified or the `--internal` flag is used, a netlink type `dummy` parent interface is created for the user and used as the parent interface, effectively isolating the network completely.
- -The following two `docker network create` examples result in identical networks that you can attach containers to: - -``` -# Empty '-o parent=' creates an isolated network -docker network create -d ipvlan \ - --subnet=192.168.10.0/24 isolated1 - -# Explicit '--internal' flag is the same: -docker network create -d ipvlan \ - --subnet=192.168.11.0/24 --internal isolated2 - -# Even the '--subnet=' can be left empty and the default -# IPAM subnet of 172.18.0.0/16 will be assigned -docker network create -d ipvlan isolated3 - -docker run --net=isolated1 --name=cid1 -it --rm alpine /bin/sh -docker run --net=isolated2 --name=cid2 -it --rm alpine /bin/sh -docker run --net=isolated3 --name=cid3 -it --rm alpine /bin/sh - -# To attach to any use `docker exec` and start a shell -docker exec -it cid1 /bin/sh -docker exec -it cid2 /bin/sh -docker exec -it cid3 /bin/sh -``` - -### Ipvlan 802.1q Trunk L2 Mode Example Usage - -Architecturally, Ipvlan L2 mode trunking is the same as Macvlan with regard to gateways and L2 path isolation. There are nuances that can be advantageous: reduced CAM table pressure in ToR switches, one MAC per port, and avoiding MAC exhaustion on a host's parent NIC, to name a few. The 802.1q trunk scenario looks the same. Both modes adhere to tagging standards and have seamless integration with the physical network for underlay integration and hardware vendor plugin integrations. - -Hosts on the same VLAN are typically on the same subnet and almost always are grouped together based on their security policy. In most scenarios, a multi-tier application is tiered into different subnets because the security profile of each process requires some form of isolation. For example, hosting your credit card processing on the same virtual network as the frontend webserver would be a regulatory compliance issue, along with circumventing the long-standing best practice of layered defense-in-depth architectures. VLANs, or the equivalent VNI (Virtual Network Identifier) when using the Overlay driver, are the first step in isolating tenant traffic. - -![Docker VLANs in Depth](images/vlans-deeper-look.png) - -The Linux sub-interface tagged with a vlan can either already exist or will be created when you call a `docker network create`. `docker network rm` will delete the sub-interface. Parent interfaces such as `eth0` are not deleted, only sub-interfaces with a netlink parent index > 0. - -For the driver to add/delete the vlan sub-interfaces, the format needs to be `interface_name.vlan_tag`. Other sub-interface naming can be used as the specified parent, but the link will not be deleted automatically when `docker network rm` is invoked. - -The option to use either existing parent vlan sub-interfaces or let Docker manage them enables the user to either completely manage the Linux interfaces and networking or let Docker create and delete the vlan parent sub-interfaces (netlink `ip link`) with no effort from the user. - -For example, use `eth0.10` to denote a sub-interface of `eth0` tagged with vlan id `10`. The equivalent `ip link` command would be `ip link add link eth0 name eth0.10 type vlan id 10`. - -The example creates the vlan tagged networks and then starts two containers to test connectivity between the containers. Different vlans cannot ping one another without a router routing between the two networks. The default namespace is not reachable, per ipvlan design, in order to isolate container namespaces from the underlying host.
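To confirm what the driver built, the sub-interface and its tag can be inspected from the host. A minimal sketch, assuming a network was created with `-o parent=eth0.10` as described above:

```
# Show the driver-created sub-interface; the detailed output should
# include a line like: vlan protocol 802.1Q id 10
ip -d link show eth0.10
```

Since `eth0.10` follows the `interface_name.vlan_tag` format, `docker network rm` on that network would also delete the sub-interface.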
- -**Vlan ID 20** - -In the first network, tagged and isolated by the Docker host, `eth0.20` is the parent interface tagged with vlan id `20`, specified with `-o parent=eth0.20`. Other naming formats can be used, but the links need to be added and deleted manually using `ip link` or Linux configuration files. As long as the `-o parent` interface exists, anything compliant with Linux netlink can be used. - -``` -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged -docker network create -d ipvlan \ - --subnet=192.168.20.0/24 \ - --gateway=192.168.20.1 \ - -o parent=eth0.20 ipvlan20 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -docker run --net=ipvlan20 -it --name ivlan_test1 --rm alpine /bin/sh -docker run --net=ipvlan20 -it --name ivlan_test2 --rm alpine /bin/sh -``` - -**Vlan ID 30** - -In the second network, tagged and isolated by the Docker host, `eth0.30` is the parent interface tagged with vlan id `30`, specified with `-o parent=eth0.30`. The `ipvlan_mode=` defaults to l2 mode `ipvlan_mode=l2`. It can also be explicitly set with the same result, as shown in the next example. - -``` -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged. -docker network create -d ipvlan \ - --subnet=192.168.30.0/24 \ - --gateway=192.168.30.1 \ - -o parent=eth0.30 \ - -o ipvlan_mode=l2 ipvlan30 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -docker run --net=ipvlan30 -it --name ivlan_test3 --rm alpine /bin/sh -docker run --net=ipvlan30 -it --name ivlan_test4 --rm alpine /bin/sh -``` - -The gateway is set inside of the container as the default gateway. That gateway would typically be an external router on the network. - -``` -$ ip route - default via 192.168.30.1 dev eth0 - 192.168.30.0/24 dev eth0 src 192.168.30.2 -``` - -Example: Multi-subnet Ipvlan L2 mode, starting two containers on the same subnet and pinging one another. In L2 mode, an external router is required for `192.168.114.0/24` to reach `192.168.116.0/24`. L3 mode can route between subnets that share a common `-o parent=`. - -Secondary addresses on network routers are common as an address space becomes exhausted; a secondary is added to an L3 vlan interface, commonly referred to as a "switched virtual interface" (SVI). - -``` -docker network create -d ipvlan \ - --subnet=192.168.114.0/24 --subnet=192.168.116.0/24 \ - --gateway=192.168.114.254 --gateway=192.168.116.254 \ - -o parent=eth0.114 \ - -o ipvlan_mode=l2 ipvlan114 - -docker run --net=ipvlan114 --ip=192.168.114.10 -it --rm alpine /bin/sh -docker run --net=ipvlan114 --ip=192.168.114.11 -it --rm alpine /bin/sh -``` - -A key takeaway is that operators have the ability to map their physical network into their virtual network for integrating containers into their environment with no operational overhauls required. NetOps simply drops an 802.1q trunk into the Docker host. That virtual link would be the `-o parent=` passed in the network creation. For untagged (non-VLAN) links, it is as simple as `-o parent=eth0`; for 802.1q trunks with VLAN IDs, each network gets mapped to the corresponding VLAN/subnet from the network. - -For example, NetOps provides the VLAN IDs and the associated subnets for VLANs being passed on the Ethernet link to the Docker host server. Those values are simply plugged into the `docker network create` commands when provisioning the Docker networks.
These are persistent configurations that are applied every time the Docker engine starts, which alleviates having to manage often-complex configuration files. The network interfaces can also be managed manually by being pre-created; docker networking will never modify them and will simply use them as parent interfaces. Example mappings from NetOps to Docker network commands are as follows: - -- VLAN: 10, Subnet: 172.16.80.0/24, Gateway: 172.16.80.1 - - - `--subnet=172.16.80.0/24 --gateway=172.16.80.1 -o parent=eth0.10` - -- VLAN: 20, IP subnet: 172.16.50.0/22, Gateway: 172.16.50.1 - - - `--subnet=172.16.50.0/22 --gateway=172.16.50.1 -o parent=eth0.20` - -- VLAN: 30, Subnet: 10.1.100.0/16, Gateway: 10.1.100.1 - - - `--subnet=10.1.100.0/16 --gateway=10.1.100.1 -o parent=eth0.30` - -### IPVlan L3 Mode Example - -IPVlan will require routes to be distributed to each endpoint. The driver only builds the Ipvlan L3 mode port and attaches the container to the interface. Route distribution throughout a cluster is beyond the initial implementation of this single host scoped driver. In L3 mode, the Docker host is very similar to a router starting new networks in the container. They are on networks that the upstream network will not know about without route distribution. For those curious how Ipvlan L3 will fit into container networking, see the following examples. - -![Docker Ipvlan L3 Mode](images/ipvlan-l3.png) - -Ipvlan L3 mode drops all broadcast and multicast traffic. This reason alone makes Ipvlan L3 mode a prime candidate for those looking for massive scale and predictable network integrations. It is predictable and, in turn, will lead to greater uptimes because there is no bridging involved. Bridging loops have been responsible for high profile outages that can be hard to pinpoint depending on the size of the failure domain. This is due to the cascading nature of BPDUs (Bridge Port Data Units) that are flooded throughout a broadcast domain (VLAN) to find and block topology loops. Eliminating bridging domains, or at the least, keeping them isolated to a pair of ToRs (top of rack switches) will reduce hard-to-troubleshoot bridging instabilities. Ipvlan L2 mode is well suited for isolated VLANs only trunked into a pair of ToRs that can provide a loop-free non-blocking fabric. The next step further is to route at the edge via Ipvlan L3 mode, which reduces a failure domain to the local host only. - -- L3 mode needs to be on a separate subnet from the default namespace since it requires a netlink route in the default namespace pointing to the Ipvlan parent interface. - -- The parent interface used in this example is `eth0` and it is on the subnet `192.168.1.0/24`. Notice the `docker network` is **not** on the same subnet as `eth0`. - -- Unlike ipvlan l2 modes, different subnets/networks can ping one another as long as they share the same parent interface `-o parent=`. - -``` -ip a show eth0 -3: eth0: mtu 1500 qdisc pfifo_fast state UP group default qlen 1000 - link/ether 00:50:56:39:45:2e brd ff:ff:ff:ff:ff:ff - inet 192.168.1.250/24 brd 192.168.1.255 scope global eth0 -``` - -A traditional gateway doesn't mean much to an L3 mode Ipvlan interface since there is no broadcast traffic allowed. Because of that, the container default gateway simply points to the container's `eth0` device. See below for CLI output of `ip route` or `ip -6 route` from inside an L3 container for details. - -The mode `-o ipvlan_mode=l3` must be explicitly specified since the default ipvlan mode is `l2`.
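For instance, a minimal sketch of an explicitly specified L3 network on the `eth0` parent shown above (the network name `ipnet200` and its subnet are illustrative; note the subnet intentionally differs from the parent's `192.168.1.0/24`):

```
# Create an L3 network; without -o ipvlan_mode=l3 the driver assumes l2
docker network create -d ipvlan \
    --subnet=192.168.200.0/24 \
    -o parent=eth0 \
    -o ipvlan_mode=l3 ipnet200
```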
- -The following example does not specify a parent interface. The network driver will create a dummy type link for the user rather than rejecting the network creation, and the containers are isolated to communicating only with one another. - -``` -# Create the Ipvlan L3 network -docker network create -d ipvlan \ - --subnet=192.168.214.0/24 \ - --subnet=10.1.214.0/24 \ - -o ipvlan_mode=l3 ipnet210 - -# Test 192.168.214.0/24 connectivity -docker run --net=ipnet210 --ip=192.168.214.10 -itd alpine /bin/sh -docker run --net=ipnet210 --ip=10.1.214.10 -itd alpine /bin/sh - -# Test L3 connectivity from 10.1.214.0/24 to 192.168.214.0/24 -docker run --net=ipnet210 --ip=192.168.214.9 -it --rm alpine ping -c 2 10.1.214.10 - -# Test L3 connectivity from 192.168.214.0/24 to 10.1.214.0/24 -docker run --net=ipnet210 --ip=10.1.214.9 -it --rm alpine ping -c 2 192.168.214.10 - -``` - -Notice there is no `--gateway=` option in the network create. The option is ignored in `l3` mode even if one is specified. Take a look at the container routing table from inside of the container: - -``` -# Inside an L3 mode container -$ ip route - default dev eth0 - 192.168.120.0/24 dev eth0 src 192.168.120.2 -``` - -In order to ping the containers from a remote Docker host, or for a container to be able to ping a remote host, the remote host or the physical network in between needs to have a route pointing to the host IP address of the container's Docker host eth interface. More on this as we evolve the Ipvlan `L3` story. - -### Dual Stack IPv4 IPv6 Ipvlan L2 Mode - -- Not only does Libnetwork give you complete control over IPv4 addressing, but it also gives you total control over IPv6 addressing as well as feature parity between the two address families. - -- The next example will start with IPv6 only. Start two containers on the same VLAN `139` and ping one another. Since the IPv4 subnet is not specified, the default IPAM will provision a default IPv4 subnet. That subnet is isolated unless the upstream network is explicitly routing it on VLAN `139`. - -``` -# Create a v6 network -docker network create -d ipvlan \ - --subnet=2001:db8:abc2::/64 --gateway=2001:db8:abc2::22 \ - -o parent=eth0.139 v6ipvlan139 - -# Start a container on the network -docker run --net=v6ipvlan139 -it --rm alpine /bin/sh - -``` - -View the container eth0 interface and v6 routing table: - -``` - eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 172.18.0.2/16 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link - valid_lft forever preferred_lft forever - inet6 2001:db8:abc2::1/64 scope link nodad - valid_lft forever preferred_lft forever - -root@5c1dc74b1daa:/# ip -6 route -2001:db8:abc4::/64 dev eth0 proto kernel metric 256 -2001:db8:abc2::/64 dev eth0 proto kernel metric 256 -default via 2001:db8:abc2::22 dev eth0 metric 1024 -``` - -Start a second container and ping the first container's v6 address.
- -``` -$ docker run --net=v6ipvlan139 -it --rm alpine /bin/sh - -root@b817e42fcc54:/# ip a show eth0 -75: eth0@if55: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 172.18.0.3/16 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link tentative dadfailed - valid_lft forever preferred_lft forever - inet6 2001:db8:abc2::2/64 scope link nodad - valid_lft forever preferred_lft forever - -root@b817e42fcc54:/# ping6 2001:db8:abc2::1 -PING 2001:db8:abc2::1 (2001:db8:abc2::1): 56 data bytes -64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=0 ttl=64 time=0.044 ms -64 bytes from 2001:db8:abc2::1%eth0: icmp_seq=1 ttl=64 time=0.058 ms - -2 packets transmitted, 2 packets received, 0% packet loss -round-trip min/avg/max/stddev = 0.044/0.051/0.058/0.000 ms -``` - -The next example will set up a dual stack IPv4/IPv6 network with an example VLAN ID of `140`. - -Next create a network with two IPv4 subnets and one IPv6 subnet, all of which have explicit gateways: - -``` -docker network create -d ipvlan \ - --subnet=192.168.140.0/24 --subnet=192.168.142.0/24 \ - --gateway=192.168.140.1 --gateway=192.168.142.1 \ - --subnet=2001:db8:abc9::/64 --gateway=2001:db8:abc9::22 \ - -o parent=eth0.140 \ - -o ipvlan_mode=l2 ipvlan140 -``` - -Start a container and view eth0 and both v4 & v6 routing tables: - -``` -docker run --net=ipvlan140 --ip6=2001:db8:abc9::51 -it --rm alpine /bin/sh - -root@3cce0d3575f3:/# ip a show eth0 -78: eth0@if77: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 192.168.140.2/24 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link - valid_lft forever preferred_lft forever - inet6 2001:db8:abc9::1/64 scope link nodad - valid_lft forever preferred_lft forever - -root@3cce0d3575f3:/# ip route -default via 192.168.140.1 dev eth0 -192.168.140.0/24 dev eth0 proto kernel scope link src 192.168.140.2 - -root@3cce0d3575f3:/# ip -6 route -2001:db8:abc4::/64 dev eth0 proto kernel metric 256 -2001:db8:abc9::/64 dev eth0 proto kernel metric 256 -default via 2001:db8:abc9::22 dev eth0 metric 1024 -``` - -Start a second container with a specific `--ip` address and ping the first container using IPv4 packets: - -``` -docker run --net=ipvlan140 --ip=192.168.140.10 -it --rm alpine /bin/sh -``` - -**Note**: Different subnets on the same parent interface in Ipvlan `L2` mode cannot ping one another. That requires a router to proxy-arp the requests with a secondary subnet. However, Ipvlan `L3` will route the unicast traffic between disparate subnets as long as they share the same `-o parent` parent link. - -### Dual Stack IPv4 IPv6 Ipvlan L3 Mode - -**Example:** IpVlan L3 Mode Dual Stack IPv4/IPv6, Multi-Subnet w/ 802.1q Vlan Tag 118 - -As in all of the examples, a tagged VLAN interface does not have to be used. The sub-interfaces can be swapped with `eth0`, `eth1`, `bond0` or any other valid interface on the host other than the `lo` loopback. - -The primary difference you will see is that L3 mode does not create a default route with a next-hop, but rather sets a default route pointing to `dev eth` only, since ARP/Broadcasts/Multicast are all filtered by Linux as per the design. Since the parent interface is essentially acting as a router, the parent interface IP and subnet need to be different from the container networks.
That is the opposite of bridge and L2 modes, which need to be on the same subnet (broadcast domain) in order to forward broadcast and multicast packets. - -``` -# Create an IPv6+IPv4 Dual Stack Ipvlan L3 network -# Gateways for both v4 and v6 are set to a dev e.g. 'default dev eth0' -docker network create -d ipvlan \ - --subnet=192.168.110.0/24 \ - --subnet=192.168.112.0/24 \ - --subnet=2001:db8:abc6::/64 \ - -o parent=eth0 \ - -o ipvlan_mode=l3 ipnet110 - - -# Start a few containers on the network (ipnet110) -# in separate terminals and check connectivity -docker run --net=ipnet110 -it --rm alpine /bin/sh -# Start a second container specifying the v6 address -docker run --net=ipnet110 --ip6=2001:db8:abc6::10 -it --rm alpine /bin/sh -# Start a third specifying the IPv4 address -docker run --net=ipnet110 --ip=192.168.112.50 -it --rm alpine /bin/sh -# Start a 4th specifying both the IPv4 and IPv6 addresses -docker run --net=ipnet110 --ip6=2001:db8:abc6::50 --ip=192.168.112.50 -it --rm alpine /bin/sh -``` - -Interface and routing table outputs are as follows: - -``` -root@3a368b2a982e:/# ip a show eth0 -63: eth0@if59: mtu 1500 qdisc noqueue state UNKNOWN group default - link/ether 00:50:56:2b:29:40 brd ff:ff:ff:ff:ff:ff - inet 192.168.112.2/24 scope global eth0 - valid_lft forever preferred_lft forever - inet6 2001:db8:abc4::250:56ff:fe2b:2940/64 scope link - valid_lft forever preferred_lft forever - inet6 2001:db8:abc6::10/64 scope link nodad - valid_lft forever preferred_lft forever - -# Note the default route is simply the eth device because ARPs are filtered. -root@3a368b2a982e:/# ip route - default dev eth0 scope link - 192.168.112.0/24 dev eth0 proto kernel scope link src 192.168.112.2 - -root@3a368b2a982e:/# ip -6 route -2001:db8:abc4::/64 dev eth0 proto kernel metric 256 -2001:db8:abc6::/64 dev eth0 proto kernel metric 256 -default dev eth0 metric 1024 -``` - -*Note:* There may be a bug when specifying `--ip6=` addresses: when you delete a container with a specified v6 address and then start a new container with the same v6 address, it throws the following error, as if the address isn't properly being released to the v6 pool. It will fail to unmount the container and be left dead. - -``` -docker: Error response from daemon: Address already in use. -``` - -### Manually Creating 802.1q Links - -**Vlan ID 40** - -If a user does not want the driver to create the vlan sub-interface, it simply needs to exist prior to the `docker network create`. If you have sub-interface naming that is not `interface.vlan_id`, it is honored in the `-o parent=` option, again as long as the interface exists and is up. - -Manually created links can be named anything you want; all that matters is that they exist when the network is created. Manually created links are not deleted, regardless of name, when the network is deleted with `docker network rm`. - -``` -# create a new sub-interface tied to dot1q vlan 40 -ip link add link eth0 name eth0.40 type vlan id 40 - -# enable the new sub-interface -ip link set eth0.40 up - -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged -docker network create -d ipvlan \ - --subnet=192.168.40.0/24 \ - --gateway=192.168.40.1 \ - -o parent=eth0.40 ipvlan40 - -# in two separate terminals, start a Docker container and the containers can now ping one another.
-docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh -docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh -``` - -**Example:** Vlan sub-interface manually created with any name: - -``` -# create a new sub interface tied to dot1q vlan 40 -ip link add link eth0 name foo type vlan id 40 - -# enable the new sub-interface -ip link set foo up - -# now add networks and hosts as you would normally by attaching to the master (sub)interface that is tagged -docker network create -d ipvlan \ - --subnet=192.168.40.0/24 --gateway=192.168.40.1 \ - -o parent=foo ipvlan40 - -# in two separate terminals, start a Docker container and the containers can now ping one another. -docker run --net=ipvlan40 -it --name ivlan_test5 --rm alpine /bin/sh -docker run --net=ipvlan40 -it --name ivlan_test6 --rm alpine /bin/sh -``` - -Manually created links can be cleaned up with: - -``` -ip link del foo -``` - -As with all of the Libnetwork drivers, the vlan drivers can be mixed and matched with others, even as far as running 3rd party ecosystem drivers in parallel, for maximum flexibility to the Docker user. diff --git a/hack/.vendor-helpers.sh b/hack/.vendor-helpers.sh deleted file mode 100755 index 82fe7b3b38..0000000000 --- a/hack/.vendor-helpers.sh +++ /dev/null @@ -1,164 +0,0 @@ -#!/usr/bin/env bash - -PROJECT=github.com/docker/docker - -# Downloads dependencies into vendor/ directory -mkdir -p vendor - -if ! go list github.com/docker/docker/docker &> /dev/null; then - rm -rf .gopath - mkdir -p .gopath/src/github.com/docker - ln -sf ../../../.. .gopath/src/${PROJECT} - export GOPATH="${PWD}/.gopath:${PWD}/vendor" -fi -export GOPATH="$GOPATH:${PWD}/vendor" - -find='find' -if [ "$(go env GOHOSTOS)" = 'windows' ]; then - find='/usr/bin/find' -fi - -clone() { - local vcs="$1" - local pkg="$2" - local rev="$3" - local url="$4" - - : ${url:=https://$pkg} - local target="vendor/src/$pkg" - - echo -n "$pkg @ $rev: " - - if [ -d "$target" ]; then - echo -n 'rm old, ' - rm -rf "$target" - fi - - echo -n 'clone, ' - case "$vcs" in - git) - git clone --quiet --no-checkout "$url" "$target" - ( cd "$target" && git checkout --quiet "$rev" && git reset --quiet --hard "$rev" ) - ;; - hg) - hg clone --quiet --updaterev "$rev" "$url" "$target" - ;; - esac - - echo -n 'rm VCS, ' - ( cd "$target" && rm -rf .{git,hg} ) - - echo -n 'rm vendor, ' - ( cd "$target" && rm -rf vendor Godeps/_workspace ) - - echo done -} - -# get an ENV from the Dockerfile with support for multiline values -_dockerfile_env() { - local e="$1" - awk ' - $1 == "ENV" && $2 == "'"$e"'" { - sub(/^ENV +([^ ]+) +/, ""); - inEnv = 1; - } - inEnv { - if (sub(/\\$/, "")) { - printf "%s", $0; - next; - } - print; - exit; - } - ' ${DOCKER_FILE:="Dockerfile"} -} - -clean() { - local packages=( - "${PROJECT}/cmd/dockerd" # daemon package main - "${PROJECT}/cmd/docker" # client package main - "${PROJECT}/integration-cli" # external tests - ) - local dockerPlatforms=( ${DOCKER_ENGINE_OSARCH:="linux/amd64"} $(_dockerfile_env DOCKER_CROSSPLATFORMS) ) - local dockerBuildTags="$(_dockerfile_env DOCKER_BUILDTAGS)" - local buildTagCombos=( - '' - 'experimental' - 'pkcs11' - "$dockerBuildTags" - "daemon $dockerBuildTags" - "daemon cgo $dockerBuildTags" - "experimental $dockerBuildTags" - "experimental daemon $dockerBuildTags" - "experimental daemon cgo $dockerBuildTags" - "pkcs11 $dockerBuildTags" - "pkcs11 daemon $dockerBuildTags" - "pkcs11 daemon cgo $dockerBuildTags" - ) - - echo - - echo -n 'collecting import graph, ' - local IFS=$'\n' - local imports=( $( - for platform
in "${dockerPlatforms[@]}"; do - export GOOS="${platform%/*}"; - export GOARCH="${platform##*/}"; - for buildTags in "${buildTagCombos[@]}"; do - go list -e -tags "$buildTags" -f '{{join .Deps "\n"}}' "${packages[@]}" - go list -e -tags "$buildTags" -f '{{join .TestImports "\n"}}' "${packages[@]}" - done - done | grep -vE "^${PROJECT}/" | sort -u - ) ) - imports=( $(go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' "${imports[@]}") ) - unset IFS - - echo -n 'pruning unused packages, ' - findArgs=( - # This directory contains only .c and .h files which are necessary - -path vendor/src/github.com/mattn/go-sqlite3/code - ) - - # This package is required to build the Etcd client, - # but Etcd hard codes a local Godep full path. - # FIXME: fix_rewritten_imports fixes this problem in most platforms - # but it fails in very small corner cases where it makes the vendor - # script to remove this package. - # See: https://github.com/docker/docker/issues/19231 - findArgs+=( -or -path vendor/src/github.com/ugorji/go/codec ) - for import in "${imports[@]}"; do - [ "${#findArgs[@]}" -eq 0 ] || findArgs+=( -or ) - findArgs+=( -path "vendor/src/$import" ) - done - - # The docker proxy command is built from libnetwork - findArgs+=( -or -path vendor/src/github.com/docker/libnetwork/cmd/proxy ) - - local IFS=$'\n' - local prune=( $($find vendor -depth -type d -not '(' "${findArgs[@]}" ')') ) - unset IFS - for dir in "${prune[@]}"; do - $find "$dir" -maxdepth 1 -not -type d -not -name 'LICENSE*' -not -name 'COPYING*' -exec rm -v -f '{}' ';' - rmdir "$dir" 2>/dev/null || true - done - - echo -n 'pruning unused files, ' - $find vendor -type f -name '*_test.go' -exec rm -v '{}' ';' - $find vendor -type f -name 'Vagrantfile' -exec rm -v '{}' ';' - - # These are the files that are left over after fix_rewritten_imports is run. - echo -n 'pruning .orig files, ' - $find vendor -type f -name '*.orig' -exec rm -v '{}' ';' - - echo done -} - -# Fix up hard-coded imports that refer to Godeps paths so they'll work with our vendoring -fix_rewritten_imports () { - local pkg="$1" - local remove="${pkg}/Godeps/_workspace/src/" - local target="vendor/src/$pkg" - - echo "$pkg: fixing rewritten imports" - $find "$target" -name \*.go -exec sed -i'.orig' -e "s|\"${remove}|\"|g" {} \; -} diff --git a/hack/Jenkins/W2L/postbuild.sh b/hack/Jenkins/W2L/postbuild.sh deleted file mode 100644 index 662e2dcc37..0000000000 --- a/hack/Jenkins/W2L/postbuild.sh +++ /dev/null @@ -1,35 +0,0 @@ -set +x -set +e - -echo "" -echo "" -echo "---" -echo "Now starting POST-BUILD steps" -echo "---" -echo "" - -echo INFO: Pointing to $DOCKER_HOST - -if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then - echo INFO: Removing containers... - ! docker rm -vf $(docker ps -aq) -fi - -# Remove all images which don't have docker or debian in the name -if [ ! $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }' | wc -l) -eq 0 ]; then - echo INFO: Removing images... - ! docker rmi -f $(docker images | sed -n '1!p' | grep -v 'docker' | grep -v 'debian' | awk '{ print $3 }') -fi - -# Kill off any instances of git, go and docker, just in case -! taskkill -F -IM git.exe -T >& /dev/null -! taskkill -F -IM go.exe -T >& /dev/null -! taskkill -F -IM docker.exe -T >& /dev/null - -# Remove everything -! cd /c/jenkins/gopath/src/github.com/docker/docker -! rm -rfd * >& /dev/null -! 
rm -rfd .* >& /dev/null - -echo INFO: Cleanup complete -exit 0 \ No newline at end of file diff --git a/hack/Jenkins/W2L/setup.sh b/hack/Jenkins/W2L/setup.sh deleted file mode 100644 index 30e5884d97..0000000000 --- a/hack/Jenkins/W2L/setup.sh +++ /dev/null @@ -1,309 +0,0 @@ -# Jenkins CI script for Windows to Linux CI. -# Heavily modified by John Howard (@jhowardmsft) December 2015 to try to make it more reliable. -set +xe -SCRIPT_VER="Wed Apr 20 18:30:19 UTC 2016" - -# TODO to make (even) more resilient: -# - Wait for daemon to be running before executing docker commands -# - Check if jq is installed -# - Make sure bash is v4.3 or later. Can't do until all Azure nodes on the latest version -# - Make sure we are not running as local system. Can't do until all Azure nodes are updated. -# - Error if docker versions are not equal. Can't do until all Azure nodes are updated -# - Error if go versions are not equal. Can't do until all Azure nodes are updated. -# - Error if running 32-bit posix tools. Probably can take from bash --version and check contains "x86_64" -# - Warn if the CI directory cannot be deleted afterwards. Otherwise turdlets are left behind -# - Use %systemdrive% ($SYSTEMDRIVE) rather than hard code to c: for TEMP -# - Consider cross building the Windows binary and copy across. That's a bit of a heavy lift. Only reason -# for doing that is that it mirrors the actual release process for docker.exe which is cross-built. -# However, should absolutely not be a problem if built natively, so nit-picking. -# - Tidy up of images and containers. Either here, or in the teardown script. - -ec=0 -uniques=1 -echo INFO: Started at `date`. Script version $SCRIPT_VER - - -# !README! -# There are two daemons running on the remote Linux host: -# - outer: specified by DOCKER_HOST, this is the daemon that will build and run the inner docker daemon -# from the sources matching the PR. -# - inner: runs on the host network, on a port number similar to that of DOCKER_HOST but the last two digits are inverted -# (2357 if DOCKER_HOST had port 2375; and 2367 if DOCKER_HOST had port 2376). -# The windows integration tests are run against this inner daemon. - -# get the ip, inner and outer ports. -ip="${DOCKER_HOST#*://}" -port_outer="${ip#*:}" -# inner port is like outer port with last two digits inverted. -port_inner=$(echo "$port_outer" | sed -E 's/(.)(.)$/\2\1/') -ip="${ip%%:*}" - -echo "INFO: IP=$ip PORT_OUTER=$port_outer PORT_INNER=$port_inner" - -# If TLS is enabled -if [ -n "$DOCKER_TLS_VERIFY" ]; then - protocol=https - if [ -z "$DOCKER_MACHINE_NAME" ]; then - ec=1 - echo "ERROR: DOCKER_MACHINE_NAME is undefined" - fi - certs=$(echo ~/.docker/machine/machines/$DOCKER_MACHINE_NAME) - curlopts="--cacert $certs/ca.pem --cert $certs/cert.pem --key $certs/key.pem" - run_extra_args="-v tlscerts:/etc/docker" - daemon_extra_args="--tlsverify --tlscacert /etc/docker/ca.pem --tlscert /etc/docker/server.pem --tlskey /etc/docker/server-key.pem" -else - protocol=http -fi - -# Save for use by make.sh and scripts it invokes -export MAIN_DOCKER_HOST="tcp://$ip:$port_inner" - -# Verify we can get the remote node to respond to _ping -if [ $ec -eq 0 ]; then - reply=`curl -s $curlopts $protocol://$ip:$port_outer/_ping` - if [ "$reply" != "OK" ]; then - ec=1 - echo "ERROR: Failed to get an 'OK' response from the docker daemon on the Linux node" - echo " at $ip:$port_outer when called with an http request for '_ping'. 
This implies that" - echo " either the daemon has crashed/is not running, or the Linux node is unavailable." - echo - echo " A regular ping to the remote Linux node is below. It should reply. If not, the" - echo " machine cannot be reached at all and may have crashed. If it does reply, it is" - echo " likely a case of the Linux daemon not running or having crashed, which requires" - echo " further investigation." - echo - echo " Try re-running this CI job, or ask on #docker-dev or #docker-maintainers" - echo " for someone to perform further diagnostics, or take this node out of rotation." - echo - ping $ip - else - echo "INFO: The Linux node's outer daemon replied to a ping. Good!" - fi -fi - -# Get the version from the remote node. Note this may fail if jq is not installed. -# That's probably worth checking to make sure, just in case. -if [ $ec -eq 0 ]; then - remoteVersion=`curl -s $curlopts $protocol://$ip:$port_outer/version | jq -c '.Version'` - echo "INFO: Remote daemon is running docker version $remoteVersion" -fi - -# Compare versions. We should really fail if the result is not 1. Output at end of script. -if [ $ec -eq 0 ]; then - uniques=`docker version | grep Version | /usr/bin/sort -u | wc -l` -fi - -# Make sure we are in repo -if [ $ec -eq 0 ]; then - if [ ! -d hack ]; then - echo "ERROR: Are you sure this is being launched from the root of the docker repository?" - echo " If this is a Windows CI machine, it should be c:\jenkins\gopath\src\github.com\docker\docker." - echo " Current directory is `pwd`" - ec=1 - fi -fi - -# Are we in split binary mode? -if [ `grep DOCKER_CLIENTONLY Makefile | wc -l` -gt 0 ]; then - splitBinary=0 - echo "INFO: Running in single binary mode" -else - splitBinary=1 - echo "INFO: Running in split binary mode" -fi - - -# Get the commit hash and verify we have something -if [ $ec -eq 0 ]; then - export COMMITHASH=$(git rev-parse --short HEAD) - echo INFO: Commit hash is $COMMITHASH - if [ -z $COMMITHASH ]; then - echo "ERROR: Failed to get commit hash. Are you sure this is a docker repository?" - ec=1 - fi -fi - -# Redirect to a temporary location. Check is here for local runs from Jenkins machines just in case not -# in the right directory where the repo is cloned. We also redirect TEMP to not use the environment -# TEMP as when running as a standard user (not local system), it otherwise exposes a bug in posix tar which -# will cause CI to fail from Windows to Linux. Obviously it's not best practice to ever run as local system... -if [ $ec -eq 0 ]; then - export TEMP=/c/CI/CI-$COMMITHASH - export TMP=$TEMP - /usr/bin/mkdir -p $TEMP # Make sure Linux mkdir for -p -fi - -# Tidy up time -if [ $ec -eq 0 ]; then - echo INFO: Deleting pre-existing containers and images... - - # Force remove all containers based on a previously built image with this commit - ! docker rm -f $(docker ps -aq --filter "ancestor=docker:$COMMITHASH") &>/dev/null - - # Force remove any container with this commithash as a name - ! docker rm -f $(docker ps -aq --filter "name=docker-$COMMITHASH") &>/dev/null - - # This SHOULD never happen, but just in case, also blow away any containers - # that might be around. - ! if [ ! $(docker ps -aq | wc -l) -eq 0 ]; then - echo WARN: There were some leftover containers. Cleaning them up. - ! docker rm -f $(docker ps -aq) - fi - - # Force remove the image if it exists - ! docker rmi -f "docker-$COMMITHASH" &>/dev/null -fi - -# Provide the docker version for debugging purposes. If these fail, game over. 
-# as the Linux box isn't responding for some reason. -if [ $ec -eq 0 ]; then - echo INFO: Docker version and info of the outer daemon on the Linux node - echo - docker version - ec=$? - if [ 0 -ne $ec ]; then - echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" - fi - echo -fi - -# Same as above, but docker info -if [ $ec -eq 0 ]; then - echo - docker info - ec=$? - if [ 0 -ne $ec ]; then - echo "ERROR: The main linux daemon does not appear to be running. Has the Linux node crashed?" - fi - echo -fi - -# build the daemon image -if [ $ec -eq 0 ]; then - echo "INFO: Running docker build on Linux host at $DOCKER_HOST" - if [ $splitBinary -eq 0 ]; then - set -x - docker build --rm --force-rm --build-arg APT_MIRROR=cdn-fastly.deb.debian.org -t "docker:$COMMITHASH" . - cat < -# See the blog post: https://blog.docker.com/2013/09/docker-can-now-run-within-docker/ -# -# This script should be executed inside a docker container in privileged mode -# ('docker run --privileged', introduced in docker 0.6). - -# Usage: dind CMD [ARG...] - -# apparmor sucks and Docker needs to know that it's in a container (c) @tianon -export container=docker - -if [ -d /sys/kernel/security ] && ! mountpoint -q /sys/kernel/security; then - mount -t securityfs none /sys/kernel/security || { - echo >&2 'Could not mount /sys/kernel/security.' - echo >&2 'AppArmor detection and --privileged mode might break.' - } -fi - -# Mount /tmp (conditionally) -if ! mountpoint -q /tmp; then - mount -t tmpfs none /tmp -fi - -if [ $# -gt 0 ]; then - exec "$@" -fi - -echo >&2 'ERROR: No command specified.' -echo >&2 'You probably want to run hack/make.sh, or maybe a shell?' diff --git a/hack/generate-authors.sh b/hack/generate-authors.sh deleted file mode 100755 index e78a97f962..0000000000 --- a/hack/generate-authors.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -e - -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")/.." - -# see also ".mailmap" for how email addresses and names are deduplicated - -{ - cat <<-'EOH' - # This file lists all individuals having contributed content to the repository. - # For how it is generated, see `hack/generate-authors.sh`. - EOH - echo - git log --format='%aN <%aE>' | LC_ALL=C.UTF-8 sort -uf -} > AUTHORS diff --git a/hack/install.sh b/hack/install.sh deleted file mode 100644 index 70621a5e19..0000000000 --- a/hack/install.sh +++ /dev/null @@ -1,525 +0,0 @@ -#!/bin/sh -set -e -# -# This script is meant for quick & easy install via: -# 'curl -sSL https://get.docker.com/ | sh' -# or: -# 'wget -qO- https://get.docker.com/ | sh' -# -# For test builds (ie. 
release candidates): -# 'curl -fsSL https://test.docker.com/ | sh' -# or: -# 'wget -qO- https://test.docker.com/ | sh' -# -# For experimental builds: -# 'curl -fsSL https://experimental.docker.com/ | sh' -# or: -# 'wget -qO- https://experimental.docker.com/ | sh' -# -# Docker Maintainers: -# To update this script on https://get.docker.com, -# use hack/release.sh during a normal release, -# or the following one-liner for script hotfixes: -# aws s3 cp --acl public-read hack/install.sh s3://get.docker.com/index -# - -url="https://get.docker.com/" -apt_url="https://apt.dockerproject.org" -yum_url="https://yum.dockerproject.org" -gpg_fingerprint="58118E89F3A912897C070ADBF76221572C52609D" - -key_servers=" -ha.pool.sks-keyservers.net -pgp.mit.edu -keyserver.ubuntu.com -" - -command_exists() { - command -v "$@" > /dev/null 2>&1 -} - -echo_docker_as_nonroot() { - if command_exists docker && [ -e /var/run/docker.sock ]; then - ( - set -x - $sh_c 'docker version' - ) || true - fi - your_user=your-user - [ "$user" != 'root' ] && your_user="$user" - # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-EOF", spaces are kept in the output - cat <<-EOF - - If you would like to use Docker as a non-root user, you should now consider - adding your user to the "docker" group with something like: - - sudo usermod -aG docker $your_user - - Remember that you will have to log out and back in for this to take effect! - - EOF -} - -# Check if this is a forked Linux distro -check_forked() { - - # Check for lsb_release command existence, it usually exists in forked distros - if command_exists lsb_release; then - # Check if the `-u` option is supported - set +e - lsb_release -a -u > /dev/null 2>&1 - lsb_release_exit_code=$? - set -e - - # Check if the command has exited successfully, it means we're in a forked distro - if [ "$lsb_release_exit_code" = "0" ]; then - # Print info about current distro - cat <<-EOF - You're using '$lsb_dist' version '$dist_version'. - EOF - - # Get the upstream release info - lsb_dist=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'id' | cut -d ':' -f 2 | tr -d '[[:space:]]') - dist_version=$(lsb_release -a -u 2>&1 | tr '[:upper:]' '[:lower:]' | grep -E 'codename' | cut -d ':' -f 2 | tr -d '[[:space:]]') - - # Print info about upstream distro - cat <<-EOF - Upstream release is '$lsb_dist' version '$dist_version'. - EOF - else - if [ -r /etc/debian_version ] && [ "$lsb_dist" != "ubuntu" ] && [ "$lsb_dist" != "raspbian" ]; then - # We're Debian and don't even know it! - lsb_dist=debian - dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" - case "$dist_version" in - 8|'Kali Linux 2') - dist_version="jessie" - ;; - 7) - dist_version="wheezy" - ;; - esac - fi - fi - fi -} - -rpm_import_repository_key() { - local key=$1; shift - local tmpdir=$(mktemp -d) - chmod 600 "$tmpdir" - for key_server in $key_servers ; do - gpg --homedir "$tmpdir" --keyserver "$key_server" --recv-keys "$key" && break - done - gpg --homedir "$tmpdir" -k "$key" >/dev/null - gpg --homedir "$tmpdir" --export --armor "$key" > "$tmpdir"/repo.key - rpm --import "$tmpdir"/repo.key - rm -rf "$tmpdir" -} - -semverParse() { - major="${1%%.*}" - minor="${1#$major.}" - minor="${minor%%.*}" - patch="${1#$major.$minor.}" - patch="${patch%%[-.]*}" -} - -do_install() { - case "$(uname -m)" in - *64) - ;; - armv6l|armv7l) - ;; - *) - cat >&2 <<-'EOF' - Error: you are not using a 64bit platform or a Raspberry Pi (armv6l/armv7l). 
- Docker currently only supports 64bit platforms or a Raspberry Pi (armv6l/armv7l). - EOF - exit 1 - ;; - esac - - if command_exists docker; then - version="$(docker -v | awk -F '[ ,]+' '{ print $3 }')" - MAJOR_W=1 - MINOR_W=10 - - semverParse $version - - shouldWarn=0 - if [ $major -lt $MAJOR_W ]; then - shouldWarn=1 - fi - - if [ $major -le $MAJOR_W ] && [ $minor -lt $MINOR_W ]; then - shouldWarn=1 - fi - - cat >&2 <<-'EOF' - Warning: the "docker" command appears to already exist on this system. - - If you already have Docker installed, this script can cause trouble, which is - why we're displaying this warning and provide the opportunity to cancel the - installation. - - If you installed the current Docker package using this script and are using it - EOF - - if [ $shouldWarn -eq 1 ]; then - cat >&2 <<-'EOF' - again to update Docker, we urge you to migrate your image store before upgrading - to v1.10+. - - You can find instructions for this here: - https://github.com/docker/docker/wiki/Engine-v1.10.0-content-addressability-migration - EOF - else - cat >&2 <<-'EOF' - again to update Docker, you can safely ignore this message. - EOF - fi - - cat >&2 <<-'EOF' - - You may press Ctrl+C now to abort this script. - EOF - ( set -x; sleep 20 ) - fi - - user="$(id -un 2>/dev/null || true)" - - sh_c='sh -c' - if [ "$user" != 'root' ]; then - if command_exists sudo; then - sh_c='sudo -E sh -c' - elif command_exists su; then - sh_c='su -c' - else - cat >&2 <<-'EOF' - Error: this installer needs the ability to run commands as root. - We are unable to find either "sudo" or "su" available to make this happen. - EOF - exit 1 - fi - fi - - curl='' - if command_exists curl; then - curl='curl -sSL' - elif command_exists wget; then - curl='wget -qO-' - elif command_exists busybox && busybox --list-modules | grep -q wget; then - curl='busybox wget -qO-' - fi - - # check to see which repo they are trying to install from - if [ -z "$repo" ]; then - repo='main' - if [ "https://test.docker.com/" = "$url" ]; then - repo='testing' - elif [ "https://experimental.docker.com/" = "$url" ]; then - repo='experimental' - fi - fi - - # perform some very rudimentary platform detection - lsb_dist='' - dist_version='' - if command_exists lsb_release; then - lsb_dist="$(lsb_release -si)" - fi - if [ -z "$lsb_dist" ] && [ -r /etc/lsb-release ]; then - lsb_dist="$(. /etc/lsb-release && echo "$DISTRIB_ID")" - fi - if [ -z "$lsb_dist" ] && [ -r /etc/debian_version ]; then - lsb_dist='debian' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/fedora-release ]; then - lsb_dist='fedora' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/oracle-release ]; then - lsb_dist='oracleserver' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/centos-release ]; then - lsb_dist='centos' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/redhat-release ]; then - lsb_dist='redhat' - fi - if [ -z "$lsb_dist" ] && [ -r /etc/os-release ]; then - lsb_dist="$(. /etc/os-release && echo "$ID")" - fi - - lsb_dist="$(echo "$lsb_dist" | tr '[:upper:]' '[:lower:]')" - - # Special case redhatenterpriseserver - if [ "${lsb_dist}" = "redhatenterpriseserver" ]; then - # Set it to redhat, it will be changed to centos below anyways - lsb_dist='redhat' - fi - - case "$lsb_dist" in - - ubuntu) - if command_exists lsb_release; then - dist_version="$(lsb_release --codename | cut -f2)" - fi - if [ -z "$dist_version" ] && [ -r /etc/lsb-release ]; then - dist_version="$(. 
/etc/lsb-release && echo "$DISTRIB_CODENAME")" - fi - ;; - - debian|raspbian) - dist_version="$(cat /etc/debian_version | sed 's/\/.*//' | sed 's/\..*//')" - case "$dist_version" in - 8) - dist_version="jessie" - ;; - 7) - dist_version="wheezy" - ;; - esac - ;; - - oracleserver) - # need to switch lsb_dist to match yum repo URL - lsb_dist="oraclelinux" - dist_version="$(rpm -q --whatprovides redhat-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//')" - ;; - - fedora|centos|redhat) - dist_version="$(rpm -q --whatprovides ${lsb_dist}-release --queryformat "%{VERSION}\n" | sed 's/\/.*//' | sed 's/\..*//' | sed 's/Server*//' | sort | tail -1)" - ;; - - *) - if command_exists lsb_release; then - dist_version="$(lsb_release --codename | cut -f2)" - fi - if [ -z "$dist_version" ] && [ -r /etc/os-release ]; then - dist_version="$(. /etc/os-release && echo "$VERSION_ID")" - fi - ;; - - - esac - - # Check if this is a forked Linux distro - check_forked - - # Run setup for each distro accordingly - case "$lsb_dist" in - amzn) - ( - set -x - $sh_c 'sleep 3; yum -y -q install docker' - ) - echo_docker_as_nonroot - exit 0 - ;; - - 'opensuse project'|opensuse) - echo 'Going to perform the following operations:' - if [ "$repo" != 'main' ]; then - echo ' * add repository obs://Virtualization:containers' - fi - echo ' * install Docker' - $sh_c 'echo "Press CTRL-C to abort"; sleep 3' - - if [ "$repo" != 'main' ]; then - # install experimental packages from OBS://Virtualization:containers - ( - set -x - zypper -n ar -f obs://Virtualization:containers Virtualization:containers - rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2 - ) - fi - ( - set -x - zypper -n install docker - ) - echo_docker_as_nonroot - exit 0 - ;; - 'suse linux'|sle[sd]) - echo 'Going to perform the following operations:' - if [ "$repo" != 'main' ]; then - echo ' * add repository obs://Virtualization:containers' - echo ' * install experimental Docker using packages NOT supported by SUSE' - else - echo ' * add the "Containers" module' - echo ' * install Docker using packages supported by SUSE' - fi - $sh_c 'echo "Press CTRL-C to abort"; sleep 3' - - if [ "$repo" != 'main' ]; then - # install experimental packages from OBS://Virtualization:containers - echo >&2 'Warning: installing experimental packages from OBS, these packages are NOT supported by SUSE' - ( - set -x - zypper -n ar -f obs://Virtualization:containers/SLE_12 Virtualization:containers - rpm_import_repository_key 55A0B34D49501BB7CA474F5AA193FBB572174FC2 - ) - else - # Add the containers module - # Note well-1: the SLE machine must already be registered against SUSE Customer Center - # Note well-2: the `-r ""` is required to workaround a known issue of SUSEConnect - ( - set -x - SUSEConnect -p sle-module-containers/12/x86_64 -r "" - ) - fi - ( - set -x - zypper -n install docker - ) - echo_docker_as_nonroot - exit 0 - ;; - - ubuntu|debian|raspbian) - export DEBIAN_FRONTEND=noninteractive - - did_apt_get_update= - apt_get_update() { - if [ -z "$did_apt_get_update" ]; then - ( set -x; $sh_c 'sleep 3; apt-get update' ) - did_apt_get_update=1 - fi - } - - if [ "$lsb_dist" = "raspbian" ]; then - # Create Raspbian specific systemd drop-in file, use overlay by default - ( set -x; $sh_c "mkdir -p /etc/systemd/system/docker.service.d" ) - ( set -x; $sh_c "echo '[Service]\nExecStart=\nExecStart=/usr/bin/dockerd --storage-driver overlay -H fd://' > /etc/systemd/system/docker.service.d/overlay.conf" ) - else - # aufs is preferred over 
devicemapper; try to ensure the driver is available. - if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then - if uname -r | grep -q -- '-generic' && dpkg -l 'linux-image-*-generic' | grep -qE '^ii|^hi' 2>/dev/null; then - kern_extras="linux-image-extra-$(uname -r) linux-image-extra-virtual" - - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q '"$kern_extras" ) || true - - if ! grep -q aufs /proc/filesystems && ! $sh_c 'modprobe aufs'; then - echo >&2 'Warning: tried to install '"$kern_extras"' (for AUFS)' - echo >&2 ' but we still have no AUFS. Docker may not work. Proceeding anyways!' - ( set -x; sleep 10 ) - fi - else - echo >&2 'Warning: current kernel is not supported by the linux-image-extra-virtual' - echo >&2 ' package. We have no AUFS support. Consider installing the packages' - echo >&2 ' linux-image-virtual kernel and linux-image-extra-virtual for AUFS support.' - ( set -x; sleep 10 ) - fi - fi - fi - - # install apparmor utils if they're missing and apparmor is enabled in the kernel - # otherwise Docker will fail to start - if [ "$(cat /sys/module/apparmor/parameters/enabled 2>/dev/null)" = 'Y' ]; then - if command -v apparmor_parser >/dev/null 2>&1; then - echo 'apparmor is enabled in the kernel and apparmor utils were already installed' - else - echo 'apparmor is enabled in the kernel, but apparmor_parser is missing. Trying to install it..' - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q apparmor' ) - fi - fi - - if [ ! -e /usr/lib/apt/methods/https ]; then - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q apt-transport-https ca-certificates' ) - fi - if [ -z "$curl" ]; then - apt_get_update - ( set -x; $sh_c 'sleep 3; apt-get install -y -q curl ca-certificates' ) - curl='curl -sSL' - fi - ( - set -x - for key_server in $key_servers ; do - $sh_c "apt-key adv --keyserver hkp://${key_server}:80 --recv-keys ${gpg_fingerprint}" && break - done - $sh_c "apt-key adv -k ${gpg_fingerprint} >/dev/null" - $sh_c "mkdir -p /etc/apt/sources.list.d" - $sh_c "echo deb \[arch=$(dpkg --print-architecture)\] ${apt_url}/repo ${lsb_dist}-${dist_version} ${repo} > /etc/apt/sources.list.d/docker.list" - $sh_c 'sleep 3; apt-get update; apt-get install -y -q docker-engine' - ) - echo_docker_as_nonroot - exit 0 - ;; - - fedora|centos|redhat|oraclelinux) - if [ "${lsb_dist}" = "redhat" ]; then - # we use the centos repository for both redhat and centos releases - lsb_dist='centos' - fi - $sh_c "cat >/etc/yum.repos.d/docker-${repo}.repo" <<-EOF - [docker-${repo}-repo] - name=Docker ${repo} Repository - baseurl=${yum_url}/repo/${repo}/${lsb_dist}/${dist_version} - enabled=1 - gpgcheck=1 - gpgkey=${yum_url}/gpg - EOF - if [ "$lsb_dist" = "fedora" ] && [ "$dist_version" -ge "22" ]; then - ( - set -x - $sh_c 'sleep 3; dnf -y -q install docker-engine' - ) - else - ( - set -x - $sh_c 'sleep 3; yum -y -q install docker-engine' - ) - fi - echo_docker_as_nonroot - exit 0 - ;; - gentoo) - if [ "$url" = "https://test.docker.com/" ]; then - # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output - cat >&2 <<-'EOF' - - You appear to be trying to install the latest nightly build in Gentoo. - The portage tree should contain the latest stable release of Docker, but - if you want something more recent, you can always use the live ebuild - provided in the "docker" overlay available via layman. 
For more' - instructions, please see the following URL:' - - https://github.com/tianon/docker-overlay#using-this-overlay' - - After adding the "docker" overlay, you should be able to:' - - emerge -av =app-emulation/docker-9999' - - EOF - exit 1 - fi - - ( - set -x - $sh_c 'sleep 3; emerge app-emulation/docker' - ) - exit 0 - ;; - esac - - # intentionally mixed spaces and tabs here -- tabs are stripped by "<<-'EOF'", spaces are kept in the output - cat >&2 <<-'EOF' - - Either your platform is not easily detectable, is not supported by this - installer script (yet - PRs welcome! [hack/install.sh]), or does not yet have - a package for Docker. Please visit the following URL for more detailed - installation instructions: - - https://docs.docker.com/engine/installation/ - - EOF - exit 1 -} - -# wrapped up in a function so that we have some protection against only getting -# half the file during "curl | sh" -do_install diff --git a/hack/make.sh b/hack/make.sh deleted file mode 100755 index 02db1d1a85..0000000000 --- a/hack/make.sh +++ /dev/null @@ -1,386 +0,0 @@ -#!/usr/bin/env bash -set -e - -# This script builds various binary artifacts from a checkout of the docker -# source code. -# -# Requirements: -# - The current directory should be a checkout of the docker source code -# (https://github.com/docker/docker). Whatever version is checked out -# will be built. -# - The VERSION file, at the root of the repository, should exist, and -# will be used as Docker binary version and package version. -# - The hash of the git commit will also be included in the Docker binary, -# with the suffix -unsupported if the repository isn't clean. -# - The script is intended to be run inside the docker container specified -# in the Dockerfile at the root of the source. In other words: -# DO NOT CALL THIS SCRIPT DIRECTLY. -# - The right way to call this script is to invoke "make" from -# your checkout of the Docker repository. -# the Makefile will do a "docker build -t docker ." and then -# "docker run hack/make.sh" in the resulting image. -# - -set -o pipefail - -export DOCKER_PKG='github.com/docker/docker' -export SCRIPTDIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -export MAKEDIR="$SCRIPTDIR/make" -export PKG_CONFIG=${PKG_CONFIG:-pkg-config} - -: ${TEST_REPEAT:=0} - -# We're a nice, sexy, little shell script, and people might try to run us; -# but really, they shouldn't. We want to be in a container! -inContainer="AssumeSoInitially" -if [ "$(go env GOHOSTOS)" = 'windows' ]; then - if [ -z "$FROM_DOCKERFILE" ]; then - unset inContainer - fi -else - if [ "$PWD" != "/go/src/$DOCKER_PKG" ] || [ -z "$DOCKER_CROSSPLATFORMS" ]; then - unset inContainer - fi -fi - -if [ -z "$inContainer" ]; then - { - echo "# WARNING! I don't seem to be running in a Docker container." - echo "# The result of this command might be an incorrect build, and will not be" - echo "# officially supported." 
- echo "#" - echo "# Try this instead: make all" - echo "#" - } >&2 -fi - -echo - -# List of bundles to create when no argument is passed -DEFAULT_BUNDLES=( - validate-dco - validate-default-seccomp - validate-gofmt - validate-lint - validate-pkg - validate-test - validate-toml - validate-vet - - binary-client - binary-daemon - dynbinary - - test-unit - test-integration-cli - test-docker-py - - cover - cross - tgz -) - -VERSION=$(< ./VERSION) -if command -v git &> /dev/null && [ -d .git ] && git rev-parse &> /dev/null; then - GITCOMMIT=$(git rev-parse --short HEAD) - if [ -n "$(git status --porcelain --untracked-files=no)" ]; then - GITCOMMIT="$GITCOMMIT-unsupported" - echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" - echo "# GITCOMMIT = $GITCOMMIT" - echo "# The version you are building is listed as unsupported because" - echo "# there are some files in the git repository that are in an uncommited state." - echo "# Commit these changes, or add to .gitignore to remove the -unsupported from the version." - echo "# Here is the current list:" - git status --porcelain --untracked-files=no - echo "#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~" - fi - ! BUILDTIME=$(date --rfc-3339 ns 2> /dev/null | sed -e 's/ /T/') &> /dev/null - if [ -z $BUILDTIME ]; then - # If using bash 3.1 which doesn't support --rfc-3389, eg Windows CI - BUILDTIME=$(date -u) - fi -elif [ "$DOCKER_GITCOMMIT" ]; then - GITCOMMIT="$DOCKER_GITCOMMIT" -else - echo >&2 'error: .git directory missing and DOCKER_GITCOMMIT not specified' - echo >&2 ' Please either build with the .git directory accessible, or specify the' - echo >&2 ' exact (--short) commit hash you are building using DOCKER_GITCOMMIT for' - echo >&2 ' future accountability in diagnosing build issues. Thanks!' - exit 1 -fi - -if [ "$AUTO_GOPATH" ]; then - rm -rf .gopath - mkdir -p .gopath/src/"$(dirname "${DOCKER_PKG}")" - ln -sf ../../../.. .gopath/src/"${DOCKER_PKG}" - export GOPATH="${PWD}/.gopath:${PWD}/vendor" - - if [ "$(go env GOOS)" = 'solaris' ]; then - # sys/unix is installed outside the standard library on solaris - # TODO need to allow for version change, need to get version from go - export GO_VERSION=${GO_VERSION:-"1.6.3"} - export GOPATH="${GOPATH}:/usr/lib/gocode/${GO_VERSION}" - fi -fi - -if [ ! "$GOPATH" ]; then - echo >&2 'error: missing GOPATH; please see https://golang.org/doc/code.html#GOPATH' - echo >&2 ' alternatively, set AUTO_GOPATH=1' - exit 1 -fi - -if [ "$DOCKER_EXPERIMENTAL" ]; then - echo >&2 '# WARNING! DOCKER_EXPERIMENTAL is set: building experimental features' - echo >&2 - DOCKER_BUILDTAGS+=" experimental" -fi - -DOCKER_BUILDTAGS+=" daemon" -if ${PKG_CONFIG} 'libsystemd >= 209' 2> /dev/null ; then - DOCKER_BUILDTAGS+=" journald" -elif ${PKG_CONFIG} 'libsystemd-journal' 2> /dev/null ; then - DOCKER_BUILDTAGS+=" journald journald_compat" -fi - -# test whether "btrfs/version.h" exists and apply btrfs_noversion appropriately -if \ - command -v gcc &> /dev/null \ - && ! gcc -E - -o /dev/null &> /dev/null <<<'#include ' \ -; then - DOCKER_BUILDTAGS+=' btrfs_noversion' -fi - -# test whether "libdevmapper.h" is new enough to support deferred remove -# functionality. -if \ - command -v gcc &> /dev/null \ - && ! 
( echo -e '#include \nint main() { dm_task_deferred_remove(NULL); }'| gcc -xc - -o /dev/null -ldevmapper &> /dev/null ) \ -; then - DOCKER_BUILDTAGS+=' libdm_no_deferred_remove' -fi - -# Use these flags when compiling the tests and final binary - -IAMSTATIC='true' -source "$SCRIPTDIR/make/.go-autogen" -if [ -z "$DOCKER_DEBUG" ]; then - LDFLAGS='-w' -fi - -LDFLAGS_STATIC='' -EXTLDFLAGS_STATIC='-static' -# ORIG_BUILDFLAGS is necessary for the cross target which cannot always build -# with options like -race. -ORIG_BUILDFLAGS=( -tags "autogen netgo static_build sqlite_omit_load_extension $DOCKER_BUILDTAGS" -installsuffix netgo ) -# see https://github.com/golang/go/issues/9369#issuecomment-69864440 for why -installsuffix is necessary here - -# When $DOCKER_INCREMENTAL_BINARY is set in the environment, enable incremental -# builds by installing dependent packages to the GOPATH. -REBUILD_FLAG="-a" -if [ "$DOCKER_INCREMENTAL_BINARY" ]; then - REBUILD_FLAG="-i" -fi -ORIG_BUILDFLAGS+=( $REBUILD_FLAG ) - -BUILDFLAGS=( $BUILDFLAGS "${ORIG_BUILDFLAGS[@]}" ) -# Test timeout. - -if [ "${DOCKER_ENGINE_GOARCH}" == "arm" ]; then - : ${TIMEOUT:=10m} -elif [ "${DOCKER_ENGINE_GOARCH}" == "windows" ]; then - : ${TIMEOUT:=8m} -else - : ${TIMEOUT:=5m} -fi - -LDFLAGS_STATIC_DOCKER=" - $LDFLAGS_STATIC - -extldflags \"$EXTLDFLAGS_STATIC\" -" - -if [ "$(uname -s)" = 'FreeBSD' ]; then - # Tell cgo the compiler is Clang, not GCC - # https://code.google.com/p/go/source/browse/src/cmd/cgo/gcc.go?spec=svne77e74371f2340ee08622ce602e9f7b15f29d8d3&r=e6794866ebeba2bf8818b9261b54e2eef1c9e588#752 - export CC=clang - - # "-extld clang" is a workaround for - # https://code.google.com/p/go/issues/detail?id=6845 - LDFLAGS="$LDFLAGS -extld clang" -fi - -# If sqlite3.h doesn't exist under /usr/include, -# check /usr/local/include also just in case -# (e.g. FreeBSD Ports installs it under the directory) -if [ ! -e /usr/include/sqlite3.h ] && [ -e /usr/local/include/sqlite3.h ]; then - export CGO_CFLAGS='-I/usr/local/include' - export CGO_LDFLAGS='-L/usr/local/lib' -fi - -HAVE_GO_TEST_COVER= -if \ - go help testflag | grep -- -cover > /dev/null \ - && go tool -n cover > /dev/null 2>&1 \ -; then - HAVE_GO_TEST_COVER=1 -fi - -# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. -# You can use this to select certain tests to run, eg. 
-# -# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit -# -# For integration-cli test, we use [gocheck](https://labix.org/gocheck), if you want -# to run certain tests on your local host, you should run with command: -# -# TESTFLAGS='-check.f DockerSuite.TestBuild*' ./hack/make.sh binary test-integration-cli -# -go_test_dir() { - dir=$1 - coverpkg=$2 - testcover=() - testcoverprofile=() - testbinary="$DEST/test.main" - if [ "$HAVE_GO_TEST_COVER" ]; then - # if our current go install has -cover, we want to use it :) - mkdir -p "$DEST/coverprofiles" - coverprofile="docker${dir#.}" - coverprofile="$ABS_DEST/coverprofiles/${coverprofile//\//-}" - testcover=( -test.cover ) - testcoverprofile=( -test.coverprofile "$coverprofile" $coverpkg ) - fi - ( - echo '+ go test' $TESTFLAGS "${DOCKER_PKG}${dir#.}" - cd "$dir" - export DEST="$ABS_DEST" # we're in a subshell, so this is safe -- our integration-cli tests need DEST, and "cd" screws it up - go test -c -o "$testbinary" ${testcover[@]} -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" - i=0 - while ((++i)); do - test_env "$testbinary" ${testcoverprofile[@]} $TESTFLAGS - if [ $i -gt "$TEST_REPEAT" ]; then - break - fi - echo "Repeating test ($i)" - done - ) -} -test_env() { - # use "env -i" to tightly control the environment variables that bleed into the tests - env -i \ - DEST="$DEST" \ - DOCKER_TLS_VERIFY="$DOCKER_TEST_TLS_VERIFY" \ - DOCKER_CERT_PATH="$DOCKER_TEST_CERT_PATH" \ - DOCKER_ENGINE_GOARCH="$DOCKER_ENGINE_GOARCH" \ - DOCKER_GRAPHDRIVER="$DOCKER_GRAPHDRIVER" \ - DOCKER_USERLANDPROXY="$DOCKER_USERLANDPROXY" \ - DOCKER_HOST="$DOCKER_HOST" \ - DOCKER_REMAP_ROOT="$DOCKER_REMAP_ROOT" \ - DOCKER_REMOTE_DAEMON="$DOCKER_REMOTE_DAEMON" \ - GOPATH="$GOPATH" \ - GOTRACEBACK=all \ - HOME="$ABS_DEST/fake-HOME" \ - PATH="$PATH" \ - TEMP="$TEMP" \ - "$@" -} - -# a helper to provide ".exe" when it's appropriate -binary_extension() { - if [ "$(go env GOOS)" = 'windows' ]; then - echo -n '.exe' - fi -} - -hash_files() { - while [ $# -gt 0 ]; do - f="$1" - shift - dir="$(dirname "$f")" - base="$(basename "$f")" - for hashAlgo in md5 sha256; do - if command -v "${hashAlgo}sum" &> /dev/null; then - ( - # subshell and cd so that we get output files like: - # $HASH docker-$VERSION - # instead of: - # $HASH /go/src/github.com/.../$VERSION/binary/docker-$VERSION - cd "$dir" - "${hashAlgo}sum" "$base" > "$base.$hashAlgo" - ) - fi - done - done -} - -bundle() { - local bundle="$1"; shift - echo "---> Making bundle: $(basename "$bundle") (in $DEST)" - source "$SCRIPTDIR/make/$bundle" "$@" -} - -copy_containerd() { - dir="$1" - # Add nested executables to bundle dir so we have complete set of - # them available, but only if the native OS/ARCH is the same as the - # OS/ARCH of the build target - if [ "$(go env GOOS)/$(go env GOARCH)" == "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then - if [ -x /usr/local/bin/docker-runc ]; then - echo "Copying nested executables into $dir" - for file in containerd containerd-shim containerd-ctr runc; do - cp `which "docker-$file"` "$dir/" - if [ "$2" == "hash" ]; then - hash_files "$dir/docker-$file" - fi - done - fi - fi -} - -install_binary() { - file="$1" - target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/" - if [ "$(go env GOOS)" == "linux" ]; then - echo "Installing $(basename $file) to ${target}" - cp -L "$file" "$target" - else - echo "Install is only supported on linux" - return 1 - fi -} - - -main() { - # We want this to fail if the bundles already exist and cannot be removed. 
- # This is to avoid mixing bundles from different versions of the code. - mkdir -p bundles - if [ -e "bundles/$VERSION" ] && [ -z "$KEEPBUNDLE" ]; then - echo "bundles/$VERSION already exists. Removing." - rm -fr "bundles/$VERSION" && mkdir "bundles/$VERSION" || exit 1 - echo - fi - - if [ "$(go env GOHOSTOS)" != 'windows' ]; then - # Windows and symlinks don't get along well - - rm -f bundles/latest - ln -s "$VERSION" bundles/latest - fi - - if [ $# -lt 1 ]; then - bundles=(${DEFAULT_BUNDLES[@]}) - else - bundles=($@) - fi - for bundle in ${bundles[@]}; do - export DEST="bundles/$VERSION/$(basename "$bundle")" - # Cygdrive paths don't play well with go build -o. - if [[ "$(uname -s)" == CYGWIN* ]]; then - export DEST="$(cygpath -mw "$DEST")" - fi - mkdir -p "$DEST" - ABS_DEST="$(cd "$DEST" && pwd -P)" - bundle "$bundle" - echo - done -} - -main "$@" diff --git a/hack/make/.binary b/hack/make/.binary deleted file mode 100644 index d2b99b8498..0000000000 --- a/hack/make/.binary +++ /dev/null @@ -1,64 +0,0 @@ -#!/bin/bash -set -e - -BINARY_NAME="$BINARY_SHORT_NAME-$VERSION" -BINARY_EXTENSION="$(binary_extension)" -BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" - -source "${MAKEDIR}/.go-autogen" - -( -export GOGC=${DOCKER_BUILD_GOGC:-1000} - -if [ "$(go env GOOS)/$(go env GOARCH)" != "$(go env GOHOSTOS)/$(go env GOHOSTARCH)" ]; then - # must be cross-compiling! - case "$(go env GOOS)/$(go env GOARCH)" in - windows/amd64) - export CC=x86_64-w64-mingw32-gcc - export CGO_ENABLED=1 - ;; - esac -fi - -if [ "$(go env GOOS)" == "linux" ] ; then - case "$(go env GOARCH)" in - arm*|386) - # linking for Linux on arm or x86 needs external linking to avoid - # https://github.com/golang/go/issues/9510 until we move to Go 1.6 - if [ "$IAMSTATIC" == "true" ] ; then - export EXTLDFLAGS_STATIC="$EXTLDFLAGS_STATIC -zmuldefs" - export LDFLAGS_STATIC_DOCKER="$LDFLAGS_STATIC -extldflags \"$EXTLDFLAGS_STATIC\"" - - else - export LDFLAGS="$LDFLAGS -extldflags -zmuldefs" - fi - ;; - esac -fi - -if [ "$IAMSTATIC" == "true" ] && [ "$(go env GOHOSTOS)" == "linux" ]; then - if [ "${GOOS}/${GOARCH}" == "darwin/amd64" ]; then - export CGO_ENABLED=1 - export CC=o64-clang - export LDFLAGS='-linkmode external -s' - export LDFLAGS_STATIC_DOCKER='-extld='${CC} - else - export BUILDFLAGS=( "${BUILDFLAGS[@]/pkcs11 /}" ) # we cannot dlopen in pkcs11 in a static binary - fi -fi - -echo "Building: $DEST/$BINARY_FULLNAME" -go build \ - -o "$DEST/$BINARY_FULLNAME" \ - "${BUILDFLAGS[@]}" \ - -ldflags " - $LDFLAGS - $LDFLAGS_STATIC_DOCKER - " \ - $SOURCE_PATH -) - -echo "Created binary: $DEST/$BINARY_FULLNAME" -ln -sf "$BINARY_FULLNAME" "$DEST/$BINARY_SHORT_NAME$BINARY_EXTENSION" - -hash_files "$DEST/$BINARY_FULLNAME" diff --git a/hack/make/.binary-setup b/hack/make/.binary-setup deleted file mode 100644 index e388c64485..0000000000 --- a/hack/make/.binary-setup +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -DOCKER_CLIENT_BINARY_NAME='docker' -DOCKER_DAEMON_BINARY_NAME='dockerd' -DOCKER_PROXY_BINARY_NAME='docker-proxy' diff --git a/hack/make/.build-deb/compat b/hack/make/.build-deb/compat deleted file mode 100644 index ec635144f6..0000000000 --- a/hack/make/.build-deb/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/hack/make/.build-deb/control b/hack/make/.build-deb/control deleted file mode 100644 index 0f5439947c..0000000000 --- a/hack/make/.build-deb/control +++ /dev/null @@ -1,29 +0,0 @@ -Source: docker-engine -Section: admin -Priority: optional -Maintainer: Docker -Standards-Version: 3.9.6 -Homepage: https://dockerproject.org 
-Vcs-Browser: https://github.com/docker/docker -Vcs-Git: git://github.com/docker/docker.git - -Package: docker-engine -Architecture: linux-any -Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends} -Recommends: aufs-tools, - ca-certificates, - cgroupfs-mount | cgroup-lite, - git, - xz-utils, - ${apparmor:Recommends} -Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs -Description: Docker: the open-source application container engine - Docker is an open source project to build, ship and run any application as a - lightweight container - . - Docker containers are both hardware-agnostic and platform-agnostic. This means - they can run anywhere, from your laptop to the largest EC2 compute instance and - everything in between - and they don't require you to use a particular - language, framework or packaging system. That makes them great building blocks - for deploying and scaling web apps, databases, and backend services without - depending on a particular stack or provider. diff --git a/hack/make/.build-deb/docker-engine.bash-completion b/hack/make/.build-deb/docker-engine.bash-completion deleted file mode 100644 index 6ea1119308..0000000000 --- a/hack/make/.build-deb/docker-engine.bash-completion +++ /dev/null @@ -1 +0,0 @@ -contrib/completion/bash/docker diff --git a/hack/make/.build-deb/docker-engine.docker.default b/hack/make/.build-deb/docker-engine.docker.default deleted file mode 120000 index 4278533d65..0000000000 --- a/hack/make/.build-deb/docker-engine.docker.default +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/init/sysvinit-debian/docker.default \ No newline at end of file diff --git a/hack/make/.build-deb/docker-engine.docker.init b/hack/make/.build-deb/docker-engine.docker.init deleted file mode 120000 index 8cb89d30dd..0000000000 --- a/hack/make/.build-deb/docker-engine.docker.init +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/init/sysvinit-debian/docker \ No newline at end of file diff --git a/hack/make/.build-deb/docker-engine.docker.upstart b/hack/make/.build-deb/docker-engine.docker.upstart deleted file mode 120000 index 7e1b64a3e6..0000000000 --- a/hack/make/.build-deb/docker-engine.docker.upstart +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/init/upstart/docker.conf \ No newline at end of file diff --git a/hack/make/.build-deb/docker-engine.install b/hack/make/.build-deb/docker-engine.install deleted file mode 100644 index dc6b25f04f..0000000000 --- a/hack/make/.build-deb/docker-engine.install +++ /dev/null @@ -1,12 +0,0 @@ -#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/ -#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/ -#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/ -contrib/*-integration usr/share/docker-engine/contrib/ -contrib/check-config.sh usr/share/docker-engine/contrib/ -contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/ -contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/ -contrib/init/systemd/docker.service lib/systemd/system/ -contrib/init/systemd/docker.socket lib/systemd/system/ -contrib/mk* usr/share/docker-engine/contrib/ -contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/ -contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/ diff --git a/hack/make/.build-deb/docker-engine.manpages b/hack/make/.build-deb/docker-engine.manpages deleted file mode 100644 index 1aa62186a6..0000000000 --- a/hack/make/.build-deb/docker-engine.manpages +++ /dev/null @@ -1 +0,0 @@ -man/man*/* diff --git 
a/hack/make/.build-deb/docker-engine.postinst b/hack/make/.build-deb/docker-engine.postinst deleted file mode 100644 index eeef6ca801..0000000000 --- a/hack/make/.build-deb/docker-engine.postinst +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/sh -set -e - -case "$1" in - configure) - if [ -z "$2" ]; then - if ! getent group docker > /dev/null; then - groupadd --system docker - fi - fi - ;; - abort-*) - # How'd we get here?? - exit 1 - ;; - *) - ;; -esac - -#DEBHELPER# diff --git a/hack/make/.build-deb/docker-engine.udev b/hack/make/.build-deb/docker-engine.udev deleted file mode 120000 index 914a361959..0000000000 --- a/hack/make/.build-deb/docker-engine.udev +++ /dev/null @@ -1 +0,0 @@ -../../../contrib/udev/80-docker.rules \ No newline at end of file diff --git a/hack/make/.build-deb/docs b/hack/make/.build-deb/docs deleted file mode 100644 index b43bf86b50..0000000000 --- a/hack/make/.build-deb/docs +++ /dev/null @@ -1 +0,0 @@ -README.md diff --git a/hack/make/.build-deb/rules b/hack/make/.build-deb/rules deleted file mode 100755 index bd097c7619..0000000000 --- a/hack/make/.build-deb/rules +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/make -f - -VERSION = $(shell cat VERSION) -SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1) -SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true ) - -override_dh_gencontrol: - # if we're on Ubuntu, we need to Recommends: apparmor - echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars - dh_gencontrol - -override_dh_auto_build: - ./hack/make.sh dynbinary - # ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here - -override_dh_auto_test: - ./bundles/$(VERSION)/dynbinary-daemon/dockerd -v - ./bundles/$(VERSION)/dynbinary-client/docker -v - -override_dh_strip: - # Go has lots of problems with stripping, so just don't - -override_dh_auto_install: - mkdir -p debian/docker-engine/usr/bin - cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-client/docker)" debian/docker-engine/usr/bin/docker - cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd - cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/docker-proxy)" debian/docker-engine/usr/bin/docker-proxy - cp -aT /usr/local/bin/containerd debian/docker-engine/usr/bin/docker-containerd - cp -aT /usr/local/bin/containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim - cp -aT /usr/local/bin/ctr debian/docker-engine/usr/bin/docker-containerd-ctr - cp -aT /usr/local/sbin/runc debian/docker-engine/usr/bin/docker-runc - mkdir -p debian/docker-engine/usr/lib/docker - -override_dh_installinit: - # use "docker" as our service name, not "docker-engine" - dh_installinit --name=docker -ifeq (true, $(SYSTEMD_GT_227)) - $(warning "Setting TasksMax=infinity") - sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service -endif - -override_dh_installudev: - # match our existing priority - dh_installudev --priority=z80 - -override_dh_install: - dh_install - dh_apparmor --profile-name=docker-engine -pdocker-engine - -override_dh_shlibdeps: - dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info - -%: - dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd) diff --git a/hack/make/.build-rpm/docker-engine-selinux.spec b/hack/make/.build-rpm/docker-engine-selinux.spec deleted file mode 
100644 index 706af36ac6..0000000000 --- a/hack/make/.build-rpm/docker-engine-selinux.spec +++ /dev/null @@ -1,109 +0,0 @@ -# Some bits borrowed from the openstack-selinux package -Name: docker-engine-selinux -Version: %{_version} -Release: %{_release}%{?dist} -Summary: SELinux Policies for the open-source application container engine -BuildArch: noarch -Group: Tools/Docker - -License: GPLv2 -Source: %{name}.tar.gz - -URL: https://dockerproject.org -Vendor: Docker -Packager: Docker - -# Version of SELinux we were using -%if 0%{?fedora} == 20 -%global selinux_policyver 3.12.1-197 -%endif # fedora 20 -%if 0%{?fedora} == 21 -%global selinux_policyver 3.13.1-105 -%endif # fedora 21 -%if 0%{?fedora} >= 22 -%global selinux_policyver 3.13.1-128 -%endif # fedora 22 -%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 -%global selinux_policyver 3.13.1-23 -%endif # centos,rhel,oraclelinux 7 - -%global selinuxtype targeted -%global moduletype services -%global modulenames docker - -Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils -BuildRequires: selinux-policy selinux-policy-devel - -# conflicting packages -Conflicts: docker-selinux - -# Usage: _format var format -# Expand 'modulenames' into various formats as needed -# Format must contain '$x' somewhere to do anything useful -%global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done; - -# Relabel files -%global relabel_files() \ - /sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \ - -%description -SELinux policy modules for use with Docker - -%prep -%if 0%{?centos} <= 6 -%setup -n %{name} -%else -%autosetup -n %{name} -%endif - -%build -make SHARE="%{_datadir}" TARGETS="%{modulenames}" - -%install - -# Install SELinux interfaces -%_format INTERFACES $x.if -install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} -install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype} - -# Install policy modules -%_format MODULES $x.pp.bz2 -install -d %{buildroot}%{_datadir}/selinux/packages -install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages - -%post -# -# Install all modules in a single transaction -# -if [ $1 -eq 1 ]; then - %{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1 -fi -%_format MODULES %{_datadir}/selinux/packages/$x.pp.bz2 -%{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES -if %{_sbindir}/selinuxenabled ; then - %{_sbindir}/load_policy - %relabel_files - if [ $1 -eq 1 ]; then - restorecon -R %{_sharedstatedir}/docker - fi -fi - -%postun -if [ $1 -eq 0 ]; then - %{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || : - if %{_sbindir}/selinuxenabled ; then - %{_sbindir}/load_policy - %relabel_files - fi -fi - -%files -%doc LICENSE -%defattr(-,root,root,0755) -%attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2 -%attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if - -%changelog -* Tue Dec 1 2015 Jessica Frazelle 1.9.1-1 -- add licence to rpm -- add selinux-policy and docker-engine-selinux rpm diff --git a/hack/make/.build-rpm/docker-engine.spec b/hack/make/.build-rpm/docker-engine.spec deleted file mode 100644 index 88836f4ca9..0000000000 --- 
a/hack/make/.build-rpm/docker-engine.spec +++ /dev/null @@ -1,242 +0,0 @@ -Name: docker-engine -Version: %{_version} -Release: %{_release}%{?dist} -Summary: The open-source application container engine -Group: Tools/Docker - -License: ASL 2.0 -Source: %{name}.tar.gz - -URL: https://dockerproject.org -Vendor: Docker -Packager: Docker - -# is_systemd conditional -%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210 -%global is_systemd 1 -%endif - -# required packages for build -# most are already in the container (see contrib/builder/rpm/ARCH/generate.sh) -# only require systemd on those systems -%if 0%{?is_systemd} -%if 0%{?suse_version} >= 1210 -BuildRequires: systemd-rpm-macros -%{?systemd_requires} -%else -BuildRequires: pkgconfig(systemd) -Requires: systemd-units -BuildRequires: pkgconfig(libsystemd-journal) -%endif -%else -Requires(post): chkconfig -Requires(preun): chkconfig -# This is for /sbin/service -Requires(preun): initscripts -%endif - -# required packages on install -Requires: /bin/sh -Requires: iptables -%if !0%{?suse_version} -Requires: libcgroup -%else -Requires: libcgroup1 -%endif -Requires: tar -Requires: xz -%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 -# Resolves: rhbz#1165615 -Requires: device-mapper-libs >= 1.02.90-1 -%endif -%if 0%{?oraclelinux} >= 6 -# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper -Requires: kernel-uek >= 4.1 -Requires: device-mapper >= 1.02.90-2 -%endif - -# docker-selinux conditional -%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 -%global with_selinux 1 -%endif - -# DWZ problem with multiple golang binary, see bug -# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12 -%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 -%global _dwz_low_mem_die_limit 0 -%endif - -# start if with_selinux -%if 0%{?with_selinux} -# Version of SELinux we were using -%if 0%{?fedora} == 20 -%global selinux_policyver 3.12.1-197 -%endif # fedora 20 -%if 0%{?fedora} == 21 -%global selinux_policyver 3.13.1-105 -%endif # fedora 21 -%if 0%{?fedora} >= 22 -%global selinux_policyver 3.13.1-128 -%endif # fedora 22 -%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 -%global selinux_policyver 3.13.1-23 -%endif # centos,oraclelinux 7 -%endif # with_selinux - -# RE: rhbz#1195804 - ensure min NVR for selinux-policy -%if 0%{?with_selinux} -Requires: selinux-policy >= %{selinux_policyver} -Requires(pre): %{name}-selinux >= %{version}-%{release} -%endif # with_selinux - -# conflicting packages -Conflicts: docker -Conflicts: docker-io -Conflicts: docker-engine-cs - -%description -Docker is an open source project to build, ship and run any application as a -lightweight container. - -Docker containers are both hardware-agnostic and platform-agnostic. This means -they can run anywhere, from your laptop to the largest EC2 compute instance and -everything in between - and they don't require you to use a particular -language, framework or packaging system. That makes them great building blocks -for deploying and scaling web apps, databases, and backend services without -depending on a particular stack or provider. 
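# A note on the "0%{?fedora} >= 21"-style guards used throughout this spec:
# %{?fedora} expands to the value of %fedora when rpmbuild defines it and to
# nothing otherwise, so the leading 0 keeps the comparison a valid integer on
# distros where the macro is missing. A minimal self-contained sketch of the
# same pattern (the package name here is illustrative, not from this spec):
#
#   %if 0%{?fedora} >= 22
#   Requires: illustrative-package
#   %endif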
- -%prep -%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6 -%setup -n %{name} -%else -%autosetup -n %{name} -%endif - -%build -export DOCKER_GITCOMMIT=%{_gitcommit} -./hack/make.sh dynbinary -# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here - -%check -./bundles/%{_origversion}/dynbinary-client/docker -v -./bundles/%{_origversion}/dynbinary-daemon/dockerd -v - -%install -# install binary -install -d $RPM_BUILD_ROOT/%{_bindir} -install -p -m 755 bundles/%{_origversion}/dynbinary-client/docker-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker -install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd -install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/docker-proxy-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/docker-proxy - -# install containerd -install -p -m 755 /usr/local/bin/containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd -install -p -m 755 /usr/local/bin/containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim -install -p -m 755 /usr/local/bin/ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr - -# install runc -install -p -m 755 /usr/local/sbin/runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc - -# install udev rules -install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d -install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules - -# add init scripts -install -d $RPM_BUILD_ROOT/etc/sysconfig -install -d $RPM_BUILD_ROOT/%{_initddir} - - -%if 0%{?is_systemd} -install -d $RPM_BUILD_ROOT/%{_unitdir} -install -p -m 644 contrib/init/systemd/docker.service.rpm $RPM_BUILD_ROOT/%{_unitdir}/docker.service -%else -install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker -install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker -%endif -# add bash, zsh, and fish completions -install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions -install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions -install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d -install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker -install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker -install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish - -# install manpages -install -d %{buildroot}%{_mandir}/man1 -install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1 -install -d %{buildroot}%{_mandir}/man5 -install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5 -install -d %{buildroot}%{_mandir}/man8 -install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8 - -# add vimfiles -install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc -install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect -install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax -install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt -install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim -install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim - -# add nano -install -d $RPM_BUILD_ROOT/usr/share/nano -install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc - -# list files owned by the 
package here -%files -%doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md -/%{_bindir}/docker -/%{_bindir}/dockerd -/%{_bindir}/docker-containerd -/%{_bindir}/docker-containerd-shim -/%{_bindir}/docker-containerd-ctr -/%{_bindir}/docker-proxy -/%{_bindir}/docker-runc -/%{_sysconfdir}/udev/rules.d/80-docker.rules -%if 0%{?is_systemd} -/%{_unitdir}/docker.service -%else -%config(noreplace,missingok) /etc/sysconfig/docker -/%{_initddir}/docker -%endif -/usr/share/bash-completion/completions/docker -/usr/share/zsh/vendor-completions/_docker -/usr/share/fish/vendor_completions.d/docker.fish -%doc -/%{_mandir}/man1/* -/%{_mandir}/man5/* -/%{_mandir}/man8/* -/usr/share/vim/vimfiles/doc/dockerfile.txt -/usr/share/vim/vimfiles/ftdetect/dockerfile.vim -/usr/share/vim/vimfiles/syntax/dockerfile.vim -/usr/share/nano/Dockerfile.nanorc - -%post -%if 0%{?is_systemd} -%systemd_post docker -%else -# This adds the proper /etc/rc*.d links for the script -/sbin/chkconfig --add docker -%endif -if ! getent group docker > /dev/null; then - groupadd --system docker -fi - -%preun -%if 0%{?is_systemd} -%systemd_preun docker -%else -if [ $1 -eq 0 ] ; then - /sbin/service docker stop >/dev/null 2>&1 - /sbin/chkconfig --del docker -fi -%endif - -%postun -%if 0%{?is_systemd} -%systemd_postun_with_restart docker -%else -if [ "$1" -ge "1" ] ; then - /sbin/service docker condrestart >/dev/null 2>&1 || : -fi -%endif - -%changelog diff --git a/hack/make/.detect-daemon-osarch b/hack/make/.detect-daemon-osarch deleted file mode 100644 index 571e802eb9..0000000000 --- a/hack/make/.detect-daemon-osarch +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash -set -e - -docker-version-osarch() { - local target="$1" # "Client" or "Server" - local fmtStr="{{.${target}.Os}}/{{.${target}.Arch}}" - if docker version -f "$fmtStr" 2>/dev/null; then - # if "docker version -f" works, let's just use that! - return - fi - docker version | awk ' - $1 ~ /^(Client|Server):$/ { section = 0 } - $1 == "'"$target"':" { section = 1; next } - section && $1 == "OS/Arch:" { print $2 } - - # old versions of Docker - $1 == "OS/Arch" && $2 == "('"${target,,}"'):" { print $3 } - ' -} - -# Retrieve OS/ARCH of docker daemon, eg. 
linux/amd64 -export DOCKER_ENGINE_OSARCH="$(docker-version-osarch 'Server')" -export DOCKER_ENGINE_GOOS="${DOCKER_ENGINE_OSARCH%/*}" -export DOCKER_ENGINE_GOARCH="${DOCKER_ENGINE_OSARCH##*/}" -DOCKER_ENGINE_GOARCH=${DOCKER_ENGINE_GOARCH:=amd64} - -# and the client, just in case -export DOCKER_CLIENT_OSARCH="$(docker-version-osarch 'Client')" -export DOCKER_CLIENT_GOOS="${DOCKER_CLIENT_OSARCH%/*}" -export DOCKER_CLIENT_GOARCH="${DOCKER_CLIENT_OSARCH##*/}" -DOCKER_CLIENT_GOARCH=${DOCKER_CLIENT_GOARCH:=amd64} - -# Retrieve the architecture used in contrib/builder/(deb|rpm)/$PACKAGE_ARCH/ -PACKAGE_ARCH='amd64' -case "${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" in - arm) - PACKAGE_ARCH='armhf' - ;; - arm64) - PACKAGE_ARCH='aarch64' - ;; - amd64|ppc64le|s390x) - PACKAGE_ARCH="${DOCKER_ENGINE_GOARCH:-$DOCKER_CLIENT_GOARCH}" - ;; - *) - echo >&2 "warning: not sure how to convert '$DOCKER_ENGINE_GOARCH' to a 'Docker' arch, assuming '$PACKAGE_ARCH'" - ;; -esac -export PACKAGE_ARCH - -DOCKERFILE='Dockerfile' -TEST_IMAGE_NAMESPACE= -case "$PACKAGE_ARCH" in - amd64) - case "${DOCKER_ENGINE_GOOS:-$DOCKER_CLIENT_GOOS}" in - windows) - DOCKERFILE='Dockerfile.windows' - ;; - esac - ;; - *) - DOCKERFILE="Dockerfile.$PACKAGE_ARCH" - TEST_IMAGE_NAMESPACE="$PACKAGE_ARCH" - ;; -esac -export DOCKERFILE TEST_IMAGE_NAMESPACE diff --git a/hack/make/.ensure-emptyfs b/hack/make/.ensure-emptyfs deleted file mode 100644 index e71a30ae81..0000000000 --- a/hack/make/.ensure-emptyfs +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -e - -if ! docker inspect emptyfs &> /dev/null; then - # let's build a "docker save" tarball for "emptyfs" - # see https://github.com/docker/docker/pull/5262 - # and also https://github.com/docker/docker/issues/4242 - dir="$DEST/emptyfs" - mkdir -p "$dir" - ( - cd "$dir" - echo '{"emptyfs":{"latest":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158"}}' > repositories - mkdir -p 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 - ( - cd 511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158 - echo '{"id":"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158","comment":"Imported from -","created":"2013-06-13T14:03:50.821769-07:00","container_config":{"Hostname":"","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"PortSpecs":null,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":null},"docker_version":"0.4.0","architecture":"x86_64","Size":0}' > json - echo '1.0' > VERSION - tar -cf layer.tar --files-from /dev/null - ) - ) - ( set -x; tar -cC "$dir" . | docker load ) - rm -rf "$dir" -fi diff --git a/hack/make/.ensure-frozen-images b/hack/make/.ensure-frozen-images deleted file mode 100644 index d0c55e50cc..0000000000 --- a/hack/make/.ensure-frozen-images +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/bash -set -e - -# image list should match what's in the Dockerfile (minus the explicit images IDs) -images=( - buildpack-deps:jessie - busybox:latest - debian:jessie - hello-world:latest -) - -if [ "$TEST_IMAGE_NAMESPACE" ]; then - for (( i = 0; i < ${#images[@]}; i++ )); do - images[$i]="$TEST_IMAGE_NAMESPACE/${images[$i]}" - done -fi - -if ! docker inspect "${images[@]}" &> /dev/null; then - hardCodedDir='/docker-frozen-images' - if [ -d "$hardCodedDir" ]; then - # Do not use a subshell for the following command. 
Windows to Linux CI - # runs bash 3.x so will not trap an error in a subshell. - # http://stackoverflow.com/questions/22630363/how-does-set-e-work-with-subshells - set -x; tar -cC "$hardCodedDir" . | docker load; set +x - else - dir="$DEST/frozen-images" - # extract the exact "RUN download-frozen-image-v2.sh" line from the Dockerfile itself for consistency - # NOTE: this will fail if either "curl" or "jq" is not installed or if the Dockerfile is not available/readable - awk ' - $1 == "RUN" && $2 == "./contrib/download-frozen-image-v2.sh" { - for (i = 2; i < NF; i++) - printf ( $i == "'"$hardCodedDir"'" ? "'"$dir"'" : $i ) " "; - print $NF; - if (/\\$/) { - inCont = 1; - next; - } - } - inCont { - print; - if (!/\\$/) { - inCont = 0; - } - } - ' "$DOCKERFILE" | sh -x - # Do not use a subshell for the following command. Windows to Linux CI - # runs bash 3.x so will not trap an error in a subshell. - # http://stackoverflow.com/questions/22630363/how-does-set-e-work-with-subshells - set -x; tar -cC "$dir" . | docker load; set +x - fi -fi - -if [ "$TEST_IMAGE_NAMESPACE" ]; then - for image in "${images[@]}"; do - target="${image#$TEST_IMAGE_NAMESPACE/}" - if [ "$target" != "$image" ]; then - # tag images to ensure that all integrations work with the defined image names - docker tag "$image" "$target" - # then remove original tags as these cause problems with later tests (e.g., TestInspectApiImageResponse) - docker rmi "$image" - fi - done -fi - -# explicitly rename "hello-world:latest" to ":frozen" for the test that uses it -docker tag hello-world:latest hello-world:frozen -docker rmi hello-world:latest diff --git a/hack/make/.ensure-frozen-images-windows b/hack/make/.ensure-frozen-images-windows deleted file mode 100644 index 713ffaf06e..0000000000 --- a/hack/make/.ensure-frozen-images-windows +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -set -e - -# This script sets up the required images for Windows to Windows CI - -# Tag (microsoft/)windowsservercore as latest -set +e -! BUILD=$(docker images | grep windowsservercore | grep -v latest | awk '{print $2}') -if [ -z $BUILD ]; then - echo "ERROR: Could not find windowsservercore images" - exit 1 -fi - -# Get the name. Around 2016 6D TP5, these have the microsoft/ prefix, hence cater for both. -! IMAGENAME=$(docker images | grep windowsservercore | grep -v latest | awk '{print $1}') -if [ -z $IMAGENAME ]; then - echo "ERROR: Could not find windowsservercore image" - exit 1 -fi - -! LATESTCOUNT=$(docker images | grep windowsservercore | grep -v $BUILD | wc -l) -if [ $LATESTCOUNT -ne 1 ]; then - set -e - docker tag $IMAGENAME:$BUILD windowsservercore:latest - echo "INFO: Tagged $IMAGENAME:$BUILD as windowsservercore:latest" -fi - -# Busybox (requires windowsservercore) -if [ -z "$(docker images | grep busybox)" ]; then - echo "INFO: Building busybox" - docker build -t busybox https://raw.githubusercontent.com/jhowardmsft/busybox/master/Dockerfile -fi \ No newline at end of file diff --git a/hack/make/.ensure-httpserver b/hack/make/.ensure-httpserver deleted file mode 100644 index 3fc84b2f26..0000000000 --- a/hack/make/.ensure-httpserver +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -set -e - -# Build a Go static web server on top of the busybox image -# and compile it for the target daemon - -dir="$DEST/httpserver" -mkdir -p "$dir" -( - cd "$dir" - GOOS=${DOCKER_ENGINE_GOOS:="linux"} GOARCH=${DOCKER_ENGINE_GOARCH:="amd64"} CGO_ENABLED=0 go build -o httpserver github.com/docker/docker/contrib/httpserver - cp ../../../../contrib/httpserver/Dockerfile .
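# (A note on the go build invocation above: GOOS/GOARCH select the daemon's
# platform rather than the build host's, and CGO_ENABLED=0 forces a pure-Go,
# statically linked binary that can run in a minimal image with no libc.
# A standalone sketch of the same cross-compilation idiom, with an
# illustrative output name:
#
#   GOOS=linux GOARCH=amd64 CGO_ENABLED=0 go build -o httpserver-linux-amd64 .
# )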
- docker build -qt httpserver . > /dev/null -) -rm -rf "$dir" diff --git a/hack/make/.ensure-nnp-test b/hack/make/.ensure-nnp-test deleted file mode 100644 index 26b11b9a5c..0000000000 --- a/hack/make/.ensure-nnp-test +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -set -e - -# Build a C binary for testing no-new-privileges -# and compile it for target daemon -if [ "$DOCKER_ENGINE_GOOS" = "linux" ]; then - if [ "$DOCKER_ENGINE_OSARCH" = "$DOCKER_CLIENT_OSARCH" ]; then - tmpdir=$(mktemp -d) - gcc -g -Wall -static contrib/nnp-test/nnp-test.c -o "${tmpdir}/nnp-test" - - dockerfile="${tmpdir}/Dockerfile" - cat <<-EOF > "$dockerfile" - FROM debian:jessie - COPY . /usr/bin/ - RUN chmod +s /usr/bin/nnp-test - EOF - docker build --force-rm ${DOCKER_BUILD_ARGS} -qt nnp-test "${tmpdir}" > /dev/null - rm -rf "${tmpdir}" - else - docker build ${DOCKER_BUILD_ARGS} -qt nnp-test contrib/nnp-test > /dev/null - fi -fi diff --git a/hack/make/.ensure-syscall-test b/hack/make/.ensure-syscall-test deleted file mode 100644 index 376fef1cf0..0000000000 --- a/hack/make/.ensure-syscall-test +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash -set -e - -# Build a C binary for cloning a userns for seccomp tests -# and compile it for target daemon -if [ "$DOCKER_ENGINE_GOOS" = "linux" ]; then - if [ "$DOCKER_ENGINE_OSARCH" = "$DOCKER_CLIENT_OSARCH" ]; then - tmpdir=$(mktemp -d) - gcc -g -Wall -static contrib/syscall-test/userns.c -o "${tmpdir}/userns-test" - gcc -g -Wall -static contrib/syscall-test/ns.c -o "${tmpdir}/ns-test" - gcc -g -Wall -static contrib/syscall-test/acct.c -o "${tmpdir}/acct-test" - - dockerfile="${tmpdir}/Dockerfile" - cat <<-EOF > "$dockerfile" - FROM debian:jessie - COPY . /usr/bin/ - EOF - docker build --force-rm ${DOCKER_BUILD_ARGS} -qt syscall-test "${tmpdir}" > /dev/null - rm -rf "${tmpdir}" - else - docker build ${DOCKER_BUILD_ARGS} -qt syscall-test contrib/syscall-test > /dev/null - fi -fi diff --git a/hack/make/.go-autogen b/hack/make/.go-autogen deleted file mode 100644 index 582cd7cff7..0000000000 --- a/hack/make/.go-autogen +++ /dev/null @@ -1,66 +0,0 @@ -#!/bin/bash - -rm -rf autogen - -cat > dockerversion/version_autogen.go < /dev/null; then - echo >&2 'error: binary-client or dynbinary-client must be run before .integration-daemon-start' - false -fi - -# This is a temporary hack for split-binary mode. It can be removed once -# https://github.com/docker/docker/pull/22134 is merged into docker master -if [ "$(go env GOOS)" = 'windows' ]; then - return -fi - -if [ -z "$DOCKER_TEST_HOST" ]; then - if docker version &> /dev/null; then - echo >&2 'skipping daemon start, since daemon appears to be already started' - return - fi -fi - -if ! 
command -v dockerd &> /dev/null; then - echo >&2 'error: binary-daemon or dynbinary-daemon must be run before .integration-daemon-start' - false -fi - -# intentionally open a couple bogus file descriptors to help test that they get scrubbed in containers -exec 41>&1 42>&2 - -export DOCKER_GRAPHDRIVER=${DOCKER_GRAPHDRIVER:-vfs} -export DOCKER_USERLANDPROXY=${DOCKER_USERLANDPROXY:-true} - -# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" -storage_params="" -if [ -n "$DOCKER_STORAGE_OPTS" ]; then - IFS=',' - for i in ${DOCKER_STORAGE_OPTS}; do - storage_params="--storage-opt $i $storage_params" - done - unset IFS -fi - -# example usage: DOCKER_STORAGE_OPTS="dm.basesize=20G,dm.loopdatasize=200G" -extra_params="" -if [ "$DOCKER_REMAP_ROOT" ]; then - extra_params="--userns-remap $DOCKER_REMAP_ROOT" -fi - -if [ -z "$DOCKER_TEST_HOST" ]; then - # Start apparmor if it is enabled - if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then - # reset container variable so apparmor profile is applied to process - # see https://github.com/docker/libcontainer/blob/master/apparmor/apparmor.go#L16 - export container="" - ( - set -x - /etc/init.d/apparmor start - ) - fi - - export DOCKER_HOST="unix://$(cd "$DEST" && pwd)/docker.sock" # "pwd" tricks to make sure $DEST is an absolute path, not a relative one - ( set -x; exec \ - dockerd --debug \ - --host "$DOCKER_HOST" \ - --storage-driver "$DOCKER_GRAPHDRIVER" \ - --pidfile "$DEST/docker.pid" \ - --userland-proxy="$DOCKER_USERLANDPROXY" \ - $storage_params \ - $extra_params \ - &> "$DEST/docker.log" - ) & - # make sure that if the script exits unexpectedly, we stop this daemon we just started - trap 'bundle .integration-daemon-stop' EXIT -else - export DOCKER_HOST="$DOCKER_TEST_HOST" -fi - -# give it a little time to come up so it's "ready" -tries=60 -echo "INFO: Waiting for daemon to start..." -while ! docker version &> /dev/null; do - (( tries-- )) - if [ $tries -le 0 ]; then - printf "\n" - if [ -z "$DOCKER_HOST" ]; then - echo >&2 "error: daemon failed to start" - echo >&2 " check $DEST/docker.log for details" - else - echo >&2 "error: daemon at $DOCKER_HOST fails to 'docker version':" - docker version >&2 || true - # Additional Windows CI debugging as this is a common error as of - # January 2016 - if [ "$(go env GOOS)" = 'windows' ]; then - echo >&2 "Container log below:" - echo >&2 "---" - # Important - use the docker on the CI host, not the one built locally - # which is currently in our path. - ! /c/bin/docker -H=$MAIN_DOCKER_HOST logs docker-$COMMITHASH - echo >&2 "---" - fi - fi - false - fi - printf "." - sleep 2 -done -printf "\n" diff --git a/hack/make/.integration-daemon-stop b/hack/make/.integration-daemon-stop deleted file mode 100644 index 03c1b14689..0000000000 --- a/hack/make/.integration-daemon-stop +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -if [ ! "$(go env GOOS)" = 'windows' ]; then - trap - EXIT # reset EXIT trap applied in .integration-daemon-start - - for pidFile in $(find "$DEST" -name docker.pid); do - pid=$(set -x; cat "$pidFile") - ( set -x; kill "$pid" ) - if ! 
wait "$pid"; then - echo >&2 "warning: PID $pid from $pidFile had a nonzero exit code" - fi - done - - if [ -z "$DOCKER_TEST_HOST" ]; then - # Stop apparmor if it is enabled - if [ -e "/sys/module/apparmor/parameters/enabled" ] && [ "$(cat /sys/module/apparmor/parameters/enabled)" == "Y" ]; then - ( - set -x - /etc/init.d/apparmor stop - ) - fi - fi -else - # Note this script is not actionable on Windows to Linux CI. Instead the - # DIND daemon under test is torn down by the Jenkins tear-down script - echo "INFO: Not stopping daemon on Windows CI" -fi diff --git a/hack/make/.resources-windows/common.rc b/hack/make/.resources-windows/common.rc deleted file mode 100644 index 000fb35367..0000000000 --- a/hack/make/.resources-windows/common.rc +++ /dev/null @@ -1,38 +0,0 @@ -// Application icon -1 ICON "docker.ico" - -// Windows executable manifest -1 24 /* RT_MANIFEST */ "docker.exe.manifest" - -// Version information -1 VERSIONINFO - -#ifdef DOCKER_VERSION_QUAD -FILEVERSION DOCKER_VERSION_QUAD -PRODUCTVERSION DOCKER_VERSION_QUAD -#endif - -BEGIN - BLOCK "StringFileInfo" - BEGIN - BLOCK "000004B0" - BEGIN - VALUE "ProductName", DOCKER_NAME - -#ifdef DOCKER_VERSION - VALUE "FileVersion", DOCKER_VERSION - VALUE "ProductVersion", DOCKER_VERSION -#endif - -#ifdef DOCKER_COMMIT - VALUE "OriginalFileName", DOCKER_COMMIT -#endif - - END - END - - BLOCK "VarFileInfo" - BEGIN - VALUE "Translation", 0x0000, 0x04B0 - END -END diff --git a/hack/make/.resources-windows/docker.exe.manifest b/hack/make/.resources-windows/docker.exe.manifest deleted file mode 100644 index 674bc9422b..0000000000 --- a/hack/make/.resources-windows/docker.exe.manifest +++ /dev/null @@ -1,18 +0,0 @@ - - - Docker - - - - - - - - - - - - - - - \ No newline at end of file diff --git a/hack/make/.resources-windows/docker.ico b/hack/make/.resources-windows/docker.ico deleted file mode 100644 index c6506ec8dbd8e295d98a084412e9d2c358a6ba39..0000000000000000000000000000000000000000 GIT binary patch [370070 bytes of base85-encoded icon data omitted]
z-7uhTq$2*?k`$qKZeOcbO?yzyMGPM?AW(((cWKP;=Th_fy42j>E;XlDMvR|TFN!9A zr|y{8(&)iv=)t3u?4+-BY#=eBL(RACjMp{+-~TqoaIpD>wD;pcW-O09om->S-kmGefjw*0!M$tL(ZeZf+oqq?;=z`R8WE_XhI_JM$uN&177z8P$iW`9 z@FzF)1iH}&v0+cI{Tp9V1Yclu!Ndw$4-g-OVw~8Jx6Zf#I#J<$!IoXII=;u4ZUE-; z=;PD$xDLLYv3mw!JkpE|OC<4L4xhxV>XUoo~G)4CzcC3QDJ?-`nhlE zdryp>-Xtk@*6_qlQ@SN?7}ptdb>AcQ`!-?2nC}xePV9_%n;u(}7xdk~Giv7k9SdV2 z=fs1%m;Zin_X>4r-&%EO?^^Zy(XA?B$%pBS23l!R!z<-2d)p8A>V@$cOQ0Kzhj`S2 zfo|;^=nJR|MT{LX^Z;_7jd8&h)C0r?y6hIqX`6P5=(B7?cV^@r+d1~v2) z1;h{30j&##ix*6ss37-U7%%=G?+aW6U=G1AAI|$O5BY`meMSznE`c3{&F<-n#uzPj zPEXIaIlVmlfP=^%UN|t|VAP1p$ADc+hF6YRGOW_ZrNb+&iW(j;3Vr<7kprxDv);FA zO?l9*?Puk%i!Xo=4@J3(9OOE+V1T6-{Aj7g+SZ{HIoN!D_fr>mEc=N0h$T(`P{=re zz91d_K^o$KHz?Pc-&+BT2bO;w2=3zT^C0`7Hg{(HY~m*Ecj<$3mW|WKEANJ#Pg^#k z(gRV$1H&Q*S!&&+`lnK({-+YdA4V*A7JRwO>lccs18k4+36by#9A7|&h4lw|oWS!j zhuD=m5Cm@U{Hfgf0oJkz5dFx>YjfzR;{kDp?)6O zMj;#d8so_+E@_2AvtsxF;gRbe6H)aJPlsf7bEFHnTBL0&q*a-#>FZ=yU4 z=nuR)(90S`9hmW5`5GGzdBfYUK@S!=ZJ!)5uFem?xgA5ZT#;E^1*H$GZ zty6y;T%wkYI9DwhYL5*HVc)r3*Iya+V9{Xjc%gtgz&T|4f$ymUIJaEP4`9wi-v{Qu zZnbS+&xzT3pkwMn_`in0yq{}Jj9Lp zfYU2`r%v(S7$b7bm`@#`ZE$RO0&()~nm5~im@jKT6US_sX$2zgi?`*^cwhICk?@nn zD}VOGoCD-D0HkM7_P%<+=MDMv1)3l5l@c|`x{NYE)8oQ+58dJ_3`Tx+f%g6L`?zl6 zy;HX-d)h*A^!rYZOKYBUQ1k(u)8M=TpEvRTLv0%{w&0VVWI3K!PNw|3zImzV?dO}h z9&g*KP;$sgcC5Q3f9U(Q@5g%Ccd=Gd0VkPa{Vn{oOD!AWR!fKH6Fe5s=h%5}=llG; zuH&q;gz9D6myL8|t^&`ZF=s%VuzB&9YOWUcwO*r)aO@e02cbJ2;BufIPy@IKI2Wi2 zT;K=Xe_s2xx19Ux)7MmG+h=wS96hUxM+J59oDAxU6qrA-64vPeKV?S*_Aej?dsA-D zA6Qw<^TB+atB>VzHn{DtPTzgb^CD1>vwr4zyt56y=lk1s*2(smw)nO?d=TzsKqVF2 z-OB~}mx-M!X!gWe&$YYhJa9|ynHq50?8{9iJmx}xGNdf8 z1Oh3a+sz?V9WK8RX6OQ;)Pzk3QH^J<_J7dJHMIpKsq>z4%0P_1`DGcmermaqi=7 zTV@=~$=XIWB{g0!5)J?pAJg>=XvrZoS4C>^$ zti#mDlx@)0LHXmj_M@B=s=c=BQIvCllpGPDuE zx#>K=-GjFrd$>*OzhF}toRU`?X5T4nlY7d9vT(}f2)20~%R+#Ufm?toxyjzQ49Bzo zsSk7lSU>AzJ4_px{spW6hC%M1KmKs5SH^U%wrhOXOV!9uSETp(q_+C%jb`fQXIiPY z58SHm`B!Vz_WoPJ)vbE_-n(1lzFVnp-n>bT`t~|Cd*Ib-+32Qf!*7jM?8HVYesUuf zH>r_|p3qpW`L&5!JOb-H`!`dgzq?NT@cxbJt>-C+TNLYg;GWjt_*RW}58Vpc+^Y9K zg7m%rwopHPeLc=a{iB+yji`S!>Sf&<$2V5Xam~;luU11kT&F&IskyG>UbL0xvMyaO z(l+h$)F(4m{vefQiB4qAhYTNLDt?SH5T@4xNTOHbdZ{`b_4>)YPnx@Y^gtth+y0?z?2 z0iObcfF-~mfKwJmHpj5P2hbEK@Y)le1F#>^@dxz+`C7mRz+5i|`8NQ)9ykm3j#J3f)!@ z-ghf><7PFfM>Dl)d?U3tw7xoqHn1%`mu)-3x*hFiU8a8Cn`w8_)GO84&eh<1Zq#z8 zU+ML69X0pI^G?P5=2ly$TXV?!!*^$YMuhv)TqC^;XI57S<}}puj zZ>R=*aXsw0vHE*aU6j?=Ta#7> z+A)pQpZ5N}IL@5ouYPB}ye?(-=Yo1FWuGzBpWIOE#BRu@F!l2sp38QoKwnq>(pde4y3Mr; zQ>XL(c>dndM(|%X)sC3~3jEPtG{9M!&EHz2dK`c>L*7OVeQDk>$LBXaQ+4ij+?&6)vOM#cEV@ab0)1#ey-+Dr&-SMS_fl5ivqWvX zIP?K+bOG!G8UYQ7?XxbsVQ0`4H}4F({3aVW?+vMa)#0#4^$vwLtbaHRuv2amM^Lse zq;^vrfAh9!)f#P`a#_7?Q>)eAHlOUCT zpuw)-D{k5`vwAb0XX<2qzIAS&UcCvgzdxjIW8GFq-K@`Chh=+$Yc}Jt_WharbfzuQ zzJKivt@H5iklLRj9kVyIZVYsVzQFcn4d@?ELI={leSypL0eW5-Tm}H-mv93ffcuDI z@9&F4AAIZBIK}HHR!y|(ubpJI0q(YOk0bvt|J&#(AvASr_X!`ct=G*_qjrVwT=#mEiUNP2czG1Q}3Fj@h!Fo-dyw0+XlU<4PhgsDR zN7nro=fwj{f#m?}WnE0UZygZaxxMAWdGmqQJRe})tkdL|A|DHMqI}|}RjC9!$Ngpl zWL?7pyAObz1JLJZlM~8Y?g_0k1wH_M0%F1`IwsU(C~&z3hyYdrD}hL0GSCHR59oIc zjE-QtRQdz(r0*MPU*Kcl9pEeAL*P9-{S4^`>nB+QRs5Z2Lk?qr4}cGW_w4jjd*25K z7S-zZdsKtxmyfk>+c?Gb6V83##-~U>a%_Ky^3e-NTD1=^uJ;wrCq4l_0^YarHPUx& z42_*0coxq81$fJ@hwc0vcpG4QS$_wD+#@Z}8V-z&+=y8M4$ZsHjWM$J9lL_AxE-;= zCin!EIJpx00_uR#1IC0-nEk(F|4iU6TL*L>!F*}@fgdCP{>Xm=P)6U`>6b{q0H_aN zLH>hO;=igu7UO|00LuJxfO6yZmrNo5{*eC*kbi5)cO=fGd`w*?rLKL6^05m>TD2g5 z%DJ;$2kT^A)P?VW&j9Wl89mig8Rv}yC}&gG*T}Qoybjy?EzprXBQ22powC^-Qp*F| z*XO59t6CjCVE3NT+L#aWPzN}cW<21FllDI9gwX}+f&OL*(rkXldCk+E2R}~vlRL_# zlbw=J+CFvQ3oZYI|D*l?X3OMbq(%pfj6Q?>``YrS{SU{vMmDU=)JePj4CP}Wf7(Cg 
zPgy(lihiIIKs_Lg{Kw+l5A3>FH}!z+W}a>BNRUgUg_b|hKN8Vg`vCL_%$ajEjs8urHvD z_53PQr|eDsZ+k!IkoEWNkm_srZ^7x=>yUpxmzJ|HxnMfV-XZ**$)5OoFKQ(>oXXyV2tN4Gi|7ZNi z{+RM-n$hd;WX#tG? z#{i7=DSOsSz18(0WqiT-r4#u?TB!d2;M^u&-yd3MTn_Tr`hdCAH)4LX{)Kp8BIKD2 z7&|vIXLYTTG59CzqQjAO=r?wt4&ORsu%G)9 zQ8v?Vb12#q1F&7rb~4X>oOAftKDNN}-w{+z`{jw9Ef;+MrmVi-%nj=21`4s@e|epQ zbFbpuUnPKZtp5NW1RetJ1UUDa@qC`gm$o1Hx)5jx)boS7*w+BKnC(v6{>a%X<-V%G z`Hqx%jr7#}t18ve-Jpu)n%K*A{**qJFREgn#`vDDb6KZ4@p=Hxzd)tlZ|Y(FRqgGn zD*4_E)Sf5w_ngp zUSsR@3;cdB!Fhe?0CnVy`+R4c3YySKriH9W_yt@ z(YOhHzLx*4keUrJ1~76rDRn^SF;8@C=d9{=*rr`US8{&U`5ZvY+LpO4<8h_+2Rm{B z?p$E{3HpfzzyyH(@I+t|Fxk#egr5&zGr@Z9ujNf!!EZ;R-<}9e044*I?Cr5g$Dw?M z+R)<4brbcR(`?i+-qd5~SvU8we*N49X>RQ_Wrfxe%<*XX?+LBd0I|N2yGbd3-S@*c zL{Of`>^XwcD%;{{7dY2^BE1wC4RB8P1HjA?GhXEO2ata^*#Gm8f9nGaYYl{)&0H>H zMa~(U?GI2sO0B!GHso*S8BJ`+`U%E%?*g2cW-M7~x#pJ#J{#e&=m+XS#wPC1CV%+; z|M57C6$%_<@Vv70GrS|c1mO6K@}jI5Z*%-*(k~$Y{*eC*w){sx-W`t)%7*D%((C(`1n0O{lA_Af?jM$o>E!I zBRlQm%aS|`+E&9mQrbWLKF3~Wj7ZsM%jX&r!^m)i0-egJ7buA#!GvS9lmwaauo$9u@n58P`$ zkX8u!@0;CF`*isBUbg(XF60FBka-VshGzj4LiW5@ANkY&(>`gFw0qjDNk51E_w(`p zoFAg^H~oN_+xy(d{=fIBi~i-b`+wdq>|4tg^T4;$-XZtDwd_OdO#Sss%k|fy+e*A2 zVmnI94{~tY|IgT_Z|0mEK{-(un0^fT_jJU6gCTd$;}Pr!7y~lT^#dP4{-f>qk1+(F z+tc^6Zi4Y7pGh-iEXcX!)5iOW$bafo4g4!+}s>whuzEy%gjB ze;!|PRXxaf5fBE<_5t_J!JO`FtSLDcb2=~JJgx}~23T*HBjrBMIrJqDNKdQn;aQ)) z9{PJX`i0Ya?K3TRxxQs#4CvzeK+Fe-?7jBKIX2E05*hPt&TZJg>(6CPT=4I1*tvnr zk>@^c>*pd!-vKrN3xUNBaE&SVtp@b-C!`{Wa$=Z}r5AVv!FmE* z7r=eLmXq6-+prmcOpW}1K)wUOu_EUN&0I0}ajuBqT%y>1Ino};)5xFp{w=_H0*(`z zQV%)-)CFP`Ao8ym@;7rtBL8wEZ{J&iJ?CWqB7a}9%Hg(;{2AwQ9Vlh*`~C;x0`dRl zOa~xSWB-irDR=hyjP>|l8P^F=A2>E7h62+6mm?j3JdON0&YKTR1*Y3zeg-fFm<3Gb zts@m3C?`4qS?W1kToVm?S#|i0r+v;ErV0HM_HrvO&+Zh$4dbd5M+}^g#wo(i95gqIu9bMiU z?y$ElOMkU1w$k?`T(QY2j^490OZ8sO?9rOX2+jsbE*Y>-u+0$(z z>6x|bT}oNk>kR+64x@>Yna6t+&N$@UPC*Ts+uj?fS*o z$6eNc&T>KEsooP#rgeDtP#VgPdu4d?>kjW8IMCq^+>4fB#`bY9#g1*=!i;USgcq>4 zJMd2QHtN^)rB>I+^ZMty(74pvEYAF8Yrm9c%;-bA^s?}*v9uXi$Y`&_Rq%#Lj7DWCG=KHDeRUdQimw%741=(cZtzSm33 zXuIPgk}Y`#n!wKQ1UdpPs}BC+cc=asR>15Y zR=}K|mS#__I{_;~?awyeo^zf+>V+f&OWeb<$1@CCR7=GUC#G8 z<2%buKIZi*J^$M`FP>83=Xkbl^WJyNq7I&<=*ib7#m;;ud2{gSq}W-DljFjowOeuy*7{xZjV!#U|)MI`*rSg!w8paiQ9hyhN=EdQr{p5vW4DS}Lr&e}Ip} zKsfICW7tANbFU#?wN8|(Zx$s*Pjl}&{Fl~sjJ0mtwHQ zihv;9LY1P%20P#X%)JW;SfkIp_p!g<|DM^I*_m_Bl-)ZsXGYe|v!KawHeBwsBhGEI zQUimioFQ{{R(jOU%yW^qKp}f%b$;G4>4^6yEYx0Lk?XZs_|GB>Tc?&0vM?ESUgr1F1?fZ9LqL?H zU_^R=4xUNB@Voa{T>kVE+XtRI<}xw)pw-Bfgr#FMPwyO6oFDz|)v|MQuT|!5xLTQ; z@P|k5>v66EbRcrMNq&y4N_I$tL{QR4%mxM5MF`59bV21_2XdVtzcSQ?k)#8}Q=!jn z@V=q2r*Q?LAEI>8Ke>e13;BU|=*L$J+3~JIPQoT3HEyHu$H5IkV63ySF=3PH3;3@& zx~nxy_P4A~+NKK~h-&#|J=CG357JHCCIyRHUt1^Y=zwG=J@9@4JS-tz@QzT%I6ZwG zAV1#bp!@gLJc&}c1J!3Ol%*ExH&N{x#G{@c+o@gcw__rkG5s4B?>UksZ4p?)7R0S1 zcYU{9FXA`fQx7CR^_dJjIm$X9`ZTYd^VB*~fs`aC3AMu-&EW%N&-zKO(xUJEFJ;{xC)xbpI8?K-UAKvAlHK_q=RjX3{)S#|AzSQPhM$Mj=Hi@{D-|FG<|K1qhJ?- z_ZPlR@BF?+!G7PWMv}XN9p2VBe&2#Nk8(fs5b-G;IZaM+&(j7yvK{GzqJLqL!~+0c zr0}(Z1bXYBlQ59viE?}DMn?W%qze4`EvLKwyO*E~b_ zgBjRe-&c`K?qP|M0!n|1tWLLB^v$d8G?@ zHvu{UZAcD)e+qS?5|&B<3>d3w}cD9B-jG>wv)W{f(;s*ws^7@#BlqzSVuCf?#v0BGOE{8f_|F(}3LMSBS7P z`?^VP)_tY1BL_*lSH2+S`dLWjb>dT8y3g~s;OUJUHAr$<+)p~aw>wX}Zd^*AryXU6 zvx8099(xmJHO)eba&5s<_9{wqeT0t0Zzj&u0{;XvU^{Re(5Eoa2C~ck2;==Is3B1E zjdkWXEyHvBg=^^^A}b2%3?9tb)#xtlYE&0i8g9x8LvWsLJrv=-B+QIeM47T{h@0bQ z&gm7zr?9&5>HbQ%8M_{%jz?)%;yHT0eq8yv(r~ltt5GI*E(hvWp=|DCcnU1tLs&<$ z_&c`N-n+6rmlJJRN%Cl9B=>#)Lm*u4Kb1DqyKx<4c#3V1c0TA$oZEY&FvR?JVX)bs zMIq)_ibKt>lxTqSJ+Bvsn3V%pz&D{3{9Z*pxLOu&URo!9InIOqlj3ROmQnnIV6*Gc zLj}bnsM9s8KwO*|Y>KmEImj)MUJ!>Bg<7y|@0L|*`$>MW1bqI3WWDGwh_>HbDBYJC 
zYHwZMNil0e@B7M4&&U_)Kz0+&@KlOOw?l@9*b(Q~nG^*Z8|C`9eIeafe`JQQ{xD#8 zx=-_wxdCnal|&i8TO8GSWN~EYVW6W+B8^|q^fefi=G}a>Chmv~AN>ysLOb>?iRwHY zaR|fU9#s@(^kSN~{yT`r(@pnnJ}T3<<&dJ_&YcT_I&}d|h~iAmo<$)h?*i7)#ZjE8 zDCPPZ+(8{B9n2!0!LJTc`NF*dxJxcM>`~|`FZH(u=n!BxumNz!*^@g`eYM9{B$)n; z^C&lA58{o$j`D;q^G5PztXuc59)Wc%nd=%5TbI0soXfy00sFqqN+eQX`jvnYQ>hHrCz)*jC1 zeH&->tpMn}-Z`f{g=OdbbiRcAi*Ozel(LH=|Z(uT0R$g!G*vM%JQ&oipxQr6m*^{&QVq1Om!vNo2od> zoVU@!kS?#_zB=oG9_c^=ztkTPWcPl!n``v&*!2K>@jeptp8(0v&lraMOVj*xsQl?X zYC3;{U=10kBWw!dk^CeZ$-e~hk3krnOGNU|gZ#FTe;mT*o;e`W`NgDH(gUU6D8B{4 zow>$4*aTEGVNVHxfho%3%t80UMg)WoB>>wq_iKhTGI zSJldI>Y$PT22s2h7En5<3tT_srVBcF?EX)kLhUb=2cXn|=L2^Kfa)x@-zBKW%kf_L z0p&amZGz68Sc1OcC+K16`GDr*(Jt%}2lYZAP@VsTasbr^&u!0@6o!~^I<@s9UC&-Zd*$2KVU%A!!y zZS;I;c=v~DKNR;#T;Nxhza`w805`xD*oFS=2lVq){<{HJzzu!ChIIdC@1y;%M_XS7 z*aI$z`!?|d`cU#yT$DG@8@hNA-v<2sPX5p8g3P)=bD$ZZk2CC5*OR&_(f*r5_GW-S z3lN! zUZC$F-v5C{~zdY0H50d{a-GBxjnRooI?Q8 z17RreyxdkEy=FB%@!zBYlr{B-crV_CXWoDRH|em6*Cq`#X`o31O&Vy@K$8ZVH1IeY zc=|gL{M8()G~0>fXf|H4Rc`@9+pE|hZQAI>nDW3bAGGmFIsRVjsBd}19MTHzWg1KO zHk!R&2Ftsg7~kUl#)H*+z#t)hPYKcmDz%DhWfN<|q-=@r2m+eTVhuF@nuzg;dpjkY z*@OO>uBJD#ve^l`*U<7fW8BrbBxCiBlwFS+tM8xcFYOc2510Vd>=R**o2IKt15Fxe z(m<02nl#X)fhG+!Y2b-yAmii?G5z>v@p#}k5%v>*`|(cL@Ns+2-0Z`uOGSAN`1Q0F zM6$!c+IViu`FF6r(thC^1u%p}q8^dF{IZ0WmM>ZD5Y&N+Zx?0G9UBDl(%kT%* zVm^(BkJ`;;r$_L)>+{K;WFzJQS#FVSU{-o0%gsK*PDU+cp=-5SFzn)B z4_MdENp_R%-(lFd%z-R?Eo(5^3CU4cuBYLSbMMXbIDgj1ChJ@{c#dFM=Llg{`FY20 zo;kR<3if=};p-Gq_|Ig|NOp}JRVWLx#~>Rel7rip#pPvLRfV}Buup2i;>aEYwn~Y0ZSkB$*e8N-wOu0d zF58@OIly-*%40m~oNR5LvnZ{1&F8eXa61uj=j*j@Ja^ZGkkYihdtDyDU0zrII{S@?~ z=G{T)jeKs9jc@Em5jIf@*ayMp3wA(puHucPUxbw4Z9{HtSUs5?-DAZ46JZZjWAg+% zBH8{c0XBhTA6Z8Z;-A|*L52v}*zt8Pu*G@gx>RRFy-k!{`l) zn}^(0u;Y@URh_U!2R1kY^K~1_+&3>{$2~20J!)XjMD|dCY_~+VjgheDqjU)*1KBu2 zu8`U=$>p#$Tp`l z(BjKcmYt0HjOpM!r`?3T);eU{Q%!b1kuEwc0Jc8dK8S3KG;V(f9TvP^1-m017UiPN zg6hPPZHXc@c0^=PM0QE!FWV=1*lDvp)>_PaxmK0Gy^6Wad2VcL)34fnG3ur6;D>BQ zh%d4g>Y%xASyvAD=>e|+UI2rw(UJH5^``amndbh#SQ)~ur+3waPX|a}jvgR6E_gxm zTH8|!cJ3|(0sGhVlr}7SL7FwmQi2^2>|q8-tLF5TJP{{kLwCv7p$F_<`be-3l0JEV zpfm+^&Dt z0Zs#>fVTDc=j{dlB;WMqKXlcn@H}5(Ilw9+JKs6GuRA-wyBlm(Okv|fAX^tR^*!uv zVDG_v9D1`v_a59&9gpIe!hXe+rF(T{dsg*fSw0rrhK2mEY0@C~pRuOwoTo`u#Kx94 zv%Ca$(O+$^aM)dsw?o)3jR9}35nrIZZy~pQk)P~;a<_^q`ziosK1F~VV+!ov|(NO8)-O|RTMXj)tz zVP5z78)WYS+&~$M{Ol7CZi{f4==R?M5+OVz7?~J6sO#WKj5czCBoOi286<`5WW&=H~i~POX-ZGe6##osjxPY>^2D8HV5l9|3&NdF9!TWUZYQPQ~^XoWquz<2IRx#1Z@Ls z1f0O98`k8{yn4uD3D%&lz*_uuC9y`|VNIshNe^))*5<8E_Et^Rgs-ej;NerjKi1-k z&PZbm@D1?+TBHBc@NV3VHMc>u?wDlA^=-Kodh`LWXMrQYCZGj93;Mt^pm=mqK0ptk z5AZ^=x56khQ2#q%{`o-t#h^nkM6{ihAO75e3*l`SgT8~c#1rAa09bN9u=x)Z;%*&A;h!=w1spAN(28H=ueH zz1Ni!x}q}R-_xMa0am~)$g()qSBu)i9Nr${O@2aqo&x{5Xb)uDM7AqqC@oMGo}qLa zlznMfSDrR>9YFk}PkNi4fejD$C-+~yC^N3@3} z)E-bi>!JTIC~vqM)jx&Fe4-tX1i1Y<_XByt zw}=9Ie`0l}K9BOfrVOYqE6#c=Md)8a>-H!dl-6-B0cHc%0Jm2HeIWmG`p<%bKdygJ z1HcJz0JZ>=G;;l|`ImJDeyJ>a1O0)Q0B-9H+MwT&pJ}2`Rs+yAm4S>Vy+2v?Hl^02 zfhG++V-2VmA*l~i!eLUHHCg`&N{h9S&rMq01zJw4xk9aa{ni#gG;W)w55D4?h zVpe)M_GeCP@W0V}sUXt)9_DFiPv$57{|ED^%AD+^nsg_lE4zNRpm|xrQG?JGf_8m* zpXA=Rzwc9zMW-EJrKjy+_L4IzFO*}y# z%DLZdcC_f?wKi@HWl;VSmBwBp3A_YjIBZE(^%t7V}ZYpAH*n^3S)28tt~y zJLbI#b9YX}a1qV}OsTlYZcChy1r-KfGTjkL@?c-=THIPP^{RZe~B4 zi^G1D7A$GI1q*lS&eqNA%NBgwp9MOp{S)20GS?-2*qm=(Wcyb4WN}-%vZKFRutVFr zvW<&+-dZ-pggGrVc7Nl|w_hDMYQSvF-6kTx;Uhn)-7k)Km^8+c&ruf!3a#^f6?vFj zyICG#QF1-TtmtZ#SxIH2X))%@ipwI*N(zE4ivPr%RAr=D3Fgy^%Hel8i2D^`?yVei zTGudl_Gd+uS=GgWmMqUlI3*o@o|o;wmxt^2=|B9gb^(6G&qoYYy#B^J;_OL1R7u

{U$3Iv~D8^4{;l+zqdvpkHAO zb~)yJ=3?A$KF0X0b3$7EfcYM4jJqxq82vEP&pFsC5; z`N{BGfidlG!6)rmWsPU}yg*)Hd)_}|ta<>=fnw}B1^G`w-mbuWfbR=-FCNIBk01I7 zBGSe_A>smz@h`~^HuxH2o>tJuY|JH5o;3Hu*P6hs1#p=$&dmEAP|aRXrduh^M;ZT= z(*nhqxM)Lj63BlxbiNe%FG}&zo&&uv1Akvr+MqS{3i)z3s0GcNV9zO2ij(c#OdGQE zx=8mAb0L2n;6s%8aOD3g$FmmmIAbZD%pi4n*G&`iA4=?V(YmhR zUv*<1hU*x5p#972F}^#I(yT}{yx0=nn=!yYM< z0PLIB`8#?JWBiTgfjZcz|Gn~hkiR@u3fdm%2Jktv2R-+{7EXEqXdLs2>i2(5Pk(25 zXmsJBE|{`0jFdg}x9CI%Eq%Yd_y zi^eHvSi%IT8K-!p-VqNzqYLMhh-c!KiCE+LNcBH6HAu`kAA6US^xp;Fth6TjzG>kb z8)rJSMH^sktgfsAd^g{7?W>dXTK?SSvP%8@OPTh$=cCRi?C4XBZ{`aj ztHng{uqbSuq8+8>I=`)Q=+Ge|zDYSh@y;Tr_9{Vncz192)R?a>oeCa#G0v?S*1KWN zqKhceH#7d17RNXU?@}5;D{J3}^Cop|rZk$|ddK3HU;C|AvhejnRfv;;z}pxc5L4+pN|+M{xqdmi{6&+4G48^mS=E_+ zJqlAjOv{6v&C8SbnwKBmVO|#I()~)Nw^{Mgon}(lhBjvhy!@(pmmZ^g5ijz^*jr*j zoX*=hK{~5%AL%;(a-`nE%MmRX-a2Z!FfUZK>QY3Dmlzw_tTeLSx;$U4GZ+1Y@8xG_ zPwvUTDGu!x+Me)K{xdVE<-*J$gGE`v20vv4=+8K^M|ky&moDEou{gBrt_uOr?>PCZ z@Mb}Nrs+W4r)0qRRW|&7GUmVlv%S#dMr=2;Vn88wB6Dz`d%HIt`@$JCN0`#sr zLEj~S;!XMPEdI^h2te4?+VHw@{||kfaxDM= diff --git a/hack/make/.resources-windows/docker.png b/hack/make/.resources-windows/docker.png deleted file mode 100644 index 88df0b66dfcf0b298de8cd296b793b9220c4a914..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 658195 zcmeI52VfLM-^S;9dgxUo6akTtgq{#WZz3H8tb`;KDWNHduM$KQ)K9@e6$LBOqzFor zPy}pLL3-~cKzhILKbJeW9LeSS?%qA4!|l#a{mo{dow~Dn_2}&HQ^kid=HInThdzwS zW^?PMCYOZm&Hknz@3CD5O<>HoEVnYY@QX@}`ScvozJ0G=!{aB$PZ%CQHmGa+_CaIE z#}6GbDvq(qCl~h}*01l43av8Ernc)gGvTvt@qNmB1@&q7K|;AXb?VpfF7@=x8cR=< z@4cgR$BxRX`?(x#+r|;dp=fdTNZKnCu9?MF3t&goSA+MH| z`BcqB&2LOL`G~IHd8)o_dRRVNtUp9 zex1H-iINTeD`vSW5tgtxbHN)j_WWlx%FC9_VL@f%W_Do1Te6i0o_)U~ z8yL#U4eho&oE;BlpqF@?FQg{8(u3nKPbb$~V~HeYI*$X74OuQdM06R8F3A~)3z?>Oywui$Z-0RtL7UT=Tbicghm7A=on*rwsm*B4Z7G_6VC z`4p;$C3DJCX;bnh?+;kLe}A)&HU_*Ld~>^6TXV>eOZit0tQgydu?rL5I+WiaWPO!{ zmgMrFMfnVXSz{s#;jVywfGY4z8%t(v-{QiDYGV>?u*clhQGZuU;Q+PinQE7i^?HsP`HgP&aacDs7- z{fq zYklNZalwqt8auiu%Y0Bf=#QD@de`hWr)ih4KJWJn>ffbWa;x$3@b_Nmw0Ta;eXP;j zx$kc69QN6?b-i2t>$jo(re)2mywmAx--EFe!xnyAVacL{ceJBuN_=5Jn-bl zQSJAJCSI?kX}o(>Xv00b+O2KT!28pfzt+}V>9=A}l{L?~XxyvY^LmHz|G6yScaGo!&d^*8Fr?x1Z~5E}gZz(vU`B?+xtI zpw_W6AD0a&^~TKhyPMZ8Gpl{0x|?RpyRQAV-{CTcI~^X=r`C-IKQ3z0rAn<&|2P>t za-Ampvj#y=?VE9V@#S5Y8(*$^xkmEyK8uI68TV~w-;4G7{Q1h$m9Lb#(&UOdZhoV- z{l8h*=ghLD{Yp3e__=ngmTv6xQ}6edm2L9s!tj5lSLoX9yH#I*ex%Cqio-KT{B(Wd z=F(?Af4s-Mg98r#b))n3dDj)Gll=VOEkAwl%whE^FRG-eG_3NZNG$LE@0Xul?%O}5|6lz+{_&;0 zZ?5jtzj5D7KYIT=cSZl@i+kPbTea`7A6qVaZ~3rg@%>tV`_`bmS?WQ%p4rv2{kOfB ztbO^?);&SKF_qU(Zct;S|B82Coz`@Bowfg*_}^>wx6IG0w&jCX%YR%N-oEkEjR&mn zlk(Zn)|yN6$7Oyov4{7ydOyrddF%bNAEk}?L6$`{i?;+`kv@}<_Di2Ds*oZk+!ir0rvVNSgoUZ!aZ$`Aqx|XJ6lTE&H)IL-P9Njn4idt+U@i^?JYB>QjFED@Fd^ zbXzEnOg%A2}VE^Y{T;FNc;#E%^+p=oQ52b(D)Z^10E51B7 zY}cCm8~@n&<1dxhTsf{jwR+>|jc-N|8&YxDFTbzemGtShPv6}9=Ktlj()ho3B49Im;pg$?BA$&nyKw&(Nv+@fJ$3GlkYNK4lsa&{@uiJr|9W!wh{b&u9qBx*;;>B< zrl-vOU{<%c|7tids^Oxld(Q0JcKPXvY2#kJFy(OC;zjEh?Yy(>)K5vxl13!GclxWp zn|$@;mxCHlIJj$H`}4i89BS?R)#srj&#io`XZxPhhHbc3DQ~p>m*@qt{#4j~&!RUyc4?*IRydXS_FKz`LzK8as1fl|y0knoc@&y8VQ69kM&j?QnL{ zu&|!tKLtPCw0_j`hOb1PY`(e4=GJ=}4juIO(SeHxb{*C2vG9iVS2r6o>bG;bcT(QF zxcH*)r8g?h583zP*w>ft-pQFM99A>rP_sibkGFrJ{^LE8yVUPK<&)cUwl;me_V10qYj9=LJ9U4Yz4aOYXU;5& z__D&n*MdejY8A09Z2Bj2W{+#Pyhis=Hx9cw?CQ|h{#qF~V_p95V?OvL7r3 zJ3aYI)%W86+`i!SXFvbAV)v%l149oFy>azy@R!dA%w0b3>b!t4{wGfU?0@drpYNol zcRUp`@1J_7woKpHe$zMGBY)i*_ro9i|7qE}-?#lz`(^iQmUQ^chUot?#~ezk`o`Au zCby5CdVXt0Ueq7Io$4>^-~H&&qj5Q1bJl0HUOj91jm)_@qhDOrWAwN?dliQjd74@Q 
z8~@w%@6l?9TlMSn@v*0Fbq?5D{kQzZcP9n=H}miRe_!mmSZi#D*Dv45@IIIExPQdS z*FJn>aSHBRxRX}b_R>Q2*gHL_E@r`RS`_H`+7=L$jsfia3chr7xt>xC1 z`(H@@bz<_mQ&-9k_;tX-pCVo!eRJZ?$ox-_^xV7Ui!BRRZ+c<;0}ANvpQ@-1>5An>%e! zv_JFS(bmuBpFV!~^wd`pCoM@ibh>kfddBnv`E$}f4s5Ubc)`biz3cz(tuI?dg@<2k zJwES7?how`#2%>gWySoj^Z#01u|mjOnMZH^xBt-mLrdo+&---y=>L6}cQf%;<)2RN z*}3QO-~O1~IP9g=XLIiCm~?tlwY%S(3+#FMe?e30X@_U6P3wE`hn!C&auIV@AeJ3>p$QY{ZyW zHSccOUNdM!Y^$0BnsyKEKDK?_@DW|6jF0O(rANP*DI;UTV{5jK@`;=rK@p6On;0E5 zdGx3;6Cx(Js;P@Nf{wX!NX;Ofmx&`=)ojZX3>w(IS5W)-@o_;-8#WD&32hV>)I7Xl zSktD>n>T6@)F?EpNl0i|NLb_Gu<(c`O(H@=gY>3mln)(6j*lH0(WgTveR6cxs^;*C z6URn`giM+=so|u?4dcfT3keGk4-W}#6w;_sF!>0c@cNjE(UXJ6OsG{5i6Nd2aT8+3 zj~F{~MEsZ_9&hxJ_*W;ks#%jK^uXxLHG1rWgvLzJvm=TjlcUFmgf$EeDJD?&?&gDy z9$jR}2@^ZMMmf=!tjGi>^m~15Tu7g|3GuIvkBRH}THKh4wTj9_Y|Mjr#=bg!l#Wkq zOi0|QxX~130%bm|$SjOCv_KvNFO-c3my0d^#1TUuR!*U?{7SLG;wFzMCK$g`5KMO~ z6x2{8-G347$Hzraj33`GK7LeG!Cm&S{Y1?tepdz6>mD65VhnFoJsBBd8t7q-Ka4M~ zL-fSBC<@y&IJ8-CXybliP3d-wXiCS;>A)zG0-q*vbf>#NHhN;TC9xFpYZ^~{?1-VS zTNKMfpM~S--aVr0mVQ}ie!KfElllvvZD zEt-T6iES1f8yhn;xJhX2kl^r`A+f>H;mu>CW1EFGj*X5sl%4`$ z(cPwd_VkS#SJ>5XVbloSGh}S^_z7|RPqS4`!{6qEht7j%3J;_E6OV|F;ZL8a7=8oC z#YTn{a($Ss2O;&rMm$=bmLVvJ-za$TX-pa(H^$uUUl5hfSMiZdh#xv}QuO$^w!`QS zvFvZNAl||OO%sR=;m_+)Bb-nb6QW>Vq0zHVrz+d{E=dra>Qu>_2A2#3*|9>oRKM zY!r&?afA+PUwUD@Kt!OQC-kC6w@TSp!|ERrK*HHiy}m;f<<3S8#-m|O)e6CmbK zfy+D}ldHgG0>u0&aGB?0auv8tfS5l8F7td$t^$_{5c8+NWuA}8Rp2rKV*V7k%=0n1 z3S1^Y%%1|6c|Imrfy)Gl`BUIB&&T8{aG3xxe+pdY`IuY3XxXkl0xe8n+K+K;4mw7%W zSAoj}i1}0CGSA24DsY(qF@FkN=J}Xh1uhdH=1+mkJRg&*z-0o&{3&pm=VNjexJ-bU zKQ)WX=izeDxG}T_bP_H0%;}f-4=of8is{m)J7ZIxpj}L+GnS>L-y4j*8p_zs!L-J7 zK4TT*KY980PR#rF?5-W!_M0qYTJj{*@_%7K00ck)1VF$_0&*+;!#fCo00@8p200hJ)YzPDbAOHd&00F^000JNY0w7=$0SJgq*boQ= zKmY_l00M%000ck)1VF$h0uT_Juptl#fB*=900ad000@8p2!Mc11Rx+bVM8Dg009sH z0SE~00T2KI5C8$22tYt=!iGQ~00JNY0uT_~10VnbAOHe35rBZ$gbjf}00ck)1Rx-| z2S5M>KmY`6A^-ug2^#`|00@8p2tYt^4}bs&fB*>CL;wO}6E*|_0T2KI5P*Q-9smIl z009uNi2ww|CTs`<0w4eaAOHcuJpckA00JOj69EW_P1q0!1V8`;KmY=QdjJGL00i8N zfGi<@s_RMw6>%MFqyz#W00M4B00QDx)rrD@00@A9>j*$VT*n$IfdB}AfLjrOfVfq4 zqA(x;0wCZz0uT__u|`TD00JQ3Rs_1jKc$krD`i00_7h0SJg& zRVNAq0w4eat|I^eaUE-<1Ogxc0&YbB0^(NHiNb&Y2!Md=2tYtw#~LYt00@A9TM>YO zxK(wcFdzT|AmBOz5D?d~MoJ(60wCa41Rx-8Rh=jd2!H?xxQ+k>#C5EZ5(t0*2)Gpi z2#8x%Ckg`sAOHfcBLD$$9c!cn0w4eaZbbkB;#Sp(!hiq>fPm`=KtNo_8YzJQ2!McF z5rBZWRdu2;AOHd&;5q^j5ZAFrO2rUJ&`vGp3YKtP-XARywb7vu^AKmY_pPXGcU`YJ*OK>!3mK%4|1 zAmXeSKtP-Xl-6WRCX-p?9ezPT^aQH!Od3L&3~+Hq zzq=4s=9Y`oL~H4(!v2uOs$Ta?f0F3xKrr3V3VF?1y7cmn9c zay(PS2m&Ag0vP%AOHd&U=sldh)viK2n0X?1V8`+f_nf2KmY_lz$O9^5Sy?e5D0((2!H?t1or?4 zfB*=9fK3D-AU0t`AP@in5C8!P2<`z8009sH0hKp+4DAOHdo5ZnVG00JNY z0yYtVfY^i$fj|HRKmY_FAh-uW00ck)1Z*Mz0kH`i0)YSsfB*v)C^@f`AkVRNt9&p7L3pY#Q0yWR+yAlI4FG7s+x{iL8d~Ewbrk{m7Ojn?d#_ zS#Ee7+*~1>OSTMIf3o+<-XrTpwjx<>!Y)LWIaqLre?dSv0@ghLi^?VcV}Sty5Ma!g zAaH9i{B6k~3I@0Gh6DYtcM*QL_!yf&I^h-u1l*1Q1jOyC)L{im&`xz&T%wO?CFQW3 zi*uRe;`ERl2sniR`mvmX6_J7f2!Me23E10@g^_ge*D-Pk0w4eaq9gzT5oNU?Qy>5W zARvAM5D@X#5poCuAOHfQB!H20QC5qSGqv(?BBjvU$*Gt|-<$PoxW?c}b7*_f@lS?x z7ya&_IkasGrM2?#c{(?pa#hHS6Hda21?KWP0W6|)EdjJZT+17&fdB}AfZGs23&d@z z(x!r3pioQ5@|A9r$Z|8S;D_-RLI~MSWcQH`BKtDg8)WB`!W+`uK>-rU8p87xd9%rKGn_16mA91ad9p3Yb|z~`lMdNb{tua1 zWHZSQCRo0$1?2cpW2tYur;|1R!00I&sK!fITAEolmqOvR_#{iXP zB#Fq}qOQZOWC9S7k{Q7V2!Mb@2~fAycomI%n9Dwi$OwptIYPx>gMbvz23J4;1SC#C zqfkt=qzF0@S$BDcC4S-OApr=;Ll!s!0T6H_0yOg~ipD++U-BxkWd3UP6dErXUZO{M zct8LG@&E-6KmY{XiGWfrpJGjU_`o_%^_@#od|O)+&XfH@Ks>qjQFah;V*+Z0!s-Qs ze5$XnQoV@=-wVy!a$`((6%7Jn7d`|A0T2))0cu6Gphbhm-zl~@n<}|nL+eKVvN(VT zdxU^^aOI=uAmFY96f$|7J!SJ&D63Xje`UEnad<*l2#6=tGRg=79!7vxn_JU;m7q0j 
zg=xykPo?fp&6!@N?l^x)04nm31&%-f1l)uGEdU9!wBu8>P>HQPJeZ<;*yJxb=PA_Y|Bz{>_fxOnl$Ft+^HV>)TMu#z zrDJZnsjc_(c)@9j|MsZ9Gl_>aq!~)*gX!E5-cNtdBPzrHA=8E2g6OvyKOs}w$WMU_ zW6zOWI-OK9IGFi0I)t2Qd9A$&V>jV%Sa~>+O5y4cAM|GZ8m=+WqX&C0I{wLU?xNov z7oy6vDU{aA!{_O|dLcLGpM(*MgU(OJGgGMk%Z(hROzyOoWB$avmzDW=QqO9aq)ynH z=YLUUMHg04RNw{zARt)+^iG`T%*^xGXxg!Hj5E`aO59FUX{iA4(0o2a<#u)5r*)qvSe||dp!^`< zW(2BkxORhPOI2`Yv9j~>QZ99?6yVG>ToE_Auv|eIX@CF-NQOYZR_pW@9u1`XO9qG& zGwSN5yAz9os6hY(K!7n4kSosSz$X;ZdJw}*J7*^++W1}F4=!2|F3H%+!+bjEaF?X@ zFp+yS<7f86Q<`#AFhw@V*v$pU%V-AF@dxRwJj|!H4t#L#nuGJiVu^85#9}O+moj#9 z!SQ@zas5F$D-XAz<1P=*U2|~IcvhTil5|Zf`AmD1Qi^hRYNR2}DUD;!PDjk~LqJ?x zJ6u4fx;9C6fzm4^Aap*MtgEN-9;7rM3J7CdKwQmY2c@}$tg+z&Qr+0W1;=YBjfVol z7#9#%vsg-L@_e|Rp%%yMLy;}{vpK1DY(;Mo(QJ6z|KW_U&tNgCzRobF7 zmY?)y%TGp<9YOYUvRlcXB%4X$lgVBn%j@NUk>&kk7s;lPzq@3Qko}2l0@=P~d6$OZ z2F(z`s{k1Q0T6IBft>vOQ!2TFFZ6Mm(K1H=7h#s4iHQ6w-_B?BO6NTFb zWP6bstiCSAl00w5q50U8JSpSGBBZhJFz zH!G8+=em5%+S+WNrT`194P=df2#7Vb;1>iyKr#fV8|y+wZZ4mxc~9M`Vw{{Bh8UfIxDWFBp#cc6}cf^2#A3M zen9{P+>XGV9@W-UE99ah3P$^+uxsgQe8)ydMCZ&n&Qs)Lotc0kZU~5h1b#sP1l*E9 zR(|dfM?CU*%Y(N-L~i)N%m8YA+;c@fAt0`xj5I(11Oy{Mt&nXrZt;gb8Mu>~A%#J6 zUc{j!9)H?XR%^mSK&+t!zaRhtk|aQ%5$r-!ZfzP+PR`C^_pX$MuXm(jDW>TCNF}U-cfZLOJEEF;`5D+2hBW(}>0YL~{f2K+}b!YujRN^->(phF+Zc&%S z;sRH_SJ3S|}J1o>3m`!r=&geHaZ#6MU3l$gYWb3zM* zQ-A>h5D+1O1g*nIhpFiyr>4jEq#y%iOjfASDb)1fD+Wbo_(#*{k^PX=qNauN+LAd)Z;5J^-MG6@18 zU=0Bhj*2R|d?^V>C%H^kly)%M?_IaxLo3V{yH}_@yUA`Pn@FO;zW^mUBeMoxG4&w1 zLJOARm4`zq?F0JsdDE0n81y2mA}T_%T4bKQDxQ?9MZol5BH*ff6|{D!W+`$>G5<8 z;rR<9?+@mNv*C;k{5U(Ft|3i2e87T-;bD0mcw9S(MH~8kV2H(eI;l?95WXn>hVZ;h z+@G4PH(6ePhAswE{`@-kXDEw-A33owVsVfk=*-H)W9j!}{h1-pw$!S#N?Hob(^|CP<01j#ag`&>8Cz=j-x6Q6ab9oF z(C;C#2gNd`Va@ZO^8JY6H#Lkf1o#8O*zku9x5mcsP%^mjhZ462!yk&KehlJc@MG?` zsPLxo@jn>jKN|jrWa`Ht5{9(6aGClsTr29=G&~mtZViTulxcjX{EAA`Ac}@~%>5P> z-ZV`vaK?uJ{h0bO|VlgZxW3)CHRr-WprUlw(5{^5C8#72$WrWDNG@k zk5bCyok%nuvqm%w@fUoPHq~13xJKcBHpGG7M+u+{>rp0f0Ra#YkN~wZno}#|1zPmi zhFTg=(4e^Sc$n2WGl)l)^_yHRrq&0aC;?Lp0SHJjVBib_AmCgABoq~CyqnLKicl~a zZ)MaW!6-xGVLC8vb4Co}VZ#oIeDIu)kHMHB;JE^_{A6X~(%cN2M=k}bH8p9bNBKuC zzySn6;86lHR+_d9QPZ5La%9WUGJrr5i?U<`6f(KDBZcD8#A&{{{=*(NhzE~uGqpa> z6%{Ys9un|e0b#5yVfgMLijt2s3MKPbna;E=IUIa|00_92fSuyOGyAb?b1co&9M)8n zVDZaJFwe*`2!KEd1RNk9$0&)-B_xB(M8J~;qyp{$5C8%9BH#e=;6P?m_Z1%lfhmE2 zCwF0)t{5%>6)uAS2#A@0jq5y2i*%bDzBP5kdC>$sSwJjWxmq+fxPkx(h>3u1dlFh< z&NoQ3)6ApR2j6oJ#sUJKEFi{vN?Cx4dqJz#LQwDq0xl!KH%8=pTk&5z4c~syP^PYl7`i9;m#;`c=x_l$eMm@!5K-}6{p#oH91VF%Y0;xIK zy5@)3KT6A^<34#4GUrW@qCOZS7(;p%@-M#qNS6PK)`&tt z_O4%bl?ZhwyNbZMsyV_x9cGoW@Ee#`#R<=a324q9WTn=9!erT=H=RFV}aHD5lOG=l2c5_1wkIklzdbyMAP{oUB4wm1m~1 z(rf;YrH6H9X-&E^t;*0yFXYXwPxt~4E*^#@=7HBvv08gpk$BKOlXRJ#iRjPLO7l-M z*lhUH^RqUGc2Gci&hb{p+hoU+HGQdnj@*7HD@r5yhtUaM4I*=tY;jFB-Um2|?Dr%j zyfy0>v;5>Nis&_KBdg{Y!05vu76;?qFus5P!o2@Yq^}ehJs{68%cS0~bf$Ri(aktiDhOxnCo^q+xua3K?Q^W@&WTivP~_Zz(s~n zwOc_j^DOXh-plfnQ^>;%^PV2PTFKj&`C9N{5apdK%=_?m=5_K9#vhi&8Quy>i5kfA z0?QZY#jV^G$+vYaFz=(gtRLf`2L=~h{zjhf6DUb8^7Q-6_n%GlLa~EhD6G4U^Syjo z=2NX$#*;1G`hH2HzZcz1D{Ap;Qt#@7qx6*%#nyK|eXQqpjZk)-1lEcc$(tk|f=u0| z=N@lm^d-xmL7~MK?BNC8trh8vo*2mr2tni@y za{3?Y=eNIKUrP^+%zEK0qhVW?Tc(O6=qQt}G4CUZI`NcI7mU3or*bWpRjV<}su{}i z{L9!IooK?}rGfG!nr+2}qso7~Sw;Q=W`)ouR|wLHNfN!7<#_oOSFY=Hfww-K(KTLN znv9-#{APWL>~ONxi}hIah27-bkDhrqioR@{t0V=4KyuOenyj@DjVbZ&tVFV0gnlNl z=jqpSv;1T?^8B;q0SbKb&k|OkR*23}pS|+dC8oY|lBqBLtNXQY4!?|LS8u>F>a}3m zK@BC_UBUbFxlpJtpCB$r8E=g_%j8t4%`)q@V41ZVF|Ec`Gr%n?4WEkab$UNdn%GAJ z;#Ry+SQh;QU%6$gvg~U0$<||BShT95UvM4-5a9v;W4@hwkEzZdVdc;4XQeM6Cl~H& zqoFUZ->+DM-K$!UCDU&jy=d?*FTEK`;<3nZ;>=%q*70W?f7bLN+m`HuOwh5l<}w9% 
zk)Cyl)&?xjzoZ0&U_MR-nMQVSaRsu&{S1XoB)f;~-(>$I(8a&%EkD_W{QO1MLc^QW z_gJa>H(15gJ2Xq^0zD*8(Ru~mMd^6M2Til9Jz4Nzp#J~7QWYGZhy!Be-E+!2mvutQ zCkUv>DsR($ZZaV5fTYYvL9Mv#s&!Zv2`}fCSGtmU|4zEfXH==_;ioxwm_*|wlR0L# zkmb3h2WM`%$5>AJ>MXZ>HL{Ph+_H~}_GN@m-Bw=yk7>^yW&y-YPHSPDVe;tjDY-%o zmKjj)TBYNAM#;4KhbW!1^o+1Wr3&Yio^MKWsZX{s*$A?&$krw6Xv1H=-=w@>v^Qc& z2nc~~Km{F5Hkz!fMgemvE#6(pciZXi8S2VY-nmLI8N`gHY74y9 zCzW!4mh2_H=`7t-VS!Zb45j4hUb8ySwRCw3a6J%?Xb=C8YzZ@?i z`xyzzVfwYnh*m&IG+T(z$;4k&I1}#1?-|{FLt{=_dePGI$s=<9lC3wy#d!KL=OO33_%FYI_=g9!B}!p6ltcQzd#MQDkmVDMteRCODghzUXh?;4T5ow1&hq)L@Ko9NKlTO zX33=j649d{UslfthFa@?c^@DQ2!H?xfPnZ3+@dV*Buk5sbfR)-&$^X%O(GC0`cr8pj^Ji*80}w@JU`UAOHep3D9ct{L~C##Dizd+h;Gt!)y)Nb=ww-=9x^T;&Z*NHrkv#n}o;egb)A(BuGG;MO{`Yv{Q)j2BNMmhmX%lHX`D#tCRJl z^gLTYcugynd?Ard2HjXTVM8FP6G+fbHD3fnzg4;bgC52?^Y#6k4G;gq*a^puCjb>u zD7V{+Rhx~$5L?+Hocj^r{Z@I&X-x39E0rpXX}mhRA7Sy82m+!|EU~p(bblx|45HYY z9>RfuSPAfHRt5c5)(TQ%2xIC#JUiw%;>`6p%?IVCO5CgwC+i$Y0EStYht_Y-WXv%2-4+aF>kpORXq|;c37-t^( z`0in&TH(79Hr#g@1F->GkwH}IUy5|M&@mrlAr_K?H>nZevki6AbHykg8ch;YtJ_MA zsk`JCU08;iRHb zy24DBqWHML4FY0-9M|k5y~$YhNiRcC-7o)RqisiFK)@XcXtQaC6(8#mW2=J$=IcFt z{bFgi#FOs8TdHM%fEd@DT5Z?9&wN^=DBskK%ZOtTFh_u|>)>1H@|_UHGQ+B<{QCMW zrL9Vq7Ig`3t|Nd!bYs?E{8f(KyL*#%fwOruT&7krxv!UTtR8pF;XPT}o7MK$Fn_gz zC1>Zc6ZbRONwP5FC7}Co2X94-SvWX8AKK??RI8@qB`KYp**l010Wq!XPj?<=8R_*+ zovk^S(-)^?9{9y6Idv=as$Pl>t5=>?^nP@8{)N_%*Q8)4GX_1h3rZpj}Q=N)Y9zj$t*j|-=3&FX^$)gwLBKJ3Sy1Q7KrA) zJ$u>4pEt1U*RJUhDwRq$>5VC@dQkNTNVi>0W)uIogqFwyBCb8)qM`fFtk}D&3|JJ3 zJQ_3)r*V(J3{Lp9pMbprfOiCUvM6V3+;o4OVx0PDO(87w`+V&(55(h}B}>_t zi@tau9z2RR?b;O*5AN2!N&xFxEs!6=xR8MEQ-SogC%!09^kW?cE^@h+`T4&9@h|`p z&o2a|sJc)nCe!yY?c3mrcMs4UdUrQ!T74f=r(gm9j_rwT*N&Z!6uE4%L&wm0<*Ij0bcldEF0SHJ@y!rQTy?uTv>XJ2X zy1_{|UmMc2ye|u+nO3|hykY&%MdIOITDkNgd4oB6l`-5>y$FI}P; z(07ZB>B9N*?7itT*oJlMb=_N^d_0@+0d{VlEahEf7&zKbz_9V5q{cdoi|yzC6@A!z zvau76#YI3VuIz{?H$$5zvAMf9u+&t$KTgBD4io`2z|5QDZf4483T~m~`^jcYcIo0p z#;w8JCjkkd*;%Ei^#bEq0=kwjpIs&87Z+dOpV=!dhd7oDVskVBw{L+YXs6oAOsN`d z_kak`f_{mwow*TZ@O!jUkQ-mWhjVx>$*8X{ZDC9%CC9$;CQH)t7(X;OTDO#3YJ(l7 z73Ax;m5pp=_uHFkv^aM>0k>BXyO8mwh_}yTyW-FX=C6dh=`W6mOhj>#647QmD*U!IjITSgb|RB}Oh5fw>#SN#DwJ$VMD|0zMzrbSJa-W~y#gphBTw9lLb4sAyfEd0OZ1PF61a_inmHVc@9< z0(^Q^dX|3YLkW(On-$Zw) zSxkxHEK&m45X;hvnUi>(Wo1^h)O$((_^yk5_eFOwK1w~{4Yv~xUUWWR7gMe*xCxx&1p?w6u0_ZC z`R=tWEwx9{7j1KOClTQ-k=VNB+4Hr^GL7QVdCO_k-SySU+wAL8w^=q?9wpx}d~lq% zIOq#cZntw7pV#i|_aTYLt0jYg4`&g8fH;e6QSr{-t6>=#Nh~MZvgt-e#ZiQt+*`wB zUg{#Qh`@=uN)$3hhFq!MDpx39q88X=IoTP1(xa`|Wq`gUBrPlYv)Ta~7C?(VcuRwS z5MTB}(`j9d^BPB;mn_xH3T3auHjrbmASirqGjs63Om|=ru&$P zAHCSSa@jg{CCO#-TxwlhlgpLcWD1>N+$*UxszrMpLy$K3RnL;tmQEjVw1WM!miNO>HVMURWfod?E? 
zs!WaMI#X-HNIb3=bs28bB!F31w$|H%ePJ}M_h))Ady%F2t6CM)<|}9(tDMg5ezKN0 zE~1;oN&ggdIdOHML}j@$nLJY_m*1n~n@lc0M8|6~UJfgIuUs3-?!EM2hup064f!&y zNC$0fD3l9>=r*h3Gpe!+^$NN$2LdGJ>l-MU1eHX zGPNR(()BYmE^);uj8hN2mmY}C&&f{6%ge2zmGdDI7^?_S>yLL+vRq|-0QpA8LE{$FSruPU_yz2qMrxz1K!DAA(Y&}-SPo$3T@2y-vOEb+A)e9tr9MMcLRX*8N!wEa&6&9uTzBk~)-UI8im zq!4*wkv-c9Y(IbXi5)jHMyX`7j+K0r)hl_cyvup3WZoneCG|;F^3|}&ioR_3jWpXc z;Lag+T~ss$m(Qk>@`pGaQAfVMTbN4Sg$Bz#R6QL5Q95y83`9D0^+-s?x{RQjtGGL2F}6Q)X%iASO@Uwvd}g zpK#HjZ<**s!=2Y{SX#;a6P~_)6R68-wxvPfPj~|Mwm={t4ywc7PMr(P%+vlxkLfVJ zO-DwaR!gg{^K)q%ql{c_9=Y-1(_Ff)n?FU{deH1yS-y-$(&^!@n@LNLBsrgL%d^C% zRe961pC3`HK~9UimEN?vPbFt^H3bONC}rjSePtT8>n1~u+k2jEzwW`U5S?i7mIhh= zp?6(PI_L(Hax@yR8_dhA4Ru)^mvS*YG4Bu%5!E`0h|i6Ty!{ou6?I*eE&d%2UoS7_ z3L&{k-=65S$-}y}4c$|Gl{i`&g>P%#s`d5zmiDuHvG4_)NriyD0)p>>IIyPMJy9c_ z#G}!rwA@n;jMi*ad3kv(IW3Ldp%0CxXJ%4+#(A&B6?`;oKn>Gb$7Zu9$%|TI%2$Tx z)2Ctc?xpI!Bnj~_sY;bPmH7vBf_Rv4@vu{LVL73~sU@O5m7H_ri2zj{Cq!>HO8%`4 zwMxajsB4S=I@+YuTX@vQlkW8$4@1`!AJgEi3@3`PqwB*sz69-5<5MRd^QMQ7?^gO$ zXLlMccgW{DotRrhT>=4n-#VcpB>=YdGAW7TU{coKp=C6ow#H#^IQ|h14Jirl<5DRd zH|uWG#UwWApKJDJPq^@0HE7C|ZrT)A4r+G5SU^CnPGR0Y18KlxqXoX5Sx)H+e=#Y9D;@q9$y z8gqkI7LJ* z98?`jWe)T4na+ksOm|R>h(?42ARr>Fl;W~9WYghZ9V>gUuIQ~4QCpyrvpHgg& z2LUBP00JV3YI>N-*iDD(gqGGEu3uWix8s$(IdLzOjoEdX(W*(^_Y7$fWWiUl%=Xuj zXYQYigT8x7cO&b?Lh?|gS^#gKGfb`K!f{ZlML@o-BLD%hj+ZEWPfa|m&SLTtovQi< zi6TRGW@7Exdu+}jfx5En%sYY;0gWb!c0e3S!m$iIJc|GXM8fqQv-$9%-qn0##X2rx z4chnrxz2vQh<)F!VJ5g=l`5Tt<8AtA$1K6~f(!^k00JTicE=?ev-wEJ@N(*nHT~6& zTd3i<>{%RBE{$1C{QVL;d^^M9Fz_f40hKC?gkuiPl6W0k=L%E@B1skkB9e-*CmSE_ zJ?eEmOM9eq72n!shYsvX#06oKv-8-f9T(Z<)NB`|h9s;epd{h&_FBjk%JEpwVKqIs z3;+RfqiTJ5)8WaTD|x?5AMKFLEG<*~H`8<2=$#kYtqiO;Gi4*#xk{bJyfmNDEVeh; z$W~Z(FIYv0FJTCX`0B*29L?HutV~uu+aFocySiNw2^jc7N;Vs}>ms|Cg=dw3)xZbG z@6sZ`59mW3?+Ka@BraM45D?K;jeQv#y7kDIh_afG9`{pOw?y2&WE~dzTyhqhu=^53 z#9>4&jEMJvdU>6uPXoS5!hu;;7Gm>MZx9eSuKBO_pHTmolDoNWMeo)ch4gy0AR;cj zIr(P+y}WkN0>Du;9ru_EQ$|t}CIA7EaOGK=^_lyQH_9z<8>A6%M6wHUS8Tvk8kR{xQG(`&_*M)xvs#YF`m$ ztHi9_&B|qy_grD8Xw^E50|@ZehQ@0jZL9nS8y24EfEW>lhZ2B*cwoi9z30DiK|acN zAM;guNu~H1x%q6y-`CilYj{bvsm}P=hQ{kMZK1q`wttyHp96#%+XNDUj0r$MjA?k# zF@3N~w*5xtq-x&sw`vEdy`@lezWvo#$8NLlPTbLLg(ZbN3!X=f<_-;rucj`lX*9NR zMezI}18zkC0^(NHX^&Y`PA&gG`i^dC+hTBSs9jf6*!%mhv$P!AZ5O660p8M3 ztM8I%Y@%(CXHXZ_zlA;E3*VFU92ri|n?=8~rju;^GLG|f> zU1i4|y%oG60V)0JL_>3px~G0-O64pX3_l~K>?5zzB_LG+NzhJ}E_6xcocg!|cHheU zxQ4ItxvDoK$&ocb0O=|UVntQu~_QC&z`lzrSrDxp%qA0CY zRYVlIbOk`+_1eS$SIa<*{3A^_hDt zPyeMU5oOQ%tjOj0G?;ON`lbFPyPWI_7SjYPVyq{L5Kbfj0dXQ+ca55h$*Gjwc9+s~ z{#VUg(X6(=+MD;qi@}_|pT%a=7pnffBeA{VRjN$tjyg|Mrhcc7|NcmOiEI%APvijv z%n^Wqn4^Ph77-vpDZBetR=iTC9az;{5%id^N+qz6v`~7RNSIg#c3v2kXXoeFc}HP(B|nb%?lC(0VffF zfH(;(qAg6||NcBt>&U(AI6sA~bvcc^R%LHRDVpJ>a>X}!lc*K)_3=Awl1ThP?tik_rdbY43lV{U2nj$yL|7@Fl%;Wr$Eu!7&FSZ( zkTvsD%IXBFWR**)rtnuOWHeyUln!XX+@vp6@ulp)6elFS)xZa@WwKoQ z%_1R4qu={%?W61<00JOj2LT9(9qI5JlQm;Ye9|S-E1Oy@g z0TBp1k_7<}00F5JfPhH729bXd009sXhyVmcAn-^Q1V8`;q)q?=BJ~!3mK51f)&?0wVPqME*el1VBI_ z0uT^^z#~}@009t?IspiX)N2s=2LTWO0f7iWKm-DhWI+G~KtSpQARtn&LF69DM7j00JNY0)h~LfCz#ei3(33K|59WOdtm!AbkQ*5$V?dd5|BF?0XxIq8}K){U& zKtSBMa#1u8009tiCIJYDGwC935C8!XaAN`x5I3$|6b%GG00f*#00QDnx`-PDKmY{X zm;eOCjVl*L0|5{K0cR3`fH;#b;syZ_00B2900D91%0!3m zz>Nt&K-{=;Q8W+$0T6H|0SJgQ=^}0r009tiV*(HmH?CY14Fo^{1e{3#0^&@%h#Lez z00i8a00hL1D;Grr0T2KIXA*#bIFl~o1_2NN0XHT90deEXMbSV21VF%<1Rx;Jq>H#g z00cn5jR`0T6Iw0uT^4u3Qui1V8`;oJjxz;!L`T z+j;^C+NstDLI@B50l^4BMFc~Sgh2oVKtS3AARyANKja<+KmY^;BLD#r3_TJC0T2KI zX%m2eNW1=!dk_Et5D<(21Vk|ONEie_00g8>00JWI`a|wP00cllFai(|!O$aN5C8!X zkTwAbh_veuxd#Cd00F@WKtKdTkAy)01VBLA1Rx;Nu0P}+1V8`;1S0?e5ez*N1_2NN 
z0cjI}fJnRkkb4jS0T2+300cxZ^hg*4KmY`!O#lKS?fOIRK>!3mKrjLj5W&zRVGsZT z5Rf(j2#B=n54i^c5C8$e2tYstLyv?(00cll+5{jV(yl+`9t1!D1Oy`h0TB#65(WVf z00C(efPhH5{*ZeR00D^-nB6gcvqZCAINMN>!aQ&W0w4eaAYdT@2#AHC;0**o00cmw zFaZciVIDXG0T2KI5U`K{1jIs6@CE`P00JOTm;eN%Fb|x800@8p2v|q}0%9R3cmn|t z009svOaKB>m 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a - fi - - debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)" - debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)" - debDate="$(date --rfc-2822)" - - # if go-md2man is available, pre-generate the man pages - make manpages - - builderDir="contrib/builder/deb/${PACKAGE_ARCH}" - pkgs=( $(find "${builderDir}/"*/ -type d) ) - if [ ! -z "$DOCKER_BUILD_PKGS" ]; then - pkgs=() - for p in $DOCKER_BUILD_PKGS; do - pkgs+=( "$builderDir/$p" ) - done - fi - for dir in "${pkgs[@]}"; do - [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } - version="$(basename "$dir")" - suite="${version##*-}" - - image="dockercore/builder-deb:$version" - if ! docker inspect "$image" &> /dev/null; then - ( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" ) - fi - - mkdir -p "$DEST/$version" - cat > "$DEST/$version/Dockerfile.build" <<-EOF - FROM $image - WORKDIR /usr/src/docker - COPY . /usr/src/docker - ENV DOCKER_GITCOMMIT $GITCOMMIT - RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers \ - && ln -snf /usr/src/docker /go/src/github.com/docker/docker - EOF - - # get the RUNC and CONTAINERD commit from the root Dockerfile, this keeps the commits in sync - awk '$1 == "ENV" && $2 == "RUNC_COMMIT" { print; exit }' Dockerfile >> "$DEST/$version/Dockerfile.build" - awk '$1 == "ENV" && $2 == "CONTAINERD_COMMIT" { print; exit }' Dockerfile >> "$DEST/$version/Dockerfile.build" - - # add runc and containerd compile and install - cat >> "$DEST/$version/Dockerfile.build" <<-EOF - # Install runc - RUN git clone https://github.com/opencontainers/runc.git "/go/src/github.com/opencontainers/runc" \ - && cd "/go/src/github.com/opencontainers/runc" \ - && git checkout -q "\$RUNC_COMMIT" - RUN set -x && export GOPATH="/go" && cd "/go/src/github.com/opencontainers/runc" \ - && make BUILDTAGS="\$RUNC_BUILDTAGS" && make install - # Install containerd - RUN git clone https://github.com/docker/containerd.git "/go/src/github.com/docker/containerd" \ - && cd "/go/src/github.com/docker/containerd" \ - && git checkout -q "\$CONTAINERD_COMMIT" - RUN set -x && export GOPATH="/go" && cd "/go/src/github.com/docker/containerd" && make && make install - EOF - if [ "$DOCKER_EXPERIMENTAL" ]; then - echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build" - fi - cat >> "$DEST/$version/Dockerfile.build" <<-EOF - RUN cp -aL hack/make/.build-deb debian - RUN { echo '$debSource (${debVersion}-0~${suite}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog - RUN dpkg-buildpackage -uc -us -I.git - EOF - tempImage="docker-temp/build-deb:$version" - ( set -x && docker build -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . ) - docker run --rm "$tempImage" bash -c 'cd .. 
-		docker run --rm "$tempImage" bash -c 'cd .. && tar -c *_*' | tar -xvC "$DEST/$version"
-		docker rmi "$tempImage"
-	done
-
-	bundle .integration-daemon-stop
-) 2>&1 | tee -a "$DEST/test.log"
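Editorial note: the truncated comparison at the top of build-deb documents dpkg's tilde ordering, which the changelog versioning above relies on — a final release sorts above its release candidates, which sort above git/dev nightlies. A minimal sanity check of that ordering, assuming only a host with dpkg installed:

    # tilde (~) sorts before everything, so ~rc/~git/~dev versions rank lower
    dpkg --compare-versions '1.5.0' gt '1.5.0~rc1' && echo 'release > rc'
    dpkg --compare-versions '1.5.0~rc1' gt '1.5.0~git20150128.112847.17e840a' && echo 'rc > nightly'
    dpkg --compare-versions '1.5.0~git20150128.112847.17e840a' gt '1.5.0~dev~git20150128.112847.17e840a' && echo 'nightly > dev'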
diff --git a/hack/make/build-rpm b/hack/make/build-rpm
deleted file mode 100644
index 195570b81a..0000000000
--- a/hack/make/build-rpm
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/bin/bash
-set -e
-
-# subshell so that we can export PATH and TZ without breaking other things
-(
-	export TZ=UTC # make sure our "date" variables are UTC-based
-
-	source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
-	source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch"
-
-	# TODO consider using frozen images for the dockercore/builder-rpm tags
-
-	rpmName=docker-engine
-	rpmVersion="$VERSION"
-	rpmRelease=1
-
-	# rpmRelease versioning is as follows
-	# Docker 1.7.0:             version=1.7.0, release=1
-	# Docker 1.7.0-rc1:         version=1.7.0, release=0.1.rc1
-	# Docker 1.7.0-cs1:         version=1.7.0.cs1, release=1
-	# Docker 1.7.0-cs1-rc1:     version=1.7.0.cs1, release=0.1.rc1
-	# Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH
-
-	# if we have a "-rc*" suffix, set appropriate release
-	if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then
-		rcVersion=${rpmVersion#*-rc}
-		rpmVersion=${rpmVersion%-rc*}
-		rpmRelease="0.${rcVersion}.rc${rcVersion}"
-	fi
-
-	DOCKER_GITCOMMIT=$(git rev-parse --short HEAD)
-	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
-		DOCKER_GITCOMMIT="$DOCKER_GITCOMMIT-unsupported"
-	fi
-
-	# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
-	if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
-		gitUnix="$(git log -1 --pretty='%at')"
-		gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')"
-		gitCommit="$(git log -1 --pretty='%h')"
-		gitVersion="${gitDate}.git${gitCommit}"
-		# gitVersion is now something like '20150128.112847.17e840a'
-		rpmVersion="${rpmVersion%-dev}"
-		rpmRelease="0.0.$gitVersion"
-	fi
-
-	# Replace any other dashes with periods
-	rpmVersion="${rpmVersion/-/.}"
-
-	rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)"
-	rpmDate="$(date +'%a %b %d %Y')"
-
-	# if go-md2man is available, pre-generate the man pages
-	make manpages
-
-	# Convert the CHANGELOG.md file into RPM changelog format
-	VERSION_REGEX="^\W\W (.*) \((.*)\)$"
-	ENTRY_REGEX="^[-+*] (.*)$"
-	while read -r line || [[ -n "$line" ]]; do
-		if [ -z "$line" ]; then continue; fi
-		if [[ "$line" =~ $VERSION_REGEX ]]; then
-			echo >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog
-			echo "* `date -d ${BASH_REMATCH[2]} '+%a %b %d %Y'` ${rpmPackager} - ${BASH_REMATCH[1]}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog
-		fi
-		if [[ "$line" =~ $ENTRY_REGEX ]]; then
-			echo "- ${BASH_REMATCH[1]//\`}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog
-		fi
-	done < CHANGELOG.md
-
-	builderDir="contrib/builder/rpm/${PACKAGE_ARCH}"
-	pkgs=( $(find "${builderDir}/"*/ -type d) )
-	if [ ! -z "$DOCKER_BUILD_PKGS" ]; then
-		pkgs=()
-		for p in $DOCKER_BUILD_PKGS; do
-			pkgs+=( "$builderDir/$p" )
-		done
-	fi
-	for dir in "${pkgs[@]}"; do
-		[ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; }
-		version="$(basename "$dir")"
-		suite="${version##*-}"
-
-		image="dockercore/builder-rpm:$version"
-		if ! docker inspect "$image" &> /dev/null; then
-			( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" )
-		fi
-
-		mkdir -p "$DEST/$version"
-		cat > "$DEST/$version/Dockerfile.build" <<-EOF
-		FROM $image
-		COPY . /usr/src/${rpmName}
-		RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers
-		EOF
-
-		# get the RUNC and CONTAINERD commit from the root Dockerfile, this keeps the commits in sync
-		awk '$1 == "ENV" && $2 == "RUNC_COMMIT" { print; exit }' Dockerfile >> "$DEST/$version/Dockerfile.build"
-		awk '$1 == "ENV" && $2 == "CONTAINERD_COMMIT" { print; exit }' Dockerfile >> "$DEST/$version/Dockerfile.build"
-
-		# add runc and containerd compile and install
-		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
-		# Install runc
-		RUN git clone https://github.com/opencontainers/runc.git "/go/src/github.com/opencontainers/runc" \
-			&& cd "/go/src/github.com/opencontainers/runc" \
-			&& git checkout -q "\$RUNC_COMMIT"
-		RUN set -x && export GOPATH="/go" && cd "/go/src/github.com/opencontainers/runc" \
-			&& make BUILDTAGS="\$RUNC_BUILDTAGS" && make install
-		# Install containerd
-		RUN git clone https://github.com/docker/containerd.git "/go/src/github.com/docker/containerd" \
-			&& cd "/go/src/github.com/docker/containerd" \
-			&& git checkout -q "\$CONTAINERD_COMMIT"
-		RUN set -x && export GOPATH="/go" && cd "/go/src/github.com/docker/containerd" && make && make install
-		EOF
-		if [ "$DOCKER_EXPERIMENTAL" ]; then
-			echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build"
-		fi
-		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
-		RUN mkdir -p /root/rpmbuild/SOURCES \
-			&& echo '%_topdir /root/rpmbuild' > /root/.rpmmacros
-		WORKDIR /root/rpmbuild
-		RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS
-		WORKDIR /root/rpmbuild/SPECS
-		RUN tar --exclude .git -r -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar ${rpmName}
-		RUN tar --exclude .git -r -C /go/src/github.com/docker -f /root/rpmbuild/SOURCES/${rpmName}.tar containerd
-		RUN tar --exclude .git -r -C /go/src/github.com/opencontainers -f /root/rpmbuild/SOURCES/${rpmName}.tar runc
-		RUN gzip /root/rpmbuild/SOURCES/${rpmName}.tar
-		RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/${PACKAGE_ARCH}/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec
-		RUN rpmbuild -ba \
-			--define '_gitcommit $DOCKER_GITCOMMIT' \
-			--define '_release $rpmRelease' \
-			--define '_version $rpmVersion' \
-			--define '_origversion $VERSION' \
-			--define '_experimental ${DOCKER_EXPERIMENTAL:-0}' \
-			${rpmName}.spec
-		EOF
-		# selinux policy referencing systemd things won't work on non-systemd versions
-		# of centos or rhel, which we don't support anyways
-		if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then
-			selinuxDir="selinux"
-			if [ -d "./contrib/selinux-$version" ]; then
-				selinuxDir="selinux-${version}"
-			fi
-			cat >> "$DEST/$version/Dockerfile.build" <<-EOF
-			RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux
-			RUN rpmbuild -ba \
-				--define '_gitcommit $DOCKER_GITCOMMIT' \
-				--define '_release $rpmRelease' \
-				--define '_version $rpmVersion' \
-				--define '_origversion $VERSION' \
-				${rpmName}-selinux.spec
-			EOF
-		fi
-		tempImage="docker-temp/build-rpm:$version"
-		( set -x && docker build -t "$tempImage" -f $DEST/$version/Dockerfile.build . )
-		docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version"
-		docker rmi "$tempImage"
-	done
-
-	source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop"
-) 2>&1 | tee -a $DEST/test.log
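Editorial note: to see how the rpmVersion/rpmRelease mangling above plays out for a release candidate, here is a standalone sketch of the "-rc" branch using the same parameter expansions (plain bash, no other assumptions):

    rpmVersion='1.7.0-rc1'
    rpmRelease=1
    if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]]; then
        rcVersion=${rpmVersion#*-rc}   # strip through "-rc"  -> "1"
        rpmVersion=${rpmVersion%-rc*}  # strip the rc suffix  -> "1.7.0"
        rpmRelease="0.${rcVersion}.rc${rcVersion}"
    fi
    echo "version=$rpmVersion release=$rpmRelease"  # version=1.7.0 release=0.1.rc1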
diff --git a/hack/make/clean-apt-repo b/hack/make/clean-apt-repo
deleted file mode 100755
index 1c37d98e40..0000000000
--- a/hack/make/clean-apt-repo
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/bash
-set -e
-
-# This script cleans the experimental pool for the apt repo.
-# This is useful when there are a lot of old experimental debs and you only want to keep the most recent.
-#
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-APTDIR=$DOCKER_RELEASE_DIR/apt/repo/pool/experimental
-: ${DOCKER_ARCHIVE_DIR:=$DEST/archive}
-DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
-
-latest_versions=$(dpkg-scanpackages "$APTDIR" /dev/null 2>/dev/null | awk -F ': ' '$1 == "Filename" { print $2 }')
-
-# get the latest version
-latest_docker_engine_file=$(echo "$latest_versions" | grep docker-engine)
-latest_docker_engine_version=$(basename ${latest_docker_engine_file%~*})
-
-echo "latest docker-engine version: $latest_docker_engine_version"
-
-# remove all the files that are not that version in experimental
-pool_dir=$(dirname "$latest_docker_engine_file")
-old_pkgs=( $(ls "$pool_dir" | grep -v "^${latest_docker_engine_version}" | grep "${latest_docker_engine_version%%~git*}") )
-
-echo "${old_pkgs[@]}"
-
-mkdir -p "$DOCKER_ARCHIVE_DIR"
-for old_pkg in "${old_pkgs[@]}"; do
-	echo "moving ${pool_dir}/${old_pkg} to $DOCKER_ARCHIVE_DIR"
-	mv "${pool_dir}/${old_pkg}" "$DOCKER_ARCHIVE_DIR"
-done
-
-echo
-echo "$pool_dir now has contents:"
-ls "$pool_dir"
-
-# now regenerate release files for experimental
-export COMPONENT=experimental
-source "${DIR}/update-apt-repo"
-
-echo "You will now want to: "
-echo " - re-sign the repo with hack/make/sign-repo"
-echo " - re-generate index files with hack/make/generate-index-listing"
diff --git a/hack/make/clean-yum-repo b/hack/make/clean-yum-repo
deleted file mode 100755
index 1cafbbd97f..0000000000
--- a/hack/make/clean-yum-repo
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-set -e
-
-# This script cleans the experimental pool for the yum repo.
-# This is useful when there are a lot of old experimental rpms and you only want to keep the most recent.
-#
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-YUMDIR=$DOCKER_RELEASE_DIR/yum/repo/experimental
-
-suites=( $(find "$YUMDIR" -mindepth 1 -maxdepth 1 -type d) )
-
-for suite in "${suites[@]}"; do
-	echo "cleanup in: $suite"
-	( set -x; repomanage -k2 --old "$suite" | xargs rm -f )
-done
-
-echo "You will now want to: "
-echo " - re-sign the repo with hack/make/sign-repo"
-echo " - re-generate index files with hack/make/generate-index-listing"
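Editorial note: before letting clean-yum-repo delete anything, the same repomanage invocation can be run without the trailing "| xargs rm -f" to preview what would go. A sketch assuming yum-utils is installed; the $YUMDIR value here is hypothetical:

    : ${YUMDIR:=/var/www/yum/repo/experimental}  # hypothetical repo location
    for suite in $(find "$YUMDIR" -mindepth 1 -maxdepth 1 -type d); do
        echo "would clean in: $suite"
        repomanage -k2 --old "$suite"  # lists all but the two newest packages
    done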
diff --git a/hack/make/cover b/hack/make/cover
deleted file mode 100644
index 624943b8aa..0000000000
--- a/hack/make/cover
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/bash
-set -e
-
-bundle_cover() {
-	coverprofiles=( "$DEST/../"*"/coverprofiles/"* )
-	for p in "${coverprofiles[@]}"; do
-		echo
-		(
-			set -x
-			go tool cover -func="$p"
-		)
-	done
-}
-
-if [ "$HAVE_GO_TEST_COVER" ]; then
-	bundle_cover 2>&1 | tee "$DEST/report.log"
-else
-	echo >&2 'warning: the current version of go does not support -cover'
-	echo >&2 '  skipping test coverage report'
-fi
diff --git a/hack/make/cross b/hack/make/cross
deleted file mode 100644
index a96bfd2c20..0000000000
--- a/hack/make/cross
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/bash
-set -e
-
-# explicit list of os/arch combos that support being a daemon
-declare -A daemonSupporting
-daemonSupporting=(
-	[linux/amd64]=1
-	[windows/amd64]=1
-)
-
-# if we have our linux/amd64 version compiled, let's symlink it in
-if [ -x "$DEST/../binary-daemon/dockerd-$VERSION" ]; then
-	mkdir -p "$DEST/linux/amd64"
-	(
-		cd "$DEST/linux/amd64"
-		ln -s ../../../binary-daemon/* ./
-		ln -s ../../../binary-client/* ./
-	)
-	echo "Created symlinks:" "$DEST/linux/amd64/"*
-fi
-
-for platform in $DOCKER_CROSSPLATFORMS; do
-	(
-		export KEEPDEST=1
-		export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
-		mkdir -p "$DEST"
-		ABS_DEST="$(cd "$DEST" && pwd -P)"
-		export GOOS=${platform%/*}
-		export GOARCH=${platform##*/}
-
-		if [ -z "${daemonSupporting[$platform]}" ]; then
-			# we just need a simple client for these platforms
-			export LDFLAGS_STATIC_DOCKER=""
-			# remove the "daemon" build tag from platforms that aren't supported
-			export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" )
-			source "${MAKEDIR}/binary-client"
-		else
-			source "${MAKEDIR}/binary-client"
-			source "${MAKEDIR}/binary-daemon"
-		fi
-	)
-done
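Editorial note: the GOOS/GOARCH exports in cross above come from splitting each "os/arch" entry with bash prefix/suffix removal; a minimal illustration:

    platform='linux/amd64'
    echo "GOOS=${platform%/*}"     # shortest "/..." suffix removed  -> linux
    echo "GOARCH=${platform##*/}"  # longest ".../" prefix removed   -> amd64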
diff --git a/hack/make/dynbinary b/hack/make/dynbinary
deleted file mode 100644
index 1a435dc4bf..0000000000
--- a/hack/make/dynbinary
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/bash
-set -e
-
-# This script exists as backwards compatibility for CI
-(
-	DEST="${DEST}-client"
-	ABS_DEST="${ABS_DEST}-client"
-	. hack/make/dynbinary-client
-)
-(
-
-	DEST="${DEST}-daemon"
-	ABS_DEST="${ABS_DEST}-daemon"
-	. hack/make/dynbinary-daemon
-)
diff --git a/hack/make/dynbinary-client b/hack/make/dynbinary-client
deleted file mode 100644
index 83ae3c4537..0000000000
--- a/hack/make/dynbinary-client
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-set -e
-
-(
-	export BINARY_SHORT_NAME='docker'
-	export SOURCE_PATH='./cmd/docker'
-	export IAMSTATIC='false'
-	export LDFLAGS_STATIC_DOCKER=''
-	export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
-	export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here
-	source "${MAKEDIR}/.binary"
-)
diff --git a/hack/make/dynbinary-daemon b/hack/make/dynbinary-daemon
deleted file mode 100644
index 2d1ed25838..0000000000
--- a/hack/make/dynbinary-daemon
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/bin/bash
-set -e
-
-(
-	export BINARY_SHORT_NAME='dockerd'
-	export SOURCE_PATH='./cmd/dockerd'
-	export IAMSTATIC='false'
-	export LDFLAGS_STATIC_DOCKER=''
-	export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
-	export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here
-	source "${MAKEDIR}/.binary"
-	export BINARY_SHORT_NAME='docker-proxy'
-	export SOURCE_PATH='./vendor/src/github.com/docker/libnetwork/cmd/proxy'
-	export LDFLAGS_STATIC_DOCKER='-linkmode=external'
-	source "${MAKEDIR}/.binary"
-)
diff --git a/hack/make/dyngccgo b/hack/make/dyngccgo
deleted file mode 100644
index a9019e8cfa..0000000000
--- a/hack/make/dyngccgo
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-set -e
-
-(
-	export IAMSTATIC="false"
-	export EXTLDFLAGS_STATIC=''
-	export LDFLAGS_STATIC_DOCKER=''
-	export BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" ) # disable netgo, since we don't need it for a dynamic binary
-	export BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # we're not building a "static" binary here
-	source "${MAKEDIR}/gccgo"
-)
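Editorial note: the dynbinary and dyngccgo variants above drop the netgo and static_build tags via bash array pattern substitution; a standalone sketch of what that does (the initial BUILDFLAGS value here is illustrative, not the real one):

    BUILDFLAGS=( -tags 'netgo static_build daemon' )
    BUILDFLAGS=( "${BUILDFLAGS[@]/netgo /}" )        # not needed for a dynamic binary
    BUILDFLAGS=( "${BUILDFLAGS[@]/static_build /}" ) # not building a "static" binary
    echo "${BUILDFLAGS[@]}"  # -tags daemon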
diff --git a/hack/make/gccgo b/hack/make/gccgo
deleted file mode 100644
index 54c983eb2e..0000000000
--- a/hack/make/gccgo
+++ /dev/null
@@ -1,68 +0,0 @@
-#!/bin/bash
-set -e
-
-BINARY_NAME="dockerd-$VERSION"
-BINARY_EXTENSION="$(binary_extension)"
-BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION"
-
-PROXY_NAME="docker-proxy-$VERSION"
-PROXY_FULLNAME="$PROXY_NAME$BINARY_EXTENSION"
-
-CLIENTBIN_NAME="docker-$VERSION"
-CLIENTBIN_FULLNAME="$CLIENTBIN_NAME$BINARY_EXTENSION"
-
-source "${MAKEDIR}/.go-autogen"
-
-if [[ "${BUILDFLAGS[@]}" =~ 'netgo ' ]]; then
-	EXTLDFLAGS_STATIC+=' -lnetgo'
-fi
-# gccgo require explicit flag -pthread to allow goroutines to work.
-go build -compiler=gccgo \
-	-o "$DEST/$BINARY_FULLNAME" \
-	"${BUILDFLAGS[@]}" \
-	-gccgoflags "
-	-g
-	$EXTLDFLAGS_STATIC
-	-Wl,--no-export-dynamic
-	-ldl
-	-pthread
-	" \
-	./cmd/dockerd
-
-echo "Created binary: $DEST/$BINARY_FULLNAME"
-ln -sf "$BINARY_FULLNAME" "$DEST/dockerd$BINARY_EXTENSION"
-
-go build -compiler=gccgo \
-	-o "$DEST/$PROXY_FULLNAME" \
-	"${BUILDFLAGS[@]}" \
-	-gccgoflags "
-	-g
-	$EXTLDFLAGS_STATIC
-	-Wl,--no-export-dynamic
-	-ldl
-	-pthread
-	" \
-	./vendor/src/github.com/docker/libnetwork/cmd/proxy
-
-echo "Created binary: $DEST/$PROXY_FULLNAME"
-ln -sf "$PROXY_FULLNAME" "$DEST/docker-proxy$BINARY_EXTENSION"
-
-copy_containerd "$DEST" "hash"
-hash_files "$DEST/$BINARY_FULLNAME"
-
-go build -compiler=gccgo \
-	-o "$DEST/$CLIENTBIN_FULLNAME" \
-	"${BUILDFLAGS[@]}" \
-	-gccgoflags "
-	-g
-	$EXTLDFLAGS_STATIC
-	-Wl,--no-export-dynamic
-	-ldl
-	-pthread
-	" \
-	./cmd/docker
-
-echo "Created binary: $DEST/$CLIENTBIN_FULLNAME"
-ln -sf "$CLIENTBIN_FULLNAME" "$DEST/docker$BINARY_EXTENSION"
-hash_files "$DEST/$CLIENTBIN_FULLNAME"
-
diff --git a/hack/make/generate-index-listing b/hack/make/generate-index-listing
deleted file mode 100755
index 1167ed1205..0000000000
--- a/hack/make/generate-index-listing
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/bin/bash
-set -e
-
-# This script generates index files for the directory structure
-# of the apt and yum repos
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-APTDIR=$DOCKER_RELEASE_DIR/apt
-YUMDIR=$DOCKER_RELEASE_DIR/yum
-
-if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then
-	echo >&2 'release-rpm or release-deb must be run before generate-index-listing'
-	exit 1
-fi
-
-create_index() {
-	local directory=$1
-	local original=$2
-	local cleaned=${directory#$original}
-
-	# the index file to create
-	local index_file="${directory}/index"
-
-	# cd into dir & touch the index file
-	cd $directory
-	touch $index_file
-
-	# print the html header
-	cat <<-EOF > "$index_file"
-	<!DOCTYPE html>
-	<html>
-	<head>
-	<title>Index of ${cleaned}/</title>
-	</head>
-	<body>
-	<h1>Index of ${cleaned}/</h1>
-	<hr>
-	<pre><a href="../">../</a>
-	EOF
-	# start of content output
-	(
-	# change IFS locally within subshell so the for loop saves line correctly to L var
-	IFS=$'\n';
-
-	# pretty sweet, will mimick the normal apache output
-	for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' -printf "%f|@_@%Td-%Tb-%TY %Tk:%TM  @%f@\n"|sort|column -t -s '|' | sed 's,\([\ ]\+\)@_@,\1,g');
-	do
-		# file
-		F=$(sed -e 's,^.*@\([^@]\+\)@.*$,\1,g'<<<"$L");
-
-		# file with file size
-		F=$(du -bh $F | cut -f1);
-
-		# output with correct format
-		sed -e 's,\ @.*$, '"$F"',g'<<<"$L";
-	done;
-	) >> $index_file;
-
-	# now output a list of all directories in this dir (maxdepth 1) other than '.' outputting in a sorted manner exactly like apache
-	find -L . -mount -depth -maxdepth 1 -type d ! -name '.' -printf "%-43f@_@%Td-%Tb-%TY %Tk:%TM  -\n"|sort -d|sed 's,\([\ ]\+\)@_@,/\1,g' >> $index_file
-
-	# print the footer html
-	echo "

" >> $index_file - -} - -get_dirs() { - local directory=$1 - - for d in `find ${directory} -type d`; do - create_index $d $directory - done -} - -get_dirs $APTDIR -get_dirs $YUMDIR diff --git a/hack/make/install-binary b/hack/make/install-binary deleted file mode 100644 index 82cbc79933..0000000000 --- a/hack/make/install-binary +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -set -e -rm -rf "$DEST" - -( - source "${MAKEDIR}/install-binary-client" -) - -( - source "${MAKEDIR}/install-binary-daemon" -) diff --git a/hack/make/install-binary-client b/hack/make/install-binary-client deleted file mode 100644 index 6c80452659..0000000000 --- a/hack/make/install-binary-client +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash - -set -e -rm -rf "$DEST" - -( - DEST="$(dirname $DEST)/binary-client" - source "${MAKEDIR}/.binary-setup" - install_binary "${DEST}/${DOCKER_CLIENT_BINARY_NAME}" -) diff --git a/hack/make/install-binary-daemon b/hack/make/install-binary-daemon deleted file mode 100644 index e80d8431fd..0000000000 --- a/hack/make/install-binary-daemon +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -set -e -rm -rf "$DEST" - -( - DEST="$(dirname $DEST)/binary-daemon" - source "${MAKEDIR}/.binary-setup" - install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}" - install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}" -) diff --git a/hack/make/install-script b/hack/make/install-script deleted file mode 100644 index feadac2f38..0000000000 --- a/hack/make/install-script +++ /dev/null @@ -1,63 +0,0 @@ -#!/bin/bash -set -e - -# This script modifies the install.sh script for domains and keys other than -# those used by the primary opensource releases. -# -# You can provide `url`, `yum_url`, `apt_url` and optionally `gpg_fingerprint` -# or `GPG_KEYID` as environment variables, or the defaults for open source are used. -# -# The lower-case variables are substituted into install.sh. -# -# gpg_fingerprint and GPG_KEYID are optional, defaulting to the opensource release -# key ("releasedocker"). Other GPG_KEYIDs will require you to mount a volume with -# the correct contents to /root/.gnupg. 
diff --git a/hack/make/install-binary b/hack/make/install-binary
deleted file mode 100644
index 82cbc79933..0000000000
--- a/hack/make/install-binary
+++ /dev/null
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-set -e
-rm -rf "$DEST"
-
-(
-	source "${MAKEDIR}/install-binary-client"
-)
-
-(
-	source "${MAKEDIR}/install-binary-daemon"
-)
diff --git a/hack/make/install-binary-client b/hack/make/install-binary-client
deleted file mode 100644
index 6c80452659..0000000000
--- a/hack/make/install-binary-client
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -e
-rm -rf "$DEST"
-
-(
-	DEST="$(dirname $DEST)/binary-client"
-	source "${MAKEDIR}/.binary-setup"
-	install_binary "${DEST}/${DOCKER_CLIENT_BINARY_NAME}"
-)
diff --git a/hack/make/install-binary-daemon b/hack/make/install-binary-daemon
deleted file mode 100644
index e80d8431fd..0000000000
--- a/hack/make/install-binary-daemon
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-set -e
-rm -rf "$DEST"
-
-(
-	DEST="$(dirname $DEST)/binary-daemon"
-	source "${MAKEDIR}/.binary-setup"
-	install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}"
-	install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}"
-)
diff --git a/hack/make/install-script b/hack/make/install-script
deleted file mode 100644
index feadac2f38..0000000000
--- a/hack/make/install-script
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/bin/bash
-set -e
-
-# This script modifies the install.sh script for domains and keys other than
-# those used by the primary opensource releases.
-#
-# You can provide `url`, `yum_url`, `apt_url` and optionally `gpg_fingerprint`
-# or `GPG_KEYID` as environment variables, or the defaults for open source are used.
-#
-# The lower-case variables are substituted into install.sh.
-#
-# gpg_fingerprint and GPG_KEYID are optional, defaulting to the opensource release
-# key ("releasedocker"). Other GPG_KEYIDs will require you to mount a volume with
-# the correct contents to /root/.gnupg.
-#
-# It outputs the modified `install.sh` file to $DOCKER_RELEASE_DIR (default: $DEST)
-#
-# Example usage:
-#
-# docker run \
-#	--rm \
-#	--privileged \
-#	-e "GPG_KEYID=deadbeef" \
-#	-e "GNUPGHOME=/root/.gnupg" \
-#	-v $HOME/.gnupg:/root/.gnupg \
-#	-v $(pwd):/go/src/github.com/docker/docker/bundles \
-#	"$IMAGE_DOCKER" \
-#	hack/make.sh install-script
-
-: ${DOCKER_RELEASE_DIR:=$DEST}
-: ${GPG_KEYID:=releasedocker}
-
-DEFAULT_URL="https://get.docker.com/"
-DEFAULT_APT_URL="https://apt.dockerproject.org"
-DEFAULT_YUM_URL="https://yum.dockerproject.org"
-DEFAULT_GPG_FINGERPRINT="58118E89F3A912897C070ADBF76221572C52609D"
-
-: ${url:=$DEFAULT_URL}
-: ${apt_url:=$DEFAULT_APT_URL}
-: ${yum_url:=$DEFAULT_YUM_URL}
-if [[ "$GPG_KEYID" == "releasedocker" ]] ; then
-	: ${gpg_fingerprint:=$DEFAULT_GPG_FINGERPRINT}
-fi
-
-DEST_FILE="$DOCKER_RELEASE_DIR/install.sh"
-
-bundle_install_script() {
-	mkdir -p "$DOCKER_RELEASE_DIR"
-
-	if [[ -z "$gpg_fingerprint" ]] ; then
-		# NOTE: if no key matching key is in /root/.gnupg, this will fail
-		gpg_fingerprint=$(gpg --with-fingerprint -k "$GPG_KEYID" | grep "Key fingerprint" | awk -F "=" '{print $2};' | tr -d ' ')
-	fi
-
-	cp hack/install.sh "$DEST_FILE"
-	sed -i.bak 's#^url=".*"$#url="'"$url"'"#' "$DEST_FILE"
-	sed -i.bak 's#^apt_url=".*"$#apt_url="'"$apt_url"'"#' "$DEST_FILE"
-	sed -i.bak 's#^yum_url=".*"$#yum_url="'"$yum_url"'"#' "$DEST_FILE"
-	sed -i.bak 's#^gpg_fingerprint=".*"$#gpg_fingerprint="'"$gpg_fingerprint"'"#' "$DEST_FILE"
-	rm "${DEST_FILE}.bak"
-}
-
-bundle_install_script
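Editorial note: the substitutions in bundle_install_script above are plain anchored sed replacements; a minimal sketch against a throwaway file, with a hypothetical mirror URL standing in for $url:

    echo 'url="https://get.docker.com/"' > /tmp/install.sh
    url='https://mirror.example.com/'  # hypothetical value
    sed -i.bak 's#^url=".*"$#url="'"$url"'"#' /tmp/install.sh
    cat /tmp/install.sh  # url="https://mirror.example.com/"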
-cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf" -Dir { - ArchiveDir "${APTDIR}"; - CacheDir "${APTDIR}/db"; -}; - -Default { - Packages::Compress ". gzip bzip2"; - Sources::Compress ". gzip bzip2"; - Contents::Compress ". gzip bzip2"; -}; - -TreeDefault { - BinCacheDB "packages-\$(SECTION)-\$(ARCH).db"; - Directory "pool/\$(SECTION)"; - Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages"; - SrcDirectory "pool/\$(SECTION)"; - Sources "\$(DIST)/\$(SECTION)/source/Sources"; - Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)"; - FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist"; -}; -EOF - -for dir in bundles/$VERSION/build-deb/*/; do - version="$(basename "$dir")" - suite="${version//debootstrap-}" - - cat <<-EOF - Tree "dists/${suite}" { - Sections "${components[*]}"; - Architectures "${arches[*]}"; - } - - EOF -done >> "$APTDIR/conf/apt-ftparchive.conf" - -cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf" -APT::FTPArchive::Release::Origin "Docker"; -APT::FTPArchive::Release::Components "${components[*]}"; -APT::FTPArchive::Release::Label "Docker APT Repository"; -APT::FTPArchive::Release::Architectures "${arches[*]}"; -EOF - -# release the debs -for dir in bundles/$VERSION/build-deb/*/; do - version="$(basename "$dir")" - codename="${version//debootstrap-}" - - tempdir="$(mktemp -d /tmp/tmp-docker-release-deb.XXXXXXXX)" - DEBFILE=( "$dir/docker-engine"*.deb ) - - # add the deb for each component for the distro version into the - # pool (if it is not there already) - mkdir -p "$APTDIR/pool/$component/d/docker-engine/" - for deb in ${DEBFILE[@]}; do - d=$(basename "$deb") - # We do not want to generate a new deb if it has already been - # copied into the APTDIR - if [ ! -f "$APTDIR/pool/$component/d/docker-engine/$d" ]; then - cp "$deb" "$tempdir/" - # if we have a $GPG_PASSPHRASE we may as well - # dpkg-sign before copying the deb into the pool - if [ ! 
-z "$GPG_PASSPHRASE" ]; then - dpkg-sig -g "--no-tty --passphrase '$GPG_PASSPHRASE'" \ - -k "$GPG_KEYID" --sign builder "$tempdir/$d" - fi - mv "$tempdir/$d" "$APTDIR/pool/$component/d/docker-engine/" - fi - done - - rm -rf "$tempdir" - - # build the right directory structure, needed for apt-ftparchive - for arch in "${arches[@]}"; do - for c in "${components[@]}"; do - mkdir -p "$APTDIR/dists/$codename/$c/binary-$arch" - done - done - - # update the filelist for this codename/component - find "$APTDIR/pool/$component" \ - -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" -done - -# run the apt-ftparchive commands so we can have pinning -apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" - -for dir in bundles/$VERSION/build-deb/*/; do - version="$(basename "$dir")" - codename="${version//debootstrap-}" - - apt-ftparchive \ - -c "$APTDIR/conf/docker-engine-release.conf" \ - -o "APT::FTPArchive::Release::Codename=$codename" \ - -o "APT::FTPArchive::Release::Suite=$codename" \ - release \ - "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" - - for arch in "${arches[@]}"; do - apt-ftparchive \ - -c "$APTDIR/conf/docker-engine-release.conf" \ - -o "APT::FTPArchive::Release::Codename=$codename" \ - -o "APT::FTPArchive::Release::Suite=$codename" \ - -o "APT::FTPArchive::Release::Components=$component" \ - -o "APT::FTPArchive::Release::Architecture=$arch" \ - release \ - "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" - done -done diff --git a/hack/make/release-rpm b/hack/make/release-rpm deleted file mode 100755 index 5c109d0745..0000000000 --- a/hack/make/release-rpm +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -set -e - -# This script creates the yum repos for the .rpm files generated by hack/make/build-rpm -# -# The following can then be used as a yum repo: -# http://yum.dockerproject.org/repo/$release/$distro/$distro-version -# -# For example: -# http://yum.dockerproject.org/repo/main/fedora/23 -# http://yum.dockerproject.org/repo/testing/centos/7 -# http://yum.dockerproject.org/repo/experimental/fedora/23 -# http://yum.dockerproject.org/repo/main/centos/7 -# -# ... and so on and so forth for the builds created by hack/make/build-rpm - -: ${DOCKER_RELEASE_DIR:=$DEST} -YUMDIR=$DOCKER_RELEASE_DIR/yum/repo -: ${GPG_KEYID:=releasedocker} - -# get the release -release="main" - -if [[ "$VERSION" == *-rc* ]]; then - release="testing" -fi - -if [ $DOCKER_EXPERIMENTAL ] || [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then - release="experimental" -fi - -# Setup the yum repo -for dir in bundles/$VERSION/build-rpm/*/; do - version="$(basename "$dir")" - suite="${version##*-}" - distro="${version%-*}" - - REPO=$YUMDIR/$release/$distro - - # if the directory does not exist, initialize the yum repo - if [[ ! -d $REPO/$suite/Packages ]]; then - mkdir -p "$REPO/$suite/Packages" - - createrepo --pretty "$REPO/$suite" - fi - - # path to rpms - RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/"*"/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm ) - - # if we have a $GPG_PASSPHRASE we may as well - # sign the rpms before adding to repo - if [ ! 
-z $GPG_PASSPHRASE ]; then - # export our key to rpm import - gpg --armor --export "$GPG_KEYID" > /tmp/gpg - rpm --import /tmp/gpg - - # sign the rpms - echo "yes" | setsid rpm \ - --define "_gpg_name $GPG_KEYID" \ - --define "_signature gpg" \ - --define "__gpg_check_password_cmd /bin/true" \ - --define "__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u '%{_gpg_name}' --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}" \ - --resign "${RPMFILE[@]}" - fi - - # copy the rpms to the packages folder - cp "${RPMFILE[@]}" "$REPO/$suite/Packages" - - # update the repo - createrepo --pretty --update "$REPO/$suite" -done diff --git a/hack/make/sign-repos b/hack/make/sign-repos deleted file mode 100755 index e0cebc6ab2..0000000000 --- a/hack/make/sign-repos +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/bash - -# This script signs the deliverables from release-deb and release-rpm -# with a designated GPG key. - -: ${DOCKER_RELEASE_DIR:=$DEST} -: ${GPG_KEYID:=releasedocker} -APTDIR=$DOCKER_RELEASE_DIR/apt/repo -YUMDIR=$DOCKER_RELEASE_DIR/yum/repo - -if [ -z "$GPG_PASSPHRASE" ]; then - echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts' - exit 1 -fi - -if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then - echo >&2 'release-rpm or release-deb must be run before sign-repos' - exit 1 -fi - -sign_packages(){ - # sign apt repo metadata - if [ -d $APTDIR ]; then - # create file with public key - gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/apt/gpg" - - # sign the repo metadata - for F in $(find $APTDIR -name Release); do - if test "$F" -nt "$F.gpg" ; then - gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ - --armor --sign --detach-sign \ - --batch --yes \ - --output "$F.gpg" "$F" - fi - inRelease="$(dirname "$F")/InRelease" - if test "$F" -nt "$inRelease" ; then - gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ - --clearsign \ - --batch --yes \ - --output "$inRelease" "$F" - fi - done - fi - - # sign yum repo metadata - if [ -d $YUMDIR ]; then - # create file with public key - gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/yum/gpg" - - # sign the repo metadata - for F in $(find $YUMDIR -name repomd.xml); do - if test "$F" -nt "$F.asc" ; then - gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \ - --armor --sign --detach-sign \ - --batch --yes \ - --output "$F.asc" "$F" - fi - done - fi -} - -sign_packages diff --git a/hack/make/test-deb-install b/hack/make/test-deb-install deleted file mode 100755 index aec5847600..0000000000 --- a/hack/make/test-deb-install +++ /dev/null @@ -1,71 +0,0 @@ -#!/bin/bash -# This script is used for testing install.sh and that it works for -# each of component of our apt and yum repos -set -e - -: ${DEB_DIR:="$(pwd)/bundles/$(cat VERSION)/build-deb"} - -if [[ ! -d "${DEB_DIR}" ]]; then - echo "you must first run `make deb` or hack/make/build-deb" - exit 1 -fi - -test_deb_install(){ - # test for each Dockerfile in contrib/builder - - builderDir="contrib/builder/deb/${PACKAGE_ARCH}" - pkgs=( $(find "${builderDir}/"*/ -type d) ) - if [ ! -z "$DOCKER_BUILD_PKGS" ]; then - pkgs=() - for p in $DOCKER_BUILD_PKGS; do - pkgs+=( "$builderDir/$p" ) - done - fi - for dir in "${pkgs[@]}"; do - [ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; } - local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" - local dir=$(basename "$dir") - - if [[ ! 
-d "${DEB_DIR}/${dir}" ]]; then - echo "No deb found for ${dir}" - exit 1 - fi - - local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) - cat <<-EOF > "${script}" - #!/bin/bash - set -e - set -x - - apt-get update && apt-get install -y apparmor - - dpkg -i /root/debs/*.deb || true - - apt-get install -yf - - /etc/init.d/apparmor start - - # this will do everything _except_ load the profile into the kernel - ( - cd /etc/apparmor.d - /sbin/apparmor_parser --skip-kernel-load docker-engine - ) - EOF - - chmod +x "${script}" - - echo "testing deb install for ${from}" - docker run --rm -i --privileged \ - -v ${DEB_DIR}/${dir}:/root/debs \ - -v ${script}:/install.sh \ - ${from} /install.sh - - rm -f ${script} - done -} - -( - bundle .integration-daemon-start - test_deb_install - bundle .integration-daemon-stop -) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/test-docker-py b/hack/make/test-docker-py deleted file mode 100644 index dece8315a3..0000000000 --- a/hack/make/test-docker-py +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -# subshell so that we can export PATH without breaking other things -( - bundle .integration-daemon-start - - dockerPy='/docker-py' - [ -d "$dockerPy" ] || { - dockerPy="$DEST/docker-py" - git clone https://github.com/docker/docker-py.git "$dockerPy" - } - - # exporting PYTHONPATH to import "docker" from our local docker-py - test_env PYTHONPATH="$dockerPy" py.test "$dockerPy/tests/integration" - - bundle .integration-daemon-stop -) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/test-install-script b/hack/make/test-install-script deleted file mode 100755 index 4782cbea88..0000000000 --- a/hack/make/test-install-script +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -# This script is used for testing install.sh and that it works for -# each of component of our apt and yum repos -set -e - -test_install_script(){ - # these are equivalent to main, testing, experimental components - # in the repos, but its the url that will do the conversion - components=( experimental test get ) - - for component in "${components[@]}"; do - # change url to specific component for testing - local test_url=https://${component}.docker.com - local script=$(mktemp /tmp/install-XXXXXXXXXX.sh) - sed "s,url='https://get.docker.com/',url='${test_url}/'," hack/install.sh > "${script}" - - chmod +x "${script}" - - # test for each Dockerfile in contrib/builder - for dir in contrib/builder/*/*/; do - local from="$(awk 'toupper($1) == "FROM" { print $2; exit }' "$dir/Dockerfile")" - - echo "running install.sh for ${component} with ${from}" - docker run --rm -i -v ${script}:/install.sh ${from} /install.sh - done - - rm -f ${script} - done -} - -test_install_script diff --git a/hack/make/test-integration-cli b/hack/make/test-integration-cli deleted file mode 100755 index 2b1685af1b..0000000000 --- a/hack/make/test-integration-cli +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash -set -e - -bundle_test_integration_cli() { - TESTFLAGS="$TESTFLAGS -check.v -check.timeout=${TIMEOUT} -test.timeout=360m" - go_test_dir ./integration-cli -} - -# subshell so that we can export PATH without breaking other things -( - bundle .integration-daemon-start - - bundle .integration-daemon-setup - - bundle_test_integration_cli - - bundle .integration-daemon-stop -) 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/test-old-apt-repo b/hack/make/test-old-apt-repo deleted file mode 100755 index bb20128e30..0000000000 --- a/hack/make/test-old-apt-repo +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -set -e - -versions=( 
1.3.3 1.4.1 1.5.0 1.6.2 ) - -install() { - local version=$1 - local tmpdir=$(mktemp -d /tmp/XXXXXXXXXX) - local dockerfile="${tmpdir}/Dockerfile" - cat <<-EOF > "$dockerfile" - FROM debian:jessie - ENV VERSION ${version} - RUN apt-get update && apt-get install -y \ - apt-transport-https \ - ca-certificates \ - --no-install-recommends - RUN echo "deb https://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list - RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \ - --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9 - RUN apt-get update && apt-get install -y \ - lxc-docker-\${VERSION} - EOF - - docker build --rm --force-rm --no-cache -t docker-old-repo:${version} -f $dockerfile $tmpdir -} - -for v in "${versions[@]}"; do - install "$v" -done diff --git a/hack/make/test-unit b/hack/make/test-unit deleted file mode 100644 index 0761676798..0000000000 --- a/hack/make/test-unit +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -set -e - -# Run Docker's test suite, including sub-packages, and store their output as a bundle -# If $TESTFLAGS is set in the environment, it is passed as extra arguments to 'go test'. -# You can use this to select certain tests to run, eg. -# -# TESTFLAGS='-test.run ^TestBuild$' ./hack/make.sh test-unit -# -bundle_test_unit() { - TESTFLAGS+=" -test.timeout=${TIMEOUT}" - date - if [ -z "$TESTDIRS" ]; then - TEST_PATH=./... - else - TEST_PATH=./${TESTDIRS} - fi - pkg_list=$(go list -e \ - -f '{{if ne .Name "github.com/docker/docker"}} - {{.ImportPath}} - {{end}}' \ - "${BUILDFLAGS[@]}" $TEST_PATH \ - | grep github.com/docker/docker \ - | grep -v github.com/docker/docker/vendor \ - | grep -v github.com/docker/docker/man \ - | grep -v github.com/docker/docker/integration-cli) - go test $COVER $GCCGOFLAGS -ldflags "$LDFLAGS" "${BUILDFLAGS[@]}" $TESTFLAGS $pkg_list -} - - -if [[ "$(go version)" == *"gccgo"* ]]; then - GCCGOFLAGS=-gccgoflags="-lpthread" -else - COVER=-cover -fi -bundle_test_unit 2>&1 | tee -a "$DEST/test.log" diff --git a/hack/make/tgz b/hack/make/tgz deleted file mode 100644 index b1abfef92f..0000000000 --- a/hack/make/tgz +++ /dev/null @@ -1,82 +0,0 @@ -#!/bin/bash - -CROSS="$DEST/../cross" - -set -e - -if [ ! 
-d "$CROSS/linux/amd64" ]; then - echo >&2 'error: binary and cross must be run before tgz' - false -fi - -( -for d in "$CROSS/"*/*; do - export GOARCH="$(basename "$d")" - export GOOS="$(basename "$(dirname "$d")")" - - source "${MAKEDIR}/.binary-setup" - - BINARY_NAME="${DOCKER_CLIENT_BINARY_NAME}-$VERSION" - DAEMON_BINARY_NAME="${DOCKER_DAEMON_BINARY_NAME}-$VERSION" - PROXY_BINARY_NAME="${DOCKER_PROXY_BINARY_NAME}-$VERSION" - BINARY_EXTENSION="$(export GOOS && binary_extension)" - if [ "$GOOS" = 'windows' ]; then - # if windows use a zip, not tgz - BUNDLE_EXTENSION=".zip" - IS_TAR="false" - else - BUNDLE_EXTENSION=".tgz" - IS_TAR="true" - fi - BINARY_FULLNAME="$BINARY_NAME$BINARY_EXTENSION" - DAEMON_BINARY_FULLNAME="$DAEMON_BINARY_NAME$BINARY_EXTENSION" - PROXY_BINARY_FULLNAME="$PROXY_BINARY_NAME$BINARY_EXTENSION" - mkdir -p "$DEST/$GOOS/$GOARCH" - TGZ="$DEST/$GOOS/$GOARCH/$BINARY_NAME$BUNDLE_EXTENSION" - - # The staging directory for the files in the tgz - BUILD_PATH="$DEST/build" - - # The directory that is at the root of the tar file - TAR_BASE_DIRECTORY="docker" - - # $DEST/build/docker - TAR_PATH="$BUILD_PATH/$TAR_BASE_DIRECTORY" - - # Copy the correct docker binary - mkdir -p $TAR_PATH - cp -L "$d/$BINARY_FULLNAME" "$TAR_PATH/${DOCKER_CLIENT_BINARY_NAME}${BINARY_EXTENSION}" - if [ -f "$d/$DAEMON_BINARY_FULLNAME" ]; then - cp -L "$d/$DAEMON_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_DAEMON_BINARY_NAME}${BINARY_EXTENSION}" - fi - if [ -f "$d/$PROXY_BINARY_FULLNAME" ]; then - cp -L "$d/$PROXY_BINARY_FULLNAME" "$TAR_PATH/${DOCKER_PROXY_BINARY_NAME}${BINARY_EXTENSION}" - fi - - # copy over all the containerd binaries - copy_containerd $TAR_PATH - - if [ "$IS_TAR" == "true" ]; then - echo "Creating tgz from $BUILD_PATH and naming it $TGZ" - tar --numeric-owner --owner 0 -C "$BUILD_PATH" -czf "$TGZ" $TAR_BASE_DIRECTORY - else - # ZIP needs to full absolute dir path, not the absolute path - ZIP=`pwd`"/$TGZ" - # keep track of where we are, for later. - pushd . - # go into the BUILD_PATH since zip does not have a -C equivalent. 
- cd $BUILD_PATH - echo "Creating zip from $BUILD_PATH and naming it $ZIP" - zip -q -r $ZIP $TAR_BASE_DIRECTORY - # go back to where we started - popd - fi - - hash_files "$TGZ" - - # cleanup after ourselves - rm -rf "$BUILD_PATH" - - echo "Created tgz: $TGZ" -done -) diff --git a/hack/make/ubuntu b/hack/make/ubuntu deleted file mode 100644 index 8de5d9ceac..0000000000 --- a/hack/make/ubuntu +++ /dev/null @@ -1,190 +0,0 @@ -#!/bin/bash - -PKGVERSION="${VERSION//-/'~'}" -# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better -if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then - GIT_UNIX="$(git log -1 --pretty='%at')" - GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')" - GIT_COMMIT="$(git log -1 --pretty='%h')" - GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}" - # GIT_VERSION is now something like 'git20150128.112847.0.17e840a' - PKGVERSION="$PKGVERSION~$GIT_VERSION" -fi - -# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false -# true -# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false -# true -# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false -# true - -# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a - -PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)" -PACKAGE_URL="https://www.docker.com/" -PACKAGE_MAINTAINER="support@docker.com" -PACKAGE_DESCRIPTION="Linux container runtime -Docker complements LXC with a high-level API which operates at the process -level. It runs unix processes with strong guarantees of isolation and -repeatability across servers. -Docker is a great building block for automating distributed systems: -large-scale web deployments, database clusters, continuous deployment systems, -private PaaS, service-oriented architectures, etc." -PACKAGE_LICENSE="Apache-2.0" - -# Build docker as an ubuntu package using FPM and REPREPRO (sue me). -# bundle_binary must be called first. 
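# For reference, the mangling above maps, for example:
#     VERSION=1.5.0      ->  PKGVERSION=1.5.0
#     VERSION=1.5.0-rc1  ->  PKGVERSION=1.5.0~rc1
#     VERSION=1.5.0-dev  ->  PKGVERSION=1.5.0~dev~git20150128.112847.0.17e840a
# and dpkg's rule that '~' sorts before everything keeps each pre-release
# older than the final release, as the comparisons above demonstrate.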
-bundle_ubuntu() { - DIR="$ABS_DEST/build" - - # Include our udev rules - mkdir -p "$DIR/etc/udev/rules.d" - cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/" - - # Include our init scripts - mkdir -p "$DIR/etc/init" - cp contrib/init/upstart/docker.conf "$DIR/etc/init/" - mkdir -p "$DIR/etc/init.d" - cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/" - mkdir -p "$DIR/etc/default" - cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker" - mkdir -p "$DIR/lib/systemd/system" - cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/" - - # Include contributed completions - mkdir -p "$DIR/etc/bash_completion.d" - cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/" - mkdir -p "$DIR/usr/share/zsh/vendor-completions" - cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/" - mkdir -p "$DIR/etc/fish/completions" - cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/" - - # Include man pages - make manpages - manRoot="$DIR/usr/share/man" - mkdir -p "$manRoot" - for manDir in man/man?; do - manBase="$(basename "$manDir")" # "man1" - for manFile in "$manDir"/*; do - manName="$(basename "$manFile")" # "docker-build.1" - mkdir -p "$manRoot/$manBase" - gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz" - done - done - - # Copy the binary - # This will fail if the binary bundle hasn't been built - mkdir -p "$DIR/usr/bin" - cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker" - - # Generate postinst/prerm/postrm scripts - cat > "$DEST/postinst" <<'EOF' -#!/bin/sh -set -e -set -u - -if [ "$1" = 'configure' ] && [ -z "$2" ]; then - if ! getent group docker > /dev/null; then - groupadd --system docker - fi -fi - -if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then - # we only need to do this if upstart isn't in charge - update-rc.d docker defaults > /dev/null || true -fi -if [ -n "$2" ]; then - _dh_action=restart -else - _dh_action=start -fi -service docker $_dh_action 2>/dev/null || true - -#DEBHELPER# -EOF - cat > "$DEST/prerm" <<'EOF' -#!/bin/sh -set -e -set -u - -service docker stop 2>/dev/null || true - -#DEBHELPER# -EOF - cat > "$DEST/postrm" <<'EOF' -#!/bin/sh -set -e -set -u - -if [ "$1" = "purge" ] ; then - update-rc.d docker remove > /dev/null || true -fi - -# In case this system is running systemd, we make systemd reload the unit files -# to pick up changes. 
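# (/run/systemd/system exists only when systemd is the running init, which is
# the same check systemd's own sd_booted(3) performs, so this block is a
# harmless no-op on upstart and sysvinit systems.)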
-if [ -d /run/systemd/system ] ; then - systemctl --system daemon-reload > /dev/null || true -fi - -#DEBHELPER# -EOF - # TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way - chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" - - ( - # switch directories so we create *.deb in the right folder - cd "$DEST" - - # create lxc-docker-VERSION package - fpm -s dir -C "$DIR" \ - --name "lxc-docker-$VERSION" --version "$PKGVERSION" \ - --after-install "$ABS_DEST/postinst" \ - --before-remove "$ABS_DEST/prerm" \ - --after-remove "$ABS_DEST/postrm" \ - --architecture "$PACKAGE_ARCHITECTURE" \ - --prefix / \ - --depends iptables \ - --deb-recommends aufs-tools \ - --deb-recommends ca-certificates \ - --deb-recommends git \ - --deb-recommends xz-utils \ - --deb-recommends 'cgroupfs-mount | cgroup-lite' \ - --deb-suggests apparmor \ - --description "$PACKAGE_DESCRIPTION" \ - --maintainer "$PACKAGE_MAINTAINER" \ - --conflicts docker \ - --conflicts docker.io \ - --conflicts lxc-docker-virtual-package \ - --provides lxc-docker \ - --provides lxc-docker-virtual-package \ - --replaces lxc-docker \ - --replaces lxc-docker-virtual-package \ - --url "$PACKAGE_URL" \ - --license "$PACKAGE_LICENSE" \ - --config-files /etc/udev/rules.d/80-docker.rules \ - --config-files /etc/init/docker.conf \ - --config-files /etc/init.d/docker \ - --config-files /etc/default/docker \ - --deb-compression gz \ - -t deb . - # TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available - - # create empty lxc-docker wrapper package - fpm -s empty \ - --name lxc-docker --version "$PKGVERSION" \ - --architecture "$PACKAGE_ARCHITECTURE" \ - --depends lxc-docker-$VERSION \ - --description "$PACKAGE_DESCRIPTION" \ - --maintainer "$PACKAGE_MAINTAINER" \ - --url "$PACKAGE_URL" \ - --license "$PACKAGE_LICENSE" \ - --deb-compression gz \ - -t deb - ) - - # clean up after ourselves so we have a clean output directory - rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm" - rm -r "$DIR" -} - -bundle_ubuntu diff --git a/hack/make/update-apt-repo b/hack/make/update-apt-repo deleted file mode 100755 index 7354a2ecff..0000000000 --- a/hack/make/update-apt-repo +++ /dev/null @@ -1,70 +0,0 @@ -#!/bin/bash -set -e - -# This script updates the apt repo in $DOCKER_RELEASE_DIR/apt/repo. -# This script is a "fix all" for any sort of problems that might have occurred with -# the Release or Package files in the repo. -# It should only be used in the rare case of extreme emergencies to regenerate -# Release and Package files for the apt repo. -# -# NOTE: Always be sure to re-sign the repo with hack/make/sign-repos after running -# this script. 
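# A typical (hypothetical) invocation, using the same environment variables
# the release scripts above rely on:
#
#     DOCKER_RELEASE_DIR=/volumes/repos COMPONENT=testing \
#         hack/make.sh update-apt-repo sign-repos
#
# hack/make.sh runs the named bundles in order, so the regenerated Release
# files get signed in the same pass.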
- -: ${DOCKER_RELEASE_DIR:=$DEST} -APTDIR=$DOCKER_RELEASE_DIR/apt/repo - -# supported arches/sections -arches=( amd64 i386 ) - -# Preserve existing components but don't add any non-existing ones -for component in main testing experimental ; do - if ls "$APTDIR/dists/*/$component" >/dev/null 2>&1 ; then - components+=( $component ) - fi -done - -dists=( $(find "${APTDIR}/dists" -maxdepth 1 -mindepth 1 -type d) ) - -# override component if it is set -if [ "$COMPONENT" ]; then - components=( $COMPONENT ) -fi - -# release the debs -for version in "${dists[@]}"; do - for component in "${components[@]}"; do - codename="${version//debootstrap-}" - - # update the filelist for this codename/component - find "$APTDIR/pool/$component" \ - -name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist" - done -done - -# run the apt-ftparchive commands so we can have pinning -apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf" - -for dist in "${dists[@]}"; do - version=$(basename "$dist") - for component in "${components[@]}"; do - codename="${version//debootstrap-}" - - apt-ftparchive \ - -o "APT::FTPArchive::Release::Codename=$codename" \ - -o "APT::FTPArchive::Release::Suite=$codename" \ - -c "$APTDIR/conf/docker-engine-release.conf" \ - release \ - "$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release" - - for arch in "${arches[@]}"; do - apt-ftparchive \ - -o "APT::FTPArchive::Release::Codename=$codename" \ - -o "APT::FTPArchive::Release::Suite=$codename" \ - -o "APT::FTPArchive::Release::Component=$component" \ - -o "APT::FTPArchive::Release::Architecture=$arch" \ - -c "$APTDIR/conf/docker-engine-release.conf" \ - release \ - "$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release" - done - done -done diff --git a/hack/make/validate-dco b/hack/make/validate-dco deleted file mode 100644 index 5ac98728f3..0000000000 --- a/hack/make/validate-dco +++ /dev/null @@ -1,54 +0,0 @@ -#!/bin/bash - -source "${MAKEDIR}/.validate" - -adds=$(validate_diff --numstat | awk '{ s += $1 } END { print s }') -dels=$(validate_diff --numstat | awk '{ s += $2 } END { print s }') -#notDocs="$(validate_diff --numstat | awk '$3 !~ /^docs\// { print $3 }')" - -: ${adds:=0} -: ${dels:=0} - -# "Username may only contain alphanumeric characters or dashes and cannot begin with a dash" -githubUsernameRegex='[a-zA-Z0-9][a-zA-Z0-9-]+' - -# https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work -dcoPrefix='Signed-off-by:' -dcoRegex="^(Docker-DCO-1.1-)?$dcoPrefix ([^<]+) <([^<>@]+@[^<>]+)>( \\(github: ($githubUsernameRegex)\\))?$" - -check_dco() { - grep -qE "$dcoRegex" -} - -if [ $adds -eq 0 -a $dels -eq 0 ]; then - echo '0 adds, 0 deletions; nothing to validate! :)' -else - commits=( $(validate_log --format='format:%H%n') ) - badCommits=() - for commit in "${commits[@]}"; do - if [ -z "$(git log -1 --format='format:' --name-status "$commit")" ]; then - # no content (ie, Merge commit, etc) - continue - fi - if ! git log -1 --format='format:%B' "$commit" | check_dco; then - badCommits+=( "$commit" ) - fi - done - if [ ${#badCommits[@]} -eq 0 ]; then - echo "Congratulations! All commits are properly signed with the DCO!" - else - { - echo "These commits do not have a proper '$dcoPrefix' marker:" - for commit in "${badCommits[@]}"; do - echo " - $commit" - done - echo - echo 'Please amend each commit to include a properly formatted DCO marker.' 
- echo - echo 'Visit the following URL for information about the Docker DCO:' - echo ' https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work' - echo - } >&2 - false - fi -fi diff --git a/hack/make/validate-default-seccomp b/hack/make/validate-default-seccomp deleted file mode 100644 index 4facec743d..0000000000 --- a/hack/make/validate-default-seccomp +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -source "${MAKEDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- 'profiles/seccomp' || true) ) -unset IFS - -if [ ${#files[@]} -gt 0 ]; then - # We run go generate and see if we have a diff afterwards - go generate ./profiles/seccomp/ >/dev/null - # Let's see if the working directory is clean - diffs="$(git status --porcelain -- profiles/seccomp 2>/dev/null)" - if [ "$diffs" ]; then - { - echo 'The result of go generate ./profiles/seccomp/ differs' - echo - echo "$diffs" - echo - echo 'Please re-run go generate ./profiles/seccomp/' - echo - } >&2 - false - else - echo 'Congratulations! Seccomp profile generation is done correctly.' - fi -fi diff --git a/hack/make/validate-gofmt b/hack/make/validate-gofmt deleted file mode 100644 index 7ad9e85576..0000000000 --- a/hack/make/validate-gofmt +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -source "${MAKEDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) -unset IFS - -badFiles=() -for f in "${files[@]}"; do - # we use "git show" here to validate that what's committed is formatted - if [ "$(git show "$VALIDATE_HEAD:$f" | gofmt -s -l)" ]; then - badFiles+=( "$f" ) - fi -done - -if [ ${#badFiles[@]} -eq 0 ]; then - echo 'Congratulations! All Go source files are properly formatted.' -else - { - echo "These files are not properly gofmt'd:" - for f in "${badFiles[@]}"; do - echo " - $f" - done - echo - echo 'Please reformat the above files using "gofmt -s -w" and commit the result.' - echo - } >&2 - false -fi diff --git a/hack/make/validate-lint b/hack/make/validate-lint deleted file mode 100644 index df7f2b007e..0000000000 --- a/hack/make/validate-lint +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -source "${MAKEDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) -unset IFS - -errors=() -for f in "${files[@]}"; do - failedLint=$(golint "$f") - if [ "$failedLint" ]; then - errors+=( "$failedLint" ) - fi -done - -if [ ${#errors[@]} -eq 0 ]; then - echo 'Congratulations! All Go source files have been linted.' -else - { - echo "Errors from golint:" - for err in "${errors[@]}"; do - echo "$err" - done - echo - echo 'Please fix the above errors. You can test via "golint" and commit the result.' - echo - } >&2 - false -fi diff --git a/hack/make/validate-pkg b/hack/make/validate-pkg deleted file mode 100644 index d5843417e0..0000000000 --- a/hack/make/validate-pkg +++ /dev/null @@ -1,32 +0,0 @@ -#!/bin/bash -set -e - -source "${MAKEDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- 'pkg/*.go' || true) ) -unset IFS - -badFiles=() -for f in "${files[@]}"; do - IFS=$'\n' - badImports=( $(go list -e -f '{{ join .Deps "\n" }}' "$f" | sort -u | grep -vE '^github.com/docker/docker/pkg/' | grep -E '^github.com/docker/docker' || true) ) - unset IFS - - for import in "${badImports[@]}"; do - badFiles+=( "$f imports $import" ) - done -done - -if [ ${#badFiles[@]} -eq 0 ]; then - echo 'Congratulations! "./pkg/..."
is safely isolated from internal code.' -else - { - echo 'These files import internal code: (either directly or indirectly)' - for f in "${badFiles[@]}"; do - echo " - $f" - done - echo - } >&2 - false -fi diff --git a/hack/make/validate-test b/hack/make/validate-test deleted file mode 100644 index 8dc86f11d7..0000000000 --- a/hack/make/validate-test +++ /dev/null @@ -1,35 +0,0 @@ -#!/bin/bash - -# Make sure we're not using Go's testing package any more in integration-cli - -source "${MAKEDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- 'integration-cli/*.go' || true) ) -unset IFS - -badFiles=() -for f in "${files[@]}"; do - # skip check_test.go since it *does* use the testing package - if [ "$f" = "integration-cli/check_test.go" ]; then - continue - fi - - # we use "git show" here to validate that what's committed doesn't contain golang built-in testing - if git show "$VALIDATE_HEAD:$f" | grep -q testing.T; then - badFiles+=( "$f" ) - fi -done - -if [ ${#badFiles[@]} -eq 0 ]; then - echo 'Congratulations! No testing.T found.' -else - { - echo "These files use the wrong testing infrastructure:" - for f in "${badFiles[@]}"; do - echo " - $f" - done - echo - } >&2 - false -fi diff --git a/hack/make/validate-toml b/hack/make/validate-toml deleted file mode 100644 index f6393c854d..0000000000 --- a/hack/make/validate-toml +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -source "${MAKEDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- 'MAINTAINERS' || true) ) -unset IFS - -badFiles=() -for f in "${files[@]}"; do - # we use "git show" here to validate that what's committed has valid toml syntax - if ! git show "$VALIDATE_HEAD:$f" | tomlv /proc/self/fd/0 ; then - badFiles+=( "$f" ) - fi -done - -if [ ${#badFiles[@]} -eq 0 ]; then - echo 'Congratulations! All toml source files changed here have valid syntax.' -else - { - echo "These files are not valid toml:" - for f in "${badFiles[@]}"; do - echo " - $f" - done - echo - echo 'Please reformat the above files as valid toml' - echo - } >&2 - false -fi diff --git a/hack/make/validate-vendor b/hack/make/validate-vendor deleted file mode 100644 index 7c2cf33c66..0000000000 --- a/hack/make/validate-vendor +++ /dev/null @@ -1,27 +0,0 @@ -#!/bin/bash - -source "${MAKEDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- 'hack/vendor.sh' 'hack/.vendor-helpers.sh' 'vendor/' || true) ) -unset IFS - -if [ ${#files[@]} -gt 0 ]; then - # We run vendor.sh and see if we have a diff afterwards - ./hack/vendor.sh >/dev/null - # Let's see if the working directory is clean - diffs="$(git status --porcelain -- vendor 2>/dev/null)" - if [ "$diffs" ]; then - { - echo 'The result of ./hack/vendor.sh differs' - echo - echo "$diffs" - echo - echo 'Please vendor your package with ./hack/vendor.sh.' - echo - } >&2 - false - else - echo 'Congratulations! All vendoring changes are done the right way.' - fi -fi diff --git a/hack/make/validate-vet b/hack/make/validate-vet deleted file mode 100644 index d543509a53..0000000000 --- a/hack/make/validate-vet +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash - -source "${MAKEDIR}/.validate" - -IFS=$'\n' -files=( $(validate_diff --diff-filter=ACMR --name-only -- '*.go' | grep -v '^vendor/' || true) ) -unset IFS - -errors=() -for f in "${files[@]}"; do - failedVet=$(go vet "$f") - if [ "$failedVet" ]; then - errors+=( "$failedVet" ) - fi -done - - -if [ ${#errors[@]} -eq 0 ]; then - echo 'Congratulations!
All Go source files have been vetted.' -else - { - echo "Errors from go vet:" - for err in "${errors[@]}"; do - echo " - $err" - done - echo - echo 'Please fix the above errors. You can test via "go vet" and commit the result.' - echo - } >&2 - false -fi diff --git a/hack/make/win b/hack/make/win deleted file mode 100644 index f9f4111276..0000000000 --- a/hack/make/win +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -set -e - -# explicit list of os/arch combos that support being a daemon -declare -A daemonSupporting -daemonSupporting=( - [linux/amd64]=1 - [windows/amd64]=1 -) -platform="windows/amd64" -export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION -mkdir -p "$DEST" -ABS_DEST="$(cd "$DEST" && pwd -P)" -export GOOS=${platform%/*} -export GOARCH=${platform##*/} -if [ -z "${daemonSupporting[$platform]}" ]; then - export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms - export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported -fi -source "${MAKEDIR}/binary" diff --git a/hack/release.sh b/hack/release.sh deleted file mode 100755 index 9ed3aeea4a..0000000000 --- a/hack/release.sh +++ /dev/null @@ -1,319 +0,0 @@ -#!/usr/bin/env bash -set -e - -# This script looks for bundles built by make.sh, and releases them on a -# public S3 bucket. -# -# Bundles should be available for the VERSION string passed as argument. -# -# The correct way to call this script is inside a container built by the -# official Dockerfile at the root of the Docker source code. The Dockerfile, -# make.sh and release.sh should all be from the same source code revision. - -set -o pipefail - -# Print a usage message and exit. -usage() { - cat >&2 <<'EOF' -To run, I need: -- to be in a container generated by the Dockerfile at the top of the Docker - repository; -- to be provided with the location of an S3 bucket and path, in - environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: ''); -- to be provided with AWS credentials for this S3 bucket, in environment - variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY; -- a generous amount of good will and nice manners. 
-The canonical way to run me is to run the image produced by the Dockerfile: e.g.:" - -docker run -e AWS_S3_BUCKET=test.docker.com \ - -e AWS_ACCESS_KEY_ID \ - -e AWS_SECRET_ACCESS_KEY \ - -e AWS_DEFAULT_REGION \ - -it --privileged \ - docker ./hack/release.sh -EOF - exit 1 -} - -[ "$AWS_S3_BUCKET" ] || usage -[ "$AWS_ACCESS_KEY_ID" ] || usage -[ "$AWS_SECRET_ACCESS_KEY" ] || usage -[ -d /go/src/github.com/docker/docker ] || usage -cd /go/src/github.com/docker/docker -[ -x hack/make.sh ] || usage - -export AWS_DEFAULT_REGION -: ${AWS_DEFAULT_REGION:=us-west-1} - -RELEASE_BUNDLES=( - binary - cross - tgz -) - -if [ "$1" != '--release-regardless-of-test-failure' ]; then - RELEASE_BUNDLES=( - test-unit - "${RELEASE_BUNDLES[@]}" - test-integration-cli - ) -fi - -VERSION=$(< VERSION) -BUCKET=$AWS_S3_BUCKET -BUCKET_PATH=$BUCKET -[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH - -if command -v git &> /dev/null && git rev-parse &> /dev/null; then - if [ -n "$(git status --porcelain --untracked-files=no)" ]; then - echo "You cannot run the release script on a repo with uncommitted changes" - usage - fi -fi - -# These are the 2 keys we've used to sign the deb's -# release (get.docker.com) -# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9" -# test (test.docker.com) -# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6" - -setup_s3() { - echo "Setting up S3" - # Try creating the bucket. Ignore errors (it might already exist). - aws s3 mb "s3://$BUCKET" 2>/dev/null || true - # Check access to the bucket. - aws s3 ls "s3://$BUCKET" >/dev/null - # Make the bucket accessible through website endpoints. - aws s3 website --index-document index --error-document error "s3://$BUCKET" -} - -# write_to_s3 uploads the contents of standard input to the specified S3 url. -write_to_s3() { - DEST=$1 - F=`mktemp` - cat > "$F" - aws s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST" - rm -f "$F" -} - -s3_url() { - case "$BUCKET" in - get.docker.com|test.docker.com|experimental.docker.com) - echo "https://$BUCKET_PATH" - ;; - *) - BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com" - if [[ -n "$AWS_S3_BUCKET_PATH" ]] ; then - echo "$BASE_URL/$AWS_S3_BUCKET_PATH" - else - echo "$BASE_URL" - fi - ;; - esac -} - -build_all() { - echo "Building release" - if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then - echo >&2 - echo >&2 'The build or tests appear to have failed.' - echo >&2 - echo >&2 'You, as the release maintainer, now have a couple options:' - echo >&2 '- delay release and fix issues' - echo >&2 '- delay release and fix issues' - echo >&2 '- did we mention how important this is? issues need fixing :)' - echo >&2 - echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,' - echo >&2 ' really knows all the hairy problems at hand with the current release' - echo >&2 ' issues) may bypass this checking by running this script again with the' - echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip' - echo >&2 ' running the test suite, and will only build the binaries and packages. Please' - echo >&2 ' avoid using this if at all possible.' - echo >&2 - echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass' - echo >&2 ' should be used. If there are release issues, we should always err on the' - echo >&2 ' side of caution.' 
- echo >&2 - exit 1 - fi -} - -upload_release_build() { - src="$1" - dst="$2" - latest="$3" - - echo - echo "Uploading $src" - echo " to $dst" - echo - aws s3 cp --follow-symlinks --acl public-read "$src" "$dst" - if [ "$latest" ]; then - echo - echo "Copying to $latest" - echo - aws s3 cp --acl public-read "$dst" "$latest" - fi - - # get hash files too (see hash_files() in hack/make.sh) - for hashAlgo in md5 sha256; do - if [ -e "$src.$hashAlgo" ]; then - echo - echo "Uploading $src.$hashAlgo" - echo " to $dst.$hashAlgo" - echo - aws s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo" - if [ "$latest" ]; then - echo - echo "Copying to $latest.$hashAlgo" - echo - aws s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo" - fi - fi - done -} - -release_build() { - echo "Releasing binaries" - GOOS=$1 - GOARCH=$2 - - binDir=bundles/$VERSION/cross/$GOOS/$GOARCH - tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH - binary=docker-$VERSION - zipExt=".tgz" - binaryExt="" - tgz=$binary$zipExt - - latestBase= - if [ -z "$NOLATEST" ]; then - latestBase=docker-latest - fi - - # we need to map our GOOS and GOARCH to uname values - # see https://en.wikipedia.org/wiki/Uname - # ie, GOOS=linux -> "uname -s"=Linux - - s3Os=$GOOS - case "$s3Os" in - darwin) - s3Os=Darwin - ;; - freebsd) - s3Os=FreeBSD - ;; - linux) - s3Os=Linux - ;; - windows) - # this is windows, use the .zip and .exe extensions for the files. - s3Os=Windows - zipExt=".zip" - binaryExt=".exe" - tgz=$binary$zipExt - binary+=$binaryExt - ;; - *) - echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'" - exit 1 - ;; - esac - - s3Arch=$GOARCH - case "$s3Arch" in - amd64) - s3Arch=x86_64 - ;; - 386) - s3Arch=i386 - ;; - arm) - s3Arch=armel - # someday, we might potentially support multiple GOARM values, in which case we might get armhf here too - ;; - *) - echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'" - exit 1 - ;; - esac - - s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch" - # latest= - latestTgz= - if [ "$latestBase" ]; then - # commented out since we aren't uploading binaries right now. - # latest="$s3Dir/$latestBase$binaryExt" - # we don't include the $binaryExt because we don't want docker.exe.zip - latestTgz="$s3Dir/$latestBase$zipExt" - fi - - if [ ! -f "$tgzDir/$tgz" ]; then - echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?" - exit 1 - fi - # disable binary uploads for now.
Only providing tgz downloads - # upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest" - upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz" -} - -# Upload binaries and tgz files to S3 -release_binaries() { - [ -e "bundles/$VERSION/cross/linux/amd64/docker-$VERSION" ] || { - echo >&2 './hack/make.sh must be run before release_binaries' - exit 1 - } - - for d in bundles/$VERSION/cross/*/*; do - GOARCH="$(basename "$d")" - GOOS="$(basename "$(dirname "$d")")" - release_build "$GOOS" "$GOARCH" - done - - # TODO create redirect from builds/*/i686 to builds/*/i386 - - cat <&2 echo "error: unexpected parameters" - exit 1 - ;; -esac - -# the following lines are in sorted order, FYI -clone git github.com/Azure/go-ansiterm 388960b655244e76e24c75f48631564eaefade62 -clone git github.com/Microsoft/hcsshim v0.3.4 -clone git github.com/Microsoft/go-winio v0.3.4 -clone git github.com/Sirupsen/logrus v0.10.0 # logrus is a common dependency among multiple deps -clone git github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a -clone git github.com/go-check/check 03a4d9dcf2f92eae8e90ed42aa2656f63fdd0b14 https://github.com/cpuguy83/check.git -clone git github.com/gorilla/context 14f550f51a -clone git github.com/gorilla/mux e444e69cbd -clone git github.com/kr/pty 5cf931ef8f -clone git github.com/mattn/go-shellwords v1.0.0 -clone git github.com/mattn/go-sqlite3 v1.1.0 -clone git github.com/tchap/go-patricia v2.1.0 -clone git github.com/vdemeester/shakers 24d7f1d6a71aa5d9cbe7390e4afb66b7eef9e1b3 -# forked golang.org/x/net package includes a patch for lazy loading trace templates -clone git golang.org/x/net 2beffdc2e92c8a3027590f898fe88f69af48a3f8 https://github.com/tonistiigi/net.git -clone git golang.org/x/sys eb2c74142fd19a79b3f237334c7384d5167b1b46 https://github.com/golang/sys.git -clone git github.com/docker/go-units 651fc226e7441360384da338d0fd37f2440ffbe3 -clone git github.com/docker/go-connections fa2850ff103453a9ad190da0df0af134f0314b3d -clone git github.com/docker/engine-api 4eca04ae18f4f93f40196a17b9aa6e11262a7269 -clone git github.com/RackSec/srslog 365bf33cd9acc21ae1c355209865f17228ca534e -clone git github.com/imdario/mergo 0.2.1 - -#get libnetwork packages -clone git github.com/docker/libnetwork 0318eb40366b7f8253797e370353c857b5041fef -clone git github.com/docker/go-events afb2b9f2c23f33ada1a22b03651775fdc65a5089 -clone git github.com/armon/go-radix e39d623f12e8e41c7b5529e9a9dd67a1e2261f80 -clone git github.com/armon/go-metrics eb0af217e5e9747e41dd5303755356b62d28e3ec -clone git github.com/hashicorp/go-msgpack 71c2886f5a673a35f909803f38ece5810165097b -clone git github.com/hashicorp/memberlist 88ac4de0d1a0ca6def284b571342db3b777a4c37 -clone git github.com/hashicorp/go-multierror fcdddc395df1ddf4247c69bd436e84cfa0733f7e -clone git github.com/hashicorp/serf 598c54895cc5a7b1a24a398d635e8c0ea0959870 -clone git github.com/docker/libkv v0.2.1 -clone git github.com/vishvananda/netns 604eaf189ee867d8c147fafc28def2394e878d25 -clone git github.com/vishvananda/netlink e73bad418fd727ed3a02830b1af1ad0283a1de6c -clone git github.com/BurntSushi/toml f706d00e3de6abe700c994cdd545a1a4915af060 -clone git github.com/samuel/go-zookeeper d0e0d8e11f318e000a8cc434616d69e329edc374 -clone git github.com/deckarep/golang-set ef32fa3046d9f249d399f98ebaf9be944430fd1d -clone git github.com/coreos/etcd v2.3.2 -fix_rewritten_imports github.com/coreos/etcd -clone git github.com/ugorji/go f1f1a805ed361a0e078bb537e4ea78cd37dcf065 -clone git github.com/hashicorp/consul v0.5.2 -clone git 
github.com/boltdb/bolt fff57c100f4dea1905678da7e90d92429dff2904 -clone git github.com/miekg/dns 75e6e86cc601825c5dbcd4e0c209eab180997cd7 - -# get graph and distribution packages -clone git github.com/docker/distribution 07f32ac1831ed0fc71960b7da5d6bb83cb6881b5 -clone git github.com/vbatts/tar-split v0.9.11 - -# get go-zfs packages -clone git github.com/mistifyio/go-zfs 22c9b32c84eb0d0c6f4043b6e90fc94073de92fa -clone git github.com/pborman/uuid v1.0 - -# get desired notary commit, might also need to be updated in Dockerfile -clone git github.com/docker/notary v0.3.0 - -clone git google.golang.org/grpc ab0be5212fb225475f2087566eded7da5d727960 https://github.com/grpc/grpc-go.git -clone git github.com/miekg/pkcs11 df8ae6ca730422dba20c768ff38ef7d79077a59f -clone git github.com/docker/go v1.5.1-1-1-gbaf439e -clone git github.com/agl/ed25519 d2b94fd789ea21d12fac1a4443dd3a3f79cda72c - -clone git github.com/opencontainers/runc cc29e3dded8e27ba8f65738f40d251c885030a28 # libcontainer -clone git github.com/opencontainers/specs v1.0.0-rc1 # specs -clone git github.com/seccomp/libseccomp-golang 32f571b70023028bd57d9288c20efbcb237f3ce0 -# libcontainer deps (see src/github.com/opencontainers/runc/Godeps/Godeps.json) -clone git github.com/coreos/go-systemd v4 -clone git github.com/godbus/dbus v4.0.0 -clone git github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852 -clone git github.com/golang/protobuf 3c84672111d91bb5ac31719e112f9f7126a0e26e - -# gelf logging driver deps -clone git github.com/Graylog2/go-gelf aab2f594e4585d43468ac57287b0dece9d806883 - -clone git github.com/fluent/fluent-logger-golang v1.2.0 -# fluent-logger-golang deps -clone git github.com/philhofer/fwd 899e4efba8eaa1fea74175308f3fae18ff3319fa -clone git github.com/tinylib/msgp 75ee40d2601edf122ef667e2a07d600d4c44490c - -# fsnotify -clone git gopkg.in/fsnotify.v1 v1.2.11 - -# awslogs deps -clone git github.com/aws/aws-sdk-go v1.1.30 -clone git github.com/go-ini/ini 060d7da055ba6ec5ea7a31f116332fe5efa04ce0 -clone git github.com/jmespath/go-jmespath 0b12d6b521d83fc7f755e7cfc1b1fbdd35a01a74 - -# gcplogs deps -clone git golang.org/x/oauth2 2baa8a1b9338cf13d9eeb27696d761155fa480be https://github.com/golang/oauth2.git -clone git google.golang.org/api dc6d2353af16e2a2b0ff6986af051d473a4ed468 https://code.googlesource.com/google-api-go-client -clone git google.golang.org/cloud dae7e3d993bc3812a2185af60552bb6b847e52a0 https://code.googlesource.com/gocloud - -# native credentials -clone git github.com/docker/docker-credential-helpers v0.3.0 - -# containerd -clone git github.com/docker/containerd v0.2.4 - -# cluster -clone git github.com/docker/swarmkit de507ff6b0ee99002d56a784e095c753eab1ad61 -clone git github.com/golang/mock bd3c8e81be01eef76d4b503f5e687d2d1354d2d9 -clone git github.com/gogo/protobuf 43a2e0b1c32252bfbbdf81f7faa7a88fb3fa4028 -clone git github.com/cloudflare/cfssl b895b0549c0ff676f92cf09ba971ae02bb41367b -clone git github.com/google/certificate-transparency 025a5cab06f6a819c455d9fdc9e2a1b6d0982284 -clone git golang.org/x/crypto 3fbbcd23f1cb824e69491a5930cfeff09b12f4d2 https://github.com/golang/crypto.git -clone git github.com/mreiferson/go-httpclient 63fe23f7434723dc904c901043af07931f293c47 -clone git github.com/hashicorp/go-memdb 98f52f52d7a476958fa9da671354d270c50661a7 -clone git github.com/hashicorp/go-immutable-radix 8e8ed81f8f0bf1bdd829593fdd5c29922c1ea990 -clone git github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4 -clone git github.com/coreos/pkg 2c77715c4df99b5420ffcae14ead08f52104065d 
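# (Each entry in this list follows the same shape:
#     clone <vcs> <import path> <tag or commit> [alternate clone URL]
# where the optional URL points at a fork, as with the patched
# golang.org/x/net and spf13/cobra entries.)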
-clone git github.com/pivotal-golang/clock 3fd3c1944c59d9742e1cd333672181cd1a6f9fa0 -clone git github.com/prometheus/client_golang e51041b3fa41cece0dca035740ba6411905be473 -clone git github.com/beorn7/perks b965b613227fddccbfffe13eae360ed3fa822f8d -clone git github.com/prometheus/client_model fa8ad6fec33561be4280a8f0514318c79d7f6cb6 -clone git github.com/prometheus/common ffe929a3f4c4faeaa10f2b9535c2b1be3ad15650 -clone git github.com/prometheus/procfs 454a56f35412459b5e684fd5ec0f9211b94f002a -clone hg bitbucket.org/ww/goautoneg 75cd24fc2f2c2a2088577d12123ddee5f54e0675 -clone git github.com/matttproud/golang_protobuf_extensions fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a -clone git github.com/pkg/errors 01fa4104b9c248c8945d14d9f128454d5b28d595 - -# cli -clone git github.com/spf13/cobra 75205f23b3ea70dc7ae5e900d074e010c23c37e9 https://github.com/dnephin/cobra.git -clone git github.com/spf13/pflag cb88ea77998c3f024757528e3305022ab50b43be -clone git github.com/inconshreveable/mousetrap 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -clone git github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff - -clean diff --git a/image/fs.go b/image/fs.go deleted file mode 100644 index 955e1b8534..0000000000 --- a/image/fs.go +++ /dev/null @@ -1,175 +0,0 @@ -package image - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/ioutils" -) - -// IDWalkFunc is function called by StoreBackend.Walk -type IDWalkFunc func(id ID) error - -// StoreBackend provides interface for image.Store persistence -type StoreBackend interface { - Walk(f IDWalkFunc) error - Get(id ID) ([]byte, error) - Set(data []byte) (ID, error) - Delete(id ID) error - SetMetadata(id ID, key string, data []byte) error - GetMetadata(id ID, key string) ([]byte, error) - DeleteMetadata(id ID, key string) error -} - -// fs implements StoreBackend using the filesystem. -type fs struct { - sync.RWMutex - root string -} - -const ( - contentDirName = "content" - metadataDirName = "metadata" -) - -// NewFSStoreBackend returns new filesystem based backend for image.Store -func NewFSStoreBackend(root string) (StoreBackend, error) { - return newFSStore(root) -} - -func newFSStore(root string) (*fs, error) { - s := &fs{ - root: root, - } - if err := os.MkdirAll(filepath.Join(root, contentDirName, string(digest.Canonical)), 0700); err != nil { - return nil, err - } - if err := os.MkdirAll(filepath.Join(root, metadataDirName, string(digest.Canonical)), 0700); err != nil { - return nil, err - } - return s, nil -} - -func (s *fs) contentFile(id ID) string { - dgst := digest.Digest(id) - return filepath.Join(s.root, contentDirName, string(dgst.Algorithm()), dgst.Hex()) -} - -func (s *fs) metadataDir(id ID) string { - dgst := digest.Digest(id) - return filepath.Join(s.root, metadataDirName, string(dgst.Algorithm()), dgst.Hex()) -} - -// Walk calls the supplied callback for each image ID in the storage backend. 
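// (On disk the store is laid out as:
//	<root>/content/sha256/<hex digest>        - the blob itself
//	<root>/metadata/sha256/<hex digest>/<key> - one file per metadata key
// so Walk only needs to list the content directory.)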
-func (s *fs) Walk(f IDWalkFunc) error { - // Only Canonical digest (sha256) is currently supported - s.RLock() - dir, err := ioutil.ReadDir(filepath.Join(s.root, contentDirName, string(digest.Canonical))) - s.RUnlock() - if err != nil { - return err - } - for _, v := range dir { - dgst := digest.NewDigestFromHex(string(digest.Canonical), v.Name()) - if err := dgst.Validate(); err != nil { - logrus.Debugf("Skipping invalid digest %s: %s", dgst, err) - continue - } - if err := f(ID(dgst)); err != nil { - return err - } - } - return nil -} - -// Get returns the content stored under a given ID. -func (s *fs) Get(id ID) ([]byte, error) { - s.RLock() - defer s.RUnlock() - - return s.get(id) -} - -func (s *fs) get(id ID) ([]byte, error) { - content, err := ioutil.ReadFile(s.contentFile(id)) - if err != nil { - return nil, err - } - - // todo: maybe optional - if ID(digest.FromBytes(content)) != id { - return nil, fmt.Errorf("failed to verify image: %v", id) - } - - return content, nil -} - -// Set stores content under a given ID. -func (s *fs) Set(data []byte) (ID, error) { - s.Lock() - defer s.Unlock() - - if len(data) == 0 { - return "", fmt.Errorf("Invalid empty data") - } - - id := ID(digest.FromBytes(data)) - if err := ioutils.AtomicWriteFile(s.contentFile(id), data, 0600); err != nil { - return "", err - } - - return id, nil -} - -// Delete removes content and metadata files associated with the ID. -func (s *fs) Delete(id ID) error { - s.Lock() - defer s.Unlock() - - if err := os.RemoveAll(s.metadataDir(id)); err != nil { - return err - } - if err := os.Remove(s.contentFile(id)); err != nil { - return err - } - return nil -} - -// SetMetadata sets metadata for a given ID. It fails if there's no base file. -func (s *fs) SetMetadata(id ID, key string, data []byte) error { - s.Lock() - defer s.Unlock() - if _, err := s.get(id); err != nil { - return err - } - - baseDir := filepath.Join(s.metadataDir(id)) - if err := os.MkdirAll(baseDir, 0700); err != nil { - return err - } - return ioutils.AtomicWriteFile(filepath.Join(s.metadataDir(id), key), data, 0600) -} - -// GetMetadata returns metadata for a given ID. -func (s *fs) GetMetadata(id ID, key string) ([]byte, error) { - s.RLock() - defer s.RUnlock() - - if _, err := s.get(id); err != nil { - return nil, err - } - return ioutil.ReadFile(filepath.Join(s.metadataDir(id), key)) -} - -// DeleteMetadata removes the metadata associated with an ID. 
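// (os.RemoveAll reports success for a path that does not exist, so deleting
// a metadata key that was never set is not an error.)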
-func (s *fs) DeleteMetadata(id ID, key string) error { - s.Lock() - defer s.Unlock() - - return os.RemoveAll(filepath.Join(s.metadataDir(id), key)) -} diff --git a/image/fs_test.go b/image/fs_test.go deleted file mode 100644 index 1a6f849f6b..0000000000 --- a/image/fs_test.go +++ /dev/null @@ -1,384 +0,0 @@ -package image - -import ( - "bytes" - "crypto/rand" - "crypto/sha256" - "encoding/hex" - "errors" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/docker/distribution/digest" -) - -func TestFSGetSet(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testGetSet(t, fs) -} - -func TestFSGetInvalidData(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - id, err := fs.Set([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - - dgst := digest.Digest(id) - - if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, string(dgst.Algorithm()), dgst.Hex()), []byte("foobar2"), 0600); err != nil { - t.Fatal(err) - } - - _, err = fs.Get(id) - if err == nil { - t.Fatal("Expected get to fail after data modification.") - } -} - -func TestFSInvalidSet(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - id := digest.FromBytes([]byte("foobar")) - err = os.Mkdir(filepath.Join(tmpdir, contentDirName, string(id.Algorithm()), id.Hex()), 0700) - if err != nil { - t.Fatal(err) - } - - _, err = fs.Set([]byte("foobar")) - if err == nil { - t.Fatal("Expecting error from invalid filesystem data.") - } -} - -func TestFSInvalidRoot(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - tcases := []struct { - root, invalidFile string - }{ - {"root", "root"}, - {"root", "root/content"}, - {"root", "root/metadata"}, - } - - for _, tc := range tcases { - root := filepath.Join(tmpdir, tc.root) - filePath := filepath.Join(tmpdir, tc.invalidFile) - err := os.MkdirAll(filepath.Dir(filePath), 0700) - if err != nil { - t.Fatal(err) - } - f, err := os.Create(filePath) - if err != nil { - t.Fatal(err) - } - f.Close() - - _, err = NewFSStoreBackend(root) - if err == nil { - t.Fatalf("Expected error from root %q and invlid file %q", tc.root, tc.invalidFile) - } - - os.RemoveAll(root) - } - -} - -func testMetadataGetSet(t *testing.T, store StoreBackend) { - id, err := store.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } - id2, err := store.Set([]byte("bar")) - if err != nil { - t.Fatal(err) - } - - tcases := []struct { - id ID - key string - value []byte - }{ - {id, "tkey", []byte("tval1")}, - {id, "tkey2", []byte("tval2")}, - {id2, "tkey", []byte("tval3")}, - } - - for _, tc := range tcases { - err = store.SetMetadata(tc.id, tc.key, tc.value) - if err != nil { - t.Fatal(err) - } - - actual, err := store.GetMetadata(tc.id, tc.key) - if err != nil { - t.Fatal(err) - } - if bytes.Compare(actual, tc.value) != 0 { - t.Fatalf("Metadata expected %q, got %q", tc.value, actual) - } - } - - _, err = store.GetMetadata(id2, "tkey2") - if err == nil { - t.Fatal("Expected error for getting metadata for unknown key") - } - - id3 := 
digest.FromBytes([]byte("baz")) - err = store.SetMetadata(ID(id3), "tkey", []byte("tval")) - if err == nil { - t.Fatal("Expected error for setting metadata for unknown ID.") - } - - _, err = store.GetMetadata(ID(id3), "tkey") - if err == nil { - t.Fatal("Expected error for getting metadata for unknown ID.") - } -} - -func TestFSMetadataGetSet(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testMetadataGetSet(t, fs) -} - -func TestFSDelete(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testDelete(t, fs) -} - -func TestFSWalker(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - testWalker(t, fs) -} - -func TestFSInvalidWalker(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - fooID, err := fs.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(tmpdir, contentDirName, "sha256/foobar"), []byte("foobar"), 0600); err != nil { - t.Fatal(err) - } - - n := 0 - err = fs.Walk(func(id ID) error { - if id != fooID { - t.Fatalf("Invalid walker ID %q, expected %q", id, fooID) - } - n++ - return nil - }) - if err != nil { - t.Fatalf("Invalid data should not have caused walker error, got %v", err) - } - if n != 1 { - t.Fatalf("Expected 1 walk initialization, got %d", n) - } -} - -func testGetSet(t *testing.T, store StoreBackend) { - type tcase struct { - input []byte - expected ID - } - tcases := []tcase{ - {[]byte("foobar"), ID("sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2")}, - } - - randomInput := make([]byte, 8*1024) - _, err := rand.Read(randomInput) - if err != nil { - t.Fatal(err) - } - // skipping use of digest pkg because its used by the implementation - h := sha256.New() - _, err = h.Write(randomInput) - if err != nil { - t.Fatal(err) - } - tcases = append(tcases, tcase{ - input: randomInput, - expected: ID("sha256:" + hex.EncodeToString(h.Sum(nil))), - }) - - for _, tc := range tcases { - id, err := store.Set([]byte(tc.input)) - if err != nil { - t.Fatal(err) - } - if id != tc.expected { - t.Fatalf("Expected ID %q, got %q", tc.expected, id) - } - } - - for _, emptyData := range [][]byte{nil, {}} { - _, err := store.Set(emptyData) - if err == nil { - t.Fatal("Expected error for nil input.") - } - } - - for _, tc := range tcases { - data, err := store.Get(tc.expected) - if err != nil { - t.Fatal(err) - } - if bytes.Compare(data, tc.input) != 0 { - t.Fatalf("Expected data %q, got %q", tc.input, data) - } - } - - for _, key := range []ID{"foobar:abc", "sha256:abc", "sha256:c3ab8ff13720e8ad9047dd39466b3c8974e592c2fa383d4a3960714caef0c4f2a"} { - _, err := store.Get(key) - if err == nil { - t.Fatalf("Expected error for ID %q.", key) - } - } - -} - -func testDelete(t *testing.T, store StoreBackend) { - id, err := store.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } - id2, err := store.Set([]byte("bar")) - if err != nil { - t.Fatal(err) - } - - err = 
store.Delete(id) - if err != nil { - t.Fatal(err) - } - - _, err = store.Get(id) - if err == nil { - t.Fatalf("Expected getting deleted item %q to fail", id) - } - _, err = store.Get(id2) - if err != nil { - t.Fatal(err) - } - - err = store.Delete(id2) - if err != nil { - t.Fatal(err) - } - _, err = store.Get(id2) - if err == nil { - t.Fatalf("Expected getting deleted item %q to fail", id2) - } -} - -func testWalker(t *testing.T, store StoreBackend) { - id, err := store.Set([]byte("foo")) - if err != nil { - t.Fatal(err) - } - id2, err := store.Set([]byte("bar")) - if err != nil { - t.Fatal(err) - } - - tcases := make(map[ID]struct{}) - tcases[id] = struct{}{} - tcases[id2] = struct{}{} - n := 0 - err = store.Walk(func(id ID) error { - delete(tcases, id) - n++ - return nil - }) - if err != nil { - t.Fatal(err) - } - - if n != 2 { - t.Fatalf("Expected 2 walk initializations, got %d", n) - } - if len(tcases) != 0 { - t.Fatalf("Expected empty unwalked set, got %+v", tcases) - } - - // stop on error - tcases = make(map[ID]struct{}) - tcases[id] = struct{}{} - err = store.Walk(func(id ID) error { - return errors.New("") - }) - if err == nil { - t.Fatalf("Exected error from walker.") - } -} diff --git a/image/image.go b/image/image.go deleted file mode 100644 index 7a05e649fb..0000000000 --- a/image/image.go +++ /dev/null @@ -1,140 +0,0 @@ -package image - -import ( - "encoding/json" - "errors" - "io" - "time" - - "github.com/docker/distribution/digest" - "github.com/docker/engine-api/types/container" -) - -// ID is the content-addressable ID of an image. -type ID digest.Digest - -func (id ID) String() string { - return digest.Digest(id).String() -} - -// V1Image stores the V1 image configuration. -type V1Image struct { - // ID a unique 64 character identifier of the image - ID string `json:"id,omitempty"` - // Parent id of the image - Parent string `json:"parent,omitempty"` - // Comment user added comment - Comment string `json:"comment,omitempty"` - // Created timestamp when image was created - Created time.Time `json:"created"` - // Container is the id of the container used to commit - Container string `json:"container,omitempty"` - // ContainerConfig is the configuration of the container that is committed into the image - ContainerConfig container.Config `json:"container_config,omitempty"` - // DockerVersion specifies version on which image is built - DockerVersion string `json:"docker_version,omitempty"` - // Author of the image - Author string `json:"author,omitempty"` - // Config is the configuration of the container received from the client - Config *container.Config `json:"config,omitempty"` - // Architecture is the hardware that the image is build and runs on - Architecture string `json:"architecture,omitempty"` - // OS is the operating system used to build and run the image - OS string `json:"os,omitempty"` - // Size is the total size of the image including all layers it is composed of - Size int64 `json:",omitempty"` -} - -// Image stores the image configuration -type Image struct { - V1Image - Parent ID `json:"parent,omitempty"` - RootFS *RootFS `json:"rootfs,omitempty"` - History []History `json:"history,omitempty"` - OSVersion string `json:"os.version,omitempty"` - OSFeatures []string `json:"os.features,omitempty"` - - // rawJSON caches the immutable JSON associated with this image. - rawJSON []byte - - // computedID is the ID computed from the hash of the image config. - // Not to be confused with the legacy V1 ID in V1Image. 
- computedID ID -} - -// RawJSON returns the immutable JSON associated with the image. -func (img *Image) RawJSON() []byte { - return img.rawJSON -} - -// ID returns the image's content-addressable ID. -func (img *Image) ID() ID { - return img.computedID -} - -// ImageID stringizes ID. -func (img *Image) ImageID() string { - return string(img.ID()) -} - -// RunConfig returns the image's container config. -func (img *Image) RunConfig() *container.Config { - return img.Config -} - -// MarshalJSON serializes the image to JSON. It sorts the top-level keys so -// that JSON that's been manipulated by a push/pull cycle with a legacy -// registry won't end up with a different key order. -func (img *Image) MarshalJSON() ([]byte, error) { - type MarshalImage Image - - pass1, err := json.Marshal(MarshalImage(*img)) - if err != nil { - return nil, err - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(pass1, &c); err != nil { - return nil, err - } - return json.Marshal(c) -} - -// History stores build commands that were used to create an image -type History struct { - // Created timestamp for build point - Created time.Time `json:"created"` - // Author of the build point - Author string `json:"author,omitempty"` - // CreatedBy keeps the Dockerfile command used while building image. - CreatedBy string `json:"created_by,omitempty"` - // Comment is custom message set by the user when creating the image. - Comment string `json:"comment,omitempty"` - // EmptyLayer is set to true if this history item did not generate a - // layer. Otherwise, the history item is associated with the next - // layer in the RootFS section. - EmptyLayer bool `json:"empty_layer,omitempty"` -} - -// Exporter provides interface for exporting and importing images -type Exporter interface { - Load(io.ReadCloser, io.Writer, bool) error - // TODO: Load(net.Context, io.ReadCloser, <- chan StatusMessage) error - Save([]string, io.Writer) error -} - -// NewFromJSON creates an Image configuration from json. 
-func NewFromJSON(src []byte) (*Image, error) { - img := &Image{} - - if err := json.Unmarshal(src, img); err != nil { - return nil, err - } - if img.RootFS == nil { - return nil, errors.New("Invalid image JSON, no RootFS key.") - } - - img.rawJSON = src - - return img, nil -} diff --git a/image/image_test.go b/image/image_test.go deleted file mode 100644 index 525023b813..0000000000 --- a/image/image_test.go +++ /dev/null @@ -1,59 +0,0 @@ -package image - -import ( - "encoding/json" - "sort" - "strings" - "testing" -) - -const sampleImageJSON = `{ - "architecture": "amd64", - "os": "linux", - "config": {}, - "rootfs": { - "type": "layers", - "diff_ids": [] - } -}` - -func TestJSON(t *testing.T) { - img, err := NewFromJSON([]byte(sampleImageJSON)) - if err != nil { - t.Fatal(err) - } - rawJSON := img.RawJSON() - if string(rawJSON) != sampleImageJSON { - t.Fatalf("Raw JSON of config didn't match: expected %+v, got %v", sampleImageJSON, rawJSON) - } -} - -func TestInvalidJSON(t *testing.T) { - _, err := NewFromJSON([]byte("{}")) - if err == nil { - t.Fatal("Expected JSON parse error") - } -} - -func TestMarshalKeyOrder(t *testing.T) { - b, err := json.Marshal(&Image{ - V1Image: V1Image{ - Comment: "a", - Author: "b", - Architecture: "c", - }, - }) - if err != nil { - t.Fatal(err) - } - - expectedOrder := []string{"architecture", "author", "comment"} - var indexes []int - for _, k := range expectedOrder { - indexes = append(indexes, strings.Index(string(b), k)) - } - - if !sort.IntsAreSorted(indexes) { - t.Fatal("invalid key order in JSON: ", string(b)) - } -} diff --git a/image/rootfs.go b/image/rootfs.go deleted file mode 100644 index 76eaae0c25..0000000000 --- a/image/rootfs.go +++ /dev/null @@ -1,16 +0,0 @@ -package image - -import "github.com/docker/docker/layer" - -// TypeLayers is used for RootFS.Type for filesystems organized into layers. -const TypeLayers = "layers" - -// NewRootFS returns empty RootFS struct -func NewRootFS() *RootFS { - return &RootFS{Type: TypeLayers} -} - -// Append appends a new diffID to rootfs -func (r *RootFS) Append(id layer.DiffID) { - r.DiffIDs = append(r.DiffIDs, id) -} diff --git a/image/rootfs_unix.go b/image/rootfs_unix.go deleted file mode 100644 index 83498f6c37..0000000000 --- a/image/rootfs_unix.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !windows - -package image - -import "github.com/docker/docker/layer" - -// RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into an interface that supports different implementations. -type RootFS struct { - Type string `json:"type"` - DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` -} - -// ChainID returns the ChainID for the top layer in RootFS. -func (r *RootFS) ChainID() layer.ChainID { - return layer.CreateChainID(r.DiffIDs) -} diff --git a/image/rootfs_windows.go b/image/rootfs_windows.go deleted file mode 100644 index c5bd5828b5..0000000000 --- a/image/rootfs_windows.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build windows - -package image - -import ( - "crypto/sha512" - "fmt" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/layer" -) - -// TypeLayersWithBase is used for RootFS.Type for Windows filesystems that have layers and a centrally-stored base layer. -const TypeLayersWithBase = "layers+base" - -// RootFS describes images root filesystem -// This is currently a placeholder that only supports layers. In the future -// this can be made into an interface that supports different implementations. 
-type RootFS struct { - Type string `json:"type"` - DiffIDs []layer.DiffID `json:"diff_ids,omitempty"` - BaseLayer string `json:"base_layer,omitempty"` -} - -// BaseLayerID returns the 64 byte hex ID for the baselayer name. -func (r *RootFS) BaseLayerID() string { - if r.Type != TypeLayersWithBase { - panic("tried to get base layer ID without a base layer") - } - baseID := sha512.Sum384([]byte(r.BaseLayer)) - return fmt.Sprintf("%x", baseID[:32]) -} - -// ChainID returns the ChainID for the top layer in RootFS. -func (r *RootFS) ChainID() layer.ChainID { - ids := r.DiffIDs - if r.Type == TypeLayersWithBase { - // Add an extra ID for the base. - baseDiffID := layer.DiffID(digest.FromBytes([]byte(r.BaseLayerID()))) - ids = append([]layer.DiffID{baseDiffID}, ids...) - } - return layer.CreateChainID(ids) -} - -// NewRootFSWithBaseLayer returns a RootFS struct with a base layer -func NewRootFSWithBaseLayer(baseLayer string) *RootFS { - return &RootFS{Type: TypeLayersWithBase, BaseLayer: baseLayer} -} diff --git a/image/spec/v1.1.md b/image/spec/v1.1.md deleted file mode 100644 index 3a32b6bd36..0000000000 --- a/image/spec/v1.1.md +++ /dev/null @@ -1,640 +0,0 @@ -# Docker Image Specification v1.1.0 - -An *Image* is an ordered collection of root filesystem changes and the -corresponding execution parameters for use within a container runtime. This -specification outlines the format of these filesystem changes and corresponding -parameters and describes how to create and use them for use with a container -runtime and execution tool. - -This version of the image specification was adopted starting in Docker 1.10. - -## Terminology - -This specification uses the following terms: - -
-<dl>
-  <dt>Layer</dt>
-  <dd>Images are composed of layers. Each layer is a set of filesystem
-  changes. Layers do not have configuration metadata such as environment
-  variables or default arguments - these are properties of the image as a
-  whole rather than any particular layer.</dd>
-  <dt>Image JSON</dt>
-  <dd>Each image has an associated JSON structure which describes some basic
-  information about the image such as date created, author, and the ID of its
-  parent image as well as execution/runtime configuration like its entry
-  point, default arguments, CPU/memory shares, networking, and volumes. The
-  JSON structure also references a cryptographic hash of each layer used by
-  the image, and provides history information for those layers. This JSON is
-  considered to be immutable, because changing it would change the computed
-  ImageID. Changing it means creating a new derived image, instead of
-  changing the existing image.</dd>
-  <dt>Image Filesystem Changeset</dt>
-  <dd>Each layer has an archive of the files which have been added, changed,
-  or deleted relative to its parent layer. Using a layer-based or union
-  filesystem such as AUFS, or by computing the diff from filesystem
-  snapshots, the filesystem changeset can be used to present a series of
-  image layers as if they were one cohesive filesystem.</dd>
-  <dt>Layer DiffID</dt>
-  <dd>Layers are referenced by cryptographic hashes of their serialized
-  representation. This is a SHA256 digest over the tar archive used to
-  transport the layer, represented as a hexadecimal encoding of 256 bits,
-  e.g., <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
-  Layers must be packed and unpacked reproducibly to avoid changing the
-  layer ID, for example by using tar-split to save the tar headers. Note
-  that the digest used as the layer ID is taken over an uncompressed version
-  of the tar.</dd>
-  <dt>Layer ChainID</dt>
-  <dd>For convenience, it is sometimes useful to refer to a stack of layers
-  with a single identifier. This is called a <code>ChainID</code>. For a
-  single layer (or the layer at the bottom of a stack), the
-  <code>ChainID</code> is equal to the layer's <code>DiffID</code>.
-  Otherwise the <code>ChainID</code> is given by the formula:
-  <code>ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN))</code>.
-  (A Go sketch of this computation follows this list.)</dd>
-  <dt>ImageID</dt>
-  <dd>Each image's ID is given by the SHA256 hash of its configuration JSON.
-  It is represented as a hexadecimal encoding of 256 bits, e.g.,
-  <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
-  Since the configuration JSON that gets hashed references hashes of each
-  layer in the image, this formulation of the ImageID makes images
-  content-addressable.</dd>
-  <dt>Tag</dt>
-  <dd>A tag serves to map a descriptive, user-given name to any single image
-  ID. Tag values are limited to the set of characters
-  <code>[a-zA-Z0-9_.-]</code>, except they may not start with a
-  <code>.</code> or <code>-</code> character. Tags are limited to 127
-  characters.</dd>
-  <dt>Repository</dt>
-  <dd>A collection of tags grouped under a common prefix (the name component
-  before <code>:</code>). For example, in an image tagged with the name
-  <code>my-app:3.1.4</code>, <code>my-app</code> is the <i>Repository</i>
-  component of the name. A repository name is made up of slash-separated
-  name components, optionally prefixed by a DNS hostname. The hostname must
-  comply with standard DNS rules, but may not contain <code>_</code>
-  characters. If a hostname is present, it may optionally be followed by a
-  port number in the format <code>:8080</code>. Name components may contain
-  lowercase characters, digits, and separators. A separator is defined as a
-  period, one or two underscores, or one or more dashes. A name component
-  may not start or end with a separator.</dd>
-</dl>
-
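To make the `ChainID` formula above concrete, here is a minimal Go sketch. It is an editor's illustration, not code from this specification; the function and variable names are invented for the example.

```
package main

import (
	"crypto/sha256"
	"fmt"
)

// chainID folds an ordered list of DiffIDs (bottom-most first, as in the
// rootfs diff_ids array) into a single ChainID using the formula
// ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN)).
func chainID(diffIDs []string) string {
	if len(diffIDs) == 0 {
		return ""
	}
	id := diffIDs[0] // the ChainID of the bottom layer is its DiffID
	for _, diff := range diffIDs[1:] {
		sum := sha256.Sum256([]byte(id + " " + diff))
		id = fmt.Sprintf("sha256:%x", sum)
	}
	return id
}

func main() {
	fmt.Println(chainID([]string{
		"sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
		"sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
	}))
}
```

Note that the hash input is the string form of the parent ChainID, a single space, and the string form of the DiffID, so the result is stable regardless of how many layers sit below.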
-
-## Image JSON Description
-
-Here is an example image JSON file:
-
-```
-{
-    "created": "2015-10-31T22:22:56.015925234Z",
-    "author": "Alyssa P. Hacker <alyspdev@example.com>",
-    "architecture": "amd64",
-    "os": "linux",
-    "config": {
-        "User": "alice",
-        "Memory": 2048,
-        "MemorySwap": 4096,
-        "CpuShares": 8,
-        "ExposedPorts": {
-            "8080/tcp": {}
-        },
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "FOO=docker_is_a_really",
-            "BAR=great_tool_you_know"
-        ],
-        "Entrypoint": [
-            "/bin/my-app-binary"
-        ],
-        "Cmd": [
-            "--foreground",
-            "--config",
-            "/etc/my-app.d/default.cfg"
-        ],
-        "Volumes": {
-            "/var/job-result-data": {},
-            "/var/log/my-app-logs": {}
-        },
-        "WorkingDir": "/home/alice"
-    },
-    "rootfs": {
-        "diff_ids": [
-            "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
-            "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
-        ],
-        "type": "layers"
-    },
-    "history": [
-        {
-            "created": "2015-10-31T22:22:54.690851953Z",
-            "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
-        },
-        {
-            "created": "2015-10-31T22:22:55.613815829Z",
-            "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
-            "empty_layer": true
-        }
-    ]
-}
-```
-
-Note that image JSON files produced by Docker don't contain formatting
-whitespace. It has been added to this example for clarity.
-
-### Image JSON Field Descriptions
-
-<dl>
-  <dt>created <code>string</code></dt>
-  <dd>ISO-8601 formatted combined date and time at which the image was
-  created.</dd>
-  <dt>author <code>string</code></dt>
-  <dd>Gives the name and/or email address of the person or entity which
-  created and is responsible for maintaining the image.</dd>
-  <dt>architecture <code>string</code></dt>
-  <dd>The CPU architecture which the binaries in this image are built to run
-  on. Possible values include:
-  <ul><li><code>386</code></li><li><code>amd64</code></li><li><code>arm</code></li></ul>
-  More values may be supported in the future and any of these may or may not
-  be supported by a given container runtime implementation.</dd>
-  <dt>os <code>string</code></dt>
-  <dd>The name of the operating system which the image is built to run on.
-  Possible values include:
-  <ul><li><code>darwin</code></li><li><code>freebsd</code></li><li><code>linux</code></li></ul>
-  More values may be supported in the future and any of these may or may not
-  be supported by a given container runtime implementation.</dd>
-  <dt>config <code>struct</code></dt>
-  <dd>The execution parameters which should be used as a base when running a
-  container using the image. This field can be <code>null</code>, in which
-  case any execution parameters should be specified at creation of the
-  container.
-
-  <h4>Container RunConfig Field Descriptions</h4>
-
-  <dl>
-    <dt>User <code>string</code></dt>
-    <dd>The username or UID which the process in the container should run
-    as. This acts as a default value to use when the value is not specified
-    when creating a container. All of the following are valid:
-    <ul><li><code>user</code></li><li><code>uid</code></li><li><code>user:group</code></li><li><code>uid:gid</code></li><li><code>uid:group</code></li><li><code>user:gid</code></li></ul>
-    If <code>group</code>/<code>gid</code> is not specified, the default
-    group and supplementary groups of the given <code>user</code>/<code>uid</code>
-    in <code>/etc/passwd</code> from the container are applied.</dd>
-    <dt>Memory <code>integer</code></dt>
-    <dd>Memory limit (in bytes). This acts as a default value to use when
-    the value is not specified when creating a container.</dd>
-    <dt>MemorySwap <code>integer</code></dt>
-    <dd>Total memory usage (memory + swap); set to <code>-1</code> to
-    disable swap. This acts as a default value to use when the value is not
-    specified when creating a container.</dd>
-    <dt>CpuShares <code>integer</code></dt>
-    <dd>CPU shares (relative weight vs. other containers). This acts as a
-    default value to use when the value is not specified when creating a
-    container.</dd>
-    <dt>ExposedPorts <code>struct</code></dt>
-    <dd>A set of ports to expose from a container running this image. This
-    JSON structure value is unusual because it is a direct JSON
-    serialization of the Go type <code>map[string]struct{}</code> and is
-    represented in JSON as an object mapping its keys to an empty object.
-    Here is an example:
-<pre>{
-    "8080": {},
-    "53/udp": {},
-    "2356/tcp": {}
-}</pre>
-    Its keys can be in the format of:
-    <ul><li><code>"port/tcp"</code></li><li><code>"port/udp"</code></li><li><code>"port"</code></li></ul>
-    with the default protocol being <code>"tcp"</code> if not specified.
-    These values act as defaults and are merged with any specified when
-    creating a container. (A Go sketch of this serialization appears after
-    this list.)</dd>
-    <dt>Env <code>array of strings</code></dt>
-    <dd>Entries are in the format of <code>VARNAME="var value"</code>.
-    These values act as defaults and are merged with any specified when
-    creating a container.</dd>
-    <dt>Entrypoint <code>array of strings</code></dt>
-    <dd>A list of arguments to use as the command to execute when the
-    container starts. This value acts as a default and is replaced by an
-    entrypoint specified when creating a container.</dd>
-    <dt>Cmd <code>array of strings</code></dt>
-    <dd>Default arguments to the entry point of the container. These values
-    act as defaults and are replaced with any specified when creating a
-    container. If an <code>Entrypoint</code> value is not specified, then
-    the first entry of the <code>Cmd</code> array should be interpreted as
-    the executable to run.</dd>
-    <dt>Volumes <code>struct</code></dt>
-    <dd>A set of directories which should be created as data volumes in a
-    container running this image. This JSON structure value is unusual
-    because it is a direct JSON serialization of the Go type
-    <code>map[string]struct{}</code> and is represented in JSON as an object
-    mapping its keys to an empty object. Here is an example:
-<pre>{
-    "/var/my-app-data/": {},
-    "/etc/some-config.d/": {}
-}</pre></dd>
-    <dt>WorkingDir <code>string</code></dt>
-    <dd>Sets the current working directory of the entry point process in
-    the container. This value acts as a default and is replaced by a
-    working directory specified when creating a container.</dd>
-  </dl>
-  </dd>
-  <dt>rootfs <code>struct</code></dt>
-  <dd>The rootfs key references the layer content addresses used by the
-  image. This makes the image config hash depend on the filesystem hash.
-  rootfs has two subkeys:
-  <ul>
-    <li><code>type</code> is usually set to <code>layers</code>. There is
-    also a Windows-specific value <code>layers+base</code> that allows a
-    base layer to be specified in a field of rootfs called
-    <code>base_layer</code>.</li>
-    <li><code>diff_ids</code> is an array of layer content hashes
-    (<code>DiffIDs</code>), in order from bottom-most to top-most.</li>
-  </ul>
-  Here is an example rootfs section:
-<pre>"rootfs": {
-  "diff_ids": [
-    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
-    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
-    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
-  ],
-  "type": "layers"
-}</pre></dd>
-  <dt>history <code>struct</code></dt>
-  <dd>history is an array of objects describing the history of each layer.
-  The array is ordered from bottom-most layer to top-most layer. The object
-  has the following fields.
-  <ul>
-    <li><code>created</code>: Creation time, expressed as an ISO-8601
-    formatted combined date and time</li>
-    <li><code>author</code>: The author of the build point</li>
-    <li><code>created_by</code>: The command which created the layer</li>
-    <li><code>comment</code>: A custom message set when creating the
-    layer</li>
-    <li><code>empty_layer</code>: This field is used to mark if the history
-    item created a filesystem diff. It is set to true if this history item
-    doesn't correspond to an actual layer in the rootfs section (for
-    example, a command like ENV which results in no change to the
-    filesystem).</li>
-  </ul>
-  Here is an example history section:
-<pre>"history": [
-  {
-    "created": "2015-10-31T22:22:54.690851953Z",
-    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
-  },
-  {
-    "created": "2015-10-31T22:22:55.613815829Z",
-    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
-    "empty_layer": true
-  }
-]</pre></dd>
-</dl>
-
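The `map[string]struct{}` serialization used by `ExposedPorts` and `Volumes` above can be demonstrated with a small Go sketch. This is an editor's illustration of the shape only, not code from the spec or the engine.

```
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// ExposedPorts and Volumes are direct JSON serializations of Go's
	// map[string]struct{}: every key maps to an empty JSON object.
	exposed := map[string]struct{}{
		"8080":     {},
		"53/udp":   {},
		"2356/tcp": {},
	}
	b, err := json.Marshal(exposed)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"2356/tcp":{},"53/udp":{},"8080":{}}

	// Decoding accepts the same shape, so the set survives a round trip.
	var decoded map[string]struct{}
	if err := json.Unmarshal(b, &decoded); err != nil {
		panic(err)
	}
	fmt.Println(len(decoded)) // 3
}
```

Using a map of empty structs rather than an array keeps membership checks O(1) and makes merging defaults with per-container values a simple key-wise union.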
-
-Any extra fields in the Image JSON struct are considered implementation
-specific and should be ignored by any implementations which are unable to
-interpret them.
-
-## Creating an Image Filesystem Changeset
-
-An example of creating an Image Filesystem Changeset follows.
-
-An image root filesystem is first created as an empty directory. Here is the
-initial empty directory structure for a changeset using the
-randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are
-generated based on the content](#id_desc)).
-
-```
-c3167915dc9d/
-```
-
-Files and directories are then created:
-
-```
-c3167915dc9d/
-    etc/
-        my-app-config
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-The `c3167915dc9d` directory is then committed as a plain Tar archive with
-entries for the following files:
-
-```
-etc/my-app-config
-bin/my-app-binary
-bin/my-app-tools
-```
-
-To make changes to the filesystem of this container image, create a new
-directory, such as `f60c56784b83`, and initialize it with a snapshot of the
-parent image's root filesystem, so that the directory is identical to that
-of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this
-very efficient:
-
-```
-f60c56784b83/
-    etc/
-        my-app-config
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-This example change is going to add a configuration directory at
-`/etc/my-app.d` which contains a default config file. There's also a change
-to the `my-app-tools` binary to handle the config layout change. The
-`f60c56784b83` directory then looks like this:
-
-```
-f60c56784b83/
-    etc/
-        my-app.d/
-            default.cfg
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-This reflects the removal of `/etc/my-app-config` and creation of a file and
-directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
-replaced with an updated version. Before committing this directory to a
-changeset, because it has a parent image, it is first compared with the
-directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
-directories that have been added, modified, or removed. The following
-changeset is found:
-
-```
-Added:      /etc/my-app.d/default.cfg
-Modified:   /bin/my-app-tools
-Deleted:    /etc/my-app-config
-```
-
-A Tar Archive is then created which contains *only* this changeset: the
-added and modified files and directories in their entirety, and for each
-deleted item an entry for an empty file at the same location but with the
-basename of the deleted file or directory prefixed with `.wh.`. The
-filenames prefixed with `.wh.` are known as "whiteout" files. NOTE: For this
-reason, it is not possible to create an image root filesystem which contains
-a file or directory with a name beginning with `.wh.`. The resulting Tar
-archive for `f60c56784b83` has the following entries:
-
-```
-/etc/my-app.d/default.cfg
-/bin/my-app-tools
-/etc/.wh.my-app-config
-```
-
-Any given image is likely to be composed of several of these Image
-Filesystem Changeset tar archives.
-
-## Combined Image JSON + Filesystem Changeset Format
-
-There is also a format for a single archive which contains complete
-information about an image, including:
-
- - repository names/tags
- - the image configuration JSON file
- - tar archives of each layer's filesystem changeset
-
-For example, here's what the full archive of `library/busybox` is (displayed
-in `tree` format):
-
-```
-.
-├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json -├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a -│   ├── VERSION -│   ├── json -│   └── layer.tar -├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 -│   ├── VERSION -│   ├── json -│   └── layer.tar -├── manifest.json -└── repositories -``` - -There is a directory for each layer in the image. Each directory is named with -a 64 character hex name that is deterministically generated from the layer -information. These names are not necessarily layer DiffIDs or ChainIDs. Each of -these directories contains 3 files: - - * `VERSION` - The schema version of the `json` file - * `json` - The legacy JSON metadata for an image layer. In this version of - the image specification, layers don't have JSON metadata, but in - [version 1](v1.md), they did. A file is created for each layer in the - v1 format for backward compatibility. - * `layer.tar` - The Tar archive of the filesystem changeset for an image - layer. - -Note that this directory layout is only important for backward compatibility. -Current implementations use the paths specified in `manifest.json`. - -The content of the `VERSION` files is simply the semantic version of the JSON -metadata schema: - -``` -1.0 -``` - -The `repositories` file is another JSON file which describes names/tags: - -``` -{ - "busybox":{ - "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" - } -} -``` - -Every key in this object is the name of a repository, and maps to a collection -of tag suffixes. Each tag maps to the ID of the image represented by that tag. -This file is only used for backwards compatibility. Current implementations use -the `manifest.json` file instead. - -The `manifest.json` file provides the image JSON for the top-level image, and -optionally for parent images that this image was derived from. It consists of -an array of metadata entries: - -``` -[ - { - "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", - "RepoTags": ["busybox:latest"], - "Layers": [ - "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", - "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" - ] - } -] -``` - -There is an entry in the array for each image. - -The `Config` field references another file in the tar which includes the image -JSON for this image. - -The `RepoTags` field lists references pointing to this image. - -The `Layers` field points to the filesystem changeset tars. - -An optional `Parent` field references the imageID of the parent image. This -parent must be part of the same `manifest.json` file. - -This file shouldn't be confused with the distribution manifest, used to push -and pull images. - -Generally, implementations that support this version of the spec will use -the `manifest.json` file if available, and older implementations will use the -legacy `*/json` files and `repositories`. diff --git a/image/spec/v1.2.md b/image/spec/v1.2.md deleted file mode 100644 index c014bf9a09..0000000000 --- a/image/spec/v1.2.md +++ /dev/null @@ -1,696 +0,0 @@ -# Docker Image Specification v1.2.0 - -An *Image* is an ordered collection of root filesystem changes and the -corresponding execution parameters for use within a container runtime. This -specification outlines the format of these filesystem changes and corresponding -parameters and describes how to create and use them for use with a container -runtime and execution tool. 
- -This version of the image specification was adopted starting in Docker 1.12. - -## Terminology - -This specification uses the following terms: - -
-<dl>
-  <dt>Layer</dt>
-  <dd>Images are composed of layers. Each layer is a set of filesystem
-  changes. Layers do not have configuration metadata such as environment
-  variables or default arguments - these are properties of the image as a
-  whole rather than any particular layer.</dd>
-  <dt>Image JSON</dt>
-  <dd>Each image has an associated JSON structure which describes some basic
-  information about the image such as date created, author, and the ID of its
-  parent image as well as execution/runtime configuration like its entry
-  point, default arguments, CPU/memory shares, networking, and volumes. The
-  JSON structure also references a cryptographic hash of each layer used by
-  the image, and provides history information for those layers. This JSON is
-  considered to be immutable, because changing it would change the computed
-  ImageID. Changing it means creating a new derived image, instead of
-  changing the existing image.</dd>
-  <dt>Image Filesystem Changeset</dt>
-  <dd>Each layer has an archive of the files which have been added, changed,
-  or deleted relative to its parent layer. Using a layer-based or union
-  filesystem such as AUFS, or by computing the diff from filesystem
-  snapshots, the filesystem changeset can be used to present a series of
-  image layers as if they were one cohesive filesystem.</dd>
-  <dt>Layer DiffID</dt>
-  <dd>Layers are referenced by cryptographic hashes of their serialized
-  representation. This is a SHA256 digest over the tar archive used to
-  transport the layer, represented as a hexadecimal encoding of 256 bits,
-  e.g., <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
-  Layers must be packed and unpacked reproducibly to avoid changing the
-  layer ID, for example by using tar-split to save the tar headers. Note
-  that the digest used as the layer ID is taken over an uncompressed version
-  of the tar.</dd>
-  <dt>Layer ChainID</dt>
-  <dd>For convenience, it is sometimes useful to refer to a stack of layers
-  with a single identifier. This is called a <code>ChainID</code>. For a
-  single layer (or the layer at the bottom of a stack), the
-  <code>ChainID</code> is equal to the layer's <code>DiffID</code>.
-  Otherwise the <code>ChainID</code> is given by the formula:
-  <code>ChainID(layerN) = SHA256hex(ChainID(layerN-1) + " " + DiffID(layerN))</code>.</dd>
-  <dt>ImageID</dt>
-  <dd>Each image's ID is given by the SHA256 hash of its configuration JSON.
-  It is represented as a hexadecimal encoding of 256 bits, e.g.,
-  <code>sha256:a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
-  Since the configuration JSON that gets hashed references hashes of each
-  layer in the image, this formulation of the ImageID makes images
-  content-addressable. (A Go sketch of this derivation follows this
-  list.)</dd>
-  <dt>Tag</dt>
-  <dd>A tag serves to map a descriptive, user-given name to any single image
-  ID. Tag values are limited to the set of characters
-  <code>[a-zA-Z0-9_.-]</code>, except they may not start with a
-  <code>.</code> or <code>-</code> character. Tags are limited to 127
-  characters.</dd>
-  <dt>Repository</dt>
-  <dd>A collection of tags grouped under a common prefix (the name component
-  before <code>:</code>). For example, in an image tagged with the name
-  <code>my-app:3.1.4</code>, <code>my-app</code> is the <i>Repository</i>
-  component of the name. A repository name is made up of slash-separated
-  name components, optionally prefixed by a DNS hostname. The hostname must
-  comply with standard DNS rules, but may not contain <code>_</code>
-  characters. If a hostname is present, it may optionally be followed by a
-  port number in the format <code>:8080</code>. Name components may contain
-  lowercase characters, digits, and separators. A separator is defined as a
-  period, one or two underscores, or one or more dashes. A name component
-  may not start or end with a separator.</dd>
-</dl>
-
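As a concrete illustration of the ImageID definition above, this minimal Go sketch computes an image's ID from its configuration JSON. It is an editor's example, not spec code, and the `config.json` file name is hypothetical.

```
package main

import (
	"crypto/sha256"
	"fmt"
	"io/ioutil"
)

func main() {
	// The ImageID is the SHA256 digest of the configuration JSON, taken
	// over the raw bytes exactly as they are stored; any change to the
	// config (including its referenced layer DiffIDs) changes the ID.
	raw, err := ioutil.ReadFile("config.json") // hypothetical file name
	if err != nil {
		panic(err)
	}
	sum := sha256.Sum256(raw)
	fmt.Printf("sha256:%x\n", sum)
}
```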
-
-## Image JSON Description
-
-Here is an example image JSON file:
-
-```
-{
-    "created": "2015-10-31T22:22:56.015925234Z",
-    "author": "Alyssa P. Hacker <alyspdev@example.com>",
-    "architecture": "amd64",
-    "os": "linux",
-    "config": {
-        "User": "alice",
-        "Memory": 2048,
-        "MemorySwap": 4096,
-        "CpuShares": 8,
-        "ExposedPorts": {
-            "8080/tcp": {}
-        },
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "FOO=docker_is_a_really",
-            "BAR=great_tool_you_know"
-        ],
-        "Entrypoint": [
-            "/bin/my-app-binary"
-        ],
-        "Cmd": [
-            "--foreground",
-            "--config",
-            "/etc/my-app.d/default.cfg"
-        ],
-        "Volumes": {
-            "/var/job-result-data": {},
-            "/var/log/my-app-logs": {}
-        },
-        "WorkingDir": "/home/alice"
-    },
-    "rootfs": {
-        "diff_ids": [
-            "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
-            "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"
-        ],
-        "type": "layers"
-    },
-    "history": [
-        {
-            "created": "2015-10-31T22:22:54.690851953Z",
-            "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
-        },
-        {
-            "created": "2015-10-31T22:22:55.613815829Z",
-            "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
-            "empty_layer": true
-        }
-    ]
-}
-```
-
-Note that image JSON files produced by Docker don't contain formatting
-whitespace. It has been added to this example for clarity.
-
-### Image JSON Field Descriptions
-
-<dl>
-  <dt>created <code>string</code></dt>
-  <dd>ISO-8601 formatted combined date and time at which the image was
-  created.</dd>
-  <dt>author <code>string</code></dt>
-  <dd>Gives the name and/or email address of the person or entity which
-  created and is responsible for maintaining the image.</dd>
-  <dt>architecture <code>string</code></dt>
-  <dd>The CPU architecture which the binaries in this image are built to run
-  on. Possible values include:
-  <ul><li><code>386</code></li><li><code>amd64</code></li><li><code>arm</code></li></ul>
-  More values may be supported in the future and any of these may or may not
-  be supported by a given container runtime implementation.</dd>
-  <dt>os <code>string</code></dt>
-  <dd>The name of the operating system which the image is built to run on.
-  Possible values include:
-  <ul><li><code>darwin</code></li><li><code>freebsd</code></li><li><code>linux</code></li></ul>
-  More values may be supported in the future and any of these may or may not
-  be supported by a given container runtime implementation.</dd>
-  <dt>config <code>struct</code></dt>
-  <dd>The execution parameters which should be used as a base when running a
-  container using the image. This field can be <code>null</code>, in which
-  case any execution parameters should be specified at creation of the
-  container.
-
-  <h4>Container RunConfig Field Descriptions</h4>
-
-  <dl>
-    <dt>User <code>string</code></dt>
-    <dd>The username or UID which the process in the container should run
-    as. This acts as a default value to use when the value is not specified
-    when creating a container. All of the following are valid:
-    <ul><li><code>user</code></li><li><code>uid</code></li><li><code>user:group</code></li><li><code>uid:gid</code></li><li><code>uid:group</code></li><li><code>user:gid</code></li></ul>
-    If <code>group</code>/<code>gid</code> is not specified, the default
-    group and supplementary groups of the given <code>user</code>/<code>uid</code>
-    in <code>/etc/passwd</code> from the container are applied.</dd>
-    <dt>Memory <code>integer</code></dt>
-    <dd>Memory limit (in bytes). This acts as a default value to use when
-    the value is not specified when creating a container.</dd>
-    <dt>MemorySwap <code>integer</code></dt>
-    <dd>Total memory usage (memory + swap); set to <code>-1</code> to
-    disable swap. This acts as a default value to use when the value is not
-    specified when creating a container.</dd>
-    <dt>CpuShares <code>integer</code></dt>
-    <dd>CPU shares (relative weight vs. other containers). This acts as a
-    default value to use when the value is not specified when creating a
-    container.</dd>
-    <dt>ExposedPorts <code>struct</code></dt>
-    <dd>A set of ports to expose from a container running this image. This
-    JSON structure value is unusual because it is a direct JSON
-    serialization of the Go type <code>map[string]struct{}</code> and is
-    represented in JSON as an object mapping its keys to an empty object.
-    Here is an example:
-<pre>{
-    "8080": {},
-    "53/udp": {},
-    "2356/tcp": {}
-}</pre>
-    Its keys can be in the format of:
-    <ul><li><code>"port/tcp"</code></li><li><code>"port/udp"</code></li><li><code>"port"</code></li></ul>
-    with the default protocol being <code>"tcp"</code> if not specified.
-    These values act as defaults and are merged with any specified when
-    creating a container.</dd>
-    <dt>Env <code>array of strings</code></dt>
-    <dd>Entries are in the format of <code>VARNAME="var value"</code>.
-    These values act as defaults and are merged with any specified when
-    creating a container.</dd>
-    <dt>Entrypoint <code>array of strings</code></dt>
-    <dd>A list of arguments to use as the command to execute when the
-    container starts. This value acts as a default and is replaced by an
-    entrypoint specified when creating a container.</dd>
-    <dt>Cmd <code>array of strings</code></dt>
-    <dd>Default arguments to the entry point of the container. These values
-    act as defaults and are replaced with any specified when creating a
-    container. If an <code>Entrypoint</code> value is not specified, then
-    the first entry of the <code>Cmd</code> array should be interpreted as
-    the executable to run.</dd>
-    <dt>Healthcheck <code>struct</code></dt>
-    <dd>A test to perform to determine whether the container is healthy.
-    Here is an example:
-<pre>{
-  "Test": [
-      "CMD-SHELL",
-      "/usr/bin/check-health localhost"
-  ],
-  "Interval": 30000000000,
-  "Timeout": 10000000000,
-  "Retries": 3
-}</pre>
-    The object has the following fields.
-    <dl>
-      <dt>Test <code>array of strings</code></dt>
-      <dd>The test to perform to check that the container is healthy. The
-      options are:
-      <ul>
-        <li><code>[]</code> : inherit healthcheck from base image</li>
-        <li><code>["NONE"]</code> : disable healthcheck</li>
-        <li><code>["CMD", arg1, arg2, ...]</code> : exec arguments
-        directly</li>
-        <li><code>["CMD-SHELL", command]</code> : run command with system's
-        default shell</li>
-      </ul>
-      The test command should exit with a status of 0 if the container is
-      healthy, or with 1 if it is unhealthy.</dd>
-      <dt>Interval <code>integer</code></dt>
-      <dd>Number of nanoseconds to wait between probe attempts.</dd>
-      <dt>Timeout <code>integer</code></dt>
-      <dd>Number of nanoseconds to wait before considering the check to
-      have hung.</dd>
-      <dt>Retries <code>integer</code></dt>
-      <dd>The number of consecutive failures needed to consider a container
-      as unhealthy.</dd>
-    </dl>
-    In each case, the field can be omitted to indicate that the value
-    should be inherited from the base layer. These values act as defaults
-    and are merged with any specified when creating a container. (A Go
-    sketch of decoding this structure appears after this list.)</dd>
-    <dt>Volumes <code>struct</code></dt>
-    <dd>A set of directories which should be created as data volumes in a
-    container running this image. This JSON structure value is unusual
-    because it is a direct JSON serialization of the Go type
-    <code>map[string]struct{}</code> and is represented in JSON as an object
-    mapping its keys to an empty object. Here is an example:
-<pre>{
-    "/var/my-app-data/": {},
-    "/etc/some-config.d/": {}
-}</pre></dd>
-    <dt>WorkingDir <code>string</code></dt>
-    <dd>Sets the current working directory of the entry point process in
-    the container. This value acts as a default and is replaced by a
-    working directory specified when creating a container.</dd>
-  </dl>
-  </dd>
-  <dt>rootfs <code>struct</code></dt>
-  <dd>The rootfs key references the layer content addresses used by the
-  image. This makes the image config hash depend on the filesystem hash.
-  rootfs has two subkeys:
-  <ul>
-    <li><code>type</code> is usually set to <code>layers</code>.</li>
-    <li><code>diff_ids</code> is an array of layer content hashes
-    (<code>DiffIDs</code>), in order from bottom-most to top-most.</li>
-  </ul>
-  Here is an example rootfs section:
-<pre>"rootfs": {
-  "diff_ids": [
-    "sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1",
-    "sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef",
-    "sha256:13f53e08df5a220ab6d13c58b2bf83a59cbdc2e04d0a3f041ddf4b0ba4112d49"
-  ],
-  "type": "layers"
-}</pre></dd>
-  <dt>history <code>struct</code></dt>
-  <dd>history is an array of objects describing the history of each layer.
-  The array is ordered from bottom-most layer to top-most layer. The object
-  has the following fields.
-  <ul>
-    <li><code>created</code>: Creation time, expressed as an ISO-8601
-    formatted combined date and time</li>
-    <li><code>author</code>: The author of the build point</li>
-    <li><code>created_by</code>: The command which created the layer</li>
-    <li><code>comment</code>: A custom message set when creating the
-    layer</li>
-    <li><code>empty_layer</code>: This field is used to mark if the history
-    item created a filesystem diff. It is set to true if this history item
-    doesn't correspond to an actual layer in the rootfs section (for
-    example, a command like ENV which results in no change to the
-    filesystem).</li>
-  </ul>
-  Here is an example history section:
-<pre>"history": [
-  {
-    "created": "2015-10-31T22:22:54.690851953Z",
-    "created_by": "/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"
-  },
-  {
-    "created": "2015-10-31T22:22:55.613815829Z",
-    "created_by": "/bin/sh -c #(nop) CMD [\"sh\"]",
-    "empty_layer": true
-  }
-]</pre></dd>
-</dl>
-
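Because `Interval` and `Timeout` above are plain nanosecond counts, they map directly onto Go's `time.Duration`. The following sketch decodes the example `Healthcheck` object; it is an editor's illustration, and the `healthConfig` struct is not the engine's canonical type.

```
package main

import (
	"encoding/json"
	"fmt"
	"time"
)

// healthConfig mirrors the Healthcheck object shown above. Interval and
// Timeout are nanosecond counts, so they unmarshal directly into
// time.Duration (an int64 number of nanoseconds).
type healthConfig struct {
	Test     []string
	Interval time.Duration
	Timeout  time.Duration
	Retries  int
}

func main() {
	data := []byte(`{
		"Test": ["CMD-SHELL", "/usr/bin/check-health localhost"],
		"Interval": 30000000000,
		"Timeout": 10000000000,
		"Retries": 3
	}`)
	var hc healthConfig
	if err := json.Unmarshal(data, &hc); err != nil {
		panic(err)
	}
	fmt.Println(hc.Interval, hc.Timeout, hc.Retries) // 30s 10s 3
}
```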
-
-Any extra fields in the Image JSON struct are considered implementation
-specific and should be ignored by any implementations which are unable to
-interpret them.
-
-## Creating an Image Filesystem Changeset
-
-An example of creating an Image Filesystem Changeset follows.
-
-An image root filesystem is first created as an empty directory. Here is the
-initial empty directory structure for a changeset using the
-randomly-generated directory name `c3167915dc9d` ([actual layer DiffIDs are
-generated based on the content](#id_desc)).
-
-```
-c3167915dc9d/
-```
-
-Files and directories are then created:
-
-```
-c3167915dc9d/
-    etc/
-        my-app-config
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-The `c3167915dc9d` directory is then committed as a plain Tar archive with
-entries for the following files:
-
-```
-etc/my-app-config
-bin/my-app-binary
-bin/my-app-tools
-```
-
-To make changes to the filesystem of this container image, create a new
-directory, such as `f60c56784b83`, and initialize it with a snapshot of the
-parent image's root filesystem, so that the directory is identical to that
-of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem can make this
-very efficient:
-
-```
-f60c56784b83/
-    etc/
-        my-app-config
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-This example change is going to add a configuration directory at
-`/etc/my-app.d` which contains a default config file. There's also a change
-to the `my-app-tools` binary to handle the config layout change. The
-`f60c56784b83` directory then looks like this:
-
-```
-f60c56784b83/
-    etc/
-        my-app.d/
-            default.cfg
-    bin/
-        my-app-binary
-        my-app-tools
-```
-
-This reflects the removal of `/etc/my-app-config` and creation of a file and
-directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
-replaced with an updated version. Before committing this directory to a
-changeset, because it has a parent image, it is first compared with the
-directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
-directories that have been added, modified, or removed. The following
-changeset is found:
-
-```
-Added:      /etc/my-app.d/default.cfg
-Modified:   /bin/my-app-tools
-Deleted:    /etc/my-app-config
-```
-
-A Tar Archive is then created which contains *only* this changeset: the
-added and modified files and directories in their entirety, and for each
-deleted item an entry for an empty file at the same location but with the
-basename of the deleted file or directory prefixed with `.wh.`. The
-filenames prefixed with `.wh.` are known as "whiteout" files. NOTE: For this
-reason, it is not possible to create an image root filesystem which contains
-a file or directory with a name beginning with `.wh.`. The resulting Tar
-archive for `f60c56784b83` has the following entries:
-
-```
-/etc/my-app.d/default.cfg
-/bin/my-app-tools
-/etc/.wh.my-app-config
-```
-
-Any given image is likely to be composed of several of these Image
-Filesystem Changeset tar archives.
-
-## Combined Image JSON + Filesystem Changeset Format
-
-There is also a format for a single archive which contains complete
-information about an image, including:
-
- - repository names/tags
- - the image configuration JSON file
- - tar archives of each layer's filesystem changeset
-
-For example, here's what the full archive of `library/busybox` is (displayed
-in `tree` format):
-
-```
-.
-├── 47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json -├── 5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a -│   ├── VERSION -│   ├── json -│   └── layer.tar -├── a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198 -│   ├── VERSION -│   ├── json -│   └── layer.tar -├── manifest.json -└── repositories -``` - -There is a directory for each layer in the image. Each directory is named with -a 64 character hex name that is deterministically generated from the layer -information. These names are not necessarily layer DiffIDs or ChainIDs. Each of -these directories contains 3 files: - - * `VERSION` - The schema version of the `json` file - * `json` - The legacy JSON metadata for an image layer. In this version of - the image specification, layers don't have JSON metadata, but in - [version 1](v1.md), they did. A file is created for each layer in the - v1 format for backward compatibility. - * `layer.tar` - The Tar archive of the filesystem changeset for an image - layer. - -Note that this directory layout is only important for backward compatibility. -Current implementations use the paths specified in `manifest.json`. - -The content of the `VERSION` files is simply the semantic version of the JSON -metadata schema: - -``` -1.0 -``` - -The `repositories` file is another JSON file which describes names/tags: - -``` -{ - "busybox":{ - "latest":"5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a" - } -} -``` - -Every key in this object is the name of a repository, and maps to a collection -of tag suffixes. Each tag maps to the ID of the image represented by that tag. -This file is only used for backwards compatibility. Current implementations use -the `manifest.json` file instead. - -The `manifest.json` file provides the image JSON for the top-level image, and -optionally for parent images that this image was derived from. It consists of -an array of metadata entries: - -``` -[ - { - "Config": "47bcc53f74dc94b1920f0b34f6036096526296767650f223433fe65c35f149eb.json", - "RepoTags": ["busybox:latest"], - "Layers": [ - "a65da33792c5187473faa80fa3e1b975acba06712852d1dea860692ccddf3198/layer.tar", - "5f29f704785248ddb9d06b90a11b5ea36c534865e9035e4022bb2e71d4ecbb9a/layer.tar" - ] - } -] -``` - -There is an entry in the array for each image. - -The `Config` field references another file in the tar which includes the image -JSON for this image. - -The `RepoTags` field lists references pointing to this image. - -The `Layers` field points to the filesystem changeset tars. - -An optional `Parent` field references the imageID of the parent image. This -parent must be part of the same `manifest.json` file. - -This file shouldn't be confused with the distribution manifest, used to push -and pull images. - -Generally, implementations that support this version of the spec will use -the `manifest.json` file if available, and older implementations will use the -legacy `*/json` files and `repositories`. diff --git a/image/spec/v1.md b/image/spec/v1.md deleted file mode 100644 index 57a599b8ff..0000000000 --- a/image/spec/v1.md +++ /dev/null @@ -1,573 +0,0 @@ -# Docker Image Specification v1.0.0 - -An *Image* is an ordered collection of root filesystem changes and the -corresponding execution parameters for use within a container runtime. This -specification outlines the format of these filesystem changes and corresponding -parameters and describes how to create and use them for use with a container -runtime and execution tool. 
- -## Terminology - -This specification uses the following terms: - -
-<dl>
-  <dt>Layer</dt>
-  <dd>Images are composed of <i>layers</i>. <i>Image layer</i> is a general
-  term which may be used to refer to one or both of the following:
-  <ol>
-    <li>The metadata for the layer, described in the JSON format.</li>
-    <li>The filesystem changes described by a layer.</li>
-  </ol>
-  To refer to the former you may use the term <i>Layer JSON</i> or
-  <i>Layer Metadata</i>. To refer to the latter you may use the term
-  <i>Image Filesystem Changeset</i> or <i>Image Diff</i>.</dd>
-  <dt>Image JSON</dt>
-  <dd>Each layer has an associated JSON structure which describes some basic
-  information about the image such as date created, author, and the ID of
-  its parent image as well as execution/runtime configuration like its entry
-  point, default arguments, CPU/memory shares, networking, and volumes.</dd>
-  <dt>Image Filesystem Changeset</dt>
-  <dd>Each layer has an archive of the files which have been added, changed,
-  or deleted relative to its parent layer. Using a layer-based or union
-  filesystem such as AUFS, or by computing the diff from filesystem
-  snapshots, the filesystem changeset can be used to present a series of
-  image layers as if they were one cohesive filesystem.</dd>
-  <dt>Image ID</dt>
-  <dd>Each layer is given an ID upon its creation. It is represented as a
-  hexadecimal encoding of 256 bits, e.g.,
-  <code>a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9</code>.
-  Image IDs should be sufficiently random so as to be globally unique. 32
-  bytes read from <code>/dev/urandom</code> is sufficient for all practical
-  purposes. Alternatively, an image ID may be derived as a cryptographic
-  hash of image contents as the result is considered indistinguishable from
-  random. The choice is left up to implementors. (A Go sketch of the random
-  variant follows this list.)</dd>
-  <dt>Image Parent</dt>
-  <dd>Most layer metadata structs contain a <code>parent</code> field which
-  refers to the Image from which another directly descends. An image
-  contains a separate JSON metadata file and set of changes relative to the
-  filesystem of its parent image. <i>Image Ancestor</i> and
-  <i>Image Descendant</i> are also common terms.</dd>
-  <dt>Image Checksum</dt>
-  <dd>Layer metadata structs contain a cryptographic hash of the contents of
-  the layer's filesystem changeset. Though the set of changes exists as a
-  simple Tar archive, two archives with identical filenames and content will
-  have different SHA digests if the last-access or last-modified times of
-  any entries differ. For this reason, image checksums are generated using
-  the TarSum algorithm which produces a cryptographic hash of file contents
-  and selected headers only. Details of this algorithm are described in the
-  separate TarSum specification.</dd>
-  <dt>Tag</dt>
-  <dd>A tag serves to map a descriptive, user-given name to any single image
-  ID. An image name suffix (the name component after <code>:</code>) is
-  often referred to as a tag as well, though it strictly refers to the full
-  name of an image. Acceptable values for a tag suffix are implementation
-  specific, but they SHOULD be limited to the set of alphanumeric characters
-  <code>[a-zA-Z0-9]</code>, punctuation characters <code>[._-]</code>, and
-  MUST NOT contain a <code>:</code> character.</dd>
-  <dt>Repository</dt>
-  <dd>A collection of tags grouped under a common prefix (the name component
-  before <code>:</code>). For example, in an image tagged with the name
-  <code>my-app:3.1.4</code>, <code>my-app</code> is the <i>Repository</i>
-  component of the name. Acceptable values for repository name are
-  implementation specific, but they SHOULD be limited to the set of
-  alphanumeric characters <code>[a-zA-Z0-9]</code>, and punctuation
-  characters <code>[._-]</code>, however it MAY contain additional
-  <code>/</code> and <code>:</code> characters for organizational purposes,
-  with the last <code>:</code> character being interpreted as dividing the
-  repository component of the name from the tag suffix component.</dd>
-</dl>
-
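A minimal Go sketch of the random Image ID generation described above, added for illustration only; `crypto/rand` plays the role of `/dev/urandom` on the platforms this spec targets.

```
package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"
)

func main() {
	// 256 bits of randomness, hex encoded, as the spec suggests for v1
	// image IDs. crypto/rand reads from the OS CSPRNG (/dev/urandom on
	// Linux), which the spec deems sufficient for global uniqueness.
	id := make([]byte, 32)
	if _, err := rand.Read(id); err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(id)) // 64 hex characters
}
```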
-
-## Image JSON Description
-
-Here is an example image JSON file:
-
-```
-{
-    "id": "a9561eb1b190625c9adb5a9513e72c4dedafc1cb2d4c5236c9a6957ec7dfd5a9",
-    "parent": "c6e3cedcda2e3982a1a6760e178355e8e65f7b80e4e5248743fa3549d284e024",
-    "checksum": "tarsum.v1+sha256:e58fcf7418d2390dec8e8fb69d88c06ec07039d651fedc3aa72af9972e7d046b",
-    "created": "2014-10-13T21:19:18.674353812Z",
-    "author": "Alyssa P. Hacker <alyspdev@example.com>",
-    "architecture": "amd64",
-    "os": "linux",
-    "Size": 271828,
-    "config": {
-        "User": "alice",
-        "Memory": 2048,
-        "MemorySwap": 4096,
-        "CpuShares": 8,
-        "ExposedPorts": {
-            "8080/tcp": {}
-        },
-        "Env": [
-            "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-            "FOO=docker_is_a_really",
-            "BAR=great_tool_you_know"
-        ],
-        "Entrypoint": [
-            "/bin/my-app-binary"
-        ],
-        "Cmd": [
-            "--foreground",
-            "--config",
-            "/etc/my-app.d/default.cfg"
-        ],
-        "Volumes": {
-            "/var/job-result-data": {},
-            "/var/log/my-app-logs": {}
-        },
-        "WorkingDir": "/home/alice"
-    }
-}
-```
-
-### Image JSON Field Descriptions
-
-
- id string -
-
- Randomly generated, 256-bit, hexadecimal encoded. Uniquely identifies - the image. -
-
- parent string -
-
- ID of the parent image. If there is no parent image then this field - should be omitted. A collection of images may share many of the same - ancestor layers. This organizational structure is strictly a tree with - any one layer having either no parent or a single parent and zero or - more descendant layers. Cycles are not allowed and implementations - should be careful to avoid creating them or iterating through a cycle - indefinitely. -
-
- created string -
-
- ISO-8601 formatted combined date and time at which the image was - created. -
-
- author string -
-
- Gives the name and/or email address of the person or entity which - created and is responsible for maintaining the image. -
-
- architecture string -
-
- The CPU architecture which the binaries in this image are built to run - on. Possible values include: -
  • 386
  • amd64
  • arm
- More values may be supported in the future and any of these may or may - not be supported by a given container runtime implementation. -
-
- os string -
-
- The name of the operating system which the image is built to run on. - Possible values include: -
  • darwin
  • freebsd
  • linux
- More values may be supported in the future and any of these may or may - not be supported by a given container runtime implementation. -
-
- checksum string -
-
- Image Checksum of the filesystem changeset associated with the image - layer. -
-
- Size integer -
-
- The size in bytes of the filesystem changeset associated with the image - layer. -
-
- config struct -
-
- The execution parameters which should be used as a base when running a - container using the image. This field can be null, in - which case any execution parameters should be specified at creation of - the container. - -

Container RunConfig Field Descriptions

- -
-
- User string -
-
-

The username or UID which the process in the container should - run as. This acts as a default value to use when the value is - not specified when creating a container.

- -

All of the following are valid:

- -
  • user
  • uid
  • user:group
  • uid:gid
  • uid:group
  • user:gid
- -

If group/gid is not specified, the - default group and supplementary groups of the given - user/uid in /etc/passwd - from the container are applied.

-
-
- Memory integer -
-
- Memory limit (in bytes). This acts as a default value to use - when the value is not specified when creating a container. -
-
- MemorySwap integer -
-
- Total memory usage (memory + swap); set to -1 to - disable swap. This acts as a default value to use when the - value is not specified when creating a container. -
-
- CpuShares integer -
-
- CPU shares (relative weight vs. other containers). This acts as - a default value to use when the value is not specified when - creating a container. -
-
- ExposedPorts struct -
-
- A set of ports to expose from a container running this image. - This JSON structure value is unusual because it is a direct - JSON serialization of the Go type - map[string]struct{} and is represented in JSON as - an object mapping its keys to an empty object. Here is an - example: - -
{
-    "8080": {},
-    "53/udp": {},
-    "2356/tcp": {}
-}
- - Its keys can be in the format of: -
  • "port/tcp"
  • "port/udp"
  • "port"
- with the default protocol being "tcp" if not - specified. - - These values act as defaults and are merged with any specified - when creating a container. -
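Because this field is a direct serialization of a Go map[string]struct{}, producing the unusual JSON shape is trivial in Go. A short sketch with invented port values:

```
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// The spec's "set" type: keys are ports, values carry no data.
	exposed := map[string]struct{}{
		"8080":     {},
		"53/udp":   {},
		"2356/tcp": {},
	}
	out, _ := json.Marshal(exposed)
	fmt.Println(string(out)) // {"2356/tcp":{},"53/udp":{},"8080":{}}
}
```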
-
- Env array of strings -
-
- Entries are in the format of VARNAME="var value". - These values act as defaults and are merged with any specified - when creating a container. -
-
- Entrypoint array of strings -
-
- A list of arguments to use as the command to execute when the - container starts. This value acts as a default and is replaced - by an entrypoint specified when creating a container. -
-
- Cmd array of strings -
-
- Default arguments to the entry point of the container. These - values act as defaults and are replaced with any specified when - creating a container. If an Entrypoint value is - not specified, then the first entry of the Cmd - array should be interpreted as the executable to run. -
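The interaction between Entrypoint and Cmd comes down to one append. This is a sketch of the defaulting rule as described above, not code from any particular runtime:

```
package main

import "fmt"

// effectiveArgv applies the defaulting rule: Cmd is appended to
// Entrypoint; with no Entrypoint, Cmd[0] is the executable to run.
func effectiveArgv(entrypoint, cmd []string) []string {
	return append(append([]string{}, entrypoint...), cmd...)
}

func main() {
	entrypoint := []string{"/bin/my-app-binary"}
	cmd := []string{"--foreground", "--config", "/etc/my-app.d/default.cfg"}
	fmt.Println(effectiveArgv(entrypoint, cmd))
	fmt.Println(effectiveArgv(nil, cmd)) // Cmd alone: first entry is the executable
}
```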
-
- Volumes struct -
-
- A set of directories which should be created as data volumes in - a container running this image. This JSON structure value is - unusual because it is a direct JSON serialization of the Go - type map[string]struct{} and is represented in - JSON as an object mapping its keys to an empty object. Here is - an example: -
{
-    "/var/my-app-data/": {},
-    "/etc/some-config.d/": {},
-}
-
-
- WorkingDir string -
-
- Sets the current working directory of the entry point process - in the container. This value acts as a default and is replaced - by a working directory specified when creating a container. -
-
-
-
Any extra fields in the Image JSON struct are considered implementation
specific and should be ignored by any implementations which are unable to
interpret them.

## Creating an Image Filesystem Changeset

An example of creating an Image Filesystem Changeset follows.

An image root filesystem is first created as an empty directory named with the
ID of the image being created. Here is the initial empty directory structure
for the changeset for an image with ID `c3167915dc9d` ([real IDs are much
longer](#id_desc), but this example uses a truncated one for brevity.
Implementations need not name the rootfs directory in this way, but it may be
convenient for keeping a record of a large number of image layers.):

```
c3167915dc9d/
```

Files and directories are then created:

```
c3167915dc9d/
    etc/
        my-app-config
    bin/
        my-app-binary
        my-app-tools
```

The `c3167915dc9d` directory is then committed as a plain Tar archive with
entries for the following files:

```
etc/my-app-config
bin/my-app-binary
bin/my-app-tools
```

The TarSum checksum for the archive file is then computed and placed in the
JSON metadata along with the execution parameters.

To make changes to the filesystem of this container image, create a new
directory named with a new ID, such as `f60c56784b83`, and initialize it with
a snapshot of the parent image's root filesystem, so that the directory is
identical to that of `c3167915dc9d`. NOTE: a copy-on-write or union filesystem
can make this very efficient:

```
f60c56784b83/
    etc/
        my-app-config
    bin/
        my-app-binary
        my-app-tools
```

This example change adds a configuration directory at `/etc/my-app.d`
which contains a default config file. There's also a change to the
`my-app-tools` binary to handle the config layout change. The `f60c56784b83`
directory then looks like this:

```
f60c56784b83/
    etc/
        my-app.d/
            default.cfg
    bin/
        my-app-binary
        my-app-tools
```

This reflects the removal of `/etc/my-app-config` and the creation of a file
and directory at `/etc/my-app.d/default.cfg`. `/bin/my-app-tools` has also been
replaced with an updated version. Before committing this directory to a
changeset, because it has a parent image, it is first compared with the
directory tree of the parent snapshot, `c3167915dc9d`, looking for files and
directories that have been added, modified, or removed. The following changeset
is found:

```
Added: /etc/my-app.d/default.cfg
Modified: /bin/my-app-tools
Deleted: /etc/my-app-config
```

A Tar archive is then created which contains *only* this changeset: the added
and modified files and directories in their entirety, and for each deleted item
an entry for an empty file at the same location but with the basename of the
deleted file or directory prefixed with `.wh.`. The filenames prefixed with
`.wh.` are known as "whiteout" files. NOTE: For this reason, it is not possible
to create an image root filesystem which contains a file or directory with a
name beginning with `.wh.`. The resulting Tar archive for `f60c56784b83` has
the following entries:

```
/etc/my-app.d/default.cfg
/bin/my-app-tools
/etc/.wh.my-app-config
```

Any given image is likely to be composed of several of these Image Filesystem
Changeset tar archives.
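The whiteout convention is mechanical to produce. A hedged Go sketch (the helper name and file list are invented for illustration) that writes deletion markers into a changeset archive:

```
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"path/filepath"
)

// whiteout returns the tar entry name recording a deletion: the
// basename gains a ".wh." prefix while the directory part is kept.
func whiteout(deleted string) string {
	dir, base := filepath.Split(deleted)
	return dir + ".wh." + base
}

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)
	// Added and modified files go into the archive in their entirety
	// (omitted here); each deletion becomes an empty marker file.
	for _, d := range []string{"etc/my-app-config"} {
		tw.WriteHeader(&tar.Header{Name: whiteout(d), Mode: 0644, Size: 0})
	}
	tw.Close()
	fmt.Println(whiteout("etc/my-app-config")) // etc/.wh.my-app-config
}
```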
- -## Combined Image JSON + Filesystem Changeset Format - -There is also a format for a single archive which contains complete information -about an image, including: - - - repository names/tags - - all image layer JSON files - - all tar archives of each layer filesystem changesets - -For example, here's what the full archive of `library/busybox` is (displayed in -`tree` format): - -``` -. -├── 5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e -│   ├── VERSION -│   ├── json -│   └── layer.tar -├── a7b8b41220991bfc754d7ad445ad27b7f272ab8b4a2c175b9512b97471d02a8a -│   ├── VERSION -│   ├── json -│   └── layer.tar -├── a936027c5ca8bf8f517923169a233e391cbb38469a75de8383b5228dc2d26ceb -│   ├── VERSION -│   ├── json -│   └── layer.tar -├── f60c56784b832dd990022afc120b8136ab3da9528094752ae13fe63a2d28dc8c -│   ├── VERSION -│   ├── json -│   └── layer.tar -└── repositories -``` - -There are one or more directories named with the ID for each layer in a full -image. Each of these directories contains 3 files: - - * `VERSION` - The schema version of the `json` file - * `json` - The JSON metadata for an image layer - * `layer.tar` - The Tar archive of the filesystem changeset for an image - layer. - -The content of the `VERSION` files is simply the semantic version of the JSON -metadata schema: - -``` -1.0 -``` - -And the `repositories` file is another JSON file which describes names/tags: - -``` -{ - "busybox":{ - "latest":"5785b62b697b99a5af6cd5d0aabc804d5748abbb6d3d07da5d1d3795f2dcc83e" - } -} -``` - -Every key in this object is the name of a repository, and maps to a collection -of tag suffixes. Each tag maps to the ID of the image represented by that tag. - -## Loading an Image Filesystem Changeset - -Unpacking a bundle of image layer JSON files and their corresponding filesystem -changesets can be done using a series of steps: - -1. Follow the parent IDs of image layers to find the root ancestor (an image -with no parent ID specified). -2. For every image layer, in order from root ancestor and descending down, -extract the contents of that layer's filesystem changeset archive into a -directory which will be used as the root of a container filesystem. - - - Extract all contents of each archive. - - Walk the directory tree once more, removing any files with the prefix - `.wh.` and the corresponding file or directory named without this prefix. - - -## Implementations - -This specification is an admittedly imperfect description of an -imperfectly-understood problem. The Docker project is, in turn, an attempt to -implement this specification. Our goal and our execution toward it will evolve -over time, but our primary concern in this specification and in our -implementation is compatibility and interoperability. diff --git a/image/store.go b/image/store.go deleted file mode 100644 index 92ac438db7..0000000000 --- a/image/store.go +++ /dev/null @@ -1,295 +0,0 @@ -package image - -import ( - "encoding/json" - "errors" - "fmt" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/layer" -) - -// Store is an interface for creating and accessing images -type Store interface { - Create(config []byte) (ID, error) - Get(id ID) (*Image, error) - Delete(id ID) ([]layer.Metadata, error) - Search(partialID string) (ID, error) - SetParent(id ID, parent ID) error - GetParent(id ID) (ID, error) - Children(id ID) []ID - Map() map[ID]*Image - Heads() map[ID]*Image -} - -// LayerGetReleaser is a minimal interface for getting and releasing images. 
-type LayerGetReleaser interface { - Get(layer.ChainID) (layer.Layer, error) - Release(layer.Layer) ([]layer.Metadata, error) -} - -type imageMeta struct { - layer layer.Layer - children map[ID]struct{} -} - -type store struct { - sync.Mutex - ls LayerGetReleaser - images map[ID]*imageMeta - fs StoreBackend - digestSet *digest.Set -} - -// NewImageStore returns new store object for given layer store -func NewImageStore(fs StoreBackend, ls LayerGetReleaser) (Store, error) { - is := &store{ - ls: ls, - images: make(map[ID]*imageMeta), - fs: fs, - digestSet: digest.NewSet(), - } - - // load all current images and retain layers - if err := is.restore(); err != nil { - return nil, err - } - - return is, nil -} - -func (is *store) restore() error { - err := is.fs.Walk(func(id ID) error { - img, err := is.Get(id) - if err != nil { - logrus.Errorf("invalid image %v, %v", id, err) - return nil - } - var l layer.Layer - if chainID := img.RootFS.ChainID(); chainID != "" { - l, err = is.ls.Get(chainID) - if err != nil { - return err - } - } - if err := is.digestSet.Add(digest.Digest(id)); err != nil { - return err - } - - imageMeta := &imageMeta{ - layer: l, - children: make(map[ID]struct{}), - } - - is.images[ID(id)] = imageMeta - - return nil - }) - if err != nil { - return err - } - - // Second pass to fill in children maps - for id := range is.images { - if parent, err := is.GetParent(id); err == nil { - if parentMeta := is.images[parent]; parentMeta != nil { - parentMeta.children[id] = struct{}{} - } - } - } - - return nil -} - -func (is *store) Create(config []byte) (ID, error) { - var img Image - err := json.Unmarshal(config, &img) - if err != nil { - return "", err - } - - // Must reject any config that references diffIDs from the history - // which aren't among the rootfs layers. 
- rootFSLayers := make(map[layer.DiffID]struct{}) - for _, diffID := range img.RootFS.DiffIDs { - rootFSLayers[diffID] = struct{}{} - } - - layerCounter := 0 - for _, h := range img.History { - if !h.EmptyLayer { - layerCounter++ - } - } - if layerCounter > len(img.RootFS.DiffIDs) { - return "", errors.New("too many non-empty layers in History section") - } - - dgst, err := is.fs.Set(config) - if err != nil { - return "", err - } - imageID := ID(dgst) - - is.Lock() - defer is.Unlock() - - if _, exists := is.images[imageID]; exists { - return imageID, nil - } - - layerID := img.RootFS.ChainID() - - var l layer.Layer - if layerID != "" { - l, err = is.ls.Get(layerID) - if err != nil { - return "", err - } - } - - imageMeta := &imageMeta{ - layer: l, - children: make(map[ID]struct{}), - } - - is.images[imageID] = imageMeta - if err := is.digestSet.Add(digest.Digest(imageID)); err != nil { - delete(is.images, imageID) - return "", err - } - - return imageID, nil -} - -func (is *store) Search(term string) (ID, error) { - is.Lock() - defer is.Unlock() - - dgst, err := is.digestSet.Lookup(term) - if err != nil { - if err == digest.ErrDigestNotFound { - err = fmt.Errorf("No such image: %s", term) - } - return "", err - } - return ID(dgst), nil -} - -func (is *store) Get(id ID) (*Image, error) { - // todo: Check if image is in images - // todo: Detect manual insertions and start using them - config, err := is.fs.Get(id) - if err != nil { - return nil, err - } - - img, err := NewFromJSON(config) - if err != nil { - return nil, err - } - img.computedID = id - - img.Parent, err = is.GetParent(id) - if err != nil { - img.Parent = "" - } - - return img, nil -} - -func (is *store) Delete(id ID) ([]layer.Metadata, error) { - is.Lock() - defer is.Unlock() - - imageMeta := is.images[id] - if imageMeta == nil { - return nil, fmt.Errorf("unrecognized image ID %s", id.String()) - } - for id := range imageMeta.children { - is.fs.DeleteMetadata(id, "parent") - } - if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { - delete(is.images[parent].children, id) - } - - if err := is.digestSet.Remove(digest.Digest(id)); err != nil { - logrus.Errorf("error removing %s from digest set: %q", id, err) - } - delete(is.images, id) - is.fs.Delete(id) - - if imageMeta.layer != nil { - return is.ls.Release(imageMeta.layer) - } - return nil, nil -} - -func (is *store) SetParent(id, parent ID) error { - is.Lock() - defer is.Unlock() - parentMeta := is.images[parent] - if parentMeta == nil { - return fmt.Errorf("unknown parent image ID %s", parent.String()) - } - if parent, err := is.GetParent(id); err == nil && is.images[parent] != nil { - delete(is.images[parent].children, id) - } - parentMeta.children[id] = struct{}{} - return is.fs.SetMetadata(id, "parent", []byte(parent)) -} - -func (is *store) GetParent(id ID) (ID, error) { - d, err := is.fs.GetMetadata(id, "parent") - if err != nil { - return "", err - } - return ID(d), nil // todo: validate? 
-} - -func (is *store) Children(id ID) []ID { - is.Lock() - defer is.Unlock() - - return is.children(id) -} - -func (is *store) children(id ID) []ID { - var ids []ID - if is.images[id] != nil { - for id := range is.images[id].children { - ids = append(ids, id) - } - } - return ids -} - -func (is *store) Heads() map[ID]*Image { - return is.imagesMap(false) -} - -func (is *store) Map() map[ID]*Image { - return is.imagesMap(true) -} - -func (is *store) imagesMap(all bool) map[ID]*Image { - is.Lock() - defer is.Unlock() - - images := make(map[ID]*Image) - - for id := range is.images { - if !all && len(is.children(id)) > 0 { - continue - } - img, err := is.Get(id) - if err != nil { - logrus.Errorf("invalid image access: %q, error: %q", id, err) - continue - } - images[id] = img - } - return images -} diff --git a/image/store_test.go b/image/store_test.go deleted file mode 100644 index 50f8aa8b84..0000000000 --- a/image/store_test.go +++ /dev/null @@ -1,300 +0,0 @@ -package image - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/layer" -) - -func TestRestore(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - id1, err := fs.Set([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } - _, err = fs.Set([]byte(`invalid`)) - if err != nil { - t.Fatal(err) - } - id2, err := fs.Set([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - if err != nil { - t.Fatal(err) - } - err = fs.SetMetadata(id2, "parent", []byte(id1)) - if err != nil { - t.Fatal(err) - } - - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } - - imgs := is.Map() - if actual, expected := len(imgs), 2; actual != expected { - t.Fatalf("invalid images length, expected 2, got %q", len(imgs)) - } - - img1, err := is.Get(ID(id1)) - if err != nil { - t.Fatal(err) - } - - if actual, expected := img1.computedID, ID(id1); actual != expected { - t.Fatalf("invalid image ID: expected %q, got %q", expected, actual) - } - - if actual, expected := img1.computedID.String(), string(id1); actual != expected { - t.Fatalf("invalid image ID string: expected %q, got %q", expected, actual) - } - - img2, err := is.Get(ID(id2)) - if err != nil { - t.Fatal(err) - } - - if actual, expected := img1.Comment, "abc"; actual != expected { - t.Fatalf("invalid comment for image1: expected %q, got %q", expected, actual) - } - - if actual, expected := img2.Comment, "def"; actual != expected { - t.Fatalf("invalid comment for image2: expected %q, got %q", expected, actual) - } - - p, err := is.GetParent(ID(id1)) - if err == nil { - t.Fatal("expected error for getting parent") - } - - p, err = is.GetParent(ID(id2)) - if err != nil { - t.Fatal(err) - } - if actual, expected := p, ID(id1); actual != expected { - t.Fatalf("invalid parent: expected %q, got %q", expected, actual) - } - - children := is.Children(ID(id1)) - if len(children) != 1 { - t.Fatalf("invalid children length: %q", len(children)) - } - if actual, expected := children[0], ID(id2); actual != expected { - t.Fatalf("invalid child for id1: expected %q, got %q", expected, actual) - } - - heads := is.Heads() - if actual, expected := len(heads), 1; actual != expected { - t.Fatalf("invalid images length: expected %q, got 
%q", expected, actual) - } - - sid1, err := is.Search(string(id1)[:10]) - if err != nil { - t.Fatal(err) - } - if actual, expected := sid1, ID(id1); actual != expected { - t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) - } - - sid1, err = is.Search(digest.Digest(id1).Hex()[:6]) - if err != nil { - t.Fatal(err) - } - if actual, expected := sid1, ID(id1); actual != expected { - t.Fatalf("searched ID mismatch: expected %q, got %q", expected, actual) - } - - invalidPattern := digest.Digest(id1).Hex()[1:6] - _, err = is.Search(invalidPattern) - if err == nil { - t.Fatalf("expected search for %q to fail", invalidPattern) - } - -} - -func TestAddDelete(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } - - id1, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - if err != nil { - t.Fatal(err) - } - - if actual, expected := id1, ID("sha256:8d25a9c45df515f9d0fe8e4a6b1c64dd3b965a84790ddbcc7954bb9bc89eb993"); actual != expected { - t.Fatalf("create ID mismatch: expected %q, got %q", expected, actual) - } - - img, err := is.Get(id1) - if err != nil { - t.Fatal(err) - } - - if actual, expected := img.Comment, "abc"; actual != expected { - t.Fatalf("invalid comment in image: expected %q, got %q", expected, actual) - } - - id2, err := is.Create([]byte(`{"comment": "def", "rootfs": {"type": "layers", "diff_ids": ["2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"]}}`)) - if err != nil { - t.Fatal(err) - } - - err = is.SetParent(id2, id1) - if err != nil { - t.Fatal(err) - } - - pid1, err := is.GetParent(id2) - if err != nil { - t.Fatal(err) - } - if actual, expected := pid1, id1; actual != expected { - t.Fatalf("invalid parent for image: expected %q, got %q", expected, actual) - } - - _, err = is.Delete(id1) - if err != nil { - t.Fatal(err) - } - _, err = is.Get(id1) - if err == nil { - t.Fatalf("expected get for deleted image %q to fail", id1) - } - _, err = is.Get(id2) - if err != nil { - t.Fatal(err) - } - pid1, err = is.GetParent(id2) - if err == nil { - t.Fatalf("expected parent check for image %q to fail, got %q", id2, pid1) - } - -} - -func TestSearchAfterDelete(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - t.Fatal(err) - } - - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } - - id, err := is.Create([]byte(`{"comment": "abc", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } - - id1, err := is.Search(string(id)[:15]) - if err != nil { - t.Fatal(err) - } - - if actual, expected := id1, id; expected != actual { - t.Fatalf("wrong id returned from search: expected %q, got %q", expected, actual) - } - - if _, err := is.Delete(id); err != nil { - t.Fatal(err) - } - - if _, err := is.Search(string(id)[:15]); err == nil { - t.Fatal("expected search after deletion to fail") - } -} - -func TestParentReset(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "images-fs-store") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - fs, err := NewFSStoreBackend(tmpdir) - if err != nil { - 
t.Fatal(err) - } - - is, err := NewImageStore(fs, &mockLayerGetReleaser{}) - if err != nil { - t.Fatal(err) - } - - id, err := is.Create([]byte(`{"comment": "abc1", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } - - id2, err := is.Create([]byte(`{"comment": "abc2", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } - - id3, err := is.Create([]byte(`{"comment": "abc3", "rootfs": {"type": "layers"}}`)) - if err != nil { - t.Fatal(err) - } - - if err := is.SetParent(id, id2); err != nil { - t.Fatal(err) - } - - ids := is.Children(id2) - if actual, expected := len(ids), 1; expected != actual { - t.Fatalf("wrong number of children: %d, got %d", expected, actual) - } - - if err := is.SetParent(id, id3); err != nil { - t.Fatal(err) - } - - ids = is.Children(id2) - if actual, expected := len(ids), 0; expected != actual { - t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) - } - - ids = is.Children(id3) - if actual, expected := len(ids), 1; expected != actual { - t.Fatalf("wrong number of children after parent reset: %d, got %d", expected, actual) - } - -} - -type mockLayerGetReleaser struct{} - -func (ls *mockLayerGetReleaser) Get(layer.ChainID) (layer.Layer, error) { - return nil, nil -} - -func (ls *mockLayerGetReleaser) Release(layer.Layer) ([]layer.Metadata, error) { - return nil, nil -} diff --git a/image/tarexport/load.go b/image/tarexport/load.go deleted file mode 100644 index 59a499d6e3..0000000000 --- a/image/tarexport/load.go +++ /dev/null @@ -1,392 +0,0 @@ -package tarexport - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/pkg/progress" - "github.com/docker/docker/pkg/streamformatter" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/reference" -) - -func (l *tarexporter) Load(inTar io.ReadCloser, outStream io.Writer, quiet bool) error { - var ( - sf = streamformatter.NewJSONStreamFormatter() - progressOutput progress.Output - ) - if !quiet { - progressOutput = sf.NewProgressOutput(outStream, false) - outStream = &streamformatter.StdoutFormatter{Writer: outStream, StreamFormatter: streamformatter.NewJSONStreamFormatter()} - } - - tmpDir, err := ioutil.TempDir("", "docker-import-") - if err != nil { - return err - } - defer os.RemoveAll(tmpDir) - - if err := chrootarchive.Untar(inTar, tmpDir, nil); err != nil { - return err - } - // read manifest, if no file then load in legacy mode - manifestPath, err := safePath(tmpDir, manifestFileName) - if err != nil { - return err - } - manifestFile, err := os.Open(manifestPath) - if err != nil { - if os.IsNotExist(err) { - return l.legacyLoad(tmpDir, outStream, progressOutput) - } - return manifestFile.Close() - } - defer manifestFile.Close() - - var manifest []manifestItem - if err := json.NewDecoder(manifestFile).Decode(&manifest); err != nil { - return err - } - - var parentLinks []parentLink - var imageIDsStr string - var imageRefCount int - - for _, m := range manifest { - configPath, err := safePath(tmpDir, m.Config) - if err != nil { - return err - } - config, err := ioutil.ReadFile(configPath) - if err != nil { - return err - } - img, err := image.NewFromJSON(config) - 
if err != nil { - return err - } - var rootFS image.RootFS - rootFS = *img.RootFS - rootFS.DiffIDs = nil - - if expected, actual := len(m.Layers), len(img.RootFS.DiffIDs); expected != actual { - return fmt.Errorf("invalid manifest, layers length mismatch: expected %q, got %q", expected, actual) - } - - for i, diffID := range img.RootFS.DiffIDs { - layerPath, err := safePath(tmpDir, m.Layers[i]) - if err != nil { - return err - } - r := rootFS - r.Append(diffID) - newLayer, err := l.ls.Get(r.ChainID()) - if err != nil { - newLayer, err = l.loadLayer(layerPath, rootFS, diffID.String(), m.LayerSources[diffID], progressOutput) - if err != nil { - return err - } - } - defer layer.ReleaseAndLog(l.ls, newLayer) - if expected, actual := diffID, newLayer.DiffID(); expected != actual { - return fmt.Errorf("invalid diffID for layer %d: expected %q, got %q", i, expected, actual) - } - rootFS.Append(diffID) - } - - imgID, err := l.is.Create(config) - if err != nil { - return err - } - imageIDsStr += fmt.Sprintf("Loaded image ID: %s\n", imgID) - - imageRefCount = 0 - for _, repoTag := range m.RepoTags { - named, err := reference.ParseNamed(repoTag) - if err != nil { - return err - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return fmt.Errorf("invalid tag %q", repoTag) - } - l.setLoadedTag(ref, imgID, outStream) - outStream.Write([]byte(fmt.Sprintf("Loaded image: %s\n", ref))) - imageRefCount++ - } - - parentLinks = append(parentLinks, parentLink{imgID, m.Parent}) - l.loggerImgEvent.LogImageEvent(imgID.String(), imgID.String(), "load") - } - - for _, p := range validatedParentLinks(parentLinks) { - if p.parentID != "" { - if err := l.setParentID(p.id, p.parentID); err != nil { - return err - } - } - } - - if imageRefCount == 0 { - outStream.Write([]byte(imageIDsStr)) - } - - return nil -} - -func (l *tarexporter) setParentID(id, parentID image.ID) error { - img, err := l.is.Get(id) - if err != nil { - return err - } - parent, err := l.is.Get(parentID) - if err != nil { - return err - } - if !checkValidParent(img, parent) { - return fmt.Errorf("image %v is not a valid parent for %v", parent.ID(), img.ID()) - } - return l.is.SetParent(id, parentID) -} - -func (l *tarexporter) loadLayer(filename string, rootFS image.RootFS, id string, foreignSrc distribution.Descriptor, progressOutput progress.Output) (layer.Layer, error) { - rawTar, err := os.Open(filename) - if err != nil { - logrus.Debugf("Error reading embedded tar: %v", err) - return nil, err - } - defer rawTar.Close() - - inflatedLayerData, err := archive.DecompressStream(rawTar) - if err != nil { - return nil, err - } - defer inflatedLayerData.Close() - - if progressOutput != nil { - fileInfo, err := os.Stat(filename) - if err != nil { - logrus.Debugf("Error statting file: %v", err) - return nil, err - } - - progressReader := progress.NewProgressReader(inflatedLayerData, progressOutput, fileInfo.Size(), stringid.TruncateID(id), "Loading layer") - - if ds, ok := l.ls.(layer.DescribableStore); ok { - return ds.RegisterWithDescriptor(progressReader, rootFS.ChainID(), foreignSrc) - } - return l.ls.Register(progressReader, rootFS.ChainID()) - - } - - if ds, ok := l.ls.(layer.DescribableStore); ok { - return ds.RegisterWithDescriptor(inflatedLayerData, rootFS.ChainID(), foreignSrc) - } - return l.ls.Register(inflatedLayerData, rootFS.ChainID()) -} - -func (l *tarexporter) setLoadedTag(ref reference.NamedTagged, imgID image.ID, outStream io.Writer) error { - if prevID, err := l.rs.Get(ref); err == nil && prevID != imgID { - 
fmt.Fprintf(outStream, "The image %s already exists, renaming the old one with ID %s to empty string\n", ref.String(), string(prevID)) // todo: this message is wrong in case of multiple tags - } - - if err := l.rs.AddTag(ref, imgID, true); err != nil { - return err - } - return nil -} - -func (l *tarexporter) legacyLoad(tmpDir string, outStream io.Writer, progressOutput progress.Output) error { - legacyLoadedMap := make(map[string]image.ID) - - dirs, err := ioutil.ReadDir(tmpDir) - if err != nil { - return err - } - - // every dir represents an image - for _, d := range dirs { - if d.IsDir() { - if err := l.legacyLoadImage(d.Name(), tmpDir, legacyLoadedMap, progressOutput); err != nil { - return err - } - } - } - - // load tags from repositories file - repositoriesPath, err := safePath(tmpDir, legacyRepositoriesFileName) - if err != nil { - return err - } - repositoriesFile, err := os.Open(repositoriesPath) - if err != nil { - if !os.IsNotExist(err) { - return err - } - return repositoriesFile.Close() - } - defer repositoriesFile.Close() - - repositories := make(map[string]map[string]string) - if err := json.NewDecoder(repositoriesFile).Decode(&repositories); err != nil { - return err - } - - for name, tagMap := range repositories { - for tag, oldID := range tagMap { - imgID, ok := legacyLoadedMap[oldID] - if !ok { - return fmt.Errorf("invalid target ID: %v", oldID) - } - named, err := reference.WithName(name) - if err != nil { - return err - } - ref, err := reference.WithTag(named, tag) - if err != nil { - return err - } - l.setLoadedTag(ref, imgID, outStream) - } - } - - return nil -} - -func (l *tarexporter) legacyLoadImage(oldID, sourceDir string, loadedMap map[string]image.ID, progressOutput progress.Output) error { - if _, loaded := loadedMap[oldID]; loaded { - return nil - } - configPath, err := safePath(sourceDir, filepath.Join(oldID, legacyConfigFileName)) - if err != nil { - return err - } - imageJSON, err := ioutil.ReadFile(configPath) - if err != nil { - logrus.Debugf("Error reading json: %v", err) - return err - } - - var img struct{ Parent string } - if err := json.Unmarshal(imageJSON, &img); err != nil { - return err - } - - var parentID image.ID - if img.Parent != "" { - for { - var loaded bool - if parentID, loaded = loadedMap[img.Parent]; !loaded { - if err := l.legacyLoadImage(img.Parent, sourceDir, loadedMap, progressOutput); err != nil { - return err - } - } else { - break - } - } - } - - // todo: try to connect with migrate code - rootFS := image.NewRootFS() - var history []image.History - - if parentID != "" { - parentImg, err := l.is.Get(parentID) - if err != nil { - return err - } - - rootFS = parentImg.RootFS - history = parentImg.History - } - - layerPath, err := safePath(sourceDir, filepath.Join(oldID, legacyLayerFileName)) - if err != nil { - return err - } - newLayer, err := l.loadLayer(layerPath, *rootFS, oldID, distribution.Descriptor{}, progressOutput) - if err != nil { - return err - } - rootFS.Append(newLayer.DiffID()) - - h, err := v1.HistoryFromConfig(imageJSON, false) - if err != nil { - return err - } - history = append(history, h) - - config, err := v1.MakeConfigFromV1Config(imageJSON, rootFS, history) - if err != nil { - return err - } - imgID, err := l.is.Create(config) - if err != nil { - return err - } - - metadata, err := l.ls.Release(newLayer) - layer.LogReleaseMetadata(metadata) - if err != nil { - return err - } - - if parentID != "" { - if err := l.is.SetParent(imgID, parentID); err != nil { - return err - } - } - - loadedMap[oldID] = imgID 
- return nil -} - -func safePath(base, path string) (string, error) { - return symlink.FollowSymlinkInScope(filepath.Join(base, path), base) -} - -type parentLink struct { - id, parentID image.ID -} - -func validatedParentLinks(pl []parentLink) (ret []parentLink) { -mainloop: - for i, p := range pl { - ret = append(ret, p) - for _, p2 := range pl { - if p2.id == p.parentID && p2.id != p.id { - continue mainloop - } - } - ret[i].parentID = "" - } - return -} - -func checkValidParent(img, parent *image.Image) bool { - if len(img.History) == 0 && len(parent.History) == 0 { - return true // having history is not mandatory - } - if len(img.History)-len(parent.History) != 1 { - return false - } - for i, h := range parent.History { - if !reflect.DeepEqual(h, img.History[i]) { - return false - } - } - return true -} diff --git a/image/tarexport/save.go b/image/tarexport/save.go deleted file mode 100644 index ddb87ee954..0000000000 --- a/image/tarexport/save.go +++ /dev/null @@ -1,349 +0,0 @@ -package tarexport - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/image" - "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/reference" -) - -type imageDescriptor struct { - refs []reference.NamedTagged - layers []string -} - -type saveSession struct { - *tarexporter - outDir string - images map[image.ID]*imageDescriptor - savedLayers map[string]struct{} - diffIDPaths map[layer.DiffID]string // cache every diffID blob to avoid duplicates -} - -func (l *tarexporter) Save(names []string, outStream io.Writer) error { - images, err := l.parseNames(names) - if err != nil { - return err - } - - return (&saveSession{tarexporter: l, images: images}).save(outStream) -} - -func (l *tarexporter) parseNames(names []string) (map[image.ID]*imageDescriptor, error) { - imgDescr := make(map[image.ID]*imageDescriptor) - - addAssoc := func(id image.ID, ref reference.Named) { - if _, ok := imgDescr[id]; !ok { - imgDescr[id] = &imageDescriptor{} - } - - if ref != nil { - var tagged reference.NamedTagged - if _, ok := ref.(reference.Canonical); ok { - return - } - var ok bool - if tagged, ok = ref.(reference.NamedTagged); !ok { - var err error - if tagged, err = reference.WithTag(ref, reference.DefaultTag); err != nil { - return - } - } - - for _, t := range imgDescr[id].refs { - if tagged.String() == t.String() { - return - } - } - imgDescr[id].refs = append(imgDescr[id].refs, tagged) - } - } - - for _, name := range names { - id, ref, err := reference.ParseIDOrReference(name) - if err != nil { - return nil, err - } - if id != "" { - _, err := l.is.Get(image.ID(id)) - if err != nil { - return nil, err - } - addAssoc(image.ID(id), nil) - continue - } - if ref.Name() == string(digest.Canonical) { - imgID, err := l.is.Search(name) - if err != nil { - return nil, err - } - addAssoc(imgID, nil) - continue - } - if reference.IsNameOnly(ref) { - assocs := l.rs.ReferencesByName(ref) - for _, assoc := range assocs { - addAssoc(assoc.ImageID, assoc.Ref) - } - if len(assocs) == 0 { - imgID, err := l.is.Search(name) - if err != nil { - return nil, err - } - addAssoc(imgID, nil) - } - continue - } - var imgID image.ID - if imgID, err = l.rs.Get(ref); err != nil { - return nil, err - } - addAssoc(imgID, ref) - - } - return imgDescr, nil -} - -func (s *saveSession) 
save(outStream io.Writer) error { - s.savedLayers = make(map[string]struct{}) - s.diffIDPaths = make(map[layer.DiffID]string) - - // get image json - tempDir, err := ioutil.TempDir("", "docker-export-") - if err != nil { - return err - } - defer os.RemoveAll(tempDir) - - s.outDir = tempDir - reposLegacy := make(map[string]map[string]string) - - var manifest []manifestItem - var parentLinks []parentLink - - for id, imageDescr := range s.images { - foreignSrcs, err := s.saveImage(id) - if err != nil { - return err - } - - var repoTags []string - var layers []string - - for _, ref := range imageDescr.refs { - if _, ok := reposLegacy[ref.Name()]; !ok { - reposLegacy[ref.Name()] = make(map[string]string) - } - reposLegacy[ref.Name()][ref.Tag()] = imageDescr.layers[len(imageDescr.layers)-1] - repoTags = append(repoTags, ref.String()) - } - - for _, l := range imageDescr.layers { - layers = append(layers, filepath.Join(l, legacyLayerFileName)) - } - - manifest = append(manifest, manifestItem{ - Config: digest.Digest(id).Hex() + ".json", - RepoTags: repoTags, - Layers: layers, - LayerSources: foreignSrcs, - }) - - parentID, _ := s.is.GetParent(id) - parentLinks = append(parentLinks, parentLink{id, parentID}) - s.tarexporter.loggerImgEvent.LogImageEvent(id.String(), id.String(), "save") - } - - for i, p := range validatedParentLinks(parentLinks) { - if p.parentID != "" { - manifest[i].Parent = p.parentID - } - } - - if len(reposLegacy) > 0 { - reposFile := filepath.Join(tempDir, legacyRepositoriesFileName) - f, err := os.OpenFile(reposFile, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - f.Close() - return err - } - if err := json.NewEncoder(f).Encode(reposLegacy); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - if err := system.Chtimes(reposFile, time.Unix(0, 0), time.Unix(0, 0)); err != nil { - return err - } - } - - manifestFileName := filepath.Join(tempDir, manifestFileName) - f, err := os.OpenFile(manifestFileName, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - f.Close() - return err - } - if err := json.NewEncoder(f).Encode(manifest); err != nil { - return err - } - if err := f.Close(); err != nil { - return err - } - if err := system.Chtimes(manifestFileName, time.Unix(0, 0), time.Unix(0, 0)); err != nil { - return err - } - - fs, err := archive.Tar(tempDir, archive.Uncompressed) - if err != nil { - return err - } - defer fs.Close() - - if _, err := io.Copy(outStream, fs); err != nil { - return err - } - return nil -} - -func (s *saveSession) saveImage(id image.ID) (map[layer.DiffID]distribution.Descriptor, error) { - img, err := s.is.Get(id) - if err != nil { - return nil, err - } - - if len(img.RootFS.DiffIDs) == 0 { - return nil, fmt.Errorf("empty export - not implemented") - } - - var parent digest.Digest - var layers []string - var foreignSrcs map[layer.DiffID]distribution.Descriptor - for i := range img.RootFS.DiffIDs { - v1Img := image.V1Image{} - if i == len(img.RootFS.DiffIDs)-1 { - v1Img = img.V1Image - } - rootFS := *img.RootFS - rootFS.DiffIDs = rootFS.DiffIDs[:i+1] - v1ID, err := v1.CreateID(v1Img, rootFS.ChainID(), parent) - if err != nil { - return nil, err - } - - v1Img.ID = v1ID.Hex() - if parent != "" { - v1Img.Parent = parent.Hex() - } - - src, err := s.saveLayer(rootFS.ChainID(), v1Img, img.Created) - if err != nil { - return nil, err - } - layers = append(layers, v1Img.ID) - parent = v1ID - if src.Digest != "" { - if foreignSrcs == nil { - foreignSrcs = make(map[layer.DiffID]distribution.Descriptor) - } - 
foreignSrcs[img.RootFS.DiffIDs[i]] = src - } - } - - configFile := filepath.Join(s.outDir, digest.Digest(id).Hex()+".json") - if err := ioutil.WriteFile(configFile, img.RawJSON(), 0644); err != nil { - return nil, err - } - if err := system.Chtimes(configFile, img.Created, img.Created); err != nil { - return nil, err - } - - s.images[id].layers = layers - return foreignSrcs, nil -} - -func (s *saveSession) saveLayer(id layer.ChainID, legacyImg image.V1Image, createdTime time.Time) (distribution.Descriptor, error) { - if _, exists := s.savedLayers[legacyImg.ID]; exists { - return distribution.Descriptor{}, nil - } - - outDir := filepath.Join(s.outDir, legacyImg.ID) - if err := os.Mkdir(outDir, 0755); err != nil { - return distribution.Descriptor{}, err - } - - // todo: why is this version file here? - if err := ioutil.WriteFile(filepath.Join(outDir, legacyVersionFileName), []byte("1.0"), 0644); err != nil { - return distribution.Descriptor{}, err - } - - imageConfig, err := json.Marshal(legacyImg) - if err != nil { - return distribution.Descriptor{}, err - } - - if err := ioutil.WriteFile(filepath.Join(outDir, legacyConfigFileName), imageConfig, 0644); err != nil { - return distribution.Descriptor{}, err - } - - // serialize filesystem - layerPath := filepath.Join(outDir, legacyLayerFileName) - l, err := s.ls.Get(id) - if err != nil { - return distribution.Descriptor{}, err - } - defer layer.ReleaseAndLog(s.ls, l) - - if oldPath, exists := s.diffIDPaths[l.DiffID()]; exists { - relPath, err := filepath.Rel(outDir, oldPath) - if err != nil { - return distribution.Descriptor{}, err - } - os.Symlink(relPath, layerPath) - } else { - - tarFile, err := os.Create(layerPath) - if err != nil { - return distribution.Descriptor{}, err - } - defer tarFile.Close() - - arch, err := l.TarStream() - if err != nil { - return distribution.Descriptor{}, err - } - defer arch.Close() - - if _, err := io.Copy(tarFile, arch); err != nil { - return distribution.Descriptor{}, err - } - - for _, fname := range []string{"", legacyVersionFileName, legacyConfigFileName, legacyLayerFileName} { - // todo: maybe save layer created timestamp? 
- if err := system.Chtimes(filepath.Join(outDir, fname), createdTime, createdTime); err != nil { - return distribution.Descriptor{}, err - } - } - - s.diffIDPaths[l.DiffID()] = layerPath - } - s.savedLayers[legacyImg.ID] = struct{}{} - - var src distribution.Descriptor - if fs, ok := l.(distribution.Describable); ok { - src = fs.Descriptor() - } - return src, nil -} diff --git a/image/tarexport/tarexport.go b/image/tarexport/tarexport.go deleted file mode 100644 index c0be95480e..0000000000 --- a/image/tarexport/tarexport.go +++ /dev/null @@ -1,47 +0,0 @@ -package tarexport - -import ( - "github.com/docker/distribution" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" -) - -const ( - manifestFileName = "manifest.json" - legacyLayerFileName = "layer.tar" - legacyConfigFileName = "json" - legacyVersionFileName = "VERSION" - legacyRepositoriesFileName = "repositories" -) - -type manifestItem struct { - Config string - RepoTags []string - Layers []string - Parent image.ID `json:",omitempty"` - LayerSources map[layer.DiffID]distribution.Descriptor `json:",omitempty"` -} - -type tarexporter struct { - is image.Store - ls layer.Store - rs reference.Store - loggerImgEvent LogImageEvent -} - -// LogImageEvent defines interface for event generation related to image tar(load and save) operations -type LogImageEvent interface { - //LogImageEvent generates an event related to an image operation - LogImageEvent(imageID, refName, action string) -} - -// NewTarExporter returns new ImageExporter for tar packages -func NewTarExporter(is image.Store, ls layer.Store, rs reference.Store, loggerImgEvent LogImageEvent) image.Exporter { - return &tarexporter{ - is: is, - ls: ls, - rs: rs, - loggerImgEvent: loggerImgEvent, - } -} diff --git a/image/v1/imagev1.go b/image/v1/imagev1.go deleted file mode 100644 index b7a9529ed8..0000000000 --- a/image/v1/imagev1.go +++ /dev/null @@ -1,156 +0,0 @@ -package v1 - -import ( - "encoding/json" - "fmt" - "reflect" - "regexp" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/engine-api/types/versions" -) - -var validHex = regexp.MustCompile(`^([a-f0-9]{64})$`) - -// noFallbackMinVersion is the minimum version for which v1compatibility -// information will not be marshaled through the Image struct to remove -// blank fields. -var noFallbackMinVersion = "1.8.3" - -// HistoryFromConfig creates a History struct from v1 configuration JSON -func HistoryFromConfig(imageJSON []byte, emptyLayer bool) (image.History, error) { - h := image.History{} - var v1Image image.V1Image - if err := json.Unmarshal(imageJSON, &v1Image); err != nil { - return h, err - } - - return image.History{ - Author: v1Image.Author, - Created: v1Image.Created, - CreatedBy: strings.Join(v1Image.ContainerConfig.Cmd, " "), - Comment: v1Image.Comment, - EmptyLayer: emptyLayer, - }, nil -} - -// CreateID creates an ID from v1 image, layerID and parent ID. -// Used for backwards compatibility with old clients. 
-func CreateID(v1Image image.V1Image, layerID layer.ChainID, parent digest.Digest) (digest.Digest, error) { - v1Image.ID = "" - v1JSON, err := json.Marshal(v1Image) - if err != nil { - return "", err - } - - var config map[string]*json.RawMessage - if err := json.Unmarshal(v1JSON, &config); err != nil { - return "", err - } - - // FIXME: note that this is slightly incompatible with RootFS logic - config["layer_id"] = rawJSON(layerID) - if parent != "" { - config["parent"] = rawJSON(parent) - } - - configJSON, err := json.Marshal(config) - if err != nil { - return "", err - } - logrus.Debugf("CreateV1ID %s", configJSON) - - return digest.FromBytes(configJSON), nil -} - -// MakeConfigFromV1Config creates an image config from the legacy V1 config format. -func MakeConfigFromV1Config(imageJSON []byte, rootfs *image.RootFS, history []image.History) ([]byte, error) { - var dver struct { - DockerVersion string `json:"docker_version"` - } - - if err := json.Unmarshal(imageJSON, &dver); err != nil { - return nil, err - } - - useFallback := versions.LessThan(dver.DockerVersion, noFallbackMinVersion) - - if useFallback { - var v1Image image.V1Image - err := json.Unmarshal(imageJSON, &v1Image) - if err != nil { - return nil, err - } - imageJSON, err = json.Marshal(v1Image) - if err != nil { - return nil, err - } - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(imageJSON, &c); err != nil { - return nil, err - } - - delete(c, "id") - delete(c, "parent") - delete(c, "Size") // Size is calculated from data on disk and is inconsistent - delete(c, "parent_id") - delete(c, "layer_id") - delete(c, "throwaway") - - c["rootfs"] = rawJSON(rootfs) - c["history"] = rawJSON(history) - - return json.Marshal(c) -} - -// MakeV1ConfigFromConfig creates an legacy V1 image config from an Image struct -func MakeV1ConfigFromConfig(img *image.Image, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Top-level v1compatibility string should be a modified version of the - // image config. - var configAsMap map[string]*json.RawMessage - if err := json.Unmarshal(img.RawJSON(), &configAsMap); err != nil { - return nil, err - } - - // Delete fields that didn't exist in old manifest - imageType := reflect.TypeOf(img).Elem() - for i := 0; i < imageType.NumField(); i++ { - f := imageType.Field(i) - jsonName := strings.Split(f.Tag.Get("json"), ",")[0] - // Parent is handled specially below. - if jsonName != "" && jsonName != "parent" { - delete(configAsMap, jsonName) - } - } - configAsMap["id"] = rawJSON(v1ID) - if parentV1ID != "" { - configAsMap["parent"] = rawJSON(parentV1ID) - } - if throwaway { - configAsMap["throwaway"] = rawJSON(true) - } - - return json.Marshal(configAsMap) -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} - -// ValidateID checks whether an ID string is a valid image ID. 
-func ValidateID(id string) error { - if ok := validHex.MatchString(id); !ok { - return fmt.Errorf("image ID %q is invalid", id) - } - return nil -} diff --git a/image/v1/imagev1_test.go b/image/v1/imagev1_test.go deleted file mode 100644 index 936c55e4c5..0000000000 --- a/image/v1/imagev1_test.go +++ /dev/null @@ -1,55 +0,0 @@ -package v1 - -import ( - "encoding/json" - "testing" - - "github.com/docker/docker/image" -) - -func TestMakeV1ConfigFromConfig(t *testing.T) { - img := &image.Image{ - V1Image: image.V1Image{ - ID: "v2id", - Parent: "v2parent", - OS: "os", - }, - OSVersion: "osversion", - RootFS: &image.RootFS{ - Type: "layers", - }, - } - v2js, err := json.Marshal(img) - if err != nil { - t.Fatal(err) - } - - // Convert the image back in order to get RawJSON() support. - img, err = image.NewFromJSON(v2js) - if err != nil { - t.Fatal(err) - } - - js, err := MakeV1ConfigFromConfig(img, "v1id", "v1parent", false) - if err != nil { - t.Fatal(err) - } - - newimg := &image.Image{} - err = json.Unmarshal(js, newimg) - if err != nil { - t.Fatal(err) - } - - if newimg.V1Image.ID != "v1id" || newimg.Parent != "v1parent" { - t.Error("ids should have changed", newimg.V1Image.ID, newimg.V1Image.Parent) - } - - if newimg.RootFS != nil { - t.Error("rootfs should have been removed") - } - - if newimg.V1Image.OS != "os" { - t.Error("os should have been preserved") - } -} diff --git a/integration-cli/benchmark_test.go b/integration-cli/benchmark_test.go deleted file mode 100644 index 647d014d30..0000000000 --- a/integration-cli/benchmark_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - "sync" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) BenchmarkConcurrentContainerActions(c *check.C) { - maxConcurrency := runtime.GOMAXPROCS(0) - numIterations := c.N - outerGroup := &sync.WaitGroup{} - outerGroup.Add(maxConcurrency) - chErr := make(chan error, numIterations*2*maxConcurrency) - - for i := 0; i < maxConcurrency; i++ { - go func() { - defer outerGroup.Done() - innerGroup := &sync.WaitGroup{} - innerGroup.Add(2) - - go func() { - defer innerGroup.Done() - for i := 0; i < numIterations; i++ { - args := []string{"run", "-d", defaultSleepImage} - args = append(args, defaultSleepCommand...) - out, _, err := dockerCmdWithError(args...) 
- if err != nil { - chErr <- fmt.Errorf(out) - return - } - - id := strings.TrimSpace(out) - tmpDir, err := ioutil.TempDir("", "docker-concurrent-test-"+id) - if err != nil { - chErr <- err - return - } - defer os.RemoveAll(tmpDir) - out, _, err = dockerCmdWithError("cp", id+":/tmp", tmpDir) - if err != nil { - chErr <- fmt.Errorf(out) - return - } - - out, _, err = dockerCmdWithError("kill", id) - if err != nil { - chErr <- fmt.Errorf(out) - } - - out, _, err = dockerCmdWithError("start", id) - if err != nil { - chErr <- fmt.Errorf(out) - } - - out, _, err = dockerCmdWithError("kill", id) - if err != nil { - chErr <- fmt.Errorf(out) - } - - // don't do an rm -f here since it can potentially ignore errors from the graphdriver - out, _, err = dockerCmdWithError("rm", id) - if err != nil { - chErr <- fmt.Errorf(out) - } - } - }() - - go func() { - defer innerGroup.Done() - for i := 0; i < numIterations; i++ { - out, _, err := dockerCmdWithError("ps") - if err != nil { - chErr <- fmt.Errorf(out) - } - } - }() - - innerGroup.Wait() - }() - } - - outerGroup.Wait() - close(chErr) - - for err := range chErr { - c.Assert(err, checker.IsNil) - } -} diff --git a/integration-cli/check_test.go b/integration-cli/check_test.go deleted file mode 100644 index 226445a28c..0000000000 --- a/integration-cli/check_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package main - -import ( - "fmt" - "os" - "path/filepath" - "sync" - "testing" - - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/engine-api/types/swarm" - "github.com/go-check/check" -) - -func Test(t *testing.T) { - reexec.Init() // This is required for external graphdriver tests - - if !isLocalDaemon { - fmt.Println("INFO: Testing against a remote daemon") - } else { - fmt.Println("INFO: Testing against a local daemon") - } - - check.TestingT(t) -} - -func init() { - check.Suite(&DockerSuite{}) -} - -type DockerSuite struct { -} - -func (s *DockerSuite) TearDownTest(c *check.C) { - unpauseAllContainers() - deleteAllContainers() - deleteAllImages() - deleteAllVolumes() - deleteAllNetworks() -} - -func init() { - check.Suite(&DockerRegistrySuite{ - ds: &DockerSuite{}, - }) -} - -type DockerRegistrySuite struct { - ds *DockerSuite - reg *testRegistryV2 - d *Daemon -} - -func (s *DockerRegistrySuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, RegistryHosting) - s.reg = setupRegistry(c, false, "", "") - s.d = NewDaemon(c) -} - -func (s *DockerRegistrySuite) TearDownTest(c *check.C) { - if s.reg != nil { - s.reg.Close() - } - if s.d != nil { - s.d.Stop() - } - s.ds.TearDownTest(c) -} - -func init() { - check.Suite(&DockerSchema1RegistrySuite{ - ds: &DockerSuite{}, - }) -} - -type DockerSchema1RegistrySuite struct { - ds *DockerSuite - reg *testRegistryV2 - d *Daemon -} - -func (s *DockerSchema1RegistrySuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, RegistryHosting, NotArm64) - s.reg = setupRegistry(c, true, "", "") - s.d = NewDaemon(c) -} - -func (s *DockerSchema1RegistrySuite) TearDownTest(c *check.C) { - if s.reg != nil { - s.reg.Close() - } - if s.d != nil { - s.d.Stop() - } - s.ds.TearDownTest(c) -} - -func init() { - check.Suite(&DockerRegistryAuthHtpasswdSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerRegistryAuthHtpasswdSuite struct { - ds *DockerSuite - reg *testRegistryV2 - d *Daemon -} - -func (s *DockerRegistryAuthHtpasswdSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, RegistryHosting) - s.reg = setupRegistry(c, false, "htpasswd", "") - s.d = 
NewDaemon(c) -} - -func (s *DockerRegistryAuthHtpasswdSuite) TearDownTest(c *check.C) { - if s.reg != nil { - out, err := s.d.Cmd("logout", privateRegistryURL) - c.Assert(err, check.IsNil, check.Commentf(out)) - s.reg.Close() - } - if s.d != nil { - s.d.Stop() - } - s.ds.TearDownTest(c) -} - -func init() { - check.Suite(&DockerRegistryAuthTokenSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerRegistryAuthTokenSuite struct { - ds *DockerSuite - reg *testRegistryV2 - d *Daemon -} - -func (s *DockerRegistryAuthTokenSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux, RegistryHosting) - s.d = NewDaemon(c) -} - -func (s *DockerRegistryAuthTokenSuite) TearDownTest(c *check.C) { - if s.reg != nil { - out, err := s.d.Cmd("logout", privateRegistryURL) - c.Assert(err, check.IsNil, check.Commentf(out)) - s.reg.Close() - } - if s.d != nil { - s.d.Stop() - } - s.ds.TearDownTest(c) -} - -func (s *DockerRegistryAuthTokenSuite) setupRegistryWithTokenService(c *check.C, tokenURL string) { - if s == nil { - c.Fatal("registry suite isn't initialized") - } - s.reg = setupRegistry(c, false, "token", tokenURL) -} - -func init() { - check.Suite(&DockerDaemonSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerDaemonSuite struct { - ds *DockerSuite - d *Daemon -} - -func (s *DockerDaemonSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux) - s.d = NewDaemon(c) -} - -func (s *DockerDaemonSuite) TearDownTest(c *check.C) { - testRequires(c, DaemonIsLinux) - if s.d != nil { - s.d.Stop() - } - s.ds.TearDownTest(c) -} - -const defaultSwarmPort = 2477 - -func init() { - check.Suite(&DockerSwarmSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerSwarmSuite struct { - ds *DockerSuite - daemons []*SwarmDaemon - daemonsLock sync.Mutex // protect access to daemons - portIndex int -} - -func (s *DockerSwarmSuite) SetUpTest(c *check.C) { - testRequires(c, DaemonIsLinux) -} - -func (s *DockerSwarmSuite) AddDaemon(c *check.C, joinSwarm, manager bool) *SwarmDaemon { - d := &SwarmDaemon{ - Daemon: NewDaemon(c), - port: defaultSwarmPort + s.portIndex, - } - d.listenAddr = fmt.Sprintf("0.0.0.0:%d", d.port) - err := d.StartWithBusybox("--iptables=false", "--swarm-default-advertise-addr=lo") // avoid networking conflicts - c.Assert(err, check.IsNil) - - if joinSwarm == true { - if len(s.daemons) > 0 { - tokens := s.daemons[0].joinTokens(c) - token := tokens.Worker - if manager { - token = tokens.Manager - } - c.Assert(d.Join(swarm.JoinRequest{ - RemoteAddrs: []string{s.daemons[0].listenAddr}, - JoinToken: token, - }), check.IsNil) - } else { - c.Assert(d.Init(swarm.InitRequest{}), check.IsNil) - } - } - - s.portIndex++ - s.daemonsLock.Lock() - s.daemons = append(s.daemons, d) - s.daemonsLock.Unlock() - - return d -} - -func (s *DockerSwarmSuite) TearDownTest(c *check.C) { - testRequires(c, DaemonIsLinux) - s.daemonsLock.Lock() - for _, d := range s.daemons { - d.Stop() - } - s.daemons = nil - s.daemonsLock.Unlock() - - s.portIndex = 0 - s.ds.TearDownTest(c) -} - -func init() { - check.Suite(&DockerTrustSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerTrustSuite struct { - ds *DockerSuite - reg *testRegistryV2 - not *testNotary -} - -func (s *DockerTrustSuite) SetUpTest(c *check.C) { - testRequires(c, RegistryHosting, NotaryServerHosting) - s.reg = setupRegistry(c, false, "", "") - s.not = setupNotary(c) -} - -func (s *DockerTrustSuite) TearDownTest(c *check.C) { - if s.reg != nil { - s.reg.Close() - } - if s.not != nil { - s.not.Close() - } - - // Remove trusted keys and metadata after test - 
os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust"))
-    s.ds.TearDownTest(c)
-}
diff --git a/integration-cli/daemon.go b/integration-cli/daemon.go
deleted file mode 100644
index f621d13fa2..0000000000
--- a/integration-cli/daemon.go
+++ /dev/null
@@ -1,529 +0,0 @@
-package main
-
-import (
-    "bytes"
-    "encoding/json"
-    "errors"
-    "fmt"
-    "io"
-    "net/http"
-    "os"
-    "os/exec"
-    "path/filepath"
-    "strconv"
-    "strings"
-    "time"
-
-    "github.com/docker/docker/opts"
-    "github.com/docker/docker/pkg/integration/checker"
-    "github.com/docker/docker/pkg/ioutils"
-    "github.com/docker/docker/pkg/tlsconfig"
-    "github.com/docker/go-connections/sockets"
-    "github.com/go-check/check"
-)
-
-// Daemon represents a Docker daemon for the testing framework.
-type Daemon struct {
-    GlobalFlags []string
-
-    id                string
-    c                 *check.C
-    logFile           *os.File
-    folder            string
-    root              string
-    stdin             io.WriteCloser
-    stdout, stderr    io.ReadCloser
-    cmd               *exec.Cmd
-    storageDriver     string
-    wait              chan error
-    userlandProxy     bool
-    useDefaultHost    bool
-    useDefaultTLSHost bool
-}
-
-type clientConfig struct {
-    transport *http.Transport
-    scheme    string
-    addr      string
-}
-
-// NewDaemon returns a Daemon instance to be used for testing.
-// This will create a directory such as d123456789 in the folder specified by $DEST.
-// The daemon will not automatically start.
-func NewDaemon(c *check.C) *Daemon {
-    dest := os.Getenv("DEST")
-    c.Assert(dest, check.Not(check.Equals), "", check.Commentf("Please set the DEST environment variable"))
-
-    id := fmt.Sprintf("d%d", time.Now().UnixNano()%100000000)
-    dir := filepath.Join(dest, id)
-    daemonFolder, err := filepath.Abs(dir)
-    c.Assert(err, check.IsNil, check.Commentf("Could not make %q an absolute path", dir))
-    daemonRoot := filepath.Join(daemonFolder, "root")
-
-    c.Assert(os.MkdirAll(daemonRoot, 0755), check.IsNil, check.Commentf("Could not create daemon root %q", dir))
-
-    userlandProxy := true
-    if env := os.Getenv("DOCKER_USERLANDPROXY"); env != "" {
-        if val, err := strconv.ParseBool(env); err == nil {
-            userlandProxy = val
-        }
-    }
-
-    return &Daemon{
-        id:            id,
-        c:             c,
-        folder:        daemonFolder,
-        root:          daemonRoot,
-        storageDriver: os.Getenv("DOCKER_GRAPHDRIVER"),
-        userlandProxy: userlandProxy,
-    }
-}
-
-func (d *Daemon) getClientConfig() (*clientConfig, error) {
-    var (
-        transport *http.Transport
-        scheme    string
-        addr      string
-        proto     string
-    )
-    if d.useDefaultTLSHost {
-        option := &tlsconfig.Options{
-            CAFile:   "fixtures/https/ca.pem",
-            CertFile: "fixtures/https/client-cert.pem",
-            KeyFile:  "fixtures/https/client-key.pem",
-        }
-        tlsConfig, err := tlsconfig.Client(*option)
-        if err != nil {
-            return nil, err
-        }
-        transport = &http.Transport{
-            TLSClientConfig: tlsConfig,
-        }
-        addr = fmt.Sprintf("%s:%d", opts.DefaultHTTPHost, opts.DefaultTLSHTTPPort)
-        scheme = "https"
-        proto = "tcp"
-    } else if d.useDefaultHost {
-        addr = opts.DefaultUnixSocket
-        proto = "unix"
-        scheme = "http"
-        transport = &http.Transport{}
-    } else {
-        addr = filepath.Join(d.folder, "docker.sock")
-        proto = "unix"
-        scheme = "http"
-        transport = &http.Transport{}
-    }
-
-    d.c.Assert(sockets.ConfigureTransport(transport, proto, addr), check.IsNil)
-
-    return &clientConfig{
-        transport: transport,
-        scheme:    scheme,
-        addr:      addr,
-    }, nil
-}
-
-// Start will start the daemon and return once it is ready to receive requests.
-// You can specify additional daemon flags.
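getClientConfig is where the harness decides how to reach each daemon: TLS over TCP, the default unix socket, or a per-daemon socket, with go-connections wiring the transport. A minimal standard-library sketch of the unix-socket case (the helper name and socket path are illustrative, not part of the original code; the Start implementation continues below):

```go
package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"time"
)

// pingDaemon dials a unix socket for every request, regardless of what the
// URL host says, and probes the daemon's /_ping endpoint.
func pingDaemon(socketPath string) error {
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				d := net.Dialer{Timeout: 2 * time.Second}
				return d.DialContext(ctx, "unix", socketPath)
			},
		},
	}
	resp, err := client.Get("http://localhost/_ping") // host is ignored by the dialer
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	// Assumed default socket path, for illustration only.
	fmt.Println(pingDaemon("/var/run/docker.sock"))
}
```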
-func (d *Daemon) Start(args ...string) error { - logFile, err := os.OpenFile(filepath.Join(d.folder, "docker.log"), os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) - d.c.Assert(err, check.IsNil, check.Commentf("[%s] Could not create %s/docker.log", d.id, d.folder)) - - return d.StartWithLogFile(logFile, args...) -} - -// StartWithLogFile will start the daemon and attach its streams to a given file. -func (d *Daemon) StartWithLogFile(out *os.File, providedArgs ...string) error { - dockerdBinary, err := exec.LookPath(dockerdBinary) - d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not find docker binary in $PATH", d.id)) - - args := append(d.GlobalFlags, - "--containerd", "/var/run/docker/libcontainerd/docker-containerd.sock", - "--graph", d.root, - "--exec-root", filepath.Join(d.folder, "exec-root"), - "--pidfile", fmt.Sprintf("%s/docker.pid", d.folder), - fmt.Sprintf("--userland-proxy=%t", d.userlandProxy), - ) - if !(d.useDefaultHost || d.useDefaultTLSHost) { - args = append(args, []string{"--host", d.sock()}...) - } - if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { - args = append(args, []string{"--userns-remap", root}...) - } - - // If we don't explicitly set the log-level or debug flag(-D) then - // turn on debug mode - foundLog := false - foundSd := false - for _, a := range providedArgs { - if strings.Contains(a, "--log-level") || strings.Contains(a, "-D") || strings.Contains(a, "--debug") { - foundLog = true - } - if strings.Contains(a, "--storage-driver") { - foundSd = true - } - } - if !foundLog { - args = append(args, "--debug") - } - if d.storageDriver != "" && !foundSd { - args = append(args, "--storage-driver", d.storageDriver) - } - - args = append(args, providedArgs...) - d.cmd = exec.Command(dockerdBinary, args...) - d.cmd.Env = append(os.Environ(), "DOCKER_SERVICE_PREFER_OFFLINE_IMAGE=1") - d.cmd.Stdout = out - d.cmd.Stderr = out - d.logFile = out - - if err := d.cmd.Start(); err != nil { - return fmt.Errorf("[%s] could not start daemon container: %v", d.id, err) - } - - wait := make(chan error) - - go func() { - wait <- d.cmd.Wait() - d.c.Logf("[%s] exiting daemon", d.id) - close(wait) - }() - - d.wait = wait - - tick := time.Tick(500 * time.Millisecond) - // make sure daemon is ready to receive requests - startTime := time.Now().Unix() - for { - d.c.Logf("[%s] waiting for daemon to start", d.id) - if time.Now().Unix()-startTime > 5 { - // After 5 seconds, give up - return fmt.Errorf("[%s] Daemon exited and never started", d.id) - } - select { - case <-time.After(2 * time.Second): - return fmt.Errorf("[%s] timeout: daemon does not respond", d.id) - case <-tick: - clientConfig, err := d.getClientConfig() - if err != nil { - return err - } - - client := &http.Client{ - Transport: clientConfig.transport, - } - - req, err := http.NewRequest("GET", "/_ping", nil) - d.c.Assert(err, check.IsNil, check.Commentf("[%s] could not create new request", d.id)) - req.URL.Host = clientConfig.addr - req.URL.Scheme = clientConfig.scheme - resp, err := client.Do(req) - if err != nil { - continue - } - if resp.StatusCode != http.StatusOK { - d.c.Logf("[%s] received status != 200 OK: %s", d.id, resp.Status) - } - d.c.Logf("[%s] daemon started", d.id) - d.root, err = d.queryRootDir() - if err != nil { - return fmt.Errorf("[%s] error querying daemon for root directory: %v", d.id, err) - } - return nil - case <-d.wait: - return fmt.Errorf("[%s] Daemon exited during startup", d.id) - } - } -} - -// StartWithBusybox will first start the daemon with Daemon.Start() -// then save the 
busybox image from the main daemon and load it into this Daemon instance.
-func (d *Daemon) StartWithBusybox(arg ...string) error {
-    if err := d.Start(arg...); err != nil {
-        return err
-    }
-    return d.LoadBusybox()
-}
-
-// Kill will send a SIGKILL to the daemon
-func (d *Daemon) Kill() error {
-    if d.cmd == nil || d.wait == nil {
-        return errors.New("daemon not started")
-    }
-
-    defer func() {
-        d.logFile.Close()
-        d.cmd = nil
-    }()
-
-    if err := d.cmd.Process.Kill(); err != nil {
-        d.c.Logf("Could not kill daemon: %v", err)
-        return err
-    }
-
-    if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil {
-        return err
-    }
-
-    return nil
-}
-
-// Stop will send a SIGINT every second and wait for the daemon to stop.
-// If it times out, a SIGKILL is sent.
-// Stop will not delete the daemon directory. If a purged daemon is needed,
-// instantiate a new one with NewDaemon.
-func (d *Daemon) Stop() error {
-    if d.cmd == nil || d.wait == nil {
-        return errors.New("daemon not started")
-    }
-
-    defer func() {
-        d.logFile.Close()
-        d.cmd = nil
-    }()
-
-    i := 1
-    tick := time.Tick(time.Second)
-
-    if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
-        return fmt.Errorf("could not send signal: %v", err)
-    }
-out1:
-    for {
-        select {
-        case err := <-d.wait:
-            return err
-        case <-time.After(20 * time.Second):
-            // time for stopping jobs and running onShutdown hooks
-            d.c.Logf("timeout: %v", d.id)
-            break out1
-        }
-    }
-
-out2:
-    for {
-        select {
-        case err := <-d.wait:
-            return err
-        case <-tick:
-            i++
-            if i > 5 {
-                d.c.Logf("tried to interrupt daemon %d times, now trying to kill it", i)
-                break out2
-            }
-            d.c.Logf("Attempt #%d: daemon is still running with pid %d", i, d.cmd.Process.Pid)
-            if err := d.cmd.Process.Signal(os.Interrupt); err != nil {
-                return fmt.Errorf("could not send signal: %v", err)
-            }
-        }
-    }
-
-    if err := d.cmd.Process.Kill(); err != nil {
-        d.c.Logf("Could not kill daemon: %v", err)
-        return err
-    }
-
-    if err := os.Remove(fmt.Sprintf("%s/docker.pid", d.folder)); err != nil {
-        return err
-    }
-
-    return nil
-}
-
-// Restart will restart the daemon by first stopping it and then starting it.
-func (d *Daemon) Restart(arg ...string) error {
-    d.Stop()
-    // in the case of tests running a user namespace-enabled daemon, we have resolved
-    // d.root to be the actual final path of the graph dir after the "uid.gid" of
-    // remapped root is added--we need to subtract it from the path before calling
-    // start or else we will continue making subdirectories rather than truly restarting
-    // with the same location/root:
-    if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" {
-        d.root = filepath.Dir(d.root)
-    }
-    return d.Start(arg...)
-} - -// LoadBusybox will load the stored busybox into a newly started daemon -func (d *Daemon) LoadBusybox() error { - bb := filepath.Join(d.folder, "busybox.tar") - if _, err := os.Stat(bb); err != nil { - if !os.IsNotExist(err) { - return fmt.Errorf("unexpected error on busybox.tar stat: %v", err) - } - // saving busybox image from main daemon - if err := exec.Command(dockerBinary, "save", "--output", bb, "busybox:latest").Run(); err != nil { - return fmt.Errorf("could not save busybox image: %v", err) - } - } - // loading busybox image to this daemon - if out, err := d.Cmd("load", "--input", bb); err != nil { - return fmt.Errorf("could not load busybox image: %s", out) - } - if err := os.Remove(bb); err != nil { - d.c.Logf("could not remove %s: %v", bb, err) - } - return nil -} - -func (d *Daemon) queryRootDir() (string, error) { - // update daemon root by asking /info endpoint (to support user - // namespaced daemon with root remapped uid.gid directory) - clientConfig, err := d.getClientConfig() - if err != nil { - return "", err - } - - client := &http.Client{ - Transport: clientConfig.transport, - } - - req, err := http.NewRequest("GET", "/info", nil) - if err != nil { - return "", err - } - req.Header.Set("Content-Type", "application/json") - req.URL.Host = clientConfig.addr - req.URL.Scheme = clientConfig.scheme - - resp, err := client.Do(req) - if err != nil { - return "", err - } - body := ioutils.NewReadCloserWrapper(resp.Body, func() error { - return resp.Body.Close() - }) - - type Info struct { - DockerRootDir string - } - var b []byte - var i Info - b, err = readBody(body) - if err == nil && resp.StatusCode == 200 { - // read the docker root dir - if err = json.Unmarshal(b, &i); err == nil { - return i.DockerRootDir, nil - } - } - return "", err -} - -func (d *Daemon) sock() string { - return fmt.Sprintf("unix://%s/docker.sock", d.folder) -} - -func (d *Daemon) waitRun(contID string) error { - args := []string{"--host", d.sock()} - return waitInspectWithArgs(contID, "{{.State.Running}}", "true", 10*time.Second, args...) -} - -func (d *Daemon) getBaseDeviceSize(c *check.C) int64 { - infoCmdOutput, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "-H", d.sock(), "info"), - exec.Command("grep", "Base Device Size"), - ) - c.Assert(err, checker.IsNil) - basesizeSlice := strings.Split(infoCmdOutput, ":") - basesize := strings.Trim(basesizeSlice[1], " ") - basesize = strings.Trim(basesize, "\n")[:len(basesize)-3] - basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) - c.Assert(err, checker.IsNil) - basesizeBytes := int64(basesizeFloat) * (1024 * 1024 * 1024) - return basesizeBytes -} - -// Cmd will execute a docker CLI command against this Daemon. -// Example: d.Cmd("version") will run docker -H unix://path/to/unix.sock version -func (d *Daemon) Cmd(name string, arg ...string) (string, error) { - args := []string{"--host", d.sock(), name} - args = append(args, arg...) - c := exec.Command(dockerBinary, args...) - b, err := c.CombinedOutput() - return string(b), err -} - -// CmdWithArgs will execute a docker CLI command against a daemon with the -// given additional arguments -func (d *Daemon) CmdWithArgs(daemonArgs []string, name string, arg ...string) (string, error) { - args := append(daemonArgs, name) - args = append(args, arg...) - c := exec.Command(dockerBinary, args...) - b, err := c.CombinedOutput() - return string(b), err -} - -// SockRequest executes a socket request on a daemon and returns statuscode and output. 
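Before SockRequest's implementation below, one pattern from Stop and Restart above is worth isolating: escalate from interrupt to kill only after polite attempts fail. A sketch, assuming a channel that reports the process's exit the way Daemon.wait does (the helper name is illustrative):

```go
package harness

import (
	"fmt"
	"os"
	"time"
)

// gracefulStop interrupts a process, re-sends the signal a few times while
// waiting for it to exit, and only then escalates to SIGKILL.
func gracefulStop(proc *os.Process, wait <-chan error) error {
	if err := proc.Signal(os.Interrupt); err != nil {
		return fmt.Errorf("could not send signal: %v", err)
	}
	tick := time.NewTicker(time.Second)
	defer tick.Stop()
	for attempt := 1; attempt <= 5; attempt++ {
		select {
		case err := <-wait:
			return err // exited on its own
		case <-tick.C:
			_ = proc.Signal(os.Interrupt) // nudge it again
		}
	}
	return proc.Kill() // last resort: SIGKILL
}
```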
-func (d *Daemon) SockRequest(method, endpoint string, data interface{}) (int, []byte, error) {
-    jsonData := bytes.NewBuffer(nil)
-    if err := json.NewEncoder(jsonData).Encode(data); err != nil {
-        return -1, nil, err
-    }
-
-    res, body, err := d.SockRequestRaw(method, endpoint, jsonData, "application/json")
-    if err != nil {
-        return -1, nil, err
-    }
-    b, err := readBody(body)
-    return res.StatusCode, b, err
-}
-
-// SockRequestRaw executes a socket request on a daemon and returns a http
-// response and a reader for the output data.
-func (d *Daemon) SockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) {
-    return sockRequestRawToDaemon(method, endpoint, data, ct, d.sock())
-}
-
-// LogFileName returns the path to the daemon's log file
-func (d *Daemon) LogFileName() string {
-    return d.logFile.Name()
-}
-
-func (d *Daemon) getIDByName(name string) (string, error) {
-    return d.inspectFieldWithError(name, "Id")
-}
-
-func (d *Daemon) activeContainers() (ids []string) {
-    out, _ := d.Cmd("ps", "-q")
-    for _, id := range strings.Split(out, "\n") {
-        if id = strings.TrimSpace(id); id != "" {
-            ids = append(ids, id)
-        }
-    }
-    return
-}
-
-func (d *Daemon) inspectFilter(name, filter string) (string, error) {
-    format := fmt.Sprintf("{{%s}}", filter)
-    out, err := d.Cmd("inspect", "-f", format, name)
-    if err != nil {
-        return "", fmt.Errorf("failed to inspect %s: %s", name, out)
-    }
-    return strings.TrimSpace(out), nil
-}
-
-func (d *Daemon) inspectFieldWithError(name, field string) (string, error) {
-    return d.inspectFilter(name, fmt.Sprintf(".%s", field))
-}
-
-func (d *Daemon) findContainerIP(id string) string {
-    out, err := d.Cmd("inspect", "--format='{{ .NetworkSettings.Networks.bridge.IPAddress }}'", id)
-    if err != nil {
-        d.c.Log(err)
-    }
-    return strings.Trim(out, " \r\n'")
-}
-
-func (d *Daemon) buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, int, error) {
-    buildCmd := buildImageCmdWithHost(name, dockerfile, d.sock(), useCache, buildFlags...)
-    return runCommandWithOutput(buildCmd)
-}
-
-func (d *Daemon) checkActiveContainerCount(c *check.C) (interface{}, check.CommentInterface) {
-    out, err := d.Cmd("ps", "-q")
-    c.Assert(err, checker.IsNil)
-    if len(strings.TrimSpace(out)) == 0 {
-        return 0, nil
-    }
-    return len(strings.Split(strings.TrimSpace(out), "\n")), check.Commentf("output: %q", string(out))
-}
diff --git a/integration-cli/daemon_swarm.go b/integration-cli/daemon_swarm.go
deleted file mode 100644
index 43f16d23ed..0000000000
--- a/integration-cli/daemon_swarm.go
+++ /dev/null
@@ -1,327 +0,0 @@
-package main
-
-import (
-    "encoding/json"
-    "fmt"
-    "net/http"
-    "strings"
-    "time"
-
-    "github.com/docker/docker/pkg/integration/checker"
-    "github.com/docker/engine-api/types"
-    "github.com/docker/engine-api/types/filters"
-    "github.com/docker/engine-api/types/swarm"
-    "github.com/go-check/check"
-)
-
-// SwarmDaemon is a test daemon with helpers for participating in a swarm.
-type SwarmDaemon struct {
-    *Daemon
-    swarm.Info
-    port       int
-    listenAddr string
-}
-
-// Init initializes a new swarm cluster.
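SockRequest above is the workhorse behind all the swarm helpers that follow: marshal the payload, POST it, slurp the body. A standalone sketch of that shape over plain net/http (function and parameter names are illustrative; the Init implementation continues below):

```go
package harness

import (
	"bytes"
	"encoding/json"
	"io/ioutil"
	"net/http"
)

// postJSON encodes data, POSTs it, and returns the status code plus the raw
// body — the same encode-then-request shape as SockRequest.
func postJSON(client *http.Client, url string, data interface{}) (int, []byte, error) {
	buf := new(bytes.Buffer)
	if err := json.NewEncoder(buf).Encode(data); err != nil {
		return -1, nil, err
	}
	resp, err := client.Post(url, "application/json", buf)
	if err != nil {
		return -1, nil, err
	}
	defer resp.Body.Close()
	b, err := ioutil.ReadAll(resp.Body)
	return resp.StatusCode, b, err
}
```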
-func (d *SwarmDaemon) Init(req swarm.InitRequest) error { - if req.ListenAddr == "" { - req.ListenAddr = d.listenAddr - } - status, out, err := d.SockRequest("POST", "/swarm/init", req) - if status != http.StatusOK { - return fmt.Errorf("initializing swarm: invalid statuscode %v, %q", status, out) - } - if err != nil { - return fmt.Errorf("initializing swarm: %v", err) - } - info, err := d.info() - if err != nil { - return err - } - d.Info = info - return nil -} - -// Join joins a daemon to an existing cluster. -func (d *SwarmDaemon) Join(req swarm.JoinRequest) error { - if req.ListenAddr == "" { - req.ListenAddr = d.listenAddr - } - status, out, err := d.SockRequest("POST", "/swarm/join", req) - if status != http.StatusOK { - return fmt.Errorf("joining swarm: invalid statuscode %v, %q", status, out) - } - if err != nil { - return fmt.Errorf("joining swarm: %v", err) - } - info, err := d.info() - if err != nil { - return err - } - d.Info = info - return nil -} - -// Leave forces daemon to leave current cluster. -func (d *SwarmDaemon) Leave(force bool) error { - url := "/swarm/leave" - if force { - url += "?force=1" - } - status, out, err := d.SockRequest("POST", url, nil) - if status != http.StatusOK { - return fmt.Errorf("leaving swarm: invalid statuscode %v, %q", status, out) - } - if err != nil { - err = fmt.Errorf("leaving swarm: %v", err) - } - return err -} - -func (d *SwarmDaemon) info() (swarm.Info, error) { - var info struct { - Swarm swarm.Info - } - status, dt, err := d.SockRequest("GET", "/info", nil) - if status != http.StatusOK { - return info.Swarm, fmt.Errorf("get swarm info: invalid statuscode %v", status) - } - if err != nil { - return info.Swarm, fmt.Errorf("get swarm info: %v", err) - } - if err := json.Unmarshal(dt, &info); err != nil { - return info.Swarm, err - } - return info.Swarm, nil -} - -type serviceConstructor func(*swarm.Service) -type nodeConstructor func(*swarm.Node) -type specConstructor func(*swarm.Spec) - -func (d *SwarmDaemon) createService(c *check.C, f ...serviceConstructor) string { - var service swarm.Service - for _, fn := range f { - fn(&service) - } - status, out, err := d.SockRequest("POST", "/services/create", service.Spec) - - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated, check.Commentf("output: %q", string(out))) - - var scr types.ServiceCreateResponse - c.Assert(json.Unmarshal(out, &scr), checker.IsNil) - return scr.ID -} - -func (d *SwarmDaemon) getService(c *check.C, id string) *swarm.Service { - var service swarm.Service - status, out, err := d.SockRequest("GET", "/services/"+id, nil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(err, checker.IsNil) - c.Assert(json.Unmarshal(out, &service), checker.IsNil) - return &service -} - -func (d *SwarmDaemon) getServiceTasks(c *check.C, service string) []swarm.Task { - var tasks []swarm.Task - - filterArgs := filters.NewArgs() - filterArgs.Add("desired-state", "running") - filterArgs.Add("service", service) - filters, err := filters.ToParam(filterArgs) - c.Assert(err, checker.IsNil) - - status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) - return tasks -} - -func (d *SwarmDaemon) checkRunningTaskImages(c *check.C) (interface{}, check.CommentInterface) { - var tasks []swarm.Task 
- - filterArgs := filters.NewArgs() - filterArgs.Add("desired-state", "running") - filters, err := filters.ToParam(filterArgs) - c.Assert(err, checker.IsNil) - - status, out, err := d.SockRequest("GET", "/tasks?filters="+filters, nil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(json.Unmarshal(out, &tasks), checker.IsNil) - - result := make(map[string]int) - for _, task := range tasks { - if task.Status.State == swarm.TaskStateRunning { - result[task.Spec.ContainerSpec.Image]++ - } - } - return result, nil -} - -func (d *SwarmDaemon) checkNodeReadyCount(c *check.C) (interface{}, check.CommentInterface) { - nodes := d.listNodes(c) - var readyCount int - for _, node := range nodes { - if node.Status.State == swarm.NodeStateReady { - readyCount++ - } - } - return readyCount, nil -} - -func (d *SwarmDaemon) getTask(c *check.C, id string) swarm.Task { - var task swarm.Task - - status, out, err := d.SockRequest("GET", "/tasks/"+id, nil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(err, checker.IsNil, check.Commentf(string(out))) - c.Assert(json.Unmarshal(out, &task), checker.IsNil) - return task -} - -func (d *SwarmDaemon) updateService(c *check.C, service *swarm.Service, f ...serviceConstructor) { - for _, fn := range f { - fn(service) - } - url := fmt.Sprintf("/services/%s/update?version=%d", service.ID, service.Version.Index) - status, out, err := d.SockRequest("POST", url, service.Spec) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) -} - -func (d *SwarmDaemon) removeService(c *check.C, id string) { - status, out, err := d.SockRequest("DELETE", "/services/"+id, nil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(err, checker.IsNil) -} - -func (d *SwarmDaemon) getNode(c *check.C, id string) *swarm.Node { - var node swarm.Node - status, out, err := d.SockRequest("GET", "/nodes/"+id, nil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(err, checker.IsNil) - c.Assert(json.Unmarshal(out, &node), checker.IsNil) - c.Assert(node.ID, checker.Equals, id) - return &node -} - -func (d *SwarmDaemon) removeNode(c *check.C, id string, force bool) { - url := "/nodes/" + id - if force { - url += "?force=1" - } - - status, out, err := d.SockRequest("DELETE", url, nil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(err, checker.IsNil) -} - -func (d *SwarmDaemon) updateNode(c *check.C, id string, f ...nodeConstructor) { - for i := 0; ; i++ { - node := d.getNode(c, id) - for _, fn := range f { - fn(node) - } - url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) - status, out, err := d.SockRequest("POST", url, node.Spec) - if i < 10 && strings.Contains(string(out), "update out of sequence") { - time.Sleep(100 * time.Millisecond) - continue - } - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - return - } -} - -func (d *SwarmDaemon) listNodes(c *check.C) []swarm.Node { - status, out, err := d.SockRequest("GET", "/nodes", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - - nodes := []swarm.Node{} - 
c.Assert(json.Unmarshal(out, &nodes), checker.IsNil) - return nodes -} - -func (d *SwarmDaemon) listServices(c *check.C) []swarm.Service { - status, out, err := d.SockRequest("GET", "/services", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - - services := []swarm.Service{} - c.Assert(json.Unmarshal(out, &services), checker.IsNil) - return services -} - -func (d *SwarmDaemon) getSwarm(c *check.C) swarm.Swarm { - var sw swarm.Swarm - status, out, err := d.SockRequest("GET", "/swarm", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &sw), checker.IsNil) - return sw -} - -func (d *SwarmDaemon) updateSwarm(c *check.C, f ...specConstructor) { - sw := d.getSwarm(c) - for _, fn := range f { - fn(&sw.Spec) - } - url := fmt.Sprintf("/swarm/update?version=%d", sw.Version.Index) - status, out, err := d.SockRequest("POST", url, sw.Spec) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) -} - -func (d *SwarmDaemon) rotateTokens(c *check.C) { - var sw swarm.Swarm - status, out, err := d.SockRequest("GET", "/swarm", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &sw), checker.IsNil) - - url := fmt.Sprintf("/swarm/update?version=%d&rotateWorkerToken=true&rotateManagerToken=true", sw.Version.Index) - status, out, err = d.SockRequest("POST", url, sw.Spec) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) -} - -func (d *SwarmDaemon) joinTokens(c *check.C) swarm.JoinTokens { - var sw swarm.Swarm - status, out, err := d.SockRequest("GET", "/swarm", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf("output: %q", string(out))) - c.Assert(json.Unmarshal(out, &sw), checker.IsNil) - return sw.JoinTokens -} - -func (d *SwarmDaemon) checkLocalNodeState(c *check.C) (interface{}, check.CommentInterface) { - info, err := d.info() - c.Assert(err, checker.IsNil) - return info.LocalNodeState, nil -} - -func (d *SwarmDaemon) checkControlAvailable(c *check.C) (interface{}, check.CommentInterface) { - info, err := d.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - return info.ControlAvailable, nil -} - -func (d *SwarmDaemon) cmdRetryOutOfSequence(args ...string) (string, error) { - for i := 0; ; i++ { - out, err := d.Cmd(args[0], args[1:]...) 
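cmdRetryOutOfSequence, whose body continues just below, and updateNode above share one idea: swarm updates are optimistically concurrent, so a losing writer gets "update out of sequence" and should simply retry. A reusable sketch of that loop (the callback is an assumption for illustration):

```go
package harness

import (
	"errors"
	"strings"
	"time"
)

// retryOutOfSequence re-runs an update attempt while the server reports a
// version conflict, backing off briefly between tries.
func retryOutOfSequence(attempt func() error) error {
	for i := 0; i < 10; i++ {
		err := attempt()
		if err == nil || !strings.Contains(err.Error(), "update out of sequence") {
			return err
		}
		time.Sleep(100 * time.Millisecond) // lost the race; re-read and retry
	}
	return errors.New("gave up after repeated version conflicts")
}
```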
- if err != nil { - if strings.Contains(err.Error(), "update out of sequence") { - if i < 10 { - continue - } - } - } - return out, err - } -} diff --git a/integration-cli/daemon_swarm_hack.go b/integration-cli/daemon_swarm_hack.go deleted file mode 100644 index d516ec4012..0000000000 --- a/integration-cli/daemon_swarm_hack.go +++ /dev/null @@ -1,20 +0,0 @@ -package main - -import "github.com/go-check/check" - -func (s *DockerSwarmSuite) getDaemon(c *check.C, nodeID string) *SwarmDaemon { - s.daemonsLock.Lock() - defer s.daemonsLock.Unlock() - for _, d := range s.daemons { - if d.NodeID == nodeID { - return d - } - } - c.Fatalf("could not find node with id: %s", nodeID) - return nil -} - -// nodeCmd executes a command on a given node via the normal docker socket -func (s *DockerSwarmSuite) nodeCmd(c *check.C, id, cmd string, args ...string) (string, error) { - return s.getDaemon(c, id).Cmd(cmd, args...) -} diff --git a/integration-cli/docker_api_attach_test.go b/integration-cli/docker_api_attach_test.go deleted file mode 100644 index 740ce6ecdd..0000000000 --- a/integration-cli/docker_api_attach_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package main - -import ( - "bufio" - "io" - "net" - "net/http" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" - "golang.org/x/net/websocket" -) - -func (s *DockerSuite) TestGetContainersAttachWebsocket(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-dit", "busybox", "cat") - - rwc, err := sockConn(time.Duration(10*time.Second), "") - c.Assert(err, checker.IsNil) - - cleanedContainerID := strings.TrimSpace(out) - config, err := websocket.NewConfig( - "/containers/"+cleanedContainerID+"/attach/ws?stream=1&stdin=1&stdout=1&stderr=1", - "http://localhost", - ) - c.Assert(err, checker.IsNil) - - ws, err := websocket.NewClient(config, rwc) - c.Assert(err, checker.IsNil) - defer ws.Close() - - expected := []byte("hello") - actual := make([]byte, len(expected)) - - outChan := make(chan error) - go func() { - _, err := io.ReadFull(ws, actual) - outChan <- err - close(outChan) - }() - - inChan := make(chan error) - go func() { - _, err := ws.Write(expected) - inChan <- err - close(inChan) - }() - - select { - case err := <-inChan: - c.Assert(err, checker.IsNil) - case <-time.After(5 * time.Second): - c.Fatal("Timeout writing to ws") - } - - select { - case err := <-outChan: - c.Assert(err, checker.IsNil) - case <-time.After(5 * time.Second): - c.Fatal("Timeout reading from ws") - } - - c.Assert(actual, checker.DeepEquals, expected, check.Commentf("Websocket didn't return the expected data")) -} - -// regression gh14320 -func (s *DockerSuite) TestPostContainersAttachContainerNotFound(c *check.C) { - req, client, err := newRequestClient("POST", "/containers/doesnotexist/attach", nil, "", "") - c.Assert(err, checker.IsNil) - - resp, err := client.Do(req) - // connection will shutdown, err should be "persistent connection closed" - c.Assert(err, checker.NotNil) // Server shutdown connection - - body, err := readBody(resp.Body) - c.Assert(err, checker.IsNil) - c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) - expected := "No such container: doesnotexist\r\n" - c.Assert(string(body), checker.Equals, expected) -} - -func (s *DockerSuite) TestGetContainersWsAttachContainerNotFound(c *check.C) { - status, body, err := sockRequest("GET", "/containers/doesnotexist/attach/ws", nil) - c.Assert(status, checker.Equals, http.StatusNotFound) - c.Assert(err, checker.IsNil) - 
expected := "No such container: doesnotexist" - c.Assert(getErrorMessage(c, body), checker.Contains, expected) -} - -func (s *DockerSuite) TestPostContainersAttach(c *check.C) { - testRequires(c, DaemonIsLinux) - - expectSuccess := func(conn net.Conn, br *bufio.Reader, stream string, tty bool) { - defer conn.Close() - expected := []byte("success") - _, err := conn.Write(expected) - c.Assert(err, checker.IsNil) - - conn.SetReadDeadline(time.Now().Add(time.Second)) - lenHeader := 0 - if !tty { - lenHeader = 8 - } - actual := make([]byte, len(expected)+lenHeader) - _, err = io.ReadFull(br, actual) - c.Assert(err, checker.IsNil) - if !tty { - fdMap := map[string]byte{ - "stdin": 0, - "stdout": 1, - "stderr": 2, - } - c.Assert(actual[0], checker.Equals, fdMap[stream]) - } - c.Assert(actual[lenHeader:], checker.DeepEquals, expected, check.Commentf("Attach didn't return the expected data from %s", stream)) - } - - expectTimeout := func(conn net.Conn, br *bufio.Reader, stream string) { - defer conn.Close() - _, err := conn.Write([]byte{'t'}) - c.Assert(err, checker.IsNil) - - conn.SetReadDeadline(time.Now().Add(time.Second)) - actual := make([]byte, 1) - _, err = io.ReadFull(br, actual) - opErr, ok := err.(*net.OpError) - c.Assert(ok, checker.Equals, true, check.Commentf("Error is expected to be *net.OpError, got %v", err)) - c.Assert(opErr.Timeout(), checker.Equals, true, check.Commentf("Read from %s is expected to timeout", stream)) - } - - // Create a container that only emits stdout. - cid, _ := dockerCmd(c, "run", "-di", "busybox", "cat") - cid = strings.TrimSpace(cid) - // Attach to the container's stdout stream. - conn, br, err := sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") - c.Assert(err, checker.IsNil) - // Check if the data from stdout can be received. - expectSuccess(conn, br, "stdout", false) - // Attach to the container's stderr stream. - conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") - c.Assert(err, checker.IsNil) - // Since the container only emits stdout, attaching to stderr should return nothing. - expectTimeout(conn, br, "stdout") - - // Test the similar functions of the stderr stream. - cid, _ = dockerCmd(c, "run", "-di", "busybox", "/bin/sh", "-c", "cat >&2") - cid = strings.TrimSpace(cid) - conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") - c.Assert(err, checker.IsNil) - expectSuccess(conn, br, "stderr", false) - conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") - c.Assert(err, checker.IsNil) - expectTimeout(conn, br, "stderr") - - // Test with tty. - cid, _ = dockerCmd(c, "run", "-dit", "busybox", "/bin/sh", "-c", "cat >&2") - cid = strings.TrimSpace(cid) - // Attach to stdout only. - conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stdout=1", nil, "text/plain") - c.Assert(err, checker.IsNil) - expectSuccess(conn, br, "stdout", true) - - // Attach without stdout stream. - conn, br, err = sockRequestHijack("POST", "/containers/"+cid+"/attach?stream=1&stdin=1&stderr=1", nil, "text/plain") - c.Assert(err, checker.IsNil) - // Nothing should be received because both the stdout and stderr of the container will be - // sent to the client as stdout when tty is enabled. 
- expectTimeout(conn, br, "stdout") -} diff --git a/integration-cli/docker_api_auth_test.go b/integration-cli/docker_api_auth_test.go deleted file mode 100644 index d73c61d411..0000000000 --- a/integration-cli/docker_api_auth_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/go-check/check" -) - -// Test case for #22244 -func (s *DockerSuite) TestAuthApi(c *check.C) { - testRequires(c, Network) - config := types.AuthConfig{ - Username: "no-user", - Password: "no-password", - } - - expected := "Get https://registry-1.docker.io/v2/: unauthorized: incorrect username or password" - status, body, err := sockRequest("POST", "/auth", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusUnauthorized) - msg := getErrorMessage(c, body) - c.Assert(msg, checker.Contains, expected, check.Commentf("Expected: %v, got: %v", expected, msg)) -} diff --git a/integration-cli/docker_api_build_test.go b/integration-cli/docker_api_build_test.go deleted file mode 100644 index ecd7f808e5..0000000000 --- a/integration-cli/docker_api_build_test.go +++ /dev/null @@ -1,283 +0,0 @@ -package main - -import ( - "archive/tar" - "bytes" - "net/http" - "regexp" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestBuildApiDockerFileRemote(c *check.C) { - testRequires(c, NotUserNamespace) - testRequires(c, DaemonIsLinux) - server, err := fakeStorage(map[string]string{ - "testD": `FROM busybox -COPY * /tmp/ -RUN find / -name ba* -RUN find /tmp/`, - }) - c.Assert(err, checker.IsNil) - defer server.Close() - - res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+server.URL()+"/testD", nil, "application/json") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - - buf, err := readBody(body) - c.Assert(err, checker.IsNil) - - // Make sure Dockerfile exists. 
- // Make sure 'baz' doesn't exist ANYWHERE despite being mentioned in the URL - out := string(buf) - c.Assert(out, checker.Contains, "/tmp/Dockerfile") - c.Assert(out, checker.Not(checker.Contains), "baz") -} - -func (s *DockerSuite) TestBuildApiRemoteTarballContext(c *check.C) { - testRequires(c, DaemonIsLinux) - buffer := new(bytes.Buffer) - tw := tar.NewWriter(buffer) - defer tw.Close() - - dockerfile := []byte("FROM busybox") - err := tw.WriteHeader(&tar.Header{ - Name: "Dockerfile", - Size: int64(len(dockerfile)), - }) - // failed to write tar file header - c.Assert(err, checker.IsNil) - - _, err = tw.Write(dockerfile) - // failed to write tar file content - c.Assert(err, checker.IsNil) - - // failed to close tar archive - c.Assert(tw.Close(), checker.IsNil) - - server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ - "testT.tar": buffer, - }) - c.Assert(err, checker.IsNil) - - defer server.Close() - - res, b, err := sockRequestRaw("POST", "/build?remote="+server.URL()+"/testT.tar", nil, "application/tar") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - b.Close() -} - -func (s *DockerSuite) TestBuildApiRemoteTarballContextWithCustomDockerfile(c *check.C) { - testRequires(c, DaemonIsLinux) - buffer := new(bytes.Buffer) - tw := tar.NewWriter(buffer) - defer tw.Close() - - dockerfile := []byte(`FROM busybox -RUN echo 'wrong'`) - err := tw.WriteHeader(&tar.Header{ - Name: "Dockerfile", - Size: int64(len(dockerfile)), - }) - // failed to write tar file header - c.Assert(err, checker.IsNil) - - _, err = tw.Write(dockerfile) - // failed to write tar file content - c.Assert(err, checker.IsNil) - - custom := []byte(`FROM busybox -RUN echo 'right' -`) - err = tw.WriteHeader(&tar.Header{ - Name: "custom", - Size: int64(len(custom)), - }) - - // failed to write tar file header - c.Assert(err, checker.IsNil) - - _, err = tw.Write(custom) - // failed to write tar file content - c.Assert(err, checker.IsNil) - - // failed to close tar archive - c.Assert(tw.Close(), checker.IsNil) - - server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ - "testT.tar": buffer, - }) - c.Assert(err, checker.IsNil) - - defer server.Close() - url := "/build?dockerfile=custom&remote=" + server.URL() + "/testT.tar" - res, body, err := sockRequestRaw("POST", url, nil, "application/tar") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - - defer body.Close() - content, err := readBody(body) - c.Assert(err, checker.IsNil) - - // Build used the wrong dockerfile. 
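These build tests all hand-roll tar contexts the same way: write a header, write the bytes, close the writer. A reusable sketch of that recipe (the helper name is illustrative; the final assertion of the custom-Dockerfile test follows):

```go
package harness

import (
	"archive/tar"
	"bytes"
)

// buildContext packs files into an in-memory tar suitable for POSTing to
// /build as application/x-tar.
func buildContext(files map[string]string) (*bytes.Buffer, error) {
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	for name, content := range files {
		hdr := &tar.Header{Name: name, Mode: 0600, Size: int64(len(content))}
		if err := tw.WriteHeader(hdr); err != nil {
			return nil, err
		}
		if _, err := tw.Write([]byte(content)); err != nil {
			return nil, err
		}
	}
	return buf, tw.Close() // Close flushes the trailing blocks
}
```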
-    c.Assert(string(content), checker.Not(checker.Contains), "wrong")
-}
-
-func (s *DockerSuite) TestBuildApiLowerDockerfile(c *check.C) {
-    testRequires(c, DaemonIsLinux)
-    git, err := newFakeGit("repo", map[string]string{
-        "dockerfile": `FROM busybox
-RUN echo from dockerfile`,
-    }, false)
-    c.Assert(err, checker.IsNil)
-    defer git.Close()
-
-    res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
-    c.Assert(err, checker.IsNil)
-    c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
-
-    buf, err := readBody(body)
-    c.Assert(err, checker.IsNil)
-
-    out := string(buf)
-    c.Assert(out, checker.Contains, "from dockerfile")
-}
-
-func (s *DockerSuite) TestBuildApiBuildGitWithF(c *check.C) {
-    testRequires(c, DaemonIsLinux)
-    git, err := newFakeGit("repo", map[string]string{
-        "baz": `FROM busybox
-RUN echo from baz`,
-        "Dockerfile": `FROM busybox
-RUN echo from Dockerfile`,
-    }, false)
-    c.Assert(err, checker.IsNil)
-    defer git.Close()
-
-    // Make sure it uses the 'dockerfile' query param value
-    res, body, err := sockRequestRaw("POST", "/build?dockerfile=baz&remote="+git.RepoURL, nil, "application/json")
-    c.Assert(err, checker.IsNil)
-    c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
-
-    buf, err := readBody(body)
-    c.Assert(err, checker.IsNil)
-
-    out := string(buf)
-    c.Assert(out, checker.Contains, "from baz")
-}
-
-func (s *DockerSuite) TestBuildApiDoubleDockerfile(c *check.C) {
-    testRequires(c, UnixCli) // dockerfile overwrites Dockerfile on Windows
-    git, err := newFakeGit("repo", map[string]string{
-        "Dockerfile": `FROM busybox
-RUN echo from Dockerfile`,
-        "dockerfile": `FROM busybox
-RUN echo from dockerfile`,
-    }, false)
-    c.Assert(err, checker.IsNil)
-    defer git.Close()
-
-    // With no dockerfile query param, the capitalized Dockerfile should win
-    res, body, err := sockRequestRaw("POST", "/build?remote="+git.RepoURL, nil, "application/json")
-    c.Assert(err, checker.IsNil)
-    c.Assert(res.StatusCode, checker.Equals, http.StatusOK)
-
-    buf, err := readBody(body)
-    c.Assert(err, checker.IsNil)
-
-    out := string(buf)
-    c.Assert(out, checker.Contains, "from Dockerfile")
-}
-
-func (s *DockerSuite) TestBuildApiDockerfileSymlink(c *check.C) {
-    // Test to make sure we stop people from trying to leave the
-    // build context when specifying a symlink as the path to the dockerfile
-    buffer := new(bytes.Buffer)
-    tw := tar.NewWriter(buffer)
-    defer tw.Close()
-
-    err := tw.WriteHeader(&tar.Header{
-        Name:     "Dockerfile",
-        Typeflag: tar.TypeSymlink,
-        Linkname: "/etc/passwd",
-    })
-    // failed to write tar file header
-    c.Assert(err, checker.IsNil)
-
-    // failed to close tar archive
-    c.Assert(tw.Close(), checker.IsNil)
-
-    res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar")
-    c.Assert(err, checker.IsNil)
-    c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError)
-
-    out, err := readBody(body)
-    c.Assert(err, checker.IsNil)
-
-    // The reason the error is "Cannot locate specified Dockerfile" is because
-    // in the builder, the symlink is resolved within the context, therefore
-    // Dockerfile -> /etc/passwd becomes etc/passwd from the context which is
-    // a nonexistent file.
-    c.Assert(string(out), checker.Contains, "Cannot locate specified Dockerfile: Dockerfile", check.Commentf("Didn't complain about leaving build context"))
-}
-
-func (s *DockerSuite) TestBuildApiUnnormalizedTarPaths(c *check.C) {
-    // Make sure that build context tars with entries of the form
-    // x/./y don't cause caching false positives.
- - buildFromTarContext := func(fileContents []byte) string { - buffer := new(bytes.Buffer) - tw := tar.NewWriter(buffer) - defer tw.Close() - - dockerfile := []byte(`FROM busybox - COPY dir /dir/`) - err := tw.WriteHeader(&tar.Header{ - Name: "Dockerfile", - Size: int64(len(dockerfile)), - }) - //failed to write tar file header - c.Assert(err, checker.IsNil) - - _, err = tw.Write(dockerfile) - // failed to write Dockerfile in tar file content - c.Assert(err, checker.IsNil) - - err = tw.WriteHeader(&tar.Header{ - Name: "dir/./file", - Size: int64(len(fileContents)), - }) - //failed to write tar file header - c.Assert(err, checker.IsNil) - - _, err = tw.Write(fileContents) - // failed to write file contents in tar file content - c.Assert(err, checker.IsNil) - - // failed to close tar archive - c.Assert(tw.Close(), checker.IsNil) - - res, body, err := sockRequestRaw("POST", "/build", buffer, "application/x-tar") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - - out, err := readBody(body) - c.Assert(err, checker.IsNil) - lines := strings.Split(string(out), "\n") - c.Assert(len(lines), checker.GreaterThan, 1) - c.Assert(lines[len(lines)-2], checker.Matches, ".*Successfully built [0-9a-f]{12}.*") - - re := regexp.MustCompile("Successfully built ([0-9a-f]{12})") - matches := re.FindStringSubmatch(lines[len(lines)-2]) - return matches[1] - } - - imageA := buildFromTarContext([]byte("abc")) - imageB := buildFromTarContext([]byte("def")) - - c.Assert(imageA, checker.Not(checker.Equals), imageB) -} diff --git a/integration-cli/docker_api_containers_test.go b/integration-cli/docker_api_containers_test.go deleted file mode 100644 index ea1358cae0..0000000000 --- a/integration-cli/docker_api_containers_test.go +++ /dev/null @@ -1,1434 +0,0 @@ -package main - -import ( - "archive/tar" - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "net/http/httputil" - "net/url" - "os" - "regexp" - "strconv" - "strings" - "time" - - "github.com/docker/docker/pkg/integration" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/engine-api/types" - containertypes "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestContainerApiGetAll(c *check.C) { - startCount, err := getContainerCount() - c.Assert(err, checker.IsNil, check.Commentf("Cannot query container count")) - - name := "getall" - dockerCmd(c, "run", "--name", name, "busybox", "true") - - status, body, err := sockRequest("GET", "/containers/json?all=1", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var inspectJSON []struct { - Names []string - } - err = json.Unmarshal(body, &inspectJSON) - c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal response body")) - - c.Assert(inspectJSON, checker.HasLen, startCount+1) - - actual := inspectJSON[0].Names[0] - c.Assert(actual, checker.Equals, "/"+name) -} - -// regression test for empty json field being omitted #13691 -func (s *DockerSuite) TestContainerApiGetJSONNoFieldsOmitted(c *check.C) { - dockerCmd(c, "run", "busybox", "true") - - status, body, err := sockRequest("GET", "/containers/json?all=1", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - // empty Labels field triggered this bug, make sense to check for everything - // cause even Ports for instance can trigger this bug - // better safe 
than sorry..
-    fields := []string{
-        "Id",
-        "Names",
-        "Image",
-        "Command",
-        "Created",
-        "Ports",
-        "Labels",
-        "Status",
-        "NetworkSettings",
-    }
-
-    // decoding into types.Container does not work since it eventually unmarshals
-    // an empty field to an empty go map, so we just check for a string
-    for _, f := range fields {
-        if !strings.Contains(string(body), f) {
-            c.Fatalf("Field %s is missing and it shouldn't be", f)
-        }
-    }
-}
-
-type containerPs struct {
-    Names []string
-    Ports []map[string]interface{}
-}
-
-// regression test for non-empty fields from #13901
-func (s *DockerSuite) TestContainerApiPsOmitFields(c *check.C) {
-    // Problematic for Windows porting due to networking not yet being passed back
-    testRequires(c, DaemonIsLinux)
-    name := "pstest"
-    port := 80
-    runSleepingContainer(c, "--name", name, "--expose", strconv.Itoa(port))
-
-    status, body, err := sockRequest("GET", "/containers/json?all=1", nil)
-    c.Assert(err, checker.IsNil)
-    c.Assert(status, checker.Equals, http.StatusOK)
-
-    var resp []containerPs
-    err = json.Unmarshal(body, &resp)
-    c.Assert(err, checker.IsNil)
-
-    var foundContainer *containerPs
-    for _, container := range resp {
-        container := container // copy: the range variable is reused on each iteration
-        for _, testName := range container.Names {
-            if "/"+name == testName {
-                foundContainer = &container
-                break
-            }
-        }
-    }
-
-    c.Assert(foundContainer.Ports, checker.HasLen, 1)
-    c.Assert(foundContainer.Ports[0]["PrivatePort"], checker.Equals, float64(port))
-    _, ok := foundContainer.Ports[0]["PublicPort"]
-    c.Assert(ok, checker.Not(checker.Equals), true)
-    _, ok = foundContainer.Ports[0]["IP"]
-    c.Assert(ok, checker.Not(checker.Equals), true)
-}
-
-func (s *DockerSuite) TestContainerApiGetExport(c *check.C) {
-    // TODO: Investigate why this fails on Windows to Windows CI
-    testRequires(c, DaemonIsLinux)
-    name := "exportcontainer"
-    dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test")
-
-    status, body, err := sockRequest("GET", "/containers/"+name+"/export", nil)
-    c.Assert(err, checker.IsNil)
-    c.Assert(status, checker.Equals, http.StatusOK)
-
-    found := false
-    for tarReader := tar.NewReader(bytes.NewReader(body)); ; {
-        h, err := tarReader.Next()
-        if err != nil {
-            if err == io.EOF {
-                break
-            }
-            c.Fatal(err)
-        }
-        if h.Name == "test" {
-            found = true
-            break
-        }
-    }
-    c.Assert(found, checker.True, check.Commentf("The created test file has not been found in the exported image"))
-}
-
-func (s *DockerSuite) TestContainerApiGetChanges(c *check.C) {
-    // Not supported on Windows as Windows does not support docker diff (/containers/name/changes)
-    testRequires(c, DaemonIsLinux)
-    name := "changescontainer"
-    dockerCmd(c, "run", "--name", name, "busybox", "rm", "/etc/passwd")
-
-    status, body, err := sockRequest("GET", "/containers/"+name+"/changes", nil)
-    c.Assert(err, checker.IsNil)
-    c.Assert(status, checker.Equals, http.StatusOK)
-
-    changes := []struct {
-        Kind int
-        Path string
-    }{}
-    c.Assert(json.Unmarshal(body, &changes), checker.IsNil, check.Commentf("unable to unmarshal response body"))
-
-    // Check the changelog for removal of /etc/passwd
-    success := false
-    for _, elem := range changes {
-        if elem.Path == "/etc/passwd" && elem.Kind == 2 {
-            success = true
-        }
-    }
-    c.Assert(success, checker.True, check.Commentf("/etc/passwd has been removed but is not present in the diff"))
-}
-
-func (s *DockerSuite) TestGetContainerStats(c *check.C) {
-    // Problematic on Windows as Windows does not support stats
-    testRequires(c, DaemonIsLinux)
-    var (
-        name = "statscontainer"
-    )
-    dockerCmd(c, "run", "-d", "--name", name, "busybox",
"top") - - type b struct { - status int - body []byte - err error - } - bc := make(chan b, 1) - go func() { - status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) - bc <- b{status, body, err} - }() - - // allow some time to stream the stats from the container - time.Sleep(4 * time.Second) - dockerCmd(c, "rm", "-f", name) - - // collect the results from the stats stream or timeout and fail - // if the stream was not disconnected. - select { - case <-time.After(2 * time.Second): - c.Fatal("stream was not closed after container was removed") - case sr := <-bc: - c.Assert(sr.err, checker.IsNil) - c.Assert(sr.status, checker.Equals, http.StatusOK) - - dec := json.NewDecoder(bytes.NewBuffer(sr.body)) - var s *types.Stats - // decode only one object from the stream - c.Assert(dec.Decode(&s), checker.IsNil) - } -} - -func (s *DockerSuite) TestGetContainerStatsRmRunning(c *check.C) { - // Problematic on Windows as Windows does not support stats - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - id := strings.TrimSpace(out) - - buf := &integration.ChannelBuffer{make(chan []byte, 1)} - defer buf.Close() - - _, body, err := sockRequestRaw("GET", "/containers/"+id+"/stats?stream=1", nil, "application/json") - c.Assert(err, checker.IsNil) - defer body.Close() - - chErr := make(chan error, 1) - go func() { - _, err = io.Copy(buf, body) - chErr <- err - }() - - b := make([]byte, 32) - // make sure we've got some stats - _, err = buf.ReadTimeout(b, 2*time.Second) - c.Assert(err, checker.IsNil) - - // Now remove without `-f` and make sure we are still pulling stats - _, _, err = dockerCmdWithError("rm", id) - c.Assert(err, checker.Not(checker.IsNil), check.Commentf("rm should have failed but didn't")) - _, err = buf.ReadTimeout(b, 2*time.Second) - c.Assert(err, checker.IsNil) - - dockerCmd(c, "rm", "-f", id) - c.Assert(<-chErr, checker.IsNil) -} - -// regression test for gh13421 -// previous test was just checking one stat entry so it didn't fail (stats with -// stream false always return one stat) -func (s *DockerSuite) TestGetContainerStatsStream(c *check.C) { - // Problematic on Windows as Windows does not support stats - testRequires(c, DaemonIsLinux) - name := "statscontainer" - dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") - - type b struct { - status int - body []byte - err error - } - bc := make(chan b, 1) - go func() { - status, body, err := sockRequest("GET", "/containers/"+name+"/stats", nil) - bc <- b{status, body, err} - }() - - // allow some time to stream the stats from the container - time.Sleep(4 * time.Second) - dockerCmd(c, "rm", "-f", name) - - // collect the results from the stats stream or timeout and fail - // if the stream was not disconnected. 
- select { - case <-time.After(2 * time.Second): - c.Fatal("stream was not closed after container was removed") - case sr := <-bc: - c.Assert(sr.err, checker.IsNil) - c.Assert(sr.status, checker.Equals, http.StatusOK) - - s := string(sr.body) - // count occurrences of "read" of types.Stats - if l := strings.Count(s, "read"); l < 2 { - c.Fatalf("Expected more than one stat streamed, got %d", l) - } - } -} - -func (s *DockerSuite) TestGetContainerStatsNoStream(c *check.C) { - // Problematic on Windows as Windows does not support stats - testRequires(c, DaemonIsLinux) - name := "statscontainer" - dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") - - type b struct { - status int - body []byte - err error - } - bc := make(chan b, 1) - go func() { - status, body, err := sockRequest("GET", "/containers/"+name+"/stats?stream=0", nil) - bc <- b{status, body, err} - }() - - // allow some time to stream the stats from the container - time.Sleep(4 * time.Second) - dockerCmd(c, "rm", "-f", name) - - // collect the results from the stats stream or timeout and fail - // if the stream was not disconnected. - select { - case <-time.After(2 * time.Second): - c.Fatal("stream was not closed after container was removed") - case sr := <-bc: - c.Assert(sr.err, checker.IsNil) - c.Assert(sr.status, checker.Equals, http.StatusOK) - - s := string(sr.body) - // count occurrences of "read" of types.Stats - c.Assert(strings.Count(s, "read"), checker.Equals, 1, check.Commentf("Expected only one stat streamed, got %d", strings.Count(s, "read"))) - } -} - -func (s *DockerSuite) TestGetStoppedContainerStats(c *check.C) { - // Problematic on Windows as Windows does not support stats - testRequires(c, DaemonIsLinux) - name := "statscontainer" - dockerCmd(c, "create", "--name", name, "busybox", "top") - - type stats struct { - status int - err error - } - chResp := make(chan stats) - - // We expect an immediate response, but if it's not immediate, the test would hang, so put it in a goroutine - // below we'll check this on a timeout. 
- go func() { - resp, body, err := sockRequestRaw("GET", "/containers/"+name+"/stats", nil, "") - body.Close() - chResp <- stats{resp.StatusCode, err} - }() - - select { - case r := <-chResp: - c.Assert(r.err, checker.IsNil) - c.Assert(r.status, checker.Equals, http.StatusOK) - case <-time.After(10 * time.Second): - c.Fatal("timeout waiting for stats response for stopped container") - } -} - -func (s *DockerSuite) TestContainerApiPause(c *check.C) { - // Problematic on Windows as Windows does not support pause - testRequires(c, DaemonIsLinux) - defer unpauseAllContainers() - out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "30") - ContainerID := strings.TrimSpace(out) - - status, _, err := sockRequest("POST", "/containers/"+ContainerID+"/pause", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - - pausedContainers, err := getSliceOfPausedContainers() - c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) - - if len(pausedContainers) != 1 || stringid.TruncateID(ContainerID) != pausedContainers[0] { - c.Fatalf("there should be one paused container and not %d", len(pausedContainers)) - } - - status, _, err = sockRequest("POST", "/containers/"+ContainerID+"/unpause", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - - pausedContainers, err = getSliceOfPausedContainers() - c.Assert(err, checker.IsNil, check.Commentf("error thrown while checking if containers were paused")) - c.Assert(pausedContainers, checker.IsNil, check.Commentf("There should be no paused container.")) -} - -func (s *DockerSuite) TestContainerApiTop(c *check.C) { - // Problematic on Windows as Windows does not support top - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "top") - id := strings.TrimSpace(string(out)) - c.Assert(waitRun(id), checker.IsNil) - - type topResp struct { - Titles []string - Processes [][]string - } - var top topResp - status, b, err := sockRequest("GET", "/containers/"+id+"/top?ps_args=aux", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(json.Unmarshal(b, &top), checker.IsNil) - c.Assert(top.Titles, checker.HasLen, 11, check.Commentf("expected 11 titles, found %d: %v", len(top.Titles), top.Titles)) - - if top.Titles[0] != "USER" || top.Titles[10] != "COMMAND" { - c.Fatalf("expected `USER` at `Titles[0]` and `COMMAND` at Titles[10]: %v", top.Titles) - } - c.Assert(top.Processes, checker.HasLen, 2, check.Commentf("expected 2 processes, found %d: %v", len(top.Processes), top.Processes)) - c.Assert(top.Processes[0][10], checker.Equals, "/bin/sh -c top") - c.Assert(top.Processes[1][10], checker.Equals, "top") -} - -func (s *DockerSuite) TestContainerApiCommit(c *check.C) { - cName := "testapicommit" - dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") - - name := "testcontainerapicommit" - status, b, err := sockRequest("POST", "/commit?repo="+name+"&testtag=tag&container="+cName, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - type resp struct { - ID string - } - var img resp - c.Assert(json.Unmarshal(b, &img), checker.IsNil) - - cmd := inspectField(c, img.ID, "Config.Cmd") - c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) - - // sanity check, make sure the image is what we think it is - dockerCmd(c, "run", img.ID, "ls", 
"/test") -} - -func (s *DockerSuite) TestContainerApiCommitWithLabelInConfig(c *check.C) { - cName := "testapicommitwithconfig" - dockerCmd(c, "run", "--name="+cName, "busybox", "/bin/sh", "-c", "touch /test") - - config := map[string]interface{}{ - "Labels": map[string]string{"key1": "value1", "key2": "value2"}, - } - - name := "testcontainerapicommitwithconfig" - status, b, err := sockRequest("POST", "/commit?repo="+name+"&container="+cName, config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - type resp struct { - ID string - } - var img resp - c.Assert(json.Unmarshal(b, &img), checker.IsNil) - - label1 := inspectFieldMap(c, img.ID, "Config.Labels", "key1") - c.Assert(label1, checker.Equals, "value1") - - label2 := inspectFieldMap(c, img.ID, "Config.Labels", "key2") - c.Assert(label2, checker.Equals, "value2") - - cmd := inspectField(c, img.ID, "Config.Cmd") - c.Assert(cmd, checker.Equals, "[/bin/sh -c touch /test]", check.Commentf("got wrong Cmd from commit: %q", cmd)) - - // sanity check, make sure the image is what we think it is - dockerCmd(c, "run", img.ID, "ls", "/test") -} - -func (s *DockerSuite) TestContainerApiBadPort(c *check.C) { - // TODO Windows to Windows CI - Port this test - testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": []string{"/bin/sh", "-c", "echo test"}, - "PortBindings": map[string]interface{}{ - "8080/tcp": []map[string]interface{}{ - { - "HostIP": "", - "HostPort": "aa80", - }, - }, - }, - } - - jsonData := bytes.NewBuffer(nil) - json.NewEncoder(jsonData).Encode(config) - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - c.Assert(getErrorMessage(c, body), checker.Equals, `Invalid port specification: "aa80"`, check.Commentf("Incorrect error msg: %s", body)) -} - -func (s *DockerSuite) TestContainerApiCreate(c *check.C) { - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": []string{"/bin/sh", "-c", "touch /test && ls /test"}, - } - - status, b, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - type createResp struct { - ID string - } - var container createResp - c.Assert(json.Unmarshal(b, &container), checker.IsNil) - - out, _ := dockerCmd(c, "start", "-a", container.ID) - c.Assert(strings.TrimSpace(out), checker.Equals, "/test") -} - -func (s *DockerSuite) TestContainerApiCreateEmptyConfig(c *check.C) { - config := map[string]interface{}{} - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - - expected := "Config cannot be empty in order to create a container" - c.Assert(getErrorMessage(c, body), checker.Equals, expected) -} - -func (s *DockerSuite) TestContainerApiCreateMultipleNetworksConfig(c *check.C) { - // Container creation must fail if client specified configurations for more than one network - config := map[string]interface{}{ - "Image": "busybox", - "NetworkingConfig": networktypes.NetworkingConfig{ - EndpointsConfig: map[string]*networktypes.EndpointSettings{ - "net1": {}, - "net2": {}, - "net3": {}, - }, - }, - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusBadRequest) - msg := getErrorMessage(c, body) 
- // network name order in error message is not deterministic - c.Assert(msg, checker.Contains, "Container cannot be connected to network endpoints") - c.Assert(msg, checker.Contains, "net1") - c.Assert(msg, checker.Contains, "net2") - c.Assert(msg, checker.Contains, "net3") -} - -func (s *DockerSuite) TestContainerApiCreateWithHostName(c *check.C) { - // TODO Windows: Port this test once hostname is supported - testRequires(c, DaemonIsLinux) - hostName := "test-host" - config := map[string]interface{}{ - "Image": "busybox", - "Hostname": hostName, - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - var container types.ContainerCreateResponse - c.Assert(json.Unmarshal(body, &container), checker.IsNil) - - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) - c.Assert(containerJSON.Config.Hostname, checker.Equals, hostName, check.Commentf("Mismatched Hostname")) -} - -func (s *DockerSuite) TestContainerApiCreateWithDomainName(c *check.C) { - // TODO Windows: Port this test once domain name is supported - testRequires(c, DaemonIsLinux) - domainName := "test-domain" - config := map[string]interface{}{ - "Image": "busybox", - "Domainname": domainName, - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - var container types.ContainerCreateResponse - c.Assert(json.Unmarshal(body, &container), checker.IsNil) - - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) - c.Assert(containerJSON.Config.Domainname, checker.Equals, domainName, check.Commentf("Mismatched Domainname")) -} - -func (s *DockerSuite) TestContainerApiCreateBridgeNetworkMode(c *check.C) { - // Windows does not support bridge - testRequires(c, DaemonIsLinux) - UtilCreateNetworkMode(c, "bridge") -} - -func (s *DockerSuite) TestContainerApiCreateOtherNetworkModes(c *check.C) { - // Windows does not support these network modes - testRequires(c, DaemonIsLinux, NotUserNamespace) - UtilCreateNetworkMode(c, "host") - UtilCreateNetworkMode(c, "container:web1") -} - -func UtilCreateNetworkMode(c *check.C, networkMode string) { - config := map[string]interface{}{ - "Image": "busybox", - "HostConfig": map[string]interface{}{"NetworkMode": networkMode}, - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - var container types.ContainerCreateResponse - c.Assert(json.Unmarshal(body, &container), checker.IsNil) - - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) - c.Assert(containerJSON.HostConfig.NetworkMode, checker.Equals, containertypes.NetworkMode(networkMode), check.Commentf("Mismatched NetworkMode")) -} - -func (s *DockerSuite) 
TestContainerApiCreateWithCpuSharesCpuset(c *check.C) { - // TODO Windows to Windows CI. The CpuShares part could be ported. - testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - "CpuShares": 512, - "CpusetCpus": "0", - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - var container types.ContainerCreateResponse - c.Assert(json.Unmarshal(body, &container), checker.IsNil) - - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var containerJSON types.ContainerJSON - - c.Assert(json.Unmarshal(body, &containerJSON), checker.IsNil) - - out := inspectField(c, containerJSON.ID, "HostConfig.CpuShares") - c.Assert(out, checker.Equals, "512") - - outCpuset := inspectField(c, containerJSON.ID, "HostConfig.CpusetCpus") - c.Assert(outCpuset, checker.Equals, "0") -} - -func (s *DockerSuite) TestContainerApiVerifyHeader(c *check.C) { - config := map[string]interface{}{ - "Image": "busybox", - } - - create := func(ct string) (*http.Response, io.ReadCloser, error) { - jsonData := bytes.NewBuffer(nil) - c.Assert(json.NewEncoder(jsonData).Encode(config), checker.IsNil) - return sockRequestRaw("POST", "/containers/create", jsonData, ct) - } - - // Try with no content-type - res, body, err := create("") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - body.Close() - - // Try with wrong content-type - res, body, err = create("application/xml") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - body.Close() - - // now application/json - res, body, err = create("application/json") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) - body.Close() -} - -//Issue 14230. daemon should return 500 for invalid port syntax -func (s *DockerSuite) TestContainerApiInvalidPortSyntax(c *check.C) { - config := `{ - "Image": "busybox", - "HostConfig": { - "NetworkMode": "default", - "PortBindings": { - "19039;1230": [ - {} - ] - } - } - }` - - res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - - b, err := readBody(body) - c.Assert(err, checker.IsNil) - c.Assert(string(b[:]), checker.Contains, "Invalid port") -} - -// Issue 7941 - test to make sure a "null" in JSON is just ignored. -// W/o this fix a null in JSON would be parsed into a string var as "null" -func (s *DockerSuite) TestContainerApiPostCreateNull(c *check.C) { - // TODO Windows to Windows CI. Bit of this with alternate fields checked - // can probably be ported. 
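- // (Illustration of the regression: decoding {"Cpuset":null} with encoding/json into a struct field declared as string must leave that field at its zero value "", never the literal string "null"; the inspectField assertions below verify exactly that.)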
- testRequires(c, DaemonIsLinux) - config := `{ - "Hostname":"", - "Domainname":"", - "Memory":0, - "MemorySwap":0, - "CpuShares":0, - "Cpuset":null, - "AttachStdin":true, - "AttachStdout":true, - "AttachStderr":true, - "ExposedPorts":{}, - "Tty":true, - "OpenStdin":true, - "StdinOnce":true, - "Env":[], - "Cmd":"ls", - "Image":"busybox", - "Volumes":{}, - "WorkingDir":"", - "Entrypoint":null, - "NetworkDisabled":false, - "OnBuild":null}` - - res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusCreated) - - b, err := readBody(body) - c.Assert(err, checker.IsNil) - type createResp struct { - ID string - } - var container createResp - c.Assert(json.Unmarshal(b, &container), checker.IsNil) - out := inspectField(c, container.ID, "HostConfig.CpusetCpus") - c.Assert(out, checker.Equals, "") - - outMemory := inspectField(c, container.ID, "HostConfig.Memory") - c.Assert(outMemory, checker.Equals, "0") - outMemorySwap := inspectField(c, container.ID, "HostConfig.MemorySwap") - c.Assert(outMemorySwap, checker.Equals, "0") -} - -func (s *DockerSuite) TestCreateWithTooLowMemoryLimit(c *check.C) { - // TODO Windows: Port once memory is supported - testRequires(c, DaemonIsLinux) - config := `{ - "Image": "busybox", - "Cmd": "ls", - "OpenStdin": true, - "CpuShares": 100, - "Memory": 524287 - }` - - res, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(config), "application/json") - c.Assert(err, checker.IsNil) - b, err2 := readBody(body) - c.Assert(err2, checker.IsNil) - - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") -} - -func (s *DockerSuite) TestContainerApiRename(c *check.C) { - // TODO Windows: Debug why this sometimes fails on TP5. For now, leave disabled - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "--name", "TestContainerApiRename", "-d", "busybox", "sh") - - containerID := strings.TrimSpace(out) - newName := "TestContainerApiRenameNew" - statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/rename?name="+newName, nil) - c.Assert(err, checker.IsNil) - // 204 No Content is expected, not 200 - c.Assert(statusCode, checker.Equals, http.StatusNoContent) - - name := inspectField(c, containerID, "Name") - c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) -} - -func (s *DockerSuite) TestContainerApiKill(c *check.C) { - name := "test-api-kill" - runSleepingContainer(c, "-i", "--name", name) - - status, _, err := sockRequest("POST", "/containers/"+name+"/kill", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - - state := inspectField(c, name, "State.Running") - c.Assert(state, checker.Equals, "false", check.Commentf("got wrong State from container %s: %q", name, state)) -} - -func (s *DockerSuite) TestContainerApiRestart(c *check.C) { - // TODO Windows to Windows CI. 
This is flaky due to the timing - testRequires(c, DaemonIsLinux) - name := "test-api-restart" - dockerCmd(c, "run", "-di", "--name", name, "busybox", "top") - - status, _, err := sockRequest("POST", "/containers/"+name+"/restart?t=1", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 5*time.Second), checker.IsNil) -} - -func (s *DockerSuite) TestContainerApiRestartNotimeoutParam(c *check.C) { - // TODO Windows to Windows CI. This is flaky due to the timing - testRequires(c, DaemonIsLinux) - name := "test-api-restart-no-timeout-param" - out, _ := dockerCmd(c, "run", "-di", "--name", name, "busybox", "top") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - status, _, err := sockRequest("POST", "/containers/"+name+"/restart", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - c.Assert(waitInspect(name, "{{ .State.Restarting }} {{ .State.Running }}", "false true", 5*time.Second), checker.IsNil) -} - -func (s *DockerSuite) TestContainerApiStart(c *check.C) { - name := "testing-start" - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": append([]string{"/bin/sh", "-c"}, defaultSleepCommand...), - "OpenStdin": true, - } - - status, _, err := sockRequest("POST", "/containers/create?name="+name, config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - - // second call to start should give 304 - status, _, err = sockRequest("POST", "/containers/"+name+"/start", nil) - c.Assert(err, checker.IsNil) - - // TODO(tibor): figure out why this doesn't work on windows - if isLocalDaemon { - c.Assert(status, checker.Equals, http.StatusNotModified) - } -} - -func (s *DockerSuite) TestContainerApiStop(c *check.C) { - name := "test-api-stop" - runSleepingContainer(c, "-i", "--name", name) - - status, _, err := sockRequest("POST", "/containers/"+name+"/stop?t=30", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) - - // second call to stop should give 304 - status, _, err = sockRequest("POST", "/containers/"+name+"/stop?t=30", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotModified) -} - -func (s *DockerSuite) TestContainerApiWait(c *check.C) { - name := "test-api-wait" - - sleepCmd := "/bin/sleep" - if daemonPlatform == "windows" { - sleepCmd = "sleep" - } - dockerCmd(c, "run", "--name", name, "busybox", sleepCmd, "5") - - status, body, err := sockRequest("POST", "/containers/"+name+"/wait", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(waitInspect(name, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) - - var waitres types.ContainerWaitResponse - c.Assert(json.Unmarshal(body, &waitres), checker.IsNil) - c.Assert(waitres.StatusCode, checker.Equals, 0) -} - -func (s *DockerSuite) TestContainerApiCopyNotExistsAnyMore(c *check.C) { - // TODO Windows to Windows CI. This can be ported.
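- // (Context: the unversioned /containers/{name}/copy endpoint has been removed from the current API, which is the 404 asserted below; the Pre124 tests that follow still reach it through the versioned /v1.23 prefix.)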
- testRequires(c, DaemonIsLinux) - name := "test-container-api-copy" - dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") - - postData := types.CopyConfig{ - Resource: "/test.txt", - } - - status, _, err := sockRequest("POST", "/containers/"+name+"/copy", postData) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) -} - -func (s *DockerSuite) TestContainerApiCopyPre124(c *check.C) { - // TODO Windows to Windows CI. This can be ported. - testRequires(c, DaemonIsLinux) - name := "test-container-api-copy" - dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") - - postData := types.CopyConfig{ - Resource: "/test.txt", - } - - status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - found := false - for tarReader := tar.NewReader(bytes.NewReader(body)); ; { - h, err := tarReader.Next() - if err != nil { - if err == io.EOF { - break - } - c.Fatal(err) - } - if h.Name == "test.txt" { - found = true - break - } - } - c.Assert(found, checker.True) -} - -func (s *DockerSuite) TestContainerApiCopyResourcePathEmptyPr124(c *check.C) { - // TODO Windows to Windows CI. This can be ported. - testRequires(c, DaemonIsLinux) - name := "test-container-api-copy-resource-empty" - dockerCmd(c, "run", "--name", name, "busybox", "touch", "/test.txt") - - postData := types.CopyConfig{ - Resource: "", - } - - status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - c.Assert(string(body), checker.Matches, "Path cannot be empty\n") -} - -func (s *DockerSuite) TestContainerApiCopyResourcePathNotFoundPre124(c *check.C) { - // TODO Windows to Windows CI. This can be ported. 
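- // (Note: these pre-1.24 copy failures come back as HTTP 500 with a plain-text body, so the assertions match the raw response string rather than going through getErrorMessage.)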
- testRequires(c, DaemonIsLinux) - name := "test-container-api-copy-resource-not-found" - dockerCmd(c, "run", "--name", name, "busybox") - - postData := types.CopyConfig{ - Resource: "/notexist", - } - - status, body, err := sockRequest("POST", "/v1.23/containers/"+name+"/copy", postData) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - c.Assert(string(body), checker.Matches, "Could not find the file /notexist in container "+name+"\n") -} - -func (s *DockerSuite) TestContainerApiCopyContainerNotFoundPr124(c *check.C) { - postData := types.CopyConfig{ - Resource: "/something", - } - - status, _, err := sockRequest("POST", "/v1.23/containers/notexists/copy", postData) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) -} - -func (s *DockerSuite) TestContainerApiDelete(c *check.C) { - out, _ := runSleepingContainer(c) - - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - dockerCmd(c, "stop", id) - - status, _, err := sockRequest("DELETE", "/containers/"+id, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) -} - -func (s *DockerSuite) TestContainerApiDeleteNotExist(c *check.C) { - status, body, err := sockRequest("DELETE", "/containers/doesnotexist", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) - c.Assert(getErrorMessage(c, body), checker.Matches, "No such container: doesnotexist") -} - -func (s *DockerSuite) TestContainerApiDeleteForce(c *check.C) { - out, _ := runSleepingContainer(c) - - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - status, _, err := sockRequest("DELETE", "/containers/"+id+"?force=1", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) -} - -func (s *DockerSuite) TestContainerApiDeleteRemoveLinks(c *check.C) { - // Windows does not support links - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--name", "tlink1", "busybox", "top") - - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - out, _ = dockerCmd(c, "run", "--link", "tlink1:tlink1", "--name", "tlink2", "-d", "busybox", "top") - - id2 := strings.TrimSpace(out) - c.Assert(waitRun(id2), checker.IsNil) - - links := inspectFieldJSON(c, id2, "HostConfig.Links") - c.Assert(links, checker.Equals, "[\"/tlink1:/tlink2/tlink1\"]", check.Commentf("expected to have links between containers")) - - status, b, err := sockRequest("DELETE", "/containers/tlink2/tlink1?link=1", nil) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusNoContent, check.Commentf(string(b))) - - linksPostRm := inspectFieldJSON(c, id2, "HostConfig.Links") - c.Assert(linksPostRm, checker.Equals, "null", check.Commentf("call to api deleteContainer links should have removed the specified links")) -} - -func (s *DockerSuite) TestContainerApiDeleteConflict(c *check.C) { - out, _ := runSleepingContainer(c) - - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - status, _, err := sockRequest("DELETE", "/containers/"+id, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusConflict) -} - -func (s *DockerSuite) TestContainerApiDeleteRemoveVolume(c *check.C) { - testRequires(c, SameHostDaemon) - - vol := "/testvolume" - if daemonPlatform == "windows" { - vol = `c:\testvolume` - } - - out, _ := runSleepingContainer(c, "-v", vol) - - id := strings.TrimSpace(out) - c.Assert(waitRun(id), 
checker.IsNil) - - source, err := inspectMountSourceField(id, vol) - _, err = os.Stat(source) - c.Assert(err, checker.IsNil) - - status, _, err := sockRequest("DELETE", "/containers/"+id+"?v=1&force=1", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - _, err = os.Stat(source) - c.Assert(os.IsNotExist(err), checker.True, check.Commentf("expected to get ErrNotExist error, got %v", err)) -} - -// Regression test for https://github.com/docker/docker/issues/6231 -func (s *DockerSuite) TestContainerApiChunkedEncoding(c *check.C) { - // TODO Windows CI: This can be ported - testRequires(c, DaemonIsLinux) - - conn, err := sockConn(time.Duration(10*time.Second), "") - c.Assert(err, checker.IsNil) - client := httputil.NewClientConn(conn, nil) - defer client.Close() - - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": append([]string{"/bin/sh", "-c"}, defaultSleepCommand...), - "OpenStdin": true, - } - b, err := json.Marshal(config) - c.Assert(err, checker.IsNil) - - req, err := http.NewRequest("POST", "/containers/create", bytes.NewBuffer(b)) - c.Assert(err, checker.IsNil) - req.Header.Set("Content-Type", "application/json") - // This is a cheat to make the http request do chunked encoding - // Otherwise (just setting the Content-Encoding to chunked) net/http will overwrite - // https://golang.org/src/pkg/net/http/request.go?s=11980:12172 - req.ContentLength = -1 - - resp, err := client.Do(req) - c.Assert(err, checker.IsNil, check.Commentf("error creating container with chunked encoding")) - resp.Body.Close() - c.Assert(resp.StatusCode, checker.Equals, http.StatusCreated) -} - -func (s *DockerSuite) TestContainerApiPostContainerStop(c *check.C) { - out, _ := runSleepingContainer(c) - - containerID := strings.TrimSpace(out) - c.Assert(waitRun(containerID), checker.IsNil) - - statusCode, _, err := sockRequest("POST", "/containers/"+containerID+"/stop", nil) - c.Assert(err, checker.IsNil) - // 204 No Content is expected, not 200 - c.Assert(statusCode, checker.Equals, http.StatusNoContent) - c.Assert(waitInspect(containerID, "{{ .State.Running }}", "false", 60*time.Second), checker.IsNil) -} - -// #14170 -func (s *DockerSuite) TestPostContainerApiCreateWithStringOrSliceEntrypoint(c *check.C) { - config := struct { - Image string - Entrypoint string - Cmd []string - }{"busybox", "echo", []string{"hello", "world"}} - _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) - c.Assert(err, checker.IsNil) - out, _ := dockerCmd(c, "start", "-a", "echotest") - c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") - - config2 := struct { - Image string - Entrypoint []string - Cmd []string - }{"busybox", []string{"echo"}, []string{"hello", "world"}} - _, _, err = sockRequest("POST", "/containers/create?name=echotest2", config2) - c.Assert(err, checker.IsNil) - out, _ = dockerCmd(c, "start", "-a", "echotest2") - c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") -} - -// #14170 -func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCmd(c *check.C) { - config := struct { - Image string - Entrypoint string - Cmd string - }{"busybox", "echo", "hello world"} - _, _, err := sockRequest("POST", "/containers/create?name=echotest", config) - c.Assert(err, checker.IsNil) - out, _ := dockerCmd(c, "start", "-a", "echotest") - c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") - - config2 := struct { - Image string - Cmd []string - }{"busybox", []string{"echo", "hello", "world"}} - _, _, err = 
sockRequest("POST", "/containers/create?name=echotest2", config2) - c.Assert(err, checker.IsNil) - out, _ = dockerCmd(c, "start", "-a", "echotest2") - c.Assert(strings.TrimSpace(out), checker.Equals, "hello world") -} - -// regression #14318 -func (s *DockerSuite) TestPostContainersCreateWithStringOrSliceCapAddDrop(c *check.C) { - // Windows doesn't support CapAdd/CapDrop - testRequires(c, DaemonIsLinux) - config := struct { - Image string - CapAdd string - CapDrop string - }{"busybox", "NET_ADMIN", "SYS_ADMIN"} - status, _, err := sockRequest("POST", "/containers/create?name=capaddtest0", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - config2 := struct { - Image string - CapAdd []string - CapDrop []string - }{"busybox", []string{"NET_ADMIN", "SYS_ADMIN"}, []string{"SETGID"}} - status, _, err = sockRequest("POST", "/containers/create?name=capaddtest1", config2) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) -} - -// #14915 -func (s *DockerSuite) TestContainerApiCreateNoHostConfig118(c *check.C) { - config := struct { - Image string - }{"busybox"} - status, _, err := sockRequest("POST", "/v1.18/containers/create", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) -} - -// Ensure an error occurs when you have a container read-only rootfs but you -// extract an archive to a symlink in a writable volume which points to a -// directory outside of the volume. -func (s *DockerSuite) TestPutContainerArchiveErrSymlinkInVolumeToReadOnlyRootfs(c *check.C) { - // Windows does not support read-only rootfs - // Requires local volume mount bind. - // --read-only + userns has remount issues - testRequires(c, SameHostDaemon, NotUserNamespace, DaemonIsLinux) - - testVol := getTestDir(c, "test-put-container-archive-err-symlink-in-volume-to-read-only-rootfs-") - defer os.RemoveAll(testVol) - - makeTestContentInDir(c, testVol) - - cID := makeTestContainer(c, testContainerOptions{ - readOnly: true, - volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 - }) - defer deleteContainer(cID) - - // Attempt to extract to a symlink in the volume which points to a - // directory outside the volume. This should cause an error because the - // rootfs is read-only. 
- query := make(url.Values, 1) - query.Set("path", "/vol2/symlinkToAbsDir") - urlPath := fmt.Sprintf("/v1.20/containers/%s/archive?%s", cID, query.Encode()) - - statusCode, body, err := sockRequest("PUT", urlPath, nil) - c.Assert(err, checker.IsNil) - - if !isCpCannotCopyReadOnly(fmt.Errorf(string(body))) { - c.Fatalf("expected ErrContainerRootfsReadonly error, but got %d: %s", statusCode, string(body)) - } -} - -func (s *DockerSuite) TestContainerApiGetContainersJSONEmpty(c *check.C) { - status, body, err := sockRequest("GET", "/containers/json?all=1", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(string(body), checker.Equals, "[]\n") -} - -func (s *DockerSuite) TestPostContainersCreateWithWrongCpusetValues(c *check.C) { - // Not supported on Windows - testRequires(c, DaemonIsLinux) - - c1 := struct { - Image string - CpusetCpus string - }{"busybox", "1-42,,"} - name := "wrong-cpuset-cpus" - status, body, err := sockRequest("POST", "/containers/create?name="+name, c1) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - expected := "Invalid value 1-42,, for cpuset cpus" - c.Assert(getErrorMessage(c, body), checker.Equals, expected) - - c2 := struct { - Image string - CpusetMems string - }{"busybox", "42-3,1--"} - name = "wrong-cpuset-mems" - status, body, err = sockRequest("POST", "/containers/create?name="+name, c2) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - expected = "Invalid value 42-3,1-- for cpuset mems" - c.Assert(getErrorMessage(c, body), checker.Equals, expected) -} - -func (s *DockerSuite) TestPostContainersCreateShmSizeNegative(c *check.C) { - // ShmSize is not supported on Windows - testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - "HostConfig": map[string]interface{}{"ShmSize": -1}, - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - c.Assert(getErrorMessage(c, body), checker.Contains, "SHM size must be greater than 0") -} - -func (s *DockerSuite) TestPostContainersCreateShmSizeHostConfigOmitted(c *check.C) { - // ShmSize is not supported on Windows - testRequires(c, DaemonIsLinux) - var defaultSHMSize int64 = 67108864 - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": "mount", - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated) - - var container types.ContainerCreateResponse - c.Assert(json.Unmarshal(body, &container), check.IsNil) - - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) - - c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, defaultSHMSize) - - out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) - shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) - if !shmRegexp.MatchString(out) { - c.Fatalf("Expected shm of 64MB in mount command, got %v", out) - } -} - -func (s *DockerSuite) TestPostContainersCreateShmSizeOmitted(c *check.C) { - // ShmSize is not supported on Windows - testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - "HostConfig": 
map[string]interface{}{}, - "Cmd": "mount", - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated) - - var container types.ContainerCreateResponse - c.Assert(json.Unmarshal(body, &container), check.IsNil) - - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) - - c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(67108864)) - - out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) - shmRegexp := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) - if !shmRegexp.MatchString(out) { - c.Fatalf("Expected shm of 64MB in mount command, got %v", out) - } -} - -func (s *DockerSuite) TestPostContainersCreateWithShmSize(c *check.C) { - // ShmSize is not supported on Windows - testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - "Cmd": "mount", - "HostConfig": map[string]interface{}{"ShmSize": 1073741824}, - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated) - - var container types.ContainerCreateResponse - c.Assert(json.Unmarshal(body, &container), check.IsNil) - - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) - - c.Assert(containerJSON.HostConfig.ShmSize, check.Equals, int64(1073741824)) - - out, _ := dockerCmd(c, "start", "-i", containerJSON.ID) - shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) - if !shmRegex.MatchString(out) { - c.Fatalf("Expected shm of 1GB in mount command, got %v", out) - } -} - -func (s *DockerSuite) TestPostContainersCreateMemorySwappinessHostConfigOmitted(c *check.C) { - // Swappiness is not supported on Windows - testRequires(c, DaemonIsLinux) - config := map[string]interface{}{ - "Image": "busybox", - } - - status, body, err := sockRequest("POST", "/containers/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated) - - var container types.ContainerCreateResponse - c.Assert(json.Unmarshal(body, &container), check.IsNil) - - status, body, err = sockRequest("GET", "/containers/"+container.ID+"/json", nil) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - - var containerJSON types.ContainerJSON - c.Assert(json.Unmarshal(body, &containerJSON), check.IsNil) - - c.Assert(*containerJSON.HostConfig.MemorySwappiness, check.Equals, int64(-1)) -} - -// check validation is done daemon side and not only in cli -func (s *DockerSuite) TestPostContainersCreateWithOomScoreAdjInvalidRange(c *check.C) { - // OomScoreAdj is not supported on Windows - testRequires(c, DaemonIsLinux) - - config := struct { - Image string - OomScoreAdj int - }{"busybox", 1001} - name := "oomscoreadj-over" - status, b, err := sockRequest("POST", "/containers/create?name="+name, config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - - expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]" - msg := getErrorMessage(c, b) - if !strings.Contains(msg, 
expected) { - c.Fatalf("Expected output to contain %q, got %q", expected, msg) - } - - config = struct { - Image string - OomScoreAdj int - }{"busybox", -1001} - name = "oomscoreadj-low" - status, b, err = sockRequest("POST", "/containers/create?name="+name, config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]" - msg = getErrorMessage(c, b) - if !strings.Contains(msg, expected) { - c.Fatalf("Expected output to contain %q, got %q", expected, msg) - } -} - -// test case for #22210 where an empty container name caused panic. -func (s *DockerSuite) TestContainerApiDeleteWithEmptyName(c *check.C) { - status, out, err := sockRequest("DELETE", "/containers/", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusBadRequest) - c.Assert(string(out), checker.Contains, "No container name or ID supplied") -} diff --git a/integration-cli/docker_api_create_test.go b/integration-cli/docker_api_create_test.go deleted file mode 100644 index 355e8f8749..0000000000 --- a/integration-cli/docker_api_create_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestApiCreateWithNotExistImage(c *check.C) { - name := "test" - config := map[string]interface{}{ - "Image": "test456:v1", - "Volumes": map[string]struct{}{"/tmp": {}}, - } - - status, body, err := sockRequest("POST", "/containers/create?name="+name, config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusNotFound) - expected := "No such image: test456:v1" - c.Assert(getErrorMessage(c, body), checker.Contains, expected) - - config2 := map[string]interface{}{ - "Image": "test456", - "Volumes": map[string]struct{}{"/tmp": {}}, - } - - status, body, err = sockRequest("POST", "/containers/create?name="+name, config2) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusNotFound) - expected = "No such image: test456:latest" - c.Assert(getErrorMessage(c, body), checker.Equals, expected) - - config3 := map[string]interface{}{ - "Image": "sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa", - } - - status, body, err = sockRequest("POST", "/containers/create?name="+name, config3) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusNotFound) - expected = "No such image: sha256:0cb40641836c461bc97c793971d84d758371ed682042457523e4ae701efeaaaa" - c.Assert(getErrorMessage(c, body), checker.Equals, expected) - -} diff --git a/integration-cli/docker_api_events_test.go b/integration-cli/docker_api_events_test.go deleted file mode 100644 index cb219fbc56..0000000000 --- a/integration-cli/docker_api_events_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package main - -import ( - "encoding/json" - "io" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/jsonmessage" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestEventsApiEmptyOutput(c *check.C) { - type apiResp struct { - resp *http.Response - err error - } - chResp := make(chan *apiResp) - go func() { - resp, body, err := sockRequestRaw("GET", "/events", nil, "") - body.Close() - chResp <- &apiResp{resp, err} - }() - - select { - case r := <-chResp: - c.Assert(r.err, checker.IsNil) - c.Assert(r.resp.StatusCode, checker.Equals, http.StatusOK) - case 
<-time.After(3 * time.Second): - c.Fatal("timeout waiting for events api to respond, should have responded immediately") - } -} - -func (s *DockerSuite) TestEventsApiBackwardsCompatible(c *check.C) { - since := daemonTime(c).Unix() - ts := strconv.FormatInt(since, 10) - - out, _ := runSleepingContainer(c, "--name=foo", "-d") - containerID := strings.TrimSpace(out) - c.Assert(waitRun(containerID), checker.IsNil) - - q := url.Values{} - q.Set("since", ts) - - _, body, err := sockRequestRaw("GET", "/events?"+q.Encode(), nil, "") - c.Assert(err, checker.IsNil) - defer body.Close() - - dec := json.NewDecoder(body) - var containerCreateEvent *jsonmessage.JSONMessage - for { - var event jsonmessage.JSONMessage - if err := dec.Decode(&event); err != nil { - if err == io.EOF { - break - } - c.Fatal(err) - } - if event.Status == "create" && event.ID == containerID { - containerCreateEvent = &event - break - } - } - - c.Assert(containerCreateEvent, checker.Not(checker.IsNil)) - c.Assert(containerCreateEvent.Status, checker.Equals, "create") - c.Assert(containerCreateEvent.ID, checker.Equals, containerID) - c.Assert(containerCreateEvent.From, checker.Equals, "busybox") -} diff --git a/integration-cli/docker_api_exec_resize_test.go b/integration-cli/docker_api_exec_resize_test.go deleted file mode 100644 index 2c0c8766ca..0000000000 --- a/integration-cli/docker_api_exec_resize_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "net/http" - "strings" - "sync" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestExecResizeApiHeightWidthNoInt(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - cleanedContainerID := strings.TrimSpace(out) - - endpoint := "/exec/" + cleanedContainerID + "/resize?h=foo&w=bar" - status, _, err := sockRequest("POST", endpoint, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) -} - -// Part of #14845 -func (s *DockerSuite) TestExecResizeImmediatelyAfterExecStart(c *check.C) { - testRequires(c, DaemonIsLinux) - - name := "exec_resize_test" - dockerCmd(c, "run", "-d", "-i", "-t", "--name", name, "--restart", "always", "busybox", "/bin/sh") - - testExecResize := func() error { - data := map[string]interface{}{ - "AttachStdin": true, - "Cmd": []string{"/bin/sh"}, - } - uri := fmt.Sprintf("/containers/%s/exec", name) - status, body, err := sockRequest("POST", uri, data) - if err != nil { - return err - } - if status != http.StatusCreated { - return fmt.Errorf("POST %s is expected to return %d, got %d", uri, http.StatusCreated, status) - } - - out := map[string]string{} - err = json.Unmarshal(body, &out) - if err != nil { - return fmt.Errorf("ExecCreate returned invalid json. Error: %q", err.Error()) - } - - execID := out["Id"] - if len(execID) < 1 { - return fmt.Errorf("ExecCreate got invalid execID") - } - - payload := bytes.NewBufferString(`{"Tty":true}`) - conn, _, err := sockRequestHijack("POST", fmt.Sprintf("/exec/%s/start", execID), payload, "application/json") - if err != nil { - return fmt.Errorf("Failed to start the exec: %q", err.Error()) - } - defer conn.Close() - - _, rc, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/resize?h=24&w=80", execID), nil, "text/plain") - // It's probably a panic of the daemon if io.ErrUnexpectedEOF is returned. 
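- // (If the daemon process dies mid-reply, the HTTP response is cut short and the truncated read surfaces client-side as io.ErrUnexpectedEOF, which is what makes it a usable crash signal here.)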
- if err == io.ErrUnexpectedEOF { - return fmt.Errorf("The daemon might have crashed.") - } - - if err == nil { - rc.Close() - } - - // We're only interested in the io.ErrUnexpectedEOF error, so we return nil otherwise. - return nil - } - - // The panic happens when daemon.ContainerExecStart is called but the - // container.Exec is not called. - // Because the panic is not 100% reproducible, we send the requests concurrently - // to increase the probability that the problem is triggered. - var ( - n = 10 - ch = make(chan error, n) - wg sync.WaitGroup - ) - for i := 0; i < n; i++ { - wg.Add(1) - go func() { - defer wg.Done() - if err := testExecResize(); err != nil { - ch <- err - } - }() - } - - wg.Wait() - select { - case err := <-ch: - c.Fatal(err.Error()) - default: - } -} diff --git a/integration-cli/docker_api_exec_test.go b/integration-cli/docker_api_exec_test.go deleted file mode 100644 index ec33637aa8..0000000000 --- a/integration-cli/docker_api_exec_test.go +++ /dev/null @@ -1,183 +0,0 @@ -// +build !test_no_exec - -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// Regression test for #9414 -func (s *DockerSuite) TestExecApiCreateNoCmd(c *check.C) { - name := "exec_test" - dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") - - status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": nil}) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - - comment := check.Commentf("Expected message when creating exec command with no Cmd specified") - c.Assert(getErrorMessage(c, body), checker.Contains, "No exec command specified", comment) -} - -func (s *DockerSuite) TestExecApiCreateNoValidContentType(c *check.C) { - name := "exec_test" - dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") - - jsonData := bytes.NewBuffer(nil) - if err := json.NewEncoder(jsonData).Encode(map[string]interface{}{"Cmd": nil}); err != nil { - c.Fatalf("Can not encode data to json %s", err) - } - - res, body, err := sockRequestRaw("POST", fmt.Sprintf("/containers/%s/exec", name), jsonData, "text/plain") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - - b, err := readBody(body) - c.Assert(err, checker.IsNil) - - comment := check.Commentf("Expected message when creating exec command with invalid Content-Type specified") - c.Assert(getErrorMessage(c, b), checker.Contains, "Content-Type specified", comment) -} - -func (s *DockerSuite) TestExecApiCreateContainerPaused(c *check.C) { - // Not relevant on Windows as Windows containers cannot be paused - testRequires(c, DaemonIsLinux) - name := "exec_create_test" - dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") - - dockerCmd(c, "pause", name) - status, body, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusConflict) - - comment := check.Commentf("Expected message when creating exec command with Container %s is paused", name) - c.Assert(getErrorMessage(c, body), checker.Contains, "Container "+name+" is paused, unpause the container before exec", comment) -} - -func (s *DockerSuite) TestExecApiStart(c *check.C) { - testRequires(c, DaemonIsLinux) // Uses pause/unpause but bits may
be salvageable to Windows to Windows CI - dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") - - id := createExec(c, "test") - startExec(c, id, http.StatusOK) - - id = createExec(c, "test") - dockerCmd(c, "stop", "test") - - startExec(c, id, http.StatusNotFound) - - dockerCmd(c, "start", "test") - startExec(c, id, http.StatusNotFound) - - // make sure exec is created before pausing - id = createExec(c, "test") - dockerCmd(c, "pause", "test") - startExec(c, id, http.StatusConflict) - dockerCmd(c, "unpause", "test") - startExec(c, id, http.StatusOK) -} - -func (s *DockerSuite) TestExecApiStartBackwardsCompatible(c *check.C) { - runSleepingContainer(c, "-d", "--name", "test") - id := createExec(c, "test") - - resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/v1.20/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "text/plain") - c.Assert(err, checker.IsNil) - - b, err := readBody(body) - comment := check.Commentf("response body: %s", b) - c.Assert(err, checker.IsNil, comment) - c.Assert(resp.StatusCode, checker.Equals, http.StatusOK, comment) -} - -// #19362 -func (s *DockerSuite) TestExecApiStartMultipleTimesError(c *check.C) { - runSleepingContainer(c, "-d", "--name", "test") - execID := createExec(c, "test") - startExec(c, execID, http.StatusOK) - - timeout := time.After(60 * time.Second) - var execJSON struct{ Running bool } - for { - select { - case <-timeout: - c.Fatal("timeout waiting for exec to start") - default: - } - - inspectExec(c, execID, &execJSON) - if !execJSON.Running { - break - } - } - - startExec(c, execID, http.StatusConflict) -} - -// #20638 -func (s *DockerSuite) TestExecApiStartWithDetach(c *check.C) { - name := "foo" - runSleepingContainer(c, "-d", "-t", "--name", name) - data := map[string]interface{}{ - "cmd": []string{"true"}, - "AttachStdin": true, - } - _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), data) - c.Assert(err, checker.IsNil, check.Commentf(string(b))) - - createResp := struct { - ID string `json:"Id"` - }{} - c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) - - _, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", createResp.ID), strings.NewReader(`{"Detach": true}`), "application/json") - c.Assert(err, checker.IsNil) - - b, err = readBody(body) - comment := check.Commentf("response body: %s", b) - c.Assert(err, checker.IsNil, comment) - - resp, _, err := sockRequestRaw("GET", "/_ping", nil, "") - c.Assert(err, checker.IsNil) - if resp.StatusCode != http.StatusOK { - c.Fatal("daemon is down, it should be alive") - } -} - -func createExec(c *check.C, name string) string { - _, b, err := sockRequest("POST", fmt.Sprintf("/containers/%s/exec", name), map[string]interface{}{"Cmd": []string{"true"}}) - c.Assert(err, checker.IsNil, check.Commentf(string(b))) - - createResp := struct { - ID string `json:"Id"` - }{} - c.Assert(json.Unmarshal(b, &createResp), checker.IsNil, check.Commentf(string(b))) - return createResp.ID -} - -func startExec(c *check.C, id string, code int) { - resp, body, err := sockRequestRaw("POST", fmt.Sprintf("/exec/%s/start", id), strings.NewReader(`{"Detach": true}`), "application/json") - c.Assert(err, checker.IsNil) - - b, err := readBody(body) - comment := check.Commentf("response body: %s", b) - c.Assert(err, checker.IsNil, comment) - c.Assert(resp.StatusCode, checker.Equals, code, comment) -} - -func inspectExec(c *check.C, id string, out interface{}) { - resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/exec/%s/json", id),
nil, "") - c.Assert(err, checker.IsNil) - defer body.Close() - c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) - err = json.NewDecoder(body).Decode(out) - c.Assert(err, checker.IsNil) -} diff --git a/integration-cli/docker_api_images_test.go b/integration-cli/docker_api_images_test.go deleted file mode 100644 index 9d35b0c9ed..0000000000 --- a/integration-cli/docker_api_images_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package main - -import ( - "encoding/json" - "net/http" - "net/url" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestApiImagesFilter(c *check.C) { - name := "utest:tag1" - name2 := "utest/docker:tag2" - name3 := "utest:5000/docker:tag3" - for _, n := range []string{name, name2, name3} { - dockerCmd(c, "tag", "busybox", n) - } - type image types.Image - getImages := func(filter string) []image { - v := url.Values{} - v.Set("filter", filter) - status, b, err := sockRequest("GET", "/images/json?"+v.Encode(), nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var images []image - err = json.Unmarshal(b, &images) - c.Assert(err, checker.IsNil) - - return images - } - - //incorrect number of matches returned - images := getImages("utest*/*") - c.Assert(images[0].RepoTags, checker.HasLen, 2) - - images = getImages("utest") - c.Assert(images[0].RepoTags, checker.HasLen, 1) - - images = getImages("utest*") - c.Assert(images[0].RepoTags, checker.HasLen, 1) - - images = getImages("*5000*/*") - c.Assert(images[0].RepoTags, checker.HasLen, 1) -} - -func (s *DockerSuite) TestApiImagesSaveAndLoad(c *check.C) { - // TODO Windows to Windows CI: Investigate further why this test fails. - testRequires(c, Network) - testRequires(c, DaemonIsLinux) - out, err := buildImage("saveandload", "FROM busybox\nENV FOO bar", false) - c.Assert(err, checker.IsNil) - id := strings.TrimSpace(out) - - res, body, err := sockRequestRaw("GET", "/images/"+id+"/get", nil, "") - c.Assert(err, checker.IsNil) - defer body.Close() - c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - - dockerCmd(c, "rmi", id) - - res, loadBody, err := sockRequestRaw("POST", "/images/load", body, "application/x-tar") - c.Assert(err, checker.IsNil) - defer loadBody.Close() - c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - - inspectOut := inspectField(c, id, "Id") - c.Assert(strings.TrimSpace(string(inspectOut)), checker.Equals, id, check.Commentf("load did not work properly")) -} - -func (s *DockerSuite) TestApiImagesDelete(c *check.C) { - if daemonPlatform != "windows" { - testRequires(c, Network) - } - name := "test-api-images-delete" - out, err := buildImage(name, "FROM busybox\nENV FOO bar", false) - c.Assert(err, checker.IsNil) - id := strings.TrimSpace(out) - - dockerCmd(c, "tag", name, "test:tag1") - - status, _, err := sockRequest("DELETE", "/images/"+id, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusConflict) - - status, _, err = sockRequest("DELETE", "/images/test:noexist", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) //Status Codes:404 – no such image - - status, _, err = sockRequest("DELETE", "/images/test:tag1", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) -} - -func (s *DockerSuite) TestApiImagesHistory(c *check.C) { - if daemonPlatform != "windows" { - testRequires(c, Network) - } - name := "test-api-images-history" - out, 
err := buildImage(name, "FROM busybox\nENV FOO bar", false) - c.Assert(err, checker.IsNil) - - id := strings.TrimSpace(out) - - status, body, err := sockRequest("GET", "/images/"+id+"/history", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var historydata []types.ImageHistory - err = json.Unmarshal(body, &historydata) - c.Assert(err, checker.IsNil, check.Commentf("Error on unmarshal")) - - c.Assert(historydata, checker.Not(checker.HasLen), 0) - c.Assert(historydata[0].Tags[0], checker.Equals, "test-api-images-history:latest") -} - -// #14846 -func (s *DockerSuite) TestApiImagesSearchJSONContentType(c *check.C) { - testRequires(c, Network) - - res, b, err := sockRequestRaw("GET", "/images/search?term=test", nil, "application/json") - c.Assert(err, check.IsNil) - b.Close() - c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - c.Assert(res.Header.Get("Content-Type"), checker.Equals, "application/json") -} diff --git a/integration-cli/docker_api_info_test.go b/integration-cli/docker_api_info_test.go deleted file mode 100644 index 2ff69c0596..0000000000 --- a/integration-cli/docker_api_info_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package main - -import ( - "net/http" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestInfoApi(c *check.C) { - endpoint := "/info" - - status, body, err := sockRequest("GET", endpoint, nil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(err, checker.IsNil) - - // always shown fields - stringsToCheck := []string{ - "ID", - "Containers", - "ContainersRunning", - "ContainersPaused", - "ContainersStopped", - "Images", - "LoggingDriver", - "OperatingSystem", - "NCPU", - "OSType", - "Architecture", - "MemTotal", - "KernelVersion", - "Driver", - "ServerVersion", - "SecurityOptions"} - - out := string(body) - for _, linePrefix := range stringsToCheck { - c.Assert(out, checker.Contains, linePrefix) - } -} diff --git a/integration-cli/docker_api_inspect_test.go b/integration-cli/docker_api_inspect_test.go deleted file mode 100644 index 6b55159aa2..0000000000 --- a/integration-cli/docker_api_inspect_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package main - -import ( - "encoding/json" - "net/http" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions/v1p20" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestInspectApiContainerResponse(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - - cleanedContainerID := strings.TrimSpace(out) - keysBase := []string{"Id", "State", "Created", "Path", "Args", "Config", "Image", "NetworkSettings", - "ResolvConfPath", "HostnamePath", "HostsPath", "LogPath", "Name", "Driver", "MountLabel", "ProcessLabel", "GraphDriver"} - - type acase struct { - version string - keys []string - } - - var cases []acase - - if daemonPlatform == "windows" { - cases = []acase{ - {"v1.20", append(keysBase, "Mounts")}, - } - - } else { - cases = []acase{ - {"v1.20", append(keysBase, "Mounts")}, - {"v1.19", append(keysBase, "Volumes", "VolumesRW")}, - } - } - - for _, cs := range cases { - body := getInspectBody(c, cs.version, cleanedContainerID) - - var inspectJSON map[string]interface{} - err := json.Unmarshal(body, &inspectJSON) - c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", cs.version)) - - for _, key := range cs.keys { - _, 
ok := inspectJSON[key] - c.Check(ok, checker.True, check.Commentf("%s does not exist in response for version %s", key, cs.version)) - } - - //Issue #6830: type not properly converted to JSON/back - _, ok := inspectJSON["Path"].(bool) - c.Assert(ok, checker.False, check.Commentf("Path of `true` should not be converted to boolean `true` via JSON marshalling")) - } -} - -func (s *DockerSuite) TestInspectApiContainerVolumeDriverLegacy(c *check.C) { - // No legacy implications for Windows - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - - cleanedContainerID := strings.TrimSpace(out) - - cases := []string{"v1.19", "v1.20"} - for _, version := range cases { - body := getInspectBody(c, version, cleanedContainerID) - - var inspectJSON map[string]interface{} - err := json.Unmarshal(body, &inspectJSON) - c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version %s", version)) - - config, ok := inspectJSON["Config"] - c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) - cfg := config.(map[string]interface{}) - _, ok = cfg["VolumeDriver"] - c.Assert(ok, checker.True, check.Commentf("Api version %s expected to include VolumeDriver in 'Config'", version)) - } -} - -func (s *DockerSuite) TestInspectApiContainerVolumeDriver(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--volume-driver", "local", "busybox", "true") - - cleanedContainerID := strings.TrimSpace(out) - - body := getInspectBody(c, "v1.21", cleanedContainerID) - - var inspectJSON map[string]interface{} - err := json.Unmarshal(body, &inspectJSON) - c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for version 1.21")) - - config, ok := inspectJSON["Config"] - c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) - cfg := config.(map[string]interface{}) - _, ok = cfg["VolumeDriver"] - c.Assert(ok, checker.False, check.Commentf("Api version 1.21 expected to not include VolumeDriver in 'Config'")) - - config, ok = inspectJSON["HostConfig"] - c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) - cfg = config.(map[string]interface{}) - _, ok = cfg["VolumeDriver"] - c.Assert(ok, checker.True, check.Commentf("Api version 1.21 expected to include VolumeDriver in 'HostConfig'")) -} - -func (s *DockerSuite) TestInspectApiImageResponse(c *check.C) { - dockerCmd(c, "tag", "busybox:latest", "busybox:mytag") - - endpoint := "/images/busybox/json" - status, body, err := sockRequest("GET", endpoint, nil) - - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var imageJSON types.ImageInspect - err = json.Unmarshal(body, &imageJSON) - c.Assert(err, checker.IsNil, check.Commentf("Unable to unmarshal body for latest version")) - c.Assert(imageJSON.RepoTags, checker.HasLen, 2) - - c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:latest"), checker.Equals, true) - c.Assert(stringutils.InSlice(imageJSON.RepoTags, "busybox:mytag"), checker.Equals, true) -} - -// #17131, #17139, #17173 -func (s *DockerSuite) TestInspectApiEmptyFieldsInConfigPre121(c *check.C) { - // Not relevant on Windows - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - - cleanedContainerID := strings.TrimSpace(out) - - cases := []string{"v1.19", "v1.20"} - for _, version := range cases { - body := getInspectBody(c, version, cleanedContainerID) - - var inspectJSON map[string]interface{} - err := json.Unmarshal(body, &inspectJSON) - c.Assert(err, checker.IsNil, 
check.Commentf("Unable to unmarshal body for version %s", version)) - config, ok := inspectJSON["Config"] - c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) - cfg := config.(map[string]interface{}) - for _, f := range []string{"MacAddress", "NetworkDisabled", "ExposedPorts"} { - _, ok := cfg[f] - c.Check(ok, checker.True, check.Commentf("Api version %s expected to include %s in 'Config'", version, f)) - } - } -} - -func (s *DockerSuite) TestInspectApiBridgeNetworkSettings120(c *check.C) { - // Not relevant on Windows, and besides it doesn't have any bridge network settings - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - containerID := strings.TrimSpace(out) - waitRun(containerID) - - body := getInspectBody(c, "v1.20", containerID) - - var inspectJSON v1p20.ContainerJSON - err := json.Unmarshal(body, &inspectJSON) - c.Assert(err, checker.IsNil) - - settings := inspectJSON.NetworkSettings - c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) -} - -func (s *DockerSuite) TestInspectApiBridgeNetworkSettings121(c *check.C) { - // Windows doesn't have any bridge network settings - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - containerID := strings.TrimSpace(out) - waitRun(containerID) - - body := getInspectBody(c, "v1.21", containerID) - - var inspectJSON types.ContainerJSON - err := json.Unmarshal(body, &inspectJSON) - c.Assert(err, checker.IsNil) - - settings := inspectJSON.NetworkSettings - c.Assert(settings.IPAddress, checker.Not(checker.HasLen), 0) - c.Assert(settings.Networks["bridge"], checker.Not(checker.IsNil)) - c.Assert(settings.IPAddress, checker.Equals, settings.Networks["bridge"].IPAddress) -} diff --git a/integration-cli/docker_api_inspect_unix_test.go b/integration-cli/docker_api_inspect_unix_test.go deleted file mode 100644 index fe59860d5a..0000000000 --- a/integration-cli/docker_api_inspect_unix_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// #16665 -func (s *DockerSuite) TestInspectApiCpusetInConfigPre120(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, cgroupCpuset) - - name := "cpusetinconfig-pre120" - dockerCmd(c, "run", "--name", name, "--cpuset-cpus", "0", "busybox", "true") - - status, body, err := sockRequest("GET", fmt.Sprintf("/v1.19/containers/%s/json", name), nil) - c.Assert(status, check.Equals, http.StatusOK) - c.Assert(err, check.IsNil) - - var inspectJSON map[string]interface{} - err = json.Unmarshal(body, &inspectJSON) - c.Assert(err, checker.IsNil, check.Commentf("unable to unmarshal body for version 1.19")) - - config, ok := inspectJSON["Config"] - c.Assert(ok, checker.True, check.Commentf("Unable to find 'Config'")) - cfg := config.(map[string]interface{}) - _, ok = cfg["Cpuset"] - c.Assert(ok, checker.True, check.Commentf("Api version 1.19 expected to include Cpuset in 'Config'")) -} diff --git a/integration-cli/docker_api_logs_test.go b/integration-cli/docker_api_logs_test.go deleted file mode 100644 index fb34a2636f..0000000000 --- a/integration-cli/docker_api_logs_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "fmt" - "net/http" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestLogsApiWithStdout(c *check.C) { - out, _ := 
dockerCmd(c, "run", "-d", "-t", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 1; done") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - type logOut struct { - out string - res *http.Response - err error - } - chLog := make(chan logOut) - - go func() { - res, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1×tamps=1", id), nil, "") - if err != nil { - chLog <- logOut{"", nil, err} - return - } - defer body.Close() - out, err := bufio.NewReader(body).ReadString('\n') - if err != nil { - chLog <- logOut{"", nil, err} - return - } - chLog <- logOut{strings.TrimSpace(out), res, err} - }() - - select { - case l := <-chLog: - c.Assert(l.err, checker.IsNil) - c.Assert(l.res.StatusCode, checker.Equals, http.StatusOK) - if !strings.HasSuffix(l.out, "hello") { - c.Fatalf("expected log output to container 'hello', but it does not") - } - case <-time.After(20 * time.Second): - c.Fatal("timeout waiting for logs to exit") - } -} - -func (s *DockerSuite) TestLogsApiNoStdoutNorStderr(c *check.C) { - name := "logs_test" - dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "/bin/sh") - - status, body, err := sockRequest("GET", fmt.Sprintf("/containers/%s/logs", name), nil) - c.Assert(status, checker.Equals, http.StatusBadRequest) - c.Assert(err, checker.IsNil) - - expected := "Bad parameters: you must choose at least one stream" - c.Assert(getErrorMessage(c, body), checker.Contains, expected) -} - -// Regression test for #12704 -func (s *DockerSuite) TestLogsApiFollowEmptyOutput(c *check.C) { - name := "logs_test" - t0 := time.Now() - dockerCmd(c, "run", "-d", "-t", "--name", name, "busybox", "sleep", "10") - - _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") - t1 := time.Now() - c.Assert(err, checker.IsNil) - body.Close() - elapsed := t1.Sub(t0).Seconds() - if elapsed > 20.0 { - c.Fatalf("HTTP response was not immediate (elapsed %.1fs)", elapsed) - } -} - -func (s *DockerSuite) TestLogsApiContainerNotFound(c *check.C) { - name := "nonExistentContainer" - resp, _, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/logs?follow=1&stdout=1&stderr=1&tail=all", name), bytes.NewBuffer(nil), "") - c.Assert(err, checker.IsNil) - c.Assert(resp.StatusCode, checker.Equals, http.StatusNotFound) -} diff --git a/integration-cli/docker_api_network_test.go b/integration-cli/docker_api_network_test.go deleted file mode 100644 index 13d2677b88..0000000000 --- a/integration-cli/docker_api_network_test.go +++ /dev/null @@ -1,353 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "net" - "net/http" - "net/url" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/network" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestApiNetworkGetDefaults(c *check.C) { - testRequires(c, DaemonIsLinux) - // By default docker daemon creates 3 networks. 
check if they are present - defaults := []string{"bridge", "host", "none"} - for _, nn := range defaults { - c.Assert(isNetworkAvailable(c, nn), checker.Equals, true) - } -} - -func (s *DockerSuite) TestApiNetworkCreateDelete(c *check.C) { - testRequires(c, DaemonIsLinux) - // Create a network - name := "testnetwork" - config := types.NetworkCreateRequest{ - Name: name, - NetworkCreate: types.NetworkCreate{ - CheckDuplicate: true, - }, - } - id := createNetwork(c, config, true) - c.Assert(isNetworkAvailable(c, name), checker.Equals, true) - - // delete the network and make sure it is deleted - deleteNetwork(c, id, true) - c.Assert(isNetworkAvailable(c, name), checker.Equals, false) -} - -func (s *DockerSuite) TestApiNetworkCreateCheckDuplicate(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testcheckduplicate" - configOnCheck := types.NetworkCreateRequest{ - Name: name, - NetworkCreate: types.NetworkCreate{ - CheckDuplicate: true, - }, - } - configNotCheck := types.NetworkCreateRequest{ - Name: name, - NetworkCreate: types.NetworkCreate{ - CheckDuplicate: false, - }, - } - - // Creating a new network first - createNetwork(c, configOnCheck, true) - c.Assert(isNetworkAvailable(c, name), checker.Equals, true) - - // Creating another network with same name and CheckDuplicate must fail - createNetwork(c, configOnCheck, false) - - // Creating another network with same name and not CheckDuplicate must succeed - createNetwork(c, configNotCheck, true) -} - -func (s *DockerSuite) TestApiNetworkFilter(c *check.C) { - testRequires(c, DaemonIsLinux) - nr := getNetworkResource(c, getNetworkIDByName(c, "bridge")) - c.Assert(nr.Name, checker.Equals, "bridge") -} - -func (s *DockerSuite) TestApiNetworkInspect(c *check.C) { - testRequires(c, DaemonIsLinux) - // Inspect default bridge network - nr := getNetworkResource(c, "bridge") - c.Assert(nr.Name, checker.Equals, "bridge") - - // run a container and attach it to the default bridge network - out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") - containerID := strings.TrimSpace(out) - containerIP := findContainerIP(c, "test", "bridge") - - // inspect default bridge network again and make sure the container is connected - nr = getNetworkResource(c, nr.ID) - c.Assert(nr.Driver, checker.Equals, "bridge") - c.Assert(nr.Scope, checker.Equals, "local") - c.Assert(nr.Internal, checker.Equals, false) - c.Assert(nr.EnableIPv6, checker.Equals, false) - c.Assert(nr.IPAM.Driver, checker.Equals, "default") - c.Assert(len(nr.Containers), checker.Equals, 1) - c.Assert(nr.Containers[containerID], checker.NotNil) - - ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) - c.Assert(err, checker.IsNil) - c.Assert(ip.String(), checker.Equals, containerIP) - - // IPAM configuration inspect - ipam := network.IPAM{ - Driver: "default", - Config: []network.IPAMConfig{{Subnet: "172.28.0.0/16", IPRange: "172.28.5.0/24", Gateway: "172.28.5.254"}}, - } - config := types.NetworkCreateRequest{ - Name: "br0", - NetworkCreate: types.NetworkCreate{ - Driver: "bridge", - IPAM: ipam, - Options: map[string]string{"foo": "bar", "opts": "dopts"}, - }, - } - id0 := createNetwork(c, config, true) - c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, true) - - nr = getNetworkResource(c, id0) - c.Assert(len(nr.IPAM.Config), checker.Equals, 1) - c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") - c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") - c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") - 
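// The IPAM assertions above confirm that the user-supplied pool (subnet, IP range, gateway) survives the create/inspect round trip; the driver options are checked the same way below. -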
c.Assert(nr.Options["foo"], checker.Equals, "bar") - c.Assert(nr.Options["opts"], checker.Equals, "dopts") - - // delete the network and make sure it is deleted - deleteNetwork(c, id0, true) - c.Assert(isNetworkAvailable(c, "br0"), checker.Equals, false) -} - -func (s *DockerSuite) TestApiNetworkConnectDisconnect(c *check.C) { - testRequires(c, DaemonIsLinux) - // Create test network - name := "testnetwork" - config := types.NetworkCreateRequest{ - Name: name, - } - id := createNetwork(c, config, true) - nr := getNetworkResource(c, id) - c.Assert(nr.Name, checker.Equals, name) - c.Assert(nr.ID, checker.Equals, id) - c.Assert(len(nr.Containers), checker.Equals, 0) - - // run a container - out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") - containerID := strings.TrimSpace(out) - - // connect the container to the test network - connectNetwork(c, nr.ID, containerID) - - // inspect the network to make sure container is connected - nr = getNetworkResource(c, nr.ID) - c.Assert(len(nr.Containers), checker.Equals, 1) - c.Assert(nr.Containers[containerID], checker.NotNil) - - // check if container IP matches network inspect - ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) - c.Assert(err, checker.IsNil) - containerIP := findContainerIP(c, "test", "testnetwork") - c.Assert(ip.String(), checker.Equals, containerIP) - - // disconnect container from the network - disconnectNetwork(c, nr.ID, containerID) - nr = getNetworkResource(c, nr.ID) - c.Assert(nr.Name, checker.Equals, name) - c.Assert(len(nr.Containers), checker.Equals, 0) - - // delete the network - deleteNetwork(c, nr.ID, true) -} - -func (s *DockerSuite) TestApiNetworkIpamMultipleBridgeNetworks(c *check.C) { - testRequires(c, DaemonIsLinux) - // test0 bridge network - ipam0 := network.IPAM{ - Driver: "default", - Config: []network.IPAMConfig{{Subnet: "192.178.0.0/16", IPRange: "192.178.128.0/17", Gateway: "192.178.138.100"}}, - } - config0 := types.NetworkCreateRequest{ - Name: "test0", - NetworkCreate: types.NetworkCreate{ - Driver: "bridge", - IPAM: ipam0, - }, - } - id0 := createNetwork(c, config0, true) - c.Assert(isNetworkAvailable(c, "test0"), checker.Equals, true) - - ipam1 := network.IPAM{ - Driver: "default", - Config: []network.IPAMConfig{{Subnet: "192.178.128.0/17", Gateway: "192.178.128.1"}}, - } - // test1 bridge network overlaps with test0 - config1 := types.NetworkCreateRequest{ - Name: "test1", - NetworkCreate: types.NetworkCreate{ - Driver: "bridge", - IPAM: ipam1, - }, - } - createNetwork(c, config1, false) - c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, false) - - ipam2 := network.IPAM{ - Driver: "default", - Config: []network.IPAMConfig{{Subnet: "192.169.0.0/16", Gateway: "192.169.100.100"}}, - } - // test2 bridge network does not overlap - config2 := types.NetworkCreateRequest{ - Name: "test2", - NetworkCreate: types.NetworkCreate{ - Driver: "bridge", - IPAM: ipam2, - }, - } - createNetwork(c, config2, true) - c.Assert(isNetworkAvailable(c, "test2"), checker.Equals, true) - - // remove test0 and retry to create test1 - deleteNetwork(c, id0, true) - createNetwork(c, config1, true) - c.Assert(isNetworkAvailable(c, "test1"), checker.Equals, true) - - // for networks w/o ipam specified, docker will choose proper non-overlapping subnets - createNetwork(c, types.NetworkCreateRequest{Name: "test3"}, true) - c.Assert(isNetworkAvailable(c, "test3"), checker.Equals, true) - createNetwork(c, types.NetworkCreateRequest{Name: "test4"}, true) - c.Assert(isNetworkAvailable(c, "test4"), 
checker.Equals, true) - createNetwork(c, types.NetworkCreateRequest{Name: "test5"}, true) - c.Assert(isNetworkAvailable(c, "test5"), checker.Equals, true) - - for i := 1; i < 6; i++ { - deleteNetwork(c, fmt.Sprintf("test%d", i), true) - } -} - -func (s *DockerSuite) TestApiCreateDeletePredefinedNetworks(c *check.C) { - testRequires(c, DaemonIsLinux) - createDeletePredefinedNetwork(c, "bridge") - createDeletePredefinedNetwork(c, "none") - createDeletePredefinedNetwork(c, "host") -} - -func createDeletePredefinedNetwork(c *check.C, name string) { - // Create pre-defined network - config := types.NetworkCreateRequest{ - Name: name, - NetworkCreate: types.NetworkCreate{ - CheckDuplicate: true, - }, - } - shouldSucceed := false - createNetwork(c, config, shouldSucceed) - deleteNetwork(c, name, shouldSucceed) -} - -func isNetworkAvailable(c *check.C, name string) bool { - status, body, err := sockRequest("GET", "/networks", nil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(err, checker.IsNil) - - nJSON := []types.NetworkResource{} - err = json.Unmarshal(body, &nJSON) - c.Assert(err, checker.IsNil) - - for _, n := range nJSON { - if n.Name == name { - return true - } - } - return false -} - -func getNetworkIDByName(c *check.C, name string) string { - var ( - v = url.Values{} - filterArgs = filters.NewArgs() - ) - filterArgs.Add("name", name) - filterJSON, err := filters.ToParam(filterArgs) - c.Assert(err, checker.IsNil) - v.Set("filters", filterJSON) - - status, body, err := sockRequest("GET", "/networks?"+v.Encode(), nil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(err, checker.IsNil) - - nJSON := []types.NetworkResource{} - err = json.Unmarshal(body, &nJSON) - c.Assert(err, checker.IsNil) - c.Assert(len(nJSON), checker.Equals, 1) - - return nJSON[0].ID -} - -func getNetworkResource(c *check.C, id string) *types.NetworkResource { - _, obj, err := sockRequest("GET", "/networks/"+id, nil) - c.Assert(err, checker.IsNil) - - nr := types.NetworkResource{} - err = json.Unmarshal(obj, &nr) - c.Assert(err, checker.IsNil) - - return &nr -} - -func createNetwork(c *check.C, config types.NetworkCreateRequest, shouldSucceed bool) string { - status, resp, err := sockRequest("POST", "/networks/create", config) - if !shouldSucceed { - c.Assert(status, checker.Not(checker.Equals), http.StatusCreated) - return "" - } - - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - var nr types.NetworkCreateResponse - err = json.Unmarshal(resp, &nr) - c.Assert(err, checker.IsNil) - - return nr.ID -} - -func connectNetwork(c *check.C, nid, cid string) { - config := types.NetworkConnect{ - Container: cid, - } - - status, _, err := sockRequest("POST", "/networks/"+nid+"/connect", config) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(err, checker.IsNil) -} - -func disconnectNetwork(c *check.C, nid, cid string) { - config := types.NetworkConnect{ - Container: cid, - } - - status, _, err := sockRequest("POST", "/networks/"+nid+"/disconnect", config) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(err, checker.IsNil) -} - -func deleteNetwork(c *check.C, id string, shouldSucceed bool) { - status, _, err := sockRequest("DELETE", "/networks/"+id, nil) - if !shouldSucceed { - c.Assert(status, checker.Not(checker.Equals), http.StatusOK) - return - } - c.Assert(status, checker.Equals, http.StatusNoContent) - c.Assert(err, checker.IsNil) -} diff --git a/integration-cli/docker_api_resize_test.go b/integration-cli/docker_api_resize_test.go 
deleted file mode 100644 index 041df6cb94..0000000000 --- a/integration-cli/docker_api_resize_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "net/http" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestResizeApiResponse(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - cleanedContainerID := strings.TrimSpace(out) - - endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" - status, _, err := sockRequest("POST", endpoint, nil) - c.Assert(status, check.Equals, http.StatusOK) - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestResizeApiHeightWidthNoInt(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - cleanedContainerID := strings.TrimSpace(out) - - endpoint := "/containers/" + cleanedContainerID + "/resize?h=foo&w=bar" - status, _, err := sockRequest("POST", endpoint, nil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestResizeApiResponseWhenContainerNotStarted(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - cleanedContainerID := strings.TrimSpace(out) - - // make sure the exited container is not running - dockerCmd(c, "wait", cleanedContainerID) - - endpoint := "/containers/" + cleanedContainerID + "/resize?h=40&w=40" - status, body, err := sockRequest("POST", endpoint, nil) - c.Assert(status, check.Equals, http.StatusInternalServerError) - c.Assert(err, check.IsNil) - - c.Assert(getErrorMessage(c, body), checker.Contains, "is not running", check.Commentf("resize should fail with message 'Container is not running'")) -} diff --git a/integration-cli/docker_api_service_update_test.go b/integration-cli/docker_api_service_update_test.go deleted file mode 100644 index 7fdef97121..0000000000 --- a/integration-cli/docker_api_service_update_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !windows - -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types/swarm" - "github.com/go-check/check" -) - -func setPortConfig(portConfig []swarm.PortConfig) serviceConstructor { - return func(s *swarm.Service) { - if s.Spec.EndpointSpec == nil { - s.Spec.EndpointSpec = &swarm.EndpointSpec{} - } - s.Spec.EndpointSpec.Ports = portConfig - } -} - -func (s *DockerSwarmSuite) TestApiServiceUpdatePort(c *check.C) { - d := s.AddDaemon(c, true, true) - - // Create a service with a port mapping of 8080:8081. - portConfig := []swarm.PortConfig{{TargetPort: 8081, PublishedPort: 8080}} - serviceID := d.createService(c, simpleTestService, setInstances(1), setPortConfig(portConfig)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) - - // Update the service: changed the port mapping from 8080:8081 to 8082:8083. - updatedPortConfig := []swarm.PortConfig{{TargetPort: 8083, PublishedPort: 8082}} - remoteService := d.getService(c, serviceID) - d.updateService(c, remoteService, setPortConfig(updatedPortConfig)) - - // Inspect the service and verify port mapping. 
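- // Reading the service back returns the stored (desired) spec; the running tasks converge to it asynchronously.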
- updatedService := d.getService(c, serviceID) - c.Assert(updatedService.Spec.EndpointSpec, check.NotNil) - c.Assert(len(updatedService.Spec.EndpointSpec.Ports), check.Equals, 1) - c.Assert(updatedService.Spec.EndpointSpec.Ports[0].TargetPort, check.Equals, uint32(8083)) - c.Assert(updatedService.Spec.EndpointSpec.Ports[0].PublishedPort, check.Equals, uint32(8082)) -} diff --git a/integration-cli/docker_api_stats_test.go b/integration-cli/docker_api_stats_test.go deleted file mode 100644 index 18fb7d1102..0000000000 --- a/integration-cli/docker_api_stats_test.go +++ /dev/null @@ -1,281 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "net/http" - "os/exec" - "runtime" - "strconv" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions" - "github.com/go-check/check" -) - -var expectedNetworkInterfaceStats = strings.Split("rx_bytes rx_dropped rx_errors rx_packets tx_bytes tx_dropped tx_errors tx_packets", " ") - -func (s *DockerSuite) TestApiStatsNoStreamGetCpu(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true;do echo 'Hello'; usleep 100000; done") - - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") - c.Assert(err, checker.IsNil) - c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) - c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") - - var v *types.Stats - err = json.NewDecoder(body).Decode(&v) - c.Assert(err, checker.IsNil) - body.Close() - - var cpuPercent = 0.0 - cpuDelta := float64(v.CPUStats.CPUUsage.TotalUsage - v.PreCPUStats.CPUUsage.TotalUsage) - systemDelta := float64(v.CPUStats.SystemUsage - v.PreCPUStats.SystemUsage) - cpuPercent = (cpuDelta / systemDelta) * float64(len(v.CPUStats.CPUUsage.PercpuUsage)) * 100.0 - - c.Assert(cpuPercent, check.Not(checker.Equals), 0.0, check.Commentf("docker stats with no-stream get cpu usage failed: was %v", cpuPercent)) -} - -func (s *DockerSuite) TestApiStatsStoppedContainerInGoroutines(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo 1") - id := strings.TrimSpace(out) - - getGoRoutines := func() int { - _, body, err := sockRequestRaw("GET", fmt.Sprintf("/info"), nil, "") - c.Assert(err, checker.IsNil) - info := types.Info{} - err = json.NewDecoder(body).Decode(&info) - c.Assert(err, checker.IsNil) - body.Close() - return info.NGoroutines - } - - // When the HTTP connection is closed, the number of goroutines should not increase. 
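- // Grab a goroutine-count baseline from /info, open and drop one streaming stats request, then poll for up to 30 seconds for the count to fall back to the baseline, failing only if it never recovers.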
- routines := getGoRoutines() - _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats", id), nil, "") - c.Assert(err, checker.IsNil) - body.Close() - - t := time.After(30 * time.Second) - for { - select { - case <-t: - c.Assert(getGoRoutines(), checker.LessOrEqualThan, routines) - return - default: - if n := getGoRoutines(); n <= routines { - return - } - time.Sleep(200 * time.Millisecond) - } - } -} - -func (s *DockerSuite) TestApiStatsNetworkStats(c *check.C) { - testRequires(c, SameHostDaemon) - testRequires(c, DaemonIsLinux) - - out, _ := runSleepingContainer(c) - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - // Retrieve the container address - contIP := findContainerIP(c, id, "bridge") - numPings := 4 - - var preRxPackets uint64 - var preTxPackets uint64 - var postRxPackets uint64 - var postTxPackets uint64 - - // Get the container networking stats before and after pinging the container - nwStatsPre := getNetworkStats(c, id) - for _, v := range nwStatsPre { - preRxPackets += v.RxPackets - preTxPackets += v.TxPackets - } - - countParam := "-c" - if runtime.GOOS == "windows" { - countParam = "-n" // Ping count parameter is -n on Windows - } - pingout, err := exec.Command("ping", contIP, countParam, strconv.Itoa(numPings)).CombinedOutput() - if err != nil && runtime.GOOS == "linux" { - // If it fails then try a work-around, but just for Linux. - // If this fails too then go back to the old error for reporting. - // - // The ping will sometimes fail due to an apparmor issue where it - // denies access to the libc.so.6 shared library - running it - // via /lib64/ld-linux-x86-64.so.2 seems to work around it. - pingout2, err2 := exec.Command("/lib64/ld-linux-x86-64.so.2", "/bin/ping", contIP, "-c", strconv.Itoa(numPings)).CombinedOutput() - if err2 == nil { - pingout = pingout2 - err = err2 - } - } - c.Assert(err, checker.IsNil) - pingouts := string(pingout[:]) - nwStatsPost := getNetworkStats(c, id) - for _, v := range nwStatsPost { - postRxPackets += v.RxPackets - postTxPackets += v.TxPackets - } - - // Verify the stats contain at least the expected number of packets (account for ARP) - expRxPkts := 1 + preRxPackets + uint64(numPings) - expTxPkts := 1 + preTxPackets + uint64(numPings) - c.Assert(postTxPackets, checker.GreaterOrEqualThan, expTxPkts, - check.Commentf("Reported less TxPackets than expected. Expected >= %d. Found %d. %s", expTxPkts, postTxPackets, pingouts)) - c.Assert(postRxPackets, checker.GreaterOrEqualThan, expRxPkts, - check.Commentf("Reported less RxPackets than expected. Expected >= %d. Found %d. %s", expRxPkts, postRxPackets, pingouts)) -} - -func (s *DockerSuite) TestApiStatsNetworkStatsVersioning(c *check.C) { - testRequires(c, SameHostDaemon) - testRequires(c, DaemonIsLinux) - - out, _ := runSleepingContainer(c) - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - for i := 17; i <= 21; i++ { - apiVersion := fmt.Sprintf("v1.%d", i) - statsJSONBlob := getVersionedStats(c, id, apiVersion) - if versions.LessThan(apiVersion, "v1.21") { - c.Assert(jsonBlobHasLTv121NetworkStats(statsJSONBlob), checker.Equals, true, - check.Commentf("Stats JSON blob from API %s %#v does not look like a <v1.21 API stats structure", apiVersion, statsJSONBlob)) - } else { - c.Assert(jsonBlobHasGTE121NetworkStats(statsJSONBlob), checker.Equals, true, - check.Commentf("Stats JSON blob from API %s %#v does not look like a >=v1.21 API stats structure", apiVersion, statsJSONBlob)) - } - } -} - -func getNetworkStats(c *check.C, id string) map[string]types.NetworkStats { - var st *types.StatsJSON - - _, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") - c.Assert(err, checker.IsNil) - - err = json.NewDecoder(body).Decode(&st) - c.Assert(err, checker.IsNil) - body.Close() - - return st.Networks -} - -// getVersionedStats returns stats result for the -// container with id using an API call with version apiVersion. Since the -// stats result type differs between API versions, we simply return -// map[string]interface{}. -func getVersionedStats(c *check.C, id string, apiVersion string) map[string]interface{} { - stats := make(map[string]interface{}) - - _, body, err := sockRequestRaw("GET", fmt.Sprintf("/%s/containers/%s/stats?stream=false", apiVersion, id), nil, "") - c.Assert(err, checker.IsNil) - defer body.Close() - - err = json.NewDecoder(body).Decode(&stats) - c.Assert(err, checker.IsNil, check.Commentf("failed to decode stat: %s", err)) - - return stats -} - -func jsonBlobHasLTv121NetworkStats(blob map[string]interface{}) bool { - networkStatsIntfc, ok := blob["network"] - if !ok { - return false - } - networkStats, ok := networkStatsIntfc.(map[string]interface{}) - if !ok { - return false - } - for _, expectedKey := range expectedNetworkInterfaceStats { - if _, ok := networkStats[expectedKey]; !ok { - return false - } - } - return true -} - -func jsonBlobHasGTE121NetworkStats(blob map[string]interface{}) bool { - networksStatsIntfc, ok := blob["networks"] - if !ok { - return false - } - networksStats, ok := networksStatsIntfc.(map[string]interface{}) - if !ok { - return false - } - for _, networkInterfaceStatsIntfc := range networksStats { - networkInterfaceStats, ok := networkInterfaceStatsIntfc.(map[string]interface{}) - if !ok { - return false - } - for _, expectedKey := range expectedNetworkInterfaceStats { - if _, ok := networkInterfaceStats[expectedKey]; !ok { - return false - } - } - } - return true -} - -func (s *DockerSuite) TestApiStatsContainerNotFound(c *check.C) { - testRequires(c, DaemonIsLinux) - - status, _, err := sockRequest("GET", "/containers/nonexistent/stats", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) - - status, _, err = sockRequest("GET", "/containers/nonexistent/stats?stream=0", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNotFound) -} - -func (s *DockerSuite) TestApiStatsNoStreamConnectedContainers(c *check.C) { - testRequires(c, DaemonIsLinux) - - out1, _ := runSleepingContainer(c) - id1 := strings.TrimSpace(out1) - c.Assert(waitRun(id1), checker.IsNil) - - out2, _ := runSleepingContainer(c, "--net", "container:"+id1) - id2 := strings.TrimSpace(out2) - c.Assert(waitRun(id2), checker.IsNil) - - ch := make(chan error) - go func() { - resp, body, 
err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id2), nil, "") - defer body.Close() - if err != nil { - ch <- err - } - if resp.StatusCode != http.StatusOK { - ch <- fmt.Errorf("Invalid StatusCode %v", resp.StatusCode) - } - if resp.Header.Get("Content-Type") != "application/json" { - ch <- fmt.Errorf("Invalid 'Content-Type' %v", resp.Header.Get("Content-Type")) - } - var v *types.Stats - if err := json.NewDecoder(body).Decode(&v); err != nil { - ch <- err - } - ch <- nil - }() - - select { - case err := <-ch: - c.Assert(err, checker.IsNil, check.Commentf("Error in stats remote API: %v", err)) - case <-time.After(15 * time.Second): - c.Fatalf("Stats did not return after timeout") - } -} diff --git a/integration-cli/docker_api_stats_unix_test.go b/integration-cli/docker_api_stats_unix_test.go deleted file mode 100644 index 5409a0dbca..0000000000 --- a/integration-cli/docker_api_stats_unix_test.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - "fmt" - "net/http" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestApiStatsContainerGetMemoryLimit(c *check.C) { - testRequires(c, DaemonIsLinux, memoryLimitSupport) - - resp, body, err := sockRequestRaw("GET", "/info", nil, "application/json") - c.Assert(err, checker.IsNil) - c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) - var info types.Info - err = json.NewDecoder(body).Decode(&info) - c.Assert(err, checker.IsNil) - body.Close() - - // don't set a memory limit, the memory limit should be system memory - conName := "foo" - dockerCmd(c, "run", "-d", "--name", conName, "busybox", "top") - c.Assert(waitRun(conName), checker.IsNil) - - resp, body, err = sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", conName), nil, "") - c.Assert(err, checker.IsNil) - c.Assert(resp.StatusCode, checker.Equals, http.StatusOK) - c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") - - var v *types.Stats - err = json.NewDecoder(body).Decode(&v) - c.Assert(err, checker.IsNil) - body.Close() - c.Assert(fmt.Sprintf("%d", v.MemoryStats.Limit), checker.Equals, fmt.Sprintf("%d", info.MemTotal)) -} diff --git a/integration-cli/docker_api_swarm_test.go b/integration-cli/docker_api_swarm_test.go deleted file mode 100644 index ec04f2bd71..0000000000 --- a/integration-cli/docker_api_swarm_test.go +++ /dev/null @@ -1,938 +0,0 @@ -// +build !windows - -package main - -import ( - "fmt" - "net/http" - "os" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types/swarm" - "github.com/go-check/check" -) - -var defaultReconciliationTimeout = 30 * time.Second - -func (s *DockerSwarmSuite) TestApiSwarmInit(c *check.C) { - testRequires(c, Network) - // todo: should find a better way to verify that components are running than /info - d1 := s.AddDaemon(c, true, true) - info, err := d1.info() - c.Assert(err, checker.IsNil) - c.Assert(info.ControlAvailable, checker.True) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - - d2 := s.AddDaemon(c, true, false) - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.ControlAvailable, checker.False) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - - // Leaving cluster - c.Assert(d2.Leave(false), checker.IsNil) - - info, err = d2.info() 
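- // After leaving, the node should report itself inactive with no manager control.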
- c.Assert(err, checker.IsNil) - c.Assert(info.ControlAvailable, checker.False) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: d1.joinTokens(c).Worker, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) - - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.ControlAvailable, checker.False) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - - // Current state restoring after restarts - err = d1.Stop() - c.Assert(err, checker.IsNil) - err = d2.Stop() - c.Assert(err, checker.IsNil) - - err = d1.Start() - c.Assert(err, checker.IsNil) - err = d2.Start() - c.Assert(err, checker.IsNil) - - info, err = d1.info() - c.Assert(err, checker.IsNil) - c.Assert(info.ControlAvailable, checker.True) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.ControlAvailable, checker.False) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) -} - -func (s *DockerSwarmSuite) TestApiSwarmJoinToken(c *check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, false, false) - c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) - - d2 := s.AddDaemon(c, false, false) - err := d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) - c.Assert(err, checker.NotNil) - c.Assert(err.Error(), checker.Contains, "join token is necessary") - info, err := d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - - err = d2.Join(swarm.JoinRequest{JoinToken: "foobaz", RemoteAddrs: []string{d1.listenAddr}}) - c.Assert(err, checker.NotNil) - c.Assert(err.Error(), checker.Contains, "join token is necessary") - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - - workerToken := d1.joinTokens(c).Worker - - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - c.Assert(d2.Leave(false), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - - // change tokens - d1.rotateTokens(c) - - err = d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}) - c.Assert(err, checker.NotNil) - c.Assert(err.Error(), checker.Contains, "join token is necessary") - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - - workerToken = d1.joinTokens(c).Worker - - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - c.Assert(d2.Leave(false), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - - // change spec, don't change tokens - d1.updateSwarm(c, func(s *swarm.Spec) {}) - - err = d2.Join(swarm.JoinRequest{RemoteAddrs: []string{d1.listenAddr}}) - c.Assert(err, checker.NotNil) - c.Assert(err.Error(), checker.Contains, "join token is necessary") - info, err = d2.info() - 
c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - - c.Assert(d2.Join(swarm.JoinRequest{JoinToken: workerToken, RemoteAddrs: []string{d1.listenAddr}}), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - c.Assert(d2.Leave(false), checker.IsNil) - info, err = d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) -} - -func (s *DockerSwarmSuite) TestApiSwarmCAHash(c *check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, false, false) - splitToken := strings.Split(d1.joinTokens(c).Worker, "-") - splitToken[2] = "1kxftv4ofnc6mt30lmgipg6ngf9luhwqopfk1tz6bdmnkubg0e" - replacementToken := strings.Join(splitToken, "-") - err := d2.Join(swarm.JoinRequest{JoinToken: replacementToken, RemoteAddrs: []string{d1.listenAddr}}) - c.Assert(err, checker.NotNil) - c.Assert(err.Error(), checker.Contains, "remote CA does not match fingerprint") -} - -func (s *DockerSwarmSuite) TestApiSwarmPromoteDemote(c *check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, false, false) - c.Assert(d1.Init(swarm.InitRequest{}), checker.IsNil) - d2 := s.AddDaemon(c, true, false) - - info, err := d2.info() - c.Assert(err, checker.IsNil) - c.Assert(info.ControlAvailable, checker.False) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { - n.Spec.Role = swarm.NodeRoleManager - }) - - waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) - - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { - n.Spec.Role = swarm.NodeRoleWorker - }) - - waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.False) - - // Demoting last node should fail - node := d1.getNode(c, d1.NodeID) - node.Spec.Role = swarm.NodeRoleWorker - url := fmt.Sprintf("/nodes/%s/update?version=%d", node.ID, node.Version.Index) - status, out, err := d1.SockRequest("POST", url, node.Spec) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("output: %q", string(out))) - c.Assert(string(out), checker.Contains, "last manager of the swarm") - info, err = d1.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - c.Assert(info.ControlAvailable, checker.True) - - // Promote already demoted node - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { - n.Spec.Role = swarm.NodeRoleManager - }) - - waitAndAssert(c, defaultReconciliationTimeout, d2.checkControlAvailable, checker.True) -} - -func (s *DockerSwarmSuite) TestApiSwarmServicesEmptyList(c *check.C) { - testRequires(c, Network) - d := s.AddDaemon(c, true, true) - - services := d.listServices(c) - c.Assert(services, checker.NotNil) - c.Assert(len(services), checker.Equals, 0, check.Commentf("services: %#v", services)) -} - -func (s *DockerSwarmSuite) TestApiSwarmServicesCreate(c *check.C) { - testRequires(c, Network) - d := s.AddDaemon(c, true, true) - - instances := 2 - id := d.createService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) - - service := d.getService(c, id) - instances = 5 - d.updateService(c, service, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, 
d.checkActiveContainerCount, checker.Equals, instances) - - d.removeService(c, service.ID) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 0) -} - -func (s *DockerSwarmSuite) TestApiSwarmServicesMultipleAgents(c *check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - d3 := s.AddDaemon(c, true, false) - - time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks - - instances := 9 - id := d1.createService(c, simpleTestService, setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.GreaterThan, 0) - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - - // reconciliation on d2 node down - c.Assert(d2.Stop(), checker.IsNil) - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - - // test downscaling - instances = 5 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - -} - -func (s *DockerSwarmSuite) TestApiSwarmServicesCreateGlobal(c *check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - d3 := s.AddDaemon(c, true, false) - - d1.createService(c, simpleTestService, setGlobalMode) - - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, 1) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) - waitAndAssert(c, defaultReconciliationTimeout, d3.checkActiveContainerCount, checker.Equals, 1) - - d4 := s.AddDaemon(c, true, false) - d5 := s.AddDaemon(c, true, false) - - waitAndAssert(c, defaultReconciliationTimeout, d4.checkActiveContainerCount, checker.Equals, 1) - waitAndAssert(c, defaultReconciliationTimeout, d5.checkActiveContainerCount, checker.Equals, 1) -} - -func (s *DockerSwarmSuite) TestApiSwarmServicesUpdate(c *check.C) { - const nodeCount = 3 - var daemons [nodeCount]*SwarmDaemon - for i := 0; i < nodeCount; i++ { - daemons[i] = s.AddDaemon(c, true, i == 0) - } - // wait for nodes ready - waitAndAssert(c, 5*time.Second, daemons[0].checkNodeReadyCount, checker.Equals, nodeCount) - - // service image at start - image1 := "busybox:latest" - // target image in update - image2 := "busybox:test" - - // create a different tag - for _, d := range daemons { - out, err := d.Cmd("tag", image1, image2) - c.Assert(err, checker.IsNil, check.Commentf(out)) - } - - // create service - instances := 5 - parallelism := 2 - id := daemons[0].createService(c, serviceForUpdate, setInstances(instances)) - - // wait for tasks ready - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image1: instances}) - - // issue service update - service := daemons[0].getService(c, id) - daemons[0].updateService(c, service, setImage(image2)) - - // first batch - waitAndAssert(c, defaultReconciliationTimeout, 
daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image1: instances - parallelism, image2: parallelism}) - - // 2nd batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image1: instances - 2*parallelism, image2: 2 * parallelism}) - - // 3rd batch - waitAndAssert(c, defaultReconciliationTimeout, daemons[0].checkRunningTaskImages, checker.DeepEquals, - map[string]int{image2: instances}) -} - -func (s *DockerSwarmSuite) TestApiSwarmServicesStateReporting(c *check.C) { - testRequires(c, Network) - testRequires(c, SameHostDaemon) - testRequires(c, DaemonIsLinux) - - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, true) - d3 := s.AddDaemon(c, true, false) - - time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks - - instances := 9 - d1.createService(c, simpleTestService, setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - - getContainers := func() map[string]*SwarmDaemon { - m := make(map[string]*SwarmDaemon) - for _, d := range []*SwarmDaemon{d1, d2, d3} { - for _, id := range d.activeContainers() { - m[id] = d - } - } - return m - } - - containers := getContainers() - c.Assert(containers, checker.HasLen, instances) - var toRemove string - for i := range containers { - toRemove = i - } - - _, err := containers[toRemove].Cmd("stop", toRemove) - c.Assert(err, checker.IsNil) - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - - containers2 := getContainers() - c.Assert(containers2, checker.HasLen, instances) - for i := range containers { - if i == toRemove { - c.Assert(containers2[i], checker.IsNil) - } else { - c.Assert(containers2[i], checker.NotNil) - } - } - - containers = containers2 - for i := range containers { - toRemove = i - } - - // try killing the process outside of docker - pidStr, err := containers[toRemove].Cmd("inspect", "-f", "{{.State.Pid}}", toRemove) - c.Assert(err, checker.IsNil) - pid, err := strconv.Atoi(strings.TrimSpace(pidStr)) - c.Assert(err, checker.IsNil) - c.Assert(syscall.Kill(pid, syscall.SIGKILL), checker.IsNil) - - time.Sleep(time.Second) // give some time to handle the signal - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) - - containers2 = getContainers() - c.Assert(containers2, checker.HasLen, instances) - for i := range containers { - if i == toRemove { - c.Assert(containers2[i], checker.IsNil) - } else { - c.Assert(containers2[i], checker.NotNil) - } - } -} - -func (s *DockerSwarmSuite) TestApiSwarmLeaderElection(c *check.C) { - // Create 3 nodes - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, true) - d3 := s.AddDaemon(c, true, true) - - // assert that the first node we made is the leader, and the other two are followers - c.Assert(d1.getNode(c, d1.NodeID).ManagerStatus.Leader, checker.True) - c.Assert(d1.getNode(c, d2.NodeID).ManagerStatus.Leader, checker.False) - c.Assert(d1.getNode(c, d3.NodeID).ManagerStatus.Leader, checker.False) - - leader := d1 - - // stop the leader - leader.Stop() - - // wait for an election to occur - var newleader *SwarmDaemon - 
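- // Whichever surviving manager reports itself as leader won the election.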
- for _, d := range []*SwarmDaemon{d2, d3} { - if d.getNode(c, d.NodeID).ManagerStatus.Leader { - newleader = d - break - } - } - - // assert that we have a new leader - c.Assert(newleader, checker.NotNil) - - // add the old leader back - leader.Start() - - // clear leader and reinit the followers list - followers := make([]*SwarmDaemon, 0, 3) - - // pick out the leader and the followers again - for _, d := range []*SwarmDaemon{d1, d2, d3} { - if d1.getNode(c, d.NodeID).ManagerStatus.Leader { - leader = d - } else { - followers = append(followers, d) - } - } - - // verify that we still only have 1 leader and 2 followers - c.Assert(leader, checker.NotNil) - c.Assert(followers, checker.HasLen, 2) - // and that after we added d1 back, the leader hasn't changed - c.Assert(leader.NodeID, checker.Equals, newleader.NodeID) -} - -func (s *DockerSwarmSuite) TestApiSwarmRaftQuorum(c *check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, true) - d3 := s.AddDaemon(c, true, true) - - d1.createService(c, simpleTestService) - - c.Assert(d2.Stop(), checker.IsNil) - - d1.createService(c, simpleTestService, func(s *swarm.Service) { - s.Spec.Name = "top1" - }) - - c.Assert(d3.Stop(), checker.IsNil) - - var service swarm.Service - simpleTestService(&service) - service.Spec.Name = "top2" - status, out, err := d1.SockRequest("POST", "/services/create", service.Spec) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError, check.Commentf("deadline exceeded: %s", string(out))) - - c.Assert(d2.Start(), checker.IsNil) - - d1.createService(c, simpleTestService, func(s *swarm.Service) { - s.Spec.Name = "top3" - }) -} - -func (s *DockerSwarmSuite) TestApiSwarmListNodes(c *check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - d3 := s.AddDaemon(c, true, false) - - nodes := d1.listNodes(c) - c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) - -loop0: - for _, n := range nodes { - for _, d := range []*SwarmDaemon{d1, d2, d3} { - if n.ID == d.NodeID { - continue loop0 - } - } - c.Errorf("unknown nodeID %v", n.ID) - } -} - -func (s *DockerSwarmSuite) TestApiSwarmNodeUpdate(c *check.C) { - testRequires(c, Network) - d := s.AddDaemon(c, true, true) - - nodes := d.listNodes(c) - - d.updateNode(c, nodes[0].ID, func(n *swarm.Node) { - n.Spec.Availability = swarm.NodeAvailabilityPause - }) - - n := d.getNode(c, nodes[0].ID) - c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityPause) -} - -func (s *DockerSwarmSuite) TestApiSwarmNodeRemove(c *check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - _ = s.AddDaemon(c, true, false) - - nodes := d1.listNodes(c) - c.Assert(len(nodes), checker.Equals, 3, check.Commentf("nodes: %#v", nodes)) - - // Getting the info so we can take the NodeID - d2Info, err := d2.info() - c.Assert(err, checker.IsNil) - - // forceful removal of d2 should work - d1.removeNode(c, d2Info.NodeID, true) - - nodes = d1.listNodes(c) - c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) - - // Restart the node that was removed - err = d2.Restart() - c.Assert(err, checker.IsNil) - - // Give some time for the node to rejoin - time.Sleep(1 * time.Second) - - // Make sure the node didn't rejoin - nodes = d1.listNodes(c) - c.Assert(len(nodes), checker.Equals, 2, check.Commentf("nodes: %#v", nodes)) -} - -func (s *DockerSwarmSuite) TestApiSwarmNodeDrainPause(c 
*check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, false) - - time.Sleep(1 * time.Second) // make sure all daemons are ready to accept tasks - - // start a service, expect balanced distribution - instances := 8 - id := d1.createService(c, simpleTestService, setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) - - // drain d2, all containers should move to d1 - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { - n.Spec.Availability = swarm.NodeAvailabilityDrain - }) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) - - // set d2 back to active - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { - n.Spec.Availability = swarm.NodeAvailabilityActive - }) - - instances = 1 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) - - instances = 8 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) - - // drained node first so we don't get any old containers - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.GreaterThan, 0) - waitAndAssert(c, defaultReconciliationTimeout*2, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) - - d2ContainerCount := len(d2.activeContainers()) - - // set d2 to paused, scale service up, only d1 gets new tasks - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { - n.Spec.Availability = swarm.NodeAvailabilityPause - }) - - instances = 14 - d1.updateService(c, d1.getService(c, id), setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances-d2ContainerCount) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, d2ContainerCount) - -} - -func (s *DockerSwarmSuite) TestApiSwarmLeaveRemovesContainer(c *check.C) { - testRequires(c, Network) - d := s.AddDaemon(c, true, true) - - instances := 2 - d.createService(c, simpleTestService, setInstances(instances)) - - id, err := d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, checker.IsNil) - id = strings.TrimSpace(id) - - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances+1) - - c.Assert(d.Leave(false), checker.NotNil) - c.Assert(d.Leave(true), checker.IsNil) - - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) - - id2, err := d.Cmd("ps", "-q") - c.Assert(err, checker.IsNil) - c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) -} - -// #23629 -func (s *DockerSwarmSuite) TestApiSwarmLeaveOnPendingJoin(c *check.C) { - s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, false, false) - - id, err := d2.Cmd("run", "-d", "busybox", "top") - c.Assert(err, checker.IsNil) - id = 
strings.TrimSpace(id) - - go d2.Join(swarm.JoinRequest{ - RemoteAddrs: []string{"nosuchhost:1234"}, - }) - - waitAndAssert(c, defaultReconciliationTimeout, d2.checkLocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 1) - - id2, err := d2.Cmd("ps", "-q") - c.Assert(err, checker.IsNil) - c.Assert(id, checker.HasPrefix, strings.TrimSpace(id2)) -} - -// #23705 -func (s *DockerSwarmSuite) TestApiSwarmRestoreOnPendingJoin(c *check.C) { - d := s.AddDaemon(c, false, false) - go d.Join(swarm.JoinRequest{ - RemoteAddrs: []string{"nosuchhost:1234"}, - }) - - waitAndAssert(c, defaultReconciliationTimeout, d.checkLocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) - - c.Assert(d.Stop(), checker.IsNil) - c.Assert(d.Start(), checker.IsNil) - - info, err := d.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateInactive) -} - -func (s *DockerSwarmSuite) TestApiSwarmManagerRestore(c *check.C) { - testRequires(c, Network) - d1 := s.AddDaemon(c, true, true) - - instances := 2 - id := d1.createService(c, simpleTestService, setInstances(instances)) - - d1.getService(c, id) - d1.Stop() - d1.Start() - d1.getService(c, id) - - d2 := s.AddDaemon(c, true, true) - d2.getService(c, id) - d2.Stop() - d2.Start() - d2.getService(c, id) - - d3 := s.AddDaemon(c, true, true) - d3.getService(c, id) - d3.Stop() - d3.Start() - d3.getService(c, id) - - d3.Kill() - time.Sleep(1 * time.Second) // time to handle signal - d3.Start() - d3.getService(c, id) -} - -func (s *DockerSwarmSuite) TestApiSwarmScaleNoRollingUpdate(c *check.C) { - testRequires(c, Network) - d := s.AddDaemon(c, true, true) - - instances := 2 - id := d.createService(c, simpleTestService, setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) - containers := d.activeContainers() - instances = 4 - d.updateService(c, d.getService(c, id), setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, instances) - containers2 := d.activeContainers() - -loop0: - for _, c1 := range containers { - for _, c2 := range containers2 { - if c1 == c2 { - continue loop0 - } - } - c.Errorf("container %v not found in new set %#v", c1, containers2) - } -} - -func (s *DockerSwarmSuite) TestApiSwarmInvalidAddress(c *check.C) { - d := s.AddDaemon(c, false, false) - req := swarm.InitRequest{ - ListenAddr: "", - } - status, _, err := d.SockRequest("POST", "/swarm/init", req) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - - req2 := swarm.JoinRequest{ - ListenAddr: "0.0.0.0:2377", - RemoteAddrs: []string{""}, - } - status, _, err = d.SockRequest("POST", "/swarm/join", req2) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) -} - -func (s *DockerSwarmSuite) TestApiSwarmForceNewCluster(c *check.C) { - d1 := s.AddDaemon(c, true, true) - d2 := s.AddDaemon(c, true, true) - - instances := 2 - id := d1.createService(c, simpleTestService, setInstances(instances)) - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d2.checkActiveContainerCount), checker.Equals, instances) - - // drain d2, all containers should move to d1 - d1.updateNode(c, d2.NodeID, func(n *swarm.Node) { - n.Spec.Availability = swarm.NodeAvailabilityDrain - }) - waitAndAssert(c, 
defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) - waitAndAssert(c, defaultReconciliationTimeout, d2.checkActiveContainerCount, checker.Equals, 0) - - c.Assert(d2.Stop(), checker.IsNil) - - c.Assert(d1.Init(swarm.InitRequest{ - ForceNewCluster: true, - Spec: swarm.Spec{}, - }), checker.IsNil) - - waitAndAssert(c, defaultReconciliationTimeout, d1.checkActiveContainerCount, checker.Equals, instances) - - d3 := s.AddDaemon(c, true, true) - info, err := d3.info() - c.Assert(err, checker.IsNil) - c.Assert(info.ControlAvailable, checker.True) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - - instances = 4 - d3.updateService(c, d3.getService(c, id), setInstances(instances)) - - waitAndAssert(c, defaultReconciliationTimeout, reducedCheck(sumAsIntegers, d1.checkActiveContainerCount, d3.checkActiveContainerCount), checker.Equals, instances) -} - -func simpleTestService(s *swarm.Service) { - var ureplicas uint64 - ureplicas = 1 - s.Spec = swarm.ServiceSpec{ - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: "busybox:latest", - Command: []string{"/bin/top"}, - }, - }, - Mode: swarm.ServiceMode{ - Replicated: &swarm.ReplicatedService{ - Replicas: &ureplicas, - }, - }, - } - s.Spec.Name = "top" -} - -func serviceForUpdate(s *swarm.Service) { - var ureplicas uint64 - ureplicas = 1 - s.Spec = swarm.ServiceSpec{ - TaskTemplate: swarm.TaskSpec{ - ContainerSpec: swarm.ContainerSpec{ - Image: "busybox:latest", - Command: []string{"/bin/top"}, - }, - }, - Mode: swarm.ServiceMode{ - Replicated: &swarm.ReplicatedService{ - Replicas: &ureplicas, - }, - }, - UpdateConfig: &swarm.UpdateConfig{ - Parallelism: 2, - Delay: 8 * time.Second, - FailureAction: swarm.UpdateFailureActionContinue, - }, - } - s.Spec.Name = "updatetest" -} - -func setInstances(replicas int) serviceConstructor { - ureplicas := uint64(replicas) - return func(s *swarm.Service) { - s.Spec.Mode = swarm.ServiceMode{ - Replicated: &swarm.ReplicatedService{ - Replicas: &ureplicas, - }, - } - } -} - -func setImage(image string) serviceConstructor { - return func(s *swarm.Service) { - s.Spec.TaskTemplate.ContainerSpec.Image = image - } -} - -func setGlobalMode(s *swarm.Service) { - s.Spec.Mode = swarm.ServiceMode{ - Global: &swarm.GlobalService{}, - } -} - -func checkClusterHealth(c *check.C, cl []*SwarmDaemon, managerCount, workerCount int) { - var totalMCount, totalWCount int - for _, d := range cl { - info, err := d.info() - c.Assert(err, check.IsNil) - if !info.ControlAvailable { - totalWCount++ - continue - } - var leaderFound bool - totalMCount++ - var mCount, wCount int - for _, n := range d.listNodes(c) { - c.Assert(n.Status.State, checker.Equals, swarm.NodeStateReady, check.Commentf("state of node %s, reported by %s", n.ID, d.Info.NodeID)) - c.Assert(n.Spec.Availability, checker.Equals, swarm.NodeAvailabilityActive, check.Commentf("availability of node %s, reported by %s", n.ID, d.Info.NodeID)) - if n.Spec.Role == swarm.NodeRoleManager { - c.Assert(n.ManagerStatus, checker.NotNil, check.Commentf("manager status of node %s (manager), reported by %s", n.ID, d.Info.NodeID)) - if n.ManagerStatus.Leader { - leaderFound = true - } - mCount++ - } else { - c.Assert(n.ManagerStatus, checker.IsNil, check.Commentf("manager status of node %s (worker), reported by %s", n.ID, d.Info.NodeID)) - wCount++ - } - } - c.Assert(leaderFound, checker.True, check.Commentf("lack of leader reported by node %s", info.NodeID)) - c.Assert(mCount, checker.Equals, managerCount, 
check.Commentf("managers count reported by node %s", info.NodeID)) - c.Assert(wCount, checker.Equals, workerCount, check.Commentf("workers count reported by node %s", info.NodeID)) - } - c.Assert(totalMCount, checker.Equals, managerCount) - c.Assert(totalWCount, checker.Equals, workerCount) -} - -func (s *DockerSwarmSuite) TestApiSwarmRestartCluster(c *check.C) { - mCount, wCount := 5, 1 - - var nodes []*SwarmDaemon - for i := 0; i < mCount; i++ { - manager := s.AddDaemon(c, true, true) - info, err := manager.info() - c.Assert(err, checker.IsNil) - c.Assert(info.ControlAvailable, checker.True) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - nodes = append(nodes, manager) - } - - for i := 0; i < wCount; i++ { - worker := s.AddDaemon(c, true, false) - info, err := worker.info() - c.Assert(err, checker.IsNil) - c.Assert(info.ControlAvailable, checker.False) - c.Assert(info.LocalNodeState, checker.Equals, swarm.LocalNodeStateActive) - nodes = append(nodes, worker) - } - - // stop whole cluster - { - var wg sync.WaitGroup - wg.Add(len(nodes)) - errs := make(chan error, len(nodes)) - - for _, d := range nodes { - go func(daemon *SwarmDaemon) { - defer wg.Done() - if err := daemon.Stop(); err != nil { - errs <- err - } - if root := os.Getenv("DOCKER_REMAP_ROOT"); root != "" { - daemon.root = filepath.Dir(daemon.root) - } - }(d) - } - wg.Wait() - close(errs) - for err := range errs { - c.Assert(err, check.IsNil) - } - } - - // start whole cluster - { - var wg sync.WaitGroup - wg.Add(len(nodes)) - errs := make(chan error, len(nodes)) - - for _, d := range nodes { - go func(daemon *SwarmDaemon) { - defer wg.Done() - if err := daemon.Start("--iptables=false"); err != nil { - errs <- err - } - }(d) - } - wg.Wait() - close(errs) - for err := range errs { - c.Assert(err, check.IsNil) - } - } - - checkClusterHealth(c, nodes, mCount, wCount) -} diff --git a/integration-cli/docker_api_test.go b/integration-cli/docker_api_test.go deleted file mode 100644 index d09b8f193b..0000000000 --- a/integration-cli/docker_api_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - "net/http/httptest" - "net/http/httputil" - "os/exec" - "strconv" - "strings" - "time" - - "github.com/docker/docker/api" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestApiOptionsRoute(c *check.C) { - status, _, err := sockRequest("OPTIONS", "/", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) -} - -func (s *DockerSuite) TestApiGetEnabledCors(c *check.C) { - res, body, err := sockRequestRaw("GET", "/version", nil, "") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusOK) - body.Close() - // TODO: @runcom incomplete tests, why old integration tests had this headers - // and here none of the headers below are in the response? 
-    //c.Log(res.Header)
-    //c.Assert(res.Header.Get("Access-Control-Allow-Origin"), check.Equals, "*")
-    //c.Assert(res.Header.Get("Access-Control-Allow-Headers"), check.Equals, "Origin, X-Requested-With, Content-Type, Accept, X-Registry-Auth")
-}
-
-func (s *DockerSuite) TestApiVersionStatusCode(c *check.C) {
-    conn, err := sockConn(time.Duration(10*time.Second), "")
-    c.Assert(err, checker.IsNil)
-
-    client := httputil.NewClientConn(conn, nil)
-    defer client.Close()
-
-    req, err := http.NewRequest("GET", "/v999.0/version", nil)
-    c.Assert(err, checker.IsNil)
-    req.Header.Set("User-Agent", "Docker-Client/999.0 (os)")
-
-    res, err := client.Do(req)
-    c.Assert(err, checker.IsNil)
-    c.Assert(res.StatusCode, checker.Equals, http.StatusBadRequest)
-}
-
-func (s *DockerSuite) TestApiClientVersionNewerThanServer(c *check.C) {
-    v := strings.Split(api.DefaultVersion, ".")
-    vMinInt, err := strconv.Atoi(v[1])
-    c.Assert(err, checker.IsNil)
-    vMinInt++
-    v[1] = strconv.Itoa(vMinInt)
-    version := strings.Join(v, ".")
-
-    status, body, err := sockRequest("GET", "/v"+version+"/version", nil)
-    c.Assert(err, checker.IsNil)
-    c.Assert(status, checker.Equals, http.StatusBadRequest)
-    expected := fmt.Sprintf("client is newer than server (client API version: %s, server API version: %s)", version, api.DefaultVersion)
-    c.Assert(getErrorMessage(c, body), checker.Equals, expected)
-}
-
-func (s *DockerSuite) TestApiClientVersionOldNotSupported(c *check.C) {
-    v := strings.Split(api.MinVersion, ".")
-    vMinInt, err := strconv.Atoi(v[1])
-    c.Assert(err, checker.IsNil)
-    vMinInt--
-    v[1] = strconv.Itoa(vMinInt)
-    version := strings.Join(v, ".")
-
-    status, body, err := sockRequest("GET", "/v"+version+"/version", nil)
-    c.Assert(err, checker.IsNil)
-    c.Assert(status, checker.Equals, http.StatusBadRequest)
-    expected := fmt.Sprintf("client version %s is too old.
Minimum supported API version is %s, please upgrade your client to a newer version", version, api.MinVersion) - c.Assert(strings.TrimSpace(string(body)), checker.Equals, expected) -} - -func (s *DockerSuite) TestApiDockerApiVersion(c *check.C) { - var svrVersion string - - server := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - url := r.URL.Path - svrVersion = url - })) - defer server.Close() - - // Test using the env var first - cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "version") - cmd.Env = appendBaseEnv(false, "DOCKER_API_VERSION=xxx") - out, _, _ := runCommandWithOutput(cmd) - - c.Assert(svrVersion, check.Equals, "/vxxx/version") - - if !strings.Contains(out, "API version: xxx") { - c.Fatalf("Out didn't have 'xxx' for the API version, had:\n%s", out) - } -} - -func (s *DockerSuite) TestApiErrorJSON(c *check.C) { - httpResp, body, err := sockRequestRaw("POST", "/containers/create", strings.NewReader(`{}`), "application/json") - c.Assert(err, checker.IsNil) - c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) - c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") - b, err := readBody(body) - c.Assert(err, checker.IsNil) - c.Assert(getErrorMessage(c, b), checker.Equals, "Config cannot be empty in order to create a container") -} - -func (s *DockerSuite) TestApiErrorPlainText(c *check.C) { - httpResp, body, err := sockRequestRaw("POST", "/v1.23/containers/create", strings.NewReader(`{}`), "application/json") - c.Assert(err, checker.IsNil) - c.Assert(httpResp.StatusCode, checker.Equals, http.StatusInternalServerError) - c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") - b, err := readBody(body) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(string(b)), checker.Equals, "Config cannot be empty in order to create a container") -} - -func (s *DockerSuite) TestApiErrorNotFoundJSON(c *check.C) { - // 404 is a different code path to normal errors, so test separately - httpResp, body, err := sockRequestRaw("GET", "/notfound", nil, "application/json") - c.Assert(err, checker.IsNil) - c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) - c.Assert(httpResp.Header.Get("Content-Type"), checker.Equals, "application/json") - b, err := readBody(body) - c.Assert(err, checker.IsNil) - c.Assert(getErrorMessage(c, b), checker.Equals, "page not found") -} - -func (s *DockerSuite) TestApiErrorNotFoundPlainText(c *check.C) { - httpResp, body, err := sockRequestRaw("GET", "/v1.23/notfound", nil, "application/json") - c.Assert(err, checker.IsNil) - c.Assert(httpResp.StatusCode, checker.Equals, http.StatusNotFound) - c.Assert(httpResp.Header.Get("Content-Type"), checker.Contains, "text/plain") - b, err := readBody(body) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(string(b)), checker.Equals, "page not found") -} diff --git a/integration-cli/docker_api_update_unix_test.go b/integration-cli/docker_api_update_unix_test.go deleted file mode 100644 index 607e76a4d5..0000000000 --- a/integration-cli/docker_api_update_unix_test.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build !windows - -package main - -import ( - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestApiUpdateContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - testRequires(c, swapMemorySupport) - - name := "apiUpdateContainer" - hostConfig := 
map[string]interface{}{ - "Memory": 314572800, - "MemorySwap": 524288000, - } - dockerCmd(c, "run", "-d", "--name", name, "-m", "200M", "busybox", "top") - _, _, err := sockRequest("POST", "/containers/"+name+"/update", hostConfig) - c.Assert(err, check.IsNil) - - c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") - file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" - out, _ := dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "314572800") - - c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") - file = "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" - out, _ = dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") -} diff --git a/integration-cli/docker_api_version_test.go b/integration-cli/docker_api_version_test.go deleted file mode 100644 index ccb1484190..0000000000 --- a/integration-cli/docker_api_version_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package main - -import ( - "encoding/json" - "net/http" - - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestGetVersion(c *check.C) { - status, body, err := sockRequest("GET", "/version", nil) - c.Assert(status, checker.Equals, http.StatusOK) - c.Assert(err, checker.IsNil) - - var v types.Version - - c.Assert(json.Unmarshal(body, &v), checker.IsNil) - - c.Assert(v.Version, checker.Equals, dockerversion.Version, check.Commentf("Version mismatch")) -} diff --git a/integration-cli/docker_api_volumes_test.go b/integration-cli/docker_api_volumes_test.go deleted file mode 100644 index 732271d02d..0000000000 --- a/integration-cli/docker_api_volumes_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package main - -import ( - "encoding/json" - "net/http" - "path/filepath" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestVolumesApiList(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - dockerCmd(c, "run", "-v", prefix+"/foo", "busybox") - - status, b, err := sockRequest("GET", "/volumes", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var volumes types.VolumesListResponse - c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) - - c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) -} - -func (s *DockerSuite) TestVolumesApiCreate(c *check.C) { - config := types.VolumeCreateRequest{ - Name: "test", - } - status, b, err := sockRequest("POST", "/volumes/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) - - var vol types.Volume - err = json.Unmarshal(b, &vol) - c.Assert(err, checker.IsNil) - - c.Assert(filepath.Base(filepath.Dir(vol.Mountpoint)), checker.Equals, config.Name) -} - -func (s *DockerSuite) TestVolumesApiRemove(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - dockerCmd(c, "run", "-v", prefix+"/foo", "--name=test", "busybox") - - status, b, err := sockRequest("GET", "/volumes", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK) - - var volumes types.VolumesListResponse - c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) - c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) - - v := 
volumes.Volumes[0] - status, _, err = sockRequest("DELETE", "/volumes/"+v.Name, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusConflict, check.Commentf("Should not be able to remove a volume that is in use")) - - dockerCmd(c, "rm", "-f", "test") - status, data, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent, check.Commentf(string(data))) - -} - -func (s *DockerSuite) TestVolumesApiInspect(c *check.C) { - config := types.VolumeCreateRequest{ - Name: "test", - } - status, b, err := sockRequest("POST", "/volumes/create", config) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusCreated, check.Commentf(string(b))) - - status, b, err = sockRequest("GET", "/volumes", nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) - - var volumes types.VolumesListResponse - c.Assert(json.Unmarshal(b, &volumes), checker.IsNil) - c.Assert(len(volumes.Volumes), checker.Equals, 1, check.Commentf("\n%v", volumes.Volumes)) - - var vol types.Volume - status, b, err = sockRequest("GET", "/volumes/"+config.Name, nil) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusOK, check.Commentf(string(b))) - c.Assert(json.Unmarshal(b, &vol), checker.IsNil) - c.Assert(vol.Name, checker.Equals, config.Name) -} diff --git a/integration-cli/docker_cli_attach_test.go b/integration-cli/docker_cli_attach_test.go deleted file mode 100644 index 24ec622b53..0000000000 --- a/integration-cli/docker_cli_attach_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package main - -import ( - "bufio" - "fmt" - "io" - "os/exec" - "runtime" - "strings" - "sync" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -const attachWait = 5 * time.Second - -func (s *DockerSuite) TestAttachMultipleAndRestart(c *check.C) { - testRequires(c, DaemonIsLinux) - - endGroup := &sync.WaitGroup{} - startGroup := &sync.WaitGroup{} - endGroup.Add(3) - startGroup.Add(3) - - err := waitForContainer("attacher", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 1; echo hello; done") - c.Assert(err, check.IsNil) - - startDone := make(chan struct{}) - endDone := make(chan struct{}) - - go func() { - endGroup.Wait() - close(endDone) - }() - - go func() { - startGroup.Wait() - close(startDone) - }() - - for i := 0; i < 3; i++ { - go func() { - cmd := exec.Command(dockerBinary, "attach", "attacher") - - defer func() { - cmd.Wait() - endGroup.Done() - }() - - out, err := cmd.StdoutPipe() - if err != nil { - c.Fatal(err) - } - - if err := cmd.Start(); err != nil { - c.Fatal(err) - } - - buf := make([]byte, 1024) - - if _, err := out.Read(buf); err != nil && err != io.EOF { - c.Fatal(err) - } - - startGroup.Done() - - if !strings.Contains(string(buf), "hello") { - c.Fatalf("unexpected output %s expected hello\n", string(buf)) - } - }() - } - - select { - case <-startDone: - case <-time.After(attachWait): - c.Fatalf("Attaches did not initialize properly") - } - - dockerCmd(c, "kill", "attacher") - - select { - case <-endDone: - case <-time.After(attachWait): - c.Fatalf("Attaches did not finish properly") - } -} - -func (s *DockerSuite) TestAttachTTYWithoutStdin(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") - - id := strings.TrimSpace(out) - c.Assert(waitRun(id), check.IsNil) - - done := make(chan error) - go func() { - defer close(done) - - cmd 
:= exec.Command(dockerBinary, "attach", id) - if _, err := cmd.StdinPipe(); err != nil { - done <- err - return - } - - expected := "the input device is not a TTY" - if runtime.GOOS == "windows" { - expected += ". If you are using mintty, try prefixing the command with 'winpty'" - } - if out, _, err := runCommandWithOutput(cmd); err == nil { - done <- fmt.Errorf("attach should have failed") - return - } else if !strings.Contains(out, expected) { - done <- fmt.Errorf("attach failed with error %q: expected %q", out, expected) - return - } - }() - - select { - case err := <-done: - c.Assert(err, check.IsNil) - case <-time.After(attachWait): - c.Fatal("attach is running but should have failed") - } -} - -func (s *DockerSuite) TestAttachDisconnect(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-di", "busybox", "/bin/cat") - id := strings.TrimSpace(out) - - cmd := exec.Command(dockerBinary, "attach", id) - stdin, err := cmd.StdinPipe() - if err != nil { - c.Fatal(err) - } - defer stdin.Close() - stdout, err := cmd.StdoutPipe() - c.Assert(err, check.IsNil) - defer stdout.Close() - c.Assert(cmd.Start(), check.IsNil) - defer cmd.Process.Kill() - - _, err = stdin.Write([]byte("hello\n")) - c.Assert(err, check.IsNil) - out, err = bufio.NewReader(stdout).ReadString('\n') - c.Assert(err, check.IsNil) - c.Assert(strings.TrimSpace(out), check.Equals, "hello") - - c.Assert(stdin.Close(), check.IsNil) - - // Expect container to still be running after stdin is closed - running := inspectField(c, id, "State.Running") - c.Assert(running, check.Equals, "true") -} - -func (s *DockerSuite) TestAttachPausedContainer(c *check.C) { - testRequires(c, DaemonIsLinux) // Containers cannot be paused on Windows - defer unpauseAllContainers() - dockerCmd(c, "run", "-d", "--name=test", "busybox", "top") - dockerCmd(c, "pause", "test") - out, _, err := dockerCmdWithError("attach", "test") - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "You cannot attach to a paused container, unpause it first") -} diff --git a/integration-cli/docker_cli_attach_unix_test.go b/integration-cli/docker_cli_attach_unix_test.go deleted file mode 100644 index 7af761d7a3..0000000000 --- a/integration-cli/docker_cli_attach_unix_test.go +++ /dev/null @@ -1,230 +0,0 @@ -// +build !windows - -package main - -import ( - "bufio" - "os/exec" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringid" - "github.com/go-check/check" - "github.com/kr/pty" -) - -// #9860 Make sure attach ends when container ends (with no errors) -func (s *DockerSuite) TestAttachClosedOnContainerStop(c *check.C) { - testRequires(c, SameHostDaemon) - - out, _ := dockerCmd(c, "run", "-dti", "busybox", "/bin/sh", "-c", `trap 'exit 0' SIGTERM; while true; do sleep 1; done`) - - id := strings.TrimSpace(out) - c.Assert(waitRun(id), check.IsNil) - - _, tty, err := pty.Open() - c.Assert(err, check.IsNil) - - attachCmd := exec.Command(dockerBinary, "attach", id) - attachCmd.Stdin = tty - attachCmd.Stdout = tty - attachCmd.Stderr = tty - err = attachCmd.Start() - c.Assert(err, check.IsNil) - - errChan := make(chan error) - go func() { - defer close(errChan) - // Container is waiting for us to signal it to stop - dockerCmd(c, "stop", id) - // And wait for the attach command to end - errChan <- attachCmd.Wait() - }() - - // Wait for the docker to end (should be done by the - // stop command in the go routine) - dockerCmd(c, "wait", id) - - select { - case err := 
<-errChan: - c.Assert(err, check.IsNil) - case <-time.After(attachWait): - c.Fatal("timed out without attach returning") - } - -} - -func (s *DockerSuite) TestAttachAfterDetach(c *check.C) { - - name := "detachtest" - - cpty, tty, err := pty.Open() - c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) - cmd := exec.Command(dockerBinary, "run", "-ti", "--name", name, "busybox") - cmd.Stdin = tty - cmd.Stdout = tty - cmd.Stderr = tty - - errChan := make(chan error) - go func() { - errChan <- cmd.Run() - close(errChan) - }() - - c.Assert(waitRun(name), check.IsNil) - - cpty.Write([]byte{16}) - time.Sleep(100 * time.Millisecond) - cpty.Write([]byte{17}) - - select { - case err := <-errChan: - c.Assert(err, check.IsNil) - case <-time.After(5 * time.Second): - c.Fatal("timeout while detaching") - } - - cpty, tty, err = pty.Open() - c.Assert(err, checker.IsNil, check.Commentf("Could not open pty: %v", err)) - - cmd = exec.Command(dockerBinary, "attach", name) - cmd.Stdin = tty - cmd.Stdout = tty - cmd.Stderr = tty - - err = cmd.Start() - c.Assert(err, checker.IsNil) - - bytes := make([]byte, 10) - var nBytes int - readErr := make(chan error, 1) - - go func() { - time.Sleep(500 * time.Millisecond) - cpty.Write([]byte("\n")) - time.Sleep(500 * time.Millisecond) - - nBytes, err = cpty.Read(bytes) - cpty.Close() - readErr <- err - }() - - select { - case err := <-readErr: - c.Assert(err, check.IsNil) - case <-time.After(2 * time.Second): - c.Fatal("timeout waiting for attach read") - } - - err = cmd.Wait() - c.Assert(err, checker.IsNil) - - c.Assert(string(bytes[:nBytes]), checker.Contains, "/ #") - -} - -// TestAttachDetach checks that attach in tty mode can be detached using the long container ID -func (s *DockerSuite) TestAttachDetach(c *check.C) { - out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), check.IsNil) - - cpty, tty, err := pty.Open() - c.Assert(err, check.IsNil) - defer cpty.Close() - - cmd := exec.Command(dockerBinary, "attach", id) - cmd.Stdin = tty - stdout, err := cmd.StdoutPipe() - c.Assert(err, check.IsNil) - defer stdout.Close() - err = cmd.Start() - c.Assert(err, check.IsNil) - c.Assert(waitRun(id), check.IsNil) - - _, err = cpty.Write([]byte("hello\n")) - c.Assert(err, check.IsNil) - out, err = bufio.NewReader(stdout).ReadString('\n') - c.Assert(err, check.IsNil) - c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) - - // escape sequence - _, err = cpty.Write([]byte{16}) - c.Assert(err, checker.IsNil) - time.Sleep(100 * time.Millisecond) - _, err = cpty.Write([]byte{17}) - c.Assert(err, checker.IsNil) - - ch := make(chan struct{}) - go func() { - cmd.Wait() - ch <- struct{}{} - }() - - running := inspectField(c, id, "State.Running") - c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) - - go func() { - dockerCmd(c, "kill", id) - }() - - select { - case <-ch: - case <-time.After(10 * time.Millisecond): - c.Fatal("timed out waiting for container to exit") - } - -} - -// TestAttachDetachTruncatedID checks that attach in tty mode can be detached -func (s *DockerSuite) TestAttachDetachTruncatedID(c *check.C) { - out, _ := dockerCmd(c, "run", "-itd", "busybox", "cat") - id := stringid.TruncateID(strings.TrimSpace(out)) - c.Assert(waitRun(id), check.IsNil) - - cpty, tty, err := pty.Open() - c.Assert(err, checker.IsNil) - defer cpty.Close() - - cmd := exec.Command(dockerBinary, "attach", id) - 
cmd.Stdin = tty - stdout, err := cmd.StdoutPipe() - c.Assert(err, checker.IsNil) - defer stdout.Close() - err = cmd.Start() - c.Assert(err, checker.IsNil) - - _, err = cpty.Write([]byte("hello\n")) - c.Assert(err, checker.IsNil) - out, err = bufio.NewReader(stdout).ReadString('\n') - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello', got %q", out)) - - // escape sequence - _, err = cpty.Write([]byte{16}) - c.Assert(err, checker.IsNil) - time.Sleep(100 * time.Millisecond) - _, err = cpty.Write([]byte{17}) - c.Assert(err, checker.IsNil) - - ch := make(chan struct{}) - go func() { - cmd.Wait() - ch <- struct{}{} - }() - - running := inspectField(c, id, "State.Running") - c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) - - go func() { - dockerCmd(c, "kill", id) - }() - - select { - case <-ch: - case <-time.After(10 * time.Millisecond): - c.Fatal("timed out waiting for container to exit") - } - -} diff --git a/integration-cli/docker_cli_authz_unix_test.go b/integration-cli/docker_cli_authz_unix_test.go deleted file mode 100644 index a78fac0619..0000000000 --- a/integration-cli/docker_cli_authz_unix_test.go +++ /dev/null @@ -1,477 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "path/filepath" - "strings" - - "bufio" - "bytes" - "os/exec" - "strconv" - "time" - - "net" - "net/http/httputil" - "net/url" - - "github.com/docker/docker/pkg/authorization" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/plugins" - "github.com/go-check/check" -) - -const ( - testAuthZPlugin = "authzplugin" - unauthorizedMessage = "User unauthorized authz plugin" - errorMessage = "something went wrong..." 
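// unauthorizedMessage and errorMessage above are asserted verbatim later in
// this file: the daemon is expected to relay them to the client as
// "Error response from daemon: authorization denied by plugin <name>: <msg>"
// and "... plugin <name> failed with error: <api>: <msg>" respectively.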
- containerListAPI = "/containers/json" -) - -var ( - alwaysAllowed = []string{"/_ping", "/info"} -) - -func init() { - check.Suite(&DockerAuthzSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerAuthzSuite struct { - server *httptest.Server - ds *DockerSuite - d *Daemon - ctrl *authorizationController -} - -type authorizationController struct { - reqRes authorization.Response // reqRes holds the plugin response to the initial client request - resRes authorization.Response // resRes holds the plugin response to the daemon response - psRequestCnt int // psRequestCnt counts the number of calls to list container request api - psResponseCnt int // psResponseCnt counts the number of calls to list containers response API - requestsURIs []string // requestsURIs stores all request URIs that are sent to the authorization controller - reqUser string - resUser string -} - -func (s *DockerAuthzSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) - s.ctrl = &authorizationController{} -} - -func (s *DockerAuthzSuite) TearDownTest(c *check.C) { - s.d.Stop() - s.ds.TearDownTest(c) - s.ctrl = nil -} - -func (s *DockerAuthzSuite) SetUpSuite(c *check.C) { - mux := http.NewServeMux() - s.server = httptest.NewServer(mux) - - mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { - b, err := json.Marshal(plugins.Manifest{Implements: []string{authorization.AuthZApiImplements}}) - c.Assert(err, check.IsNil) - w.Write(b) - }) - - mux.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) - c.Assert(err, check.IsNil) - authReq := authorization.Request{} - err = json.Unmarshal(body, &authReq) - c.Assert(err, check.IsNil) - - assertBody(c, authReq.RequestURI, authReq.RequestHeaders, authReq.RequestBody) - assertAuthHeaders(c, authReq.RequestHeaders) - - // Count only container list api - if strings.HasSuffix(authReq.RequestURI, containerListAPI) { - s.ctrl.psRequestCnt++ - } - - s.ctrl.requestsURIs = append(s.ctrl.requestsURIs, authReq.RequestURI) - - reqRes := s.ctrl.reqRes - if isAllowed(authReq.RequestURI) { - reqRes = authorization.Response{Allow: true} - } - if reqRes.Err != "" { - w.WriteHeader(http.StatusInternalServerError) - } - b, err := json.Marshal(reqRes) - c.Assert(err, check.IsNil) - s.ctrl.reqUser = authReq.User - w.Write(b) - }) - - mux.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) { - defer r.Body.Close() - body, err := ioutil.ReadAll(r.Body) - c.Assert(err, check.IsNil) - authReq := authorization.Request{} - err = json.Unmarshal(body, &authReq) - c.Assert(err, check.IsNil) - - assertBody(c, authReq.RequestURI, authReq.ResponseHeaders, authReq.ResponseBody) - assertAuthHeaders(c, authReq.ResponseHeaders) - - // Count only container list api - if strings.HasSuffix(authReq.RequestURI, containerListAPI) { - s.ctrl.psResponseCnt++ - } - resRes := s.ctrl.resRes - if isAllowed(authReq.RequestURI) { - resRes = authorization.Response{Allow: true} - } - if resRes.Err != "" { - w.WriteHeader(http.StatusInternalServerError) - } - b, err := json.Marshal(resRes) - c.Assert(err, check.IsNil) - s.ctrl.resUser = authReq.User - w.Write(b) - }) - - err := os.MkdirAll("/etc/docker/plugins", 0755) - c.Assert(err, checker.IsNil) - - fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", testAuthZPlugin) - err = ioutil.WriteFile(fileName, []byte(s.server.URL), 0644) - c.Assert(err, checker.IsNil) -} - -// check for always allowed endpoints to not inhibit test framework 
functions -func isAllowed(reqURI string) bool { - for _, endpoint := range alwaysAllowed { - if strings.HasSuffix(reqURI, endpoint) { - return true - } - } - return false -} - -// assertAuthHeaders validates authentication headers are removed -func assertAuthHeaders(c *check.C, headers map[string]string) error { - for k := range headers { - if strings.Contains(strings.ToLower(k), "auth") || strings.Contains(strings.ToLower(k), "x-registry") { - c.Errorf("Found authentication headers in request '%v'", headers) - } - } - return nil -} - -// assertBody asserts that body is removed for non text/json requests -func assertBody(c *check.C, requestURI string, headers map[string]string, body []byte) { - if strings.Contains(strings.ToLower(requestURI), "auth") && len(body) > 0 { - //return fmt.Errorf("Body included for authentication endpoint %s", string(body)) - c.Errorf("Body included for authentication endpoint %s", string(body)) - } - - for k, v := range headers { - if strings.EqualFold(k, "Content-Type") && strings.HasPrefix(v, "text/") || v == "application/json" { - return - } - } - if len(body) > 0 { - c.Errorf("Body included while it should not (Headers: '%v')", headers) - } -} - -func (s *DockerAuthzSuite) TearDownSuite(c *check.C) { - if s.server == nil { - return - } - - s.server.Close() - - err := os.RemoveAll("/etc/docker/plugins") - c.Assert(err, checker.IsNil) -} - -func (s *DockerAuthzSuite) TestAuthZPluginAllowRequest(c *check.C) { - // start the daemon and load busybox, --net=none build fails otherwise - // cause it needs to pull busybox - c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil) - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - c.Assert(s.d.LoadBusybox(), check.IsNil) - - // Ensure command successful - out, err := s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil) - - id := strings.TrimSpace(out) - assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") - assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", id)) - - out, err = s.d.Cmd("ps") - c.Assert(err, check.IsNil) - c.Assert(assertContainerList(out, []string{id}), check.Equals, true) - c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) - c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) -} - -func (s *DockerAuthzSuite) TestAuthZPluginTls(c *check.C) { - - const testDaemonHTTPSAddr = "tcp://localhost:4271" - // start the daemon and load busybox, --net=none build fails otherwise - // cause it needs to pull busybox - if err := s.d.Start( - "--authorization-plugin="+testAuthZPlugin, - "--tlsverify", - "--tlscacert", - "fixtures/https/ca.pem", - "--tlscert", - "fixtures/https/server-cert.pem", - "--tlskey", - "fixtures/https/server-key.pem", - "-H", testDaemonHTTPSAddr); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } - - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - - out, _ := dockerCmd( - c, - "--tlsverify", - "--tlscacert", "fixtures/https/ca.pem", - "--tlscert", "fixtures/https/client-cert.pem", - "--tlskey", "fixtures/https/client-key.pem", - "-H", - testDaemonHTTPSAddr, - "version", - ) - if !strings.Contains(out, "Server") { - c.Fatalf("docker version should return information of server side") - } - - c.Assert(s.ctrl.reqUser, check.Equals, "client") - c.Assert(s.ctrl.resUser, check.Equals, "client") -} - -func (s *DockerAuthzSuite) TestAuthZPluginDenyRequest(c *check.C) { - err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) - c.Assert(err, check.IsNil) - s.ctrl.reqRes.Allow = false - 
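// What the fake plugin returns for a denied request is just an
// authorization.Response with Allow set to false and a message. Stripped of
// the suite's bookkeeping, the deny path of the AuthZReq handler wired up in
// SetUpSuite reduces to roughly this (denyAllSketch is an illustrative name):
func denyAllSketch(w http.ResponseWriter, r *http.Request) {
    resp := authorization.Response{
        Allow: false,
        Msg:   unauthorizedMessage, // relayed to the client by the daemon
    }
    json.NewEncoder(w).Encode(resp) // the plugin protocol is plain JSON over HTTP
}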
-    s.ctrl.reqRes.Msg = unauthorizedMessage
-
-    // Ensure command is blocked
-    res, err := s.d.Cmd("ps")
-    c.Assert(err, check.NotNil)
-    c.Assert(s.ctrl.psRequestCnt, check.Equals, 1)
-    c.Assert(s.ctrl.psResponseCnt, check.Equals, 0)
-
-    // Ensure unauthorized message appears in response
-    c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage))
-}
-
-// TestAuthZPluginApiDenyResponse validates that when the authorization plugin denies the request, the status code is Forbidden
-func (s *DockerAuthzSuite) TestAuthZPluginApiDenyResponse(c *check.C) {
-    err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
-    c.Assert(err, check.IsNil)
-    s.ctrl.reqRes.Allow = false
-    s.ctrl.resRes.Msg = unauthorizedMessage
-
-    daemonURL, err := url.Parse(s.d.sock())
-    c.Assert(err, check.IsNil)
-
-    conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10)
-    c.Assert(err, check.IsNil)
-    client := httputil.NewClientConn(conn, nil)
-    req, err := http.NewRequest("GET", "/version", nil)
-    c.Assert(err, check.IsNil)
-    resp, err := client.Do(req)
-    c.Assert(err, check.IsNil)
-    c.Assert(resp.StatusCode, checker.Equals, http.StatusForbidden)
-}
-
-func (s *DockerAuthzSuite) TestAuthZPluginDenyResponse(c *check.C) {
-    err := s.d.Start("--authorization-plugin=" + testAuthZPlugin)
-    c.Assert(err, check.IsNil)
-    s.ctrl.reqRes.Allow = true
-    s.ctrl.resRes.Allow = false
-    s.ctrl.resRes.Msg = unauthorizedMessage
-
-    // Ensure command is blocked
-    res, err := s.d.Cmd("ps")
-    c.Assert(err, check.NotNil)
-    c.Assert(s.ctrl.psRequestCnt, check.Equals, 1)
-    c.Assert(s.ctrl.psResponseCnt, check.Equals, 1)
-
-    // Ensure unauthorized message appears in response
-    c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: authorization denied by plugin %s: %s\n", testAuthZPlugin, unauthorizedMessage))
-}
-
-// TestAuthZPluginAllowEventStream verifies that the event stream propagates correctly once the request has passed through the authorization plugin
-func (s *DockerAuthzSuite) TestAuthZPluginAllowEventStream(c *check.C) {
-    testRequires(c, DaemonIsLinux)
-
-    // start the daemon and load busybox to avoid pulling busybox from Docker Hub
-    c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin), check.IsNil)
-    s.ctrl.reqRes.Allow = true
-    s.ctrl.resRes.Allow = true
-    c.Assert(s.d.LoadBusybox(), check.IsNil)
-
-    startTime := strconv.FormatInt(daemonTime(c).Unix(), 10)
-    // Add another command to enable event pipelining
-    eventsCmd := exec.Command(dockerBinary, "--host", s.d.sock(), "events", "--since", startTime)
-    stdout, err := eventsCmd.StdoutPipe()
-    c.Assert(err, check.IsNil)
-
-    observer := eventObserver{
-        buffer:    new(bytes.Buffer),
-        command:   eventsCmd,
-        scanner:   bufio.NewScanner(stdout),
-        startTime: startTime,
-    }
-
-    err = observer.Start()
-    c.Assert(err, checker.IsNil)
-    defer observer.Stop()
-
-    // Create a container and wait for the creation events
-    out, err := s.d.Cmd("run", "-d", "busybox", "top")
-    c.Assert(err, check.IsNil, check.Commentf(out))
-    containerID := strings.TrimSpace(out)
-    c.Assert(s.d.waitRun(containerID), checker.IsNil)
-
-    events := map[string]chan bool{
-        "create": make(chan bool, 1),
-        "start":  make(chan bool, 1),
-    }
-
-    matcher := matchEventLine(containerID, "container", events)
-    processor := processEventMatch(events)
-    go observer.Match(matcher, processor)
-
-    // Ensure all events are received
-    for event, eventChannel := range events {
-        select {
-        case
<-time.After(30 * time.Second): - // Fail the test - observer.CheckEventError(c, containerID, event, matcher) - c.FailNow() - case <-eventChannel: - // Ignore, event received - } - } - - // Ensure both events and container endpoints are passed to the authorization plugin - assertURIRecorded(c, s.ctrl.requestsURIs, "/events") - assertURIRecorded(c, s.ctrl.requestsURIs, "/containers/create") - assertURIRecorded(c, s.ctrl.requestsURIs, fmt.Sprintf("/containers/%s/start", containerID)) -} - -func (s *DockerAuthzSuite) TestAuthZPluginErrorResponse(c *check.C) { - err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) - c.Assert(err, check.IsNil) - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Err = errorMessage - - // Ensure command is blocked - res, err := s.d.Cmd("ps") - c.Assert(err, check.NotNil) - - c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiResponse, errorMessage)) -} - -func (s *DockerAuthzSuite) TestAuthZPluginErrorRequest(c *check.C) { - err := s.d.Start("--authorization-plugin=" + testAuthZPlugin) - c.Assert(err, check.IsNil) - s.ctrl.reqRes.Err = errorMessage - - // Ensure command is blocked - res, err := s.d.Cmd("ps") - c.Assert(err, check.NotNil) - - c.Assert(res, check.Equals, fmt.Sprintf("Error response from daemon: plugin %s failed with error: %s: %s\n", testAuthZPlugin, authorization.AuthZApiRequest, errorMessage)) -} - -func (s *DockerAuthzSuite) TestAuthZPluginEnsureNoDuplicatePluginRegistration(c *check.C) { - c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil) - - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - - out, err := s.d.Cmd("ps") - c.Assert(err, check.IsNil, check.Commentf(out)) - - // assert plugin is only called once.. 
- c.Assert(s.ctrl.psRequestCnt, check.Equals, 1) - c.Assert(s.ctrl.psResponseCnt, check.Equals, 1) -} - -func (s *DockerAuthzSuite) TestAuthZPluginEnsureLoadImportWorking(c *check.C) { - c.Assert(s.d.Start("--authorization-plugin="+testAuthZPlugin, "--authorization-plugin="+testAuthZPlugin), check.IsNil) - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - c.Assert(s.d.LoadBusybox(), check.IsNil) - - tmp, err := ioutil.TempDir("", "test-authz-load-import") - c.Assert(err, check.IsNil) - defer os.RemoveAll(tmp) - - savedImagePath := filepath.Join(tmp, "save.tar") - - out, err := s.d.Cmd("save", "-o", savedImagePath, "busybox") - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = s.d.Cmd("load", "--input", savedImagePath) - c.Assert(err, check.IsNil, check.Commentf(out)) - - exportedImagePath := filepath.Join(tmp, "export.tar") - - out, err = s.d.Cmd("run", "-d", "--name", "testexport", "busybox") - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = s.d.Cmd("export", "-o", exportedImagePath, "testexport") - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = s.d.Cmd("import", exportedImagePath) - c.Assert(err, check.IsNil, check.Commentf(out)) -} - -func (s *DockerAuthzSuite) TestAuthZPluginHeader(c *check.C) { - c.Assert(s.d.Start("--debug", "--authorization-plugin="+testAuthZPlugin), check.IsNil) - s.ctrl.reqRes.Allow = true - s.ctrl.resRes.Allow = true - c.Assert(s.d.LoadBusybox(), check.IsNil) - - daemonURL, err := url.Parse(s.d.sock()) - - conn, err := net.DialTimeout(daemonURL.Scheme, daemonURL.Path, time.Second*10) - c.Assert(err, check.IsNil) - client := httputil.NewClientConn(conn, nil) - req, err := http.NewRequest("GET", "/version", nil) - c.Assert(err, check.IsNil) - resp, err := client.Do(req) - - c.Assert(err, check.IsNil) - c.Assert(resp.Header["Content-Type"][0], checker.Equals, "application/json") -} - -// assertURIRecorded verifies that the given URI was sent and recorded in the authz plugin -func assertURIRecorded(c *check.C, uris []string, uri string) { - var found bool - for _, u := range uris { - if strings.Contains(u, uri) { - found = true - break - } - } - if !found { - c.Fatalf("Expected to find URI '%s', recorded uris '%s'", uri, strings.Join(uris, ",")) - } -} diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go deleted file mode 100644 index ff1daf40e5..0000000000 --- a/integration-cli/docker_cli_build_test.go +++ /dev/null @@ -1,6962 +0,0 @@ -package main - -import ( - "archive/tar" - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "reflect" - "regexp" - "runtime" - "strconv" - "strings" - "text/template" - "time" - - "github.com/docker/docker/builder/dockerfile/command" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringutils" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestBuildJSONEmptyRun(c *check.C) { - name := "testbuildjsonemptyrun" - - _, err := buildImage( - name, - ` - FROM busybox - RUN [] - `, - true) - - if err != nil { - c.Fatal("error when dealing with a RUN statement with empty JSON array") - } - -} - -func (s *DockerSuite) TestBuildShCmdJSONEntrypoint(c *check.C) { - name := "testbuildshcmdjsonentrypoint" - - _, err := buildImage( - name, - ` - FROM busybox - ENTRYPOINT ["echo"] - CMD echo test - `, - true) - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", "--rm", name) - - if daemonPlatform == "windows" { - if 
!strings.Contains(out, "cmd /S /C echo test") { - c.Fatalf("CMD did not contain cmd /S /C echo test : %q", out) - } - } else { - if strings.TrimSpace(out) != "/bin/sh -c echo test" { - c.Fatalf("CMD did not contain /bin/sh -c : %q", out) - } - } - -} - -func (s *DockerSuite) TestBuildEnvironmentReplacementUser(c *check.C) { - // Windows does not support FROM scratch or the USER command - testRequires(c, DaemonIsLinux) - name := "testbuildenvironmentreplacement" - - _, err := buildImage(name, ` - FROM scratch - ENV user foo - USER ${user} - `, true) - if err != nil { - c.Fatal(err) - } - - res := inspectFieldJSON(c, name, "Config.User") - - if res != `"foo"` { - c.Fatal("User foo from environment not in Config.User on image") - } - -} - -func (s *DockerSuite) TestBuildEnvironmentReplacementVolume(c *check.C) { - name := "testbuildenvironmentreplacement" - - var volumePath string - - if daemonPlatform == "windows" { - volumePath = "c:/quux" - } else { - volumePath = "/quux" - } - - _, err := buildImage(name, ` - FROM `+minimalBaseImage()+` - ENV volume `+volumePath+` - VOLUME ${volume} - `, true) - if err != nil { - c.Fatal(err) - } - - res := inspectFieldJSON(c, name, "Config.Volumes") - - var volumes map[string]interface{} - - if err := json.Unmarshal([]byte(res), &volumes); err != nil { - c.Fatal(err) - } - - if _, ok := volumes[volumePath]; !ok { - c.Fatal("Volume " + volumePath + " from environment not in Config.Volumes on image") - } - -} - -func (s *DockerSuite) TestBuildEnvironmentReplacementExpose(c *check.C) { - // Windows does not support FROM scratch or the EXPOSE command - testRequires(c, DaemonIsLinux) - name := "testbuildenvironmentreplacement" - - _, err := buildImage(name, ` - FROM scratch - ENV port 80 - EXPOSE ${port} - ENV ports " 99 100 " - EXPOSE ${ports} - `, true) - if err != nil { - c.Fatal(err) - } - - res := inspectFieldJSON(c, name, "Config.ExposedPorts") - - var exposedPorts map[string]interface{} - - if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { - c.Fatal(err) - } - - exp := []int{80, 99, 100} - - for _, p := range exp { - tmp := fmt.Sprintf("%d/tcp", p) - if _, ok := exposedPorts[tmp]; !ok { - c.Fatalf("Exposed port %d from environment not in Config.ExposedPorts on image", p) - } - } - -} - -func (s *DockerSuite) TestBuildEnvironmentReplacementWorkdir(c *check.C) { - name := "testbuildenvironmentreplacement" - - _, err := buildImage(name, ` - FROM busybox - ENV MYWORKDIR /work - RUN mkdir ${MYWORKDIR} - WORKDIR ${MYWORKDIR} - `, true) - - if err != nil { - c.Fatal(err) - } - -} - -func (s *DockerSuite) TestBuildEnvironmentReplacementAddCopy(c *check.C) { - name := "testbuildenvironmentreplacement" - - ctx, err := fakeContext(` - FROM `+minimalBaseImage()+` - ENV baz foo - ENV quux bar - ENV dot . 
-    ENV fee fff
-    ENV gee ggg
-
-    ADD ${baz} ${dot}
-    COPY ${quux} ${dot}
-    ADD ${zzz:-${fee}} ${dot}
-    COPY ${zzz:-${gee}} ${dot}
-    `,
-        map[string]string{
-            "foo": "test1",
-            "bar": "test2",
-            "fff": "test3",
-            "ggg": "test4",
-        })
-
-    if err != nil {
-        c.Fatal(err)
-    }
-    defer ctx.Close()
-
-    if _, err := buildImageFromContext(name, ctx, true); err != nil {
-        c.Fatal(err)
-    }
-}
-
-func (s *DockerSuite) TestBuildEnvironmentReplacementEnv(c *check.C) {
-    // ENV expansions work differently in Windows
-    testRequires(c, DaemonIsLinux)
-    name := "testbuildenvironmentreplacement"
-
-    _, err := buildImage(name,
-        `
-    FROM busybox
-    ENV foo zzz
-    ENV bar ${foo}
-    ENV abc1='$foo'
-    ENV env1=$foo env2=${foo} env3="$foo" env4="${foo}"
-    RUN [ "$abc1" = '$foo' ] && (echo "$abc1" | grep -q foo)
-    ENV abc2="\$foo"
-    RUN [ "$abc2" = '$foo' ] && (echo "$abc2" | grep -q foo)
-    ENV abc3 '$foo'
-    RUN [ "$abc3" = '$foo' ] && (echo "$abc3" | grep -q foo)
-    ENV abc4 "\$foo"
-    RUN [ "$abc4" = '$foo' ] && (echo "$abc4" | grep -q foo)
-    `, true)
-
-    if err != nil {
-        c.Fatal(err)
-    }
-
-    res := inspectFieldJSON(c, name, "Config.Env")
-
-    envResult := []string{}
-
-    if err = unmarshalJSON([]byte(res), &envResult); err != nil {
-        c.Fatal(err)
-    }
-
-    found := false
-    envCount := 0
-
-    for _, env := range envResult {
-        parts := strings.SplitN(env, "=", 2)
-        if parts[0] == "bar" {
-            found = true
-            if parts[1] != "zzz" {
-                c.Fatalf("Could not find replaced var for env `bar`: got %q instead of `zzz`", parts[1])
-            }
-        } else if strings.HasPrefix(parts[0], "env") {
-            // env1 through env4 all expand $foo, so each must equal "zzz"
-            envCount++
-            if parts[1] != "zzz" {
-                c.Fatalf("%s should be 'zzz' but is instead %q", parts[0], parts[1])
-            }
-        }
-    }
-
-    if !found {
-        c.Fatal("Never found the `bar` env variable")
-    }
-
-    if envCount != 4 {
-        c.Fatalf("Didn't find all env vars - only saw %d\n%s", envCount, envResult)
-    }
-}
-
-func (s *DockerSuite) TestBuildHandleEscapes(c *check.C) {
-    // The volume paths used in this test are invalid on Windows
-    testRequires(c, DaemonIsLinux)
-    name := "testbuildhandleescapes"
-
-    _, err := buildImage(name,
-        `
-    FROM scratch
-    ENV FOO bar
-    VOLUME ${FOO}
-    `, true)
-
-    if err != nil {
-        c.Fatal(err)
-    }
-
-    var result map[string]map[string]struct{}
-
-    res := inspectFieldJSON(c, name, "Config.Volumes")
-
-    if err = unmarshalJSON([]byte(res), &result); err != nil {
-        c.Fatal(err)
-    }
-
-    if _, ok := result["bar"]; !ok {
-        c.Fatal("Could not find volume bar set from env foo in volumes table")
-    }
-
-    deleteImages(name)
-
-    _, err = buildImage(name,
-        `
-    FROM scratch
-    ENV FOO bar
-    VOLUME \${FOO}
-    `, true)
-
-    if err != nil {
-        c.Fatal(err)
-    }
-
-    res = inspectFieldJSON(c, name, "Config.Volumes")
-
-    if err = unmarshalJSON([]byte(res), &result); err != nil {
-        c.Fatal(err)
-    }
-
-    if _, ok := result["${FOO}"]; !ok {
-        c.Fatal("Could not find volume ${FOO} set from env foo in volumes table")
-    }
-
-    deleteImages(name)
-
-    // this test in particular provides *7* backslashes and expects 3 to come
-    // back: pairs of backslashes collapse to literal backslashes and the final
-    // unpaired one is swallowed escaping the $, it is just less obvious here
-    // because of all the character noise.
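// Worked summary of the three builds in this test (Dockerfile input on the
// left, key stored in Config.Volumes on the right):
//   VOLUME ${FOO}         -> bar         variable expanded
//   VOLUME \${FOO}        -> ${FOO}      escape swallowed, $ kept literal
//   VOLUME \\\\\\\${FOO}  -> \\\${FOO}   pairs collapse, final \ escapes the $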
- - _, err = buildImage(name, - ` - FROM scratch - ENV FOO bar - VOLUME \\\\\\\${FOO} - `, true) - - if err != nil { - c.Fatal(err) - } - - res = inspectFieldJSON(c, name, "Config.Volumes") - - if err = unmarshalJSON([]byte(res), &result); err != nil { - c.Fatal(err) - } - - if _, ok := result[`\\\${FOO}`]; !ok { - c.Fatal(`Could not find volume \\\${FOO} set from env foo in volumes table`, result) - } - -} - -func (s *DockerSuite) TestBuildOnBuildLowercase(c *check.C) { - name := "testbuildonbuildlowercase" - name2 := "testbuildonbuildlowercase2" - - _, err := buildImage(name, - ` - FROM busybox - onbuild run echo quux - `, true) - - if err != nil { - c.Fatal(err) - } - - _, out, err := buildImageWithOut(name2, fmt.Sprintf(` - FROM %s - `, name), true) - - if err != nil { - c.Fatal(err) - } - - if !strings.Contains(out, "quux") { - c.Fatalf("Did not receive the expected echo text, got %s", out) - } - - if strings.Contains(out, "ONBUILD ONBUILD") { - c.Fatalf("Got an ONBUILD ONBUILD error with no error: got %s", out) - } - -} - -func (s *DockerSuite) TestBuildEnvEscapes(c *check.C) { - // ENV expansions work differently in Windows - testRequires(c, DaemonIsLinux) - name := "testbuildenvescapes" - _, err := buildImage(name, - ` - FROM busybox - ENV TEST foo - CMD echo \$ - `, - true) - - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", "-t", name) - - if strings.TrimSpace(out) != "$" { - c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) - } - -} - -func (s *DockerSuite) TestBuildEnvOverwrite(c *check.C) { - // ENV expansions work differently in Windows - testRequires(c, DaemonIsLinux) - name := "testbuildenvoverwrite" - - _, err := buildImage(name, - ` - FROM busybox - ENV TEST foo - CMD echo ${TEST} - `, - true) - - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", "-e", "TEST=bar", "-t", name) - - if strings.TrimSpace(out) != "bar" { - c.Fatalf("Env TEST was not overwritten with bar when foo was supplied to dockerfile: was %q", strings.TrimSpace(out)) - } - -} - -func (s *DockerSuite) TestBuildOnBuildForbiddenMaintainerInSourceImage(c *check.C) { - name := "testbuildonbuildforbiddenmaintainerinsourceimage" - - out, _ := dockerCmd(c, "create", "busybox", "true") - - cleanedContainerID := strings.TrimSpace(out) - - dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"MAINTAINER docker.io\"]}", cleanedContainerID, "onbuild") - - _, err := buildImage(name, - `FROM onbuild`, - true) - if err != nil { - if !strings.Contains(err.Error(), "maintainer isn't allowed as an ONBUILD trigger") { - c.Fatalf("Wrong error %v, must be about MAINTAINER and ONBUILD in source image", err) - } - } else { - c.Fatal("Error must not be nil") - } - -} - -func (s *DockerSuite) TestBuildOnBuildForbiddenFromInSourceImage(c *check.C) { - name := "testbuildonbuildforbiddenfrominsourceimage" - - out, _ := dockerCmd(c, "create", "busybox", "true") - - cleanedContainerID := strings.TrimSpace(out) - - dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"FROM busybox\"]}", cleanedContainerID, "onbuild") - - _, err := buildImage(name, - `FROM onbuild`, - true) - if err != nil { - if !strings.Contains(err.Error(), "from isn't allowed as an ONBUILD trigger") { - c.Fatalf("Wrong error %v, must be about FROM and ONBUILD in source image", err) - } - } else { - c.Fatal("Error must not be nil") - } - -} - -func (s *DockerSuite) TestBuildOnBuildForbiddenChainedInSourceImage(c *check.C) { - name := 
"testbuildonbuildforbiddenchainedinsourceimage" - - out, _ := dockerCmd(c, "create", "busybox", "true") - - cleanedContainerID := strings.TrimSpace(out) - - dockerCmd(c, "commit", "--run", "{\"OnBuild\":[\"ONBUILD RUN ls\"]}", cleanedContainerID, "onbuild") - - _, err := buildImage(name, - `FROM onbuild`, - true) - if err != nil { - if !strings.Contains(err.Error(), "Chaining ONBUILD via `ONBUILD ONBUILD` isn't allowed") { - c.Fatalf("Wrong error %v, must be about chaining ONBUILD in source image", err) - } - } else { - c.Fatal("Error must not be nil") - } - -} - -func (s *DockerSuite) TestBuildOnBuildCmdEntrypointJSON(c *check.C) { - name1 := "onbuildcmd" - name2 := "onbuildgenerated" - - _, err := buildImage(name1, ` -FROM busybox -ONBUILD CMD ["hello world"] -ONBUILD ENTRYPOINT ["echo"] -ONBUILD RUN ["true"]`, - false) - - if err != nil { - c.Fatal(err) - } - - _, err = buildImage(name2, fmt.Sprintf(`FROM %s`, name1), false) - - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", name2) - - if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { - c.Fatalf("did not get echo output from onbuild. Got: %q", out) - } - -} - -func (s *DockerSuite) TestBuildOnBuildEntrypointJSON(c *check.C) { - name1 := "onbuildcmd" - name2 := "onbuildgenerated" - - _, err := buildImage(name1, ` -FROM busybox -ONBUILD ENTRYPOINT ["echo"]`, - false) - - if err != nil { - c.Fatal(err) - } - - _, err = buildImage(name2, fmt.Sprintf("FROM %s\nCMD [\"hello world\"]\n", name1), false) - - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", name2) - - if !regexp.MustCompile(`(?m)^hello world`).MatchString(out) { - c.Fatal("got malformed output from onbuild", out) - } - -} - -func (s *DockerSuite) TestBuildCacheAdd(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet - name := "testbuildtwoimageswithadd" - server, err := fakeStorage(map[string]string{ - "robots.txt": "hello", - "index.html": "world", - }) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - if _, err := buildImage(name, - fmt.Sprintf(`FROM scratch - ADD %s/robots.txt /`, server.URL()), - true); err != nil { - c.Fatal(err) - } - if err != nil { - c.Fatal(err) - } - deleteImages(name) - _, out, err := buildImageWithOut(name, - fmt.Sprintf(`FROM scratch - ADD %s/index.html /`, server.URL()), - true) - if err != nil { - c.Fatal(err) - } - if strings.Contains(out, "Using cache") { - c.Fatal("2nd build used cache on ADD, it shouldn't") - } - -} - -func (s *DockerSuite) TestBuildLastModified(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet - name := "testbuildlastmodified" - - server, err := fakeStorage(map[string]string{ - "file": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - var out, out2 string - - dFmt := `FROM busybox -ADD %s/file / -RUN ls -le /file` - - dockerfile := fmt.Sprintf(dFmt, server.URL()) - - if _, out, err = buildImageWithOut(name, dockerfile, false); err != nil { - c.Fatal(err) - } - - originMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out) - // Make sure our regexp is correct - if strings.Index(originMTime, "/file") < 0 { - c.Fatalf("Missing ls info on 'file':\n%s", out) - } - - // Build it again and make sure the mtime of the file didn't change. 
- // Wait a few seconds to make sure the time changed enough to notice - time.Sleep(2 * time.Second) - - if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { - c.Fatal(err) - } - - newMTime := regexp.MustCompile(`root.*/file.*\n`).FindString(out2) - if newMTime != originMTime { - c.Fatalf("MTime changed:\nOrigin:%s\nNew:%s", originMTime, newMTime) - } - - // Now 'touch' the file and make sure the timestamp DID change this time - // Create a new fakeStorage instead of just using Add() to help windows - server, err = fakeStorage(map[string]string{ - "file": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - dockerfile = fmt.Sprintf(dFmt, server.URL()) - - if _, out2, err = buildImageWithOut(name, dockerfile, false); err != nil { - c.Fatal(err) - } - - newMTime = regexp.MustCompile(`root.*/file.*\n`).FindString(out2) - if newMTime == originMTime { - c.Fatalf("MTime didn't change:\nOrigin:%s\nNew:%s", originMTime, newMTime) - } - -} - -func (s *DockerSuite) TestBuildSixtySteps(c *check.C) { - testRequires(c, DaemonIsLinux) // TODO Windows: This test passes on Windows, - // but currently adds a disproportionate amount of time for the value it has. - // Removing it from Windows CI for now, but this will be revisited in the - // TP5 timeframe when perf is better. - name := "foobuildsixtysteps" - - ctx, err := fakeContext("FROM "+minimalBaseImage()+"\n"+strings.Repeat("ADD foo /\n", 60), - map[string]string{ - "foo": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildAddSingleFileToRoot(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddimg" - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio /exists -ADD test_file / -RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -// Issue #3960: "ADD src ." 
hangs -func (s *DockerSuite) TestBuildAddSingleFileToWorkdir(c *check.C) { - name := "testaddsinglefiletoworkdir" - ctx, err := fakeContext(`FROM busybox -ADD test_file .`, - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - errChan := make(chan error) - go func() { - _, err := buildImageFromContext(name, ctx, true) - errChan <- err - close(errChan) - }() - select { - case <-time.After(15 * time.Second): - c.Fatal("Build with adding to workdir timed out") - case err := <-errChan: - c.Assert(err, check.IsNil) - } -} - -func (s *DockerSuite) TestBuildAddSingleFileToExistDir(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddsinglefiletoexistdir" - ctx, err := fakeContext(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -ADD test_file /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCopyAddMultipleFiles(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - server, err := fakeStorage(map[string]string{ - "robots.txt": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - name := "testcopymultiplefilestofile" - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -COPY test_file1 test_file2 /exists/ -ADD test_file3 test_file4 %s/robots.txt /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file1 | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/test_file2 | awk '{print $3":"$4}') = 'root:root' ] - -RUN [ $(ls -l /exists/test_file3 | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/test_file4 | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/robots.txt | awk '{print $3":"$4}') = 'root:root' ] - -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -`, server.URL()), - map[string]string{ - "test_file1": "test1", - "test_file2": "test2", - "test_file3": "test3", - "test_file4": "test4", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -// This test is mainly for user namespaces to verify that new directories -// are created as the remapped root uid/gid pair -func (s *DockerSuite) TestBuildAddToNewDestination(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddtonewdest" - ctx, err := fakeContext(`FROM busybox -ADD . 
/new_dir -RUN ls -l / -RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, - map[string]string{ - "test_dir/test_file": "test file", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -// This test is mainly for user namespaces to verify that new directories -// are created as the remapped root uid/gid pair -func (s *DockerSuite) TestBuildCopyToNewParentDirectory(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopytonewdir" - ctx, err := fakeContext(`FROM busybox -COPY test_dir /new_dir -RUN ls -l /new_dir -RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, - map[string]string{ - "test_dir/test_file": "test file", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -// This test is mainly for user namespaces to verify that new directories -// are created as the remapped root uid/gid pair -func (s *DockerSuite) TestBuildWorkdirIsContainerRoot(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testworkdirownership" - if _, err := buildImage(name, `FROM busybox -WORKDIR /new_dir -RUN ls -l / -RUN [ $(ls -l / | grep new_dir | awk '{print $3":"$4}') = 'root:root' ]`, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildAddFileWithWhitespace(c *check.C) { - testRequires(c, DaemonIsLinux) // Not currently passing on Windows - name := "testaddfilewithwhitespace" - ctx, err := fakeContext(`FROM busybox -RUN mkdir "/test dir" -RUN mkdir "/test_dir" -ADD [ "test file1", "/test_file1" ] -ADD [ "test_file2", "/test file2" ] -ADD [ "test file3", "/test file3" ] -ADD [ "test dir/test_file4", "/test_dir/test_file4" ] -ADD [ "test_dir/test_file5", "/test dir/test_file5" ] -ADD [ "test dir/test_file6", "/test dir/test_file6" ] -RUN [ $(cat "/test_file1") = 'test1' ] -RUN [ $(cat "/test file2") = 'test2' ] -RUN [ $(cat "/test file3") = 'test3' ] -RUN [ $(cat "/test_dir/test_file4") = 'test4' ] -RUN [ $(cat "/test dir/test_file5") = 'test5' ] -RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, - map[string]string{ - "test file1": "test1", - "test_file2": "test2", - "test file3": "test3", - "test dir/test_file4": "test4", - "test_dir/test_file5": "test5", - "test dir/test_file6": "test6", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCopyFileWithWhitespace(c *check.C) { - testRequires(c, DaemonIsLinux) // Not currently passing on Windows - name := "testcopyfilewithwhitespace" - ctx, err := fakeContext(`FROM busybox -RUN mkdir "/test dir" -RUN mkdir "/test_dir" -COPY [ "test file1", "/test_file1" ] -COPY [ "test_file2", "/test file2" ] -COPY [ "test file3", "/test file3" ] -COPY [ "test dir/test_file4", "/test_dir/test_file4" ] -COPY [ "test_dir/test_file5", "/test dir/test_file5" ] -COPY [ "test dir/test_file6", "/test dir/test_file6" ] -RUN [ $(cat "/test_file1") = 'test1' ] -RUN [ $(cat "/test file2") = 'test2' ] -RUN [ $(cat "/test file3") = 'test3' ] -RUN [ $(cat "/test_dir/test_file4") = 'test4' ] -RUN [ $(cat "/test dir/test_file5") = 'test5' ] -RUN [ $(cat "/test dir/test_file6") = 'test6' ]`, - map[string]string{ - "test file1": "test1", - "test_file2": "test2", - "test file3": "test3", - "test dir/test_file4": "test4", - 
"test_dir/test_file5": "test5", - "test dir/test_file6": "test6", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCopyWildcard(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet - name := "testcopywildcard" - server, err := fakeStorage(map[string]string{ - "robots.txt": "hello", - "index.html": "world", - }) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox - COPY file*.txt /tmp/ - RUN ls /tmp/file1.txt /tmp/file2.txt - RUN mkdir /tmp1 - COPY dir* /tmp1/ - RUN ls /tmp1/dirt /tmp1/nested_file /tmp1/nested_dir/nest_nest_file - RUN mkdir /tmp2 - ADD dir/*dir %s/robots.txt /tmp2/ - RUN ls /tmp2/nest_nest_file /tmp2/robots.txt - `, server.URL()), - map[string]string{ - "file1.txt": "test1", - "file2.txt": "test2", - "dir/nested_file": "nested file", - "dir/nested_dir/nest_nest_file": "2 times nested", - "dirt": "dirty", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - - // Now make sure we use a cache the 2nd time - id2, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - - if id1 != id2 { - c.Fatal("didn't use the cache") - } - -} - -func (s *DockerSuite) TestBuildCopyWildcardInName(c *check.C) { - name := "testcopywildcardinname" - ctx, err := fakeContext(`FROM busybox - COPY *.txt /tmp/ - RUN [ "$(cat /tmp/\*.txt)" = 'hi there' ] - `, map[string]string{"*.txt": "hi there"}) - - if err != nil { - // Normally we would do c.Fatal(err) here but given that - // the odds of this failing are so rare, it must be because - // the OS we're running the client on doesn't support * in - // filenames (like windows). So, instead of failing the test - // just let it pass. Then we don't need to explicitly - // say which OSs this works on or not. - return - } - defer ctx.Close() - - _, err = buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatalf("should have built: %q", err) - } -} - -func (s *DockerSuite) TestBuildCopyWildcardCache(c *check.C) { - name := "testcopywildcardcache" - ctx, err := fakeContext(`FROM busybox - COPY file1.txt /tmp/`, - map[string]string{ - "file1.txt": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - - // Now make sure we use a cache the 2nd time even with wild cards. 
- // Use the same context so the file is the same and the checksum will match - ctx.Add("Dockerfile", `FROM busybox - COPY file*.txt /tmp/`) - - id2, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - - if id1 != id2 { - c.Fatal("didn't use the cache") - } - -} - -func (s *DockerSuite) TestBuildAddSingleFileToNonExistingDir(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddsinglefiletononexistingdir" - ctx, err := fakeContext(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio /exists -ADD test_file /test_dir/ -RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - -} - -func (s *DockerSuite) TestBuildAddDirContentToRoot(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testadddircontenttoroot" - ctx, err := fakeContext(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio exists -ADD test_dir / -RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildAddDirContentToExistingDir(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testadddircontenttoexistingdir" - ctx, err := fakeContext(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -ADD test_dir/ /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildAddWholeDirToRoot(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddwholedirtoroot" - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio exists -ADD test_dir /test_dir -RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), - map[string]string{ - 
"test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -// Testing #5941 -func (s *DockerSuite) TestBuildAddEtcToRoot(c *check.C) { - name := "testaddetctoroot" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` -ADD . /`, - map[string]string{ - "etc/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -// Testing #9401 -func (s *DockerSuite) TestBuildAddPreservesFilesSpecialBits(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testaddpreservesfilesspecialbits" - ctx, err := fakeContext(`FROM busybox -ADD suidbin /usr/bin/suidbin -RUN chmod 4755 /usr/bin/suidbin -RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ] -ADD ./data/ / -RUN [ $(ls -l /usr/bin/suidbin | awk '{print $1}') = '-rwsr-xr-x' ]`, - map[string]string{ - "suidbin": "suidbin", - "/data/usr/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCopySingleFileToRoot(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopysinglefiletoroot" - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio /exists -COPY test_file / -RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_file | awk '{print $1}') = '%s' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -// Issue #3960: "ADD src ." 
hangs - adapted for COPY -func (s *DockerSuite) TestBuildCopySingleFileToWorkdir(c *check.C) { - name := "testcopysinglefiletoworkdir" - ctx, err := fakeContext(`FROM busybox -COPY test_file .`, - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - errChan := make(chan error) - go func() { - _, err := buildImageFromContext(name, ctx, true) - errChan <- err - close(errChan) - }() - select { - case <-time.After(15 * time.Second): - c.Fatal("Build with adding to workdir timed out") - case err := <-errChan: - c.Assert(err, check.IsNil) - } -} - -func (s *DockerSuite) TestBuildCopySingleFileToExistDir(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopysinglefiletoexistdir" - ctx, err := fakeContext(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -COPY test_file /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCopySingleFileToNonExistDir(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopysinglefiletononexistdir" - ctx, err := fakeContext(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio /exists -COPY test_file /test_dir/ -RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCopyDirContentToRoot(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopydircontenttoroot" - ctx, err := fakeContext(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio exists -COPY test_dir / -RUN [ $(ls -l /test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCopyDirContentToExistDir(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopydircontenttoexistdir" - ctx, err := fakeContext(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN mkdir /exists -RUN touch /exists/exists_file -RUN chown -R dockerio.dockerio /exists -COPY test_dir/ /exists/ -RUN [ $(ls -l / | grep exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ 
$(ls -l /exists/exists_file | awk '{print $3":"$4}') = 'dockerio:dockerio' ] -RUN [ $(ls -l /exists/test_file | awk '{print $3":"$4}') = 'root:root' ]`, - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCopyWholeDirToRoot(c *check.C) { - testRequires(c, DaemonIsLinux) // Linux specific test - name := "testcopywholedirtoroot" - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd -RUN echo 'dockerio:x:1001:' >> /etc/group -RUN touch /exists -RUN chown dockerio.dockerio exists -COPY test_dir /test_dir -RUN [ $(ls -l / | grep test_dir | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l / | grep test_dir | awk '{print $1}') = 'drwxr-xr-x' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $3":"$4}') = 'root:root' ] -RUN [ $(ls -l /test_dir/test_file | awk '{print $1}') = '%s' ] -RUN [ $(ls -l /exists | awk '{print $3":"$4}') = 'dockerio:dockerio' ]`, expectedFileChmod), - map[string]string{ - "test_dir/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCopyEtcToRoot(c *check.C) { - name := "testcopyetctoroot" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` -COPY . /`, - map[string]string{ - "etc/test_file": "test1", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildAddBadLinks(c *check.C) { - testRequires(c, DaemonIsLinux) // Not currently working on Windows - - dockerfile := ` - FROM scratch - ADD links.tar / - ADD foo.txt /symlink/ - ` - targetFile := "foo.txt" - var ( - name = "test-link-absolute" - ) - ctx, err := fakeContext(dockerfile, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - tempDir, err := ioutil.TempDir("", "test-link-absolute-temp-") - if err != nil { - c.Fatalf("failed to create temporary directory: %s", tempDir) - } - defer os.RemoveAll(tempDir) - - var symlinkTarget string - if runtime.GOOS == "windows" { - var driveLetter string - if abs, err := filepath.Abs(tempDir); err != nil { - c.Fatal(err) - } else { - driveLetter = abs[:1] - } - tempDirWithoutDrive := tempDir[2:] - symlinkTarget = fmt.Sprintf(`%s:\..\..\..\..\..\..\..\..\..\..\..\..%s`, driveLetter, tempDirWithoutDrive) - } else { - symlinkTarget = fmt.Sprintf("/../../../../../../../../../../../..%s", tempDir) - } - - tarPath := filepath.Join(ctx.Dir, "links.tar") - nonExistingFile := filepath.Join(tempDir, targetFile) - fooPath := filepath.Join(ctx.Dir, targetFile) - - tarOut, err := os.Create(tarPath) - if err != nil { - c.Fatal(err) - } - - tarWriter := tar.NewWriter(tarOut) - - header := &tar.Header{ - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: symlinkTarget, - Mode: 0755, - Uid: 0, - Gid: 0, - } - - err = tarWriter.WriteHeader(header) - if err != nil { - c.Fatal(err) - } - - tarWriter.Close() - tarOut.Close() - - foo, err := os.Create(fooPath) - if err != nil { - c.Fatal(err) - } - defer foo.Close() - - if _, err := foo.WriteString("test"); err != nil { - c.Fatal(err) - } - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - - if _, err := os.Stat(nonExistingFile); err == nil || err != nil 
&& !os.IsNotExist(err) { - c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) - } - -} - -func (s *DockerSuite) TestBuildAddBadLinksVolume(c *check.C) { - testRequires(c, DaemonIsLinux) // ln not implemented on Windows busybox - const ( - dockerfileTemplate = ` - FROM busybox - RUN ln -s /../../../../../../../../%s /x - VOLUME /x - ADD foo.txt /x/` - targetFile = "foo.txt" - ) - var ( - name = "test-link-absolute-volume" - dockerfile = "" - ) - - tempDir, err := ioutil.TempDir("", "test-link-absolute-volume-temp-") - if err != nil { - c.Fatalf("failed to create temporary directory: %s", tempDir) - } - defer os.RemoveAll(tempDir) - - dockerfile = fmt.Sprintf(dockerfileTemplate, tempDir) - nonExistingFile := filepath.Join(tempDir, targetFile) - - ctx, err := fakeContext(dockerfile, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - fooPath := filepath.Join(ctx.Dir, targetFile) - - foo, err := os.Create(fooPath) - if err != nil { - c.Fatal(err) - } - defer foo.Close() - - if _, err := foo.WriteString("test"); err != nil { - c.Fatal(err) - } - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - - if _, err := os.Stat(nonExistingFile); err == nil || err != nil && !os.IsNotExist(err) { - c.Fatalf("%s shouldn't have been written and it shouldn't exist", nonExistingFile) - } - -} - -// Issue #5270 - ensure we throw a better error than "unexpected EOF" -// when we can't access files in the context. -func (s *DockerSuite) TestBuildWithInaccessibleFilesInContext(c *check.C) { - testRequires(c, DaemonIsLinux, UnixCli) // test uses chown/chmod: not available on windows - - { - name := "testbuildinaccessiblefiles" - ctx, err := fakeContext("FROM scratch\nADD . /foo/", map[string]string{"fileWithoutReadAccess": "foo"}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - // This is used to ensure we detect inaccessible files early during build in the cli client - pathToFileWithoutReadAccess := filepath.Join(ctx.Dir, "fileWithoutReadAccess") - - if err = os.Chown(pathToFileWithoutReadAccess, 0, 0); err != nil { - c.Fatalf("failed to chown file to root: %s", err) - } - if err = os.Chmod(pathToFileWithoutReadAccess, 0700); err != nil { - c.Fatalf("failed to chmod file to 700: %s", err) - } - buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) - buildCmd.Dir = ctx.Dir - out, _, err := runCommandWithOutput(buildCmd) - if err == nil { - c.Fatalf("build should have failed: %s %s", err, out) - } - - // check if we've detected the failure before we started building - if !strings.Contains(out, "no permission to read from ") { - c.Fatalf("output should've contained the string: no permission to read from but contained: %s", out) - } - - if !strings.Contains(out, "Error checking context") { - c.Fatalf("output should've contained the string: Error checking context") - } - } - { - name := "testbuildinaccessibledirectory" - ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", map[string]string{"directoryWeCantStat/bar": "foo"}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - // This is used to ensure we detect inaccessible directories early during build in the cli client - pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") - pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") - - if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { - c.Fatalf("failed to chown directory to root: %s", err) - } - if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { - c.Fatalf("failed to chmod directory to 444: %s", err) - } - if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { - c.Fatalf("failed to chmod file to 700: %s", err) - } - - buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) - buildCmd.Dir = ctx.Dir - out, _, err := runCommandWithOutput(buildCmd) - if err == nil { - c.Fatalf("build should have failed: %s %s", err, out) - } - - // check if we've detected the failure before we started building - if !strings.Contains(out, "can't stat") { - c.Fatalf("output should've contained the string: can't access %s", out) - } - - if !strings.Contains(out, "Error checking context") { - c.Fatalf("output should've contained the string: Error checking context\ngot:%s", out) - } - - } - { - name := "testlinksok" - ctx, err := fakeContext("FROM scratch\nADD . /foo/", nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - target := "../../../../../../../../../../../../../../../../../../../azA" - if err := os.Symlink(filepath.Join(ctx.Dir, "g"), target); err != nil { - c.Fatal(err) - } - defer os.Remove(target) - // This is used to ensure we don't follow links when checking if everything in the context is accessible - // This test doesn't require that we run commands as an unprivileged user - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - } - { - name := "testbuildignoredinaccessible" - ctx, err := fakeContext("FROM scratch\nADD . 
/foo/", - map[string]string{ - "directoryWeCantStat/bar": "foo", - ".dockerignore": "directoryWeCantStat", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - // This is used to ensure we don't try to add inaccessible files when they are ignored by a .dockerignore pattern - pathToDirectoryWithoutReadAccess := filepath.Join(ctx.Dir, "directoryWeCantStat") - pathToFileInDirectoryWithoutReadAccess := filepath.Join(pathToDirectoryWithoutReadAccess, "bar") - if err = os.Chown(pathToDirectoryWithoutReadAccess, 0, 0); err != nil { - c.Fatalf("failed to chown directory to root: %s", err) - } - if err = os.Chmod(pathToDirectoryWithoutReadAccess, 0444); err != nil { - c.Fatalf("failed to chmod directory to 755: %s", err) - } - if err = os.Chmod(pathToFileInDirectoryWithoutReadAccess, 0700); err != nil { - c.Fatalf("failed to chmod file to 444: %s", err) - } - - buildCmd := exec.Command("su", "unprivilegeduser", "-c", fmt.Sprintf("%s build -t %s .", dockerBinary, name)) - buildCmd.Dir = ctx.Dir - if out, _, err := runCommandWithOutput(buildCmd); err != nil { - c.Fatalf("build should have worked: %s %s", err, out) - } - - } -} - -func (s *DockerSuite) TestBuildForceRm(c *check.C) { - containerCountBefore, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - name := "testbuildforcerm" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - RUN true - RUN thiswillfail`, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - dockerCmdInDir(c, ctx.Dir, "build", "-t", name, "--force-rm", ".") - - containerCountAfter, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - - if containerCountBefore != containerCountAfter { - c.Fatalf("--force-rm shouldn't have left containers behind") - } - -} - -func (s *DockerSuite) TestBuildRm(c *check.C) { - name := "testbuildrm" - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - ADD foo / - ADD foo /`, map[string]string{"foo": "bar"}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - { - containerCountBefore, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm", "-t", name, ".") - - if err != nil { - c.Fatal("failed to build the image", out) - } - - containerCountAfter, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - - if containerCountBefore != containerCountAfter { - c.Fatalf("-rm shouldn't have left containers behind") - } - deleteImages(name) - } - - { - containerCountBefore, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name, ".") - - if err != nil { - c.Fatal("failed to build the image", out) - } - - containerCountAfter, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - - if containerCountBefore != containerCountAfter { - c.Fatalf("--rm shouldn't have left containers behind") - } - deleteImages(name) - } - - { - containerCountBefore, err := getContainerCount() - if err != nil { - c.Fatalf("failed to get the container count: %s", err) - } - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--rm=false", "-t", name, ".") - - if err != nil { - c.Fatal("failed to build the image", out) - } - - containerCountAfter, err := getContainerCount() - if err != nil { - 
c.Fatalf("failed to get the container count: %s", err) - } - - if containerCountBefore == containerCountAfter { - c.Fatalf("--rm=false should have left containers behind") - } - deleteImages(name) - - } - -} - -func (s *DockerSuite) TestBuildWithVolumes(c *check.C) { - testRequires(c, DaemonIsLinux) // Invalid volume paths on Windows - var ( - result map[string]map[string]struct{} - name = "testbuildvolumes" - emptyMap = make(map[string]struct{}) - expected = map[string]map[string]struct{}{ - "/test1": emptyMap, - "/test2": emptyMap, - "/test3": emptyMap, - "/test4": emptyMap, - "/test5": emptyMap, - "/test6": emptyMap, - "[/test7": emptyMap, - "/test8]": emptyMap, - } - ) - _, err := buildImage(name, - `FROM scratch - VOLUME /test1 - VOLUME /test2 - VOLUME /test3 /test4 - VOLUME ["/test5", "/test6"] - VOLUME [/test7 /test8] - `, - true) - if err != nil { - c.Fatal(err) - } - res := inspectFieldJSON(c, name, "Config.Volumes") - - err = unmarshalJSON([]byte(res), &result) - if err != nil { - c.Fatal(err) - } - - equal := reflect.DeepEqual(&result, &expected) - - if !equal { - c.Fatalf("Volumes %s, expected %s", result, expected) - } - -} - -func (s *DockerSuite) TestBuildMaintainer(c *check.C) { - name := "testbuildmaintainer" - - expected := "dockerio" - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - MAINTAINER dockerio`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Author") - if res != expected { - c.Fatalf("Maintainer %s, expected %s", res, expected) - } -} - -func (s *DockerSuite) TestBuildUser(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuilduser" - expected := "dockerio" - _, err := buildImage(name, - `FROM busybox - RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd - USER dockerio - RUN [ $(whoami) = 'dockerio' ]`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.User") - if res != expected { - c.Fatalf("User %s, expected %s", res, expected) - } -} - -func (s *DockerSuite) TestBuildRelativeWorkdir(c *check.C) { - name := "testbuildrelativeworkdir" - - var ( - expected1 string - expected2 string - expected3 string - expected4 string - expectedFinal string - ) - - if daemonPlatform == "windows" { - expected1 = `C:/` - expected2 = `C:/test1` - expected3 = `C:/test2` - expected4 = `C:/test2/test3` - expectedFinal = `C:\test2\test3` // Note inspect is going to return Windows paths, as it's not in busybox - } else { - expected1 = `/` - expected2 = `/test1` - expected3 = `/test2` - expected4 = `/test2/test3` - expectedFinal = `/test2/test3` - } - - _, err := buildImage(name, - `FROM busybox - RUN sh -c "[ "$PWD" = "`+expected1+`" ]" - WORKDIR test1 - RUN sh -c "[ "$PWD" = "`+expected2+`" ]" - WORKDIR /test2 - RUN sh -c "[ "$PWD" = "`+expected3+`" ]" - WORKDIR test3 - RUN sh -c "[ "$PWD" = "`+expected4+`" ]"`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.WorkingDir") - if res != expectedFinal { - c.Fatalf("Workdir %s, expected %s", res, expectedFinal) - } -} - -// #22181 Regression test. Single end-to-end test of using -// Windows semantics. Most path handling verifications are in unit tests -func (s *DockerSuite) TestBuildWindowsWorkdirProcessing(c *check.C) { - testRequires(c, DaemonIsWindows) - name := "testbuildwindowsworkdirprocessing" - _, err := buildImage(name, - `FROM busybox - WORKDIR C:\\foo - WORKDIR bar - RUN sh -c "[ "$PWD" = "C:/foo/bar" ]" - `, - true) - if err != nil { - c.Fatal(err) - } -} - -// #22181 Regression test. 
Most path handling verifications are in unit tests. -// This is one functional end-to-end test. -func (s *DockerSuite) TestBuildWindowsAddCopyPathProcessing(c *check.C) { - testRequires(c, DaemonIsWindows) - name := "testbuildwindowsaddcopypathprocessing" - // TODO Windows (@jhowardmsft). Needs a follow-up PR to 22181 to - // support backslash such as .\\ being equivalent to ./ and c:\\ being - // equivalent to c:/. This is not currently (nor ever has been) supported - // by docker on the Windows platform. - dockerfile := ` - FROM busybox - # No trailing slash on COPY/ADD - # Results in dir being changed to a file - WORKDIR /wc1 - COPY wc1 c:/wc1 - WORKDIR /wc2 - ADD wc2 c:/wc2 - WORKDIR c:/ - RUN sh -c "[ $(cat c:/wc1) = 'hellowc1' ]" - RUN sh -c "[ $(cat c:/wc2) = 'worldwc2' ]" - - # Trailing slash on COPY/ADD, Windows-style path. - WORKDIR /wd1 - COPY wd1 c:/wd1/ - WORKDIR /wd2 - ADD wd2 c:/wd2/ - RUN sh -c "[ $(cat c:/wd1/wd1) = 'hellowd1' ]" - RUN sh -c "[ $(cat c:/wd2/wd2) = 'worldwd2' ]" - ` - ctx, err := fakeContext(dockerfile, map[string]string{ - "wc1": "hellowc1", - "wc2": "worldwc2", - "wd1": "hellowd1", - "wd2": "worldwd2", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - _, err = buildImageFromContext(name, ctx, false) - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildWorkdirWithEnvVariables(c *check.C) { - name := "testbuildworkdirwithenvvariables" - - var expected string - if daemonPlatform == "windows" { - expected = `C:\test1\test2` - } else { - expected = `/test1/test2` - } - - _, err := buildImage(name, - `FROM busybox - ENV DIRPATH /test1 - ENV SUBDIRNAME test2 - WORKDIR $DIRPATH - WORKDIR $SUBDIRNAME/$MISSING_VAR`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.WorkingDir") - if res != expected { - c.Fatalf("Workdir %s, expected %s", res, expected) - } -} - -func (s *DockerSuite) TestBuildRelativeCopy(c *check.C) { - // cat /test1/test2/foo gets permission denied for the user - testRequires(c, NotUserNamespace) - - var expected string - if daemonPlatform == "windows" { - expected = `C:/test1/test2` - } else { - expected = `/test1/test2` - } - - name := "testbuildrelativecopy" - dockerfile := ` - FROM busybox - WORKDIR /test1 - WORKDIR test2 - RUN sh -c "[ "$PWD" = '` + expected + `' ]" - COPY foo ./ - RUN sh -c "[ $(cat /test1/test2/foo) = 'hello' ]" - ADD foo ./bar/baz - RUN sh -c "[ $(cat /test1/test2/bar/baz) = 'hello' ]" - COPY foo ./bar/baz2 - RUN sh -c "[ $(cat /test1/test2/bar/baz2) = 'hello' ]" - WORKDIR .. - COPY foo ./ - RUN sh -c "[ $(cat /test1/foo) = 'hello' ]" - COPY foo /test3/ - RUN sh -c "[ $(cat /test3/foo) = 'hello' ]" - WORKDIR /test4 - COPY . . 
- RUN sh -c "[ $(cat /test4/foo) = 'hello' ]" - WORKDIR /test5/test6 - COPY foo ../ - RUN sh -c "[ $(cat /test5/foo) = 'hello' ]" - ` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - _, err = buildImageFromContext(name, ctx, false) - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildEnv(c *check.C) { - testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows - name := "testbuildenv" - expected := "[PATH=/test:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin PORT=2375]" - _, err := buildImage(name, - `FROM busybox - ENV PATH /test:$PATH - ENV PORT 2375 - RUN [ $(env | grep PORT) = 'PORT=2375' ]`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.Env") - if res != expected { - c.Fatalf("Env %s, expected %s", res, expected) - } -} - -func (s *DockerSuite) TestBuildPATH(c *check.C) { - testRequires(c, DaemonIsLinux) // ENV expansion is different in Windows - - defPath := "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - - fn := func(dockerfile string, exp string) { - _, err := buildImage("testbldpath", dockerfile, true) - c.Assert(err, check.IsNil) - - res := inspectField(c, "testbldpath", "Config.Env") - - if res != exp { - c.Fatalf("Env %q, expected %q for dockerfile:%q", res, exp, dockerfile) - } - } - - tests := []struct{ dockerfile, exp string }{ - {"FROM scratch\nMAINTAINER me", "[PATH=" + defPath + "]"}, - {"FROM busybox\nMAINTAINER me", "[PATH=" + defPath + "]"}, - {"FROM scratch\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, - {"FROM busybox\nENV FOO=bar", "[PATH=" + defPath + " FOO=bar]"}, - {"FROM scratch\nENV PATH=/test", "[PATH=/test]"}, - {"FROM busybox\nENV PATH=/test", "[PATH=/test]"}, - {"FROM scratch\nENV PATH=''", "[PATH=]"}, - {"FROM busybox\nENV PATH=''", "[PATH=]"}, - } - - for _, test := range tests { - fn(test.dockerfile, test.exp) - } -} - -func (s *DockerSuite) TestBuildContextCleanup(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, SameHostDaemon) - - name := "testbuildcontextcleanup" - entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) - if err != nil { - c.Fatalf("failed to list contents of tmp dir: %s", err) - } - _, err = buildImage(name, - `FROM scratch - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } - entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) - if err != nil { - c.Fatalf("failed to list contents of tmp dir: %s", err) - } - if err = compareDirectoryEntries(entries, entriesFinal); err != nil { - c.Fatalf("context should have been deleted, but wasn't") - } - -} - -func (s *DockerSuite) TestBuildContextCleanupFailedBuild(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, SameHostDaemon) - - name := "testbuildcontextcleanup" - entries, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) - if err != nil { - c.Fatalf("failed to list contents of tmp dir: %s", err) - } - _, err = buildImage(name, - `FROM scratch - RUN /non/existing/command`, - true) - if err == nil { - c.Fatalf("expected build to fail, but it didn't") - } - entriesFinal, err := ioutil.ReadDir(filepath.Join(dockerBasePath, "tmp")) - if err != nil { - c.Fatalf("failed to list contents of tmp dir: %s", err) - } - if err = compareDirectoryEntries(entries, entriesFinal); err != nil { - c.Fatalf("context should have been deleted, but wasn't") - } - -} - -func (s *DockerSuite) TestBuildCmd(c *check.C) { - name 
:= "testbuildcmd" - - expected := "[/bin/echo Hello World]" - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - CMD ["/bin/echo", "Hello World"]`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.Cmd") - if res != expected { - c.Fatalf("Cmd %s, expected %s", res, expected) - } -} - -func (s *DockerSuite) TestBuildExpose(c *check.C) { - testRequires(c, DaemonIsLinux) // Expose not implemented on Windows - name := "testbuildexpose" - expected := "map[2375/tcp:{}]" - _, err := buildImage(name, - `FROM scratch - EXPOSE 2375`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.ExposedPorts") - if res != expected { - c.Fatalf("Exposed ports %s, expected %s", res, expected) - } -} - -func (s *DockerSuite) TestBuildExposeMorePorts(c *check.C) { - testRequires(c, DaemonIsLinux) // Expose not implemented on Windows - // start building docker file with a large number of ports - portList := make([]string, 50) - line := make([]string, 100) - expectedPorts := make([]int, len(portList)*len(line)) - for i := 0; i < len(portList); i++ { - for j := 0; j < len(line); j++ { - p := i*len(line) + j + 1 - line[j] = strconv.Itoa(p) - expectedPorts[p-1] = p - } - if i == len(portList)-1 { - portList[i] = strings.Join(line, " ") - } else { - portList[i] = strings.Join(line, " ") + ` \` - } - } - - dockerfile := `FROM scratch - EXPOSE {{range .}} {{.}} - {{end}}` - tmpl := template.Must(template.New("dockerfile").Parse(dockerfile)) - buf := bytes.NewBuffer(nil) - tmpl.Execute(buf, portList) - - name := "testbuildexpose" - _, err := buildImage(name, buf.String(), true) - if err != nil { - c.Fatal(err) - } - - // check if all the ports are saved inside Config.ExposedPorts - res := inspectFieldJSON(c, name, "Config.ExposedPorts") - var exposedPorts map[string]interface{} - if err := json.Unmarshal([]byte(res), &exposedPorts); err != nil { - c.Fatal(err) - } - - for _, p := range expectedPorts { - ep := fmt.Sprintf("%d/tcp", p) - if _, ok := exposedPorts[ep]; !ok { - c.Errorf("Port(%s) is not exposed", ep) - } else { - delete(exposedPorts, ep) - } - } - if len(exposedPorts) != 0 { - c.Errorf("Unexpected extra exposed ports %v", exposedPorts) - } -} - -func (s *DockerSuite) TestBuildExposeOrder(c *check.C) { - testRequires(c, DaemonIsLinux) // Expose not implemented on Windows - buildID := func(name, exposed string) string { - _, err := buildImage(name, fmt.Sprintf(`FROM scratch - EXPOSE %s`, exposed), true) - if err != nil { - c.Fatal(err) - } - id := inspectField(c, name, "Id") - return id - } - - id1 := buildID("testbuildexpose1", "80 2375") - id2 := buildID("testbuildexpose2", "2375 80") - if id1 != id2 { - c.Errorf("EXPOSE should invalidate the cache only when ports actually changed") - } -} - -func (s *DockerSuite) TestBuildExposeUpperCaseProto(c *check.C) { - testRequires(c, DaemonIsLinux) // Expose not implemented on Windows - name := "testbuildexposeuppercaseproto" - expected := "map[5678/udp:{}]" - _, err := buildImage(name, - `FROM scratch - EXPOSE 5678/UDP`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.ExposedPorts") - if res != expected { - c.Fatalf("Exposed ports %s, expected %s", res, expected) - } -} - -func (s *DockerSuite) TestBuildEmptyEntrypointInheritance(c *check.C) { - name := "testbuildentrypointinheritance" - name2 := "testbuildentrypointinheritance2" - - _, err := buildImage(name, - `FROM busybox - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } - res := 
inspectField(c, name, "Config.Entrypoint") - - expected := "[/bin/echo]" - if res != expected { - c.Fatalf("Entrypoint %s, expected %s", res, expected) - } - - _, err = buildImage(name2, - fmt.Sprintf(`FROM %s - ENTRYPOINT []`, name), - true) - if err != nil { - c.Fatal(err) - } - res = inspectField(c, name2, "Config.Entrypoint") - - expected = "[]" - - if res != expected { - c.Fatalf("Entrypoint %s, expected %s", res, expected) - } - -} - -func (s *DockerSuite) TestBuildEmptyEntrypoint(c *check.C) { - name := "testbuildentrypoint" - expected := "[]" - - _, err := buildImage(name, - `FROM busybox - ENTRYPOINT []`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.Entrypoint") - if res != expected { - c.Fatalf("Entrypoint %s, expected %s", res, expected) - } - -} - -func (s *DockerSuite) TestBuildEntrypoint(c *check.C) { - name := "testbuildentrypoint" - - expected := "[/bin/echo]" - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.Entrypoint") - if res != expected { - c.Fatalf("Entrypoint %s, expected %s", res, expected) - } - -} - -// #6445 ensure ONBUILD triggers aren't committed to grandchildren -func (s *DockerSuite) TestBuildOnBuildLimitedInheritence(c *check.C) { - var ( - out2, out3 string - ) - { - name1 := "testonbuildtrigger1" - dockerfile1 := ` - FROM busybox - RUN echo "GRANDPARENT" - ONBUILD RUN echo "ONBUILD PARENT" - ` - ctx, err := fakeContext(dockerfile1, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out1, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", name1, ".") - if err != nil { - c.Fatalf("build failed to complete: %s, %v", out1, err) - } - } - { - name2 := "testonbuildtrigger2" - dockerfile2 := ` - FROM testonbuildtrigger1 - ` - ctx, err := fakeContext(dockerfile2, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out2, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name2, ".") - if err != nil { - c.Fatalf("build failed to complete: %s, %v", out2, err) - } - } - { - name3 := "testonbuildtrigger3" - dockerfile3 := ` - FROM testonbuildtrigger2 - ` - ctx, err := fakeContext(dockerfile3, nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out3, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-t", name3, ".") - if err != nil { - c.Fatalf("build failed to complete: %s, %v", out3, err) - } - - } - - // ONBUILD should be run in second build. - if !strings.Contains(out2, "ONBUILD PARENT") { - c.Fatalf("ONBUILD instruction did not run in child of ONBUILD parent") - } - - // ONBUILD should *not* be run in third build. 
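    // A hedged note on the mechanism (ours, summarizing what the three
    // builds above demonstrate): ONBUILD instructions are recorded in the
    // image's Config.OnBuild list rather than executed by the build that
    // declares them; they fire once, when a child image builds FROM that
    // parent, and are not carried into the child's own config, so the
    // grandchild build below must stay silent:
    //
    //    FROM busybox                         # grandparent
    //    ONBUILD RUN echo "ONBUILD PARENT"    # recorded, not run
    //
    //    FROM testonbuildtrigger1             # child: trigger fires here
    //
    //    FROM testonbuildtrigger2             # grandchild: nothing fires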
- if strings.Contains(out3, "ONBUILD PARENT") { - c.Fatalf("ONBUILD instruction ran in grandchild of ONBUILD parent") - } - -} - -func (s *DockerSuite) TestBuildWithCache(c *check.C) { - testRequires(c, DaemonIsLinux) // Expose not implemented on Windows - name := "testbuildwithcache" - id1, err := buildImage(name, - `FROM scratch - MAINTAINER dockerio - EXPOSE 5432 - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImage(name, - `FROM scratch - MAINTAINER dockerio - EXPOSE 5432 - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } - if id1 != id2 { - c.Fatal("The cache should have been used but hasn't.") - } -} - -func (s *DockerSuite) TestBuildWithoutCache(c *check.C) { - testRequires(c, DaemonIsLinux) // Expose not implemented on Windows - name := "testbuildwithoutcache" - name2 := "testbuildwithoutcache2" - id1, err := buildImage(name, - `FROM scratch - MAINTAINER dockerio - EXPOSE 5432 - ENTRYPOINT ["/bin/echo"]`, - true) - if err != nil { - c.Fatal(err) - } - - id2, err := buildImage(name2, - `FROM scratch - MAINTAINER dockerio - EXPOSE 5432 - ENTRYPOINT ["/bin/echo"]`, - false) - if err != nil { - c.Fatal(err) - } - if id1 == id2 { - c.Fatal("The cache should have been invalidated but hasn't.") - } -} - -func (s *DockerSuite) TestBuildConditionalCache(c *check.C) { - name := "testbuildconditionalcache" - - dockerfile := ` - FROM busybox - ADD foo /tmp/` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatalf("Error building #1: %s", err) - } - - if err := ctx.Add("foo", "bye"); err != nil { - c.Fatalf("Error modifying foo: %s", err) - } - - id2, err := buildImageFromContext(name, ctx, false) - if err != nil { - c.Fatalf("Error building #2: %s", err) - } - if id2 == id1 { - c.Fatal("Should not have used the cache") - } - - id3, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatalf("Error building #3: %s", err) - } - if id3 != id2 { - c.Fatal("Should have used the cache") - } -} - -func (s *DockerSuite) TestBuildAddLocalFileWithCache(c *check.C) { - // local files are not owned by the correct user - testRequires(c, NotUserNamespace) - name := "testbuildaddlocalfilewithcache" - name2 := "testbuildaddlocalfilewithcache2" - dockerfile := ` - FROM busybox - MAINTAINER dockerio - ADD foo /usr/lib/bla/bar - RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, true) - if err != nil { - c.Fatal(err) - } - if id1 != id2 { - c.Fatal("The cache should have been used but hasn't.") - } -} - -func (s *DockerSuite) TestBuildAddMultipleLocalFileWithCache(c *check.C) { - name := "testbuildaddmultiplelocalfilewithcache" - name2 := "testbuildaddmultiplelocalfilewithcache2" - dockerfile := ` - FROM busybox - MAINTAINER dockerio - ADD foo Dockerfile /usr/lib/bla/ - RUN sh -c "[ $(cat /usr/lib/bla/foo) = "hello" ]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, true) - if 
err != nil { - c.Fatal(err) - } - if id1 != id2 { - c.Fatal("The cache should have been used but hasn't.") - } -} - -func (s *DockerSuite) TestBuildAddLocalFileWithoutCache(c *check.C) { - // local files are not owned by the correct user - testRequires(c, NotUserNamespace) - name := "testbuildaddlocalfilewithoutcache" - name2 := "testbuildaddlocalfilewithoutcache2" - dockerfile := ` - FROM busybox - MAINTAINER dockerio - ADD foo /usr/lib/bla/bar - RUN sh -c "[ $(cat /usr/lib/bla/bar) = "hello" ]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, false) - if err != nil { - c.Fatal(err) - } - if id1 == id2 { - c.Fatal("The cache should have been invalidated but hasn't.") - } -} - -func (s *DockerSuite) TestBuildCopyDirButNotFile(c *check.C) { - name := "testbuildcopydirbutnotfile" - name2 := "testbuildcopydirbutnotfile2" - - dockerfile := ` - FROM ` + minimalBaseImage() + ` - COPY dir /tmp/` - ctx, err := fakeContext(dockerfile, map[string]string{ - "dir/foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - // Check that adding a file with a similar name doesn't mess with the cache - if err := ctx.Add("dir_file", "hello2"); err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, true) - if err != nil { - c.Fatal(err) - } - if id1 != id2 { - c.Fatal("The cache should have been used but wasn't") - } -} - -func (s *DockerSuite) TestBuildAddCurrentDirWithCache(c *check.C) { - name := "testbuildaddcurrentdirwithcache" - name2 := name + "2" - name3 := name + "3" - name4 := name + "4" - dockerfile := ` - FROM ` + minimalBaseImage() + ` - MAINTAINER dockerio - ADD . /usr/lib/bla` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - // Check that adding a file invalidates the cache of "ADD ." - if err := ctx.Add("bar", "hello2"); err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, true) - if err != nil { - c.Fatal(err) - } - if id1 == id2 { - c.Fatal("The cache should have been invalidated but hasn't.") - } - // Check that changing a file invalidates the cache of "ADD ." - if err := ctx.Add("foo", "hello1"); err != nil { - c.Fatal(err) - } - id3, err := buildImageFromContext(name3, ctx, true) - if err != nil { - c.Fatal(err) - } - if id2 == id3 { - c.Fatal("The cache should have been invalidated but hasn't.") - } - // Check that changing a file to the same content with a different mtime does not - // invalidate the cache of "ADD ." - time.Sleep(1 * time.Second) // wait a second because of mtime precision - if err := ctx.Add("foo", "hello1"); err != nil { - c.Fatal(err) - } - id4, err := buildImageFromContext(name4, ctx, true) - if err != nil { - c.Fatal(err) - } - if id3 != id4 { - c.Fatal("The cache should have been used but hasn't.") - } -} - -func (s *DockerSuite) TestBuildAddCurrentDirWithoutCache(c *check.C) { - name := "testbuildaddcurrentdirwithoutcache" - name2 := "testbuildaddcurrentdirwithoutcache2" - dockerfile := ` - FROM ` + minimalBaseImage() + ` - MAINTAINER dockerio - ADD . 
/usr/lib/bla` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, false) - if err != nil { - c.Fatal(err) - } - if id1 == id2 { - c.Fatal("The cache should have been invalidated but hasn't.") - } -} - -func (s *DockerSuite) TestBuildAddRemoteFileWithCache(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet - name := "testbuildaddremotefilewithcache" - server, err := fakeStorage(map[string]string{ - "baz": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - id1, err := buildImage(name, - fmt.Sprintf(`FROM scratch - MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImage(name, - fmt.Sprintf(`FROM scratch - MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - true) - if err != nil { - c.Fatal(err) - } - if id1 != id2 { - c.Fatal("The cache should have been used but hasn't.") - } -} - -func (s *DockerSuite) TestBuildAddRemoteFileWithoutCache(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet - name := "testbuildaddremotefilewithoutcache" - name2 := "testbuildaddremotefilewithoutcache2" - server, err := fakeStorage(map[string]string{ - "baz": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - id1, err := buildImage(name, - fmt.Sprintf(`FROM scratch - MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImage(name2, - fmt.Sprintf(`FROM scratch - MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - false) - if err != nil { - c.Fatal(err) - } - if id1 == id2 { - c.Fatal("The cache should have been invalidated but hasn't.") - } -} - -func (s *DockerSuite) TestBuildAddRemoteFileMTime(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet - name := "testbuildaddremotefilemtime" - name2 := name + "2" - name3 := name + "3" - - files := map[string]string{"baz": "hello"} - server, err := fakeStorage(files) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - ctx, err := fakeContext(fmt.Sprintf(`FROM scratch - MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server.URL()), nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - - id2, err := buildImageFromContext(name2, ctx, true) - if err != nil { - c.Fatal(err) - } - if id1 != id2 { - c.Fatal("The cache should have been used but wasn't - #1") - } - - // Now create a different server with same contents (causes different mtime) - // The cache should still be used - - // allow some time for clock to pass as mtime precision is only 1s - time.Sleep(2 * time.Second) - - server2, err := fakeStorage(files) - if err != nil { - c.Fatal(err) - } - defer server2.Close() - - ctx2, err := fakeContext(fmt.Sprintf(`FROM scratch - MAINTAINER dockerio - ADD %s/baz /usr/lib/baz/quux`, server2.URL()), nil) - if err != nil { - c.Fatal(err) - } - defer ctx2.Close() - id3, err := buildImageFromContext(name3, ctx2, true) - if err != nil { - c.Fatal(err) - } - if id1 != id3 { - c.Fatal("The cache should have been used but wasn't") - } -} - -func (s *DockerSuite) 
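// A hedged commentary on TestBuildAddRemoteFileMTime above (ours, not
// the suite's): for ADD of a URL the builder downloads the file and
// keys the cache on the downloaded bytes, so identical content served
// by a brand-new fakeStorage server, and therefore with a fresh
// Last-Modified time, is still expected to hit the cache:
//
//    ADD http://<server>/baz /usr/lib/baz/quux   # cached by content, not mtime
//
// where <server> stands in for server.URL().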
TestBuildAddLocalAndRemoteFilesWithCache(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet - name := "testbuildaddlocalandremotefilewithcache" - server, err := fakeStorage(map[string]string{ - "baz": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - ctx, err := fakeContext(fmt.Sprintf(`FROM scratch - MAINTAINER dockerio - ADD foo /usr/lib/bla/bar - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - map[string]string{ - "foo": "hello world", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - if id1 != id2 { - c.Fatal("The cache should have been used but hasn't.") - } -} - -func testContextTar(c *check.C, compression archive.Compression) { - ctx, err := fakeContext( - `FROM busybox -ADD foo /foo -CMD ["cat", "/foo"]`, - map[string]string{ - "foo": "bar", - }, - ) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - context, err := archive.Tar(ctx.Dir, compression) - if err != nil { - c.Fatalf("failed to build context tar: %v", err) - } - name := "contexttar" - buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") - buildCmd.Stdin = context - - if out, _, err := runCommandWithOutput(buildCmd); err != nil { - c.Fatalf("build failed to complete: %v %v", out, err) - } -} - -func (s *DockerSuite) TestBuildContextTarGzip(c *check.C) { - testContextTar(c, archive.Gzip) -} - -func (s *DockerSuite) TestBuildContextTarNoCompression(c *check.C) { - testContextTar(c, archive.Uncompressed) -} - -func (s *DockerSuite) TestBuildNoContext(c *check.C) { - buildCmd := exec.Command(dockerBinary, "build", "-t", "nocontext", "-") - buildCmd.Stdin = strings.NewReader( - `FROM busybox - CMD ["echo", "ok"]`) - - if out, _, err := runCommandWithOutput(buildCmd); err != nil { - c.Fatalf("build failed to complete: %v %v", out, err) - } - - if out, _ := dockerCmd(c, "run", "--rm", "nocontext"); out != "ok\n" { - c.Fatalf("run produced invalid output: %q, expected %q", out, "ok") - } -} - -// TODO: TestCaching -func (s *DockerSuite) TestBuildAddLocalAndRemoteFilesWithoutCache(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows doesn't have httpserver image yet - name := "testbuildaddlocalandremotefilewithoutcache" - name2 := "testbuildaddlocalandremotefilewithoutcache2" - server, err := fakeStorage(map[string]string{ - "baz": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - ctx, err := fakeContext(fmt.Sprintf(`FROM scratch - MAINTAINER dockerio - ADD foo /usr/lib/bla/bar - ADD %s/baz /usr/lib/baz/quux`, server.URL()), - map[string]string{ - "foo": "hello world", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - id1, err := buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } - id2, err := buildImageFromContext(name2, ctx, false) - if err != nil { - c.Fatal(err) - } - if id1 == id2 { - c.Fatal("The cache should have been invalidated but hasn't.") - } -} - -func (s *DockerSuite) TestBuildWithVolumeOwnership(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildimg" - - _, err := buildImage(name, - `FROM busybox:latest - RUN mkdir /test && chown daemon:daemon /test && chmod 0600 /test - VOLUME /test`, - true) - - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", "--rm", "testbuildimg", "ls", "-la", "/test") - - if expected := "drw-------"; 
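// A hedged aside on the assertions that follow (ours): when `docker run`
// materializes an anonymous VOLUME, the engine initializes it from the
// image's directory at that path, copying its mode and ownership, so the
// chmod 0600 and chown daemon:daemon performed during the build are
// expected to appear in the listing:
//
//    docker run --rm testbuildimg ls -la /test   # drw------- ... daemon daemon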
!strings.Contains(out, expected) { - c.Fatalf("expected %s, received %s", expected, out) - } - - if expected := "daemon daemon"; !strings.Contains(out, expected) { - c.Fatalf("expected %s, received %s", expected, out) - } - -} - -// testing #1405 - config.Cmd does not get cleaned up if -// utilizing cache -func (s *DockerSuite) TestBuildEntrypointRunCleanup(c *check.C) { - name := "testbuildcmdcleanup" - if _, err := buildImage(name, - `FROM busybox - RUN echo "hello"`, - true); err != nil { - c.Fatal(err) - } - - ctx, err := fakeContext(`FROM busybox - RUN echo "hello" - ADD foo /foo - ENTRYPOINT ["/bin/echo"]`, - map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.Cmd") - // Cmd must be cleaned up - if res != "[]" { - c.Fatalf("Cmd %s, expected nil", res) - } -} - -func (s *DockerSuite) TestBuildAddFileNotFound(c *check.C) { - name := "testbuildaddnotfound" - expected := "foo: no such file or directory" - - if daemonPlatform == "windows" { - expected = "foo: The system cannot find the file specified" - } - - ctx, err := fakeContext(`FROM `+minimalBaseImage()+` - ADD foo /usr/local/bar`, - map[string]string{"bar": "hello"}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - if !strings.Contains(err.Error(), expected) { - c.Fatalf("Wrong error %v, must be about missing foo file or directory", err) - } - } else { - c.Fatal("Error must not be nil") - } -} - -func (s *DockerSuite) TestBuildInheritance(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildinheritance" - - _, err := buildImage(name, - `FROM scratch - EXPOSE 2375`, - true) - if err != nil { - c.Fatal(err) - } - ports1 := inspectField(c, name, "Config.ExposedPorts") - - _, err = buildImage(name, - fmt.Sprintf(`FROM %s - ENTRYPOINT ["/bin/echo"]`, name), - true) - if err != nil { - c.Fatal(err) - } - - res := inspectField(c, name, "Config.Entrypoint") - if expected := "[/bin/echo]"; res != expected { - c.Fatalf("Entrypoint %s, expected %s", res, expected) - } - ports2 := inspectField(c, name, "Config.ExposedPorts") - if ports1 != ports2 { - c.Fatalf("Ports must be same: %s != %s", ports1, ports2) - } -} - -func (s *DockerSuite) TestBuildFails(c *check.C) { - name := "testbuildfails" - _, err := buildImage(name, - `FROM busybox - RUN sh -c "exit 23"`, - true) - if err != nil { - if !strings.Contains(err.Error(), "returned a non-zero code: 23") { - c.Fatalf("Wrong error %v, must be about non-zero code 23", err) - } - } else { - c.Fatal("Error must not be nil") - } -} - -func (s *DockerSuite) TestBuildOnBuild(c *check.C) { - name := "testbuildonbuild" - _, err := buildImage(name, - `FROM busybox - ONBUILD RUN touch foobar`, - true) - if err != nil { - c.Fatal(err) - } - _, err = buildImage(name, - fmt.Sprintf(`FROM %s - RUN [ -f foobar ]`, name), - true) - if err != nil { - c.Fatal(err) - } -} - -// gh #2446 -func (s *DockerSuite) TestBuildAddToSymlinkDest(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildaddtosymlinkdest" - ctx, err := fakeContext(`FROM busybox - RUN mkdir /foo - RUN ln -s /foo /bar - ADD foo /bar/ - RUN [ -f /bar/foo ] - RUN [ -f /foo/foo ]`, - map[string]string{ - "foo": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -}
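TestBuildFails above asserts that a non-zero RUN step aborts the build and that the CLI error names the exit code. A hedged standalone equivalent of that check using only os/exec — it assumes a local `docker` CLI and busybox; the error-string match is the one the test itself expects:

// A RUN step exiting non-zero must fail the build, and the build
// output should report the step's exit code (23 here).
package main

import (
	"fmt"
	"log"
	"os/exec"
	"strings"
)

func main() {
	cmd := exec.Command("docker", "build", "-t", "failcheck", "-") // hypothetical tag
	cmd.Stdin = strings.NewReader("FROM busybox\nRUN sh -c \"exit 23\"\n")
	out, err := cmd.CombinedOutput()
	if err == nil {
		log.Fatal("build unexpectedly succeeded")
	}
	// The expectation string comes from TestBuildFails above.
	fmt.Println(strings.Contains(string(out), "returned a non-zero code: 23"))
}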
-func (s *DockerSuite) TestBuildEscapeWhitespace(c *check.C) { - name := "testbuildescapewhitespace" - - _, err := buildImage(name, ` - # ESCAPE=\ - FROM busybox - MAINTAINER "Docker \ -IO " - `, true) - if err != nil { - c.Fatal(err) - } - - res := inspectField(c, name, "Author") - - if res != "\"Docker IO \"" { - c.Fatalf("Parsed string did not match the escaped string. Got: %q", res) - } - -} - -func (s *DockerSuite) TestBuildVerifyIntString(c *check.C) { - // Verify that strings that look like ints are still passed as strings - name := "testbuildstringing" - - _, err := buildImage(name, ` - FROM busybox - MAINTAINER 123 - `, true) - - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "inspect", name) - - if !strings.Contains(out, "\"123\"") { - c.Fatalf("Output does not contain the int as a string:\n%s", out) - } - -} - -func (s *DockerSuite) TestBuildDockerignore(c *check.C) { - testRequires(c, DaemonIsLinux) // TODO Windows: This test passes on Windows, - // but currently adds a disproportionate amount of time for the value it has. - // Removing it from Windows CI for now, but this will be revisited in the - // TP5 timeframe when perf is better. - name := "testbuilddockerignore" - dockerfile := ` - FROM busybox - ADD . /bla - RUN sh -c "[[ -f /bla/src/x.go ]]" - RUN sh -c "[[ -f /bla/Makefile ]]" - RUN sh -c "[[ ! -e /bla/src/_vendor ]]" - RUN sh -c "[[ ! -e /bla/.gitignore ]]" - RUN sh -c "[[ ! -e /bla/README.md ]]" - RUN sh -c "[[ ! -e /bla/dir/foo ]]" - RUN sh -c "[[ ! -e /bla/foo ]]" - RUN sh -c "[[ ! -e /bla/.git ]]" - RUN sh -c "[[ ! -e v.cc ]]" - RUN sh -c "[[ ! -e src/v.cc ]]" - RUN sh -c "[[ ! -e src/_vendor/v.cc ]]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Makefile": "all:", - ".git/HEAD": "ref: foo", - "src/x.go": "package main", - "src/_vendor/v.go": "package main", - "src/_vendor/v.cc": "package main", - "src/v.cc": "package main", - "v.cc": "package main", - "dir/foo": "", - ".gitignore": "", - "README.md": "readme", - ".dockerignore": ` -.git -pkg -.gitignore -src/_vendor -*.md -**/*.cc -dir`, - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildDockerignoreCleanPaths(c *check.C) { - name := "testbuilddockerignorecleanpaths" - dockerfile := ` - FROM busybox - ADD . /tmp/ - RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (! ls /tmp/dir1/foo)"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "foo", - "foo2": "foo2", - "dir1/foo": "foo in dir1", - ".dockerignore": "./foo\ndir1//foo\n./dir1/../foo2", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildDockerignoreExceptions(c *check.C) { - testRequires(c, DaemonIsLinux) // TODO Windows: This test passes on Windows, - // but currently adds a disproportionate amount of time for the value it has. - // Removing it from Windows CI for now, but this will be revisited in the - // TP5 timeframe when perf is better. - name := "testbuilddockerignoreexceptions" - dockerfile := ` - FROM busybox - ADD . /bla - RUN sh -c "[[ -f /bla/src/x.go ]]" - RUN sh -c "[[ -f /bla/Makefile ]]" - RUN sh -c "[[ ! -e /bla/src/_vendor ]]" - RUN sh -c "[[ ! -e /bla/.gitignore ]]" - RUN sh -c "[[ ! -e /bla/README.md ]]" - RUN sh -c "[[ -e /bla/dir/dir/foo ]]" - RUN sh -c "[[ ! 
-e /bla/dir/foo1 ]]" - RUN sh -c "[[ -f /bla/dir/e ]]" - RUN sh -c "[[ -f /bla/dir/e-dir/foo ]]" - RUN sh -c "[[ ! -e /bla/foo ]]" - RUN sh -c "[[ ! -e /bla/.git ]]" - RUN sh -c "[[ -e /bla/dir/a.cc ]]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Makefile": "all:", - ".git/HEAD": "ref: foo", - "src/x.go": "package main", - "src/_vendor/v.go": "package main", - "dir/foo": "", - "dir/foo1": "", - "dir/dir/f1": "", - "dir/dir/foo": "", - "dir/e": "", - "dir/e-dir/foo": "", - ".gitignore": "", - "README.md": "readme", - "dir/a.cc": "hello", - ".dockerignore": ` -.git -pkg -.gitignore -src/_vendor -*.md -dir -!dir/e* -!dir/dir/foo -**/*.cc -!**/*.cc`, - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildDockerignoringDockerfile(c *check.C) { - name := "testbuilddockerignoredockerfile" - dockerfile := ` - FROM busybox - ADD . /tmp/ - RUN sh -c "! ls /tmp/Dockerfile" - RUN ls /tmp/.dockerignore` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": dockerfile, - ".dockerignore": "Dockerfile\n", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't ignore Dockerfile correctly:%s", err) - } - - // now try it with ./Dockerfile - ctx.Add(".dockerignore", "./Dockerfile\n") - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't ignore ./Dockerfile correctly:%s", err) - } - -} - -func (s *DockerSuite) TestBuildDockerignoringRenamedDockerfile(c *check.C) { - name := "testbuilddockerignoredockerfile" - dockerfile := ` - FROM busybox - ADD . /tmp/ - RUN ls /tmp/Dockerfile - RUN sh -c "! ls /tmp/MyDockerfile" - RUN ls /tmp/.dockerignore` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": "Should not use me", - "MyDockerfile": dockerfile, - ".dockerignore": "MyDockerfile\n", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't ignore MyDockerfile correctly:%s", err) - } - - // now try it with ./MyDockerfile - ctx.Add(".dockerignore", "./MyDockerfile\n") - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't ignore ./MyDockerfile correctly:%s", err) - } - -} - -func (s *DockerSuite) TestBuildDockerignoringDockerignore(c *check.C) { - name := "testbuilddockerignoredockerignore" - dockerfile := ` - FROM busybox - ADD . /tmp/ - RUN sh -c "! ls /tmp/.dockerignore" - RUN ls /tmp/Dockerfile` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": dockerfile, - ".dockerignore": ".dockerignore\n", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't ignore .dockerignore correctly:%s", err) - } -} - -func (s *DockerSuite) TestBuildDockerignoreTouchDockerfile(c *check.C) { - var id1 string - var id2 string - - name := "testbuilddockerignoretouchdockerfile" - dockerfile := ` - FROM busybox - ADD . 
/tmp/` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": dockerfile, - ".dockerignore": "Dockerfile\n", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if id1, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't build it correctly:%s", err) - } - - if id2, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't build it correctly:%s", err) - } - if id1 != id2 { - c.Fatalf("Didn't use the cache - 1") - } - - // Now make sure touching Dockerfile doesn't invalidate the cache - if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { - c.Fatalf("Didn't add Dockerfile: %s", err) - } - if id2, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't build it correctly:%s", err) - } - if id1 != id2 { - c.Fatalf("Didn't use the cache - 2") - } - - // One more time but just 'touch' it instead of changing the content - if err = ctx.Add("Dockerfile", dockerfile+"\n# hi"); err != nil { - c.Fatalf("Didn't add Dockerfile: %s", err) - } - if id2, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("Didn't build it correctly:%s", err) - } - if id1 != id2 { - c.Fatalf("Didn't use the cache - 3") - } - -} - -func (s *DockerSuite) TestBuildDockerignoringWholeDir(c *check.C) { - name := "testbuilddockerignorewholedir" - dockerfile := ` - FROM busybox - COPY . / - RUN sh -c "[[ ! -e /.gitignore ]]" - RUN sh -c "[[ -f /Makefile ]]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": "FROM scratch", - "Makefile": "all:", - ".gitignore": "", - ".dockerignore": ".*\n", - }) - c.Assert(err, check.IsNil) - defer ctx.Close() - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - - c.Assert(ctx.Add(".dockerfile", "*"), check.IsNil) - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - - c.Assert(ctx.Add(".dockerfile", "."), check.IsNil) - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - - c.Assert(ctx.Add(".dockerfile", "?"), check.IsNil) - if _, err = buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildDockerignoringBadExclusion(c *check.C) { - name := "testbuilddockerignorebadexclusion" - dockerfile := ` - FROM busybox - COPY . / - RUN sh -c "[[ ! -e /.gitignore ]]" - RUN sh -c "[[ -f /Makefile ]]"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": "FROM scratch", - "Makefile": "all:", - ".gitignore": "", - ".dockerignore": "!\n", - }) - c.Assert(err, check.IsNil) - defer ctx.Close() - if _, err = buildImageFromContext(name, ctx, true); err == nil { - c.Fatalf("Build was supposed to fail but didn't") - } - - if err.Error() != "failed to build the image: Error checking context: 'Illegal exclusion pattern: !'.\n" { - c.Fatalf("Incorrect output, got:%q", err.Error()) - } -} - -func (s *DockerSuite) TestBuildDockerignoringWildTopDir(c *check.C) { - dockerfile := ` - FROM busybox - COPY . / - RUN sh -c "[[ ! -e /.dockerignore ]]" - RUN sh -c "[[ ! -e /Dockerfile ]]" - RUN sh -c "[[ ! -e /file1 ]]" - RUN sh -c "[[ ! 
-e /dir ]]"` - - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": "FROM scratch", - "file1": "", - "dir/dfile1": "", - }) - c.Assert(err, check.IsNil) - defer ctx.Close() - - // All of these should result in ignoring all files - for _, variant := range []string{"**", "**/", "**/**", "*"} { - ctx.Add(".dockerignore", variant) - _, err = buildImageFromContext("noname", ctx, true) - c.Assert(err, check.IsNil, check.Commentf("variant: %s", variant)) - } -} - -func (s *DockerSuite) TestBuildDockerignoringWildDirs(c *check.C) { - testRequires(c, DaemonIsLinux) // TODO Windows: Fix this test; also perf - - dockerfile := ` - FROM busybox - COPY . / - #RUN sh -c "[[ -e /.dockerignore ]]" - RUN sh -c "[[ -e /Dockerfile ]] && \ - [[ ! -e /file0 ]] && \ - [[ ! -e /dir1/file0 ]] && \ - [[ ! -e /dir2/file0 ]] && \ - [[ ! -e /file1 ]] && \ - [[ ! -e /dir1/file1 ]] && \ - [[ ! -e /dir1/dir2/file1 ]] && \ - [[ ! -e /dir1/file2 ]] && \ - [[ -e /dir1/dir2/file2 ]] && \ - [[ ! -e /dir1/dir2/file4 ]] && \ - [[ ! -e /dir1/dir2/file5 ]] && \ - [[ ! -e /dir1/dir2/file6 ]] && \ - [[ ! -e /dir1/dir3/file7 ]] && \ - [[ ! -e /dir1/dir3/file8 ]] && \ - [[ -e /dir1/dir3 ]] && \ - [[ -e /dir1/dir4 ]] && \ - [[ ! -e 'dir1/dir5/fileAA' ]] && \ - [[ -e 'dir1/dir5/fileAB' ]] && \ - [[ -e 'dir1/dir5/fileB' ]]" # "." in pattern means nothing - - RUN echo all done!` - - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": "FROM scratch", - "file0": "", - "dir1/file0": "", - "dir1/dir2/file0": "", - - "file1": "", - "dir1/file1": "", - "dir1/dir2/file1": "", - - "dir1/file2": "", - "dir1/dir2/file2": "", // remains - - "dir1/dir2/file4": "", - "dir1/dir2/file5": "", - "dir1/dir2/file6": "", - "dir1/dir3/file7": "", - "dir1/dir3/file8": "", - "dir1/dir4/file9": "", - - "dir1/dir5/fileAA": "", - "dir1/dir5/fileAB": "", - "dir1/dir5/fileB": "", - - ".dockerignore": ` -**/file0 -**/*file1 -**/dir1/file2 -dir1/**/file4 -**/dir2/file5 -**/dir1/dir2/file6 -dir1/dir3/** -**/dir4/** -**/file?A -**/file\?B -**/dir5/file. -`, - }) - c.Assert(err, check.IsNil) - defer ctx.Close() - - _, err = buildImageFromContext("noname", ctx, true) - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestBuildLineBreak(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildlinebreak" - _, err := buildImage(name, - `FROM busybox -RUN sh -c 'echo root:testpass \ - > /tmp/passwd' -RUN mkdir -p /var/run/sshd -RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" -RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, - true) - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildEOLInLine(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildeolinline" - _, err := buildImage(name, - `FROM busybox -RUN sh -c 'echo root:testpass > /tmp/passwd' -RUN echo "foo \n bar"; echo "baz" -RUN mkdir -p /var/run/sshd -RUN sh -c "[ "$(cat /tmp/passwd)" = "root:testpass" ]" -RUN sh -c "[ "$(ls -d /var/run/sshd)" = "/var/run/sshd" ]"`, - true) - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildCommentsShebangs(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildcomments" - _, err := buildImage(name, - `FROM busybox -# This is an ordinary comment. -RUN { echo '#!/bin/sh'; echo 'echo hello world'; } > /hello.sh -RUN [ ! 
-x /hello.sh ] -# comment with line break \ -RUN chmod +x /hello.sh -RUN [ -x /hello.sh ] -RUN [ "$(cat /hello.sh)" = $'#!/bin/sh\necho hello world' ] -RUN [ "$(/hello.sh)" = "hello world" ]`, - true) - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildUsersAndGroups(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildusers" - _, err := buildImage(name, - `FROM busybox - -# Make sure our defaults work -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)" = '0:0/root:root' ] - -# TODO decide if "args.user = strconv.Itoa(syscall.Getuid())" is acceptable behavior for changeUser in sysvinit instead of "return nil" when "USER" isn't specified (so that we get the proper group list even if that is the empty list, even in the default case of not supplying an explicit USER to run as, which implies USER 0) -USER root -RUN [ "$(id -G):$(id -Gn)" = '0 10:root wheel' ] - -# Setup dockerio user and group -RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd && \ - echo 'dockerio:x:1001:' >> /etc/group - -# Make sure we can switch to our user and all the information is exactly as we expect it to be -USER dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] - -# Switch back to root and double check that worked exactly as we might expect it to -USER root -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '0:0/root:root/0 10:root wheel' ] && \ - # Add a "supplementary" group for our dockerio user \ - echo 'supplementary:x:1002:dockerio' >> /etc/group - -# ... and then go verify that we get it like we expect -USER dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] -USER 1001 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001 1002:dockerio supplementary' ] - -# super test the new "user:group" syntax -USER dockerio:dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] -USER 1001:dockerio -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] -USER dockerio:1001 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] -USER 1001:1001 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1001/dockerio:dockerio/1001:dockerio' ] -USER dockerio:supplementary -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] -USER dockerio:1002 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] -USER 1001:supplementary -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] -USER 1001:1002 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1001:1002/dockerio:supplementary/1002:supplementary' ] - -# make sure unknown uid/gid still works properly -USER 1042:1043 -RUN [ "$(id -u):$(id -g)/$(id -un):$(id -gn)/$(id -G):$(id -Gn)" = '1042:1043/1042:1043/1043:1043' ]`, - true) - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildEnvUsage(c *check.C) { - // /docker/world/hello is not owned by the correct user - testRequires(c, NotUserNamespace) - testRequires(c, DaemonIsLinux) - name := "testbuildenvusage" - dockerfile := 
`FROM busybox -ENV HOME /root -ENV PATH $HOME/bin:$PATH -ENV PATH /tmp:$PATH -RUN [ "$PATH" = "/tmp:$HOME/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" ] -ENV FOO /foo/baz -ENV BAR /bar -ENV BAZ $BAR -ENV FOOPATH $PATH:$FOO -RUN [ "$BAR" = "$BAZ" ] -RUN [ "$FOOPATH" = "$PATH:/foo/baz" ] -ENV FROM hello/docker/world -ENV TO /docker/world/hello -ADD $FROM $TO -RUN [ "$(cat $TO)" = "hello" ] -ENV abc=def -ENV ghi=$abc -RUN [ "$ghi" = "def" ] -` - ctx, err := fakeContext(dockerfile, map[string]string{ - "hello/docker/world": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - _, err = buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildEnvUsage2(c *check.C) { - // /docker/world/hello is not owned by the correct user - testRequires(c, NotUserNamespace) - testRequires(c, DaemonIsLinux) - name := "testbuildenvusage2" - dockerfile := `FROM busybox -ENV abc=def def="hello world" -RUN [ "$abc,$def" = "def,hello world" ] -ENV def=hello\ world v1=abc v2="hi there" v3='boogie nights' v4="with'quotes too" -RUN [ "$def,$v1,$v2,$v3,$v4" = "hello world,abc,hi there,boogie nights,with'quotes too" ] -ENV abc=zzz FROM=hello/docker/world -ENV abc=zzz TO=/docker/world/hello -ADD $FROM $TO -RUN [ "$abc,$(cat $TO)" = "zzz,hello" ] -ENV abc 'yyy' -RUN [ $abc = 'yyy' ] -ENV abc= -RUN [ "$abc" = "" ] - -# use grep to make sure if the builder substitutes \$foo by mistake -# we don't get a false positive -ENV abc=\$foo -RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) -ENV abc \$foo -RUN [ "$abc" = "\$foo" ] && (echo "$abc" | grep foo) - -ENV abc=\'foo\' abc2=\"foo\" -RUN [ "$abc,$abc2" = "'foo',\"foo\"" ] -ENV abc "foo" -RUN [ "$abc" = "foo" ] -ENV abc 'foo' -RUN [ "$abc" = 'foo' ] -ENV abc \'foo\' -RUN [ "$abc" = "'foo'" ] -ENV abc \"foo\" -RUN [ "$abc" = '"foo"' ] - -ENV abc=ABC -RUN [ "$abc" = "ABC" ] -ENV def1=${abc:-DEF} def2=${ccc:-DEF} -ENV def3=${ccc:-${def2}xx} def4=${abc:+ALT} def5=${def2:+${abc}:} def6=${ccc:-\$abc:} def7=${ccc:-\${abc}:} -RUN [ "$def1,$def2,$def3,$def4,$def5,$def6,$def7" = 'ABC,DEF,DEFxx,ALT,ABC:,$abc:,${abc:}' ] -ENV mypath=${mypath:+$mypath:}/home -ENV mypath=${mypath:+$mypath:}/away -RUN [ "$mypath" = '/home:/away' ] - -ENV e1=bar -ENV e2=$e1 e3=$e11 e4=\$e1 e5=\$e11 -RUN [ "$e0,$e1,$e2,$e3,$e4,$e5" = ',bar,bar,,$e1,$e11' ] - -ENV ee1 bar -ENV ee2 $ee1 -ENV ee3 $ee11 -ENV ee4 \$ee1 -ENV ee5 \$ee11 -RUN [ "$ee1,$ee2,$ee3,$ee4,$ee5" = 'bar,bar,,$ee1,$ee11' ] - -ENV eee1="foo" eee2='foo' -ENV eee3 "foo" -ENV eee4 'foo' -RUN [ "$eee1,$eee2,$eee3,$eee4" = 'foo,foo,foo,foo' ] - -` - ctx, err := fakeContext(dockerfile, map[string]string{ - "hello/docker/world": "hello", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - _, err = buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildAddScript(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildaddscript" - dockerfile := ` -FROM busybox -ADD test /test -RUN ["chmod","+x","/test"] -RUN ["/test"] -RUN [ "$(cat /testfile)" = 'test!' ]` - ctx, err := fakeContext(dockerfile, map[string]string{ - "test": "#!/bin/sh\necho 'test!' 
> /testfile", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - _, err = buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildAddTar(c *check.C) { - // /test/foo is not owned by the correct user - testRequires(c, NotUserNamespace) - name := "testbuildaddtar" - - ctx := func() *FakeContext { - dockerfile := ` -FROM busybox -ADD test.tar / -RUN cat /test/foo | grep Hi -ADD test.tar /test.tar -RUN cat /test.tar/test/foo | grep Hi -ADD test.tar /unlikely-to-exist -RUN cat /unlikely-to-exist/test/foo | grep Hi -ADD test.tar /unlikely-to-exist-trailing-slash/ -RUN cat /unlikely-to-exist-trailing-slash/test/foo | grep Hi -RUN sh -c "mkdir /existing-directory" #sh -c is needed on Windows to use the correct mkdir -ADD test.tar /existing-directory -RUN cat /existing-directory/test/foo | grep Hi -ADD test.tar /existing-directory-trailing-slash/ -RUN cat /existing-directory-trailing-slash/test/foo | grep Hi` - tmpDir, err := ioutil.TempDir("", "fake-context") - c.Assert(err, check.IsNil) - testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) - if err != nil { - c.Fatalf("failed to create test.tar archive: %v", err) - } - defer testTar.Close() - - tw := tar.NewWriter(testTar) - - if err := tw.WriteHeader(&tar.Header{ - Name: "test/foo", - Size: 2, - }); err != nil { - c.Fatalf("failed to write tar file header: %v", err) - } - if _, err := tw.Write([]byte("Hi")); err != nil { - c.Fatalf("failed to write tar file content: %v", err) - } - if err := tw.Close(); err != nil { - c.Fatalf("failed to close tar archive: %v", err) - } - - if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { - c.Fatalf("failed to open destination dockerfile: %v", err) - } - return fakeContextFromDir(tmpDir) - }() - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("build failed to complete for TestBuildAddTar: %v", err) - } - -} - -func (s *DockerSuite) TestBuildAddBrokenTar(c *check.C) { - name := "testbuildaddbrokentar" - - ctx := func() *FakeContext { - dockerfile := ` -FROM busybox -ADD test.tar /` - tmpDir, err := ioutil.TempDir("", "fake-context") - c.Assert(err, check.IsNil) - testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) - if err != nil { - c.Fatalf("failed to create test.tar archive: %v", err) - } - defer testTar.Close() - - tw := tar.NewWriter(testTar) - - if err := tw.WriteHeader(&tar.Header{ - Name: "test/foo", - Size: 2, - }); err != nil { - c.Fatalf("failed to write tar file header: %v", err) - } - if _, err := tw.Write([]byte("Hi")); err != nil { - c.Fatalf("failed to write tar file content: %v", err) - } - if err := tw.Close(); err != nil { - c.Fatalf("failed to close tar archive: %v", err) - } - - // Corrupt the tar by removing one byte off the end - stat, err := testTar.Stat() - if err != nil { - c.Fatalf("failed to stat tar archive: %v", err) - } - if err := testTar.Truncate(stat.Size() - 1); err != nil { - c.Fatalf("failed to truncate tar archive: %v", err) - } - - if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { - c.Fatalf("failed to open destination dockerfile: %v", err) - } - return fakeContextFromDir(tmpDir) - }() - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err == nil { - c.Fatalf("build should have failed for TestBuildAddBrokenTar") - } -} - -func (s *DockerSuite) TestBuildAddNonTar(c *check.C) { - name := 
"testbuildaddnontar" - - // Should not try to extract test.tar - ctx, err := fakeContext(` - FROM busybox - ADD test.tar / - RUN test -f /test.tar`, - map[string]string{"test.tar": "not_a_tar_file"}) - - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("build failed for TestBuildAddNonTar") - } -} - -func (s *DockerSuite) TestBuildAddTarXz(c *check.C) { - // /test/foo is not owned by the correct user - testRequires(c, NotUserNamespace) - testRequires(c, DaemonIsLinux) - name := "testbuildaddtarxz" - - ctx := func() *FakeContext { - dockerfile := ` - FROM busybox - ADD test.tar.xz / - RUN cat /test/foo | grep Hi` - tmpDir, err := ioutil.TempDir("", "fake-context") - c.Assert(err, check.IsNil) - testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) - if err != nil { - c.Fatalf("failed to create test.tar archive: %v", err) - } - defer testTar.Close() - - tw := tar.NewWriter(testTar) - - if err := tw.WriteHeader(&tar.Header{ - Name: "test/foo", - Size: 2, - }); err != nil { - c.Fatalf("failed to write tar file header: %v", err) - } - if _, err := tw.Write([]byte("Hi")); err != nil { - c.Fatalf("failed to write tar file content: %v", err) - } - if err := tw.Close(); err != nil { - c.Fatalf("failed to close tar archive: %v", err) - } - - xzCompressCmd := exec.Command("xz", "-k", "test.tar") - xzCompressCmd.Dir = tmpDir - out, _, err := runCommandWithOutput(xzCompressCmd) - if err != nil { - c.Fatal(err, out) - } - - if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { - c.Fatalf("failed to open destination dockerfile: %v", err) - } - return fakeContextFromDir(tmpDir) - }() - - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) - } - -} - -func (s *DockerSuite) TestBuildAddTarXzGz(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildaddtarxzgz" - - ctx := func() *FakeContext { - dockerfile := ` - FROM busybox - ADD test.tar.xz.gz / - RUN ls /test.tar.xz.gz` - tmpDir, err := ioutil.TempDir("", "fake-context") - c.Assert(err, check.IsNil) - testTar, err := os.Create(filepath.Join(tmpDir, "test.tar")) - if err != nil { - c.Fatalf("failed to create test.tar archive: %v", err) - } - defer testTar.Close() - - tw := tar.NewWriter(testTar) - - if err := tw.WriteHeader(&tar.Header{ - Name: "test/foo", - Size: 2, - }); err != nil { - c.Fatalf("failed to write tar file header: %v", err) - } - if _, err := tw.Write([]byte("Hi")); err != nil { - c.Fatalf("failed to write tar file content: %v", err) - } - if err := tw.Close(); err != nil { - c.Fatalf("failed to close tar archive: %v", err) - } - - xzCompressCmd := exec.Command("xz", "-k", "test.tar") - xzCompressCmd.Dir = tmpDir - out, _, err := runCommandWithOutput(xzCompressCmd) - if err != nil { - c.Fatal(err, out) - } - - gzipCompressCmd := exec.Command("gzip", "test.tar.xz") - gzipCompressCmd.Dir = tmpDir - out, _, err = runCommandWithOutput(gzipCompressCmd) - if err != nil { - c.Fatal(err, out) - } - - if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { - c.Fatalf("failed to open destination dockerfile: %v", err) - } - return fakeContextFromDir(tmpDir) - }() - - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("build failed to complete for TestBuildAddTarXz: %v", err) - } - -} - -func (s 
-func (s *DockerSuite) TestBuildFromGIT(c *check.C) { - name := "testbuildfromgit" - git, err := newFakeGit("repo", map[string]string{ - "Dockerfile": `FROM busybox - ADD first /first - RUN [ -f /first ] - MAINTAINER docker`, - "first": "test git data", - }, true) - if err != nil { - c.Fatal(err) - } - defer git.Close() - - _, err = buildImageFromPath(name, git.RepoURL, true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Author") - if res != "docker" { - c.Fatalf("Maintainer should be docker, got %s", res) - } -} - -func (s *DockerSuite) TestBuildFromGITWithContext(c *check.C) { - name := "testbuildfromgit" - git, err := newFakeGit("repo", map[string]string{ - "docker/Dockerfile": `FROM busybox - ADD first /first - RUN [ -f /first ] - MAINTAINER docker`, - "docker/first": "test git data", - }, true) - if err != nil { - c.Fatal(err) - } - defer git.Close() - - u := fmt.Sprintf("%s#master:docker", git.RepoURL) - _, err = buildImageFromPath(name, u, true) - if err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Author") - if res != "docker" { - c.Fatalf("Maintainer should be docker, got %s", res) - } -} - -func (s *DockerSuite) TestBuildFromGITwithF(c *check.C) { - name := "testbuildfromgitwithf" - git, err := newFakeGit("repo", map[string]string{ - "myApp/myDockerfile": `FROM busybox - RUN echo hi from Dockerfile`, - }, true) - if err != nil { - c.Fatal(err) - } - defer git.Close() - - out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "myApp/myDockerfile", git.RepoURL) - if err != nil { - c.Fatalf("Error on build. Out: %s\nErr: %v", out, err) - } - - if !strings.Contains(out, "hi from Dockerfile") { - c.Fatalf("Missing expected output, got:\n%s", out) - } -} - -func (s *DockerSuite) TestBuildFromRemoteTarball(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildfromremotetarball" - - buffer := new(bytes.Buffer) - tw := tar.NewWriter(buffer) - defer tw.Close() - - dockerfile := []byte(`FROM busybox - MAINTAINER docker`) - if err := tw.WriteHeader(&tar.Header{ - Name: "Dockerfile", - Size: int64(len(dockerfile)), - }); err != nil { - c.Fatalf("failed to write tar file header: %v", err) - } - if _, err := tw.Write(dockerfile); err != nil { - c.Fatalf("failed to write tar file content: %v", err) - } - if err := tw.Close(); err != nil { - c.Fatalf("failed to close tar archive: %v", err) - } - - server, err := fakeBinaryStorage(map[string]*bytes.Buffer{ - "testT.tar": buffer, - }) - c.Assert(err, check.IsNil) - - defer server.Close() - - _, err = buildImageFromPath(name, server.URL()+"/testT.tar", true) - c.Assert(err, check.IsNil) - - res := inspectField(c, name, "Author") - - if res != "docker" { - c.Fatalf("Maintainer should be docker, got %s", res) - } -} - -func (s *DockerSuite) TestBuildCleanupCmdOnEntrypoint(c *check.C) { - name := "testbuildcmdcleanuponentrypoint" - if _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - CMD ["test"] - ENTRYPOINT ["echo"]`, - true); err != nil { - c.Fatal(err) - } - if _, err := buildImage(name, - fmt.Sprintf(`FROM %s - ENTRYPOINT ["cat"]`, name), - true); err != nil { - c.Fatal(err) - } - res := inspectField(c, name, "Config.Cmd") - if res != "[]" { - c.Fatalf("Cmd %s, expected nil", res) - } - - res = inspectField(c, name, "Config.Entrypoint") - if expected := "[cat]"; res != expected { - c.Fatalf("Entrypoint %s, expected %s", res, expected) - } -} - -func (s *DockerSuite) TestBuildClearCmd(c *check.C) { - name := "testbuildclearcmd" - _, err := buildImage(name, - `From 
`+minimalBaseImage()+` - ENTRYPOINT ["/bin/bash"] - CMD []`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectFieldJSON(c, name, "Config.Cmd") - if res != "[]" { - c.Fatalf("Cmd %s, expected %s", res, "[]") - } -} - -func (s *DockerSuite) TestBuildEmptyCmd(c *check.C) { - // Windows Server 2016 RS1 builds load the windowsservercore image from a tar rather than - // a .WIM file, and the tar layer has the default CMD set (same as the Linux ubuntu image), - // where-as the TP5 .WIM had a blank CMD. Hence this test is not applicable on RS1 or later - // builds - if daemonPlatform == "windows" && windowsDaemonKV >= 14375 { - c.Skip("Not applicable on Windows RS1 or later builds") - } - - name := "testbuildemptycmd" - if _, err := buildImage(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true); err != nil { - c.Fatal(err) - } - res := inspectFieldJSON(c, name, "Config.Cmd") - if res != "null" { - c.Fatalf("Cmd %s, expected %s", res, "null") - } -} - -func (s *DockerSuite) TestBuildOnBuildOutput(c *check.C) { - name := "testbuildonbuildparent" - if _, err := buildImage(name, "FROM busybox\nONBUILD RUN echo foo\n", true); err != nil { - c.Fatal(err) - } - - _, out, err := buildImageWithOut(name, "FROM "+name+"\nMAINTAINER quux\n", true) - if err != nil { - c.Fatal(err) - } - - if !strings.Contains(out, "# Executing 1 build trigger") { - c.Fatal("failed to find the build trigger output", out) - } -} - -func (s *DockerSuite) TestBuildInvalidTag(c *check.C) { - name := "abcd:" + stringutils.GenerateRandomAlphaOnlyString(200) - _, out, err := buildImageWithOut(name, "FROM "+minimalBaseImage()+"\nMAINTAINER quux\n", true) - // if the error doesn't check for illegal tag name, or the image is built - // then this should fail - if !strings.Contains(out, "Error parsing reference") || strings.Contains(out, "Sending build context to Docker daemon") { - c.Fatalf("failed to stop before building. 
Error: %s, Output: %s", err, out) - } -} - -func (s *DockerSuite) TestBuildCmdShDashC(c *check.C) { - name := "testbuildcmdshc" - if _, err := buildImage(name, "FROM busybox\nCMD echo cmd\n", true); err != nil { - c.Fatal(err) - } - - res := inspectFieldJSON(c, name, "Config.Cmd") - - expected := `["/bin/sh","-c","echo cmd"]` - if daemonPlatform == "windows" { - expected = `["cmd","/S","/C","echo cmd"]` - } - - if res != expected { - c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) - } - -} - -func (s *DockerSuite) TestBuildCmdSpaces(c *check.C) { - // Test to make sure that when we strcat arrays we take into account - // the arg separator to make sure ["echo","hi"] and ["echo hi"] don't - // look the same - name := "testbuildcmdspaces" - var id1 string - var id2 string - var err error - - if id1, err = buildImage(name, "FROM busybox\nCMD [\"echo hi\"]\n", true); err != nil { - c.Fatal(err) - } - - if id2, err = buildImage(name, "FROM busybox\nCMD [\"echo\", \"hi\"]\n", true); err != nil { - c.Fatal(err) - } - - if id1 == id2 { - c.Fatal("Should not have resulted in the same CMD") - } - - // Now do the same with ENTRYPOINT - if id1, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo hi\"]\n", true); err != nil { - c.Fatal(err) - } - - if id2, err = buildImage(name, "FROM busybox\nENTRYPOINT [\"echo\", \"hi\"]\n", true); err != nil { - c.Fatal(err) - } - - if id1 == id2 { - c.Fatal("Should not have resulted in the same ENTRYPOINT") - } - -} - -func (s *DockerSuite) TestBuildCmdJSONNoShDashC(c *check.C) { - name := "testbuildcmdjson" - if _, err := buildImage(name, "FROM busybox\nCMD [\"echo\", \"cmd\"]", true); err != nil { - c.Fatal(err) - } - - res := inspectFieldJSON(c, name, "Config.Cmd") - - expected := `["echo","cmd"]` - - if res != expected { - c.Fatalf("Expected value %s not in Config.Cmd: %s", expected, res) - } - -} - -func (s *DockerSuite) TestBuildEntrypointInheritance(c *check.C) { - - if _, err := buildImage("parent", ` - FROM busybox - ENTRYPOINT exit 130 - `, true); err != nil { - c.Fatal(err) - } - - if _, status, _ := dockerCmdWithError("run", "parent"); status != 130 { - c.Fatalf("expected exit code 130 but received %d", status) - } - - if _, err := buildImage("child", ` - FROM parent - ENTRYPOINT exit 5 - `, true); err != nil { - c.Fatal(err) - } - - if _, status, _ := dockerCmdWithError("run", "child"); status != 5 { - c.Fatalf("expected exit code 5 but received %d", status) - } - -} - -func (s *DockerSuite) TestBuildEntrypointInheritanceInspect(c *check.C) { - var ( - name = "testbuildepinherit" - name2 = "testbuildepinherit2" - expected = `["/bin/sh","-c","echo quux"]` - ) - - if daemonPlatform == "windows" { - expected = `["cmd","/S","/C","echo quux"]` - } - - if _, err := buildImage(name, "FROM busybox\nENTRYPOINT /foo/bar", true); err != nil { - c.Fatal(err) - } - - if _, err := buildImage(name2, fmt.Sprintf("FROM %s\nENTRYPOINT echo quux", name), true); err != nil { - c.Fatal(err) - } - - res := inspectFieldJSON(c, name2, "Config.Entrypoint") - - if res != expected { - c.Fatalf("Expected value %s not in Config.Entrypoint: %s", expected, res) - } - - out, _ := dockerCmd(c, "run", name2) - - expected = "quux" - - if strings.TrimSpace(out) != expected { - c.Fatalf("Expected output is %s, got %s", expected, out) - } - -} - -func (s *DockerSuite) TestBuildRunShEntrypoint(c *check.C) { - name := "testbuildentrypoint" - _, err := buildImage(name, - `FROM busybox - ENTRYPOINT echo`, - true) - if err != nil { - c.Fatal(err) - } - - dockerCmd(c, 
"run", "--rm", name) -} - -func (s *DockerSuite) TestBuildExoticShellInterpolation(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildexoticshellinterpolation" - - _, err := buildImage(name, ` - FROM busybox - - ENV SOME_VAR a.b.c - - RUN [ "$SOME_VAR" = 'a.b.c' ] - RUN [ "${SOME_VAR}" = 'a.b.c' ] - RUN [ "${SOME_VAR%.*}" = 'a.b' ] - RUN [ "${SOME_VAR%%.*}" = 'a' ] - RUN [ "${SOME_VAR#*.}" = 'b.c' ] - RUN [ "${SOME_VAR##*.}" = 'c' ] - RUN [ "${SOME_VAR/c/d}" = 'a.b.d' ] - RUN [ "${#SOME_VAR}" = '5' ] - - RUN [ "${SOME_UNSET_VAR:-$SOME_VAR}" = 'a.b.c' ] - RUN [ "${SOME_VAR:+Version: ${SOME_VAR}}" = 'Version: a.b.c' ] - RUN [ "${SOME_UNSET_VAR:+${SOME_VAR}}" = '' ] - RUN [ "${SOME_UNSET_VAR:-${SOME_VAR:-d.e.f}}" = 'a.b.c' ] - `, false) - if err != nil { - c.Fatal(err) - } - -} - -func (s *DockerSuite) TestBuildVerifySingleQuoteFails(c *check.C) { - // This testcase is supposed to generate an error because the - // JSON array we're passing in on the CMD uses single quotes instead - // of double quotes (per the JSON spec). This means we interpret it - // as a "string" instead of "JSON array" and pass it on to "sh -c" and - // it should barf on it. - name := "testbuildsinglequotefails" - - if _, err := buildImage(name, - `FROM busybox - CMD [ '/bin/sh', '-c', 'echo hi' ]`, - true); err != nil { - c.Fatal(err) - } - - if _, _, err := dockerCmdWithError("run", "--rm", name); err == nil { - c.Fatal("The image was not supposed to be able to run") - } - -} - -func (s *DockerSuite) TestBuildVerboseOut(c *check.C) { - name := "testbuildverboseout" - expected := "\n123\n" - - if daemonPlatform == "windows" { - expected = "\n123\r\n" - } - - _, out, err := buildImageWithOut(name, - `FROM busybox -RUN echo 123`, - false) - - if err != nil { - c.Fatal(err) - } - if !strings.Contains(out, expected) { - c.Fatalf("Output should contain %q: %q", "123", out) - } - -} - -func (s *DockerSuite) TestBuildWithTabs(c *check.C) { - name := "testbuildwithtabs" - _, err := buildImage(name, - "FROM busybox\nRUN echo\tone\t\ttwo", true) - if err != nil { - c.Fatal(err) - } - res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") - expected1 := `["/bin/sh","-c","echo\tone\t\ttwo"]` - expected2 := `["/bin/sh","-c","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates - if daemonPlatform == "windows" { - expected1 = `["cmd","/S","/C","echo\tone\t\ttwo"]` - expected2 = `["cmd","/S","/C","echo\u0009one\u0009\u0009two"]` // syntactically equivalent, and what Go 1.3 generates - } - if res != expected1 && res != expected2 { - c.Fatalf("Missing tabs.\nGot: %s\nExp: %s or %s", res, expected1, expected2) - } -} - -func (s *DockerSuite) TestBuildLabels(c *check.C) { - name := "testbuildlabel" - expected := `{"License":"GPL","Vendor":"Acme"}` - _, err := buildImage(name, - `FROM busybox - LABEL Vendor=Acme - LABEL License GPL`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectFieldJSON(c, name, "Config.Labels") - if res != expected { - c.Fatalf("Labels %s, expected %s", res, expected) - } -} - -func (s *DockerSuite) TestBuildLabelsCache(c *check.C) { - name := "testbuildlabelcache" - - id1, err := buildImage(name, - `FROM busybox - LABEL Vendor=Acme`, false) - if err != nil { - c.Fatalf("Build 1 should have worked: %v", err) - } - - id2, err := buildImage(name, - `FROM busybox - LABEL Vendor=Acme`, true) - if err != nil || id1 != id2 { - c.Fatalf("Build 2 should have worked & used cache(%s,%s): %v", id1, id2, err) - } - - id2, err = buildImage(name, - `FROM busybox - 
LABEL Vendor=Acme1`, true) - if err != nil || id1 == id2 { - c.Fatalf("Build 3 should have worked & NOT used cache(%s,%s): %v", id1, id2, err) - } - - id2, err = buildImage(name, - `FROM busybox - LABEL Vendor Acme`, true) // Note: " " and "=" should be same - if err != nil || id1 != id2 { - c.Fatalf("Build 4 should have worked & used cache(%s,%s): %v", id1, id2, err) - } - - // Now make sure the cache isn't used by mistake - id1, err = buildImage(name, - `FROM busybox - LABEL f1=b1 f2=b2`, false) - if err != nil { - c.Fatalf("Build 5 should have worked: %q", err) - } - - id2, err = buildImage(name, - `FROM busybox - LABEL f1="b1 f2=b2"`, true) - if err != nil || id1 == id2 { - c.Fatalf("Build 6 should have worked & NOT used the cache(%s,%s): %q", id1, id2, err) - } - -} - -func (s *DockerSuite) TestBuildNotVerboseSuccess(c *check.C) { - // This test makes sure that -q works correctly when build is successful: - // stdout has only the image ID (long image ID) and stderr is empty. - var stdout, stderr string - var err error - outRegexp := regexp.MustCompile("^(sha256:|)[a-z0-9]{64}\\n$") - - tt := []struct { - Name string - BuildFunc func(string) - }{ - { - Name: "quiet_build_stdin_success", - BuildFunc: func(name string) { - _, stdout, stderr, err = buildImageWithStdoutStderr(name, "FROM busybox", true, "-q", "--force-rm", "--rm") - }, - }, - { - Name: "quiet_build_ctx_success", - BuildFunc: func(name string) { - ctx, err := fakeContext("FROM busybox", map[string]string{ - "quiet_build_success_fctx": "test", - }) - if err != nil { - c.Fatalf("Failed to create context: %s", err.Error()) - } - defer ctx.Close() - _, stdout, stderr, err = buildImageFromContextWithStdoutStderr(name, ctx, true, "-q", "--force-rm", "--rm") - }, - }, - { - Name: "quiet_build_git_success", - BuildFunc: func(name string) { - git, err := newFakeGit("repo", map[string]string{ - "Dockerfile": "FROM busybox", - }, true) - if err != nil { - c.Fatalf("Failed to create the git repo: %s", err.Error()) - } - defer git.Close() - _, stdout, stderr, err = buildImageFromGitWithStdoutStderr(name, git, true, "-q", "--force-rm", "--rm") - - }, - }, - } - - for _, te := range tt { - te.BuildFunc(te.Name) - if err != nil { - c.Fatalf("Test %s shouldn't fail, but got the following error: %s", te.Name, err.Error()) - } - if outRegexp.Find([]byte(stdout)) == nil { - c.Fatalf("Test %s expected stdout to match the [%v] regexp, but it is [%v]", te.Name, outRegexp, stdout) - } - if runtime.GOOS == "windows" { - // stderr contains a security warning on Windows if the daemon isn't Windows - lines := strings.Split(stderr, "\n") - warningCount := 0 - for _, v := range lines { - warningText := "SECURITY WARNING: You are building a Docker image from Windows against a non-Windows Docker host." 
- if strings.Contains(v, warningText) { - warningCount++ - } - if v != "" && !strings.Contains(v, warningText) { - c.Fatalf("Stderr contains unexpected output line: %q", v) - } - } - if warningCount != 1 && daemonPlatform != "windows" { - c.Fatalf("Test %s didn't get security warning running from Windows to non-Windows", te.Name) - } - } else { - if stderr != "" { - c.Fatalf("Test %s expected stderr to be empty, but it is [%#v]", te.Name, stderr) - } - } - } - -} - -func (s *DockerSuite) TestBuildNotVerboseFailureWithNonExistImage(c *check.C) { - // This test makes sure that -q works correctly when build fails by - // comparing between the stderr output in quiet mode and in stdout - // and stderr output in verbose mode - testRequires(c, Network) - testName := "quiet_build_not_exists_image" - buildCmd := "FROM busybox11" - _, _, qstderr, qerr := buildImageWithStdoutStderr(testName, buildCmd, false, "-q", "--force-rm", "--rm") - _, vstdout, vstderr, verr := buildImageWithStdoutStderr(testName, buildCmd, false, "--force-rm", "--rm") - if verr == nil || qerr == nil { - c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", testName)) - } - if qstderr != vstdout+vstderr { - c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", testName, qstderr, vstdout+vstderr)) - } -} - -func (s *DockerSuite) TestBuildNotVerboseFailure(c *check.C) { - // This test makes sure that -q works correctly when build fails by - // comparing between the stderr output in quiet mode and in stdout - // and stderr output in verbose mode - tt := []struct { - TestName string - BuildCmds string - }{ - {"quiet_build_no_from_at_the_beginning", "RUN whoami"}, - {"quiet_build_unknown_instr", "FROMD busybox"}, - } - - for _, te := range tt { - _, _, qstderr, qerr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "-q", "--force-rm", "--rm") - _, vstdout, vstderr, verr := buildImageWithStdoutStderr(te.TestName, te.BuildCmds, false, "--force-rm", "--rm") - if verr == nil || qerr == nil { - c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", te.TestName)) - } - if qstderr != vstdout+vstderr { - c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", te.TestName, qstderr, vstdout+vstderr)) - } - } -} - -func (s *DockerSuite) TestBuildNotVerboseFailureRemote(c *check.C) { - // This test ensures that when given a wrong URL, stderr in quiet mode and - // stderr in verbose mode are identical. 
- // TODO(vdemeester) with cobra, stdout has a carriage return too much so this test should not check stdout - URL := "http://something.invalid" - Name := "quiet_build_wrong_remote" - _, _, qstderr, qerr := buildImageWithStdoutStderr(Name, "", false, "-q", "--force-rm", "--rm", URL) - _, _, vstderr, verr := buildImageWithStdoutStderr(Name, "", false, "--force-rm", "--rm", URL) - if qerr == nil || verr == nil { - c.Fatal(fmt.Errorf("Test [%s] expected to fail but didn't", Name)) - } - if qstderr != vstderr { - c.Fatal(fmt.Errorf("Test[%s] expected that quiet stderr and verbose stdout are equal; quiet [%v], verbose [%v]", Name, qstderr, vstderr)) - } -} - -func (s *DockerSuite) TestBuildStderr(c *check.C) { - // This test just makes sure that no non-error output goes - // to stderr - name := "testbuildstderr" - _, _, stderr, err := buildImageWithStdoutStderr(name, - "FROM busybox\nRUN echo one", true) - if err != nil { - c.Fatal(err) - } - - if runtime.GOOS == "windows" { - // stderr might contain a security warning on windows - lines := strings.Split(stderr, "\n") - for _, v := range lines { - if v != "" && !strings.Contains(v, "SECURITY WARNING:") { - c.Fatalf("Stderr contains unexpected output line: %q", v) - } - } - } else { - if stderr != "" { - c.Fatalf("Stderr should have been empty, instead its: %q", stderr) - } - } -} - -func (s *DockerSuite) TestBuildChownSingleFile(c *check.C) { - testRequires(c, UnixCli) // test uses chown: not available on windows - testRequires(c, DaemonIsLinux) - - name := "testbuildchownsinglefile" - - ctx, err := fakeContext(` -FROM busybox -COPY test / -RUN ls -l /test -RUN [ $(ls -l /test | awk '{print $3":"$4}') = 'root:root' ] -`, map[string]string{ - "test": "test", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if err := os.Chown(filepath.Join(ctx.Dir, "test"), 4242, 4242); err != nil { - c.Fatal(err) - } - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - -} - -func (s *DockerSuite) TestBuildSymlinkBreakout(c *check.C) { - name := "testbuildsymlinkbreakout" - tmpdir, err := ioutil.TempDir("", name) - c.Assert(err, check.IsNil) - defer os.RemoveAll(tmpdir) - ctx := filepath.Join(tmpdir, "context") - if err := os.MkdirAll(ctx, 0755); err != nil { - c.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte(` - from busybox - add symlink.tar / - add inject /symlink/ - `), 0644); err != nil { - c.Fatal(err) - } - inject := filepath.Join(ctx, "inject") - if err := ioutil.WriteFile(inject, nil, 0644); err != nil { - c.Fatal(err) - } - f, err := os.Create(filepath.Join(ctx, "symlink.tar")) - if err != nil { - c.Fatal(err) - } - w := tar.NewWriter(f) - w.WriteHeader(&tar.Header{ - Name: "symlink2", - Typeflag: tar.TypeSymlink, - Linkname: "/../../../../../../../../../../../../../../", - Uid: os.Getuid(), - Gid: os.Getgid(), - }) - w.WriteHeader(&tar.Header{ - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: filepath.Join("symlink2", tmpdir), - Uid: os.Getuid(), - Gid: os.Getgid(), - }) - w.Close() - f.Close() - if _, err := buildImageFromContext(name, fakeContextFromDir(ctx), false); err != nil { - c.Fatal(err) - } - if _, err := os.Lstat(filepath.Join(tmpdir, "inject")); err == nil { - c.Fatal("symlink breakout - inject") - } else if !os.IsNotExist(err) { - c.Fatalf("unexpected error: %v", err) - } -} - -func (s *DockerSuite) TestBuildXZHost(c *check.C) { - // /usr/local/sbin/xz gets permission denied for the user - testRequires(c, NotUserNamespace) - 
testRequires(c, DaemonIsLinux) - name := "testbuildxzhost" - - ctx, err := fakeContext(` -FROM busybox -ADD xz /usr/local/sbin/ -RUN chmod 755 /usr/local/sbin/xz -ADD test.xz / -RUN [ ! -e /injected ]`, - map[string]string{ - "test.xz": "\xfd\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00" + - "\x21\x01\x16\x00\x00\x00\x74\x2f\xe5\xa3\x01\x00\x3f\xfd" + - "\x37\x7a\x58\x5a\x00\x00\x04\xe6\xd6\xb4\x46\x02\x00\x21", - "xz": "#!/bin/sh\ntouch /injected", - }) - - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } - -} - -func (s *DockerSuite) TestBuildVolumesRetainContents(c *check.C) { - // /foo/file gets permission denied for the user - testRequires(c, NotUserNamespace) - testRequires(c, DaemonIsLinux) // TODO Windows: Issue #20127 - var ( - name = "testbuildvolumescontent" - expected = "some text" - volName = "/foo" - ) - - if daemonPlatform == "windows" { - volName = "C:/foo" - } - - ctx, err := fakeContext(` -FROM busybox -COPY content /foo/file -VOLUME `+volName+` -CMD cat /foo/file`, - map[string]string{ - "content": expected, - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, false); err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", "--rm", name) - if out != expected { - c.Fatalf("expected file contents for /foo/file to be %q but received %q", expected, out) - } - -} - -func (s *DockerSuite) TestBuildRenamedDockerfile(c *check.C) { - - ctx, err := fakeContext(`FROM busybox - RUN echo from Dockerfile`, - map[string]string{ - "Dockerfile": "FROM busybox\nRUN echo from Dockerfile", - "files/Dockerfile": "FROM busybox\nRUN echo from files/Dockerfile", - "files/dFile": "FROM busybox\nRUN echo from files/dFile", - "dFile": "FROM busybox\nRUN echo from dFile", - "files/dFile2": "FROM busybox\nRUN echo from files/dFile2", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") - if err != nil { - c.Fatalf("Failed to build: %s\n%s", out, err) - } - if !strings.Contains(out, "from Dockerfile") { - c.Fatalf("test1 should have used Dockerfile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "-f", filepath.Join("files", "Dockerfile"), "-t", "test2", ".") - if err != nil { - c.Fatal(err) - } - if !strings.Contains(out, "from files/Dockerfile") { - c.Fatalf("test2 should have used files/Dockerfile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", filepath.Join("files", "dFile")), "-t", "test3", ".") - if err != nil { - c.Fatal(err) - } - if !strings.Contains(out, "from files/dFile") { - c.Fatalf("test3 should have used files/dFile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--file=dFile", "-t", "test4", ".") - if err != nil { - c.Fatal(err) - } - if !strings.Contains(out, "from dFile") { - c.Fatalf("test4 should have used dFile, output:%s", out) - } - - dirWithNoDockerfile, err := ioutil.TempDir(os.TempDir(), "test5") - c.Assert(err, check.IsNil) - nonDockerfileFile := filepath.Join(dirWithNoDockerfile, "notDockerfile") - if _, err = os.Create(nonDockerfileFile); err != nil { - c.Fatal(err) - } - out, _, err = dockerCmdInDir(c, ctx.Dir, "build", fmt.Sprintf("--file=%s", nonDockerfileFile), "-t", "test5", ".") - - if err == nil { - c.Fatalf("test5 was supposed to fail to find passwd") - } - - if expected := fmt.Sprintf("The Dockerfile (%s) 
must be within the build context (.)", nonDockerfileFile); !strings.Contains(out, expected) { - c.Fatalf("wrong error message:%v\nexpected to contain=%v", out, expected) - } - - out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test6", "..") - if err != nil { - c.Fatalf("test6 failed: %s", err) - } - if !strings.Contains(out, "from Dockerfile") { - c.Fatalf("test6 should have used root Dockerfile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join(ctx.Dir, "files", "Dockerfile"), "-t", "test7", "..") - if err != nil { - c.Fatalf("test7 failed: %s", err) - } - if !strings.Contains(out, "from files/Dockerfile") { - c.Fatalf("test7 should have used files Dockerfile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", filepath.Join("..", "Dockerfile"), "-t", "test8", ".") - if err == nil || !strings.Contains(out, "must be within the build context") { - c.Fatalf("test8 should have failed with Dockerfile out of context: %s", err) - } - - tmpDir := os.TempDir() - out, _, err = dockerCmdInDir(c, tmpDir, "build", "-t", "test9", ctx.Dir) - if err != nil { - c.Fatalf("test9 - failed: %s", err) - } - if !strings.Contains(out, "from Dockerfile") { - c.Fatalf("test9 should have used root Dockerfile, output:%s", out) - } - - out, _, err = dockerCmdInDir(c, filepath.Join(ctx.Dir, "files"), "build", "-f", "dFile2", "-t", "test10", ".") - if err != nil { - c.Fatalf("test10 should have worked: %s", err) - } - if !strings.Contains(out, "from files/dFile2") { - c.Fatalf("test10 should have used files/dFile2, output:%s", out) - } - -} - -func (s *DockerSuite) TestBuildFromMixedcaseDockerfile(c *check.C) { - testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows - testRequires(c, DaemonIsLinux) - - ctx, err := fakeContext(`FROM busybox - RUN echo from dockerfile`, - map[string]string{ - "dockerfile": "FROM busybox\nRUN echo from dockerfile", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") - if err != nil { - c.Fatalf("Failed to build: %s\n%s", out, err) - } - - if !strings.Contains(out, "from dockerfile") { - c.Fatalf("Missing proper output: %s", out) - } - -} - -func (s *DockerSuite) TestBuildWithTwoDockerfiles(c *check.C) { - testRequires(c, UnixCli) // Dockerfile overwrites dockerfile on windows - testRequires(c, DaemonIsLinux) - - ctx, err := fakeContext(`FROM busybox -RUN echo from Dockerfile`, - map[string]string{ - "dockerfile": "FROM busybox\nRUN echo from dockerfile", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "-t", "test1", ".") - if err != nil { - c.Fatalf("Failed to build: %s\n%s", out, err) - } - - if !strings.Contains(out, "from Dockerfile") { - c.Fatalf("Missing proper output: %s", out) - } - -} - -func (s *DockerSuite) TestBuildFromURLWithF(c *check.C) { - testRequires(c, DaemonIsLinux) - - server, err := fakeStorage(map[string]string{"baz": `FROM busybox -RUN echo from baz -COPY * /tmp/ -RUN find /tmp/`}) - if err != nil { - c.Fatal(err) - } - defer server.Close() - - ctx, err := fakeContext(`FROM busybox -RUN echo from Dockerfile`, - map[string]string{}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - // Make sure that -f is ignored and that we don't use the Dockerfile - // that's in the current dir - out, _, err := 
dockerCmdInDir(c, ctx.Dir, "build", "-f", "baz", "-t", "test1", server.URL()+"/baz") - if err != nil { - c.Fatalf("Failed to build: %s\n%s", out, err) - } - - if !strings.Contains(out, "from baz") || - strings.Contains(out, "/tmp/baz") || - !strings.Contains(out, "/tmp/Dockerfile") { - c.Fatalf("Missing proper output: %s", out) - } - -} - -func (s *DockerSuite) TestBuildFromStdinWithF(c *check.C) { - testRequires(c, DaemonIsLinux) // TODO Windows: This test is flaky; no idea why - ctx, err := fakeContext(`FROM busybox -RUN echo "from Dockerfile"`, - map[string]string{}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - // Make sure that -f is ignored and that we don't use the Dockerfile - // that's in the current dir - dockerCommand := exec.Command(dockerBinary, "build", "-f", "baz", "-t", "test1", "-") - dockerCommand.Dir = ctx.Dir - dockerCommand.Stdin = strings.NewReader(`FROM busybox -RUN echo "from baz" -COPY * /tmp/ -RUN sh -c "find /tmp/" # sh -c is needed on Windows to use the correct find`) - out, status, err := runCommandWithOutput(dockerCommand) - if err != nil || status != 0 { - c.Fatalf("Error building: %s", err) - } - - if !strings.Contains(out, "from baz") || - strings.Contains(out, "/tmp/baz") || - !strings.Contains(out, "/tmp/Dockerfile") { - c.Fatalf("Missing proper output: %s", out) - } - -} - -func (s *DockerSuite) TestBuildFromOfficialNames(c *check.C) { - name := "testbuildfromofficial" - fromNames := []string{ - "busybox", - "docker.io/busybox", - "index.docker.io/busybox", - "library/busybox", - "docker.io/library/busybox", - "index.docker.io/library/busybox", - } - for idx, fromName := range fromNames { - imgName := fmt.Sprintf("%s%d", name, idx) - _, err := buildImage(imgName, "FROM "+fromName, true) - if err != nil { - c.Errorf("Build failed using FROM %s: %s", fromName, err) - } - deleteImages(imgName) - } -} - -func (s *DockerSuite) TestBuildDockerfileOutsideContext(c *check.C) { - testRequires(c, UnixCli) // uses os.Symlink: not implemented in windows at the time of writing (go-1.4.2) - testRequires(c, DaemonIsLinux) - - name := "testbuilddockerfileoutsidecontext" - tmpdir, err := ioutil.TempDir("", name) - c.Assert(err, check.IsNil) - defer os.RemoveAll(tmpdir) - ctx := filepath.Join(tmpdir, "context") - if err := os.MkdirAll(ctx, 0755); err != nil { - c.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(ctx, "Dockerfile"), []byte("FROM scratch\nENV X Y"), 0644); err != nil { - c.Fatal(err) - } - wd, err := os.Getwd() - if err != nil { - c.Fatal(err) - } - defer os.Chdir(wd) - if err := os.Chdir(ctx); err != nil { - c.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(tmpdir, "outsideDockerfile"), []byte("FROM scratch\nENV x y"), 0644); err != nil { - c.Fatal(err) - } - if err := os.Symlink(filepath.Join("..", "outsideDockerfile"), filepath.Join(ctx, "dockerfile1")); err != nil { - c.Fatal(err) - } - if err := os.Symlink(filepath.Join(tmpdir, "outsideDockerfile"), filepath.Join(ctx, "dockerfile2")); err != nil { - c.Fatal(err) - } - - for _, dockerfilePath := range []string{ - filepath.Join("..", "outsideDockerfile"), - filepath.Join(ctx, "dockerfile1"), - filepath.Join(ctx, "dockerfile2"), - } { - out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", dockerfilePath, ".") - if err == nil { - c.Fatalf("Expected error with %s. Out: %s", dockerfilePath, out) - } - if !strings.Contains(out, "must be within the build context") && !strings.Contains(out, "Cannot locate Dockerfile") { - c.Fatalf("Unexpected error with %s. 
Out: %s", dockerfilePath, out) - } - deleteImages(name) - } - - os.Chdir(tmpdir) - - // Path to Dockerfile should be resolved relative to working directory, not relative to context. - // There is a Dockerfile in the context, but since there is no Dockerfile in the current directory, the following should fail - out, _, err := dockerCmdWithError("build", "-t", name, "--no-cache", "-f", "Dockerfile", ctx) - if err == nil { - c.Fatalf("Expected error. Out: %s", out) - } -} - -func (s *DockerSuite) TestBuildSpaces(c *check.C) { - // Test to make sure that leading/trailing spaces on a command - // doesn't change the error msg we get - var ( - err1 error - err2 error - ) - - name := "testspaces" - ctx, err := fakeContext("FROM busybox\nCOPY\n", - map[string]string{ - "Dockerfile": "FROM busybox\nCOPY\n", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err1 = buildImageFromContext(name, ctx, false); err1 == nil { - c.Fatal("Build 1 was supposed to fail, but didn't") - } - - ctx.Add("Dockerfile", "FROM busybox\nCOPY ") - if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { - c.Fatal("Build 2 was supposed to fail, but didn't") - } - - removeLogTimestamps := func(s string) string { - return regexp.MustCompile(`time="(.*?)"`).ReplaceAllString(s, `time=[TIMESTAMP]`) - } - - // Skip over the times - e1 := removeLogTimestamps(err1.Error()) - e2 := removeLogTimestamps(err2.Error()) - - // Ignore whitespace since that's what were verifying doesn't change stuff - if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { - c.Fatalf("Build 2's error wasn't the same as build 1's\n1:%s\n2:%s", err1, err2) - } - - ctx.Add("Dockerfile", "FROM busybox\n COPY") - if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { - c.Fatal("Build 3 was supposed to fail, but didn't") - } - - // Skip over the times - e1 = removeLogTimestamps(err1.Error()) - e2 = removeLogTimestamps(err2.Error()) - - // Ignore whitespace since that's what were verifying doesn't change stuff - if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { - c.Fatalf("Build 3's error wasn't the same as build 1's\n1:%s\n3:%s", err1, err2) - } - - ctx.Add("Dockerfile", "FROM busybox\n COPY ") - if _, err2 = buildImageFromContext(name, ctx, false); err2 == nil { - c.Fatal("Build 4 was supposed to fail, but didn't") - } - - // Skip over the times - e1 = removeLogTimestamps(err1.Error()) - e2 = removeLogTimestamps(err2.Error()) - - // Ignore whitespace since that's what were verifying doesn't change stuff - if strings.Replace(e1, " ", "", -1) != strings.Replace(e2, " ", "", -1) { - c.Fatalf("Build 4's error wasn't the same as build 1's\n1:%s\n4:%s", err1, err2) - } - -} - -func (s *DockerSuite) TestBuildSpacesWithQuotes(c *check.C) { - testRequires(c, DaemonIsLinux) - // Test to make sure that spaces in quotes aren't lost - name := "testspacesquotes" - - dockerfile := `FROM busybox -RUN echo " \ - foo "` - - _, out, err := buildImageWithOut(name, dockerfile, false) - if err != nil { - c.Fatal("Build failed:", err) - } - - expecting := "\n foo \n" - if !strings.Contains(out, expecting) { - c.Fatalf("Bad output: %q expecting to contain %q", out, expecting) - } - -} - -// #4393 -func (s *DockerSuite) TestBuildVolumeFileExistsinContainer(c *check.C) { - testRequires(c, DaemonIsLinux) // TODO Windows: This should error out - buildCmd := exec.Command(dockerBinary, "build", "-t", "docker-test-errcreatevolumewithfile", "-") - buildCmd.Stdin = strings.NewReader(` - FROM busybox - RUN 
touch /foo - VOLUME /foo - `) - - out, _, err := runCommandWithOutput(buildCmd) - if err == nil || !strings.Contains(out, "file exists") { - c.Fatalf("expected build to fail when file exists in container at requested volume path") - } - -} - -func (s *DockerSuite) TestBuildMissingArgs(c *check.C) { - // Test to make sure that all Dockerfile commands (except the ones listed - // in skipCmds) will generate an error if no args are provided. - // Note: INSERT is deprecated so we exclude it because of that. - skipCmds := map[string]struct{}{ - "CMD": {}, - "RUN": {}, - "ENTRYPOINT": {}, - "INSERT": {}, - } - - if daemonPlatform == "windows" { - skipCmds = map[string]struct{}{ - "CMD": {}, - "RUN": {}, - "ENTRYPOINT": {}, - "INSERT": {}, - "STOPSIGNAL": {}, - "ARG": {}, - "USER": {}, - "EXPOSE": {}, - } - } - - for cmd := range command.Commands { - cmd = strings.ToUpper(cmd) - if _, ok := skipCmds[cmd]; ok { - continue - } - - var dockerfile string - if cmd == "FROM" { - dockerfile = cmd - } else { - // Add FROM to make sure we don't complain about it missing - dockerfile = "FROM busybox\n" + cmd - } - - ctx, err := fakeContext(dockerfile, map[string]string{}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - var out string - if out, err = buildImageFromContext("args", ctx, true); err == nil { - c.Fatalf("%s was supposed to fail. Out:%s", cmd, out) - } - if !strings.Contains(err.Error(), cmd+" requires") { - c.Fatalf("%s returned the wrong type of error:%s", cmd, err) - } - } - -} - -func (s *DockerSuite) TestBuildEmptyScratch(c *check.C) { - testRequires(c, DaemonIsLinux) - _, out, err := buildImageWithOut("sc", "FROM scratch", true) - if err == nil { - c.Fatalf("Build was supposed to fail") - } - if !strings.Contains(out, "No image was generated") { - c.Fatalf("Wrong error message: %v", out) - } -} - -func (s *DockerSuite) TestBuildDotDotFile(c *check.C) { - ctx, err := fakeContext("FROM busybox\n", - map[string]string{ - "..gitme": "", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err = buildImageFromContext("sc", ctx, false); err != nil { - c.Fatalf("Build was supposed to work: %s", err) - } -} - -func (s *DockerSuite) TestBuildRUNoneJSON(c *check.C) { - testRequires(c, DaemonIsLinux) // No hello-world Windows image - name := "testbuildrunonejson" - - ctx, err := fakeContext(`FROM hello-world:frozen -RUN [ "/hello" ]`, map[string]string{}) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - out, _, err := dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "-t", name, ".") - if err != nil { - c.Fatalf("failed to build the image: %s, %v", out, err) - } - - if !strings.Contains(out, "Hello from Docker") { - c.Fatalf("bad output: %s", out) - } - -} - -func (s *DockerSuite) TestBuildEmptyStringVolume(c *check.C) { - name := "testbuildemptystringvolume" - - _, err := buildImage(name, ` - FROM busybox - ENV foo="" - VOLUME $foo - `, false) - if err == nil { - c.Fatal("Should have failed to build") - } - -} - -func (s *DockerSuite) TestBuildContainerWithCgroupParent(c *check.C) { - testRequires(c, SameHostDaemon) - testRequires(c, DaemonIsLinux) - - cgroupParent := "test" - data, err := ioutil.ReadFile("/proc/self/cgroup") - if err != nil { - c.Fatalf("failed to read '/proc/self/cgroup - %v", err) - } - selfCgroupPaths := parseCgroupPaths(string(data)) - _, found := selfCgroupPaths["memory"] - if !found { - c.Fatalf("unable to find self memory cgroup path. 
CgroupsPath: %v", selfCgroupPaths) - } - cmd := exec.Command(dockerBinary, "build", "--cgroup-parent", cgroupParent, "-") - cmd.Stdin = strings.NewReader(` -FROM busybox -RUN cat /proc/self/cgroup -`) - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) - } - m, err := regexp.MatchString(fmt.Sprintf("memory:.*/%s/.*", cgroupParent), out) - c.Assert(err, check.IsNil) - if !m { - c.Fatalf("There is no expected memory cgroup with parent /%s/: %s", cgroupParent, out) - } -} - -func (s *DockerSuite) TestBuildNoDupOutput(c *check.C) { - // Check to make sure our build output prints the Dockerfile cmd - // property - there was a bug that caused it to be duplicated on the - // Step X line - name := "testbuildnodupoutput" - - _, out, err := buildImageWithOut(name, ` - FROM busybox - RUN env`, false) - if err != nil { - c.Fatalf("Build should have worked: %q", err) - } - - exp := "\nStep 2 : RUN env\n" - if !strings.Contains(out, exp) { - c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) - } -} - -// GH15826 -func (s *DockerSuite) TestBuildStartsFromOne(c *check.C) { - // Explicit check to ensure that build starts from step 1 rather than 0 - name := "testbuildstartsfromone" - - _, out, err := buildImageWithOut(name, ` - FROM busybox`, false) - if err != nil { - c.Fatalf("Build should have worked: %q", err) - } - - exp := "\nStep 1 : FROM busybox\n" - if !strings.Contains(out, exp) { - c.Fatalf("Bad output\nGot:%s\n\nExpected to contain:%s\n", out, exp) - } -} - -func (s *DockerSuite) TestBuildRUNErrMsg(c *check.C) { - // Test to make sure the bad command is quoted with just "s and - // not as a Go []string - name := "testbuildbadrunerrmsg" - _, out, err := buildImageWithOut(name, ` - FROM busybox - RUN badEXE a1 \& a2 a3`, false) // tab between a2 and a3 - if err == nil { - c.Fatal("Should have failed to build") - } - shell := "/bin/sh -c" - exitCode := "127" - if daemonPlatform == "windows" { - shell = "cmd /S /C" - // architectural - Windows has to start the container to determine the exe is bad, Linux does not - exitCode = "1" - } - exp := `The command '` + shell + ` badEXE a1 \& a2 a3' returned a non-zero code: ` + exitCode - if !strings.Contains(out, exp) { - c.Fatalf("RUN doesn't have the correct output:\nGot:%s\nExpected:%s", out, exp) - } -} - -func (s *DockerTrustSuite) TestTrustedBuild(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-build") - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, repoName) - - name := "testtrustedbuild" - - buildCmd := buildImageCmd(name, dockerFile, true) - s.trustedCmd(buildCmd) - out, _, err := runCommandWithOutput(buildCmd) - if err != nil { - c.Fatalf("Error running trusted build: %s\n%s", err, out) - } - - if !strings.Contains(out, fmt.Sprintf("FROM %s@sha", repoName[:len(repoName)-7])) { - c.Fatalf("Unexpected output on trusted build:\n%s", out) - } - - // We should also have a tag reference for the image. - if out, exitCode := dockerCmd(c, "inspect", repoName); exitCode != 0 { - c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) - } - - // We should now be able to remove the tag reference. 
- if out, exitCode := dockerCmd(c, "rmi", repoName); exitCode != 0 { - c.Fatalf("unexpected exit code inspecting image %q: %d: %s", repoName, exitCode, out) - } -} - -func (s *DockerTrustSuite) TestTrustedBuildUntrustedTag(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/build-untrusted-tag:latest", privateRegistryURL) - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, repoName) - - name := "testtrustedbuilduntrustedtag" - - buildCmd := buildImageCmd(name, dockerFile, true) - s.trustedCmd(buildCmd) - out, _, err := runCommandWithOutput(buildCmd) - if err == nil { - c.Fatalf("Expected error on trusted build with untrusted tag: %s\n%s", err, out) - } - - if !strings.Contains(out, "does not have trust data for") { - c.Fatalf("Unexpected output on trusted build with untrusted tag:\n%s", out) - } -} - -func (s *DockerTrustSuite) TestBuildContextDirIsSymlink(c *check.C) { - testRequires(c, DaemonIsLinux) - tempDir, err := ioutil.TempDir("", "test-build-dir-is-symlink-") - c.Assert(err, check.IsNil) - defer os.RemoveAll(tempDir) - - // Make a real context directory in this temp directory with a simple - // Dockerfile. - realContextDirname := filepath.Join(tempDir, "context") - if err := os.Mkdir(realContextDirname, os.FileMode(0755)); err != nil { - c.Fatal(err) - } - - if err = ioutil.WriteFile( - filepath.Join(realContextDirname, "Dockerfile"), - []byte(` - FROM busybox - RUN echo hello world - `), - os.FileMode(0644), - ); err != nil { - c.Fatal(err) - } - - // Make a symlink to the real context directory. - contextSymlinkName := filepath.Join(tempDir, "context_link") - if err := os.Symlink(realContextDirname, contextSymlinkName); err != nil { - c.Fatal(err) - } - - // Executing the build with the symlink as the specified context should - // *not* fail. 
- if out, exitStatus := dockerCmd(c, "build", contextSymlinkName); exitStatus != 0 { - c.Fatalf("build failed with exit status %d: %s", exitStatus, out) - } -} - -func (s *DockerTrustSuite) TestTrustedBuildTagFromReleasesRole(c *check.C) { - testRequires(c, NotaryHosting) - - latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") - repoName := strings.TrimSuffix(latestTag, ":latest") - - // Now create the releases role - s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // push a different tag to the releases role - otherTag := fmt.Sprintf("%s:other", repoName) - dockerCmd(c, "tag", "busybox", otherTag) - - pushCmd := exec.Command(dockerBinary, "push", otherTag) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) - s.assertTargetInRoles(c, repoName, "other", "targets/releases") - s.assertTargetNotInRoles(c, repoName, "other", "targets") - - out, status := dockerCmd(c, "rmi", otherTag) - c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) - - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, otherTag) - - name := "testtrustedbuildreleasesrole" - - buildCmd := buildImageCmd(name, dockerFile, true) - s.trustedCmd(buildCmd) - out, _, err = runCommandWithOutput(buildCmd) - c.Assert(err, check.IsNil, check.Commentf("Trusted build failed: %s", out)) - c.Assert(out, checker.Contains, fmt.Sprintf("FROM %s@sha", repoName)) -} - -func (s *DockerTrustSuite) TestTrustedBuildTagIgnoresOtherDelegationRoles(c *check.C) { - testRequires(c, NotaryHosting) - - latestTag := s.setupTrustedImage(c, "trusted-build-releases-role") - repoName := strings.TrimSuffix(latestTag, ":latest") - - // Now create a non-releases delegation role - s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // push a different tag to the other role - otherTag := fmt.Sprintf("%s:other", repoName) - dockerCmd(c, "tag", "busybox", otherTag) - - pushCmd := exec.Command(dockerBinary, "push", otherTag) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("Trusted push failed: %s", out)) - s.assertTargetInRoles(c, repoName, "other", "targets/other") - s.assertTargetNotInRoles(c, repoName, "other", "targets") - - out, status := dockerCmd(c, "rmi", otherTag) - c.Assert(status, check.Equals, 0, check.Commentf("docker rmi failed: %s", out)) - - dockerFile := fmt.Sprintf(` - FROM %s - RUN [] - `, otherTag) - - name := "testtrustedbuildotherrole" - - buildCmd := buildImageCmd(name, dockerFile, true) - s.trustedCmd(buildCmd) - out, _, err = runCommandWithOutput(buildCmd) - c.Assert(err, check.NotNil, check.Commentf("Trusted build expected to fail: %s", out)) -} - -// Issue #15634: COPY fails when path starts with "null" -func (s *DockerSuite) TestBuildNullStringInAddCopyVolume(c *check.C) { - name := "testbuildnullstringinaddcopyvolume" - - volName := "nullvolume" - - if daemonPlatform == "windows" { - volName = `C:\\nullvolume` - } - - ctx, err := fakeContext(` - FROM busybox - - ADD null / - COPY nullfile / - VOLUME `+volName+` - `, - map[string]string{ - "null": "test1", - "nullfile": "test2", - }, - ) - c.Assert(err, check.IsNil) - defer ctx.Close() - - _, err = 
buildImageFromContext(name, ctx, true) - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestBuildStopSignal(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support STOPSIGNAL yet - imgName := "test_build_stop_signal" - _, err := buildImage(imgName, - `FROM busybox - STOPSIGNAL SIGKILL`, - true) - c.Assert(err, check.IsNil) - res := inspectFieldJSON(c, imgName, "Config.StopSignal") - if res != `"SIGKILL"` { - c.Fatalf("Signal %s, expected SIGKILL", res) - } - - containerName := "test-container-stop-signal" - dockerCmd(c, "run", "-d", "--name", containerName, imgName, "top") - - res = inspectFieldJSON(c, containerName, "Config.StopSignal") - if res != `"SIGKILL"` { - c.Fatalf("Signal %s, expected SIGKILL", res) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArg(c *check.C) { - imgName := "bldargtest" - envKey := "foo" - envVal := "bar" - args := []string{"--build-arg", fmt.Sprintf("%s=%s", envKey, envVal)} - var dockerfile string - if daemonPlatform == "windows" { - // Bugs in Windows busybox port - use the default base image and native cmd stuff - dockerfile = fmt.Sprintf(`FROM `+minimalBaseImage()+` - ARG %s - RUN echo %%%s%% - CMD setlocal enableextensions && if defined %s (echo %%%s%%)`, envKey, envKey, envKey, envKey) - } else { - dockerfile = fmt.Sprintf(`FROM busybox - ARG %s - RUN echo $%s - CMD echo $%s`, envKey, envKey, envKey) - - } - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) - } - - containerName := "bldargCont" - out, _ := dockerCmd(c, "run", "--name", containerName, imgName) - out = strings.Trim(out, " \r\n'") - if out != "" { - c.Fatalf("run produced invalid output: %q, expected empty string", out) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgHistory(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - envVal := "bar" - envDef := "bar1" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - dockerfile := fmt.Sprintf(`FROM busybox - ARG %s=%s`, envKey, envDef) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) - } - - out, _ := dockerCmd(c, "history", "--no-trunc", imgName) - outputTabs := strings.Split(out, "\n")[1] - if !strings.Contains(outputTabs, envDef) { - c.Fatalf("failed to find arg default in image history output: %q expected: %q", outputTabs, envDef) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgCacheHit(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - envVal := "bar" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - dockerfile := fmt.Sprintf(`FROM busybox - ARG %s - RUN echo $%s`, envKey, envKey) - - origImgID := "" - var err error - if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { - c.Fatal(err) - } - - imgNameCache := "bldargtestcachehit" - if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID != origImgID { - if err != nil { - c.Fatal(err) - } - c.Fatalf("build 
didn't use cache! expected image id: %q built image id: %q", origImgID, newImgID) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgCacheMissExtraArg(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - envVal := "bar" - extraEnvKey := "foo1" - extraEnvVal := "bar1" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - - dockerfile := fmt.Sprintf(`FROM busybox - ARG %s - ARG %s - RUN echo $%s`, envKey, extraEnvKey, envKey) - - origImgID := "" - var err error - if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { - c.Fatal(err) - } - - imgNameCache := "bldargtestcachemiss" - args = append(args, "--build-arg", fmt.Sprintf("%s=%s", extraEnvKey, extraEnvVal)) - if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { - if err != nil { - c.Fatal(err) - } - c.Fatalf("build used cache, expected a miss!") - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgCacheMissSameArgDiffVal(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - envVal := "bar" - newEnvVal := "bar1" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - - dockerfile := fmt.Sprintf(`FROM busybox - ARG %s - RUN echo $%s`, envKey, envKey) - - origImgID := "" - var err error - if origImgID, err = buildImage(imgName, dockerfile, true, args...); err != nil { - c.Fatal(err) - } - - imgNameCache := "bldargtestcachemiss" - args = []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, newEnvVal), - } - if newImgID, err := buildImage(imgNameCache, dockerfile, true, args...); err != nil || newImgID == origImgID { - if err != nil { - c.Fatal(err) - } - c.Fatalf("build used cache, expected a miss!") - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgOverrideArgDefinedBeforeEnv(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - envVal := "bar" - envValOveride := "barOverride" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - dockerfile := fmt.Sprintf(`FROM busybox - ARG %s - ENV %s %s - RUN echo $%s - CMD echo $%s - `, envKey, envKey, envValOveride, envKey, envKey) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) - } - - containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgOverrideEnvDefinedBeforeArg(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - envVal := "bar" - envValOveride := "barOverride" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - dockerfile := fmt.Sprintf(`FROM busybox - ENV %s %s - ARG %s - RUN echo $%s - CMD echo $%s - `, envKey, envValOveride, envKey, envKey, envKey) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, 
err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) - } - - containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgExpansion(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldvarstest" - - wdVar := "WDIR" - wdVal := "/tmp/" - addVar := "AFILE" - addVal := "addFile" - copyVar := "CFILE" - copyVal := "copyFile" - envVar := "foo" - envVal := "bar" - exposeVar := "EPORT" - exposeVal := "9999" - userVar := "USER" - userVal := "testUser" - volVar := "VOL" - volVal := "/testVol/" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", wdVar, wdVal), - "--build-arg", fmt.Sprintf("%s=%s", addVar, addVal), - "--build-arg", fmt.Sprintf("%s=%s", copyVar, copyVal), - "--build-arg", fmt.Sprintf("%s=%s", envVar, envVal), - "--build-arg", fmt.Sprintf("%s=%s", exposeVar, exposeVal), - "--build-arg", fmt.Sprintf("%s=%s", userVar, userVal), - "--build-arg", fmt.Sprintf("%s=%s", volVar, volVal), - } - ctx, err := fakeContext(fmt.Sprintf(`FROM busybox - ARG %s - WORKDIR ${%s} - ARG %s - ADD ${%s} testDir/ - ARG %s - COPY $%s testDir/ - ARG %s - ENV %s=${%s} - ARG %s - EXPOSE $%s - ARG %s - USER $%s - ARG %s - VOLUME ${%s}`, - wdVar, wdVar, addVar, addVar, copyVar, copyVar, envVar, envVar, - envVar, exposeVar, exposeVar, userVar, userVar, volVar, volVar), - map[string]string{ - addVal: "some stuff", - copyVal: "some stuff", - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - if _, err := buildImageFromContext(imgName, ctx, true, args...); err != nil { - c.Fatal(err) - } - - var resMap map[string]interface{} - var resArr []string - res := "" - res = inspectField(c, imgName, "Config.WorkingDir") - if res != filepath.ToSlash(filepath.Clean(wdVal)) { - c.Fatalf("Config.WorkingDir value mismatch. Expected: %s, got: %s", filepath.ToSlash(filepath.Clean(wdVal)), res) - } - - inspectFieldAndMarshall(c, imgName, "Config.Env", &resArr) - - found := false - for _, v := range resArr { - if fmt.Sprintf("%s=%s", envVar, envVal) == v { - found = true - break - } - } - if !found { - c.Fatalf("Config.Env value mismatch. Expected to exist: %s=%s, got: %v", - envVar, envVal, resArr) - } - - inspectFieldAndMarshall(c, imgName, "Config.ExposedPorts", &resMap) - if _, ok := resMap[fmt.Sprintf("%s/tcp", exposeVal)]; !ok { - c.Fatalf("Config.ExposedPorts value mismatch. Expected exposed port: %s/tcp, got: %v", exposeVal, resMap) - } - - res = inspectField(c, imgName, "Config.User") - if res != userVal { - c.Fatalf("Config.User value mismatch. Expected: %s, got: %s", userVal, res) - } - - inspectFieldAndMarshall(c, imgName, "Config.Volumes", &resMap) - if _, ok := resMap[volVal]; !ok { - c.Fatalf("Config.Volumes value mismatch. 
Expected volume: %s, got: %v", volVal, resMap) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgExpansionOverride(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldvarstest" - envKey := "foo" - envVal := "bar" - envKey1 := "foo1" - envValOveride := "barOverride" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - dockerfile := fmt.Sprintf(`FROM busybox - ARG %s - ENV %s %s - ENV %s ${%s} - RUN echo $%s - CMD echo $%s`, envKey, envKey, envValOveride, envKey1, envKey, envKey1, envKey1) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 2 { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) - } - - containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgUntrustedDefinedAfterUse(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - envVal := "bar" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - dockerfile := fmt.Sprintf(`FROM busybox - RUN echo $%s - ARG %s - CMD echo $%s`, envKey, envKey, envKey) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Contains(out, envVal) { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("able to access environment variable in output: %q expected to be missing", out) - } - - containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { - c.Fatalf("run produced invalid output: %q, expected empty string", out) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgBuiltinArg(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support --build-arg - imgName := "bldargtest" - envKey := "HTTP_PROXY" - envVal := "bar" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - dockerfile := fmt.Sprintf(`FROM busybox - RUN echo $%s - CMD echo $%s`, envKey, envKey) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || !strings.Contains(out, envVal) { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envVal) - } - - containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); out != "\n" { - c.Fatalf("run produced invalid output: %q, expected empty string", out) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgDefaultOverride(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - envVal := "bar" - envValOveride := "barOverride" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envValOveride), - } - dockerfile := fmt.Sprintf(`FROM busybox - ARG %s=%s - ENV %s $%s - RUN echo $%s - CMD echo $%s`, envKey, envVal, envKey, envKey, envKey, envKey) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envValOveride) != 1 { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, 
err) - } - c.Fatalf("failed to access environment variable in output: %q expected: %q", out, envValOveride) - } - - containerName := "bldargCont" - if out, _ := dockerCmd(c, "run", "--name", containerName, imgName); !strings.Contains(out, envValOveride) { - c.Fatalf("run produced invalid output: %q, expected %q", out, envValOveride) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgUnconsumedArg(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support --build-arg - imgName := "bldargtest" - envKey := "foo" - envVal := "bar" - args := []string{ - "--build-arg", fmt.Sprintf("%s=%s", envKey, envVal), - } - dockerfile := fmt.Sprintf(`FROM busybox - RUN echo $%s - CMD echo $%s`, envKey, envKey) - - errStr := "One or more build-args" - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err == nil { - c.Fatalf("build succeeded, expected to fail. Output: %v", out) - } else if !strings.Contains(out, errStr) { - c.Fatalf("Unexpected error. output: %q, expected error: %q", out, errStr) - } - -} - -func (s *DockerSuite) TestBuildBuildTimeArgQuotedValVariants(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - envKey1 := "foo1" - envKey2 := "foo2" - envKey3 := "foo3" - args := []string{} - dockerfile := fmt.Sprintf(`FROM busybox - ARG %s="" - ARG %s='' - ARG %s="''" - ARG %s='""' - RUN [ "$%s" != "$%s" ] - RUN [ "$%s" != "$%s" ] - RUN [ "$%s" != "$%s" ] - RUN [ "$%s" != "$%s" ] - RUN [ "$%s" != "$%s" ]`, envKey, envKey1, envKey2, envKey3, - envKey, envKey2, envKey, envKey3, envKey1, envKey2, envKey1, envKey3, - envKey2, envKey3) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgEmptyValVariants(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - envKey1 := "foo1" - envKey2 := "foo2" - args := []string{} - dockerfile := fmt.Sprintf(`FROM busybox - ARG %s= - ARG %s="" - ARG %s='' - RUN [ "$%s" == "$%s" ] - RUN [ "$%s" == "$%s" ] - RUN [ "$%s" == "$%s" ]`, envKey, envKey1, envKey2, envKey, envKey1, envKey1, envKey2, envKey, envKey2) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } -} - -func (s *DockerSuite) TestBuildBuildTimeArgDefintionWithNoEnvInjection(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support ARG - imgName := "bldargtest" - envKey := "foo" - args := []string{} - dockerfile := fmt.Sprintf(`FROM busybox - ARG %s - RUN env`, envKey) - - if _, out, err := buildImageWithOut(imgName, dockerfile, true, args...); err != nil || strings.Count(out, envKey) != 1 { - if err != nil { - c.Fatalf("build failed to complete: %q %q", out, err) - } - c.Fatalf("unexpected number of occurrences of the arg in output: %q expected: 1", out) - } -} - -func (s *DockerSuite) TestBuildNoNamedVolume(c *check.C) { - volName := "testname:/foo" - - if daemonPlatform == "windows" { - volName = "testname:C:\\foo" - } - dockerCmd(c, "run", "-v", volName, "busybox", "sh", "-c", "touch /foo/oops") - - dockerFile := `FROM busybox - VOLUME ` + volName + ` - RUN ls /foo/oops - ` - _, err := buildImage("test", dockerFile, false) - c.Assert(err, check.NotNil, check.Commentf("image build should have failed")) -} - -func (s *DockerSuite) TestBuildTagEvent(c *check.C) { - since := 
daemonUnixTime(c) - - dockerFile := `FROM busybox - RUN echo events - ` - _, err := buildImage("test", dockerFile, false) - c.Assert(err, check.IsNil) - - until := daemonUnixTime(c) - out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "type=image") - events := strings.Split(strings.TrimSpace(out), "\n") - actions := eventActionsByIDAndType(c, events, "test:latest", "image") - var foundTag bool - for _, a := range actions { - if a == "tag" { - foundTag = true - break - } - } - - c.Assert(foundTag, checker.True, check.Commentf("No tag event found:\n%s", out)) -} - -// #15780 -func (s *DockerSuite) TestBuildMultipleTags(c *check.C) { - dockerfile := ` - FROM busybox - MAINTAINER test-15780 - ` - cmd := exec.Command(dockerBinary, "build", "-t", "tag1", "-t", "tag2:v2", - "-t", "tag1:latest", "-t", "tag1", "--no-cache", "-") - cmd.Stdin = strings.NewReader(dockerfile) - _, err := runCommand(cmd) - c.Assert(err, check.IsNil) - - id1, err := getIDByName("tag1") - c.Assert(err, check.IsNil) - id2, err := getIDByName("tag2:v2") - c.Assert(err, check.IsNil) - c.Assert(id1, check.Equals, id2) -} - -// #17290 -func (s *DockerSuite) TestBuildCacheBrokenSymlink(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildbrokensymlink" - ctx, err := fakeContext(` - FROM busybox - COPY . ./`, - map[string]string{ - "foo": "bar", - }) - c.Assert(err, checker.IsNil) - defer ctx.Close() - - err = os.Symlink(filepath.Join(ctx.Dir, "nosuchfile"), filepath.Join(ctx.Dir, "asymlink")) - c.Assert(err, checker.IsNil) - - // warm up cache - _, err = buildImageFromContext(name, ctx, true) - c.Assert(err, checker.IsNil) - - // add new file to context, should invalidate cache - err = ioutil.WriteFile(filepath.Join(ctx.Dir, "newfile"), []byte("foo"), 0644) - c.Assert(err, checker.IsNil) - - _, out, err := buildImageFromContextWithOut(name, ctx, true) - c.Assert(err, checker.IsNil) - - c.Assert(out, checker.Not(checker.Contains), "Using cache") - -} - -func (s *DockerSuite) TestBuildFollowSymlinkToFile(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildbrokensymlink" - ctx, err := fakeContext(` - FROM busybox - COPY asymlink target`, - map[string]string{ - "foo": "bar", - }) - c.Assert(err, checker.IsNil) - defer ctx.Close() - - err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) - c.Assert(err, checker.IsNil) - - id, err := buildImageFromContext(name, ctx, true) - c.Assert(err, checker.IsNil) - - out, _ := dockerCmd(c, "run", "--rm", id, "cat", "target") - c.Assert(out, checker.Matches, "bar") - - // change target file should invalidate cache - err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) - c.Assert(err, checker.IsNil) - - id, out, err = buildImageFromContextWithOut(name, ctx, true) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Not(checker.Contains), "Using cache") - - out, _ = dockerCmd(c, "run", "--rm", id, "cat", "target") - c.Assert(out, checker.Matches, "baz") -} - -func (s *DockerSuite) TestBuildFollowSymlinkToDir(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildbrokensymlink" - ctx, err := fakeContext(` - FROM busybox - COPY asymlink /`, - map[string]string{ - "foo/abc": "bar", - "foo/def": "baz", - }) - c.Assert(err, checker.IsNil) - defer ctx.Close() - - err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) - c.Assert(err, checker.IsNil) - - id, err := buildImageFromContext(name, ctx, true) - c.Assert(err, checker.IsNil) - - out, _ := dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") - 
c.Assert(out, checker.Matches, "barbaz") - - // change target file should invalidate cache - err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo/def"), []byte("bax"), 0644) - c.Assert(err, checker.IsNil) - - id, out, err = buildImageFromContextWithOut(name, ctx, true) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Not(checker.Contains), "Using cache") - - out, _ = dockerCmd(c, "run", "--rm", id, "cat", "abc", "def") - c.Assert(out, checker.Matches, "barbax") - -} - -// TestBuildSymlinkBasename tests that target file gets basename from symlink, -// not from the target file. -func (s *DockerSuite) TestBuildSymlinkBasename(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildbrokensymlink" - ctx, err := fakeContext(` - FROM busybox - COPY asymlink /`, - map[string]string{ - "foo": "bar", - }) - c.Assert(err, checker.IsNil) - defer ctx.Close() - - err = os.Symlink("foo", filepath.Join(ctx.Dir, "asymlink")) - c.Assert(err, checker.IsNil) - - id, err := buildImageFromContext(name, ctx, true) - c.Assert(err, checker.IsNil) - - out, _ := dockerCmd(c, "run", "--rm", id, "cat", "asymlink") - c.Assert(out, checker.Matches, "bar") - -} - -// #17827 -func (s *DockerSuite) TestBuildCacheRootSource(c *check.C) { - name := "testbuildrootsource" - ctx, err := fakeContext(` - FROM busybox - COPY / /data`, - map[string]string{ - "foo": "bar", - }) - c.Assert(err, checker.IsNil) - defer ctx.Close() - - // warm up cache - _, err = buildImageFromContext(name, ctx, true) - c.Assert(err, checker.IsNil) - - // change file, should invalidate cache - err = ioutil.WriteFile(filepath.Join(ctx.Dir, "foo"), []byte("baz"), 0644) - c.Assert(err, checker.IsNil) - - _, out, err := buildImageFromContextWithOut(name, ctx, true) - c.Assert(err, checker.IsNil) - - c.Assert(out, checker.Not(checker.Contains), "Using cache") -} - -// #19375 -func (s *DockerSuite) TestBuildFailsGitNotCallable(c *check.C) { - cmd := exec.Command(dockerBinary, "build", "github.com/docker/v1.10-migrator.git") - cmd.Env = append(cmd.Env, "PATH=") - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") - - cmd = exec.Command(dockerBinary, "build", "https://github.com/docker/v1.10-migrator.git") - cmd.Env = append(cmd.Env, "PATH=") - out, _, err = runCommandWithOutput(cmd) - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "unable to prepare context: unable to find 'git': ") -} - -// TestBuildWorkdirWindowsPath tests that a Windows style path works as a workdir -func (s *DockerSuite) TestBuildWorkdirWindowsPath(c *check.C) { - testRequires(c, DaemonIsWindows) - name := "testbuildworkdirwindowspath" - - _, err := buildImage(name, ` - FROM windowsservercore - RUN mkdir C:\\work - WORKDIR C:\\work - RUN if "%CD%" NEQ "C:\work" exit -1 - `, true) - - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestBuildLabel(c *check.C) { - name := "testbuildlabel" - testLabel := "foo" - - _, err := buildImage(name, ` - FROM `+minimalBaseImage()+` - LABEL default foo -`, false, "--label", testLabel) - - c.Assert(err, checker.IsNil) - - res := inspectFieldJSON(c, name, "Config.Labels") - - var labels map[string]string - - if err := json.Unmarshal([]byte(res), &labels); err != nil { - c.Fatal(err) - } - - if _, ok := labels[testLabel]; !ok { - c.Fatal("label not found in image") - } -} - -func (s *DockerSuite) TestBuildLabelOneNode(c *check.C) { - name := "testbuildlabel" - - _, err := buildImage(name, "FROM busybox", 
false, "--label", "foo=bar") - - c.Assert(err, checker.IsNil) - - res, err := inspectImage(name, "json .Config.Labels") - c.Assert(err, checker.IsNil) - var labels map[string]string - - if err := json.Unmarshal([]byte(res), &labels); err != nil { - c.Fatal(err) - } - - v, ok := labels["foo"] - if !ok { - c.Fatal("label `foo` not found in image") - } - c.Assert(v, checker.Equals, "bar") -} - -func (s *DockerSuite) TestBuildLabelCacheCommit(c *check.C) { - name := "testbuildlabelcachecommit" - testLabel := "foo" - - if _, err := buildImage(name, ` - FROM `+minimalBaseImage()+` - LABEL default foo - `, false); err != nil { - c.Fatal(err) - } - - _, err := buildImage(name, ` - FROM `+minimalBaseImage()+` - LABEL default foo -`, true, "--label", testLabel) - - c.Assert(err, checker.IsNil) - - res := inspectFieldJSON(c, name, "Config.Labels") - - var labels map[string]string - - if err := json.Unmarshal([]byte(res), &labels); err != nil { - c.Fatal(err) - } - - if _, ok := labels[testLabel]; !ok { - c.Fatal("label not found in image") - } -} - -func (s *DockerSuite) TestBuildLabelMultiple(c *check.C) { - name := "testbuildlabelmultiple" - testLabels := map[string]string{ - "foo": "bar", - "123": "456", - } - - labelArgs := []string{} - - for k, v := range testLabels { - labelArgs = append(labelArgs, "--label", k+"="+v) - } - - _, err := buildImage(name, ` - FROM `+minimalBaseImage()+` - LABEL default foo -`, false, labelArgs...) - - if err != nil { - c.Fatal("error building image with labels", err) - } - - res := inspectFieldJSON(c, name, "Config.Labels") - - var labels map[string]string - - if err := json.Unmarshal([]byte(res), &labels); err != nil { - c.Fatal(err) - } - - for k, v := range testLabels { - if x, ok := labels[k]; !ok || x != v { - c.Fatalf("label %s=%s not found in image", k, v) - } - } -} - -func (s *DockerSuite) TestBuildLabelOverwrite(c *check.C) { - name := "testbuildlabeloverwrite" - testLabel := "foo" - testValue := "bar" - - _, err := buildImage(name, ` - FROM `+minimalBaseImage()+` - LABEL `+testLabel+`+ foo -`, false, []string{"--label", testLabel + "=" + testValue}...) 
- - if err != nil { - c.Fatal("error building image with labels", err) - } - - res := inspectFieldJSON(c, name, "Config.Labels") - - var labels map[string]string - - if err := json.Unmarshal([]byte(res), &labels); err != nil { - c.Fatal(err) - } - - v, ok := labels[testLabel] - if !ok { - c.Fatal("label not found in image") - } - - if v != testValue { - c.Fatal("label not overwritten") - } -} - -func (s *DockerRegistryAuthHtpasswdSuite) TestBuildFromAuthenticatedRegistry(c *check.C) { - dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) - - baseImage := privateRegistryURL + "/baseimage" - - _, err := buildImage(baseImage, ` - FROM busybox - ENV env1 val1 - `, true) - - c.Assert(err, checker.IsNil) - - dockerCmd(c, "push", baseImage) - dockerCmd(c, "rmi", baseImage) - - _, err = buildImage(baseImage, fmt.Sprintf(` - FROM %s - ENV env2 val2 - `, baseImage), true) - - c.Assert(err, checker.IsNil) -} - -func (s *DockerRegistryAuthHtpasswdSuite) TestBuildWithExternalAuth(c *check.C) { - osPath := os.Getenv("PATH") - defer os.Setenv("PATH", osPath) - - workingDir, err := os.Getwd() - c.Assert(err, checker.IsNil) - absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) - c.Assert(err, checker.IsNil) - testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) - - os.Setenv("PATH", testPath) - - repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) - - tmp, err := ioutil.TempDir("", "integration-cli-") - c.Assert(err, checker.IsNil) - - externalAuthConfig := `{ "credsStore": "shell-test" }` - - configPath := filepath.Join(tmp, "config.json") - err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) - c.Assert(err, checker.IsNil) - - dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) - - b, err := ioutil.ReadFile(configPath) - c.Assert(err, checker.IsNil) - c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") - - dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) - dockerCmd(c, "--config", tmp, "push", repoName) - - // make sure the image is pulled when building - dockerCmd(c, "rmi", repoName) - - buildCmd := exec.Command(dockerBinary, "--config", tmp, "build", "-") - buildCmd.Stdin = strings.NewReader(fmt.Sprintf("FROM %s", repoName)) - - out, _, err := runCommandWithOutput(buildCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) -} - -// Test cases in #22036 -func (s *DockerSuite) TestBuildLabelsOverride(c *check.C) { - testRequires(c, DaemonIsLinux) - - // Command line option labels will always override - name := "scratchy" - expected := `{"bar":"from-flag","foo":"from-flag"}` - _, err := buildImage(name, - `FROM scratch - LABEL foo=from-dockerfile`, - true, "--label", "foo=from-flag", "--label", "bar=from-flag") - c.Assert(err, check.IsNil) - - res := inspectFieldJSON(c, name, "Config.Labels") - if res != expected { - c.Fatalf("Labels %s, expected %s", res, expected) - } - - name = "from" - expected = `{"foo":"from-dockerfile"}` - _, err = buildImage(name, - `FROM scratch - LABEL foo from-dockerfile`, - true) - c.Assert(err, check.IsNil) - - res = inspectFieldJSON(c, name, "Config.Labels") - if res != expected { - c.Fatalf("Labels %s, expected %s", res, expected) - } - - // Command line option label will override even via `FROM` - name = "new" - expected = `{"bar":"from-dockerfile2","foo":"new"}` - _, err = buildImage(name, - `FROM from - LABEL bar from-dockerfile2`, - true, "--label", "foo=new") - c.Assert(err, 
check.IsNil) - - res = inspectFieldJSON(c, name, "Config.Labels") - if res != expected { - c.Fatalf("Labels %s, expected %s", res, expected) - } - - // Command line option without a value set (--label foo, --label bar=) - // will be treated as --label foo="", --label bar="" - name = "scratchy2" - expected = `{"bar":"","foo":""}` - _, err = buildImage(name, - `FROM scratch - LABEL foo=from-dockerfile`, - true, "--label", "foo", "--label", "bar=") - c.Assert(err, check.IsNil) - - res = inspectFieldJSON(c, name, "Config.Labels") - if res != expected { - c.Fatalf("Labels %s, expected %s", res, expected) - } - - // Command line option without a value set (--label foo, --label bar=) - // will be treated as --label foo="", --label bar="" - // This time is for inherited images - name = "new2" - expected = `{"bar":"","foo":""}` - _, err = buildImage(name, - `FROM from - LABEL bar from-dockerfile2`, - true, "--label", "foo=", "--label", "bar") - c.Assert(err, check.IsNil) - - res = inspectFieldJSON(c, name, "Config.Labels") - if res != expected { - c.Fatalf("Labels %s, expected %s", res, expected) - } - - // Command line option labels with only `FROM` - name = "scratchy" - expected = `{"bar":"from-flag","foo":"from-flag"}` - _, err = buildImage(name, - `FROM scratch`, - true, "--label", "foo=from-flag", "--label", "bar=from-flag") - c.Assert(err, check.IsNil) - - res = inspectFieldJSON(c, name, "Config.Labels") - if res != expected { - c.Fatalf("Labels %s, expected %s", res, expected) - } - -} - -// Test case for #22855 -func (s *DockerSuite) TestBuildDeleteCommittedFile(c *check.C) { - name := "test-delete-committed-file" - - _, err := buildImage(name, - `FROM busybox - RUN echo test > file - RUN test -e file - RUN rm file - RUN sh -c "! test -e file"`, false) - if err != nil { - c.Fatal(err) - } -} - -// #20083 -func (s *DockerSuite) TestBuildDockerignoreComment(c *check.C) { - // TODO Windows: Figure out why this test is flakey on TP5. If you add - // something like RUN sleep 5, or even RUN ls /tmp after the ADD line, - // it is more reliable, but that's not a good fix. - testRequires(c, DaemonIsLinux) - - name := "testbuilddockerignorecleanpaths" - dockerfile := ` - FROM busybox - ADD . /tmp/ - RUN sh -c "(ls -la /tmp/#1)" - RUN sh -c "(! ls -la /tmp/#2)" - RUN sh -c "(! ls /tmp/foo) && (! ls /tmp/foo2) && (ls /tmp/dir1/foo)"` - ctx, err := fakeContext(dockerfile, map[string]string{ - "foo": "foo", - "foo2": "foo2", - "dir1/foo": "foo in dir1", - "#1": "# file 1", - "#2": "# file 2", - ".dockerignore": `# Visual C++ cache files -# because we have git ;-) -# The above comment is from #20083 -foo -#dir1/foo -foo2 -# The following is considered as comment as # is at the beginning -#1 -# The following is not considered as comment as # is not at the beginning - #2 -`, - }) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatal(err) - } -} - -// Test case for #23221 -func (s *DockerSuite) TestBuildWithUTF8BOM(c *check.C) { - name := "test-with-utf8-bom" - dockerfile := []byte(`FROM busybox`) - bomDockerfile := append([]byte{0xEF, 0xBB, 0xBF}, dockerfile...) 
- ctx, err := fakeContextFromNewTempDir() - c.Assert(err, check.IsNil) - defer ctx.Close() - err = ctx.addFile("Dockerfile", bomDockerfile) - c.Assert(err, check.IsNil) - _, err = buildImageFromContext(name, ctx, true) - c.Assert(err, check.IsNil) -} - -// Test case for UTF-8 BOM in .dockerignore, related to #23221 -func (s *DockerSuite) TestBuildWithUTF8BOMDockerignore(c *check.C) { - name := "test-with-utf8-bom-dockerignore" - dockerfile := ` - FROM busybox - ADD . /tmp/ - RUN ls -la /tmp - RUN sh -c "! ls /tmp/Dockerfile" - RUN ls /tmp/.dockerignore` - dockerignore := []byte("./Dockerfile\n") - bomDockerignore := append([]byte{0xEF, 0xBB, 0xBF}, dockerignore...) - ctx, err := fakeContext(dockerfile, map[string]string{ - "Dockerfile": dockerfile, - }) - c.Assert(err, check.IsNil) - defer ctx.Close() - err = ctx.addFile(".dockerignore", bomDockerignore) - c.Assert(err, check.IsNil) - _, err = buildImageFromContext(name, ctx, true) - if err != nil { - c.Fatal(err) - } -} - -// #22489 Shell test to confirm config gets updated correctly -func (s *DockerSuite) TestBuildShellUpdatesConfig(c *check.C) { - name := "testbuildshellupdatesconfig" - - expected := `["foo","-bar","#(nop) ","SHELL [foo -bar]"]` - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - SHELL ["foo", "-bar"]`, - true) - if err != nil { - c.Fatal(err) - } - res := inspectFieldJSON(c, name, "ContainerConfig.Cmd") - if res != expected { - c.Fatalf("%s, expected %s", res, expected) - } - res = inspectFieldJSON(c, name, "ContainerConfig.Shell") - if res != `["foo","-bar"]` { - c.Fatalf(`%s, expected ["foo","-bar"]`, res) - } -} - -// #22489 Changing the shell multiple times and CMD after. -func (s *DockerSuite) TestBuildShellMultiple(c *check.C) { - name := "testbuildshellmultiple" - - _, out, _, err := buildImageWithStdoutStderr(name, - `FROM busybox - RUN echo defaultshell - SHELL ["echo"] - RUN echoshell - SHELL ["ls"] - RUN -l - CMD -l`, - true) - if err != nil { - c.Fatal(err) - } - - // Must contain 'defaultshell' twice - if len(strings.Split(out, "defaultshell")) != 3 { - c.Fatalf("defaultshell should have appeared twice in %s", out) - } - - // Must contain 'echoshell' twice - if len(strings.Split(out, "echoshell")) != 3 { - c.Fatalf("echoshell should have appeared twice in %s", out) - } - - // Must contain "total " (part of ls -l) - if !strings.Contains(out, "total ") { - c.Fatalf("%s should have contained 'total '", out) - } - - // A container started from the image uses the shell-form CMD. - // Last shell is ls. CMD is -l. So should contain 'total '. - outrun, _ := dockerCmd(c, "run", "--rm", name) - if !strings.Contains(outrun, "total ") { - c.Fatalf("Expected started container to run ls -l. %s", outrun) - } -} - -// #22489. Changed SHELL with ENTRYPOINT -func (s *DockerSuite) TestBuildShellEntrypoint(c *check.C) { - name := "testbuildshellentrypoint" - - _, err := buildImage(name, - `FROM busybox - SHELL ["ls"] - ENTRYPOINT -l`, - true) - if err != nil { - c.Fatal(err) - } - - // A container started from the image uses the shell-form ENTRYPOINT. - // Shell is ls. ENTRYPOINT is -l. So should contain 'total '. - outrun, _ := dockerCmd(c, "run", "--rm", name) - if !strings.Contains(outrun, "total ") { - c.Fatalf("Expected started container to run ls -l. 
%s", outrun) - } -} - -// #22489 Shell test to confirm shell is inherited in a subsequent build -func (s *DockerSuite) TestBuildShellInherited(c *check.C) { - name1 := "testbuildshellinherited1" - _, err := buildImage(name1, - `FROM busybox - SHELL ["ls"]`, - true) - if err != nil { - c.Fatal(err) - } - - name2 := "testbuildshellinherited2" - _, out, _, err := buildImageWithStdoutStderr(name2, - `FROM `+name1+` - RUN -l`, - true) - if err != nil { - c.Fatal(err) - } - - // ls -l has "total " followed by some number in it, ls without -l does not. - if !strings.Contains(out, "total ") { - c.Fatalf("Should have seen total in 'ls -l'.\n%s", out) - } -} - -// #22489 Shell test to confirm non-JSON doesn't work -func (s *DockerSuite) TestBuildShellNotJSON(c *check.C) { - name := "testbuildshellnotjson" - - _, err := buildImage(name, - `FROM `+minimalBaseImage()+` - sHeLl exec -form`, // Casing explicit to ensure error is upper-cased. - true) - if err == nil { - c.Fatal("Image build should have failed") - } - if !strings.Contains(err.Error(), "SHELL requires the arguments to be in JSON form") { - c.Fatal("Error didn't indicate that arguments must be in JSON form") - } -} - -// #22489 Windows shell test to confirm native is powershell if executing a PS command -// This would error if the default shell were still cmd. -func (s *DockerSuite) TestBuildShellWindowsPowershell(c *check.C) { - testRequires(c, DaemonIsWindows) - name := "testbuildshellpowershell" - _, out, err := buildImageWithOut(name, - `FROM `+minimalBaseImage()+` - SHELL ["powershell", "-command"] - RUN Write-Host John`, - true) - if err != nil { - c.Fatal(err) - } - if !strings.Contains(out, "\nJohn\n") { - c.Fatalf("Line with 'John' not found in output %q", out) - } -} diff --git a/integration-cli/docker_cli_build_unix_test.go b/integration-cli/docker_cli_build_unix_test.go deleted file mode 100644 index 56ab66efae..0000000000 --- a/integration-cli/docker_cli_build_unix_test.go +++ /dev/null @@ -1,206 +0,0 @@ -// +build !windows - -package main - -import ( - "bufio" - "bytes" - "encoding/json" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/go-units" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestBuildResourceConstraintsAreUsed(c *check.C) { - testRequires(c, cpuCfsQuota) - name := "testbuildresourceconstraints" - - ctx, err := fakeContext(` - FROM hello-world:frozen - RUN ["/hello"] - `, map[string]string{}) - c.Assert(err, checker.IsNil) - - _, _, err = dockerCmdInDir(c, ctx.Dir, "build", "--no-cache", "--rm=false", "--memory=64m", "--memory-swap=-1", "--cpuset-cpus=0", "--cpuset-mems=0", "--cpu-shares=100", "--cpu-quota=8000", "--ulimit", "nofile=42", "-t", name, ".") - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "ps", "-lq") - cID := strings.TrimSpace(out) - - type hostConfig struct { - Memory int64 - MemorySwap int64 - CpusetCpus string - CpusetMems string - CPUShares int64 - CPUQuota int64 - Ulimits []*units.Ulimit - } - - cfg := inspectFieldJSON(c, cID, "HostConfig") - - var c1 hostConfig - err = json.Unmarshal([]byte(cfg), &c1) - c.Assert(err, checker.IsNil, check.Commentf(cfg)) - - c.Assert(c1.Memory, checker.Equals, int64(64*1024*1024), check.Commentf("resource constraints not set properly for Memory")) - c.Assert(c1.MemorySwap, checker.Equals, int64(-1), check.Commentf("resource constraints not set properly for MemorySwap")) - c.Assert(c1.CpusetCpus, checker.Equals, "0", 
check.Commentf("resource constraints not set properly for CpusetCpus")) - c.Assert(c1.CpusetMems, checker.Equals, "0", check.Commentf("resource constraints not set properly for CpusetMems")) - c.Assert(c1.CPUShares, checker.Equals, int64(100), check.Commentf("resource constraints not set properly for CPUShares")) - c.Assert(c1.CPUQuota, checker.Equals, int64(8000), check.Commentf("resource constraints not set properly for CPUQuota")) - c.Assert(c1.Ulimits[0].Name, checker.Equals, "nofile", check.Commentf("resource constraints not set properly for Ulimits")) - c.Assert(c1.Ulimits[0].Hard, checker.Equals, int64(42), check.Commentf("resource constraints not set properly for Ulimits")) - - // Make sure constraints aren't saved to image - dockerCmd(c, "run", "--name=test", name) - - cfg = inspectFieldJSON(c, "test", "HostConfig") - - var c2 hostConfig - err = json.Unmarshal([]byte(cfg), &c2) - c.Assert(err, checker.IsNil, check.Commentf(cfg)) - - c.Assert(c2.Memory, check.Not(checker.Equals), int64(64*1024*1024), check.Commentf("resource leaked from build for Memory")) - c.Assert(c2.MemorySwap, check.Not(checker.Equals), int64(-1), check.Commentf("resource leaked from build for MemorySwap")) - c.Assert(c2.CpusetCpus, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetCpus")) - c.Assert(c2.CpusetMems, check.Not(checker.Equals), "0", check.Commentf("resource leaked from build for CpusetMems")) - c.Assert(c2.CPUShares, check.Not(checker.Equals), int64(100), check.Commentf("resource leaked from build for CPUShares")) - c.Assert(c2.CPUQuota, check.Not(checker.Equals), int64(8000), check.Commentf("resource leaked from build for CPUQuota")) - c.Assert(c2.Ulimits, checker.IsNil, check.Commentf("resource leaked from build for Ulimits")) -} - -func (s *DockerSuite) TestBuildAddChangeOwnership(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildaddown" - - ctx := func() *FakeContext { - dockerfile := ` - FROM busybox - ADD foo /bar/ - RUN [ $(stat -c %U:%G "/bar") = 'root:root' ] - RUN [ $(stat -c %U:%G "/bar/foo") = 'root:root' ] - ` - tmpDir, err := ioutil.TempDir("", "fake-context") - c.Assert(err, check.IsNil) - testFile, err := os.Create(filepath.Join(tmpDir, "foo")) - if err != nil { - c.Fatalf("failed to create foo file: %v", err) - } - defer testFile.Close() - - chownCmd := exec.Command("chown", "daemon:daemon", "foo") - chownCmd.Dir = tmpDir - out, _, err := runCommandWithOutput(chownCmd) - if err != nil { - c.Fatal(err, out) - } - - if err := ioutil.WriteFile(filepath.Join(tmpDir, "Dockerfile"), []byte(dockerfile), 0644); err != nil { - c.Fatalf("failed to open destination dockerfile: %v", err) - } - return fakeContextFromDir(tmpDir) - }() - - defer ctx.Close() - - if _, err := buildImageFromContext(name, ctx, true); err != nil { - c.Fatalf("build failed to complete for TestBuildAddChangeOwnership: %v", err) - } -} - -// Test that an infinite sleep during a build is killed if the client disconnects. -// This test is fairly hairy because there are lots of ways to race. -// Strategy: -// * Monitor the output of docker events starting from before -// * Run a 1-year-long sleep from a docker build. -// * When docker events sees container start, close the "docker build" command -// * Wait for docker events to emit a dying event. 
-func (s *DockerSuite) TestBuildCancellationKillsSleep(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testbuildcancellation" - - observer, err := newEventObserver(c) - c.Assert(err, checker.IsNil) - err = observer.Start() - c.Assert(err, checker.IsNil) - defer observer.Stop() - - // (Note: one year, will never finish) - ctx, err := fakeContext("FROM busybox\nRUN sleep 31536000", nil) - if err != nil { - c.Fatal(err) - } - defer ctx.Close() - - buildCmd := exec.Command(dockerBinary, "build", "-t", name, ".") - buildCmd.Dir = ctx.Dir - - stdoutBuild, err := buildCmd.StdoutPipe() - c.Assert(err, checker.IsNil) - if err := buildCmd.Start(); err != nil { - c.Fatalf("failed to run build: %s", err) - } - - matchCID := regexp.MustCompile("Running in (.+)") - scanner := bufio.NewScanner(stdoutBuild) - - outputBuffer := new(bytes.Buffer) - var buildID string - for scanner.Scan() { - line := scanner.Text() - outputBuffer.WriteString(line) - outputBuffer.WriteString("\n") - if matches := matchCID.FindStringSubmatch(line); len(matches) > 0 { - buildID = matches[1] - break - } - } - - if buildID == "" { - c.Fatalf("Unable to find build container id in build output:\n%s", outputBuffer.String()) - } - - testActions := map[string]chan bool{ - "start": make(chan bool, 1), - "die": make(chan bool, 1), - } - - matcher := matchEventLine(buildID, "container", testActions) - processor := processEventMatch(testActions) - go observer.Match(matcher, processor) - - select { - case <-time.After(10 * time.Second): - observer.CheckEventError(c, buildID, "start", matcher) - case <-testActions["start"]: - // ignore, done - } - - // Send a kill to the `docker build` command. - // Causes the underlying build to be cancelled due to socket close. - if err := buildCmd.Process.Kill(); err != nil { - c.Fatalf("error killing build command: %s", err) - } - - // Get the exit status of `docker build` and check that it exited because it was killed. 
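// The isKilled helper used just below lives elsewhere in this suite; a
// plausible shape for it (an assumption, not the suite's actual code) is to
// unwrap the *exec.ExitError and check the wait status for SIGKILL,
// assuming the os/exec and syscall packages on a Unix platform:
func isKilledSketch(err error) bool {
	exitErr, ok := err.(*exec.ExitError)
	if !ok {
		return false
	}
	// On Unix, the ProcessState's Sys() value is a syscall.WaitStatus.
	status, ok := exitErr.Sys().(syscall.WaitStatus)
	return ok && status.Signaled() && status.Signal() == syscall.SIGKILL
}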
- if err := buildCmd.Wait(); err != nil && !isKilled(err) { - c.Fatalf("wait failed during build run: %T %s", err, err) - } - - select { - case <-time.After(10 * time.Second): - observer.CheckEventError(c, buildID, "die", matcher) - case <-testActions["die"]: - // ignore, done - } -} diff --git a/integration-cli/docker_cli_by_digest_test.go b/integration-cli/docker_cli_by_digest_test.go deleted file mode 100644 index b62b24e220..0000000000 --- a/integration-cli/docker_cli_by_digest_test.go +++ /dev/null @@ -1,693 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "regexp" - "strings" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema1" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/engine-api/types" - "github.com/go-check/check" -) - -var ( - remoteRepoName = "dockercli/busybox-by-dgst" - repoName = fmt.Sprintf("%s/%s", privateRegistryURL, remoteRepoName) - pushDigestRegex = regexp.MustCompile("[\\S]+: digest: ([\\S]+) size: [0-9]+") - digestRegex = regexp.MustCompile("Digest: ([\\S]+)") -) - -func setupImage(c *check.C) (digest.Digest, error) { - return setupImageWithTag(c, "latest") -} - -func setupImageWithTag(c *check.C, tag string) (digest.Digest, error) { - containerName := "busyboxbydigest" - - dockerCmd(c, "run", "-e", "digest=1", "--name", containerName, "busybox") - - // tag the image to upload it to the private registry - repoAndTag := repoName + ":" + tag - out, _, err := dockerCmdWithError("commit", containerName, repoAndTag) - c.Assert(err, checker.IsNil, check.Commentf("image tagging failed: %s", out)) - - // delete the container as we don't need it any more - err = deleteContainer(containerName) - c.Assert(err, checker.IsNil) - - // push the image - out, _, err = dockerCmdWithError("push", repoAndTag) - c.Assert(err, checker.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) - - // delete our local repo that we previously tagged - rmiout, _, err := dockerCmdWithError("rmi", repoAndTag) - c.Assert(err, checker.IsNil, check.Commentf("error deleting images prior to real test: %s", rmiout)) - - matches := pushDigestRegex.FindStringSubmatch(out) - c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from push output: %s", out)) - pushDigest := matches[1] - - return digest.Digest(pushDigest), nil -} - -func testPullByTagDisplaysDigest(c *check.C) { - testRequires(c, DaemonIsLinux) - pushDigest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - // pull from the registry using the tag - out, _ := dockerCmd(c, "pull", repoName) - - // the pull output includes "Digest: ", so find that - matches := digestRegex.FindStringSubmatch(out) - c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) - pullDigest := matches[1] - - // make sure the pushed and pull digests match - c.Assert(pushDigest.String(), checker.Equals, pullDigest) -} - -func (s *DockerRegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { - testPullByTagDisplaysDigest(c) -} - -func (s *DockerSchema1RegistrySuite) TestPullByTagDisplaysDigest(c *check.C) { - testPullByTagDisplaysDigest(c) -} - -func testPullByDigest(c *check.C) { - testRequires(c, DaemonIsLinux) - pushDigest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - 
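// As an aside, this is how the pushDigestRegex declared above extracts the
// digest from `docker push` output; the sample line below is fabricated but
// follows the real "tag: digest: ... size: ..." shape:
func examplePushDigestParse() {
	line := "latest: digest: sha256:7d865e959b2466918c9863afca942d0fb89d7c9ac0c99bafc3749504ded97730 size: 1357"
	re := regexp.MustCompile(`[\S]+: digest: ([\S]+) size: [0-9]+`)
	if m := re.FindStringSubmatch(line); len(m) == 2 {
		fmt.Println("pushed digest:", m[1]) // prints the sha256:... digest
	}
}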
- // pull from the registry using the @ reference - imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) - out, _ := dockerCmd(c, "pull", imageReference) - - // the pull output includes "Digest: ", so find that - matches := digestRegex.FindStringSubmatch(out) - c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) - pullDigest := matches[1] - - // make sure the pushed and pull digests match - c.Assert(pushDigest.String(), checker.Equals, pullDigest) -} - -func (s *DockerRegistrySuite) TestPullByDigest(c *check.C) { - testPullByDigest(c) -} - -func (s *DockerSchema1RegistrySuite) TestPullByDigest(c *check.C) { - testPullByDigest(c) -} - -func testPullByDigestNoFallback(c *check.C) { - testRequires(c, DaemonIsLinux) - // pull from the registry using the @ reference - imageReference := fmt.Sprintf("%s@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", repoName) - out, _, err := dockerCmdWithError("pull", imageReference) - c.Assert(err, checker.NotNil, check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) - c.Assert(out, checker.Contains, "manifest unknown", check.Commentf("expected non-zero exit status and correct error message when pulling non-existing image")) -} - -func (s *DockerRegistrySuite) TestPullByDigestNoFallback(c *check.C) { - testPullByDigestNoFallback(c) -} - -func (s *DockerSchema1RegistrySuite) TestPullByDigestNoFallback(c *check.C) { - testPullByDigestNoFallback(c) -} - -func (s *DockerRegistrySuite) TestCreateByDigest(c *check.C) { - pushDigest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) - - containerName := "createByDigest" - dockerCmd(c, "create", "--name", containerName, imageReference) - - res := inspectField(c, containerName, "Config.Image") - c.Assert(res, checker.Equals, imageReference) -} - -func (s *DockerRegistrySuite) TestRunByDigest(c *check.C) { - pushDigest, err := setupImage(c) - c.Assert(err, checker.IsNil) - - imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) - - containerName := "runByDigest" - out, _ := dockerCmd(c, "run", "--name", containerName, imageReference, "sh", "-c", "echo found=$digest") - - foundRegex := regexp.MustCompile("found=([^\n]+)") - matches := foundRegex.FindStringSubmatch(out) - c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) - c.Assert(matches[1], checker.Equals, "1", check.Commentf("Expected %q, got %q", "1", matches[1])) - - res := inspectField(c, containerName, "Config.Image") - c.Assert(res, checker.Equals, imageReference) -} - -func (s *DockerRegistrySuite) TestRemoveImageByDigest(c *check.C) { - digest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - imageReference := fmt.Sprintf("%s@%s", repoName, digest) - - // pull from the registry using the @ reference - dockerCmd(c, "pull", imageReference) - - // make sure inspect runs ok - inspectField(c, imageReference, "Id") - - // do the delete - err = deleteImages(imageReference) - c.Assert(err, checker.IsNil, check.Commentf("unexpected error deleting image")) - - // try to inspect again - it should error this time - _, err = inspectFieldWithError(imageReference, "Id") - //unexpected nil err trying to inspect what should be a non-existent image - c.Assert(err, checker.NotNil) - c.Assert(err.Error(), checker.Contains, "No 
such image") -} - -func (s *DockerRegistrySuite) TestBuildByDigest(c *check.C) { - digest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - imageReference := fmt.Sprintf("%s@%s", repoName, digest) - - // pull from the registry using the @ reference - dockerCmd(c, "pull", imageReference) - - // get the image id - imageID := inspectField(c, imageReference, "Id") - - // do the build - name := "buildbydigest" - _, err = buildImage(name, fmt.Sprintf( - `FROM %s - CMD ["/bin/echo", "Hello World"]`, imageReference), - true) - c.Assert(err, checker.IsNil) - - // get the build's image id - res := inspectField(c, name, "Config.Image") - // make sure they match - c.Assert(res, checker.Equals, imageID) -} - -func (s *DockerRegistrySuite) TestTagByDigest(c *check.C) { - digest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - imageReference := fmt.Sprintf("%s@%s", repoName, digest) - - // pull from the registry using the @ reference - dockerCmd(c, "pull", imageReference) - - // tag it - tag := "tagbydigest" - dockerCmd(c, "tag", imageReference, tag) - - expectedID := inspectField(c, imageReference, "Id") - - tagID := inspectField(c, tag, "Id") - c.Assert(tagID, checker.Equals, expectedID) -} - -func (s *DockerRegistrySuite) TestListImagesWithoutDigests(c *check.C) { - digest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - imageReference := fmt.Sprintf("%s@%s", repoName, digest) - - // pull from the registry using the @ reference - dockerCmd(c, "pull", imageReference) - - out, _ := dockerCmd(c, "images") - c.Assert(out, checker.Not(checker.Contains), "DIGEST", check.Commentf("list output should not have contained DIGEST header")) -} - -func (s *DockerRegistrySuite) TestListImagesWithDigests(c *check.C) { - - // setup image1 - digest1, err := setupImageWithTag(c, "tag1") - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) - c.Logf("imageReference1 = %s", imageReference1) - - // pull image1 by digest - dockerCmd(c, "pull", imageReference1) - - // list images - out, _ := dockerCmd(c, "images", "--digests") - - // make sure repo shown, tag=, digest = $digest1 - re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) - c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) - // setup image2 - digest2, err := setupImageWithTag(c, "tag2") - //error setting up image - c.Assert(err, checker.IsNil) - imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) - c.Logf("imageReference2 = %s", imageReference2) - - // pull image1 by digest - dockerCmd(c, "pull", imageReference1) - - // pull image2 by digest - dockerCmd(c, "pull", imageReference2) - - // list images - out, _ = dockerCmd(c, "images", "--digests") - - // make sure repo shown, tag=, digest = $digest1 - c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) - - // make sure repo shown, tag=, digest = $digest2 - re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) - c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) - - // pull tag1 - dockerCmd(c, "pull", repoName+":tag1") - - // list images - out, _ = dockerCmd(c, "images", "--digests") - - // make sure image 1 has repo, tag, AND repo, , digest - reWithDigest1 := regexp.MustCompile(`\s*` + 
repoName + `\s*tag1\s*` + digest1.String() + `\s`) - c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) - // make sure image 2 has repo, , digest - c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) - - // pull tag 2 - dockerCmd(c, "pull", repoName+":tag2") - - // list images - out, _ = dockerCmd(c, "images", "--digests") - - // make sure image 1 has repo, tag, digest - c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) - - // make sure image 2 has repo, tag, digest - reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*tag2\s*` + digest2.String() + `\s`) - c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) - - // list images - out, _ = dockerCmd(c, "images", "--digests") - - // make sure image 1 has repo, tag, digest - c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) - // make sure image 2 has repo, tag, digest - c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) - // make sure busybox has tag, but not digest - busyboxRe := regexp.MustCompile(`\s*busybox\s*latest\s*\s`) - c.Assert(busyboxRe.MatchString(out), checker.True, check.Commentf("expected %q: %s", busyboxRe.String(), out)) -} - -func (s *DockerRegistrySuite) TestListDanglingImagesWithDigests(c *check.C) { - // setup image1 - digest1, err := setupImageWithTag(c, "dangle1") - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - imageReference1 := fmt.Sprintf("%s@%s", repoName, digest1) - c.Logf("imageReference1 = %s", imageReference1) - - // pull image1 by digest - dockerCmd(c, "pull", imageReference1) - - // list images - out, _ := dockerCmd(c, "images", "--digests") - - // make sure repo shown, tag=, digest = $digest1 - re1 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest1.String() + `\s`) - c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) - // setup image2 - digest2, err := setupImageWithTag(c, "dangle2") - //error setting up image - c.Assert(err, checker.IsNil) - imageReference2 := fmt.Sprintf("%s@%s", repoName, digest2) - c.Logf("imageReference2 = %s", imageReference2) - - // pull image1 by digest - dockerCmd(c, "pull", imageReference1) - - // pull image2 by digest - dockerCmd(c, "pull", imageReference2) - - // list images - out, _ = dockerCmd(c, "images", "--digests", "--filter=\"dangling=true\"") - - // make sure repo shown, tag=, digest = $digest1 - c.Assert(re1.MatchString(out), checker.True, check.Commentf("expected %q: %s", re1.String(), out)) - - // make sure repo shown, tag=, digest = $digest2 - re2 := regexp.MustCompile(`\s*` + repoName + `\s*\s*` + digest2.String() + `\s`) - c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) - - // pull dangle1 tag - dockerCmd(c, "pull", repoName+":dangle1") - - // list images - out, _ = dockerCmd(c, "images", "--digests", "--filter=\"dangling=true\"") - - // make sure image 1 has repo, tag, AND repo, , digest - reWithDigest1 := regexp.MustCompile(`\s*` + repoName + `\s*dangle1\s*` + digest1.String() + `\s`) - c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) - // make sure image 2 has repo, , digest 
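// The <none> in the regexes above is literal text: `docker images --digests`
// prints <none> in the TAG column for digest-only pulls. A self-contained
// check against a fabricated table row (the repo and digest values here are
// made up for illustration):
func exampleDigestRowMatch() {
	repo := "127.0.0.1:5000/dockercli/busybox-by-dgst" // assumed private-registry repo name
	row := repo + "   <none>   sha256:deadbeef   1234abcd   2 minutes ago   1.1 MB"
	re := regexp.MustCompile(`\s*` + regexp.QuoteMeta(repo) + `\s*<none>\s*sha256:deadbeef\s`)
	fmt.Println(re.MatchString(row)) // true
}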
- // make sure image 2 is still dangling: repo, <none>, digest - c.Assert(re2.MatchString(out), checker.True, check.Commentf("expected %q: %s", re2.String(), out)) - - // pull dangle2 tag - dockerCmd(c, "pull", repoName+":dangle2") - - // list images, show tagged images - out, _ = dockerCmd(c, "images", "--digests") - - // make sure image 1 has repo, tag, digest - c.Assert(reWithDigest1.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest1.String(), out)) - - // make sure image 2 has repo, tag, digest - reWithDigest2 := regexp.MustCompile(`\s*` + repoName + `\s*dangle2\s*` + digest2.String() + `\s`) - c.Assert(reWithDigest2.MatchString(out), checker.True, check.Commentf("expected %q: %s", reWithDigest2.String(), out)) - - // list images again with the dangling filter; neither image should match now - out, _ = dockerCmd(c, "images", "--digests", "--filter=\"dangling=true\"") - - // image 1 is no longer dangling, so it must not match - c.Assert(reWithDigest1.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest1.String(), out)) - // image 2 is no longer dangling, so it must not match - c.Assert(reWithDigest2.MatchString(out), checker.False, check.Commentf("unexpected %q: %s", reWithDigest2.String(), out)) -} - -func (s *DockerRegistrySuite) TestInspectImageWithDigests(c *check.C) { - digest, err := setupImage(c) - c.Assert(err, check.IsNil, check.Commentf("error setting up image")) - - imageReference := fmt.Sprintf("%s@%s", repoName, digest) - - // pull from the registry using the @ reference - dockerCmd(c, "pull", imageReference) - - out, _ := dockerCmd(c, "inspect", imageReference) - - var imageJSON []types.ImageInspect - err = json.Unmarshal([]byte(out), &imageJSON) - c.Assert(err, checker.IsNil) - c.Assert(imageJSON, checker.HasLen, 1) - c.Assert(imageJSON[0].RepoDigests, checker.HasLen, 1) - c.Assert(stringutils.InSlice(imageJSON[0].RepoDigests, imageReference), checker.Equals, true) -} - -func (s *DockerRegistrySuite) TestPsListContainersFilterAncestorImageByDigest(c *check.C) { - digest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - imageReference := fmt.Sprintf("%s@%s", repoName, digest) - - // pull from the registry using the @ reference - dockerCmd(c, "pull", imageReference) - - // build an image from it - imageName1 := "images_ps_filter_test" - _, err = buildImage(imageName1, fmt.Sprintf( - `FROM %s - LABEL match me 1`, imageReference), true) - c.Assert(err, checker.IsNil) - - // run a container based on that - dockerCmd(c, "run", "--name=test1", imageReference, "echo", "hello") - expectedID, err := getIDByName("test1") - c.Assert(err, check.IsNil) - - // run a container based on a descendant of it too - dockerCmd(c, "run", "--name=test2", imageName1, "echo", "hello") - expectedID1, err := getIDByName("test2") - c.Assert(err, check.IsNil) - - expectedIDs := []string{expectedID, expectedID1} - - // Invalid imageReference - out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", fmt.Sprintf("--filter=ancestor=busybox@%s", digest)) - // Filter container for ancestor filter should be empty - c.Assert(strings.TrimSpace(out), checker.Equals, "") - - // Valid imageReference - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageReference) - checkPsAncestorFilterOutput(c, out, imageReference, expectedIDs) -} - -func (s *DockerRegistrySuite) TestDeleteImageByIDOnlyPulledByDigest(c *check.C) { - pushDigest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - // pull from the registry using the @ reference - 
imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) - dockerCmd(c, "pull", imageReference) - // just in case... - - dockerCmd(c, "tag", imageReference, repoName+":sometag") - - imageID := inspectField(c, imageReference, "Id") - - dockerCmd(c, "rmi", imageID) - - _, err = inspectFieldWithError(imageID, "Id") - c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) -} - -func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndTag(c *check.C) { - pushDigest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - // pull from the registry using the @ reference - imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) - dockerCmd(c, "pull", imageReference) - - imageID := inspectField(c, imageReference, "Id") - - repoTag := repoName + ":sometag" - repoTag2 := repoName + ":othertag" - dockerCmd(c, "tag", imageReference, repoTag) - dockerCmd(c, "tag", imageReference, repoTag2) - - dockerCmd(c, "rmi", repoTag2) - - // rmi should have deleted only repoTag2, because there's another tag - inspectField(c, repoTag, "Id") - - dockerCmd(c, "rmi", repoTag) - - // rmi should have deleted the tag, the digest reference, and the image itself - _, err = inspectFieldWithError(imageID, "Id") - c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) -} - -func (s *DockerRegistrySuite) TestDeleteImageWithDigestAndMultiRepoTag(c *check.C) { - pushDigest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - repo2 := fmt.Sprintf("%s/%s", repoName, "repo2") - - // pull from the registry using the @ reference - imageReference := fmt.Sprintf("%s@%s", repoName, pushDigest) - dockerCmd(c, "pull", imageReference) - - imageID := inspectField(c, imageReference, "Id") - - repoTag := repoName + ":sometag" - repoTag2 := repo2 + ":othertag" - dockerCmd(c, "tag", imageReference, repoTag) - dockerCmd(c, "tag", imageReference, repoTag2) - - dockerCmd(c, "rmi", repoTag) - - // rmi should have deleted repoTag and image reference, but left repoTag2 - inspectField(c, repoTag2, "Id") - _, err = inspectFieldWithError(imageReference, "Id") - c.Assert(err, checker.NotNil, check.Commentf("image digest reference should have been removed")) - - _, err = inspectFieldWithError(repoTag, "Id") - c.Assert(err, checker.NotNil, check.Commentf("image tag reference should have been removed")) - - dockerCmd(c, "rmi", repoTag2) - - // rmi should have deleted the tag, the digest reference, and the image itself - _, err = inspectFieldWithError(imageID, "Id") - c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) -} - -// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when -// we have modified a manifest blob and its digest cannot be verified. -// This is the schema2 version of the test. -func (s *DockerRegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { - testRequires(c, DaemonIsLinux) - manifestDigest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - // Load the target manifest blob. - manifestBlob := s.reg.readBlobContents(c, manifestDigest) - - var imgManifest schema2.Manifest - err = json.Unmarshal(manifestBlob, &imgManifest) - c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) - - // Change a layer in the manifest. 
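// Why the altered manifest will fail to pull: registry blobs are content
// addressed, so a blob's digest is simply the sha256 of its bytes, and any
// edit changes it. A minimal sketch of the verification step (standard
// library only; the function name is illustrative, not the daemon's code):
func verifyBlobSketch(expectedDigest string, blob []byte) error {
	sum := sha256.Sum256(blob)
	actual := "sha256:" + hex.EncodeToString(sum[:])
	if actual != expectedDigest {
		return fmt.Errorf("digest verification failed: want %s, got %s", expectedDigest, actual)
	}
	return nil
}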
- imgManifest.Layers[0].Digest = digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef") - - // Move the existing data file aside, so that we can replace it with a - // malicious blob of data. NOTE: we defer the returned undo func. - undo := s.reg.tempMoveBlobData(c, manifestDigest) - defer undo() - - alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") - c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) - - s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) - - // Now try pulling that image by digest. We should get an error about - // digest verification for the manifest digest. - - // Pull from the registry using the @ reference. - imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) - out, exitStatus, _ := dockerCmdWithError("pull", imageReference) - c.Assert(exitStatus, checker.Not(check.Equals), 0) - - expectedErrorMsg := fmt.Sprintf("manifest verification failed for digest %s", manifestDigest) - c.Assert(out, checker.Contains, expectedErrorMsg) -} - -// TestPullFailsWithAlteredManifest tests that a `docker pull` fails when -// we have modified a manifest blob and its digest cannot be verified. -// This is the schema1 version of the test. -func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredManifest(c *check.C) { - testRequires(c, DaemonIsLinux) - manifestDigest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - // Load the target manifest blob. - manifestBlob := s.reg.readBlobContents(c, manifestDigest) - - var imgManifest schema1.Manifest - err = json.Unmarshal(manifestBlob, &imgManifest) - c.Assert(err, checker.IsNil, check.Commentf("unable to decode image manifest from blob")) - - // Change a layer in the manifest. - imgManifest.FSLayers[0] = schema1.FSLayer{ - BlobSum: digest.Digest("sha256:0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"), - } - - // Move the existing data file aside, so that we can replace it with a - // malicious blob of data. NOTE: we defer the returned undo func. - undo := s.reg.tempMoveBlobData(c, manifestDigest) - defer undo() - - alteredManifestBlob, err := json.MarshalIndent(imgManifest, "", " ") - c.Assert(err, checker.IsNil, check.Commentf("unable to encode altered image manifest to JSON")) - - s.reg.writeBlobContents(c, manifestDigest, alteredManifestBlob) - - // Now try pulling that image by digest. We should get an error about - // digest verification for the manifest digest. - - // Pull from the registry using the @ reference. - imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) - out, exitStatus, _ := dockerCmdWithError("pull", imageReference) - c.Assert(exitStatus, checker.Not(check.Equals), 0) - - expectedErrorMsg := fmt.Sprintf("image verification failed for digest %s", manifestDigest) - c.Assert(out, checker.Contains, expectedErrorMsg) -} - -// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when -// we have modified a layer blob and its digest cannot be verified. -// This is the schema2 version of the test. -func (s *DockerRegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { - testRequires(c, DaemonIsLinux) - manifestDigest, err := setupImage(c) - c.Assert(err, checker.IsNil) - - // Load the target manifest blob. 
- manifestBlob := s.reg.readBlobContents(c, manifestDigest) - - var imgManifest schema2.Manifest - err = json.Unmarshal(manifestBlob, &imgManifest) - c.Assert(err, checker.IsNil) - - // Next, get the digest of one of the layers from the manifest. - targetLayerDigest := imgManifest.Layers[0].Digest - - // Move the existing data file aside, so that we can replace it with a - // malicious blob of data. NOTE: we defer the returned undo func. - undo := s.reg.tempMoveBlobData(c, targetLayerDigest) - defer undo() - - // Now make a fake data blob in this directory. - s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) - - // Now try pulling that image by digest. We should get an error about - // digest verification for the target layer digest. - - // Remove distribution cache to force a re-pull of the blobs - if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { - c.Fatalf("error clearing distribution cache: %v", err) - } - - // Pull from the registry using the @ reference. - imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) - out, exitStatus, _ := dockerCmdWithError("pull", imageReference) - c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) - - expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) - c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) -} - -// TestPullFailsWithAlteredLayer tests that a `docker pull` fails when -// we have modified a layer blob and its digest cannot be verified. -// This is the schema1 version of the test. -func (s *DockerSchema1RegistrySuite) TestPullFailsWithAlteredLayer(c *check.C) { - testRequires(c, DaemonIsLinux) - manifestDigest, err := setupImage(c) - c.Assert(err, checker.IsNil) - - // Load the target manifest blob. - manifestBlob := s.reg.readBlobContents(c, manifestDigest) - - var imgManifest schema1.Manifest - err = json.Unmarshal(manifestBlob, &imgManifest) - c.Assert(err, checker.IsNil) - - // Next, get the digest of one of the layers from the manifest. - targetLayerDigest := imgManifest.FSLayers[0].BlobSum - - // Move the existing data file aside, so that we can replace it with a - // malicious blob of data. NOTE: we defer the returned undo func. - undo := s.reg.tempMoveBlobData(c, targetLayerDigest) - defer undo() - - // Now make a fake data blob in this directory. - s.reg.writeBlobContents(c, targetLayerDigest, []byte("This is not the data you are looking for.")) - - // Now try pulling that image by digest. We should get an error about - // digest verification for the target layer digest. - - // Remove distribution cache to force a re-pull of the blobs - if err := os.RemoveAll(filepath.Join(dockerBasePath, "image", s.d.storageDriver, "distribution")); err != nil { - c.Fatalf("error clearing distribution cache: %v", err) - } - - // Pull from the registry using the @ reference. 
- imageReference := fmt.Sprintf("%s@%s", repoName, manifestDigest) - out, exitStatus, _ := dockerCmdWithError("pull", imageReference) - c.Assert(exitStatus, checker.Not(check.Equals), 0, check.Commentf("expected a non-zero exit status")) - - expectedErrorMsg := fmt.Sprintf("filesystem layer verification failed for digest %s", targetLayerDigest) - c.Assert(out, checker.Contains, expectedErrorMsg, check.Commentf("expected error message in output: %s", out)) -} diff --git a/integration-cli/docker_cli_commit_test.go b/integration-cli/docker_cli_commit_test.go deleted file mode 100644 index 086a203124..0000000000 --- a/integration-cli/docker_cli_commit_test.go +++ /dev/null @@ -1,189 +0,0 @@ -package main - -import ( - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestCommitAfterContainerIsDone(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") - - cleanedContainerID := strings.TrimSpace(out) - - dockerCmd(c, "wait", cleanedContainerID) - - out, _ = dockerCmd(c, "commit", cleanedContainerID) - - cleanedImageID := strings.TrimSpace(out) - - dockerCmd(c, "inspect", cleanedImageID) -} - -func (s *DockerSuite) TestCommitWithoutPause(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-i", "-a", "stdin", "busybox", "echo", "foo") - - cleanedContainerID := strings.TrimSpace(out) - - dockerCmd(c, "wait", cleanedContainerID) - - out, _ = dockerCmd(c, "commit", "-p=false", cleanedContainerID) - - cleanedImageID := strings.TrimSpace(out) - - dockerCmd(c, "inspect", cleanedImageID) -} - -// Test that committing a paused container does not unpause it after the commit -func (s *DockerSuite) TestCommitPausedContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - defer unpauseAllContainers() - out, _ := dockerCmd(c, "run", "-i", "-d", "busybox") - - cleanedContainerID := strings.TrimSpace(out) - - dockerCmd(c, "pause", cleanedContainerID) - - out, _ = dockerCmd(c, "commit", cleanedContainerID) - - out = inspectField(c, cleanedContainerID, "State.Paused") - // commit should not unpause a paused container - c.Assert(out, checker.Contains, "true") -} - -func (s *DockerSuite) TestCommitNewFile(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "--name", "commiter", "busybox", "/bin/sh", "-c", "echo koye > /foo") - - imageID, _ := dockerCmd(c, "commit", "commiter") - imageID = strings.TrimSpace(imageID) - - out, _ := dockerCmd(c, "run", imageID, "cat", "/foo") - actual := strings.TrimSpace(out) - c.Assert(actual, checker.Equals, "koye") -} - -func (s *DockerSuite) TestCommitHardlink(c *check.C) { - testRequires(c, DaemonIsLinux) - firstOutput, _ := dockerCmd(c, "run", "-t", "--name", "hardlinks", "busybox", "sh", "-c", "touch file1 && ln file1 file2 && ls -di file1 file2") - - chunks := strings.Split(strings.TrimSpace(firstOutput), " ") - inode := chunks[0] - chunks = strings.SplitAfterN(strings.TrimSpace(firstOutput), " ", 2) - c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. 
Expected to find %q in %q", inode, chunks[1:])) - - imageID, _ := dockerCmd(c, "commit", "hardlinks", "hardlinks") - imageID = strings.TrimSpace(imageID) - - secondOutput, _ := dockerCmd(c, "run", "-t", "hardlinks", "ls", "-di", "file1", "file2") - - chunks = strings.Split(strings.TrimSpace(secondOutput), " ") - inode = chunks[0] - chunks = strings.SplitAfterN(strings.TrimSpace(secondOutput), " ", 2) - c.Assert(chunks[1], checker.Contains, chunks[0], check.Commentf("Failed to create hardlink in a container. Expected to find %q in %q", inode, chunks[1:])) -} - -func (s *DockerSuite) TestCommitTTY(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-t", "--name", "tty", "busybox", "/bin/ls") - - imageID, _ := dockerCmd(c, "commit", "tty", "ttytest") - imageID = strings.TrimSpace(imageID) - - dockerCmd(c, "run", "ttytest", "/bin/ls") -} - -func (s *DockerSuite) TestCommitWithHostBindMount(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "--name", "bind-commit", "-v", "/dev/null:/winning", "busybox", "true") - - imageID, _ := dockerCmd(c, "commit", "bind-commit", "bindtest") - imageID = strings.TrimSpace(imageID) - - dockerCmd(c, "run", "bindtest", "true") -} - -func (s *DockerSuite) TestCommitChange(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "--name", "test", "busybox", "true") - - imageID, _ := dockerCmd(c, "commit", - "--change", "EXPOSE 8080", - "--change", "ENV DEBUG true", - "--change", "ENV test 1", - "--change", "ENV PATH /foo", - "--change", "LABEL foo bar", - "--change", "CMD [\"/bin/sh\"]", - "--change", "WORKDIR /opt", - "--change", "ENTRYPOINT [\"/bin/sh\"]", - "--change", "USER testuser", - "--change", "VOLUME /var/lib/docker", - "--change", "ONBUILD /usr/local/bin/python-build --dir /app/src", - "test", "test-commit") - imageID = strings.TrimSpace(imageID) - - expected := map[string]string{ - "Config.ExposedPorts": "map[8080/tcp:{}]", - "Config.Env": "[DEBUG=true test=1 PATH=/foo]", - "Config.Labels": "map[foo:bar]", - "Config.Cmd": "[/bin/sh]", - "Config.WorkingDir": "/opt", - "Config.Entrypoint": "[/bin/sh]", - "Config.User": "testuser", - "Config.Volumes": "map[/var/lib/docker:{}]", - "Config.OnBuild": "[/usr/local/bin/python-build --dir /app/src]", - } - - for conf, value := range expected { - res := inspectField(c, imageID, conf) - if res != value { - c.Errorf("%s('%s'), expected %s", conf, res, value) - } - } -} - -// TODO: commit --run is deprecated, remove this once --run is removed -func (s *DockerSuite) TestCommitMergeConfigRun(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "commit-test" - out, _ := dockerCmd(c, "run", "-d", "-e=FOO=bar", "busybox", "/bin/sh", "-c", "echo testing > /tmp/foo") - id := strings.TrimSpace(out) - - dockerCmd(c, "commit", `--run={"Cmd": ["cat", "/tmp/foo"]}`, id, "commit-test") - - out, _ = dockerCmd(c, "run", "--name", name, "commit-test") - //run config in committed container was not merged - c.Assert(strings.TrimSpace(out), checker.Equals, "testing") - - type cfg struct { - Env []string - Cmd []string - } - config1 := cfg{} - inspectFieldAndMarshall(c, id, "Config", &config1) - - config2 := cfg{} - inspectFieldAndMarshall(c, name, "Config", &config2) - - // Env has at least PATH loaded as well here, so let's just grab the FOO one - var env1, env2 string - for _, e := range config1.Env { - if strings.HasPrefix(e, "FOO") { - env1 = e - break - } - } - for _, e := range config2.Env { - if strings.HasPrefix(e, "FOO") { - env2 = e - break - } - } - - if len(config1.Env) 
!= len(config2.Env) || env1 != env2 && env2 != "" { - c.Fatalf("expected envs to match: %v - %v", config1.Env, config2.Env) - } -} diff --git a/integration-cli/docker_cli_config_test.go b/integration-cli/docker_cli_config_test.go deleted file mode 100644 index 6015231065..0000000000 --- a/integration-cli/docker_cli_config_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package main - -import ( - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "os/exec" - "path/filepath" - "runtime" - - "github.com/docker/docker/dockerversion" - "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestConfigHttpHeader(c *check.C) { - testRequires(c, UnixCli) // Can't set/unset HOME on windows right now - // We either need a level of Go that supports Unsetenv (for cases - // when HOME/USERPROFILE isn't set), or we need to be able to use - // os/user but user.Current() only works if we aren't statically compiling - - var headers map[string][]string - - server := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - headers = r.Header - })) - defer server.Close() - - homeKey := homedir.Key() - homeVal := homedir.Get() - tmpDir, err := ioutil.TempDir("", "fake-home") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(tmpDir) - - dotDocker := filepath.Join(tmpDir, ".docker") - os.Mkdir(dotDocker, 0600) - tmpCfg := filepath.Join(dotDocker, "config.json") - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpDir) - - data := `{ - "HttpHeaders": { "MyHeader": "MyValue" } - }` - - err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) - c.Assert(err, checker.IsNil) - - cmd := exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") - out, _, _ := runCommandWithOutput(cmd) - - c.Assert(headers["User-Agent"], checker.NotNil, check.Commentf("Missing User-Agent")) - - c.Assert(headers["User-Agent"][0], checker.Equals, "Docker-Client/"+dockerversion.Version+" ("+runtime.GOOS+")", check.Commentf("Badly formatted User-Agent,out:%v", out)) - - c.Assert(headers["Myheader"], checker.NotNil) - c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("Missing/bad header,out:%v", out)) - -} - -func (s *DockerSuite) TestConfigDir(c *check.C) { - cDir, err := ioutil.TempDir("", "fake-home") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(cDir) - - // First make sure pointing to empty dir doesn't generate an error - dockerCmd(c, "--config", cDir, "ps") - - // Test with env var too - cmd := exec.Command(dockerBinary, "ps") - cmd.Env = appendBaseEnv(true, "DOCKER_CONFIG="+cDir) - out, _, err := runCommandWithOutput(cmd) - - c.Assert(err, checker.IsNil, check.Commentf("ps2 didn't work,out:%v", out)) - - // Start a server so we can check to see if the config file was - // loaded properly - var headers map[string][]string - - server := httptest.NewServer(http.HandlerFunc( - func(w http.ResponseWriter, r *http.Request) { - headers = r.Header - })) - defer server.Close() - - // Create a dummy config file in our new config dir - data := `{ - "HttpHeaders": { "MyHeader": "MyValue" } - }` - - tmpCfg := filepath.Join(cDir, "config.json") - err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) - c.Assert(err, checker.IsNil, check.Commentf("Err creating file")) - - env := appendBaseEnv(false) - - cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") - cmd.Env = env - out, _, err = runCommandWithOutput(cmd) - - c.Assert(err, checker.NotNil, 
check.Commentf("out:%v", out)) - c.Assert(headers["Myheader"], checker.NotNil) - c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps3 - Missing header,out:%v", out)) - - // Reset headers and try again using env var this time - headers = map[string][]string{} - cmd = exec.Command(dockerBinary, "-H="+server.URL[7:], "ps") - cmd.Env = append(env, "DOCKER_CONFIG="+cDir) - out, _, err = runCommandWithOutput(cmd) - - c.Assert(err, checker.NotNil, check.Commentf("%v", out)) - c.Assert(headers["Myheader"], checker.NotNil) - c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps4 - Missing header,out:%v", out)) - - // Reset headers and make sure flag overrides the env var - headers = map[string][]string{} - cmd = exec.Command(dockerBinary, "--config", cDir, "-H="+server.URL[7:], "ps") - cmd.Env = append(env, "DOCKER_CONFIG=MissingDir") - out, _, err = runCommandWithOutput(cmd) - - c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) - c.Assert(headers["Myheader"], checker.NotNil) - c.Assert(headers["Myheader"][0], checker.Equals, "MyValue", check.Commentf("ps5 - Missing header,out:%v", out)) - - // Reset headers and make sure flag overrides the env var. - // Almost same as previous but make sure the "MissingDir" isn't - // ignore - we don't want to default back to the env var. - headers = map[string][]string{} - cmd = exec.Command(dockerBinary, "--config", "MissingDir", "-H="+server.URL[7:], "ps") - cmd.Env = append(env, "DOCKER_CONFIG="+cDir) - out, _, err = runCommandWithOutput(cmd) - - c.Assert(err, checker.NotNil, check.Commentf("out:%v", out)) - c.Assert(headers["Myheader"], checker.IsNil, check.Commentf("ps6 - Headers shouldn't be the expected value,out:%v", out)) -} diff --git a/integration-cli/docker_cli_cp_from_container_test.go b/integration-cli/docker_cli_cp_from_container_test.go deleted file mode 100644 index 677085a134..0000000000 --- a/integration-cli/docker_cli_cp_from_container_test.go +++ /dev/null @@ -1,489 +0,0 @@ -package main - -import ( - "os" - "path/filepath" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// docker cp CONTAINER:PATH LOCALPATH - -// Try all of the test cases from the archive package which implements the -// internals of `docker cp` and ensure that the behavior matches when actually -// copying to and from containers. - -// Basic assumptions about SRC and DST: -// 1. SRC must exist. -// 2. If SRC ends with a trailing separator, it must be a directory. -// 3. DST parent directory must exist. -// 4. If DST exists as a file, it must not end with a trailing separator. - -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. -func (s *DockerSuite) TestCpFromErrSrcNotExists(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{}) - - tmpDir := getTestDir(c, "test-cp-from-err-src-not-exists") - defer os.RemoveAll(tmpDir) - - err := runDockerCp(c, containerCpPath(containerID, "file1"), tmpDir) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. 
-func (s *DockerSuite) TestCpFromErrSrcNotDir(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-err-src-not-dir") - defer os.RemoveAll(tmpDir) - - err := runDockerCp(c, containerCpPathTrailingSep(containerID, "file1"), tmpDir) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) -} - -// Test for error when SRC is a valid file or directory, -// but the DST parent directory does not exist. -func (s *DockerSuite) TestCpFromErrDstParentNotExists(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-err-dst-parent-not-exists") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. - srcPath := containerCpPath(containerID, "/file1") - dstPath := cpPath(tmpDir, "notExists", "file1") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) - - // Try with a directory source. - srcPath = containerCpPath(containerID, "/dir1") - - err = runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) -} - -// Test for error when DST ends in a trailing -// path separator but exists as a file. -func (s *DockerSuite) TestCpFromErrDstNotDir(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. - srcPath := containerCpPath(containerID, "/file1") - dstPath := cpPathTrailingSep(tmpDir, "file1") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) - - // Try with a directory source. - srcPath = containerCpPath(containerID, "/dir1") - - err = runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) -} - -// Check that copying from a container to a local symlink copies to the symlink -// target and does not overwrite the local symlink itself. -func (s *DockerSuite) TestCpFromSymlinkDestination(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-err-dst-not-dir") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // First, copy a file from the container to a symlink to a file. This - // should overwrite the symlink target contents with the source contents. - srcPath := containerCpPath(containerID, "/file2") - dstPath := cpPath(tmpDir, "symlinkToFile1") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // The symlink should not have been modified. - c.Assert(symlinkTargetEquals(c, dstPath, "file1"), checker.IsNil) - - // The file should have the contents of "file2" now. 
- c.Assert(fileContentEquals(c, cpPath(tmpDir, "file1"), "file2\n"), checker.IsNil) - - // Next, copy a file from the container to a symlink to a directory. This - // should copy the file into the symlink target directory. - dstPath = cpPath(tmpDir, "symlinkToDir1") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // The symlink should not have been modified. - c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) - - // The file should have the contents of "file2" now. - c.Assert(fileContentEquals(c, cpPath(tmpDir, "file2"), "file2\n"), checker.IsNil) - - // Next, copy a file from the container to a symlink to a file that does - // not exist (a broken symlink). This should create the target file with - // the contents of the source file. - dstPath = cpPath(tmpDir, "brokenSymlinkToFileX") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // The symlink should not have been modified. - c.Assert(symlinkTargetEquals(c, dstPath, "fileX"), checker.IsNil) - - // The file should have the contents of "file2" now. - c.Assert(fileContentEquals(c, cpPath(tmpDir, "fileX"), "file2\n"), checker.IsNil) - - // Next, copy a directory from the container to a symlink to a local - // directory. This should copy the directory into the symlink target - // directory and not modify the symlink. - srcPath = containerCpPath(containerID, "/dir2") - dstPath = cpPath(tmpDir, "symlinkToDir1") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // The symlink should not have been modified. - c.Assert(symlinkTargetEquals(c, dstPath, "dir1"), checker.IsNil) - - // The directory should now contain a copy of "dir2". - c.Assert(fileContentEquals(c, cpPath(tmpDir, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) - - // Next, copy a directory from the container to a symlink to a local - // directory that does not exist (a broken symlink). This should create - // the target as a directory with the contents of the source directory. It - // should not modify the symlink. - dstPath = cpPath(tmpDir, "brokenSymlinkToDirX") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // The symlink should not have been modified. - c.Assert(symlinkTargetEquals(c, dstPath, "dirX"), checker.IsNil) - - // The "dirX" directory should now be a copy of "dir2". - c.Assert(fileContentEquals(c, cpPath(tmpDir, "dirX/file2-1"), "file2-1\n"), checker.IsNil) -} - -// Possibilities are reduced to the remaining 10 cases: -// -// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action -// =================================================================================================== -// A | no | - | no | - | no | create file -// B | no | - | no | - | yes | error -// C | no | - | yes | no | - | overwrite file -// D | no | - | yes | yes | - | create file in dst dir -// E | yes | no | no | - | - | create dir, copy contents -// F | yes | no | yes | no | - | error -// G | yes | no | yes | yes | - | copy dir and contents -// H | yes | yes | no | - | - | create dir, copy contents -// I | yes | yes | yes | no | - | error -// J | yes | yes | yes | yes | - | copy dir contents -// - -// A. SRC specifies a file and DST (no trailing path separator) doesn't -// exist. This should create a file with the name DST and copy the -// contents of the source file into it. 
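// The ten-case table above, transcribed into code for reference (a sketch
// only; the real dispatch lives in the archive package, and the function
// name is illustrative):
func cpActionSketch(srcIsDir, onlyDirContents, dstExists, dstIsDir, dstTrailingSep bool) string {
	switch {
	case !srcIsDir && !dstExists && !dstTrailingSep:
		return "create file" // case A
	case !srcIsDir && !dstExists && dstTrailingSep:
		return "error" // case B
	case !srcIsDir && dstExists && !dstIsDir:
		return "overwrite file" // case C
	case !srcIsDir && dstExists && dstIsDir:
		return "create file in dst dir" // case D
	case srcIsDir && !onlyDirContents && !dstExists:
		return "create dir, copy contents" // case E
	case srcIsDir && !onlyDirContents && dstExists && !dstIsDir:
		return "error" // case F
	case srcIsDir && !onlyDirContents && dstExists && dstIsDir:
		return "copy dir and contents" // case G
	case srcIsDir && onlyDirContents && !dstExists:
		return "create dir, copy contents" // case H
	case srcIsDir && onlyDirContents && dstExists && !dstIsDir:
		return "error" // case I
	default:
		return "copy dir contents" // case J
	}
}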
-func (s *DockerSuite) TestCpFromCaseA(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - }) - - tmpDir := getTestDir(c, "test-cp-from-case-a") - defer os.RemoveAll(tmpDir) - - srcPath := containerCpPath(containerID, "/root/file1") - dstPath := cpPath(tmpDir, "itWorks.txt") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) -} - -// B. SRC specifies a file and DST (with trailing path separator) doesn't -// exist. This should cause an error because the copy operation cannot -// create a directory when copying a single file. -func (s *DockerSuite) TestCpFromCaseB(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-case-b") - defer os.RemoveAll(tmpDir) - - srcPath := containerCpPath(containerID, "/file1") - dstDir := cpPathTrailingSep(tmpDir, "testDir") - - err := runDockerCp(c, srcPath, dstDir) - c.Assert(err, checker.NotNil) - - c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) -} - -// C. SRC specifies a file and DST exists as a file. This should overwrite -// the file at DST with the contents of the source file. -func (s *DockerSuite) TestCpFromCaseC(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - }) - - tmpDir := getTestDir(c, "test-cp-from-case-c") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcPath := containerCpPath(containerID, "/root/file1") - dstPath := cpPath(tmpDir, "file2") - - // Ensure the local file starts with different content. - c.Assert(fileContentEquals(c, dstPath, "file2\n"), checker.IsNil) - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) -} - -// D. SRC specifies a file and DST exists as a directory. This should place -// a copy of the source file inside it using the basename from SRC. Ensure -// this works whether DST has a trailing path separator or not. -func (s *DockerSuite) TestCpFromCaseD(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-case-d") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcPath := containerCpPath(containerID, "/file1") - dstDir := cpPath(tmpDir, "dir1") - dstPath := filepath.Join(dstDir, "file1") - - // Ensure that dstPath doesn't exist. - _, err := os.Stat(dstPath) - c.Assert(os.IsNotExist(err), checker.True, check.Commentf("did not expect dstPath %q to exist", dstPath)) - - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) - - // Now try again but using a trailing path separator for dstDir. - - // unable to remove dstDir - c.Assert(os.RemoveAll(dstDir), checker.IsNil) - - // unable to make dstDir - c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) - - dstDir = cpPathTrailingSep(tmpDir, "dir1") - - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1\n"), checker.IsNil) -} - -// E. SRC specifies a directory and DST does not exist. 
This should create a -// directory at DST and copy the contents of the SRC directory into the DST -// directory. Ensure this works whether DST has a trailing path separator or -// not. -func (s *DockerSuite) TestCpFromCaseE(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-case-e") - defer os.RemoveAll(tmpDir) - - srcDir := containerCpPath(containerID, "dir1") - dstDir := cpPath(tmpDir, "testDir") - dstPath := filepath.Join(dstDir, "file1-1") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) - - // Now try again but using a trailing path separator for dstDir. - - // unable to remove dstDir - c.Assert(os.RemoveAll(dstDir), checker.IsNil) - - dstDir = cpPathTrailingSep(tmpDir, "testDir") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) -} - -// F. SRC specifies a directory and DST exists as a file. This should cause an -// error as it is not possible to overwrite a file with a directory. -func (s *DockerSuite) TestCpFromCaseF(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - }) - - tmpDir := getTestDir(c, "test-cp-from-case-f") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcDir := containerCpPath(containerID, "/root/dir1") - dstFile := cpPath(tmpDir, "file1") - - err := runDockerCp(c, srcDir, dstFile) - c.Assert(err, checker.NotNil) - - c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) -} - -// G. SRC specifies a directory and DST exists as a directory. This should copy -// the SRC directory and all its contents to the DST directory. Ensure this -// works whether DST has a trailing path separator or not. -func (s *DockerSuite) TestCpFromCaseG(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - }) - - tmpDir := getTestDir(c, "test-cp-from-case-g") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcDir := containerCpPath(containerID, "/root/dir1") - dstDir := cpPath(tmpDir, "dir2") - resultDir := filepath.Join(dstDir, "dir1") - dstPath := filepath.Join(resultDir, "file1-1") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) - - // Now try again but using a trailing path separator for dstDir. - - // unable to remove dstDir - c.Assert(os.RemoveAll(dstDir), checker.IsNil) - - // unable to make dstDir - c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) - - dstDir = cpPathTrailingSep(tmpDir, "dir2") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) -} - -// H. SRC specifies a directory's contents only and DST does not exist. This -// should create a directory at DST and copy the contents of the SRC -// directory (but not the directory itself) into the DST directory. Ensure -// this works whether DST has a trailing path separator or not. 
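// The "contents only" cases below use the cp convention of appending "/."
// to the source path, which asks for the directory's contents rather than
// the directory itself. A tiny sketch of that classification (the helper
// name is illustrative, not part of this suite):
func copiesDirContentsSketch(srcPath string) bool {
	// "/root/dir1/." means: copy what is inside dir1, not dir1 itself.
	return strings.HasSuffix(srcPath, "/.")
}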
-func (s *DockerSuite) TestCpFromCaseH(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-from-case-h") - defer os.RemoveAll(tmpDir) - - srcDir := containerCpPathTrailingSep(containerID, "dir1") + "." - dstDir := cpPath(tmpDir, "testDir") - dstPath := filepath.Join(dstDir, "file1-1") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) - - // Now try again but using a trailing path separator for dstDir. - - // unable to remove dstDir - c.Assert(os.RemoveAll(dstDir), checker.IsNil) - - dstDir = cpPathTrailingSep(tmpDir, "testDir") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) -} - -// I. SRC specifies a directory's contents only and DST exists as a file. This -// should cause an error as it is not possible to overwrite a file with a -// directory. -func (s *DockerSuite) TestCpFromCaseI(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - }) - - tmpDir := getTestDir(c, "test-cp-from-case-i") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." - dstFile := cpPath(tmpDir, "file1") - - err := runDockerCp(c, srcDir, dstFile) - c.Assert(err, checker.NotNil) - - c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) -} - -// J. SRC specifies a directory's contents only and DST exists as a directory. -// This should copy the contents of the SRC directory (but not the directory -// itself) into the DST directory. Ensure this works whether DST has a -// trailing path separator or not. -func (s *DockerSuite) TestCpFromCaseJ(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - }) - - tmpDir := getTestDir(c, "test-cp-from-case-j") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcDir := containerCpPathTrailingSep(containerID, "/root/dir1") + "." - dstDir := cpPath(tmpDir, "dir2") - dstPath := filepath.Join(dstDir, "file1-1") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) - - // Now try again but using a trailing path separator for dstDir.
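// Note: cases D, E, G, H, and J all repeat this remove/recreate/retry
// dance with a trailing path separator. A hypothetical helper (not part of
// the original suite) could factor it out; a minimal sketch, assuming the
// cpPathTrailingSep, runDockerCp, and fileContentEquals helpers defined in
// docker_cli_cp_utils.go:
//
//	func assertCpWithTrailingSep(c *check.C, src, tmpDir, dir, dstPath, want string) {
//		dstDir := cpPathTrailingSep(tmpDir, dir)
//		c.Assert(runDockerCp(c, src, dstDir), checker.IsNil)
//		c.Assert(fileContentEquals(c, dstPath, want), checker.IsNil)
//	}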
- - // unable to remove dstDir - c.Assert(os.RemoveAll(dstDir), checker.IsNil) - - // unable to make dstDir - c.Assert(os.MkdirAll(dstDir, os.FileMode(0755)), checker.IsNil) - - dstDir = cpPathTrailingSep(tmpDir, "dir2") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - c.Assert(fileContentEquals(c, dstPath, "file1-1\n"), checker.IsNil) -} diff --git a/integration-cli/docker_cli_cp_test.go b/integration-cli/docker_cli_cp_test.go deleted file mode 100644 index e3602c637c..0000000000 --- a/integration-cli/docker_cli_cp_test.go +++ /dev/null @@ -1,665 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -const ( - cpTestPathParent = "/some" - cpTestPath = "/some/path" - cpTestName = "test" - cpFullPath = "/some/path/test" - - cpContainerContents = "holla, i am the container" - cpHostContents = "hello, i am the host" -) - -// Ensure that an all-local path case returns an error. -func (s *DockerSuite) TestCpLocalOnly(c *check.C) { - err := runDockerCp(c, "foo", "bar") - c.Assert(err, checker.NotNil) - - c.Assert(err.Error(), checker.Contains, "must specify at least one container source") -} - -// Test for #5656 -// Check that garbage paths don't escape the container's rootfs -func (s *DockerSuite) TestCpGarbagePath(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) - - hostFile, err := os.Create(cpFullPath) - c.Assert(err, checker.IsNil) - defer hostFile.Close() - defer os.RemoveAll(cpTestPathParent) - - fmt.Fprintf(hostFile, "%s", cpHostContents) - - tmpdir, err := ioutil.TempDir("", "docker-integration") - c.Assert(err, checker.IsNil) - - tmpname := filepath.Join(tmpdir, cpTestName) - defer os.RemoveAll(tmpdir) - - path := path.Join("../../../../../../../../../../../../", cpFullPath) - - dockerCmd(c, "cp", containerID+":"+path, tmpdir) - - file, _ := os.Open(tmpname) - defer file.Close() - - test, err := ioutil.ReadAll(file) - c.Assert(err, checker.IsNil) - - // output matched host file -- garbage path can escape container rootfs - c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) - - // output doesn't match the input for garbage path - c.Assert(string(test), checker.Equals, cpContainerContents) -} - -// Check that relative paths are relative to the container's rootfs -func (s *DockerSuite) TestCpRelativePath(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) - - hostFile, err := os.Create(cpFullPath) - c.Assert(err, checker.IsNil) - defer hostFile.Close() - defer os.RemoveAll(cpTestPathParent) - - fmt.Fprintf(hostFile, "%s", cpHostContents) - - tmpdir, err := ioutil.TempDir("", "docker-integration") - c.Assert(err, checker.IsNil) - - tmpname := 
filepath.Join(tmpdir, cpTestName) - defer os.RemoveAll(tmpdir) - - var relPath string - if path.IsAbs(cpFullPath) { - // normally this is `filepath.Rel("/", cpFullPath)` but we cannot - // get this unix-path manipulation on windows with filepath. - relPath = cpFullPath[1:] - } - c.Assert(path.IsAbs(cpFullPath), checker.True, check.Commentf("path %s was assumed to be an absolute path", cpFullPath)) - - dockerCmd(c, "cp", containerID+":"+relPath, tmpdir) - - file, _ := os.Open(tmpname) - defer file.Close() - - test, err := ioutil.ReadAll(file) - c.Assert(err, checker.IsNil) - - // output matched host file -- relative path can escape container rootfs - c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) - - // output doesn't match the input for relative path - c.Assert(string(test), checker.Equals, cpContainerContents) -} - -// Check that absolute paths are relative to the container's rootfs -func (s *DockerSuite) TestCpAbsolutePath(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath) - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) - - hostFile, err := os.Create(cpFullPath) - c.Assert(err, checker.IsNil) - defer hostFile.Close() - defer os.RemoveAll(cpTestPathParent) - - fmt.Fprintf(hostFile, "%s", cpHostContents) - - tmpdir, err := ioutil.TempDir("", "docker-integration") - c.Assert(err, checker.IsNil) - - tmpname := filepath.Join(tmpdir, cpTestName) - defer os.RemoveAll(tmpdir) - - path := cpFullPath - - dockerCmd(c, "cp", containerID+":"+path, tmpdir) - - file, _ := os.Open(tmpname) - defer file.Close() - - test, err := ioutil.ReadAll(file) - c.Assert(err, checker.IsNil) - - // output matched host file -- absolute path can escape container rootfs - c.Assert(string(test), checker.Not(checker.Equals), cpHostContents) - - // output doesn't match the input for absolute path - c.Assert(string(test), checker.Equals, cpContainerContents) -} - -// Test for #5619 -// Check that absolute symlinks are still relative to the container's rootfs -func (s *DockerSuite) TestCpAbsoluteSymlink(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" container_path") - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) - - hostFile, err := os.Create(cpFullPath) - c.Assert(err, checker.IsNil) - defer hostFile.Close() - defer os.RemoveAll(cpTestPathParent) - - fmt.Fprintf(hostFile, "%s", cpHostContents) - - tmpdir, err := ioutil.TempDir("", "docker-integration") - c.Assert(err, checker.IsNil) - - tmpname := filepath.Join(tmpdir, "container_path") - defer os.RemoveAll(tmpdir) - - path := path.Join("/", "container_path") - - dockerCmd(c, "cp", containerID+":"+path, tmpdir) - - // We should have copied a symlink *NOT* the file itself! 
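// Note: a link-preserving copy is verified with os.Readlink, which returns
// the link text without following it; os.Stat would resolve the link and
// describe the target instead. Illustration only, with a hypothetical path:
//
//	if fi, err := os.Lstat("/tmp/copied_link"); err == nil && fi.Mode()&os.ModeSymlink != 0 {
//		target, _ := os.Readlink("/tmp/copied_link") // e.g. "/some/path/test"
//		fmt.Println("symlink ->", target)
//	}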
- linkTarget, err := os.Readlink(tmpname) - c.Assert(err, checker.IsNil) - - c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpFullPath)) -} - -// Check that symlinks to a directory behave as expected when copying one from -// a container. -func (s *DockerSuite) TestCpFromSymlinkToDirectory(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPathParent+" /dir_link") - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - testDir, err := ioutil.TempDir("", "test-cp-from-symlink-to-dir-") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(testDir) - - // This copy command should copy the symlink, not the target, into the - // temporary directory. - dockerCmd(c, "cp", containerID+":"+"/dir_link", testDir) - - expectedPath := filepath.Join(testDir, "dir_link") - linkTarget, err := os.Readlink(expectedPath) - c.Assert(err, checker.IsNil) - - c.Assert(linkTarget, checker.Equals, filepath.FromSlash(cpTestPathParent)) - - os.Remove(expectedPath) - - // This copy command should resolve the symlink (note the trailing - // separator), copying the target into the temporary directory. - dockerCmd(c, "cp", containerID+":"+"/dir_link/", testDir) - - // It *should not* have copied the directory using the target's name, but - // used the given name instead. - unexpectedPath := filepath.Join(testDir, cpTestPathParent) - stat, err := os.Lstat(unexpectedPath) - if err == nil { - out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) - } - c.Assert(err, checker.NotNil, check.Commentf(out)) - - // It *should* have copied the directory using the asked name "dir_link". - stat, err = os.Lstat(expectedPath) - c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) - - c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) -} - -// Check that symlinks to a directory behave as expected when copying one to a -// container. -func (s *DockerSuite) TestCpToSymlinkToDirectory(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, SameHostDaemon) // Requires local volume mount bind. - - testVol, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(testVol) - - // Create a test container with a local volume. We will test by copying - // to the volume path in the container which we can then verify locally. - out, _ := dockerCmd(c, "create", "-v", testVol+":/testVol", "busybox") - - containerID := strings.TrimSpace(out) - - // Create a temp directory to hold a test file nested in a directory. - testDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(testDir) - - // This file will be at "/testDir/some/path/test" and will be copied into - // the test volume later. - hostTestFilename := filepath.Join(testDir, cpFullPath) - c.Assert(os.MkdirAll(filepath.Dir(hostTestFilename), os.FileMode(0700)), checker.IsNil) - c.Assert(ioutil.WriteFile(hostTestFilename, []byte(cpHostContents), os.FileMode(0600)), checker.IsNil) - - // Now create another temp directory to hold a symlink to the - // "/testDir/some" directory.
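// Note: os.Symlink takes the target first and the new link name second,
// mirroring `ln -s TARGET LINK`. A minimal sketch with hypothetical paths:
//
//	target := filepath.Join(testDir, "some")   // existing directory
//	link := filepath.Join(linkDir, "dir_link") // symlink to be created
//	if err := os.Symlink(target, link); err != nil {
//		c.Fatal(err)
//	}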
- linkDir, err := ioutil.TempDir("", "test-cp-to-symlink-to-dir-") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(linkDir) - - // Then symlink "/linkDir/dir_link" to "/testdir/some". - linkTarget := filepath.Join(testDir, cpTestPathParent) - localLink := filepath.Join(linkDir, "dir_link") - c.Assert(os.Symlink(linkTarget, localLink), checker.IsNil) - - // Now copy that symlink into the test volume in the container. - dockerCmd(c, "cp", localLink, containerID+":/testVol") - - // This copy command should have copied the symlink *not* the target. - expectedPath := filepath.Join(testVol, "dir_link") - actualLinkTarget, err := os.Readlink(expectedPath) - c.Assert(err, checker.IsNil, check.Commentf("unable to read symlink at %q", expectedPath)) - - c.Assert(actualLinkTarget, checker.Equals, linkTarget) - - // Good, now remove that copied link for the next test. - os.Remove(expectedPath) - - // This copy command should resolve the symlink (note the trailing - // separator), copying the target into the test volume directory in the - // container. - dockerCmd(c, "cp", localLink+"/", containerID+":/testVol") - - // It *should not* have copied the directory using the target's name, but - // used the given name instead. - unexpectedPath := filepath.Join(testVol, cpTestPathParent) - stat, err := os.Lstat(unexpectedPath) - if err == nil { - out = fmt.Sprintf("target name was copied: %q - %q", stat.Mode(), stat.Name()) - } - c.Assert(err, checker.NotNil, check.Commentf(out)) - - // It *should* have copied the directory using the asked name "dir_link". - stat, err = os.Lstat(expectedPath) - c.Assert(err, checker.IsNil, check.Commentf("unable to stat resource at %q", expectedPath)) - - c.Assert(stat.IsDir(), checker.True, check.Commentf("should have copied a directory but got %q instead", stat.Mode())) - - // And this directory should contain the file copied from the host at the - // expected location: "/testVol/dir_link/path/test" - expectedFilepath := filepath.Join(testVol, "dir_link/path/test") - fileContents, err := ioutil.ReadFile(expectedFilepath) - c.Assert(err, checker.IsNil) - - c.Assert(string(fileContents), checker.Equals, cpHostContents) -} - -// Test for #5619 -// Check that symlinks which are part of the resource path are still relative to the container's rootfs -func (s *DockerSuite) TestCpSymlinkComponent(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpTestPath+" container_path") - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - c.Assert(os.MkdirAll(cpTestPath, os.ModeDir), checker.IsNil) - - hostFile, err := os.Create(cpFullPath) - c.Assert(err, checker.IsNil) - defer hostFile.Close() - defer os.RemoveAll(cpTestPathParent) - - fmt.Fprintf(hostFile, "%s", cpHostContents) - - tmpdir, err := ioutil.TempDir("", "docker-integration") - - c.Assert(err, checker.IsNil) - - tmpname := filepath.Join(tmpdir, cpTestName) - defer os.RemoveAll(tmpdir) - - path := path.Join("/", "container_path", cpTestName) - - dockerCmd(c, "cp", containerID+":"+path, tmpdir) - - file, _ := os.Open(tmpname) - defer file.Close() - - test, err := ioutil.ReadAll(file) - c.Assert(err, checker.IsNil) - - // output matched host file -- symlink path component can escape container rootfs - c.Assert(string(test), 
checker.Not(checker.Equals), cpHostContents) - - // output doesn't match the input for symlink path component - c.Assert(string(test), checker.Equals, cpContainerContents) -} - -// Check that cp with an unprivileged user doesn't return any error -func (s *DockerSuite) TestCpUnprivilegedUser(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, UnixCli) // uses chmod/su: not available on windows - - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch "+cpTestName) - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - tmpdir, err := ioutil.TempDir("", "docker-integration") - c.Assert(err, checker.IsNil) - - defer os.RemoveAll(tmpdir) - - c.Assert(os.Chmod(tmpdir, 0777), checker.IsNil) - - path := cpTestName - - _, _, err = runCommandWithOutput(exec.Command("su", "unprivilegeduser", "-c", dockerBinary+" cp "+containerID+":"+path+" "+tmpdir)) - c.Assert(err, checker.IsNil, check.Commentf("couldn't copy with unprivileged user: %s:%s", containerID, path)) -} - -func (s *DockerSuite) TestCpSpecialFiles(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, SameHostDaemon) - - outDir, err := ioutil.TempDir("", "cp-test-special-files") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(outDir) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "touch /foo") - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - // Copy actual /etc/resolv.conf - dockerCmd(c, "cp", containerID+":/etc/resolv.conf", outDir) - - expected, err := readContainerFile(containerID, "resolv.conf") - actual, err := ioutil.ReadFile(outDir + "/resolv.conf") - - // Expected copied file to be duplicate of the container resolv.conf - c.Assert(bytes.Equal(actual, expected), checker.True) - - // Copy actual /etc/hosts - dockerCmd(c, "cp", containerID+":/etc/hosts", outDir) - - expected, err = readContainerFile(containerID, "hosts") - actual, err = ioutil.ReadFile(outDir + "/hosts") - - // Expected copied file to be duplicate of the container hosts - c.Assert(bytes.Equal(actual, expected), checker.True) - - // Copy actual /etc/hostname - dockerCmd(c, "cp", containerID+":/etc/hostname", outDir) - - expected, err = readContainerFile(containerID, "hostname") - actual, err = ioutil.ReadFile(outDir + "/hostname") - - // Expected copied file to be duplicate of the container hostname - c.Assert(bytes.Equal(actual, expected), checker.True) -} - -func (s *DockerSuite) TestCpVolumePath(c *check.C) { - // stat /tmp/cp-test-volumepath851508420/test gets permission denied for the user - testRequires(c, NotUserNamespace) - testRequires(c, DaemonIsLinux) - testRequires(c, SameHostDaemon) - - tmpDir, err := ioutil.TempDir("", "cp-test-volumepath") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(tmpDir) - outDir, err := ioutil.TempDir("", "cp-test-volumepath-out") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(outDir) - _, err = os.Create(tmpDir + "/test") - c.Assert(err, checker.IsNil) - - out, _ := dockerCmd(c, "run", "-d", "-v", "/foo", "-v", tmpDir+"/test:/test", "-v", tmpDir+":/baz", "busybox", "/bin/sh", "-c", "touch /foo/bar") - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - // Copy actual volume path
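// Note: the read-and-compare blocks in TestCpSpecialFiles above drop the
// ReadFile errors. A stricter variant of one block (illustrative, not the
// original code), assuming the suite's readContainerFile helper:
//
//	expected, err := readContainerFile(containerID, "hosts")
//	c.Assert(err, checker.IsNil)
//	actual, err := ioutil.ReadFile(outDir + "/hosts")
//	c.Assert(err, checker.IsNil)
//	c.Assert(bytes.Equal(actual, expected), checker.True)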
- dockerCmd(c, "cp", containerID+":/foo", outDir) - - stat, err := os.Stat(outDir + "/foo") - c.Assert(err, checker.IsNil) - // expected copied content to be dir - c.Assert(stat.IsDir(), checker.True) - stat, err = os.Stat(outDir + "/foo/bar") - c.Assert(err, checker.IsNil) - // Expected file `bar` to be a file - c.Assert(stat.IsDir(), checker.False) - - // Copy file nested in volume - dockerCmd(c, "cp", containerID+":/foo/bar", outDir) - - stat, err = os.Stat(outDir + "/bar") - c.Assert(err, checker.IsNil) - // Expected file `bar` to be a file - c.Assert(stat.IsDir(), checker.False) - - // Copy Bind-mounted dir - dockerCmd(c, "cp", containerID+":/baz", outDir) - stat, err = os.Stat(outDir + "/baz") - c.Assert(err, checker.IsNil) - // Expected `baz` to be a dir - c.Assert(stat.IsDir(), checker.True) - - // Copy file nested in bind-mounted dir - dockerCmd(c, "cp", containerID+":/baz/test", outDir) - fb, err := ioutil.ReadFile(outDir + "/baz/test") - c.Assert(err, checker.IsNil) - fb2, err := ioutil.ReadFile(tmpDir + "/test") - c.Assert(err, checker.IsNil) - // Expected copied file to be duplicate of bind-mounted file - c.Assert(bytes.Equal(fb, fb2), checker.True) - - // Copy bind-mounted file - dockerCmd(c, "cp", containerID+":/test", outDir) - fb, err = ioutil.ReadFile(outDir + "/test") - c.Assert(err, checker.IsNil) - fb2, err = ioutil.ReadFile(tmpDir + "/test") - c.Assert(err, checker.IsNil) - // Expected copied file to be duplicate of bind-mounted file - c.Assert(bytes.Equal(fb, fb2), checker.True) -} - -func (s *DockerSuite) TestCpToDot(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - tmpdir, err := ioutil.TempDir("", "docker-integration") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(tmpdir) - cwd, err := os.Getwd() - c.Assert(err, checker.IsNil) - defer os.Chdir(cwd) - c.Assert(os.Chdir(tmpdir), checker.IsNil) - dockerCmd(c, "cp", containerID+":/test", ".") - content, err := ioutil.ReadFile("./test") - c.Assert(string(content), checker.Equals, "lololol\n") -} - -func (s *DockerSuite) TestCpToStdout(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /test") - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - out, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "cp", containerID+":/test", "-"), - exec.Command("tar", "-vtf", "-")) - - c.Assert(err, checker.IsNil) - - c.Assert(out, checker.Contains, "test") - c.Assert(out, checker.Contains, "-rw") -} - -func (s *DockerSuite) TestCpNameHasColon(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "echo lololol > /te:s:t") - - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - tmpdir, err := ioutil.TempDir("", "docker-integration") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(tmpdir) - dockerCmd(c, "cp", containerID+":/te:s:t", tmpdir) - content, err := ioutil.ReadFile(tmpdir + "/te:s:t") - c.Assert(string(content), checker.Equals, 
"lololol\n") -} - -func (s *DockerSuite) TestCopyAndRestart(c *check.C) { - testRequires(c, DaemonIsLinux) - expectedMsg := "hello" - out, _ := dockerCmd(c, "run", "-d", "busybox", "echo", expectedMsg) - containerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - // failed to set up container - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - tmpDir, err := ioutil.TempDir("", "test-docker-restart-after-copy-") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(tmpDir) - - dockerCmd(c, "cp", fmt.Sprintf("%s:/etc/group", containerID), tmpDir) - - out, _ = dockerCmd(c, "start", "-a", containerID) - - c.Assert(strings.TrimSpace(out), checker.Equals, expectedMsg) -} - -func (s *DockerSuite) TestCopyCreatedContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "create", "--name", "test_cp", "-v", "/test", "busybox") - - tmpDir, err := ioutil.TempDir("", "test") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(tmpDir) - dockerCmd(c, "cp", "test_cp:/bin/sh", tmpDir) -} - -// test copy with option `-L`: following symbol link -// Check that symlinks to a file behave as expected when copying one from -// a container to host following symbol link -func (s *DockerSuite) TestCpSymlinkFromConToHostFollowSymlink(c *check.C) { - testRequires(c, DaemonIsLinux) - out, exitCode := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir -p '"+cpTestPath+"' && echo -n '"+cpContainerContents+"' > "+cpFullPath+" && ln -s "+cpFullPath+" /dir_link") - if exitCode != 0 { - c.Fatal("failed to create a container", out) - } - - cleanedContainerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", cleanedContainerID) - if strings.TrimSpace(out) != "0" { - c.Fatal("failed to set up container", out) - } - - testDir, err := ioutil.TempDir("", "test-cp-symlink-container-to-host-follow-symlink") - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(testDir) - - // This copy command should copy the symlink, not the target, into the - // temporary directory. 
- dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", testDir) - - expectedPath := filepath.Join(testDir, "dir_link") - - expected := []byte(cpContainerContents) - actual, err := ioutil.ReadFile(expectedPath) - - if !bytes.Equal(actual, expected) { - c.Fatalf("Expected copied file to be duplicate of the container symbol link target") - } - os.Remove(expectedPath) - - // now test copy symbol link to a non-existing file in host - expectedPath = filepath.Join(testDir, "somefile_host") - // expectedPath shouldn't exist, if exists, remove it - if _, err := os.Lstat(expectedPath); err == nil { - os.Remove(expectedPath) - } - - dockerCmd(c, "cp", "-L", cleanedContainerID+":"+"/dir_link", expectedPath) - - actual, err = ioutil.ReadFile(expectedPath) - - if !bytes.Equal(actual, expected) { - c.Fatalf("Expected copied file to be duplicate of the container symbol link target") - } - defer os.Remove(expectedPath) -} diff --git a/integration-cli/docker_cli_cp_to_container_test.go b/integration-cli/docker_cli_cp_to_container_test.go deleted file mode 100644 index 63fbd44665..0000000000 --- a/integration-cli/docker_cli_cp_to_container_test.go +++ /dev/null @@ -1,605 +0,0 @@ -package main - -import ( - "os" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// docker cp LOCALPATH CONTAINER:PATH - -// Try all of the test cases from the archive package which implements the -// internals of `docker cp` and ensure that the behavior matches when actually -// copying to and from containers. - -// Basic assumptions about SRC and DST: -// 1. SRC must exist. -// 2. If SRC ends with a trailing separator, it must be a directory. -// 3. DST parent directory must exist. -// 4. If DST exists as a file, it must not end with a trailing separator. - -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. -func (s *DockerSuite) TestCpToErrSrcNotExists(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{}) - - tmpDir := getTestDir(c, "test-cp-to-err-src-not-exists") - defer os.RemoveAll(tmpDir) - - srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "file1") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. -func (s *DockerSuite) TestCpToErrSrcNotDir(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{}) - - tmpDir := getTestDir(c, "test-cp-to-err-src-not-dir") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcPath := cpPathTrailingSep(tmpDir, "file1") - dstPath := containerCpPath(containerID, "testDir") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotDir(err), checker.True, check.Commentf("expected IsNotDir error, but got %T: %s", err, err)) -} - -// Test for error when SRC is a valid file or directory, -// bu the DST parent directory does not exist. -func (s *DockerSuite) TestCpToErrDstParentNotExists(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-to-err-dst-parent-not-exists") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. 
- srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "/notExists", "file1") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) - - // Try with a directory source. - srcPath = cpPath(tmpDir, "dir1") - - err = runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpNotExist(err), checker.True, check.Commentf("expected IsNotExist error, but got %T: %s", err, err)) -} - -// Test for error when DST ends in a trailing path separator but exists as a -// file. Also test that we cannot overwrite an existing directory with a -// non-directory and cannot overwrite an existing file with a directory. -func (s *DockerSuite) TestCpToErrDstNotDir(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{addContent: true}) - - tmpDir := getTestDir(c, "test-cp-to-err-dst-not-dir") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - // Try with a file source. - srcPath := cpPath(tmpDir, "dir1/file1-1") - dstPath := containerCpPathTrailingSep(containerID, "file1") - - // The client should encounter an error trying to stat the destination - // and then be unable to copy since the destination is asserted to be a - // directory but does not exist. - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExist error, but got %T: %s", err, err)) - - // Try with a directory source. - srcPath = cpPath(tmpDir, "dir1") - - // The client should encounter an error trying to stat the destination and - // then decide to extract to the parent directory instead with a rebased - // name in the source archive, but this directory would overwrite the - // existing file with the same name. - err = runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCannotOverwriteNonDirWithDir(err), checker.True, check.Commentf("expected CannotOverwriteNonDirWithDir error, but got %T: %s", err, err)) -} - -// Check that copying from a local path to a symlink in a container copies to -// the symlink target and does not overwrite the container symlink itself. -func (s *DockerSuite) TestCpToSymlinkDestination(c *check.C) { - // stat /tmp/test-cp-to-symlink-destination-262430901/vol3 gets permission denied for the user - testRequires(c, NotUserNamespace) - testRequires(c, DaemonIsLinux) - testRequires(c, SameHostDaemon) // Requires local volume mount bind. - - testVol := getTestDir(c, "test-cp-to-symlink-destination-") - defer os.RemoveAll(testVol) - - makeTestContentInDir(c, testVol) - - containerID := makeTestContainer(c, testContainerOptions{ - volumes: defaultVolumes(testVol), // Our bind mount is at /vol2 - }) - - // First, copy a local file to a symlink to a file in the container. This - // should overwrite the symlink target contents with the source contents. - srcPath := cpPath(testVol, "file2") - dstPath := containerCpPath(containerID, "/vol2/symlinkToFile1") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // The symlink should not have been modified. - c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToFile1"), "file1"), checker.IsNil) - - // The file should have the contents of "file2" now. - c.Assert(fileContentEquals(c, cpPath(testVol, "file1"), "file2\n"), checker.IsNil) - - // Next, copy a local file to a symlink to a directory in the container.
- // This should copy the file into the symlink target directory. - dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // The symlink should not have been modified. - c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) - - // The file should have the contents of "file2" now. - c.Assert(fileContentEquals(c, cpPath(testVol, "file2"), "file2\n"), checker.IsNil) - - // Next, copy a file to a symlink to a file that does not exist (a broken - // symlink) in the container. This should create the target file with the - // contents of the source file. - dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToFileX") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // The symlink should not have been modified. - c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToFileX"), "fileX"), checker.IsNil) - - // The file should have the contents of "file2" now. - c.Assert(fileContentEquals(c, cpPath(testVol, "fileX"), "file2\n"), checker.IsNil) - - // Next, copy a local directory to a symlink to a directory in the - // container. This should copy the directory into the symlink target - // directory and not modify the symlink. - srcPath = cpPath(testVol, "/dir2") - dstPath = containerCpPath(containerID, "/vol2/symlinkToDir1") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // The symlink should not have been modified. - c.Assert(symlinkTargetEquals(c, cpPath(testVol, "symlinkToDir1"), "dir1"), checker.IsNil) - - // The directory should now contain a copy of "dir2". - c.Assert(fileContentEquals(c, cpPath(testVol, "dir1/dir2/file2-1"), "file2-1\n"), checker.IsNil) - - // Next, copy a local directory to a symlink to a local directory that does - // not exist (a broken symlink) in the container. This should create the - // target as a directory with the contents of the source directory. It - // should not modify the symlink. - dstPath = containerCpPath(containerID, "/vol2/brokenSymlinkToDirX") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // The symlink should not have been modified. - c.Assert(symlinkTargetEquals(c, cpPath(testVol, "brokenSymlinkToDirX"), "dirX"), checker.IsNil) - - // The "dirX" directory should now be a copy of "dir2". - c.Assert(fileContentEquals(c, cpPath(testVol, "dirX/file2-1"), "file2-1\n"), checker.IsNil) -} - -// Possibilities are reduced to the remaining 10 cases: -// -// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action -// =================================================================================================== -// A | no | - | no | - | no | create file -// B | no | - | no | - | yes | error -// C | no | - | yes | no | - | overwrite file -// D | no | - | yes | yes | - | create file in dst dir -// E | yes | no | no | - | - | create dir, copy contents -// F | yes | no | yes | no | - | error -// G | yes | no | yes | yes | - | copy dir and contents -// H | yes | yes | no | - | - | create dir, copy contents -// I | yes | yes | yes | no | - | error -// J | yes | yes | yes | yes | - | copy dir contents -// - -// A. SRC specifies a file and DST (no trailing path separator) doesn't -// exist. This should create a file with the name DST and copy the -// contents of the source file into it. 
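// Note: the ten cases in the table above could also be expressed as a
// single table-driven test; a minimal sketch under that assumption (the
// cpCase type and loop below are hypothetical, not part of the original
// suite):
//
//	type cpCase struct {
//		name, src, dst string
//		wantErr        bool
//	}
//	for _, tc := range []cpCase{
//		{"A", "file1", "itWorks.txt", false},
//		{"B", "file1", "testDir/", true},
//	} {
//		err := runDockerCp(c, cpPath(tmpDir, tc.src), containerCpPath(containerID, tc.dst))
//		c.Assert(err == nil, checker.Equals, !tc.wantErr, check.Commentf("case %s", tc.name))
//	}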
-func (s *DockerSuite) TestCpToCaseA(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - workDir: "/root", command: makeCatFileCommand("itWorks.txt"), - }) - - tmpDir := getTestDir(c, "test-cp-to-case-a") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "/root/itWorks.txt") - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) -} - -// B. SRC specifies a file and DST (with trailing path separator) doesn't -// exist. This should cause an error because the copy operation cannot -// create a directory when copying a single file. -func (s *DockerSuite) TestCpToCaseB(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - command: makeCatFileCommand("testDir/file1"), - }) - - tmpDir := getTestDir(c, "test-cp-to-case-b") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcPath := cpPath(tmpDir, "file1") - dstDir := containerCpPathTrailingSep(containerID, "testDir") - - err := runDockerCp(c, srcPath, dstDir) - c.Assert(err, checker.NotNil) - - c.Assert(isCpDirNotExist(err), checker.True, check.Commentf("expected DirNotExists error, but got %T: %s", err, err)) -} - -// C. SRC specifies a file and DST exists as a file. This should overwrite -// the file at DST with the contents of the source file. -func (s *DockerSuite) TestCpToCaseC(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - command: makeCatFileCommand("file2"), - }) - - tmpDir := getTestDir(c, "test-cp-to-case-c") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "/root/file2") - - // Ensure the container's file starts with the original content. - c.Assert(containerStartOutputEquals(c, containerID, "file2\n"), checker.IsNil) - - c.Assert(runDockerCp(c, srcPath, dstPath), checker.IsNil) - - // Should now contain file1's contents. - c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) -} - -// D. SRC specifies a file and DST exists as a directory. This should place -// a copy of the source file inside it using the basename from SRC. Ensure -// this works whether DST has a trailing path separator or not. -func (s *DockerSuite) TestCpToCaseD(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, - command: makeCatFileCommand("/dir1/file1"), - }) - - tmpDir := getTestDir(c, "test-cp-to-case-d") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcPath := cpPath(tmpDir, "file1") - dstDir := containerCpPath(containerID, "dir1") - - // Ensure that dstPath doesn't exist. - c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) - - // Should now contain file1's contents. - c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) - - // Now try again but using a trailing path separator for dstDir. - - // Make new destination container. 
- containerID = makeTestContainer(c, testContainerOptions{ - addContent: true, - command: makeCatFileCommand("/dir1/file1"), - }) - - dstDir = containerCpPathTrailingSep(containerID, "dir1") - - // Ensure that dstPath doesn't exist. - c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - - c.Assert(runDockerCp(c, srcPath, dstDir), checker.IsNil) - - // Should now contain file1's contents. - c.Assert(containerStartOutputEquals(c, containerID, "file1\n"), checker.IsNil) -} - -// E. SRC specifies a directory and DST does not exist. This should create a -// directory at DST and copy the contents of the SRC directory into the DST -// directory. Ensure this works whether DST has a trailing path separator or -// not. -func (s *DockerSuite) TestCpToCaseE(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - command: makeCatFileCommand("/testDir/file1-1"), - }) - - tmpDir := getTestDir(c, "test-cp-to-case-e") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcDir := cpPath(tmpDir, "dir1") - dstDir := containerCpPath(containerID, "testDir") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - // Should now contain file1-1's contents. - c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) - - // Now try again but using a trailing path separator for dstDir. - - // Make new destination container. - containerID = makeTestContainer(c, testContainerOptions{ - command: makeCatFileCommand("/testDir/file1-1"), - }) - - dstDir = containerCpPathTrailingSep(containerID, "testDir") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - // Should now contain file1-1's contents. - c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) -} - -// F. SRC specifies a directory and DST exists as a file. This should cause an -// error as it is not possible to overwrite a file with a directory. -func (s *DockerSuite) TestCpToCaseF(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - }) - - tmpDir := getTestDir(c, "test-cp-to-case-f") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcDir := cpPath(tmpDir, "dir1") - dstFile := containerCpPath(containerID, "/root/file1") - - err := runDockerCp(c, srcDir, dstFile) - c.Assert(err, checker.NotNil) - - c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) -} - -// G. SRC specifies a directory and DST exists as a directory. This should copy -// the SRC directory and all its contents to the DST directory. Ensure this -// works whether DST has a trailing path separator or not. -func (s *DockerSuite) TestCpToCaseG(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - command: makeCatFileCommand("dir2/dir1/file1-1"), - }) - - tmpDir := getTestDir(c, "test-cp-to-case-g") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcDir := cpPath(tmpDir, "dir1") - dstDir := containerCpPath(containerID, "/root/dir2") - - // Ensure that dstPath doesn't exist. - c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - // Should now contain file1-1's contents. 
- c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) - - // Now try again but using a trailing path separator for dstDir. - - // Make new destination container. - containerID = makeTestContainer(c, testContainerOptions{ - addContent: true, - command: makeCatFileCommand("/dir2/dir1/file1-1"), - }) - - dstDir = containerCpPathTrailingSep(containerID, "/dir2") - - // Ensure that dstPath doesn't exist. - c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - // Should now contain file1-1's contents. - c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) -} - -// H. SRC specifies a directory's contents only and DST does not exist. This -// should create a directory at DST and copy the contents of the SRC -// directory (but not the directory itself) into the DST directory. Ensure -// this works whether DST has a trailing path separator or not. -func (s *DockerSuite) TestCpToCaseH(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - command: makeCatFileCommand("/testDir/file1-1"), - }) - - tmpDir := getTestDir(c, "test-cp-to-case-h") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." - dstDir := containerCpPath(containerID, "testDir") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - // Should now contain file1-1's contents. - c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) - - // Now try again but using a trailing path separator for dstDir. - - // Make new destination container. - containerID = makeTestContainer(c, testContainerOptions{ - command: makeCatFileCommand("/testDir/file1-1"), - }) - - dstDir = containerCpPathTrailingSep(containerID, "testDir") - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - // Should now contain file1-1's contents. - c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) -} - -// I. SRC specifies a directory's contents only and DST exists as a file. This -// should cause an error as it is not possible to overwrite a file with a -// directory. -func (s *DockerSuite) TestCpToCaseI(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - }) - - tmpDir := getTestDir(c, "test-cp-to-case-i") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." - dstFile := containerCpPath(containerID, "/root/file1") - - err := runDockerCp(c, srcDir, dstFile) - c.Assert(err, checker.NotNil) - - c.Assert(isCpCannotCopyDir(err), checker.True, check.Commentf("expected ErrCannotCopyDir error, but got %T: %s", err, err)) -} - -// J. SRC specifies a directory's contents only and DST exists as a directory. -// This should copy the contents of the SRC directory (but not the directory -// itself) into the DST directory. Ensure this works whether DST has a -// trailing path separator or not. -func (s *DockerSuite) TestCpToCaseJ(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := makeTestContainer(c, testContainerOptions{ - addContent: true, workDir: "/root", - command: makeCatFileCommand("/dir2/file1-1"), - }) - - tmpDir := getTestDir(c, "test-cp-to-case-j") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcDir := cpPathTrailingSep(tmpDir, "dir1") + "." 
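// Note: the trailing "/." is the contents-only spelling understood by the
// archive package behind `docker cp`: it copies dir1's contents rather than
// dir1 itself. Illustrative values:
//
//	base := cpPathTrailingSep("/tmp/t", "dir1") // "/tmp/t/dir1/"
//	contentsOnly := base + "."                  // "/tmp/t/dir1/." -- copy contents, not the dir
//	_ = contentsOnly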
- dstDir := containerCpPath(containerID, "/dir2") - - // Ensure that dstPath doesn't exist. - c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - // Should now contain file1-1's contents. - c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) - - // Now try again but using a trailing path separator for dstDir. - - // Make new destination container. - containerID = makeTestContainer(c, testContainerOptions{ - command: makeCatFileCommand("/dir2/file1-1"), - }) - - dstDir = containerCpPathTrailingSep(containerID, "/dir2") - - // Ensure that dstPath doesn't exist. - c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) - - c.Assert(runDockerCp(c, srcDir, dstDir), checker.IsNil) - - // Should now contain file1-1's contents. - c.Assert(containerStartOutputEquals(c, containerID, "file1-1\n"), checker.IsNil) -} - -// The `docker cp` command should also ensure that you cannot -// write to a container rootfs that is marked as read-only. -func (s *DockerSuite) TestCpToErrReadOnlyRootfs(c *check.C) { - // --read-only + userns has remount issues - testRequires(c, DaemonIsLinux, NotUserNamespace) - tmpDir := getTestDir(c, "test-cp-to-err-read-only-rootfs") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - containerID := makeTestContainer(c, testContainerOptions{ - readOnly: true, workDir: "/root", - command: makeCatFileCommand("shouldNotExist"), - }) - - srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "/root/shouldNotExist") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrContainerRootfsReadonly error, but got %T: %s", err, err)) - - // Ensure that dstPath doesn't exist. - c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) -} - -// The `docker cp` command should also ensure that you -// cannot write to a volume that is mounted as read-only. -func (s *DockerSuite) TestCpToErrReadOnlyVolume(c *check.C) { - // --read-only + userns has remount issues - testRequires(c, DaemonIsLinux, NotUserNamespace) - tmpDir := getTestDir(c, "test-cp-to-err-read-only-volume") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - containerID := makeTestContainer(c, testContainerOptions{ - volumes: defaultVolumes(tmpDir), workDir: "/root", - command: makeCatFileCommand("/vol_ro/shouldNotExist"), - }) - - srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "/vol_ro/shouldNotExist") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.NotNil) - - c.Assert(isCpCannotCopyReadOnly(err), checker.True, check.Commentf("expected ErrVolumeReadonly error, but got %T: %s", err, err)) - - // Ensure that dstPath doesn't exist. 
- c.Assert(containerStartOutputEquals(c, containerID, ""), checker.IsNil) -} diff --git a/integration-cli/docker_cli_cp_to_container_unix_test.go b/integration-cli/docker_cli_cp_to_container_unix_test.go deleted file mode 100644 index 45d85ba5d1..0000000000 --- a/integration-cli/docker_cli_cp_to_container_unix_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build !windows - -package main - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/system" - "github.com/go-check/check" -) - -// Check ownership is root, both in non-userns and userns enabled modes -func (s *DockerSuite) TestCpCheckDestOwnership(c *check.C) { - testRequires(c, DaemonIsLinux, SameHostDaemon) - tmpVolDir := getTestDir(c, "test-cp-tmpvol") - containerID := makeTestContainer(c, - testContainerOptions{volumes: []string{fmt.Sprintf("%s:/tmpvol", tmpVolDir)}}) - - tmpDir := getTestDir(c, "test-cp-to-check-ownership") - defer os.RemoveAll(tmpDir) - - makeTestContentInDir(c, tmpDir) - - srcPath := cpPath(tmpDir, "file1") - dstPath := containerCpPath(containerID, "/tmpvol", "file1") - - err := runDockerCp(c, srcPath, dstPath) - c.Assert(err, checker.IsNil) - - stat, err := system.Stat(filepath.Join(tmpVolDir, "file1")) - c.Assert(err, checker.IsNil) - uid, gid, err := getRootUIDGID() - c.Assert(err, checker.IsNil) - c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Copied file not owned by container root UID")) - c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Copied file not owned by container root GID")) -} diff --git a/integration-cli/docker_cli_cp_utils.go b/integration-cli/docker_cli_cp_utils.go deleted file mode 100644 index 0501c5d735..0000000000 --- a/integration-cli/docker_cli_cp_utils.go +++ /dev/null @@ -1,303 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -type fileType uint32 - -const ( - ftRegular fileType = iota - ftDir - ftSymlink -) - -type fileData struct { - filetype fileType - path string - contents string -} - -func (fd fileData) creationCommand() string { - var command string - - switch fd.filetype { - case ftRegular: - // Don't overwrite the file if it already exists! - command = fmt.Sprintf("if [ ! 
-f %s ]; then echo %q > %s; fi", fd.path, fd.contents, fd.path) - case ftDir: - command = fmt.Sprintf("mkdir -p %s", fd.path) - case ftSymlink: - command = fmt.Sprintf("ln -fs %s %s", fd.contents, fd.path) - } - - return command -} - -func mkFilesCommand(fds []fileData) string { - commands := make([]string, len(fds)) - - for i, fd := range fds { - commands[i] = fd.creationCommand() - } - - return strings.Join(commands, " && ") -} - -var defaultFileData = []fileData{ - {ftRegular, "file1", "file1"}, - {ftRegular, "file2", "file2"}, - {ftRegular, "file3", "file3"}, - {ftRegular, "file4", "file4"}, - {ftRegular, "file5", "file5"}, - {ftRegular, "file6", "file6"}, - {ftRegular, "file7", "file7"}, - {ftDir, "dir1", ""}, - {ftRegular, "dir1/file1-1", "file1-1"}, - {ftRegular, "dir1/file1-2", "file1-2"}, - {ftDir, "dir2", ""}, - {ftRegular, "dir2/file2-1", "file2-1"}, - {ftRegular, "dir2/file2-2", "file2-2"}, - {ftDir, "dir3", ""}, - {ftRegular, "dir3/file3-1", "file3-1"}, - {ftRegular, "dir3/file3-2", "file3-2"}, - {ftDir, "dir4", ""}, - {ftRegular, "dir4/file3-1", "file4-1"}, - {ftRegular, "dir4/file3-2", "file4-2"}, - {ftDir, "dir5", ""}, - {ftSymlink, "symlinkToFile1", "file1"}, - {ftSymlink, "symlinkToDir1", "dir1"}, - {ftSymlink, "brokenSymlinkToFileX", "fileX"}, - {ftSymlink, "brokenSymlinkToDirX", "dirX"}, - {ftSymlink, "symlinkToAbsDir", "/root"}, -} - -func defaultMkContentCommand() string { - return mkFilesCommand(defaultFileData) -} - -func makeTestContentInDir(c *check.C, dir string) { - for _, fd := range defaultFileData { - path := filepath.Join(dir, filepath.FromSlash(fd.path)) - switch fd.filetype { - case ftRegular: - c.Assert(ioutil.WriteFile(path, []byte(fd.contents+"\n"), os.FileMode(0666)), checker.IsNil) - case ftDir: - c.Assert(os.Mkdir(path, os.FileMode(0777)), checker.IsNil) - case ftSymlink: - c.Assert(os.Symlink(fd.contents, path), checker.IsNil) - } - } -} - -type testContainerOptions struct { - addContent bool - readOnly bool - volumes []string - workDir string - command string -} - -func makeTestContainer(c *check.C, options testContainerOptions) (containerID string) { - if options.addContent { - mkContentCmd := defaultMkContentCommand() - if options.command == "" { - options.command = mkContentCmd - } else { - options.command = fmt.Sprintf("%s && %s", defaultMkContentCommand(), options.command) - } - } - - if options.command == "" { - options.command = "#(nop)" - } - - args := []string{"run", "-d"} - - for _, volume := range options.volumes { - args = append(args, "-v", volume) - } - - if options.workDir != "" { - args = append(args, "-w", options.workDir) - } - - if options.readOnly { - args = append(args, "--read-only") - } - - args = append(args, "busybox", "/bin/sh", "-c", options.command) - - out, _ := dockerCmd(c, args...) 
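// Note: for a typical options value, the args assembled above amount to a
// literal slice like the following (illustrative values only):
//
//	args := []string{"run", "-d", "-v", "/vol1", "-w", "/root", "--read-only",
//		"busybox", "/bin/sh", "-c", defaultMkContentCommand()}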
- - containerID = strings.TrimSpace(out) - - out, _ = dockerCmd(c, "wait", containerID) - - exitCode := strings.TrimSpace(out) - if exitCode != "0" { - out, _ = dockerCmd(c, "logs", containerID) - } - c.Assert(exitCode, checker.Equals, "0", check.Commentf("failed to make test container: %s", out)) - - return -} - -func makeCatFileCommand(path string) string { - return fmt.Sprintf("if [ -f %s ]; then cat %s; fi", path, path) -} - -func cpPath(pathElements ...string) string { - localizedPathElements := make([]string, len(pathElements)) - for i, path := range pathElements { - localizedPathElements[i] = filepath.FromSlash(path) - } - return strings.Join(localizedPathElements, string(filepath.Separator)) -} - -func cpPathTrailingSep(pathElements ...string) string { - return fmt.Sprintf("%s%c", cpPath(pathElements...), filepath.Separator) -} - -func containerCpPath(containerID string, pathElements ...string) string { - joined := strings.Join(pathElements, "/") - return fmt.Sprintf("%s:%s", containerID, joined) -} - -func containerCpPathTrailingSep(containerID string, pathElements ...string) string { - return fmt.Sprintf("%s/", containerCpPath(containerID, pathElements...)) -} - -func runDockerCp(c *check.C, src, dst string) (err error) { - c.Logf("running `docker cp %s %s`", src, dst) - - args := []string{"cp", src, dst} - - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, args...)) - if err != nil { - err = fmt.Errorf("error executing `docker cp` command: %s: %s", err, out) - } - - return -} - -func startContainerGetOutput(c *check.C, containerID string) (out string, err error) { - c.Logf("running `docker start -a %s`", containerID) - - args := []string{"start", "-a", containerID} - - out, _, err = runCommandWithOutput(exec.Command(dockerBinary, args...)) - if err != nil { - err = fmt.Errorf("error executing `docker start` command: %s: %s", err, out) - } - - return -} - -func getTestDir(c *check.C, label string) (tmpDir string) { - var err error - - tmpDir, err = ioutil.TempDir("", label) - // unable to make temporary directory - c.Assert(err, checker.IsNil) - - return -} - -func isCpNotExist(err error) bool { - return strings.Contains(err.Error(), "no such file or directory") || strings.Contains(err.Error(), "cannot find the file specified") -} - -func isCpDirNotExist(err error) bool { - return strings.Contains(err.Error(), archive.ErrDirNotExists.Error()) -} - -func isCpNotDir(err error) bool { - return strings.Contains(err.Error(), archive.ErrNotDirectory.Error()) || strings.Contains(err.Error(), "filename, directory name, or volume label syntax is incorrect") -} - -func isCpCannotCopyDir(err error) bool { - return strings.Contains(err.Error(), archive.ErrCannotCopyDir.Error()) -} - -func isCpCannotCopyReadOnly(err error) bool { - return strings.Contains(err.Error(), "marked read-only") -} - -func isCannotOverwriteNonDirWithDir(err error) bool { - return strings.Contains(err.Error(), "cannot overwrite non-directory") -} - -func fileContentEquals(c *check.C, filename, contents string) (err error) { - c.Logf("checking that file %q contains %q\n", filename, contents) - - fileBytes, err := ioutil.ReadFile(filename) - if err != nil { - return - } - - expectedBytes, err := ioutil.ReadAll(strings.NewReader(contents)) - if err != nil { - return - } - - if !bytes.Equal(fileBytes, expectedBytes) { - err = fmt.Errorf("file content not equal - expected %q, got %q", string(expectedBytes), string(fileBytes)) - } - - return -} - -func symlinkTargetEquals(c *check.C, symlink, expectedTarget 
string) (err error) { - c.Logf("checking that the symlink %q points to %q\n", symlink, expectedTarget) - - actualTarget, err := os.Readlink(symlink) - if err != nil { - return - } - - if actualTarget != expectedTarget { - err = fmt.Errorf("symlink target points to %q not %q", actualTarget, expectedTarget) - } - - return -} - -func containerStartOutputEquals(c *check.C, containerID, contents string) (err error) { - c.Logf("checking that container %q start output contains %q\n", containerID, contents) - - out, err := startContainerGetOutput(c, containerID) - if err != nil { - return - } - - if out != contents { - err = fmt.Errorf("output contents not equal - expected %q, got %q", contents, out) - } - - return -} - -func defaultVolumes(tmpDir string) []string { - if SameHostDaemon.Condition() { - return []string{ - "/vol1", - fmt.Sprintf("%s:/vol2", tmpDir), - fmt.Sprintf("%s:/vol3", filepath.Join(tmpDir, "vol3")), - fmt.Sprintf("%s:/vol_ro:ro", filepath.Join(tmpDir, "vol_ro")), - } - } - - // Can't bind-mount volumes with separate host daemon. - return []string{"/vol1", "/vol2", "/vol3", "/vol_ro:/vol_ro:ro"} -} diff --git a/integration-cli/docker_cli_create_test.go b/integration-cli/docker_cli_create_test.go deleted file mode 100644 index ed76353aae..0000000000 --- a/integration-cli/docker_cli_create_test.go +++ /dev/null @@ -1,480 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - "reflect" - "strings" - "time" - - "os/exec" - - "io/ioutil" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/go-connections/nat" - "github.com/go-check/check" -) - -// Make sure we can create a simple container with some args -func (s *DockerSuite) TestCreateArgs(c *check.C) { - // TODO Windows. This requires further investigation for porting to - // Windows CI. Currently fails. - if daemonPlatform == "windows" { - c.Skip("Fails on Windows CI") - } - out, _ := dockerCmd(c, "create", "busybox", "command", "arg1", "arg2", "arg with space", "-c", "flags") - - cleanedContainerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "inspect", cleanedContainerID) - - containers := []struct { - ID string - Created time.Time - Path string - Args []string - Image string - }{} - - err := json.Unmarshal([]byte(out), &containers) - c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) - c.Assert(containers, checker.HasLen, 1) - - cont := containers[0] - c.Assert(string(cont.Path), checker.Equals, "command", check.Commentf("Unexpected container path. Expected command, received: %s", cont.Path)) - - b := false - expected := []string{"arg1", "arg2", "arg with space", "-c", "flags"} - for i, arg := range expected { - if arg != cont.Args[i] { - b = true - break - } - } - if len(cont.Args) != len(expected) || b { - c.Fatalf("Unexpected args. Expected %v, received: %v", expected, cont.Args) - } - -} - -// Make sure we can grow the container's rootfs at creation time. -func (s *DockerSuite) TestCreateGrowRootfs(c *check.C) { - testRequires(c, Devicemapper) - out, _ := dockerCmd(c, "create", "--storage-opt", "size=120G", "busybox") - - cleanedContainerID := strings.TrimSpace(out) - - inspectOut := inspectField(c, cleanedContainerID, "HostConfig.StorageOpt") - c.Assert(inspectOut, checker.Equals, "map[size:120G]") -} - -// Make sure we cannot shrink the container's rootfs at creation time. 
-func (s *DockerSuite) TestCreateShrinkRootfs(c *check.C) { - testRequires(c, Devicemapper) - - // Ensure this fails because the defaultBaseFsSize is 10G - out, _, err := dockerCmdWithError("create", "--storage-opt", "size=5G", "busybox") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Container size cannot be smaller than") -} - -// Make sure we can set HostConfig options too -func (s *DockerSuite) TestCreateHostConfig(c *check.C) { - out, _ := dockerCmd(c, "create", "-P", "busybox", "echo") - - cleanedContainerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "inspect", cleanedContainerID) - - containers := []struct { - HostConfig *struct { - PublishAllPorts bool - } - }{} - - err := json.Unmarshal([]byte(out), &containers) - c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) - c.Assert(containers, checker.HasLen, 1) - - cont := containers[0] - c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) - c.Assert(cont.HostConfig.PublishAllPorts, check.NotNil, check.Commentf("Expected PublishAllPorts, got false")) -} - -func (s *DockerSuite) TestCreateWithPortRange(c *check.C) { - // Windows does not currently support port ranges. - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "create", "-p", "3300-3303:3300-3303/tcp", "busybox", "echo") - - cleanedContainerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "inspect", cleanedContainerID) - - containers := []struct { - HostConfig *struct { - PortBindings map[nat.Port][]nat.PortBinding - } - }{} - err := json.Unmarshal([]byte(out), &containers) - c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) - c.Assert(containers, checker.HasLen, 1) - - cont := containers[0] - - c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) - c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 4, check.Commentf("Expected 4 port bindings, got %d", len(cont.HostConfig.PortBindings))) - - for k, v := range cont.HostConfig.PortBindings { - c.Assert(v, checker.HasLen, 1, check.Commentf("Expected 1 port binding for port %s, but found %s", k, v)) - c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) - - } - -} - -func (s *DockerSuite) TestCreateWithLargePortRange(c *check.C) { - // Windows does not currently support port ranges.
- testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "create", "-p", "1-65535:1-65535/tcp", "busybox", "echo") - - cleanedContainerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "inspect", cleanedContainerID) - - containers := []struct { - HostConfig *struct { - PortBindings map[nat.Port][]nat.PortBinding - } - }{} - - err := json.Unmarshal([]byte(out), &containers) - c.Assert(err, check.IsNil, check.Commentf("Error inspecting the container: %s", err)) - c.Assert(containers, checker.HasLen, 1) - - cont := containers[0] - c.Assert(cont.HostConfig, check.NotNil, check.Commentf("Expected HostConfig, got none")) - c.Assert(cont.HostConfig.PortBindings, checker.HasLen, 65535) - - for k, v := range cont.HostConfig.PortBindings { - c.Assert(v, checker.HasLen, 1) - c.Assert(k.Port(), checker.Equals, v[0].HostPort, check.Commentf("Expected host port %s to match published port %s", k.Port(), v[0].HostPort)) - } - -} - -// "test123" should be printed by docker create + start -func (s *DockerSuite) TestCreateEchoStdout(c *check.C) { - out, _ := dockerCmd(c, "create", "busybox", "echo", "test123") - - cleanedContainerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "start", "-ai", cleanedContainerID) - c.Assert(out, checker.Equals, "test123\n", check.Commentf("container should've printed 'test123', got %q", out)) - -} - -func (s *DockerSuite) TestCreateVolumesCreated(c *check.C) { - testRequires(c, SameHostDaemon) - prefix := "/" - if daemonPlatform == "windows" { - prefix = `c:\` - } - - name := "test_create_volume" - dockerCmd(c, "create", "--name", name, "-v", prefix+"foo", "busybox") - - dir, err := inspectMountSourceField(name, prefix+"foo") - c.Assert(err, check.IsNil, check.Commentf("Error getting volume host path: %q", err)) - - if _, err := os.Stat(dir); err != nil && os.IsNotExist(err) { - c.Fatalf("Volume was not created") - } - if err != nil { - c.Fatalf("Error statting volume host path: %q", err) - } - -} - -func (s *DockerSuite) TestCreateLabels(c *check.C) { - name := "test_create_labels" - expected := map[string]string{"k1": "v1", "k2": "v2"} - dockerCmd(c, "create", "--name", name, "-l", "k1=v1", "--label", "k2=v2", "busybox") - - actual := make(map[string]string) - inspectFieldAndMarshall(c, name, "Config.Labels", &actual) - - if !reflect.DeepEqual(expected, actual) { - c.Fatalf("Expected %s got %s", expected, actual) - } -} - -func (s *DockerSuite) TestCreateLabelFromImage(c *check.C) { - imageName := "testcreatebuildlabel" - _, err := buildImage(imageName, - `FROM busybox - LABEL k1=v1 k2=v2`, - true) - - c.Assert(err, check.IsNil) - - name := "test_create_labels_from_image" - expected := map[string]string{"k2": "x", "k3": "v3", "k1": "v1"} - dockerCmd(c, "create", "--name", name, "-l", "k2=x", "--label", "k3=v3", imageName) - - actual := make(map[string]string) - inspectFieldAndMarshall(c, name, "Config.Labels", &actual) - - if !reflect.DeepEqual(expected, actual) { - c.Fatalf("Expected %s got %s", expected, actual) - } -} - -func (s *DockerSuite) TestCreateHostnameWithNumber(c *check.C) { - // TODO Windows. Consider enabling this in TP5 timeframe if Windows support - // is fully hooked up. The hostname is passed through, but only to the - // environment variable "COMPUTERNAME". It is not hooked up to hostname.exe - // or returned in ipconfig. Needs platform support in networking. 
- testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-h", "web.0", "busybox", "hostname") - c.Assert(strings.TrimSpace(out), checker.Equals, "web.0", check.Commentf("hostname not set, expected `web.0`, got: %s", out)) - -} - -func (s *DockerSuite) TestCreateRM(c *check.C) { - // Test to make sure we can 'rm' a new container that is in - // "Created" state, and has never been run. Test "rm -f" too. - - // create a container - out, _ := dockerCmd(c, "create", "busybox") - cID := strings.TrimSpace(out) - - dockerCmd(c, "rm", cID) - - // Now do it again so we can "rm -f" this time - out, _ = dockerCmd(c, "create", "busybox") - - cID = strings.TrimSpace(out) - dockerCmd(c, "rm", "-f", cID) -} - -func (s *DockerSuite) TestCreateModeIpcContainer(c *check.C) { - // Uses Linux specific functionality (--ipc) - testRequires(c, DaemonIsLinux, SameHostDaemon) - - out, _ := dockerCmd(c, "create", "busybox") - id := strings.TrimSpace(out) - - dockerCmd(c, "create", fmt.Sprintf("--ipc=container:%s", id), "busybox") -} - -func (s *DockerSuite) TestCreateByImageID(c *check.C) { - imageName := "testcreatebyimageid" - imageID, err := buildImage(imageName, - `FROM busybox - MAINTAINER dockerio`, - true) - if err != nil { - c.Fatal(err) - } - truncatedImageID := stringid.TruncateID(imageID) - - dockerCmd(c, "create", imageID) - dockerCmd(c, "create", truncatedImageID) - dockerCmd(c, "create", fmt.Sprintf("%s:%s", imageName, truncatedImageID)) - - // Ensure this fails - out, exit, _ := dockerCmdWithError("create", fmt.Sprintf("%s:%s", imageName, imageID)) - if exit == 0 { - c.Fatalf("expected non-zero exit code; received %d", exit) - } - - if expected := "Error parsing reference"; !strings.Contains(out, expected) { - c.Fatalf(`Expected %q in output; got: %s`, expected, out) - } - - out, exit, _ = dockerCmdWithError("create", fmt.Sprintf("%s:%s", "wrongimage", truncatedImageID)) - if exit == 0 { - c.Fatalf("expected non-zero exit code; received %d", exit) - } - - if expected := "Unable to find image"; !strings.Contains(out, expected) { - c.Fatalf(`Expected %q in output; got: %s`, expected, out) - } -} - -func (s *DockerTrustSuite) TestTrustedCreate(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-create") - - // Try create - createCmd := exec.Command(dockerBinary, "create", repoName) - s.trustedCmd(createCmd) - out, _, err := runCommandWithOutput(createCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted create:\n%s", out)) - - dockerCmd(c, "rmi", repoName) - - // Try untrusted create to ensure we pushed the tag to the registry - createCmd = exec.Command(dockerBinary, "create", "--disable-content-trust=true", repoName) - s.trustedCmd(createCmd) - out, _, err = runCommandWithOutput(createCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create with --disable-content-trust:\n%s", out)) - -} - -func (s *DockerTrustSuite) TestUntrustedCreate(c *check.C) { - repoName := fmt.Sprintf("%v/dockercliuntrusted/createtest", privateRegistryURL) - withTagName := fmt.Sprintf("%s:latest", repoName) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", withTagName) - dockerCmd(c, "push", withTagName) - dockerCmd(c, "rmi", withTagName) - - // Try trusted create on untrusted tag - createCmd := exec.Command(dockerBinary, "create", withTagName) - s.trustedCmd(createCmd) - out, _, err :=
runCommandWithOutput(createCmd) - c.Assert(err, check.Not(check.IsNil)) - c.Assert(string(out), checker.Contains, fmt.Sprintf("does not have trust data for %s", repoName), check.Commentf("Missing expected output on trusted create:\n%s", out)) - -} - -func (s *DockerTrustSuite) TestTrustedIsolatedCreate(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-isolated-create") - - // Try create - createCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated-create", "create", repoName) - s.trustedCmd(createCmd) - out, _, err := runCommandWithOutput(createCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted create:\n%s", out)) - - dockerCmd(c, "rmi", repoName) -} - -func (s *DockerTrustSuite) TestCreateWhenCertExpired(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := s.setupTrustedImage(c, "trusted-create-expired") - - // Certificates are valid for 10 years - elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try create - createCmd := exec.Command(dockerBinary, "create", repoName) - s.trustedCmd(createCmd) - out, _, err := runCommandWithOutput(createCmd) - c.Assert(err, check.Not(check.IsNil)) - c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) - }) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try create - createCmd := exec.Command(dockerBinary, "create", "--disable-content-trust", repoName) - s.trustedCmd(createCmd) - out, _, err := runCommandWithOutput(createCmd) - c.Assert(err, check.Not(check.IsNil)) - c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted create in the distant future:\n%s", out)) - - }) -} - -func (s *DockerTrustSuite) TestTrustedCreateFromBadTrustServer(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclievilcreate/trusted:latest", privateRegistryURL) - evilLocalConfigDir, err := ioutil.TempDir("", "evilcreate-local-config-dir") - c.Assert(err, check.IsNil) - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) - - dockerCmd(c, "rmi", repoName) - - // Try create - createCmd := exec.Command(dockerBinary, "create", repoName) - s.trustedCmd(createCmd) - out, _, err = runCommandWithOutput(createCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf("Missing expected output on trusted create:\n%s", out)) - - dockerCmd(c, "rmi", repoName) - - // Kill the notary server, start a new "evil" one. - s.not.Close() - s.not, err = newTestNotary(c) - c.Assert(err, check.IsNil) - - // In order to make an evil server, let's re-init a client (with a different trust dir) and push new data.
- // tag an image and upload it to the private registry - dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) - - // Push up to the new server - pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil) - c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) - - // Now, try creating with the original client from this new trust server. This should fall back to our cached timestamp and metadata. - createCmd = exec.Command(dockerBinary, "create", repoName) - s.trustedCmd(createCmd) - out, _, err = runCommandWithOutput(createCmd) - if err != nil { - c.Fatalf("Error falling back to cached trust data: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Error while downloading remote metadata, using cached timestamp") { - c.Fatalf("Missing expected output on trusted create:\n%s", out) - } - -} - -func (s *DockerSuite) TestCreateStopSignal(c *check.C) { - name := "test_create_stop_signal" - dockerCmd(c, "create", "--name", name, "--stop-signal", "9", "busybox") - - res := inspectFieldJSON(c, name, "Config.StopSignal") - c.Assert(res, checker.Contains, "9") - -} - -func (s *DockerSuite) TestCreateWithWorkdir(c *check.C) { - // TODO Windows. This requires further investigation for porting to - // Windows CI. Currently fails. - if daemonPlatform == "windows" { - c.Skip("Fails on Windows CI") - } - name := "foo" - - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - dir := prefix + slash + "home" + slash + "foo" + slash + "bar" - - dockerCmd(c, "create", "--name", name, "-w", dir, "busybox") - dockerCmd(c, "cp", fmt.Sprintf("%s:%s", name, dir), prefix+slash+"tmp") -} - -func (s *DockerSuite) TestCreateWithInvalidLogOpts(c *check.C) { - name := "test-invalidate-log-opts" - out, _, err := dockerCmdWithError("create", "--name", name, "--log-opt", "invalid=true", "busybox") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "unknown log opt") - - out, _ = dockerCmd(c, "ps", "-a") - c.Assert(out, checker.Not(checker.Contains), name) -} - -// #20972 -func (s *DockerSuite) TestCreate64ByteHexID(c *check.C) { - out := inspectField(c, "busybox", "Id") - imageID := strings.TrimPrefix(strings.TrimSpace(string(out)), "sha256:") - - dockerCmd(c, "create", imageID) -} diff --git a/integration-cli/docker_cli_daemon_experimental_test.go b/integration-cli/docker_cli_daemon_experimental_test.go deleted file mode 100644 index 5fe96ba3ba..0000000000 --- a/integration-cli/docker_cli_daemon_experimental_test.go +++ /dev/null @@ -1,224 +0,0 @@ -// +build linux, experimental - -package main - -import ( - "os" - "os/exec" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -var pluginName = "tiborvass/no-remove" - -// TestDaemonRestartWithPluginEnabled tests state restore for an enabled plugin -func (s *DockerDaemonSuite) TestDaemonRestartWithPluginEnabled(c *check.C) { - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - - if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pluginName); err != nil { - c.Fatalf("Could not install plugin: %v %s", err, out) - } - - defer func() { - if out, err := s.d.Cmd("plugin", "disable", pluginName); err != nil { - c.Fatalf("Could not disable plugin: %v %s", err, out) - } - if out, err :=
s.d.Cmd("plugin", "remove", pluginName); err != nil { - c.Fatalf("Could not remove plugin: %v %s", err, out) - } - }() - - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } - - out, err := s.d.Cmd("plugin", "ls") - if err != nil { - c.Fatalf("Could not list plugins: %v %s", err, out) - } - c.Assert(out, checker.Contains, pluginName) - c.Assert(out, checker.Contains, "true") -} - -// TestDaemonRestartWithPluginDisabled tests state restore for a disabled plugin -func (s *DockerDaemonSuite) TestDaemonRestartWithPluginDisabled(c *check.C) { - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - - if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pluginName, "--disable"); err != nil { - c.Fatalf("Could not install plugin: %v %s", err, out) - } - - defer func() { - if out, err := s.d.Cmd("plugin", "remove", pluginName); err != nil { - c.Fatalf("Could not remove plugin: %v %s", err, out) - } - }() - - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } - - out, err := s.d.Cmd("plugin", "ls") - if err != nil { - c.Fatalf("Could not list plugins: %v %s", err, out) - } - c.Assert(out, checker.Contains, pluginName) - c.Assert(out, checker.Contains, "false") -} - -// TestDaemonKillLiveRestoreWithPlugins SIGKILLs daemon started with --live-restore. -// Plugins should continue to run. -func (s *DockerDaemonSuite) TestDaemonKillLiveRestoreWithPlugins(c *check.C) { - if err := s.d.Start("--live-restore"); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pluginName); err != nil { - c.Fatalf("Could not install plugin: %v %s", err, out) - } - defer func() { - if err := s.d.Restart("--live-restore"); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } - if out, err := s.d.Cmd("plugin", "disable", pluginName); err != nil { - c.Fatalf("Could not disable plugin: %v %s", err, out) - } - if out, err := s.d.Cmd("plugin", "remove", pluginName); err != nil { - c.Fatalf("Could not remove plugin: %v %s", err, out) - } - }() - - if err := s.d.Kill(); err != nil { - c.Fatalf("Could not kill daemon: %v", err) - } - - cmd := exec.Command("pgrep", "-f", "plugin-no-remove") - if out, ec, err := runCommandWithOutput(cmd); ec != 0 { - c.Fatalf("Expected exit code '0', got %d err: %v output: %s ", ec, err, out) - } -} - -// TestDaemonShutdownLiveRestoreWithPlugins SIGTERMs daemon started with --live-restore. -// Plugins should continue to run. 
-func (s *DockerDaemonSuite) TestDaemonShutdownLiveRestoreWithPlugins(c *check.C) { - if err := s.d.Start("--live-restore"); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pluginName); err != nil { - c.Fatalf("Could not install plugin: %v %s", err, out) - } - defer func() { - if err := s.d.Restart("--live-restore"); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } - if out, err := s.d.Cmd("plugin", "disable", pluginName); err != nil { - c.Fatalf("Could not disable plugin: %v %s", err, out) - } - if out, err := s.d.Cmd("plugin", "remove", pluginName); err != nil { - c.Fatalf("Could not remove plugin: %v %s", err, out) - } - }() - - if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { - c.Fatalf("Could not kill daemon: %v", err) - } - - cmd := exec.Command("pgrep", "-f", "plugin-no-remove") - if out, ec, err := runCommandWithOutput(cmd); ec != 0 { - c.Fatalf("Expected exit code '0', got %d err: %v output: %s ", ec, err, out) - } -} - -// TestDaemonShutdownWithPlugins shuts down running plugins. -func (s *DockerDaemonSuite) TestDaemonShutdownWithPlugins(c *check.C) { - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - if out, err := s.d.Cmd("plugin", "install", "--grant-all-permissions", pluginName); err != nil { - c.Fatalf("Could not install plugin: %v %s", err, out) - } - - defer func() { - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } - if out, err := s.d.Cmd("plugin", "disable", pluginName); err != nil { - c.Fatalf("Could not disable plugin: %v %s", err, out) - } - if out, err := s.d.Cmd("plugin", "remove", pluginName); err != nil { - c.Fatalf("Could not remove plugin: %v %s", err, out) - } - }() - - if err := s.d.cmd.Process.Signal(os.Interrupt); err != nil { - c.Fatalf("Could not kill daemon: %v", err) - } - - for { - if err := syscall.Kill(s.d.cmd.Process.Pid, 0); err == syscall.ESRCH { - break - } - } - - cmd := exec.Command("pgrep", "-f", "plugin-no-remove") - if out, ec, err := runCommandWithOutput(cmd); ec != 1 { - c.Fatalf("Expected exit code '1', got %d err: %v output: %s ", ec, err, out) - } -} - -// TestVolumePlugin tests volume creation using a plugin. 
-func (s *DockerDaemonSuite) TestVolumePlugin(c *check.C) { - volName := "plugin-volume" - volRoot := "/data" - destDir := "/tmp/data/" - destFile := "foo" - - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - out, err := s.d.Cmd("plugin", "install", pluginName, "--grant-all-permissions") - if err != nil { - c.Fatalf("Could not install plugin: %v %s", err, out) - } - defer func() { - if out, err := s.d.Cmd("plugin", "disable", pluginName); err != nil { - c.Fatalf("Could not disable plugin: %v %s", err, out) - } - if out, err := s.d.Cmd("plugin", "remove", pluginName); err != nil { - c.Fatalf("Could not remove plugin: %v %s", err, out) - } - }() - - out, err = s.d.Cmd("volume", "create", "-d", pluginName, "--name", volName) - if err != nil { - c.Fatalf("Could not create volume: %v %s", err, out) - } - defer func() { - if out, err := s.d.Cmd("volume", "remove", volName); err != nil { - c.Fatalf("Could not remove volume: %v %s", err, out) - } - }() - - mountPoint, err := s.d.Cmd("volume", "inspect", volName, "--format", "{{.Mountpoint}}") - if err != nil { - c.Fatalf("Could not inspect volume: %v %s", err, mountPoint) - } - mountPoint = strings.TrimSpace(mountPoint) - - out, err = s.d.Cmd("run", "--rm", "-v", volName+":"+destDir, "busybox", "touch", destDir+destFile) - c.Assert(err, checker.IsNil, check.Commentf(out)) - path := filepath.Join(mountPoint, destFile) - _, err = os.Lstat(path) - c.Assert(err, checker.IsNil) - - // tiborvass/no-remove is a volume plugin that persists data on disk at /data, - // even after the volume is removed. So perform an explicit filesystem cleanup. - os.RemoveAll(volRoot) -} diff --git a/integration-cli/docker_cli_daemon_test.go b/integration-cli/docker_cli_daemon_test.go deleted file mode 100644 index 7508f9fc67..0000000000 --- a/integration-cli/docker_cli_daemon_test.go +++ /dev/null @@ -1,2719 +0,0 @@ -// +build linux - -package main - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "os/exec" - "path" - "path/filepath" - "regexp" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/mount" - "github.com/docker/go-units" - "github.com/docker/libnetwork/iptables" - "github.com/docker/libtrust" - "github.com/go-check/check" - "github.com/kr/pty" -) - -// TestLegacyDaemonCommand tests starting the docker daemon using the deprecated "docker daemon" -// command. Remove this test when the deprecated command is removed.
-func (s *DockerDaemonSuite) TestLegacyDaemonCommand(c *check.C) { - cmd := exec.Command(dockerBinary, "daemon", "--storage-driver=vfs", "--debug") - err := cmd.Start() - c.Assert(err, checker.IsNil, check.Commentf("could not start daemon using 'docker daemon'")) - - c.Assert(cmd.Process.Kill(), checker.IsNil) -} - -func (s *DockerDaemonSuite) TestDaemonRestartWithRunningContainersPorts(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } - - if out, err := s.d.Cmd("run", "-d", "--name", "top1", "-p", "1234:80", "--restart", "always", "busybox:latest", "top"); err != nil { - c.Fatalf("Could not run top1: err=%v\n%s", err, out) - } - // --restart=no by default - if out, err := s.d.Cmd("run", "-d", "--name", "top2", "-p", "80", "busybox:latest", "top"); err != nil { - c.Fatalf("Could not run top2: err=%v\n%s", err, out) - } - - testRun := func(m map[string]bool, prefix string) { - var format string - for cont, shouldRun := range m { - out, err := s.d.Cmd("ps") - if err != nil { - c.Fatalf("Could not run ps: err=%v\n%q", err, out) - } - if shouldRun { - format = "%scontainer %q is not running" - } else { - format = "%scontainer %q is running" - } - if shouldRun != strings.Contains(out, cont) { - c.Fatalf(format, prefix, cont) - } - } - } - - testRun(map[string]bool{"top1": true, "top2": true}, "") - - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } - testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") -} - -func (s *DockerDaemonSuite) TestDaemonRestartWithVolumesRefs(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } - - if out, err := s.d.Cmd("run", "--name", "volrestarttest1", "-v", "/foo", "busybox"); err != nil { - c.Fatal(err, out) - } - - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } - - if _, err := s.d.Cmd("run", "-d", "--volumes-from", "volrestarttest1", "--name", "volrestarttest2", "busybox", "top"); err != nil { - c.Fatal(err) - } - - if out, err := s.d.Cmd("rm", "-fv", "volrestarttest2"); err != nil { - c.Fatal(err, out) - } - - out, err := s.d.Cmd("inspect", "-f", "{{json .Mounts}}", "volrestarttest1") - c.Assert(err, check.IsNil) - - if _, err := inspectMountPointJSON(out, "/foo"); err != nil { - c.Fatalf("Expected volume to exist: /foo, error: %v\n", err) - } -} - -// #11008 -func (s *DockerDaemonSuite) TestDaemonRestartUnlessStopped(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) - - out, err := s.d.Cmd("run", "-d", "--name", "top1", "--restart", "always", "busybox:latest", "top") - c.Assert(err, check.IsNil, check.Commentf("run top1: %v", out)) - - out, err = s.d.Cmd("run", "-d", "--name", "top2", "--restart", "unless-stopped", "busybox:latest", "top") - c.Assert(err, check.IsNil, check.Commentf("run top2: %v", out)) - - testRun := func(m map[string]bool, prefix string) { - var format string - for name, shouldRun := range m { - out, err := s.d.Cmd("ps") - c.Assert(err, check.IsNil, check.Commentf("run ps: %v", out)) - if shouldRun { - format = "%scontainer %q is not running" - } else { - format = "%scontainer %q is running" - } - c.Assert(strings.Contains(out, name), check.Equals, shouldRun, check.Commentf(format, prefix, name)) - } - } - - // both running - testRun(map[string]bool{"top1": true, "top2": true}, "") - - out, err = s.d.Cmd("stop", "top1") - c.Assert(err, check.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("stop", "top2") - c.Assert(err, check.IsNil, 
check.Commentf(out)) - - // both stopped - testRun(map[string]bool{"top1": false, "top2": false}, "") - - err = s.d.Restart() - c.Assert(err, check.IsNil) - - // restart=always running - testRun(map[string]bool{"top1": true, "top2": false}, "After daemon restart: ") - - out, err = s.d.Cmd("start", "top2") - c.Assert(err, check.IsNil, check.Commentf("start top2: %v", out)) - - err = s.d.Restart() - c.Assert(err, check.IsNil) - - // both running - testRun(map[string]bool{"top1": true, "top2": true}, "After second daemon restart: ") - -} - -func (s *DockerDaemonSuite) TestDaemonRestartOnFailure(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) - - out, err := s.d.Cmd("run", "-d", "--name", "test1", "--restart", "on-failure:3", "busybox:latest", "false") - c.Assert(err, check.IsNil, check.Commentf("run test1: %v", out)) - - // wait for test1 to stop - hostArgs := []string{"--host", s.d.sock()} - err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 10*time.Second, hostArgs...) - c.Assert(err, checker.IsNil, check.Commentf("test1 should have exited but did not")) - - // record the last start time - out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) - lastStartTime := out - - err = s.d.Restart() - c.Assert(err, check.IsNil) - - // test1 shouldn't restart at all - err = waitInspectWithArgs("test1", "{{.State.Running}} {{.State.Restarting}}", "false false", 0, hostArgs...) - c.Assert(err, checker.IsNil, check.Commentf("test1 should have exited but did not")) - - // make sure test1 isn't restarted when the daemon restarts: - // if the "StartedAt" time updates, it means test1 was restarted at some point. - out, err = s.d.Cmd("inspect", "-f={{.State.StartedAt}}", "test1") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) - c.Assert(out, checker.Equals, lastStartTime, check.Commentf("test1 shouldn't start after the daemon restarts")) -} - -func (s *DockerDaemonSuite) TestDaemonStartIptablesFalse(c *check.C) { - if err := s.d.Start("--iptables=false"); err != nil { - c.Fatalf("we should have been able to start the daemon when passing --iptables=false: %v", err) - } -} - -// Make sure we cannot shrink the base device at daemon restart. -func (s *DockerDaemonSuite) TestDaemonRestartWithInvalidBasesize(c *check.C) { - testRequires(c, Devicemapper) - c.Assert(s.d.Start(), check.IsNil) - - oldBasesizeBytes := s.d.getBaseDeviceSize(c) - var newBasesizeBytes int64 = 1073741824 //1GB in bytes - - if newBasesizeBytes < oldBasesizeBytes { - err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) - c.Assert(err, check.IsNil, check.Commentf("daemon should not have started as new base device size is less than existing base device size: %v", err)) - } - c.Assert(s.d.Stop(), check.IsNil) -} - -// Make sure we can grow the base device at daemon restart.
-func (s *DockerDaemonSuite) TestDaemonRestartWithIncreasedBasesize(c *check.C) { - testRequires(c, Devicemapper) - c.Assert(s.d.Start(), check.IsNil) - - oldBasesizeBytes := s.d.getBaseDeviceSize(c) - - var newBasesizeBytes int64 = 53687091200 //50GB in bytes - - if newBasesizeBytes < oldBasesizeBytes { - c.Skip(fmt.Sprintf("New base device size (%v) must be greater than (%s)", units.HumanSize(float64(newBasesizeBytes)), units.HumanSize(float64(oldBasesizeBytes)))) - } - - err := s.d.Restart("--storage-opt", fmt.Sprintf("dm.basesize=%d", newBasesizeBytes)) - c.Assert(err, check.IsNil, check.Commentf("we should have been able to start the daemon with increased base device size: %v", err)) - - basesizeAfterRestart := s.d.getBaseDeviceSize(c) - newBasesize, err := convertBasesize(newBasesizeBytes) - c.Assert(err, check.IsNil, check.Commentf("Error in converting base device size: %v", err)) - c.Assert(newBasesize, check.Equals, basesizeAfterRestart, check.Commentf("Basesize passed is not equal to Basesize set")) - c.Assert(s.d.Stop(), check.IsNil) -} - -// Issue #8444: If docker0 bridge is modified (intentionally or unintentionally) and -// no longer has an IP associated, we should gracefully handle that case and associate -// an IP with it rather than fail daemon start -func (s *DockerDaemonSuite) TestDaemonStartBridgeWithoutIPAssociation(c *check.C) { - // rather than depending on brctl commands to verify docker0 is created and up - // let's start the daemon and stop it, and then make a modification to run the - // actual test - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - if err := s.d.Stop(); err != nil { - c.Fatalf("Could not stop daemon: %v", err) - } - - // now we will remove the ip from docker0 and then try starting the daemon - ipCmd := exec.Command("ip", "addr", "flush", "dev", "docker0") - stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) - if err != nil { - c.Fatalf("failed to remove docker0 IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) - } - - if err := s.d.Start(); err != nil { - warning := "**WARNING: Docker bridge network in bad state--delete docker0 bridge interface to fix" - c.Fatalf("Could not start daemon when docker0 has no IP address: %v\n%s", err, warning) - } -} - -func (s *DockerDaemonSuite) TestDaemonIptablesClean(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } - - if out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top"); err != nil { - c.Fatalf("Could not run top: %s, %v", out, err) - } - - // get output from iptables with container running - ipTablesSearchString := "tcp dpt:80" - ipTablesCmd := exec.Command("iptables", "-nvL") - out, _, err := runCommandWithOutput(ipTablesCmd) - if err != nil { - c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) - } - - if !strings.Contains(out, ipTablesSearchString) { - c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) - } - - if err := s.d.Stop(); err != nil { - c.Fatalf("Could not stop daemon: %v", err) - } - - // get output from iptables after restart - ipTablesCmd = exec.Command("iptables", "-nvL") - out, _, err = runCommandWithOutput(ipTablesCmd) - if err != nil { - c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) - } - - if strings.Contains(out, ipTablesSearchString) { - c.Fatalf("iptables output should not have contained %q, but was %q", ipTablesSearchString, out) - } -} - -func (s 
*DockerDaemonSuite) TestDaemonIptablesCreate(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } - - if out, err := s.d.Cmd("run", "-d", "--name", "top", "--restart=always", "-p", "80", "busybox:latest", "top"); err != nil { - c.Fatalf("Could not run top: %s, %v", out, err) - } - - // get output from iptables with container running - ipTablesSearchString := "tcp dpt:80" - ipTablesCmd := exec.Command("iptables", "-nvL") - out, _, err := runCommandWithOutput(ipTablesCmd) - if err != nil { - c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) - } - - if !strings.Contains(out, ipTablesSearchString) { - c.Fatalf("iptables output should have contained %q, but was %q", ipTablesSearchString, out) - } - - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } - - // make sure the container was restarted and is running - runningOut, err := s.d.Cmd("inspect", "--format='{{.State.Running}}'", "top") - if err != nil { - c.Fatalf("Could not inspect on container: %s, %v", out, err) - } - if strings.TrimSpace(runningOut) != "true" { - c.Fatalf("Container should have been restarted after daemon restart. Status running should have been true but was: %q", strings.TrimSpace(runningOut)) - } - - // get output from iptables after restart - ipTablesCmd = exec.Command("iptables", "-nvL") - out, _, err = runCommandWithOutput(ipTablesCmd) - if err != nil { - c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) - } - - if !strings.Contains(out, ipTablesSearchString) { - c.Fatalf("iptables output after restart should have contained %q, but was %q", ipTablesSearchString, out) - } -} - -// TestDaemonIPv6Enabled checks that when the daemon is started with --ipv6=true, the docker0 bridge -// has the fe80::1 address and a container is assigned a link-local address -func (s *DockerSuite) TestDaemonIPv6Enabled(c *check.C) { - testRequires(c, IPv6) - - if err := setupV6(); err != nil { - c.Fatal("Could not set up host for IPv6 tests") - } - - d := NewDaemon(c) - - if err := d.StartWithBusybox("--ipv6"); err != nil { - c.Fatal(err) - } - defer d.Stop() - - iface, err := net.InterfaceByName("docker0") - if err != nil { - c.Fatalf("Error getting docker0 interface: %v", err) - } - - addrs, err := iface.Addrs() - if err != nil { - c.Fatalf("Error getting addresses for docker0 interface: %v", err) - } - - var found bool - expected := "fe80::1/64" - - for i := range addrs { - if addrs[i].String() == expected { - found = true - } - } - - if !found { - c.Fatalf("Bridge does not have an IPv6 Address") - } - - if out, err := d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest"); err != nil { - c.Fatalf("Could not run container: %s, %v", out, err) - } - - out, err := d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.LinkLocalIPv6Address}}'", "ipv6test") - out = strings.Trim(out, " \r\n'") - - if err != nil { - c.Fatalf("Error inspecting container: %s, %v", out, err) - } - - if ip := net.ParseIP(out); ip == nil { - c.Fatalf("Container should have a link-local IPv6 address") - } - - out, err = d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") - out = strings.Trim(out, " \r\n'") - - if err != nil { - c.Fatalf("Error inspecting container: %s, %v", out, err) - } - - if ip := net.ParseIP(out); ip != nil { - c.Fatalf("Container should not have a global IPv6 address: %v", out) - } - - if err := teardownV6(); err != nil { - c.Fatal("Could not perform teardown for
IPv6 tests") - } - -} - -// TestDaemonIPv6FixedCIDR checks that when the daemon is started with --ipv6=true and a fixed CIDR -// that running containers are given a link-local and global IPv6 address -func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDR(c *check.C) { - // IPv6 setup is messing with local bridge address. - testRequires(c, SameHostDaemon) - err := setupV6() - c.Assert(err, checker.IsNil, check.Commentf("Could not set up host for IPv6 tests")) - - err = s.d.StartWithBusybox("--ipv6", "--fixed-cidr-v6='2001:db8:2::/64'", "--default-gateway-v6='2001:db8:2::100'") - c.Assert(err, checker.IsNil, check.Commentf("Could not start daemon with busybox: %v", err)) - - out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "busybox:latest") - c.Assert(err, checker.IsNil, check.Commentf("Could not run container: %s, %v", out, err)) - - out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") - out = strings.Trim(out, " \r\n'") - - c.Assert(err, checker.IsNil, check.Commentf(out)) - - ip := net.ParseIP(out) - c.Assert(ip, checker.NotNil, check.Commentf("Container should have a global IPv6 address")) - - out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.IPv6Gateway}}'", "ipv6test") - c.Assert(err, checker.IsNil, check.Commentf(out)) - - c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:2::100", check.Commentf("Container should have a global IPv6 gateway")) - - err = teardownV6() - c.Assert(err, checker.IsNil, check.Commentf("Could not perform teardown for IPv6 tests")) -} - -// TestDaemonIPv6FixedCIDRAndMac checks that when the daemon is started with ipv6 fixed CIDR -// the running containers are given an IPv6 address derived from the MAC address and the ipv6 fixed CIDR -func (s *DockerDaemonSuite) TestDaemonIPv6FixedCIDRAndMac(c *check.C) { - // IPv6 setup is messing with local bridge address. 
- testRequires(c, SameHostDaemon) - err := setupV6() - c.Assert(err, checker.IsNil) - - err = s.d.StartWithBusybox("--ipv6", "--fixed-cidr-v6='2001:db8:1::/64'") - c.Assert(err, checker.IsNil) - - out, err := s.d.Cmd("run", "-itd", "--name=ipv6test", "--mac-address", "AA:BB:CC:DD:EE:FF", "busybox") - c.Assert(err, checker.IsNil) - - out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.Networks.bridge.GlobalIPv6Address}}'", "ipv6test") - c.Assert(err, checker.IsNil) - c.Assert(strings.Trim(out, " \r\n'"), checker.Equals, "2001:db8:1::aabb:ccdd:eeff") - - err = teardownV6() - c.Assert(err, checker.IsNil) -} - -func (s *DockerDaemonSuite) TestDaemonLogLevelWrong(c *check.C) { - c.Assert(s.d.Start("--log-level=bogus"), check.NotNil, check.Commentf("Daemon shouldn't start with wrong log level")) -} - -func (s *DockerDaemonSuite) TestDaemonLogLevelDebug(c *check.C) { - if err := s.d.Start("--log-level=debug"); err != nil { - c.Fatal(err) - } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) - if !strings.Contains(string(content), `level=debug`) { - c.Fatalf(`Missing level="debug" in log file:\n%s`, string(content)) - } -} - -func (s *DockerDaemonSuite) TestDaemonLogLevelFatal(c *check.C) { - // we create a new daemon here to get a fresh logFile - if err := s.d.Start("--log-level=fatal"); err != nil { - c.Fatal(err) - } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) - if strings.Contains(string(content), `level=debug`) { - c.Fatalf(`Should not have level="debug" in log file:\n%s`, string(content)) - } -} - -func (s *DockerDaemonSuite) TestDaemonFlagD(c *check.C) { - if err := s.d.Start("-D"); err != nil { - c.Fatal(err) - } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) - if !strings.Contains(string(content), `level=debug`) { - c.Fatalf(`Should have level="debug" in log file using -D:\n%s`, string(content)) - } -} - -func (s *DockerDaemonSuite) TestDaemonFlagDebug(c *check.C) { - if err := s.d.Start("--debug"); err != nil { - c.Fatal(err) - } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) - if !strings.Contains(string(content), `level=debug`) { - c.Fatalf(`Should have level="debug" in log file using --debug:\n%s`, string(content)) - } -} - -func (s *DockerDaemonSuite) TestDaemonFlagDebugLogLevelFatal(c *check.C) { - if err := s.d.Start("--debug", "--log-level=fatal"); err != nil { - c.Fatal(err) - } - content, _ := ioutil.ReadFile(s.d.logFile.Name()) - if !strings.Contains(string(content), `level=debug`) { - c.Fatalf(`Should have level="debug" in log file when using both --debug and --log-level=fatal:\n%s`, string(content)) - } -} - -func (s *DockerDaemonSuite) TestDaemonAllocatesListeningPort(c *check.C) { - listeningPorts := [][]string{ - {"0.0.0.0", "0.0.0.0", "5678"}, - {"127.0.0.1", "127.0.0.1", "1234"}, - {"localhost", "127.0.0.1", "1235"}, - } - - cmdArgs := make([]string, 0, len(listeningPorts)*2) - for _, hostDirective := range listeningPorts { - cmdArgs = append(cmdArgs, "--host", fmt.Sprintf("tcp://%s:%s", hostDirective[0], hostDirective[2])) - } - - if err := s.d.StartWithBusybox(cmdArgs...); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } - - for _, hostDirective := range listeningPorts { - output, err := s.d.Cmd("run", "-p", fmt.Sprintf("%s:%s:80", hostDirective[1], hostDirective[2]), "busybox", "true") - if err == nil { - c.Fatalf("Container should not start, expected port already allocated error: %q", output) - } else if !strings.Contains(output, "port is already allocated") { - c.Fatalf("Expected port is already allocated error:
%q", output) - } - } -} - -func (s *DockerDaemonSuite) TestDaemonKeyGeneration(c *check.C) { - // TODO: skip or update for Windows daemon - os.Remove("/etc/docker/key.json") - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - s.d.Stop() - - k, err := libtrust.LoadKeyFile("/etc/docker/key.json") - if err != nil { - c.Fatalf("Error opening key file") - } - kid := k.KeyID() - // Test Key ID is a valid fingerprint (e.g. QQXN:JY5W:TBXI:MK3X:GX6P:PD5D:F56N:NHCS:LVRZ:JA46:R24J:XEFF) - if len(kid) != 59 { - c.Fatalf("Bad key ID: %s", kid) - } -} - -func (s *DockerDaemonSuite) TestDaemonKeyMigration(c *check.C) { - // TODO: skip or update for Windows daemon - os.Remove("/etc/docker/key.json") - k1, err := libtrust.GenerateECP256PrivateKey() - if err != nil { - c.Fatalf("Error generating private key: %s", err) - } - if err := os.MkdirAll(filepath.Join(os.Getenv("HOME"), ".docker"), 0755); err != nil { - c.Fatalf("Error creating .docker directory: %s", err) - } - if err := libtrust.SaveKey(filepath.Join(os.Getenv("HOME"), ".docker", "key.json"), k1); err != nil { - c.Fatalf("Error saving private key: %s", err) - } - - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - s.d.Stop() - - k2, err := libtrust.LoadKeyFile("/etc/docker/key.json") - if err != nil { - c.Fatalf("Error opening key file") - } - if k1.KeyID() != k2.KeyID() { - c.Fatalf("Key not migrated") - } -} - -// GH#11320 - verify that the daemon exits on failure properly -// Note that this explicitly tests the conflict of {-b,--bridge} and {--bip} options as the means -// to get a daemon init failure; no other tests for -b/--bip conflict are therefore required -func (s *DockerDaemonSuite) TestDaemonExitOnFailure(c *check.C) { - //attempt to start daemon with incorrect flags (we know -b and --bip conflict) - if err := s.d.Start("--bridge", "nosuchbridge", "--bip", "1.1.1.1"); err != nil { - //verify we got the right error - if !strings.Contains(err.Error(), "Daemon exited") { - c.Fatalf("Expected daemon not to start, got %v", err) - } - // look in the log and make sure we got the message that daemon is shutting down - runCmd := exec.Command("grep", "Error starting daemon", s.d.LogFileName()) - if out, _, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("Expected 'Error starting daemon' message; but doesn't exist in log: %q, err: %v", out, err) - } - } else { - //if we didn't get an error and the daemon is running, this is a failure - c.Fatal("Conflicting options should cause the daemon to error out with a failure") - } -} - -func (s *DockerDaemonSuite) TestDaemonBridgeExternal(c *check.C) { - d := s.d - err := d.Start("--bridge", "nosuchbridge") - c.Assert(err, check.NotNil, check.Commentf("--bridge option with an invalid bridge should cause the daemon to fail")) - defer d.Restart() - - bridgeName := "external-bridge" - bridgeIP := "192.169.1.1/24" - _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) - - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) - defer deleteInterface(c, bridgeName) - - err = d.StartWithBusybox("--bridge", bridgeName) - c.Assert(err, check.IsNil) - - ipTablesSearchString := bridgeIPNet.String() - ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") - out, _, err = runCommandWithOutput(ipTablesCmd) - c.Assert(err, check.IsNil) - - c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, - check.Commentf("iptables output should have contained %q, but was 
%q", - ipTablesSearchString, out)) - - _, err = d.Cmd("run", "-d", "--name", "ExtContainer", "busybox", "top") - c.Assert(err, check.IsNil) - - containerIP := d.findContainerIP("ExtContainer") - ip := net.ParseIP(containerIP) - c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, - check.Commentf("Container IP-Address must be in the same subnet range : %s", - containerIP)) -} - -func createInterface(c *check.C, ifType string, ifName string, ipNet string) (string, error) { - args := []string{"link", "add", "name", ifName, "type", ifType} - ipLinkCmd := exec.Command("ip", args...) - out, _, err := runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - - ifCfgCmd := exec.Command("ifconfig", ifName, ipNet, "up") - out, _, err = runCommandWithOutput(ifCfgCmd) - return out, err -} - -func deleteInterface(c *check.C, ifName string) { - ifCmd := exec.Command("ip", "link", "delete", ifName) - out, _, err := runCommandWithOutput(ifCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - - flushCmd := exec.Command("iptables", "-t", "nat", "--flush") - out, _, err = runCommandWithOutput(flushCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - - flushCmd = exec.Command("iptables", "--flush") - out, _, err = runCommandWithOutput(flushCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) -} - -func (s *DockerDaemonSuite) TestDaemonBridgeIP(c *check.C) { - // TestDaemonBridgeIP Steps - // 1. Delete the existing docker0 Bridge - // 2. Set --bip daemon configuration and start the new Docker Daemon - // 3. Check if the bip config has taken effect using ifconfig and iptables commands - // 4. Launch a Container and make sure the IP-Address is in the expected subnet - // 5. Delete the docker0 Bridge - // 6. Restart the Docker Daemon (via deferred action) - // This Restart takes care of bringing docker0 interface back to auto-assigned IP - - defaultNetworkBridge := "docker0" - deleteInterface(c, defaultNetworkBridge) - - d := s.d - - bridgeIP := "192.169.1.1/24" - ip, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) - - err := d.StartWithBusybox("--bip", bridgeIP) - c.Assert(err, check.IsNil) - defer d.Restart() - - ifconfigSearchString := ip.String() - ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge) - out, _, _, err := runCommandWithStdoutStderr(ifconfigCmd) - c.Assert(err, check.IsNil) - - c.Assert(strings.Contains(out, ifconfigSearchString), check.Equals, true, - check.Commentf("ifconfig output should have contained %q, but was %q", - ifconfigSearchString, out)) - - ipTablesSearchString := bridgeIPNet.String() - ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") - out, _, err = runCommandWithOutput(ipTablesCmd) - c.Assert(err, check.IsNil) - - c.Assert(strings.Contains(out, ipTablesSearchString), check.Equals, true, - check.Commentf("iptables output should have contained %q, but was %q", - ipTablesSearchString, out)) - - out, err = d.Cmd("run", "-d", "--name", "test", "busybox", "top") - c.Assert(err, check.IsNil) - - containerIP := d.findContainerIP("test") - ip = net.ParseIP(containerIP) - c.Assert(bridgeIPNet.Contains(ip), check.Equals, true, - check.Commentf("Container IP-Address must be in the same subnet range : %s", - containerIP)) - deleteInterface(c, defaultNetworkBridge) -} - -func (s *DockerDaemonSuite) TestDaemonRestartWithBridgeIPChange(c *check.C) { - if err := s.d.Start(); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - defer s.d.Restart() - if err := s.d.Stop(); err != nil { - c.Fatalf("Could not stop daemon: %v", err) - } - - // now 
we will change the docker0's IP and then try starting the daemon - bridgeIP := "192.169.100.1/24" - _, bridgeIPNet, _ := net.ParseCIDR(bridgeIP) - - ipCmd := exec.Command("ifconfig", "docker0", bridgeIP) - stdout, stderr, _, err := runCommandWithStdoutStderr(ipCmd) - if err != nil { - c.Fatalf("failed to change docker0's IP association: %v, stdout: %q, stderr: %q", err, stdout, stderr) - } - - if err := s.d.Start("--bip", bridgeIP); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - - //check if the iptables contains new bridgeIP MASQUERADE rule - ipTablesSearchString := bridgeIPNet.String() - ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") - out, _, err := runCommandWithOutput(ipTablesCmd) - if err != nil { - c.Fatalf("Could not run iptables -nvL: %s, %v", out, err) - } - if !strings.Contains(out, ipTablesSearchString) { - c.Fatalf("iptables output should have contained new MASQUERADE rule with IP %q, but was %q", ipTablesSearchString, out) - } -} - -func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr(c *check.C) { - d := s.d - - bridgeName := "external-bridge" - bridgeIP := "192.169.1.1/24" - - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) - defer deleteInterface(c, bridgeName) - - args := []string{"--bridge", bridgeName, "--fixed-cidr", "192.169.1.0/30"} - err = d.StartWithBusybox(args...) - c.Assert(err, check.IsNil) - defer d.Restart() - - for i := 0; i < 4; i++ { - cName := "Container" + strconv.Itoa(i) - out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") - if err != nil { - c.Assert(strings.Contains(out, "no available IPv4 addresses"), check.Equals, true, - check.Commentf("Could not run a Container : %s %s", err.Error(), out)) - } - } -} - -func (s *DockerDaemonSuite) TestDaemonBridgeFixedCidr2(c *check.C) { - d := s.d - - bridgeName := "external-bridge" - bridgeIP := "10.2.2.1/16" - - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) - defer deleteInterface(c, bridgeName) - - err = d.StartWithBusybox("--bip", bridgeIP, "--fixed-cidr", "10.2.2.0/24") - c.Assert(err, check.IsNil) - defer s.d.Restart() - - out, err = d.Cmd("run", "-d", "--name", "bb", "busybox", "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) - defer d.Cmd("stop", "bb") - - out, err = d.Cmd("exec", "bb", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") - c.Assert(out, checker.Equals, "10.2.2.0\n") - - out, err = d.Cmd("run", "--rm", "busybox", "/bin/sh", "-c", "ifconfig eth0 | awk '/inet addr/{print substr($2,6)}'") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Equals, "10.2.2.2\n") -} - -func (s *DockerDaemonSuite) TestDaemonBridgeFixedCIDREqualBridgeNetwork(c *check.C) { - d := s.d - - bridgeName := "external-bridge" - bridgeIP := "172.27.42.1/16" - - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) - defer deleteInterface(c, bridgeName) - - err = d.StartWithBusybox("--bridge", bridgeName, "--fixed-cidr", bridgeIP) - c.Assert(err, check.IsNil) - defer s.d.Restart() - - out, err = d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf(out)) - cid1 := strings.TrimSpace(out) - defer d.Cmd("stop", cid1) -} - -func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Implicit(c *check.C) { - defaultNetworkBridge := "docker0" - deleteInterface(c, defaultNetworkBridge) - - d := s.d - - bridgeIP := 
"192.169.1.1" - bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) - - err := d.StartWithBusybox("--bip", bridgeIPNet) - c.Assert(err, check.IsNil) - defer d.Restart() - - expectedMessage := fmt.Sprintf("default via %s dev", bridgeIP) - out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") - c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, - check.Commentf("Implicit default gateway should be bridge IP %s, but default route was '%s'", - bridgeIP, strings.TrimSpace(out))) - deleteInterface(c, defaultNetworkBridge) -} - -func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4Explicit(c *check.C) { - defaultNetworkBridge := "docker0" - deleteInterface(c, defaultNetworkBridge) - - d := s.d - - bridgeIP := "192.169.1.1" - bridgeIPNet := fmt.Sprintf("%s/24", bridgeIP) - gatewayIP := "192.169.1.254" - - err := d.StartWithBusybox("--bip", bridgeIPNet, "--default-gateway", gatewayIP) - c.Assert(err, check.IsNil) - defer d.Restart() - - expectedMessage := fmt.Sprintf("default via %s dev", gatewayIP) - out, err := d.Cmd("run", "busybox", "ip", "-4", "route", "list", "0/0") - c.Assert(strings.Contains(out, expectedMessage), check.Equals, true, - check.Commentf("Explicit default gateway should be %s, but default route was '%s'", - gatewayIP, strings.TrimSpace(out))) - deleteInterface(c, defaultNetworkBridge) -} - -func (s *DockerDaemonSuite) TestDaemonDefaultGatewayIPv4ExplicitOutsideContainerSubnet(c *check.C) { - defaultNetworkBridge := "docker0" - deleteInterface(c, defaultNetworkBridge) - - // Program a custom default gateway outside of the container subnet, daemon should accept it and start - err := s.d.StartWithBusybox("--bip", "172.16.0.10/16", "--fixed-cidr", "172.16.1.0/24", "--default-gateway", "172.16.0.254") - c.Assert(err, check.IsNil) - - deleteInterface(c, defaultNetworkBridge) - s.d.Restart() -} - -func (s *DockerDaemonSuite) TestDaemonDefaultNetworkInvalidClusterConfig(c *check.C) { - testRequires(c, DaemonIsLinux, SameHostDaemon) - - // Start daemon without docker0 bridge - defaultNetworkBridge := "docker0" - deleteInterface(c, defaultNetworkBridge) - - d := NewDaemon(c) - discoveryBackend := "consul://consuladdr:consulport/some/path" - err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend)) - c.Assert(err, checker.IsNil) - - // Start daemon with docker0 bridge - ifconfigCmd := exec.Command("ifconfig", defaultNetworkBridge) - _, err = runCommand(ifconfigCmd) - c.Assert(err, check.IsNil) - - err = d.Restart(fmt.Sprintf("--cluster-store=%s", discoveryBackend)) - c.Assert(err, checker.IsNil) - - d.Stop() -} - -func (s *DockerDaemonSuite) TestDaemonIP(c *check.C) { - d := s.d - - ipStr := "192.170.1.1/24" - ip, _, _ := net.ParseCIDR(ipStr) - args := []string{"--ip", ip.String()} - err := d.StartWithBusybox(args...) 
- c.Assert(err, check.IsNil) - defer d.Restart() - - out, err := d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") - c.Assert(err, check.NotNil, - check.Commentf("Running a container must fail with an invalid --ip option")) - c.Assert(strings.Contains(out, "Error starting userland proxy"), check.Equals, true) - - ifName := "dummy" - out, err = createInterface(c, "dummy", ifName, ipStr) - c.Assert(err, check.IsNil, check.Commentf(out)) - defer deleteInterface(c, ifName) - - _, err = d.Cmd("run", "-d", "-p", "8000:8000", "busybox", "top") - c.Assert(err, check.IsNil) - - ipTablesCmd := exec.Command("iptables", "-t", "nat", "-nvL") - out, _, err = runCommandWithOutput(ipTablesCmd) - c.Assert(err, check.IsNil) - - regex := fmt.Sprintf("DNAT.*%s.*dpt:8000", ip.String()) - matched, _ := regexp.MatchString(regex, out) - c.Assert(matched, check.Equals, true, - check.Commentf("iptables output should have contained %q, but was %q", regex, out)) -} - -func (s *DockerDaemonSuite) TestDaemonICCPing(c *check.C) { - testRequires(c, bridgeNfIptables) - d := s.d - - bridgeName := "external-bridge" - bridgeIP := "192.169.1.1/24" - - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) - defer deleteInterface(c, bridgeName) - - args := []string{"--bridge", bridgeName, "--icc=false"} - err = d.StartWithBusybox(args...) - c.Assert(err, check.IsNil) - defer d.Restart() - - ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") - out, _, err = runCommandWithOutput(ipTablesCmd) - c.Assert(err, check.IsNil) - - regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) - matched, _ := regexp.MatchString(regex, out) - c.Assert(matched, check.Equals, true, - check.Commentf("iptables output should have contained %q, but was %q", regex, out)) - - // Pinging another container must fail with --icc=false - pingContainers(c, d, true) - - ipStr := "192.171.1.1/24" - ip, _, _ := net.ParseCIDR(ipStr) - ifName := "icc-dummy" - - createInterface(c, "dummy", ifName, ipStr) - - // But, Pinging external or a Host interface must succeed - pingCmd := fmt.Sprintf("ping -c 1 %s -W 1", ip.String()) - runArgs := []string{"--rm", "busybox", "sh", "-c", pingCmd} - _, err = d.Cmd("run", runArgs...) - c.Assert(err, check.IsNil) -} - -func (s *DockerDaemonSuite) TestDaemonICCLinkExpose(c *check.C) { - d := s.d - - bridgeName := "external-bridge" - bridgeIP := "192.169.1.1/24" - - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) - defer deleteInterface(c, bridgeName) - - args := []string{"--bridge", bridgeName, "--icc=false"} - err = d.StartWithBusybox(args...) 
- c.Assert(err, check.IsNil) - defer d.Restart() - - ipTablesCmd := exec.Command("iptables", "-nvL", "FORWARD") - out, _, err = runCommandWithOutput(ipTablesCmd) - c.Assert(err, check.IsNil) - - regex := fmt.Sprintf("DROP.*all.*%s.*%s", bridgeName, bridgeName) - matched, _ := regexp.MatchString(regex, out) - c.Assert(matched, check.Equals, true, - check.Commentf("iptables output should have contained %q, but was %q", regex, out)) - - out, err = d.Cmd("run", "-d", "--expose", "4567", "--name", "icc1", "busybox", "nc", "-l", "-p", "4567") - c.Assert(err, check.IsNil, check.Commentf(out)) - - out, err = d.Cmd("run", "--link", "icc1:icc1", "busybox", "nc", "icc1", "4567") - c.Assert(err, check.IsNil, check.Commentf(out)) -} - -func (s *DockerDaemonSuite) TestDaemonLinksIpTablesRulesWhenLinkAndUnlink(c *check.C) { - bridgeName := "external-bridge" - bridgeIP := "192.169.1.1/24" - - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) - defer deleteInterface(c, bridgeName) - - err = s.d.StartWithBusybox("--bridge", bridgeName, "--icc=false") - c.Assert(err, check.IsNil) - defer s.d.Restart() - - _, err = s.d.Cmd("run", "-d", "--name", "child", "--publish", "8080:80", "busybox", "top") - c.Assert(err, check.IsNil) - _, err = s.d.Cmd("run", "-d", "--name", "parent", "--link", "child:http", "busybox", "top") - c.Assert(err, check.IsNil) - - childIP := s.d.findContainerIP("child") - parentIP := s.d.findContainerIP("parent") - - sourceRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", childIP, "--sport", "80", "-d", parentIP, "-j", "ACCEPT"} - destinationRule := []string{"-i", bridgeName, "-o", bridgeName, "-p", "tcp", "-s", parentIP, "--dport", "80", "-d", childIP, "-j", "ACCEPT"} - if !iptables.Exists("filter", "DOCKER", sourceRule...) || !iptables.Exists("filter", "DOCKER", destinationRule...) { - c.Fatal("Iptables rules not found") - } - - s.d.Cmd("rm", "--link", "parent/http") - if iptables.Exists("filter", "DOCKER", sourceRule...) || iptables.Exists("filter", "DOCKER", destinationRule...) 
{ - c.Fatal("Iptables rules should be removed when unlink") - } - - s.d.Cmd("kill", "child") - s.d.Cmd("kill", "parent") -} - -func (s *DockerDaemonSuite) TestDaemonUlimitDefaults(c *check.C) { - testRequires(c, DaemonIsLinux) - - if err := s.d.StartWithBusybox("--default-ulimit", "nofile=42:42", "--default-ulimit", "nproc=1024:1024"); err != nil { - c.Fatal(err) - } - - out, err := s.d.Cmd("run", "--ulimit", "nproc=2048", "--name=test", "busybox", "/bin/sh", "-c", "echo $(ulimit -n); echo $(ulimit -p)") - if err != nil { - c.Fatal(out, err) - } - - outArr := strings.Split(out, "\n") - if len(outArr) < 2 { - c.Fatalf("got unexpected output: %s", out) - } - nofile := strings.TrimSpace(outArr[0]) - nproc := strings.TrimSpace(outArr[1]) - - if nofile != "42" { - c.Fatalf("expected `ulimit -n` to be `42`, got: %s", nofile) - } - if nproc != "2048" { - c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) - } - - // Now restart daemon with a new default - if err := s.d.Restart("--default-ulimit", "nofile=43"); err != nil { - c.Fatal(err) - } - - out, err = s.d.Cmd("start", "-a", "test") - if err != nil { - c.Fatal(err) - } - - outArr = strings.Split(out, "\n") - if len(outArr) < 2 { - c.Fatalf("got unexpected output: %s", out) - } - nofile = strings.TrimSpace(outArr[0]) - nproc = strings.TrimSpace(outArr[1]) - - if nofile != "43" { - c.Fatalf("expected `ulimit -n` to be `43`, got: %s", nofile) - } - if nproc != "2048" { - c.Fatalf("exepcted `ulimit -p` to be 2048, got: %s", nproc) - } -} - -// #11315 -func (s *DockerDaemonSuite) TestDaemonRestartRenameContainer(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } - - if out, err := s.d.Cmd("run", "--name=test", "busybox"); err != nil { - c.Fatal(err, out) - } - - if out, err := s.d.Cmd("rename", "test", "test2"); err != nil { - c.Fatal(err, out) - } - - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } - - if out, err := s.d.Cmd("start", "test2"); err != nil { - c.Fatal(err, out) - } -} - -func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefault(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } - - out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") - c.Assert(err, check.IsNil, check.Commentf(out)) - id, err := s.d.getIDByName("test") - c.Assert(err, check.IsNil) - - logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") - - if _, err := os.Stat(logPath); err != nil { - c.Fatal(err) - } - f, err := os.Open(logPath) - if err != nil { - c.Fatal(err) - } - var res struct { - Log string `json:"log"` - Stream string `json:"stream"` - Time time.Time `json:"time"` - } - if err := json.NewDecoder(f).Decode(&res); err != nil { - c.Fatal(err) - } - if res.Log != "testline\n" { - c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") - } - if res.Stream != "stdout" { - c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") - } - if !time.Now().After(res.Time) { - c.Fatalf("Log time %v in future", res.Time) - } -} - -func (s *DockerDaemonSuite) TestDaemonLoggingDriverDefaultOverride(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } - - out, err := s.d.Cmd("run", "--name=test", "--log-driver=none", "busybox", "echo", "testline") - if err != nil { - c.Fatal(out, err) - } - id, err := s.d.getIDByName("test") - c.Assert(err, check.IsNil) - - logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") - - if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { - 
c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) - } -} - -func (s *DockerDaemonSuite) TestDaemonLoggingDriverNone(c *check.C) { - if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { - c.Fatal(err) - } - - out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") - if err != nil { - c.Fatal(out, err) - } - id, err := s.d.getIDByName("test") - c.Assert(err, check.IsNil) - - logPath := filepath.Join(s.d.folder, "graph", "containers", id, id+"-json.log") - - if _, err := os.Stat(logPath); err == nil || !os.IsNotExist(err) { - c.Fatalf("%s shouldn't exits, error on Stat: %s", logPath, err) - } -} - -func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneOverride(c *check.C) { - if err := s.d.StartWithBusybox("--log-driver=none"); err != nil { - c.Fatal(err) - } - - out, err := s.d.Cmd("run", "--name=test", "--log-driver=json-file", "busybox", "echo", "testline") - if err != nil { - c.Fatal(out, err) - } - id, err := s.d.getIDByName("test") - c.Assert(err, check.IsNil) - - logPath := filepath.Join(s.d.root, "containers", id, id+"-json.log") - - if _, err := os.Stat(logPath); err != nil { - c.Fatal(err) - } - f, err := os.Open(logPath) - if err != nil { - c.Fatal(err) - } - var res struct { - Log string `json:"log"` - Stream string `json:"stream"` - Time time.Time `json:"time"` - } - if err := json.NewDecoder(f).Decode(&res); err != nil { - c.Fatal(err) - } - if res.Log != "testline\n" { - c.Fatalf("Unexpected log line: %q, expected: %q", res.Log, "testline\n") - } - if res.Stream != "stdout" { - c.Fatalf("Unexpected stream: %q, expected: %q", res.Stream, "stdout") - } - if !time.Now().After(res.Time) { - c.Fatalf("Log time %v in future", res.Time) - } -} - -func (s *DockerDaemonSuite) TestDaemonLoggingDriverNoneLogsError(c *check.C) { - c.Assert(s.d.StartWithBusybox("--log-driver=none"), checker.IsNil) - - out, err := s.d.Cmd("run", "--name=test", "busybox", "echo", "testline") - c.Assert(err, checker.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("logs", "test") - c.Assert(err, check.NotNil, check.Commentf("Logs should fail with 'none' driver")) - expected := `"logs" command is supported only for "json-file" and "journald" logging drivers (got: none)` - c.Assert(out, checker.Contains, expected) -} - -func (s *DockerDaemonSuite) TestDaemonDots(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } - - // Now create 4 containers - if _, err := s.d.Cmd("create", "busybox"); err != nil { - c.Fatalf("Error creating container: %q", err) - } - if _, err := s.d.Cmd("create", "busybox"); err != nil { - c.Fatalf("Error creating container: %q", err) - } - if _, err := s.d.Cmd("create", "busybox"); err != nil { - c.Fatalf("Error creating container: %q", err) - } - if _, err := s.d.Cmd("create", "busybox"); err != nil { - c.Fatalf("Error creating container: %q", err) - } - - s.d.Stop() - - s.d.Start("--log-level=debug") - s.d.Stop() - content, _ := ioutil.ReadFile(s.d.logFile.Name()) - if strings.Contains(string(content), "....") { - c.Fatalf("Debug level should not have ....\n%s", string(content)) - } - - s.d.Start("--log-level=error") - s.d.Stop() - content, _ = ioutil.ReadFile(s.d.logFile.Name()) - if strings.Contains(string(content), "....") { - c.Fatalf("Error level should not have ....\n%s", string(content)) - } - - s.d.Start("--log-level=info") - s.d.Stop() - content, _ = ioutil.ReadFile(s.d.logFile.Name()) - if !strings.Contains(string(content), "....") { - c.Fatalf("Info level should have ....\n%s", string(content)) - } -} 
- -func (s *DockerDaemonSuite) TestDaemonUnixSockCleanedUp(c *check.C) { - dir, err := ioutil.TempDir("", "socket-cleanup-test") - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(dir) - - sockPath := filepath.Join(dir, "docker.sock") - if err := s.d.Start("--host", "unix://"+sockPath); err != nil { - c.Fatal(err) - } - - if _, err := os.Stat(sockPath); err != nil { - c.Fatal("socket does not exist") - } - - if err := s.d.Stop(); err != nil { - c.Fatal(err) - } - - if _, err := os.Stat(sockPath); err == nil || !os.IsNotExist(err) { - c.Fatal("unix socket is not cleaned up") - } -} - -func (s *DockerDaemonSuite) TestDaemonWithWrongkey(c *check.C) { - type Config struct { - Crv string `json:"crv"` - D string `json:"d"` - Kid string `json:"kid"` - Kty string `json:"kty"` - X string `json:"x"` - Y string `json:"y"` - } - - os.Remove("/etc/docker/key.json") - if err := s.d.Start(); err != nil { - c.Fatalf("Failed to start daemon: %v", err) - } - - if err := s.d.Stop(); err != nil { - c.Fatalf("Could not stop daemon: %v", err) - } - - config := &Config{} - bytes, err := ioutil.ReadFile("/etc/docker/key.json") - if err != nil { - c.Fatalf("Error reading key.json file: %s", err) - } - - // byte[] to Data-Struct - if err := json.Unmarshal(bytes, &config); err != nil { - c.Fatalf("Error Unmarshal: %s", err) - } - - // replace config.Kid with the fake value - config.Kid = "VSAJ:FUYR:X3H2:B2VZ:KZ6U:CJD5:K7BX:ZXHY:UZXT:P4FT:MJWG:HRJ4" - - // NEW Data-Struct to byte[] - newBytes, err := json.Marshal(&config) - if err != nil { - c.Fatalf("Error Marshal: %s", err) - } - - // write back - if err := ioutil.WriteFile("/etc/docker/key.json", newBytes, 0400); err != nil { - c.Fatalf("Error ioutil.WriteFile: %s", err) - } - - defer os.Remove("/etc/docker/key.json") - - if err := s.d.Start(); err == nil { - c.Fatalf("Daemon should not have started with a wrong key: %v", err) - } - - content, _ := ioutil.ReadFile(s.d.logFile.Name()) - - if !strings.Contains(string(content), "Public Key ID does not match") { - c.Fatal("Missing KeyID message from daemon logs") - } -} - -func (s *DockerDaemonSuite) TestDaemonRestartKillWait(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } - - out, err := s.d.Cmd("run", "-id", "busybox", "/bin/cat") - if err != nil { - c.Fatalf("Could not run /bin/cat: err=%v\n%s", err, out) - } - containerID := strings.TrimSpace(out) - - if out, err := s.d.Cmd("kill", containerID); err != nil { - c.Fatalf("Could not kill %s: err=%v\n%s", containerID, err, out) - } - - if err := s.d.Restart(); err != nil { - c.Fatalf("Could not restart daemon: %v", err) - } - - errchan := make(chan error) - go func() { - if out, err := s.d.Cmd("wait", containerID); err != nil { - errchan <- fmt.Errorf("%v:\n%s", err, out) - } - close(errchan) - }() - - select { - case <-time.After(5 * time.Second): - c.Fatal("Waiting on a stopped (killed) container timed out") - case err := <-errchan: - if err != nil { - c.Fatal(err) - } - } -} - -// TestHttpsInfo connects via two-way authenticated HTTPS to the info endpoint -func (s *DockerDaemonSuite) TestHttpsInfo(c *check.C) { - const ( - testDaemonHTTPSAddr = "tcp://localhost:4271" - ) - - if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", - "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - - daemonArgs := 
[]string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem"} - out, err := s.d.CmdWithArgs(daemonArgs, "info") - if err != nil { - c.Fatalf("Error Occurred: %s and output: %s", err, out) - } -} - -// TestHttpsRun connects via two-way authenticated HTTPS to the create, attach, start, and wait endpoints. -// https://github.com/docker/docker/issues/19280 -func (s *DockerDaemonSuite) TestHttpsRun(c *check.C) { - const ( - testDaemonHTTPSAddr = "tcp://localhost:4271" - ) - - if err := s.d.StartWithBusybox("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", - "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { - c.Fatalf("Could not start daemon with busybox: %v", err) - } - - daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-cert.pem", "--tlskey", "fixtures/https/client-key.pem"} - out, err := s.d.CmdWithArgs(daemonArgs, "run", "busybox", "echo", "TLS response") - if err != nil { - c.Fatalf("Error Occurred: %s and output: %s", err, out) - } - - if !strings.Contains(out, "TLS response") { - c.Fatalf("expected output to include `TLS response`, got %v", out) - } -} - -// TestTlsVerify verifies that --tlsverify=false turns on tls -func (s *DockerDaemonSuite) TestTlsVerify(c *check.C) { - out, err := exec.Command(dockerdBinary, "--tlsverify=false").CombinedOutput() - if err == nil || !strings.Contains(string(out), "Could not load X509 key pair") { - c.Fatalf("Daemon should not have started due to missing certs: %v\n%s", err, string(out)) - } -} - -// TestHttpsInfoRogueCert connects via two-way authenticated HTTPS to the info endpoint -// by using a rogue client certificate and checks that it fails with the expected error. 
-func (s *DockerDaemonSuite) TestHttpsInfoRogueCert(c *check.C) { - const ( - errBadCertificate = "bad certificate" - testDaemonHTTPSAddr = "tcp://localhost:4271" - ) - - if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-cert.pem", - "--tlskey", "fixtures/https/server-key.pem", "-H", testDaemonHTTPSAddr); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - - daemonArgs := []string{"--host", testDaemonHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"} - out, err := s.d.CmdWithArgs(daemonArgs, "info") - if err == nil || !strings.Contains(out, errBadCertificate) { - c.Fatalf("Expected err: %s, got instead: %s and output: %s", errBadCertificate, err, out) - } -} - -// TestHttpsInfoRogueServerCert connects via two-way authenticated HTTPS to the info endpoint -// which provides a rogue server certificate and checks that it fails with the expected error -func (s *DockerDaemonSuite) TestHttpsInfoRogueServerCert(c *check.C) { - const ( - errCaUnknown = "x509: certificate signed by unknown authority" - testDaemonRogueHTTPSAddr = "tcp://localhost:4272" - ) - if err := s.d.Start("--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/server-rogue-cert.pem", - "--tlskey", "fixtures/https/server-rogue-key.pem", "-H", testDaemonRogueHTTPSAddr); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - - daemonArgs := []string{"--host", testDaemonRogueHTTPSAddr, "--tlsverify", "--tlscacert", "fixtures/https/ca.pem", "--tlscert", "fixtures/https/client-rogue-cert.pem", "--tlskey", "fixtures/https/client-rogue-key.pem"} - out, err := s.d.CmdWithArgs(daemonArgs, "info") - if err == nil || !strings.Contains(out, errCaUnknown) { - c.Fatalf("Expected err: %s, got instead: %s and output: %s", errCaUnknown, err, out) - } -} - -func pingContainers(c *check.C, d *Daemon, expectFailure bool) { - var dargs []string - if d != nil { - dargs = []string{"--host", d.sock()} - } - - args := append(dargs, "run", "-d", "--name", "container1", "busybox", "top") - dockerCmd(c, args...) - - args = append(dargs, "run", "--rm", "--link", "container1:alias1", "busybox", "sh", "-c") - pingCmd := "ping -c 1 %s -W 1" - args = append(args, fmt.Sprintf(pingCmd, "alias1")) - _, _, err := dockerCmdWithError(args...) - - if expectFailure { - c.Assert(err, check.NotNil) - } else { - c.Assert(err, check.IsNil) - } - - args = append(dargs, "rm", "-f", "container1") - dockerCmd(c, args...) -} - -func (s *DockerDaemonSuite) TestDaemonRestartWithSocketAsVolume(c *check.C) { - c.Assert(s.d.StartWithBusybox(), check.IsNil) - - socket := filepath.Join(s.d.folder, "docker.sock") - - out, err := s.d.Cmd("run", "--restart=always", "-v", socket+":/sock", "busybox") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - c.Assert(s.d.Restart(), check.IsNil) -} - -// os.Kill should kill daemon ungracefully, leaving behind container mounts. -// A subsequent daemon restart should clean up said mounts. 
-func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonAndContainerKill(c *check.C) { - c.Assert(s.d.StartWithBusybox(), check.IsNil) - - out, err := s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - id := strings.TrimSpace(out) - c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) - mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - - // container mounts should exist even after daemon has crashed. - comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) - c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) - - // kill the container - runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", id) - if out, ec, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("Failed to run ctr, ExitCode: %d, err: %v output: %s id: %s\n", ec, err, out, id) - } - - // restart daemon. - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } - - // Now, container mounts should be gone. - mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) - c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) -} - -// os.Interrupt should perform a graceful daemon shutdown and hence cleanup mounts. -func (s *DockerDaemonSuite) TestCleanupMountsAfterGracefulShutdown(c *check.C) { - c.Assert(s.d.StartWithBusybox(), check.IsNil) - - out, err := s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - id := strings.TrimSpace(out) - - // Send SIGINT and daemon should clean up - c.Assert(s.d.cmd.Process.Signal(os.Interrupt), check.IsNil) - // Wait for the daemon to stop. - c.Assert(<-s.d.wait, checker.IsNil) - - mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - - comment := check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) - c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) -} - -func (s *DockerDaemonSuite) TestRunContainerWithBridgeNone(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - c.Assert(s.d.StartWithBusybox("-b", "none"), check.IsNil) - - out, err := s.d.Cmd("run", "--rm", "busybox", "ip", "l") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - c.Assert(strings.Contains(out, "eth0"), check.Equals, false, - check.Commentf("There shouldn't be eth0 in container in default(bridge) mode when bridge network is disabled: %s", out)) - - out, err = s.d.Cmd("run", "--rm", "--net=bridge", "busybox", "ip", "l") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - c.Assert(strings.Contains(out, "eth0"), check.Equals, false, - check.Commentf("There shouldn't be eth0 in container in bridge mode when bridge network is disabled: %s", out)) - // the extra grep and awk clean up the output of `ip` to only list the number and name of - // interfaces, allowing for different versions of ip (e.g. 
inside and outside the container) to - // be used while still verifying that the interface list is the exact same - cmd := exec.Command("sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") - stdout := bytes.NewBuffer(nil) - cmd.Stdout = stdout - if err := cmd.Run(); err != nil { - c.Fatal("Failed to get host network interface") - } - out, err = s.d.Cmd("run", "--rm", "--net=host", "busybox", "sh", "-c", "ip l | grep -E '^[0-9]+:' | awk -F: ' { print $1\":\"$2 } '") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - c.Assert(out, check.Equals, fmt.Sprintf("%s", stdout), - check.Commentf("The network interfaces in container should be the same with host when --net=host when bridge network is disabled: %s", out)) -} - -func (s *DockerDaemonSuite) TestDaemonRestartWithContainerRunning(t *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - t.Fatal(err) - } - if out, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top"); err != nil { - t.Fatal(out, err) - } - - if err := s.d.Restart(); err != nil { - t.Fatal(err) - } - // Container 'test' should be removed without error - if out, err := s.d.Cmd("rm", "test"); err != nil { - t.Fatal(out, err) - } -} - -func (s *DockerDaemonSuite) TestDaemonRestartCleanupNetns(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } - out, err := s.d.Cmd("run", "--name", "netns", "-d", "busybox", "top") - if err != nil { - c.Fatal(out, err) - } - - // Get sandbox key via inspect - out, err = s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.SandboxKey}}'", "netns") - if err != nil { - c.Fatalf("Error inspecting container: %s, %v", out, err) - } - fileName := strings.Trim(out, " \r\n'") - - if out, err := s.d.Cmd("stop", "netns"); err != nil { - c.Fatal(out, err) - } - - // Test if the file still exists - out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) - out = strings.TrimSpace(out) - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - c.Assert(out, check.Equals, fileName, check.Commentf("Output: %s", out)) - - // Remove the container and restart the daemon - if out, err := s.d.Cmd("rm", "netns"); err != nil { - c.Fatal(out, err) - } - - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } - - // Test again and see now the netns file does not exist - out, _, err = runCommandWithOutput(exec.Command("stat", "-c", "%n", fileName)) - out = strings.TrimSpace(out) - c.Assert(err, check.Not(check.IsNil), check.Commentf("Output: %s", out)) -} - -// tests regression detailed in #13964 where DOCKER_TLS_VERIFY env is ignored -func (s *DockerDaemonSuite) TestDaemonNoTlsCliTlsVerifyWithEnv(c *check.C) { - host := "tcp://localhost:4271" - c.Assert(s.d.Start("-H", host), check.IsNil) - cmd := exec.Command(dockerBinary, "-H", host, "info") - cmd.Env = []string{"DOCKER_TLS_VERIFY=1", "DOCKER_CERT_PATH=fixtures/https"} - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, check.Not(check.IsNil), check.Commentf("%s", out)) - c.Assert(strings.Contains(out, "error occurred trying to connect"), check.Equals, true) - -} - -func setupV6() error { - // Hack to get the right IPv6 address on docker0, which has already been created - return exec.Command("ip", "addr", "add", "fe80::1/64", "dev", "docker0").Run() -} - -func teardownV6() error { - return exec.Command("ip", "addr", "del", "fe80::1/64", "dev", "docker0").Run() -} - -func (s *DockerDaemonSuite) TestDaemonRestartWithContainerWithRestartPolicyAlways(c *check.C) { - c.Assert(s.d.StartWithBusybox(), 
check.IsNil) - - out, err := s.d.Cmd("run", "-d", "--restart", "always", "busybox", "top") - c.Assert(err, check.IsNil) - id := strings.TrimSpace(out) - - _, err = s.d.Cmd("stop", id) - c.Assert(err, check.IsNil) - _, err = s.d.Cmd("wait", id) - c.Assert(err, check.IsNil) - - out, err = s.d.Cmd("ps", "-q") - c.Assert(err, check.IsNil) - c.Assert(out, check.Equals, "") - - c.Assert(s.d.Restart(), check.IsNil) - - out, err = s.d.Cmd("ps", "-q") - c.Assert(err, check.IsNil) - c.Assert(strings.TrimSpace(out), check.Equals, id[:12]) -} - -func (s *DockerDaemonSuite) TestDaemonWideLogConfig(c *check.C) { - if err := s.d.StartWithBusybox("--log-opt=max-size=1k"); err != nil { - c.Fatal(err) - } - name := "logtest" - out, err := s.d.Cmd("run", "-d", "--log-opt=max-file=5", "--name", name, "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf("Output: %s, err: %v", out, err)) - - out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Config }}", name) - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - c.Assert(out, checker.Contains, "max-size:1k") - c.Assert(out, checker.Contains, "max-file:5") - - out, err = s.d.Cmd("inspect", "-f", "{{ .HostConfig.LogConfig.Type }}", name) - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - c.Assert(strings.TrimSpace(out), checker.Equals, "json-file") -} - -func (s *DockerDaemonSuite) TestDaemonRestartWithPausedContainer(c *check.C) { - if err := s.d.StartWithBusybox(); err != nil { - c.Fatal(err) - } - if out, err := s.d.Cmd("run", "-i", "-d", "--name", "test", "busybox", "top"); err != nil { - c.Fatal(err, out) - } - if out, err := s.d.Cmd("pause", "test"); err != nil { - c.Fatal(err, out) - } - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } - - errchan := make(chan error) - go func() { - out, err := s.d.Cmd("start", "test") - if err != nil { - errchan <- fmt.Errorf("%v:\n%s", err, out) - } - name := strings.TrimSpace(out) - if name != "test" { - errchan <- fmt.Errorf("Paused container start error on docker daemon restart, expected 'test' but got '%s'", name) - } - close(errchan) - }() - - select { - case <-time.After(5 * time.Second): - c.Fatal("Waiting on start a container timed out") - case err := <-errchan: - if err != nil { - c.Fatal(err) - } - } -} - -func (s *DockerDaemonSuite) TestDaemonRestartRmVolumeInUse(c *check.C) { - c.Assert(s.d.StartWithBusybox(), check.IsNil) - - out, err := s.d.Cmd("create", "-v", "test:/foo", "busybox") - c.Assert(err, check.IsNil, check.Commentf(out)) - - c.Assert(s.d.Restart(), check.IsNil) - - out, err = s.d.Cmd("volume", "rm", "test") - c.Assert(err, check.NotNil, check.Commentf("should not be able to remove in use volume after daemon restart")) - c.Assert(out, checker.Contains, "in use") -} - -func (s *DockerDaemonSuite) TestDaemonRestartLocalVolumes(c *check.C) { - c.Assert(s.d.Start(), check.IsNil) - - _, err := s.d.Cmd("volume", "create", "--name", "test") - c.Assert(err, check.IsNil) - c.Assert(s.d.Restart(), check.IsNil) - - _, err = s.d.Cmd("volume", "inspect", "test") - c.Assert(err, check.IsNil) -} - -func (s *DockerDaemonSuite) TestDaemonCorruptedLogDriverAddress(c *check.C) { - c.Assert(s.d.Start("--log-driver=syslog", "--log-opt", "syslog-address=corrupted:42"), check.NotNil) - expected := "Failed to set log opts: syslog-address should be in form proto://address" - runCmd := exec.Command("grep", expected, s.d.LogFileName()) - if out, _, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", 
expected, out, err) - } -} - -func (s *DockerDaemonSuite) TestDaemonCorruptedFluentdAddress(c *check.C) { - c.Assert(s.d.Start("--log-driver=fluentd", "--log-opt", "fluentd-address=corrupted:c"), check.NotNil) - expected := "Failed to set log opts: invalid fluentd-address corrupted:c: " - runCmd := exec.Command("grep", expected, s.d.LogFileName()) - if out, _, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("Expected %q message; but doesn't exist in log: %q, err: %v", expected, out, err) - } -} - -func (s *DockerDaemonSuite) TestDaemonStartWithoutHost(c *check.C) { - s.d.useDefaultHost = true - defer func() { - s.d.useDefaultHost = false - }() - c.Assert(s.d.Start(), check.IsNil) -} - -func (s *DockerDaemonSuite) TestDaemonStartWithDefalutTlsHost(c *check.C) { - s.d.useDefaultTLSHost = true - defer func() { - s.d.useDefaultTLSHost = false - }() - if err := s.d.Start( - "--tlsverify", - "--tlscacert", "fixtures/https/ca.pem", - "--tlscert", "fixtures/https/server-cert.pem", - "--tlskey", "fixtures/https/server-key.pem"); err != nil { - c.Fatalf("Could not start daemon: %v", err) - } - - // The client with --tlsverify should also use default host localhost:2376 - tmpHost := os.Getenv("DOCKER_HOST") - defer func() { - os.Setenv("DOCKER_HOST", tmpHost) - }() - - os.Setenv("DOCKER_HOST", "") - - out, _ := dockerCmd( - c, - "--tlsverify", - "--tlscacert", "fixtures/https/ca.pem", - "--tlscert", "fixtures/https/client-cert.pem", - "--tlskey", "fixtures/https/client-key.pem", - "version", - ) - if !strings.Contains(out, "Server") { - c.Fatalf("docker version should return information of server side") - } -} - -func (s *DockerDaemonSuite) TestBridgeIPIsExcludedFromAllocatorPool(c *check.C) { - defaultNetworkBridge := "docker0" - deleteInterface(c, defaultNetworkBridge) - - bridgeIP := "192.169.1.1" - bridgeRange := bridgeIP + "/30" - - err := s.d.StartWithBusybox("--bip", bridgeRange) - c.Assert(err, check.IsNil) - defer s.d.Restart() - - var cont int - for { - contName := fmt.Sprintf("container%d", cont) - _, err = s.d.Cmd("run", "--name", contName, "-d", "busybox", "/bin/sleep", "2") - if err != nil { - // pool exhausted - break - } - ip, err := s.d.Cmd("inspect", "--format", "'{{.NetworkSettings.IPAddress}}'", contName) - c.Assert(err, check.IsNil) - - c.Assert(ip, check.Not(check.Equals), bridgeIP) - cont++ - } -} - -// Test daemon for no space left on device error -func (s *DockerDaemonSuite) TestDaemonNoSpaceLeftOnDeviceError(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux, Network) - - testDir, err := ioutil.TempDir("", "no-space-left-on-device-test") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(testDir) - c.Assert(mount.MakeRShared(testDir), checker.IsNil) - defer mount.Unmount(testDir) - - // create a 2MiB image and mount it as graph root - // Why in a container? 
Because `mount` sometimes behaves weirdly and often fails outright on this test in debian:jessie (which is what the test suite runs under if run from the Makefile) - dockerCmd(c, "run", "--rm", "-v", testDir+":/test", "busybox", "sh", "-c", "dd of=/test/testfs.img bs=1M seek=2 count=0") - out, _, err := runCommandWithOutput(exec.Command("mkfs.ext4", "-F", filepath.Join(testDir, "testfs.img"))) // `mkfs.ext4` is not in busybox - c.Assert(err, checker.IsNil, check.Commentf(out)) - - cmd := exec.Command("losetup", "-f", "--show", filepath.Join(testDir, "testfs.img")) - loout, err := cmd.CombinedOutput() - c.Assert(err, checker.IsNil) - loopname := strings.TrimSpace(string(loout)) - defer exec.Command("losetup", "-d", loopname).Run() - - dockerCmd(c, "run", "--privileged", "--rm", "-v", testDir+":/test:shared", "busybox", "sh", "-c", fmt.Sprintf("mkdir -p /test/test-mount && mount -t ext4 -no loop,rw %v /test/test-mount", loopname)) - defer mount.Unmount(filepath.Join(testDir, "test-mount")) - - err = s.d.Start("--graph", filepath.Join(testDir, "test-mount")) - defer s.d.Stop() - c.Assert(err, check.IsNil) - - // pull a repository large enough to fill the mount point - pullOut, err := s.d.Cmd("pull", "registry:2") - c.Assert(err, checker.NotNil, check.Commentf(pullOut)) - c.Assert(pullOut, checker.Contains, "no space left on device") -} - -// Test daemon restart with container links + auto restart -func (s *DockerDaemonSuite) TestDaemonRestartContainerLinksRestart(c *check.C) { - d := NewDaemon(c) - defer d.Stop() - err := d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - parent1Args := []string{} - parent2Args := []string{} - wg := sync.WaitGroup{} - maxChildren := 10 - chErr := make(chan error, maxChildren) - - for i := 0; i < maxChildren; i++ { - wg.Add(1) - name := fmt.Sprintf("test%d", i) - - if i < maxChildren/2 { - parent1Args = append(parent1Args, []string{"--link", name}...) - } else { - parent2Args = append(parent2Args, []string{"--link", name}...) - } - - go func() { - _, err = d.Cmd("run", "-d", "--name", name, "--restart=always", "busybox", "top") - chErr <- err - wg.Done() - }() - } - - wg.Wait() - close(chErr) - for err := range chErr { - c.Assert(err, check.IsNil) - } - - parent1Args = append([]string{"run", "-d"}, parent1Args...) - parent1Args = append(parent1Args, []string{"--name=parent1", "--restart=always", "busybox", "top"}...) - parent2Args = append([]string{"run", "-d"}, parent2Args...) - parent2Args = append(parent2Args, []string{"--name=parent2", "--restart=always", "busybox", "top"}...) - - _, err = d.Cmd(parent1Args[0], parent1Args[1:]...) - c.Assert(err, check.IsNil) - _, err = d.Cmd(parent2Args[0], parent2Args[1:]...) 
- c.Assert(err, check.IsNil) - - err = d.Stop() - c.Assert(err, check.IsNil) - // clear the log file -- we don't need any of it, but may need it for the next part - // can ignore the error here, this is just a cleanup - os.Truncate(d.LogFileName(), 0) - err = d.Start() - c.Assert(err, check.IsNil) - - for _, num := range []string{"1", "2"} { - out, err := d.Cmd("inspect", "-f", "{{ .State.Running }}", "parent"+num) - c.Assert(err, check.IsNil) - if strings.TrimSpace(out) != "true" { - log, _ := ioutil.ReadFile(d.LogFileName()) - c.Fatalf("parent container is not running\n%s", string(log)) - } - } -} - -func (s *DockerDaemonSuite) TestDaemonCgroupParent(c *check.C) { - testRequires(c, DaemonIsLinux) - - cgroupParent := "test" - name := "cgroup-test" - - err := s.d.StartWithBusybox("--cgroup-parent", cgroupParent) - c.Assert(err, check.IsNil) - defer s.d.Restart() - - out, err := s.d.Cmd("run", "--name", name, "busybox", "cat", "/proc/self/cgroup") - c.Assert(err, checker.IsNil) - cgroupPaths := parseCgroupPaths(string(out)) - c.Assert(len(cgroupPaths), checker.Not(checker.Equals), 0, check.Commentf("unexpected output - %q", string(out))) - out, err = s.d.Cmd("inspect", "-f", "{{.Id}}", name) - c.Assert(err, checker.IsNil) - id := strings.TrimSpace(string(out)) - expectedCgroup := path.Join(cgroupParent, id) - found := false - for _, path := range cgroupPaths { - if strings.HasSuffix(path, expectedCgroup) { - found = true - break - } - } - c.Assert(found, checker.True, check.Commentf("Cgroup path for container (%s) wasn't found in cgroups file: %s", expectedCgroup, cgroupPaths)) -} - -func (s *DockerDaemonSuite) TestDaemonRestartWithLinks(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support links - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) - - out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("run", "--name=test2", "--link", "test:abc", "busybox", "sh", "-c", "ping -c 1 -w 1 abc") - c.Assert(err, check.IsNil, check.Commentf(out)) - - c.Assert(s.d.Restart(), check.IsNil) - - // should fail since test is not running yet - out, err = s.d.Cmd("start", "test2") - c.Assert(err, check.NotNil, check.Commentf(out)) - - out, err = s.d.Cmd("start", "test") - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = s.d.Cmd("start", "-a", "test2") - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(strings.Contains(out, "1 packets transmitted, 1 packets received"), check.Equals, true, check.Commentf(out)) -} - -func (s *DockerDaemonSuite) TestDaemonRestartWithNames(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support links - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) - - out, err := s.d.Cmd("create", "--name=test", "busybox") - c.Assert(err, check.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("run", "-d", "--name=test2", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf(out)) - test2ID := strings.TrimSpace(out) - - out, err = s.d.Cmd("run", "-d", "--name=test3", "--link", "test2:abc", "busybox", "top") - test3ID := strings.TrimSpace(out) - - c.Assert(s.d.Restart(), check.IsNil) - - out, err = s.d.Cmd("create", "--name=test", "busybox") - c.Assert(err, check.NotNil, check.Commentf("expected error trying to create container with duplicate name")) - // this one is no longer needed, removing simplifies the remainder of the test - out, err = s.d.Cmd("rm", "-f", "test") - c.Assert(err, check.IsNil, check.Commentf(out)) - - 
out, err = s.d.Cmd("ps", "-a", "--no-trunc") - c.Assert(err, check.IsNil, check.Commentf(out)) - - lines := strings.Split(strings.TrimSpace(out), "\n")[1:] - - test2validated := false - test3validated := false - for _, line := range lines { - fields := strings.Fields(line) - names := fields[len(fields)-1] - switch fields[0] { - case test2ID: - c.Assert(names, check.Equals, "test2,test3/abc") - test2validated = true - case test3ID: - c.Assert(names, check.Equals, "test3") - test3validated = true - } - } - - c.Assert(test2validated, check.Equals, true) - c.Assert(test3validated, check.Equals, true) -} - -// TestDaemonRestartWithKilledRunningContainer requires live restore of running containers -func (s *DockerDaemonSuite) TestDaemonRestartWithKilledRunningContainer(t *check.C) { - // TODO(mlaventure): Not sure what would the exit code be on windows - testRequires(t, DaemonIsLinux) - if err := s.d.StartWithBusybox(); err != nil { - t.Fatal(err) - } - - cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") - defer s.d.Stop() - if err != nil { - t.Fatal(cid, err) - } - cid = strings.TrimSpace(cid) - - pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid) - t.Assert(err, check.IsNil) - pid = strings.TrimSpace(pid) - - // Kill the daemon - if err := s.d.Kill(); err != nil { - t.Fatal(err) - } - - // kill the container - runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "kill", cid) - if out, ec, err := runCommandWithOutput(runCmd); err != nil { - t.Fatalf("Failed to run ctr, ExitCode: %d, err: '%v' output: '%s' cid: '%s'\n", ec, err, out, cid) - } - - // Give time to containerd to process the command if we don't - // the exit event might be received after we do the inspect - pidCmd := exec.Command("kill", "-0", pid) - _, ec, _ := runCommandWithOutput(pidCmd) - for ec == 0 { - time.Sleep(1 * time.Second) - _, ec, _ = runCommandWithOutput(pidCmd) - } - - // restart the daemon - if err := s.d.Start(); err != nil { - t.Fatal(err) - } - - // Check that we've got the correct exit code - out, err := s.d.Cmd("inspect", "-f", "{{.State.ExitCode}}", cid) - t.Assert(err, check.IsNil) - - out = strings.TrimSpace(out) - if out != "143" { - t.Fatalf("Expected exit code '%s' got '%s' for container '%s'\n", "143", out, cid) - } - -} - -// os.Kill should kill daemon ungracefully, leaving behind live containers. -// The live containers should be known to the restarted daemon. Stopping -// them now, should remove the mounts. -func (s *DockerDaemonSuite) TestCleanupMountsAfterDaemonCrash(c *check.C) { - testRequires(c, DaemonIsLinux) - c.Assert(s.d.StartWithBusybox("--live-restore"), check.IsNil) - - out, err := s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - id := strings.TrimSpace(out) - - c.Assert(s.d.cmd.Process.Signal(os.Kill), check.IsNil) - mountOut, err := ioutil.ReadFile("/proc/self/mountinfo") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - - // container mounts should exist even after daemon has crashed. - comment := check.Commentf("%s should stay mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) - c.Assert(strings.Contains(string(mountOut), id), check.Equals, true, comment) - - // restart daemon. - if err := s.d.Restart("--live-restore"); err != nil { - c.Fatal(err) - } - - // container should be running. 
- out, err = s.d.Cmd("inspect", "--format='{{.State.Running}}'", id) - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - out = strings.TrimSpace(out) - if out != "true" { - c.Fatalf("Container %s expected to stay alive after daemon restart", id) - } - - // 'docker stop' should work. - out, err = s.d.Cmd("stop", id) - c.Assert(err, check.IsNil, check.Commentf("Output: %s", out)) - - // Now, container mounts should be gone. - mountOut, err = ioutil.ReadFile("/proc/self/mountinfo") - c.Assert(err, check.IsNil, check.Commentf("Output: %s", mountOut)) - comment = check.Commentf("%s is still mounted from older daemon start:\nDaemon root repository %s\n%s", id, s.d.folder, mountOut) - c.Assert(strings.Contains(string(mountOut), id), check.Equals, false, comment) -} - -// TestDaemonRestartWithUnpausedRunningContainer requires live restore of running containers. -func (s *DockerDaemonSuite) TestDaemonRestartWithUnpausedRunningContainer(t *check.C) { - // TODO(mlaventure): Not sure what would the exit code be on windows - testRequires(t, DaemonIsLinux) - if err := s.d.StartWithBusybox("--live-restore"); err != nil { - t.Fatal(err) - } - - cid, err := s.d.Cmd("run", "-d", "--name", "test", "busybox", "top") - defer s.d.Stop() - if err != nil { - t.Fatal(cid, err) - } - cid = strings.TrimSpace(cid) - - pid, err := s.d.Cmd("inspect", "-f", "{{.State.Pid}}", cid) - t.Assert(err, check.IsNil) - pid = strings.TrimSpace(pid) - - // pause the container - if _, err := s.d.Cmd("pause", cid); err != nil { - t.Fatal(cid, err) - } - - // Kill the daemon - if err := s.d.Kill(); err != nil { - t.Fatal(err) - } - - // resume the container - runCmd := exec.Command(ctrBinary, "--address", "unix:///var/run/docker/libcontainerd/docker-containerd.sock", "containers", "resume", cid) - if out, ec, err := runCommandWithOutput(runCmd); err != nil { - t.Fatalf("Failed to run ctr, ExitCode: %d, err: '%v' output: '%s' cid: '%s'\n", ec, err, out, cid) - } - - // Give time to containerd to process the command if we don't - // the resume event might be received after we do the inspect - pidCmd := exec.Command("kill", "-0", pid) - _, ec, _ := runCommandWithOutput(pidCmd) - for ec == 0 { - time.Sleep(1 * time.Second) - _, ec, _ = runCommandWithOutput(pidCmd) - } - - // restart the daemon - if err := s.d.Start("--live-restore"); err != nil { - t.Fatal(err) - } - - // Check that we've got the correct status - out, err := s.d.Cmd("inspect", "-f", "{{.State.Status}}", cid) - t.Assert(err, check.IsNil) - - out = strings.TrimSpace(out) - if out != "running" { - t.Fatalf("Expected exit code '%s' got '%s' for container '%s'\n", "running", out, cid) - } - if _, err := s.d.Cmd("kill", cid); err != nil { - t.Fatal(err) - } -} - -// TestRunLinksChanged checks that creating a new container with the same name does not update links -// this ensures that the old, pre gh#16032 functionality continues on -func (s *DockerDaemonSuite) TestRunLinksChanged(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support links - err := s.d.StartWithBusybox() - c.Assert(err, check.IsNil) - - out, err := s.d.Cmd("run", "-d", "--name=test", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("run", "--name=test2", "--link=test:abc", "busybox", "sh", "-c", "ping -c 1 abc") - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "1 packets transmitted, 1 packets received") - - out, err = s.d.Cmd("rm", "-f", "test") - c.Assert(err, check.IsNil, check.Commentf(out)) - - 
out, err = s.d.Cmd("run", "-d", "--name=test", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = s.d.Cmd("start", "-a", "test2") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") - - err = s.d.Restart() - c.Assert(err, check.IsNil) - out, err = s.d.Cmd("start", "-a", "test2") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, check.Not(checker.Contains), "1 packets transmitted, 1 packets received") -} - -func (s *DockerDaemonSuite) TestDaemonStartWithoutColors(c *check.C) { - testRequires(c, DaemonIsLinux, NotPpc64le) - newD := NewDaemon(c) - - infoLog := "\x1b[34mINFO\x1b" - - p, tty, err := pty.Open() - c.Assert(err, checker.IsNil) - defer func() { - tty.Close() - p.Close() - }() - - b := bytes.NewBuffer(nil) - go io.Copy(b, p) - - // Enable coloring explicitly - newD.StartWithLogFile(tty, "--raw-logs=false") - newD.Stop() - c.Assert(b.String(), checker.Contains, infoLog) - - b.Reset() - - // Disable coloring explicitly - newD.StartWithLogFile(tty, "--raw-logs=true") - newD.Stop() - c.Assert(b.String(), check.Not(checker.Contains), infoLog) -} - -func (s *DockerDaemonSuite) TestDaemonDebugLog(c *check.C) { - testRequires(c, DaemonIsLinux, NotPpc64le) - newD := NewDaemon(c) - - debugLog := "\x1b[37mDEBU\x1b" - - p, tty, err := pty.Open() - c.Assert(err, checker.IsNil) - defer func() { - tty.Close() - p.Close() - }() - - b := bytes.NewBuffer(nil) - go io.Copy(b, p) - - newD.StartWithLogFile(tty, "--debug") - newD.Stop() - c.Assert(b.String(), checker.Contains, debugLog) -} - -func (s *DockerSuite) TestDaemonDiscoveryBackendConfigReload(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - // daemon config file - daemonConfig := `{ "debug" : false }` - configFilePath := "test.json" - - configFile, err := os.Create(configFilePath) - c.Assert(err, checker.IsNil) - fmt.Fprintf(configFile, "%s", daemonConfig) - - d := NewDaemon(c) - err = d.Start(fmt.Sprintf("--config-file=%s", configFilePath)) - c.Assert(err, checker.IsNil) - defer d.Stop() - - // daemon config file - daemonConfig = `{ - "cluster-store": "consul://consuladdr:consulport/some/path", - "cluster-advertise": "192.168.56.100:0", - "debug" : false - }` - - configFile.Close() - os.Remove(configFilePath) - - configFile, err = os.Create(configFilePath) - c.Assert(err, checker.IsNil) - defer os.Remove(configFilePath) - fmt.Fprintf(configFile, "%s", daemonConfig) - configFile.Close() - - syscall.Kill(d.cmd.Process.Pid, syscall.SIGHUP) - - time.Sleep(3 * time.Second) - - out, err := d.Cmd("info") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: consul://consuladdr:consulport/some/path")) - c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: 192.168.56.100:0")) -} - -// Test for #21956 -func (s *DockerDaemonSuite) TestDaemonLogOptions(c *check.C) { - err := s.d.StartWithBusybox("--log-driver=syslog", "--log-opt=syslog-address=udp://127.0.0.1:514") - c.Assert(err, check.IsNil) - - out, err := s.d.Cmd("run", "-d", "--log-driver=json-file", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf(out)) - id := strings.TrimSpace(out) - - out, err = s.d.Cmd("inspect", "--format='{{.HostConfig.LogConfig}}'", id) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "{json-file map[]}") -} - -// Test case for #20936, #22443 -func (s *DockerDaemonSuite) TestDaemonMaxConcurrency(c *check.C) { - 
c.Assert(s.d.Start("--max-concurrent-uploads=6", "--max-concurrent-downloads=8"), check.IsNil) - - expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 6"` - expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) -} - -// Test case for #20936, #22443 -func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFile(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - // daemon config file - configFilePath := "test.json" - configFile, err := os.Create(configFilePath) - c.Assert(err, checker.IsNil) - defer os.Remove(configFilePath) - - daemonConfig := `{ "max-concurrent-downloads" : 8 }` - fmt.Fprintf(configFile, "%s", daemonConfig) - configFile.Close() - c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) - - expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` - expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 8"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) - - configFile, err = os.Create(configFilePath) - c.Assert(err, checker.IsNil) - daemonConfig = `{ "max-concurrent-uploads" : 7, "max-concurrent-downloads" : 9 }` - fmt.Fprintf(configFile, "%s", daemonConfig) - configFile.Close() - - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) - - time.Sleep(3 * time.Second) - - expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 7"` - expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 9"` - content, _ = ioutil.ReadFile(s.d.logFile.Name()) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) -} - -// Test case for #20936, #22443 -func (s *DockerDaemonSuite) TestDaemonMaxConcurrencyWithConfigFileReload(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - // daemon config file - configFilePath := "test.json" - configFile, err := os.Create(configFilePath) - c.Assert(err, checker.IsNil) - defer os.Remove(configFilePath) - - daemonConfig := `{ "max-concurrent-uploads" : null }` - fmt.Fprintf(configFile, "%s", daemonConfig) - configFile.Close() - c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) - - expectedMaxConcurrentUploads := `level=debug msg="Max Concurrent Uploads: 5"` - expectedMaxConcurrentDownloads := `level=debug msg="Max Concurrent Downloads: 3"` - content, _ := ioutil.ReadFile(s.d.logFile.Name()) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) - - configFile, err = os.Create(configFilePath) - c.Assert(err, checker.IsNil) - daemonConfig = `{ "max-concurrent-uploads" : 1, "max-concurrent-downloads" : null }` - fmt.Fprintf(configFile, "%s", daemonConfig) - configFile.Close() - - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) - - time.Sleep(3 * time.Second) - - expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 1"` - expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` - content, _ = ioutil.ReadFile(s.d.logFile.Name()) 
- c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) - - configFile, err = os.Create(configFilePath) - c.Assert(err, checker.IsNil) - daemonConfig = `{ "labels":["foo=bar"] }` - fmt.Fprintf(configFile, "%s", daemonConfig) - configFile.Close() - - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) - - time.Sleep(3 * time.Second) - - expectedMaxConcurrentUploads = `level=debug msg="Reset Max Concurrent Uploads: 5"` - expectedMaxConcurrentDownloads = `level=debug msg="Reset Max Concurrent Downloads: 3"` - content, _ = ioutil.ReadFile(s.d.logFile.Name()) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentUploads) - c.Assert(string(content), checker.Contains, expectedMaxConcurrentDownloads) -} - -func (s *DockerDaemonSuite) TestBuildOnDisabledBridgeNetworkDaemon(c *check.C) { - err := s.d.StartWithBusybox("-b=none", "--iptables=false") - c.Assert(err, check.IsNil) - s.d.c.Logf("dockerBinary %s", dockerBinary) - out, code, err := s.d.buildImageWithOut("busyboxs", - `FROM busybox - RUN cat /etc/hosts`, false) - comment := check.Commentf("Failed to build image. output %s, exitCode %d, err %v", out, code, err) - c.Assert(err, check.IsNil, comment) - c.Assert(code, check.Equals, 0, comment) -} - -// Test case for #21976 -func (s *DockerDaemonSuite) TestDaemonDnsInHostMode(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - err := s.d.StartWithBusybox("--dns", "1.2.3.4") - c.Assert(err, checker.IsNil) - - expectedOutput := "nameserver 1.2.3.4" - out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) -} - -// Test case for #21976 -func (s *DockerDaemonSuite) TestDaemonDnsSearchInHostMode(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - err := s.d.StartWithBusybox("--dns-search", "example.com") - c.Assert(err, checker.IsNil) - - expectedOutput := "search example.com" - out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) -} - -// Test case for #21976 -func (s *DockerDaemonSuite) TestDaemonDnsOptionsInHostMode(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - err := s.d.StartWithBusybox("--dns-opt", "timeout:3") - c.Assert(err, checker.IsNil) - - expectedOutput := "options timeout:3" - out, _ := s.d.Cmd("run", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) -} - -func (s *DockerDaemonSuite) TestRunWithRuntimeFromConfigFile(c *check.C) { - conf, err := ioutil.TempFile("", "config-file-") - c.Assert(err, check.IsNil) - configName := conf.Name() - conf.Close() - defer os.Remove(configName) - - config := ` -{ - "runtimes": { - "oci": { - "path": "docker-runc" - }, - "vm": { - "path": "/usr/local/bin/vm-manager", - "runtimeArgs": [ - "--debug" - ] - } - } -} -` - ioutil.WriteFile(configName, []byte(config), 0644) - err = s.d.StartWithBusybox("--config-file", configName) - c.Assert(err, check.IsNil) - - // Run with default runtime - out, err := s.d.Cmd("run", "--rm", "busybox", "ls") - c.Assert(err, check.IsNil, check.Commentf(out)) - - // Run with default runtime explicitly - out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls") - 
-	c.Assert(err, check.IsNil, check.Commentf(out))
-
-	// Run with oci (same path as default) but keep it around
-	out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-
-	// Run with "vm"
-	out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls")
-	c.Assert(err, check.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory")
-
-	// Reset config to only have the default
-	config = `
-{
-    "runtimes": {
-    }
-}
-`
-	ioutil.WriteFile(configName, []byte(config), 0644)
-	syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP)
-	// Give daemon time to reload config
-	<-time.After(1 * time.Second)
-
-	// Run with default runtime
-	out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-
-	// Run with "oci"
-	out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls")
-	c.Assert(err, check.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "Unknown runtime specified oci")
-
-	// Start previously created container with oci
-	out, err = s.d.Cmd("start", "oci-runtime-ls")
-	c.Assert(err, check.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "Unknown runtime specified oci")
-
-	// Check that we can't override the default runtime
-	config = `
-{
-    "runtimes": {
-        "runc": {
-            "path": "my-runc"
-        }
-    }
-}
-`
-	ioutil.WriteFile(configName, []byte(config), 0644)
-	syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP)
-	// Give daemon time to reload config
-	<-time.After(1 * time.Second)
-
-	content, _ := ioutil.ReadFile(s.d.logFile.Name())
-	c.Assert(string(content), checker.Contains, `file configuration validation failed (runtime name 'runc' is reserved)`)
-
-	// Check that we can select a default runtime
-	config = `
-{
-    "default-runtime": "vm",
-    "runtimes": {
-        "oci": {
-            "path": "docker-runc"
-        },
-        "vm": {
-            "path": "/usr/local/bin/vm-manager",
-            "runtimeArgs": [
-                "--debug"
-            ]
-        }
-    }
-}
-`
-	ioutil.WriteFile(configName, []byte(config), 0644)
-	syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP)
-	// Give daemon time to reload config
-	<-time.After(1 * time.Second)
-
-	out, err = s.d.Cmd("run", "--rm", "busybox", "ls")
-	c.Assert(err, check.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory")
-
-	// Run with default runtime explicitly
-	out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-}
-
-func (s *DockerDaemonSuite) TestRunWithRuntimeFromCommandLine(c *check.C) {
-	err := s.d.StartWithBusybox("--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
-	c.Assert(err, check.IsNil)
-
-	// Run with default runtime
-	out, err := s.d.Cmd("run", "--rm", "busybox", "ls")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-
-	// Run with default runtime explicitly
-	out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-
-	// Run with oci (same path as default) but keep it around
-	out, err = s.d.Cmd("run", "--name", "oci-runtime-ls", "--runtime=oci", "busybox", "ls")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-
-	// Run with "vm"
-	out, err = s.d.Cmd("run", "--rm", "--runtime=vm", "busybox", "ls")
-	c.Assert(err, check.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory")
-
-	// Start a daemon without any extra runtimes
-	s.d.Stop()
-	err = s.d.StartWithBusybox()
-	c.Assert(err, check.IsNil)
-
-	// Run with default runtime
-	out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-
-	// Run with "oci"
-	out, err = s.d.Cmd("run", "--rm", "--runtime=oci", "busybox", "ls")
-	c.Assert(err, check.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "Unknown runtime specified oci")
-
-	// Start previously created container with oci
-	out, err = s.d.Cmd("start", "oci-runtime-ls")
-	c.Assert(err, check.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "Unknown runtime specified oci")
-
-	// Check that we can't override the default runtime
-	s.d.Stop()
-	err = s.d.Start("--add-runtime", "runc=my-runc")
-	c.Assert(err, check.NotNil)
-
-	content, _ := ioutil.ReadFile(s.d.logFile.Name())
-	c.Assert(string(content), checker.Contains, `runtime name 'runc' is reserved`)
-
-	// Check that we can select a default runtime
-	s.d.Stop()
-	err = s.d.StartWithBusybox("--default-runtime=vm", "--add-runtime", "oci=docker-runc", "--add-runtime", "vm=/usr/local/bin/vm-manager")
-	c.Assert(err, check.IsNil)
-
-	out, err = s.d.Cmd("run", "--rm", "busybox", "ls")
-	c.Assert(err, check.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "/usr/local/bin/vm-manager: no such file or directory")
-
-	// Run with default runtime explicitly
-	out, err = s.d.Cmd("run", "--rm", "--runtime=runc", "busybox", "ls")
-	c.Assert(err, check.IsNil, check.Commentf(out))
-}
diff --git a/integration-cli/docker_cli_diff_test.go b/integration-cli/docker_cli_diff_test.go
deleted file mode 100644
index 957617c97d..0000000000
--- a/integration-cli/docker_cli_diff_test.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package main
-
-import (
-	"strings"
-
-	"github.com/docker/docker/pkg/integration/checker"
-	"github.com/go-check/check"
-)
-
-// ensure that an added file shows up in docker diff
-func (s *DockerSuite) TestDiffFilenameShownInOutput(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-	containerCmd := `echo foo > /root/bar`
-	out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd)
-
-	cleanCID := strings.TrimSpace(out)
-	out, _ = dockerCmd(c, "diff", cleanCID)
-
-	found := false
-	for _, line := range strings.Split(out, "\n") {
-		if strings.Contains("A /root/bar", line) {
-			found = true
-			break
-		}
-	}
-	c.Assert(found, checker.True)
-}
-
-// test to ensure GH #3840 doesn't occur any more
-func (s *DockerSuite) TestDiffEnsureInitLayerFilesAreIgnored(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-	// this is a list of files which shouldn't show up in `docker diff`
-	initLayerFiles := []string{"/etc/resolv.conf", "/etc/hostname", "/etc/hosts", "/.dockerenv"}
-	containerCount := 5
-
-	// we might not run into this problem from the first run, so start a few containers
-	for i := 0; i < containerCount; i++ {
-		containerCmd := `echo foo > /root/bar`
-		out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", containerCmd)
-
-		cleanCID := strings.TrimSpace(out)
-		out, _ = dockerCmd(c, "diff", cleanCID)
-
-		for _, filename := range initLayerFiles {
-			c.Assert(out, checker.Not(checker.Contains), filename)
-		}
-	}
-}
-
-func (s *DockerSuite) TestDiffEnsureDefaultDevs(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-	out, _ := dockerCmd(c, "run", "-d", "busybox", "sleep", "0")
-
-	cleanCID := strings.TrimSpace(out)
-	out, _ = dockerCmd(c, "diff", cleanCID)
-
-	expected := map[string]bool{
-		"C /dev":         true,
/dev/full": true, // busybox - "C /dev/ptmx": true, // libcontainer - "A /dev/mqueue": true, - "A /dev/kmsg": true, - "A /dev/fd": true, - "A /dev/fuse": true, - "A /dev/ptmx": true, - "A /dev/null": true, - "A /dev/random": true, - "A /dev/stdout": true, - "A /dev/stderr": true, - "A /dev/tty1": true, - "A /dev/stdin": true, - "A /dev/tty": true, - "A /dev/urandom": true, - "A /dev/zero": true, - } - - for _, line := range strings.Split(out, "\n") { - c.Assert(line == "" || expected[line], checker.True, check.Commentf(line)) - } -} - -// https://github.com/docker/docker/pull/14381#discussion_r33859347 -func (s *DockerSuite) TestDiffEmptyArgClientError(c *check.C) { - out, _, err := dockerCmdWithError("diff", "") - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Contains, "Container name cannot be empty") -} diff --git a/integration-cli/docker_cli_events_test.go b/integration-cli/docker_cli_events_test.go deleted file mode 100644 index 0f63fffdce..0000000000 --- a/integration-cli/docker_cli_events_test.go +++ /dev/null @@ -1,743 +0,0 @@ -package main - -import ( - "bufio" - "fmt" - "io/ioutil" - "net/http" - "os" - "os/exec" - "strings" - "sync" - "time" - - "github.com/docker/docker/daemon/events/testutils" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestEventsTimestampFormats(c *check.C) { - name := "events-time-format-test" - - // Start stopwatch, generate an event - start := daemonTime(c) - time.Sleep(1100 * time.Millisecond) // so that first event occur in different second from since (just for the case) - dockerCmd(c, "run", "--rm", "--name", name, "busybox", "true") - time.Sleep(1100 * time.Millisecond) // so that until > since - end := daemonTime(c) - - // List of available time formats to --since - unixTs := func(t time.Time) string { return fmt.Sprintf("%v", t.Unix()) } - rfc3339 := func(t time.Time) string { return t.Format(time.RFC3339) } - duration := func(t time.Time) string { return time.Now().Sub(t).String() } - - // --since=$start must contain only the 'untag' event - for _, f := range []func(time.Time) string{unixTs, rfc3339, duration} { - since, until := f(start), f(end) - out, _ := dockerCmd(c, "events", "--since="+since, "--until="+until) - events := strings.Split(out, "\n") - events = events[:len(events)-1] - - nEvents := len(events) - c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event - containerEvents := eventActionsByIDAndType(c, events, name, "container") - c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events)) - - c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out)) - c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out)) - c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out)) - c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out)) - c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out)) - } -} - -func (s *DockerSuite) TestEventsUntag(c *check.C) { - image := "busybox" - dockerCmd(c, "tag", image, "utest:tag1") - dockerCmd(c, "tag", image, "utest:tag2") - dockerCmd(c, "rmi", "utest:tag1") - dockerCmd(c, "rmi", "utest:tag2") - eventsCmd := exec.Command(dockerBinary, "events", "--since=1") - out, exitCode, _, err := runCommandWithOutputForDuration(eventsCmd, time.Duration(time.Millisecond*2500)) - c.Assert(err, checker.IsNil) - c.Assert(exitCode, checker.Equals, 0, check.Commentf("Failed to get events")) - events := 
-	events := strings.Split(out, "\n")
-	nEvents := len(events)
-	// The last element after the split above will be an empty string, so we
-	// get the two elements before the last, which are the untags we're
-	// looking for.
-	for _, v := range events[nEvents-3 : nEvents-1] {
-		c.Assert(v, checker.Contains, "untag", check.Commentf("event should be untag"))
-	}
-}
-
-func (s *DockerSuite) TestEventsContainerFailStartDie(c *check.C) {
-	_, _, err := dockerCmdWithError("run", "--name", "testeventdie", "busybox", "blerg")
-	c.Assert(err, checker.NotNil, check.Commentf("Container run with command blerg should have failed, but it did not"))
-
-	out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c))
-	events := strings.Split(strings.TrimSpace(out), "\n")
-
-	nEvents := len(events)
-	c.Assert(nEvents, checker.GreaterOrEqualThan, 1) //Missing expected event
-
-	actions := eventActionsByIDAndType(c, events, "testeventdie", "container")
-
-	var startEvent bool
-	var dieEvent bool
-	for _, a := range actions {
-		switch a {
-		case "start":
-			startEvent = true
-		case "die":
-			dieEvent = true
-		}
-	}
-
-	// Windows platform is different from Linux, it will start container whatever
-	// so Windows can get start/die event but Linux can't
-	if daemonPlatform == "windows" {
-		c.Assert(startEvent, checker.True, check.Commentf("Start event not found: %v\n%v", actions, events))
-		c.Assert(dieEvent, checker.True, check.Commentf("Die event not found: %v\n%v", actions, events))
-	} else {
-		c.Assert(startEvent, checker.False, check.Commentf("Start event not expected: %v\n%v", actions, events))
-		c.Assert(dieEvent, checker.False, check.Commentf("Die event not expected: %v\n%v", actions, events))
-	}
-}
-
-func (s *DockerSuite) TestEventsLimit(c *check.C) {
-	var waitGroup sync.WaitGroup
-	errChan := make(chan error, 17)
-
-	args := []string{"run", "--rm", "busybox", "true"}
-	for i := 0; i < 17; i++ {
-		waitGroup.Add(1)
-		go func() {
-			defer waitGroup.Done()
-			out, err := exec.Command(dockerBinary, args...).CombinedOutput()
-			if err != nil {
-				err = fmt.Errorf("%v: %s", err, string(out))
-			}
-			errChan <- err
-		}()
-	}
-
-	waitGroup.Wait()
-	close(errChan)
-
-	for err := range errChan {
-		c.Assert(err, checker.IsNil, check.Commentf("%q failed with error", strings.Join(args, " ")))
-	}
-
-	out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c))
-	events := strings.Split(out, "\n")
-	nEvents := len(events) - 1
-	c.Assert(nEvents, checker.Equals, 64, check.Commentf("events should be limited to 64, but received %d", nEvents))
-}
-
-func (s *DockerSuite) TestEventsContainerEvents(c *check.C) {
-	dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true")
-
-	out, _ := dockerCmd(c, "events", "--until", daemonUnixTime(c))
-	events := strings.Split(out, "\n")
-	events = events[:len(events)-1]
-
-	nEvents := len(events)
-	c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event
-	containerEvents := eventActionsByIDAndType(c, events, "container-events-test", "container")
-	c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events))
-
-	c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out))
-	c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out))
-	c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out))
-	c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out))
-	c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out))
-}
-
-func (s *DockerSuite) TestEventsContainerEventsAttrSort(c *check.C) {
-	since := daemonUnixTime(c)
-	dockerCmd(c, "run", "--rm", "--name", "container-events-test", "busybox", "true")
-
-	out, _ := dockerCmd(c, "events", "--filter", "container=container-events-test", "--since", since, "--until", daemonUnixTime(c))
-	events := strings.Split(out, "\n")
-
-	nEvents := len(events)
-	c.Assert(nEvents, checker.GreaterOrEqualThan, 3) //Missing expected event
-	matchedEvents := 0
-	for _, event := range events {
-		matches := eventstestutils.ScanMap(event)
-		if matches["eventType"] == "container" && matches["action"] == "create" {
-			matchedEvents++
-			c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted"))
-		} else if matches["eventType"] == "container" && matches["action"] == "start" {
-			matchedEvents++
-			c.Assert(out, checker.Contains, "(image=busybox, name=container-events-test)", check.Commentf("Event attributes not sorted"))
-		}
-	}
-	c.Assert(matchedEvents, checker.Equals, 2, check.Commentf("missing events for container container-events-test:\n%s", out))
-}
-
-func (s *DockerSuite) TestEventsContainerEventsSinceUnixEpoch(c *check.C) {
-	dockerCmd(c, "run", "--rm", "--name", "since-epoch-test", "busybox", "true")
-	timeBeginning := time.Unix(0, 0).Format(time.RFC3339Nano)
-	timeBeginning = strings.Replace(timeBeginning, "Z", ".000000000Z", -1)
-	out, _ := dockerCmd(c, "events", "--since", timeBeginning, "--until", daemonUnixTime(c))
-	events := strings.Split(out, "\n")
-	events = events[:len(events)-1]
-
-	nEvents := len(events)
-	c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event
-	containerEvents := eventActionsByIDAndType(c, events, "since-epoch-test", "container")
-	c.Assert(containerEvents, checker.HasLen, 5, check.Commentf("events: %v", events))
-
-	c.Assert(containerEvents[0], checker.Equals, "create", check.Commentf(out))
-	c.Assert(containerEvents[1], checker.Equals, "attach", check.Commentf(out))
-	c.Assert(containerEvents[2], checker.Equals, "start", check.Commentf(out))
-	c.Assert(containerEvents[3], checker.Equals, "die", check.Commentf(out))
-	c.Assert(containerEvents[4], checker.Equals, "destroy", check.Commentf(out))
-}
-
-func (s *DockerSuite) TestEventsImageTag(c *check.C) {
-	time.Sleep(1 * time.Second) // because API has seconds granularity
-	since := daemonUnixTime(c)
-	image := "testimageevents:tag"
-	dockerCmd(c, "tag", "busybox", image)
-
-	out, _ := dockerCmd(c, "events",
-		"--since", since, "--until", daemonUnixTime(c))
-
-	events := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(events, checker.HasLen, 1, check.Commentf("was expecting 1 event. out=%s", out))
out=%s", out)) - event := strings.TrimSpace(events[0]) - - matches := eventstestutils.ScanMap(event) - c.Assert(matchEventID(matches, image), checker.True, check.Commentf("matches: %v\nout:\n%s", matches, out)) - c.Assert(matches["action"], checker.Equals, "tag") -} - -func (s *DockerSuite) TestEventsImagePull(c *check.C) { - // TODO Windows: Enable this test once pull and reliable image names are available - testRequires(c, DaemonIsLinux) - since := daemonUnixTime(c) - testRequires(c, Network) - - dockerCmd(c, "pull", "hello-world") - - out, _ := dockerCmd(c, "events", - "--since", since, "--until", daemonUnixTime(c)) - - events := strings.Split(strings.TrimSpace(out), "\n") - event := strings.TrimSpace(events[len(events)-1]) - matches := eventstestutils.ScanMap(event) - c.Assert(matches["id"], checker.Equals, "hello-world:latest") - c.Assert(matches["action"], checker.Equals, "pull") - -} - -func (s *DockerSuite) TestEventsImageImport(c *check.C) { - // TODO Windows CI. This should be portable once export/import are - // more reliable (@swernli) - testRequires(c, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - cleanedContainerID := strings.TrimSpace(out) - - since := daemonUnixTime(c) - out, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "export", cleanedContainerID), - exec.Command(dockerBinary, "import", "-"), - ) - c.Assert(err, checker.IsNil, check.Commentf("import failed with output: %q", out)) - imageRef := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=import") - events := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(events, checker.HasLen, 1) - matches := eventstestutils.ScanMap(events[0]) - c.Assert(matches["id"], checker.Equals, imageRef, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) - c.Assert(matches["action"], checker.Equals, "import", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) -} - -func (s *DockerSuite) TestEventsImageLoad(c *check.C) { - testRequires(c, DaemonIsLinux) - myImageName := "footest:v1" - dockerCmd(c, "tag", "busybox", myImageName) - since := daemonUnixTime(c) - - out, _ := dockerCmd(c, "images", "-q", "--no-trunc", myImageName) - longImageID := strings.TrimSpace(out) - c.Assert(longImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty")) - - dockerCmd(c, "save", "-o", "saveimg.tar", myImageName) - dockerCmd(c, "rmi", myImageName) - out, _ = dockerCmd(c, "images", "-q", myImageName) - noImageID := strings.TrimSpace(out) - c.Assert(noImageID, checker.Equals, "", check.Commentf("Should not have any image")) - dockerCmd(c, "load", "-i", "saveimg.tar") - - cmd := exec.Command("rm", "-rf", "saveimg.tar") - runCommand(cmd) - - out, _ = dockerCmd(c, "images", "-q", "--no-trunc", myImageName) - imageID := strings.TrimSpace(out) - c.Assert(imageID, checker.Equals, longImageID, check.Commentf("Should have same image id as before")) - - out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=load") - events := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(events, checker.HasLen, 1) - matches := eventstestutils.ScanMap(events[0]) - c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) - c.Assert(matches["action"], checker.Equals, "load", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) - - out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), 
"--filter", "event=save") - events = strings.Split(strings.TrimSpace(out), "\n") - c.Assert(events, checker.HasLen, 1) - matches = eventstestutils.ScanMap(events[0]) - c.Assert(matches["id"], checker.Equals, imageID, check.Commentf("matches: %v\nout:\n%s\n", matches, out)) - c.Assert(matches["action"], checker.Equals, "save", check.Commentf("matches: %v\nout:\n%s\n", matches, out)) -} - -func (s *DockerSuite) TestEventsPluginOps(c *check.C) { - testRequires(c, DaemonIsLinux, ExperimentalDaemon) - - pluginName := "tiborvass/no-remove:latest" - since := daemonUnixTime(c) - - dockerCmd(c, "plugin", "install", pluginName, "--grant-all-permissions") - dockerCmd(c, "plugin", "disable", pluginName) - dockerCmd(c, "plugin", "remove", pluginName) - - out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c)) - events := strings.Split(out, "\n") - events = events[:len(events)-1] - - nEvents := len(events) - c.Assert(nEvents, checker.GreaterOrEqualThan, 4) - - pluginEvents := eventActionsByIDAndType(c, events, pluginName, "plugin") - c.Assert(pluginEvents, checker.HasLen, 4, check.Commentf("events: %v", events)) - - c.Assert(pluginEvents[0], checker.Equals, "pull", check.Commentf(out)) - c.Assert(pluginEvents[1], checker.Equals, "enable", check.Commentf(out)) - c.Assert(pluginEvents[2], checker.Equals, "disable", check.Commentf(out)) - c.Assert(pluginEvents[3], checker.Equals, "remove", check.Commentf(out)) -} - -func (s *DockerSuite) TestEventsFilters(c *check.C) { - since := daemonUnixTime(c) - dockerCmd(c, "run", "--rm", "busybox", "true") - dockerCmd(c, "run", "--rm", "busybox", "true") - out, _ := dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die") - parseEvents(c, out, "die") - - out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", "event=die", "--filter", "event=start") - parseEvents(c, out, "die|start") - - // make sure we at least got 2 start events - count := strings.Count(out, "start") - c.Assert(strings.Count(out, "start"), checker.GreaterOrEqualThan, 2, check.Commentf("should have had 2 start events but had %d, out: %s", count, out)) - -} - -func (s *DockerSuite) TestEventsFilterImageName(c *check.C) { - since := daemonUnixTime(c) - - out, _ := dockerCmd(c, "run", "--name", "container_1", "-d", "busybox:latest", "true") - container1 := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "run", "--name", "container_2", "-d", "busybox", "true") - container2 := strings.TrimSpace(out) - - name := "busybox" - out, _ = dockerCmd(c, "events", "--since", since, "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("image=%s", name)) - events := strings.Split(out, "\n") - events = events[:len(events)-1] - c.Assert(events, checker.Not(checker.HasLen), 0) //Expected events but found none for the image busybox:latest - count1 := 0 - count2 := 0 - - for _, e := range events { - if strings.Contains(e, container1) { - count1++ - } else if strings.Contains(e, container2) { - count2++ - } - } - c.Assert(count1, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count1, container1)) - c.Assert(count2, checker.Not(checker.Equals), 0, check.Commentf("Expected event from container but got %d from %s", count2, container2)) - -} - -func (s *DockerSuite) TestEventsFilterLabels(c *check.C) { - since := daemonUnixTime(c) - label := "io.docker.testing=foo" - - out, _ := dockerCmd(c, "run", "-d", "-l", label, "busybox:latest", "true") - container1 := 
-	container1 := strings.TrimSpace(out)
-
-	out, _ = dockerCmd(c, "run", "-d", "busybox", "true")
-	container2 := strings.TrimSpace(out)
-
-	out, _ = dockerCmd(
-		c,
-		"events",
-		"--since", since,
-		"--until", daemonUnixTime(c),
-		"--filter", fmt.Sprintf("label=%s", label))
-
-	events := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(len(events), checker.Equals, 3)
-
-	for _, e := range events {
-		c.Assert(e, checker.Contains, container1)
-		c.Assert(e, checker.Not(checker.Contains), container2)
-	}
-}
-
-func (s *DockerSuite) TestEventsFilterImageLabels(c *check.C) {
-	since := daemonUnixTime(c)
-	name := "labelfiltertest"
-	label := "io.docker.testing=image"
-
-	// Build a test image.
-	_, err := buildImage(name, fmt.Sprintf(`
-		FROM busybox:latest
-		LABEL %s`, label), true)
-	c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image"))
-
-	dockerCmd(c, "tag", name, "labelfiltertest:tag1")
-	dockerCmd(c, "tag", name, "labelfiltertest:tag2")
-	dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3")
-
-	out, _ := dockerCmd(
-		c,
-		"events",
-		"--since", since,
-		"--until", daemonUnixTime(c),
-		"--filter", fmt.Sprintf("label=%s", label),
-		"--filter", "type=image")
-
-	events := strings.Split(strings.TrimSpace(out), "\n")
-
-	// 2 events from the "docker tag" command, another one is from "docker build"
-	c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events))
-	for _, e := range events {
-		c.Assert(e, checker.Contains, "labelfiltertest")
-	}
-}
-
-func (s *DockerSuite) TestEventsFilterContainer(c *check.C) {
-	since := daemonUnixTime(c)
-	nameID := make(map[string]string)
-
-	for _, name := range []string{"container_1", "container_2"} {
-		dockerCmd(c, "run", "--name", name, "busybox", "true")
-		id := inspectField(c, name, "Id")
-		nameID[name] = id
-	}
-
-	until := daemonUnixTime(c)
-
-	checkEvents := func(id string, events []string) error {
-		if len(events) != 4 { // create, attach, start, die
-			return fmt.Errorf("expected 4 events, got %v", events)
-		}
-		for _, event := range events {
-			matches := eventstestutils.ScanMap(event)
-			if !matchEventID(matches, id) {
-				return fmt.Errorf("expected event for container id %s: %s - parsed container id: %s", id, event, matches["id"])
-			}
-		}
-		return nil
-	}
-
-	for name, ID := range nameID {
-		// filter by names
-		out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+name)
-		events := strings.Split(strings.TrimSuffix(out, "\n"), "\n")
-		c.Assert(checkEvents(ID, events), checker.IsNil)
-
-		// filter by ID's
-		out, _ = dockerCmd(c, "events", "--since", since, "--until", until, "--filter", "container="+ID)
-		events = strings.Split(strings.TrimSuffix(out, "\n"), "\n")
-		c.Assert(checkEvents(ID, events), checker.IsNil)
-	}
-}
-
-func (s *DockerSuite) TestEventsCommit(c *check.C) {
-	// Problematic on Windows as cannot commit a running container
-	testRequires(c, DaemonIsLinux)
-
-	out, _ := runSleepingContainer(c)
-	cID := strings.TrimSpace(out)
-	c.Assert(waitRun(cID), checker.IsNil)
-
-	dockerCmd(c, "commit", "-m", "test", cID)
-	dockerCmd(c, "stop", cID)
-	c.Assert(waitExited(cID, 5*time.Second), checker.IsNil)
-
-	until := daemonUnixTime(c)
-	out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until)
-	c.Assert(out, checker.Contains, "commit", check.Commentf("Missing 'commit' log event"))
-}
-
-func (s *DockerSuite) TestEventsCopy(c *check.C) {
-	// Build a test image.
-	id, err := buildImage("cpimg", `
-		FROM busybox
-		RUN echo HI > /file`, true)
-	c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image"))
-
-	// Create an empty test file.
-	tempFile, err := ioutil.TempFile("", "test-events-copy-")
-	c.Assert(err, checker.IsNil)
-	defer os.Remove(tempFile.Name())
-
-	c.Assert(tempFile.Close(), checker.IsNil)
-
-	dockerCmd(c, "create", "--name=cptest", id)
-
-	dockerCmd(c, "cp", "cptest:/file", tempFile.Name())
-
-	until := daemonUnixTime(c)
-	out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=cptest", "--until="+until)
-	c.Assert(out, checker.Contains, "archive-path", check.Commentf("Missing 'archive-path' log event\n"))
-
-	dockerCmd(c, "cp", tempFile.Name(), "cptest:/filecopy")
-
-	until = daemonUnixTime(c)
-	out, _ = dockerCmd(c, "events", "-f", "container=cptest", "--until="+until)
-	c.Assert(out, checker.Contains, "extract-to-dir", check.Commentf("Missing 'extract-to-dir' log event"))
-}
-
-func (s *DockerSuite) TestEventsResize(c *check.C) {
-	out, _ := runSleepingContainer(c, "-d")
-	cID := strings.TrimSpace(out)
-	c.Assert(waitRun(cID), checker.IsNil)
-
-	endpoint := "/containers/" + cID + "/resize?h=80&w=24"
-	status, _, err := sockRequest("POST", endpoint, nil)
-	c.Assert(status, checker.Equals, http.StatusOK)
-	c.Assert(err, checker.IsNil)
-
-	dockerCmd(c, "stop", cID)
-
-	until := daemonUnixTime(c)
-	out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until)
-	c.Assert(out, checker.Contains, "resize", check.Commentf("Missing 'resize' log event"))
-}
-
-func (s *DockerSuite) TestEventsAttach(c *check.C) {
-	// TODO Windows CI: Figure out why this test fails intermittently (TP5).
-	testRequires(c, DaemonIsLinux)
-
-	out, _ := dockerCmd(c, "run", "-di", "busybox", "cat")
-	cID := strings.TrimSpace(out)
-	c.Assert(waitRun(cID), checker.IsNil)
-
-	cmd := exec.Command(dockerBinary, "attach", cID)
-	stdin, err := cmd.StdinPipe()
-	c.Assert(err, checker.IsNil)
-	defer stdin.Close()
-	stdout, err := cmd.StdoutPipe()
-	c.Assert(err, checker.IsNil)
-	defer stdout.Close()
-	c.Assert(cmd.Start(), checker.IsNil)
-	defer cmd.Process.Kill()
-
-	// Make sure we're done attaching by writing/reading some stuff
-	_, err = stdin.Write([]byte("hello\n"))
-	c.Assert(err, checker.IsNil)
-	out, err = bufio.NewReader(stdout).ReadString('\n')
-	c.Assert(err, checker.IsNil)
-	c.Assert(strings.TrimSpace(out), checker.Equals, "hello", check.Commentf("expected 'hello'"))
-
-	c.Assert(stdin.Close(), checker.IsNil)
-
-	dockerCmd(c, "kill", cID)
-	c.Assert(waitExited(cID, 5*time.Second), checker.IsNil)
-
-	until := daemonUnixTime(c)
-	out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until)
-	c.Assert(out, checker.Contains, "attach", check.Commentf("Missing 'attach' log event"))
-}
-
-func (s *DockerSuite) TestEventsRename(c *check.C) {
-	out, _ := dockerCmd(c, "run", "--name", "oldName", "busybox", "true")
-	cID := strings.TrimSpace(out)
-	dockerCmd(c, "rename", "oldName", "newName")
-
-	until := daemonUnixTime(c)
-	// filter by the container id because the name in the event will be the new name.
- out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until", until) - c.Assert(out, checker.Contains, "rename", check.Commentf("Missing 'rename' log event\n")) -} - -func (s *DockerSuite) TestEventsTop(c *check.C) { - // Problematic on Windows as Windows does not support top - testRequires(c, DaemonIsLinux) - - out, _ := runSleepingContainer(c, "-d") - cID := strings.TrimSpace(out) - c.Assert(waitRun(cID), checker.IsNil) - - dockerCmd(c, "top", cID) - dockerCmd(c, "stop", cID) - - until := daemonUnixTime(c) - out, _ = dockerCmd(c, "events", "-f", "container="+cID, "--until="+until) - c.Assert(out, checker.Contains, " top", check.Commentf("Missing 'top' log event")) -} - -// #14316 -func (s *DockerRegistrySuite) TestEventsImageFilterPush(c *check.C) { - // Problematic to port for Windows CI during TP5 timeframe until - // supporting push - testRequires(c, DaemonIsLinux) - testRequires(c, Network) - repoName := fmt.Sprintf("%v/dockercli/testf", privateRegistryURL) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - cID := strings.TrimSpace(out) - c.Assert(waitRun(cID), checker.IsNil) - - dockerCmd(c, "commit", cID, repoName) - dockerCmd(c, "stop", cID) - dockerCmd(c, "push", repoName) - - until := daemonUnixTime(c) - out, _ = dockerCmd(c, "events", "-f", "image="+repoName, "-f", "event=push", "--until", until) - c.Assert(out, checker.Contains, repoName, check.Commentf("Missing 'push' log event for %s", repoName)) -} - -func (s *DockerSuite) TestEventsFilterType(c *check.C) { - since := daemonUnixTime(c) - name := "labelfiltertest" - label := "io.docker.testing=image" - - // Build a test image. - _, err := buildImage(name, fmt.Sprintf(` - FROM busybox:latest - LABEL %s`, label), true) - c.Assert(err, checker.IsNil, check.Commentf("Couldn't create image")) - - dockerCmd(c, "tag", name, "labelfiltertest:tag1") - dockerCmd(c, "tag", name, "labelfiltertest:tag2") - dockerCmd(c, "tag", "busybox:latest", "labelfiltertest:tag3") - - out, _ := dockerCmd( - c, - "events", - "--since", since, - "--until", daemonUnixTime(c), - "--filter", fmt.Sprintf("label=%s", label), - "--filter", "type=image") - - events := strings.Split(strings.TrimSpace(out), "\n") - - // 2 events from the "docker tag" command, another one is from "docker build" - c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) - for _, e := range events { - c.Assert(e, checker.Contains, "labelfiltertest") - } - - out, _ = dockerCmd( - c, - "events", - "--since", since, - "--until", daemonUnixTime(c), - "--filter", fmt.Sprintf("label=%s", label), - "--filter", "type=container") - events = strings.Split(strings.TrimSpace(out), "\n") - - // Events generated by the container that builds the image - c.Assert(events, checker.HasLen, 3, check.Commentf("Events == %s", events)) - - out, _ = dockerCmd( - c, - "events", - "--since", since, - "--until", daemonUnixTime(c), - "--filter", "type=network") - events = strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(events), checker.GreaterOrEqualThan, 1, check.Commentf("Events == %s", events)) -} - -func (s *DockerSuite) TestEventsFilterImageInContainerAction(c *check.C) { - since := daemonUnixTime(c) - dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true") - waitRun("test-container") - - out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", daemonUnixTime(c)) - events := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(events), checker.GreaterThan, 1, check.Commentf(out)) -} - -func (s 
-func (s *DockerSuite) TestEventsContainerRestart(c *check.C) {
-	dockerCmd(c, "run", "-d", "--name=testEvent", "--restart=on-failure:3", "busybox", "false")
-
-	// wait until testEvent is done restarting.
-	waitTime := 10 * time.Second
-	if daemonPlatform == "windows" {
-		// nslookup isn't present in Windows busybox. Is built-in.
-		waitTime = 90 * time.Second
-	}
-
-	err := waitInspect("testEvent", "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTime)
-	c.Assert(err, checker.IsNil)
-
-	var (
-		createCount int
-		startCount  int
-		dieCount    int
-	)
-	out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container=testEvent")
-	events := strings.Split(strings.TrimSpace(out), "\n")
-
-	nEvents := len(events)
-	c.Assert(nEvents, checker.GreaterOrEqualThan, 1) //Missing expected event
-	actions := eventActionsByIDAndType(c, events, "testEvent", "container")
-
-	for _, a := range actions {
-		switch a {
-		case "create":
-			createCount++
-		case "start":
-			startCount++
-		case "die":
-			dieCount++
-		}
-	}
-	c.Assert(createCount, checker.Equals, 1, check.Commentf("testEvent should be created 1 times: %v", actions))
-	c.Assert(startCount, checker.Equals, 4, check.Commentf("testEvent should start 4 times: %v", actions))
-	c.Assert(dieCount, checker.Equals, 4, check.Commentf("testEvent should die 4 times: %v", actions))
-}
-
-func (s *DockerSuite) TestEventsSinceInTheFuture(c *check.C) {
-	dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true")
-	waitRun("test-container")
-
-	since := daemonTime(c)
-	until := since.Add(time.Duration(-24) * time.Hour)
-	out, _, err := dockerCmdWithError("events", "--filter", "image=busybox", "--since", parseEventTime(since), "--until", parseEventTime(until))
-
-	c.Assert(err, checker.NotNil)
-	c.Assert(out, checker.Contains, "cannot be after `until`")
-}
-
-func (s *DockerSuite) TestEventsUntilInThePast(c *check.C) {
-	since := daemonUnixTime(c)
-
-	dockerCmd(c, "run", "--name", "test-container", "-d", "busybox", "true")
-	waitRun("test-container")
-
-	until := daemonUnixTime(c)
-
-	dockerCmd(c, "run", "--name", "test-container2", "-d", "busybox", "true")
-	waitRun("test-container2")
-
-	out, _ := dockerCmd(c, "events", "--filter", "image=busybox", "--since", since, "--until", until)
-
-	c.Assert(out, checker.Not(checker.Contains), "test-container2")
-	c.Assert(out, checker.Contains, "test-container")
-}
diff --git a/integration-cli/docker_cli_events_unix_test.go b/integration-cli/docker_cli_events_unix_test.go
deleted file mode 100644
index 777092f304..0000000000
--- a/integration-cli/docker_cli_events_unix_test.go
+++ /dev/null
@@ -1,494 +0,0 @@
-// +build !windows
-
-package main
-
-import (
-	"bufio"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"strings"
-	"syscall"
-	"time"
-	"unicode"
-
-	"github.com/docker/docker/pkg/integration/checker"
-	"github.com/go-check/check"
-	"github.com/kr/pty"
-)
-
-// #5979
-func (s *DockerSuite) TestEventsRedirectStdout(c *check.C) {
-	since := daemonUnixTime(c)
-	dockerCmd(c, "run", "busybox", "true")
-
-	file, err := ioutil.TempFile("", "")
-	c.Assert(err, checker.IsNil, check.Commentf("could not create temp file"))
-	defer os.Remove(file.Name())
-
-	command := fmt.Sprintf("%s events --since=%s --until=%s > %s", dockerBinary, since, daemonUnixTime(c), file.Name())
-	_, tty, err := pty.Open()
-	c.Assert(err, checker.IsNil, check.Commentf("Could not open pty"))
-	cmd := exec.Command("sh", "-c", command)
-	cmd.Stdin = tty
-	cmd.Stdout = tty
-	cmd.Stderr = tty
-	c.Assert(cmd.Run(), checker.IsNil, check.Commentf("run err for command %q", command))
checker.IsNil, check.Commentf("run err for command %q", command)) - - scanner := bufio.NewScanner(file) - for scanner.Scan() { - for _, ch := range scanner.Text() { - c.Assert(unicode.IsControl(ch), checker.False, check.Commentf("found control character %v", []byte(string(ch)))) - } - } - c.Assert(scanner.Err(), checker.IsNil, check.Commentf("Scan err for command %q", command)) - -} - -func (s *DockerSuite) TestEventsOOMDisableFalse(c *check.C) { - testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotGCCGO, swapMemorySupport) - - errChan := make(chan error) - go func() { - defer close(errChan) - out, exitCode, _ := dockerCmdWithError("run", "--name", "oomFalse", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") - if expected := 137; exitCode != expected { - errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) - } - }() - select { - case err := <-errChan: - c.Assert(err, checker.IsNil) - case <-time.After(30 * time.Second): - c.Fatal("Timeout waiting for container to die on OOM") - } - - out, _ := dockerCmd(c, "events", "--since=0", "-f", "container=oomFalse", "--until", daemonUnixTime(c)) - events := strings.Split(strings.TrimSuffix(out, "\n"), "\n") - nEvents := len(events) - - c.Assert(nEvents, checker.GreaterOrEqualThan, 5) //Missing expected event - c.Assert(parseEventAction(c, events[nEvents-5]), checker.Equals, "create") - c.Assert(parseEventAction(c, events[nEvents-4]), checker.Equals, "attach") - c.Assert(parseEventAction(c, events[nEvents-3]), checker.Equals, "start") - c.Assert(parseEventAction(c, events[nEvents-2]), checker.Equals, "oom") - c.Assert(parseEventAction(c, events[nEvents-1]), checker.Equals, "die") -} - -func (s *DockerSuite) TestEventsOOMDisableTrue(c *check.C) { - testRequires(c, DaemonIsLinux, oomControl, memoryLimitSupport, NotGCCGO, NotArm, swapMemorySupport) - - errChan := make(chan error) - observer, err := newEventObserver(c) - c.Assert(err, checker.IsNil) - err = observer.Start() - c.Assert(err, checker.IsNil) - defer observer.Stop() - - go func() { - defer close(errChan) - out, exitCode, _ := dockerCmdWithError("run", "--oom-kill-disable=true", "--name", "oomTrue", "-m", "10MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") - if expected := 137; exitCode != expected { - errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) - } - }() - - c.Assert(waitRun("oomTrue"), checker.IsNil) - defer dockerCmd(c, "kill", "oomTrue") - containerID := inspectField(c, "oomTrue", "Id") - - testActions := map[string]chan bool{ - "oom": make(chan bool), - } - - matcher := matchEventLine(containerID, "container", testActions) - processor := processEventMatch(testActions) - go observer.Match(matcher, processor) - - select { - case <-time.After(20 * time.Second): - observer.CheckEventError(c, containerID, "oom", matcher) - case <-testActions["oom"]: - // ignore, done - case errRun := <-errChan: - if errRun != nil { - c.Fatalf("%v", errRun) - } else { - c.Fatalf("container should be still running but it's not") - } - } - - status := inspectField(c, "oomTrue", "State.Status") - c.Assert(strings.TrimSpace(status), checker.Equals, "running", check.Commentf("container should be still running")) -} - -// #18453 -func (s *DockerSuite) TestEventsContainerFilterByName(c *check.C) { - testRequires(c, DaemonIsLinux) - cOut, _ := dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") - c1 := 
-	c1 := strings.TrimSpace(cOut)
-	waitRun("foo")
-	cOut, _ = dockerCmd(c, "run", "--name=bar", "-d", "busybox", "top")
-	c2 := strings.TrimSpace(cOut)
-	waitRun("bar")
-	out, _ := dockerCmd(c, "events", "-f", "container=foo", "--since=0", "--until", daemonUnixTime(c))
-	c.Assert(out, checker.Contains, c1, check.Commentf(out))
-	c.Assert(out, checker.Not(checker.Contains), c2, check.Commentf(out))
-}
-
-// #18453
-func (s *DockerSuite) TestEventsContainerFilterBeforeCreate(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-	var (
-		out string
-		ch  chan struct{}
-	)
-	ch = make(chan struct{})
-
-	// calculate the time it takes to create and start a container and sleep 2 seconds
-	// this is to make sure the docker event will receive the event of container
-	since := daemonTime(c)
-	id, _ := dockerCmd(c, "run", "-d", "busybox", "top")
-	cID := strings.TrimSpace(id)
-	waitRun(cID)
-	time.Sleep(2 * time.Second)
-	duration := daemonTime(c).Sub(since)
-
-	go func() {
-		// start events and wait for future events to
-		// make sure the new container shows up even when
-		// the event stream was created before the container.
-		t := daemonTime(c).Add(2 * duration)
-		out, _ = dockerCmd(c, "events", "-f", "container=foo", "--since=0", "--until", parseEventTime(t))
-		close(ch)
-	}()
-	// Sleep 2 second to wait docker event to start
-	time.Sleep(2 * time.Second)
-	id, _ = dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top")
-	cID = strings.TrimSpace(id)
-	waitRun(cID)
-	<-ch
-	c.Assert(out, checker.Contains, cID, check.Commentf("Missing event of container (foo)"))
-}
-
-func (s *DockerSuite) TestVolumeEvents(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-
-	since := daemonUnixTime(c)
-
-	// Observe create/mount volume actions
-	dockerCmd(c, "volume", "create", "--name", "test-event-volume-local")
-	dockerCmd(c, "run", "--name", "test-volume-container", "--volume", "test-event-volume-local:/foo", "-d", "busybox", "true")
-	waitRun("test-volume-container")
-
-	// Observe unmount/destroy volume actions
-	dockerCmd(c, "rm", "-f", "test-volume-container")
-	dockerCmd(c, "volume", "rm", "test-event-volume-local")
-
-	until := daemonUnixTime(c)
-	out, _ := dockerCmd(c, "events", "--since", since, "--until", until)
-	events := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(len(events), checker.GreaterThan, 4)
-
-	volumeEvents := eventActionsByIDAndType(c, events, "test-event-volume-local", "volume")
-	c.Assert(volumeEvents, checker.HasLen, 4)
-	c.Assert(volumeEvents[0], checker.Equals, "create")
-	c.Assert(volumeEvents[1], checker.Equals, "mount")
-	c.Assert(volumeEvents[2], checker.Equals, "unmount")
-	c.Assert(volumeEvents[3], checker.Equals, "destroy")
-}
-
-func (s *DockerSuite) TestNetworkEvents(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-
-	since := daemonUnixTime(c)
-
-	// Observe create/connect network actions
-	dockerCmd(c, "network", "create", "test-event-network-local")
-	dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local", "-d", "busybox", "true")
-	waitRun("test-network-container")
-
-	// Observe disconnect/destroy network actions
-	dockerCmd(c, "rm", "-f", "test-network-container")
-	dockerCmd(c, "network", "rm", "test-event-network-local")
-
-	until := daemonUnixTime(c)
-	out, _ := dockerCmd(c, "events", "--since", since, "--until", until)
-	events := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(len(events), checker.GreaterThan, 4)
-
-	netEvents := eventActionsByIDAndType(c, events, "test-event-network-local", "network")
-	c.Assert(netEvents, checker.HasLen, 4)
-	c.Assert(netEvents[0], checker.Equals, "create")
-	c.Assert(netEvents[1], checker.Equals, "connect")
-	c.Assert(netEvents[2], checker.Equals, "disconnect")
-	c.Assert(netEvents[3], checker.Equals, "destroy")
-}
-
-func (s *DockerSuite) TestEventsContainerWithMultiNetwork(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-
-	// Observe create/connect network actions
-	dockerCmd(c, "network", "create", "test-event-network-local-1")
-	dockerCmd(c, "network", "create", "test-event-network-local-2")
-	dockerCmd(c, "run", "--name", "test-network-container", "--net", "test-event-network-local-1", "-td", "busybox", "sh")
-	waitRun("test-network-container")
-	dockerCmd(c, "network", "connect", "test-event-network-local-2", "test-network-container")
-
-	since := daemonUnixTime(c)
-
-	dockerCmd(c, "stop", "-t", "1", "test-network-container")
-
-	until := daemonUnixTime(c)
-	out, _ := dockerCmd(c, "events", "--since", since, "--until", until, "-f", "type=network")
-	netEvents := strings.Split(strings.TrimSpace(out), "\n")
-
-	// NOTE: order in which disconnect takes place is undetermined,
-	// so don't check for the *full* name
-	c.Assert(len(netEvents), checker.Equals, 2)
-	c.Assert(netEvents[0], checker.Contains, "disconnect")
-	c.Assert(netEvents[0], checker.Contains, "test-event-network-local-")
-
-	c.Assert(netEvents[1], checker.Contains, "disconnect")
-	c.Assert(netEvents[1], checker.Contains, "test-event-network-local-")
-}
-
-func (s *DockerSuite) TestEventsStreaming(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-
-	observer, err := newEventObserver(c)
-	c.Assert(err, checker.IsNil)
-	err = observer.Start()
-	c.Assert(err, checker.IsNil)
-	defer observer.Stop()
-
-	out, _ := dockerCmd(c, "run", "-d", "busybox:latest", "true")
-	containerID := strings.TrimSpace(out)
-
-	testActions := map[string]chan bool{
-		"create":  make(chan bool, 1),
-		"start":   make(chan bool, 1),
-		"die":     make(chan bool, 1),
-		"destroy": make(chan bool, 1),
-	}
-
-	matcher := matchEventLine(containerID, "container", testActions)
-	processor := processEventMatch(testActions)
-	go observer.Match(matcher, processor)
-
-	select {
-	case <-time.After(5 * time.Second):
-		observer.CheckEventError(c, containerID, "create", matcher)
-	case <-testActions["create"]:
-		// ignore, done
-	}
-
-	select {
-	case <-time.After(5 * time.Second):
-		observer.CheckEventError(c, containerID, "start", matcher)
-	case <-testActions["start"]:
-		// ignore, done
-	}
-
-	select {
-	case <-time.After(5 * time.Second):
-		observer.CheckEventError(c, containerID, "die", matcher)
-	case <-testActions["die"]:
-		// ignore, done
-	}
-
-	dockerCmd(c, "rm", containerID)
-
-	select {
-	case <-time.After(5 * time.Second):
-		observer.CheckEventError(c, containerID, "destroy", matcher)
-	case <-testActions["destroy"]:
-		// ignore, done
-	}
-}
-
-func (s *DockerSuite) TestEventsImageUntagDelete(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-
-	observer, err := newEventObserver(c)
-	c.Assert(err, checker.IsNil)
-	err = observer.Start()
-	c.Assert(err, checker.IsNil)
-	defer observer.Stop()
-
-	name := "testimageevents"
-	imageID, err := buildImage(name,
-		`FROM scratch
-		MAINTAINER "docker"`,
-		true)
-	c.Assert(err, checker.IsNil)
-	c.Assert(deleteImages(name), checker.IsNil)
-
-	testActions := map[string]chan bool{
-		"untag":  make(chan bool, 1),
-		"delete": make(chan bool, 1),
-	}
-
-	matcher := matchEventLine(imageID, "image", testActions)
-	processor := processEventMatch(testActions)
-	go observer.Match(matcher, processor)
-
-	select {
-	case <-time.After(10 * time.Second):
-		observer.CheckEventError(c, imageID, "untag", matcher)
-	case <-testActions["untag"]:
-		// ignore, done
-	}
-
-	select {
-	case <-time.After(10 * time.Second):
-		observer.CheckEventError(c, imageID, "delete", matcher)
-	case <-testActions["delete"]:
-		// ignore, done
-	}
-}
-
-func (s *DockerSuite) TestEventsFilterVolumeAndNetworkType(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-
-	since := daemonUnixTime(c)
-
-	dockerCmd(c, "network", "create", "test-event-network-type")
-	dockerCmd(c, "volume", "create", "--name", "test-event-volume-type")
-
-	out, _ := dockerCmd(c, "events", "--filter", "type=volume", "--filter", "type=network", "--since", since, "--until", daemonUnixTime(c))
-	events := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(len(events), checker.GreaterOrEqualThan, 2, check.Commentf(out))
-
-	networkActions := eventActionsByIDAndType(c, events, "test-event-network-type", "network")
-	volumeActions := eventActionsByIDAndType(c, events, "test-event-volume-type", "volume")
-
-	c.Assert(volumeActions[0], checker.Equals, "create")
-	c.Assert(networkActions[0], checker.Equals, "create")
-}
-
-func (s *DockerSuite) TestEventsFilterVolumeID(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-
-	since := daemonUnixTime(c)
-
-	dockerCmd(c, "volume", "create", "--name", "test-event-volume-id")
-	out, _ := dockerCmd(c, "events", "--filter", "volume=test-event-volume-id", "--since", since, "--until", daemonUnixTime(c))
-	events := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(events, checker.HasLen, 1)
-
-	c.Assert(events[0], checker.Contains, "test-event-volume-id")
-	c.Assert(events[0], checker.Contains, "driver=local")
-}
-
-func (s *DockerSuite) TestEventsFilterNetworkID(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-
-	since := daemonUnixTime(c)
-
-	dockerCmd(c, "network", "create", "test-event-network-local")
-	out, _ := dockerCmd(c, "events", "--filter", "network=test-event-network-local", "--since", since, "--until", daemonUnixTime(c))
-	events := strings.Split(strings.TrimSpace(out), "\n")
-	c.Assert(events, checker.HasLen, 1)
-
-	c.Assert(events[0], checker.Contains, "test-event-network-local")
-	c.Assert(events[0], checker.Contains, "type=bridge")
-}
-
-func (s *DockerDaemonSuite) TestDaemonEvents(c *check.C) {
-	testRequires(c, SameHostDaemon, DaemonIsLinux)
-
-	// daemon config file
-	configFilePath := "test.json"
-	configFile, err := os.Create(configFilePath)
-	c.Assert(err, checker.IsNil)
-	defer os.Remove(configFilePath)
-
-	daemonConfig := `{"labels":["foo=bar"]}`
-	fmt.Fprintf(configFile, "%s", daemonConfig)
-	configFile.Close()
-	c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil)
-
-	// Get daemon ID
-	out, err := s.d.Cmd("info")
-	c.Assert(err, checker.IsNil)
-	daemonID := ""
-	daemonName := ""
-	for _, line := range strings.Split(out, "\n") {
-		if strings.HasPrefix(line, "ID: ") {
-			daemonID = strings.TrimPrefix(line, "ID: ")
-		} else if strings.HasPrefix(line, "Name: ") {
-			daemonName = strings.TrimPrefix(line, "Name: ")
-		}
-	}
-	c.Assert(daemonID, checker.Not(checker.Equals), "")
-
-	configFile, err = os.Create(configFilePath)
-	c.Assert(err, checker.IsNil)
-	daemonConfig = `{"max-concurrent-downloads":1,"labels":["bar=foo"]}`
-	fmt.Fprintf(configFile, "%s", daemonConfig)
-	configFile.Close()
-
-	syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP)
-
-	time.Sleep(3 * time.Second)
-
-	out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c))
-	c.Assert(err, checker.IsNil)
-
c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s (cluster-advertise=, cluster-store=, cluster-store-opts={}, debug=true, default-runtime=runc, labels=[\"bar=foo\"], max-concurrent-downloads=1, max-concurrent-uploads=5, name=%s, runtimes=runc:{docker-runc []})", daemonID, daemonName)) -} - -func (s *DockerDaemonSuite) TestDaemonEventsWithFilters(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - // daemon config file - configFilePath := "test.json" - configFile, err := os.Create(configFilePath) - c.Assert(err, checker.IsNil) - defer os.Remove(configFilePath) - - daemonConfig := `{"labels":["foo=bar"]}` - fmt.Fprintf(configFile, "%s", daemonConfig) - configFile.Close() - c.Assert(s.d.Start(fmt.Sprintf("--config-file=%s", configFilePath)), check.IsNil) - - // Get daemon ID - out, err := s.d.Cmd("info") - c.Assert(err, checker.IsNil) - daemonID := "" - daemonName := "" - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(line, "ID: ") { - daemonID = strings.TrimPrefix(line, "ID: ") - } else if strings.HasPrefix(line, "Name: ") { - daemonName = strings.TrimPrefix(line, "Name: ") - } - } - c.Assert(daemonID, checker.Not(checker.Equals), "") - - syscall.Kill(s.d.cmd.Process.Pid, syscall.SIGHUP) - - time.Sleep(3 * time.Second) - - out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonID)) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) - - out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", fmt.Sprintf("daemon=%s", daemonName)) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) - - out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "daemon=foo") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) - - out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=daemon") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, fmt.Sprintf("daemon reload %s", daemonID)) - - out, err = s.d.Cmd("events", "--since=0", "--until", daemonUnixTime(c), "--filter", "type=container") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Not(checker.Contains), fmt.Sprintf("daemon reload %s", daemonID)) -} diff --git a/integration-cli/docker_cli_exec_test.go b/integration-cli/docker_cli_exec_test.go deleted file mode 100644 index a2b7331e38..0000000000 --- a/integration-cli/docker_cli_exec_test.go +++ /dev/null @@ -1,515 +0,0 @@ -// +build !test_no_exec - -package main - -import ( - "bufio" - "fmt" - "net/http" - "os" - "os/exec" - "reflect" - "runtime" - "sort" - "strings" - "sync" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestExec(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") - c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) - - out, _ = dockerCmd(c, "exec", "testing", "cat", "/tmp/file") - out = strings.Trim(out, "\r\n") - c.Assert(out, checker.Equals, "test") - -} - -func (s *DockerSuite) TestExecInteractive(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "sh", "-c", "echo test > /tmp/file && top") - - execCmd := exec.Command(dockerBinary, "exec", "-i", 
"testing", "sh") - stdin, err := execCmd.StdinPipe() - c.Assert(err, checker.IsNil) - stdout, err := execCmd.StdoutPipe() - c.Assert(err, checker.IsNil) - - err = execCmd.Start() - c.Assert(err, checker.IsNil) - _, err = stdin.Write([]byte("cat /tmp/file\n")) - c.Assert(err, checker.IsNil) - - r := bufio.NewReader(stdout) - line, err := r.ReadString('\n') - c.Assert(err, checker.IsNil) - line = strings.TrimSpace(line) - c.Assert(line, checker.Equals, "test") - err = stdin.Close() - c.Assert(err, checker.IsNil) - errChan := make(chan error) - go func() { - errChan <- execCmd.Wait() - close(errChan) - }() - select { - case err := <-errChan: - c.Assert(err, checker.IsNil) - case <-time.After(1 * time.Second): - c.Fatal("docker exec failed to exit on stdin close") - } - -} - -func (s *DockerSuite) TestExecAfterContainerRestart(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := runSleepingContainer(c) - cleanedContainerID := strings.TrimSpace(out) - c.Assert(waitRun(cleanedContainerID), check.IsNil) - dockerCmd(c, "restart", cleanedContainerID) - c.Assert(waitRun(cleanedContainerID), check.IsNil) - - out, _ = dockerCmd(c, "exec", cleanedContainerID, "echo", "hello") - outStr := strings.TrimSpace(out) - c.Assert(outStr, checker.Equals, "hello") -} - -func (s *DockerDaemonSuite) TestExecAfterDaemonRestart(c *check.C) { - // TODO Windows CI: Requires a little work to get this ported. - testRequires(c, DaemonIsLinux) - testRequires(c, SameHostDaemon) - - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - out, err := s.d.Cmd("run", "-d", "--name", "top", "-p", "80", "busybox:latest", "top") - c.Assert(err, checker.IsNil, check.Commentf("Could not run top: %s", out)) - - err = s.d.Restart() - c.Assert(err, checker.IsNil, check.Commentf("Could not restart daemon")) - - out, err = s.d.Cmd("start", "top") - c.Assert(err, checker.IsNil, check.Commentf("Could not start top after daemon restart: %s", out)) - - out, err = s.d.Cmd("exec", "top", "echo", "hello") - c.Assert(err, checker.IsNil, check.Commentf("Could not exec on container top: %s", out)) - - outStr := strings.TrimSpace(string(out)) - c.Assert(outStr, checker.Equals, "hello") -} - -// Regression test for #9155, #9044 -func (s *DockerSuite) TestExecEnv(c *check.C) { - // TODO Windows CI: This one is interesting and may just end up being a feature - // difference between Windows and Linux. On Windows, the environment is passed - // into the process that is launched, not into the machine environment. 
Hence - // a subsequent exec will not have LALA set/ - testRequires(c, DaemonIsLinux) - runSleepingContainer(c, "-e", "LALA=value1", "-e", "LALA=value2", "-d", "--name", "testing") - c.Assert(waitRun("testing"), check.IsNil) - - out, _ := dockerCmd(c, "exec", "testing", "env") - c.Assert(out, checker.Not(checker.Contains), "LALA=value1") - c.Assert(out, checker.Contains, "LALA=value2") - c.Assert(out, checker.Contains, "HOME=/root") -} - -func (s *DockerSuite) TestExecExitStatus(c *check.C) { - runSleepingContainer(c, "-d", "--name", "top") - - // Test normal (non-detached) case first - cmd := exec.Command(dockerBinary, "exec", "top", "sh", "-c", "exit 23") - ec, _ := runCommand(cmd) - c.Assert(ec, checker.Equals, 23) -} - -func (s *DockerSuite) TestExecPausedContainer(c *check.C) { - // Windows does not support pause - testRequires(c, DaemonIsLinux) - defer unpauseAllContainers() - - out, _ := dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") - ContainerID := strings.TrimSpace(out) - - dockerCmd(c, "pause", "testing") - out, _, err := dockerCmdWithError("exec", "-i", "-t", ContainerID, "echo", "hello") - c.Assert(err, checker.NotNil, check.Commentf("container should fail to exec new conmmand if it is paused")) - - expected := ContainerID + " is paused, unpause the container before exec" - c.Assert(out, checker.Contains, expected, check.Commentf("container should not exec new command if it is paused")) -} - -// regression test for #9476 -func (s *DockerSuite) TestExecTTYCloseStdin(c *check.C) { - // TODO Windows CI: This requires some work to port to Windows. - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "-it", "--name", "exec_tty_stdin", "busybox") - - cmd := exec.Command(dockerBinary, "exec", "-i", "exec_tty_stdin", "cat") - stdinRw, err := cmd.StdinPipe() - c.Assert(err, checker.IsNil) - - stdinRw.Write([]byte("test")) - stdinRw.Close() - - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - out, _ = dockerCmd(c, "top", "exec_tty_stdin") - outArr := strings.Split(out, "\n") - c.Assert(len(outArr), checker.LessOrEqualThan, 3, check.Commentf("exec process left running")) - c.Assert(out, checker.Not(checker.Contains), "nsenter-exec") -} - -func (s *DockerSuite) TestExecTTYWithoutStdin(c *check.C) { - // TODO Windows CI: This requires some work to port to Windows. - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "-ti", "busybox") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - errChan := make(chan error) - go func() { - defer close(errChan) - - cmd := exec.Command(dockerBinary, "exec", "-ti", id, "true") - if _, err := cmd.StdinPipe(); err != nil { - errChan <- err - return - } - - expected := "the input device is not a TTY" - if runtime.GOOS == "windows" { - expected += ". If you are using mintty, try prefixing the command with 'winpty'" - } - if out, _, err := runCommandWithOutput(cmd); err == nil { - errChan <- fmt.Errorf("exec should have failed") - return - } else if !strings.Contains(out, expected) { - errChan <- fmt.Errorf("exec failed with error %q: expected %q", out, expected) - return - } - }() - - select { - case err := <-errChan: - c.Assert(err, check.IsNil) - case <-time.After(3 * time.Second): - c.Fatal("exec is running but should have failed") - } -} - -func (s *DockerSuite) TestExecParseError(c *check.C) { - // TODO Windows CI: Requires some extra work. Consider copying the - // runSleepingContainer helper to have an exec version. 
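Several of the exec tests above guard against hangs with the same idiom: run the command's Wait in a background goroutine and race it against time.After in a select. A minimal, standalone sketch of that pattern follows; the "sleep 10" command and the 3-second limit are illustrative stand-ins, not values taken from the suite.

package main

import (
	"fmt"
	"os/exec"
	"time"
)

func main() {
	cmd := exec.Command("sleep", "10") // stand-in for a docker exec invocation
	if err := cmd.Start(); err != nil {
		fmt.Println("start failed:", err)
		return
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }() // wait in the background so the select can bound it

	select {
	case err := <-done:
		fmt.Println("command exited:", err)
	case <-time.After(3 * time.Second):
		cmd.Process.Kill() // treat the command as hung and give up
		fmt.Println("command timed out")
	}
}

The buffered channel matters: if the timeout fires first, the goroutine can still deliver the Wait result without leaking.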
- testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name", "top", "busybox", "top") - - // Test normal (non-detached) case first - cmd := exec.Command(dockerBinary, "exec", "top") - _, stderr, _, err := runCommandWithStdoutStderr(cmd) - c.Assert(err, checker.NotNil) - c.Assert(stderr, checker.Contains, "See '"+dockerBinary+" exec --help'") -} - -func (s *DockerSuite) TestExecStopNotHanging(c *check.C) { - // TODO Windows CI: Requires some extra work. Consider copying the - // runSleepingContainer helper to have an exec version. - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") - - err := exec.Command(dockerBinary, "exec", "testing", "top").Start() - c.Assert(err, checker.IsNil) - - type dstop struct { - out []byte - err error - } - - ch := make(chan dstop) - go func() { - out, err := exec.Command(dockerBinary, "stop", "testing").CombinedOutput() - ch <- dstop{out, err} - close(ch) - }() - select { - case <-time.After(3 * time.Second): - c.Fatal("Container stop timed out") - case s := <-ch: - c.Assert(s.err, check.IsNil) - } -} - -func (s *DockerSuite) TestExecCgroup(c *check.C) { - // Not applicable on Windows - using Linux specific functionality - testRequires(c, NotUserNamespace) - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top") - - out, _ := dockerCmd(c, "exec", "testing", "cat", "/proc/1/cgroup") - containerCgroups := sort.StringSlice(strings.Split(out, "\n")) - - var wg sync.WaitGroup - var mu sync.Mutex - execCgroups := []sort.StringSlice{} - errChan := make(chan error) - // exec a few times concurrently to get consistent failure - for i := 0; i < 5; i++ { - wg.Add(1) - go func() { - out, _, err := dockerCmdWithError("exec", "testing", "cat", "/proc/self/cgroup") - if err != nil { - errChan <- err - return - } - cg := sort.StringSlice(strings.Split(out, "\n")) - - mu.Lock() - execCgroups = append(execCgroups, cg) - mu.Unlock() - wg.Done() - }() - } - wg.Wait() - close(errChan) - - for err := range errChan { - c.Assert(err, checker.IsNil) - } - - for _, cg := range execCgroups { - if !reflect.DeepEqual(cg, containerCgroups) { - fmt.Println("exec cgroups:") - for _, name := range cg { - fmt.Printf(" %s\n", name) - } - - fmt.Println("container cgroups:") - for _, name := range containerCgroups { - fmt.Printf(" %s\n", name) - } - c.Fatal("cgroups mismatched") - } - } -} - -func (s *DockerSuite) TestExecInspectID(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - id := strings.TrimSuffix(out, "\n") - - out = inspectField(c, id, "ExecIDs") - c.Assert(out, checker.Equals, "[]", check.Commentf("ExecIDs should be empty, got: %s", out)) - - // Start an exec, have it block waiting so we can do some checking - cmd := exec.Command(dockerBinary, "exec", id, "sh", "-c", - "while ! 
test -e /execid1; do sleep 1; done") - - err := cmd.Start() - c.Assert(err, checker.IsNil, check.Commentf("failed to start the exec cmd")) - - // Give the exec 10 chances/seconds to start then give up and stop the test - tries := 10 - for i := 0; i < tries; i++ { - // Since it's still running, we should see the exec as part of the container - out = strings.TrimSpace(inspectField(c, id, "ExecIDs")) - - if out != "[]" && out != "" { - break - } - c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still empty after 10 seconds")) - time.Sleep(1 * time.Second) - } - - // Save execID for later - execID, err := inspectFilter(id, "index .ExecIDs 0") - c.Assert(err, checker.IsNil, check.Commentf("failed to get the exec id")) - - // End the exec by creating the missing file - err = exec.Command(dockerBinary, "exec", id, - "sh", "-c", "touch /execid1").Run() - - c.Assert(err, checker.IsNil, check.Commentf("failed to run the 2nd exec cmd")) - - // Wait for 1st exec to complete - cmd.Wait() - - // Give the exec 10 chances/seconds to stop then give up and stop the test - for i := 0; i < tries; i++ { - // Once it has exited, the exec should be removed from the container's ExecIDs - out = strings.TrimSpace(inspectField(c, id, "ExecIDs")) - - if out == "[]" { - break - } - c.Assert(i+1, checker.Not(checker.Equals), tries, check.Commentf("ExecIDs still not empty after 10 seconds")) - time.Sleep(1 * time.Second) - } - - // But we should still be able to query the execID - sc, body, err := sockRequest("GET", "/exec/"+execID+"/json", nil) - c.Assert(sc, checker.Equals, http.StatusOK, check.Commentf("received status != 200 OK: %d\n%s", sc, body)) - - // Now delete the container and then an 'inspect' on the exec should - // result in a 404 (not 'container not running') - out, ec := dockerCmd(c, "rm", "-f", id) - c.Assert(ec, checker.Equals, 0, check.Commentf("error removing container: %s", out)) - sc, body, err = sockRequest("GET", "/exec/"+execID+"/json", nil) - c.Assert(sc, checker.Equals, http.StatusNotFound, check.Commentf("received status != 404: %d\n%s", sc, body)) -} - -func (s *DockerSuite) TestLinksPingLinkedContainersOnRename(c *check.C) { - // Problematic on Windows as Windows does not support links - testRequires(c, DaemonIsLinux) - var out string - out, _ = dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") - idA := strings.TrimSpace(out) - c.Assert(idA, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be nil", out)) - out, _ = dockerCmd(c, "run", "-d", "--link", "container1:alias1", "--name", "container2", "busybox", "top") - idB := strings.TrimSpace(out) - c.Assert(idB, checker.Not(checker.Equals), "", check.Commentf("%s, id should not be nil", out)) - - dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") - dockerCmd(c, "rename", "container1", "container_new") - dockerCmd(c, "exec", "container2", "ping", "-c", "1", "alias1", "-W", "1") -} - -func (s *DockerSuite) TestRunMutableNetworkFiles(c *check.C) { - // Not applicable on Windows CI.
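TestExecInspectID above polls `docker inspect` in a bounded loop: check, sleep, give up after a fixed number of tries. The helper below is a hypothetical generalization of that inline loop (the names waitFor and errTimeout are invented for this sketch), shown only to make the retry structure explicit.

package main

import (
	"errors"
	"fmt"
	"time"
)

// errTimeout and waitFor do not exist in the suite; the tests inline this loop instead.
var errTimeout = errors.New("condition not met before the retry budget ran out")

func waitFor(tries int, interval time.Duration, cond func() bool) error {
	for i := 0; i < tries; i++ {
		if cond() {
			return nil
		}
		time.Sleep(interval)
	}
	return errTimeout
}

func main() {
	start := time.Now()
	// Stand-in condition: in the test this would inspect the container's ExecIDs.
	err := waitFor(10, 100*time.Millisecond, func() bool {
		return time.Since(start) > 300*time.Millisecond
	})
	fmt.Println(err) // <nil> once the condition holds within the budget
}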
- testRequires(c, SameHostDaemon, DaemonIsLinux) - for _, fn := range []string{"resolv.conf", "hosts"} { - deleteAllContainers() - - content, err := runCommandAndReadContainerFile(fn, exec.Command(dockerBinary, "run", "-d", "--name", "c1", "busybox", "sh", "-c", fmt.Sprintf("echo success >/etc/%s && top", fn))) - c.Assert(err, checker.IsNil) - - c.Assert(strings.TrimSpace(string(content)), checker.Equals, "success", check.Commentf("Content was not what was modified in the container", string(content))) - - out, _ := dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") - contID := strings.TrimSpace(out) - netFilePath := containerStorageFile(contID, fn) - - f, err := os.OpenFile(netFilePath, os.O_WRONLY|os.O_SYNC|os.O_APPEND, 0644) - c.Assert(err, checker.IsNil) - - if _, err := f.Seek(0, 0); err != nil { - f.Close() - c.Fatal(err) - } - - if err := f.Truncate(0); err != nil { - f.Close() - c.Fatal(err) - } - - if _, err := f.Write([]byte("success2\n")); err != nil { - f.Close() - c.Fatal(err) - } - f.Close() - - res, _ := dockerCmd(c, "exec", contID, "cat", "/etc/"+fn) - c.Assert(res, checker.Equals, "success2\n") - } -} - -func (s *DockerSuite) TestExecWithUser(c *check.C) { - // TODO Windows CI: This may be fixable in the future once Windows - // supports users - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") - - out, _ := dockerCmd(c, "exec", "-u", "1", "parent", "id") - c.Assert(out, checker.Contains, "uid=1(daemon) gid=1(daemon)") - - out, _ = dockerCmd(c, "exec", "-u", "root", "parent", "id") - c.Assert(out, checker.Contains, "uid=0(root) gid=0(root)", check.Commentf("exec with user by id expected daemon user got %s", out)) -} - -func (s *DockerSuite) TestExecWithPrivileged(c *check.C) { - // Not applicable on Windows - testRequires(c, DaemonIsLinux, NotUserNamespace) - // Start main loop which attempts mknod repeatedly - dockerCmd(c, "run", "-d", "--name", "parent", "--cap-drop=ALL", "busybox", "sh", "-c", `while (true); do if [ -e /exec_priv ]; then cat /exec_priv && mknod /tmp/sda b 8 0 && echo "Success"; else echo "Privileged exec has not run yet"; fi; usleep 10000; done`) - - // Check exec mknod doesn't work - cmd := exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdb b 8 16") - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.NotNil, check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) - c.Assert(out, checker.Contains, "Operation not permitted", check.Commentf("exec mknod in --cap-drop=ALL container without --privileged should fail")) - - // Check exec mknod does work with --privileged - cmd = exec.Command(dockerBinary, "exec", "--privileged", "parent", "sh", "-c", `echo "Running exec --privileged" > /exec_priv && mknod /tmp/sdb b 8 16 && usleep 50000 && echo "Finished exec --privileged" > /exec_priv && echo ok`) - out, _, err = runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil) - - actual := strings.TrimSpace(out) - c.Assert(actual, checker.Equals, "ok", check.Commentf("exec mknod in --cap-drop=ALL container with --privileged failed, output: %q", out)) - - // Check subsequent unprivileged exec cannot mknod - cmd = exec.Command(dockerBinary, "exec", "parent", "sh", "-c", "mknod /tmp/sdc b 8 32") - out, _, err = runCommandWithOutput(cmd) - c.Assert(err, checker.NotNil, check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) - c.Assert(out, checker.Contains, "Operation not 
permitted", check.Commentf("repeating exec mknod in --cap-drop=ALL container after --privileged without --privileged should fail")) - - // Confirm at no point was mknod allowed - logCmd := exec.Command(dockerBinary, "logs", "parent") - out, _, err = runCommandWithOutput(logCmd) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Not(checker.Contains), "Success") - -} - -func (s *DockerSuite) TestExecWithImageUser(c *check.C) { - // Not applicable on Windows - testRequires(c, DaemonIsLinux) - name := "testbuilduser" - _, err := buildImage(name, - `FROM busybox - RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd - USER dockerio`, - true) - c.Assert(err, checker.IsNil) - - dockerCmd(c, "run", "-d", "--name", "dockerioexec", name, "top") - - out, _ := dockerCmd(c, "exec", "dockerioexec", "whoami") - c.Assert(out, checker.Contains, "dockerio", check.Commentf("exec with user by id expected dockerio user got %s", out)) -} - -func (s *DockerSuite) TestExecOnReadonlyContainer(c *check.C) { - // Windows does not support read-only - // --read-only + userns has remount issues - testRequires(c, DaemonIsLinux, NotUserNamespace) - dockerCmd(c, "run", "-d", "--read-only", "--name", "parent", "busybox", "top") - dockerCmd(c, "exec", "parent", "true") -} - -func (s *DockerSuite) TestExecUlimits(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "testexeculimits" - runSleepingContainer(c, "-d", "--ulimit", "nproc=21", "--name", name) - c.Assert(waitRun(name), checker.IsNil) - - out, _, err := dockerCmdWithError("exec", name, "sh", "-c", "ulimit -p") - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Equals, "21") -} - -// #15750 -func (s *DockerSuite) TestExecStartFails(c *check.C) { - // TODO Windows CI. This test should be portable. Figure out why it fails - // currently. 
- testRequires(c, DaemonIsLinux) - name := "exec-15750" - runSleepingContainer(c, "-d", "--name", name) - c.Assert(waitRun(name), checker.IsNil) - - out, _, err := dockerCmdWithError("exec", name, "no-such-cmd") - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "executable file not found") -} diff --git a/integration-cli/docker_cli_exec_unix_test.go b/integration-cli/docker_cli_exec_unix_test.go deleted file mode 100644 index 42db4091dd..0000000000 --- a/integration-cli/docker_cli_exec_unix_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// +build !windows,!test_no_exec - -package main - -import ( - "bytes" - "io" - "os/exec" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" - "github.com/kr/pty" -) - -// regression test for #12546 -func (s *DockerSuite) TestExecInteractiveStdinClose(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-itd", "busybox", "/bin/cat") - contID := strings.TrimSpace(out) - - cmd := exec.Command(dockerBinary, "exec", "-i", contID, "echo", "-n", "hello") - p, err := pty.Start(cmd) - c.Assert(err, checker.IsNil) - - b := bytes.NewBuffer(nil) - go io.Copy(b, p) - - ch := make(chan error) - go func() { ch <- cmd.Wait() }() - - select { - case err := <-ch: - c.Assert(err, checker.IsNil) - output := b.String() - c.Assert(strings.TrimSpace(output), checker.Equals, "hello") - case <-time.After(5 * time.Second): - c.Fatal("timed out running docker exec") - } -} - -func (s *DockerSuite) TestExecTTY(c *check.C) { - testRequires(c, DaemonIsLinux, SameHostDaemon) - dockerCmd(c, "run", "-d", "--name=test", "busybox", "sh", "-c", "echo hello > /foo && top") - - cmd := exec.Command(dockerBinary, "exec", "-it", "test", "sh") - p, err := pty.Start(cmd) - c.Assert(err, checker.IsNil) - defer p.Close() - - _, err = p.Write([]byte("cat /foo && exit\n")) - c.Assert(err, checker.IsNil) - - chErr := make(chan error) - go func() { - chErr <- cmd.Wait() - }() - select { - case err := <-chErr: - c.Assert(err, checker.IsNil) - case <-time.After(3 * time.Second): - c.Fatal("timeout waiting for exec to exit") - } - - buf := make([]byte, 256) - read, err := p.Read(buf) - c.Assert(err, checker.IsNil) - c.Assert(bytes.Contains(buf, []byte("hello")), checker.Equals, true, check.Commentf(string(buf[:read]))) -} diff --git a/integration-cli/docker_cli_experimental_test.go b/integration-cli/docker_cli_experimental_test.go deleted file mode 100644 index 8795078f64..0000000000 --- a/integration-cli/docker_cli_experimental_test.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build experimental - -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" - "strings" -) - -func (s *DockerSuite) TestExperimentalVersion(c *check.C) { - out, _ := dockerCmd(c, "version") - for _, line := range strings.Split(out, "\n") { - if strings.HasPrefix(line, "Experimental (client):") || strings.HasPrefix(line, "Experimental (server):") { - c.Assert(line, checker.Matches, "*true") - } - } - - out, _ = dockerCmd(c, "-v") - c.Assert(out, checker.Contains, ", experimental", check.Commentf("docker version did not contain experimental")) -} diff --git a/integration-cli/docker_cli_export_import_test.go b/integration-cli/docker_cli_export_import_test.go deleted file mode 100644 index 069dc08162..0000000000 --- a/integration-cli/docker_cli_export_import_test.go +++ /dev/null @@ -1,49 +0,0 @@ -package main - -import ( - "os" - "os/exec" - "strings" - - 
"github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// export an image and try to import it into a new one -func (s *DockerSuite) TestExportContainerAndImportImage(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := "testexportcontainerandimportimage" - - dockerCmd(c, "run", "--name", containerID, "busybox", "true") - - out, _ := dockerCmd(c, "export", containerID) - - importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") - importCmd.Stdin = strings.NewReader(out) - out, _, err := runCommandWithOutput(importCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) - - cleanedImageID := strings.TrimSpace(out) - c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) -} - -// Used to test output flag in the export command -func (s *DockerSuite) TestExportContainerWithOutputAndImportImage(c *check.C) { - testRequires(c, DaemonIsLinux) - containerID := "testexportcontainerwithoutputandimportimage" - - dockerCmd(c, "run", "--name", containerID, "busybox", "true") - dockerCmd(c, "export", "--output=testexp.tar", containerID) - defer os.Remove("testexp.tar") - - out, _, err := runCommandWithOutput(exec.Command("cat", "testexp.tar")) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - importCmd := exec.Command(dockerBinary, "import", "-", "repo/testexp:v1") - importCmd.Stdin = strings.NewReader(out) - out, _, err = runCommandWithOutput(importCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to import image repo/testexp:v1: %s", out)) - - cleanedImageID := strings.TrimSpace(out) - c.Assert(cleanedImageID, checker.Not(checker.Equals), "", check.Commentf("output should have been an image id")) -} diff --git a/integration-cli/docker_cli_external_graphdriver_unix_test.go b/integration-cli/docker_cli_external_graphdriver_unix_test.go deleted file mode 100644 index 771996bb11..0000000000 --- a/integration-cli/docker_cli_external_graphdriver_unix_test.go +++ /dev/null @@ -1,399 +0,0 @@ -// +build experimental -// +build !windows - -package main - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "strings" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/daemon/graphdriver/vfs" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/plugins" - "github.com/go-check/check" -) - -func init() { - check.Suite(&DockerExternalGraphdriverSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerExternalGraphdriverSuite struct { - server *httptest.Server - jserver *httptest.Server - ds *DockerSuite - d *Daemon - ec map[string]*graphEventsCounter -} - -type graphEventsCounter struct { - activations int - creations int - removals int - gets int - puts int - stats int - cleanups int - exists int - init int - metadata int - diff int - applydiff int - changes int - diffsize int -} - -func (s *DockerExternalGraphdriverSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) -} - -func (s *DockerExternalGraphdriverSuite) TearDownTest(c *check.C) { - s.d.Stop() - s.ds.TearDownTest(c) -} - -func (s *DockerExternalGraphdriverSuite) SetUpSuite(c *check.C) { - s.ec = make(map[string]*graphEventsCounter) - s.setUpPluginViaSpecFile(c) - s.setUpPluginViaJSONFile(c) -} - -func (s *DockerExternalGraphdriverSuite) setUpPluginViaSpecFile(c *check.C) { - mux := http.NewServeMux() - s.server = httptest.NewServer(mux) - - s.setUpPlugin(c, "test-external-graph-driver", 
"spec", mux, []byte(s.server.URL)) -} - -func (s *DockerExternalGraphdriverSuite) setUpPluginViaJSONFile(c *check.C) { - mux := http.NewServeMux() - s.jserver = httptest.NewServer(mux) - - p := plugins.NewLocalPlugin("json-external-graph-driver", s.jserver.URL) - b, err := json.Marshal(p) - c.Assert(err, check.IsNil) - - s.setUpPlugin(c, "json-external-graph-driver", "json", mux, b) -} - -func (s *DockerExternalGraphdriverSuite) setUpPlugin(c *check.C, name string, ext string, mux *http.ServeMux, b []byte) { - type graphDriverRequest struct { - ID string `json:",omitempty"` - Parent string `json:",omitempty"` - MountLabel string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - } - - type graphDriverResponse struct { - Err error `json:",omitempty"` - Dir string `json:",omitempty"` - Exists bool `json:",omitempty"` - Status [][2]string `json:",omitempty"` - Metadata map[string]string `json:",omitempty"` - Changes []archive.Change `json:",omitempty"` - Size int64 `json:",omitempty"` - } - - respond := func(w http.ResponseWriter, data interface{}) { - w.Header().Set("Content-Type", "appplication/vnd.docker.plugins.v1+json") - switch t := data.(type) { - case error: - fmt.Fprintln(w, fmt.Sprintf(`{"Err": %q}`, t.Error())) - case string: - fmt.Fprintln(w, t) - default: - json.NewEncoder(w).Encode(&data) - } - } - - decReq := func(b io.ReadCloser, out interface{}, w http.ResponseWriter) error { - defer b.Close() - if err := json.NewDecoder(b).Decode(&out); err != nil { - http.Error(w, fmt.Sprintf("error decoding json: %s", err.Error()), 500) - } - return nil - } - - base, err := ioutil.TempDir("", name) - c.Assert(err, check.IsNil) - vfsProto, err := vfs.Init(base, []string{}, nil, nil) - c.Assert(err, check.IsNil, check.Commentf("error initializing graph driver")) - driver := graphdriver.NewNaiveDiffDriver(vfsProto, nil, nil) - - s.ec[ext] = &graphEventsCounter{} - mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].activations++ - respond(w, `{"Implements": ["GraphDriver"]}`) - }) - - mux.HandleFunc("/GraphDriver.Init", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].init++ - respond(w, "{}") - }) - - mux.HandleFunc("/GraphDriver.CreateReadWrite", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].creations++ - - var req graphDriverRequest - if err := decReq(r.Body, &req, w); err != nil { - return - } - if err := driver.CreateReadWrite(req.ID, req.Parent, "", nil); err != nil { - respond(w, err) - return - } - respond(w, "{}") - }) - - mux.HandleFunc("/GraphDriver.Create", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].creations++ - - var req graphDriverRequest - if err := decReq(r.Body, &req, w); err != nil { - return - } - if err := driver.Create(req.ID, req.Parent, "", nil); err != nil { - respond(w, err) - return - } - respond(w, "{}") - }) - - mux.HandleFunc("/GraphDriver.Remove", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].removals++ - - var req graphDriverRequest - if err := decReq(r.Body, &req, w); err != nil { - return - } - - if err := driver.Remove(req.ID); err != nil { - respond(w, err) - return - } - respond(w, "{}") - }) - - mux.HandleFunc("/GraphDriver.Get", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].gets++ - - var req graphDriverRequest - if err := decReq(r.Body, &req, w); err != nil { - return - } - - dir, err := driver.Get(req.ID, req.MountLabel) - if err != nil { - respond(w, err) - return - } - respond(w, &graphDriverResponse{Dir: dir}) - }) - - 
mux.HandleFunc("/GraphDriver.Put", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].puts++ - - var req graphDriverRequest - if err := decReq(r.Body, &req, w); err != nil { - return - } - - if err := driver.Put(req.ID); err != nil { - respond(w, err) - return - } - respond(w, "{}") - }) - - mux.HandleFunc("/GraphDriver.Exists", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].exists++ - - var req graphDriverRequest - if err := decReq(r.Body, &req, w); err != nil { - return - } - respond(w, &graphDriverResponse{Exists: driver.Exists(req.ID)}) - }) - - mux.HandleFunc("/GraphDriver.Status", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].stats++ - respond(w, &graphDriverResponse{Status: driver.Status()}) - }) - - mux.HandleFunc("/GraphDriver.Cleanup", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].cleanups++ - err := driver.Cleanup() - if err != nil { - respond(w, err) - return - } - respond(w, `{}`) - }) - - mux.HandleFunc("/GraphDriver.GetMetadata", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].metadata++ - - var req graphDriverRequest - if err := decReq(r.Body, &req, w); err != nil { - return - } - - data, err := driver.GetMetadata(req.ID) - if err != nil { - respond(w, err) - return - } - respond(w, &graphDriverResponse{Metadata: data}) - }) - - mux.HandleFunc("/GraphDriver.Diff", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].diff++ - - var req graphDriverRequest - if err := decReq(r.Body, &req, w); err != nil { - return - } - - diff, err := driver.Diff(req.ID, req.Parent) - if err != nil { - respond(w, err) - return - } - io.Copy(w, diff) - }) - - mux.HandleFunc("/GraphDriver.Changes", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].changes++ - var req graphDriverRequest - if err := decReq(r.Body, &req, w); err != nil { - return - } - - changes, err := driver.Changes(req.ID, req.Parent) - if err != nil { - respond(w, err) - return - } - respond(w, &graphDriverResponse{Changes: changes}) - }) - - mux.HandleFunc("/GraphDriver.ApplyDiff", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].applydiff++ - var diff archive.Reader = r.Body - defer r.Body.Close() - - id := r.URL.Query().Get("id") - parent := r.URL.Query().Get("parent") - - if id == "" { - http.Error(w, fmt.Sprintf("missing id"), 409) - } - - size, err := driver.ApplyDiff(id, parent, diff) - if err != nil { - respond(w, err) - return - } - respond(w, &graphDriverResponse{Size: size}) - }) - - mux.HandleFunc("/GraphDriver.DiffSize", func(w http.ResponseWriter, r *http.Request) { - s.ec[ext].diffsize++ - - var req graphDriverRequest - if err := decReq(r.Body, &req, w); err != nil { - return - } - - size, err := driver.DiffSize(req.ID, req.Parent) - if err != nil { - respond(w, err) - return - } - respond(w, &graphDriverResponse{Size: size}) - }) - - err = os.MkdirAll("/etc/docker/plugins", 0755) - c.Assert(err, check.IsNil, check.Commentf("error creating /etc/docker/plugins")) - - specFile := "/etc/docker/plugins/" + name + "." 
+ ext - err = ioutil.WriteFile(specFile, b, 0644) - c.Assert(err, check.IsNil, check.Commentf("error writing to %s", specFile)) -} - -func (s *DockerExternalGraphdriverSuite) TearDownSuite(c *check.C) { - s.server.Close() - s.jserver.Close() - - err := os.RemoveAll("/etc/docker/plugins") - c.Assert(err, check.IsNil, check.Commentf("error removing /etc/docker/plugins")) -} - -func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriver(c *check.C) { - s.testExternalGraphDriver("test-external-graph-driver", "spec", c) - s.testExternalGraphDriver("json-external-graph-driver", "json", c) -} - -func (s *DockerExternalGraphdriverSuite) testExternalGraphDriver(name string, ext string, c *check.C) { - if err := s.d.StartWithBusybox("-s", name); err != nil { - b, _ := ioutil.ReadFile(s.d.LogFileName()) - c.Assert(err, check.IsNil, check.Commentf("\n%s", string(b))) - } - - out, err := s.d.Cmd("run", "--name=graphtest", "busybox", "sh", "-c", "echo hello > /hello") - c.Assert(err, check.IsNil, check.Commentf(out)) - - err = s.d.Restart("-s", name) - - out, err = s.d.Cmd("inspect", "--format='{{.GraphDriver.Name}}'", "graphtest") - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(strings.TrimSpace(out), check.Equals, name) - - out, err = s.d.Cmd("diff", "graphtest") - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(strings.Contains(out, "A /hello"), check.Equals, true, check.Commentf("diff output: %s", out)) - - out, err = s.d.Cmd("rm", "-f", "graphtest") - c.Assert(err, check.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("info") - c.Assert(err, check.IsNil, check.Commentf(out)) - - err = s.d.Stop() - c.Assert(err, check.IsNil) - - // Don't check s.ec.exists, because the daemon no longer calls the - // Exists function. - c.Assert(s.ec[ext].activations, check.Equals, 2) - c.Assert(s.ec[ext].init, check.Equals, 2) - c.Assert(s.ec[ext].creations >= 1, check.Equals, true) - c.Assert(s.ec[ext].removals >= 1, check.Equals, true) - c.Assert(s.ec[ext].gets >= 1, check.Equals, true) - c.Assert(s.ec[ext].puts >= 1, check.Equals, true) - c.Assert(s.ec[ext].stats, check.Equals, 3) - c.Assert(s.ec[ext].cleanups, check.Equals, 2) - c.Assert(s.ec[ext].applydiff >= 1, check.Equals, true) - c.Assert(s.ec[ext].changes, check.Equals, 1) - c.Assert(s.ec[ext].diffsize, check.Equals, 0) - c.Assert(s.ec[ext].diff, check.Equals, 0) - c.Assert(s.ec[ext].metadata, check.Equals, 1) -} - -func (s *DockerExternalGraphdriverSuite) TestExternalGraphDriverPull(c *check.C) { - testRequires(c, Network) - c.Assert(s.d.Start(), check.IsNil) - - out, err := s.d.Cmd("pull", "busybox:latest") - c.Assert(err, check.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil, check.Commentf(out)) -} diff --git a/integration-cli/docker_cli_external_volume_driver_unix_test.go b/integration-cli/docker_cli_external_volume_driver_unix_test.go deleted file mode 100644 index 726534597d..0000000000 --- a/integration-cli/docker_cli_external_volume_driver_unix_test.go +++ /dev/null @@ -1,522 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/volume" - "github.com/docker/engine-api/types" - "github.com/go-check/check" -) - -func init() { - check.Suite(&DockerExternalVolumeSuite{ - ds: &DockerSuite{}, - }) -} - -type eventCounter struct { - 
activations int - creations int - removals int - mounts int - unmounts int - paths int - lists int - gets int - caps int -} - -type DockerExternalVolumeSuite struct { - server *httptest.Server - ds *DockerSuite - d *Daemon - ec *eventCounter -} - -func (s *DockerExternalVolumeSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) - s.ec = &eventCounter{} -} - -func (s *DockerExternalVolumeSuite) TearDownTest(c *check.C) { - s.d.Stop() - s.ds.TearDownTest(c) -} - -func (s *DockerExternalVolumeSuite) SetUpSuite(c *check.C) { - mux := http.NewServeMux() - s.server = httptest.NewServer(mux) - - type pluginRequest struct { - Name string - Opts map[string]string - ID string - } - - type pluginResp struct { - Mountpoint string `json:",omitempty"` - Err string `json:",omitempty"` - } - - type vol struct { - Name string - Mountpoint string - Ninja bool // hack used to trigger a null volume return on `Get` - Status map[string]interface{} - } - var volList []vol - - read := func(b io.ReadCloser) (pluginRequest, error) { - defer b.Close() - var pr pluginRequest - if err := json.NewDecoder(b).Decode(&pr); err != nil { - return pr, err - } - return pr, nil - } - - send := func(w http.ResponseWriter, data interface{}) { - switch t := data.(type) { - case error: - http.Error(w, t.Error(), 500) - case string: - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintln(w, t) - default: - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - json.NewEncoder(w).Encode(&data) - } - } - - mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { - s.ec.activations++ - send(w, `{"Implements": ["VolumeDriver"]}`) - }) - - mux.HandleFunc("/VolumeDriver.Create", func(w http.ResponseWriter, r *http.Request) { - s.ec.creations++ - pr, err := read(r.Body) - if err != nil { - send(w, err) - return - } - _, isNinja := pr.Opts["ninja"] - status := map[string]interface{}{"Hello": "world"} - volList = append(volList, vol{Name: pr.Name, Ninja: isNinja, Status: status}) - send(w, nil) - }) - - mux.HandleFunc("/VolumeDriver.List", func(w http.ResponseWriter, r *http.Request) { - s.ec.lists++ - vols := []vol{} - for _, v := range volList { - if v.Ninja { - continue - } - vols = append(vols, v) - } - send(w, map[string][]vol{"Volumes": vols}) - }) - - mux.HandleFunc("/VolumeDriver.Get", func(w http.ResponseWriter, r *http.Request) { - s.ec.gets++ - pr, err := read(r.Body) - if err != nil { - send(w, err) - return - } - - for _, v := range volList { - if v.Name == pr.Name { - if v.Ninja { - send(w, map[string]vol{}) - return - } - - v.Mountpoint = hostVolumePath(pr.Name) - send(w, map[string]vol{"Volume": v}) - return - } - } - send(w, `{"Err": "no such volume"}`) - }) - - mux.HandleFunc("/VolumeDriver.Remove", func(w http.ResponseWriter, r *http.Request) { - s.ec.removals++ - pr, err := read(r.Body) - if err != nil { - send(w, err) - return - } - - for i, v := range volList { - if v.Name == pr.Name { - if err := os.RemoveAll(hostVolumePath(v.Name)); err != nil { - send(w, &pluginResp{Err: err.Error()}) - return - } - volList = append(volList[:i], volList[i+1:]...) 
- break - } - } - send(w, nil) - }) - - mux.HandleFunc("/VolumeDriver.Path", func(w http.ResponseWriter, r *http.Request) { - s.ec.paths++ - - pr, err := read(r.Body) - if err != nil { - send(w, err) - return - } - p := hostVolumePath(pr.Name) - send(w, &pluginResp{Mountpoint: p}) - }) - - mux.HandleFunc("/VolumeDriver.Mount", func(w http.ResponseWriter, r *http.Request) { - s.ec.mounts++ - - pr, err := read(r.Body) - if err != nil { - send(w, err) - return - } - - p := hostVolumePath(pr.Name) - if err := os.MkdirAll(p, 0755); err != nil { - send(w, &pluginResp{Err: err.Error()}) - return - } - - if err := ioutil.WriteFile(filepath.Join(p, "test"), []byte(s.server.URL), 0644); err != nil { - send(w, err) - return - } - - if err := ioutil.WriteFile(filepath.Join(p, "mountID"), []byte(pr.ID), 0644); err != nil { - send(w, err) - return - } - - send(w, &pluginResp{Mountpoint: p}) - }) - - mux.HandleFunc("/VolumeDriver.Unmount", func(w http.ResponseWriter, r *http.Request) { - s.ec.unmounts++ - - _, err := read(r.Body) - if err != nil { - send(w, err) - return - } - - send(w, nil) - }) - - mux.HandleFunc("/VolumeDriver.Capabilities", func(w http.ResponseWriter, r *http.Request) { - s.ec.caps++ - - _, err := read(r.Body) - if err != nil { - send(w, err) - return - } - - send(w, `{"Capabilities": { "Scope": "global" }}`) - }) - - err := os.MkdirAll("/etc/docker/plugins", 0755) - c.Assert(err, checker.IsNil) - - err = ioutil.WriteFile("/etc/docker/plugins/test-external-volume-driver.spec", []byte(s.server.URL), 0644) - c.Assert(err, checker.IsNil) -} - -func (s *DockerExternalVolumeSuite) TearDownSuite(c *check.C) { - s.server.Close() - - err := os.RemoveAll("/etc/docker/plugins") - c.Assert(err, checker.IsNil) -} - -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverNamed(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, s.server.URL) - - _, err = s.d.Cmd("volume", "rm", "external-volume-test") - c.Assert(err, checker.IsNil) - - p := hostVolumePath("external-volume-test") - _, err = os.Lstat(p) - c.Assert(err, checker.NotNil) - c.Assert(os.IsNotExist(err), checker.True, check.Commentf("Expected volume path in host to not exist: %s, %v\n", p, err)) - - c.Assert(s.ec.activations, checker.Equals, 1) - c.Assert(s.ec.creations, checker.Equals, 1) - c.Assert(s.ec.removals, checker.Equals, 1) - c.Assert(s.ec.mounts, checker.Equals, 1) - c.Assert(s.ec.unmounts, checker.Equals, 1) -} - -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverUnnamed(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - out, err := s.d.Cmd("run", "--rm", "--name", "test-data", "-v", "/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, s.server.URL) - - c.Assert(s.ec.activations, checker.Equals, 1) - c.Assert(s.ec.creations, checker.Equals, 1) - c.Assert(s.ec.removals, checker.Equals, 1) - c.Assert(s.ec.mounts, checker.Equals, 1) - c.Assert(s.ec.unmounts, checker.Equals, 1) -} - -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverVolumesFrom(c *check.C) { - err := 
s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest") - c.Assert(err, checker.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("run", "--rm", "--volumes-from", "vol-test1", "--name", "vol-test2", "busybox", "ls", "/tmp") - c.Assert(err, checker.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("rm", "-fv", "vol-test1") - c.Assert(err, checker.IsNil, check.Commentf(out)) - - c.Assert(s.ec.activations, checker.Equals, 1) - c.Assert(s.ec.creations, checker.Equals, 1) - c.Assert(s.ec.removals, checker.Equals, 1) - c.Assert(s.ec.mounts, checker.Equals, 2) - c.Assert(s.ec.unmounts, checker.Equals, 2) -} - -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverDeleteContainer(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - out, err := s.d.Cmd("run", "--name", "vol-test1", "-v", "/foo", "--volume-driver", "test-external-volume-driver", "busybox:latest") - c.Assert(err, checker.IsNil, check.Commentf(out)) - - out, err = s.d.Cmd("rm", "-fv", "vol-test1") - c.Assert(err, checker.IsNil, check.Commentf(out)) - - c.Assert(s.ec.activations, checker.Equals, 1) - c.Assert(s.ec.creations, checker.Equals, 1) - c.Assert(s.ec.removals, checker.Equals, 1) - c.Assert(s.ec.mounts, checker.Equals, 1) - c.Assert(s.ec.unmounts, checker.Equals, 1) -} - -func hostVolumePath(name string) string { - return fmt.Sprintf("/var/lib/docker/volumes/%s", name) -} - -// Make sure a request to use a down driver doesn't block other requests -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverLookupNotBlocked(c *check.C) { - specPath := "/etc/docker/plugins/down-driver.spec" - err := ioutil.WriteFile(specPath, []byte("tcp://127.0.0.7:9999"), 0644) - c.Assert(err, check.IsNil) - defer os.RemoveAll(specPath) - - chCmd1 := make(chan struct{}) - chCmd2 := make(chan error) - cmd1 := exec.Command(dockerBinary, "volume", "create", "-d", "down-driver") - cmd2 := exec.Command(dockerBinary, "volume", "create") - - c.Assert(cmd1.Start(), checker.IsNil) - defer cmd1.Process.Kill() - time.Sleep(100 * time.Millisecond) // ensure API has been called - c.Assert(cmd2.Start(), checker.IsNil) - - go func() { - cmd1.Wait() - close(chCmd1) - }() - go func() { - chCmd2 <- cmd2.Wait() - }() - - select { - case <-chCmd1: - cmd2.Process.Kill() - c.Fatalf("volume create with down driver finished unexpectedly") - case err := <-chCmd2: - c.Assert(err, checker.IsNil) - case <-time.After(5 * time.Second): - cmd2.Process.Kill() - c.Fatal("volume creates are blocked by previous create requests when previous driver is down") - } -} - -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverRetryNotImmediatelyExists(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - specPath := "/etc/docker/plugins/test-external-volume-driver-retry.spec" - os.RemoveAll(specPath) - defer os.RemoveAll(specPath) - - errchan := make(chan error) - go func() { - if out, err := s.d.Cmd("run", "--rm", "--name", "test-data-retry", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver-retry", "busybox:latest"); err != nil { - errchan <- fmt.Errorf("%v:\n%s", err, out) - } - close(errchan) - }() - go func() { - // wait for a retry to occur, then create spec to allow plugin to register - time.Sleep(2000 * time.Millisecond) - // no need to check for an error here since it will get picked up by the timeout later - 
ioutil.WriteFile(specPath, []byte(s.server.URL), 0644) - }() - - select { - case err := <-errchan: - c.Assert(err, checker.IsNil) - case <-time.After(8 * time.Second): - c.Fatal("volume creates fail when plugin not immediately available") - } - - _, err = s.d.Cmd("volume", "rm", "external-volume-test") - c.Assert(err, checker.IsNil) - - c.Assert(s.ec.activations, checker.Equals, 1) - c.Assert(s.ec.creations, checker.Equals, 1) - c.Assert(s.ec.removals, checker.Equals, 1) - c.Assert(s.ec.mounts, checker.Equals, 1) - c.Assert(s.ec.unmounts, checker.Equals, 1) -} - -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverBindExternalVolume(c *check.C) { - dockerCmd(c, "volume", "create", "-d", "test-external-volume-driver", "--name", "foo") - dockerCmd(c, "run", "-d", "--name", "testing", "-v", "foo:/bar", "busybox", "top") - - var mounts []struct { - Name string - Driver string - } - out := inspectFieldJSON(c, "testing", "Mounts") - c.Assert(json.NewDecoder(strings.NewReader(out)).Decode(&mounts), checker.IsNil) - c.Assert(len(mounts), checker.Equals, 1, check.Commentf(out)) - c.Assert(mounts[0].Name, checker.Equals, "foo") - c.Assert(mounts[0].Driver, checker.Equals, "test-external-volume-driver") -} - -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverList(c *check.C) { - dockerCmd(c, "volume", "create", "-d", "test-external-volume-driver", "--name", "abc3") - out, _ := dockerCmd(c, "volume", "ls") - ls := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(ls), check.Equals, 2, check.Commentf("\n%s", out)) - - vol := strings.Fields(ls[len(ls)-1]) - c.Assert(len(vol), check.Equals, 2, check.Commentf("%v", vol)) - c.Assert(vol[0], check.Equals, "test-external-volume-driver") - c.Assert(vol[1], check.Equals, "abc3") - - c.Assert(s.ec.lists, check.Equals, 1) -} - -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGet(c *check.C) { - out, _, err := dockerCmdWithError("volume", "inspect", "dummy") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(s.ec.gets, check.Equals, 1) - c.Assert(out, checker.Contains, "No such volume") - - dockerCmd(c, "volume", "create", "--name", "test", "-d", "test-external-volume-driver") - out, _ = dockerCmd(c, "volume", "inspect", "test") - - type vol struct { - Status map[string]string - } - var st []vol - - c.Assert(json.Unmarshal([]byte(out), &st), checker.IsNil) - c.Assert(st, checker.HasLen, 1) - c.Assert(st[0].Status, checker.HasLen, 1, check.Commentf("%v", st[0])) - c.Assert(st[0].Status["Hello"], checker.Equals, "world", check.Commentf("%v", st[0].Status)) -} - -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverWithDaemnRestart(c *check.C) { - dockerCmd(c, "volume", "create", "-d", "test-external-volume-driver", "--name", "abc1") - err := s.d.Restart() - c.Assert(err, checker.IsNil) - - dockerCmd(c, "run", "--name=test", "-v", "abc1:/foo", "busybox", "true") - var mounts []types.MountPoint - inspectFieldAndMarshall(c, "test", "Mounts", &mounts) - c.Assert(mounts, checker.HasLen, 1) - c.Assert(mounts[0].Driver, checker.Equals, "test-external-volume-driver") -} - -// Ensures that the daemon handles the case where the plugin responds to a `Get` request with a null volume and a null error. -// Previously the daemon would panic in this scenario.
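These tests register the mock driver through Docker's file-based plugin discovery: a <name>.spec file under /etc/docker/plugins whose entire content is the plugin's address. The sketch below demonstrates only that file format; it writes into a temporary directory so it runs without root, and the driver name and address are made up.

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	// The real discovery directory is /etc/docker/plugins; a temp dir keeps
	// this sketch runnable without root.
	dir, err := os.MkdirTemp("", "plugins")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	// A spec file's entire content is the plugin's address (URL or socket path).
	spec := filepath.Join(dir, "my-volume-driver.spec")
	if err := os.WriteFile(spec, []byte("tcp://127.0.0.1:9999"), 0644); err != nil {
		panic(err)
	}

	b, err := os.ReadFile(spec)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s -> %s\n", filepath.Base(spec), b)
}

This is also what TestExternalVolumeDriverRetryNotImmediatelyExists exploits: the daemon retries discovery, so writing the spec file late is enough to let a pending volume create succeed.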
-func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverGetEmptyResponse(c *check.C) { - dockerCmd(c, "volume", "create", "-d", "test-external-volume-driver", "--name", "abc2", "--opt", "ninja=1") - out, _, err := dockerCmdWithError("volume", "inspect", "abc2") - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "No such volume") -} - -// Ensure only cached paths are used in volume list to prevent N+1 calls to `VolumeDriver.Path` -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverPathCalls(c *check.C) { - c.Assert(s.d.Start(), checker.IsNil) - c.Assert(s.ec.paths, checker.Equals, 0) - - out, err := s.d.Cmd("volume", "create", "--name=test", "--driver=test-external-volume-driver") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(s.ec.paths, checker.Equals, 1) - - out, err = s.d.Cmd("volume", "ls") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(s.ec.paths, checker.Equals, 1) - - out, err = s.d.Cmd("volume", "inspect", "--format='{{.Mountpoint}}'", "test") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - c.Assert(s.ec.paths, checker.Equals, 1) -} - -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverMountID(c *check.C) { - err := s.d.StartWithBusybox() - c.Assert(err, checker.IsNil) - - out, err := s.d.Cmd("run", "--rm", "-v", "external-volume-test:/tmp/external-volume-test", "--volume-driver", "test-external-volume-driver", "busybox:latest", "cat", "/tmp/external-volume-test/test") - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") -} - -// Check that VolumeDriver.Capabilities gets called, and only called once -func (s *DockerExternalVolumeSuite) TestExternalVolumeDriverCapabilities(c *check.C) { - c.Assert(s.d.Start(), checker.IsNil) - c.Assert(s.ec.caps, checker.Equals, 0) - - for i := 0; i < 3; i++ { - out, err := s.d.Cmd("volume", "create", "-d", "test-external-volume-driver", "--name", fmt.Sprintf("test%d", i)) - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(s.ec.caps, checker.Equals, 1) - out, err = s.d.Cmd("volume", "inspect", "--format={{.Scope}}", fmt.Sprintf("test%d", i)) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Equals, volume.GlobalScope) - } -} diff --git a/integration-cli/docker_cli_health_test.go b/integration-cli/docker_cli_health_test.go deleted file mode 100644 index 28ef47dcc2..0000000000 --- a/integration-cli/docker_cli_health_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package main - -import ( - "encoding/json" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/go-check/check" - "strconv" - "strings" - "time" -) - -func waitForStatus(c *check.C, name string, prev string, expected string) { - prev = prev + "\n" - expected = expected + "\n" - for { - out, _ := dockerCmd(c, "inspect", "--format={{.State.Status}}", name) - if out == expected { - return - } - c.Check(out, checker.Equals, prev) - if out != prev { - return - } - time.Sleep(100 * time.Millisecond) - } -} - -func waitForHealthStatus(c *check.C, name string, prev string, expected string) { - prev = prev + "\n" - expected = expected + "\n" - for { - out, _ := dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) - if out == expected { - return - } - c.Check(out, checker.Equals, prev) - if out != prev { - return - } - time.Sleep(100 * time.Millisecond) - } -} - -func getHealth(c 
*check.C, name string) *types.Health { - out, _ := dockerCmd(c, "inspect", "--format={{json .State.Health}}", name) - var health types.Health - err := json.Unmarshal([]byte(out), &health) - c.Check(err, checker.Equals, nil) - return &health -} - -func (s *DockerSuite) TestHealth(c *check.C) { - testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows - - imageName := "testhealth" - _, err := buildImage(imageName, - `FROM busybox - RUN echo OK > /status - CMD ["/bin/sleep", "120"] - STOPSIGNAL SIGKILL - HEALTHCHECK --interval=1s --timeout=30s \ - CMD cat /status`, - true) - - c.Check(err, check.IsNil) - - // No health status before starting - name := "test_health" - dockerCmd(c, "create", "--name", name, imageName) - out, _ := dockerCmd(c, "ps", "-a", "--format={{.Status}}") - c.Check(out, checker.Equals, "Created\n") - - // Inspect the options - out, _ = dockerCmd(c, "inspect", - "--format='timeout={{.Config.Healthcheck.Timeout}} "+ - "interval={{.Config.Healthcheck.Interval}} "+ - "retries={{.Config.Healthcheck.Retries}} "+ - "test={{.Config.Healthcheck.Test}}'", name) - c.Check(out, checker.Equals, "timeout=30s interval=1s retries=0 test=[CMD-SHELL cat /status]\n") - - // Start - dockerCmd(c, "start", name) - waitForHealthStatus(c, name, "starting", "healthy") - - // Make it fail - dockerCmd(c, "exec", name, "rm", "/status") - waitForHealthStatus(c, name, "healthy", "unhealthy") - - // Inspect the status - out, _ = dockerCmd(c, "inspect", "--format={{.State.Health.Status}}", name) - c.Check(out, checker.Equals, "unhealthy\n") - - // Make it healthy again - dockerCmd(c, "exec", name, "touch", "/status") - waitForHealthStatus(c, name, "unhealthy", "healthy") - - // Remove container - dockerCmd(c, "rm", "-f", name) - - // Disable the check from the CLI - out, _ = dockerCmd(c, "create", "--name=noh", "--no-healthcheck", imageName) - out, _ = dockerCmd(c, "inspect", "--format={{.Config.Healthcheck.Test}}", "noh") - c.Check(out, checker.Equals, "[NONE]\n") - dockerCmd(c, "rm", "noh") - - // Disable the check with a new build - _, err = buildImage("no_healthcheck", - `FROM testhealth - HEALTHCHECK NONE`, true) - c.Check(err, check.IsNil) - - out, _ = dockerCmd(c, "inspect", "--format={{.ContainerConfig.Healthcheck.Test}}", "no_healthcheck") - c.Check(out, checker.Equals, "[NONE]\n") - - // Enable the checks from the CLI - _, _ = dockerCmd(c, "run", "-d", "--name=fatal_healthcheck", - "--health-interval=0.5s", - "--health-retries=3", - "--health-cmd=cat /status", - "no_healthcheck") - waitForHealthStatus(c, "fatal_healthcheck", "starting", "healthy") - health := getHealth(c, "fatal_healthcheck") - c.Check(health.Status, checker.Equals, "healthy") - c.Check(health.FailingStreak, checker.Equals, 0) - last := health.Log[len(health.Log)-1] - c.Check(last.ExitCode, checker.Equals, 0) - c.Check(last.Output, checker.Equals, "OK\n") - - // Fail the check - dockerCmd(c, "exec", "fatal_healthcheck", "rm", "/status") - waitForHealthStatus(c, "fatal_healthcheck", "healthy", "unhealthy") - - failsStr, _ := dockerCmd(c, "inspect", "--format={{.State.Health.FailingStreak}}", "fatal_healthcheck") - fails, err := strconv.Atoi(strings.TrimSpace(failsStr)) - c.Check(err, check.IsNil) - c.Check(fails >= 3, checker.Equals, true) - dockerCmd(c, "rm", "-f", "fatal_healthcheck") - - // Check timeout - // Note: if the interval is too small, it seems that Docker spends all its time running health - // checks and never gets around to killing it. 
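getHealth above decodes the JSON printed by `docker inspect --format={{json .State.Health}}` into an engine-api types.Health. The self-contained sketch below mirrors only the fields these tests read (Status, FailingStreak, Log) with hand-written structs and a fabricated sample document, so it runs without the engine-api dependency.

package main

import (
	"encoding/json"
	"fmt"
)

// Hand-written mirror of the fields the health tests read from types.Health.
type healthLogEntry struct {
	ExitCode int
	Output   string
}

type health struct {
	Status        string
	FailingStreak int
	Log           []healthLogEntry
}

func main() {
	// Fabricated sample of what `docker inspect --format={{json .State.Health}}` prints.
	sample := `{"Status":"unhealthy","FailingStreak":3,"Log":[{"ExitCode":1,"Output":"cat: can't open '/status'\n"}]}`

	var h health
	if err := json.Unmarshal([]byte(sample), &h); err != nil {
		panic(err)
	}
	last := h.Log[len(h.Log)-1]
	fmt.Println(h.Status, h.FailingStreak, last.ExitCode) // unhealthy 3 1
}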
- _, _ = dockerCmd(c, "run", "-d", "--name=test", - "--health-interval=1s", "--health-cmd=sleep 5m", "--health-timeout=1ms", imageName) - waitForHealthStatus(c, "test", "starting", "unhealthy") - health = getHealth(c, "test") - last = health.Log[len(health.Log)-1] - c.Check(health.Status, checker.Equals, "unhealthy") - c.Check(last.ExitCode, checker.Equals, -1) - c.Check(last.Output, checker.Equals, "Health check exceeded timeout (1ms)") - dockerCmd(c, "rm", "-f", "test") - - // Check JSON-format - _, err = buildImage(imageName, - `FROM busybox - RUN echo OK > /status - CMD ["/bin/sleep", "120"] - STOPSIGNAL SIGKILL - HEALTHCHECK --interval=1s --timeout=30s \ - CMD ["cat", "/my status"]`, - true) - c.Check(err, check.IsNil) - out, _ = dockerCmd(c, "inspect", - "--format={{.Config.Healthcheck.Test}}", imageName) - c.Check(out, checker.Equals, "[CMD cat /my status]\n") - -} diff --git a/integration-cli/docker_cli_help_test.go b/integration-cli/docker_cli_help_test.go deleted file mode 100644 index a4dca18ef0..0000000000 --- a/integration-cli/docker_cli_help_test.go +++ /dev/null @@ -1,339 +0,0 @@ -package main - -import ( - "fmt" - "os/exec" - "runtime" - "strings" - "unicode" - - "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestHelpTextVerify(c *check.C) { - testRequires(c, DaemonIsLinux) - - // Make sure main help text fits within 80 chars and that - // on non-windows system we use ~ when possible (to shorten things). - // Test for HOME set to its default value and set to "/" on linux - // Yes on windows setting up an array and looping (right now) isn't - // necessary because we just have one value, but we'll need the - // array/loop on linux so we might as well set it up so that we can - // test any number of home dirs later on and all we need to do is - // modify the array - the rest of the testing infrastructure should work - homes := []string{homedir.Get()} - - // Non-Windows machines need to test for this special case of $HOME - if runtime.GOOS != "windows" { - homes = append(homes, "/") - } - - homeKey := homedir.Key() - baseEnvs := appendBaseEnv(true) - - // Remove HOME env var from list so we can add a new value later. - for i, env := range baseEnvs { - if strings.HasPrefix(env, homeKey+"=") { - baseEnvs = append(baseEnvs[:i], baseEnvs[i+1:]...) 
- break - } - } - - for _, home := range homes { - - // Dup baseEnvs and add our new HOME value - newEnvs := make([]string, len(baseEnvs)+1) - copy(newEnvs, baseEnvs) - newEnvs[len(newEnvs)-1] = homeKey + "=" + home - - scanForHome := runtime.GOOS != "windows" && home != "/" - - // Check main help text to make sure its not over 80 chars - helpCmd := exec.Command(dockerBinary, "help") - helpCmd.Env = newEnvs - out, _, err := runCommandWithOutput(helpCmd) - c.Assert(err, checker.IsNil, check.Commentf(out)) - lines := strings.Split(out, "\n") - foundTooLongLine := false - for _, line := range lines { - if !foundTooLongLine && len(line) > 80 { - c.Logf("Line is too long:\n%s", line) - foundTooLongLine = true - } - // All lines should not end with a space - c.Assert(line, checker.Not(checker.HasSuffix), " ", check.Commentf("Line should not end with a space")) - - if scanForHome && strings.Contains(line, `=`+home) { - c.Fatalf("Line should use '%q' instead of %q:\n%s", homedir.GetShortcutString(), home, line) - } - if runtime.GOOS != "windows" { - i := strings.Index(line, homedir.GetShortcutString()) - if i >= 0 && i != len(line)-1 && line[i+1] != '/' { - c.Fatalf("Main help should not have used home shortcut:\n%s", line) - } - } - } - - // Make sure each cmd's help text fits within 90 chars and that - // on non-windows system we use ~ when possible (to shorten things). - // Pull the list of commands from the "Commands:" section of docker help - helpCmd = exec.Command(dockerBinary, "help") - helpCmd.Env = newEnvs - out, _, err = runCommandWithOutput(helpCmd) - c.Assert(err, checker.IsNil, check.Commentf(out)) - i := strings.Index(out, "Commands:") - c.Assert(i, checker.GreaterOrEqualThan, 0, check.Commentf("Missing 'Commands:' in:\n%s", out)) - - cmds := []string{} - // Grab all chars starting at "Commands:" - helpOut := strings.Split(out[i:], "\n") - // First line is just "Commands:" - if isLocalDaemon { - // Replace first line with "daemon" command since it's not part of the list of commands. 
- helpOut[0] = " daemon"
- } else {
- // Skip first line
- helpOut = helpOut[1:]
- }
-
- // Create the list of commands we want to test
- cmdsToTest := []string{}
- for _, cmd := range helpOut {
- // Stop on blank line or non-indented line
- if cmd == "" || !unicode.IsSpace(rune(cmd[0])) {
- break
- }
-
- // Grab just the first word of each line
- cmd = strings.Split(strings.TrimSpace(cmd), " ")[0]
- cmds = append(cmds, cmd) // Saving count for later
-
- cmdsToTest = append(cmdsToTest, cmd)
- }
-
- // Add some 'two word' commands - would be nice to automatically
- // calculate this list - somehow
- cmdsToTest = append(cmdsToTest, "volume create")
- cmdsToTest = append(cmdsToTest, "volume inspect")
- cmdsToTest = append(cmdsToTest, "volume ls")
- cmdsToTest = append(cmdsToTest, "volume rm")
- cmdsToTest = append(cmdsToTest, "network connect")
- cmdsToTest = append(cmdsToTest, "network create")
- cmdsToTest = append(cmdsToTest, "network disconnect")
- cmdsToTest = append(cmdsToTest, "network inspect")
- cmdsToTest = append(cmdsToTest, "network ls")
- cmdsToTest = append(cmdsToTest, "network rm")
-
- // Divide the list of commands into goroutines and run testCommand on the commands in parallel
- // to save test runtime
-
- errChan := make(chan error)
-
- for index := 0; index < len(cmdsToTest); index++ {
- go func(index int) {
- errChan <- testCommand(cmdsToTest[index], newEnvs, scanForHome, home)
- }(index)
- }
-
- for index := 0; index < len(cmdsToTest); index++ {
- err := <-errChan
- if err != nil {
- c.Fatal(err)
- }
- }
- }
-}
-
-func (s *DockerSuite) TestHelpExitCodesHelpOutput(c *check.C) {
- testRequires(c, DaemonIsLinux)
- // Test to make sure the exit code and output (stdout vs stderr) of
- // various good and bad cases are what we expect
-
- // docker : stdout=all, stderr=empty, rc=0
- out, _, err := dockerCmdWithError()
- c.Assert(err, checker.IsNil, check.Commentf(out))
- // Be really picky
- c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker'\n"))
-
- // docker help: stdout=all, stderr=empty, rc=0
- out, _, err = dockerCmdWithError("help")
- c.Assert(err, checker.IsNil, check.Commentf(out))
- // Be really picky
- c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker help'\n"))
-
- // docker --help: stdout=all, stderr=empty, rc=0
- out, _, err = dockerCmdWithError("--help")
- c.Assert(err, checker.IsNil, check.Commentf(out))
- // Be really picky
- c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker --help'\n"))
-
- // docker inspect busybox: stdout=all, stderr=empty, rc=0
- // Just making sure stderr is empty on valid cmd
- out, _, err = dockerCmdWithError("inspect", "busybox")
- c.Assert(err, checker.IsNil, check.Commentf(out))
- // Be really picky
- c.Assert(out, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker inspect busybox'\n"))
-
- // docker rm: stdout=empty, stderr=all, rc!=0
- // testing the min arg error msg
- cmd := exec.Command(dockerBinary, "rm")
- stdout, stderr, _, err := runCommandWithStdoutStderr(cmd)
- c.Assert(err, checker.NotNil)
- c.Assert(stdout, checker.Equals, "")
- // Should not contain full help text but should contain info about
- // # of args and Usage line
- c.Assert(stderr, checker.Contains, "requires at least 1 argument", check.Commentf("Missing # of args text from 'docker rm'\n"))
-
- // docker rm NoSuchContainer: stdout=empty, stderr=all, rc!=0
- // testing to make sure no blank line on error
- cmd = exec.Command(dockerBinary, "rm", "NoSuchContainer")
- stdout, stderr, _, err = runCommandWithStdoutStderr(cmd)
- c.Assert(err, checker.NotNil)
- c.Assert(len(stderr), checker.Not(checker.Equals), 0)
- c.Assert(stdout, checker.Equals, "")
- // Be really picky
- c.Assert(stderr, checker.Not(checker.HasSuffix), "\n\n", check.Commentf("Should not have a blank line at the end of 'docker rm'\n"))
-
- // docker BadCmd: stdout=empty, stderr=all, rc!=0
- cmd = exec.Command(dockerBinary, "BadCmd")
- stdout, stderr, _, err = runCommandWithStdoutStderr(cmd)
- c.Assert(err, checker.NotNil)
- c.Assert(stdout, checker.Equals, "")
- c.Assert(stderr, checker.Equals, "docker: 'BadCmd' is not a docker command.\nSee 'docker --help'.\n", check.Commentf("Unexpected output for 'docker BadCmd'\n"))
-}
-
-func testCommand(cmd string, newEnvs []string, scanForHome bool, home string) error {
-
- args := strings.Split(cmd+" --help", " ")
-
- // Check the full usage text
- helpCmd := exec.Command(dockerBinary, args...)
- helpCmd.Env = newEnvs
- out, stderr, _, err := runCommandWithStdoutStderr(helpCmd)
- if len(stderr) != 0 {
- return fmt.Errorf("Error on %q help. non-empty stderr:%q\n", cmd, stderr)
- }
- if strings.HasSuffix(out, "\n\n") {
- return fmt.Errorf("Should not have blank line on %q\n", cmd)
- }
- if !strings.Contains(out, "--help") {
- return fmt.Errorf("All commands should mention '--help'. Command '%v' did not.\n", cmd)
- }
-
- if err != nil {
- return fmt.Errorf("%s", out)
- }
-
- // Check each line for lots of stuff
- lines := strings.Split(out, "\n")
- for _, line := range lines {
- if len(line) > 107 {
- return fmt.Errorf("Help for %q is too long:\n%s\n", cmd, line)
- }
-
- if scanForHome && strings.Contains(line, `"`+home) {
- return fmt.Errorf("Help for %q should use ~ instead of %q on:\n%s\n",
- cmd, home, line)
- }
- i := strings.Index(line, "~")
- if i >= 0 && i != len(line)-1 && line[i+1] != '/' {
- return fmt.Errorf("Help for %q should not have used ~:\n%s", cmd, line)
- }
-
- // If a line starts with 4 spaces then assume someone
- // added a multi-line description for an option and we need
- // to flag it
- if strings.HasPrefix(line, " ") &&
- !strings.HasPrefix(strings.TrimLeft(line, " "), "--") {
- return fmt.Errorf("Help for %q should not have a multi-line option", cmd)
- }
-
- // Options should NOT end with a period
- if strings.HasPrefix(line, " -") && strings.HasSuffix(line, ".") {
- return fmt.Errorf("Help for %q should not end with a period: %s", cmd, line)
- }
-
- // Options should NOT end with a space
- if strings.HasSuffix(line, " ") {
- return fmt.Errorf("Help for %q should not end with a space: %s", cmd, line)
- }
-
- }
-
- // For each command make sure we generate an error
- // if we give a bad arg
- args = strings.Split(cmd+" --badArg", " ")
-
- out, _, err = dockerCmdWithError(args...)
- if err == nil {
- return fmt.Errorf("%s", out)
- }
-
- // Be really picky
- if strings.HasSuffix(stderr, "\n\n") {
- return fmt.Errorf("Should not have a blank line at the end of %q help\n", cmd)
- }
-
- // Now make sure that each command will print a short-usage
- // (not a full usage - meaning no opts section) if we
- // are missing a required arg or pass in a bad arg
-
- // These commands will never print a short-usage so don't test
- noShortUsage := map[string]string{
- "images": "",
- "login": "",
- "logout": "",
- "network": "",
- "stats": "",
- }
-
- if _, ok := noShortUsage[cmd]; !ok {
- // For each command run it w/o any args. It will either return
- // valid output or print a short-usage
- var dCmd *exec.Cmd
-
- // skipNoArgs are ones that we don't want to try w/o
- // any args. Either because it'll hang the test or
- // lead to incorrect test result (like false negative).
- // Whatever the reason, skip trying to run w/o args and
- // jump to trying with a bogus arg.
- skipNoArgs := map[string]struct{}{
- "daemon": {},
- "events": {},
- "load": {},
- }
-
- ec := 0
- if _, ok := skipNoArgs[cmd]; !ok {
- args = strings.Split(cmd, " ")
- dCmd = exec.Command(dockerBinary, args...)
- out, stderr, ec, err = runCommandWithStdoutStderr(dCmd)
- }
-
- // If it's ok w/o any args then try again with an arg
- if ec == 0 {
- args = strings.Split(cmd+" badArg", " ")
- dCmd = exec.Command(dockerBinary, args...)
- out, stderr, ec, err = runCommandWithStdoutStderr(dCmd)
- }
-
- if len(out) != 0 || len(stderr) == 0 || ec == 0 || err == nil {
- return fmt.Errorf("Bad output from %q\nstdout:%q\nstderr:%q\nec:%d\nerr:%q\n", args, out, stderr, ec, err)
- }
- // Should have just short usage
- if !strings.Contains(stderr, "\nUsage:") {
- return fmt.Errorf("Missing short usage on %q\n:%#v", args, stderr)
- }
- // But shouldn't have full usage
- if strings.Contains(stderr, "--help=false") {
- return fmt.Errorf("Should not have full usage on %q\n", args)
- }
- if strings.HasSuffix(stderr, "\n\n") {
- return fmt.Errorf("Should not have a blank line on %q\n%v", args, stderr)
- }
- }
-
- return nil
-}
diff --git a/integration-cli/docker_cli_history_test.go b/integration-cli/docker_cli_history_test.go
deleted file mode 100644
index 6ad1a7e49d..0000000000
--- a/integration-cli/docker_cli_history_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package main
-
-import (
- "fmt"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/docker/docker/pkg/integration/checker"
- "github.com/go-check/check"
)
-
-// This is a heisen-test. Because the created timestamps of images and the behavior of
-// sort are not predictable, it doesn't always fail.
-func (s *DockerSuite) TestBuildHistory(c *check.C) {
- testRequires(c, DaemonIsLinux) // TODO Windows: This test passes on Windows,
- // but currently adds a disproportionate amount of time for the value it has.
- // Removing it from Windows CI for now, but this will be revisited in the
- // TP5 timeframe when perf is better.
- name := "testbuildhistory" - _, err := buildImage(name, `FROM `+minimalBaseImage()+` -LABEL label.A="A" -LABEL label.B="B" -LABEL label.C="C" -LABEL label.D="D" -LABEL label.E="E" -LABEL label.F="F" -LABEL label.G="G" -LABEL label.H="H" -LABEL label.I="I" -LABEL label.J="J" -LABEL label.K="K" -LABEL label.L="L" -LABEL label.M="M" -LABEL label.N="N" -LABEL label.O="O" -LABEL label.P="P" -LABEL label.Q="Q" -LABEL label.R="R" -LABEL label.S="S" -LABEL label.T="T" -LABEL label.U="U" -LABEL label.V="V" -LABEL label.W="W" -LABEL label.X="X" -LABEL label.Y="Y" -LABEL label.Z="Z"`, - true) - - c.Assert(err, checker.IsNil) - - out, _ := dockerCmd(c, "history", "testbuildhistory") - actualValues := strings.Split(out, "\n")[1:27] - expectedValues := [26]string{"Z", "Y", "X", "W", "V", "U", "T", "S", "R", "Q", "P", "O", "N", "M", "L", "K", "J", "I", "H", "G", "F", "E", "D", "C", "B", "A"} - - for i := 0; i < 26; i++ { - echoValue := fmt.Sprintf("LABEL label.%s=%s", expectedValues[i], expectedValues[i]) - actualValue := actualValues[i] - c.Assert(actualValue, checker.Contains, echoValue) - } - -} - -func (s *DockerSuite) TestHistoryExistentImage(c *check.C) { - dockerCmd(c, "history", "busybox") -} - -func (s *DockerSuite) TestHistoryNonExistentImage(c *check.C) { - _, _, err := dockerCmdWithError("history", "testHistoryNonExistentImage") - c.Assert(err, checker.NotNil, check.Commentf("history on a non-existent image should fail.")) -} - -func (s *DockerSuite) TestHistoryImageWithComment(c *check.C) { - name := "testhistoryimagewithcomment" - - // make an image through docker commit [ -m messages ] - - dockerCmd(c, "run", "--name", name, "busybox", "true") - dockerCmd(c, "wait", name) - - comment := "This_is_a_comment" - dockerCmd(c, "commit", "-m="+comment, name, name) - - // test docker history to check comment messages - - out, _ := dockerCmd(c, "history", name) - outputTabs := strings.Fields(strings.Split(out, "\n")[1]) - actualValue := outputTabs[len(outputTabs)-1] - c.Assert(actualValue, checker.Contains, comment) -} - -func (s *DockerSuite) TestHistoryHumanOptionFalse(c *check.C) { - out, _ := dockerCmd(c, "history", "--human=false", "busybox") - lines := strings.Split(out, "\n") - sizeColumnRegex, _ := regexp.Compile("SIZE +") - indices := sizeColumnRegex.FindStringIndex(lines[0]) - startIndex := indices[0] - endIndex := indices[1] - for i := 1; i < len(lines)-1; i++ { - if endIndex > len(lines[i]) { - endIndex = len(lines[i]) - } - sizeString := lines[i][startIndex:endIndex] - - _, err := strconv.Atoi(strings.TrimSpace(sizeString)) - c.Assert(err, checker.IsNil, check.Commentf("The size '%s' was not an Integer", sizeString)) - } -} - -func (s *DockerSuite) TestHistoryHumanOptionTrue(c *check.C) { - out, _ := dockerCmd(c, "history", "--human=true", "busybox") - lines := strings.Split(out, "\n") - sizeColumnRegex, _ := regexp.Compile("SIZE +") - humanSizeRegexRaw := "\\d+.*B" // Matches human sizes like 10 MB, 3.2 KB, etc - indices := sizeColumnRegex.FindStringIndex(lines[0]) - startIndex := indices[0] - endIndex := indices[1] - for i := 1; i < len(lines)-1; i++ { - if endIndex > len(lines[i]) { - endIndex = len(lines[i]) - } - sizeString := lines[i][startIndex:endIndex] - c.Assert(strings.TrimSpace(sizeString), checker.Matches, humanSizeRegexRaw, check.Commentf("The size '%s' was not in human format", sizeString)) - } -} diff --git a/integration-cli/docker_cli_images_test.go b/integration-cli/docker_cli_images_test.go deleted file mode 100644 index 904f380fd3..0000000000 --- 
a/integration-cli/docker_cli_images_test.go
+++ /dev/null
@@ -1,359 +0,0 @@
-package main
-
-import (
- "fmt"
- "io/ioutil"
- "os"
- "path/filepath"
- "reflect"
- "sort"
- "strings"
- "time"
-
- "github.com/docker/docker/pkg/integration/checker"
- "github.com/docker/docker/pkg/stringid"
- "github.com/go-check/check"
-)
-
-func (s *DockerSuite) TestImagesEnsureImageIsListed(c *check.C) {
- testRequires(c, DaemonIsLinux)
- imagesOut, _ := dockerCmd(c, "images")
- c.Assert(imagesOut, checker.Contains, "busybox")
-}
-
-func (s *DockerSuite) TestImagesEnsureImageWithTagIsListed(c *check.C) {
- testRequires(c, DaemonIsLinux)
-
- name := "imagewithtag"
- dockerCmd(c, "tag", "busybox", name+":v1")
- dockerCmd(c, "tag", "busybox", name+":v1v1")
- dockerCmd(c, "tag", "busybox", name+":v2")
-
- imagesOut, _ := dockerCmd(c, "images", name+":v1")
- c.Assert(imagesOut, checker.Contains, name)
- c.Assert(imagesOut, checker.Contains, "v1")
- c.Assert(imagesOut, checker.Not(checker.Contains), "v2")
- c.Assert(imagesOut, checker.Not(checker.Contains), "v1v1")
-
- imagesOut, _ = dockerCmd(c, "images", name)
- c.Assert(imagesOut, checker.Contains, name)
- c.Assert(imagesOut, checker.Contains, "v1")
- c.Assert(imagesOut, checker.Contains, "v1v1")
- c.Assert(imagesOut, checker.Contains, "v2")
-}
-
-func (s *DockerSuite) TestImagesEnsureImageWithBadTagIsNotListed(c *check.C) {
- imagesOut, _ := dockerCmd(c, "images", "busybox:nonexistent")
- c.Assert(imagesOut, checker.Not(checker.Contains), "busybox")
-}
-
-func (s *DockerSuite) TestImagesOrderedByCreationDate(c *check.C) {
- testRequires(c, DaemonIsLinux)
- id1, err := buildImage("order:test_a",
- `FROM scratch
- MAINTAINER dockerio1`, true)
- c.Assert(err, checker.IsNil)
- time.Sleep(1 * time.Second)
- id2, err := buildImage("order:test_c",
- `FROM scratch
- MAINTAINER dockerio2`, true)
- c.Assert(err, checker.IsNil)
- time.Sleep(1 * time.Second)
- id3, err := buildImage("order:test_b",
- `FROM scratch
- MAINTAINER dockerio3`, true)
- c.Assert(err, checker.IsNil)
-
- out, _ := dockerCmd(c, "images", "-q", "--no-trunc")
- imgs := strings.Split(out, "\n")
- c.Assert(imgs[0], checker.Equals, id3, check.Commentf("First image must be %s, got %s", id3, imgs[0]))
- c.Assert(imgs[1], checker.Equals, id2, check.Commentf("Second image must be %s, got %s", id2, imgs[1]))
- c.Assert(imgs[2], checker.Equals, id1, check.Commentf("Third image must be %s, got %s", id1, imgs[2]))
-}
-
-func (s *DockerSuite) TestImagesErrorWithInvalidFilterNameTest(c *check.C) {
- out, _, err := dockerCmdWithError("images", "-f", "FOO=123")
- c.Assert(err, checker.NotNil)
- c.Assert(out, checker.Contains, "Invalid filter")
-}
-
-func (s *DockerSuite) TestImagesFilterLabelMatch(c *check.C) {
- testRequires(c, DaemonIsLinux)
- imageName1 := "images_filter_test1"
- imageName2 := "images_filter_test2"
- imageName3 := "images_filter_test3"
- image1ID, err := buildImage(imageName1,
- `FROM scratch
- LABEL match me`, true)
- c.Assert(err, check.IsNil)
-
- image2ID, err := buildImage(imageName2,
- `FROM scratch
- LABEL match="me too"`, true)
- c.Assert(err, check.IsNil)
-
- image3ID, err := buildImage(imageName3,
- `FROM scratch
- LABEL nomatch me`, true)
- c.Assert(err, check.IsNil)
-
- out, _ := dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match")
- out = strings.TrimSpace(out)
- c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image1ID))
- c.Assert(out, check.Matches, fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image2ID))
- c.Assert(out, check.Not(check.Matches),
fmt.Sprintf("[\\s\\w:]*%s[\\s\\w:]*", image3ID)) - - out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=match=me too") - out = strings.TrimSpace(out) - c.Assert(out, check.Equals, image2ID) -} - -// Regression : #15659 -func (s *DockerSuite) TestImagesFilterLabelWithCommit(c *check.C) { - // Create a container - dockerCmd(c, "run", "--name", "bar", "busybox", "/bin/sh") - // Commit with labels "using changes" - out, _ := dockerCmd(c, "commit", "-c", "LABEL foo.version=1.0.0-1", "-c", "LABEL foo.name=bar", "-c", "LABEL foo.author=starlord", "bar", "bar:1.0.0-1") - imageID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "images", "--no-trunc", "-q", "-f", "label=foo.version=1.0.0-1") - out = strings.TrimSpace(out) - c.Assert(out, check.Equals, imageID) -} - -func (s *DockerSuite) TestImagesFilterSinceAndBefore(c *check.C) { - imageID1, err := buildImage("image:1", `FROM `+minimalBaseImage()+` -LABEL number=1`, true) - c.Assert(err, checker.IsNil) - imageID2, err := buildImage("image:2", `FROM `+minimalBaseImage()+` -LABEL number=2`, true) - c.Assert(err, checker.IsNil) - imageID3, err := buildImage("image:3", `FROM `+minimalBaseImage()+` -LABEL number=3`, true) - c.Assert(err, checker.IsNil) - - expected := []string{imageID3, imageID2} - - out, _ := dockerCmd(c, "images", "-f", "since=image:1", "image") - c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) - - out, _ = dockerCmd(c, "images", "-f", "since="+imageID1, "image") - c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) - - expected = []string{imageID3} - - out, _ = dockerCmd(c, "images", "-f", "since=image:2", "image") - c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) - - out, _ = dockerCmd(c, "images", "-f", "since="+imageID2, "image") - c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Image list is not in the correct order: %v\n%s", expected, out)) - - expected = []string{imageID2, imageID1} - - out, _ = dockerCmd(c, "images", "-f", "before=image:3", "image") - c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) - - out, _ = dockerCmd(c, "images", "-f", "before="+imageID3, "image") - c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) - - expected = []string{imageID1} - - out, _ = dockerCmd(c, "images", "-f", "before=image:2", "image") - c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) - - out, _ = dockerCmd(c, "images", "-f", "before="+imageID2, "image") - c.Assert(assertImageList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Image list is not in the correct order: %v\n%s", expected, out)) -} - -func assertImageList(out string, expected []string) bool { - lines := strings.Split(strings.Trim(out, "\n "), "\n") - - if len(lines)-1 != len(expected) { - return false - } - - imageIDIndex := strings.Index(lines[0], "IMAGE ID") - for i := 0; i < len(expected); i++ { - imageID := lines[i+1][imageIDIndex : 
imageIDIndex+12] - found := false - for _, e := range expected { - if imageID == e[7:19] { - found = true - break - } - } - if !found { - return false - } - } - - return true -} - -func (s *DockerSuite) TestImagesFilterSpaceTrimCase(c *check.C) { - testRequires(c, DaemonIsLinux) - imageName := "images_filter_test" - buildImage(imageName, - `FROM scratch - RUN touch /test/foo - RUN touch /test/bar - RUN touch /test/baz`, true) - - filters := []string{ - "dangling=true", - "Dangling=true", - " dangling=true", - "dangling=true ", - "dangling = true", - } - - imageListings := make([][]string, 5, 5) - for idx, filter := range filters { - out, _ := dockerCmd(c, "images", "-q", "-f", filter) - listing := strings.Split(out, "\n") - sort.Strings(listing) - imageListings[idx] = listing - } - - for idx, listing := range imageListings { - if idx < 4 && !reflect.DeepEqual(listing, imageListings[idx+1]) { - for idx, errListing := range imageListings { - fmt.Printf("out %d", idx) - for _, image := range errListing { - fmt.Print(image) - } - fmt.Print("") - } - c.Fatalf("All output must be the same") - } - } -} - -func (s *DockerSuite) TestImagesEnsureDanglingImageOnlyListedOnce(c *check.C) { - testRequires(c, DaemonIsLinux) - // create container 1 - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - containerID1 := strings.TrimSpace(out) - - // tag as foobox - out, _ = dockerCmd(c, "commit", containerID1, "foobox") - imageID := stringid.TruncateID(strings.TrimSpace(out)) - - // overwrite the tag, making the previous image dangling - dockerCmd(c, "tag", "busybox", "foobox") - - out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=true") - // Expect one dangling image - c.Assert(strings.Count(out, imageID), checker.Equals, 1) - - out, _ = dockerCmd(c, "images", "-q", "-f", "dangling=false") - //dangling=false would not include dangling images - c.Assert(out, checker.Not(checker.Contains), imageID) - - out, _ = dockerCmd(c, "images") - //docker images still include dangling images - c.Assert(out, checker.Contains, imageID) - -} - -func (s *DockerSuite) TestImagesWithIncorrectFilter(c *check.C) { - out, _, err := dockerCmdWithError("images", "-f", "dangling=invalid") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, "Invalid filter") -} - -func (s *DockerSuite) TestImagesEnsureOnlyHeadsImagesShown(c *check.C) { - testRequires(c, DaemonIsLinux) - - dockerfile := ` - FROM scratch - MAINTAINER docker - ENV foo bar` - - head, out, err := buildImageWithOut("scratch-image", dockerfile, false) - c.Assert(err, check.IsNil) - - // this is just the output of docker build - // we're interested in getting the image id of the MAINTAINER instruction - // and that's located at output, line 5, from 7 to end - split := strings.Split(out, "\n") - intermediate := strings.TrimSpace(split[5][7:]) - - out, _ = dockerCmd(c, "images") - // images shouldn't show non-heads images - c.Assert(out, checker.Not(checker.Contains), intermediate) - // images should contain final built images - c.Assert(out, checker.Contains, stringid.TruncateID(head)) -} - -func (s *DockerSuite) TestImagesEnsureImagesFromScratchShown(c *check.C) { - testRequires(c, DaemonIsLinux) - - dockerfile := ` - FROM scratch - MAINTAINER docker` - - id, _, err := buildImageWithOut("scratch-image", dockerfile, false) - c.Assert(err, check.IsNil) - - out, _ := dockerCmd(c, "images") - // images should contain images built from scratch - c.Assert(out, checker.Contains, stringid.TruncateID(id)) -} - -// #18181 -func (s *DockerSuite) 
TestImagesFilterNameWithPort(c *check.C) {
- tag := "a.b.c.d:5000/hello"
- dockerCmd(c, "tag", "busybox", tag)
- out, _ := dockerCmd(c, "images", tag)
- c.Assert(out, checker.Contains, tag)
-
- out, _ = dockerCmd(c, "images", tag+":latest")
- c.Assert(out, checker.Contains, tag)
-
- out, _ = dockerCmd(c, "images", tag+":no-such-tag")
- c.Assert(out, checker.Not(checker.Contains), tag)
-}
-
-func (s *DockerSuite) TestImagesFormat(c *check.C) {
- // testRequires(c, DaemonIsLinux)
- tag := "myimage"
- dockerCmd(c, "tag", "busybox", tag+":v1")
- dockerCmd(c, "tag", "busybox", tag+":v2")
-
- out, _ := dockerCmd(c, "images", "--format", "{{.Repository}}", tag)
- lines := strings.Split(strings.TrimSpace(out), "\n")
-
- expected := []string{"myimage", "myimage"}
- var names []string
- names = append(names, lines...)
- c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with truncated names: %v, got: %v", expected, names))
-}
-
-// ImagesDefaultFormatAndQuiet
-func (s *DockerSuite) TestImagesFormatDefaultFormat(c *check.C) {
- testRequires(c, DaemonIsLinux)
-
- // create container 1
- out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
- containerID1 := strings.TrimSpace(out)
-
- // commit as myimage
- out, _ = dockerCmd(c, "commit", containerID1, "myimage")
- imageID := stringid.TruncateID(strings.TrimSpace(out))
-
- config := `{
- "imagesFormat": "{{ .ID }} default"
-}`
- d, err := ioutil.TempDir("", "integration-cli-")
- c.Assert(err, checker.IsNil)
- defer os.RemoveAll(d)
-
- err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644)
- c.Assert(err, checker.IsNil)
-
- out, _ = dockerCmd(c, "--config", d, "images", "-q", "myimage")
- c.Assert(out, checker.Equals, imageID+"\n", check.Commentf("Expected to print only the image id, got %v\n", out))
-}
diff --git a/integration-cli/docker_cli_import_test.go b/integration-cli/docker_cli_import_test.go
deleted file mode 100644
index dc07fb40ee..0000000000
--- a/integration-cli/docker_cli_import_test.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package main
-
-import (
- "bufio"
- "compress/gzip"
- "io/ioutil"
- "os"
- "os/exec"
- "regexp"
- "strings"
-
- "github.com/docker/docker/pkg/integration/checker"
- "github.com/go-check/check"
-)
-
-func (s *DockerSuite) TestImportDisplay(c *check.C) {
- testRequires(c, DaemonIsLinux)
- out, _ := dockerCmd(c, "run", "-d", "busybox", "true")
- cleanedContainerID := strings.TrimSpace(out)
-
- out, _, err := runCommandPipelineWithOutput(
- exec.Command(dockerBinary, "export", cleanedContainerID),
- exec.Command(dockerBinary, "import", "-"),
- )
- c.Assert(err, checker.IsNil)
-
- c.Assert(out, checker.Count, "\n", 1, check.Commentf("expected the output to contain exactly one '\\n'"))
-
- image := strings.TrimSpace(out)
- out, _ = dockerCmd(c, "run", "--rm", image, "true")
- c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing."))
-}
-
-func (s *DockerSuite) TestImportBadURL(c *check.C) {
- testRequires(c, DaemonIsLinux)
- out, _, err := dockerCmdWithError("import", "http://nourl/bad")
- c.Assert(err, checker.NotNil, check.Commentf("import was supposed to fail but didn't"))
- // Depending on your system you can get either of these errors
- if !strings.Contains(out, "dial tcp") &&
- !strings.Contains(out, "Error processing tar file") {
- c.Fatalf("expected an error msg but didn't get one.\nErr: %v\nOut: %v", err, out)
- }
-}
-
-func (s *DockerSuite) TestImportFile(c *check.C) {
- testRequires(c, DaemonIsLinux)
- dockerCmd(c,
"run", "--name", "test-import", "busybox", "true") - - temporaryFile, err := ioutil.TempFile("", "exportImportTest") - c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) - defer os.Remove(temporaryFile.Name()) - - runCmd := exec.Command(dockerBinary, "export", "test-import") - runCmd.Stdout = bufio.NewWriter(temporaryFile) - - _, err = runCommand(runCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) - - out, _ := dockerCmd(c, "import", temporaryFile.Name()) - c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) - image := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "run", "--rm", image, "true") - c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) -} - -func (s *DockerSuite) TestImportGzipped(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "--name", "test-import", "busybox", "true") - - temporaryFile, err := ioutil.TempFile("", "exportImportTest") - c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) - defer os.Remove(temporaryFile.Name()) - - runCmd := exec.Command(dockerBinary, "export", "test-import") - w := gzip.NewWriter(temporaryFile) - runCmd.Stdout = w - - _, err = runCommand(runCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) - err = w.Close() - c.Assert(err, checker.IsNil, check.Commentf("failed to close gzip writer")) - temporaryFile.Close() - out, _ := dockerCmd(c, "import", temporaryFile.Name()) - c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) - image := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "run", "--rm", image, "true") - c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing.")) -} - -func (s *DockerSuite) TestImportFileWithMessage(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "--name", "test-import", "busybox", "true") - - temporaryFile, err := ioutil.TempFile("", "exportImportTest") - c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary file")) - defer os.Remove(temporaryFile.Name()) - - runCmd := exec.Command(dockerBinary, "export", "test-import") - runCmd.Stdout = bufio.NewWriter(temporaryFile) - - _, err = runCommand(runCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to export a container")) - - message := "Testing commit message" - out, _ := dockerCmd(c, "import", "-m", message, temporaryFile.Name()) - c.Assert(out, checker.Count, "\n", 1, check.Commentf("display is expected 1 '\\n' but didn't")) - image := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "history", image) - split := strings.Split(out, "\n") - - c.Assert(split, checker.HasLen, 3, check.Commentf("expected 3 lines from image history")) - r := regexp.MustCompile("[\\s]{2,}") - split = r.Split(split[1], -1) - - c.Assert(message, checker.Equals, split[3], check.Commentf("didn't get expected value in commit message")) - - out, _ = dockerCmd(c, "run", "--rm", image, "true") - c.Assert(out, checker.Equals, "", check.Commentf("command output should've been nothing")) -} - -func (s *DockerSuite) TestImportFileNonExistentFile(c *check.C) { - _, _, err := dockerCmdWithError("import", "example.com/myImage.tar") - c.Assert(err, checker.NotNil, check.Commentf("import non-existing file must failed")) -} diff --git a/integration-cli/docker_cli_info_test.go b/integration-cli/docker_cli_info_test.go deleted file mode 100644 index 
a48e69aa3f..0000000000
--- a/integration-cli/docker_cli_info_test.go
+++ /dev/null
@@ -1,189 +0,0 @@
-package main
-
-import (
- "fmt"
- "net"
- "strings"
-
- "github.com/docker/docker/pkg/integration/checker"
- "github.com/docker/docker/utils"
- "github.com/go-check/check"
-)
-
-// ensure docker info succeeds
-func (s *DockerSuite) TestInfoEnsureSucceeds(c *check.C) {
- out, _ := dockerCmd(c, "info")
-
- // always shown fields
- stringsToCheck := []string{
- "ID:",
- "Containers:",
- " Running:",
- " Paused:",
- " Stopped:",
- "Images:",
- "OSType:",
- "Architecture:",
- "Logging Driver:",
- "Operating System:",
- "CPUs:",
- "Total Memory:",
- "Kernel Version:",
- "Storage Driver:",
- "Volume:",
- "Network:",
- "Security Options:",
- }
-
- if DaemonIsLinux.Condition() {
- stringsToCheck = append(stringsToCheck, "Runtimes:", "Default Runtime: runc")
- }
-
- if utils.ExperimentalBuild() {
- stringsToCheck = append(stringsToCheck, "Experimental: true")
- }
-
- for _, linePrefix := range stringsToCheck {
- c.Assert(out, checker.Contains, linePrefix, check.Commentf("couldn't find string %v in output", linePrefix))
- }
-}
-
-// TestInfoDiscoveryBackend verifies that a daemon run with `--cluster-advertise` and
-// `--cluster-store` properly shows the backend's endpoint in info output.
-func (s *DockerSuite) TestInfoDiscoveryBackend(c *check.C) {
- testRequires(c, SameHostDaemon, DaemonIsLinux)
-
- d := NewDaemon(c)
- discoveryBackend := "consul://consuladdr:consulport/some/path"
- discoveryAdvertise := "1.1.1.1:2375"
- err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s", discoveryAdvertise))
- c.Assert(err, checker.IsNil)
- defer d.Stop()
-
- out, err := d.Cmd("info")
- c.Assert(err, checker.IsNil)
- c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend))
- c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s\n", discoveryAdvertise))
-}
-
-// TestInfoDiscoveryInvalidAdvertise verifies that a daemon run with
-// an invalid `--cluster-advertise` configuration fails to start.
-func (s *DockerSuite) TestInfoDiscoveryInvalidAdvertise(c *check.C) {
- testRequires(c, SameHostDaemon, DaemonIsLinux)
-
- d := NewDaemon(c)
- discoveryBackend := "consul://consuladdr:consulport/some/path"
-
- // --cluster-advertise with an invalid string is an error
- err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), "--cluster-advertise=invalid")
- c.Assert(err, checker.Not(checker.IsNil))
-
- // --cluster-advertise without --cluster-store is also an error
- err = d.Start("--cluster-advertise=1.1.1.1:2375")
- c.Assert(err, checker.Not(checker.IsNil))
-}
-
-// TestInfoDiscoveryAdvertiseInterfaceName verifies that a daemon run with `--cluster-advertise`
-// configured with an interface name properly shows the advertise ip-address in info output.
-func (s *DockerSuite) TestInfoDiscoveryAdvertiseInterfaceName(c *check.C) { - testRequires(c, SameHostDaemon, Network, DaemonIsLinux) - - d := NewDaemon(c) - discoveryBackend := "consul://consuladdr:consulport/some/path" - discoveryAdvertise := "eth0" - - err := d.Start(fmt.Sprintf("--cluster-store=%s", discoveryBackend), fmt.Sprintf("--cluster-advertise=%s:2375", discoveryAdvertise)) - c.Assert(err, checker.IsNil) - defer d.Stop() - - iface, err := net.InterfaceByName(discoveryAdvertise) - c.Assert(err, checker.IsNil) - addrs, err := iface.Addrs() - c.Assert(err, checker.IsNil) - c.Assert(len(addrs), checker.GreaterThan, 0) - ip, _, err := net.ParseCIDR(addrs[0].String()) - c.Assert(err, checker.IsNil) - - out, err := d.Cmd("info") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Store: %s\n", discoveryBackend)) - c.Assert(out, checker.Contains, fmt.Sprintf("Cluster Advertise: %s:2375\n", ip.String())) -} - -func (s *DockerSuite) TestInfoDisplaysRunningContainers(c *check.C) { - testRequires(c, DaemonIsLinux) - - dockerCmd(c, "run", "-d", "busybox", "top") - out, _ := dockerCmd(c, "info") - c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) -} - -func (s *DockerSuite) TestInfoDisplaysPausedContainers(c *check.C) { - testRequires(c, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - cleanedContainerID := strings.TrimSpace(out) - - dockerCmd(c, "pause", cleanedContainerID) - - out, _ = dockerCmd(c, "info") - c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 0)) -} - -func (s *DockerSuite) TestInfoDisplaysStoppedContainers(c *check.C) { - testRequires(c, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - cleanedContainerID := strings.TrimSpace(out) - - dockerCmd(c, "stop", cleanedContainerID) - - out, _ = dockerCmd(c, "info") - c.Assert(out, checker.Contains, fmt.Sprintf("Containers: %d\n", 1)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Running: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Paused: %d\n", 0)) - c.Assert(out, checker.Contains, fmt.Sprintf(" Stopped: %d\n", 1)) -} - -func (s *DockerSuite) TestInfoDebug(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - d := NewDaemon(c) - err := d.Start("--debug") - c.Assert(err, checker.IsNil) - defer d.Stop() - - out, err := d.Cmd("--debug", "info") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "Debug Mode (client): true\n") - c.Assert(out, checker.Contains, "Debug Mode (server): true\n") - c.Assert(out, checker.Contains, "File Descriptors") - c.Assert(out, checker.Contains, "Goroutines") - c.Assert(out, checker.Contains, "System Time") - c.Assert(out, checker.Contains, "EventsListeners") - c.Assert(out, checker.Contains, "Docker Root Dir") -} - -func (s *DockerSuite) TestInsecureRegistries(c *check.C) { - testRequires(c, SameHostDaemon, DaemonIsLinux) - - registryCIDR := "192.168.1.0/24" - registryHost := "insecurehost.com:5000" - - d := NewDaemon(c) - err := d.Start("--insecure-registry="+registryCIDR, "--insecure-registry="+registryHost) - c.Assert(err, checker.IsNil) - 
defer d.Stop() - - out, err := d.Cmd("info") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "Insecure Registries:\n") - c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryHost)) - c.Assert(out, checker.Contains, fmt.Sprintf(" %s\n", registryCIDR)) -} diff --git a/integration-cli/docker_cli_info_unix_test.go b/integration-cli/docker_cli_info_unix_test.go deleted file mode 100644 index 900534d68d..0000000000 --- a/integration-cli/docker_cli_info_unix_test.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestInfoSecurityOptions(c *check.C) { - testRequires(c, SameHostDaemon, seccompEnabled, Apparmor, DaemonIsLinux) - - out, _ := dockerCmd(c, "info") - c.Assert(out, checker.Contains, "Security Options: apparmor seccomp") -} diff --git a/integration-cli/docker_cli_inspect_experimental_test.go b/integration-cli/docker_cli_inspect_experimental_test.go deleted file mode 100644 index 0d9a261d81..0000000000 --- a/integration-cli/docker_cli_inspect_experimental_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build experimental - -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestInspectNamedMountPoint(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name", "test", "-v", "data:/data", "busybox", "cat") - - vol := inspectFieldJSON(c, "test", "Mounts") - - var mp []types.MountPoint - err := unmarshalJSON([]byte(vol), &mp) - c.Assert(err, checker.IsNil) - - c.Assert(mp, checker.HasLen, 1, check.Commentf("Expected 1 mount point")) - - m := mp[0] - c.Assert(m.Name, checker.Equals, "data", check.Commentf("Expected name data")) - - c.Assert(m.Driver, checker.Equals, "local", check.Commentf("Expected driver local")) - - c.Assert(m.Source, checker.Not(checker.Equals), "", check.Commentf("Expected source to not be empty")) - - c.Assert(m.RW, checker.Equals, true) - - c.Assert(m.Destination, checker.Equals, "/data", check.Commentf("Expected destination /data")) -} diff --git a/integration-cli/docker_cli_inspect_test.go b/integration-cli/docker_cli_inspect_test.go deleted file mode 100644 index fc5946987b..0000000000 --- a/integration-cli/docker_cli_inspect_test.go +++ /dev/null @@ -1,407 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - "os/exec" - "strconv" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/go-check/check" -) - -func checkValidGraphDriver(c *check.C, name string) { - if name != "devicemapper" && name != "overlay" && name != "vfs" && name != "zfs" && name != "btrfs" && name != "aufs" { - c.Fatalf("%v is not a valid graph driver name", name) - } -} - -func (s *DockerSuite) TestInspectImage(c *check.C) { - testRequires(c, DaemonIsLinux) - imageTest := "emptyfs" - // It is important that this ID remain stable. If a code change causes - // it to be different, this is equivalent to a cache bust when pulling - // a legacy-format manifest. If the check at the end of this function - // fails, fix the difference in the image serialization instead of - // updating this hash. 
- imageTestID := "sha256:11f64303f0f7ffdc71f001788132bca5346831939a956e3e975c93267d89a16d" - id := inspectField(c, imageTest, "Id") - - c.Assert(id, checker.Equals, imageTestID) -} - -func (s *DockerSuite) TestInspectInt64(c *check.C) { - testRequires(c, DaemonIsLinux) - - dockerCmd(c, "run", "-d", "-m=300M", "--name", "inspectTest", "busybox", "true") - inspectOut := inspectField(c, "inspectTest", "HostConfig.Memory") - c.Assert(inspectOut, checker.Equals, "314572800") -} - -func (s *DockerSuite) TestInspectDefault(c *check.C) { - testRequires(c, DaemonIsLinux) - //Both the container and image are named busybox. docker inspect will fetch the container JSON. - //If the container JSON is not available, it will go for the image JSON. - - out, _ := dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") - containerID := strings.TrimSpace(out) - - inspectOut := inspectField(c, "busybox", "Id") - c.Assert(strings.TrimSpace(inspectOut), checker.Equals, containerID) -} - -func (s *DockerSuite) TestInspectStatus(c *check.C) { - defer unpauseAllContainers() - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - out = strings.TrimSpace(out) - - inspectOut := inspectField(c, out, "State.Status") - c.Assert(inspectOut, checker.Equals, "running") - - dockerCmd(c, "pause", out) - inspectOut = inspectField(c, out, "State.Status") - c.Assert(inspectOut, checker.Equals, "paused") - - dockerCmd(c, "unpause", out) - inspectOut = inspectField(c, out, "State.Status") - c.Assert(inspectOut, checker.Equals, "running") - - dockerCmd(c, "stop", out) - inspectOut = inspectField(c, out, "State.Status") - c.Assert(inspectOut, checker.Equals, "exited") - -} - -func (s *DockerSuite) TestInspectTypeFlagContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - //Both the container and image are named busybox. docker inspect will fetch container - //JSON State.Running field. If the field is true, it's a container. - - dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "top") - - formatStr := "--format='{{.State.Running}}'" - out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox") - c.Assert(out, checker.Equals, "true\n") // not a container JSON -} - -func (s *DockerSuite) TestInspectTypeFlagWithNoContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - //Run this test on an image named busybox. docker inspect will try to fetch container - //JSON. Since there is no container named busybox and --type=container, docker inspect will - //not try to get the image JSON. It will throw an error. - - dockerCmd(c, "run", "-d", "busybox", "true") - - _, _, err := dockerCmdWithError("inspect", "--type=container", "busybox") - // docker inspect should fail, as there is no container named busybox - c.Assert(err, checker.NotNil) -} - -func (s *DockerSuite) TestInspectTypeFlagWithImage(c *check.C) { - testRequires(c, DaemonIsLinux) - //Both the container and image are named busybox. docker inspect will fetch image - //JSON as --type=image. if there is no image with name busybox, docker inspect - //will throw an error. - - dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") - - out, _ := dockerCmd(c, "inspect", "--type=image", "busybox") - c.Assert(out, checker.Not(checker.Contains), "State") // not an image JSON -} - -func (s *DockerSuite) TestInspectTypeFlagWithInvalidValue(c *check.C) { - testRequires(c, DaemonIsLinux) - //Both the container and image are named busybox. docker inspect will fail - //as --type=foobar is not a valid value for the flag. 
- - dockerCmd(c, "run", "--name=busybox", "-d", "busybox", "true") - - out, exitCode, err := dockerCmdWithError("inspect", "--type=foobar", "busybox") - c.Assert(err, checker.NotNil, check.Commentf("%s", exitCode)) - c.Assert(exitCode, checker.Equals, 1, check.Commentf("%s", err)) - c.Assert(out, checker.Contains, "not a valid value for --type") -} - -func (s *DockerSuite) TestInspectImageFilterInt(c *check.C) { - testRequires(c, DaemonIsLinux) - imageTest := "emptyfs" - out := inspectField(c, imageTest, "Size") - - size, err := strconv.Atoi(out) - c.Assert(err, checker.IsNil, check.Commentf("failed to inspect size of the image: %s, %v", out, err)) - - //now see if the size turns out to be the same - formatStr := fmt.Sprintf("--format='{{eq .Size %d}}'", size) - out, _ = dockerCmd(c, "inspect", formatStr, imageTest) - result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) - c.Assert(err, checker.IsNil) - c.Assert(result, checker.Equals, true) -} - -func (s *DockerSuite) TestInspectContainerFilterInt(c *check.C) { - testRequires(c, DaemonIsLinux) - runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") - runCmd.Stdin = strings.NewReader("blahblah") - out, _, _, err := runCommandWithStdoutStderr(runCmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to run container: %v, output: %q", err, out)) - - id := strings.TrimSpace(out) - - out = inspectField(c, id, "State.ExitCode") - - exitCode, err := strconv.Atoi(out) - c.Assert(err, checker.IsNil, check.Commentf("failed to inspect exitcode of the container: %s, %v", out, err)) - - //now get the exit code to verify - formatStr := fmt.Sprintf("--format='{{eq .State.ExitCode %d}}'", exitCode) - out, _ = dockerCmd(c, "inspect", formatStr, id) - result, err := strconv.ParseBool(strings.TrimSuffix(out, "\n")) - c.Assert(err, checker.IsNil) - c.Assert(result, checker.Equals, true) -} - -func (s *DockerSuite) TestInspectImageGraphDriver(c *check.C) { - testRequires(c, DaemonIsLinux, Devicemapper) - imageTest := "emptyfs" - name := inspectField(c, imageTest, "GraphDriver.Name") - - checkValidGraphDriver(c, name) - - deviceID := inspectField(c, imageTest, "GraphDriver.Data.DeviceId") - - _, err := strconv.Atoi(deviceID) - c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) - - deviceSize := inspectField(c, imageTest, "GraphDriver.Data.DeviceSize") - - _, err = strconv.ParseUint(deviceSize, 10, 64) - c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) -} - -func (s *DockerSuite) TestInspectContainerGraphDriver(c *check.C) { - testRequires(c, DaemonIsLinux, Devicemapper) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - out = strings.TrimSpace(out) - - name := inspectField(c, out, "GraphDriver.Name") - - checkValidGraphDriver(c, name) - - imageDeviceID := inspectField(c, "busybox", "GraphDriver.Data.DeviceId") - - deviceID := inspectField(c, out, "GraphDriver.Data.DeviceId") - - c.Assert(imageDeviceID, checker.Not(checker.Equals), deviceID) - - _, err := strconv.Atoi(deviceID) - c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceId of the image: %s, %v", deviceID, err)) - - deviceSize := inspectField(c, out, "GraphDriver.Data.DeviceSize") - - _, err = strconv.ParseUint(deviceSize, 10, 64) - c.Assert(err, checker.IsNil, check.Commentf("failed to inspect DeviceSize of the image: %s, %v", deviceSize, err)) -} - -func (s *DockerSuite) TestInspectBindMountPoint(c 
*check.C) { - modifier := ",z" - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - if daemonPlatform == "windows" { - modifier = "" - // TODO Windows: Temporary check - remove once TP5 support is dropped - if windowsDaemonKV < 14350 { - c.Skip("Needs later Windows build for RO volumes") - } - // Linux creates the host directory if it doesn't exist. Windows does not. - os.Mkdir(`c:\data`, os.ModeDir) - } - - dockerCmd(c, "run", "-d", "--name", "test", "-v", prefix+slash+"data:"+prefix+slash+"data:ro"+modifier, "busybox", "cat") - - vol := inspectFieldJSON(c, "test", "Mounts") - - var mp []types.MountPoint - err := unmarshalJSON([]byte(vol), &mp) - c.Assert(err, checker.IsNil) - - // check that there is only one mountpoint - c.Assert(mp, check.HasLen, 1) - - m := mp[0] - - c.Assert(m.Name, checker.Equals, "") - c.Assert(m.Driver, checker.Equals, "") - c.Assert(m.Source, checker.Equals, prefix+slash+"data") - c.Assert(m.Destination, checker.Equals, prefix+slash+"data") - if daemonPlatform != "windows" { // Windows does not set mode - c.Assert(m.Mode, checker.Equals, "ro"+modifier) - } - c.Assert(m.RW, checker.Equals, false) -} - -// #14947 -func (s *DockerSuite) TestInspectTimesAsRFC3339Nano(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - id := strings.TrimSpace(out) - startedAt := inspectField(c, id, "State.StartedAt") - finishedAt := inspectField(c, id, "State.FinishedAt") - created := inspectField(c, id, "Created") - - _, err := time.Parse(time.RFC3339Nano, startedAt) - c.Assert(err, checker.IsNil) - _, err = time.Parse(time.RFC3339Nano, finishedAt) - c.Assert(err, checker.IsNil) - _, err = time.Parse(time.RFC3339Nano, created) - c.Assert(err, checker.IsNil) - - created = inspectField(c, "busybox", "Created") - - _, err = time.Parse(time.RFC3339Nano, created) - c.Assert(err, checker.IsNil) -} - -// #15633 -func (s *DockerSuite) TestInspectLogConfigNoType(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "create", "--name=test", "--log-opt", "max-file=42", "busybox") - var logConfig container.LogConfig - - out := inspectFieldJSON(c, "test", "HostConfig.LogConfig") - - err := json.NewDecoder(strings.NewReader(out)).Decode(&logConfig) - c.Assert(err, checker.IsNil, check.Commentf("%v", out)) - - c.Assert(logConfig.Type, checker.Equals, "json-file") - c.Assert(logConfig.Config["max-file"], checker.Equals, "42", check.Commentf("%v", logConfig)) -} - -func (s *DockerSuite) TestInspectNoSizeFlagContainer(c *check.C) { - - //Both the container and image are named busybox. docker inspect will fetch container - //JSON SizeRw and SizeRootFs field. If there is no flag --size/-s, there are no size fields. 
-
- runSleepingContainer(c, "--name=busybox", "-d")
-
- formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'"
- out, _ := dockerCmd(c, "inspect", "--type=container", formatStr, "busybox")
- c.Assert(strings.TrimSpace(out), check.Equals, ",", check.Commentf("Expected not to display size info: %s", out))
-}
-
-func (s *DockerSuite) TestInspectSizeFlagContainer(c *check.C) {
- runSleepingContainer(c, "--name=busybox", "-d")
-
- formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'"
- out, _ := dockerCmd(c, "inspect", "-s", "--type=container", formatStr, "busybox")
- sz := strings.Split(out, ",")
-
- c.Assert(strings.TrimSpace(sz[0]), check.Not(check.Equals), "")
- c.Assert(strings.TrimSpace(sz[1]), check.Not(check.Equals), "")
-}
-
-func (s *DockerSuite) TestInspectSizeFlagImage(c *check.C) {
- runSleepingContainer(c, "-d")
-
- formatStr := "--format='{{.SizeRw}},{{.SizeRootFs}}'"
- out, _, err := dockerCmdWithError("inspect", "-s", "--type=image", formatStr, "busybox")
-
- // Expect a template parsing error rather than a successful inspect.
- // This is the more correct behavior because images don't have sizes associated.
- c.Assert(err, check.Not(check.IsNil))
- c.Assert(out, checker.Contains, "Template parsing error")
-}
-
-func (s *DockerSuite) TestInspectTemplateError(c *check.C) {
- // Template parsing error for both the container and image.
-
- runSleepingContainer(c, "--name=container1", "-d")
-
- out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='Format container: {{.ThisDoesNotExist}}'", "container1")
- c.Assert(err, check.Not(check.IsNil))
- c.Assert(out, checker.Contains, "Template parsing error")
-
- out, _, err = dockerCmdWithError("inspect", "--type=image", "--format='Format container: {{.ThisDoesNotExist}}'", "busybox")
- c.Assert(err, check.Not(check.IsNil))
- c.Assert(out, checker.Contains, "Template parsing error")
-}
-
-func (s *DockerSuite) TestInspectJSONFields(c *check.C) {
- runSleepingContainer(c, "--name=busybox", "-d")
- out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='{{.HostConfig.Dns}}'", "busybox")
-
- c.Assert(err, check.IsNil)
- c.Assert(out, checker.Equals, "[]\n")
-}
-
-func (s *DockerSuite) TestInspectByPrefix(c *check.C) {
- id := inspectField(c, "busybox", "Id")
- c.Assert(id, checker.HasPrefix, "sha256:")
-
- id2 := inspectField(c, id[:12], "Id")
- c.Assert(id, checker.Equals, id2)
-
- id3 := inspectField(c, strings.TrimPrefix(id, "sha256:")[:12], "Id")
- c.Assert(id, checker.Equals, id3)
-}
-
-func (s *DockerSuite) TestInspectStopWhenNotFound(c *check.C) {
- runSleepingContainer(c, "--name=busybox", "-d")
- runSleepingContainer(c, "--name=not-shown", "-d")
- out, _, err := dockerCmdWithError("inspect", "--type=container", "--format='{{.Name}}'", "busybox", "missing", "not-shown")
-
- c.Assert(err, checker.Not(check.IsNil))
- c.Assert(out, checker.Contains, "busybox")
- c.Assert(out, checker.Not(checker.Contains), "not-shown")
- c.Assert(out, checker.Contains, "Error: No such container: missing")
-}
-
-func (s *DockerSuite) TestInspectHistory(c *check.C) {
- testRequires(c, DaemonIsLinux)
- dockerCmd(c, "run", "--name=testcont", "-d", "busybox", "top")
- dockerCmd(c, "commit", "-m", "test comment", "testcont", "testimg")
- out, _, err := dockerCmdWithError("inspect", "--format='{{.Comment}}'", "testimg")
-
- c.Assert(err, check.IsNil)
- c.Assert(out, checker.Contains, "test comment")
-}
-
-func (s *DockerSuite) TestInspectContainerNetworkDefault(c *check.C) {
- testRequires(c, DaemonIsLinux)
-
- contName := "test1"
- dockerCmd(c, "run",
"--name", contName, "-d", "busybox", "top") - netOut, _ := dockerCmd(c, "network", "inspect", "--format={{.ID}}", "bridge") - out := inspectField(c, contName, "NetworkSettings.Networks") - c.Assert(out, checker.Contains, "bridge") - out = inspectField(c, contName, "NetworkSettings.Networks.bridge.NetworkID") - c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut)) -} - -func (s *DockerSuite) TestInspectContainerNetworkCustom(c *check.C) { - testRequires(c, DaemonIsLinux) - - netOut, _ := dockerCmd(c, "network", "create", "net1") - dockerCmd(c, "run", "--name=container1", "--net=net1", "-d", "busybox", "top") - out := inspectField(c, "container1", "NetworkSettings.Networks") - c.Assert(out, checker.Contains, "net1") - out = inspectField(c, "container1", "NetworkSettings.Networks.net1.NetworkID") - c.Assert(strings.TrimSpace(out), checker.Equals, strings.TrimSpace(netOut)) -} - -func (s *DockerSuite) TestInspectRootFS(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("inspect", "busybox") - c.Assert(err, check.IsNil) - - var imageJSON []types.ImageInspect - err = json.Unmarshal([]byte(out), &imageJSON) - c.Assert(err, checker.IsNil) - - c.Assert(len(imageJSON[0].RootFS.Layers), checker.GreaterOrEqualThan, 1) -} diff --git a/integration-cli/docker_cli_kill_test.go b/integration-cli/docker_cli_kill_test.go deleted file mode 100644 index 05d9a55879..0000000000 --- a/integration-cli/docker_cli_kill_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestKillContainer(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - cleanedContainerID := strings.TrimSpace(out) - c.Assert(waitRun(cleanedContainerID), check.IsNil) - - dockerCmd(c, "kill", cleanedContainerID) - - out, _ = dockerCmd(c, "ps", "-q") - c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running")) - -} - -func (s *DockerSuite) TestKillOffStoppedContainer(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - cleanedContainerID := strings.TrimSpace(out) - - dockerCmd(c, "stop", cleanedContainerID) - - _, _, err := dockerCmdWithError("kill", "-s", "30", cleanedContainerID) - c.Assert(err, check.Not(check.IsNil), check.Commentf("Container %s is not running", cleanedContainerID)) -} - -func (s *DockerSuite) TestKillDifferentUserContainer(c *check.C) { - // TODO Windows: Windows does not yet support -u (Feb 2016). 
- testRequires(c, DaemonIsLinux)
- out, _ := dockerCmd(c, "run", "-u", "daemon", "-d", "busybox", "top")
- cleanedContainerID := strings.TrimSpace(out)
- c.Assert(waitRun(cleanedContainerID), check.IsNil)
-
- dockerCmd(c, "kill", cleanedContainerID)
-
- out, _ = dockerCmd(c, "ps", "-q")
- c.Assert(out, checker.Not(checker.Contains), cleanedContainerID, check.Commentf("killed container is still running"))
-
-}
-
-// regression test for correct signal parsing, see #13665
-func (s *DockerSuite) TestKillWithSignal(c *check.C) {
- // Cannot port to Windows - does not support signals in the same way as Linux does
- testRequires(c, DaemonIsLinux)
- out, _ := dockerCmd(c, "run", "-d", "busybox", "top")
- cid := strings.TrimSpace(out)
- c.Assert(waitRun(cid), check.IsNil)
-
- dockerCmd(c, "kill", "-s", "SIGWINCH", cid)
-
- running := inspectField(c, cid, "State.Running")
-
- c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after SIGWINCH"))
-}
-
-func (s *DockerSuite) TestKillWithInvalidSignal(c *check.C) {
- out, _ := runSleepingContainer(c, "-d")
- cid := strings.TrimSpace(out)
- c.Assert(waitRun(cid), check.IsNil)
-
- out, _, err := dockerCmdWithError("kill", "-s", "0", cid)
- c.Assert(err, check.NotNil)
- c.Assert(out, checker.Contains, "Invalid signal: 0", check.Commentf("Kill with an invalid signal didn't error out correctly"))
-
- running := inspectField(c, cid, "State.Running")
- c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal"))
-
- out, _ = runSleepingContainer(c, "-d")
- cid = strings.TrimSpace(out)
- c.Assert(waitRun(cid), check.IsNil)
-
- out, _, err = dockerCmdWithError("kill", "-s", "SIG42", cid)
- c.Assert(err, check.NotNil)
- c.Assert(out, checker.Contains, "Invalid signal: SIG42", check.Commentf("Kill with an invalid signal didn't error out correctly"))
-
- running = inspectField(c, cid, "State.Running")
- c.Assert(running, checker.Equals, "true", check.Commentf("Container should be in running state after an invalid signal"))
-
-}
-
-func (s *DockerSuite) TestKillStoppedContainerAPIPre120(c *check.C) {
- runSleepingContainer(c, "--name", "docker-kill-test-api", "-d")
- dockerCmd(c, "stop", "docker-kill-test-api")
-
- status, _, err := sockRequest("POST", fmt.Sprintf("/v1.19/containers/%s/kill", "docker-kill-test-api"), nil)
- c.Assert(err, check.IsNil)
- c.Assert(status, check.Equals, http.StatusNoContent)
-}
diff --git a/integration-cli/docker_cli_links_test.go b/integration-cli/docker_cli_links_test.go
deleted file mode 100644
index d4bfc6a128..0000000000
--- a/integration-cli/docker_cli_links_test.go
+++ /dev/null
@@ -1,239 +0,0 @@
-package main
-
-import (
- "fmt"
- "regexp"
- "strings"
-
- "github.com/docker/docker/pkg/integration/checker"
- "github.com/docker/docker/runconfig"
- "github.com/go-check/check"
-)
-
-func (s *DockerSuite) TestLinksPingUnlinkedContainers(c *check.C) {
- testRequires(c, DaemonIsLinux)
- _, exitCode, err := dockerCmdWithError("run", "--rm", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1")
-
- // the ping is expected to fail
- c.Assert(exitCode, checker.Equals, 1, check.Commentf("error: %v", err))
-}
-
-// Test for appropriate error when calling --link with an invalid target container
-func (s *DockerSuite) TestLinksInvalidContainerTarget(c *check.C) {
- testRequires(c, DaemonIsLinux)
- out, _, err := dockerCmdWithError("run", "--link", "bogus:alias", "busybox", "true")
-
- // an invalid container target should produce
an error - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - c.Assert(out, checker.Contains, "Could not get container") -} - -func (s *DockerSuite) TestLinksPingLinkedContainers(c *check.C) { - testRequires(c, DaemonIsLinux) - // Test with the three different ways of specifying the default network on Linux - testLinkPingOnNetwork(c, "") - testLinkPingOnNetwork(c, "default") - testLinkPingOnNetwork(c, "bridge") -} - -func testLinkPingOnNetwork(c *check.C, network string) { - var postArgs []string - if network != "" { - postArgs = append(postArgs, []string{"--net", network}...) - } - postArgs = append(postArgs, []string{"busybox", "top"}...) - runArgs1 := append([]string{"run", "-d", "--name", "container1", "--hostname", "fred"}, postArgs...) - runArgs2 := append([]string{"run", "-d", "--name", "container2", "--hostname", "wilma"}, postArgs...) - - // Run the two named containers - dockerCmd(c, runArgs1...) - dockerCmd(c, runArgs2...) - - postArgs = []string{} - if network != "" { - postArgs = append(postArgs, []string{"--net", network}...) - } - postArgs = append(postArgs, []string{"busybox", "sh", "-c"}...) - - // Format a run for a container which links to the other two - runArgs := append([]string{"run", "--rm", "--link", "container1:alias1", "--link", "container2:alias2"}, postArgs...) - pingCmd := "ping -c 1 %s -W 1 && ping -c 1 %s -W 1" - - // test ping by alias, ping by name, and ping by hostname - // 1. Ping by alias - dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "alias1", "alias2"))...) - // 2. Ping by container name - dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "container1", "container2"))...) - // 3. Ping by hostname - dockerCmd(c, append(runArgs, fmt.Sprintf(pingCmd, "fred", "wilma"))...)
- - // Clean for next round - dockerCmd(c, "rm", "-f", "container1") - dockerCmd(c, "rm", "-f", "container2") -} - -func (s *DockerSuite) TestLinksPingLinkedContainersAfterRename(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") - idA := strings.TrimSpace(out) - out, _ = dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") - idB := strings.TrimSpace(out) - dockerCmd(c, "rename", "container1", "container_new") - dockerCmd(c, "run", "--rm", "--link", "container_new:alias1", "--link", "container2:alias2", "busybox", "sh", "-c", "ping -c 1 alias1 -W 1 && ping -c 1 alias2 -W 1") - dockerCmd(c, "kill", idA) - dockerCmd(c, "kill", idB) - -} - -func (s *DockerSuite) TestLinksInspectLinksStarted(c *check.C) { - testRequires(c, DaemonIsLinux) - var ( - expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} - result []string - ) - dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") - dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") - dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "top") - links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") - - err := unmarshalJSON([]byte(links), &result) - c.Assert(err, checker.IsNil) - - output := convertSliceOfStringsToMap(result) - - c.Assert(output, checker.DeepEquals, expected) -} - -func (s *DockerSuite) TestLinksInspectLinksStopped(c *check.C) { - testRequires(c, DaemonIsLinux) - var ( - expected = map[string]struct{}{"/container1:/testinspectlink/alias1": {}, "/container2:/testinspectlink/alias2": {}} - result []string - ) - dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") - dockerCmd(c, "run", "-d", "--name", "container2", "busybox", "top") - dockerCmd(c, "run", "-d", "--name", "testinspectlink", "--link", "container1:alias1", "--link", "container2:alias2", "busybox", "true") - links := inspectFieldJSON(c, "testinspectlink", "HostConfig.Links") - - err := unmarshalJSON([]byte(links), &result) - c.Assert(err, checker.IsNil) - - output := convertSliceOfStringsToMap(result) - - c.Assert(output, checker.DeepEquals, expected) -} - -func (s *DockerSuite) TestLinksNotStartedParentNotFail(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "create", "--name=first", "busybox", "top") - dockerCmd(c, "create", "--name=second", "--link=first:first", "busybox", "top") - dockerCmd(c, "start", "first") - -} - -func (s *DockerSuite) TestLinksHostsFilesInject(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, SameHostDaemon, ExecSupport) - - out, _ := dockerCmd(c, "run", "-itd", "--name", "one", "busybox", "top") - idOne := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "run", "-itd", "--name", "two", "--link", "one:onetwo", "busybox", "top") - idTwo := strings.TrimSpace(out) - - c.Assert(waitRun(idTwo), checker.IsNil) - - contentOne, err := readContainerFileWithExec(idOne, "/etc/hosts") - c.Assert(err, checker.IsNil, check.Commentf("contentOne: %s", string(contentOne))) - - contentTwo, err := readContainerFileWithExec(idTwo, "/etc/hosts") - c.Assert(err, checker.IsNil, check.Commentf("contentTwo: %s", string(contentTwo))) - // the "onetwo" alias from the link should be injected into the linking container's hosts file - c.Assert(string(contentTwo), checker.Contains, "onetwo") -} - -func (s *DockerSuite) TestLinksUpdateOnRestart(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c,
SameHostDaemon, ExecSupport) - dockerCmd(c, "run", "-d", "--name", "one", "busybox", "top") - out, _ := dockerCmd(c, "run", "-d", "--name", "two", "--link", "one:onetwo", "--link", "one:one", "busybox", "top") - id := strings.TrimSpace(string(out)) - - realIP := inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") - content, err := readContainerFileWithExec(id, "/etc/hosts") - c.Assert(err, checker.IsNil) - - getIP := func(hosts []byte, hostname string) string { - re := regexp.MustCompile(fmt.Sprintf(`(\S*)\t%s`, regexp.QuoteMeta(hostname))) - matches := re.FindSubmatch(hosts) - c.Assert(matches, checker.NotNil, check.Commentf("Hostname %s has no match in hosts", hostname)) - return string(matches[1]) - } - ip := getIP(content, "one") - c.Assert(ip, checker.Equals, realIP) - - ip = getIP(content, "onetwo") - c.Assert(ip, checker.Equals, realIP) - - dockerCmd(c, "restart", "one") - realIP = inspectField(c, "one", "NetworkSettings.Networks.bridge.IPAddress") - - content, err = readContainerFileWithExec(id, "/etc/hosts") - c.Assert(err, checker.IsNil, check.Commentf("content: %s", string(content))) - ip = getIP(content, "one") - c.Assert(ip, checker.Equals, realIP) - - ip = getIP(content, "onetwo") - c.Assert(ip, checker.Equals, realIP) -} - -func (s *DockerSuite) TestLinksEnvs(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "-e", "e1=", "-e", "e2=v2", "-e", "e3=v3=v3", "--name=first", "busybox", "top") - out, _ := dockerCmd(c, "run", "--name=second", "--link=first:first", "busybox", "env") - c.Assert(out, checker.Contains, "FIRST_ENV_e1=\n") - c.Assert(out, checker.Contains, "FIRST_ENV_e2=v2") - c.Assert(out, checker.Contains, "FIRST_ENV_e3=v3=v3") -} - -func (s *DockerSuite) TestLinkShortDefinition(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--name", "shortlinkdef", "busybox", "top") - - cid := strings.TrimSpace(out) - c.Assert(waitRun(cid), checker.IsNil) - - out, _ = dockerCmd(c, "run", "-d", "--name", "link2", "--link", "shortlinkdef", "busybox", "top") - - cid2 := strings.TrimSpace(out) - c.Assert(waitRun(cid2), checker.IsNil) - - links := inspectFieldJSON(c, cid2, "HostConfig.Links") - c.Assert(links, checker.Equals, "[\"/shortlinkdef:/link2/shortlinkdef\"]") -} - -func (s *DockerSuite) TestLinksNetworkHostContainer(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - dockerCmd(c, "run", "-d", "--net", "host", "--name", "host_container", "busybox", "top") - out, _, err := dockerCmdWithError("run", "--name", "should_fail", "--link", "host_container:tester", "busybox", "true") - - // Running a container linking to a container with --net host should have failed - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) -} - -func (s *DockerSuite) TestLinksEtcHostsRegularFile(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ls", "-la", "/etc/hosts") - // /etc/hosts should be a regular file - c.Assert(out, checker.Matches, "^-.+\n") -} - -func (s *DockerSuite) TestLinksMultipleWithSameName(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name=upstream-a", "busybox", "top") - dockerCmd(c, "run", "-d", "--name=upstream-b", "busybox", "top") - dockerCmd(c, "run", "--link", "upstream-a:upstream", "--link", "upstream-b:upstream",
"busybox", "sh", "-c", "ping -c 1 upstream") -} diff --git a/integration-cli/docker_cli_links_unix_test.go b/integration-cli/docker_cli_links_unix_test.go deleted file mode 100644 index 1af927930d..0000000000 --- a/integration-cli/docker_cli_links_unix_test.go +++ /dev/null @@ -1,26 +0,0 @@ -// +build !windows - -package main - -import ( - "io/ioutil" - "os" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestLinksEtcHostsContentMatch(c *check.C) { - // This test lives in a _unix file because it uses Unix-specific files and - // must run on the same host as the daemon. - testRequires(c, SameHostDaemon, NotUserNamespace) - - out, _ := dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hosts") - hosts, err := ioutil.ReadFile("/etc/hosts") - if os.IsNotExist(err) { - c.Skip("/etc/hosts does not exist, skip this test") - } - - c.Assert(out, checker.Equals, string(hosts), check.Commentf("container: %s\n\nhost:%s", out, hosts)) - -} diff --git a/integration-cli/docker_cli_login_test.go b/integration-cli/docker_cli_login_test.go deleted file mode 100644 index 01de75d985..0000000000 --- a/integration-cli/docker_cli_login_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "bytes" - "os/exec" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestLoginWithoutTTY(c *check.C) { - cmd := exec.Command(dockerBinary, "login") - - // Send to stdin so the process does not get the TTY - cmd.Stdin = bytes.NewBufferString("buffer test string \n") - - // run the command and block until it's done - err := cmd.Run() - c.Assert(err, checker.NotNil) //"Expected non nil err when logging in & TTY not available" -} - -func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistry(c *check.C) { - // wrong credentials - out, _, err := dockerCmdWithError("login", "-u", s.reg.username, "-p", "WRONGPASSWORD", privateRegistryURL) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "401 Unauthorized") - - // now it's fine - dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) -} - -func (s *DockerRegistryAuthHtpasswdSuite) TestLoginToPrivateRegistryDeprecatedEmailFlag(c *check.C) { - // Test to make sure login still works with the deprecated -e and --email flags - // wrong credentials - out, _, err := dockerCmdWithError("login", "-u", s.reg.username, "-p", "WRONGPASSWORD", "-e", s.reg.email, privateRegistryURL) - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "401 Unauthorized") - - // now it's fine - // -e flag - dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, "-e", s.reg.email, privateRegistryURL) - // --email flag - dockerCmd(c, "login", "-u", s.reg.username, "-p", s.reg.password, "--email", s.reg.email, privateRegistryURL) -} diff --git a/integration-cli/docker_cli_logout_test.go b/integration-cli/docker_cli_logout_test.go deleted file mode 100644 index 52d4fff303..0000000000 --- a/integration-cli/docker_cli_logout_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerRegistryAuthHtpasswdSuite) TestLogoutWithExternalAuth(c *check.C) { - osPath := os.Getenv("PATH") - defer os.Setenv("PATH", osPath) - - workingDir, err := os.Getwd() - c.Assert(err, checker.IsNil) - absolute, err :=
filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) - c.Assert(err, checker.IsNil) - testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) - - os.Setenv("PATH", testPath) - - repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) - - tmp, err := ioutil.TempDir("", "integration-cli-") - c.Assert(err, checker.IsNil) - - externalAuthConfig := `{ "credsStore": "shell-test" }` - - configPath := filepath.Join(tmp, "config.json") - err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) - c.Assert(err, checker.IsNil) - - dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) - - b, err := ioutil.ReadFile(configPath) - c.Assert(err, checker.IsNil) - c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") - c.Assert(string(b), checker.Contains, privateRegistryURL) - - dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) - dockerCmd(c, "--config", tmp, "push", repoName) - - dockerCmd(c, "--config", tmp, "logout", privateRegistryURL) - - b, err = ioutil.ReadFile(configPath) - c.Assert(err, checker.IsNil) - c.Assert(string(b), checker.Not(checker.Contains), privateRegistryURL) - - // check that the image can no longer be pulled - out, _, err := dockerCmdWithError("--config", tmp, "pull", repoName) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Error: image dockercli/busybox:authtest not found") -} diff --git a/integration-cli/docker_cli_logs_bench_test.go b/integration-cli/docker_cli_logs_bench_test.go deleted file mode 100644 index eeb008de70..0000000000 --- a/integration-cli/docker_cli_logs_bench_test.go +++ /dev/null @@ -1,32 +0,0 @@ -package main - -import ( - "fmt" - "strings" - "time" - - "github.com/go-check/check" -) - -func (s *DockerSuite) BenchmarkLogsCLIRotateFollow(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--log-opt", "max-size=1b", "--log-opt", "max-file=10", "busybox", "sh", "-c", "while true; do usleep 50000; echo hello; done") - id := strings.TrimSpace(out) - ch := make(chan error, 1) - go func() { - ch <- nil - out, _, _ := dockerCmdWithError("logs", "-f", id) - // if this returns at all, it's an error - ch <- fmt.Errorf(out) - }() - - <-ch - select { - case <-time.After(30 * time.Second): - // ran for 30 seconds with no problem - return - case err := <-ch: - if err != nil { - c.Fatal(err) - } - } -} diff --git a/integration-cli/docker_cli_logs_test.go b/integration-cli/docker_cli_logs_test.go deleted file mode 100644 index 317cb202ec..0000000000 --- a/integration-cli/docker_cli_logs_test.go +++ /dev/null @@ -1,322 +0,0 @@ -package main - -import ( - "fmt" - "io" - "os/exec" - "regexp" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/jsonlog" - "github.com/go-check/check" -) - -// This used to work; it tests a log of PageSize-1 (gh#4851) -func (s *DockerSuite) TestLogsContainerSmallerThanPage(c *check.C) { - testLen := 32767 - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in
$(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) - - id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) - - out, _ = dockerCmd(c, "logs", id) - - c.Assert(out, checker.HasLen, testLen+1) -} - -// Regression test: When going much over the PageSize, it used to block (gh#4851) -func (s *DockerSuite) TestLogsContainerMuchBiggerThanPage(c *check.C) { - testLen := 33000 - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo -n = >> a.a; done; echo >> a.a; cat a.a", testLen)) - - id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) - - out, _ = dockerCmd(c, "logs", id) - - c.Assert(out, checker.HasLen, testLen+1) -} - -func (s *DockerSuite) TestLogsTimestamps(c *check.C) { - testLen := 100 - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo = >> a.a; done; cat a.a", testLen)) - - id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) - - out, _ = dockerCmd(c, "logs", "-t", id) - - lines := strings.Split(out, "\n") - - c.Assert(lines, checker.HasLen, testLen+1) - - ts := regexp.MustCompile(`^.* `) - - for _, l := range lines { - if l != "" { - _, err := time.Parse(jsonlog.RFC3339NanoFixed+" ", ts.FindString(l)) - c.Assert(err, checker.IsNil, check.Commentf("Failed to parse timestamp from %v", l)) - // ensure we have padded 0's - c.Assert(l[29], checker.Equals, uint8('Z')) - } - } -} - -func (s *DockerSuite) TestLogsSeparateStderr(c *check.C) { - msg := "stderr_log" - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) - - id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) - - stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) - - c.Assert(stdout, checker.Equals, "") - - stderr = strings.TrimSpace(stderr) - - c.Assert(stderr, checker.Equals, msg) -} - -func (s *DockerSuite) TestLogsStderrInStdout(c *check.C) { - // TODO Windows: Needs investigation why this fails. Obtained string includes - // a bunch of ANSI escape sequences before the "stderr_log" message. 
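- // with a TTY allocated (-t), the daemon multiplexes stderr into the stdout stream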
- testRequires(c, DaemonIsLinux) - msg := "stderr_log" - out, _ := dockerCmd(c, "run", "-d", "-t", "busybox", "sh", "-c", fmt.Sprintf("echo %s 1>&2", msg)) - - id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) - - stdout, stderr, _ := dockerCmdWithStdoutStderr(c, "logs", id) - c.Assert(stderr, checker.Equals, "") - - stdout = strings.TrimSpace(stdout) - c.Assert(stdout, checker.Equals, msg) -} - -func (s *DockerSuite) TestLogsTail(c *check.C) { - testLen := 100 - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", fmt.Sprintf("for i in $(seq 1 %d); do echo =; done;", testLen)) - - id := strings.TrimSpace(out) - dockerCmd(c, "wait", id) - - out, _ = dockerCmd(c, "logs", "--tail", "5", id) - - lines := strings.Split(out, "\n") - - c.Assert(lines, checker.HasLen, 6) - - out, _ = dockerCmd(c, "logs", "--tail", "all", id) - - lines = strings.Split(out, "\n") - - c.Assert(lines, checker.HasLen, testLen+1) - - out, _, _ = dockerCmdWithStdoutStderr(c, "logs", "--tail", "random", id) - - lines = strings.Split(out, "\n") - - c.Assert(lines, checker.HasLen, testLen+1) -} - -func (s *DockerSuite) TestLogsFollowStopped(c *check.C) { - dockerCmd(c, "run", "--name=test", "busybox", "echo", "hello") - id, err := getIDByName("test") - c.Assert(err, check.IsNil) - - logsCmd := exec.Command(dockerBinary, "logs", "-f", id) - c.Assert(logsCmd.Start(), checker.IsNil) - - errChan := make(chan error) - go func() { - errChan <- logsCmd.Wait() - close(errChan) - }() - - select { - case err := <-errChan: - c.Assert(err, checker.IsNil) - case <-time.After(30 * time.Second): - c.Fatal("Following logs hung") - } -} - -func (s *DockerSuite) TestLogsSince(c *check.C) { - name := "testlogssince" - dockerCmd(c, "run", "--name="+name, "busybox", "/bin/sh", "-c", "for i in $(seq 1 3); do sleep 2; echo log$i; done") - out, _ := dockerCmd(c, "logs", "-t", name) - - log2Line := strings.Split(strings.Split(out, "\n")[1], " ") - t, err := time.Parse(time.RFC3339Nano, log2Line[0]) // the timestamp at which log2 was written - c.Assert(err, checker.IsNil) - since := t.Unix() + 1 // add 1s so log1 & log2 don't show up - out, _ = dockerCmd(c, "logs", "-t", fmt.Sprintf("--since=%v", since), name) - - // Skip 2 seconds - unexpected := []string{"log1", "log2"} - for _, v := range unexpected { - c.Assert(out, checker.Not(checker.Contains), v, check.Commentf("unexpected log message returned, since=%v", since)) - } - - // Test to make sure a bad since format is caught by the client - out, _, _ = dockerCmdWithError("logs", "-t", "--since=2006-01-02T15:04:0Z", name) - c.Assert(out, checker.Contains, "cannot parse \"0Z\" as \"05\"", check.Commentf("bad since format passed to server")) - - // Test with default value specified and parameter omitted - expected := []string{"log1", "log2", "log3"} - for _, cmd := range []*exec.Cmd{ - exec.Command(dockerBinary, "logs", "-t", name), - exec.Command(dockerBinary, "logs", "-t", "--since=0", name), - } { - out, _, err = runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil, check.Commentf("failed to log container: %s", out)) - for _, v := range expected { - c.Assert(out, checker.Contains, v) - } - } -} - -func (s *DockerSuite) TestLogsSinceFutureFollow(c *check.C) { - // TODO Windows TP5 - Figure out why this test is so flaky. Disabled for now.
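- // logs --follow with --since set in the future should only return lines stamped at or after that time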
- testRequires(c, DaemonIsLinux) - name := "testlogssincefuturefollow" - out, _ := dockerCmd(c, "run", "-d", "--name", name, "busybox", "/bin/sh", "-c", `for i in $(seq 1 5); do echo log$i; sleep 1; done`) - - // Extract one timestamp from the log file to give us a starting point for - // our `--since` argument. Because the log producer runs in the background, - // we need to check repeatedly for some output to be produced. - var timestamp string - for i := 0; i != 100 && timestamp == ""; i++ { - if out, _ = dockerCmd(c, "logs", "-t", name); out == "" { - time.Sleep(time.Millisecond * 100) // Retry - } else { - timestamp = strings.Split(strings.Split(out, "\n")[0], " ")[0] - } - } - - c.Assert(timestamp, checker.Not(checker.Equals), "") - t, err := time.Parse(time.RFC3339Nano, timestamp) - c.Assert(err, check.IsNil) - - since := t.Unix() + 2 - out, _ = dockerCmd(c, "logs", "-t", "-f", fmt.Sprintf("--since=%v", since), name) - c.Assert(out, checker.Not(checker.HasLen), 0, check.Commentf("cannot read from empty log")) - lines := strings.Split(strings.TrimSpace(out), "\n") - for _, v := range lines { - ts, err := time.Parse(time.RFC3339Nano, strings.Split(v, " ")[0]) - c.Assert(err, checker.IsNil, check.Commentf("cannot parse timestamp output from log: '%v'", v)) - c.Assert(ts.Unix() >= since, checker.Equals, true, check.Commentf("earlier log found. since=%v logdate=%v", since, ts)) - } -} - -// Regression test for #8832 -func (s *DockerSuite) TestLogsFollowSlowStdoutConsumer(c *check.C) { - // TODO Windows: Fix this test for TP5. - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", `usleep 600000;yes X | head -c 200000`) - - id := strings.TrimSpace(out) - - stopSlowRead := make(chan bool) - - go func() { - exec.Command(dockerBinary, "wait", id).Run() - stopSlowRead <- true - }() - - logCmd := exec.Command(dockerBinary, "logs", "-f", id) - stdout, err := logCmd.StdoutPipe() - c.Assert(err, checker.IsNil) - c.Assert(logCmd.Start(), checker.IsNil) - - // First read slowly - bytes1, err := consumeWithSpeed(stdout, 10, 50*time.Millisecond, stopSlowRead) - c.Assert(err, checker.IsNil) - - // After the container has finished we can continue reading fast - bytes2, err := consumeWithSpeed(stdout, 32*1024, 0, nil) - c.Assert(err, checker.IsNil) - - actual := bytes1 + bytes2 - expected := 200000 - c.Assert(actual, checker.Equals, expected) -} - -func (s *DockerSuite) TestLogsFollowGoroutinesWithStdout(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do echo hello; sleep 2; done") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - nroutines, err := getGoroutineNumber() - c.Assert(err, checker.IsNil) - cmd := exec.Command(dockerBinary, "logs", "-f", id) - r, w := io.Pipe() - cmd.Stdout = w - c.Assert(cmd.Start(), checker.IsNil) - - // Make sure pipe is written to - chErr := make(chan error) - go func() { - b := make([]byte, 1) - _, err := r.Read(b) - chErr <- err - }() - c.Assert(<-chErr, checker.IsNil) - c.Assert(cmd.Process.Kill(), checker.IsNil) - - // NGoroutines is not updated right away, so we need to wait before failing - c.Assert(waitForGoroutines(nroutines), checker.IsNil) -} - -func (s *DockerSuite) TestLogsFollowGoroutinesNoOutput(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "while true; do sleep 2; done") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - nroutines, err := getGoroutineNumber() - c.Assert(err, checker.IsNil) 
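- // start a follower on a silent container, kill the client, and verify the daemon's goroutine count returns to the baseline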
- cmd := exec.Command(dockerBinary, "logs", "-f", id) - c.Assert(cmd.Start(), checker.IsNil) - time.Sleep(200 * time.Millisecond) - c.Assert(cmd.Process.Kill(), checker.IsNil) - - // NGoroutines is not updated right away, so we need to wait before failing - c.Assert(waitForGoroutines(nroutines), checker.IsNil) -} - -func (s *DockerSuite) TestLogsCLIContainerNotFound(c *check.C) { - name := "testlogsnocontainer" - out, _, _ := dockerCmdWithError("logs", name) - message := fmt.Sprintf("Error: No such container: %s\n", name) - c.Assert(out, checker.Equals, message) -} - -func (s *DockerSuite) TestLogsWithDetails(c *check.C) { - dockerCmd(c, "run", "--name=test", "--label", "foo=bar", "-e", "baz=qux", "--log-opt", "labels=foo", "--log-opt", "env=baz", "busybox", "echo", "hello") - out, _ := dockerCmd(c, "logs", "--details", "--timestamps", "test") - - logFields := strings.Fields(strings.TrimSpace(out)) - c.Assert(len(logFields), checker.Equals, 3, check.Commentf(out)) - - details := strings.Split(logFields[1], ",") - c.Assert(details, checker.HasLen, 2) - c.Assert(details[0], checker.Equals, "baz=qux") - c.Assert(details[1], checker.Equals, "foo=bar") -} diff --git a/integration-cli/docker_cli_nat_test.go b/integration-cli/docker_cli_nat_test.go deleted file mode 100644 index 7f4cc2cbd7..0000000000 --- a/integration-cli/docker_cli_nat_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "net" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func startServerContainer(c *check.C, msg string, port int) string { - name := "server" - cmd := []string{ - "-d", - "-p", fmt.Sprintf("%d:%d", port, port), - "busybox", - "sh", "-c", fmt.Sprintf("echo %q | nc -lp %d", msg, port), - } - c.Assert(waitForContainer(name, cmd...), check.IsNil) - return name -} - -func getExternalAddress(c *check.C) net.IP { - iface, err := net.InterfaceByName("eth0") - if err != nil { - c.Skip(fmt.Sprintf("Test not running with `make test`. 
Interface eth0 not found: %v", err)) - } - - ifaceAddrs, err := iface.Addrs() - c.Assert(err, check.IsNil) - c.Assert(ifaceAddrs, checker.Not(checker.HasLen), 0) - - ifaceIP, _, err := net.ParseCIDR(ifaceAddrs[0].String()) - c.Assert(err, check.IsNil) - - return ifaceIP -} - -func getContainerLogs(c *check.C, containerID string) string { - out, _ := dockerCmd(c, "logs", containerID) - return strings.Trim(out, "\r\n") -} - -func getContainerStatus(c *check.C, containerID string) string { - out := inspectField(c, containerID, "State.Running") - return out -} - -func (s *DockerSuite) TestNetworkNat(c *check.C) { - testRequires(c, DaemonIsLinux, SameHostDaemon) - msg := "it works" - startServerContainer(c, msg, 8080) - endpoint := getExternalAddress(c) - conn, err := net.Dial("tcp", fmt.Sprintf("%s:%d", endpoint.String(), 8080)) - c.Assert(err, check.IsNil) - - data, err := ioutil.ReadAll(conn) - conn.Close() - c.Assert(err, check.IsNil) - - final := strings.TrimRight(string(data), "\n") - c.Assert(final, checker.Equals, msg) -} - -func (s *DockerSuite) TestNetworkLocalhostTCPNat(c *check.C) { - testRequires(c, DaemonIsLinux, SameHostDaemon) - var ( - msg = "hi yall" - ) - startServerContainer(c, msg, 8081) - conn, err := net.Dial("tcp", "localhost:8081") - c.Assert(err, check.IsNil) - - data, err := ioutil.ReadAll(conn) - conn.Close() - c.Assert(err, check.IsNil) - - final := strings.TrimRight(string(data), "\n") - c.Assert(final, checker.Equals, msg) -} - -func (s *DockerSuite) TestNetworkLoopbackNat(c *check.C) { - testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) - msg := "it works" - startServerContainer(c, msg, 8080) - endpoint := getExternalAddress(c) - out, _ := dockerCmd(c, "run", "-t", "--net=container:server", "busybox", - "sh", "-c", fmt.Sprintf("stty raw && nc -w 5 %s 8080", endpoint.String())) - final := strings.TrimRight(string(out), "\n") - c.Assert(final, checker.Equals, msg) -} diff --git a/integration-cli/docker_cli_netmode_test.go b/integration-cli/docker_cli_netmode_test.go deleted file mode 100644 index 4dfad937b5..0000000000 --- a/integration-cli/docker_cli_netmode_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/runconfig" - "github.com/go-check/check" -) - -// GH14530. Validates combinations of --net= with other options - -// stringCheckPS is how the output of PS starts, used to validate that -// the command executed in a container really did run PS correctly. -const stringCheckPS = "PID USER" - -// dockerCmdWithFail executes a docker command that is supposed to fail and returns -// the output and the exit code. If the command returns a nil error, it fails and -// stops the tests. -func dockerCmdWithFail(c *check.C, args ...string) (string, int) { - out, status, err := dockerCmdWithError(args...)
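- // a nil error here means the command unexpectedly succeeded, so fail the test and show its output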
- c.Assert(err, check.NotNil, check.Commentf("%v", out)) - return out, status -} - -func (s *DockerSuite) TestNetHostnameWithNetHost(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - - out, _ := dockerCmd(c, "run", "--net=host", "busybox", "ps") - c.Assert(out, checker.Contains, stringCheckPS) -} - -func (s *DockerSuite) TestNetHostname(c *check.C) { - testRequires(c, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-h=name", "busybox", "ps") - c.Assert(out, checker.Contains, stringCheckPS) - - out, _ = dockerCmd(c, "run", "-h=name", "--net=bridge", "busybox", "ps") - c.Assert(out, checker.Contains, stringCheckPS) - - out, _ = dockerCmd(c, "run", "-h=name", "--net=none", "busybox", "ps") - c.Assert(out, checker.Contains, stringCheckPS) - - out, _ = dockerCmdWithFail(c, "run", "-h=name", "--net=container:other", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHostname.Error()) - - out, _ = dockerCmdWithFail(c, "run", "--net=container", "busybox", "ps") - c.Assert(out, checker.Contains, "--net: invalid net mode: invalid container format container:") - - out, _ = dockerCmdWithFail(c, "run", "--net=weird", "busybox", "ps") - c.Assert(out, checker.Contains, "network weird not found") -} - -func (s *DockerSuite) TestConflictContainerNetworkAndLinks(c *check.C) { - testRequires(c, DaemonIsLinux) - - out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--link=zip:zap", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndLinks.Error()) -} - -func (s *DockerSuite) TestConflictContainerNetworkHostAndLinks(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - - out, _ := dockerCmdWithFail(c, "run", "--net=host", "--link=zip:zap", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetworkAndLinks.Error()) -} - -func (s *DockerSuite) TestConflictNetworkModeNetHostAndOptions(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - - out, _ := dockerCmdWithFail(c, "run", "--net=host", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) -} - -func (s *DockerSuite) TestConflictNetworkModeAndOptions(c *check.C) { - testRequires(c, DaemonIsLinux) - - out, _ := dockerCmdWithFail(c, "run", "--net=container:other", "--dns=8.8.8.8", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkAndDNS.Error()) - - out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--add-host=name:8.8.8.8", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkHosts.Error()) - - out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--mac-address=92:d0:c6:0a:29:33", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictContainerNetworkAndMac.Error()) - - out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-P", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) - - out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "-p", "8080", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkPublishPorts.Error()) - - out, _ = dockerCmdWithFail(c, "run", "--net=container:other", "--expose", "8000-9000", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictNetworkExposePorts.Error()) -} diff --git a/integration-cli/docker_cli_network_unix_test.go b/integration-cli/docker_cli_network_unix_test.go deleted file mode 100644 
index f333d73d22..0000000000 --- a/integration-cli/docker_cli_network_unix_test.go +++ /dev/null @@ -1,1674 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "os" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/versions/v1p20" - "github.com/docker/libnetwork/driverapi" - remoteapi "github.com/docker/libnetwork/drivers/remote/api" - "github.com/docker/libnetwork/ipamapi" - remoteipam "github.com/docker/libnetwork/ipams/remote/api" - "github.com/docker/libnetwork/netlabel" - "github.com/go-check/check" - "github.com/vishvananda/netlink" -) - -const dummyNetworkDriver = "dummy-network-driver" -const dummyIpamDriver = "dummy-ipam-driver" - -var remoteDriverNetworkRequest remoteapi.CreateNetworkRequest - -func init() { - check.Suite(&DockerNetworkSuite{ - ds: &DockerSuite{}, - }) -} - -type DockerNetworkSuite struct { - server *httptest.Server - ds *DockerSuite - d *Daemon -} - -func (s *DockerNetworkSuite) SetUpTest(c *check.C) { - s.d = NewDaemon(c) -} - -func (s *DockerNetworkSuite) TearDownTest(c *check.C) { - s.d.Stop() - s.ds.TearDownTest(c) -} - -func (s *DockerNetworkSuite) SetUpSuite(c *check.C) { - mux := http.NewServeMux() - s.server = httptest.NewServer(mux) - c.Assert(s.server, check.NotNil, check.Commentf("Failed to start an HTTP Server")) - setupRemoteNetworkDrivers(c, mux, s.server.URL, dummyNetworkDriver, dummyIpamDriver) -} - -func setupRemoteNetworkDrivers(c *check.C, mux *http.ServeMux, url, netDrv, ipamDrv string) { - - mux.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintf(w, `{"Implements": ["%s", "%s"]}`, driverapi.NetworkPluginEndpointType, ipamapi.PluginEndpointType) - }) - - // Network driver implementation - mux.HandleFunc(fmt.Sprintf("/%s.GetCapabilities", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintf(w, `{"Scope":"local"}`) - }) - - mux.HandleFunc(fmt.Sprintf("/%s.CreateNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - err := json.NewDecoder(r.Body).Decode(&remoteDriverNetworkRequest) - if err != nil { - http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintf(w, "null") - }) - - mux.HandleFunc(fmt.Sprintf("/%s.DeleteNetwork", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintf(w, "null") - }) - - mux.HandleFunc(fmt.Sprintf("/%s.CreateEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintf(w, `{"Interface":{"MacAddress":"a0:b1:c2:d3:e4:f5"}}`) - }) - - mux.HandleFunc(fmt.Sprintf("/%s.Join", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - - veth := &netlink.Veth{ - LinkAttrs: netlink.LinkAttrs{Name: "randomIfName", TxQLen: 
0}, PeerName: "cnt0"} - if err := netlink.LinkAdd(veth); err != nil { - fmt.Fprintf(w, `{"Error":"failed to add veth pair: `+err.Error()+`"}`) - } else { - fmt.Fprintf(w, `{"InterfaceName":{ "SrcName":"cnt0", "DstPrefix":"veth"}}`) - } - }) - - mux.HandleFunc(fmt.Sprintf("/%s.Leave", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintf(w, "null") - }) - - mux.HandleFunc(fmt.Sprintf("/%s.DeleteEndpoint", driverapi.NetworkPluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - if link, err := netlink.LinkByName("cnt0"); err == nil { - netlink.LinkDel(link) - } - fmt.Fprintf(w, "null") - }) - - // IPAM driver implementation - var ( - poolRequest remoteipam.RequestPoolRequest - poolReleaseReq remoteipam.ReleasePoolRequest - addressRequest remoteipam.RequestAddressRequest - addressReleaseReq remoteipam.ReleaseAddressRequest - lAS = "localAS" - gAS = "globalAS" - pool = "172.28.0.0/16" - poolID = lAS + "/" + pool - gw = "172.28.255.254/16" - ) - - mux.HandleFunc(fmt.Sprintf("/%s.GetDefaultAddressSpaces", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - fmt.Fprintf(w, `{"LocalDefaultAddressSpace":"`+lAS+`", "GlobalDefaultAddressSpace": "`+gAS+`"}`) - }) - - mux.HandleFunc(fmt.Sprintf("/%s.RequestPool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - err := json.NewDecoder(r.Body).Decode(&poolRequest) - if err != nil { - http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - if poolRequest.AddressSpace != lAS && poolRequest.AddressSpace != gAS { - fmt.Fprintf(w, `{"Error":"Unknown address space in pool request: `+poolRequest.AddressSpace+`"}`) - } else if poolRequest.Pool != "" && poolRequest.Pool != pool { - fmt.Fprintf(w, `{"Error":"Cannot handle explicit pool requests yet"}`) - } else { - fmt.Fprintf(w, `{"PoolID":"`+poolID+`", "Pool":"`+pool+`"}`) - } - }) - - mux.HandleFunc(fmt.Sprintf("/%s.RequestAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - err := json.NewDecoder(r.Body).Decode(&addressRequest) - if err != nil { - http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - // make sure libnetwork is now querying on the expected pool id - if addressRequest.PoolID != poolID { - fmt.Fprintf(w, `{"Error":"unknown pool id"}`) - } else if addressRequest.Address != "" { - fmt.Fprintf(w, `{"Error":"Cannot handle explicit address requests yet"}`) - } else { - fmt.Fprintf(w, `{"Address":"`+gw+`"}`) - } - }) - - mux.HandleFunc(fmt.Sprintf("/%s.ReleaseAddress", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - err := json.NewDecoder(r.Body).Decode(&addressReleaseReq) - if err != nil { - http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - // make sure libnetwork is now asking to release the expected address from the expected poolid - if addressReleaseReq.PoolID != poolID { - fmt.Fprintf(w, `{"Error":"unknown pool id"}`) - } else if
addressReleaseReq.Address != gw { - fmt.Fprintf(w, `{"Error":"unknown address"}`) - } else { - fmt.Fprintf(w, "null") - } - }) - - mux.HandleFunc(fmt.Sprintf("/%s.ReleasePool", ipamapi.PluginEndpointType), func(w http.ResponseWriter, r *http.Request) { - err := json.NewDecoder(r.Body).Decode(&poolReleaseReq) - if err != nil { - http.Error(w, "Unable to decode JSON payload: "+err.Error(), http.StatusBadRequest) - return - } - w.Header().Set("Content-Type", "application/vnd.docker.plugins.v1+json") - // make sure libnetwork is now asking to release the expected poolid - if poolReleaseReq.PoolID != poolID { - fmt.Fprintf(w, `{"Error":"unknown pool id"}`) - } else { - fmt.Fprintf(w, "null") - } - }) - - err := os.MkdirAll("/etc/docker/plugins", 0755) - c.Assert(err, checker.IsNil) - - fileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", netDrv) - err = ioutil.WriteFile(fileName, []byte(url), 0644) - c.Assert(err, checker.IsNil) - - ipamFileName := fmt.Sprintf("/etc/docker/plugins/%s.spec", ipamDrv) - err = ioutil.WriteFile(ipamFileName, []byte(url), 0644) - c.Assert(err, checker.IsNil) -} - -func (s *DockerNetworkSuite) TearDownSuite(c *check.C) { - if s.server == nil { - return - } - - s.server.Close() - - err := os.RemoveAll("/etc/docker/plugins") - c.Assert(err, checker.IsNil) -} - -func assertNwIsAvailable(c *check.C, name string) { - if !isNwPresent(c, name) { - c.Fatalf("Network %s not found in network ls o/p", name) - } -} - -func assertNwNotAvailable(c *check.C, name string) { - if isNwPresent(c, name) { - c.Fatalf("Found network %s in network ls o/p", name) - } -} - -func isNwPresent(c *check.C, name string) bool { - out, _ := dockerCmd(c, "network", "ls") - lines := strings.Split(out, "\n") - for i := 1; i < len(lines)-1; i++ { - netFields := strings.Fields(lines[i]) - if netFields[1] == name { - return true - } - } - return false -} - -// assertNwList checks that the network list retrieved with the ls command -// equals the expected network list -// note: out should be the `network ls [option]` result -func assertNwList(c *check.C, out string, expectNws []string) { - lines := strings.Split(out, "\n") - var nwList []string - for _, line := range lines[1 : len(lines)-1] { - netFields := strings.Fields(line) - // collect all network names in nwList - nwList = append(nwList, netFields[1]) - } - - // network ls should contain all expected networks - c.Assert(nwList, checker.DeepEquals, expectNws) -} - -func getNwResource(c *check.C, name string) *types.NetworkResource { - out, _ := dockerCmd(c, "network", "inspect", name) - nr := []types.NetworkResource{} - err := json.Unmarshal([]byte(out), &nr) - c.Assert(err, check.IsNil) - return &nr[0] -} - -func (s *DockerNetworkSuite) TestDockerNetworkLsDefault(c *check.C) { - defaults := []string{"bridge", "host", "none"} - for _, nn := range defaults { - assertNwIsAvailable(c, nn) - } -} - -func (s *DockerNetworkSuite) TestDockerNetworkCreatePredefined(c *check.C) { - predefined := []string{"bridge", "host", "none", "default"} - for _, net := range predefined { - // predefined networks can't be created again - out, _, err := dockerCmdWithError("network", "create", net) - c.Assert(err, checker.NotNil, check.Commentf("%v", out)) - } -} - -func (s *DockerNetworkSuite) TestDockerNetworkCreateHostBind(c *check.C) { - dockerCmd(c, "network", "create", "--subnet=192.168.10.0/24", "--gateway=192.168.10.1", "-o", "com.docker.network.bridge.host_binding_ipv4=192.168.10.1", "testbind") - assertNwIsAvailable(c, "testbind") - - out, _ := runSleepingContainer(c,
"--net=testbind", "-p", "5000:5000") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - out, _ = dockerCmd(c, "ps") - c.Assert(out, checker.Contains, "192.168.10.1:5000->5000/tcp") -} - -func (s *DockerNetworkSuite) TestDockerNetworkRmPredefined(c *check.C) { - predefined := []string{"bridge", "host", "none", "default"} - for _, net := range predefined { - // predefined networks can't be removed - out, _, err := dockerCmdWithError("network", "rm", net) - c.Assert(err, checker.NotNil, check.Commentf("%v", out)) - } -} - -func (s *DockerNetworkSuite) TestDockerNetworkLsFilter(c *check.C) { - testNet := "testnet1" - testLabel := "foo" - testValue := "bar" - out, _ := dockerCmd(c, "network", "create", "dev") - defer func() { - dockerCmd(c, "network", "rm", "dev") - dockerCmd(c, "network", "rm", testNet) - }() - networkID := strings.TrimSpace(out) - - // filter with partial ID - // only show 'dev' network - out, _ = dockerCmd(c, "network", "ls", "-f", "id="+networkID[0:5]) - assertNwList(c, out, []string{"dev"}) - - out, _ = dockerCmd(c, "network", "ls", "-f", "name=dge") - assertNwList(c, out, []string{"bridge"}) - - // only show built-in network (bridge, none, host) - out, _ = dockerCmd(c, "network", "ls", "-f", "type=builtin") - assertNwList(c, out, []string{"bridge", "host", "none"}) - - // only show custom networks (dev) - out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom") - assertNwList(c, out, []string{"dev"}) - - // show all networks with filter - // it should be equivalent of ls without option - out, _ = dockerCmd(c, "network", "ls", "-f", "type=custom", "-f", "type=builtin") - assertNwList(c, out, []string{"bridge", "dev", "host", "none"}) - - out, _ = dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) - assertNwIsAvailable(c, testNet) - - out, _ = dockerCmd(c, "network", "ls", "-f", "label="+testLabel) - assertNwList(c, out, []string{testNet}) - - out, _ = dockerCmd(c, "network", "ls", "-f", "label="+testLabel+"="+testValue) - assertNwList(c, out, []string{testNet}) - - out, _ = dockerCmd(c, "network", "ls", "-f", "label=nonexistent") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) - - out, _ = dockerCmd(c, "network", "ls", "-f", "driver=null") - assertNwList(c, out, []string{"none"}) - - out, _ = dockerCmd(c, "network", "ls", "-f", "driver=host") - assertNwList(c, out, []string{"host"}) - - out, _ = dockerCmd(c, "network", "ls", "-f", "driver=bridge") - assertNwList(c, out, []string{"bridge", "dev", testNet}) -} - -func (s *DockerNetworkSuite) TestDockerNetworkCreateDelete(c *check.C) { - dockerCmd(c, "network", "create", "test") - assertNwIsAvailable(c, "test") - - dockerCmd(c, "network", "rm", "test") - assertNwNotAvailable(c, "test") -} - -func (s *DockerNetworkSuite) TestDockerNetworkCreateLabel(c *check.C) { - testNet := "testnetcreatelabel" - testLabel := "foo" - testValue := "bar" - - dockerCmd(c, "network", "create", "--label", testLabel+"="+testValue, testNet) - assertNwIsAvailable(c, testNet) - - out, _, err := dockerCmdWithError("network", "inspect", "--format={{ .Labels."+testLabel+" }}", testNet) - c.Assert(err, check.IsNil) - c.Assert(strings.TrimSpace(out), check.Equals, testValue) - - dockerCmd(c, "network", "rm", testNet) - assertNwNotAvailable(c, testNet) -} - -func (s *DockerSuite) TestDockerNetworkDeleteNotExists(c *check.C) { - out, _, err := dockerCmdWithError("network", "rm", "test") - c.Assert(err, checker.NotNil, 
check.Commentf("%v", out)) -} - -func (s *DockerSuite) TestDockerNetworkDeleteMultiple(c *check.C) { - dockerCmd(c, "network", "create", "testDelMulti0") - assertNwIsAvailable(c, "testDelMulti0") - dockerCmd(c, "network", "create", "testDelMulti1") - assertNwIsAvailable(c, "testDelMulti1") - dockerCmd(c, "network", "create", "testDelMulti2") - assertNwIsAvailable(c, "testDelMulti2") - out, _ := dockerCmd(c, "run", "-d", "--net", "testDelMulti2", "busybox", "top") - containerID := strings.TrimSpace(out) - waitRun(containerID) - - // delete three networks at the same time, since testDelMulti2 - // contains active container, its deletion should fail. - out, _, err := dockerCmdWithError("network", "rm", "testDelMulti0", "testDelMulti1", "testDelMulti2") - // err should not be nil due to deleting testDelMulti2 failed. - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - // testDelMulti2 should fail due to network has active endpoints - c.Assert(out, checker.Contains, "has active endpoints") - assertNwNotAvailable(c, "testDelMulti0") - assertNwNotAvailable(c, "testDelMulti1") - // testDelMulti2 can't be deleted, so it should exist - assertNwIsAvailable(c, "testDelMulti2") -} - -func (s *DockerSuite) TestDockerNetworkInspect(c *check.C) { - out, _ := dockerCmd(c, "network", "inspect", "host") - networkResources := []types.NetworkResource{} - err := json.Unmarshal([]byte(out), &networkResources) - c.Assert(err, check.IsNil) - c.Assert(networkResources, checker.HasLen, 1) - - out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Name }}", "host") - c.Assert(strings.TrimSpace(out), check.Equals, "host") -} - -func (s *DockerSuite) TestDockerNetworkInspectWithID(c *check.C) { - out, _ := dockerCmd(c, "network", "create", "test2") - networkID := strings.TrimSpace(out) - assertNwIsAvailable(c, "test2") - out, _ = dockerCmd(c, "network", "inspect", "--format={{ .Id }}", "test2") - c.Assert(strings.TrimSpace(out), check.Equals, networkID) - - out, _ = dockerCmd(c, "network", "inspect", "--format={{ .ID }}", "test2") - c.Assert(strings.TrimSpace(out), check.Equals, networkID) -} - -func (s *DockerSuite) TestDockerInspectMultipleNetwork(c *check.C) { - out, _ := dockerCmd(c, "network", "inspect", "host", "none") - networkResources := []types.NetworkResource{} - err := json.Unmarshal([]byte(out), &networkResources) - c.Assert(err, check.IsNil) - c.Assert(networkResources, checker.HasLen, 2) - - // Should print an error, return an exitCode 1 *but* should print the host network - out, exitCode, err := dockerCmdWithError("network", "inspect", "host", "nonexistent") - c.Assert(err, checker.NotNil) - c.Assert(exitCode, checker.Equals, 1) - c.Assert(out, checker.Contains, "Error: No such network: nonexistent") - networkResources = []types.NetworkResource{} - inspectOut := strings.SplitN(out, "\nError: No such network: nonexistent\n", 2)[0] - err = json.Unmarshal([]byte(inspectOut), &networkResources) - c.Assert(networkResources, checker.HasLen, 1) - - // Should print an error and return an exitCode, nothing else - out, exitCode, err = dockerCmdWithError("network", "inspect", "nonexistent") - c.Assert(err, checker.NotNil) - c.Assert(exitCode, checker.Equals, 1) - c.Assert(out, checker.Contains, "Error: No such network: nonexistent") -} - -func (s *DockerSuite) TestDockerInspectNetworkWithContainerName(c *check.C) { - dockerCmd(c, "network", "create", "brNetForInspect") - assertNwIsAvailable(c, "brNetForInspect") - defer func() { - dockerCmd(c, "network", "rm", "brNetForInspect") - 
assertNwNotAvailable(c, "brNetForInspect") - }() - - out, _ := dockerCmd(c, "run", "-d", "--name", "testNetInspect1", "--net", "brNetForInspect", "busybox", "top") - c.Assert(waitRun("testNetInspect1"), check.IsNil) - containerID := strings.TrimSpace(out) - defer func() { - // we don't stop container by name, because we'll rename it later - dockerCmd(c, "stop", containerID) - }() - - out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") - networkResources := []types.NetworkResource{} - err := json.Unmarshal([]byte(out), &networkResources) - c.Assert(err, check.IsNil) - c.Assert(networkResources, checker.HasLen, 1) - container, ok := networkResources[0].Containers[containerID] - c.Assert(ok, checker.True) - c.Assert(container.Name, checker.Equals, "testNetInspect1") - - // rename container and check docker inspect output update - newName := "HappyNewName" - dockerCmd(c, "rename", "testNetInspect1", newName) - - // check whether network inspect works properly - out, _ = dockerCmd(c, "network", "inspect", "brNetForInspect") - newNetRes := []types.NetworkResource{} - err = json.Unmarshal([]byte(out), &newNetRes) - c.Assert(err, check.IsNil) - c.Assert(newNetRes, checker.HasLen, 1) - container1, ok := newNetRes[0].Containers[containerID] - c.Assert(ok, checker.True) - c.Assert(container1.Name, checker.Equals, newName) - -} - -func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnect(c *check.C) { - dockerCmd(c, "network", "create", "test") - assertNwIsAvailable(c, "test") - nr := getNwResource(c, "test") - - c.Assert(nr.Name, checker.Equals, "test") - c.Assert(len(nr.Containers), checker.Equals, 0) - - // run a container - out, _ := dockerCmd(c, "run", "-d", "--name", "test", "busybox", "top") - c.Assert(waitRun("test"), check.IsNil) - containerID := strings.TrimSpace(out) - - // connect the container to the test network - dockerCmd(c, "network", "connect", "test", containerID) - - // inspect the network to make sure container is connected - nr = getNetworkResource(c, nr.ID) - c.Assert(len(nr.Containers), checker.Equals, 1) - c.Assert(nr.Containers[containerID], check.NotNil) - - // check if container IP matches network inspect - ip, _, err := net.ParseCIDR(nr.Containers[containerID].IPv4Address) - c.Assert(err, check.IsNil) - containerIP := findContainerIP(c, "test", "test") - c.Assert(ip.String(), checker.Equals, containerIP) - - // disconnect container from the network - dockerCmd(c, "network", "disconnect", "test", containerID) - nr = getNwResource(c, "test") - c.Assert(nr.Name, checker.Equals, "test") - c.Assert(len(nr.Containers), checker.Equals, 0) - - // run another container - out, _ = dockerCmd(c, "run", "-d", "--net", "test", "--name", "test2", "busybox", "top") - c.Assert(waitRun("test2"), check.IsNil) - containerID = strings.TrimSpace(out) - - nr = getNwResource(c, "test") - c.Assert(nr.Name, checker.Equals, "test") - c.Assert(len(nr.Containers), checker.Equals, 1) - - // force-disconnect the container from the test network - dockerCmd(c, "network", "disconnect", "-f", "test", containerID) - - nr = getNwResource(c, "test") - c.Assert(nr.Name, checker.Equals, "test") - c.Assert(len(nr.Containers), checker.Equals, 0) - - dockerCmd(c, "network", "rm", "test") - assertNwNotAvailable(c, "test") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpamMultipleNetworks(c *check.C) { - // test1 bridge network - dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test1") - assertNwIsAvailable(c, "test1") - - // test2 bridge network does not overlap - dockerCmd(c,
"network", "create", "--subnet=192.169.0.0/16", "test2") - assertNwIsAvailable(c, "test2") - - // for networks w/o ipam specified, docker will choose proper non-overlapping subnets - dockerCmd(c, "network", "create", "test3") - assertNwIsAvailable(c, "test3") - dockerCmd(c, "network", "create", "test4") - assertNwIsAvailable(c, "test4") - dockerCmd(c, "network", "create", "test5") - assertNwIsAvailable(c, "test5") - - // test network with multiple subnets - // bridge network doesn't support multiple subnets. hence, use a dummy driver that supports - - dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", "test6") - assertNwIsAvailable(c, "test6") - - // test network with multiple subnets with valid ipam combinations - // also check same subnet across networks when the driver supports it. - dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, - "--subnet=192.168.0.0/16", "--subnet=192.170.0.0/16", - "--gateway=192.168.0.100", "--gateway=192.170.0.100", - "--ip-range=192.168.1.0/24", - "--aux-address", "a=192.168.1.5", "--aux-address", "b=192.168.1.6", - "--aux-address", "c=192.170.1.5", "--aux-address", "d=192.170.1.6", - "test7") - assertNwIsAvailable(c, "test7") - - // cleanup - for i := 1; i < 8; i++ { - dockerCmd(c, "network", "rm", fmt.Sprintf("test%d", i)) - } -} - -func (s *DockerNetworkSuite) TestDockerNetworkCustomIpam(c *check.C) { - // Create a bridge network using custom ipam driver - dockerCmd(c, "network", "create", "--ipam-driver", dummyIpamDriver, "br0") - assertNwIsAvailable(c, "br0") - - // Verify expected network ipam fields are there - nr := getNetworkResource(c, "br0") - c.Assert(nr.Driver, checker.Equals, "bridge") - c.Assert(nr.IPAM.Driver, checker.Equals, dummyIpamDriver) - - // remove network and exercise remote ipam driver - dockerCmd(c, "network", "rm", "br0") - assertNwNotAvailable(c, "br0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpamOptions(c *check.C) { - // Create a bridge network using custom ipam driver and options - dockerCmd(c, "network", "create", "--ipam-driver", dummyIpamDriver, "--ipam-opt", "opt1=drv1", "--ipam-opt", "opt2=drv2", "br0") - assertNwIsAvailable(c, "br0") - - // Verify expected network ipam options - nr := getNetworkResource(c, "br0") - opts := nr.IPAM.Options - c.Assert(opts["opt1"], checker.Equals, "drv1") - c.Assert(opts["opt2"], checker.Equals, "drv2") -} - -func (s *DockerNetworkSuite) TestDockerNetworkInspectDefault(c *check.C) { - nr := getNetworkResource(c, "none") - c.Assert(nr.Driver, checker.Equals, "null") - c.Assert(nr.Scope, checker.Equals, "local") - c.Assert(nr.Internal, checker.Equals, false) - c.Assert(nr.EnableIPv6, checker.Equals, false) - c.Assert(nr.IPAM.Driver, checker.Equals, "default") - c.Assert(len(nr.IPAM.Config), checker.Equals, 0) - - nr = getNetworkResource(c, "host") - c.Assert(nr.Driver, checker.Equals, "host") - c.Assert(nr.Scope, checker.Equals, "local") - c.Assert(nr.Internal, checker.Equals, false) - c.Assert(nr.EnableIPv6, checker.Equals, false) - c.Assert(nr.IPAM.Driver, checker.Equals, "default") - c.Assert(len(nr.IPAM.Config), checker.Equals, 0) - - nr = getNetworkResource(c, "bridge") - c.Assert(nr.Driver, checker.Equals, "bridge") - c.Assert(nr.Scope, checker.Equals, "local") - c.Assert(nr.Internal, checker.Equals, false) - c.Assert(nr.EnableIPv6, checker.Equals, false) - c.Assert(nr.IPAM.Driver, checker.Equals, "default") - c.Assert(len(nr.IPAM.Config), checker.Equals, 1) - c.Assert(nr.IPAM.Config[0].Subnet, 
checker.NotNil) - c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) -} - -func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomUnspecified(c *check.C) { - // if unspecified, network subnet will be selected from inside preferred pool - dockerCmd(c, "network", "create", "test01") - assertNwIsAvailable(c, "test01") - - nr := getNetworkResource(c, "test01") - c.Assert(nr.Driver, checker.Equals, "bridge") - c.Assert(nr.Scope, checker.Equals, "local") - c.Assert(nr.Internal, checker.Equals, false) - c.Assert(nr.EnableIPv6, checker.Equals, false) - c.Assert(nr.IPAM.Driver, checker.Equals, "default") - c.Assert(len(nr.IPAM.Config), checker.Equals, 1) - c.Assert(nr.IPAM.Config[0].Subnet, checker.NotNil) - c.Assert(nr.IPAM.Config[0].Gateway, checker.NotNil) - - dockerCmd(c, "network", "rm", "test01") - assertNwNotAvailable(c, "test01") -} - -func (s *DockerNetworkSuite) TestDockerNetworkInspectCustomSpecified(c *check.C) { - dockerCmd(c, "network", "create", "--driver=bridge", "--ipv6", "--subnet=172.28.0.0/16", "--ip-range=172.28.5.0/24", "--gateway=172.28.5.254", "br0") - assertNwIsAvailable(c, "br0") - - nr := getNetworkResource(c, "br0") - c.Assert(nr.Driver, checker.Equals, "bridge") - c.Assert(nr.Scope, checker.Equals, "local") - c.Assert(nr.Internal, checker.Equals, false) - c.Assert(nr.EnableIPv6, checker.Equals, true) - c.Assert(nr.IPAM.Driver, checker.Equals, "default") - c.Assert(len(nr.IPAM.Config), checker.Equals, 1) - c.Assert(nr.IPAM.Config[0].Subnet, checker.Equals, "172.28.0.0/16") - c.Assert(nr.IPAM.Config[0].IPRange, checker.Equals, "172.28.5.0/24") - c.Assert(nr.IPAM.Config[0].Gateway, checker.Equals, "172.28.5.254") - dockerCmd(c, "network", "rm", "br0") - assertNwNotAvailable(c, "br0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpamInvalidCombinations(c *check.C) { - // network with ip-range out of subnet range - _, _, err := dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--ip-range=192.170.0.0/16", "test") - c.Assert(err, check.NotNil) - - // network with multiple gateways for a single subnet - _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--gateway=192.168.0.1", "--gateway=192.168.0.2", "test") - c.Assert(err, check.NotNil) - - // Multiple overlapping subnets in the same network must fail - _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.0.0/16", "--subnet=192.168.1.0/16", "test") - c.Assert(err, check.NotNil) - - // overlapping subnets across networks must fail - // create a valid test0 network - dockerCmd(c, "network", "create", "--subnet=192.168.0.0/16", "test0") - assertNwIsAvailable(c, "test0") - // create an overlapping test1 network - _, _, err = dockerCmdWithError("network", "create", "--subnet=192.168.128.0/17", "test1") - c.Assert(err, check.NotNil) - dockerCmd(c, "network", "rm", "test0") - assertNwNotAvailable(c, "test0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkDriverOptions(c *check.C) { - dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, "-o", "opt1=drv1", "-o", "opt2=drv2", "testopt") - assertNwIsAvailable(c, "testopt") - gopts := remoteDriverNetworkRequest.Options[netlabel.GenericData] - c.Assert(gopts, checker.NotNil) - opts, ok := gopts.(map[string]interface{}) - c.Assert(ok, checker.Equals, true) - c.Assert(opts["opt1"], checker.Equals, "drv1") - c.Assert(opts["opt2"], checker.Equals, "drv2") - dockerCmd(c, "network", "rm", "testopt") - assertNwNotAvailable(c, "testopt") -} - -func (s 
*DockerDaemonSuite) TestDockerNetworkNoDiscoveryDefaultBridgeNetwork(c *check.C) { - testRequires(c, ExecSupport) - // On the default bridge network, built-in service discovery should not happen - hostsFile := "/etc/hosts" - bridgeName := "external-bridge" - bridgeIP := "192.169.255.254/24" - out, err := createInterface(c, "bridge", bridgeName, bridgeIP) - c.Assert(err, check.IsNil, check.Commentf(out)) - defer deleteInterface(c, bridgeName) - - err = s.d.StartWithBusybox("--bridge", bridgeName) - c.Assert(err, check.IsNil) - defer s.d.Restart() - - // run two containers and store first container's etc/hosts content - out, err = s.d.Cmd("run", "-d", "busybox", "top") - c.Assert(err, check.IsNil) - cid1 := strings.TrimSpace(out) - defer s.d.Cmd("stop", cid1) - - hosts, err := s.d.Cmd("exec", cid1, "cat", hostsFile) - c.Assert(err, checker.IsNil) - - out, err = s.d.Cmd("run", "-d", "--name", "container2", "busybox", "top") - c.Assert(err, check.IsNil) - cid2 := strings.TrimSpace(out) - - // verify first container's etc/hosts file has not changed after spawning the second named container - hostsPost, err := s.d.Cmd("exec", cid1, "cat", hostsFile) - c.Assert(err, checker.IsNil) - c.Assert(string(hosts), checker.Equals, string(hostsPost), - check.Commentf("Unexpected %s change on second container creation", hostsFile)) - - // stop container 2 and verify first container's etc/hosts has not changed - _, err = s.d.Cmd("stop", cid2) - c.Assert(err, check.IsNil) - - hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) - c.Assert(err, checker.IsNil) - c.Assert(string(hosts), checker.Equals, string(hostsPost), - check.Commentf("Unexpected %s change on second container stop", hostsFile)) - - // but discovery is on when connecting to a non-default bridge network - network := "anotherbridge" - out, err = s.d.Cmd("network", "create", network) - c.Assert(err, check.IsNil, check.Commentf(out)) - defer s.d.Cmd("network", "rm", network) - - out, err = s.d.Cmd("network", "connect", network, cid1) - c.Assert(err, check.IsNil, check.Commentf(out)) - - hosts, err = s.d.Cmd("exec", cid1, "cat", hostsFile) - c.Assert(err, checker.IsNil) - - hostsPost, err = s.d.Cmd("exec", cid1, "cat", hostsFile) - c.Assert(err, checker.IsNil) - c.Assert(string(hosts), checker.Equals, string(hostsPost), - check.Commentf("Unexpected %s change on second network connection", hostsFile)) -} - -func (s *DockerNetworkSuite) TestDockerNetworkAnonymousEndpoint(c *check.C) { - testRequires(c, ExecSupport, NotArm) - hostsFile := "/etc/hosts" - cstmBridgeNw := "custom-bridge-nw" - cstmBridgeNw1 := "custom-bridge-nw1" - - dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw) - assertNwIsAvailable(c, cstmBridgeNw) - - // run two anonymous containers and store their etc/hosts content - out, _ := dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") - cid1 := strings.TrimSpace(out) - - hosts1, err := readContainerFileWithExec(cid1, hostsFile) - c.Assert(err, checker.IsNil) - - out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "busybox", "top") - cid2 := strings.TrimSpace(out) - - hosts2, err := readContainerFileWithExec(cid2, hostsFile) - c.Assert(err, checker.IsNil) - - // verify first container etc/hosts file has not changed - hosts1post, err := readContainerFileWithExec(cid1, hostsFile) - c.Assert(err, checker.IsNil) - c.Assert(string(hosts1), checker.Equals, string(hosts1post), - check.Commentf("Unexpected %s change on anonymous container creation", hostsFile)) - - // Connect the 2nd container to a new network 
and verify the - // first container /etc/hosts file still hasn't changed. - dockerCmd(c, "network", "create", "-d", "bridge", cstmBridgeNw1) - assertNwIsAvailable(c, cstmBridgeNw1) - - dockerCmd(c, "network", "connect", cstmBridgeNw1, cid2) - - hosts2, err = readContainerFileWithExec(cid2, hostsFile) - c.Assert(err, checker.IsNil) - - hosts1post, err = readContainerFileWithExec(cid1, hostsFile) - c.Assert(err, checker.IsNil) - c.Assert(string(hosts1), checker.Equals, string(hosts1post), - check.Commentf("Unexpected %s change on container connect", hostsFile)) - - // start a named container - cName := "AnyName" - out, _ = dockerCmd(c, "run", "-d", "--net", cstmBridgeNw, "--name", cName, "busybox", "top") - cid3 := strings.TrimSpace(out) - - // verify that containers 1 and 2 can ping the named container - dockerCmd(c, "exec", cid1, "ping", "-c", "1", cName) - dockerCmd(c, "exec", cid2, "ping", "-c", "1", cName) - - // Stop named container and verify first two containers' etc/hosts file hasn't changed - dockerCmd(c, "stop", cid3) - hosts1post, err = readContainerFileWithExec(cid1, hostsFile) - c.Assert(err, checker.IsNil) - c.Assert(string(hosts1), checker.Equals, string(hosts1post), - check.Commentf("Unexpected %s change on named container stop", hostsFile)) - - hosts2post, err := readContainerFileWithExec(cid2, hostsFile) - c.Assert(err, checker.IsNil) - c.Assert(string(hosts2), checker.Equals, string(hosts2post), - check.Commentf("Unexpected %s change on named container stop", hostsFile)) - - // verify that containers 1 and 2 can't ping the named container now - _, _, err = dockerCmdWithError("exec", cid1, "ping", "-c", "1", cName) - c.Assert(err, check.NotNil) - _, _, err = dockerCmdWithError("exec", cid2, "ping", "-c", "1", cName) - c.Assert(err, check.NotNil) -} - -func (s *DockerNetworkSuite) TestDockerNetworkLinkOnDefaultNetworkOnly(c *check.C) { - // Legacy Link feature must work only on default network, and not across networks - cnt1 := "container1" - cnt2 := "container2" - network := "anotherbridge" - - // Run first container on default network - dockerCmd(c, "run", "-d", "--name", cnt1, "busybox", "top") - - // Create another network and run the second container on it - dockerCmd(c, "network", "create", network) - assertNwIsAvailable(c, network) - dockerCmd(c, "run", "-d", "--net", network, "--name", cnt2, "busybox", "top") - - // Try launching a container on default network, linking to the first container. Must succeed - dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt1, cnt1), "busybox", "top") - - // Try launching a container on default network, linking to the second container. Must fail - _, _, err := dockerCmdWithError("run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") - c.Assert(err, checker.NotNil) - - // Connect second container to default network. 
Now a container on default network can link to it - dockerCmd(c, "network", "connect", "bridge", cnt2) - dockerCmd(c, "run", "-d", "--link", fmt.Sprintf("%s:%s", cnt2, cnt2), "busybox", "top") -} - -func (s *DockerNetworkSuite) TestDockerNetworkOverlayPortMapping(c *check.C) { - // Verify exposed ports are present in ps output when running a container on - // a network managed by a driver which does not provide the default gateway - // for the container - nwn := "ov" - ctn := "bb" - port1 := 80 - port2 := 443 - expose1 := fmt.Sprintf("--expose=%d", port1) - expose2 := fmt.Sprintf("--expose=%d", port2) - - dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) - assertNwIsAvailable(c, nwn) - - dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, expose1, expose2, "busybox", "top") - - // Check docker ps o/p for last created container reports the unpublished ports - unpPort1 := fmt.Sprintf("%d/tcp", port1) - unpPort2 := fmt.Sprintf("%d/tcp", port2) - out, _ := dockerCmd(c, "ps", "-n=1") - // Missing unpublished ports in docker ps output - c.Assert(out, checker.Contains, unpPort1) - // Missing unpublished ports in docker ps output - c.Assert(out, checker.Contains, unpPort2) -} - -func (s *DockerNetworkSuite) TestDockerNetworkDriverUngracefulRestart(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - dnd := "dnd" - did := "did" - - mux := http.NewServeMux() - server := httptest.NewServer(mux) - setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) - - s.d.StartWithBusybox() - _, err := s.d.Cmd("network", "create", "-d", dnd, "--subnet", "1.1.1.0/24", "net1") - c.Assert(err, checker.IsNil) - - _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "foo", "--ip", "1.1.1.10", "busybox", "sh") - c.Assert(err, checker.IsNil) - - // Kill daemon and restart - if err = s.d.cmd.Process.Kill(); err != nil { - c.Fatal(err) - } - - server.Close() - - startTime := time.Now().Unix() - if err = s.d.Restart(); err != nil { - c.Fatal(err) - } - lapse := time.Now().Unix() - startTime - if lapse > 60 { - // In normal scenarios, daemon restart takes ~1 second. - // Plugin retry mechanism can delay the daemon start. systemd may not like it. 
- // Avoid accessing plugins during daemon bootup - c.Logf("daemon restart took too long : %d seconds", lapse) - } - - // Restart the custom dummy plugin - mux = http.NewServeMux() - server = httptest.NewServer(mux) - setupRemoteNetworkDrivers(c, mux, server.URL, dnd, did) - - // trying to reuse the same ip must succeed - _, err = s.d.Cmd("run", "-itd", "--net", "net1", "--name", "bar", "--ip", "1.1.1.10", "busybox", "sh") - c.Assert(err, checker.IsNil) -} - -func (s *DockerNetworkSuite) TestDockerNetworkMacInspect(c *check.C) { - // Verify endpoint MAC address is correctly populated in container's network settings - nwn := "ov" - ctn := "bb" - - dockerCmd(c, "network", "create", "-d", dummyNetworkDriver, nwn) - assertNwIsAvailable(c, nwn) - - dockerCmd(c, "run", "-d", "--net", nwn, "--name", ctn, "busybox", "top") - - mac := inspectField(c, ctn, "NetworkSettings.Networks."+nwn+".MacAddress") - c.Assert(mac, checker.Equals, "a0:b1:c2:d3:e4:f5") -} - -func (s *DockerSuite) TestInspectApiMultipleNetworks(c *check.C) { - dockerCmd(c, "network", "create", "mybridge1") - dockerCmd(c, "network", "create", "mybridge2") - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), check.IsNil) - - dockerCmd(c, "network", "connect", "mybridge1", id) - dockerCmd(c, "network", "connect", "mybridge2", id) - - body := getInspectBody(c, "v1.20", id) - var inspect120 v1p20.ContainerJSON - err := json.Unmarshal(body, &inspect120) - c.Assert(err, checker.IsNil) - - versionedIP := inspect120.NetworkSettings.IPAddress - - body = getInspectBody(c, "v1.21", id) - var inspect121 types.ContainerJSON - err = json.Unmarshal(body, &inspect121) - c.Assert(err, checker.IsNil) - c.Assert(inspect121.NetworkSettings.Networks, checker.HasLen, 3) - - bridge := inspect121.NetworkSettings.Networks["bridge"] - c.Assert(bridge.IPAddress, checker.Equals, versionedIP) - c.Assert(bridge.IPAddress, checker.Equals, inspect121.NetworkSettings.IPAddress) -} - -func connectContainerToNetworks(c *check.C, d *Daemon, cName string, nws []string) { - // Run a container on the default network - out, err := d.Cmd("run", "-d", "--name", cName, "busybox", "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) - - // Attach the container to other networks - for _, nw := range nws { - out, err = d.Cmd("network", "create", nw) - c.Assert(err, checker.IsNil, check.Commentf(out)) - out, err = d.Cmd("network", "connect", nw, cName) - c.Assert(err, checker.IsNil, check.Commentf(out)) - } -} - -func verifyContainerIsConnectedToNetworks(c *check.C, d *Daemon, cName string, nws []string) { - // Verify container is connected to all the networks - for _, nw := range nws { - out, err := d.Cmd("inspect", "-f", fmt.Sprintf("{{.NetworkSettings.Networks.%s}}", nw), cName) - c.Assert(err, checker.IsNil, check.Commentf(out)) - c.Assert(out, checker.Not(checker.Equals), "\n") - } -} - -func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksGracefulDaemonRestart(c *check.C) { - cName := "bb" - nwList := []string{"nw1", "nw2", "nw3"} - - s.d.StartWithBusybox() - - connectContainerToNetworks(c, s.d, cName, nwList) - verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) - - // Reload daemon - s.d.Restart() - - _, err := s.d.Cmd("start", cName) - c.Assert(err, checker.IsNil) - - verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) -} - -func (s *DockerNetworkSuite) TestDockerNetworkMultipleNetworksUngracefulDaemonRestart(c *check.C) { - cName := "cc" - nwList := []string{"nw1", "nw2", "nw3"} - 
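- // start the daemon with the busybox image loaded, then attach the container to all three test networks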
- s.d.StartWithBusybox() - - connectContainerToNetworks(c, s.d, cName, nwList) - verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) - - // Kill daemon and restart - if err := s.d.cmd.Process.Kill(); err != nil { - c.Fatal(err) - } - s.d.Restart() - - // Restart container - _, err := s.d.Cmd("start", cName) - c.Assert(err, checker.IsNil) - - verifyContainerIsConnectedToNetworks(c, s.d, cName, nwList) -} - -func (s *DockerNetworkSuite) TestDockerNetworkRunNetByID(c *check.C) { - out, _ := dockerCmd(c, "network", "create", "one") - containerOut, _, err := dockerCmdWithError("run", "-d", "--net", strings.TrimSpace(out), "busybox", "top") - c.Assert(err, checker.IsNil, check.Commentf(containerOut)) -} - -func (s *DockerNetworkSuite) TestDockerNetworkHostModeUngracefulDaemonRestart(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - s.d.StartWithBusybox() - - // Run a few containers on host network - for i := 0; i < 10; i++ { - cName := fmt.Sprintf("hostc-%d", i) - out, err := s.d.Cmd("run", "-d", "--name", cName, "--net=host", "--restart=always", "busybox", "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) - - // verify container has finished starting before killing daemon - err = s.d.waitRun(cName) - c.Assert(err, checker.IsNil) - } - - // Kill daemon ungracefully and restart - if err := s.d.cmd.Process.Kill(); err != nil { - c.Fatal(err) - } - if err := s.d.Restart(); err != nil { - c.Fatal(err) - } - - // make sure all the containers are up and running - for i := 0; i < 10; i++ { - err := s.d.waitRun(fmt.Sprintf("hostc-%d", i)) - c.Assert(err, checker.IsNil) - } -} - -func (s *DockerNetworkSuite) TestDockerNetworkConnectToHostFromOtherNetwork(c *check.C) { - dockerCmd(c, "run", "-d", "--name", "container1", "busybox", "top") - c.Assert(waitRun("container1"), check.IsNil) - dockerCmd(c, "network", "disconnect", "bridge", "container1") - out, _, err := dockerCmdWithError("network", "connect", "host", "container1") - c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) -} - -func (s *DockerNetworkSuite) TestDockerNetworkDisconnectFromHost(c *check.C) { - dockerCmd(c, "run", "-d", "--name", "container1", "--net=host", "busybox", "top") - c.Assert(waitRun("container1"), check.IsNil) - out, _, err := dockerCmdWithError("network", "disconnect", "host", "container1") - c.Assert(err, checker.NotNil, check.Commentf("Should error out when disconnecting from host")) - c.Assert(out, checker.Contains, runconfig.ErrConflictHostNetwork.Error()) -} - -func (s *DockerNetworkSuite) TestDockerNetworkConnectWithPortMapping(c *check.C) { - testRequires(c, NotArm) - dockerCmd(c, "network", "create", "test1") - dockerCmd(c, "run", "-d", "--name", "c1", "-p", "5000:5000", "busybox", "top") - c.Assert(waitRun("c1"), check.IsNil) - dockerCmd(c, "network", "connect", "test1", "c1") -} - -func verifyPortMap(c *check.C, container, port, originalMapping string, mustBeEqual bool) { - chk := checker.Equals - if !mustBeEqual { - chk = checker.Not(checker.Equals) - } - currentMapping, _ := dockerCmd(c, "port", container, port) - c.Assert(currentMapping, chk, originalMapping) -} - -func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectWithPortMapping(c *check.C) { - // Connect and disconnect a container with explicit and non-explicit - // host port mapping to/from networks which do cause and do not cause - // the container default gateway to change, and verify docker port cmd - // returns consistent information - 
testRequires(c, NotArm) - cnt := "c1" - dockerCmd(c, "network", "create", "aaa") - dockerCmd(c, "network", "create", "ccc") - - dockerCmd(c, "run", "-d", "--name", cnt, "-p", "9000:90", "-p", "70", "busybox", "top") - c.Assert(waitRun(cnt), check.IsNil) - curPortMap, _ := dockerCmd(c, "port", cnt, "70") - curExplPortMap, _ := dockerCmd(c, "port", cnt, "90") - - // Connect to a network which causes the container's default gw switch - dockerCmd(c, "network", "connect", "aaa", cnt) - verifyPortMap(c, cnt, "70", curPortMap, false) - verifyPortMap(c, cnt, "90", curExplPortMap, true) - - // Read current mapping - curPortMap, _ = dockerCmd(c, "port", cnt, "70") - - // Disconnect from a network which causes the container's default gw switch - dockerCmd(c, "network", "disconnect", "aaa", cnt) - verifyPortMap(c, cnt, "70", curPortMap, false) - verifyPortMap(c, cnt, "90", curExplPortMap, true) - - // Read current mapping - curPortMap, _ = dockerCmd(c, "port", cnt, "70") - - // Connect to a network which does not cause the container's default gw switch - dockerCmd(c, "network", "connect", "ccc", cnt) - verifyPortMap(c, cnt, "70", curPortMap, true) - verifyPortMap(c, cnt, "90", curExplPortMap, true) -} - -func (s *DockerNetworkSuite) TestDockerNetworkConnectWithMac(c *check.C) { - macAddress := "02:42:ac:11:00:02" - dockerCmd(c, "network", "create", "mynetwork") - dockerCmd(c, "run", "--name=test", "-d", "--mac-address", macAddress, "busybox", "top") - c.Assert(waitRun("test"), check.IsNil) - mac1 := inspectField(c, "test", "NetworkSettings.Networks.bridge.MacAddress") - c.Assert(strings.TrimSpace(mac1), checker.Equals, macAddress) - dockerCmd(c, "network", "connect", "mynetwork", "test") - mac2 := inspectField(c, "test", "NetworkSettings.Networks.mynetwork.MacAddress") - c.Assert(strings.TrimSpace(mac2), checker.Not(checker.Equals), strings.TrimSpace(mac1)) -} - -func (s *DockerNetworkSuite) TestDockerNetworkInspectCreatedContainer(c *check.C) { - dockerCmd(c, "create", "--name", "test", "busybox") - networks := inspectField(c, "test", "NetworkSettings.Networks") - c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should return 'bridge' network")) -} - -func (s *DockerNetworkSuite) TestDockerNetworkRestartWithMultipleNetworks(c *check.C) { - dockerCmd(c, "network", "create", "test") - dockerCmd(c, "run", "--name=foo", "-d", "busybox", "top") - c.Assert(waitRun("foo"), checker.IsNil) - dockerCmd(c, "network", "connect", "test", "foo") - dockerCmd(c, "restart", "foo") - networks := inspectField(c, "foo", "NetworkSettings.Networks") - c.Assert(networks, checker.Contains, "bridge", check.Commentf("Should contain 'bridge' network")) - c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) -} - -func (s *DockerNetworkSuite) TestDockerNetworkConnectDisconnectToStoppedContainer(c *check.C) { - dockerCmd(c, "network", "create", "test") - dockerCmd(c, "create", "--name=foo", "busybox", "top") - dockerCmd(c, "network", "connect", "test", "foo") - networks := inspectField(c, "foo", "NetworkSettings.Networks") - c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) - - // Restart docker daemon to test the config has persisted to disk - s.d.Restart() - networks = inspectField(c, "foo", "NetworkSettings.Networks") - c.Assert(networks, checker.Contains, "test", check.Commentf("Should contain 'test' network")) - - // start the container and test if we can ping it from another container in the same network - dockerCmd(c, 
"start", "foo") - c.Assert(waitRun("foo"), checker.IsNil) - ip := inspectField(c, "foo", "NetworkSettings.Networks.test.IPAddress") - ip = strings.TrimSpace(ip) - dockerCmd(c, "run", "--net=test", "busybox", "sh", "-c", fmt.Sprintf("ping -c 1 %s", ip)) - - dockerCmd(c, "stop", "foo") - - // Test disconnect - dockerCmd(c, "network", "disconnect", "test", "foo") - networks = inspectField(c, "foo", "NetworkSettings.Networks") - c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) - - // Restart docker daemon to test the config has persisted to disk - s.d.Restart() - networks = inspectField(c, "foo", "NetworkSettings.Networks") - c.Assert(networks, checker.Not(checker.Contains), "test", check.Commentf("Should not contain 'test' network")) - -} - -func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIP(c *check.C) { - // create two networks - dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.28.0.0/16", "--subnet=2001:db8:1234::/64", "n0") - assertNwIsAvailable(c, "n0") - - dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--ip-range=172.30.5.0/24", "--subnet=2001:db8:abcd::/64", "--ip-range=2001:db8:abcd::/80", "n1") - assertNwIsAvailable(c, "n1") - - // run a container on first network specifying the ip addresses - dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") - c.Assert(waitRun("c0"), check.IsNil) - verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") - verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") - - // connect the container to the second network specifying an ip addresses - dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n1", "c0") - verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") - verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") - - // Stop and restart the container - dockerCmd(c, "stop", "c0") - dockerCmd(c, "start", "c0") - - // verify requested addresses are applied and configs are still there - verifyIPAddressConfig(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") - verifyIPAddresses(c, "c0", "n0", "172.28.99.88", "2001:db8:1234::9988") - verifyIPAddressConfig(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") - verifyIPAddresses(c, "c0", "n1", "172.30.55.44", "2001:db8:abcd::5544") - - // Still it should fail to connect to the default network with a specified IP (whatever ip) - out, _, err := dockerCmdWithError("network", "connect", "--ip", "172.21.55.44", "bridge", "c0") - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) - -} - -func (s *DockerNetworkSuite) TestDockerNetworkConnectPreferredIPStoppedContainer(c *check.C) { - // create a container - dockerCmd(c, "create", "--name", "c0", "busybox", "top") - - // create a network - dockerCmd(c, "network", "create", "--ipv6", "--subnet=172.30.0.0/16", "--subnet=2001:db8:abcd::/64", "n0") - assertNwIsAvailable(c, "n0") - - // connect the container to the network specifying an ip addresses - dockerCmd(c, "network", "connect", "--ip", "172.30.55.44", "--ip6", "2001:db8:abcd::5544", "n0", "c0") - verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") - - // start the container, verify config has not changed and ip addresses are assigned - dockerCmd(c, "start", "c0") - c.Assert(waitRun("c0"), 
check.IsNil) - verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") - verifyIPAddresses(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") - - // stop the container and check ip config has not changed - dockerCmd(c, "stop", "c0") - verifyIPAddressConfig(c, "c0", "n0", "172.30.55.44", "2001:db8:abcd::5544") -} - -func (s *DockerNetworkSuite) TestDockerNetworkUnsupportedRequiredIP(c *check.C) { - // requested IP is not supported on predefined networks - for _, mode := range []string{"none", "host", "bridge", "default"} { - checkUnsupportedNetworkAndIP(c, mode) - } - - // requested IP is not supported on networks with no user defined subnets - dockerCmd(c, "network", "create", "n0") - assertNwIsAvailable(c, "n0") - - out, _, err := dockerCmdWithError("run", "-d", "--ip", "172.28.99.88", "--net", "n0", "busybox", "top") - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error()) - - out, _, err = dockerCmdWithError("run", "-d", "--ip6", "2001:db8:1234::9988", "--net", "n0", "busybox", "top") - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkNoSubnetAndIP.Error()) - - dockerCmd(c, "network", "rm", "n0") - assertNwNotAvailable(c, "n0") -} - -func checkUnsupportedNetworkAndIP(c *check.C, nwMode string) { - out, _, err := dockerCmdWithError("run", "-d", "--net", nwMode, "--ip", "172.28.99.88", "--ip6", "2001:db8:1234::9988", "busybox", "top") - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndIP.Error()) -} - -func verifyIPAddressConfig(c *check.C, cName, nwname, ipv4, ipv6 string) { - if ipv4 != "" { - out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv4Address", nwname)) - c.Assert(strings.TrimSpace(out), check.Equals, ipv4) - } - - if ipv6 != "" { - out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAMConfig.IPv6Address", nwname)) - c.Assert(strings.TrimSpace(out), check.Equals, ipv6) - } -} - -func verifyIPAddresses(c *check.C, cName, nwname, ipv4, ipv6 string) { - out := inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.IPAddress", nwname)) - c.Assert(strings.TrimSpace(out), check.Equals, ipv4) - - out = inspectField(c, cName, fmt.Sprintf("NetworkSettings.Networks.%s.GlobalIPv6Address", nwname)) - c.Assert(strings.TrimSpace(out), check.Equals, ipv6) -} - -func (s *DockerNetworkSuite) TestDockerNetworkConnectLinkLocalIP(c *check.C) { - // create one test network - dockerCmd(c, "network", "create", "n0") - assertNwIsAvailable(c, "n0") - - // run a container with incorrect link-local address - _, _, err := dockerCmdWithError("run", "--link-local-ip", "169.253.5.5", "busybox", "top") - c.Assert(err, check.NotNil) - _, _, err = dockerCmdWithError("run", "--link-local-ip", "2001:db8::89", "busybox", "top") - c.Assert(err, check.NotNil) - - // run two containers with link-local ip on the test network - dockerCmd(c, "run", "-d", "--name", "c0", "--net=n0", "--link-local-ip", "169.254.7.7", "--link-local-ip", "fe80::254:77", "busybox", "top") - c.Assert(waitRun("c0"), check.IsNil) - dockerCmd(c, "run", "-d", "--name", "c1", "--net=n0", "--link-local-ip", "169.254.8.8", "--link-local-ip", "fe80::254:88", "busybox", "top") - c.Assert(waitRun("c1"), check.IsNil) - - // run a container on the default network and connect it to the test network specifying a 
link-local address - dockerCmd(c, "run", "-d", "--name", "c2", "busybox", "top") - c.Assert(waitRun("c2"), check.IsNil) - dockerCmd(c, "network", "connect", "--link-local-ip", "169.254.9.9", "n0", "c2") - - // verify the three containers can ping each other via the link-local addresses - _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") - c.Assert(err, check.IsNil) - - // Stop and restart the three containers - dockerCmd(c, "stop", "c0") - dockerCmd(c, "stop", "c1") - dockerCmd(c, "stop", "c2") - dockerCmd(c, "start", "c0") - dockerCmd(c, "start", "c1") - dockerCmd(c, "start", "c2") - - // verify the ping again - _, _, err = dockerCmdWithError("exec", "c0", "ping", "-c", "1", "169.254.8.8") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "c1", "ping", "-c", "1", "169.254.9.9") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "c2", "ping", "-c", "1", "169.254.7.7") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectLink(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "-d", "bridge", "foo1") - dockerCmd(c, "network", "create", "-d", "bridge", "foo2") - - dockerCmd(c, "run", "-d", "--net=foo1", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - - // run a container in a user-defined network with a link for an existing container - // and a link for a container that doesn't exist - dockerCmd(c, "run", "-d", "--net=foo1", "--name=second", "--link=first:FirstInFoo1", - "--link=third:bar", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // ping to first and its alias FirstInFoo1 must succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") - c.Assert(err, check.IsNil) - - // connect first container to foo2 network - dockerCmd(c, "network", "connect", "foo2", "first") - // connect second container to foo2 network with a different alias for first container - dockerCmd(c, "network", "connect", "--link=first:FirstInFoo2", "foo2", "second") - - // ping the new alias in network foo2 - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") - c.Assert(err, check.IsNil) - - // disconnect first container from foo1 network - dockerCmd(c, "network", "disconnect", "foo1", "first") - - // link in foo1 network must fail - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo1") - c.Assert(err, check.NotNil) - - // link in foo2 network must succeed - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "FirstInFoo2") - c.Assert(err, check.IsNil) -} - -func (s *DockerNetworkSuite) TestDockerNetworkDisconnectDefault(c *check.C) { - netWorkName1 := "test1" - netWorkName2 := "test2" - containerName := "foo" - - dockerCmd(c, "network", "create", netWorkName1) - dockerCmd(c, "network", "create", netWorkName2) - dockerCmd(c, "create", "--name", containerName, "busybox", "top") - dockerCmd(c, "network", "connect", netWorkName1, containerName) - dockerCmd(c, "network", "connect", netWorkName2, containerName) - dockerCmd(c, "network", "disconnect", "bridge", containerName) - - 
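- // start the container; only the two user-defined networks should remain attached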
dockerCmd(c, "start", containerName) - c.Assert(waitRun(containerName), checker.IsNil) - networks := inspectField(c, containerName, "NetworkSettings.Networks") - c.Assert(networks, checker.Contains, netWorkName1, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName1))) - c.Assert(networks, checker.Contains, netWorkName2, check.Commentf(fmt.Sprintf("Should contain '%s' network", netWorkName2))) - c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) -} - -func (s *DockerNetworkSuite) TestDockerNetworkConnectWithAliasOnDefaultNetworks(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - - defaults := []string{"bridge", "host", "none"} - out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") - containerID := strings.TrimSpace(out) - for _, net := range defaults { - res, _, err := dockerCmdWithError("network", "connect", "--alias", "alias"+net, net, containerID) - c.Assert(err, checker.NotNil) - c.Assert(res, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) - } -} - -func (s *DockerSuite) TestUserDefinedNetworkConnectDisconnectAlias(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "-d", "bridge", "net1") - dockerCmd(c, "network", "create", "-d", "bridge", "net2") - - cid, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - - dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // ping first container and its alias - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") - c.Assert(err, check.IsNil) - - // ping first container's short-id alias - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) - c.Assert(err, check.IsNil) - - // connect first container to net2 network - dockerCmd(c, "network", "connect", "--alias=bar", "net2", "first") - // connect second container to foo2 network with a different alias for first container - dockerCmd(c, "network", "connect", "net2", "second") - - // ping the new alias in network foo2 - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") - c.Assert(err, check.IsNil) - - // disconnect first container from net1 network - dockerCmd(c, "network", "disconnect", "net1", "first") - - // ping to net1 scoped alias "foo" must fail - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") - c.Assert(err, check.NotNil) - - // ping to net2 scoped alias "bar" must still succeed - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") - c.Assert(err, check.IsNil) - // ping to net2 scoped alias short-id must still succeed - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid)) - c.Assert(err, check.IsNil) - - // verify the alias option is rejected when running on predefined network - out, _, err := dockerCmdWithError("run", "--rm", "--name=any", "--net-alias=any", "busybox", "top") - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) - - // verify the alias option is rejected when connecting to predefined network - out, _, err = dockerCmdWithError("network", "connect", 
"--alias=any", "bridge", "first") - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) -} - -func (s *DockerSuite) TestUserDefinedNetworkConnectivity(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - dockerCmd(c, "network", "create", "-d", "bridge", "br.net1") - - dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c1.net1", "busybox", "top") - c.Assert(waitRun("c1.net1"), check.IsNil) - - dockerCmd(c, "run", "-d", "--net=br.net1", "--name=c2.net1", "busybox", "top") - c.Assert(waitRun("c2.net1"), check.IsNil) - - // ping first container by its unqualified name - _, _, err := dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1") - c.Assert(err, check.IsNil) - - // ping first container by its qualified name - _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1") - c.Assert(err, check.IsNil) - - // ping with first qualified name masked by an additional domain. should fail - _, _, err = dockerCmdWithError("exec", "c2.net1", "ping", "-c", "1", "c1.net1.br.net1.google.com") - c.Assert(err, check.NotNil) -} - -func (s *DockerSuite) TestEmbeddedDNSInvalidInput(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - dockerCmd(c, "network", "create", "-d", "bridge", "nw1") - - // Sending garbage to embedded DNS shouldn't crash the daemon - dockerCmd(c, "run", "-i", "--net=nw1", "--name=c1", "debian:jessie", "bash", "-c", "echo InvalidQuery > /dev/udp/127.0.0.11/53") -} - -func (s *DockerSuite) TestDockerNetworkConnectFailsNoInspectChange(c *check.C) { - dockerCmd(c, "run", "-d", "--name=bb", "busybox", "top") - c.Assert(waitRun("bb"), check.IsNil) - - ns0 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") - - // A failing redundant network connect should not alter current container's endpoint settings - _, _, err := dockerCmdWithError("network", "connect", "bridge", "bb") - c.Assert(err, check.NotNil) - - ns1 := inspectField(c, "bb", "NetworkSettings.Networks.bridge") - c.Assert(ns1, check.Equals, ns0) -} - -func (s *DockerSuite) TestDockerNetworkInternalMode(c *check.C) { - dockerCmd(c, "network", "create", "--driver=bridge", "--internal", "internal") - assertNwIsAvailable(c, "internal") - nr := getNetworkResource(c, "internal") - c.Assert(nr.Internal, checker.True) - - dockerCmd(c, "run", "-d", "--net=internal", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=internal", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - out, _, err := dockerCmdWithError("exec", "first", "ping", "-W", "4", "-c", "1", "www.google.com") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, "ping: bad address") - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -// Test for #21401 -func (s *DockerNetworkSuite) TestDockerNetworkCreateDeleteSpecialCharacters(c *check.C) { - dockerCmd(c, "network", "create", "test@#$") - assertNwIsAvailable(c, "test@#$") - dockerCmd(c, "network", "rm", "test@#$") - assertNwNotAvailable(c, "test@#$") - - dockerCmd(c, "network", "create", "kiwl$%^") - assertNwIsAvailable(c, "kiwl$%^") - dockerCmd(c, "network", "rm", "kiwl$%^") - assertNwNotAvailable(c, "kiwl$%^") -} - -func (s *DockerDaemonSuite) TestDaemonRestartRestoreBridgeNetwork(t *check.C) { - testRequires(t, DaemonIsLinux) - if err := s.d.StartWithBusybox("--live-restore"); err != nil { - 
t.Fatal(err) - } - defer s.d.Stop() - oldCon := "old" - - _, err := s.d.Cmd("run", "-d", "--name", oldCon, "-p", "80:80", "busybox", "top") - if err != nil { - t.Fatal(err) - } - oldContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", oldCon) - if err != nil { - t.Fatal(err) - } - // Kill the daemon - if err := s.d.Kill(); err != nil { - t.Fatal(err) - } - - // restart the daemon - if err := s.d.Start("--live-restore"); err != nil { - t.Fatal(err) - } - - // start a new container; its ip should not be the same as the - // old running container's. - newCon := "new" - _, err = s.d.Cmd("run", "-d", "--name", newCon, "busybox", "top") - if err != nil { - t.Fatal(err) - } - newContainerIP, err := s.d.Cmd("inspect", "-f", "{{ .NetworkSettings.Networks.bridge.IPAddress }}", newCon) - if err != nil { - t.Fatal(err) - } - if strings.TrimSpace(oldContainerIP) == strings.TrimSpace(newContainerIP) { - t.Fatalf("new container ip should not equal the old running container's ip") - } - - // start a new container; it should be able to ping the old running container - _, err = s.d.Cmd("run", "-t", "busybox", "ping", "-c", "1", oldContainerIP) - if err != nil { - t.Fatal(err) - } - - // start a new container; trying to publish port 80:80 should fail - out, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top") - if err == nil || !strings.Contains(out, "Bind for 0.0.0.0:80 failed: port is already allocated") { - t.Fatalf("port 80 is allocated to the old running container; allocating it to a new container should fail") - } - - // kill old running container and try to allocate again - _, err = s.d.Cmd("kill", oldCon) - if err != nil { - t.Fatal(err) - } - id, err := s.d.Cmd("run", "-p", "80:80", "-d", "busybox", "top") - if err != nil { - t.Fatal(err) - } - - // Cleanup because these containers will not be shut down by daemon - out, err = s.d.Cmd("stop", newCon) - if err != nil { - t.Fatalf("err: %v %v", err, string(out)) - } - _, err = s.d.Cmd("stop", strings.TrimSpace(id)) - if err != nil { - t.Fatal(err) - } -} - -func (s *DockerNetworkSuite) TestDockerNetworkFlagAlias(c *check.C) { - dockerCmd(c, "network", "create", "user") - output, status := dockerCmd(c, "run", "--rm", "--network=user", "--network-alias=foo", "busybox", "true") - c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) - - output, status, _ = dockerCmdWithError("run", "--rm", "--net=user", "--network=user", "busybox", "true") - c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) - - output, status, _ = dockerCmdWithError("run", "--rm", "--network=user", "--net-alias=foo", "--network-alias=bar", "busybox", "true") - c.Assert(status, checker.Equals, 0, check.Commentf("unexpected status code %d (%s)", status, output)) -} diff --git a/integration-cli/docker_cli_oom_killed_test.go b/integration-cli/docker_cli_oom_killed_test.go deleted file mode 100644 index bcf59f8601..0000000000 --- a/integration-cli/docker_cli_oom_killed_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !windows - -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestInspectOomKilledTrue(c *check.C) { - testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) - - name := "testoomkilled" - _, exitCode, _ := dockerCmdWithError("run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "x=a; while 
true; do x=$x$x$x$x; done") - - c.Assert(exitCode, checker.Equals, 137, check.Commentf("OOM exit should be 137")) - - oomKilled := inspectField(c, name, "State.OOMKilled") - c.Assert(oomKilled, checker.Equals, "true") -} - -func (s *DockerSuite) TestInspectOomKilledFalse(c *check.C) { - testRequires(c, DaemonIsLinux, memoryLimitSupport, swapMemorySupport) - - name := "testoomkilled" - dockerCmd(c, "run", "--name", name, "--memory", "32MB", "busybox", "sh", "-c", "echo hello world") - - oomKilled := inspectField(c, name, "State.OOMKilled") - c.Assert(oomKilled, checker.Equals, "false") -} diff --git a/integration-cli/docker_cli_pause_test.go b/integration-cli/docker_cli_pause_test.go deleted file mode 100644 index e546ad45d5..0000000000 --- a/integration-cli/docker_cli_pause_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package main - -import ( - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestPause(c *check.C) { - testRequires(c, DaemonIsLinux) - defer unpauseAllContainers() - - name := "testeventpause" - dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") - - dockerCmd(c, "pause", name) - pausedContainers, err := getSliceOfPausedContainers() - c.Assert(err, checker.IsNil) - c.Assert(len(pausedContainers), checker.Equals, 1) - - dockerCmd(c, "unpause", name) - - out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) - events := strings.Split(strings.TrimSpace(out), "\n") - actions := eventActionsByIDAndType(c, events, name, "container") - - c.Assert(actions[len(actions)-2], checker.Equals, "pause") - c.Assert(actions[len(actions)-1], checker.Equals, "unpause") -} - -func (s *DockerSuite) TestPauseMultipleContainers(c *check.C) { - testRequires(c, DaemonIsLinux) - defer unpauseAllContainers() - - containers := []string{ - "testpausewithmorecontainers1", - "testpausewithmorecontainers2", - } - for _, name := range containers { - dockerCmd(c, "run", "-d", "--name", name, "busybox", "top") - } - dockerCmd(c, append([]string{"pause"}, containers...)...) - pausedContainers, err := getSliceOfPausedContainers() - c.Assert(err, checker.IsNil) - c.Assert(len(pausedContainers), checker.Equals, len(containers)) - - dockerCmd(c, append([]string{"unpause"}, containers...)...) 
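- // the event stream should record a pause and an unpause for each container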
- - out, _ := dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c)) - events := strings.Split(strings.TrimSpace(out), "\n") - - for _, name := range containers { - actions := eventActionsByIDAndType(c, events, name, "container") - - c.Assert(actions[len(actions)-2], checker.Equals, "pause") - c.Assert(actions[len(actions)-1], checker.Equals, "unpause") - } -} - -func (s *DockerSuite) TestPauseFailsOnWindows(c *check.C) { - testRequires(c, DaemonIsWindows) - dockerCmd(c, "run", "-d", "--name=test", "busybox", "sleep 3") - out, _, _ := dockerCmdWithError("pause", "test") - c.Assert(out, checker.Contains, "Windows: Containers cannot be paused") -} diff --git a/integration-cli/docker_cli_plugins_test.go b/integration-cli/docker_cli_plugins_test.go deleted file mode 100644 index c873b75b5d..0000000000 --- a/integration-cli/docker_cli_plugins_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" - - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strings" -) - -var ( - pName = "tiborvass/no-remove" - pTag = "latest" - pNameWithTag = pName + ":" + pTag -) - -func (s *DockerSuite) TestPluginBasicOps(c *check.C) { - testRequires(c, DaemonIsLinux, ExperimentalDaemon) - _, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pNameWithTag) - c.Assert(err, checker.IsNil) - - out, _, err := dockerCmdWithError("plugin", "ls") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, pName) - c.Assert(out, checker.Contains, pTag) - c.Assert(out, checker.Contains, "true") - - out, _, err = dockerCmdWithError("plugin", "inspect", pNameWithTag) - c.Assert(err, checker.IsNil) - tmpFile, err := ioutil.TempFile("", "inspect.json") - c.Assert(err, checker.IsNil) - defer tmpFile.Close() - - if _, err := tmpFile.Write([]byte(out)); err != nil { - c.Fatal(err) - } - // FIXME: When `docker plugin inspect` takes a format as input, jq can be replaced. 
- id, err := exec.Command("jq", ".Id", "--raw-output", tmpFile.Name()).CombinedOutput() - c.Assert(err, checker.IsNil) - - out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "is active") - - _, _, err = dockerCmdWithError("plugin", "disable", pNameWithTag) - c.Assert(err, checker.IsNil) - - out, _, err = dockerCmdWithError("plugin", "remove", pNameWithTag) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, pNameWithTag) - - _, err = os.Stat(filepath.Join(dockerBasePath, "plugins", strings.TrimSpace(string(id)))) - if !os.IsNotExist(err) { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestPluginInstallDisable(c *check.C) { - testRequires(c, DaemonIsLinux, ExperimentalDaemon) - out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", "--disable", pName) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Contains, pName) - - out, _, err = dockerCmdWithError("plugin", "ls") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, "false") - - out, _, err = dockerCmdWithError("plugin", "enable", pName) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Contains, pName) - - out, _, err = dockerCmdWithError("plugin", "disable", pName) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Contains, pName) - - out, _, err = dockerCmdWithError("plugin", "remove", pName) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Contains, pName) -} - -func (s *DockerSuite) TestPluginInstallImage(c *check.C) { - testRequires(c, DaemonIsLinux, ExperimentalDaemon) - out, _, err := dockerCmdWithError("plugin", "install", "redis") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "content is not a plugin") -} - -func (s *DockerSuite) TestPluginEnableDisableNegative(c *check.C) { - testRequires(c, DaemonIsLinux, ExperimentalDaemon) - out, _, err := dockerCmdWithError("plugin", "install", "--grant-all-permissions", pName) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Contains, pName) - - out, _, err = dockerCmdWithError("plugin", "enable", pName) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Contains, "already enabled") - - _, _, err = dockerCmdWithError("plugin", "disable", pName) - c.Assert(err, checker.IsNil) - - out, _, err = dockerCmdWithError("plugin", "disable", pName) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Contains, "already disabled") - - _, _, err = dockerCmdWithError("plugin", "remove", pName) - c.Assert(err, checker.IsNil) -} diff --git a/integration-cli/docker_cli_port_test.go b/integration-cli/docker_cli_port_test.go deleted file mode 100644 index 80b00fe93e..0000000000 --- a/integration-cli/docker_cli_port_test.go +++ /dev/null @@ -1,319 +0,0 @@ -package main - -import ( - "fmt" - "net" - "regexp" - "sort" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestPortList(c *check.C) { - testRequires(c, DaemonIsLinux) - // one port - out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", "top") - firstID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "port", firstID, "80") - - err := assertPortList(c, out, []string{"0.0.0.0:9876"}) - // Port list is not correct - c.Assert(err, checker.IsNil) - - out, _ = dockerCmd(c, "port", firstID) - - err = assertPortList(c, out, []string{"80/tcp -> 0.0.0.0:9876"}) - // Port list is not correct - 
c.Assert(err, checker.IsNil) - - dockerCmd(c, "rm", "-f", firstID) - - // three ports - out, _ = dockerCmd(c, "run", "-d", - "-p", "9876:80", - "-p", "9877:81", - "-p", "9878:82", - "busybox", "top") - ID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "port", ID, "80") - - err = assertPortList(c, out, []string{"0.0.0.0:9876"}) - // Port list is not correct - c.Assert(err, checker.IsNil) - - out, _ = dockerCmd(c, "port", ID) - - err = assertPortList(c, out, []string{ - "80/tcp -> 0.0.0.0:9876", - "81/tcp -> 0.0.0.0:9877", - "82/tcp -> 0.0.0.0:9878"}) - // Port list is not correct - c.Assert(err, checker.IsNil) - - dockerCmd(c, "rm", "-f", ID) - - // more than one port mapped to the same container port - out, _ = dockerCmd(c, "run", "-d", - "-p", "9876:80", - "-p", "9999:80", - "-p", "9877:81", - "-p", "9878:82", - "busybox", "top") - ID = strings.TrimSpace(out) - - out, _ = dockerCmd(c, "port", ID, "80") - - err = assertPortList(c, out, []string{"0.0.0.0:9876", "0.0.0.0:9999"}) - // Port list is not correct - c.Assert(err, checker.IsNil) - - out, _ = dockerCmd(c, "port", ID) - - err = assertPortList(c, out, []string{ - "80/tcp -> 0.0.0.0:9876", - "80/tcp -> 0.0.0.0:9999", - "81/tcp -> 0.0.0.0:9877", - "82/tcp -> 0.0.0.0:9878"}) - // Port list is not correct - c.Assert(err, checker.IsNil) - dockerCmd(c, "rm", "-f", ID) - - testRange := func() { - // host port ranges used - IDs := make([]string, 3) - for i := 0; i < 3; i++ { - out, _ = dockerCmd(c, "run", "-d", - "-p", "9090-9092:80", - "busybox", "top") - IDs[i] = strings.TrimSpace(out) - - out, _ = dockerCmd(c, "port", IDs[i]) - - err = assertPortList(c, out, []string{fmt.Sprintf("80/tcp -> 0.0.0.0:%d", 9090+i)}) - // Port list is not correct - c.Assert(err, checker.IsNil) - } - - // test port range exhaustion - out, _, err = dockerCmdWithError("run", "-d", - "-p", "9090-9092:80", - "busybox", "top") - // Exhausted port range did not return an error - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - - for i := 0; i < 3; i++ { - dockerCmd(c, "rm", "-f", IDs[i]) - } - } - testRange() - // Verify we can re-use port ranges after they are no longer in use. - testRange() - - // test invalid port ranges - for _, invalidRange := range []string{"9090-9089:80", "9090-:80", "-9090:80"} { - out, _, err = dockerCmdWithError("run", "-d", - "-p", invalidRange, - "busybox", "top") - // Port range should have returned an error - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - } - - // test host range:container range spec. 
- out, _ = dockerCmd(c, "run", "-d", - "-p", "9800-9803:80-83", - "busybox", "top") - ID = strings.TrimSpace(out) - - out, _ = dockerCmd(c, "port", ID) - - err = assertPortList(c, out, []string{ - "80/tcp -> 0.0.0.0:9800", - "81/tcp -> 0.0.0.0:9801", - "82/tcp -> 0.0.0.0:9802", - "83/tcp -> 0.0.0.0:9803"}) - // Port list is not correct - c.Assert(err, checker.IsNil) - dockerCmd(c, "rm", "-f", ID) - - // test mixing protocols in same port range - out, _ = dockerCmd(c, "run", "-d", - "-p", "8000-8080:80", - "-p", "8000-8080:80/udp", - "busybox", "top") - ID = strings.TrimSpace(out) - - out, _ = dockerCmd(c, "port", ID) - - err = assertPortList(c, out, []string{ - "80/tcp -> 0.0.0.0:8000", - "80/udp -> 0.0.0.0:8000"}) - // Port list is not correct - c.Assert(err, checker.IsNil) - dockerCmd(c, "rm", "-f", ID) -} - -func assertPortList(c *check.C, out string, expected []string) error { - lines := strings.Split(strings.Trim(out, "\n "), "\n") - if len(lines) != len(expected) { - return fmt.Errorf("different size lists %s, %d, %d", out, len(lines), len(expected)) - } - sort.Strings(lines) - sort.Strings(expected) - - for i := 0; i < len(expected); i++ { - if lines[i] != expected[i] { - return fmt.Errorf("|" + lines[i] + "!=" + expected[i] + "|") - } - } - - return nil -} - -func stopRemoveContainer(id string, c *check.C) { - dockerCmd(c, "rm", "-f", id) -} - -func (s *DockerSuite) TestUnpublishedPortsInPsOutput(c *check.C) { - testRequires(c, DaemonIsLinux) - // Run busybox with command line expose (equivalent to EXPOSE in image's Dockerfile) for the following ports - port1 := 80 - port2 := 443 - expose1 := fmt.Sprintf("--expose=%d", port1) - expose2 := fmt.Sprintf("--expose=%d", port2) - dockerCmd(c, "run", "-d", expose1, expose2, "busybox", "sleep", "5") - - // Check docker ps o/p for last created container reports the unpublished ports - unpPort1 := fmt.Sprintf("%d/tcp", port1) - unpPort2 := fmt.Sprintf("%d/tcp", port2) - out, _ := dockerCmd(c, "ps", "-n=1") - // Missing unpublished ports in docker ps output - c.Assert(out, checker.Contains, unpPort1) - // Missing unpublished ports in docker ps output - c.Assert(out, checker.Contains, unpPort2) - - // Run the container forcing to publish the exposed ports - dockerCmd(c, "run", "-d", "-P", expose1, expose2, "busybox", "sleep", "5") - - // Check docker ps o/p for last created container reports the exposed ports in the port bindings - expBndRegx1 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort1) - expBndRegx2 := regexp.MustCompile(`0.0.0.0:\d\d\d\d\d->` + unpPort2) - out, _ = dockerCmd(c, "ps", "-n=1") - // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort1) in docker ps output - c.Assert(expBndRegx1.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort1: %s", out, unpPort1)) - // Cannot find expected port binding port (0.0.0.0:xxxxx->unpPort2) in docker ps output - c.Assert(expBndRegx2.MatchString(out), checker.Equals, true, check.Commentf("out: %s; unpPort2: %s", out, unpPort2)) - - // Run the container specifying explicit port bindings for the exposed ports - offset := 10000 - pFlag1 := fmt.Sprintf("%d:%d", offset+port1, port1) - pFlag2 := fmt.Sprintf("%d:%d", offset+port2, port2) - out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, expose1, expose2, "busybox", "sleep", "5") - id := strings.TrimSpace(out) - - // Check docker ps o/p for last created container reports the specified port mappings - expBnd1 := fmt.Sprintf("0.0.0.0:%d->%s", offset+port1, unpPort1) - expBnd2 := 
fmt.Sprintf("0.0.0.0:%d->%s", offset+port2, unpPort2) - out, _ = dockerCmd(c, "ps", "-n=1") - // Cannot find expected port binding (expBnd1) in docker ps output - c.Assert(out, checker.Contains, expBnd1) - // Cannot find expected port binding (expBnd2) in docker ps output - c.Assert(out, checker.Contains, expBnd2) - - // Remove container now otherwise it will interfere with next test - stopRemoveContainer(id, c) - - // Run the container with explicit port bindings and no exposed ports - out, _ = dockerCmd(c, "run", "-d", "-p", pFlag1, "-p", pFlag2, "busybox", "sleep", "5") - id = strings.TrimSpace(out) - - // Check docker ps o/p for last created container reports the specified port mappings - out, _ = dockerCmd(c, "ps", "-n=1") - // Cannot find expected port binding (expBnd1) in docker ps output - c.Assert(out, checker.Contains, expBnd1) - // Cannot find expected port binding (expBnd2) in docker ps output - c.Assert(out, checker.Contains, expBnd2) - // Remove container now otherwise it will interfere with next test - stopRemoveContainer(id, c) - - // Run the container with one unpublished exposed port and one explicit port binding - dockerCmd(c, "run", "-d", expose1, "-p", pFlag2, "busybox", "sleep", "5") - - // Check docker ps o/p for last created container reports the specified unpublished port and port mapping - out, _ = dockerCmd(c, "ps", "-n=1") - // Missing unpublished exposed ports (unpPort1) in docker ps output - c.Assert(out, checker.Contains, unpPort1) - // Missing port binding (expBnd2) in docker ps output - c.Assert(out, checker.Contains, expBnd2) -} - -func (s *DockerSuite) TestPortHostBinding(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "-d", "-p", "9876:80", "busybox", - "nc", "-l", "-p", "80") - firstID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "port", firstID, "80") - - err := assertPortList(c, out, []string{"0.0.0.0:9876"}) - // Port list is not correct - c.Assert(err, checker.IsNil) - - dockerCmd(c, "run", "--net=host", "busybox", - "nc", "localhost", "9876") - - dockerCmd(c, "rm", "-f", firstID) - - out, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "9876") - // Port is still bound after the Container is removed - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) -} - -func (s *DockerSuite) TestPortExposeHostBinding(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "-d", "-P", "--expose", "80", "busybox", - "nc", "-l", "-p", "80") - firstID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "port", firstID, "80") - - _, exposedPort, err := net.SplitHostPort(out) - c.Assert(err, checker.IsNil, check.Commentf("out: %s", out)) - - dockerCmd(c, "run", "--net=host", "busybox", - "nc", "localhost", strings.TrimSpace(exposedPort)) - - dockerCmd(c, "rm", "-f", firstID) - - out, _, err = dockerCmdWithError("run", "--net=host", "busybox", - "nc", "localhost", strings.TrimSpace(exposedPort)) - // Port is still bound after the Container is removed - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) -} - -func (s *DockerSuite) TestPortBindingOnSandbox(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - dockerCmd(c, "network", "create", "--internal", "-d", "bridge", "internal-net") - nr := getNetworkResource(c, "internal-net") - c.Assert(nr.Internal, checker.Equals, true) - - dockerCmd(c, "run", "--net", "internal-net", "-d", "--name", "c1", - "-p", "8080:8080", "busybox", "nc", "-l", "-p", "8080") - 
c.Assert(waitRun("c1"), check.IsNil) - - _, _, err := dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") - c.Assert(err, check.NotNil, - check.Commentf("Port mapping on internal network is expected to fail")) - - // Connect container to another normal bridge network - dockerCmd(c, "network", "create", "-d", "bridge", "foo-net") - dockerCmd(c, "network", "connect", "foo-net", "c1") - - _, _, err = dockerCmdWithError("run", "--net=host", "busybox", "nc", "localhost", "8080") - c.Assert(err, check.IsNil, - check.Commentf("Port mapping on the new network is expected to succeed")) - -} diff --git a/integration-cli/docker_cli_proxy_test.go b/integration-cli/docker_cli_proxy_test.go deleted file mode 100644 index e5699ca52c..0000000000 --- a/integration-cli/docker_cli_proxy_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package main - -import ( - "net" - "os/exec" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestCliProxyDisableProxyUnixSock(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, SameHostDaemon) // test is valid when DOCKER_HOST=unix://.. - - cmd := exec.Command(dockerBinary, "info") - cmd.Env = appendBaseEnv(false, "HTTP_PROXY=http://127.0.0.1:9999") - - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil, check.Commentf("%v", out)) - -} - -// Can't use localhost here since go has a special case to not use proxy if connecting to localhost -// See https://golang.org/pkg/net/http/#ProxyFromEnvironment -func (s *DockerDaemonSuite) TestCliProxyProxyTCPSock(c *check.C) { - testRequires(c, SameHostDaemon) - // get the IP to use to connect since we can't use localhost - addrs, err := net.InterfaceAddrs() - c.Assert(err, checker.IsNil) - var ip string - for _, addr := range addrs { - sAddr := addr.String() - if !strings.Contains(sAddr, "127.0.0.1") { - addrArr := strings.Split(sAddr, "/") - ip = addrArr[0] - break - } - } - - c.Assert(ip, checker.Not(checker.Equals), "") - - err = s.d.Start("-H", "tcp://"+ip+":2375") - c.Assert(err, checker.IsNil) - cmd := exec.Command(dockerBinary, "info") - cmd.Env = []string{"DOCKER_HOST=tcp://" + ip + ":2375", "HTTP_PROXY=127.0.0.1:9999"} - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.NotNil, check.Commentf("%v", out)) - // Test with no_proxy - cmd.Env = append(cmd.Env, "NO_PROXY="+ip) - out, _, err = runCommandWithOutput(exec.Command(dockerBinary, "info")) - c.Assert(err, checker.IsNil, check.Commentf("%v", out)) -} diff --git a/integration-cli/docker_cli_ps_test.go b/integration-cli/docker_cli_ps_test.go deleted file mode 100644 index 6e1756f06f..0000000000 --- a/integration-cli/docker_cli_ps_test.go +++ /dev/null @@ -1,900 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "sort" - "strconv" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringid" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestPsListContainersBase(c *check.C) { - out, _ := runSleepingContainer(c, "-d") - firstID := strings.TrimSpace(out) - - out, _ = runSleepingContainer(c, "-d") - secondID := strings.TrimSpace(out) - - // not long running - out, _ = dockerCmd(c, "run", "-d", "busybox", "true") - thirdID := strings.TrimSpace(out) - - out, _ = runSleepingContainer(c, "-d") - fourthID := strings.TrimSpace(out) - - // make sure the second is running - c.Assert(waitRun(secondID), checker.IsNil) - - // make sure third one 
is not running - dockerCmd(c, "wait", thirdID) - - // make sure the forth is running - c.Assert(waitRun(fourthID), checker.IsNil) - - // all - out, _ = dockerCmd(c, "ps", "-a") - c.Assert(assertContainerList(out, []string{fourthID, thirdID, secondID, firstID}), checker.Equals, true, check.Commentf("ALL: Container list is not in the correct order: \n%s", out)) - - // running - out, _ = dockerCmd(c, "ps") - c.Assert(assertContainerList(out, []string{fourthID, secondID, firstID}), checker.Equals, true, check.Commentf("RUNNING: Container list is not in the correct order: \n%s", out)) - - // limit - out, _ = dockerCmd(c, "ps", "-n=2", "-a") - expected := []string{fourthID, thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT & ALL: Container list is not in the correct order: \n%s", out)) - - out, _ = dockerCmd(c, "ps", "-n=2") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("LIMIT: Container list is not in the correct order: \n%s", out)) - - // filter since - out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-a") - expected = []string{fourthID, thirdID, secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter & ALL: Container list is not in the correct order: \n%s", out)) - - out, _ = dockerCmd(c, "ps", "-f", "since="+firstID) - expected = []string{fourthID, secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) - - out, _ = dockerCmd(c, "ps", "-f", "since="+thirdID) - expected = []string{fourthID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) - - // filter before - out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-a") - expected = []string{thirdID, secondID, firstID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) - - out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID) - expected = []string{secondID, firstID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter: Container list is not in the correct order: \n%s", out)) - - out, _ = dockerCmd(c, "ps", "-f", "before="+thirdID) - expected = []string{secondID, firstID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter: Container list is not in the correct order: \n%s", out)) - - // filter since & before - out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-a") - expected = []string{thirdID, secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter & ALL: Container list is not in the correct order: \n%s", out)) - - out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID) - expected = []string{secondID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter: Container list is not in the correct order: \n%s", out)) - - // filter since & limit - out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2", "-a") - expected = []string{fourthID, thirdID} - - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) - 
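// [Editor's note] Illustrative sketch, not part of the original test file. The
// expected-ID slices asserted above follow mechanically from the filter
// semantics: with `ps -a`, "since=X" keeps containers created after X,
// "before=X" keeps containers created before X, and "-n" truncates the
// newest-first list. This standalone model reproduces those expectations for
// the four containers in TestPsListContainersBase (without -a, ps would also
// drop the exited third container, which the model deliberately ignores).
package main

import "fmt"

// applyFilters models since/before/limit over container IDs ordered newest-first.
func applyFilters(newestFirst []string, since, before string, limit int) []string {
	out := []string{}
	include := before == "" // with a before filter, skip rows until it is passed
	for _, id := range newestFirst {
		if id == since {
			break // everything from `since` onward is older, stop here
		}
		if id == before {
			include = true // `before` itself is excluded
			continue
		}
		if include {
			out = append(out, id)
		}
	}
	if limit > 0 && len(out) > limit {
		out = out[:limit]
	}
	return out
}

func main() {
	ids := []string{"fourth", "third", "second", "first"} // newest first
	fmt.Println(applyFilters(ids, "first", "", 0))       // [fourth third second]
	fmt.Println(applyFilters(ids, "", "fourth", 0))      // [third second first]
	fmt.Println(applyFilters(ids, "first", "fourth", 1)) // [third]
}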
- out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-n=2") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, LIMIT: Container list is not in the correct order: \n%s", out)) - - // filter before & limit - out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1", "-a") - expected = []string{thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) - - out, _ = dockerCmd(c, "ps", "-f", "before="+fourthID, "-n=1") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) - - // filter since & filter before & limit - out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1", "-a") - expected = []string{thirdID} - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT & ALL: Container list is not in the correct order: \n%s", out)) - - out, _ = dockerCmd(c, "ps", "-f", "since="+firstID, "-f", "before="+fourthID, "-n=1") - c.Assert(assertContainerList(out, expected), checker.Equals, true, check.Commentf("SINCE filter, BEFORE filter, LIMIT: Container list is not in the correct order: \n%s", out)) - -} - -func assertContainerList(out string, expected []string) bool { - lines := strings.Split(strings.Trim(out, "\n "), "\n") - - if len(lines)-1 != len(expected) { - return false - } - - containerIDIndex := strings.Index(lines[0], "CONTAINER ID") - for i := 0; i < len(expected); i++ { - foundID := lines[i+1][containerIDIndex : containerIDIndex+12] - if foundID != expected[i][:12] { - return false - } - } - - return true -} - -func (s *DockerSuite) TestPsListContainersInvalidFilterName(c *check.C) { - out, _, err := dockerCmdWithError("ps", "-f", "invalidFilter=test") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "Invalid filter") -} - -func (s *DockerSuite) TestPsListContainersSize(c *check.C) { - // Problematic on Windows as it doesn't report the size correctly @swernli - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "busybox") - - baseOut, _ := dockerCmd(c, "ps", "-s", "-n=1") - baseLines := strings.Split(strings.Trim(baseOut, "\n "), "\n") - baseSizeIndex := strings.Index(baseLines[0], "SIZE") - baseFoundsize := baseLines[1][baseSizeIndex:] - baseBytes, err := strconv.Atoi(strings.Split(baseFoundsize, " ")[0]) - c.Assert(err, checker.IsNil) - - name := "test_size" - dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo 1 > test") - id, err := getIDByName(name) - c.Assert(err, checker.IsNil) - - runCmd := exec.Command(dockerBinary, "ps", "-s", "-n=1") - var out string - - wait := make(chan struct{}) - go func() { - out, _, err = runCommandWithOutput(runCmd) - close(wait) - }() - select { - case <-wait: - case <-time.After(3 * time.Second): - c.Fatalf("Calling \"docker ps -s\" timed out!") - } - c.Assert(err, checker.IsNil) - lines := strings.Split(strings.Trim(out, "\n "), "\n") - c.Assert(lines, checker.HasLen, 2, check.Commentf("Expected 2 lines for 'ps -s -n=1' output, got %d", len(lines))) - sizeIndex := strings.Index(lines[0], "SIZE") - idIndex := strings.Index(lines[0], "CONTAINER ID") - foundID := lines[1][idIndex : idIndex+12] - c.Assert(foundID, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s", id[:12], foundID)) - expectedSize := fmt.Sprintf("%d B", (2 + baseBytes)) 
- foundSize := lines[1][sizeIndex:] - c.Assert(foundSize, checker.Contains, expectedSize, check.Commentf("Expected size %q, got %q", expectedSize, foundSize)) -} - -func (s *DockerSuite) TestPsListContainersFilterStatus(c *check.C) { - // start exited container - out, _ := dockerCmd(c, "run", "-d", "busybox") - firstID := strings.TrimSpace(out) - - // make sure the exited container is not running - dockerCmd(c, "wait", firstID) - - // start running container - out, _ = dockerCmd(c, "run", "-itd", "busybox") - secondID := strings.TrimSpace(out) - - // filter containers by exited - out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=exited") - containerOut := strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, firstID) - - out, _ = dockerCmd(c, "ps", "-a", "--no-trunc", "-q", "--filter=status=running") - containerOut = strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, secondID) - - out, _, _ = dockerCmdWithTimeout(time.Second*60, "ps", "-a", "-q", "--filter=status=rubbish") - c.Assert(out, checker.Contains, "Unrecognised filter value for status", check.Commentf("Expected error response due to invalid status filter output: %q", out)) - - // Windows doesn't support pausing of containers - if daemonPlatform != "windows" { - // pause running container - out, _ = dockerCmd(c, "run", "-itd", "busybox") - pausedID := strings.TrimSpace(out) - dockerCmd(c, "pause", pausedID) - // make sure the container is unpaused to let the daemon stop it properly - defer func() { dockerCmd(c, "unpause", pausedID) }() - - out, _ = dockerCmd(c, "ps", "--no-trunc", "-q", "--filter=status=paused") - containerOut = strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, pausedID) - } -} - -func (s *DockerSuite) TestPsListContainersFilterID(c *check.C) { - // start container - out, _ := dockerCmd(c, "run", "-d", "busybox") - firstID := strings.TrimSpace(out) - - // start another container - runSleepingContainer(c) - - // filter containers by id - out, _ = dockerCmd(c, "ps", "-a", "-q", "--filter=id="+firstID) - containerOut := strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, firstID[:12], check.Commentf("Expected id %s, got %s for id filter, output: %q", firstID[:12], containerOut, out)) - -} - -func (s *DockerSuite) TestPsListContainersFilterName(c *check.C) { - // start container - dockerCmd(c, "run", "--name=a_name_to_match", "busybox") - id, err := getIDByName("a_name_to_match") - c.Assert(err, check.IsNil) - - // start another container - runSleepingContainer(c, "--name=b_name_to_match") - - // filter containers by name - out, _ := dockerCmd(c, "ps", "-a", "-q", "--filter=name=a_name_to_match") - containerOut := strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, id[:12], check.Commentf("Expected id %s, got %s for name filter, output: %q", id[:12], containerOut, out)) -} - -// Test for the ancestor filter for ps. 
-// There is also the same test but with image:tag@digest in docker_cli_by_digest_test.go -// -// What the test sets up: -// - Create 2 images based on busybox using the same repository but different tags -// - Create an image based on the previous image (images_ps_filter_test2) -// - Run containers for each of those images (busybox, images_ps_filter_test1, images_ps_filter_test2) -// - Filter them out :P -func (s *DockerSuite) TestPsListContainersFilterAncestorImage(c *check.C) { - // Build images - imageName1 := "images_ps_filter_test1" - imageID1, err := buildImage(imageName1, - `FROM busybox - LABEL match me 1`, true) - c.Assert(err, checker.IsNil) - - imageName1Tagged := "images_ps_filter_test1:tag" - imageID1Tagged, err := buildImage(imageName1Tagged, - `FROM busybox - LABEL match me 1 tagged`, true) - c.Assert(err, checker.IsNil) - - imageName2 := "images_ps_filter_test2" - imageID2, err := buildImage(imageName2, - fmt.Sprintf(`FROM %s - LABEL match me 2`, imageName1), true) - c.Assert(err, checker.IsNil) - - // start containers - dockerCmd(c, "run", "--name=first", "busybox", "echo", "hello") - firstID, err := getIDByName("first") - c.Assert(err, check.IsNil) - - // start another container - dockerCmd(c, "run", "--name=second", "busybox", "echo", "hello") - secondID, err := getIDByName("second") - c.Assert(err, check.IsNil) - - // start third container - dockerCmd(c, "run", "--name=third", imageName1, "echo", "hello") - thirdID, err := getIDByName("third") - c.Assert(err, check.IsNil) - - // start fourth container - dockerCmd(c, "run", "--name=fourth", imageName1Tagged, "echo", "hello") - fourthID, err := getIDByName("fourth") - c.Assert(err, check.IsNil) - - // start fifth container - dockerCmd(c, "run", "--name=fifth", imageName2, "echo", "hello") - fifthID, err := getIDByName("fifth") - c.Assert(err, check.IsNil) - - var filterTestSuite = []struct { - filterName string - expectedIDs []string - }{ - // nonexistent stuff - {"nonexistent", []string{}}, - {"nonexistent:tag", []string{}}, - // image - {"busybox", []string{firstID, secondID, thirdID, fourthID, fifthID}}, - {imageName1, []string{thirdID, fifthID}}, - {imageName2, []string{fifthID}}, - // image:tag - {fmt.Sprintf("%s:latest", imageName1), []string{thirdID, fifthID}}, - {imageName1Tagged, []string{fourthID}}, - // short-id - {stringid.TruncateID(imageID1), []string{thirdID, fifthID}}, - {stringid.TruncateID(imageID2), []string{fifthID}}, - // full-id - {imageID1, []string{thirdID, fifthID}}, - {imageID1Tagged, []string{fourthID}}, - {imageID2, []string{fifthID}}, - } - - var out string - for _, filter := range filterTestSuite { - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+filter.filterName) - checkPsAncestorFilterOutput(c, out, filter.filterName, filter.expectedIDs) - } - - // Multiple ancestor filter - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=ancestor="+imageName2, "--filter=ancestor="+imageName1Tagged) - checkPsAncestorFilterOutput(c, out, imageName2+","+imageName1Tagged, []string{fourthID, fifthID}) -} - -func checkPsAncestorFilterOutput(c *check.C, out string, filterName string, expectedIDs []string) { - actualIDs := []string{} - if out != "" { - actualIDs = strings.Split(out[:len(out)-1], "\n") - } - sort.Strings(actualIDs) - sort.Strings(expectedIDs) - - c.Assert(actualIDs, checker.HasLen, len(expectedIDs), check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v:%v, got %v:%v", filterName, len(expectedIDs), expectedIDs, len(actualIDs), 
actualIDs)) - if len(expectedIDs) > 0 { - same := true - for i := range expectedIDs { - if actualIDs[i] != expectedIDs[i] { - c.Logf("%s, %s", actualIDs[i], expectedIDs[i]) - same = false - break - } - } - c.Assert(same, checker.Equals, true, check.Commentf("Expected filtered container(s) for %s ancestor filter to be %v, got %v", filterName, expectedIDs, actualIDs)) - } -} - -func (s *DockerSuite) TestPsListContainersFilterLabel(c *check.C) { - // start container - dockerCmd(c, "run", "--name=first", "-l", "match=me", "-l", "second=tag", "busybox") - firstID, err := getIDByName("first") - c.Assert(err, check.IsNil) - - // start another container - dockerCmd(c, "run", "--name=second", "-l", "match=me too", "busybox") - secondID, err := getIDByName("second") - c.Assert(err, check.IsNil) - - // start third container - dockerCmd(c, "run", "--name=third", "-l", "nomatch=me", "busybox") - thirdID, err := getIDByName("third") - c.Assert(err, check.IsNil) - - // filter containers by exact match - out, _ := dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me") - containerOut := strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for label filter, output: %q", firstID, containerOut, out)) - - // filter containers by two labels - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag") - containerOut = strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, firstID, check.Commentf("Expected id %s, got %s for label filter, output: %q", firstID, containerOut, out)) - - // filter containers by two labels, but expect not found because of AND behavior - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match=me", "--filter=label=second=tag-no") - containerOut = strings.TrimSpace(out) - c.Assert(containerOut, checker.Equals, "", check.Commentf("Expected nothing, got %s for label filter, output: %q", containerOut, out)) - - // filter containers by exact key - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=label=match") - containerOut = strings.TrimSpace(out) - c.Assert(containerOut, checker.Contains, firstID) - c.Assert(containerOut, checker.Contains, secondID) - c.Assert(containerOut, checker.Not(checker.Contains), thirdID) -} - -func (s *DockerSuite) TestPsListContainersFilterExited(c *check.C) { - runSleepingContainer(c, "--name=sleep") - - dockerCmd(c, "run", "--name", "zero1", "busybox", "true") - firstZero, err := getIDByName("zero1") - c.Assert(err, checker.IsNil) - - dockerCmd(c, "run", "--name", "zero2", "busybox", "true") - secondZero, err := getIDByName("zero2") - c.Assert(err, checker.IsNil) - - out, _, err := dockerCmdWithError("run", "--name", "nonzero1", "busybox", "false") - c.Assert(err, checker.NotNil, check.Commentf("Should fail. out: %s, err: %v", out, err)) - - firstNonZero, err := getIDByName("nonzero1") - c.Assert(err, checker.IsNil) - - out, _, err = dockerCmdWithError("run", "--name", "nonzero2", "busybox", "false") - c.Assert(err, checker.NotNil, check.Commentf("Should fail. out: %s, err: %v", out, err)) - secondNonZero, err := getIDByName("nonzero2") - c.Assert(err, checker.IsNil) - - // filter containers by exited=0 - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=0") - ids := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 zero exited containers, got %d: %s", len(ids), out)) - c.Assert(ids[0], checker.Equals, secondZero, check.Commentf("First in list should be %q, got %q", secondZero, ids[0])) - c.Assert(ids[1], checker.Equals, firstZero, check.Commentf("Second in list should be %q, got %q", firstZero, ids[1])) - - out, _ = dockerCmd(c, "ps", "-a", "-q", "--no-trunc", "--filter=exited=1") - ids = strings.Split(strings.TrimSpace(out), "\n") - c.Assert(ids, checker.HasLen, 2, check.Commentf("Should be 2 non-zero exited containers, got %d", len(ids))) - c.Assert(ids[0], checker.Equals, secondNonZero, check.Commentf("First in list should be %q, got %q", secondNonZero, ids[0])) - c.Assert(ids[1], checker.Equals, firstNonZero, check.Commentf("Second in list should be %q, got %q", firstNonZero, ids[1])) - -} - -func (s *DockerSuite) TestPsRightTagName(c *check.C) { - // TODO Investigate further why this fails on Windows to Windows CI - testRequires(c, DaemonIsLinux) - tag := "asybox:shmatest" - dockerCmd(c, "tag", "busybox", tag) - - var id1 string - out, _ := runSleepingContainer(c) - id1 = strings.TrimSpace(string(out)) - - var id2 string - out, _ = runSleepingContainerInImage(c, tag) - id2 = strings.TrimSpace(string(out)) - - var imageID string - out = inspectField(c, "busybox", "Id") - imageID = strings.TrimSpace(string(out)) - - var id3 string - out, _ = runSleepingContainerInImage(c, imageID) - id3 = strings.TrimSpace(string(out)) - - out, _ = dockerCmd(c, "ps", "--no-trunc") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - // skip header - lines = lines[1:] - c.Assert(lines, checker.HasLen, 3, check.Commentf("There should be 3 running containers, got %d", len(lines))) - for _, line := range lines { - f := strings.Fields(line) - switch f[0] { - case id1: - c.Assert(f[1], checker.Equals, "busybox", check.Commentf("Expected %s tag for id %s, got %s", "busybox", id1, f[1])) - case id2: - c.Assert(f[1], checker.Equals, tag, check.Commentf("Expected %s tag for id %s, got %s", tag, id2, f[1])) - case id3: - c.Assert(f[1], checker.Equals, imageID, check.Commentf("Expected %s imageID for id %s, got %s", imageID, id3, f[1])) - default: - c.Fatalf("Unexpected id %s, expected %s and %s and %s", f[0], id1, id2, id3) - } - } -} - -func (s *DockerSuite) TestPsLinkedWithNoTrunc(c *check.C) { - // Problematic on Windows as it doesn't support links as of Jan 2016 - testRequires(c, DaemonIsLinux) - runSleepingContainer(c, "--name=first") - runSleepingContainer(c, "--name=second", "--link=first:first") - - out, _ := dockerCmd(c, "ps", "--no-trunc") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - // strip header - lines = lines[1:] - expected := []string{"second", "first,second/first"} - var names []string - for _, l := range lines { - fields := strings.Fields(l) - names = append(names, fields[len(fields)-1]) - } - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array: %v, got: %v", expected, names)) -} - -func (s *DockerSuite) TestPsGroupPortRange(c *check.C) { - // Problematic on Windows as it doesn't support port ranges as of Jan 2016 - testRequires(c, DaemonIsLinux) - portRange := "3850-3900" - dockerCmd(c, "run", "-d", "--name", "porttest", "-p", portRange+":"+portRange, "busybox", "top") - - out, _ := dockerCmd(c, "ps") - - c.Assert(string(out), checker.Contains, portRange, check.Commentf("docker ps output should have had the port range %q: %s", portRange, string(out))) - -} - -func (s *DockerSuite) TestPsWithSize(c *check.C) { - // Problematic on Windows as it doesn't report the size correctly @swernli - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name", "sizetest", "busybox", "top") - 
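// [Editor's note] Illustrative sketch, not part of the original test file. The
// link tests above read the NAMES column as the last whitespace-separated
// field of each ps row; that shortcut is safe only because names, including
// the linked form "first,second/first", never contain spaces. In isolation:
package main

import (
	"fmt"
	"strings"
)

// lastField extracts the final whitespace-separated field of each row.
func lastField(rows []string) []string {
	names := make([]string, 0, len(rows))
	for _, row := range rows {
		fields := strings.Fields(row)
		if len(fields) == 0 {
			continue
		}
		names = append(names, fields[len(fields)-1])
	}
	return names
}

func main() {
	rows := []string{
		`2b4e53c1e0f9   busybox   "top"   Up 2 minutes   second`,
		`a3f7de61a4cb   busybox   "top"   Up 2 minutes   first,second/first`,
	}
	fmt.Println(lastField(rows)) // [second first,second/first]
}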
- out, _ := dockerCmd(c, "ps", "--size") - c.Assert(out, checker.Contains, "virtual", check.Commentf("docker ps with --size should show virtual size of container")) -} - -func (s *DockerSuite) TestPsListContainersFilterCreated(c *check.C) { - // create a container - out, _ := dockerCmd(c, "create", "busybox") - cID := strings.TrimSpace(out) - shortCID := cID[:12] - - // Make sure it DOESN'T show up w/o a '-a' for normal 'ps' - out, _ = dockerCmd(c, "ps", "-q") - c.Assert(out, checker.Not(checker.Contains), shortCID, check.Commentf("Should have not seen '%s' in ps output:\n%s", shortCID, out)) - - // Make sure it DOES show up as 'Created' for 'ps -a' - out, _ = dockerCmd(c, "ps", "-a") - - hits := 0 - for _, line := range strings.Split(out, "\n") { - if !strings.Contains(line, shortCID) { - continue - } - hits++ - c.Assert(line, checker.Contains, "Created", check.Commentf("Missing 'Created' on '%s'", line)) - } - - c.Assert(hits, checker.Equals, 1, check.Commentf("Should have seen '%s' in ps -a output once:%d\n%s", shortCID, hits, out)) - - // filter containers by 'create' - note, no -a needed - out, _ = dockerCmd(c, "ps", "-q", "-f", "status=created") - containerOut := strings.TrimSpace(out) - c.Assert(cID, checker.HasPrefix, containerOut) -} - -func (s *DockerSuite) TestPsFormatMultiNames(c *check.C) { - // Problematic on Windows as it doesn't support link as of Jan 2016 - testRequires(c, DaemonIsLinux) - //create 2 containers and link them - dockerCmd(c, "run", "--name=child", "-d", "busybox", "top") - dockerCmd(c, "run", "--name=parent", "--link=child:linkedone", "-d", "busybox", "top") - - //use the new format capabilities to only list the names and --no-trunc to get all names - out, _ := dockerCmd(c, "ps", "--format", "{{.Names}}", "--no-trunc") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - expected := []string{"parent", "child,parent/linkedone"} - var names []string - for _, l := range lines { - names = append(names, l) - } - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with non-truncated names: %v, got: %v", expected, names)) - - //now list without turning off truncation and make sure we only get the non-link names - out, _ = dockerCmd(c, "ps", "--format", "{{.Names}}") - lines = strings.Split(strings.TrimSpace(string(out)), "\n") - expected = []string{"parent", "child"} - var truncNames []string - for _, l := range lines { - truncNames = append(truncNames, l) - } - c.Assert(expected, checker.DeepEquals, truncNames, check.Commentf("Expected array with truncated names: %v, got: %v", expected, truncNames)) -} - -// Test for GitHub issue #21772 -func (s *DockerSuite) TestPsNamesMultipleTime(c *check.C) { - runSleepingContainer(c, "--name=test1") - runSleepingContainer(c, "--name=test2") - - //use the new format capabilities to list the names twice - out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Names}}") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - expected := []string{"test2 test2", "test1 test1"} - var names []string - for _, l := range lines { - names = append(names, l) - } - c.Assert(expected, checker.DeepEquals, names, check.Commentf("Expected array with names displayed twice: %v, got: %v", expected, names)) -} - -func (s *DockerSuite) TestPsFormatHeaders(c *check.C) { - // make sure no-container "docker ps" still prints the header row - out, _ := dockerCmd(c, "ps", "--format", "table {{.ID}}") - c.Assert(out, checker.Equals, "CONTAINER ID\n", check.Commentf(`Expected 'CONTAINER ID\n', got %v`, out)) 
- - // verify that "docker ps" with a container still prints the header row also - runSleepingContainer(c, "--name=test") - out, _ = dockerCmd(c, "ps", "--format", "table {{.Names}}") - c.Assert(out, checker.Equals, "NAMES\ntest\n", check.Commentf(`Expected 'NAMES\ntest\n', got %v`, out)) -} - -func (s *DockerSuite) TestPsDefaultFormatAndQuiet(c *check.C) { - config := `{ - "psFormat": "default {{ .ID }}" -}` - d, err := ioutil.TempDir("", "integration-cli-") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(d) - - err = ioutil.WriteFile(filepath.Join(d, "config.json"), []byte(config), 0644) - c.Assert(err, checker.IsNil) - - out, _ := runSleepingContainer(c, "--name=test") - id := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "--config", d, "ps", "-q") - c.Assert(id, checker.HasPrefix, strings.TrimSpace(out), check.Commentf("Expected to print only the container id, got %v\n", out)) -} - -// Test for GitHub issue #12595 -func (s *DockerSuite) TestPsImageIDAfterUpdate(c *check.C) { - // TODO: Investigate why this fails on Windows to Windows CI further. - testRequires(c, DaemonIsLinux) - originalImageName := "busybox:TestPsImageIDAfterUpdate-original" - updatedImageName := "busybox:TestPsImageIDAfterUpdate-updated" - - runCmd := exec.Command(dockerBinary, "tag", "busybox:latest", originalImageName) - out, _, err := runCommandWithOutput(runCmd) - c.Assert(err, checker.IsNil) - - originalImageID, err := getIDByName(originalImageName) - c.Assert(err, checker.IsNil) - - runCmd = exec.Command(dockerBinary, append([]string{"run", "-d", originalImageName}, defaultSleepCommand...)...) - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.IsNil) - containerID := strings.TrimSpace(out) - - linesOut, err := exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() - c.Assert(err, checker.IsNil) - - lines := strings.Split(strings.TrimSpace(string(linesOut)), "\n") - // skip header - lines = lines[1:] - c.Assert(len(lines), checker.Equals, 1) - - for _, line := range lines { - f := strings.Fields(line) - c.Assert(f[1], checker.Equals, originalImageName) - } - - runCmd = exec.Command(dockerBinary, "commit", containerID, updatedImageName) - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.IsNil) - - runCmd = exec.Command(dockerBinary, "tag", updatedImageName, originalImageName) - out, _, err = runCommandWithOutput(runCmd) - c.Assert(err, checker.IsNil) - - linesOut, err = exec.Command(dockerBinary, "ps", "--no-trunc").CombinedOutput() - c.Assert(err, checker.IsNil) - - lines = strings.Split(strings.TrimSpace(string(linesOut)), "\n") - // skip header - lines = lines[1:] - c.Assert(len(lines), checker.Equals, 1) - - for _, line := range lines { - f := strings.Fields(line) - c.Assert(f[1], checker.Equals, originalImageID) - } - -} - -func (s *DockerSuite) TestPsNotShowPortsOfStoppedContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "--name=foo", "-d", "-p", "5000:5000", "busybox", "top") - c.Assert(waitRun("foo"), checker.IsNil) - out, _ := dockerCmd(c, "ps") - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - expected := "0.0.0.0:5000->5000/tcp" - fields := strings.Fields(lines[1]) - c.Assert(fields[len(fields)-2], checker.Equals, expected, check.Commentf("Expected: %v, got: %v", expected, fields[len(fields)-2])) - - dockerCmd(c, "kill", "foo") - dockerCmd(c, "wait", "foo") - out, _ = dockerCmd(c, "ps", "-l") - lines = strings.Split(strings.TrimSpace(string(out)), "\n") - fields = strings.Fields(lines[1]) - 
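// [Editor's note] Illustrative sketch, not part of the original test file.
// TestPsDefaultFormatAndQuiet above writes the client config as a raw string;
// producing it with encoding/json makes the shape of the "psFormat" key
// explicit. The cliConfig struct here is a hypothetical stand-in that models
// only that one key, not the CLI's full config file format.
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

type cliConfig struct {
	PsFormat string `json:"psFormat"`
}

func main() {
	dir, err := ioutil.TempDir("", "cli-config-")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	data, err := json.MarshalIndent(cliConfig{PsFormat: "default {{ .ID }}"}, "", "  ")
	if err != nil {
		panic(err)
	}
	// The CLI would be pointed at this directory via `docker --config <dir> ...`.
	path := filepath.Join(dir, "config.json")
	if err := ioutil.WriteFile(path, data, 0644); err != nil {
		panic(err)
	}
	fmt.Println(path)
	fmt.Println(string(data))
}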
c.Assert(fields[len(fields)-2], checker.Not(checker.Equals), expected, check.Commentf("Should not get %v", expected)) -} - -func (s *DockerSuite) TestPsShowMounts(c *check.C) { - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - - mp := prefix + slash + "test" - - dockerCmd(c, "volume", "create", "--name", "ps-volume-test") - // volume mount containers - runSleepingContainer(c, "--name=volume-test-1", "--volume", "ps-volume-test:"+mp) - c.Assert(waitRun("volume-test-1"), checker.IsNil) - runSleepingContainer(c, "--name=volume-test-2", "--volume", mp) - c.Assert(waitRun("volume-test-2"), checker.IsNil) - // bind mount container - var bindMountSource string - var bindMountDestination string - if DaemonIsWindows.Condition() { - bindMountSource = "c:\\" - bindMountDestination = "c:\\t" - } else { - bindMountSource = "/tmp" - bindMountDestination = "/t" - } - runSleepingContainer(c, "--name=bind-mount-test", "-v", bindMountSource+":"+bindMountDestination) - c.Assert(waitRun("bind-mount-test"), checker.IsNil) - - out, _ := dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}") - - lines := strings.Split(strings.TrimSpace(string(out)), "\n") - c.Assert(lines, checker.HasLen, 3) - - fields := strings.Fields(lines[0]) - c.Assert(fields, checker.HasLen, 2) - c.Assert(fields[0], checker.Equals, "bind-mount-test") - c.Assert(fields[1], checker.Equals, bindMountSource) - - fields = strings.Fields(lines[1]) - c.Assert(fields, checker.HasLen, 2) - - anonymousVolumeID := fields[1] - - fields = strings.Fields(lines[2]) - c.Assert(fields[1], checker.Equals, "ps-volume-test") - - // filter by volume name - out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=ps-volume-test") - - lines = strings.Split(strings.TrimSpace(string(out)), "\n") - c.Assert(lines, checker.HasLen, 1) - - fields = strings.Fields(lines[0]) - c.Assert(fields[1], checker.Equals, "ps-volume-test") - - // empty results filtering by unknown volume - out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume=this-volume-should-not-exist") - c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) - - // filter by mount destination - out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+mp) - - lines = strings.Split(strings.TrimSpace(string(out)), "\n") - c.Assert(lines, checker.HasLen, 2) - - fields = strings.Fields(lines[0]) - c.Assert(fields[1], checker.Equals, anonymousVolumeID) - fields = strings.Fields(lines[1]) - c.Assert(fields[1], checker.Equals, "ps-volume-test") - - // filter by bind mount source - out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountSource) - - lines = strings.Split(strings.TrimSpace(string(out)), "\n") - c.Assert(lines, checker.HasLen, 1) - - fields = strings.Fields(lines[0]) - c.Assert(fields, checker.HasLen, 2) - c.Assert(fields[0], checker.Equals, "bind-mount-test") - c.Assert(fields[1], checker.Equals, bindMountSource) - - // filter by bind mount destination - out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} {{.Mounts}}", "--filter", "volume="+bindMountDestination) - - lines = strings.Split(strings.TrimSpace(string(out)), "\n") - c.Assert(lines, checker.HasLen, 1) - - fields = strings.Fields(lines[0]) - c.Assert(fields, checker.HasLen, 2) - c.Assert(fields[0], checker.Equals, "bind-mount-test") - c.Assert(fields[1], checker.Equals, bindMountSource) - - // empty results filtering by unknown mount point - out, _ = dockerCmd(c, "ps", "--format", "{{.Names}} 
{{.Mounts}}", "--filter", "volume="+prefix+slash+"this-path-was-never-mounted") - c.Assert(strings.TrimSpace(string(out)), checker.HasLen, 0) -} - -func (s *DockerSuite) TestPsFormatSize(c *check.C) { - testRequires(c, DaemonIsLinux) - runSleepingContainer(c) - - out, _ := dockerCmd(c, "ps", "--format", "table {{.Size}}") - lines := strings.Split(out, "\n") - c.Assert(lines[1], checker.Not(checker.Equals), "0 B", check.Commentf("Should not display a size of 0 B")) - - out, _ = dockerCmd(c, "ps", "--size", "--format", "table {{.Size}}") - lines = strings.Split(out, "\n") - c.Assert(lines[0], checker.Equals, "SIZE", check.Commentf("Should only have one size column")) - - out, _ = dockerCmd(c, "ps", "--size", "--format", "raw") - lines = strings.Split(out, "\n") - c.Assert(lines[8], checker.HasPrefix, "size:", check.Commentf("Size should be appended on a newline")) -} - -func (s *DockerSuite) TestPsListContainersFilterNetwork(c *check.C) { - // TODO default network on Windows is not called "bridge", and creating a - // custom network fails on Windows fails with "Error response from daemon: plugin not found") - testRequires(c, DaemonIsLinux) - - // create some containers - runSleepingContainer(c, "--net=bridge", "--name=onbridgenetwork") - runSleepingContainer(c, "--net=none", "--name=onnonenetwork") - - // Filter docker ps on non existing network - out, _ := dockerCmd(c, "ps", "--filter", "network=doesnotexist") - containerOut := strings.TrimSpace(string(out)) - lines := strings.Split(containerOut, "\n") - - // skip header - lines = lines[1:] - - // ps output should have no containers - c.Assert(lines, checker.HasLen, 0) - - // Filter docker ps on network bridge - out, _ = dockerCmd(c, "ps", "--filter", "network=bridge") - containerOut = strings.TrimSpace(string(out)) - - lines = strings.Split(containerOut, "\n") - - // skip header - lines = lines[1:] - - // ps output should have only one container - c.Assert(lines, checker.HasLen, 1) - - // Making sure onbridgenetwork is on the output - c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on network\n")) - - // Filter docker ps on networks bridge and none - out, _ = dockerCmd(c, "ps", "--filter", "network=bridge", "--filter", "network=none") - containerOut = strings.TrimSpace(string(out)) - - lines = strings.Split(containerOut, "\n") - - // skip header - lines = lines[1:] - - //ps output should have both the containers - c.Assert(lines, checker.HasLen, 2) - - // Making sure onbridgenetwork and onnonenetwork is on the output - c.Assert(containerOut, checker.Contains, "onnonenetwork", check.Commentf("Missing the container on none network\n")) - c.Assert(containerOut, checker.Contains, "onbridgenetwork", check.Commentf("Missing the container on bridge network\n")) - - nwID, _ := dockerCmd(c, "network", "inspect", "--format", "{{.ID}}", "bridge") - - // Filter by network ID - out, _ = dockerCmd(c, "ps", "--filter", "network="+nwID) - containerOut = strings.TrimSpace(string(out)) - - c.Assert(containerOut, checker.Contains, "onbridgenetwork") -} - -func (s *DockerSuite) TestPsByOrder(c *check.C) { - name1 := "xyz-abc" - out, err := runSleepingContainer(c, "--name", name1) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - container1 := strings.TrimSpace(out) - - name2 := "xyz-123" - out, err = runSleepingContainer(c, "--name", name2) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - container2 := 
strings.TrimSpace(out) - - name3 := "789-abc" - out, err = runSleepingContainer(c, "--name", name3) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - - name4 := "789-123" - out, err = runSleepingContainer(c, "--name", name4) - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - - // Run multiple time should have the same result - out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) - - // Run multiple time should have the same result - out, err = dockerCmd(c, "ps", "--no-trunc", "-q", "-f", "name=xyz") - c.Assert(err, checker.NotNil) - c.Assert(strings.TrimSpace(out), checker.Equals, fmt.Sprintf("%s\n%s", container2, container1)) -} diff --git a/integration-cli/docker_cli_pull_local_test.go b/integration-cli/docker_cli_pull_local_test.go deleted file mode 100644 index bd4fb4bd2a..0000000000 --- a/integration-cli/docker_cli_pull_local_test.go +++ /dev/null @@ -1,446 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/manifest/manifestlist" - "github.com/docker/distribution/manifest/schema2" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// testPullImageWithAliases pulls a specific image tag and verifies that any aliases (i.e., other -// tags for the same image) are not also pulled down. -// -// Ref: docker/docker#8141 -func testPullImageWithAliases(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - - repos := []string{} - for _, tag := range []string{"recent", "fresh"} { - repos = append(repos, fmt.Sprintf("%v:%v", repoName, tag)) - } - - // Tag and push the same image multiple times. - for _, repo := range repos { - dockerCmd(c, "tag", "busybox", repo) - dockerCmd(c, "push", repo) - } - - // Clear local images store. - args := append([]string{"rmi"}, repos...) - dockerCmd(c, args...) - - // Pull a single tag and verify it doesn't bring down all aliases. - dockerCmd(c, "pull", repos[0]) - dockerCmd(c, "inspect", repos[0]) - for _, repo := range repos[1:] { - _, _, err := dockerCmdWithError("inspect", repo) - c.Assert(err, checker.NotNil, check.Commentf("Image %v shouldn't have been pulled down", repo)) - } -} - -func (s *DockerRegistrySuite) TestPullImageWithAliases(c *check.C) { - testPullImageWithAliases(c) -} - -func (s *DockerSchema1RegistrySuite) TestPullImageWithAliases(c *check.C) { - testPullImageWithAliases(c) -} - -// testConcurrentPullWholeRepo pulls the same repo concurrently. -func testConcurrentPullWholeRepo(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - - repos := []string{} - for _, tag := range []string{"recent", "fresh", "todays"} { - repo := fmt.Sprintf("%v:%v", repoName, tag) - _, err := buildImage(repo, fmt.Sprintf(` - FROM busybox - ENTRYPOINT ["/bin/echo"] - ENV FOO foo - ENV BAR bar - CMD echo %s - `, repo), true) - c.Assert(err, checker.IsNil) - dockerCmd(c, "push", repo) - repos = append(repos, repo) - } - - // Clear local images store. - args := append([]string{"rmi"}, repos...) - dockerCmd(c, args...) 
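// [Editor's note] Illustrative sketch, not part of the original test file. The
// concurrent pull tests below fan the pulls out to goroutines and funnel every
// error back through a channel, asserting only in the collecting loop because
// the check package is not goroutine-safe. The pattern in isolation, with a
// stand-in for the real `docker pull` invocation:
package main

import (
	"errors"
	"fmt"
)

func main() {
	jobs := []string{"repo:recent", "repo:fresh", "repo:todays"}
	results := make(chan error)

	for _, job := range jobs {
		go func(job string) {
			// Stand-in for the real work (shelling out to `docker pull`).
			if job == "repo:fresh" {
				results <- errors.New("pull failed for " + job)
				return
			}
			results <- nil
		}(job)
	}

	// Receive exactly one result per goroutine; all failure handling stays on
	// the main goroutine, mirroring the collection loops in the tests below.
	for range jobs {
		if err := <-results; err != nil {
			fmt.Println("error:", err)
		}
	}
}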
- - // Run multiple re-pulls concurrently - results := make(chan error) - numPulls := 3 - - for i := 0; i != numPulls; i++ { - go func() { - _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", "-a", repoName)) - results <- err - }() - } - - // These checks are separate from the loop above because the check - // package is not goroutine-safe. - for i := 0; i != numPulls; i++ { - err := <-results - c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) - } - - // Ensure all tags were pulled successfully - for _, repo := range repos { - dockerCmd(c, "inspect", repo) - out, _ := dockerCmd(c, "run", "--rm", repo) - c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) - } -} - -func (s *DockerRegistrySuite) TestConcurrentPullWholeRepo(c *check.C) { - testConcurrentPullWholeRepo(c) -} - -func (s *DockerSchema1RegistrySuite) TestConcurrentPullWholeRepo(c *check.C) { - testConcurrentPullWholeRepo(c) -} - -// testConcurrentFailingPull tries a concurrent pull that doesn't succeed. -func testConcurrentFailingPull(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - - // Run multiple pulls concurrently - results := make(chan error) - numPulls := 3 - - for i := 0; i != numPulls; i++ { - go func() { - _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repoName+":asdfasdf")) - results <- err - }() - } - - // These checks are separate from the loop above because the check - // package is not goroutine-safe. - for i := 0; i != numPulls; i++ { - err := <-results - c.Assert(err, checker.NotNil, check.Commentf("expected pull to fail")) - } -} - -func (s *DockerRegistrySuite) TestConcurrentFailingPull(c *check.C) { - testConcurrentFailingPull(c) -} - -func (s *DockerSchema1RegistrySuite) TestConcurrentFailingPull(c *check.C) { - testConcurrentFailingPull(c) -} - -// testConcurrentPullMultipleTags pulls multiple tags from the same repo -// concurrently. -func testConcurrentPullMultipleTags(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - - repos := []string{} - for _, tag := range []string{"recent", "fresh", "todays"} { - repo := fmt.Sprintf("%v:%v", repoName, tag) - _, err := buildImage(repo, fmt.Sprintf(` - FROM busybox - ENTRYPOINT ["/bin/echo"] - ENV FOO foo - ENV BAR bar - CMD echo %s - `, repo), true) - c.Assert(err, checker.IsNil) - dockerCmd(c, "push", repo) - repos = append(repos, repo) - } - - // Clear local images store. - args := append([]string{"rmi"}, repos...) - dockerCmd(c, args...) - - // Re-pull individual tags, in parallel - results := make(chan error) - - for _, repo := range repos { - go func(repo string) { - _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "pull", repo)) - results <- err - }(repo) - } - - // These checks are separate from the loop above because the check - // package is not goroutine-safe. 
- for range repos { - err := <-results - c.Assert(err, checker.IsNil, check.Commentf("concurrent pull failed with error: %v", err)) - } - - // Ensure all tags were pulled successfully - for _, repo := range repos { - dockerCmd(c, "inspect", repo) - out, _ := dockerCmd(c, "run", "--rm", repo) - c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) - } -} - -func (s *DockerRegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { - testConcurrentPullMultipleTags(c) -} - -func (s *DockerSchema1RegistrySuite) TestConcurrentPullMultipleTags(c *check.C) { - testConcurrentPullMultipleTags(c) -} - -// testPullIDStability verifies that pushing an image and pulling it back -// preserves the image ID. -func testPullIDStability(c *check.C) { - derivedImage := privateRegistryURL + "/dockercli/id-stability" - baseImage := "busybox" - - _, err := buildImage(derivedImage, fmt.Sprintf(` - FROM %s - ENV derived true - ENV asdf true - RUN dd if=/dev/zero of=/file bs=1024 count=1024 - CMD echo %s - `, baseImage, derivedImage), true) - if err != nil { - c.Fatal(err) - } - - originalID, err := getIDByName(derivedImage) - if err != nil { - c.Fatalf("error inspecting: %v", err) - } - dockerCmd(c, "push", derivedImage) - - // Pull - out, _ := dockerCmd(c, "pull", derivedImage) - if strings.Contains(out, "Pull complete") { - c.Fatalf("repull redownloaded a layer: %s", out) - } - - derivedIDAfterPull, err := getIDByName(derivedImage) - if err != nil { - c.Fatalf("error inspecting: %v", err) - } - - if derivedIDAfterPull != originalID { - c.Fatal("image's ID unexpectedly changed after a repush/repull") - } - - // Make sure the image runs correctly - out, _ = dockerCmd(c, "run", "--rm", derivedImage) - if strings.TrimSpace(out) != derivedImage { - c.Fatalf("expected %s; got %s", derivedImage, out) - } - - // Confirm that repushing and repulling does not change the computed ID - dockerCmd(c, "push", derivedImage) - dockerCmd(c, "rmi", derivedImage) - dockerCmd(c, "pull", derivedImage) - - derivedIDAfterPull, err = getIDByName(derivedImage) - if err != nil { - c.Fatalf("error inspecting: %v", err) - } - - if derivedIDAfterPull != originalID { - c.Fatal("image's ID unexpectedly changed after a repush/repull") - } - - // Make sure the image still runs - out, _ = dockerCmd(c, "run", "--rm", derivedImage) - if strings.TrimSpace(out) != derivedImage { - c.Fatalf("expected %s; got %s", derivedImage, out) - } -} - -func (s *DockerRegistrySuite) TestPullIDStability(c *check.C) { - testPullIDStability(c) -} - -func (s *DockerSchema1RegistrySuite) TestPullIDStability(c *check.C) { - testPullIDStability(c) -} - -// #21213 -func testPullNoLayers(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/scratch", privateRegistryURL) - - _, err := buildImage(repoName, ` - FROM scratch - ENV foo bar`, - true) - if err != nil { - c.Fatal(err) - } - - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) - dockerCmd(c, "pull", repoName) -} - -func (s *DockerRegistrySuite) TestPullNoLayers(c *check.C) { - testPullNoLayers(c) -} - -func (s *DockerSchema1RegistrySuite) TestPullNoLayers(c *check.C) { - testPullNoLayers(c) -} - -func (s *DockerRegistrySuite) TestPullManifestList(c *check.C) { - testRequires(c, NotArm) - pushDigest, err := setupImage(c) - c.Assert(err, checker.IsNil, check.Commentf("error setting up image")) - - // Inject a manifest list into the registry - manifestList := &manifestlist.ManifestList{ - Versioned: manifest.Versioned{ - 
SchemaVersion: 2, - MediaType: manifestlist.MediaTypeManifestList, - }, - Manifests: []manifestlist.ManifestDescriptor{ - { - Descriptor: distribution.Descriptor{ - Digest: "sha256:1a9ec845ee94c202b2d5da74a24f0ed2058318bfa9879fa541efaecba272e86b", - Size: 3253, - MediaType: schema2.MediaTypeManifest, - }, - Platform: manifestlist.PlatformSpec{ - Architecture: "bogus_arch", - OS: "bogus_os", - }, - }, - { - Descriptor: distribution.Descriptor{ - Digest: pushDigest, - Size: 3253, - MediaType: schema2.MediaTypeManifest, - }, - Platform: manifestlist.PlatformSpec{ - Architecture: runtime.GOARCH, - OS: runtime.GOOS, - }, - }, - }, - } - - manifestListJSON, err := json.MarshalIndent(manifestList, "", " ") - c.Assert(err, checker.IsNil, check.Commentf("error marshalling manifest list")) - - manifestListDigest := digest.FromBytes(manifestListJSON) - hexDigest := manifestListDigest.Hex() - - registryV2Path := filepath.Join(s.reg.dir, "docker", "registry", "v2") - - // Write manifest list to blob store - blobDir := filepath.Join(registryV2Path, "blobs", "sha256", hexDigest[:2], hexDigest) - err = os.MkdirAll(blobDir, 0755) - c.Assert(err, checker.IsNil, check.Commentf("error creating blob dir")) - blobPath := filepath.Join(blobDir, "data") - err = ioutil.WriteFile(blobPath, []byte(manifestListJSON), 0644) - c.Assert(err, checker.IsNil, check.Commentf("error writing manifest list")) - - // Add to revision store - revisionDir := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "revisions", "sha256", hexDigest) - err = os.Mkdir(revisionDir, 0755) - c.Assert(err, checker.IsNil, check.Commentf("error creating revision dir")) - revisionPath := filepath.Join(revisionDir, "link") - err = ioutil.WriteFile(revisionPath, []byte(manifestListDigest.String()), 0644) - c.Assert(err, checker.IsNil, check.Commentf("error writing revision link")) - - // Update tag - tagPath := filepath.Join(registryV2Path, "repositories", remoteRepoName, "_manifests", "tags", "latest", "current", "link") - err = ioutil.WriteFile(tagPath, []byte(manifestListDigest.String()), 0644) - c.Assert(err, checker.IsNil, check.Commentf("error writing tag link")) - - // Verify that the image can be pulled through the manifest list. - out, _ := dockerCmd(c, "pull", repoName) - - // The pull output includes "Digest: ", so find that - matches := digestRegex.FindStringSubmatch(out) - c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) - pullDigest := matches[1] - - // Make sure the pushed and pull digests match - c.Assert(manifestListDigest.String(), checker.Equals, pullDigest) - - // Was the image actually created? 
- dockerCmd(c, "inspect", repoName) - - dockerCmd(c, "rmi", repoName) -} - -func (s *DockerRegistryAuthHtpasswdSuite) TestPullWithExternalAuth(c *check.C) { - osPath := os.Getenv("PATH") - defer os.Setenv("PATH", osPath) - - workingDir, err := os.Getwd() - c.Assert(err, checker.IsNil) - absolute, err := filepath.Abs(filepath.Join(workingDir, "fixtures", "auth")) - c.Assert(err, checker.IsNil) - testPath := fmt.Sprintf("%s%c%s", osPath, filepath.ListSeparator, absolute) - - os.Setenv("PATH", testPath) - - repoName := fmt.Sprintf("%v/dockercli/busybox:authtest", privateRegistryURL) - - tmp, err := ioutil.TempDir("", "integration-cli-") - c.Assert(err, checker.IsNil) - - externalAuthConfig := `{ "credsStore": "shell-test" }` - - configPath := filepath.Join(tmp, "config.json") - err = ioutil.WriteFile(configPath, []byte(externalAuthConfig), 0644) - c.Assert(err, checker.IsNil) - - dockerCmd(c, "--config", tmp, "login", "-u", s.reg.username, "-p", s.reg.password, privateRegistryURL) - - b, err := ioutil.ReadFile(configPath) - c.Assert(err, checker.IsNil) - c.Assert(string(b), checker.Not(checker.Contains), "\"auth\":") - - dockerCmd(c, "--config", tmp, "tag", "busybox", repoName) - dockerCmd(c, "--config", tmp, "push", repoName) - - dockerCmd(c, "--config", tmp, "pull", repoName) -} - -// TestRunImplicitPullWithNoTag should pull implicitly only the default tag (latest) -func (s *DockerRegistrySuite) TestRunImplicitPullWithNoTag(c *check.C) { - testRequires(c, DaemonIsLinux) - repo := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - repoTag1 := fmt.Sprintf("%v:latest", repo) - repoTag2 := fmt.Sprintf("%v:t1", repo) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoTag1) - dockerCmd(c, "tag", "busybox", repoTag2) - dockerCmd(c, "push", repo) - dockerCmd(c, "rmi", repoTag1) - dockerCmd(c, "rmi", repoTag2) - - out, _, err := dockerCmdWithError("run", repo) - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, fmt.Sprintf("Unable to find image '%s:latest' locally", repo)) - - // There should be only one line for repo, the one with repo:latest - outImageCmd, _, err := dockerCmdWithError("images", repo) - splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") - c.Assert(splitOutImageCmd, checker.HasLen, 2) -} diff --git a/integration-cli/docker_cli_pull_test.go b/integration-cli/docker_cli_pull_test.go deleted file mode 100644 index c9f4ef195f..0000000000 --- a/integration-cli/docker_cli_pull_test.go +++ /dev/null @@ -1,274 +0,0 @@ -package main - -import ( - "fmt" - "regexp" - "strings" - "sync" - "time" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// TestPullFromCentralRegistry pulls an image from the central registry and verifies that the client -// prints all expected output. 
-func (s *DockerHubPullSuite) TestPullFromCentralRegistry(c *check.C) { - testRequires(c, DaemonIsLinux) - out := s.Cmd(c, "pull", "hello-world") - defer deleteImages("hello-world") - - c.Assert(out, checker.Contains, "Using default tag: latest", check.Commentf("expected the 'latest' tag to be automatically assumed")) - c.Assert(out, checker.Contains, "Pulling from library/hello-world", check.Commentf("expected the 'library/' prefix to be automatically assumed")) - c.Assert(out, checker.Contains, "Downloaded newer image for hello-world:latest") - - matches := regexp.MustCompile(`Digest: (.+)\n`).FindAllStringSubmatch(out, -1) - c.Assert(len(matches), checker.Equals, 1, check.Commentf("expected exactly one image digest in the output")) - c.Assert(len(matches[0]), checker.Equals, 2, check.Commentf("unexpected number of submatches for the digest")) - _, err := digest.ParseDigest(matches[0][1]) - c.Check(err, checker.IsNil, check.Commentf("invalid digest %q in output", matches[0][1])) - - // We should have a single entry in images. - img := strings.TrimSpace(s.Cmd(c, "images")) - splitImg := strings.Split(img, "\n") - c.Assert(splitImg, checker.HasLen, 2) - c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name)")) -} - -// TestPullNonExistingImage pulls non-existing images from the central registry, with different -// combinations of implicit tag and library prefix. -func (s *DockerHubPullSuite) TestPullNonExistingImage(c *check.C) { - testRequires(c, DaemonIsLinux) - - type entry struct { - repo string - alias string - tag string - } - - entries := []entry{ - {"library/asdfasdf", "asdfasdf", "foobar"}, - {"library/asdfasdf", "library/asdfasdf", "foobar"}, - {"library/asdfasdf", "asdfasdf", ""}, - {"library/asdfasdf", "asdfasdf", "latest"}, - {"library/asdfasdf", "library/asdfasdf", ""}, - {"library/asdfasdf", "library/asdfasdf", "latest"}, - } - - // The option field indicates "-a" or not. - type record struct { - e entry - option string - out string - err error - } - - // Execute 'docker pull' in parallel, pass results (out, err) and - // necessary information ("-a" or not, and the image name) to the channel. - var group sync.WaitGroup - recordChan := make(chan record, len(entries)*2) - for _, e := range entries { - group.Add(1) - go func(e entry) { - defer group.Done() - repoName := e.alias - if e.tag != "" { - repoName += ":" + e.tag - } - out, err := s.CmdWithError("pull", repoName) - recordChan <- record{e, "", out, err} - }(e) - if e.tag == "" { - // pull -a on a nonexistent repo should fall back as well - group.Add(1) - go func(e entry) { - defer group.Done() - out, err := s.CmdWithError("pull", "-a", e.alias) - recordChan <- record{e, "-a", out, err} - }(e) - } - } - - // Wait for completion - group.Wait() - close(recordChan) - - // Process the results (out, err). - for record := range recordChan { - if len(record.option) == 0 { - c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) - // Hub returns 401 rather than 404 for nonexistent repos over - // the v2 protocol - but we should end up falling back to v1, - // which does return a 404.
- tag := record.e.tag - if tag == "" { - tag = "latest" - } - c.Assert(record.out, checker.Contains, fmt.Sprintf("Error: image %s:%s not found", record.e.repo, tag), check.Commentf("expected image not found error messages")) - } else { - // pull -a on a nonexistent repo should fall back as well - c.Assert(record.err, checker.NotNil, check.Commentf("expected non-zero exit status when pulling non-existing image: %s", record.out)) - c.Assert(record.out, checker.Contains, fmt.Sprintf("Error: image %s not found", record.e.repo), check.Commentf("expected image not found error messages")) - c.Assert(record.out, checker.Not(checker.Contains), "unauthorized", check.Commentf(`message should not contain "unauthorized"`)) - } - } - -} - -// TestPullFromCentralRegistryImplicitRefParts pulls an image from the central registry and verifies -// that pulling the same image with different combinations of implicit elements of the image -// reference (tag, repository, central registry url, ...) doesn't trigger a new pull or lead to -// multiple images. -func (s *DockerHubPullSuite) TestPullFromCentralRegistryImplicitRefParts(c *check.C) { - testRequires(c, DaemonIsLinux) - - // Pull hello-world from v2 - pullFromV2 := func(ref string) (int, string) { - out := s.Cmd(c, "pull", ref) - v1Retries := 0 - for strings.Contains(out, "this image was pulled from a legacy registry") { - // Some network errors may cause fallbacks to the v1 - // protocol, which would violate the test's assumption - // that it will get the same images. To make the test - // more robust against these network glitches, allow a - // few retries if we end up with a v1 pull. - - if v1Retries > 2 { - c.Fatalf("too many v1 fallback incidents when pulling %s", ref) - } - - s.Cmd(c, "rmi", ref) - out = s.Cmd(c, "pull", ref) - - v1Retries++ - } - - return v1Retries, out - } - - pullFromV2("hello-world") - defer deleteImages("hello-world") - - s.Cmd(c, "tag", "hello-world", "hello-world-backup") - - for _, ref := range []string{ - "hello-world", - "hello-world:latest", - "library/hello-world", - "library/hello-world:latest", - "docker.io/library/hello-world", - "index.docker.io/library/hello-world", - } { - var out string - for { - var v1Retries int - v1Retries, out = pullFromV2(ref) - - // Keep repeating the test case until we don't hit a v1 - // fallback case. We won't get the right "Image is up - // to date" message if the local image was replaced - // with one pulled from v1. - if v1Retries == 0 { - break - } - s.Cmd(c, "rmi", ref) - s.Cmd(c, "tag", "hello-world-backup", "hello-world") - } - c.Assert(out, checker.Contains, "Image is up to date for hello-world:latest") - } - - s.Cmd(c, "rmi", "hello-world-backup") - - // We should have a single entry in images. - img := strings.TrimSpace(s.Cmd(c, "images")) - splitImg := strings.Split(img, "\n") - c.Assert(splitImg, checker.HasLen, 2) - c.Assert(splitImg[1], checker.Matches, `hello-world\s+latest.*?`, check.Commentf("invalid output for `docker images` (expected image and tag name)")) -} - -// TestPullScratchNotAllowed verifies that pulling 'scratch' is rejected.
-func (s *DockerHubPullSuite) TestPullScratchNotAllowed(c *check.C) { - testRequires(c, DaemonIsLinux) - out, err := s.CmdWithError("pull", "scratch") - c.Assert(err, checker.NotNil, check.Commentf("expected pull of scratch to fail")) - c.Assert(out, checker.Contains, "'scratch' is a reserved name") - c.Assert(out, checker.Not(checker.Contains), "Pulling repository scratch") -} - -// TestPullAllTagsFromCentralRegistry pulls using `all-tags` for a given image and verifies that it -// results in more images than a naked pull. -func (s *DockerHubPullSuite) TestPullAllTagsFromCentralRegistry(c *check.C) { - testRequires(c, DaemonIsLinux) - s.Cmd(c, "pull", "busybox") - outImageCmd := s.Cmd(c, "images", "busybox") - splitOutImageCmd := strings.Split(strings.TrimSpace(outImageCmd), "\n") - c.Assert(splitOutImageCmd, checker.HasLen, 2) - - s.Cmd(c, "pull", "--all-tags=true", "busybox") - outImageAllTagCmd := s.Cmd(c, "images", "busybox") - linesCount := strings.Count(outImageAllTagCmd, "\n") - c.Assert(linesCount, checker.GreaterThan, 2, check.Commentf("pulling all tags should provide more than two images, got %s", outImageAllTagCmd)) - - // Verify that the line for 'busybox:latest' is left unchanged. - var latestLine string - for _, line := range strings.Split(outImageAllTagCmd, "\n") { - if strings.HasPrefix(line, "busybox") && strings.Contains(line, "latest") { - latestLine = line - break - } - } - c.Assert(latestLine, checker.Not(checker.Equals), "", check.Commentf("no entry for busybox:latest found after pulling all tags")) - splitLatest := strings.Fields(latestLine) - splitCurrent := strings.Fields(splitOutImageCmd[1]) - - // Clear relative creation times, since these can easily change between - // two invocations of "docker images". Without this, the test can fail - // like this: - // ... obtained []string = []string{"busybox", "latest", "d9551b4026f0", "27", "minutes", "ago", "1.113", "MB"} - // ... expected []string = []string{"busybox", "latest", "d9551b4026f0", "26", "minutes", "ago", "1.113", "MB"} - splitLatest[3] = "" - splitLatest[4] = "" - splitLatest[5] = "" - splitCurrent[3] = "" - splitCurrent[4] = "" - splitCurrent[5] = "" - - c.Assert(splitLatest, checker.DeepEquals, splitCurrent, check.Commentf("busybox:latest was changed after pulling all tags")) -} - -// TestPullClientDisconnect kills the client during a pull operation and verifies that the operation -// gets cancelled. -// -// Ref: docker/docker#15589 -func (s *DockerHubPullSuite) TestPullClientDisconnect(c *check.C) { - testRequires(c, DaemonIsLinux) - repoName := "hello-world:latest" - - pullCmd := s.MakeCmd("pull", repoName) - stdout, err := pullCmd.StdoutPipe() - c.Assert(err, checker.IsNil) - err = pullCmd.Start() - c.Assert(err, checker.IsNil) - - // Cancel as soon as we get some output. 
- buf := make([]byte, 10) - _, err = stdout.Read(buf) - c.Assert(err, checker.IsNil) - - err = pullCmd.Process.Kill() - c.Assert(err, checker.IsNil) - - time.Sleep(2 * time.Second) - _, err = s.CmdWithError("inspect", repoName) - c.Assert(err, checker.NotNil, check.Commentf("image was pulled after client disconnected")) -} - -func (s *DockerRegistryAuthHtpasswdSuite) TestPullNoCredentialsNotFound(c *check.C) { - // we don't care about the actual image; we just want to see "image not found", - // because that means the v2 call returned 401 and we fell back to v1, which usually - // gives a 404 (in this case the test registry doesn't handle v1 at all) - out, _, err := dockerCmdWithError("pull", privateRegistryURL+"/busybox") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Error: image busybox:latest not found") -} diff --git a/integration-cli/docker_cli_pull_trusted_test.go b/integration-cli/docker_cli_pull_trusted_test.go deleted file mode 100644 index 6bc38e699f..0000000000 --- a/integration-cli/docker_cli_pull_trusted_test.go +++ /dev/null @@ -1,365 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "os/exec" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerTrustSuite) TestTrustedPull(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-pull") - - // Try pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) - - dockerCmd(c, "rmi", repoName) - // Try untrusted pull to ensure we pushed the tag to the registry - pullCmd = exec.Command(dockerBinary, "pull", "--disable-content-trust=true", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) - -} - -func (s *DockerTrustSuite) TestTrustedIsolatedPull(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-isolated-pull") - - // Try pull (run from isolated directory without trust information) - pullCmd := exec.Command(dockerBinary, "--config", "/tmp/docker-isolated", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(string(out))) - - dockerCmd(c, "rmi", repoName) -} - -func (s *DockerTrustSuite) TestUntrustedPull(c *check.C) { - repoName := fmt.Sprintf("%v/dockercliuntrusted/pulltest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) - - // Try trusted pull on untrusted tag - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Error: remote trust data does not exist", check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestPullWhenCertExpired(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := s.setupTrustedImage(c, "trusted-cert-expired") - - // Certificates are valid for 10 years - elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) - -
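The two closures that follow run with the system clock shifted via the suite's runAtDifferentDate helper. As a rough sketch of how such a helper can be built (an assumption for illustration, not the suite's actual implementation: it shells out to date(1), so it needs root and a Linux host):

package main

import (
	"os/exec"
	"time"
)

// runAtDifferentDateSketch moves the system clock to date, runs block,
// then makes a best-effort attempt to restore the current time.
func runAtDifferentDateSketch(date time.Time, block func()) {
	// MMDDhhmmYYYY, the set-date format accepted by date(1).
	const layout = "010215042006"
	now := time.Now().Format(layout)
	defer exec.Command("date", now).Run() // errors deliberately ignored
	exec.Command("date", date.Format(layout)).Run()
	block()
}

This is also why these tests carry c.Skip("Currently changes system time, causing instability"): the clock change is global to the host.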
runAtDifferentDate(elevenYearsFromNow, func() { - // Try pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "could not validate the path to a trusted root", check.Commentf(out)) - }) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try pull - pullCmd := exec.Command(dockerBinary, "pull", "--disable-content-trust", repoName) - s.trustedCmd(pullCmd) - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Downloaded", check.Commentf(out)) - }) -} - -func (s *DockerTrustSuite) TestTrustedPullFromBadTrustServer(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclievilpull/trusted:latest", privateRegistryURL) - evilLocalConfigDir, err := ioutil.TempDir("", "evil-local-config-dir") - if err != nil { - c.Fatalf("Failed to create local temp dir") - } - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) - dockerCmd(c, "rmi", repoName) - - // Try pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) - dockerCmd(c, "rmi", repoName) - - // Kill the notary server, start a new "evil" one. - s.not.Close() - s.not, err = newTestNotary(c) - - c.Assert(err, check.IsNil, check.Commentf("Restarting notary server failed.")) - - // In order to make an evil server, let's re-init a client (with a different trust dir) and push new data. - // tag an image and upload it to the private registry - dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) - - // Push up to the new server - pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) - - // Now, try pulling with the original client from this new trust server. This should fall back to cached metadata.
- pullCmd = exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - if err != nil { - c.Fatalf("Error falling back to cached trust data: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Error while downloading remote metadata, using cached timestamp") { - c.Fatalf("Missing expected output on trusted pull:\n%s", out) - } -} - -func (s *DockerTrustSuite) TestTrustedPullWithExpiredSnapshot(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppull/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Signing and pushing trust metadata", check.Commentf(out)) - - dockerCmd(c, "rmi", repoName) - - // Snapshots last for three years. This one should have expired by then. - fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) - - runAtDifferentDate(fourYearsLater, func() { - // Try pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - - c.Assert(err, check.NotNil, check.Commentf("Missing expected error running trusted pull with expired snapshots")) - c.Assert(string(out), checker.Contains, "repository out-of-date", check.Commentf(out)) - }) -} - -func (s *DockerTrustSuite) TestTrustedOfflinePull(c *check.C) { - repoName := s.setupTrustedImage(c, "trusted-offline-pull") - - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") - out, _, err := runCommandWithOutput(pullCmd) - - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "error contacting notary server", check.Commentf(out)) - // Do valid trusted pull to warm cache - pullCmd = exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) - - dockerCmd(c, "rmi", repoName) - - // Try pull again with invalid notary server, should use cache - pullCmd = exec.Command(dockerBinary, "pull", repoName) - s.trustedCmdWithServer(pullCmd, "https://invalidnotaryserver") - out, _, err = runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Tagging", check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPullDelete(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, "trusted-pull-delete") - // tag the image and upload it to the private registry - _, err := buildImage(repoName, ` - FROM busybox - CMD echo trustedpulldelete - `, true) - c.Assert(err, check.IsNil, check.Commentf("Error building trusted-pull-delete image")) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - if err != nil { - c.Fatalf("Error running trusted push: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Signing and pushing trust metadata") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) - } - - if out, status := dockerCmd(c, "rmi", repoName); status != 0 { - c.Fatalf("Error removing image %q\n%s", repoName, out) - } - -
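The pull flow below extracts the image digest from the command output with digestRegex, a pattern defined elsewhere in the suite. A regexp of roughly this shape (an assumed stand-in, named digestRegexSketch to avoid claiming it is the suite's exact pattern) satisfies the two-submatch HasLen assertion used throughout these tests:

package main

import "regexp"

// Matches the "Digest: sha256:<64 hex chars>" line printed by docker pull.
var digestRegexSketch = regexp.MustCompile(`Digest: (sha256:[a-f0-9]{64})`)

// parsePullDigest mirrors the FindStringSubmatch pattern used below:
// submatch 0 is the whole match, submatch 1 the digest itself.
func parsePullDigest(out string) (string, bool) {
	m := digestRegexSketch.FindStringSubmatch(out)
	if len(m) != 2 {
		return "", false
	}
	return m[1], true
}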
// Try pull - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - - c.Assert(err, check.IsNil, check.Commentf(out)) - - matches := digestRegex.FindStringSubmatch(out) - c.Assert(matches, checker.HasLen, 2, check.Commentf("unable to parse digest from pull output: %s", out)) - pullDigest := matches[1] - - imageID := inspectField(c, repoName, "Id") - - imageByDigest := repoName + "@" + pullDigest - byDigestID := inspectField(c, imageByDigest, "Id") - - c.Assert(byDigestID, checker.Equals, imageID) - - // rmi of tag should also remove the digest reference - dockerCmd(c, "rmi", repoName) - - _, err = inspectFieldWithError(imageByDigest, "Id") - c.Assert(err, checker.NotNil, check.Commentf("digest reference should have been removed")) - - _, err = inspectFieldWithError(imageID, "Id") - c.Assert(err, checker.NotNil, check.Commentf("image should have been deleted")) -} - -func (s *DockerTrustSuite) TestTrustedPullReadsFromReleasesRole(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclireleasesdelegationpulling/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - - // Push with targets first, initializing the repo - dockerCmd(c, "tag", "busybox", targetName) - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - s.assertTargetInRoles(c, repoName, "latest", "targets") - - // Try pull, check we retrieve from targets role - pullCmd := exec.Command(dockerBinary, "-D", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "retrieving target for targets role") - - // Now we'll create the releases role, and try pushing and pulling - s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // try a pull, check that we can still pull because we can still read the - // old tag in the targets role - pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "retrieving target for targets role") - - // try a pull -a, check that it succeeds because we can still pull from the - // targets role - pullCmd = exec.Command(dockerBinary, "-D", "pull", "-a", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - - // Push, should sign with targets/releases - dockerCmd(c, "tag", "busybox", targetName) - pushCmd = exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases") - - // Try pull, check we retrieve from targets/releases role - pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(out, checker.Contains, "retrieving target for targets/releases role") - - // Create another delegation that we'll sign with - s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[1].Public) - s.notaryImportKey(c, repoName, "targets/other", s.not.keys[1].Private) - 
s.notaryPublish(c, repoName) - - dockerCmd(c, "tag", "busybox", targetName) - pushCmd = exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - s.assertTargetInRoles(c, repoName, "latest", "targets", "targets/releases", "targets/other") - - // Try pull, check we retrieve from targets/releases role - pullCmd = exec.Command(dockerBinary, "-D", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(out, checker.Contains, "retrieving target for targets/releases role") -} - -func (s *DockerTrustSuite) TestTrustedPullIgnoresOtherDelegationRoles(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclipullotherdelegation/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - - // We'll create a repo first with a non-release delegation role, so that when we - // push we'll sign it into the delegation role - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/other", s.not.keys[0].Public) - s.notaryImportKey(c, repoName, "targets/other", s.not.keys[0].Private) - s.notaryPublish(c, repoName) - - // Push should write to the delegation role, not targets - dockerCmd(c, "tag", "busybox", targetName) - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - s.assertTargetInRoles(c, repoName, "latest", "targets/other") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull - we should fail, since pull will only pull from the targets/releases - // role or the targets role - pullCmd := exec.Command(dockerBinary, "-D", "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "No trust data for") - - // try a pull -a: we should fail since pull will only pull from the targets/releases - // role or the targets role - pullCmd = exec.Command(dockerBinary, "-D", "pull", "-a", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "No trusted tags for") -} diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go deleted file mode 100644 index f9d53449fb..0000000000 --- a/integration-cli/docker_cli_push_test.go +++ /dev/null @@ -1,702 +0,0 @@ -package main - -import ( - "archive/tar" - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - "github.com/docker/distribution/reference" - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// Pushing an image to a private registry. 
-func testPushBusyboxImage(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - // tag the image to upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - // push the image to the registry - dockerCmd(c, "push", repoName) -} - -func (s *DockerRegistrySuite) TestPushBusyboxImage(c *check.C) { - testPushBusyboxImage(c) -} - -func (s *DockerSchema1RegistrySuite) TestPushBusyboxImage(c *check.C) { - testPushBusyboxImage(c) -} - -// pushing an image without a prefix should throw an error -func (s *DockerSuite) TestPushUnprefixedRepo(c *check.C) { - out, _, err := dockerCmdWithError("push", "busybox") - c.Assert(err, check.NotNil, check.Commentf("pushing an unprefixed repo didn't result in a non-zero exit status: %s", out)) -} - -func testPushUntagged(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - expected := "An image does not exist locally with the tag" - - out, _, err := dockerCmdWithError("push", repoName) - c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) - c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) -} - -func (s *DockerRegistrySuite) TestPushUntagged(c *check.C) { - testPushUntagged(c) -} - -func (s *DockerSchema1RegistrySuite) TestPushUntagged(c *check.C) { - testPushUntagged(c) -} - -func testPushBadTag(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/busybox:latest", privateRegistryURL) - expected := "does not exist" - - out, _, err := dockerCmdWithError("push", repoName) - c.Assert(err, check.NotNil, check.Commentf("pushing the image to the private registry should have failed: output %q", out)) - c.Assert(out, checker.Contains, expected, check.Commentf("pushing the image failed")) -} - -func (s *DockerRegistrySuite) TestPushBadTag(c *check.C) { - testPushBadTag(c) -} - -func (s *DockerSchema1RegistrySuite) TestPushBadTag(c *check.C) { - testPushBadTag(c) -} - -func testPushMultipleTags(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - repoTag1 := fmt.Sprintf("%v/dockercli/busybox:t1", privateRegistryURL) - repoTag2 := fmt.Sprintf("%v/dockercli/busybox:t2", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoTag1) - - dockerCmd(c, "tag", "busybox", repoTag2) - - dockerCmd(c, "push", repoName) - - // Ensure layer list is equivalent for repoTag1 and repoTag2 - out1, _ := dockerCmd(c, "pull", repoTag1) - - imageAlreadyExists := ": Image already exists" - var out1Lines []string - for _, outputLine := range strings.Split(out1, "\n") { - if strings.Contains(outputLine, imageAlreadyExists) { - out1Lines = append(out1Lines, outputLine) - } - } - - out2, _ := dockerCmd(c, "pull", repoTag2) - - var out2Lines []string - for _, outputLine := range strings.Split(out2, "\n") { - if strings.Contains(outputLine, imageAlreadyExists) { - out2Lines = append(out2Lines, outputLine) - } - } - c.Assert(out2Lines, checker.HasLen, len(out1Lines)) - - for i := range out1Lines { - c.Assert(out1Lines[i], checker.Equals, out2Lines[i]) - } -} - -func (s *DockerRegistrySuite) TestPushMultipleTags(c *check.C) { - testPushMultipleTags(c) -} - -func (s *DockerSchema1RegistrySuite) TestPushMultipleTags(c *check.C) { - testPushMultipleTags(c) -} - -func testPushEmptyLayer(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/emptylayer", privateRegistryURL) - emptyTarball, err := ioutil.TempFile("",
"empty_tarball") - c.Assert(err, check.IsNil, check.Commentf("Unable to create test file")) - - tw := tar.NewWriter(emptyTarball) - err = tw.Close() - c.Assert(err, check.IsNil, check.Commentf("Error creating empty tarball")) - - freader, err := os.Open(emptyTarball.Name()) - c.Assert(err, check.IsNil, check.Commentf("Could not open test tarball")) - - importCmd := exec.Command(dockerBinary, "import", "-", repoName) - importCmd.Stdin = freader - out, _, err := runCommandWithOutput(importCmd) - c.Assert(err, check.IsNil, check.Commentf("import failed: %q", out)) - - // Now verify we can push it - out, _, err = dockerCmdWithError("push", repoName) - c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out)) -} - -func (s *DockerRegistrySuite) TestPushEmptyLayer(c *check.C) { - testPushEmptyLayer(c) -} - -func (s *DockerSchema1RegistrySuite) TestPushEmptyLayer(c *check.C) { - testPushEmptyLayer(c) -} - -// testConcurrentPush pushes multiple tags to the same repo -// concurrently. -func testConcurrentPush(c *check.C) { - repoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - - repos := []string{} - for _, tag := range []string{"push1", "push2", "push3"} { - repo := fmt.Sprintf("%v:%v", repoName, tag) - _, err := buildImage(repo, fmt.Sprintf(` - FROM busybox - ENTRYPOINT ["/bin/echo"] - ENV FOO foo - ENV BAR bar - CMD echo %s -`, repo), true) - c.Assert(err, checker.IsNil) - repos = append(repos, repo) - } - - // Push tags, in parallel - results := make(chan error) - - for _, repo := range repos { - go func(repo string) { - _, _, err := runCommandWithOutput(exec.Command(dockerBinary, "push", repo)) - results <- err - }(repo) - } - - for range repos { - err := <-results - c.Assert(err, checker.IsNil, check.Commentf("concurrent push failed with error: %v", err)) - } - - // Clear local images store. - args := append([]string{"rmi"}, repos...) - dockerCmd(c, args...) 
- - // Re-pull and run individual tags, to make sure pushes succeeded - for _, repo := range repos { - dockerCmd(c, "pull", repo) - dockerCmd(c, "inspect", repo) - out, _ := dockerCmd(c, "run", "--rm", repo) - c.Assert(strings.TrimSpace(out), checker.Equals, "/bin/sh -c echo "+repo) - } -} - -func (s *DockerRegistrySuite) TestConcurrentPush(c *check.C) { - testConcurrentPush(c) -} - -func (s *DockerSchema1RegistrySuite) TestConcurrentPush(c *check.C) { - testConcurrentPush(c) -} - -func (s *DockerRegistrySuite) TestCrossRepositoryLayerPush(c *check.C) { - sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - // tag the image to upload it to the private registry - dockerCmd(c, "tag", "busybox", sourceRepoName) - // push the image to the registry - out1, _, err := dockerCmdWithError("push", sourceRepoName) - c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) - // ensure that none of the layers were mounted from another repository during push - c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) - - digest1 := reference.DigestRegexp.FindString(out1) - c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) - - destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) - // retag the image to upload the same layers to another repo in the same registry - dockerCmd(c, "tag", "busybox", destRepoName) - // push the image to the registry - out2, _, err := dockerCmdWithError("push", destRepoName) - c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) - // ensure that layers were mounted from the first repo during push - c.Assert(strings.Contains(out2, "Mounted from dockercli/busybox"), check.Equals, true) - - digest2 := reference.DigestRegexp.FindString(out2) - c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) - c.Assert(digest1, check.Equals, digest2) - - // ensure that pushing again produces the same digest - out3, _, err := dockerCmdWithError("push", destRepoName) - c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out3)) - - digest3 := reference.DigestRegexp.FindString(out3) - c.Assert(len(digest3), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) - c.Assert(digest3, check.Equals, digest2) - - // ensure that we can pull and run the cross-repo-pushed repository - dockerCmd(c, "rmi", destRepoName) - dockerCmd(c, "pull", destRepoName) - out4, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") - c.Assert(out4, check.Equals, "hello world") -} - -func (s *DockerSchema1RegistrySuite) TestCrossRepositoryLayerPushNotSupported(c *check.C) { - sourceRepoName := fmt.Sprintf("%v/dockercli/busybox", privateRegistryURL) - // tag the image to upload it to the private registry - dockerCmd(c, "tag", "busybox", sourceRepoName) - // push the image to the registry - out1, _, err := dockerCmdWithError("push", sourceRepoName) - c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out1)) - // ensure that none of the layers were mounted from another repository during push - c.Assert(strings.Contains(out1, "Mounted from"), check.Equals, false) - - digest1 := reference.DigestRegexp.FindString(out1) - c.Assert(len(digest1), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) - -
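The "Mounted from" lines these two tests grep for correspond to a single exchange in the registry HTTP API: before uploading a layer, the client may ask the registry to cross-mount an existing blob from another repository. A sketch of that request per the distribution spec (the helper name and the bare http.Post call are illustrative; a real client goes through an authenticated transport):

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

// tryMountBlob asks the registry to mount a blob into targetRepo from
// sourceRepo. Per the spec, 201 Created means the mount succeeded and no
// upload is needed; 202 Accepted means the registry declined and opened a
// regular upload session instead.
func tryMountBlob(registry, targetRepo, sourceRepo, digest string) (bool, error) {
	u := fmt.Sprintf("http://%s/v2/%s/blobs/uploads/?mount=%s&from=%s",
		registry, targetRepo, url.QueryEscape(digest), url.QueryEscape(sourceRepo))
	resp, err := http.Post(u, "", nil)
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusCreated, nil
}

A schema1-only registry never answers 201 here, which is why the schema1 variant asserts that no "Mounted from" line appears and that the two pushes produce different digests.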
destRepoName := fmt.Sprintf("%v/dockercli/crossrepopush", privateRegistryURL) - // retag the image to upload the same layers to another repo in the same registry - dockerCmd(c, "tag", "busybox", destRepoName) - // push the image to the registry - out2, _, err := dockerCmdWithError("push", destRepoName) - c.Assert(err, check.IsNil, check.Commentf("pushing the image to the private registry has failed: %s", out2)) - // schema1 registry should not support cross-repo layer mounts, so ensure that this does not happen - c.Assert(strings.Contains(out2, "Mounted from"), check.Equals, false) - - digest2 := reference.DigestRegexp.FindString(out2) - c.Assert(len(digest2), checker.GreaterThan, 0, check.Commentf("no digest found for pushed manifest")) - c.Assert(digest1, check.Not(check.Equals), digest2) - - // ensure that we can pull and run the second pushed repository - dockerCmd(c, "rmi", destRepoName) - dockerCmd(c, "pull", destRepoName) - out3, _ := dockerCmd(c, "run", destRepoName, "echo", "-n", "hello world") - c.Assert(out3, check.Equals, "hello world") -} - -func (s *DockerTrustSuite) TestTrustedPush(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitrusted/pushtest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) - - // Try pull after push - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) - - // Assert that we rotated the snapshot key to the server by checking our local keystore - contents, err := ioutil.ReadDir(filepath.Join(cliconfig.ConfigDir(), "trust/private/tuf_keys", privateRegistryURL, "dockerclitrusted/pushtest")) - c.Assert(err, check.IsNil, check.Commentf("Unable to read local tuf key files")) - // Check that we only have 1 key (targets key) - c.Assert(contents, checker.HasLen, 1) -} - -func (s *DockerTrustSuite) TestTrustedPushWithEnvPasswords(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclienv/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmdWithPassphrases(pushCmd, "12345678", "12345678") - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) - - // Try pull after push - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPushWithFailingServer(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitrusted/failingserver:latest", privateRegistryURL) - // tag the image and upload it to the private 
registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - // Using a name that doesn't resolve to an address makes this test faster - s.trustedCmdWithServer(pushCmd, "https://server.invalid:81/") - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.NotNil, check.Commentf("Missing error while running trusted push w/ no server")) - c.Assert(out, checker.Contains, "error contacting notary server", check.Commentf("Missing expected output on trusted push")) -} - -func (s *DockerTrustSuite) TestTrustedPushWithoutServerAndUntrusted(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitrusted/trustedandnot:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", "--disable-content-trust", repoName) - // Using a name that doesn't resolve to an address makes this test faster - s.trustedCmdWithServer(pushCmd, "https://server.invalid") - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push with no server and --disable-content-trust failed: %s\n%s", err, out)) - c.Assert(out, check.Not(checker.Contains), "Error establishing connection to notary repository", check.Commentf("Missing expected output on trusted push with --disable-content-trust:")) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExistingTag(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclitag/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - - // Try pull after push - pullCmd := exec.Command(dockerBinary, "pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExistingSignedTag(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclipushpush/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Do a trusted push - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - - // Do another trusted push - pushCmd = exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - - dockerCmd(c, "rmi", repoName) - - // Try pull to ensure the double push did not break our ability to pull - pullCmd := exec.Command(dockerBinary, 
"pull", repoName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf("Error running trusted pull: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Status: Downloaded", check.Commentf("Missing expected output on trusted pull with --disable-content-trust")) - -} - -func (s *DockerTrustSuite) TestTrustedPushWithIncorrectPassphraseForNonRoot(c *check.C) { - repoName := fmt.Sprintf("%v/dockercliincorretpwd/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push:\n%s", out)) - - // Push with wrong passphrases - pushCmd = exec.Command(dockerBinary, "push", repoName) - s.trustedCmdWithPassphrases(pushCmd, "12345678", "87654321") - out, _, err = runCommandWithOutput(pushCmd) - c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with short targets passphrase: \n%s", out)) - c.Assert(out, checker.Contains, "could not find necessary signing keys", check.Commentf("Missing expected output on trusted push with short targets/snapsnot passphrase")) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExpiredSnapshot(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := fmt.Sprintf("%v/dockercliexpiredsnapshot/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) - - // Snapshots last for three years. 
This one should have expired by then. - fourYearsLater := time.Now().Add(time.Hour * 24 * 365 * 4) - - runAtDifferentDate(fourYearsLater, func() { - // Push with wrong passphrases - pushCmd = exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - c.Assert(err, check.NotNil, check.Commentf("Error missing from trusted push with expired snapshot: \n%s", out)) - c.Assert(out, checker.Contains, "repository out-of-date", check.Commentf("Missing expected output on trusted push with expired snapshot")) - }) -} - -func (s *DockerTrustSuite) TestTrustedPushWithExpiredTimestamp(c *check.C) { - c.Skip("Currently changes system time, causing instability") - repoName := fmt.Sprintf("%v/dockercliexpiredtimestamppush/trusted:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - // Push with default passphrases - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push")) - - // The timestamps expire in two weeks. Let's check three weeks later - threeWeeksLater := time.Now().Add(time.Hour * 24 * 21) - - // Should succeed because the server transparently re-signs the timestamp - runAtDifferentDate(threeWeeksLater, func() { - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("Error running trusted push: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with expired timestamp")) - }) -} - -func (s *DockerTrustSuite) TestTrustedPushWithReleasesDelegationOnly(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclireleasedelegationinitfirst/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/releases", s.not.keys[0].Public) - s.notaryPublish(c, repoName) - - s.notaryImportKey(c, repoName, "targets/releases", s.not.keys[0].Private) - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) - - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - // check to make sure that the target has been added to targets/releases and not targets - s.assertTargetInRoles(c, repoName, "latest", "targets/releases") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull after push - os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) - - pullCmd := exec.Command(dockerBinary, "pull", targetName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - c.Assert(string(out), checker.Contains, "Status: Image is up to date", check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPushSignsAllFirstLevelRolesWeHaveKeysFor(c *check.C) { -
testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclimanyroles/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) - s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public) - s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public) - - // import everything except the third key - s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) - s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) - - s.notaryCreateDelegation(c, repoName, "targets/role1/subrole", s.not.keys[3].Public) - s.notaryImportKey(c, repoName, "targets/role1/subrole", s.not.keys[3].Private) - - s.notaryPublish(c, repoName) - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) - - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - - // check to make sure that the target has been added to targets/role1 and targets/role2, and - // not targets (because there are delegations) or targets/role3 (due to missing key) or - // targets/role1/subrole (due to it being a second level delegation) - s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role2") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull after push - os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) - - // pull should fail because none of these are the releases role - pullCmd := exec.Command(dockerBinary, "pull", targetName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.NotNil, check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPushSignsForRolesWithKeysAndValidPaths(c *check.C) { - repoName := fmt.Sprintf("%v/dockerclirolesbykeysandpaths/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public, "l", "z") - s.notaryCreateDelegation(c, repoName, "targets/role2", s.not.keys[1].Public, "x", "y") - s.notaryCreateDelegation(c, repoName, "targets/role3", s.not.keys[2].Public, "latest") - s.notaryCreateDelegation(c, repoName, "targets/role4", s.not.keys[3].Public, "latest") - - // import everything except the third key - s.notaryImportKey(c, repoName, "targets/role1", s.not.keys[0].Private) - s.notaryImportKey(c, repoName, "targets/role2", s.not.keys[1].Private) - s.notaryImportKey(c, repoName, "targets/role4", s.not.keys[3].Private) - - s.notaryPublish(c, repoName) - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) - - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.IsNil, check.Commentf("trusted push failed: %s\n%s", err, out)) - c.Assert(out, checker.Contains, "Signing and pushing trust metadata", check.Commentf("Missing expected output on trusted push with existing tag")) - - // check to make sure that the target has been added to targets/role1 and targets/role4, and - // not targets (because 
there are delegations) or targets/role2 (due to path restrictions) or - targets/role3 (due to missing key) - s.assertTargetInRoles(c, repoName, "latest", "targets/role1", "targets/role4") - s.assertTargetNotInRoles(c, repoName, "latest", "targets") - - // Try pull after push - os.RemoveAll(filepath.Join(cliconfig.ConfigDir(), "trust")) - - // pull should fail because none of these are the releases role - pullCmd := exec.Command(dockerBinary, "pull", targetName) - s.trustedCmd(pullCmd) - out, _, err = runCommandWithOutput(pullCmd) - c.Assert(err, check.NotNil, check.Commentf(out)) -} - -func (s *DockerTrustSuite) TestTrustedPushDoesntSignTargetsIfDelegationsExist(c *check.C) { - testRequires(c, NotaryHosting) - repoName := fmt.Sprintf("%v/dockerclireleasedelegationnotsignable/trusted", privateRegistryURL) - targetName := fmt.Sprintf("%s:latest", repoName) - s.notaryInitRepo(c, repoName) - s.notaryCreateDelegation(c, repoName, "targets/role1", s.not.keys[0].Public) - s.notaryPublish(c, repoName) - - // do not import any delegations key - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", targetName) - - pushCmd := exec.Command(dockerBinary, "push", targetName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - c.Assert(err, check.NotNil, check.Commentf("trusted push succeeded but should have failed:\n%s", out)) - c.Assert(out, checker.Contains, "no valid signing keys", - check.Commentf("Missing expected output on trusted push without keys")) - - s.assertTargetNotInRoles(c, repoName, "latest", "targets", "targets/role1") -} - -func (s *DockerRegistryAuthHtpasswdSuite) TestPushNoCredentialsNoRetry(c *check.C) { - repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) - dockerCmd(c, "tag", "busybox", repoName) - out, _, err := dockerCmdWithError("push", repoName) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, check.Not(checker.Contains), "Retrying") - c.Assert(out, checker.Contains, "no basic auth credentials") -} - -// This may be flaky, but it is needed to avoid regressing on unauthorized push; see #21054 -func (s *DockerSuite) TestPushToCentralRegistryUnauthorized(c *check.C) { - testRequires(c, Network) - repoName := "test/busybox" - dockerCmd(c, "tag", "busybox", repoName) - out, _, err := dockerCmdWithError("push", repoName) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, check.Not(checker.Contains), "Retrying") -} - -func getTestTokenService(status int, body string) *httptest.Server { - return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(status) - w.Write([]byte(body)) - })) -} - -func (s *DockerRegistryAuthTokenSuite) TestPushTokenServiceUnauthResponse(c *check.C) { - ts := getTestTokenService(http.StatusUnauthorized, `{"errors": [{"Code":"UNAUTHORIZED", "message": "a message", "detail": null}]}`) - defer ts.Close() - s.setupRegistryWithTokenService(c, ts.URL) - repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) - dockerCmd(c, "tag", "busybox", repoName) - out, _, err := dockerCmdWithError("push", repoName) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Not(checker.Contains), "Retrying") - c.Assert(out, checker.Contains, "unauthorized: a message") -} - -func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnauthorized(c *check.C) { - ts := getTestTokenService(http.StatusUnauthorized, `{"error": "unauthorized"}`) - defer
ts.Close() - s.setupRegistryWithTokenService(c, ts.URL) - repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) - dockerCmd(c, "tag", "busybox", repoName) - out, _, err := dockerCmdWithError("push", repoName) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Not(checker.Contains), "Retrying") - split := strings.Split(out, "\n") - c.Assert(split[len(split)-2], check.Equals, "unauthorized: authentication required") -} - -func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseError(c *check.C) { - ts := getTestTokenService(http.StatusInternalServerError, `{"error": "unexpected"}`) - defer ts.Close() - s.setupRegistryWithTokenService(c, ts.URL) - repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) - dockerCmd(c, "tag", "busybox", repoName) - out, _, err := dockerCmdWithError("push", repoName) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Retrying") - split := strings.Split(out, "\n") - c.Assert(split[len(split)-2], check.Equals, "received unexpected HTTP status: 500 Internal Server Error") -} - -func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseUnparsable(c *check.C) { - ts := getTestTokenService(http.StatusForbidden, `no way`) - defer ts.Close() - s.setupRegistryWithTokenService(c, ts.URL) - repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) - dockerCmd(c, "tag", "busybox", repoName) - out, _, err := dockerCmdWithError("push", repoName) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Not(checker.Contains), "Retrying") - split := strings.Split(out, "\n") - c.Assert(split[len(split)-2], checker.Contains, "error parsing HTTP 403 response body: ") -} - -func (s *DockerRegistryAuthTokenSuite) TestPushMisconfiguredTokenServiceResponseNoToken(c *check.C) { - ts := getTestTokenService(http.StatusOK, `{"something": "wrong"}`) - defer ts.Close() - s.setupRegistryWithTokenService(c, ts.URL) - repoName := fmt.Sprintf("%s/busybox", privateRegistryURL) - dockerCmd(c, "tag", "busybox", repoName) - out, _, err := dockerCmdWithError("push", repoName) - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Not(checker.Contains), "Retrying") - split := strings.Split(out, "\n") - c.Assert(split[len(split)-2], check.Equals, "authorization server did not include a token in the response") -} diff --git a/integration-cli/docker_cli_registry_user_agent_test.go b/integration-cli/docker_cli_registry_user_agent_test.go deleted file mode 100644 index 67a950cabd..0000000000 --- a/integration-cli/docker_cli_registry_user_agent_test.go +++ /dev/null @@ -1,120 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - "regexp" - - "github.com/go-check/check" -) - -// unescapeBackslashSemicolonParens unescapes \;() -func unescapeBackslashSemicolonParens(s string) string { - re := regexp.MustCompile(`\\;`) - ret := re.ReplaceAll([]byte(s), []byte(";")) - - re = regexp.MustCompile(`\\\(`) - ret = re.ReplaceAll([]byte(ret), []byte("(")) - - re = regexp.MustCompile(`\\\)`) - ret = re.ReplaceAll([]byte(ret), []byte(")")) - - re = regexp.MustCompile(`\\\\`) - ret = re.ReplaceAll([]byte(ret), []byte(`\`)) - - return string(ret) -} - -func regexpCheckUA(c *check.C, ua string) { - re := regexp.MustCompile("(?P<dockerUA>.+) UpstreamClient(?P<upstreamUA>.+)") - substrArr := re.FindStringSubmatch(ua) - - c.Assert(substrArr, check.HasLen, 3, check.Commentf("Expected 'UpstreamClient()' with upstream client UA")) - dockerUA := substrArr[1] - upstreamUAEscaped := substrArr[2] - - //
check dockerUA looks correct - reDockerUA := regexp.MustCompile("^docker/[0-9A-Za-z+]") - bMatchDockerUA := reDockerUA.MatchString(dockerUA) - c.Assert(bMatchDockerUA, check.Equals, true, check.Commentf("Docker Engine User-Agent malformed")) - - // check upstreamUA looks correct - // Expecting something like: (Docker-Client/1.11.0-dev (linux)) - upstreamUA := unescapeBackslashSemicolonParens(upstreamUAEscaped) - reUpstreamUA := regexp.MustCompile("^\\(Docker-Client/[0-9A-Za-z+]") - bMatchUpstreamUA := reUpstreamUA.MatchString(upstreamUA) - c.Assert(bMatchUpstreamUA, check.Equals, true, check.Commentf("(Upstream) Docker Client User-Agent malformed")) -} - -func registerUserAgentHandler(reg *testRegistry, result *string) { - reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(404) - var ua string - for k, v := range r.Header { - if k == "User-Agent" { - ua = v[0] - } - } - *result = ua - }) -} - -// TestUserAgentPassThrough verifies that when the daemon talks to a registry -// (build, login, pull, push), the registry should see a User-Agent string of the form -// [docker engine UA] UpstreamClient([client UA]) -func (s *DockerRegistrySuite) TestUserAgentPassThrough(c *check.C) { - var ( - buildUA string - pullUA string - pushUA string - loginUA string - ) - - buildReg, err := newTestRegistry(c) - c.Assert(err, check.IsNil) - registerUserAgentHandler(buildReg, &buildUA) - buildRepoName := fmt.Sprintf("%s/busybox", buildReg.hostport) - - pullReg, err := newTestRegistry(c) - c.Assert(err, check.IsNil) - registerUserAgentHandler(pullReg, &pullUA) - pullRepoName := fmt.Sprintf("%s/busybox", pullReg.hostport) - - pushReg, err := newTestRegistry(c) - c.Assert(err, check.IsNil) - registerUserAgentHandler(pushReg, &pushUA) - pushRepoName := fmt.Sprintf("%s/busybox", pushReg.hostport) - - loginReg, err := newTestRegistry(c) - c.Assert(err, check.IsNil) - registerUserAgentHandler(loginReg, &loginUA) - - err = s.d.Start( - "--insecure-registry", buildReg.hostport, - "--insecure-registry", pullReg.hostport, - "--insecure-registry", pushReg.hostport, - "--insecure-registry", loginReg.hostport, - "--disable-legacy-registry=true") - c.Assert(err, check.IsNil) - - dockerfileName, cleanup1, err := makefile(fmt.Sprintf("FROM %s", buildRepoName)) - c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup1() - s.d.Cmd("build", "--file", dockerfileName, ".") - regexpCheckUA(c, buildUA) - - s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", loginReg.hostport) - regexpCheckUA(c, loginUA) - - s.d.Cmd("pull", pullRepoName) - regexpCheckUA(c, pullUA) - - dockerfileName, cleanup2, err := makefile(`FROM scratch - ENV foo bar`) - c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup2() - s.d.Cmd("build", "-t", pushRepoName, "--file", dockerfileName, ".") - - s.d.Cmd("push", pushRepoName) - regexpCheckUA(c, pushUA) -} diff --git a/integration-cli/docker_cli_rename_test.go b/integration-cli/docker_cli_rename_test.go deleted file mode 100644 index 6d61c08cf6..0000000000 --- a/integration-cli/docker_cli_rename_test.go +++ /dev/null @@ -1,123 +0,0 @@ -package main - -import ( - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringid" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestRenameStoppedContainer(c *check.C) { - out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") - - cleanedContainerID :=
strings.TrimSpace(out) - dockerCmd(c, "wait", cleanedContainerID) - - name := inspectField(c, cleanedContainerID, "Name") - newName := "new_name" + stringid.GenerateNonCryptoID() - dockerCmd(c, "rename", "first_name", newName) - - name = inspectField(c, cleanedContainerID, "Name") - c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) - -} - -func (s *DockerSuite) TestRenameRunningContainer(c *check.C) { - out, _ := dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") - - newName := "new_name" + stringid.GenerateNonCryptoID() - cleanedContainerID := strings.TrimSpace(out) - dockerCmd(c, "rename", "first_name", newName) - - name := inspectField(c, cleanedContainerID, "Name") - c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) -} - -func (s *DockerSuite) TestRenameRunningContainerAndReuse(c *check.C) { - out, _ := runSleepingContainer(c, "--name", "first_name") - c.Assert(waitRun("first_name"), check.IsNil) - - newName := "new_name" - ContainerID := strings.TrimSpace(out) - dockerCmd(c, "rename", "first_name", newName) - - name := inspectField(c, ContainerID, "Name") - c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container")) - - out, _ = runSleepingContainer(c, "--name", "first_name") - c.Assert(waitRun("first_name"), check.IsNil) - newContainerID := strings.TrimSpace(out) - name = inspectField(c, newContainerID, "Name") - c.Assert(name, checker.Equals, "/first_name", check.Commentf("Failed to reuse container name")) -} - -func (s *DockerSuite) TestRenameCheckNames(c *check.C) { - dockerCmd(c, "run", "--name", "first_name", "-d", "busybox", "sh") - - newName := "new_name" + stringid.GenerateNonCryptoID() - dockerCmd(c, "rename", "first_name", newName) - - name := inspectField(c, newName, "Name") - c.Assert(name, checker.Equals, "/"+newName, check.Commentf("Failed to rename container %s", name)) - - name, err := inspectFieldWithError("first_name", "Name") - c.Assert(err, checker.NotNil, check.Commentf(name)) - c.Assert(err.Error(), checker.Contains, "No such image, container or task: first_name") -} - -func (s *DockerSuite) TestRenameInvalidName(c *check.C) { - runSleepingContainer(c, "--name", "myname") - - out, _, err := dockerCmdWithError("rename", "myname", "new:invalid") - c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) - c.Assert(out, checker.Contains, "Invalid container name", check.Commentf("%v", err)) - - out, _, err = dockerCmdWithError("rename", "myname") - c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) - c.Assert(out, checker.Contains, "requires exactly 2 argument(s).", check.Commentf("%v", err)) - - out, _, err = dockerCmdWithError("rename", "myname", "") - c.Assert(err, checker.NotNil, check.Commentf("Renaming container to invalid name should have failed: %s", out)) - c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) - - out, _, err = dockerCmdWithError("rename", "", "newname") - c.Assert(err, checker.NotNil, check.Commentf("Renaming container with empty name should have failed: %s", out)) - c.Assert(out, checker.Contains, "may be empty", check.Commentf("%v", err)) - - out, _ = dockerCmd(c, "ps", "-a") - c.Assert(out, checker.Contains, "myname", check.Commentf("Output of docker ps should have included 'myname': %s", out)) -} - -func (s *DockerSuite) TestRenameAnonymousContainer(c 
*check.C) { - testRequires(c, DaemonIsLinux) - - dockerCmd(c, "network", "create", "network1") - out, _ := dockerCmd(c, "create", "-it", "--net", "network1", "busybox", "top") - - anonymousContainerID := strings.TrimSpace(out) - - dockerCmd(c, "rename", anonymousContainerID, "container1") - dockerCmd(c, "start", "container1") - - count := "-c" - if daemonPlatform == "windows" { - count = "-n" - } - - _, _, err := dockerCmdWithError("run", "--net", "network1", "busybox", "ping", count, "1", "container1") - c.Assert(err, check.IsNil, check.Commentf("Embedded DNS lookup fails after renaming anonymous container: %v", err)) -} - -func (s *DockerSuite) TestRenameContainerWithSameName(c *check.C) { - out, _ := runSleepingContainer(c, "--name", "old") - ContainerID := strings.TrimSpace(out) - - out, _, err := dockerCmdWithError("rename", "old", "old") - c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) - c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) - - out, _, err = dockerCmdWithError("rename", ContainerID, "old") - c.Assert(err, checker.NotNil, check.Commentf("Renaming a container with the same name should have failed")) - c.Assert(out, checker.Contains, "Renaming a container with the same name", check.Commentf("%v", err)) -} diff --git a/integration-cli/docker_cli_restart_test.go b/integration-cli/docker_cli_restart_test.go deleted file mode 100644 index 4e591d1d82..0000000000 --- a/integration-cli/docker_cli_restart_test.go +++ /dev/null @@ -1,242 +0,0 @@ -package main - -import ( - "os" - "strconv" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestRestartStoppedContainer(c *check.C) { - dockerCmd(c, "run", "--name=test", "busybox", "echo", "foobar") - cleanedContainerID, err := getIDByName("test") - c.Assert(err, check.IsNil) - - out, _ := dockerCmd(c, "logs", cleanedContainerID) - c.Assert(out, checker.Equals, "foobar\n") - - dockerCmd(c, "restart", cleanedContainerID) - - // Wait until the container has stopped - err = waitInspect(cleanedContainerID, "{{.State.Running}}", "false", 20*time.Second) - c.Assert(err, checker.IsNil) - - out, _ = dockerCmd(c, "logs", cleanedContainerID) - c.Assert(out, checker.Equals, "foobar\nfoobar\n") -} - -func (s *DockerSuite) TestRestartRunningContainer(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo foobar && sleep 30 && echo 'should not print this'") - - cleanedContainerID := strings.TrimSpace(out) - - c.Assert(waitRun(cleanedContainerID), checker.IsNil) - - out, _ = dockerCmd(c, "logs", cleanedContainerID) - c.Assert(out, checker.Equals, "foobar\n") - - dockerCmd(c, "restart", "-t", "1", cleanedContainerID) - - out, _ = dockerCmd(c, "logs", cleanedContainerID) - - c.Assert(waitRun(cleanedContainerID), checker.IsNil) - - c.Assert(out, checker.Equals, "foobar\nfoobar\n") -} - -// Test that restarting a container with a volume does not create a new volume on restart. Regression test for #819. 
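The restart tests that follow never sleep for a fixed interval and hope for the best; they poll container state through the suite's waitInspect helper until an inspect template renders the expected value. A minimal sketch of that polling pattern, assuming only a docker binary on PATH (waitForInspect is an illustrative stand-in, not the suite's implementation):

package sketch

import (
	"fmt"
	"os/exec"
	"strings"
	"time"
)

// waitForInspect polls `docker inspect -f <tmpl> <name>` until the trimmed
// output equals want or the timeout expires.
func waitForInspect(name, tmpl, want string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		out, err := exec.Command("docker", "inspect", "-f", tmpl, name).Output()
		if err == nil && strings.TrimSpace(string(out)) == want {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return fmt.Errorf("timed out waiting for %s: %s != %s", name, tmpl, want)
}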
-func (s *DockerSuite) TestRestartWithVolumes(c *check.C) { - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - out, _ := runSleepingContainer(c, "-d", "-v", prefix+slash+"test") - - cleanedContainerID := strings.TrimSpace(out) - out, err := inspectFilter(cleanedContainerID, "len .Mounts") - c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out)) - out = strings.Trim(out, " \n\r") - c.Assert(out, checker.Equals, "1") - - source, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test") - c.Assert(err, checker.IsNil) - - dockerCmd(c, "restart", cleanedContainerID) - - out, err = inspectFilter(cleanedContainerID, "len .Mounts") - c.Assert(err, check.IsNil, check.Commentf("failed to inspect %s: %s", cleanedContainerID, out)) - out = strings.Trim(out, " \n\r") - c.Assert(out, checker.Equals, "1") - - sourceAfterRestart, err := inspectMountSourceField(cleanedContainerID, prefix+slash+"test") - c.Assert(err, checker.IsNil) - c.Assert(source, checker.Equals, sourceAfterRestart) -} - -func (s *DockerSuite) TestRestartPolicyNO(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--restart=no", "busybox", "false") - - id := strings.TrimSpace(string(out)) - name := inspectField(c, id, "HostConfig.RestartPolicy.Name") - c.Assert(name, checker.Equals, "no") -} - -func (s *DockerSuite) TestRestartPolicyAlways(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "false") - - id := strings.TrimSpace(string(out)) - name := inspectField(c, id, "HostConfig.RestartPolicy.Name") - c.Assert(name, checker.Equals, "always") - - MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") - - // MaximumRetryCount=0 if the restart policy is always - c.Assert(MaximumRetryCount, checker.Equals, "0") -} - -func (s *DockerSuite) TestRestartPolicyOnFailure(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:1", "busybox", "false") - - id := strings.TrimSpace(string(out)) - name := inspectField(c, id, "HostConfig.RestartPolicy.Name") - c.Assert(name, checker.Equals, "on-failure") - -} - -// a good container with --restart=on-failure:3 -// MaximumRetryCount!=0; RestartCount=0 -func (s *DockerSuite) TestRestartContainerwithGoodContainer(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "true") - - id := strings.TrimSpace(string(out)) - err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", 30*time.Second) - c.Assert(err, checker.IsNil) - - count := inspectField(c, id, "RestartCount") - c.Assert(count, checker.Equals, "0") - - MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") - c.Assert(MaximumRetryCount, checker.Equals, "3") - -} - -func (s *DockerSuite) TestRestartContainerSuccess(c *check.C) { - testRequires(c, SameHostDaemon) - - out, _ := runSleepingContainer(c, "-d", "--restart=always") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), check.IsNil) - - pidStr := inspectField(c, id, "State.Pid") - - pid, err := strconv.Atoi(pidStr) - c.Assert(err, check.IsNil) - - p, err := os.FindProcess(pid) - c.Assert(err, check.IsNil) - c.Assert(p, check.NotNil) - - err = p.Kill() - c.Assert(err, check.IsNil) - - err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) - c.Assert(err, check.IsNil) - - err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second) - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestRestartWithPolicyUserDefinedNetwork(c *check.C) { 
- // TODO Windows. This may be portable following HNS integration post TP5. - testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "-d", "bridge", "udNet") - - dockerCmd(c, "run", "-d", "--net=udNet", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - - dockerCmd(c, "run", "-d", "--restart=always", "--net=udNet", "--name=second", - "--link=first:foo", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // ping to first and its alias foo must succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") - c.Assert(err, check.IsNil) - - // Now kill the second container and let the restart policy kick in - pidStr := inspectField(c, "second", "State.Pid") - - pid, err := strconv.Atoi(pidStr) - c.Assert(err, check.IsNil) - - p, err := os.FindProcess(pid) - c.Assert(err, check.IsNil) - c.Assert(p, check.NotNil) - - err = p.Kill() - c.Assert(err, check.IsNil) - - err = waitInspect("second", "{{.RestartCount}}", "1", 5*time.Second) - c.Assert(err, check.IsNil) - - err = waitInspect("second", "{{.State.Status}}", "running", 5*time.Second) - c.Assert(err, check.IsNil) - - // ping to first and its alias foo must still succeed - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestRestartPolicyAfterRestart(c *check.C) { - testRequires(c, SameHostDaemon) - - out, _ := runSleepingContainer(c, "-d", "--restart=always") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), check.IsNil) - - dockerCmd(c, "restart", id) - - c.Assert(waitRun(id), check.IsNil) - - pidStr := inspectField(c, id, "State.Pid") - - pid, err := strconv.Atoi(pidStr) - c.Assert(err, check.IsNil) - - p, err := os.FindProcess(pid) - c.Assert(err, check.IsNil) - c.Assert(p, check.NotNil) - - err = p.Kill() - c.Assert(err, check.IsNil) - - err = waitInspect(id, "{{.RestartCount}}", "1", 30*time.Second) - c.Assert(err, check.IsNil) - - err = waitInspect(id, "{{.State.Status}}", "running", 30*time.Second) - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestRestartContainerwithRestartPolicy(c *check.C) { - out1, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") - out2, _ := dockerCmd(c, "run", "-d", "--restart=always", "busybox", "false") - - id1 := strings.TrimSpace(string(out1)) - id2 := strings.TrimSpace(string(out2)) - waitTimeout := 15 * time.Second - if daemonPlatform == "windows" { - waitTimeout = 150 * time.Second - } - err := waitInspect(id1, "{{ .State.Restarting }} {{ .State.Running }}", "false false", waitTimeout) - c.Assert(err, checker.IsNil) - - dockerCmd(c, "restart", id1) - dockerCmd(c, "restart", id2) - - dockerCmd(c, "stop", id1) - dockerCmd(c, "stop", id2) - dockerCmd(c, "start", id1) - dockerCmd(c, "start", id2) -} diff --git a/integration-cli/docker_cli_rm_test.go b/integration-cli/docker_cli_rm_test.go deleted file mode 100644 index 0186c56741..0000000000 --- a/integration-cli/docker_cli_rm_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package main - -import ( - "io/ioutil" - "os" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestRmContainerWithRemovedVolume(c *check.C) { - testRequires(c, SameHostDaemon) - - prefix, slash :=
getPrefixAndSlashFromDaemonPlatform() - - tempDir, err := ioutil.TempDir("", "test-rm-container-with-removed-volume-") - if err != nil { - c.Fatalf("failed to create temporary directory %s: %v", tempDir, err) - } - defer os.RemoveAll(tempDir) - - dockerCmd(c, "run", "--name", "losemyvolumes", "-v", tempDir+":"+prefix+slash+"test", "busybox", "true") - - err = os.RemoveAll(tempDir) - c.Assert(err, check.IsNil) - - dockerCmd(c, "rm", "-v", "losemyvolumes") -} - -func (s *DockerSuite) TestRmContainerWithVolume(c *check.C) { - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - - dockerCmd(c, "run", "--name", "foo", "-v", prefix+slash+"srv", "busybox", "true") - - dockerCmd(c, "rm", "-v", "foo") -} - -func (s *DockerSuite) TestRmContainerRunning(c *check.C) { - createRunningContainer(c, "foo") - - _, _, err := dockerCmdWithError("rm", "foo") - c.Assert(err, checker.NotNil, check.Commentf("Expected error, can't rm a running container")) -} - -func (s *DockerSuite) TestRmContainerForceRemoveRunning(c *check.C) { - createRunningContainer(c, "foo") - - // Stop then remove with -f - dockerCmd(c, "rm", "-f", "foo") -} - -func (s *DockerSuite) TestRmContainerOrphaning(c *check.C) { - dockerfile1 := `FROM busybox:latest - ENTRYPOINT ["true"]` - img := "test-container-orphaning" - dockerfile2 := `FROM busybox:latest - ENTRYPOINT ["true"] - MAINTAINER Integration Tests` - - // build first dockerfile - img1, err := buildImage(img, dockerfile1, true) - c.Assert(err, check.IsNil, check.Commentf("Could not build image %s", img)) - // run container on first image - dockerCmd(c, "run", img) - // rebuild dockerfile with a small addition at the end - _, err = buildImage(img, dockerfile2, true) - c.Assert(err, check.IsNil, check.Commentf("Could not rebuild image %s", img)) - // try to remove the image, should not error out.
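The orphaning scenario above works because `docker build` accepts a Dockerfile on stdin, so the same tag can be rebuilt in place. A minimal sketch of that stdin-build pattern before the removal attempt below (buildFromStdin is a hypothetical helper; the suite's buildImage additionally extracts the resulting image ID):

package sketch

import (
	"os/exec"
	"strings"
)

// buildFromStdin pipes an inline Dockerfile to `docker build -`, the same
// shape the tests use to rebuild a tag in place.
func buildFromStdin(tag, dockerfile string) ([]byte, error) {
	cmd := exec.Command("docker", "build", "-t", tag, "-")
	cmd.Stdin = strings.NewReader(dockerfile)
	return cmd.CombinedOutput()
}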
- out, _, err := dockerCmdWithError("rmi", img) - c.Assert(err, check.IsNil, check.Commentf("Expected to removing the image, but failed: %s", out)) - - // check if we deleted the first image - out, _ = dockerCmd(c, "images", "-q", "--no-trunc") - c.Assert(out, checker.Contains, img1, check.Commentf("Orphaned container (could not find %q in docker images): %s", img1, out)) - -} - -func (s *DockerSuite) TestRmInvalidContainer(c *check.C) { - out, _, err := dockerCmdWithError("rm", "unknown") - c.Assert(err, checker.NotNil, check.Commentf("Expected error on rm unknown container, got none")) - c.Assert(out, checker.Contains, "No such container") -} - -func createRunningContainer(c *check.C, name string) { - runSleepingContainer(c, "-dt", "--name", name) -} diff --git a/integration-cli/docker_cli_rmi_test.go b/integration-cli/docker_cli_rmi_test.go deleted file mode 100644 index 8c9e2af07e..0000000000 --- a/integration-cli/docker_cli_rmi_test.go +++ /dev/null @@ -1,347 +0,0 @@ -package main - -import ( - "fmt" - "os/exec" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringid" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestRmiWithContainerFails(c *check.C) { - errSubstr := "is using it" - - // create a container - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - - cleanedContainerID := strings.TrimSpace(out) - - // try to delete the image - out, _, err := dockerCmdWithError("rmi", "busybox") - // Container is using image, should not be able to rmi - c.Assert(err, checker.NotNil) - // Container is using image, error message should contain errSubstr - c.Assert(out, checker.Contains, errSubstr, check.Commentf("Container: %q", cleanedContainerID)) - - // make sure it didn't delete the busybox name - images, _ := dockerCmd(c, "images") - // The name 'busybox' should not have been removed from images - c.Assert(images, checker.Contains, "busybox") -} - -func (s *DockerSuite) TestRmiTag(c *check.C) { - imagesBefore, _ := dockerCmd(c, "images", "-a") - dockerCmd(c, "tag", "busybox", "utest:tag1") - dockerCmd(c, "tag", "busybox", "utest/docker:tag2") - dockerCmd(c, "tag", "busybox", "utest:5000/docker:tag3") - { - imagesAfter, _ := dockerCmd(c, "images", "-a") - c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+3, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) - } - dockerCmd(c, "rmi", "utest/docker:tag2") - { - imagesAfter, _ := dockerCmd(c, "images", "-a") - c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) - } - dockerCmd(c, "rmi", "utest:5000/docker:tag3") - { - imagesAfter, _ := dockerCmd(c, "images", "-a") - c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+1, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) - - } - dockerCmd(c, "rmi", "utest:tag1") - { - imagesAfter, _ := dockerCmd(c, "images", "-a") - c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n"), check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) - - } -} - -func (s *DockerSuite) TestRmiImgIDMultipleTag(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-one'") - - containerID := strings.TrimSpace(out) - - // Wait for it to exit as cannot commit a running container on Windows, and - // it will take a 
few seconds to exit - if daemonPlatform == "windows" { - err := waitExited(containerID, 60*time.Second) - c.Assert(err, check.IsNil) - } - - dockerCmd(c, "commit", containerID, "busybox-one") - - imagesBefore, _ := dockerCmd(c, "images", "-a") - dockerCmd(c, "tag", "busybox-one", "busybox-one:tag1") - dockerCmd(c, "tag", "busybox-one", "busybox-one:tag2") - - imagesAfter, _ := dockerCmd(c, "images", "-a") - // tag busybox to create 2 more images with same imageID - c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+2, check.Commentf("docker images shows: %q\n", imagesAfter)) - - imgID := inspectField(c, "busybox-one:tag1", "Id") - - // run a container with the image - out, _ = runSleepingContainerInImage(c, "busybox-one") - - containerID = strings.TrimSpace(out) - - // first check that rmi without force fails - out, _, err := dockerCmdWithError("rmi", imgID) - expected := fmt.Sprintf("conflict: unable to delete %s (cannot be forced) - image is being used by running container %s", stringid.TruncateID(imgID), stringid.TruncateID(containerID)) - // rmi of an image ID used by a running container should fail without force - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, expected) - - dockerCmd(c, "stop", containerID) - dockerCmd(c, "rmi", "-f", imgID) - - imagesAfter, _ = dockerCmd(c, "images", "-a") - // rmi -f succeeded, so the image should no longer exist - c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12], check.Commentf("ImageID:%q; ImagesAfter: %q", imgID, imagesAfter)) -} - -func (s *DockerSuite) TestRmiImgIDForce(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "mkdir '/busybox-test'") - - containerID := strings.TrimSpace(out) - - // Wait for it to exit as cannot commit a running container on Windows, and - // it will take a few seconds to exit - if daemonPlatform == "windows" { - err := waitExited(containerID, 60*time.Second) - c.Assert(err, check.IsNil) - } - - dockerCmd(c, "commit", containerID, "busybox-test") - - imagesBefore, _ := dockerCmd(c, "images", "-a") - dockerCmd(c, "tag", "busybox-test", "utest:tag1") - dockerCmd(c, "tag", "busybox-test", "utest:tag2") - dockerCmd(c, "tag", "busybox-test", "utest/docker:tag3") - dockerCmd(c, "tag", "busybox-test", "utest:5000/docker:tag4") - { - imagesAfter, _ := dockerCmd(c, "images", "-a") - c.Assert(strings.Count(imagesAfter, "\n"), checker.Equals, strings.Count(imagesBefore, "\n")+4, check.Commentf("before: %q\n\nafter: %q\n", imagesBefore, imagesAfter)) - } - imgID := inspectField(c, "busybox-test", "Id") - - // first check that rmi without force fails - out, _, err := dockerCmdWithError("rmi", imgID) - // rmi tagged in multiple repos should have failed without force - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "(must be forced) - image is referenced in one or more repositories", check.Commentf("out: %s; err: %v;", out, err)) - - dockerCmd(c, "rmi", "-f", imgID) - { - imagesAfter, _ := dockerCmd(c, "images", "-a") - // rmi -f succeeded, so the image should no longer exist - c.Assert(imagesAfter, checker.Not(checker.Contains), imgID[:12]) - } -} - -// See https://github.com/docker/docker/issues/14116 -func (s *DockerSuite) TestRmiImageIDForceWithRunningContainersAndMultipleTags(c *check.C) { - dockerfile := "FROM busybox\nRUN echo test 14116\n" - imgID, err := buildImage("test-14116", dockerfile, false) - c.Assert(err, checker.IsNil) - - newTag := "newtag" - dockerCmd(c, "tag", imgID, newTag) -
runSleepingContainerInImage(c, imgID) - - out, _, err := dockerCmdWithError("rmi", "-f", imgID) - // rmi -f should not delete image with running containers - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "(cannot be forced) - image is being used by running container") -} - -func (s *DockerSuite) TestRmiTagWithExistingContainers(c *check.C) { - container := "test-delete-tag" - newtag := "busybox:newtag" - bb := "busybox:latest" - dockerCmd(c, "tag", bb, newtag) - - dockerCmd(c, "run", "--name", container, bb, "/bin/true") - - out, _ := dockerCmd(c, "rmi", newtag) - c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1) -} - -func (s *DockerSuite) TestRmiForceWithExistingContainers(c *check.C) { - image := "busybox-clone" - - cmd := exec.Command(dockerBinary, "build", "--no-cache", "-t", image, "-") - cmd.Stdin = strings.NewReader(`FROM busybox -MAINTAINER foo`) - - out, _, err := runCommandWithOutput(cmd) - c.Assert(err, checker.IsNil, check.Commentf("Could not build %s: %s", image, out)) - - dockerCmd(c, "run", "--name", "test-force-rmi", image, "/bin/true") - - dockerCmd(c, "rmi", "-f", image) -} - -func (s *DockerSuite) TestRmiWithMultipleRepositories(c *check.C) { - newRepo := "127.0.0.1:5000/busybox" - oldRepo := "busybox" - newTag := "busybox:test" - dockerCmd(c, "tag", oldRepo, newRepo) - - dockerCmd(c, "run", "--name", "test", oldRepo, "touch", "/abcd") - - dockerCmd(c, "commit", "test", newTag) - - out, _ := dockerCmd(c, "rmi", newTag) - c.Assert(out, checker.Contains, "Untagged: "+newTag) -} - -func (s *DockerSuite) TestRmiForceWithMultipleRepositories(c *check.C) { - imageName := "rmiimage" - tag1 := imageName + ":tag1" - tag2 := imageName + ":tag2" - - _, err := buildImage(tag1, - `FROM busybox - MAINTAINER "docker"`, - true) - if err != nil { - c.Fatal(err) - } - - dockerCmd(c, "tag", tag1, tag2) - - out, _ := dockerCmd(c, "rmi", "-f", tag2) - c.Assert(out, checker.Contains, "Untagged: "+tag2) - c.Assert(out, checker.Not(checker.Contains), "Untagged: "+tag1) - - // Check built image still exists - images, _ := dockerCmd(c, "images", "-a") - c.Assert(images, checker.Contains, imageName, check.Commentf("Built image missing %q; Images: %q", imageName, images)) -} - -func (s *DockerSuite) TestRmiBlank(c *check.C) { - out, _, err := dockerCmdWithError("rmi", " ") - // Should have failed to delete ' ' image - c.Assert(err, checker.NotNil) - // Wrong error message generated - c.Assert(out, checker.Not(checker.Contains), "no such id", check.Commentf("out: %s", out)) - // Expected error message not generated - c.Assert(out, checker.Contains, "image name cannot be blank", check.Commentf("out: %s", out)) -} - -func (s *DockerSuite) TestRmiContainerImageNotFound(c *check.C) { - // Build 2 images for testing. - imageNames := []string{"test1", "test2"} - imageIds := make([]string, 2) - for i, name := range imageNames { - dockerfile := fmt.Sprintf("FROM busybox\nMAINTAINER %s\nRUN echo %s\n", name, name) - id, err := buildImage(name, dockerfile, false) - c.Assert(err, checker.IsNil) - imageIds[i] = id - } - - // Create a long-running container. - runSleepingContainerInImage(c, imageNames[0]) - - // Create a stopped container, and then force remove its image. - dockerCmd(c, "run", imageNames[1], "true") - dockerCmd(c, "rmi", "-f", imageIds[1]) - - // Try to remove the image of the running container and see if it fails as expected. - out, _, err := dockerCmdWithError("rmi", "-f", imageIds[0]) - // The image of the running container should not be removed. 
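Several of the rmi tests here assert on how many tags a single `docker rmi` invocation untagged. Under the assumption that rmi prints one `Untagged: <ref>` line per removed tag, the counting pattern can be sketched as follows (countUntagged is an illustrative helper, not part of the suite):

package sketch

import (
	"os/exec"
	"strings"
)

// countUntagged runs `docker rmi` and counts the "Untagged:" lines in its
// combined output, mirroring the strings.Count assertions in the tests.
func countUntagged(ref string) (int, string, error) {
	out, err := exec.Command("docker", "rmi", ref).CombinedOutput()
	return strings.Count(string(out), "Untagged: "), string(out), err
}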
- c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "image is being used by running container", check.Commentf("out: %s", out)) -} - -// #13422 -func (s *DockerSuite) TestRmiUntagHistoryLayer(c *check.C) { - image := "tmp1" - // Build an image for testing. - dockerfile := `FROM busybox -MAINTAINER foo -RUN echo 0 #layer0 -RUN echo 1 #layer1 -RUN echo 2 #layer2 -` - _, err := buildImage(image, dockerfile, false) - c.Assert(err, checker.IsNil) - - out, _ := dockerCmd(c, "history", "-q", image) - ids := strings.Split(out, "\n") - idToTag := ids[2] - - // Tag layer0 to "tmp2". - newTag := "tmp2" - dockerCmd(c, "tag", idToTag, newTag) - // Create a container based on "tmp1". - dockerCmd(c, "run", "-d", image, "true") - - // See if the "tmp2" can be untagged. - out, _ = dockerCmd(c, "rmi", newTag) - // Expected 1 untagged entry - c.Assert(strings.Count(out, "Untagged: "), checker.Equals, 1, check.Commentf("out: %s", out)) - - // Now let's add the tag again and create a container based on it. - dockerCmd(c, "tag", idToTag, newTag) - out, _ = dockerCmd(c, "run", "-d", newTag, "true") - cid := strings.TrimSpace(out) - - // At this point we have 2 containers, one based on layer2 and another based on layer0. - // Try to untag "tmp2" without the -f flag. - out, _, err = dockerCmdWithError("rmi", newTag) - // should not be untagged without the -f flag - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, cid[:12]) - c.Assert(out, checker.Contains, "(must force)") - - // Add the -f flag and test again. - out, _ = dockerCmd(c, "rmi", "-f", newTag) - // should be allowed to untag with the -f flag - c.Assert(out, checker.Contains, fmt.Sprintf("Untagged: %s:latest", newTag)) -} - -func (*DockerSuite) TestRmiParentImageFail(c *check.C) { - parent := inspectField(c, "busybox", "Parent") - out, _, err := dockerCmdWithError("rmi", parent) - c.Assert(err, check.NotNil) - if !strings.Contains(out, "image has dependent child images") { - c.Fatalf("rmi should have failed because it's a parent image, got %s", out) - } -} - -func (s *DockerSuite) TestRmiWithParentInUse(c *check.C) { - out, _ := dockerCmd(c, "create", "busybox") - cID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "commit", cID) - imageID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "create", imageID) - cID = strings.TrimSpace(out) - - out, _ = dockerCmd(c, "commit", cID) - imageID = strings.TrimSpace(out) - - dockerCmd(c, "rmi", imageID) -} - -// #18873 -func (s *DockerSuite) TestRmiByIDHardConflict(c *check.C) { - dockerCmd(c, "create", "busybox") - - imgID := inspectField(c, "busybox:latest", "Id") - - _, _, err := dockerCmdWithError("rmi", imgID[:12]) - c.Assert(err, checker.NotNil) - - // check that tag was not removed - imgID2 := inspectField(c, "busybox:latest", "Id") - c.Assert(imgID, checker.Equals, imgID2) -} diff --git a/integration-cli/docker_cli_run_test.go b/integration-cli/docker_cli_run_test.go deleted file mode 100644 index ef2c98c6a8..0000000000 --- a/integration-cli/docker_cli_run_test.go +++ /dev/null @@ -1,4493 +0,0 @@ -package main - -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "net" - "os" - "os/exec" - "path" - "path/filepath" - "reflect" - "regexp" - "runtime" - "sort" - "strconv" - "strings" - "sync" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/docker/docker/runconfig" - "github.com/docker/go-connections/nat" - 
"github.com/docker/libnetwork/resolvconf" - "github.com/docker/libnetwork/types" - "github.com/go-check/check" - libcontainerUser "github.com/opencontainers/runc/libcontainer/user" -) - -// "test123" should be printed by docker run -func (s *DockerSuite) TestRunEchoStdout(c *check.C) { - out, _ := dockerCmd(c, "run", "busybox", "echo", "test123") - if out != "test123\n" { - c.Fatalf("container should've printed 'test123', got '%s'", out) - } -} - -// "test" should be printed -func (s *DockerSuite) TestRunEchoNamedContainer(c *check.C) { - out, _ := dockerCmd(c, "run", "--name", "testfoonamedcontainer", "busybox", "echo", "test") - if out != "test\n" { - c.Errorf("container should've printed 'test'") - } -} - -// docker run should not leak file descriptors. This test relies on Unix -// specific functionality and cannot run on Windows. -func (s *DockerSuite) TestRunLeakyFileDescriptors(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "busybox", "ls", "-C", "/proc/self/fd") - - // normally, we should only get 0, 1, and 2, but 3 gets created by "ls" when it does "opendir" on the "fd" directory - if out != "0 1 2 3\n" { - c.Errorf("container should've printed '0 1 2 3', not: %s", out) - } -} - -// it should be possible to lookup Google DNS -// this will fail when Internet access is unavailable -func (s *DockerSuite) TestRunLookupGoogleDns(c *check.C) { - testRequires(c, Network, NotArm) - image := DefaultImage - if daemonPlatform == "windows" { - // nslookup isn't present in Windows busybox. Is built-in. - image = WindowsBaseImage - } - dockerCmd(c, "run", image, "nslookup", "google.com") -} - -// the exit code should be 0 -func (s *DockerSuite) TestRunExitCodeZero(c *check.C) { - dockerCmd(c, "run", "busybox", "true") -} - -// the exit code should be 1 -func (s *DockerSuite) TestRunExitCodeOne(c *check.C) { - _, exitCode, err := dockerCmdWithError("run", "busybox", "false") - c.Assert(err, checker.NotNil) - c.Assert(exitCode, checker.Equals, 1) -} - -// it should be possible to pipe in data via stdin to a process running in a container -func (s *DockerSuite) TestRunStdinPipe(c *check.C) { - // TODO Windows: This needs some work to make compatible. 
- testRequires(c, DaemonIsLinux) - runCmd := exec.Command(dockerBinary, "run", "-i", "-a", "stdin", "busybox", "cat") - runCmd.Stdin = strings.NewReader("blahblah") - out, _, _, err := runCommandWithStdoutStderr(runCmd) - if err != nil { - c.Fatalf("failed to run container: %v, output: %q", err, out) - } - - out = strings.TrimSpace(out) - dockerCmd(c, "wait", out) - - logsOut, _ := dockerCmd(c, "logs", out) - - containerLogs := strings.TrimSpace(logsOut) - if containerLogs != "blahblah" { - c.Errorf("logs didn't print the container's logs %s", containerLogs) - } - - dockerCmd(c, "rm", out) -} - -// the container's ID should be printed when starting a container in detached mode -func (s *DockerSuite) TestRunDetachedContainerIDPrinting(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "true") - - out = strings.TrimSpace(out) - dockerCmd(c, "wait", out) - - rmOut, _ := dockerCmd(c, "rm", out) - - rmOut = strings.TrimSpace(rmOut) - if rmOut != out { - c.Errorf("rm didn't print the container ID %s %s", out, rmOut) - } -} - -// the working directory should be set correctly -func (s *DockerSuite) TestRunWorkingDirectory(c *check.C) { - dir := "/root" - image := "busybox" - if daemonPlatform == "windows" { - dir = `C:/Windows` - } - - // First with -w - out, _ := dockerCmd(c, "run", "-w", dir, image, "pwd") - out = strings.TrimSpace(out) - if out != dir { - c.Errorf("-w failed to set working directory") - } - - // Then with --workdir - out, _ = dockerCmd(c, "run", "--workdir", dir, image, "pwd") - out = strings.TrimSpace(out) - if out != dir { - c.Errorf("--workdir failed to set working directory") - } -} - -// pinging Google's DNS resolver should fail when we disable the networking -func (s *DockerSuite) TestRunWithoutNetworking(c *check.C) { - count := "-c" - image := "busybox" - if daemonPlatform == "windows" { - count = "-n" - image = WindowsBaseImage - } - - // First using the long form --net - out, exitCode, err := dockerCmdWithError("run", "--net=none", image, "ping", count, "1", "8.8.8.8") - if err != nil && exitCode != 1 { - c.Fatal(out, err) - } - if exitCode != 1 { - c.Errorf("--net=none should've disabled the network; the container shouldn't have been able to ping 8.8.8.8") - } -} - -// test that --link can use a container name as the link target -func (s *DockerSuite) TestRunLinksContainerWithContainerName(c *check.C) { - // TODO Windows: This test cannot run on a Windows daemon as the networking - // settings are not populated back yet on inspect. - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-i", "-t", "-d", "--name", "parent", "busybox") - - ip := inspectField(c, "parent", "NetworkSettings.Networks.bridge.IPAddress") - - out, _ := dockerCmd(c, "run", "--link", "parent:test", "busybox", "/bin/cat", "/etc/hosts") - if !strings.Contains(out, ip+" test") { - c.Fatalf("use a container name to link target failed") - } -} - -// test that --link can use a container ID as the link target -func (s *DockerSuite) TestRunLinksContainerWithContainerId(c *check.C) { - // TODO Windows: This test cannot run on a Windows daemon as the networking - // settings are not populated back yet on inspect.
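TestRunStdinPipe above hinges on wiring a Go reader to the container's stdin. The same shape in isolation (pipeThroughContainer is a hypothetical helper; `--rm` is an addition for cleanup, unlike the test, which waits on and removes the container explicitly):

package sketch

import (
	"os/exec"
	"strings"
)

// pipeThroughContainer feeds data to `docker run -i busybox cat` over stdin
// and returns whatever the container echoes back.
func pipeThroughContainer(data string) (string, error) {
	cmd := exec.Command("docker", "run", "--rm", "-i", "busybox", "cat")
	cmd.Stdin = strings.NewReader(data)
	out, err := cmd.Output()
	return string(out), err
}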
- testRequires(c, DaemonIsLinux) - cID, _ := dockerCmd(c, "run", "-i", "-t", "-d", "busybox") - - cID = strings.TrimSpace(cID) - ip := inspectField(c, cID, "NetworkSettings.Networks.bridge.IPAddress") - - out, _ := dockerCmd(c, "run", "--link", cID+":test", "busybox", "/bin/cat", "/etc/hosts") - if !strings.Contains(out, ip+" test") { - c.Fatalf("use a container id to link target failed") - } -} - -func (s *DockerSuite) TestUserDefinedNetworkLinks(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") - - dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - - // run a container in user-defined network udlinkNet with a link for an existing container - // and a link for a container that doesn't exist - dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", - "--link=third:bar", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // ping to first and its alias foo must succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") - c.Assert(err, check.IsNil) - - // ping to third and its alias must fail - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") - c.Assert(err, check.NotNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") - c.Assert(err, check.NotNil) - - // start third container now - dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=third", "busybox", "top") - c.Assert(waitRun("third"), check.IsNil) - - // ping to third and its alias must succeed now - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "third") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "bar") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestUserDefinedNetworkLinksWithRestart(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "-d", "bridge", "udlinkNet") - - dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - - dockerCmd(c, "run", "-d", "--net=udlinkNet", "--name=second", "--link=first:foo", - "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // ping to first and its alias foo must succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") - c.Assert(err, check.IsNil) - - // Restart first container - dockerCmd(c, "restart", "first") - c.Assert(waitRun("first"), check.IsNil) - - // ping to first and its alias foo must still succeed - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") - c.Assert(err, check.IsNil) - - // Restart second container - dockerCmd(c, "restart", "second") - c.Assert(waitRun("second"), check.IsNil) - - // ping to first and its alias foo must still succeed - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestRunWithNetAliasOnDefaultNetworks(c 
*check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - - defaults := []string{"bridge", "host", "none"} - for _, net := range defaults { - out, _, err := dockerCmdWithError("run", "-d", "--net", net, "--net-alias", "alias_"+net, "busybox", "top") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, runconfig.ErrUnsupportedNetworkAndAlias.Error()) - } -} - -func (s *DockerSuite) TestUserDefinedNetworkAlias(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "-d", "bridge", "net1") - - cid1, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=first", "--net-alias=foo1", "--net-alias=foo2", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - - // Check if default short-id alias is added automatically - id := strings.TrimSpace(cid1) - aliases := inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") - c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) - - cid2, _ := dockerCmd(c, "run", "-d", "--net=net1", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // Check if default short-id alias is added automatically - id = strings.TrimSpace(cid2) - aliases = inspectField(c, id, "NetworkSettings.Networks.net1.Aliases") - c.Assert(aliases, checker.Contains, stringid.TruncateID(id)) - - // ping to first and its network-scoped aliases - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") - c.Assert(err, check.IsNil) - // ping first container's short-id alias - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) - c.Assert(err, check.IsNil) - - // Restart first container - dockerCmd(c, "restart", "first") - c.Assert(waitRun("first"), check.IsNil) - - // ping to first and its network-scoped aliases must succeed - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo1") - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "foo2") - c.Assert(err, check.IsNil) - // ping first container's short-id alias - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", stringid.TruncateID(cid1)) - c.Assert(err, check.IsNil) -} - -// Issue 9677. 
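The alias checks above compare against stringid.TruncateID of the container ID. A sketch of the assumed behavior, trimming a full 64-character ID down to the 12-character short form (not the vendored implementation):

package main

import "fmt"

// truncateID mimics the short-ID form used for the automatic network alias:
// the first 12 characters of the full 64-character container ID.
func truncateID(id string) string {
	if len(id) > 12 {
		return id[:12]
	}
	return id
}

func main() {
	full := "1f3ce3a5c2e1a9d4b6f70c59d2f3a8b1e4d5c6f7a8b9c0d1e2f3a4b5c6d7e8f9"
	fmt.Println(truncateID(full)) // 1f3ce3a5c2e1
}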
-func (s *DockerSuite) TestRunWithDaemonFlags(c *check.C) { - out, _, err := dockerCmdWithError("--exec-opt", "foo=bar", "run", "-i", "busybox", "true") - if err != nil { - if !strings.Contains(out, "flag provided but not defined: --exec-opt") { // no daemon (client-only) - c.Fatal(err, out) - } - } -} - -// Regression test for #4979 -func (s *DockerSuite) TestRunWithVolumesFromExited(c *check.C) { - - var ( - out string - exitCode int - ) - - // Create a file in a volume - if daemonPlatform == "windows" { - out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", `c:\some\dir`, WindowsBaseImage, "cmd", "/c", `echo hello > c:\some\dir\file`) - } else { - out, exitCode = dockerCmd(c, "run", "--name", "test-data", "--volume", "/some/dir", "busybox", "touch", "/some/dir/file") - } - if exitCode != 0 { - c.Fatal("1", out, exitCode) - } - - // Read the file from another container using --volumes-from to access the volume in the second container - if daemonPlatform == "windows" { - out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", WindowsBaseImage, "cmd", "/c", `type c:\some\dir\file`) - } else { - out, exitCode = dockerCmd(c, "run", "--volumes-from", "test-data", "busybox", "cat", "/some/dir/file") - } - if exitCode != 0 { - c.Fatal("2", out, exitCode) - } -} - -// Volume path is a symlink which also exists on the host, and the host side is a file not a dir -// But the volume call is just a normal volume, not a bind mount -func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir(c *check.C) { - var ( - dockerFile string - containerPath string - cmd string - ) - // TODO Windows (Post TP5): This test cannot run on a Windows daemon as - // Windows does not support symlinks inside a volume path - testRequires(c, SameHostDaemon, DaemonIsLinux) - name := "test-volume-symlink" - - dir, err := ioutil.TempDir("", name) - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(dir) - - // In the case of Windows to Windows CI, if the machine is setup so that - // the temp directory is not the C: drive, this test is invalid and will - // not work. 
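The symlink-volume tests that follow build the symlink into an image with `ln -s` or `mklink`. For experimenting outside a build, the equivalent host-side fixture can be sketched like this (makeSymlinkedDir is a hypothetical helper; ioutil.TempDir matches the era of this codebase):

package sketch

import (
	"io/ioutil"
	"os"
	"path/filepath"
)

// makeSymlinkedDir creates a real directory plus a symlink pointing at it,
// the shape that used to trigger volume-path bugs. The fixed link name keeps
// the sketch short; a real helper would randomize it.
func makeSymlinkedDir() (dir, link string, cleanup func(), err error) {
	dir, err = ioutil.TempDir("", "test-volume-symlink")
	if err != nil {
		return "", "", nil, err
	}
	link = filepath.Join(os.TempDir(), "test-volume-symlink-link")
	if err = os.Symlink(dir, link); err != nil {
		os.RemoveAll(dir)
		return "", "", nil, err
	}
	cleanup = func() { os.Remove(link); os.RemoveAll(dir) }
	return dir, link, cleanup, nil
}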
- if daemonPlatform == "windows" && strings.ToLower(dir[:1]) != "c" { - c.Skip("Requires TEMP to point to C: drive") - } - - f, err := os.OpenFile(filepath.Join(dir, "test"), os.O_CREATE, 0700) - if err != nil { - c.Fatal(err) - } - f.Close() - - if daemonPlatform == "windows" { - dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir %s\nRUN mklink /D c:\\test %s", WindowsBaseImage, dir, dir) - containerPath = `c:\test\test` - cmd = "tasklist" - } else { - dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p %s\nRUN ln -s %s /test", dir, dir) - containerPath = "/test/test" - cmd = "true" - } - if _, err := buildImage(name, dockerFile, false); err != nil { - c.Fatal(err) - } - - dockerCmd(c, "run", "-v", containerPath, name, cmd) -} - -// Volume path is a symlink in the container -func (s *DockerSuite) TestRunCreateVolumesInSymlinkDir2(c *check.C) { - var ( - dockerFile string - containerPath string - cmd string - ) - // TODO Windows (Post TP5): This test cannot run on a Windows daemon as - // Windows does not support symlinks inside a volume path - testRequires(c, SameHostDaemon, DaemonIsLinux) - name := "test-volume-symlink2" - - if daemonPlatform == "windows" { - dockerFile = fmt.Sprintf("FROM %s\nRUN mkdir c:\\%s\nRUN mklink /D c:\\test c:\\%s", WindowsBaseImage, name, name) - containerPath = `c:\test\test` - cmd = "tasklist" - } else { - dockerFile = fmt.Sprintf("FROM busybox\nRUN mkdir -p /%s\nRUN ln -s /%s /test", name, name) - containerPath = "/test/test" - cmd = "true" - } - if _, err := buildImage(name, dockerFile, false); err != nil { - c.Fatal(err) - } - - dockerCmd(c, "run", "-v", containerPath, name, cmd) -} - -func (s *DockerSuite) TestRunVolumesMountedAsReadonly(c *check.C) { - // TODO Windows: Temporary check - remove once TP5 support is dropped - if daemonPlatform == "windows" && windowsDaemonKV < 14350 { - c.Skip("Needs later Windows build for RO volumes") - } - if _, code, err := dockerCmdWithError("run", "-v", "/test:/test:ro", "busybox", "touch", "/test/somefile"); err == nil || code == 0 { - c.Fatalf("run should fail because volume is ro: exit code %d", code) - } -} - -func (s *DockerSuite) TestRunVolumesFromInReadonlyModeFails(c *check.C) { - // TODO Windows: Temporary check - remove once TP5 support is dropped - if daemonPlatform == "windows" && windowsDaemonKV < 14350 { - c.Skip("Needs later Windows build for RO volumes") - } - var ( - volumeDir string - fileInVol string - ) - if daemonPlatform == "windows" { - volumeDir = `c:/test` // Forward-slash as using busybox - fileInVol = `c:/test/file` - } else { - testRequires(c, DaemonIsLinux) - volumeDir = "/test" - fileInVol = `/test/file` - } - dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") - - if _, code, err := dockerCmdWithError("run", "--volumes-from", "parent:ro", "busybox", "touch", fileInVol); err == nil || code == 0 { - c.Fatalf("run should fail because volume is ro: exit code %d", code) - } -} - -// Regression test for #1201 -func (s *DockerSuite) TestRunVolumesFromInReadWriteMode(c *check.C) { - var ( - volumeDir string - fileInVol string - ) - if daemonPlatform == "windows" { - volumeDir = `c:/test` // Forward-slash as using busybox - fileInVol = `c:/test/file` - } else { - volumeDir = "/test" - fileInVol = "/test/file" - } - - dockerCmd(c, "run", "--name", "parent", "-v", volumeDir, "busybox", "true") - dockerCmd(c, "run", "--volumes-from", "parent:rw", "busybox", "touch", fileInVol) - - if out, _, err := dockerCmdWithError("run", "--volumes-from", "parent:bar", "busybox", "touch", 
fileInVol); err == nil || !strings.Contains(out, `invalid mode: bar`) { - c.Fatalf("running --volumes-from parent:bar should have failed with invalid mode: %q", out) - } - - dockerCmd(c, "run", "--volumes-from", "parent", "busybox", "touch", fileInVol) -} - -func (s *DockerSuite) TestVolumesFromGetsProperMode(c *check.C) { - testRequires(c, SameHostDaemon) - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - hostpath := randomTmpDirPath("test", daemonPlatform) - if err := os.MkdirAll(hostpath, 0755); err != nil { - c.Fatalf("Failed to create %s: %q", hostpath, err) - } - defer os.RemoveAll(hostpath) - - // TODO Windows: Temporary check - remove once TP5 support is dropped - if daemonPlatform == "windows" && windowsDaemonKV < 14350 { - c.Skip("Needs later Windows build for RO volumes") - } - dockerCmd(c, "run", "--name", "parent", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") - - // Expect this "rw" mode to be ignored since the inherited volume is "ro" - if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent:rw", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { - c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `rw`") - } - - dockerCmd(c, "run", "--name", "parent2", "-v", hostpath+":"+prefix+slash+"test:ro", "busybox", "true") - - // Expect this to be read-only since both are "ro" - if _, _, err := dockerCmdWithError("run", "--volumes-from", "parent2:ro", "busybox", "touch", prefix+slash+"test"+slash+"file"); err == nil { - c.Fatal("Expected volumes-from to inherit read-only volume even when passing in `ro`") - } -} - -// Test for GH#10618 -func (s *DockerSuite) TestRunNoDupVolumes(c *check.C) { - path1 := randomTmpDirPath("test1", daemonPlatform) - path2 := randomTmpDirPath("test2", daemonPlatform) - - someplace := ":/someplace" - if daemonPlatform == "windows" { - // Windows requires that the source directory exists before calling HCS - testRequires(c, SameHostDaemon) - someplace = `:c:\someplace` - if err := os.MkdirAll(path1, 0755); err != nil { - c.Fatalf("Failed to create %s: %q", path1, err) - } - defer os.RemoveAll(path1) - if err := os.MkdirAll(path2, 0755); err != nil { - c.Fatalf("Failed to create %s: %q", path2, err) - } - defer os.RemoveAll(path2) - } - mountstr1 := path1 + someplace - mountstr2 := path2 + someplace - - if out, _, err := dockerCmdWithError("run", "-v", mountstr1, "-v", mountstr2, "busybox", "true"); err == nil { - c.Fatal("Expected error about duplicate mount definitions") - } else { - if !strings.Contains(out, "Duplicate mount point") { - c.Fatalf("Expected 'duplicate mount point' error, got %v", out) - } - } - - // Test for https://github.com/docker/docker/issues/22093 - volumename1 := "test1" - volumename2 := "test2" - volume1 := volumename1 + someplace - volume2 := volumename2 + someplace - if out, _, err := dockerCmdWithError("run", "-v", volume1, "-v", volume2, "busybox", "true"); err == nil { - c.Fatal("Expected error about duplicate mount definitions") - } else { - if !strings.Contains(out, "Duplicate mount point") { - c.Fatalf("Expected 'duplicate mount point' error, got %v", out) - } - } - // the failed create should still have created volumename1 or volumename2, - // so we should be able to remove whichever of the two exists - out, _ := dockerCmd(c, "volume", "ls") - if strings.Contains(out, volumename1) { - dockerCmd(c, "volume", "rm", volumename1) - } else { - dockerCmd(c, "volume", "rm", volumename2) - } -} - -// Test for #1351 -func (s *DockerSuite)
TestRunApplyVolumesFromBeforeVolumes(c *check.C) { - prefix := "" - if daemonPlatform == "windows" { - prefix = `c:` - } - dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") - dockerCmd(c, "run", "--volumes-from", "parent", "-v", prefix+"/test", "busybox", "cat", prefix+"/test/foo") -} - -func (s *DockerSuite) TestRunMultipleVolumesFrom(c *check.C) { - prefix := "" - if daemonPlatform == "windows" { - prefix = `c:` - } - dockerCmd(c, "run", "--name", "parent1", "-v", prefix+"/test", "busybox", "touch", prefix+"/test/foo") - dockerCmd(c, "run", "--name", "parent2", "-v", prefix+"/other", "busybox", "touch", prefix+"/other/bar") - dockerCmd(c, "run", "--volumes-from", "parent1", "--volumes-from", "parent2", "busybox", "sh", "-c", "cat /test/foo && cat /other/bar") -} - -// this test verifies the ID format for the container -func (s *DockerSuite) TestRunVerifyContainerID(c *check.C) { - out, exit, err := dockerCmdWithError("run", "-d", "busybox", "true") - if err != nil { - c.Fatal(err) - } - if exit != 0 { - c.Fatalf("expected exit code 0 received %d", exit) - } - - match, err := regexp.MatchString("^[0-9a-f]{64}$", strings.TrimSuffix(out, "\n")) - if err != nil { - c.Fatal(err) - } - if !match { - c.Fatalf("Invalid container ID: %s", out) - } -} - -// Test that creating a container with a volume doesn't crash. Regression test for #995. -func (s *DockerSuite) TestRunCreateVolume(c *check.C) { - prefix := "" - if daemonPlatform == "windows" { - prefix = `c:` - } - dockerCmd(c, "run", "-v", prefix+"/var/lib/data", "busybox", "true") -} - -// Test that creating a volume with a symlink in its path works correctly. Test for #5152. -// Note that this bug happens only with symlinks with a target that starts with '/'. -func (s *DockerSuite) TestRunCreateVolumeWithSymlink(c *check.C) { - // Cannot run on Windows as relies on Linux-specific functionality (sh -c mount...) - testRequires(c, DaemonIsLinux) - image := "docker-test-createvolumewithsymlink" - - buildCmd := exec.Command(dockerBinary, "build", "-t", image, "-") - buildCmd.Stdin = strings.NewReader(`FROM busybox - RUN ln -s home /bar`) - buildCmd.Dir = workingDirectory - err := buildCmd.Run() - if err != nil { - c.Fatalf("could not build '%s': %v", image, err) - } - - _, exitCode, err := dockerCmdWithError("run", "-v", "/bar/foo", "--name", "test-createvolumewithsymlink", image, "sh", "-c", "mount | grep -q /home/foo") - if err != nil || exitCode != 0 { - c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) - } - - volPath, err := inspectMountSourceField("test-createvolumewithsymlink", "/bar/foo") - c.Assert(err, checker.IsNil) - - _, exitCode, err = dockerCmdWithError("rm", "-v", "test-createvolumewithsymlink") - if err != nil || exitCode != 0 { - c.Fatalf("[rm] err: %v, exitcode: %d", err, exitCode) - } - - _, err = os.Stat(volPath) - if !os.IsNotExist(err) { - c.Fatalf("[open] (expecting 'file does not exist' error) err: %v, volPath: %s", err, volPath) - } -} - -// Tests that a volume path containing a symlink is usable from a container that mounts it with `--volumes-from`.
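TestRunVerifyContainerID above pins down the ID contract the other tests rely on when trimming and truncating IDs: 64 lowercase hex characters. The same check as a standalone program:

package main

import (
	"fmt"
	"regexp"
)

// validContainerID applies the same pattern as TestRunVerifyContainerID.
var validContainerID = regexp.MustCompile(`^[0-9a-f]{64}$`)

func main() {
	fmt.Println(validContainerID.MatchString("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef")) // true
	fmt.Println(validContainerID.MatchString("not-an-id"))                                                         // false
}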
-func (s *DockerSuite) TestRunVolumesFromSymlinkPath(c *check.C) { - // TODO Windows (Post TP5): This test cannot run on a Windows daemon as - // Windows does not support symlinks inside a volume path - testRequires(c, DaemonIsLinux) - name := "docker-test-volumesfromsymlinkpath" - prefix := "" - dfContents := `FROM busybox - RUN ln -s home /foo - VOLUME ["/foo/bar"]` - - if daemonPlatform == "windows" { - prefix = `c:` - dfContents = `FROM ` + WindowsBaseImage + ` - RUN mkdir c:\home - RUN mklink /D c:\foo c:\home - VOLUME ["c:/foo/bar"] - ENTRYPOINT c:\windows\system32\cmd.exe` - } - - buildCmd := exec.Command(dockerBinary, "build", "-t", name, "-") - buildCmd.Stdin = strings.NewReader(dfContents) - buildCmd.Dir = workingDirectory - err := buildCmd.Run() - if err != nil { - c.Fatalf("could not build 'docker-test-volumesfromsymlinkpath': %v", err) - } - - out, exitCode, err := dockerCmdWithError("run", "--name", "test-volumesfromsymlinkpath", name) - if err != nil || exitCode != 0 { - c.Fatalf("[run] (volume) err: %v, exitcode: %d, out: %s", err, exitCode, out) - } - - _, exitCode, err = dockerCmdWithError("run", "--volumes-from", "test-volumesfromsymlinkpath", "busybox", "sh", "-c", "ls "+prefix+"/foo | grep -q bar") - if err != nil || exitCode != 0 { - c.Fatalf("[run] err: %v, exitcode: %d", err, exitCode) - } -} - -func (s *DockerSuite) TestRunExitCode(c *check.C) { - var ( - exit int - err error - ) - - _, exit, err = dockerCmdWithError("run", "busybox", "/bin/sh", "-c", "exit 72") - - if err == nil { - c.Fatal("expected a non-nil error for the non-zero exit code") - } - if exit != 72 { - c.Fatalf("expected exit code 72 received %d", exit) - } -} - -func (s *DockerSuite) TestRunUserDefaults(c *check.C) { - expected := "uid=0(root) gid=0(root)" - if daemonPlatform == "windows" { - expected = "uid=1000(ContainerAdministrator) gid=1000(ContainerAdministrator)" - } - out, _ := dockerCmd(c, "run", "busybox", "id") - if !strings.Contains(out, expected) { - c.Fatalf("expected '%s' got %s", expected, out) - } -} - -func (s *DockerSuite) TestRunUserByName(c *check.C) { - // TODO Windows: This test cannot run on a Windows daemon as Windows does - // not support the use of -u - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-u", "root", "busybox", "id") - if !strings.Contains(out, "uid=0(root) gid=0(root)") { - c.Fatalf("expected root user got %s", out) - } -} - -func (s *DockerSuite) TestRunUserByID(c *check.C) { - // TODO Windows: This test cannot run on a Windows daemon as Windows does - // not support the use of -u - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-u", "1", "busybox", "id") - if !strings.Contains(out, "uid=1(daemon) gid=1(daemon)") { - c.Fatalf("expected daemon user got %s", out) - } -} - -func (s *DockerSuite) TestRunUserByIDBig(c *check.C) { - // TODO Windows: This test cannot run on a Windows daemon as Windows does - // not support the use of -u - testRequires(c, DaemonIsLinux, NotArm) - out, _, err := dockerCmdWithError("run", "-u", "2147483648", "busybox", "id") - if err == nil { - c.Fatal("expected an error, but got none:", out) - } - if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { - c.Fatalf("expected error about uids range, got %s", out) - } -} - -func (s *DockerSuite) TestRunUserByIDNegative(c *check.C) { - // TODO Windows: This test cannot run on a Windows daemon as Windows does - // not support the use of -u - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "-u", "-1", "busybox", "id") -
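- // As with the out-of-range id above, libcontainer restricts uids and gids
- // to the range 0 through 2147483647 (1<<31 - 1), so 2147483648 and -1
- // should both be rejected with ErrRange.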
if err == nil { - c.Fatal("expected an error, but got none:", out) - } - if !strings.Contains(strings.ToUpper(out), strings.ToUpper(libcontainerUser.ErrRange.Error())) { - c.Fatalf("expected error about uids range, got %s", out) - } -} - -func (s *DockerSuite) TestRunUserByIDZero(c *check.C) { - // TODO Windows: This test cannot run on a Windows daemon as Windows does - // not support the use of -u - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "-u", "0", "busybox", "id") - if err != nil { - c.Fatal(err, out) - } - if !strings.Contains(out, "uid=0(root) gid=0(root) groups=10(wheel)") { - c.Fatalf("expected root user, got %s", out) - } -} - -func (s *DockerSuite) TestRunUserNotFound(c *check.C) { - // TODO Windows: This test cannot run on a Windows daemon as Windows does - // not support the use of -u - testRequires(c, DaemonIsLinux) - _, _, err := dockerCmdWithError("run", "-u", "notme", "busybox", "id") - if err == nil { - c.Fatal("unknown user should cause container to fail") - } -} - -func (s *DockerSuite) TestRunTwoConcurrentContainers(c *check.C) { - sleepTime := "2" - group := sync.WaitGroup{} - group.Add(2) - - errChan := make(chan error, 2) - for i := 0; i < 2; i++ { - go func() { - defer group.Done() - _, _, err := dockerCmdWithError("run", "busybox", "sleep", sleepTime) - errChan <- err - }() - } - - group.Wait() - close(errChan) - - for err := range errChan { - c.Assert(err, check.IsNil) - } -} - -func (s *DockerSuite) TestRunEnvironment(c *check.C) { - // TODO Windows: Environment handling is different between Linux and - // Windows and this test relies currently on unix functionality. - testRequires(c, DaemonIsLinux) - cmd := exec.Command(dockerBinary, "run", "-h", "testing", "-e=FALSE=true", "-e=TRUE", "-e=TRICKY", "-e=HOME=", "busybox", "env") - cmd.Env = append(os.Environ(), - "TRUE=false", - "TRICKY=tri\ncky\n", - ) - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatal(err, out) - } - - actualEnv := strings.Split(strings.TrimSpace(out), "\n") - sort.Strings(actualEnv) - - goodEnv := []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "HOSTNAME=testing", - "FALSE=true", - "TRUE=false", - "TRICKY=tri", - "cky", - "", - "HOME=/root", - } - sort.Strings(goodEnv) - if len(goodEnv) != len(actualEnv) { - c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) - } - for i := range goodEnv { - if actualEnv[i] != goodEnv[i] { - c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) - } - } -} - -func (s *DockerSuite) TestRunEnvironmentErase(c *check.C) { - // TODO Windows: Environment handling is different between Linux and - // Windows and this test relies currently on unix functionality.
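- // In short: `-e NAME=value` sets NAME explicitly, while a bare `-e NAME`
- // copies NAME from the client environment, and removes it from the
- // container when the client does not have it set. For example, with FOO
- // unset locally,
- //   docker run -e FOO busybox env
- // should print no FOO line at all.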
- testRequires(c, DaemonIsLinux) - - // Test to make sure that when we use -e on env vars that are - // not set in our local env that they're removed (if present) in - // the container - - cmd := exec.Command(dockerBinary, "run", "-e", "FOO", "-e", "HOSTNAME", "busybox", "env") - cmd.Env = appendBaseEnv(true) - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatal(err, out) - } - - actualEnv := strings.Split(strings.TrimSpace(out), "\n") - sort.Strings(actualEnv) - - goodEnv := []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "HOME=/root", - } - sort.Strings(goodEnv) - if len(goodEnv) != len(actualEnv) { - c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) - } - for i := range goodEnv { - if actualEnv[i] != goodEnv[i] { - c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) - } - } -} - -func (s *DockerSuite) TestRunEnvironmentOverride(c *check.C) { - // TODO Windows: Environment handling is different between Linux and - // Windows and this test relies currently on unix functionality. - testRequires(c, DaemonIsLinux) - - // Test to make sure that when we use -e on env vars that are - // already in the env that we're overriding them - - cmd := exec.Command(dockerBinary, "run", "-e", "HOSTNAME", "-e", "HOME=/root2", "busybox", "env") - cmd.Env = appendBaseEnv(true, "HOSTNAME=bar") - - out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatal(err, out) - } - - actualEnv := strings.Split(strings.TrimSpace(out), "\n") - sort.Strings(actualEnv) - - goodEnv := []string{ - "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin", - "HOME=/root2", - "HOSTNAME=bar", - } - sort.Strings(goodEnv) - if len(goodEnv) != len(actualEnv) { - c.Fatalf("Wrong environment: should be %d variables, not %d: %q", len(goodEnv), len(actualEnv), strings.Join(actualEnv, ", ")) - } - for i := range goodEnv { - if actualEnv[i] != goodEnv[i] { - c.Fatalf("Wrong environment variable: should be %s, not %s", goodEnv[i], actualEnv[i]) - } - } -} - -func (s *DockerSuite) TestRunContainerNetwork(c *check.C) { - if daemonPlatform == "windows" { - // Windows busybox does not have ping. Use built in ping instead. - dockerCmd(c, "run", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") - } else { - dockerCmd(c, "run", "busybox", "ping", "-c", "1", "127.0.0.1") - } -} - -func (s *DockerSuite) TestRunNetHostNotAllowedWithLinks(c *check.C) { - // TODO Windows: This is Linux specific as --link is not supported and - // this will be deprecated in favor of container networking model. - testRequires(c, DaemonIsLinux, NotUserNamespace) - dockerCmd(c, "run", "--name", "linked", "busybox", "true") - - _, _, err := dockerCmdWithError("run", "--net=host", "--link", "linked:linked", "busybox", "true") - if err == nil { - c.Fatal("Expected error") - } -} - -// #7851 hostname outside container shows FQDN, inside only shortname -// For testing purposes it is not required to set host's hostname directly -// and use "--net=host" (as the original issue submitter did), as the same -// codepath is executed with "docker run -h ". Both were manually -// tested, but this testcase takes the simpler path of using "run -h .." -func (s *DockerSuite) TestRunFullHostnameSet(c *check.C) { - // TODO Windows: -h is not yet functional. 
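- // i.e. the FQDN passed via -h should come back verbatim:
- //   docker run -h foo.bar.baz busybox hostname   # -> foo.bar.baz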
- testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-h", "foo.bar.baz", "busybox", "hostname") - if actual := strings.Trim(out, "\r\n"); actual != "foo.bar.baz" { - c.Fatalf("expected hostname 'foo.bar.baz', received %s", actual) - } -} - -func (s *DockerSuite) TestRunPrivilegedCanMknod(c *check.C) { - // Not applicable for Windows as Windows daemon does not support - // the concept of --privileged, and mknod is a Unix concept. - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") - if actual := strings.Trim(out, "\r\n"); actual != "ok" { - c.Fatalf("expected output ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunUnprivilegedCanMknod(c *check.C) { - // Not applicable for Windows as Windows daemon does not support - // the concept of --privileged, and mknod is a Unix concept. - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") - if actual := strings.Trim(out, "\r\n"); actual != "ok" { - c.Fatalf("expected output ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunCapDropInvalid(c *check.C) { - // Not applicable for Windows as there is no concept of --cap-drop - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "--cap-drop=CHPASS", "busybox", "ls") - if err == nil { - c.Fatal(err, out) - } -} - -func (s *DockerSuite) TestRunCapDropCannotMknod(c *check.C) { - // Not applicable for Windows as there is no concept of --cap-drop or mknod - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "--cap-drop=MKNOD", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") - - if err == nil { - c.Fatal(err, out) - } - if actual := strings.Trim(out, "\r\n"); actual == "ok" { - c.Fatalf("expected output not ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunCapDropCannotMknodLowerCase(c *check.C) { - // Not applicable for Windows as there is no concept of --cap-drop or mknod - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "--cap-drop=mknod", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") - - if err == nil { - c.Fatal(err, out) - } - if actual := strings.Trim(out, "\r\n"); actual == "ok" { - c.Fatalf("expected output not ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunCapDropALLCannotMknod(c *check.C) { - // Not applicable for Windows as there is no concept of --cap-drop or mknod - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "--cap-drop=ALL", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") - if err == nil { - c.Fatal(err, out) - } - if actual := strings.Trim(out, "\r\n"); actual == "ok" { - c.Fatalf("expected output not ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunCapDropALLAddMknodCanMknod(c *check.C) { - // Not applicable for Windows as there is no concept of --cap-drop or mknod - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=MKNOD", "--cap-add=SETGID", "busybox", "sh", "-c", "mknod /tmp/sda b 8 0 && echo ok") - - if actual := strings.Trim(out, "\r\n"); actual != "ok" { - c.Fatalf("expected output ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunCapAddInvalid(c *check.C) { - // Not applicable for Windows as there is no concept of --cap-add - testRequires(c, DaemonIsLinux) - out, _, err := 
dockerCmdWithError("run", "--cap-add=CHPASS", "busybox", "ls") - if err == nil { - c.Fatal(err, out) - } -} - -func (s *DockerSuite) TestRunCapAddCanDownInterface(c *check.C) { - // Not applicable for Windows as there is no concept of --cap-add - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "--cap-add=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") - - if actual := strings.Trim(out, "\r\n"); actual != "ok" { - c.Fatalf("expected output ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunCapAddALLCanDownInterface(c *check.C) { - // Not applicable for Windows as there is no concept of --cap-add - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "--cap-add=ALL", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") - - if actual := strings.Trim(out, "\r\n"); actual != "ok" { - c.Fatalf("expected output ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunCapAddALLDropNetAdminCanDownInterface(c *check.C) { - // Not applicable for Windows as there is no concept of --cap-add - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "--cap-add=ALL", "--cap-drop=NET_ADMIN", "busybox", "sh", "-c", "ip link set eth0 down && echo ok") - if err == nil { - c.Fatal(err, out) - } - if actual := strings.Trim(out, "\r\n"); actual == "ok" { - c.Fatalf("expected output not ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunGroupAdd(c *check.C) { - // Not applicable for Windows as there is no concept of --group-add - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "--group-add=audio", "--group-add=staff", "--group-add=777", "busybox", "sh", "-c", "id") - - groupsList := "uid=0(root) gid=0(root) groups=10(wheel),29(audio),50(staff),777" - if actual := strings.Trim(out, "\r\n"); actual != groupsList { - c.Fatalf("expected output %s received %s", groupsList, actual) - } -} - -func (s *DockerSuite) TestRunPrivilegedCanMount(c *check.C) { - // Not applicable for Windows as there is no concept of --privileged - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") - - if actual := strings.Trim(out, "\r\n"); actual != "ok" { - c.Fatalf("expected output ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunUnprivilegedCannotMount(c *check.C) { - // Not applicable for Windows as there is no concept of unprivileged - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "busybox", "sh", "-c", "mount -t tmpfs none /tmp && echo ok") - - if err == nil { - c.Fatal(err, out) - } - if actual := strings.Trim(out, "\r\n"); actual == "ok" { - c.Fatalf("expected output not ok received %s", actual) - } -} - -func (s *DockerSuite) TestRunSysNotWritableInNonPrivilegedContainers(c *check.C) { - // Not applicable for Windows as there is no concept of unprivileged - testRequires(c, DaemonIsLinux, NotArm) - if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/sys/kernel/profiling"); err == nil || code == 0 { - c.Fatal("sys should not be writable in a non privileged container") - } -} - -func (s *DockerSuite) TestRunSysWritableInPrivilegedContainers(c *check.C) { - // Not applicable for Windows as there is no concept of unprivileged - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - if _, code, err := dockerCmdWithError("run", "--privileged", "busybox", "touch", "/sys/kernel/profiling"); err != nil || code != 0 { - c.Fatalf("sys should be writable in 
privileged container") - } -} - -func (s *DockerSuite) TestRunProcNotWritableInNonPrivilegedContainers(c *check.C) { - // Not applicable for Windows as there is no concept of unprivileged - testRequires(c, DaemonIsLinux) - if _, code, err := dockerCmdWithError("run", "busybox", "touch", "/proc/sysrq-trigger"); err == nil || code == 0 { - c.Fatal("proc should not be writable in a non privileged container") - } -} - -func (s *DockerSuite) TestRunProcWritableInPrivilegedContainers(c *check.C) { - // Not applicable for Windows as there is no concept of --privileged - testRequires(c, DaemonIsLinux, NotUserNamespace) - if _, code := dockerCmd(c, "run", "--privileged", "busybox", "sh", "-c", "touch /proc/sysrq-trigger"); code != 0 { - c.Fatalf("proc should be writable in privileged container") - } -} - -func (s *DockerSuite) TestRunDeviceNumbers(c *check.C) { - // Not applicable on Windows as /dev/ is a Unix specific concept - // TODO: NotUserNamespace could be removed here if "root" "root" is replaced w user - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "ls -l /dev/null") - deviceLineFields := strings.Fields(out) - deviceLineFields[6] = "" - deviceLineFields[7] = "" - deviceLineFields[8] = "" - expected := []string{"crw-rw-rw-", "1", "root", "root", "1,", "3", "", "", "", "/dev/null"} - - if !(reflect.DeepEqual(deviceLineFields, expected)) { - c.Fatalf("expected output\ncrw-rw-rw- 1 root root 1, 3 May 24 13:29 /dev/null\n received\n %s\n", out) - } -} - -func (s *DockerSuite) TestRunThatCharacterDevicesActLikeCharacterDevices(c *check.C) { - // Not applicable on Windows as /dev/ is a Unix specific concept - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "busybox", "sh", "-c", "dd if=/dev/zero of=/zero bs=1k count=5 2> /dev/null ; du -h /zero") - if actual := strings.Trim(out, "\r\n"); actual[0] == '0' { - c.Fatalf("expected a new file called /zero to be create that is greater than 0 bytes long, but du says: %s", actual) - } -} - -func (s *DockerSuite) TestRunUnprivilegedWithChroot(c *check.C) { - // Not applicable on Windows as it does not support chroot - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "busybox", "chroot", "/", "true") -} - -func (s *DockerSuite) TestRunAddingOptionalDevices(c *check.C) { - // Not applicable on Windows as Windows does not support --device - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "--device", "/dev/zero:/dev/nulo", "busybox", "sh", "-c", "ls /dev/nulo") - if actual := strings.Trim(out, "\r\n"); actual != "/dev/nulo" { - c.Fatalf("expected output /dev/nulo, received %s", actual) - } -} - -func (s *DockerSuite) TestRunAddingOptionalDevicesNoSrc(c *check.C) { - // Not applicable on Windows as Windows does not support --device - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "--device", "/dev/zero:rw", "busybox", "sh", "-c", "ls /dev/zero") - if actual := strings.Trim(out, "\r\n"); actual != "/dev/zero" { - c.Fatalf("expected output /dev/zero, received %s", actual) - } -} - -func (s *DockerSuite) TestRunAddingOptionalDevicesInvalidMode(c *check.C) { - // Not applicable on Windows as Windows does not support --device - testRequires(c, DaemonIsLinux, NotUserNamespace) - _, _, err := dockerCmdWithError("run", "--device", "/dev/zero:ro", "busybox", "sh", "-c", "ls /dev/zero") - if err == nil { - c.Fatalf("run container with device mode ro should fail") - } -} - -func (s *DockerSuite) TestRunModeHostname(c *check.C) { 
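- // Two cases are checked below: an explicit -h sets the container's
- // /etc/hostname, while --net=host shares the host's UTS namespace, so the
- // container's /etc/hostname should match os.Hostname() on the daemon host.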
- // Not applicable on Windows as Windows does not support -h - testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) - - out, _ := dockerCmd(c, "run", "-h=testhostname", "busybox", "cat", "/etc/hostname") - - if actual := strings.Trim(out, "\r\n"); actual != "testhostname" { - c.Fatalf("expected 'testhostname', but says: %q", actual) - } - - out, _ = dockerCmd(c, "run", "--net=host", "busybox", "cat", "/etc/hostname") - - hostname, err := os.Hostname() - if err != nil { - c.Fatal(err) - } - if actual := strings.Trim(out, "\r\n"); actual != hostname { - c.Fatalf("expected %q, but says: %q", hostname, actual) - } -} - -func (s *DockerSuite) TestRunRootWorkdir(c *check.C) { - out, _ := dockerCmd(c, "run", "--workdir", "/", "busybox", "pwd") - expected := "/\n" - if daemonPlatform == "windows" { - expected = "C:" + expected - } - if out != expected { - c.Fatalf("pwd returned %q (expected %s)", out, expected) - } -} - -func (s *DockerSuite) TestRunAllowBindMountingRoot(c *check.C) { - if daemonPlatform == "windows" { - // Windows busybox will fail with Permission Denied on items such as pagefile.sys - dockerCmd(c, "run", "-v", `c:\:c:\host`, WindowsBaseImage, "cmd", "-c", "dir", `c:\host`) - } else { - dockerCmd(c, "run", "-v", "/:/host", "busybox", "ls", "/host") - } -} - -func (s *DockerSuite) TestRunDisallowBindMountingRootToRoot(c *check.C) { - mount := "/:/" - targetDir := "/host" - if daemonPlatform == "windows" { - mount = `c:\:c:\` - targetDir = "c:/host" // Forward slash as using busybox - } - out, _, err := dockerCmdWithError("run", "-v", mount, "busybox", "ls", targetDir) - if err == nil { - c.Fatal(out, err) - } -} - -// Verify that a container gets default DNS when only localhost resolvers exist -func (s *DockerSuite) TestRunDnsDefaultOptions(c *check.C) { - // Not applicable on Windows as this is testing Unix specific functionality - testRequires(c, SameHostDaemon, DaemonIsLinux) - - // preserve original resolv.conf for restoring after test - origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") - if os.IsNotExist(err) { - c.Fatalf("/etc/resolv.conf does not exist") - } - // defer restoring the original conf - defer func() { - if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { - c.Fatal(err) - } - }() - - // test 3 cases: standard IPv4 localhost, commented out localhost, and IPv6 localhost - // 2 are removed from the file at container start, and the 3rd (commented out) one is ignored by - // GetNameservers(), leading to a replacement of nameservers with the default set - tmpResolvConf := []byte("nameserver 127.0.0.1\n#nameserver 127.0.2.1\nnameserver ::1") - if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { - c.Fatal(err) - } - - actual, _ := dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") - // check that the actual defaults are appended to the commented out - // localhost resolver (which should be preserved) - // NOTE: if we ever change the defaults from google dns, this will break - expected := "#nameserver 127.0.2.1\n\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" - if actual != expected { - c.Fatalf("expected resolv.conf to be: %q, but was: %q", expected, actual) - } -} - -func (s *DockerSuite) TestRunDnsOptions(c *check.C) { - // Not applicable on Windows as Windows does not support --dns*, or - // the Unix-specific functionality of resolv.conf.
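- // These flags map directly onto resolv.conf directives; the first
- // invocation below is expected to produce:
- //
- //   search mydomain
- //   nameserver 127.0.0.1
- //   options ndots:9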
- testRequires(c, DaemonIsLinux) - out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=mydomain", "--dns-opt=ndots:9", "busybox", "cat", "/etc/resolv.conf") - - // The client will get a warning on stderr when setting DNS to a localhost address; verify this: - if !strings.Contains(stderr, "Localhost DNS setting") { - c.Fatalf("Expected warning on stderr about localhost resolver, but got %q", stderr) - } - - actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) - if actual != "search mydomain nameserver 127.0.0.1 options ndots:9" { - c.Fatalf("expected 'search mydomain nameserver 127.0.0.1 options ndots:9', but says: %q", actual) - } - - out, stderr, _ = dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--dns-search=.", "--dns-opt=ndots:3", "busybox", "cat", "/etc/resolv.conf") - - actual = strings.Replace(strings.Trim(strings.Trim(out, "\r\n"), " "), "\n", " ", -1) - if actual != "nameserver 127.0.0.1 options ndots:3" { - c.Fatalf("expected 'nameserver 127.0.0.1 options ndots:3', but says: %q", actual) - } -} - -func (s *DockerSuite) TestRunDnsRepeatOptions(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=1.1.1.1", "--dns=2.2.2.2", "--dns-search=mydomain", "--dns-search=mydomain2", "--dns-opt=ndots:9", "--dns-opt=timeout:3", "busybox", "cat", "/etc/resolv.conf") - - actual := strings.Replace(strings.Trim(out, "\r\n"), "\n", " ", -1) - if actual != "search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3" { - c.Fatalf("expected 'search mydomain mydomain2 nameserver 1.1.1.1 nameserver 2.2.2.2 options ndots:9 timeout:3', but says: %q", actual) - } -} - -func (s *DockerSuite) TestRunDnsOptionsBasedOnHostResolvConf(c *check.C) { - // Not applicable on Windows as testing Unix specific functionality - testRequires(c, SameHostDaemon, DaemonIsLinux) - - origResolvConf, err := ioutil.ReadFile("/etc/resolv.conf") - if os.IsNotExist(err) { - c.Fatalf("/etc/resolv.conf does not exist") - } - - hostNameservers := resolvconf.GetNameservers(origResolvConf, types.IP) - hostSearch := resolvconf.GetSearchDomains(origResolvConf) - - var out string - out, _ = dockerCmd(c, "run", "--dns=127.0.0.1", "busybox", "cat", "/etc/resolv.conf") - - if actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "127.0.0.1" { - c.Fatalf("expected '127.0.0.1', but says: %q", string(actualNameservers[0])) - } - - actualSearch := resolvconf.GetSearchDomains([]byte(out)) - if len(actualSearch) != len(hostSearch) { - c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) - } - for i := range actualSearch { - if actualSearch[i] != hostSearch[i] { - c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) - } - } - - out, _ = dockerCmd(c, "run", "--dns-search=mydomain", "busybox", "cat", "/etc/resolv.conf") - - actualNameservers := resolvconf.GetNameservers([]byte(out), types.IP) - if len(actualNameservers) != len(hostNameservers) { - c.Fatalf("expected %q nameserver(s), but it has: %q", len(hostNameservers), len(actualNameservers)) - } - for i := range actualNameservers { - if actualNameservers[i] != hostNameservers[i] { - c.Fatalf("expected %q nameserver, but says: %q", actualNameservers[i], hostNameservers[i]) - } - } - - if actualSearch = resolvconf.GetSearchDomains([]byte(out)); string(actualSearch[0]) != "mydomain" { - c.Fatalf("expected 'mydomain', but says: %q", 
string(actualSearch[0])) - } - - // test with file - tmpResolvConf := []byte("search example.com\nnameserver 12.34.56.78\nnameserver 127.0.0.1") - if err := ioutil.WriteFile("/etc/resolv.conf", tmpResolvConf, 0644); err != nil { - c.Fatal(err) - } - // put the old resolvconf back - defer func() { - if err := ioutil.WriteFile("/etc/resolv.conf", origResolvConf, 0644); err != nil { - c.Fatal(err) - } - }() - - resolvConf, err := ioutil.ReadFile("/etc/resolv.conf") - if os.IsNotExist(err) { - c.Fatalf("/etc/resolv.conf does not exist") - } - - hostNameservers = resolvconf.GetNameservers(resolvConf, types.IP) - hostSearch = resolvconf.GetSearchDomains(resolvConf) - - out, _ = dockerCmd(c, "run", "busybox", "cat", "/etc/resolv.conf") - if actualNameservers = resolvconf.GetNameservers([]byte(out), types.IP); string(actualNameservers[0]) != "12.34.56.78" || len(actualNameservers) != 1 { - c.Fatalf("expected '12.34.56.78', but has: %v", actualNameservers) - } - - actualSearch = resolvconf.GetSearchDomains([]byte(out)) - if len(actualSearch) != len(hostSearch) { - c.Fatalf("expected %q search domain(s), but it has: %q", len(hostSearch), len(actualSearch)) - } - for i := range actualSearch { - if actualSearch[i] != hostSearch[i] { - c.Fatalf("expected %q domain, but says: %q", actualSearch[i], hostSearch[i]) - } - } -} - -// Test to see if a non-root user can resolve a DNS name. Also -// check if the container resolv.conf file has at least 0644 perm. -func (s *DockerSuite) TestRunNonRootUserResolvName(c *check.C) { - // Not applicable on Windows as Windows does not support --user - testRequires(c, SameHostDaemon, Network, DaemonIsLinux, NotArm) - - dockerCmd(c, "run", "--name=testperm", "--user=nobody", "busybox", "nslookup", "apt.dockerproject.org") - - cID, err := getIDByName("testperm") - if err != nil { - c.Fatal(err) - } - - fmode := (os.FileMode)(0644) - finfo, err := os.Stat(containerStorageFile(cID, "resolv.conf")) - if err != nil { - c.Fatal(err) - } - - if (finfo.Mode() & fmode) != fmode { - c.Fatalf("Expected container resolv.conf mode to be at least %s, instead got %s", fmode.String(), finfo.Mode().String()) - } -} - -// Test if container resolv.conf gets updated the next time it restarts -// if host /etc/resolv.conf has changed. This only applies if the container -// uses the host's /etc/resolv.conf and does not have any dns options provided. -func (s *DockerSuite) TestRunResolvconfUpdate(c *check.C) { - // Not applicable on Windows as testing unix specific functionality - testRequires(c, SameHostDaemon, DaemonIsLinux) - c.Skip("Unstable test, to be re-activated once #19937 is resolved") - - tmpResolvConf := []byte("search pommesfrites.fr\nnameserver 12.34.56.78\n") - tmpLocalhostResolvConf := []byte("nameserver 127.0.0.1") - - //take a copy of resolv.conf for restoring after test completes - resolvConfSystem, err := ioutil.ReadFile("/etc/resolv.conf") - if err != nil { - c.Fatal(err) - } - - // This test case is meant to test monitoring resolv.conf when it is - // a regular file not a bind mounc. So we unmount resolv.conf and replace - // it with a file containing the original settings. - mounted, err := mount.Mounted("/etc/resolv.conf") - if err != nil { - c.Fatal(err) - } - if mounted { - cmd := exec.Command("umount", "/etc/resolv.conf") - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } - } - - //cleanup - defer func() { - if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { - c.Fatal(err) - } - }() - - //1. 
test that a restarting container gets an updated resolv.conf - dockerCmd(c, "run", "--name=first", "busybox", "true") - containerID1, err := getIDByName("first") - if err != nil { - c.Fatal(err) - } - - // replace resolv.conf with our temporary copy - bytesResolvConf := []byte(tmpResolvConf) - if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { - c.Fatal(err) - } - - // start the container again to pickup changes - dockerCmd(c, "start", "first") - - // check for update in container - containerResolv, err := readContainerFile(containerID1, "resolv.conf") - if err != nil { - c.Fatal(err) - } - if !bytes.Equal(containerResolv, bytesResolvConf) { - c.Fatalf("Restarted container does not have updated resolv.conf; expected %q, got %q", tmpResolvConf, string(containerResolv)) - } - - /* //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) - if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { - c.Fatal(err) - } */ - //2. test that a restarting container does not receive resolv.conf updates - // if it modified the container copy of the starting point resolv.conf - dockerCmd(c, "run", "--name=second", "busybox", "sh", "-c", "echo 'search mylittlepony.com' >>/etc/resolv.conf") - containerID2, err := getIDByName("second") - if err != nil { - c.Fatal(err) - } - - //make a change to resolv.conf (in this case replacing our tmp copy with orig copy) - if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { - c.Fatal(err) - } - - // start the container again - dockerCmd(c, "start", "second") - - // check for update in container - containerResolv, err = readContainerFile(containerID2, "resolv.conf") - if err != nil { - c.Fatal(err) - } - - if bytes.Equal(containerResolv, resolvConfSystem) { - c.Fatalf("Container's resolv.conf should not have been updated with host resolv.conf: %q", string(containerResolv)) - } - - //3. test that a running container's resolv.conf is not modified while running - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - runningContainerID := strings.TrimSpace(out) - - // replace resolv.conf - if err := ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { - c.Fatal(err) - } - - // check for update in container - containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") - if err != nil { - c.Fatal(err) - } - - if bytes.Equal(containerResolv, bytesResolvConf) { - c.Fatalf("Running container should not have updated resolv.conf; expected %q, got %q", string(resolvConfSystem), string(containerResolv)) - } - - //4. test that a running container's resolv.conf is updated upon restart - // (the above container is still running..) - dockerCmd(c, "restart", runningContainerID) - - // check for update in container - containerResolv, err = readContainerFile(runningContainerID, "resolv.conf") - if err != nil { - c.Fatal(err) - } - if !bytes.Equal(containerResolv, bytesResolvConf) { - c.Fatalf("Restarted container should have updated resolv.conf; expected %q, got %q", string(bytesResolvConf), string(containerResolv)) - } - - //5. 
test that additions of a localhost resolver are cleaned from - // host resolv.conf before updating container's resolv.conf copies - - // replace resolv.conf with a localhost-only nameserver copy - bytesResolvConf = []byte(tmpLocalhostResolvConf) - if err = ioutil.WriteFile("/etc/resolv.conf", bytesResolvConf, 0644); err != nil { - c.Fatal(err) - } - - // start the container again to pickup changes - dockerCmd(c, "start", "first") - - // our first exited container ID should have been updated, but with default DNS - // after the cleanup of resolv.conf found only a localhost nameserver: - containerResolv, err = readContainerFile(containerID1, "resolv.conf") - if err != nil { - c.Fatal(err) - } - - expected := "\nnameserver 8.8.8.8\nnameserver 8.8.4.4\n" - if !bytes.Equal(containerResolv, []byte(expected)) { - c.Fatalf("Container does not have cleaned/replaced DNS in resolv.conf; expected %q, got %q", expected, string(containerResolv)) - } - - //6. Test that replacing (as opposed to modifying) resolv.conf triggers an update - // of containers' resolv.conf. - - // Restore the original resolv.conf - if err := ioutil.WriteFile("/etc/resolv.conf", resolvConfSystem, 0644); err != nil { - c.Fatal(err) - } - - // Run the container so it picks up the old settings - dockerCmd(c, "run", "--name=third", "busybox", "true") - containerID3, err := getIDByName("third") - if err != nil { - c.Fatal(err) - } - - // Create a modified resolv.conf.aside and override resolv.conf with it - bytesResolvConf = []byte(tmpResolvConf) - if err := ioutil.WriteFile("/etc/resolv.conf.aside", bytesResolvConf, 0644); err != nil { - c.Fatal(err) - } - - err = os.Rename("/etc/resolv.conf.aside", "/etc/resolv.conf") - if err != nil { - c.Fatal(err) - } - - // start the container again to pickup changes - dockerCmd(c, "start", "third") - - // check for update in container - containerResolv, err = readContainerFile(containerID3, "resolv.conf") - if err != nil { - c.Fatal(err) - } - if !bytes.Equal(containerResolv, bytesResolvConf) { - c.Fatalf("Stopped container does not have updated resolv.conf; expected\n%q\n got\n%q", tmpResolvConf, string(containerResolv)) - } - - //cleanup, restore original resolv.conf happens in defer func() -} - -func (s *DockerSuite) TestRunAddHost(c *check.C) { - // Not applicable on Windows as it does not support --add-host - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "--add-host=extra:86.75.30.9", "busybox", "grep", "extra", "/etc/hosts") - - actual := strings.Trim(out, "\r\n") - if actual != "86.75.30.9\textra" { - c.Fatalf("expected '86.75.30.9\textra', but says: %q", actual) - } -} - -// Regression test for #6983 -func (s *DockerSuite) TestRunAttachStdErrOnlyTTYMode(c *check.C) { - _, exitCode := dockerCmd(c, "run", "-t", "-a", "stderr", "busybox", "true") - if exitCode != 0 { - c.Fatalf("Container should have exited with error code 0") - } -} - -// Regression test for #6983 -func (s *DockerSuite) TestRunAttachStdOutOnlyTTYMode(c *check.C) { - _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "busybox", "true") - if exitCode != 0 { - c.Fatalf("Container should have exited with error code 0") - } -} - -// Regression test for #6983 -func (s *DockerSuite) TestRunAttachStdOutAndErrTTYMode(c *check.C) { - _, exitCode := dockerCmd(c, "run", "-t", "-a", "stdout", "-a", "stderr", "busybox", "true") - if exitCode != 0 { - c.Fatalf("Container should have exited with error code 0") - } -} - -// Test for #10388 - this will run the same test as TestRunAttachStdOutAndErrTTYMode -// but 
using --attach instead of -a to make sure we read the flag correctly -func (s *DockerSuite) TestRunAttachWithDetach(c *check.C) { - cmd := exec.Command(dockerBinary, "run", "-d", "--attach", "stdout", "busybox", "true") - _, stderr, _, err := runCommandWithStdoutStderr(cmd) - if err == nil { - c.Fatal("Container should have exited with error code different than 0") - } else if !strings.Contains(stderr, "Conflicting options: -a and -d") { - c.Fatal("Should have returned an error about conflicting options -a and -d") - } -} - -func (s *DockerSuite) TestRunState(c *check.C) { - // TODO Windows: This needs some rework as Windows busybox does not support top - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - - id := strings.TrimSpace(out) - state := inspectField(c, id, "State.Running") - if state != "true" { - c.Fatal("Container state is 'not running'") - } - pid1 := inspectField(c, id, "State.Pid") - if pid1 == "0" { - c.Fatal("Container state Pid 0") - } - - dockerCmd(c, "stop", id) - state = inspectField(c, id, "State.Running") - if state != "false" { - c.Fatal("Container state is 'running'") - } - pid2 := inspectField(c, id, "State.Pid") - if pid2 == pid1 { - c.Fatalf("Container state Pid %s, but expected %s", pid2, pid1) - } - - dockerCmd(c, "start", id) - state = inspectField(c, id, "State.Running") - if state != "true" { - c.Fatal("Container state is 'not running'") - } - pid3 := inspectField(c, id, "State.Pid") - if pid3 == pid1 { - c.Fatalf("Container state Pid %s, but expected %s", pid3, pid1) - } -} - -// Test for #1737 -func (s *DockerSuite) TestRunCopyVolumeUidGid(c *check.C) { - // Not applicable on Windows as it does not support uid or gid in this way - testRequires(c, DaemonIsLinux) - name := "testrunvolumesuidgid" - _, err := buildImage(name, - `FROM busybox - RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd - RUN echo 'dockerio:x:1001:' >> /etc/group - RUN mkdir -p /hello && touch /hello/test && chown dockerio.dockerio /hello`, - true) - if err != nil { - c.Fatal(err) - } - - // Test that the uid and gid are copied from the image to the volume - out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "sh", "-c", "ls -l / | grep hello | awk '{print $3\":\"$4}'") - out = strings.TrimSpace(out) - if out != "dockerio:dockerio" { - c.Fatalf("Wrong /hello ownership: %s, expected dockerio:dockerio", out) - } -} - -// Test for #1582 -func (s *DockerSuite) TestRunCopyVolumeContent(c *check.C) { - // TODO Windows, post TP5. Windows does not yet support volume functionality - // that copies from the image to the volume.
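- // In other words, when a volume is created over a non-empty image
- // directory, the directory's existing contents are copied into the fresh
- // volume, so
- //   docker run --rm -v /hello testruncopyvolumecontent find /hello
- // should still list /hello/local/world from the image.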
- testRequires(c, DaemonIsLinux) - name := "testruncopyvolumecontent" - _, err := buildImage(name, - `FROM busybox - RUN mkdir -p /hello/local && echo hello > /hello/local/world`, - true) - if err != nil { - c.Fatal(err) - } - - // Test that the content is copied from the image to the volume - out, _ := dockerCmd(c, "run", "--rm", "-v", "/hello", name, "find", "/hello") - if !(strings.Contains(out, "/hello/local/world") && strings.Contains(out, "/hello/local")) { - c.Fatal("Container failed to transfer content to volume") - } -} - -func (s *DockerSuite) TestRunCleanupCmdOnEntrypoint(c *check.C) { - name := "testrunmdcleanuponentrypoint" - if _, err := buildImage(name, - `FROM busybox - ENTRYPOINT ["echo"] - CMD ["testingpoint"]`, - true); err != nil { - c.Fatal(err) - } - - out, exit := dockerCmd(c, "run", "--entrypoint", "whoami", name) - if exit != 0 { - c.Fatalf("expected exit code 0 received %d, out: %q", exit, out) - } - out = strings.TrimSpace(out) - expected := "root" - if daemonPlatform == "windows" { - expected = `user manager\containeradministrator` - } - if out != expected { - c.Fatalf("Expected output %s, got %q", expected, out) - } -} - -// TestRunWorkdirExistsAndIsFile checks that if 'docker run -w' with existing file can be detected -func (s *DockerSuite) TestRunWorkdirExistsAndIsFile(c *check.C) { - existingFile := "/bin/cat" - expected := "not a directory" - if daemonPlatform == "windows" { - existingFile = `\windows\system32\ntdll.dll` - expected = `Cannot mkdir: \windows\system32\ntdll.dll is not a directory.` - } - - out, exitCode, err := dockerCmdWithError("run", "-w", existingFile, "busybox") - if !(err != nil && exitCode == 125 && strings.Contains(out, expected)) { - c.Fatalf("Existing binary as a directory should error out with exitCode 125; we got: %s, exitCode: %d", out, exitCode) - } -} - -func (s *DockerSuite) TestRunExitOnStdinClose(c *check.C) { - name := "testrunexitonstdinclose" - - meow := "/bin/cat" - delay := 60 - if daemonPlatform == "windows" { - meow = "cat" - } - runCmd := exec.Command(dockerBinary, "run", "--name", name, "-i", "busybox", meow) - - stdin, err := runCmd.StdinPipe() - if err != nil { - c.Fatal(err) - } - stdout, err := runCmd.StdoutPipe() - if err != nil { - c.Fatal(err) - } - - if err := runCmd.Start(); err != nil { - c.Fatal(err) - } - if _, err := stdin.Write([]byte("hello\n")); err != nil { - c.Fatal(err) - } - - r := bufio.NewReader(stdout) - line, err := r.ReadString('\n') - if err != nil { - c.Fatal(err) - } - line = strings.TrimSpace(line) - if line != "hello" { - c.Fatalf("Output should be 'hello', got '%q'", line) - } - if err := stdin.Close(); err != nil { - c.Fatal(err) - } - finish := make(chan error) - go func() { - finish <- runCmd.Wait() - close(finish) - }() - select { - case err := <-finish: - c.Assert(err, check.IsNil) - case <-time.After(time.Duration(delay) * time.Second): - c.Fatal("docker run failed to exit on stdin close") - } - state := inspectField(c, name, "State.Running") - - if state != "false" { - c.Fatal("Container must be stopped after stdin closing") - } -} - -// Test run -i --restart xxx doesn't hang -func (s *DockerSuite) TestRunInteractiveWithRestartPolicy(c *check.C) { - name := "test-inter-restart" - runCmd := exec.Command(dockerBinary, "run", "-i", "--name", name, "--restart=always", "busybox", "sh") - - stdin, err := runCmd.StdinPipe() - c.Assert(err, checker.IsNil) - - err = runCmd.Start() - c.Assert(err, checker.IsNil) - c.Assert(waitRun(name), check.IsNil) - - _, err = stdin.Write([]byte("exit 
11\n")) - c.Assert(err, checker.IsNil) - - finish := make(chan error) - go func() { - finish <- runCmd.Wait() - close(finish) - }() - delay := 10 * time.Second - select { - case <-finish: - case <-time.After(delay): - c.Fatal("run -i --restart hangs") - } - - c.Assert(waitRun(name), check.IsNil) - dockerCmd(c, "stop", name) -} - -// Test for #2267 -func (s *DockerSuite) TestRunWriteHostsFileAndNotCommit(c *check.C) { - // Cannot run on Windows as Windows does not support diff. - testRequires(c, DaemonIsLinux) - name := "writehosts" - out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hosts && cat /etc/hosts") - if !strings.Contains(out, "test2267") { - c.Fatal("/etc/hosts should contain 'test2267'") - } - - out, _ = dockerCmd(c, "diff", name) - if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { - c.Fatal("diff should be empty") - } -} - -func eqToBaseDiff(out string, c *check.C) bool { - name := "eqToBaseDiff" + stringutils.GenerateRandomAlphaOnlyString(32) - dockerCmd(c, "run", "--name", name, "busybox", "echo", "hello") - cID, err := getIDByName(name) - c.Assert(err, check.IsNil) - - baseDiff, _ := dockerCmd(c, "diff", cID) - baseArr := strings.Split(baseDiff, "\n") - sort.Strings(baseArr) - outArr := strings.Split(out, "\n") - sort.Strings(outArr) - return sliceEq(baseArr, outArr) -} - -func sliceEq(a, b []string) bool { - if len(a) != len(b) { - return false - } - - for i := range a { - if a[i] != b[i] { - return false - } - } - - return true -} - -// Test for #2267 -func (s *DockerSuite) TestRunWriteHostnameFileAndNotCommit(c *check.C) { - // Cannot run on Windows as Windows does not support diff. - testRequires(c, DaemonIsLinux) - name := "writehostname" - out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/hostname && cat /etc/hostname") - if !strings.Contains(out, "test2267") { - c.Fatal("/etc/hostname should contain 'test2267'") - } - - out, _ = dockerCmd(c, "diff", name) - if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { - c.Fatal("diff should be empty") - } -} - -// Test for #2267 -func (s *DockerSuite) TestRunWriteResolvFileAndNotCommit(c *check.C) { - // Cannot run on Windows as Windows does not support diff. 
- testRequires(c, DaemonIsLinux) - name := "writeresolv" - out, _ := dockerCmd(c, "run", "--name", name, "busybox", "sh", "-c", "echo test2267 >> /etc/resolv.conf && cat /etc/resolv.conf") - if !strings.Contains(out, "test2267") { - c.Fatal("/etc/resolv.conf should contain 'test2267'") - } - - out, _ = dockerCmd(c, "diff", name) - if len(strings.Trim(out, "\r\n")) != 0 && !eqToBaseDiff(out, c) { - c.Fatal("diff should be empty") - } -} - -func (s *DockerSuite) TestRunWithBadDevice(c *check.C) { - // Cannot run on Windows as Windows does not support --device - testRequires(c, DaemonIsLinux) - name := "baddevice" - out, _, err := dockerCmdWithError("run", "--name", name, "--device", "/etc", "busybox", "true") - - if err == nil { - c.Fatal("Run should fail with bad device") - } - expected := `"/etc": not a device node` - if !strings.Contains(out, expected) { - c.Fatalf("Output should contain %q, actual out: %q", expected, out) - } -} - -func (s *DockerSuite) TestRunEntrypoint(c *check.C) { - name := "entrypoint" - - out, _ := dockerCmd(c, "run", "--name", name, "--entrypoint", "echo", "busybox", "-n", "foobar") - expected := "foobar" - - if out != expected { - c.Fatalf("Output should be %q, actual out: %q", expected, out) - } -} - -func (s *DockerSuite) TestRunBindMounts(c *check.C) { - testRequires(c, SameHostDaemon) - if daemonPlatform == "linux" { - testRequires(c, DaemonIsLinux, NotUserNamespace) - } - - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - - tmpDir, err := ioutil.TempDir("", "docker-test-container") - if err != nil { - c.Fatal(err) - } - - defer os.RemoveAll(tmpDir) - writeFile(path.Join(tmpDir, "touch-me"), "", c) - - // TODO Windows: Temporary check - remove once TP5 support is dropped - if daemonPlatform != "windows" || windowsDaemonKV >= 14350 { - // Test reading from a read-only bind mount - out, _ := dockerCmd(c, "run", "-v", fmt.Sprintf("%s:%s/tmp:ro", tmpDir, prefix), "busybox", "ls", prefix+"/tmp") - if !strings.Contains(out, "touch-me") { - c.Fatal("Container failed to read from bind mount") - } - } - - // test writing to bind mount - if daemonPlatform == "windows" { - dockerCmd(c, "run", "-v", fmt.Sprintf(`%s:c:\tmp:rw`, tmpDir), "busybox", "touch", "c:/tmp/holla") - } else { - dockerCmd(c, "run", "-v", fmt.Sprintf("%s:/tmp:rw", tmpDir), "busybox", "touch", "/tmp/holla") - } - - readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist - - // test mounting to an illegal destination directory - _, _, err = dockerCmdWithError("run", "-v", fmt.Sprintf("%s:.", tmpDir), "busybox", "ls", ".") - if err == nil { - c.Fatal("Container bind mounted illegal directory") - } - - // Windows does not (and likely never will) support mounting a single file - if daemonPlatform != "windows" { - // test mount a file - dockerCmd(c, "run", "-v", fmt.Sprintf("%s/holla:/tmp/holla:rw", tmpDir), "busybox", "sh", "-c", "echo -n 'yotta' > /tmp/holla") - content := readFile(path.Join(tmpDir, "holla"), c) // Will fail if the file doesn't exist - expected := "yotta" - if content != expected { - c.Fatalf("Output should be %q, actual out: %q", expected, content) - } - } -} - -// Ensure that CIDFile gets deleted if it's empty -// Perform this test by making `docker run` fail -func (s *DockerSuite) TestRunCidFileCleanupIfEmpty(c *check.C) { - // Windows Server 2016 RS1 builds load the windowsservercore image from a tar rather than - // a .WIM file, and the tar layer has the default CMD set (same as the Linux ubuntu image), - // where-as the TP5 .WIM had a blank CMD. 
Hence this test is not applicable on RS1 or later - // builds as the command won't fail as it's not blank - if daemonPlatform == "windows" && windowsDaemonKV >= 14375 { - c.Skip("Not applicable on Windows RS1 or later builds") - } - - tmpDir, err := ioutil.TempDir("", "TestRunCidFile") - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(tmpDir) - tmpCidFile := path.Join(tmpDir, "cid") - - image := "emptyfs" - if daemonPlatform == "windows" { - // Windows can't support an emptyfs image. Just use the regular Windows image - image = WindowsBaseImage - } - out, _, err := dockerCmdWithError("run", "--cidfile", tmpCidFile, image) - if err == nil { - c.Fatalf("Run without command must fail. out=%s", out) - } else if !strings.Contains(out, "No command specified") { - c.Fatalf("Run without command failed with wrong output. out=%s\nerr=%v", out, err) - } - - if _, err := os.Stat(tmpCidFile); err == nil { - c.Fatalf("empty CIDFile %q should've been deleted", tmpCidFile) - } -} - -// #2098 - Docker cidFiles only contain short version of the containerId -//sudo docker run --cidfile /tmp/docker_tesc.cid ubuntu echo "test" -// TestRunCidFile tests that run --cidfile returns the longid -func (s *DockerSuite) TestRunCidFileCheckIDLength(c *check.C) { - tmpDir, err := ioutil.TempDir("", "TestRunCidFile") - if err != nil { - c.Fatal(err) - } - tmpCidFile := path.Join(tmpDir, "cid") - defer os.RemoveAll(tmpDir) - - out, _ := dockerCmd(c, "run", "-d", "--cidfile", tmpCidFile, "busybox", "true") - - id := strings.TrimSpace(out) - buffer, err := ioutil.ReadFile(tmpCidFile) - if err != nil { - c.Fatal(err) - } - cid := string(buffer) - if len(cid) != 64 { - c.Fatalf("--cidfile should be a long id, not %q", id) - } - if cid != id { - c.Fatalf("cid must be equal to %s, got %s", id, cid) - } -} - -func (s *DockerSuite) TestRunSetMacAddress(c *check.C) { - mac := "12:34:56:78:9a:bc" - var out string - if daemonPlatform == "windows" { - out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "sh", "-c", "ipconfig /all | grep 'Physical Address' | awk '{print $12}'") - mac = strings.Replace(strings.ToUpper(mac), ":", "-", -1) // To Windows-style MACs - } else { - out, _ = dockerCmd(c, "run", "-i", "--rm", fmt.Sprintf("--mac-address=%s", mac), "busybox", "/bin/sh", "-c", "ip link show eth0 | tail -1 | awk '{print $2}'") - } - - actualMac := strings.TrimSpace(out) - if actualMac != mac { - c.Fatalf("Set MAC address with --mac-address failed. The container has an incorrect MAC address: %q, expected: %q", actualMac, mac) - } -} - -func (s *DockerSuite) TestRunInspectMacAddress(c *check.C) { - // TODO Windows. Network settings are not propagated back to inspect. 
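- // The round trip being verified, sketched with the CLI:
- //   docker run -d --mac-address=12:34:56:78:9a:bc busybox top
- //   docker inspect -f '{{.NetworkSettings.Networks.bridge.MacAddress}}' <id>
- // should print the configured address back.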
- testRequires(c, DaemonIsLinux) - mac := "12:34:56:78:9a:bc" - out, _ := dockerCmd(c, "run", "-d", "--mac-address="+mac, "busybox", "top") - - id := strings.TrimSpace(out) - inspectedMac := inspectField(c, id, "NetworkSettings.Networks.bridge.MacAddress") - if inspectedMac != mac { - c.Fatalf("docker inspect outputs wrong MAC address: %q, should be: %q", inspectedMac, mac) - } -} - -// Test that docker run with an invalid MAC address errors out -func (s *DockerSuite) TestRunWithInvalidMacAddress(c *check.C) { - out, _, err := dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29", "busybox") - // using an invalid MAC address should error out - if err == nil || !strings.Contains(out, "is not a valid mac address") { - c.Fatalf("run with an invalid --mac-address should error out") - } -} - -func (s *DockerSuite) TestRunDeallocatePortOnMissingIptablesRule(c *check.C) { - // TODO Windows. Network settings are not propagated back to inspect. - testRequires(c, SameHostDaemon, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") - - id := strings.TrimSpace(out) - ip := inspectField(c, id, "NetworkSettings.Networks.bridge.IPAddress") - iptCmd := exec.Command("iptables", "-D", "DOCKER", "-d", fmt.Sprintf("%s/32", ip), - "!", "-i", "docker0", "-o", "docker0", "-p", "tcp", "-m", "tcp", "--dport", "23", "-j", "ACCEPT") - out, _, err := runCommandWithOutput(iptCmd) - if err != nil { - c.Fatal(err, out) - } - if err := deleteContainer(id); err != nil { - c.Fatal(err) - } - - dockerCmd(c, "run", "-d", "-p", "23:23", "busybox", "top") -} - -func (s *DockerSuite) TestRunPortInUse(c *check.C) { - // TODO Windows. The duplicate NAT message returned by Windows is - // currently undecipherable and is expected to change. Does need modifying - // to run sh rather than top though as top isn't in Windows busybox. - testRequires(c, SameHostDaemon, DaemonIsLinux) - - port := "1234" - dockerCmd(c, "run", "-d", "-p", port+":80", "busybox", "top") - - out, _, err := dockerCmdWithError("run", "-d", "-p", port+":80", "busybox", "top") - if err == nil { - c.Fatalf("Binding on used port must fail") - } - if !strings.Contains(out, "port is already allocated") { - c.Fatalf("Out must be about \"port is already allocated\", got %s", out) - } -} - -// https://github.com/docker/docker/issues/12148 -func (s *DockerSuite) TestRunAllocatePortInReservedRange(c *check.C) { - // TODO Windows. -P is not yet supported - testRequires(c, DaemonIsLinux) - // allocate a dynamic port to get the most recently used port - out, _ := dockerCmd(c, "run", "-d", "-P", "-p", "80", "busybox", "top") - - id := strings.TrimSpace(out) - out, _ = dockerCmd(c, "port", id, "80") - - strPort := strings.Split(strings.TrimSpace(out), ":")[1] - port, err := strconv.ParseInt(strPort, 10, 64) - if err != nil { - c.Fatalf("invalid port, got: %s, error: %s", strPort, err) - } - - // allocate a static port and a dynamic port together, where the static port - // takes the next port in the dynamic port range. - dockerCmd(c, "run", "-d", "-P", "-p", "80", "-p", fmt.Sprintf("%d:8080", port+1), "busybox", "top") -} - -// Regression test for #7792 -func (s *DockerSuite) TestRunMountOrdering(c *check.C) { - // TODO Windows: Post TP5. Updated, but Windows does not support nested mounts currently.
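- // The daemon is expected to sort mounts by destination path, so /tmp is
- // mounted before /tmp/foo and /tmp/tmp2 before /tmp/tmp2/foo, regardless
- // of the order in which the -v flags are given on the command line.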
- testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - - tmpDir, err := ioutil.TempDir("", "docker_nested_mount_test") - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - tmpDir2, err := ioutil.TempDir("", "docker_nested_mount_test2") - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(tmpDir2) - - // Create a temporary tmpfs mounc. - fooDir := filepath.Join(tmpDir, "foo") - if err := os.MkdirAll(filepath.Join(tmpDir, "foo"), 0755); err != nil { - c.Fatalf("failed to mkdir at %s - %s", fooDir, err) - } - - if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", fooDir), []byte{}, 0644); err != nil { - c.Fatal(err) - } - - if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir), []byte{}, 0644); err != nil { - c.Fatal(err) - } - - if err := ioutil.WriteFile(fmt.Sprintf("%s/touch-me", tmpDir2), []byte{}, 0644); err != nil { - c.Fatal(err) - } - - dockerCmd(c, "run", - "-v", fmt.Sprintf("%s:"+prefix+"/tmp", tmpDir), - "-v", fmt.Sprintf("%s:"+prefix+"/tmp/foo", fooDir), - "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2", tmpDir2), - "-v", fmt.Sprintf("%s:"+prefix+"/tmp/tmp2/foo", fooDir), - "busybox:latest", "sh", "-c", - "ls "+prefix+"/tmp/touch-me && ls "+prefix+"/tmp/foo/touch-me && ls "+prefix+"/tmp/tmp2/touch-me && ls "+prefix+"/tmp/tmp2/foo/touch-me") -} - -// Regression test for https://github.com/docker/docker/issues/8259 -func (s *DockerSuite) TestRunReuseBindVolumeThatIsSymlink(c *check.C) { - // Not applicable on Windows as Windows does not support volumes - testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - - tmpDir, err := ioutil.TempDir(os.TempDir(), "testlink") - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - linkPath := os.TempDir() + "/testlink2" - if err := os.Symlink(tmpDir, linkPath); err != nil { - c.Fatal(err) - } - defer os.RemoveAll(linkPath) - - // Create first container - dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") - - // Create second container with same symlinked path - // This will fail if the referenced issue is hit with a "Volume exists" error - dockerCmd(c, "run", "-v", fmt.Sprintf("%s:"+prefix+"/tmp/test", linkPath), "busybox", "ls", prefix+"/tmp/test") -} - -//GH#10604: Test an "/etc" volume doesn't overlay special bind mounts in container -func (s *DockerSuite) TestRunCreateVolumeEtc(c *check.C) { - // While Windows supports volumes, it does not support --add-host hence - // this test is not applicable on Windows. - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "--dns=127.0.0.1", "-v", "/etc", "busybox", "cat", "/etc/resolv.conf") - if !strings.Contains(out, "nameserver 127.0.0.1") { - c.Fatal("/etc volume mount hides /etc/resolv.conf") - } - - out, _ = dockerCmd(c, "run", "-h=test123", "-v", "/etc", "busybox", "cat", "/etc/hostname") - if !strings.Contains(out, "test123") { - c.Fatal("/etc volume mount hides /etc/hostname") - } - - out, _ = dockerCmd(c, "run", "--add-host=test:192.168.0.1", "-v", "/etc", "busybox", "cat", "/etc/hosts") - out = strings.Replace(out, "\n", " ", -1) - if !strings.Contains(out, "192.168.0.1\ttest") || !strings.Contains(out, "127.0.0.1\tlocalhost") { - c.Fatal("/etc volume mount hides /etc/hosts") - } -} - -func (s *DockerSuite) TestVolumesNoCopyData(c *check.C) { - // TODO Windows (Post TP5). 
Windows does not support volumes which - // are pre-populated, such as the one built by the Dockerfile used in this test. - testRequires(c, DaemonIsLinux) - if _, err := buildImage("dataimage", - `FROM busybox - RUN mkdir -p /foo - RUN touch /foo/bar`, - true); err != nil { - c.Fatal(err) - } - - dockerCmd(c, "run", "--name", "test", "-v", "/foo", "busybox") - - if out, _, err := dockerCmdWithError("run", "--volumes-from", "test", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { - c.Fatalf("Data was copied on volumes-from but shouldn't be:\n%q", out) - } - - tmpDir := randomTmpDirPath("docker_test_bind_mount_copy_data", daemonPlatform) - if out, _, err := dockerCmdWithError("run", "-v", tmpDir+":/foo", "dataimage", "ls", "-lh", "/foo/bar"); err == nil || !strings.Contains(out, "No such file or directory") { - c.Fatalf("Data was copied on bind-mount but shouldn't be:\n%q", out) - } -} - -func (s *DockerSuite) TestRunNoOutputFromPullInStdout(c *check.C) { - // just run with an unknown image - cmd := exec.Command(dockerBinary, "run", "asdfsg") - stdout := bytes.NewBuffer(nil) - cmd.Stdout = stdout - if err := cmd.Run(); err == nil { - c.Fatal("Run with unknown image should fail") - } - if stdout.Len() != 0 { - c.Fatalf("Stdout contains output from pull: %s", stdout) - } -} - -func (s *DockerSuite) TestRunVolumesCleanPaths(c *check.C) { - testRequires(c, SameHostDaemon) - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - if _, err := buildImage("run_volumes_clean_paths", - `FROM busybox - VOLUME `+prefix+`/foo/`, - true); err != nil { - c.Fatal(err) - } - - dockerCmd(c, "run", "-v", prefix+"/foo", "-v", prefix+"/bar/", "--name", "dark_helmet", "run_volumes_clean_paths") - - out, err := inspectMountSourceField("dark_helmet", prefix+slash+"foo"+slash) - if err != errMountNotFound { - c.Fatalf("Found unexpected volume entry for '%s/foo/' in volumes\n%q", prefix, out) - } - - out, err = inspectMountSourceField("dark_helmet", prefix+slash+`foo`) - c.Assert(err, check.IsNil) - if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) { - c.Fatalf("Volume was not defined for %s/foo\n%q", prefix, out) - } - - out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar"+slash) - if err != errMountNotFound { - c.Fatalf("Found unexpected volume entry for '%s/bar/' in volumes\n%q", prefix, out) - } - - out, err = inspectMountSourceField("dark_helmet", prefix+slash+"bar") - c.Assert(err, check.IsNil) - if !strings.Contains(strings.ToLower(out), strings.ToLower(volumesConfigPath)) { - c.Fatalf("Volume was not defined for %s/bar\n%q", prefix, out) - } -} - -// Regression test for #3631 -func (s *DockerSuite) TestRunSlowStdoutConsumer(c *check.C) { - // TODO Windows: This should be able to run on Windows if we can find an - // alternative to /dev/zero and /dev/stdout. - testRequires(c, DaemonIsLinux) - cont := exec.Command(dockerBinary, "run", "--rm", "busybox", "/bin/sh", "-c", "dd if=/dev/zero of=/dev/stdout bs=1024 count=2000 | catv") - - stdout, err := cont.StdoutPipe() - if err != nil { - c.Fatal(err) - } - - if err := cont.Start(); err != nil { - c.Fatal(err) - } - n, err := consumeWithSpeed(stdout, 10000, 5*time.Millisecond, nil) - if err != nil { - c.Fatal(err) - } - - expected := 2 * 1024 * 2000 - if n != expected { - c.Fatalf("Expected %d, got %d", expected, n) - } -} - -func (s *DockerSuite) TestRunAllowPortRangeThroughExpose(c *check.C) { - // TODO Windows: -P is not currently supported.
Also network - // settings are not propagated back. - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-P", "busybox", "top") - - id := strings.TrimSpace(out) - portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") - var ports nat.PortMap - if err := unmarshalJSON([]byte(portstr), &ports); err != nil { - c.Fatal(err) - } - for port, binding := range ports { - portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) - if portnum < 3000 || portnum > 3003 { - c.Fatalf("Port %d is out of range ", portnum) - } - if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { - c.Fatalf("Port is not mapped for the port %s", port) - } - } -} - -func (s *DockerSuite) TestRunExposePort(c *check.C) { - out, _, err := dockerCmdWithError("run", "--expose", "80000", "busybox") - c.Assert(err, checker.NotNil, check.Commentf("--expose with an invalid port should error out")) - c.Assert(out, checker.Contains, "invalid range format for --expose") -} - -func (s *DockerSuite) TestRunUnknownCommand(c *check.C) { - out, _, _ := dockerCmdWithStdoutStderr(c, "create", "busybox", "/bin/nada") - - cID := strings.TrimSpace(out) - _, _, err := dockerCmdWithError("start", cID) - - // Windows and Linux are different here by architectural design. Linux will - // fail to start the container, so an error is expected. Windows will - // successfully start the container, and once started attempt to execute - // the command which will fail. - if daemonPlatform == "windows" { - // Wait for it to exit. - waitExited(cID, 30*time.Second) - c.Assert(err, check.IsNil) - } else { - c.Assert(err, check.NotNil) - } - - rc := inspectField(c, cID, "State.ExitCode") - if rc == "0" { - c.Fatalf("ExitCode(%v) cannot be 0", rc) - } -} - -func (s *DockerSuite) TestRunModeIpcHost(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) - - hostIpc, err := os.Readlink("/proc/1/ns/ipc") - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", "--ipc=host", "busybox", "readlink", "/proc/self/ns/ipc") - out = strings.Trim(out, "\n") - if hostIpc != out { - c.Fatalf("IPC different with --ipc=host %s != %s\n", hostIpc, out) - } - - out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/ipc") - out = strings.Trim(out, "\n") - if hostIpc == out { - c.Fatalf("IPC should be different without --ipc=host %s == %s\n", hostIpc, out) - } -} - -func (s *DockerSuite) TestRunModeIpcContainer(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") - - id := strings.TrimSpace(out) - state := inspectField(c, id, "State.Running") - if state != "true" { - c.Fatal("Container state is 'not running'") - } - pid1 := inspectField(c, id, "State.Pid") - - parentContainerIpc, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/ipc", pid1)) - if err != nil { - c.Fatal(err) - } - - out, _ = dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "readlink", "/proc/self/ns/ipc") - out = strings.Trim(out, "\n") - if parentContainerIpc != out { - c.Fatalf("IPC different with --ipc=container:%s %s != %s\n", id, parentContainerIpc, out) - } - - catOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "cat", "/dev/shm/test") - if catOutput != "test" { - 
c.Fatalf("Output of /dev/shm/test expected test but found: %s", catOutput) - } - - // check that /dev/mqueue is actually of mqueue type - grepOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "grep", "/dev/mqueue", "/proc/mounts") - if !strings.HasPrefix(grepOutput, "mqueue /dev/mqueue mqueue rw") { - c.Fatalf("Output of 'grep /proc/mounts' expected 'mqueue /dev/mqueue mqueue rw' but found: %s", grepOutput) - } - - lsOutput, _ := dockerCmd(c, "run", fmt.Sprintf("--ipc=container:%s", id), "busybox", "ls", "/dev/mqueue") - lsOutput = strings.Trim(lsOutput, "\n") - if lsOutput != "toto" { - c.Fatalf("Output of 'ls /dev/mqueue' expected 'toto' but found: %s", lsOutput) - } -} - -func (s *DockerSuite) TestRunModeIpcContainerNotExists(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "-d", "--ipc", "container:abcd1234", "busybox", "top") - if !strings.Contains(out, "abcd1234") || err == nil { - c.Fatalf("run IPC from a non exists container should with correct error out") - } -} - -func (s *DockerSuite) TestRunModeIpcContainerNotRunning(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux) - - out, _ := dockerCmd(c, "create", "busybox") - - id := strings.TrimSpace(out) - out, _, err := dockerCmdWithError("run", fmt.Sprintf("--ipc=container:%s", id), "busybox") - if err == nil { - c.Fatalf("Run container with ipc mode container should fail with non running container: %s\n%s", out, err) - } -} - -func (s *DockerSuite) TestRunModePidContainer(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "top") - - id := strings.TrimSpace(out) - state := inspectField(c, id, "State.Running") - if state != "true" { - c.Fatal("Container state is 'not running'") - } - pid1 := inspectField(c, id, "State.Pid") - - parentContainerPid, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/pid", pid1)) - if err != nil { - c.Fatal(err) - } - - out, _ = dockerCmd(c, "run", fmt.Sprintf("--pid=container:%s", id), "busybox", "readlink", "/proc/self/ns/pid") - out = strings.Trim(out, "\n") - if parentContainerPid != out { - c.Fatalf("PID different with --pid=container:%s %s != %s\n", id, parentContainerPid, out) - } -} - -func (s *DockerSuite) TestRunModePidContainerNotExists(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "-d", "--pid", "container:abcd1234", "busybox", "top") - if !strings.Contains(out, "abcd1234") || err == nil { - c.Fatalf("run PID from a non exists container should with correct error out") - } -} - -func (s *DockerSuite) TestRunModePidContainerNotRunning(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux) - - out, _ := dockerCmd(c, "create", "busybox") - - id := strings.TrimSpace(out) - out, _, err := dockerCmdWithError("run", fmt.Sprintf("--pid=container:%s", id), "busybox") - if err == nil { - c.Fatalf("Run container with pid mode container should fail with non running container: %s\n%s", out, err) - } -} - -func (s *DockerSuite) TestRunMountShmMqueueFromHost(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, 
DaemonIsLinux, NotUserNamespace) - - dockerCmd(c, "run", "-d", "--name", "shmfromhost", "-v", "/dev/shm:/dev/shm", "-v", "/dev/mqueue:/dev/mqueue", "busybox", "sh", "-c", "echo -n test > /dev/shm/test && touch /dev/mqueue/toto && top") - defer os.Remove("/dev/mqueue/toto") - defer os.Remove("/dev/shm/test") - volPath, err := inspectMountSourceField("shmfromhost", "/dev/shm") - c.Assert(err, checker.IsNil) - if volPath != "/dev/shm" { - c.Fatalf("volumePath should have been /dev/shm, was %s", volPath) - } - - out, _ := dockerCmd(c, "run", "--name", "ipchost", "--ipc", "host", "busybox", "cat", "/dev/shm/test") - if out != "test" { - c.Fatalf("Output of /dev/shm/test expected test but found: %s", out) - } - - // Check that the mq was created - if _, err := os.Stat("/dev/mqueue/toto"); err != nil { - c.Fatalf("Failed to confirm '/dev/mqueue/toto' presence on host: %s", err.Error()) - } -} - -func (s *DockerSuite) TestContainerNetworkMode(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), check.IsNil) - pid1 := inspectField(c, id, "State.Pid") - - parentContainerNet, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) - if err != nil { - c.Fatal(err) - } - - out, _ = dockerCmd(c, "run", fmt.Sprintf("--net=container:%s", id), "busybox", "readlink", "/proc/self/ns/net") - out = strings.Trim(out, "\n") - if parentContainerNet != out { - c.Fatalf("NET different with --net=container:%s %s != %s\n", id, parentContainerNet, out) - } -} - -func (s *DockerSuite) TestRunModePidHost(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) - - hostPid, err := os.Readlink("/proc/1/ns/pid") - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", "--pid=host", "busybox", "readlink", "/proc/self/ns/pid") - out = strings.Trim(out, "\n") - if hostPid != out { - c.Fatalf("PID different with --pid=host %s != %s\n", hostPid, out) - } - - out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/pid") - out = strings.Trim(out, "\n") - if hostPid == out { - c.Fatalf("PID should be different without --pid=host %s == %s\n", hostPid, out) - } -} - -func (s *DockerSuite) TestRunModeUTSHost(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux) - - hostUTS, err := os.Readlink("/proc/1/ns/uts") - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", "--uts=host", "busybox", "readlink", "/proc/self/ns/uts") - out = strings.Trim(out, "\n") - if hostUTS != out { - c.Fatalf("UTS different with --uts=host %s != %s\n", hostUTS, out) - } - - out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/uts") - out = strings.Trim(out, "\n") - if hostUTS == out { - c.Fatalf("UTS should be different without --uts=host %s == %s\n", hostUTS, out) - } - - out, _ = dockerCmdWithFail(c, "run", "-h=name", "--uts=host", "busybox", "ps") - c.Assert(out, checker.Contains, runconfig.ErrConflictUTSHostname.Error()) -} - -func (s *DockerSuite) TestRunTLSverify(c *check.C) { - // Remote daemons use TLS and this test is not applicable when TLS is required. 
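- // Note: specifying --tlsverify at all (whether true or false) should force the - // client to attempt a TLS connection, so both flagged invocations below are - // expected to fail against a daemon that is not serving TLS.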
- testRequires(c, SameHostDaemon) - if out, code, err := dockerCmdWithError("ps"); err != nil || code != 0 { - c.Fatalf("Should have worked: %v:\n%v", err, out) - } - - // Regardless of whether we specify true or false, we need to - // test to make sure tls is turned on if --tlsverify is specified at all - out, code, err := dockerCmdWithError("--tlsverify=false", "ps") - if err == nil || code == 0 || !strings.Contains(out, "trying to connect") { - c.Fatalf("Should have failed: \nexit code:%v\nout:%v\nerr:%v", code, out, err) - } - - out, code, err = dockerCmdWithError("--tlsverify=true", "ps") - if err == nil || code == 0 || !strings.Contains(out, "cert") { - c.Fatalf("Should have failed: \nexit code:%v\nout:%v\nerr:%v", code, out, err) - } -} - -func (s *DockerSuite) TestRunPortFromDockerRangeInUse(c *check.C) { - // TODO Windows. Once moved to libnetwork/CNM, this may be able to be - // re-instated. - testRequires(c, DaemonIsLinux) - // first find the allocator's current position - out, _ := dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") - - id := strings.TrimSpace(out) - out, _ = dockerCmd(c, "port", id) - - out = strings.TrimSpace(out) - if out == "" { - c.Fatal("docker port command output is empty") - } - out = strings.Split(out, ":")[1] - lastPort, err := strconv.Atoi(out) - if err != nil { - c.Fatal(err) - } - port := lastPort + 1 - l, err := net.Listen("tcp", ":"+strconv.Itoa(port)) - if err != nil { - c.Fatal(err) - } - defer l.Close() - - out, _ = dockerCmd(c, "run", "-d", "-p", ":80", "busybox", "top") - - id = strings.TrimSpace(out) - dockerCmd(c, "port", id) -} - -func (s *DockerSuite) TestRunTTYWithPipe(c *check.C) { - errChan := make(chan error) - go func() { - defer close(errChan) - - cmd := exec.Command(dockerBinary, "run", "-ti", "busybox", "true") - if _, err := cmd.StdinPipe(); err != nil { - errChan <- err - return - } - - expected := "the input device is not a TTY" - if runtime.GOOS == "windows" { - expected += ". 
If you are using mintty, try prefixing the command with 'winpty'" - } - if out, _, err := runCommandWithOutput(cmd); err == nil { - errChan <- fmt.Errorf("run should have failed") - return - } else if !strings.Contains(out, expected) { - errChan <- fmt.Errorf("run failed with error %q: expected %q", out, expected) - return - } - }() - - select { - case err := <-errChan: - c.Assert(err, check.IsNil) - case <-time.After(30 * time.Second): - c.Fatal("container is running but should have failed") - } -} - -func (s *DockerSuite) TestRunNonLocalMacAddress(c *check.C) { - addr := "00:16:3E:08:00:50" - args := []string{"run", "--mac-address", addr} - expected := addr - - if daemonPlatform != "windows" { - args = append(args, "busybox", "ifconfig") - } else { - args = append(args, WindowsBaseImage, "ipconfig", "/all") - expected = strings.Replace(strings.ToUpper(addr), ":", "-", -1) - } - - if out, _ := dockerCmd(c, args...); !strings.Contains(out, expected) { - c.Fatalf("Output should have contained %q: %s", expected, out) - } -} - -func (s *DockerSuite) TestRunNetHost(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) - - hostNet, err := os.Readlink("/proc/1/ns/net") - if err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "run", "--net=host", "busybox", "readlink", "/proc/self/ns/net") - out = strings.Trim(out, "\n") - if hostNet != out { - c.Fatalf("Net namespace different with --net=host %s != %s\n", hostNet, out) - } - - out, _ = dockerCmd(c, "run", "busybox", "readlink", "/proc/self/ns/net") - out = strings.Trim(out, "\n") - if hostNet == out { - c.Fatalf("Net namespace should be different without --net=host %s == %s\n", hostNet, out) - } -} - -func (s *DockerSuite) TestRunNetHostTwiceSameName(c *check.C) { - // TODO Windows. As Windows networking evolves and converges towards - // CNM, this test may be possible to enable on Windows. - testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) - - dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") - dockerCmd(c, "run", "--rm", "--name=thost", "--net=host", "busybox", "true") -} - -func (s *DockerSuite) TestRunNetContainerWhichHost(c *check.C) { - // Not applicable on Windows as uses Unix-specific capabilities - testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) - - hostNet, err := os.Readlink("/proc/1/ns/net") - if err != nil { - c.Fatal(err) - } - - dockerCmd(c, "run", "-d", "--net=host", "--name=test", "busybox", "top") - - out, _ := dockerCmd(c, "run", "--net=container:test", "busybox", "readlink", "/proc/self/ns/net") - out = strings.Trim(out, "\n") - if hostNet != out { - c.Fatalf("Container should have host network namespace") - } -} - -func (s *DockerSuite) TestRunAllowPortRangeThroughPublish(c *check.C) { - // TODO Windows. This may be possible to enable in the future. However, - // Windows does not currently support --expose, or populate the network - // settings seen through inspect. 
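- // Note: publishing the range 3000-3003 without explicit host ports asks the - // daemon to pick a free host port for each container port; the loop below - // verifies that every port in the range received exactly one host binding.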
- testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--expose", "3000-3003", "-p", "3000-3003", "busybox", "top") - - id := strings.TrimSpace(out) - portstr := inspectFieldJSON(c, id, "NetworkSettings.Ports") - - var ports nat.PortMap - err := unmarshalJSON([]byte(portstr), &ports) - c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal: %v", portstr)) - for port, binding := range ports { - portnum, _ := strconv.Atoi(strings.Split(string(port), "/")[0]) - if portnum < 3000 || portnum > 3003 { - c.Fatalf("Port %d is out of range ", portnum) - } - if binding == nil || len(binding) != 1 || len(binding[0].HostPort) == 0 { - c.Fatal("Port is not mapped for the port "+port, out) - } - } -} - -func (s *DockerSuite) TestRunSetDefaultRestartPolicy(c *check.C) { - runSleepingContainer(c, "--name=testrunsetdefaultrestartpolicy") - out := inspectField(c, "testrunsetdefaultrestartpolicy", "HostConfig.RestartPolicy.Name") - if out != "no" { - c.Fatalf("Set default restart policy failed") - } -} - -func (s *DockerSuite) TestRunRestartMaxRetries(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "false") - timeout := 10 * time.Second - if daemonPlatform == "windows" { - timeout = 120 * time.Second - } - - id := strings.TrimSpace(string(out)) - if err := waitInspect(id, "{{ .State.Restarting }} {{ .State.Running }}", "false false", timeout); err != nil { - c.Fatal(err) - } - - count := inspectField(c, id, "RestartCount") - if count != "3" { - c.Fatalf("Container was restarted %s times, expected %d", count, 3) - } - - MaximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") - if MaximumRetryCount != "3" { - c.Fatalf("Container Maximum Retry Count is %s, expected %s", MaximumRetryCount, "3") - } -} - -func (s *DockerSuite) TestRunContainerWithWritableRootfs(c *check.C) { - dockerCmd(c, "run", "--rm", "busybox", "touch", "/file") -} - -func (s *DockerSuite) TestRunContainerWithReadonlyRootfs(c *check.C) { - // Not applicable on Windows which does not support --read-only - testRequires(c, DaemonIsLinux) - - testReadOnlyFile(c, "/file", "/etc/hosts", "/etc/resolv.conf", "/etc/hostname", "/sys/kernel", "/dev/.dont.touch.me") -} - -func (s *DockerSuite) TestPermissionsPtsReadonlyRootfs(c *check.C) { - // Not applicable on Windows due to use of Unix specific functionality, plus - // the use of --read-only which is not supported. 
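- // Note: /dev/pts is mounted by the daemon itself, so even with a read-only - // rootfs it should still appear as a writable (rw) devpts mount inside the - // container.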
- // --read-only + userns has remount issues - testRequires(c, DaemonIsLinux, NotUserNamespace) - - // Ensure we have not broken writing /dev/pts - out, status := dockerCmd(c, "run", "--read-only", "--rm", "busybox", "mount") - if status != 0 { - c.Fatal("Could not obtain mounts when checking /dev/pts mntpnt.") - } - expected := "type devpts (rw," - if !strings.Contains(string(out), expected) { - c.Fatalf("expected output to contain %s but contains %s", expected, out) - } -} - -func testReadOnlyFile(c *check.C, filenames ...string) { - // Not applicable on Windows which does not support --read-only - testRequires(c, DaemonIsLinux, NotUserNamespace) - touch := "touch " + strings.Join(filenames, " ") - out, _, err := dockerCmdWithError("run", "--read-only", "--rm", "busybox", "sh", "-c", touch) - c.Assert(err, checker.NotNil) - - for _, f := range filenames { - expected := "touch: " + f + ": Read-only file system" - c.Assert(out, checker.Contains, expected) - } - - out, _, err = dockerCmdWithError("run", "--read-only", "--privileged", "--rm", "busybox", "sh", "-c", touch) - c.Assert(err, checker.NotNil) - - for _, f := range filenames { - expected := "touch: " + f + ": Read-only file system" - c.Assert(out, checker.Contains, expected) - } -} - -func (s *DockerSuite) TestRunContainerWithReadonlyEtcHostsAndLinkedContainer(c *check.C) { - // Not applicable on Windows which does not support --link - // --read-only + userns has remount issues - testRequires(c, DaemonIsLinux, NotUserNamespace) - - dockerCmd(c, "run", "-d", "--name", "test-etc-hosts-ro-linked", "busybox", "top") - - out, _ := dockerCmd(c, "run", "--read-only", "--link", "test-etc-hosts-ro-linked:testlinked", "busybox", "cat", "/etc/hosts") - if !strings.Contains(string(out), "testlinked") { - c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled") - } -} - -func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithDnsFlag(c *check.C) { - // Not applicable on Windows which does not support either --read-only or --dns. 
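- // Note: /etc/resolv.conf is managed and bind-mounted into the container by the - // daemon, so --dns should still take effect even though the rootfs itself is - // mounted read-only.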
- // --read-only + userns has remount issues - testRequires(c, DaemonIsLinux, NotUserNamespace) - - out, _ := dockerCmd(c, "run", "--read-only", "--dns", "1.1.1.1", "busybox", "/bin/cat", "/etc/resolv.conf") - if !strings.Contains(string(out), "1.1.1.1") { - c.Fatal("Expected /etc/resolv.conf to be updated even if --read-only enabled and --dns flag used") - } -} - -func (s *DockerSuite) TestRunContainerWithReadonlyRootfsWithAddHostFlag(c *check.C) { - // Not applicable on Windows which does not support --read-only - // --read-only + userns has remount issues - testRequires(c, DaemonIsLinux, NotUserNamespace) - - out, _ := dockerCmd(c, "run", "--read-only", "--add-host", "testreadonly:127.0.0.1", "busybox", "/bin/cat", "/etc/hosts") - if !strings.Contains(string(out), "testreadonly") { - c.Fatal("Expected /etc/hosts to be updated even if --read-only enabled and --add-host flag used") - } -} - -func (s *DockerSuite) TestRunVolumesFromRestartAfterRemoved(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - runSleepingContainer(c, "--name=voltest", "-v", prefix+"/foo") - runSleepingContainer(c, "--name=restarter", "--volumes-from", "voltest") - - // Remove the main volume container and restart the consuming container - dockerCmd(c, "rm", "-f", "voltest") - - // This should not fail since the volumes-from were already applied - dockerCmd(c, "restart", "restarter") -} - -// run container with --rm should remove container if exit code != 0 -func (s *DockerSuite) TestRunContainerWithRmFlagExitCodeNotEqualToZero(c *check.C) { - name := "flowers" - out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "ls", "/notexists") - if err == nil { - c.Fatal("Expected docker run to fail", out, err) - } - - out, err = getAllContainers() - if err != nil { - c.Fatal(out, err) - } - - if out != "" { - c.Fatal("Expected not to have containers", out) - } -} - -func (s *DockerSuite) TestRunContainerWithRmFlagCannotStartContainer(c *check.C) { - name := "sparkles" - out, _, err := dockerCmdWithError("run", "--name", name, "--rm", "busybox", "commandNotFound") - if err == nil { - c.Fatal("Expected docker run to fail", out, err) - } - - out, err = getAllContainers() - if err != nil { - c.Fatal(out, err) - } - - if out != "" { - c.Fatal("Expected not to have containers", out) - } -} - -func (s *DockerSuite) TestRunPidHostWithChildIsKillable(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux, NotUserNamespace) - name := "ibuildthecloud" - dockerCmd(c, "run", "-d", "--pid=host", "--name", name, "busybox", "sh", "-c", "sleep 30; echo hi") - - c.Assert(waitRun(name), check.IsNil) - - errchan := make(chan error) - go func() { - if out, _, err := dockerCmdWithError("kill", name); err != nil { - errchan <- fmt.Errorf("%v:\n%s", err, out) - } - close(errchan) - }() - select { - case err := <-errchan: - c.Assert(err, check.IsNil) - case <-time.After(5 * time.Second): - c.Fatal("Kill container timed out") - } -} - -func (s *DockerSuite) TestRunWithTooSmallMemoryLimit(c *check.C) { - // TODO Windows. 
This may be possible to enable once Windows supports - // memory limits on containers - testRequires(c, DaemonIsLinux) - // this memory limit is 1 byte less than the min, which is 4MB - // https://github.com/docker/docker/blob/v1.5.0/daemon/create.go#L22 - out, _, err := dockerCmdWithError("run", "-m", "4194303", "busybox") - if err == nil || !strings.Contains(out, "Minimum memory limit allowed is 4MB") { - c.Fatalf("expected run to fail when using too low a memory limit: %q", out) - } -} - -func (s *DockerSuite) TestRunWriteToProcAsound(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - _, code, err := dockerCmdWithError("run", "busybox", "sh", "-c", "echo 111 >> /proc/asound/version") - if err == nil || code == 0 { - c.Fatal("standard container should not be able to write to /proc/asound") - } -} - -func (s *DockerSuite) TestRunReadProcTimer(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/timer_stats") - if code != 0 { - return - } - if err != nil { - c.Fatal(err) - } - if strings.Trim(out, "\n ") != "" { - c.Fatalf("expected to receive no output from /proc/timer_stats but received %q", out) - } -} - -func (s *DockerSuite) TestRunReadProcLatency(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - // some kernels don't have this configured so skip the test if this file is not found - // on the host running the tests. - if _, err := os.Stat("/proc/latency_stats"); err != nil { - c.Skip("kernel doesn't have latency_stats configured") - return - } - out, code, err := dockerCmdWithError("run", "busybox", "cat", "/proc/latency_stats") - if code != 0 { - return - } - if err != nil { - c.Fatal(err) - } - if strings.Trim(out, "\n ") != "" { - c.Fatalf("expected to receive no output from /proc/latency_stats but received %q", out) - } -} - -func (s *DockerSuite) TestRunReadFilteredProc(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) - - testReadPaths := []string{ - "/proc/latency_stats", - "/proc/timer_stats", - "/proc/kcore", - } - for i, filePath := range testReadPaths { - name := fmt.Sprintf("procsieve-%d", i) - shellCmd := fmt.Sprintf("exec 3<%s", filePath) - - out, exitCode, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) - if exitCode != 0 { - return - } - if err != nil { - c.Fatalf("Open FD for read should have failed with permission denied, got: %s, %v", out, err) - } - } -} - -func (s *DockerSuite) TestMountIntoProc(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - _, code, err := dockerCmdWithError("run", "-v", "/proc//sys", "busybox", "true") - if err == nil || code == 0 { - c.Fatal("container should not be able to mount into /proc") - } -} - -func (s *DockerSuite) TestMountIntoSys(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - testRequires(c, NotUserNamespace) - dockerCmd(c, "run", "-v", "/sys/fs/cgroup", "busybox", "true") -} - -func (s *DockerSuite) TestRunUnshareProc(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, Apparmor, DaemonIsLinux, 
NotUserNamespace) - - // In this test goroutines are used to run test cases in parallel to prevent the test from taking a long time to run. - errChan := make(chan error) - - go func() { - name := "acidburn" - out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "--mount-proc=/proc", "mount") - if err == nil || - !(strings.Contains(strings.ToLower(out), "permission denied") || - strings.Contains(strings.ToLower(out), "operation not permitted")) { - errChan <- fmt.Errorf("unshare with --mount-proc should have failed with 'permission denied' or 'operation not permitted', got: %s, %v", out, err) - } else { - errChan <- nil - } - }() - - go func() { - name := "cereal" - out, _, err := dockerCmdWithError("run", "--name", name, "--security-opt", "seccomp=unconfined", "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") - if err == nil || - !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || - strings.Contains(strings.ToLower(out), "permission denied") || - strings.Contains(strings.ToLower(out), "operation not permitted")) { - errChan <- fmt.Errorf("unshare and mount of /proc should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) - } else { - errChan <- nil - } - }() - - /* Ensure still fails if running privileged with the default policy */ - go func() { - name := "crashoverride" - out, _, err := dockerCmdWithError("run", "--privileged", "--security-opt", "seccomp=unconfined", "--security-opt", "apparmor=docker-default", "--name", name, "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") - if err == nil || - !(strings.Contains(strings.ToLower(out), "mount: cannot mount none") || - strings.Contains(strings.ToLower(out), "permission denied") || - strings.Contains(strings.ToLower(out), "operation not permitted")) { - errChan <- fmt.Errorf("privileged unshare with apparmor should have failed with 'mount: cannot mount none' or 'permission denied', got: %s, %v", out, err) - } else { - errChan <- nil - } - }() - - for i := 0; i < 3; i++ { - err := <-errChan - if err != nil { - c.Fatal(err) - } - } -} - -func (s *DockerSuite) TestRunPublishPort(c *check.C) { - // TODO Windows: This may be possible once Windows moves to libnetwork and CNM - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name", "test", "--expose", "8080", "busybox", "top") - out, _ := dockerCmd(c, "port", "test") - out = strings.Trim(out, "\r\n") - if out != "" { - c.Fatalf("run without --publish-all should not publish port, out should be nil, but got: %s", out) - } -} - -// Issue #10184. 
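- // The ":mrw" permission spec passed to --device below should yield a - // world-readable/writable character device node (mode crw-rw-rw-) for - // /dev/fuse inside the container.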
-func (s *DockerSuite) TestDevicePermissions(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - const permissions = "crw-rw-rw-" - out, status := dockerCmd(c, "run", "--device", "/dev/fuse:/dev/fuse:mrw", "busybox:latest", "ls", "-l", "/dev/fuse") - if status != 0 { - c.Fatalf("expected status 0, got %d", status) - } - if !strings.HasPrefix(out, permissions) { - c.Fatalf("output should begin with %q, got %q", permissions, out) - } -} - -func (s *DockerSuite) TestRunCapAddCHOWN(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=CHOWN", "busybox", "sh", "-c", "adduser -D -H newuser && chown newuser /home && echo ok") - - if actual := strings.Trim(out, "\r\n"); actual != "ok" { - c.Fatalf("expected output ok received %s", actual) - } -} - -// https://github.com/docker/docker/pull/14498 -func (s *DockerSuite) TestVolumeFromMixedRWOptions(c *check.C) { - prefix, slash := getPrefixAndSlashFromDaemonPlatform() - - dockerCmd(c, "run", "--name", "parent", "-v", prefix+"/test", "busybox", "true") - - // TODO Windows: Temporary check - remove once TP5 support is dropped - if daemonPlatform != "windows" || windowsDaemonKV >= 14350 { - dockerCmd(c, "run", "--volumes-from", "parent:ro", "--name", "test-volumes-1", "busybox", "true") - } - dockerCmd(c, "run", "--volumes-from", "parent:rw", "--name", "test-volumes-2", "busybox", "true") - - if daemonPlatform != "windows" { - mRO, err := inspectMountPoint("test-volumes-1", prefix+slash+"test") - c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) - if mRO.RW { - c.Fatalf("Expected RO volume was RW") - } - } - - mRW, err := inspectMountPoint("test-volumes-2", prefix+slash+"test") - c.Assert(err, checker.IsNil, check.Commentf("failed to inspect mount point")) - if !mRW.RW { - c.Fatalf("Expected RW volume was RO") - } -} - -func (s *DockerSuite) TestRunWriteFilteredProc(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, Apparmor, DaemonIsLinux, NotUserNamespace) - - testWritePaths := []string{ - /* modprobe and core_pattern should both be denied by generic - * policy of denials for /proc/sys/kernel. 
These files have been - * picked to be checked as they are particularly sensitive to writes */ - "/proc/sys/kernel/modprobe", - "/proc/sys/kernel/core_pattern", - "/proc/sysrq-trigger", - "/proc/kcore", - } - for i, filePath := range testWritePaths { - name := fmt.Sprintf("writeprocsieve-%d", i) - - shellCmd := fmt.Sprintf("exec 3>%s", filePath) - out, code, err := dockerCmdWithError("run", "--privileged", "--security-opt", "apparmor=docker-default", "--name", name, "busybox", "sh", "-c", shellCmd) - if code != 0 { - return - } - if err != nil { - c.Fatalf("Open FD for write should have failed with permission denied, got: %s, %v", out, err) - } - } -} - -func (s *DockerSuite) TestRunNetworkFilesBindMount(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, SameHostDaemon, DaemonIsLinux) - - expected := "test123" - - filename := createTmpFile(c, expected) - defer os.Remove(filename) - - nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} - - for i := range nwfiles { - actual, _ := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "busybox", "cat", nwfiles[i]) - if actual != expected { - c.Fatalf("expected %s to be: %q, but was: %q", nwfiles[i], expected, actual) - } - } -} - -func (s *DockerSuite) TestRunNetworkFilesBindMountRO(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, SameHostDaemon, DaemonIsLinux) - - filename := createTmpFile(c, "test123") - defer os.Remove(filename) - - nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} - - for i := range nwfiles { - _, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "busybox", "touch", nwfiles[i]) - if err == nil || exitCode == 0 { - c.Fatalf("run should fail because bind mount of %s is ro: exit code %d", nwfiles[i], exitCode) - } - } -} - -func (s *DockerSuite) TestRunNetworkFilesBindMountROFilesystem(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - // --read-only + userns has remount issues - testRequires(c, SameHostDaemon, DaemonIsLinux, NotUserNamespace) - - filename := createTmpFile(c, "test123") - defer os.Remove(filename) - - nwfiles := []string{"/etc/resolv.conf", "/etc/hosts", "/etc/hostname"} - - for i := range nwfiles { - _, exitCode := dockerCmd(c, "run", "-v", filename+":"+nwfiles[i], "--read-only", "busybox", "touch", nwfiles[i]) - if exitCode != 0 { - c.Fatalf("run should not fail because %s is mounted writable on read-only root filesystem: exit code %d", nwfiles[i], exitCode) - } - } - - for i := range nwfiles { - _, exitCode, err := dockerCmdWithError("run", "-v", filename+":"+nwfiles[i]+":ro", "--read-only", "busybox", "touch", nwfiles[i]) - if err == nil || exitCode == 0 { - c.Fatalf("run should fail because %s is mounted read-only on read-only root filesystem: exit code %d", nwfiles[i], exitCode) - } - } -} - -func (s *DockerTrustSuite) TestTrustedRun(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - repoName := s.setupTrustedImage(c, "trusted-run") - - // Try run - runCmd := exec.Command(dockerBinary, "run", repoName) - s.trustedCmd(runCmd) - out, _, err := runCommandWithOutput(runCmd) - if err != nil { - c.Fatalf("Error running trusted run: %s\n%s\n", err, out) - } - - if !strings.Contains(string(out), "Tagging") { - c.Fatalf("Missing expected output on trusted run:\n%s", out) - } - - dockerCmd(c, "rmi", repoName) - - // Try untrusted run to ensure we pushed the tag to the 
registry - runCmd = exec.Command(dockerBinary, "run", "--disable-content-trust=true", repoName) - s.trustedCmd(runCmd) - out, _, err = runCommandWithOutput(runCmd) - if err != nil { - c.Fatalf("Error running trusted run: %s\n%s", err, out) - } - - if !strings.Contains(string(out), "Status: Downloaded") { - c.Fatalf("Missing expected output on trusted run with --disable-content-trust:\n%s", out) - } -} - -func (s *DockerTrustSuite) TestUntrustedRun(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - repoName := fmt.Sprintf("%v/dockercliuntrusted/runtest:latest", privateRegistryURL) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - dockerCmd(c, "push", repoName) - dockerCmd(c, "rmi", repoName) - - // Try trusted run on untrusted tag - runCmd := exec.Command(dockerBinary, "run", repoName) - s.trustedCmd(runCmd) - out, _, err := runCommandWithOutput(runCmd) - if err == nil { - c.Fatalf("Expected an error when running trusted run on an untrusted tag, got:\n%s", out) - } - - if !strings.Contains(string(out), "does not have trust data for") { - c.Fatalf("Missing expected output on trusted run:\n%s", out) - } -} - -func (s *DockerTrustSuite) TestRunWhenCertExpired(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - c.Skip("Currently changes system time, causing instability") - repoName := s.setupTrustedImage(c, "trusted-run-expired") - - // Certificates are valid for 10 years - elevenYearsFromNow := time.Now().Add(time.Hour * 24 * 365 * 11) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try run - runCmd := exec.Command(dockerBinary, "run", repoName) - s.trustedCmd(runCmd) - out, _, err := runCommandWithOutput(runCmd) - if err == nil { - c.Fatalf("Expected trusted run in the distant future to fail, got: %s\n%s", err, out) - } - - if !strings.Contains(string(out), "could not validate the path to a trusted root") { - c.Fatalf("Missing expected output on trusted run in the distant future:\n%s", out) - } - }) - - runAtDifferentDate(elevenYearsFromNow, func() { - // Try run - runCmd := exec.Command(dockerBinary, "run", "--disable-content-trust", repoName) - s.trustedCmd(runCmd) - out, _, err := runCommandWithOutput(runCmd) - if err != nil { - c.Fatalf("Error running untrusted run in the distant future: %s\n%s", err, out) - } - - if !strings.Contains(string(out), "Status: Downloaded") { - c.Fatalf("Missing expected output on untrusted run in the distant future:\n%s", out) - } - }) -} - -func (s *DockerTrustSuite) TestTrustedRunFromBadTrustServer(c *check.C) { - // Windows does not support this functionality - testRequires(c, DaemonIsLinux) - repoName := fmt.Sprintf("%v/dockerclievilrun/trusted:latest", privateRegistryURL) - evilLocalConfigDir, err := ioutil.TempDir("", "evilrun-local-config-dir") - if err != nil { - c.Fatalf("Failed to create local temp dir") - } - - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - if err != nil { - c.Fatalf("Error running trusted push: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Signing and pushing trust metadata") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) - } - - dockerCmd(c, "rmi", repoName) - - // Try run - runCmd := exec.Command(dockerBinary, "run", repoName) - s.trustedCmd(runCmd) - out, _, err = 
runCommandWithOutput(runCmd) - if err != nil { - c.Fatalf("Error running trusted run: %s\n%s", err, out) - } - - if !strings.Contains(string(out), "Tagging") { - c.Fatalf("Missing expected output on trusted run:\n%s", out) - } - - dockerCmd(c, "rmi", repoName) - - // Kill the notary server, start a new "evil" one. - s.not.Close() - s.not, err = newTestNotary(c) - if err != nil { - c.Fatalf("Restarting notary server failed.") - } - - // In order to make an evil server, let's re-init a client (with a different trust dir) and push new data. - // tag an image and upload it to the private registry - dockerCmd(c, "--config", evilLocalConfigDir, "tag", "busybox", repoName) - - // Push up to the new server - pushCmd = exec.Command(dockerBinary, "--config", evilLocalConfigDir, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err = runCommandWithOutput(pushCmd) - if err != nil { - c.Fatalf("Error running trusted push: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Signing and pushing trust metadata") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) - } - - // Now, try running with the original client from this new trust server. This should fall back to our cached timestamp and metadata. - runCmd = exec.Command(dockerBinary, "run", repoName) - s.trustedCmd(runCmd) - out, _, err = runCommandWithOutput(runCmd) - - if err != nil { - c.Fatalf("Error falling back to cached trust data: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Error while downloading remote metadata, using cached timestamp") { - c.Fatalf("Missing expected output on trusted run:\n%s", out) - } -} - -func (s *DockerSuite) TestPtraceContainerProcsFromHost(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux, SameHostDaemon) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), check.IsNil) - pid1 := inspectField(c, id, "State.Pid") - - _, err := os.Readlink(fmt.Sprintf("/proc/%s/ns/net", pid1)) - if err != nil { - c.Fatal(err) - } -} - -func (s *DockerSuite) TestAppArmorDeniesPtrace(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotGCCGO) - - // Run through 'sh' so we are NOT pid 1. Pid 1 may be able to trace - // itself, but pid>1 should not be able to trace pid1. - _, exitCode, _ := dockerCmdWithError("run", "busybox", "sh", "-c", "sh -c readlink /proc/1/ns/net") - if exitCode == 0 { - c.Fatal("ptrace was not successfully restricted by AppArmor") - } -} - -func (s *DockerSuite) TestAppArmorTraceSelf(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux, SameHostDaemon, Apparmor) - - _, exitCode, _ := dockerCmdWithError("run", "busybox", "readlink", "/proc/1/ns/net") - if exitCode != 0 { - c.Fatal("ptrace of self failed.") - } -} - -func (s *DockerSuite) TestAppArmorDeniesChmodProc(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, SameHostDaemon, Apparmor, DaemonIsLinux, NotUserNamespace) - _, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "744", "/proc/cpuinfo") - if exitCode == 0 { - // If our test failed, attempt to repair the host system... 
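- // (the repair is a second chmod that should restore /proc/cpuinfo to its - // usual 444 mode; if that chmod also succeeds, AppArmor clearly failed to - // restrict chmod on /proc files)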
- _, exitCode, _ := dockerCmdWithError("run", "busybox", "chmod", "444", "/proc/cpuinfo") - if exitCode == 0 { - c.Fatal("AppArmor was unsuccessful in prohibiting chmod of /proc/* files.") - } - } -} - -func (s *DockerSuite) TestRunCapAddSYSTIME(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - - dockerCmd(c, "run", "--cap-drop=ALL", "--cap-add=SYS_TIME", "busybox", "sh", "-c", "grep ^CapEff /proc/self/status | sed 's/^CapEff:\t//' | grep ^0000000002000000$") -} - -// run create container failed should clean up the container -func (s *DockerSuite) TestRunCreateContainerFailedCleanUp(c *check.C) { - // TODO Windows. This may be possible to enable once link is supported - testRequires(c, DaemonIsLinux) - name := "unique_name" - _, _, err := dockerCmdWithError("run", "--name", name, "--link", "nothing:nothing", "busybox") - c.Assert(err, check.NotNil, check.Commentf("Expected docker run to fail!")) - - containerID, err := inspectFieldWithError(name, "Id") - c.Assert(err, checker.NotNil, check.Commentf("Expected not to have this container: %s!", containerID)) - c.Assert(containerID, check.Equals, "", check.Commentf("Expected not to have this container: %s!", containerID)) -} - -func (s *DockerSuite) TestRunNamedVolume(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "--name=test", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "echo hello > "+prefix+"/foo/bar") - - out, _ := dockerCmd(c, "run", "--volumes-from", "test", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar") - c.Assert(strings.TrimSpace(out), check.Equals, "hello") - - out, _ = dockerCmd(c, "run", "-v", "testing:"+prefix+"/foo", "busybox", "sh", "-c", "cat "+prefix+"/foo/bar") - c.Assert(strings.TrimSpace(out), check.Equals, "hello") -} - -func (s *DockerSuite) TestRunWithUlimits(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "--name=testulimits", "--ulimit", "nofile=42", "busybox", "/bin/sh", "-c", "ulimit -n") - ul := strings.TrimSpace(out) - if ul != "42" { - c.Fatalf("expected `ulimit -n` to be 42, got %s", ul) - } -} - -func (s *DockerSuite) TestRunContainerWithCgroupParent(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - - cgroupParent := "test" - name := "cgroup-test" - - out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") - if err != nil { - c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) - } - cgroupPaths := parseCgroupPaths(string(out)) - if len(cgroupPaths) == 0 { - c.Fatalf("unexpected output - %q", string(out)) - } - id, err := getIDByName(name) - c.Assert(err, check.IsNil) - expectedCgroup := path.Join(cgroupParent, id) - found := false - for _, path := range cgroupPaths { - if strings.HasSuffix(path, expectedCgroup) { - found = true - break - } - } - if !found { - c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. 
Cgroup Paths: %v", expectedCgroup, cgroupPaths) - } -} - -func (s *DockerSuite) TestRunContainerWithCgroupParentAbsPath(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - - cgroupParent := "/cgroup-parent/test" - name := "cgroup-test" - out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") - if err != nil { - c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) - } - cgroupPaths := parseCgroupPaths(string(out)) - if len(cgroupPaths) == 0 { - c.Fatalf("unexpected output - %q", string(out)) - } - id, err := getIDByName(name) - c.Assert(err, check.IsNil) - expectedCgroup := path.Join(cgroupParent, id) - found := false - for _, path := range cgroupPaths { - if strings.HasSuffix(path, expectedCgroup) { - found = true - break - } - } - if !found { - c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) - } -} - -// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. -func (s *DockerSuite) TestRunInvalidCgroupParent(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - - cgroupParent := "../../../../../../../../SHOULD_NOT_EXIST" - cleanCgroupParent := "SHOULD_NOT_EXIST" - name := "cgroup-invalid-test" - - out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") - if err != nil { - // XXX: This may include a daemon crash. - c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) - } - - // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. - if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { - c.Fatalf("SECURITY: --cgroup-parent with ../../ relative paths cause files to be created in the host (this is bad) !!") - } - - cgroupPaths := parseCgroupPaths(string(out)) - if len(cgroupPaths) == 0 { - c.Fatalf("unexpected output - %q", string(out)) - } - id, err := getIDByName(name) - c.Assert(err, check.IsNil) - expectedCgroup := path.Join(cleanCgroupParent, id) - found := false - for _, path := range cgroupPaths { - if strings.HasSuffix(path, expectedCgroup) { - found = true - break - } - } - if !found { - c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) - } -} - -// TestRunInvalidCgroupParent checks that a specially-crafted cgroup parent doesn't cause Docker to crash or start modifying /. -func (s *DockerSuite) TestRunAbsoluteInvalidCgroupParent(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - testRequires(c, DaemonIsLinux) - - cgroupParent := "/../../../../../../../../SHOULD_NOT_EXIST" - cleanCgroupParent := "/SHOULD_NOT_EXIST" - name := "cgroup-absolute-invalid-test" - - out, _, err := dockerCmdWithError("run", "--cgroup-parent", cgroupParent, "--name", name, "busybox", "cat", "/proc/self/cgroup") - if err != nil { - // XXX: This may include a daemon crash. - c.Fatalf("unexpected failure when running container with --cgroup-parent option - %s\n%v", string(out), err) - } - - // We expect "/SHOULD_NOT_EXIST" to not exist. If not, we have a security issue. 
- if _, err := os.Stat("/SHOULD_NOT_EXIST"); err == nil || !os.IsNotExist(err) { - c.Fatalf("SECURITY: --cgroup-parent with /../../ garbage paths cause files to be created in the host (this is bad) !!") - } - - cgroupPaths := parseCgroupPaths(string(out)) - if len(cgroupPaths) == 0 { - c.Fatalf("unexpected output - %q", string(out)) - } - id, err := getIDByName(name) - c.Assert(err, check.IsNil) - expectedCgroup := path.Join(cleanCgroupParent, id) - found := false - for _, path := range cgroupPaths { - if strings.HasSuffix(path, expectedCgroup) { - found = true - break - } - } - if !found { - c.Fatalf("unexpected cgroup paths. Expected at least one cgroup path to have suffix %q. Cgroup Paths: %v", expectedCgroup, cgroupPaths) - } -} - -func (s *DockerSuite) TestRunContainerWithCgroupMountRO(c *check.C) { - // Not applicable on Windows as uses Unix specific functionality - // --read-only + userns has remount issues - testRequires(c, DaemonIsLinux, NotUserNamespace) - - filename := "/sys/fs/cgroup/devices/test123" - out, _, err := dockerCmdWithError("run", "busybox", "touch", filename) - if err == nil { - c.Fatal("expected cgroup mount point to be read-only, touch file should fail") - } - expected := "Read-only file system" - if !strings.Contains(out, expected) { - c.Fatalf("expected output from failure to contain %s but contains %s", expected, out) - } -} - -func (s *DockerSuite) TestRunContainerNetworkModeToSelf(c *check.C) { - // Not applicable on Windows which does not support --net=container - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "--name=me", "--net=container:me", "busybox", "true") - if err == nil || !strings.Contains(out, "cannot join own network") { - c.Fatalf("using container net mode to self should result in an error\nerr: %q\nout: %s", err, out) - } -} - -func (s *DockerSuite) TestRunContainerNetModeWithDnsMacHosts(c *check.C) { - // Not applicable on Windows which does not support --net=container - testRequires(c, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "-d", "--name", "parent", "busybox", "top") - if err != nil { - c.Fatalf("failed to run container: %v, output: %q", err, out) - } - - out, _, err = dockerCmdWithError("run", "--dns", "1.2.3.4", "--net=container:parent", "busybox") - if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkAndDNS.Error()) { - c.Fatalf("run --net=container with --dns should error out") - } - - out, _, err = dockerCmdWithError("run", "--mac-address", "92:d0:c6:0a:29:33", "--net=container:parent", "busybox") - if err == nil || !strings.Contains(out, runconfig.ErrConflictContainerNetworkAndMac.Error()) { - c.Fatalf("run --net=container with --mac-address should error out") - } - - out, _, err = dockerCmdWithError("run", "--add-host", "test:192.168.2.109", "--net=container:parent", "busybox") - if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkHosts.Error()) { - c.Fatalf("run --net=container with --add-host should error out") - } -} - -func (s *DockerSuite) TestRunContainerNetModeWithExposePort(c *check.C) { - // Not applicable on Windows which does not support --net=container - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top") - - out, _, err := dockerCmdWithError("run", "-p", "5000:5000", "--net=container:parent", "busybox") - if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { - c.Fatalf("run --net=container with -p should error out") - } - - out, _, err = dockerCmdWithError("run", 
"-P", "--net=container:parent", "busybox") - if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkPublishPorts.Error()) { - c.Fatalf("run --net=container with -P should error out") - } - - out, _, err = dockerCmdWithError("run", "--expose", "5000", "--net=container:parent", "busybox") - if err == nil || !strings.Contains(out, runconfig.ErrConflictNetworkExposePorts.Error()) { - c.Fatalf("run --net=container with --expose should error out") - } -} - -func (s *DockerSuite) TestRunLinkToContainerNetMode(c *check.C) { - // Not applicable on Windows which does not support --net=container or --link - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "--name", "test", "-d", "busybox", "top") - dockerCmd(c, "run", "--name", "parent", "-d", "--net=container:test", "busybox", "top") - dockerCmd(c, "run", "-d", "--link=parent:parent", "busybox", "top") - dockerCmd(c, "run", "--name", "child", "-d", "--net=container:parent", "busybox", "top") - dockerCmd(c, "run", "-d", "--link=child:child", "busybox", "top") -} - -func (s *DockerSuite) TestRunLoopbackOnlyExistsWhenNetworkingDisabled(c *check.C) { - // TODO Windows: This may be possible to convert. - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "--net=none", "busybox", "ip", "-o", "-4", "a", "show", "up") - - var ( - count = 0 - parts = strings.Split(out, "\n") - ) - - for _, l := range parts { - if l != "" { - count++ - } - } - - if count != 1 { - c.Fatalf("Wrong interface count in container %d", count) - } - - if !strings.HasPrefix(out, "1: lo") { - c.Fatalf("Wrong interface in test container: expected [1: lo], got %s", out) - } -} - -// Issue #4681 -func (s *DockerSuite) TestRunLoopbackWhenNetworkDisabled(c *check.C) { - if daemonPlatform == "windows" { - dockerCmd(c, "run", "--net=none", WindowsBaseImage, "ping", "-n", "1", "127.0.0.1") - } else { - dockerCmd(c, "run", "--net=none", "busybox", "ping", "-c", "1", "127.0.0.1") - } -} - -func (s *DockerSuite) TestRunModeNetContainerHostname(c *check.C) { - // Windows does not support --net=container - testRequires(c, DaemonIsLinux, ExecSupport) - - dockerCmd(c, "run", "-i", "-d", "--name", "parent", "busybox", "top") - out, _ := dockerCmd(c, "exec", "parent", "cat", "/etc/hostname") - out1, _ := dockerCmd(c, "run", "--net=container:parent", "busybox", "cat", "/etc/hostname") - - if out1 != out { - c.Fatal("containers with shared net namespace should have same hostname") - } -} - -func (s *DockerSuite) TestRunNetworkNotInitializedNoneMode(c *check.C) { - // TODO Windows: Network settings are not currently propagated. This may - // be resolved in the future with the move to libnetwork and CNM. 
- testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "--net=none", "busybox", "top") - id := strings.TrimSpace(out) - res := inspectField(c, id, "NetworkSettings.Networks.none.IPAddress") - if res != "" { - c.Fatalf("For 'none' mode network must not be initialized, but container got IP: %s", res) - } -} - -func (s *DockerSuite) TestTwoContainersInNetHost(c *check.C) { - // Not applicable as Windows does not support --net=host - testRequires(c, DaemonIsLinux, NotUserNamespace, NotUserNamespace) - dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=host", "--name=second", "busybox", "top") - dockerCmd(c, "stop", "first") - dockerCmd(c, "stop", "second") -} - -func (s *DockerSuite) TestContainersInUserDefinedNetwork(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork") - dockerCmd(c, "run", "-d", "--net=testnetwork", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-t", "--net=testnetwork", "--name=second", "busybox", "ping", "-c", "1", "first") -} - -func (s *DockerSuite) TestContainersInMultipleNetworks(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - // Create 2 networks using bridge driver - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") - // Run and connect containers to testnetwork1 - dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - // Check connectivity between containers in testnetwork2 - dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") - // Connect containers to testnetwork2 - dockerCmd(c, "network", "connect", "testnetwork2", "first") - dockerCmd(c, "network", "connect", "testnetwork2", "second") - // Check connectivity between containers - dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") -} - -func (s *DockerSuite) TestContainersNetworkIsolation(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - // Create 2 networks using bridge driver - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") - // Run 1 container in testnetwork1 and another in testnetwork2 - dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=testnetwork2", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // Check Isolation between containers : ping must fail - _, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") - c.Assert(err, check.NotNil) - // Connect first container to testnetwork2 - dockerCmd(c, "network", "connect", "testnetwork2", "first") - // ping must succeed now - _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") - c.Assert(err, check.IsNil) - - // Disconnect first container from testnetwork2 - dockerCmd(c, "network", "disconnect", "testnetwork2", "first") - // ping must fail again - _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second") - c.Assert(err, check.NotNil) -} - -func (s *DockerSuite) TestNetworkRmWithActiveContainers(c 
*check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - // Create a network using bridge driver - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") - // Run and connect containers to testnetwork1 - dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - // Network delete with active containers must fail - _, _, err := dockerCmdWithError("network", "rm", "testnetwork1") - c.Assert(err, check.NotNil) - - dockerCmd(c, "stop", "first") - _, _, err = dockerCmdWithError("network", "rm", "testnetwork1") - c.Assert(err, check.NotNil) -} - -func (s *DockerSuite) TestContainerRestartInMultipleNetworks(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - // Create 2 networks using bridge driver - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork2") - - // Run and connect containers to testnetwork1 - dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - // Check connectivity between containers in testnetwork1 - dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") - // Connect containers to testnetwork2 - dockerCmd(c, "network", "connect", "testnetwork2", "first") - dockerCmd(c, "network", "connect", "testnetwork2", "second") - // Check connectivity between containers - dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") - - // Stop second container and test ping failures on both networks - dockerCmd(c, "stop", "second") - _, _, err := dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork1") - c.Assert(err, check.NotNil) - _, _, err = dockerCmdWithError("exec", "first", "ping", "-c", "1", "second.testnetwork2") - c.Assert(err, check.NotNil) - - // Start second container and connectivity must be restored on both networks - dockerCmd(c, "start", "second") - dockerCmd(c, "exec", "first", "ping", "-c", "1", "second.testnetwork1") - dockerCmd(c, "exec", "second", "ping", "-c", "1", "first.testnetwork2") -} - -func (s *DockerSuite) TestContainerWithConflictingHostNetworks(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - // Run a container with --net=host - dockerCmd(c, "run", "-d", "--net=host", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - - // Create a network using bridge driver - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") - - // Connecting to the user defined network must fail - _, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first") - c.Assert(err, check.NotNil) -} - -func (s *DockerSuite) TestContainerWithConflictingSharedNetwork(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - // Run second container in first container's network namespace - dockerCmd(c, "run", "-d", "--net=container:first", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // Create a network using bridge driver - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") - - // Connecting to the user defined network must
fail - out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "second") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, runconfig.ErrConflictSharedNetwork.Error()) -} - -func (s *DockerSuite) TestContainerWithConflictingNoneNetwork(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "-d", "--net=none", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - - // Create a network using bridge driver - dockerCmd(c, "network", "create", "-d", "bridge", "testnetwork1") - - // Connecting to the user defined network must fail - out, _, err := dockerCmdWithError("network", "connect", "testnetwork1", "first") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, runconfig.ErrConflictNoNetwork.Error()) - - // create a container connected to testnetwork1 - dockerCmd(c, "run", "-d", "--net=testnetwork1", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // Connect second container to none network. it must fail as well - _, _, err = dockerCmdWithError("network", "connect", "none", "second") - c.Assert(err, check.NotNil) -} - -// #11957 - stdin with no tty does not exit if stdin is not closed even though container exited -func (s *DockerSuite) TestRunStdinBlockedAfterContainerExit(c *check.C) { - cmd := exec.Command(dockerBinary, "run", "-i", "--name=test", "busybox", "true") - in, err := cmd.StdinPipe() - c.Assert(err, check.IsNil) - defer in.Close() - stdout := bytes.NewBuffer(nil) - cmd.Stdout = stdout - cmd.Stderr = stdout - c.Assert(cmd.Start(), check.IsNil) - - waitChan := make(chan error) - go func() { - waitChan <- cmd.Wait() - }() - - select { - case err := <-waitChan: - c.Assert(err, check.IsNil, check.Commentf(stdout.String())) - case <-time.After(30 * time.Second): - c.Fatal("timeout waiting for command to exit") - } -} - -func (s *DockerSuite) TestRunWrongCpusetCpusFlagValue(c *check.C) { - // TODO Windows: This needs validation (error out) in the daemon. - testRequires(c, DaemonIsLinux) - out, exitCode, err := dockerCmdWithError("run", "--cpuset-cpus", "1-10,11--", "busybox", "true") - c.Assert(err, check.NotNil) - expected := "Error response from daemon: Invalid value 1-10,11-- for cpuset cpus.\n" - if !(strings.Contains(out, expected) || exitCode == 125) { - c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode) - } -} - -func (s *DockerSuite) TestRunWrongCpusetMemsFlagValue(c *check.C) { - // TODO Windows: This needs validation (error out) in the daemon. 
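- // Note: a valid cpuset spec is a comma-separated list of indices or ranges, e.g. "0-1,3"; the dangling "--" makes "1-42--" malformed, so the daemon should reject it.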
- testRequires(c, DaemonIsLinux) - out, exitCode, err := dockerCmdWithError("run", "--cpuset-mems", "1-42--", "busybox", "true") - c.Assert(err, check.NotNil) - expected := "Error response from daemon: Invalid value 1-42-- for cpuset mems.\n" - if !(strings.Contains(out, expected) || exitCode == 125) { - c.Fatalf("Expected output to contain %q with exitCode 125, got out: %q exitCode: %v", expected, out, exitCode) - } -} - -// TestRunNonExecutableCmd checks that 'docker run busybox foo' exits with error code 127' -func (s *DockerSuite) TestRunNonExecutableCmd(c *check.C) { - name := "testNonExecutableCmd" - runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "foo") - _, exit, _ := runCommandWithOutput(runCmd) - stateExitCode := findContainerExitCode(c, name) - if !(exit == 127 && strings.Contains(stateExitCode, "127")) { - c.Fatalf("Run non-executable command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) - } -} - -// TestRunNonExistingCmd checks that 'docker run busybox /bin/foo' exits with code 127. -func (s *DockerSuite) TestRunNonExistingCmd(c *check.C) { - name := "testNonExistingCmd" - runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/bin/foo") - _, exit, _ := runCommandWithOutput(runCmd) - stateExitCode := findContainerExitCode(c, name) - if !(exit == 127 && strings.Contains(stateExitCode, "127")) { - c.Fatalf("Run non-existing command should have errored with exit code 127, but we got exit: %d, State.ExitCode: %s", exit, stateExitCode) - } -} - -// TestCmdCannotBeInvoked checks that 'docker run busybox /etc' exits with 126, or -// 127 on Windows. The difference is that in Windows, the container must be started -// as that's when the check is made (and yes, by its design...) -func (s *DockerSuite) TestCmdCannotBeInvoked(c *check.C) { - expected := 126 - if daemonPlatform == "windows" { - expected = 127 - } - name := "testCmdCannotBeInvoked" - runCmd := exec.Command(dockerBinary, "run", "--name", name, "busybox", "/etc") - _, exit, _ := runCommandWithOutput(runCmd) - stateExitCode := findContainerExitCode(c, name) - if !(exit == expected && strings.Contains(stateExitCode, strconv.Itoa(expected))) { - c.Fatalf("Run cmd that cannot be invoked should have errored with code %d, but we got exit: %d, State.ExitCode: %s", expected, exit, stateExitCode) - } -} - -// TestRunNonExistingImage checks that 'docker run foo' exits with error msg 125 and contains 'Unable to find image' -func (s *DockerSuite) TestRunNonExistingImage(c *check.C) { - runCmd := exec.Command(dockerBinary, "run", "foo") - out, exit, err := runCommandWithOutput(runCmd) - if !(err != nil && exit == 125 && strings.Contains(out, "Unable to find image")) { - c.Fatalf("Run non-existing image should have errored with 'Unable to find image' code 125, but we got out: %s, exit: %d, err: %s", out, exit, err) - } -} - -// TestDockerFails checks that 'docker run -foo busybox' exits with 125 to signal docker run failed -func (s *DockerSuite) TestDockerFails(c *check.C) { - runCmd := exec.Command(dockerBinary, "run", "-foo", "busybox") - out, exit, err := runCommandWithOutput(runCmd) - if !(err != nil && exit == 125) { - c.Fatalf("Docker run with flag not defined should exit with 125, but we got out: %s, exit: %d, err: %s", out, exit, err) - } -} - -// TestRunInvalidReference invokes docker run with a bad reference. 
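-// Note: a reference of the form image@digest must carry a well-formed digest, so "busybox@foo" should fail to parse.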
-func (s *DockerSuite) TestRunInvalidReference(c *check.C) { - out, exit, _ := dockerCmdWithError("run", "busybox@foo") - if exit == 0 { - c.Fatalf("expected non-zero exit code; received %d", exit) - } - - if !strings.Contains(out, "Error parsing reference") { - c.Fatalf(`Expected "Error parsing reference" in output; got: %s`, out) - } -} - -// Test fix for issue #17854 -func (s *DockerSuite) TestRunInitLayerPathOwnership(c *check.C) { - // Not applicable on Windows as it does not support Linux uid/gid ownership - testRequires(c, DaemonIsLinux) - name := "testetcfileownership" - _, err := buildImage(name, - `FROM busybox - RUN echo 'dockerio:x:1001:1001::/bin:/bin/false' >> /etc/passwd - RUN echo 'dockerio:x:1001:' >> /etc/group - RUN chown dockerio:dockerio /etc`, - true) - if err != nil { - c.Fatal(err) - } - - // Test that dockerio ownership of /etc is retained at runtime - out, _ := dockerCmd(c, "run", "--rm", name, "stat", "-c", "%U:%G", "/etc") - out = strings.TrimSpace(out) - if out != "dockerio:dockerio" { - c.Fatalf("Wrong /etc ownership: expected dockerio:dockerio, got %q", out) - } -} - -func (s *DockerSuite) TestRunWithOomScoreAdj(c *check.C) { - testRequires(c, DaemonIsLinux) - - expected := "642" - out, _ := dockerCmd(c, "run", "--oom-score-adj", expected, "busybox", "cat", "/proc/self/oom_score_adj") - oomScoreAdj := strings.TrimSpace(out) - if oomScoreAdj != expected { - c.Fatalf("Expected oom_score_adj set to %q, got %q instead", expected, oomScoreAdj) - } -} - -func (s *DockerSuite) TestRunWithOomScoreAdjInvalidRange(c *check.C) { - testRequires(c, DaemonIsLinux) - - out, _, err := dockerCmdWithError("run", "--oom-score-adj", "1001", "busybox", "true") - c.Assert(err, check.NotNil) - expected := "Invalid value 1001, range for oom score adj is [-1000, 1000]." - if !strings.Contains(out, expected) { - c.Fatalf("Expected output to contain %q, got %q instead", expected, out) - } - out, _, err = dockerCmdWithError("run", "--oom-score-adj", "-1001", "busybox", "true") - c.Assert(err, check.NotNil) - expected = "Invalid value -1001, range for oom score adj is [-1000, 1000]." - if !strings.Contains(out, expected) { - c.Fatalf("Expected output to contain %q, got %q instead", expected, out) - } -} - -func (s *DockerSuite) TestRunVolumesMountedAsShared(c *check.C) { - // Volume propagation is Linux-only. Also, the test creates directories for - // bind mounting, so it needs to be on the same host. - testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) - - // Prepare a source directory to bind mount - tmpDir, err := ioutil.TempDir("", "volume-source") - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { - c.Fatal(err) - } - - // Convert this directory into a shared mount point so that we do - // not rely on propagation properties of the parent mount. - cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } - - cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } - - dockerCmd(c, "run", "--privileged", "-v", fmt.Sprintf("%s:/volume-dest:shared", tmpDir), "busybox", "mount", "--bind", "/volume-dest/mnt1", "/volume-dest/mnt1") - - // Make sure a bind mount under a shared volume propagates to the host.
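- // With the "shared" propagation mode, mounts created inside the container propagate back to the host mount namespace; mount.Mounted below checks the host side.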
- if mounted, _ := mount.Mounted(path.Join(tmpDir, "mnt1")); !mounted { - c.Fatalf("Bind mount under shared volume did not propagate to host") - } - - mount.Unmount(path.Join(tmpDir, "mnt1")) -} - -func (s *DockerSuite) TestRunVolumesMountedAsSlave(c *check.C) { - // Volume propagation is Linux-only. Also, the test creates directories for - // bind mounting, so it needs to be on the same host. - testRequires(c, DaemonIsLinux, SameHostDaemon, NotUserNamespace) - - // Prepare a source directory to bind mount - tmpDir, err := ioutil.TempDir("", "volume-source") - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(tmpDir) - - if err := os.Mkdir(path.Join(tmpDir, "mnt1"), 0755); err != nil { - c.Fatal(err) - } - - // Prepare a source directory with a file in it. We will bind mount this - // directory and see if the file shows up. - tmpDir2, err := ioutil.TempDir("", "volume-source2") - if err != nil { - c.Fatal(err) - } - defer os.RemoveAll(tmpDir2) - - if err := ioutil.WriteFile(path.Join(tmpDir2, "slave-testfile"), []byte("Test"), 0644); err != nil { - c.Fatal(err) - } - - // Convert this directory into a shared mount point so that we do - // not rely on propagation properties of the parent mount. - cmd := exec.Command("mount", "--bind", tmpDir, tmpDir) - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } - - cmd = exec.Command("mount", "--make-private", "--make-shared", tmpDir) - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } - - dockerCmd(c, "run", "-i", "-d", "--name", "parent", "-v", fmt.Sprintf("%s:/volume-dest:slave", tmpDir), "busybox", "top") - - // Bind mount tmpDir2/ onto tmpDir/mnt1. If the mount propagates inside - // the container, the contents of tmpDir2/slave-testfile should become - // visible at "/volume-dest/mnt1/slave-testfile" - cmd = exec.Command("mount", "--bind", tmpDir2, path.Join(tmpDir, "mnt1")) - if _, err = runCommand(cmd); err != nil { - c.Fatal(err) - } - - out, _ := dockerCmd(c, "exec", "parent", "cat", "/volume-dest/mnt1/slave-testfile") - - mount.Unmount(path.Join(tmpDir, "mnt1")) - - if out != "Test" { - c.Fatalf("Bind mount under slave volume did not propagate to container") - } -} - -func (s *DockerSuite) TestRunNamedVolumesMountedAsShared(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, exitcode, _ := dockerCmdWithError("run", "-v", "foo:/test:shared", "busybox", "touch", "/test/somefile") - - if exitcode == 0 { - c.Fatalf("expected non-zero exit code; received %d", exitcode) - } - - if expected := "Invalid volume specification"; !strings.Contains(out, expected) { - c.Fatalf(`Expected %q in output; got: %s`, expected, out) - } -} - -func (s *DockerSuite) TestRunNamedVolumeCopyImageData(c *check.C) { - testRequires(c, DaemonIsLinux) - - testImg := "testvolumecopy" - _, err := buildImage(testImg, ` - FROM busybox - RUN mkdir -p /foo && echo hello > /foo/hello - `, true) - c.Assert(err, check.IsNil) - - dockerCmd(c, "run", "-v", "foo:/foo", testImg) - out, _ := dockerCmd(c, "run", "-v", "foo:/foo", "busybox", "cat", "/foo/hello") - c.Assert(strings.TrimSpace(out), check.Equals, "hello") -} - -func (s *DockerSuite) TestRunNamedVolumeNotRemoved(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - - dockerCmd(c, "volume", "create", "--name", "test") - - dockerCmd(c, "run", "--rm", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") - dockerCmd(c, "volume", "inspect", "test") - out, _ := dockerCmd(c, "volume", "ls", "-q") - c.Assert(strings.TrimSpace(out), checker.Equals, "test") - - dockerCmd(c, "run",
"--name=test", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") - dockerCmd(c, "rm", "-fv", "test") - dockerCmd(c, "volume", "inspect", "test") - out, _ = dockerCmd(c, "volume", "ls", "-q") - c.Assert(strings.TrimSpace(out), checker.Equals, "test") -} - -func (s *DockerSuite) TestRunNamedVolumesFromNotRemoved(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - - dockerCmd(c, "volume", "create", "--name", "test") - dockerCmd(c, "run", "--name=parent", "-v", "test:"+prefix+"/foo", "-v", prefix+"/bar", "busybox", "true") - dockerCmd(c, "run", "--name=child", "--volumes-from=parent", "busybox", "true") - - // Remove the parent so there are not other references to the volumes - dockerCmd(c, "rm", "-f", "parent") - // now remove the child and ensure the named volume (and only the named volume) still exists - dockerCmd(c, "rm", "-fv", "child") - dockerCmd(c, "volume", "inspect", "test") - out, _ := dockerCmd(c, "volume", "ls", "-q") - c.Assert(strings.TrimSpace(out), checker.Equals, "test") -} - -func (s *DockerSuite) TestRunAttachFailedNoLeak(c *check.C) { - nroutines, err := getGoroutineNumber() - c.Assert(err, checker.IsNil) - - runSleepingContainer(c, "--name=test", "-p", "8000:8000") - - // Wait until container is fully up and running - c.Assert(waitRun("test"), check.IsNil) - - out, _, err := dockerCmdWithError("run", "--name=fail", "-p", "8000:8000", "busybox", "true") - // We will need the following `inspect` to diagnose the issue if test fails (#21247) - out1, err1 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "test") - out2, err2 := dockerCmd(c, "inspect", "--format", "{{json .State}}", "fail") - c.Assert(err, checker.NotNil, check.Commentf("Command should have failed but succeeded with: %s\nContainer 'test' [%+v]: %s\nContainer 'fail' [%+v]: %s", out, err1, out1, err2, out2)) - // check for windows error as well - // TODO Windows Post TP5. 
Fix the error message string - c.Assert(strings.Contains(string(out), "port is already allocated") || - strings.Contains(string(out), "were not connected because a duplicate name exists") || - strings.Contains(string(out), "HNS failed with error : Failed to create endpoint") || - strings.Contains(string(out), "HNS failed with error : The object already exists"), checker.Equals, true, check.Commentf("Output: %s", out)) - dockerCmd(c, "rm", "-f", "test") - - // NGoroutines is not updated right away, so we need to wait before failing - c.Assert(waitForGoroutines(nroutines), checker.IsNil) -} - -// Test for one character directory name case (#20122) -func (s *DockerSuite) TestRunVolumeWithOneCharacter(c *check.C) { - testRequires(c, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-v", "/tmp/q:/foo", "busybox", "sh", "-c", "find /foo") - c.Assert(strings.TrimSpace(out), checker.Equals, "/foo") -} - -func (s *DockerSuite) TestRunVolumeCopyFlag(c *check.C) { - testRequires(c, DaemonIsLinux) // Windows does not support copying data from image to the volume - _, err := buildImage("volumecopy", - `FROM busybox - RUN mkdir /foo && echo hello > /foo/bar - CMD cat /foo/bar`, - true, - ) - c.Assert(err, checker.IsNil) - - dockerCmd(c, "volume", "create", "--name=test") - - // test with the nocopy flag - out, _, err := dockerCmdWithError("run", "-v", "test:/foo:nocopy", "volumecopy") - c.Assert(err, checker.NotNil, check.Commentf(out)) - // test default behavior which is to copy for non-binds - out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") - c.Assert(strings.TrimSpace(out), checker.Equals, "hello") - // error out when the volume is already populated - out, _, err = dockerCmdWithError("run", "-v", "test:/foo:copy", "volumecopy") - c.Assert(err, checker.NotNil, check.Commentf(out)) - // do not error out when copy isn't explicitly set even though it's already populated - out, _ = dockerCmd(c, "run", "-v", "test:/foo", "volumecopy") - c.Assert(strings.TrimSpace(out), checker.Equals, "hello") - - // do not allow copy modes on volumes-from - dockerCmd(c, "run", "--name=test", "-v", "/foo", "busybox", "true") - out, _, err = dockerCmdWithError("run", "--volumes-from=test:copy", "busybox", "true") - c.Assert(err, checker.NotNil, check.Commentf(out)) - out, _, err = dockerCmdWithError("run", "--volumes-from=test:nocopy", "busybox", "true") - c.Assert(err, checker.NotNil, check.Commentf(out)) - - // do not allow copy modes on binds - out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:copy", "busybox", "true") - c.Assert(err, checker.NotNil, check.Commentf(out)) - out, _, err = dockerCmdWithError("run", "-v", "/foo:/bar:nocopy", "busybox", "true") - c.Assert(err, checker.NotNil, check.Commentf(out)) -} - -func (s *DockerSuite) TestRunTooLongHostname(c *check.C) { - // Test case in #21445 - hostname1 := "this-is-a-way-too-long-hostname-but-it-should-give-a-nice-error.local" - out, _, err := dockerCmdWithError("run", "--hostname", hostname1, "busybox", "echo", "test") - c.Assert(err, checker.NotNil, check.Commentf("Expected docker run to fail!")) - c.Assert(out, checker.Contains, "invalid hostname format:", check.Commentf("Expected to have 'invalid hostname format:' in the output, got: %s!", out)) - - // Additional test cases - validHostnames := map[string]string{ - "hostname": "hostname", - "host-name": "host-name", - "hostname123": "hostname123", - "123hostname": "123hostname", - "hostname-of-63-bytes-long-should-be-valid-and-without-any-error":
"hostname-of-63-bytes-long-should-be-valid-and-without-any-error", - } - for hostname := range validHostnames { - dockerCmd(c, "run", "--hostname", hostname, "busybox", "echo", "test") - } - - invalidHostnames := map[string]string{ - "^hostname": "invalid hostname format: ^hostname", - "hostname%": "invalid hostname format: hostname%", - "host&name": "invalid hostname format: host&name", - "-hostname": "invalid hostname format: -hostname", - "host_name": "invalid hostname format: host_name", - "hostname-of-64-bytes-long-should-be-invalid-and-be-with-an-error": "invalid hostname format: hostname-of-64-bytes-long-should-be-invalid-and-be-with-an-error", - } - - for hostname, expectedError := range invalidHostnames { - out, _, err = dockerCmdWithError("run", "--hostname", hostname, "busybox", "echo", "test") - c.Assert(err, checker.NotNil, check.Commentf("Expected docker run to fail!")) - c.Assert(out, checker.Contains, expectedError, check.Commentf("Expected to have '%s' in the output, get: %s!", expectedError, out)) - - } -} - -// Test case for #21976 -func (s *DockerSuite) TestRunDnsInHostMode(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - - expectedOutput := "nameserver 127.0.0.1" - expectedWarning := "Localhost DNS setting" - out, stderr, _ := dockerCmdWithStdoutStderr(c, "run", "--dns=127.0.0.1", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) - c.Assert(stderr, checker.Contains, expectedWarning, check.Commentf("Expected warning on stderr about localhost resolver, but got %q", stderr)) - - expectedOutput = "nameserver 1.2.3.4" - out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) - - expectedOutput = "search example.com" - out, _ = dockerCmd(c, "run", "--dns-search=example.com", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) - - expectedOutput = "options timeout:3" - out, _ = dockerCmd(c, "run", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) - - expectedOutput1 := "nameserver 1.2.3.4" - expectedOutput2 := "search example.com" - expectedOutput3 := "options timeout:3" - out, _ = dockerCmd(c, "run", "--dns=1.2.3.4", "--dns-search=example.com", "--dns-opt=timeout:3", "--net=host", "busybox", "cat", "/etc/resolv.conf") - c.Assert(out, checker.Contains, expectedOutput1, check.Commentf("Expected '%s', but got %q", expectedOutput1, out)) - c.Assert(out, checker.Contains, expectedOutput2, check.Commentf("Expected '%s', but got %q", expectedOutput2, out)) - c.Assert(out, checker.Contains, expectedOutput3, check.Commentf("Expected '%s', but got %q", expectedOutput3, out)) -} - -// Test case for #21976 -func (s *DockerSuite) TestRunAddHostInHostMode(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - - expectedOutput := "1.2.3.4\textra" - out, _ := dockerCmd(c, "run", "--add-host=extra:1.2.3.4", "--net=host", "busybox", "cat", "/etc/hosts") - c.Assert(out, checker.Contains, expectedOutput, check.Commentf("Expected '%s', but got %q", expectedOutput, out)) -} diff --git a/integration-cli/docker_cli_run_unix_test.go 
b/integration-cli/docker_cli_run_unix_test.go deleted file mode 100644 index c1995b7ce1..0000000000 --- a/integration-cli/docker_cli_run_unix_test.go +++ /dev/null @@ -1,1306 +0,0 @@ -// +build !windows - -package main - -import ( - "bufio" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "regexp" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/docker/docker/pkg/homedir" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/parsers" - "github.com/docker/docker/pkg/sysinfo" - "github.com/go-check/check" - "github.com/kr/pty" -) - -// #6509 -func (s *DockerSuite) TestRunRedirectStdout(c *check.C) { - checkRedirect := func(command string) { - _, tty, err := pty.Open() - c.Assert(err, checker.IsNil, check.Commentf("Could not open pty")) - cmd := exec.Command("sh", "-c", command) - cmd.Stdin = tty - cmd.Stdout = tty - cmd.Stderr = tty - c.Assert(cmd.Start(), checker.IsNil) - ch := make(chan error) - go func() { - ch <- cmd.Wait() - close(ch) - }() - - select { - case <-time.After(10 * time.Second): - c.Fatal("command timeout") - case err := <-ch: - c.Assert(err, checker.IsNil, check.Commentf("wait err")) - } - } - - checkRedirect(dockerBinary + " run -i busybox cat /etc/passwd | grep -q root") - checkRedirect(dockerBinary + " run busybox cat /etc/passwd | grep -q root") -} - -// Test that recursive bind mounts work by default -func (s *DockerSuite) TestRunWithVolumesIsRecursive(c *check.C) { - // /tmp gets permission denied - testRequires(c, NotUserNamespace, SameHostDaemon) - tmpDir, err := ioutil.TempDir("", "docker_recursive_mount_test") - c.Assert(err, checker.IsNil) - - defer os.RemoveAll(tmpDir) - - // Create a temporary tmpfs mount. - tmpfsDir := filepath.Join(tmpDir, "tmpfs") - c.Assert(os.MkdirAll(tmpfsDir, 0777), checker.IsNil, check.Commentf("failed to mkdir at %s", tmpfsDir)) - c.Assert(mount.Mount("tmpfs", tmpfsDir, "tmpfs", ""), checker.IsNil, check.Commentf("failed to create a tmpfs mount at %s", tmpfsDir)) - - f, err := ioutil.TempFile(tmpfsDir, "touch-me") - c.Assert(err, checker.IsNil) - defer f.Close() - - runCmd := exec.Command(dockerBinary, "run", "--name", "test-data", "--volume", fmt.Sprintf("%s:/tmp:ro", tmpDir), "busybox:latest", "ls", "/tmp/tmpfs") - out, _, _, err := runCommandWithStdoutStderr(runCmd) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, filepath.Base(f.Name()), check.Commentf("Recursive bind mount test failed. Expected file not found")) -} - -func (s *DockerSuite) TestRunDeviceDirectory(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm) - if _, err := os.Stat("/dev/snd"); err != nil { - c.Skip("Host does not have /dev/snd") - } - - out, _ := dockerCmd(c, "run", "--device", "/dev/snd:/dev/snd", "busybox", "sh", "-c", "ls /dev/snd/") - c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "timer", check.Commentf("expected output /dev/snd/timer")) - - out, _ = dockerCmd(c, "run", "--device", "/dev/snd:/dev/othersnd", "busybox", "sh", "-c", "ls /dev/othersnd/") - c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "seq", check.Commentf("expected output /dev/othersnd/seq")) -} - -// TestRunAttachDetach checks attaching and detaching with the default escape sequence.
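-// The default detach sequence is ctrl-p,ctrl-q, written to the pty below as the raw bytes 16 and 17.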
-func (s *DockerSuite) TestRunAttachDetach(c *check.C) { - name := "attach-detach" - - dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") - - cmd := exec.Command(dockerBinary, "attach", name) - stdout, err := cmd.StdoutPipe() - c.Assert(err, checker.IsNil) - cpty, tty, err := pty.Open() - c.Assert(err, checker.IsNil) - defer cpty.Close() - cmd.Stdin = tty - c.Assert(cmd.Start(), checker.IsNil) - c.Assert(waitRun(name), check.IsNil) - - _, err = cpty.Write([]byte("hello\n")) - c.Assert(err, checker.IsNil) - - out, err := bufio.NewReader(stdout).ReadString('\n') - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Equals, "hello") - - // escape sequence - _, err = cpty.Write([]byte{16}) - c.Assert(err, checker.IsNil) - time.Sleep(100 * time.Millisecond) - _, err = cpty.Write([]byte{17}) - c.Assert(err, checker.IsNil) - - ch := make(chan struct{}) - go func() { - cmd.Wait() - ch <- struct{}{} - }() - - select { - case <-ch: - case <-time.After(10 * time.Second): - c.Fatal("timed out waiting for container to exit") - } - - running := inspectField(c, name, "State.Running") - c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) - - out, _ = dockerCmd(c, "events", "--since=0", "--until", daemonUnixTime(c), "-f", "container="+name) - // attach and detach events should be monitored - c.Assert(out, checker.Contains, "attach") - c.Assert(out, checker.Contains, "detach") -} - -// TestRunAttachDetachFromFlag checks attaching and detaching with the escape sequence specified via flags. -func (s *DockerSuite) TestRunAttachDetachFromFlag(c *check.C) { - name := "attach-detach" - keyCtrlA := []byte{1} - keyA := []byte{97} - - dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") - - cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name) - stdout, err := cmd.StdoutPipe() - if err != nil { - c.Fatal(err) - } - cpty, tty, err := pty.Open() - if err != nil { - c.Fatal(err) - } - defer cpty.Close() - cmd.Stdin = tty - if err := cmd.Start(); err != nil { - c.Fatal(err) - } - c.Assert(waitRun(name), check.IsNil) - - if _, err := cpty.Write([]byte("hello\n")); err != nil { - c.Fatal(err) - } - - out, err := bufio.NewReader(stdout).ReadString('\n') - if err != nil { - c.Fatal(err) - } - if strings.TrimSpace(out) != "hello" { - c.Fatalf("expected 'hello', got %q", out) - } - - // escape sequence - if _, err := cpty.Write(keyCtrlA); err != nil { - c.Fatal(err) - } - time.Sleep(100 * time.Millisecond) - if _, err := cpty.Write(keyA); err != nil { - c.Fatal(err) - } - - ch := make(chan struct{}) - go func() { - cmd.Wait() - ch <- struct{}{} - }() - - select { - case <-ch: - case <-time.After(10 * time.Second): - c.Fatal("timed out waiting for container to exit") - } - - running := inspectField(c, name, "State.Running") - c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) -} - -// TestRunAttachDetachFromInvalidFlag checks attaching and detaching when an invalid escape sequence is specified via flags.
-func (s *DockerSuite) TestRunAttachDetachFromInvalidFlag(c *check.C) { - name := "attach-detach" - dockerCmd(c, "run", "--name", name, "-itd", "busybox", "top") - c.Assert(waitRun(name), check.IsNil) - - // Specify an invalid detach key; the client ignores it and uses the default - cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-A,a", name) - stdout, err := cmd.StdoutPipe() - if err != nil { - c.Fatal(err) - } - cpty, tty, err := pty.Open() - if err != nil { - c.Fatal(err) - } - defer cpty.Close() - cmd.Stdin = tty - if err := cmd.Start(); err != nil { - c.Fatal(err) - } - - bufReader := bufio.NewReader(stdout) - out, err := bufReader.ReadString('\n') - if err != nil { - c.Fatal(err) - } - // it should print a warning to indicate the detach key flag is invalid - errStr := "Invalid escape keys (ctrl-A,a) provided" - c.Assert(strings.TrimSpace(out), checker.Equals, errStr) -} - -// TestRunAttachDetachFromConfig checks attaching and detaching with the escape sequence specified via the config file. -func (s *DockerSuite) TestRunAttachDetachFromConfig(c *check.C) { - keyCtrlA := []byte{1} - keyA := []byte{97} - - // Setup config - homeKey := homedir.Key() - homeVal := homedir.Get() - tmpDir, err := ioutil.TempDir("", "fake-home") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(tmpDir) - - dotDocker := filepath.Join(tmpDir, ".docker") - os.Mkdir(dotDocker, 0600) - tmpCfg := filepath.Join(dotDocker, "config.json") - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpDir) - - data := `{ - "detachKeys": "ctrl-a,a" - }` - - err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) - c.Assert(err, checker.IsNil) - - // Then do the work - name := "attach-detach" - dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") - - cmd := exec.Command(dockerBinary, "attach", name) - stdout, err := cmd.StdoutPipe() - if err != nil { - c.Fatal(err) - } - cpty, tty, err := pty.Open() - if err != nil { - c.Fatal(err) - } - defer cpty.Close() - cmd.Stdin = tty - if err := cmd.Start(); err != nil { - c.Fatal(err) - } - c.Assert(waitRun(name), check.IsNil) - - if _, err := cpty.Write([]byte("hello\n")); err != nil { - c.Fatal(err) - } - - out, err := bufio.NewReader(stdout).ReadString('\n') - if err != nil { - c.Fatal(err) - } - if strings.TrimSpace(out) != "hello" { - c.Fatalf("expected 'hello', got %q", out) - } - - // escape sequence - if _, err := cpty.Write(keyCtrlA); err != nil { - c.Fatal(err) - } - time.Sleep(100 * time.Millisecond) - if _, err := cpty.Write(keyA); err != nil { - c.Fatal(err) - } - - ch := make(chan struct{}) - go func() { - cmd.Wait() - ch <- struct{}{} - }() - - select { - case <-ch: - case <-time.After(10 * time.Second): - c.Fatal("timed out waiting for container to exit") - } - - running := inspectField(c, name, "State.Running") - c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) -} - -// TestRunAttachDetachKeysOverrideConfig checks attaching and detaching with the detach flags, making sure they override the config file -func (s *DockerSuite) TestRunAttachDetachKeysOverrideConfig(c *check.C) { - keyCtrlA := []byte{1} - keyA := []byte{97} - - // Setup config - homeKey := homedir.Key() - homeVal := homedir.Get() - tmpDir, err := ioutil.TempDir("", "fake-home") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(tmpDir) - - dotDocker := filepath.Join(tmpDir, ".docker") - os.Mkdir(dotDocker, 0600) - tmpCfg := filepath.Join(dotDocker, "config.json") - - defer func() { os.Setenv(homeKey, homeVal) }() - os.Setenv(homeKey, tmpDir) - - data := `{ -
"detachKeys": "ctrl-e,e" - }` - - err = ioutil.WriteFile(tmpCfg, []byte(data), 0600) - c.Assert(err, checker.IsNil) - - // Then do the work - name := "attach-detach" - dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") - - cmd := exec.Command(dockerBinary, "attach", "--detach-keys=ctrl-a,a", name) - stdout, err := cmd.StdoutPipe() - if err != nil { - c.Fatal(err) - } - cpty, tty, err := pty.Open() - if err != nil { - c.Fatal(err) - } - defer cpty.Close() - cmd.Stdin = tty - if err := cmd.Start(); err != nil { - c.Fatal(err) - } - c.Assert(waitRun(name), check.IsNil) - - if _, err := cpty.Write([]byte("hello\n")); err != nil { - c.Fatal(err) - } - - out, err := bufio.NewReader(stdout).ReadString('\n') - if err != nil { - c.Fatal(err) - } - if strings.TrimSpace(out) != "hello" { - c.Fatalf("expected 'hello', got %q", out) - } - - // escape sequence - if _, err := cpty.Write(keyCtrlA); err != nil { - c.Fatal(err) - } - time.Sleep(100 * time.Millisecond) - if _, err := cpty.Write(keyA); err != nil { - c.Fatal(err) - } - - ch := make(chan struct{}) - go func() { - cmd.Wait() - ch <- struct{}{} - }() - - select { - case <-ch: - case <-time.After(10 * time.Second): - c.Fatal("timed out waiting for container to exit") - } - - running := inspectField(c, name, "State.Running") - c.Assert(running, checker.Equals, "true", check.Commentf("expected container to still be running")) -} - -func (s *DockerSuite) TestRunAttachInvalidDetachKeySequencePreserved(c *check.C) { - name := "attach-detach" - keyA := []byte{97} - keyB := []byte{98} - - dockerCmd(c, "run", "--name", name, "-itd", "busybox", "cat") - - cmd := exec.Command(dockerBinary, "attach", "--detach-keys=a,b,c", name) - stdout, err := cmd.StdoutPipe() - if err != nil { - c.Fatal(err) - } - cpty, tty, err := pty.Open() - if err != nil { - c.Fatal(err) - } - defer cpty.Close() - cmd.Stdin = tty - if err := cmd.Start(); err != nil { - c.Fatal(err) - } - c.Assert(waitRun(name), check.IsNil) - - // Invalid escape sequence aba, should print aba in output - if _, err := cpty.Write(keyA); err != nil { - c.Fatal(err) - } - time.Sleep(100 * time.Millisecond) - if _, err := cpty.Write(keyB); err != nil { - c.Fatal(err) - } - time.Sleep(100 * time.Millisecond) - if _, err := cpty.Write(keyA); err != nil { - c.Fatal(err) - } - time.Sleep(100 * time.Millisecond) - if _, err := cpty.Write([]byte("\n")); err != nil { - c.Fatal(err) - } - - out, err := bufio.NewReader(stdout).ReadString('\n') - if err != nil { - c.Fatal(err) - } - if strings.TrimSpace(out) != "aba" { - c.Fatalf("expected 'aba', got %q", out) - } -} - -// "test" should be printed -func (s *DockerSuite) TestRunWithCPUQuota(c *check.C) { - testRequires(c, cpuCfsQuota) - - file := "/sys/fs/cgroup/cpu/cpu.cfs_quota_us" - out, _ := dockerCmd(c, "run", "--cpu-quota", "8000", "--name", "test", "busybox", "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "8000") - - out = inspectField(c, "test", "HostConfig.CpuQuota") - c.Assert(out, checker.Equals, "8000", check.Commentf("setting the CPU CFS quota failed")) -} - -func (s *DockerSuite) TestRunWithCpuPeriod(c *check.C) { - testRequires(c, cpuCfsPeriod) - - file := "/sys/fs/cgroup/cpu/cpu.cfs_period_us" - out, _ := dockerCmd(c, "run", "--cpu-period", "50000", "--name", "test", "busybox", "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "50000") - - out, _ = dockerCmd(c, "run", "--cpu-period", "0", "busybox", "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "100000") - - out = inspectField(c, "test", 
"HostConfig.CpuPeriod") - c.Assert(out, checker.Equals, "50000", check.Commentf("setting the CPU CFS period failed")) -} - -func (s *DockerSuite) TestRunWithInvalidCpuPeriod(c *check.C) { - testRequires(c, cpuCfsPeriod) - out, _, err := dockerCmdWithError("run", "--cpu-period", "900", "busybox", "true") - c.Assert(err, check.NotNil) - expected := "CPU cfs period can not be less than 1ms (i.e. 1000) or larger than 1s (i.e. 1000000)" - c.Assert(out, checker.Contains, expected) - - out, _, err = dockerCmdWithError("run", "--cpu-period", "2000000", "busybox", "true") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, expected) - - out, _, err = dockerCmdWithError("run", "--cpu-period", "-3", "busybox", "true") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, expected) -} - -func (s *DockerSuite) TestRunWithKernelMemory(c *check.C) { - testRequires(c, kernelMemorySupport) - - file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" - stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "--kernel-memory", "50M", "--name", "test1", "busybox", "cat", file) - c.Assert(strings.TrimSpace(stdout), checker.Equals, "52428800") - - out := inspectField(c, "test1", "HostConfig.KernelMemory") - c.Assert(out, check.Equals, "52428800") -} - -func (s *DockerSuite) TestRunWithInvalidKernelMemory(c *check.C) { - testRequires(c, kernelMemorySupport) - - out, _, err := dockerCmdWithError("run", "--kernel-memory", "2M", "busybox", "true") - c.Assert(err, check.NotNil) - expected := "Minimum kernel memory limit allowed is 4MB" - c.Assert(out, checker.Contains, expected) - - out, _, err = dockerCmdWithError("run", "--kernel-memory", "-16m", "--name", "test2", "busybox", "echo", "test") - c.Assert(err, check.NotNil) - expected = "invalid size" - c.Assert(out, checker.Contains, expected) -} - -func (s *DockerSuite) TestRunWithCPUShares(c *check.C) { - testRequires(c, cpuShare) - - file := "/sys/fs/cgroup/cpu/cpu.shares" - out, _ := dockerCmd(c, "run", "--cpu-shares", "1000", "--name", "test", "busybox", "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "1000") - - out = inspectField(c, "test", "HostConfig.CPUShares") - c.Assert(out, check.Equals, "1000") -} - -// "test" should be printed -func (s *DockerSuite) TestRunEchoStdoutWithCPUSharesAndMemoryLimit(c *check.C) { - testRequires(c, cpuShare) - testRequires(c, memoryLimitSupport) - out, _, _ := dockerCmdWithStdoutStderr(c, "run", "--cpu-shares", "1000", "-m", "32m", "busybox", "echo", "test") - c.Assert(out, checker.Equals, "test\n", check.Commentf("container should've printed 'test'")) -} - -func (s *DockerSuite) TestRunWithCpusetCpus(c *check.C) { - testRequires(c, cgroupCpuset) - - file := "/sys/fs/cgroup/cpuset/cpuset.cpus" - out, _ := dockerCmd(c, "run", "--cpuset-cpus", "0", "--name", "test", "busybox", "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - out = inspectField(c, "test", "HostConfig.CpusetCpus") - c.Assert(out, check.Equals, "0") -} - -func (s *DockerSuite) TestRunWithCpusetMems(c *check.C) { - testRequires(c, cgroupCpuset) - - file := "/sys/fs/cgroup/cpuset/cpuset.mems" - out, _ := dockerCmd(c, "run", "--cpuset-mems", "0", "--name", "test", "busybox", "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - out = inspectField(c, "test", "HostConfig.CpusetMems") - c.Assert(out, check.Equals, "0") -} - -func (s *DockerSuite) TestRunWithBlkioWeight(c *check.C) { - testRequires(c, blkioWeight) - - file := "/sys/fs/cgroup/blkio/blkio.weight" - out, _ := dockerCmd(c, "run", 
"--blkio-weight", "300", "--name", "test", "busybox", "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "300") - - out = inspectField(c, "test", "HostConfig.BlkioWeight") - c.Assert(out, check.Equals, "300") -} - -func (s *DockerSuite) TestRunWithInvalidBlkioWeight(c *check.C) { - testRequires(c, blkioWeight) - out, _, err := dockerCmdWithError("run", "--blkio-weight", "5", "busybox", "true") - c.Assert(err, check.NotNil, check.Commentf(out)) - expected := "Range of blkio weight is from 10 to 1000" - c.Assert(out, checker.Contains, expected) -} - -func (s *DockerSuite) TestRunWithInvalidPathforBlkioWeightDevice(c *check.C) { - testRequires(c, blkioWeight) - out, _, err := dockerCmdWithError("run", "--blkio-weight-device", "/dev/sdX:100", "busybox", "true") - c.Assert(err, check.NotNil, check.Commentf(out)) -} - -func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadBps(c *check.C) { - testRequires(c, blkioWeight) - out, _, err := dockerCmdWithError("run", "--device-read-bps", "/dev/sdX:500", "busybox", "true") - c.Assert(err, check.NotNil, check.Commentf(out)) -} - -func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteBps(c *check.C) { - testRequires(c, blkioWeight) - out, _, err := dockerCmdWithError("run", "--device-write-bps", "/dev/sdX:500", "busybox", "true") - c.Assert(err, check.NotNil, check.Commentf(out)) -} - -func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceReadIOps(c *check.C) { - testRequires(c, blkioWeight) - out, _, err := dockerCmdWithError("run", "--device-read-iops", "/dev/sdX:500", "busybox", "true") - c.Assert(err, check.NotNil, check.Commentf(out)) -} - -func (s *DockerSuite) TestRunWithInvalidPathforBlkioDeviceWriteIOps(c *check.C) { - testRequires(c, blkioWeight) - out, _, err := dockerCmdWithError("run", "--device-write-iops", "/dev/sdX:500", "busybox", "true") - c.Assert(err, check.NotNil, check.Commentf(out)) -} - -func (s *DockerSuite) TestRunOOMExitCode(c *check.C) { - testRequires(c, memoryLimitSupport, swapMemorySupport) - errChan := make(chan error) - go func() { - defer close(errChan) - //changing memory to 40MB from 4MB due to an issue with GCCGO that test fails to start the container. - out, exitCode, _ := dockerCmdWithError("run", "-m", "40MB", "busybox", "sh", "-c", "x=a; while true; do x=$x$x$x$x; done") - if expected := 137; exitCode != expected { - errChan <- fmt.Errorf("wrong exit code for OOM container: expected %d, got %d (output: %q)", expected, exitCode, out) - } - }() - - select { - case err := <-errChan: - c.Assert(err, check.IsNil) - case <-time.After(600 * time.Second): - c.Fatal("Timeout waiting for container to die on OOM") - } -} - -func (s *DockerSuite) TestRunWithMemoryLimit(c *check.C) { - testRequires(c, memoryLimitSupport) - - file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" - stdout, _, _ := dockerCmdWithStdoutStderr(c, "run", "-m", "32M", "--name", "test", "busybox", "cat", file) - c.Assert(strings.TrimSpace(stdout), checker.Equals, "33554432") - - out := inspectField(c, "test", "HostConfig.Memory") - c.Assert(out, check.Equals, "33554432") -} - -// TestRunWithoutMemoryswapLimit sets memory limit and disables swap -// memory limit, this means the processes in the container can use -// 16M memory and as much swap memory as they need (if the host -// supports swap memory). 
-func (s *DockerSuite) TestRunWithoutMemoryswapLimit(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - testRequires(c, swapMemorySupport) - dockerCmd(c, "run", "-m", "32m", "--memory-swap", "-1", "busybox", "true") -} - -func (s *DockerSuite) TestRunWithSwappiness(c *check.C) { - testRequires(c, memorySwappinessSupport) - file := "/sys/fs/cgroup/memory/memory.swappiness" - out, _ := dockerCmd(c, "run", "--memory-swappiness", "0", "--name", "test", "busybox", "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "0") - - out = inspectField(c, "test", "HostConfig.MemorySwappiness") - c.Assert(out, check.Equals, "0") -} - -func (s *DockerSuite) TestRunWithSwappinessInvalid(c *check.C) { - testRequires(c, memorySwappinessSupport) - out, _, err := dockerCmdWithError("run", "--memory-swappiness", "101", "busybox", "true") - c.Assert(err, check.NotNil) - expected := "Valid memory swappiness range is 0-100" - c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, got %q", expected, out)) - - out, _, err = dockerCmdWithError("run", "--memory-swappiness", "-10", "busybox", "true") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, expected, check.Commentf("Expected output to contain %q, got %q", expected, out)) -} - -func (s *DockerSuite) TestRunWithMemoryReservation(c *check.C) { - testRequires(c, memoryReservationSupport) - - file := "/sys/fs/cgroup/memory/memory.soft_limit_in_bytes" - out, _ := dockerCmd(c, "run", "--memory-reservation", "200M", "--name", "test", "busybox", "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "209715200") - - out = inspectField(c, "test", "HostConfig.MemoryReservation") - c.Assert(out, check.Equals, "209715200") -} - -func (s *DockerSuite) TestRunWithMemoryReservationInvalid(c *check.C) { - testRequires(c, memoryLimitSupport) - testRequires(c, memoryReservationSupport) - out, _, err := dockerCmdWithError("run", "-m", "500M", "--memory-reservation", "800M", "busybox", "true") - c.Assert(err, check.NotNil) - expected := "Minimum memory limit should be larger than memory reservation limit" - c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation")) - - out, _, err = dockerCmdWithError("run", "--memory-reservation", "1k", "busybox", "true") - c.Assert(err, check.NotNil) - expected = "Minimum memory reservation allowed is 4MB" - c.Assert(strings.TrimSpace(out), checker.Contains, expected, check.Commentf("run container should fail with invalid memory reservation")) -} - -func (s *DockerSuite) TestStopContainerSignal(c *check.C) { - out, _ := dockerCmd(c, "run", "--stop-signal", "SIGUSR1", "-d", "busybox", "/bin/sh", "-c", `trap 'echo "exit trapped"; exit 0' USR1; while true; do sleep 1; done`) - containerID := strings.TrimSpace(out) - - c.Assert(waitRun(containerID), checker.IsNil) - - dockerCmd(c, "stop", containerID) - out, _ = dockerCmd(c, "logs", containerID) - - c.Assert(out, checker.Contains, "exit trapped", check.Commentf("Expected `exit trapped` in the log")) -} - -func (s *DockerSuite) TestRunSwapLessThanMemoryLimit(c *check.C) { - testRequires(c, memoryLimitSupport) - testRequires(c, swapMemorySupport) - out, _, err := dockerCmdWithError("run", "-m", "16m", "--memory-swap", "15m", "busybox", "echo", "test") - expected := "Minimum memoryswap limit should be larger than memory limit" - c.Assert(err, check.NotNil) - - c.Assert(out, checker.Contains, expected) -} - -func (s
*DockerSuite) TestRunInvalidCpusetCpusFlagValue(c *check.C) { - testRequires(c, cgroupCpuset, SameHostDaemon) - - sysInfo := sysinfo.New(true) - cpus, err := parsers.ParseUintList(sysInfo.Cpus) - c.Assert(err, check.IsNil) - var invalid int - for i := 0; i <= len(cpus)+1; i++ { - if !cpus[i] { - invalid = i - break - } - } - out, _, err := dockerCmdWithError("run", "--cpuset-cpus", strconv.Itoa(invalid), "busybox", "true") - c.Assert(err, check.NotNil) - expected := fmt.Sprintf("Error response from daemon: Requested CPUs are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Cpus) - c.Assert(out, checker.Contains, expected) -} - -func (s *DockerSuite) TestRunInvalidCpusetMemsFlagValue(c *check.C) { - testRequires(c, cgroupCpuset) - - sysInfo := sysinfo.New(true) - mems, err := parsers.ParseUintList(sysInfo.Mems) - c.Assert(err, check.IsNil) - var invalid int - for i := 0; i <= len(mems)+1; i++ { - if !mems[i] { - invalid = i - break - } - } - out, _, err := dockerCmdWithError("run", "--cpuset-mems", strconv.Itoa(invalid), "busybox", "true") - c.Assert(err, check.NotNil) - expected := fmt.Sprintf("Error response from daemon: Requested memory nodes are not available - requested %s, available: %s", strconv.Itoa(invalid), sysInfo.Mems) - c.Assert(out, checker.Contains, expected) -} - -func (s *DockerSuite) TestRunInvalidCPUShares(c *check.C) { - testRequires(c, cpuShare, DaemonIsLinux) - out, _, err := dockerCmdWithError("run", "--cpu-shares", "1", "busybox", "echo", "test") - c.Assert(err, check.NotNil, check.Commentf(out)) - expected := "The minimum allowed cpu-shares is 2" - c.Assert(out, checker.Contains, expected) - - out, _, err = dockerCmdWithError("run", "--cpu-shares", "-1", "busybox", "echo", "test") - c.Assert(err, check.NotNil, check.Commentf(out)) - expected = "shares: invalid argument" - c.Assert(out, checker.Contains, expected) - - out, _, err = dockerCmdWithError("run", "--cpu-shares", "99999999", "busybox", "echo", "test") - c.Assert(err, check.NotNil, check.Commentf(out)) - expected = "The maximum allowed cpu-shares is" - c.Assert(out, checker.Contains, expected) -} - -func (s *DockerSuite) TestRunWithDefaultShmSize(c *check.C) { - testRequires(c, DaemonIsLinux) - - name := "shm-default" - out, _ := dockerCmd(c, "run", "--name", name, "busybox", "mount") - shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=65536k`) - if !shmRegex.MatchString(out) { - c.Fatalf("Expected shm of 64MB in mount command, got %v", out) - } - shmSize := inspectField(c, name, "HostConfig.ShmSize") - c.Assert(shmSize, check.Equals, "67108864") -} - -func (s *DockerSuite) TestRunWithShmSize(c *check.C) { - testRequires(c, DaemonIsLinux) - - name := "shm" - out, _ := dockerCmd(c, "run", "--name", name, "--shm-size=1G", "busybox", "mount") - shmRegex := regexp.MustCompile(`shm on /dev/shm type tmpfs(.*)size=1048576k`) - if !shmRegex.MatchString(out) { - c.Fatalf("Expected shm of 1GB in mount command, got %v", out) - } - shmSize := inspectField(c, name, "HostConfig.ShmSize") - c.Assert(shmSize, check.Equals, "1073741824") -} - -func (s *DockerSuite) TestRunTmpfsMountsEnsureOrdered(c *check.C) { - tmpFile, err := ioutil.TempFile("", "test") - c.Assert(err, check.IsNil) - defer tmpFile.Close() - out, _ := dockerCmd(c, "run", "--tmpfs", "/run", "-v", tmpFile.Name()+":/run/test", "busybox", "ls", "/run") - c.Assert(out, checker.Contains, "test") -} - -func (s *DockerSuite) TestRunTmpfsMounts(c *check.C) { - // TODO Windows (Post TP5): This test cannot run on a Windows 
daemon as - // Windows does not support tmpfs mounts. - testRequires(c, DaemonIsLinux) - if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "busybox", "touch", "/run/somefile"); err != nil { - c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out) - } - if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec", "busybox", "touch", "/run/somefile"); err != nil { - c.Fatalf("/run directory not mounted on tmpfs %q %s", err, out) - } - if out, _, err := dockerCmdWithError("run", "--tmpfs", "/run:noexec,nosuid,rw,size=5k,mode=700", "busybox", "touch", "/run/somefile"); err != nil { - c.Fatalf("/run failed to mount on tmpfs with valid options %q %s", err, out) - } - if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run:foobar", "busybox", "touch", "/run/somefile"); err == nil { - c.Fatalf("/run mounted on tmpfs when it should have failed with an invalid mount option") - } - if _, _, err := dockerCmdWithError("run", "--tmpfs", "/run", "-v", "/run:/run", "busybox", "touch", "/run/somefile"); err == nil { - c.Fatalf("Should have generated an error saying Duplicate mount points") - } -} - -func (s *DockerSuite) TestRunTmpfsMountsOverrideImageVolumes(c *check.C) { - name := "img-with-volumes" - _, err := buildImage( - name, - ` - FROM busybox - VOLUME /run - RUN touch /run/stuff - `, - true) - if err != nil { - c.Fatal(err) - } - out, _ := dockerCmd(c, "run", "--tmpfs", "/run", name, "ls", "/run") - c.Assert(out, checker.Not(checker.Contains), "stuff") -} - -// Test case for #22420 -func (s *DockerSuite) TestRunTmpfsMountsWithOptions(c *check.C) { - testRequires(c, DaemonIsLinux) - - expectedOptions := []string{"rw", "nosuid", "nodev", "noexec", "relatime"} - out, _ := dockerCmd(c, "run", "--tmpfs", "/tmp", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") - for _, option := range expectedOptions { - c.Assert(out, checker.Contains, option) - } - c.Assert(out, checker.Not(checker.Contains), "size=") - - expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime"} - out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") - for _, option := range expectedOptions { - c.Assert(out, checker.Contains, option) - } - c.Assert(out, checker.Not(checker.Contains), "size=") - - expectedOptions = []string{"rw", "nosuid", "nodev", "relatime", "size=8192k"} - out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,exec,size=8192k", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") - for _, option := range expectedOptions { - c.Assert(out, checker.Contains, option) - } - - expectedOptions = []string{"rw", "nosuid", "nodev", "noexec", "relatime", "size=4096k"} - out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:rw,size=8192k,exec,size=4096k,noexec", "busybox", "sh", "-c", "mount | grep 'tmpfs on /tmp'") - for _, option := range expectedOptions { - c.Assert(out, checker.Contains, option) - } - - // We use debian:jessie as there is no findmnt in busybox. Also the output will be in the format of - // TARGET PROPAGATION - // /tmp shared - // so we only capture `shared` here.
- expectedOptions = []string{"shared"} - out, _ = dockerCmd(c, "run", "--tmpfs", "/tmp:shared", "debian:jessie", "findmnt", "-o", "TARGET,PROPAGATION", "/tmp") - for _, option := range expectedOptions { - c.Assert(out, checker.Contains, option) - } -} - -func (s *DockerSuite) TestRunSysctls(c *check.C) { - - testRequires(c, DaemonIsLinux) - var err error - - out, _ := dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=1", "--name", "test", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") - c.Assert(strings.TrimSpace(out), check.Equals, "1") - - out = inspectFieldJSON(c, "test", "HostConfig.Sysctls") - - sysctls := make(map[string]string) - err = json.Unmarshal([]byte(out), &sysctls) - c.Assert(err, check.IsNil) - c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "1") - - out, _ = dockerCmd(c, "run", "--sysctl", "net.ipv4.ip_forward=0", "--name", "test1", "busybox", "cat", "/proc/sys/net/ipv4/ip_forward") - c.Assert(strings.TrimSpace(out), check.Equals, "0") - - out = inspectFieldJSON(c, "test1", "HostConfig.Sysctls") - - err = json.Unmarshal([]byte(out), &sysctls) - c.Assert(err, check.IsNil) - c.Assert(sysctls["net.ipv4.ip_forward"], check.Equals, "0") - - runCmd := exec.Command(dockerBinary, "run", "--sysctl", "kernel.foobar=1", "--name", "test2", "busybox", "cat", "/proc/sys/kernel/foobar") - out, _, _ = runCommandWithOutput(runCmd) - if !strings.Contains(out, "invalid argument") { - c.Fatalf("expected --sysctl to fail, got %s", out) - } -} - -// TestRunSeccompProfileDenyUnshare checks that 'docker run --security-opt seccomp=/tmp/profile.json debian:jessie unshare' exits with operation not permitted. -func (s *DockerSuite) TestRunSeccompProfileDenyUnshare(c *check.C) { - testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor) - jsonData := `{ - "defaultAction": "SCMP_ACT_ALLOW", - "syscalls": [ - { - "name": "unshare", - "action": "SCMP_ACT_ERRNO" - } - ] -}` - tmpFile, err := ioutil.TempFile("", "profile.json") - defer tmpFile.Close() - if err != nil { - c.Fatal(err) - } - - if _, err := tmpFile.Write([]byte(jsonData)); err != nil { - c.Fatal(err) - } - runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), "debian:jessie", "unshare", "-p", "-m", "-f", "-r", "mount", "-t", "proc", "none", "/proc") - out, _, _ := runCommandWithOutput(runCmd) - if !strings.Contains(out, "Operation not permitted") { - c.Fatalf("expected unshare with seccomp profile denied to fail, got %s", out) - } -} - -// TestRunSeccompProfileDenyChmod checks that 'docker run --security-opt seccomp=/tmp/profile.json busybox chmod 400 /etc/hostname' exits with operation not permitted. 
-func (s *DockerSuite) TestRunSeccompProfileDenyChmod(c *check.C) {
- testRequires(c, SameHostDaemon, seccompEnabled)
- jsonData := `{
- "defaultAction": "SCMP_ACT_ALLOW",
- "syscalls": [
- {
- "name": "chmod",
- "action": "SCMP_ACT_ERRNO"
- },
- {
- "name": "fchmod",
- "action": "SCMP_ACT_ERRNO"
- },
- {
- "name": "fchmodat",
- "action": "SCMP_ACT_ERRNO"
- }
- ]
-}`
- tmpFile, err := ioutil.TempFile("", "profile.json")
- c.Assert(err, check.IsNil)
- defer tmpFile.Close()
-
- if _, err := tmpFile.Write([]byte(jsonData)); err != nil {
- c.Fatal(err)
- }
- runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp="+tmpFile.Name(), "busybox", "chmod", "400", "/etc/hostname")
- out, _, _ := runCommandWithOutput(runCmd)
- if !strings.Contains(out, "Operation not permitted") {
- c.Fatalf("expected chmod with seccomp profile denied to fail, got %s", out)
- }
-}
-
-// TestRunSeccompProfileDenyUnshareUserns checks that 'docker run debian:jessie unshare --map-root-user --user sh -c whoami' with a specific profile that
-// denies unshare of a userns exits with operation not permitted.
-func (s *DockerSuite) TestRunSeccompProfileDenyUnshareUserns(c *check.C) {
- testRequires(c, SameHostDaemon, seccompEnabled, NotArm, Apparmor)
- // from sched.h
- jsonData := fmt.Sprintf(`{
- "defaultAction": "SCMP_ACT_ALLOW",
- "syscalls": [
- {
- "name": "unshare",
- "action": "SCMP_ACT_ERRNO",
- "args": [
- {
- "index": 0,
- "value": %d,
- "op": "SCMP_CMP_EQ"
- }
- ]
- }
- ]
-}`, uint64(0x10000000))
- tmpFile, err := ioutil.TempFile("", "profile.json")
- if err != nil {
- c.Fatal(err)
- }
- defer tmpFile.Close()
-
- if _, err := tmpFile.Write([]byte(jsonData)); err != nil {
- c.Fatal(err)
- }
- runCmd := exec.Command(dockerBinary, "run", "--security-opt", "apparmor=unconfined", "--security-opt", "seccomp="+tmpFile.Name(), "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami")
- out, _, _ := runCommandWithOutput(runCmd)
- if !strings.Contains(out, "Operation not permitted") {
- c.Fatalf("expected unshare userns with seccomp profile denied to fail, got %s", out)
- }
-}
-
-// TestRunSeccompProfileDenyCloneUserns checks that 'docker run syscall-test'
-// with the default seccomp profile exits with operation not permitted.
-func (s *DockerSuite) TestRunSeccompProfileDenyCloneUserns(c *check.C) {
- testRequires(c, SameHostDaemon, seccompEnabled)
-
- runCmd := exec.Command(dockerBinary, "run", "syscall-test", "userns-test", "id")
- out, _, err := runCommandWithOutput(runCmd)
- if err == nil || !strings.Contains(out, "clone failed: Operation not permitted") {
- c.Fatalf("expected clone userns with default seccomp profile denied to fail, got %s: %v", out, err)
- }
-}
-
-// TestRunSeccompUnconfinedCloneUserns checks that
-// 'docker run --security-opt seccomp=unconfined syscall-test' allows creating a userns.
-func (s *DockerSuite) TestRunSeccompUnconfinedCloneUserns(c *check.C) {
- testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace)
-
- // make sure running with seccomp=unconfined is ok
- runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "syscall-test", "userns-test", "id")
- if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") {
- c.Fatalf("expected clone userns with --security-opt seccomp=unconfined to succeed, got %s: %v", out, err)
- }
-}
-
-// TestRunSeccompAllowPrivCloneUserns checks that 'docker run --privileged syscall-test'
-// allows creating a userns.
-func (s *DockerSuite) TestRunSeccompAllowPrivCloneUserns(c *check.C) { - testRequires(c, SameHostDaemon, seccompEnabled, UserNamespaceInKernel, NotUserNamespace) - - // make sure running w privileged is ok - runCmd := exec.Command(dockerBinary, "run", "--privileged", "syscall-test", "userns-test", "id") - if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "nobody") { - c.Fatalf("expected clone userns with --privileged to succeed, got %s: %v", out, err) - } -} - -// TestRunSeccompAllowSetrlimit checks that 'docker run debian:jessie ulimit -v 1048510' succeeds. -func (s *DockerSuite) TestRunSeccompAllowSetrlimit(c *check.C) { - testRequires(c, SameHostDaemon, seccompEnabled) - - // ulimit uses setrlimit, so we want to make sure we don't break it - runCmd := exec.Command(dockerBinary, "run", "debian:jessie", "bash", "-c", "ulimit -v 1048510") - if out, _, err := runCommandWithOutput(runCmd); err != nil { - c.Fatalf("expected ulimit with seccomp to succeed, got %s: %v", out, err) - } -} - -func (s *DockerSuite) TestRunSeccompDefaultProfileAcct(c *check.C) { - testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace) - - var group sync.WaitGroup - group.Add(5) - errChan := make(chan error, 5) - go func() { - out, _, err := dockerCmdWithError("run", "syscall-test", "acct-test") - if err == nil || !strings.Contains(out, "Operation not permitted") { - errChan <- fmt.Errorf("goroutine 0: expected Operation not permitted, got: %s", out) - } - group.Done() - }() - - go func() { - out, _, err := dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "acct-test") - if err == nil || !strings.Contains(out, "Operation not permitted") { - errChan <- fmt.Errorf("goroutine 1: expected Operation not permitted, got: %s", out) - } - group.Done() - }() - - go func() { - out, _, err := dockerCmdWithError("run", "--cap-add", "sys_pacct", "syscall-test", "acct-test") - if err == nil || !strings.Contains(out, "No such file or directory") { - errChan <- fmt.Errorf("goroutine 2: expected No such file or directory, got: %s", out) - } - group.Done() - }() - - go func() { - out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "acct-test") - if err == nil || !strings.Contains(out, "No such file or directory") { - errChan <- fmt.Errorf("goroutine 3: expected No such file or directory, got: %s", out) - } - group.Done() - }() - - go func() { - out, _, err := dockerCmdWithError("run", "--cap-drop", "ALL", "--cap-add", "sys_pacct", "syscall-test", "acct-test") - if err == nil || !strings.Contains(out, "No such file or directory") { - errChan <- fmt.Errorf("goroutine 4: expected No such file or directory, got: %s", out) - } - group.Done() - }() - - group.Wait() - close(errChan) - - for err := range errChan { - c.Assert(err, checker.IsNil) - } -} - -func (s *DockerSuite) TestRunSeccompDefaultProfileNS(c *check.C) { - testRequires(c, SameHostDaemon, seccompEnabled, NotUserNamespace) - - var group sync.WaitGroup - group.Add(6) - errChan := make(chan error, 6) - - go func() { - out, _, err := dockerCmdWithError("run", "syscall-test", "ns-test", "echo", "hello0") - if err == nil || !strings.Contains(out, "Operation not permitted") { - errChan <- fmt.Errorf("goroutine 0: expected Operation not permitted, got: %s", out) - } - group.Done() - }() - - go func() { - out, _, err := dockerCmdWithError("run", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello1") - if err != nil || !strings.Contains(out, "hello1") { - errChan <- 
fmt.Errorf("goroutine 1: expected hello1, got: %s, %v", out, err)
- }
- group.Done()
- }()
-
- go func() {
- out, _, err := dockerCmdWithError("run", "--cap-drop", "all", "--cap-add", "sys_admin", "syscall-test", "ns-test", "echo", "hello2")
- if err != nil || !strings.Contains(out, "hello2") {
- errChan <- fmt.Errorf("goroutine 2: expected hello2, got: %s, %v", out, err)
- }
- group.Done()
- }()
-
- go func() {
- out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "syscall-test", "ns-test", "echo", "hello3")
- if err != nil || !strings.Contains(out, "hello3") {
- errChan <- fmt.Errorf("goroutine 3: expected hello3, got: %s, %v", out, err)
- }
- group.Done()
- }()
-
- go func() {
- out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "acct-test")
- if err == nil || !strings.Contains(out, "No such file or directory") {
- errChan <- fmt.Errorf("goroutine 4: expected No such file or directory, got: %s", out)
- }
- group.Done()
- }()
-
- go func() {
- out, _, err := dockerCmdWithError("run", "--cap-add", "ALL", "--security-opt", "seccomp=unconfined", "syscall-test", "ns-test", "echo", "hello4")
- if err != nil || !strings.Contains(out, "hello4") {
- errChan <- fmt.Errorf("goroutine 5: expected hello4, got: %s, %v", out, err)
- }
- group.Done()
- }()
-
- group.Wait()
- close(errChan)
-
- for err := range errChan {
- c.Assert(err, checker.IsNil)
- }
-}
-
-// TestRunNoNewPrivSetuid checks that --security-opt=no-new-privileges prevents
-// effective uid transitions on executing setuid binaries.
-func (s *DockerSuite) TestRunNoNewPrivSetuid(c *check.C) {
- testRequires(c, DaemonIsLinux, NotUserNamespace, SameHostDaemon)
-
- // test that running a setuid binary results in no effective uid transition
- runCmd := exec.Command(dockerBinary, "run", "--security-opt", "no-new-privileges", "--user", "1000", "nnp-test", "/usr/bin/nnp-test")
- if out, _, err := runCommandWithOutput(runCmd); err != nil || !strings.Contains(out, "EUID=1000") {
- c.Fatalf("expected output to contain EUID=1000, got %s: %v", out, err)
- }
-}
-
-func (s *DockerSuite) TestRunApparmorProcDirectory(c *check.C) {
- testRequires(c, SameHostDaemon, Apparmor)
-
- // running with seccomp unconfined tests the apparmor profile
- runCmd := exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/cgroup")
- if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) {
- c.Fatalf("expected chmod 777 /proc/1/cgroup to fail, got %s: %v", out, err)
- }
-
- runCmd = exec.Command(dockerBinary, "run", "--security-opt", "seccomp=unconfined", "busybox", "chmod", "777", "/proc/1/attr/current")
- if out, _, err := runCommandWithOutput(runCmd); err == nil || !(strings.Contains(out, "Permission denied") || strings.Contains(out, "Operation not permitted")) {
- c.Fatalf("expected chmod 777 /proc/1/attr/current to fail, got %s: %v", out, err)
- }
-}
-
-// make sure the default profile can be successfully parsed (using unshare as it is
-// something which we know is blocked in the default profile)
-func (s *DockerSuite) TestRunSeccompWithDefaultProfile(c *check.C) {
- testRequires(c, SameHostDaemon, seccompEnabled, NotArm, NotPpc64le, NotS390X)
-
- out, _, err := dockerCmdWithError("run", "--security-opt", "seccomp=../profiles/seccomp/default.json", "debian:jessie", "unshare", "--map-root-user", "--user", "sh", "-c", "whoami")
-
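Before the assertions that follow, a note on the profile files these tests write by hand: the same JSON can be generated from a couple of small structs, which avoids quoting mistakes in string literals. This is a hedged sketch; the struct names are illustrative and are not the engine's own seccomp types, only the JSON shape is taken from the profiles above.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Illustrative types mirroring the profile JSON literals used in these tests;
// not the engine's internal seccomp representation.
type seccompSyscall struct {
	Name   string `json:"name"`
	Action string `json:"action"`
}

type seccompProfile struct {
	DefaultAction string           `json:"defaultAction"`
	Syscalls      []seccompSyscall `json:"syscalls"`
}

func main() {
	// Allow everything by default, deny unshare: the same shape as the
	// hand-written deny profiles above.
	p := seccompProfile{
		DefaultAction: "SCMP_ACT_ALLOW",
		Syscalls:      []seccompSyscall{{Name: "unshare", Action: "SCMP_ACT_ERRNO"}},
	}
	b, err := json.MarshalIndent(p, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}
```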
c.Assert(err, checker.NotNil, check.Commentf(out)) - c.Assert(strings.TrimSpace(out), checker.Equals, "unshare: unshare failed: Operation not permitted") -} - -// TestRunDeviceSymlink checks run with device that follows symlink (#13840 and #22271) -func (s *DockerSuite) TestRunDeviceSymlink(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace, NotArm, SameHostDaemon) - if _, err := os.Stat("/dev/zero"); err != nil { - c.Skip("Host does not have /dev/zero") - } - - // Create a temporary directory to create symlink - tmpDir, err := ioutil.TempDir("", "docker_device_follow_symlink_tests") - c.Assert(err, checker.IsNil) - - defer os.RemoveAll(tmpDir) - - // Create a symbolic link to /dev/zero - symZero := filepath.Join(tmpDir, "zero") - err = os.Symlink("/dev/zero", symZero) - c.Assert(err, checker.IsNil) - - // Create a temporary file "temp" inside tmpDir, write some data to "tmpDir/temp", - // then create a symlink "tmpDir/file" to the temporary file "tmpDir/temp". - tmpFile := filepath.Join(tmpDir, "temp") - err = ioutil.WriteFile(tmpFile, []byte("temp"), 0666) - c.Assert(err, checker.IsNil) - symFile := filepath.Join(tmpDir, "file") - err = os.Symlink(tmpFile, symFile) - c.Assert(err, checker.IsNil) - - // Create a symbolic link to /dev/zero, this time with a relative path (#22271) - err = os.Symlink("zero", "/dev/symzero") - if err != nil { - c.Fatal("/dev/symzero creation failed") - } - // We need to remove this symbolic link here as it is created in /dev/, not temporary directory as above - defer os.Remove("/dev/symzero") - - // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 - out, _ := dockerCmd(c, "run", "--device", symZero+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum") - c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23")) - - // symlink "tmpDir/file" to a file "tmpDir/temp" will result in an error as it is not a device. 
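The rejection the next assertion expects boils down to a file-mode check once the symlink is resolved. Here is a rough, self-contained sketch of that idea; it is not the daemon's actual validation code, just the mode test it implies.

```go
package main

import (
	"fmt"
	"os"
)

// isDeviceNode reports whether path refers to a block or character device.
// os.Stat follows symlinks, mirroring how --device resolves a host path.
// Sketch only; the engine's real check lives in its device-mapping code.
func isDeviceNode(path string) (bool, error) {
	fi, err := os.Stat(path)
	if err != nil {
		return false, err
	}
	return fi.Mode()&os.ModeDevice != 0, nil
}

func main() {
	ok, err := isDeviceNode("/dev/zero")
	fmt.Println(ok, err) // true <nil> on a typical Linux host
}
```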
- out, _, err = dockerCmdWithError("run", "--device", symFile+":/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum")
- c.Assert(err, check.NotNil)
- c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "not a device node", check.Commentf("expected output 'not a device node'"))
-
- // md5sum of 'dd if=/dev/zero bs=4K count=8' is bb7df04e1b0a2570657527a7e108ae23 (this time via the relative-path-backed symlink, see #22271)
- out, _ = dockerCmd(c, "run", "--device", "/dev/symzero:/dev/symzero", "busybox", "sh", "-c", "dd if=/dev/symzero bs=4K count=8 | md5sum")
- c.Assert(strings.Trim(out, "\r\n"), checker.Contains, "bb7df04e1b0a2570657527a7e108ae23", check.Commentf("expected output bb7df04e1b0a2570657527a7e108ae23"))
-}
-
-// TestRunPidsLimit makes sure the pids cgroup is set with --pids-limit
-func (s *DockerSuite) TestRunPidsLimit(c *check.C) {
- testRequires(c, pidsLimit)
-
- file := "/sys/fs/cgroup/pids/pids.max"
- out, _ := dockerCmd(c, "run", "--name", "skittles", "--pids-limit", "2", "busybox", "cat", file)
- c.Assert(strings.TrimSpace(out), checker.Equals, "2")
-
- out = inspectField(c, "skittles", "HostConfig.PidsLimit")
- c.Assert(out, checker.Equals, "2", check.Commentf("setting the pids limit failed"))
-}
-
-func (s *DockerSuite) TestRunPrivilegedAllowedDevices(c *check.C) {
- testRequires(c, DaemonIsLinux, NotUserNamespace)
-
- file := "/sys/fs/cgroup/devices/devices.list"
- out, _ := dockerCmd(c, "run", "--privileged", "busybox", "cat", file)
- c.Logf("out: %q", out)
- c.Assert(strings.TrimSpace(out), checker.Equals, "a *:* rwm")
-}
-
-func (s *DockerSuite) TestRunUserDeviceAllowed(c *check.C) {
- testRequires(c, DaemonIsLinux)
-
- fi, err := os.Stat("/dev/snd/timer")
- if err != nil {
- c.Skip("Host does not have /dev/snd/timer")
- }
- stat, ok := fi.Sys().(*syscall.Stat_t)
- if !ok {
- c.Skip("Could not stat /dev/snd/timer")
- }
-
- file := "/sys/fs/cgroup/devices/devices.list"
- out, _ := dockerCmd(c, "run", "--device", "/dev/snd/timer:w", "busybox", "cat", file)
- c.Assert(out, checker.Contains, fmt.Sprintf("c %d:%d w", stat.Rdev/256, stat.Rdev%256))
-}
diff --git a/integration-cli/docker_cli_save_load_test.go b/integration-cli/docker_cli_save_load_test.go
deleted file mode 100644
index 869bf6bf88..0000000000
--- a/integration-cli/docker_cli_save_load_test.go
+++ /dev/null
@@ -1,382 +0,0 @@
-package main
-
-import (
- "encoding/json"
- "fmt"
- "io/ioutil"
- "os"
- "os/exec"
- "path/filepath"
- "reflect"
- "regexp"
- "sort"
- "strings"
- "time"
-
- "github.com/docker/distribution/digest"
- "github.com/docker/docker/pkg/integration/checker"
- "github.com/go-check/check"
-)
-
-// save a repo using xz+gz compression and try to load it using stdout
-func (s *DockerSuite) TestSaveXzAndLoadRepoStdout(c *check.C) {
- testRequires(c, DaemonIsLinux)
- name := "test-save-xz-and-load-repo-stdout"
- dockerCmd(c, "run", "--name", name, "busybox", "true")
-
- repoName := "foobar-save-load-test-xz-gz"
- out, _ := dockerCmd(c, "commit", name, repoName)
-
- dockerCmd(c, "inspect", repoName)
-
- repoTarball, _, err := runCommandPipelineWithOutput(
- exec.Command(dockerBinary, "save", repoName),
- exec.Command("xz", "-c"),
- exec.Command("gzip", "-c"))
- c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err))
- deleteImages(repoName)
-
- loadCmd := exec.Command(dockerBinary, "load")
- loadCmd.Stdin = strings.NewReader(repoTarball)
- out, _, err = runCommandWithOutput(loadCmd)
- c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out))
-
- after, _, err := dockerCmdWithError("inspect", repoName)
- c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after))
-}
-
-// save a repo using xz+gz compression and try to load it using stdout
-func (s *DockerSuite) TestSaveXzGzAndLoadRepoStdout(c *check.C) {
- testRequires(c, DaemonIsLinux)
- name := "test-save-xz-gz-and-load-repo-stdout"
- dockerCmd(c, "run", "--name", name, "busybox", "true")
-
- repoName := "foobar-save-load-test-xz-gz"
- dockerCmd(c, "commit", name, repoName)
-
- dockerCmd(c, "inspect", repoName)
-
- out, _, err := runCommandPipelineWithOutput(
- exec.Command(dockerBinary, "save", repoName),
- exec.Command("xz", "-c"),
- exec.Command("gzip", "-c"))
- c.Assert(err, checker.IsNil, check.Commentf("failed to save repo: %v %v", out, err))
-
- deleteImages(repoName)
-
- loadCmd := exec.Command(dockerBinary, "load")
- loadCmd.Stdin = strings.NewReader(out)
- out, _, err = runCommandWithOutput(loadCmd)
- c.Assert(err, checker.NotNil, check.Commentf("expected error, but succeeded with no error and output: %v", out))
-
- after, _, err := dockerCmdWithError("inspect", repoName)
- c.Assert(err, checker.NotNil, check.Commentf("the repo should not exist: %v", after))
-}
-
-func (s *DockerSuite) TestSaveSingleTag(c *check.C) {
- testRequires(c, DaemonIsLinux)
- repoName := "foobar-save-single-tag-test"
- dockerCmd(c, "tag", "busybox:latest", fmt.Sprintf("%v:latest", repoName))
-
- out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName)
- cleanedImageID := strings.TrimSpace(out)
-
- out, _, err := runCommandPipelineWithOutput(
- exec.Command(dockerBinary, "save", fmt.Sprintf("%v:latest", repoName)),
- exec.Command("tar", "t"),
- exec.Command("grep", "-E", fmt.Sprintf("(^repositories$|%v)", cleanedImageID)))
- c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err))
-}
-
-func (s *DockerSuite) TestSaveCheckTimes(c *check.C) {
- testRequires(c, DaemonIsLinux)
- repoName := "busybox:latest"
- out, _ := dockerCmd(c, "inspect", repoName)
- data := []struct {
- ID string
- Created time.Time
- }{}
- err := json.Unmarshal([]byte(out), &data)
- c.Assert(err, checker.IsNil, check.Commentf("failed to unmarshal from %q: err %v", repoName, err))
- c.Assert(len(data), checker.Not(checker.Equals), 0, check.Commentf("failed to unmarshal the data from %q", repoName))
- tarTvTimeFormat := "2006-01-02 15:04"
- out, _, err = runCommandPipelineWithOutput(
- exec.Command(dockerBinary, "save", repoName),
- exec.Command("tar", "tv"),
- exec.Command("grep", "-E", fmt.Sprintf("%s %s", data[0].Created.Format(tarTvTimeFormat), digest.Digest(data[0].ID).Hex())))
- c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID and 'repositories' file: %s, %v", out, err))
-}
-
-func (s *DockerSuite) TestSaveImageId(c *check.C) {
- testRequires(c, DaemonIsLinux)
- repoName := "foobar-save-image-id-test"
- dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v:latest", repoName))
-
- out, _ := dockerCmd(c, "images", "-q", "--no-trunc", repoName)
- cleanedLongImageID := strings.TrimPrefix(strings.TrimSpace(out), "sha256:")
-
- out, _ = dockerCmd(c, "images", "-q", repoName)
- cleanedShortImageID := strings.TrimSpace(out)
-
- // Make sure IDs are not empty
- c.Assert(cleanedLongImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be empty."))
- c.Assert(cleanedShortImageID, checker.Not(check.Equals), "", check.Commentf("Id should not be 
empty.")) - - saveCmd := exec.Command(dockerBinary, "save", cleanedShortImageID) - tarCmd := exec.Command("tar", "t") - - var err error - tarCmd.Stdin, err = saveCmd.StdoutPipe() - c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for tar: %v", err)) - grepCmd := exec.Command("grep", cleanedLongImageID) - grepCmd.Stdin, err = tarCmd.StdoutPipe() - c.Assert(err, checker.IsNil, check.Commentf("cannot set stdout pipe for grep: %v", err)) - - c.Assert(tarCmd.Start(), checker.IsNil, check.Commentf("tar failed with error: %v", err)) - c.Assert(saveCmd.Start(), checker.IsNil, check.Commentf("docker save failed with error: %v", err)) - defer func() { - saveCmd.Wait() - tarCmd.Wait() - dockerCmd(c, "rmi", repoName) - }() - - out, _, err = runCommandWithOutput(grepCmd) - - c.Assert(err, checker.IsNil, check.Commentf("failed to save repo with image ID: %s, %v", out, err)) -} - -// save a repo and try to load it using flags -func (s *DockerSuite) TestSaveAndLoadRepoFlags(c *check.C) { - testRequires(c, DaemonIsLinux) - name := "test-save-and-load-repo-flags" - dockerCmd(c, "run", "--name", name, "busybox", "true") - - repoName := "foobar-save-load-test" - - deleteImages(repoName) - dockerCmd(c, "commit", name, repoName) - - before, _ := dockerCmd(c, "inspect", repoName) - - out, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "save", repoName), - exec.Command(dockerBinary, "load")) - c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) - - after, _ := dockerCmd(c, "inspect", repoName) - c.Assert(before, checker.Equals, after, check.Commentf("inspect is not the same after a save / load")) -} - -func (s *DockerSuite) TestSaveWithNoExistImage(c *check.C) { - testRequires(c, DaemonIsLinux) - - imgName := "foobar-non-existing-image" - - out, _, err := dockerCmdWithError("save", "-o", "test-img.tar", imgName) - c.Assert(err, checker.NotNil, check.Commentf("save image should fail for non-existing image")) - c.Assert(out, checker.Contains, fmt.Sprintf("No such image: %s", imgName)) -} - -func (s *DockerSuite) TestSaveMultipleNames(c *check.C) { - testRequires(c, DaemonIsLinux) - repoName := "foobar-save-multi-name-test" - - // Make one image - dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-one:latest", repoName)) - - // Make two images - dockerCmd(c, "tag", "emptyfs:latest", fmt.Sprintf("%v-two:latest", repoName)) - - out, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "save", fmt.Sprintf("%v-one", repoName), fmt.Sprintf("%v-two:latest", repoName)), - exec.Command("tar", "xO", "repositories"), - exec.Command("grep", "-q", "-E", "(-one|-two)"), - ) - c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple repos: %s, %v", out, err)) -} - -func (s *DockerSuite) TestSaveRepoWithMultipleImages(c *check.C) { - testRequires(c, DaemonIsLinux) - makeImage := func(from string, tag string) string { - var ( - out string - ) - out, _ = dockerCmd(c, "run", "-d", from, "true") - cleanedContainerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "commit", cleanedContainerID, tag) - imageID := strings.TrimSpace(out) - return imageID - } - - repoName := "foobar-save-multi-images-test" - tagFoo := repoName + ":foo" - tagBar := repoName + ":bar" - - idFoo := makeImage("busybox:latest", tagFoo) - idBar := makeImage("busybox:latest", tagBar) - - deleteImages(repoName) - - // create the archive - out, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "save", repoName, "busybox:latest"), - 
exec.Command("tar", "t")) - c.Assert(err, checker.IsNil, check.Commentf("failed to save multiple images: %s, %v", out, err)) - - lines := strings.Split(strings.TrimSpace(out), "\n") - var actual []string - for _, l := range lines { - if regexp.MustCompile("^[a-f0-9]{64}\\.json$").Match([]byte(l)) { - actual = append(actual, strings.TrimSuffix(l, ".json")) - } - } - - // make the list of expected layers - out = inspectField(c, "busybox:latest", "Id") - expected := []string{strings.TrimSpace(out), idFoo, idBar} - - // prefixes are not in tar - for i := range expected { - expected[i] = digest.Digest(expected[i]).Hex() - } - - sort.Strings(actual) - sort.Strings(expected) - c.Assert(actual, checker.DeepEquals, expected, check.Commentf("archive does not contains the right layers: got %v, expected %v, output: %q", actual, expected, out)) -} - -// Issue #6722 #5892 ensure directories are included in changes -func (s *DockerSuite) TestSaveDirectoryPermissions(c *check.C) { - testRequires(c, DaemonIsLinux) - layerEntries := []string{"opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} - layerEntriesAUFS := []string{"./", ".wh..wh.aufs", ".wh..wh.orph/", ".wh..wh.plnk/", "opt/", "opt/a/", "opt/a/b/", "opt/a/b/c"} - - name := "save-directory-permissions" - tmpDir, err := ioutil.TempDir("", "save-layers-with-directories") - c.Assert(err, checker.IsNil, check.Commentf("failed to create temporary directory: %s", err)) - extractionDirectory := filepath.Join(tmpDir, "image-extraction-dir") - os.Mkdir(extractionDirectory, 0777) - - defer os.RemoveAll(tmpDir) - _, err = buildImage(name, - `FROM busybox - RUN adduser -D user && mkdir -p /opt/a/b && chown -R user:user /opt/a - RUN touch /opt/a/b/c && chown user:user /opt/a/b/c`, - true) - c.Assert(err, checker.IsNil, check.Commentf("%v", err)) - - out, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "save", name), - exec.Command("tar", "-xf", "-", "-C", extractionDirectory), - ) - c.Assert(err, checker.IsNil, check.Commentf("failed to save and extract image: %s", out)) - - dirs, err := ioutil.ReadDir(extractionDirectory) - c.Assert(err, checker.IsNil, check.Commentf("failed to get a listing of the layer directories: %s", err)) - - found := false - for _, entry := range dirs { - var entriesSansDev []string - if entry.IsDir() { - layerPath := filepath.Join(extractionDirectory, entry.Name(), "layer.tar") - - f, err := os.Open(layerPath) - c.Assert(err, checker.IsNil, check.Commentf("failed to open %s: %s", layerPath, err)) - - entries, err := listTar(f) - for _, e := range entries { - if !strings.Contains(e, "dev/") { - entriesSansDev = append(entriesSansDev, e) - } - } - c.Assert(err, checker.IsNil, check.Commentf("encountered error while listing tar entries: %s", err)) - - if reflect.DeepEqual(entriesSansDev, layerEntries) || reflect.DeepEqual(entriesSansDev, layerEntriesAUFS) { - found = true - break - } - } - } - - c.Assert(found, checker.Equals, true, check.Commentf("failed to find the layer with the right content listing")) - -} - -// Test loading a weird image where one of the layers is of zero size. -// The layer.tar file is actually zero bytes, no padding or anything else. 
-// See issue: 18170 -func (s *DockerSuite) TestLoadZeroSizeLayer(c *check.C) { - testRequires(c, DaemonIsLinux) - - dockerCmd(c, "load", "-i", "fixtures/load/emptyLayer.tar") -} - -func (s *DockerSuite) TestSaveLoadParents(c *check.C) { - testRequires(c, DaemonIsLinux) - - makeImage := func(from string, addfile string) string { - var ( - out string - ) - out, _ = dockerCmd(c, "run", "-d", from, "touch", addfile) - cleanedContainerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "commit", cleanedContainerID) - imageID := strings.TrimSpace(out) - - dockerCmd(c, "rm", "-f", cleanedContainerID) - return imageID - } - - idFoo := makeImage("busybox", "foo") - idBar := makeImage(idFoo, "bar") - - tmpDir, err := ioutil.TempDir("", "save-load-parents") - c.Assert(err, checker.IsNil) - defer os.RemoveAll(tmpDir) - - c.Log("tmpdir", tmpDir) - - outfile := filepath.Join(tmpDir, "out.tar") - - dockerCmd(c, "save", "-o", outfile, idBar, idFoo) - dockerCmd(c, "rmi", idBar) - dockerCmd(c, "load", "-i", outfile) - - inspectOut := inspectField(c, idBar, "Parent") - c.Assert(inspectOut, checker.Equals, idFoo) - - inspectOut = inspectField(c, idFoo, "Parent") - c.Assert(inspectOut, checker.Equals, "") -} - -func (s *DockerSuite) TestSaveLoadNoTag(c *check.C) { - testRequires(c, DaemonIsLinux) - - name := "saveloadnotag" - - _, err := buildImage(name, "FROM busybox\nENV foo=bar", true) - c.Assert(err, checker.IsNil, check.Commentf("%v", err)) - - id := inspectField(c, name, "Id") - - // Test to make sure that save w/o name just shows imageID during load - out, _, err := runCommandPipelineWithOutput( - exec.Command(dockerBinary, "save", id), - exec.Command(dockerBinary, "load")) - c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) - - // Should not show 'name' but should show the image ID during the load - c.Assert(out, checker.Not(checker.Contains), "Loaded image: ") - c.Assert(out, checker.Contains, "Loaded image ID:") - c.Assert(out, checker.Contains, id) - - // Test to make sure that save by name shows that name during load - out, _, err = runCommandPipelineWithOutput( - exec.Command(dockerBinary, "save", name), - exec.Command(dockerBinary, "load")) - c.Assert(err, checker.IsNil, check.Commentf("failed to save and load repo: %s, %v", out, err)) - c.Assert(out, checker.Contains, "Loaded image: "+name+":latest") - c.Assert(out, checker.Not(checker.Contains), "Loaded image ID:") -} diff --git a/integration-cli/docker_cli_save_load_unix_test.go b/integration-cli/docker_cli_save_load_unix_test.go deleted file mode 100644 index d9dd95f126..0000000000 --- a/integration-cli/docker_cli_save_load_unix_test.go +++ /dev/null @@ -1,87 +0,0 @@ -// +build !windows - -package main - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" - "github.com/kr/pty" -) - -// save a repo and try to load it using stdout -func (s *DockerSuite) TestSaveAndLoadRepoStdout(c *check.C) { - name := "test-save-and-load-repo-stdout" - dockerCmd(c, "run", "--name", name, "busybox", "true") - - repoName := "foobar-save-load-test" - before, _ := dockerCmd(c, "commit", name, repoName) - before = strings.TrimRight(before, "\n") - - tmpFile, err := ioutil.TempFile("", "foobar-save-load-test.tar") - c.Assert(err, check.IsNil) - defer os.Remove(tmpFile.Name()) - - saveCmd := exec.Command(dockerBinary, "save", repoName) - saveCmd.Stdout = tmpFile - - _, err = runCommand(saveCmd) - c.Assert(err, check.IsNil) - 
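For reference, the pipeline pattern that `runCommandPipelineWithOutput` wraps in the tests above is ordinary `os/exec` plumbing. A standalone sketch of `docker save | docker load` follows; the `docker` binary on `$PATH` and the `busybox` image are assumptions for illustration.

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// docker save busybox | docker load: the pipe the suite's helper wraps.
	save := exec.Command("docker", "save", "busybox")
	load := exec.Command("docker", "load")

	pipe, err := save.StdoutPipe()
	if err != nil {
		panic(err)
	}
	load.Stdin = pipe

	if err := save.Start(); err != nil {
		panic(err)
	}
	out, loadErr := load.CombinedOutput() // starts load and waits for it
	saveErr := save.Wait()
	fmt.Printf("%s(save err=%v, load err=%v)\n", out, saveErr, loadErr)
}
```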
- tmpFile, err = os.Open(tmpFile.Name()) - c.Assert(err, check.IsNil) - - deleteImages(repoName) - - loadCmd := exec.Command(dockerBinary, "load") - loadCmd.Stdin = tmpFile - - out, _, err := runCommandWithOutput(loadCmd) - c.Assert(err, check.IsNil, check.Commentf(out)) - - after := inspectField(c, repoName, "Id") - after = strings.TrimRight(after, "\n") - - c.Assert(after, check.Equals, before) //inspect is not the same after a save / load - - deleteImages(repoName) - - pty, tty, err := pty.Open() - c.Assert(err, check.IsNil) - cmd := exec.Command(dockerBinary, "save", repoName) - cmd.Stdin = tty - cmd.Stdout = tty - cmd.Stderr = tty - c.Assert(cmd.Start(), check.IsNil) - c.Assert(cmd.Wait(), check.NotNil) //did not break writing to a TTY - - buf := make([]byte, 1024) - - n, err := pty.Read(buf) - c.Assert(err, check.IsNil) //could not read tty output - c.Assert(string(buf[:n]), checker.Contains, "Cowardly refusing", check.Commentf("help output is not being yielded", out)) -} - -func (s *DockerSuite) TestSaveAndLoadWithProgressBar(c *check.C) { - name := "test-load" - _, err := buildImage(name, ` - FROM busybox - RUN touch aa - `, true) - c.Assert(err, check.IsNil) - - tmptar := name + ".tar" - dockerCmd(c, "save", "-o", tmptar, name) - defer os.Remove(tmptar) - - dockerCmd(c, "rmi", name) - dockerCmd(c, "tag", "busybox", name) - out, _ := dockerCmd(c, "load", "-i", tmptar) - expected := fmt.Sprintf("The image %s:latest already exists, renaming the old one with ID", name) - c.Assert(out, checker.Contains, expected) -} diff --git a/integration-cli/docker_cli_search_test.go b/integration-cli/docker_cli_search_test.go deleted file mode 100644 index 5a32f2ab93..0000000000 --- a/integration-cli/docker_cli_search_test.go +++ /dev/null @@ -1,131 +0,0 @@ -package main - -import ( - "fmt" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// search for repos named "registry" on the central registry -func (s *DockerSuite) TestSearchOnCentralRegistry(c *check.C) { - testRequires(c, Network, DaemonIsLinux) - - out, _ := dockerCmd(c, "search", "busybox") - c.Assert(out, checker.Contains, "Busybox base image.", check.Commentf("couldn't find any repository named (or containing) 'Busybox base image.'")) -} - -func (s *DockerSuite) TestSearchStarsOptionWithWrongParameter(c *check.C) { - out, _, err := dockerCmdWithError("search", "--filter", "stars=a", "busybox") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) - - out, _, err = dockerCmdWithError("search", "-f", "stars=a", "busybox") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) - - out, _, err = dockerCmdWithError("search", "-f", "is-automated=a", "busybox") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) - - out, _, err = dockerCmdWithError("search", "-f", "is-official=a", "busybox") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "Invalid filter", check.Commentf("couldn't find the invalid filter warning")) - - // -s --stars deprecated since Docker 1.13 - out, _, err = dockerCmdWithError("search", "--stars=a", "busybox") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "invalid syntax", 
check.Commentf("couldn't find the invalid value warning")) - - // -s --stars deprecated since Docker 1.13 - out, _, err = dockerCmdWithError("search", "-s=-1", "busybox") - c.Assert(err, check.NotNil, check.Commentf(out)) - c.Assert(out, checker.Contains, "invalid syntax", check.Commentf("couldn't find the invalid value warning")) -} - -func (s *DockerSuite) TestSearchCmdOptions(c *check.C) { - testRequires(c, Network, DaemonIsLinux) - - out, _ := dockerCmd(c, "search", "--help") - c.Assert(out, checker.Contains, "Usage:\tdocker search [OPTIONS] TERM") - - outSearchCmd, _ := dockerCmd(c, "search", "busybox") - outSearchCmdNotrunc, _ := dockerCmd(c, "search", "--no-trunc=true", "busybox") - - c.Assert(len(outSearchCmd) > len(outSearchCmdNotrunc), check.Equals, false, check.Commentf("The no-trunc option can't take effect.")) - - outSearchCmdautomated, _ := dockerCmd(c, "search", "--filter", "is-automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. - outSearchCmdautomatedSlice := strings.Split(outSearchCmdautomated, "\n") - for i := range outSearchCmdautomatedSlice { - c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) - } - - outSearchCmdNotOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=false", "busybox") //The busybox is a busybox base image, official image. - outSearchCmdNotOfficialSlice := strings.Split(outSearchCmdNotOfficial, "\n") - for i := range outSearchCmdNotOfficialSlice { - c.Assert(strings.HasPrefix(outSearchCmdNotOfficialSlice[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an OFFICIAL image: %s", outSearchCmdNotOfficial)) - } - - outSearchCmdOfficial, _ := dockerCmd(c, "search", "--filter", "is-official=true", "busybox") //The busybox is a busybox base image, official image. - outSearchCmdOfficialSlice := strings.Split(outSearchCmdOfficial, "\n") - c.Assert(outSearchCmdOfficialSlice, checker.HasLen, 3) // 1 header, 1 line, 1 carriage return - c.Assert(strings.HasPrefix(outSearchCmdOfficialSlice[1], "busybox "), check.Equals, true, check.Commentf("The busybox is an OFFICIAL image: %s", outSearchCmdNotOfficial)) - - outSearchCmdStars, _ := dockerCmd(c, "search", "--filter", "stars=2", "busybox") - c.Assert(strings.Count(outSearchCmdStars, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars)) - - dockerCmd(c, "search", "--filter", "is-automated=true", "--filter", "stars=2", "--no-trunc=true", "busybox") - - // --automated deprecated since Docker 1.13 - outSearchCmdautomated1, _ := dockerCmd(c, "search", "--automated=true", "busybox") //The busybox is a busybox base image, not an AUTOMATED image. 
- outSearchCmdautomatedSlice1 := strings.Split(outSearchCmdautomated1, "\n") - for i := range outSearchCmdautomatedSlice1 { - c.Assert(strings.HasPrefix(outSearchCmdautomatedSlice1[i], "busybox "), check.Equals, false, check.Commentf("The busybox is not an AUTOMATED image: %s", outSearchCmdautomated)) - } - - // -s --stars deprecated since Docker 1.13 - outSearchCmdStars1, _ := dockerCmd(c, "search", "--stars=2", "busybox") - c.Assert(strings.Count(outSearchCmdStars1, "[OK]") > strings.Count(outSearchCmd, "[OK]"), check.Equals, false, check.Commentf("The quantity of images with stars should be less than that of all images: %s", outSearchCmdStars1)) - - // -s --stars deprecated since Docker 1.13 - dockerCmd(c, "search", "--stars=2", "--automated=true", "--no-trunc=true", "busybox") -} - -// search for repos which start with "ubuntu-" on the central registry -func (s *DockerSuite) TestSearchOnCentralRegistryWithDash(c *check.C) { - testRequires(c, Network, DaemonIsLinux) - - dockerCmd(c, "search", "ubuntu-") -} - -// test case for #23055 -func (s *DockerSuite) TestSearchWithLimit(c *check.C) { - testRequires(c, Network, DaemonIsLinux) - - limit := 10 - out, _, err := dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") - c.Assert(err, checker.IsNil) - outSlice := strings.Split(out, "\n") - c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return - - limit = 50 - out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") - c.Assert(err, checker.IsNil) - outSlice = strings.Split(out, "\n") - c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return - - limit = 100 - out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") - c.Assert(err, checker.IsNil) - outSlice = strings.Split(out, "\n") - c.Assert(outSlice, checker.HasLen, limit+2) // 1 header, 1 carriage return - - limit = 0 - out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") - c.Assert(err, checker.Not(checker.IsNil)) - - limit = 200 - out, _, err = dockerCmdWithError("search", fmt.Sprintf("--limit=%d", limit), "docker") - c.Assert(err, checker.Not(checker.IsNil)) -} diff --git a/integration-cli/docker_cli_service_create_hack_test.go b/integration-cli/docker_cli_service_create_hack_test.go deleted file mode 100644 index 4814ddee59..0000000000 --- a/integration-cli/docker_cli_service_create_hack_test.go +++ /dev/null @@ -1,45 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/swarm" - "github.com/go-check/check" -) - -func (s *DockerSwarmSuite) TestServiceCreateMountVolume(c *check.C) { - d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--mount", "type=volume,source=foo,target=/foo", "busybox", "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) - id := strings.TrimSpace(out) - - var tasks []swarm.Task - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) - return len(tasks) > 0, nil - }, checker.Equals, true) - - task := tasks[0] - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - if task.NodeID == "" || task.Status.ContainerStatus.ContainerID == "" { - task = d.getTask(c, task.ID) - } - return task.NodeID != "" && task.Status.ContainerStatus.ContainerID 
!= "", nil - }, checker.Equals, true) - - out, err = s.nodeCmd(c, task.NodeID, "inspect", "--format", "{{json .Mounts}}", task.Status.ContainerStatus.ContainerID) - c.Assert(err, checker.IsNil, check.Commentf(out)) - - var mounts []types.MountPoint - c.Assert(json.Unmarshal([]byte(out), &mounts), checker.IsNil) - c.Assert(mounts, checker.HasLen, 1) - - c.Assert(mounts[0].Name, checker.Equals, "foo") - c.Assert(mounts[0].Destination, checker.Equals, "/foo") - c.Assert(mounts[0].RW, checker.Equals, true) -} diff --git a/integration-cli/docker_cli_service_health_test.go b/integration-cli/docker_cli_service_health_test.go deleted file mode 100644 index fb32070827..0000000000 --- a/integration-cli/docker_cli_service_health_test.go +++ /dev/null @@ -1,191 +0,0 @@ -// +build !windows - -package main - -import ( - "strconv" - "strings" - - "github.com/docker/docker/daemon/cluster/executor/container" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types/swarm" - "github.com/go-check/check" -) - -// start a service, and then make its task unhealthy during running -// finally, unhealthy task should be detected and killed -func (s *DockerSwarmSuite) TestServiceHealthRun(c *check.C) { - testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows - - d := s.AddDaemon(c, true, true) - - // build image with health-check - // note: use `daemon.buildImageWithOut` to build, do not use `buildImage` to build - imageName := "testhealth" - _, _, err := d.buildImageWithOut(imageName, - `FROM busybox - RUN touch /status - HEALTHCHECK --interval=1s --timeout=1s --retries=1\ - CMD cat /status`, - true) - c.Check(err, check.IsNil) - - serviceName := "healthServiceRun" - out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) - id := strings.TrimSpace(out) - - var tasks []swarm.Task - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) - return tasks, nil - }, checker.HasLen, 1) - - task := tasks[0] - - // wait for task to start - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) - return task.Status.State, nil - }, checker.Equals, swarm.TaskStateStarting) - containerID := task.Status.ContainerStatus.ContainerID - - // wait for container to be healthy - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID) - return strings.TrimSpace(out), nil - }, checker.Equals, "healthy") - - // make it fail - d.Cmd("exec", containerID, "rm", "/status") - // wait for container to be unhealthy - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - out, _ := d.Cmd("inspect", "--format={{.State.Health.Status}}", containerID) - return strings.TrimSpace(out), nil - }, checker.Equals, "unhealthy") - - // Task should be terminated - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) - return task.Status.State, nil - }, checker.Equals, swarm.TaskStateFailed) - - if !strings.Contains(task.Status.Err, container.ErrContainerUnhealthy.Error()) { - c.Fatal("unhealthy task exits because of other error") - } -} - -// start a service whose task is unhealthy at beginning -// its tasks should be blocked 
in starting stage, until health check is passed -func (s *DockerSwarmSuite) TestServiceHealthStart(c *check.C) { - testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows - - d := s.AddDaemon(c, true, true) - - // service started from this image won't pass health check - imageName := "testhealth" - _, _, err := d.buildImageWithOut(imageName, - `FROM busybox - HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ - CMD cat /status`, - true) - c.Check(err, check.IsNil) - - serviceName := "healthServiceStart" - out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) - id := strings.TrimSpace(out) - - var tasks []swarm.Task - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) - return tasks, nil - }, checker.HasLen, 1) - - task := tasks[0] - - // wait for task to start - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) - return task.Status.State, nil - }, checker.Equals, swarm.TaskStateStarting) - - containerID := task.Status.ContainerStatus.ContainerID - - // wait for health check to work - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - out, _ := d.Cmd("inspect", "--format={{.State.Health.FailingStreak}}", containerID) - failingStreak, _ := strconv.Atoi(strings.TrimSpace(out)) - return failingStreak, nil - }, checker.GreaterThan, 0) - - // task should be blocked at starting status - task = d.getTask(c, task.ID) - c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) - - // make it healthy - d.Cmd("exec", containerID, "touch", "/status") - - // Task should be at running status - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) - return task.Status.State, nil - }, checker.Equals, swarm.TaskStateRunning) -} - -// start a service whose task is unhealthy at beginning -// its tasks should be blocked in starting stage, until health check is passed -func (s *DockerSwarmSuite) TestServiceHealthUpdate(c *check.C) { - testRequires(c, DaemonIsLinux) // busybox doesn't work on Windows - - d := s.AddDaemon(c, true, true) - - // service started from this image won't pass health check - imageName := "testhealth" - _, _, err := d.buildImageWithOut(imageName, - `FROM busybox - HEALTHCHECK --interval=1s --timeout=1s --retries=1024\ - CMD cat /status`, - true) - c.Check(err, check.IsNil) - - serviceName := "healthServiceStart" - out, err := d.Cmd("service", "create", "--name", serviceName, imageName, "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) - id := strings.TrimSpace(out) - - var tasks []swarm.Task - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - tasks = d.getServiceTasks(c, id) - return tasks, nil - }, checker.HasLen, 1) - - task := tasks[0] - - // wait for task to start - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) - return task.Status.State, nil - }, checker.Equals, swarm.TaskStateStarting) - - containerID := task.Status.ContainerStatus.ContainerID - - // wait for health check to work - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - out, _ := d.Cmd("inspect", 
"--format={{.State.Health.FailingStreak}}", containerID) - failingStreak, _ := strconv.Atoi(strings.TrimSpace(out)) - return failingStreak, nil - }, checker.GreaterThan, 0) - - // task should be blocked at starting status - task = d.getTask(c, task.ID) - c.Assert(task.Status.State, check.Equals, swarm.TaskStateStarting) - - // make it healthy - d.Cmd("exec", containerID, "touch", "/status") - // Task should be at running status - waitAndAssert(c, defaultReconciliationTimeout, func(c *check.C) (interface{}, check.CommentInterface) { - task = d.getTask(c, task.ID) - return task.Status.State, nil - }, checker.Equals, swarm.TaskStateRunning) -} diff --git a/integration-cli/docker_cli_service_update_test.go b/integration-cli/docker_cli_service_update_test.go deleted file mode 100644 index 829ffdddb1..0000000000 --- a/integration-cli/docker_cli_service_update_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types/swarm" - "github.com/go-check/check" -) - -func (s *DockerSwarmSuite) TestServiceUpdatePort(c *check.C) { - d := s.AddDaemon(c, true, true) - - serviceName := "TestServiceUpdatePort" - serviceArgs := append([]string{"create", "--name", serviceName, "-p", "8080:8081", defaultSleepImage}, defaultSleepCommand...) - - // Create a service with a port mapping of 8080:8081. - out, err := d.Cmd("service", serviceArgs...) - c.Assert(err, checker.IsNil) - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 1) - - // Update the service: changed the port mapping from 8080:8081 to 8082:8083. - _, err = d.Cmd("service", "update", "--publish-add", "8082:8083", "--publish-rm", "8081", serviceName) - c.Assert(err, checker.IsNil) - - // Inspect the service and verify port mapping - expected := []swarm.PortConfig{ - { - Protocol: "tcp", - PublishedPort: 8082, - TargetPort: 8083, - }, - } - - out, err = d.Cmd("service", "inspect", "--format", "{{ json .Spec.EndpointSpec.Ports }}", serviceName) - c.Assert(err, checker.IsNil) - - var portConfig []swarm.PortConfig - if err := json.Unmarshal([]byte(out), &portConfig); err != nil { - c.Fatalf("invalid JSON in inspect result: %v (%s)", err, out) - } - c.Assert(portConfig, checker.DeepEquals, expected) -} - -func (s *DockerSwarmSuite) TestServiceUpdateLabel(c *check.C) { - d := s.AddDaemon(c, true, true) - out, err := d.Cmd("service", "create", "--name=test", "busybox", "top") - c.Assert(err, checker.IsNil, check.Commentf(out)) - service := d.getService(c, "test") - c.Assert(service.Spec.Labels, checker.HasLen, 0) - - // add label to empty set - out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") - c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") - c.Assert(service.Spec.Labels, checker.HasLen, 1) - c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") - - // add label to non-empty set - out, err = d.Cmd("service", "update", "test", "--label-add", "foo2=bar") - c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") - c.Assert(service.Spec.Labels, checker.HasLen, 2) - c.Assert(service.Spec.Labels["foo2"], checker.Equals, "bar") - - out, err = d.Cmd("service", "update", "test", "--label-rm", "foo2") - c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") - c.Assert(service.Spec.Labels, checker.HasLen, 1) - c.Assert(service.Spec.Labels["foo2"], checker.Equals, "") 
- - out, err = d.Cmd("service", "update", "test", "--label-rm", "foo") - c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") - c.Assert(service.Spec.Labels, checker.HasLen, 0) - c.Assert(service.Spec.Labels["foo"], checker.Equals, "") - - // now make sure we can add again - out, err = d.Cmd("service", "update", "test", "--label-add", "foo=bar") - c.Assert(err, checker.IsNil, check.Commentf(out)) - service = d.getService(c, "test") - c.Assert(service.Spec.Labels, checker.HasLen, 1) - c.Assert(service.Spec.Labels["foo"], checker.Equals, "bar") -} diff --git a/integration-cli/docker_cli_sni_test.go b/integration-cli/docker_cli_sni_test.go deleted file mode 100644 index fb896d52d5..0000000000 --- a/integration-cli/docker_cli_sni_test.go +++ /dev/null @@ -1,44 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "log" - "net/http" - "net/http/httptest" - "net/url" - "os/exec" - "strings" - - "github.com/go-check/check" -) - -func (s *DockerSuite) TestClientSetsTLSServerName(c *check.C) { - c.Skip("Flakey test") - // there may be more than one hit to the server for each registry request - serverNameReceived := []string{} - var serverName string - - virtualHostServer := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - serverNameReceived = append(serverNameReceived, r.TLS.ServerName) - })) - defer virtualHostServer.Close() - // discard TLS handshake errors written by default to os.Stderr - virtualHostServer.Config.ErrorLog = log.New(ioutil.Discard, "", 0) - - u, err := url.Parse(virtualHostServer.URL) - c.Assert(err, check.IsNil) - hostPort := u.Host - serverName = strings.Split(hostPort, ":")[0] - - repoName := fmt.Sprintf("%v/dockercli/image:latest", hostPort) - cmd := exec.Command(dockerBinary, "pull", repoName) - cmd.Run() - - // check that the fake server was hit at least once - c.Assert(len(serverNameReceived) > 0, check.Equals, true) - // check that for each hit the right server name was received - for _, item := range serverNameReceived { - c.Check(item, check.Equals, serverName) - } -} diff --git a/integration-cli/docker_cli_stack_test.go b/integration-cli/docker_cli_stack_test.go deleted file mode 100644 index 6b4de8e73a..0000000000 --- a/integration-cli/docker_cli_stack_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build experimental - -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSwarmSuite) TestStackRemove(c *check.C) { - d := s.AddDaemon(c, true, true) - - stackArgs := append([]string{"remove", "UNKNOWN_STACK"}) - - out, err := d.Cmd("stack", stackArgs...) - c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") -} - -func (s *DockerSwarmSuite) TestStackTasks(c *check.C) { - d := s.AddDaemon(c, true, true) - - stackArgs := append([]string{"ps", "UNKNOWN_STACK"}) - - out, err := d.Cmd("stack", stackArgs...) 
- c.Assert(err, checker.IsNil) - c.Assert(out, check.Equals, "Nothing found in stack: UNKNOWN_STACK\n") -} diff --git a/integration-cli/docker_cli_start_test.go b/integration-cli/docker_cli_start_test.go deleted file mode 100644 index 1c6a3cf2ae..0000000000 --- a/integration-cli/docker_cli_start_test.go +++ /dev/null @@ -1,187 +0,0 @@ -package main - -import ( - "fmt" - "os/exec" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// Regression test for https://github.com/docker/docker/issues/7843 -func (s *DockerSuite) TestStartAttachReturnsOnError(c *check.C) { - // Windows does not support link - testRequires(c, DaemonIsLinux) - dockerCmd(c, "run", "--name", "test", "busybox") - - // Expect this to fail because the above container is stopped, this is what we want - out, _, err := dockerCmdWithError("run", "--name", "test2", "--link", "test:test", "busybox") - // err shouldn't be nil because container test2 try to link to stopped container - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - - ch := make(chan error) - go func() { - // Attempt to start attached to the container that won't start - // This should return an error immediately since the container can't be started - if out, _, err := dockerCmdWithError("start", "-a", "test2"); err == nil { - ch <- fmt.Errorf("Expected error but got none:\n%s", out) - } - close(ch) - }() - - select { - case err := <-ch: - c.Assert(err, check.IsNil) - case <-time.After(5 * time.Second): - c.Fatalf("Attach did not exit properly") - } -} - -// gh#8555: Exit code should be passed through when using start -a -func (s *DockerSuite) TestStartAttachCorrectExitCode(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _, _ := dockerCmdWithStdoutStderr(c, "run", "-d", "busybox", "sh", "-c", "sleep 2; exit 1") - out = strings.TrimSpace(out) - - // make sure the container has exited before trying the "start -a" - dockerCmd(c, "wait", out) - - startOut, exitCode, err := dockerCmdWithError("start", "-a", out) - // start command should fail - c.Assert(err, checker.NotNil, check.Commentf("startOut: %s", startOut)) - // start -a did not respond with proper exit code - c.Assert(exitCode, checker.Equals, 1, check.Commentf("startOut: %s", startOut)) - -} - -func (s *DockerSuite) TestStartAttachSilent(c *check.C) { - name := "teststartattachcorrectexitcode" - dockerCmd(c, "run", "--name", name, "busybox", "echo", "test") - - // make sure the container has exited before trying the "start -a" - dockerCmd(c, "wait", name) - - startOut, _ := dockerCmd(c, "start", "-a", name) - // start -a produced unexpected output - c.Assert(startOut, checker.Equals, "test\n") -} - -func (s *DockerSuite) TestStartRecordError(c *check.C) { - // TODO Windows CI: Requires further porting work. Should be possible. 
-	testRequires(c, DaemonIsLinux)
-	// when the container runs successfully, we should not have state.Error
-	dockerCmd(c, "run", "-d", "-p", "9999:9999", "--name", "test", "busybox", "top")
-	stateErr := inspectField(c, "test", "State.Error")
-	// Expected to not have state error
-	c.Assert(stateErr, checker.Equals, "")
-
-	// Expect this to fail and record an error because of the port conflict
-	out, _, err := dockerCmdWithError("run", "-d", "--name", "test2", "-p", "9999:9999", "busybox", "top")
-	// err shouldn't be nil because docker run will fail
-	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
-
-	stateErr = inspectField(c, "test2", "State.Error")
-	c.Assert(stateErr, checker.Contains, "port is already allocated")
-
-	// Expect the conflict to be resolved when we stop the initial container
-	dockerCmd(c, "stop", "test")
-	dockerCmd(c, "start", "test2")
-	stateErr = inspectField(c, "test2", "State.Error")
-	// Expected to not have state error but got one
-	c.Assert(stateErr, checker.Equals, "")
-}
-
-func (s *DockerSuite) TestStartPausedContainer(c *check.C) {
-	// Windows does not support pausing containers
-	testRequires(c, DaemonIsLinux)
-	defer unpauseAllContainers()
-
-	dockerCmd(c, "run", "-d", "--name", "testing", "busybox", "top")
-
-	dockerCmd(c, "pause", "testing")
-
-	out, _, err := dockerCmdWithError("start", "testing")
-	// an error should have been shown that you cannot start a paused container
-	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
-	// the error message should advise using unpause instead
-	c.Assert(out, checker.Contains, "Cannot start a paused container, try unpause instead.")
-}
-
-func (s *DockerSuite) TestStartMultipleContainers(c *check.C) {
-	// Windows does not support --link
-	testRequires(c, DaemonIsLinux)
-	// run a container named 'parent' and create two containers linked to `parent`
-	dockerCmd(c, "run", "-d", "--name", "parent", "busybox", "top")
-
-	for _, container := range []string{"child_first", "child_second"} {
-		dockerCmd(c, "create", "--name", container, "--link", "parent:parent", "busybox", "top")
-	}
-
-	// stop 'parent' container
-	dockerCmd(c, "stop", "parent")
-
-	out := inspectField(c, "parent", "State.Running")
-	// Container should be stopped
-	c.Assert(out, checker.Equals, "false")
-
-	// start all three containers; `child_first` starts first and should fail,
-	// then 'parent' starts, followed by 'child_second'
-	expOut := "Cannot link to a non running container"
-	expErr := "failed to start containers: [child_first]"
-	out, _, err := dockerCmdWithError("start", "child_first", "parent", "child_second")
-	// err shouldn't be nil because start will fail
-	c.Assert(err, checker.NotNil, check.Commentf("out: %s", out))
-	// output does not correspond to what was expected
-	if !(strings.Contains(out, expOut) || strings.Contains(err.Error(), expErr)) {
-		c.Fatalf("Expected out: %v with err: %v but got out: %v with err: %v", expOut, expErr, out, err)
-	}
-
-	for container, expected := range map[string]string{"parent": "true", "child_first": "false", "child_second": "true"} {
-		out := inspectField(c, container, "State.Running")
-		// Container running state wrong
-		c.Assert(out, checker.Equals, expected)
-	}
-}
-
-func (s *DockerSuite) TestStartAttachMultipleContainers(c *check.C) {
-	// run multiple containers to test
-	for _, container := range []string{"test1", "test2", "test3"} {
-		runSleepingContainer(c, "--name", container)
-	}
-
-	// stop all the containers
- for _, container := range []string{"test1", "test2", "test3"} { - dockerCmd(c, "stop", container) - } - - // test start and attach multiple containers at once, expected error - for _, option := range []string{"-a", "-i", "-ai"} { - out, _, err := dockerCmdWithError("start", option, "test1", "test2", "test3") - // err shouldn't be nil because start will fail - c.Assert(err, checker.NotNil, check.Commentf("out: %s", out)) - // output does not correspond to what was expected - c.Assert(out, checker.Contains, "You cannot start and attach multiple containers at once.") - } - - // confirm the state of all the containers be stopped - for container, expected := range map[string]string{"test1": "false", "test2": "false", "test3": "false"} { - out := inspectField(c, container, "State.Running") - // Container running state wrong - c.Assert(out, checker.Equals, expected) - } -} - -// Test case for #23716 -func (s *DockerSuite) TestStartAttachWithRename(c *check.C) { - testRequires(c, DaemonIsLinux) - dockerCmd(c, "create", "-t", "--name", "before", "busybox") - go func() { - c.Assert(waitRun("before"), checker.IsNil) - dockerCmd(c, "rename", "before", "after") - dockerCmd(c, "stop", "--time=2", "after") - }() - _, stderr, _, _ := runCommandWithStdoutStderr(exec.Command(dockerBinary, "start", "-a", "before")) - c.Assert(stderr, checker.Not(checker.Contains), "No such container") -} diff --git a/integration-cli/docker_cli_stats_test.go b/integration-cli/docker_cli_stats_test.go deleted file mode 100644 index 5cb1a3ea02..0000000000 --- a/integration-cli/docker_cli_stats_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package main - -import ( - "bufio" - "os/exec" - "regexp" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestStatsNoStream(c *check.C) { - // Windows does not support stats - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - id := strings.TrimSpace(out) - c.Assert(waitRun(id), checker.IsNil) - - statsCmd := exec.Command(dockerBinary, "stats", "--no-stream", id) - type output struct { - out []byte - err error - } - - ch := make(chan output) - go func() { - out, err := statsCmd.Output() - ch <- output{out, err} - }() - - select { - case outerr := <-ch: - c.Assert(outerr.err, checker.IsNil, check.Commentf("Error running stats: %v", outerr.err)) - c.Assert(string(outerr.out), checker.Contains, id) //running container wasn't present in output - case <-time.After(3 * time.Second): - statsCmd.Process.Kill() - c.Fatalf("stats did not return immediately when not streaming") - } -} - -func (s *DockerSuite) TestStatsContainerNotFound(c *check.C) { - // Windows does not support stats - testRequires(c, DaemonIsLinux) - - out, _, err := dockerCmdWithError("stats", "notfound") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats, got %q instead", out)) - - out, _, err = dockerCmdWithError("stats", "--no-stream", "notfound") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "No such container: notfound", check.Commentf("Expected to fail on not found container stats with --no-stream, got %q instead", out)) -} - -func (s *DockerSuite) TestStatsAllRunningNoStream(c *check.C) { - // Windows does not support stats - testRequires(c, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - id1 := strings.TrimSpace(out)[:12] - c.Assert(waitRun(id1), 
check.IsNil) - out, _ = dockerCmd(c, "run", "-d", "busybox", "top") - id2 := strings.TrimSpace(out)[:12] - c.Assert(waitRun(id2), check.IsNil) - out, _ = dockerCmd(c, "run", "-d", "busybox", "top") - id3 := strings.TrimSpace(out)[:12] - c.Assert(waitRun(id3), check.IsNil) - dockerCmd(c, "stop", id3) - - out, _ = dockerCmd(c, "stats", "--no-stream") - if !strings.Contains(out, id1) || !strings.Contains(out, id2) { - c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) - } - if strings.Contains(out, id3) { - c.Fatalf("Did not expect %s in stats, got %s", id3, out) - } - - // check output contains real data, but not all zeros - reg, _ := regexp.Compile("[1-9]+") - // split output with "\n", outLines[1] is id2's output - // outLines[2] is id1's output - outLines := strings.Split(out, "\n") - // check stat result of id2 contains real data - realData := reg.Find([]byte(outLines[1][12:])) - c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) - // check stat result of id1 contains real data - realData = reg.Find([]byte(outLines[2][12:])) - c.Assert(realData, checker.NotNil, check.Commentf("stat result are empty: %s", out)) -} - -func (s *DockerSuite) TestStatsAllNoStream(c *check.C) { - // Windows does not support stats - testRequires(c, DaemonIsLinux) - - out, _ := dockerCmd(c, "run", "-d", "busybox", "top") - id1 := strings.TrimSpace(out)[:12] - c.Assert(waitRun(id1), check.IsNil) - dockerCmd(c, "stop", id1) - out, _ = dockerCmd(c, "run", "-d", "busybox", "top") - id2 := strings.TrimSpace(out)[:12] - c.Assert(waitRun(id2), check.IsNil) - - out, _ = dockerCmd(c, "stats", "--all", "--no-stream") - if !strings.Contains(out, id1) || !strings.Contains(out, id2) { - c.Fatalf("Expected stats output to contain both %s and %s, got %s", id1, id2, out) - } - - // check output contains real data, but not all zeros - reg, _ := regexp.Compile("[1-9]+") - // split output with "\n", outLines[1] is id2's output - outLines := strings.Split(out, "\n") - // check stat result of id2 contains real data - realData := reg.Find([]byte(outLines[1][12:])) - c.Assert(realData, checker.NotNil, check.Commentf("stat result of %s is empty: %s", id2, out)) - // check stat result of id1 contains all zero - realData = reg.Find([]byte(outLines[2][12:])) - c.Assert(realData, checker.IsNil, check.Commentf("stat result of %s should be empty : %s", id1, out)) -} - -func (s *DockerSuite) TestStatsAllNewContainersAdded(c *check.C) { - // Windows does not support stats - testRequires(c, DaemonIsLinux) - - id := make(chan string) - addedChan := make(chan struct{}) - - runSleepingContainer(c, "-d") - statsCmd := exec.Command(dockerBinary, "stats") - stdout, err := statsCmd.StdoutPipe() - c.Assert(err, check.IsNil) - c.Assert(statsCmd.Start(), check.IsNil) - defer statsCmd.Process.Kill() - - go func() { - containerID := <-id - matchID := regexp.MustCompile(containerID) - - scanner := bufio.NewScanner(stdout) - for scanner.Scan() { - switch { - case matchID.MatchString(scanner.Text()): - close(addedChan) - return - } - } - }() - - out, _ := runSleepingContainer(c, "-d") - c.Assert(waitRun(strings.TrimSpace(out)), check.IsNil) - id <- strings.TrimSpace(out)[:12] - - select { - case <-time.After(30 * time.Second): - c.Fatal("failed to observe new container created added to stats") - case <-addedChan: - // ignore, done - } -} diff --git a/integration-cli/docker_cli_stop_test.go b/integration-cli/docker_cli_stop_test.go deleted file mode 100644 index 103d01374c..0000000000 --- 
a/integration-cli/docker_cli_stop_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package main - -import ( - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestStopContainerWithRestartPolicyAlways(c *check.C) { - dockerCmd(c, "run", "--name", "verifyRestart1", "-d", "--restart=always", "busybox", "false") - dockerCmd(c, "run", "--name", "verifyRestart2", "-d", "--restart=always", "busybox", "false") - - c.Assert(waitRun("verifyRestart1"), checker.IsNil) - c.Assert(waitRun("verifyRestart2"), checker.IsNil) - - dockerCmd(c, "stop", "verifyRestart1") - dockerCmd(c, "stop", "verifyRestart2") -} diff --git a/integration-cli/docker_cli_swarm_test.go b/integration-cli/docker_cli_swarm_test.go deleted file mode 100644 index bf9cfb72b4..0000000000 --- a/integration-cli/docker_cli_swarm_test.go +++ /dev/null @@ -1,222 +0,0 @@ -// +build !windows - -package main - -import ( - "io/ioutil" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types/swarm" - "github.com/go-check/check" -) - -func (s *DockerSwarmSuite) TestSwarmUpdate(c *check.C) { - d := s.AddDaemon(c, true, true) - - getSpec := func() swarm.Spec { - sw := d.getSwarm(c) - return sw.Spec - } - - out, err := d.Cmd("swarm", "update", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) - - spec := getSpec() - c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) - c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(11*time.Second)) - - // setting anything under 30m for cert-expiry is not allowed - out, err = d.Cmd("swarm", "update", "--cert-expiry", "15m") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "minimum certificate expiry time") - spec = getSpec() - c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) -} - -func (s *DockerSwarmSuite) TestSwarmInit(c *check.C) { - d := s.AddDaemon(c, false, false) - - getSpec := func() swarm.Spec { - sw := d.getSwarm(c) - return sw.Spec - } - - out, err := d.Cmd("swarm", "init", "--cert-expiry", "30h", "--dispatcher-heartbeat", "11s") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) - - spec := getSpec() - c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 30*time.Hour) - c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(11*time.Second)) - - c.Assert(d.Leave(true), checker.IsNil) - - out, err = d.Cmd("swarm", "init") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) - - spec = getSpec() - c.Assert(spec.CAConfig.NodeCertExpiry, checker.Equals, 90*24*time.Hour) - c.Assert(spec.Dispatcher.HeartbeatPeriod, checker.Equals, uint64(5*time.Second)) -} - -func (s *DockerSwarmSuite) TestSwarmInitIPv6(c *check.C) { - testRequires(c, IPv6) - d1 := s.AddDaemon(c, false, false) - out, err := d1.Cmd("swarm", "init", "--listen-addr", "::1") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) - - d2 := s.AddDaemon(c, false, false) - out, err = d2.Cmd("swarm", "join", "::1") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) - - out, err = d2.Cmd("info") - c.Assert(err, checker.IsNil, check.Commentf("out: %v", out)) - c.Assert(out, checker.Contains, "Swarm: active") -} - -func (s *DockerSwarmSuite) TestSwarmIncompatibleDaemon(c *check.C) { - // init swarm mode and stop a daemon - d := s.AddDaemon(c, true, true) - info, err := d.info() - c.Assert(err, checker.IsNil) - c.Assert(info.LocalNodeState, checker.Equals, 
swarm.LocalNodeStateActive) - c.Assert(d.Stop(), checker.IsNil) - - // start a daemon with --cluster-store and --cluster-advertise - err = d.Start("--cluster-store=consul://consuladdr:consulport/some/path", "--cluster-advertise=1.1.1.1:2375") - c.Assert(err, checker.NotNil) - content, _ := ioutil.ReadFile(d.logFile.Name()) - c.Assert(string(content), checker.Contains, "--cluster-store and --cluster-advertise daemon configurations are incompatible with swarm mode") - - // start a daemon with --live-restore - err = d.Start("--live-restore") - c.Assert(err, checker.NotNil) - content, _ = ioutil.ReadFile(d.logFile.Name()) - c.Assert(string(content), checker.Contains, "--live-restore daemon configuration is incompatible with swarm mode") - // restart for teardown - c.Assert(d.Start(), checker.IsNil) -} - -// Test case for #24090 -func (s *DockerSwarmSuite) TestSwarmNodeListHostname(c *check.C) { - d := s.AddDaemon(c, true, true) - - // The first line should contain "HOSTNAME" - out, err := d.Cmd("node", "ls") - c.Assert(err, checker.IsNil) - c.Assert(strings.Split(out, "\n")[0], checker.Contains, "HOSTNAME") -} - -// Test case for #24270 -func (s *DockerSwarmSuite) TestSwarmServiceListFilter(c *check.C) { - d := s.AddDaemon(c, true, true) - - name1 := "redis-cluster-md5" - name2 := "redis-cluster" - name3 := "other-cluster" - out, err := d.Cmd("service", "create", "--name", name1, "busybox", "top") - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - - out, err = d.Cmd("service", "create", "--name", name2, "busybox", "top") - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - - out, err = d.Cmd("service", "create", "--name", name3, "busybox", "top") - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - - filter1 := "name=redis-cluster-md5" - filter2 := "name=redis-cluster" - - // We search checker.Contains with `name+" "` to prevent prefix only. 
- out, err = d.Cmd("service", "ls", "--filter", filter1) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, name1+" ") - c.Assert(out, checker.Not(checker.Contains), name2+" ") - c.Assert(out, checker.Not(checker.Contains), name3+" ") - - out, err = d.Cmd("service", "ls", "--filter", filter2) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, name1+" ") - c.Assert(out, checker.Contains, name2+" ") - c.Assert(out, checker.Not(checker.Contains), name3+" ") - - out, err = d.Cmd("service", "ls") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, name1+" ") - c.Assert(out, checker.Contains, name2+" ") - c.Assert(out, checker.Contains, name3+" ") -} - -func (s *DockerSwarmSuite) TestSwarmNodeListFilter(c *check.C) { - d := s.AddDaemon(c, true, true) - - out, err := d.Cmd("node", "inspect", "--format", "{{ .Description.Hostname }}", "self") - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - name := strings.TrimSpace(out) - - filter := "name=" + name[:4] - - out, err = d.Cmd("node", "ls", "--filter", filter) - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, name) - - out, err = d.Cmd("node", "ls", "--filter", "name=none") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Not(checker.Contains), name) -} - -func (s *DockerSwarmSuite) TestSwarmNodeTaskListFilter(c *check.C) { - d := s.AddDaemon(c, true, true) - - name := "redis-cluster-md5" - out, err := d.Cmd("service", "create", "--name", name, "--replicas=3", "busybox", "top") - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - - // make sure task has been deployed. - waitAndAssert(c, defaultReconciliationTimeout, d.checkActiveContainerCount, checker.Equals, 3) - - filter := "name=redis-cluster" - - out, err = d.Cmd("node", "ps", "--filter", filter, "self") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Contains, name+".1") - c.Assert(out, checker.Contains, name+".2") - c.Assert(out, checker.Contains, name+".3") - - out, err = d.Cmd("node", "ps", "--filter", "name=none", "self") - c.Assert(err, checker.IsNil) - c.Assert(out, checker.Not(checker.Contains), name+".1") - c.Assert(out, checker.Not(checker.Contains), name+".2") - c.Assert(out, checker.Not(checker.Contains), name+".3") -} - -// Test case for #25375 -func (s *DockerSwarmSuite) TestSwarmPublishAdd(c *check.C) { - d := s.AddDaemon(c, true, true) - - name := "top" - out, err := d.Cmd("service", "create", "--name", name, "--label", "x=y", "busybox", "top") - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Not(checker.Equals), "") - - out, err = d.Cmd("service", "update", "--publish-add", "80:80", name) - c.Assert(err, checker.IsNil) - - out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", name) - c.Assert(err, checker.IsNil) - - out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", "80:80", "--publish-add", "80:20", name) - c.Assert(err, checker.NotNil) - - out, err = d.cmdRetryOutOfSequence("service", "update", "--publish-add", "80:20", name) - c.Assert(err, checker.IsNil) - - out, err = d.Cmd("service", "inspect", "--format", "{{ .Spec.EndpointSpec.Ports }}", name) - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(out), checker.Equals, "[{ tcp 20 80}]") -} diff --git a/integration-cli/docker_cli_tag_test.go b/integration-cli/docker_cli_tag_test.go deleted file mode 100644 index b7d2b1dfe6..0000000000 --- 
a/integration-cli/docker_cli_tag_test.go +++ /dev/null @@ -1,225 +0,0 @@ -package main - -import ( - "fmt" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/pkg/stringutils" - "github.com/go-check/check" -) - -// tagging a named image in a new unprefixed repo should work -func (s *DockerSuite) TestTagUnprefixedRepoByName(c *check.C) { - // Don't attempt to pull on Windows as not in hub. It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - - dockerCmd(c, "tag", "busybox:latest", "testfoobarbaz") -} - -// tagging an image by ID in a new unprefixed repo should work -func (s *DockerSuite) TestTagUnprefixedRepoByID(c *check.C) { - imageID := inspectField(c, "busybox", "Id") - dockerCmd(c, "tag", imageID, "testfoobarbaz") -} - -// ensure we don't allow the use of invalid repository names; these tag operations should fail -func (s *DockerSuite) TestTagInvalidUnprefixedRepo(c *check.C) { - invalidRepos := []string{"fo$z$", "Foo@3cc", "Foo$3", "Foo*3", "Fo^3", "Foo!3", "F)xcz(", "fo%asd", "FOO/bar"} - - for _, repo := range invalidRepos { - out, _, err := dockerCmdWithError("tag", "busybox", repo) - c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repo, out)) - } -} - -// ensure we don't allow the use of invalid tags; these tag operations should fail -func (s *DockerSuite) TestTagInvalidPrefixedRepo(c *check.C) { - longTag := stringutils.GenerateRandomAlphaOnlyString(121) - - invalidTags := []string{"repo:fo$z$", "repo:Foo@3cc", "repo:Foo$3", "repo:Foo*3", "repo:Fo^3", "repo:Foo!3", "repo:%goodbye", "repo:#hashtagit", "repo:F)xcz(", "repo:-foo", "repo:..", longTag} - - for _, repotag := range invalidTags { - out, _, err := dockerCmdWithError("tag", "busybox", repotag) - c.Assert(err, checker.NotNil, check.Commentf("tag busybox %v should have failed : %v", repotag, out)) - } -} - -// ensure we allow the use of valid tags -func (s *DockerSuite) TestTagValidPrefixedRepo(c *check.C) { - // Don't attempt to pull on Windows as not in hub. It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - - validRepos := []string{"fooo/bar", "fooaa/test", "foooo:t", "HOSTNAME.DOMAIN.COM:443/foo/bar"} - - for _, repo := range validRepos { - _, _, err := dockerCmdWithError("tag", "busybox:latest", repo) - if err != nil { - c.Errorf("tag busybox %v should have worked: %s", repo, err) - continue - } - deleteImages(repo) - } -} - -// tag an image with an existed tag name without -f option should work -func (s *DockerSuite) TestTagExistedNameWithoutForce(c *check.C) { - // Don't attempt to pull on Windows as not in hub. It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - - dockerCmd(c, "tag", "busybox:latest", "busybox:test") -} - -func (s *DockerSuite) TestTagWithPrefixHyphen(c *check.C) { - // Don't attempt to pull on Windows as not in hub. 
It's installed
-	// as an image through .ensure-frozen-images-windows
-	if daemonPlatform != "windows" {
-		if err := pullImageIfNotExist("busybox:latest"); err != nil {
-			c.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
-		}
-	}
-	// test repository name beginning with '-'
-	out, _, err := dockerCmdWithError("tag", "busybox:latest", "-busybox:test")
-	c.Assert(err, checker.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tagging a name beginning with '-' should fail"))
-
-	// test namespace name beginning with '-'
-	out, _, err = dockerCmdWithError("tag", "busybox:latest", "-test/busybox:test")
-	c.Assert(err, checker.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tagging a name beginning with '-' should fail"))
-
-	// test index name beginning with '-'
-	out, _, err = dockerCmdWithError("tag", "busybox:latest", "-index:5000/busybox:test")
-	c.Assert(err, checker.NotNil, check.Commentf(out))
-	c.Assert(out, checker.Contains, "Error parsing reference", check.Commentf("tagging a name beginning with '-' should fail"))
-}
-
-// ensure tagging using official names works
-// ensure all tags result in the same name
-func (s *DockerSuite) TestTagOfficialNames(c *check.C) {
-	names := []string{
-		"docker.io/busybox",
-		"index.docker.io/busybox",
-		"library/busybox",
-		"docker.io/library/busybox",
-		"index.docker.io/library/busybox",
-	}
-
-	for _, name := range names {
-		out, exitCode, err := dockerCmdWithError("tag", "busybox:latest", name+":latest")
-		if err != nil || exitCode != 0 {
-			c.Errorf("tag busybox %v should have worked: %s, %s", name, err, out)
-			continue
-		}
-
-		// ensure we don't have multiple tag names.
-		out, _, err = dockerCmdWithError("images")
-		if err != nil {
-			c.Errorf("listing images failed with errors: %v, %s", err, out)
-		} else if strings.Contains(out, name) {
-			c.Errorf("images should not have listed '%s'", name)
-			deleteImages(name + ":latest")
-		}
-	}
-
-	for _, name := range names {
-		_, exitCode, err := dockerCmdWithError("tag", name+":latest", "fooo/bar:latest")
-		if err != nil || exitCode != 0 {
-			c.Errorf("tag %v fooo/bar should have worked: %s", name, err)
-			continue
-		}
-		deleteImages("fooo/bar:latest")
-	}
-}
-
-// ensure tags cannot match digests
-func (s *DockerSuite) TestTagMatchesDigest(c *check.C) {
-	// Don't attempt to pull on Windows as not in hub. It's installed
-	// as an image through .ensure-frozen-images-windows
-	if daemonPlatform != "windows" {
-		if err := pullImageIfNotExist("busybox:latest"); err != nil {
-			c.Fatal("couldn't find the busybox:latest image locally and failed to pull it")
-		}
-	}
-	digest := "busybox@sha256:abcdef76720241213f5303bda7704ec4c2ef75613173910a56fb1b6e20251507"
-	// test setting tag fails
-	_, _, err := dockerCmdWithError("tag", "busybox:latest", digest)
-	if err == nil {
-		c.Fatal("tagging with a digest should have failed")
-	}
-	// check that no new image matches the digest
-	_, _, err = dockerCmdWithError("inspect", digest)
-	if err == nil {
-		c.Fatal("inspecting by digest should have failed")
-	}
-}
-
-func (s *DockerSuite) TestTagInvalidRepoName(c *check.C) {
-	// Don't attempt to pull on Windows as not in hub.
It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - - // test setting tag fails - _, _, err := dockerCmdWithError("tag", "busybox:latest", "sha256:sometag") - if err == nil { - c.Fatal("tagging with image named \"sha256\" should have failed") - } -} - -// ensure tags cannot create ambiguity with image ids -func (s *DockerSuite) TestTagTruncationAmbiguity(c *check.C) { - //testRequires(c, DaemonIsLinux) - // Don't attempt to pull on Windows as not in hub. It's installed - // as an image through .ensure-frozen-images-windows - if daemonPlatform != "windows" { - if err := pullImageIfNotExist("busybox:latest"); err != nil { - c.Fatal("couldn't find the busybox:latest image locally and failed to pull it") - } - } - imageID, err := buildImage("notbusybox:latest", - `FROM busybox - MAINTAINER dockerio`, - true) - if err != nil { - c.Fatal(err) - } - truncatedImageID := stringid.TruncateID(imageID) - truncatedTag := fmt.Sprintf("notbusybox:%s", truncatedImageID) - - id := inspectField(c, truncatedTag, "Id") - - // Ensure inspect by image id returns image for image id - c.Assert(id, checker.Equals, imageID) - c.Logf("Built image: %s", imageID) - - // test setting tag fails - _, _, err = dockerCmdWithError("tag", "busybox:latest", truncatedTag) - if err != nil { - c.Fatalf("Error tagging with an image id: %s", err) - } - - id = inspectField(c, truncatedTag, "Id") - - // Ensure id is imageID and not busybox:latest - c.Assert(id, checker.Not(checker.Equals), imageID) -} diff --git a/integration-cli/docker_cli_top_test.go b/integration-cli/docker_cli_top_test.go deleted file mode 100644 index e0865b9212..0000000000 --- a/integration-cli/docker_cli_top_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestTopMultipleArgs(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-i", "-d", "busybox", "top") - cleanedContainerID := strings.TrimSpace(out) - - out, _ = dockerCmd(c, "top", cleanedContainerID, "-o", "pid") - c.Assert(out, checker.Contains, "PID", check.Commentf("did not see PID after top -o pid: %s", out)) -} - -func (s *DockerSuite) TestTopNonPrivileged(c *check.C) { - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-i", "-d", "busybox", "top") - cleanedContainerID := strings.TrimSpace(out) - - out1, _ := dockerCmd(c, "top", cleanedContainerID) - out2, _ := dockerCmd(c, "top", cleanedContainerID) - dockerCmd(c, "kill", cleanedContainerID) - - c.Assert(out1, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the first time")) - c.Assert(out2, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the second time")) -} - -func (s *DockerSuite) TestTopPrivileged(c *check.C) { - testRequires(c, DaemonIsLinux, NotUserNamespace) - out, _ := dockerCmd(c, "run", "--privileged", "-i", "-d", "busybox", "top") - cleanedContainerID := strings.TrimSpace(out) - - out1, _ := dockerCmd(c, "top", cleanedContainerID) - out2, _ := dockerCmd(c, "top", cleanedContainerID) - dockerCmd(c, "kill", cleanedContainerID) - - c.Assert(out1, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the first 
time")) - c.Assert(out2, checker.Contains, "top", check.Commentf("top should've listed `top` in the process list, but failed the second time")) -} diff --git a/integration-cli/docker_cli_update_test.go b/integration-cli/docker_cli_update_test.go deleted file mode 100644 index 188030ffbb..0000000000 --- a/integration-cli/docker_cli_update_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package main - -import ( - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestUpdateRestartPolicy(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "--restart=on-failure:3", "busybox", "sh", "-c", "sleep 1 && false") - timeout := 60 * time.Second - if daemonPlatform == "windows" { - timeout = 180 * time.Second - } - - id := strings.TrimSpace(string(out)) - - // update restart policy to on-failure:5 - dockerCmd(c, "update", "--restart=on-failure:5", id) - - err := waitExited(id, timeout) - c.Assert(err, checker.IsNil) - - count := inspectField(c, id, "RestartCount") - c.Assert(count, checker.Equals, "5") - - maximumRetryCount := inspectField(c, id, "HostConfig.RestartPolicy.MaximumRetryCount") - c.Assert(maximumRetryCount, checker.Equals, "5") -} diff --git a/integration-cli/docker_cli_update_unix_test.go b/integration-cli/docker_cli_update_unix_test.go deleted file mode 100644 index 186c6fe845..0000000000 --- a/integration-cli/docker_cli_update_unix_test.go +++ /dev/null @@ -1,217 +0,0 @@ -// +build !windows - -package main - -import ( - "encoding/json" - "fmt" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/engine-api/types" - "github.com/go-check/check" -) - -func (s *DockerSuite) TestUpdateRunningContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - - name := "test-update-container" - dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") - dockerCmd(c, "update", "-m", "500M", name) - - c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") - - file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" - out, _ := dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") -} - -func (s *DockerSuite) TestUpdateRunningContainerWithRestart(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - - name := "test-update-container" - dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "top") - dockerCmd(c, "update", "-m", "500M", name) - dockerCmd(c, "restart", name) - - c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") - - file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" - out, _ := dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") -} - -func (s *DockerSuite) TestUpdateStoppedContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - - name := "test-update-container" - file := "/sys/fs/cgroup/memory/memory.limit_in_bytes" - dockerCmd(c, "run", "--name", name, "-m", "300M", "busybox", "cat", file) - dockerCmd(c, "update", "-m", "500M", name) - - c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "524288000") - - out, _ := dockerCmd(c, "start", "-a", name) - c.Assert(strings.TrimSpace(out), checker.Equals, "524288000") -} - -func (s *DockerSuite) TestUpdatePausedContainer(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, cpuShare) - - name := "test-update-container" 
- dockerCmd(c, "run", "-d", "--name", name, "--cpu-shares", "1000", "busybox", "top") - dockerCmd(c, "pause", name) - dockerCmd(c, "update", "--cpu-shares", "500", name) - - c.Assert(inspectField(c, name, "HostConfig.CPUShares"), checker.Equals, "500") - - dockerCmd(c, "unpause", name) - file := "/sys/fs/cgroup/cpu/cpu.shares" - out, _ := dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "500") -} - -func (s *DockerSuite) TestUpdateWithUntouchedFields(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - testRequires(c, cpuShare) - - name := "test-update-container" - dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "--cpu-shares", "800", "busybox", "top") - dockerCmd(c, "update", "-m", "500M", name) - - // Update memory and not touch cpus, `cpuset.cpus` should still have the old value - out := inspectField(c, name, "HostConfig.CPUShares") - c.Assert(out, check.Equals, "800") - - file := "/sys/fs/cgroup/cpu/cpu.shares" - out, _ = dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "800") -} - -func (s *DockerSuite) TestUpdateContainerInvalidValue(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - - name := "test-update-container" - dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") - out, _, err := dockerCmdWithError("update", "-m", "2M", name) - c.Assert(err, check.NotNil) - expected := "Minimum memory limit allowed is 4MB" - c.Assert(out, checker.Contains, expected) -} - -func (s *DockerSuite) TestUpdateContainerWithoutFlags(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - - name := "test-update-container" - dockerCmd(c, "run", "-d", "--name", name, "-m", "300M", "busybox", "true") - _, _, err := dockerCmdWithError("update", name) - c.Assert(err, check.NotNil) -} - -func (s *DockerSuite) TestUpdateKernelMemory(c *check.C) { - testRequires(c, DaemonIsLinux, kernelMemorySupport) - - name := "test-update-container" - dockerCmd(c, "run", "-d", "--name", name, "--kernel-memory", "50M", "busybox", "top") - _, _, err := dockerCmdWithError("update", "--kernel-memory", "100M", name) - // Update kernel memory to a running container is not allowed. 
- c.Assert(err, check.NotNil) - - // Update kernel memory to a running container with failure should not change HostConfig - c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "52428800") - - dockerCmd(c, "pause", name) - _, _, err = dockerCmdWithError("update", "--kernel-memory", "100M", name) - c.Assert(err, check.NotNil) - c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "52428800") - dockerCmd(c, "unpause", name) - - dockerCmd(c, "stop", name) - dockerCmd(c, "update", "--kernel-memory", "100M", name) - dockerCmd(c, "start", name) - - c.Assert(inspectField(c, name, "HostConfig.KernelMemory"), checker.Equals, "104857600") - - file := "/sys/fs/cgroup/memory/memory.kmem.limit_in_bytes" - out, _ := dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "104857600") -} - -func (s *DockerSuite) TestUpdateSwapMemoryOnly(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - testRequires(c, swapMemorySupport) - - name := "test-update-container" - dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") - dockerCmd(c, "update", "--memory-swap", "600M", name) - - c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") - - file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" - out, _ := dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") -} - -func (s *DockerSuite) TestUpdateInvalidSwapMemory(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - testRequires(c, swapMemorySupport) - - name := "test-update-container" - dockerCmd(c, "run", "-d", "--name", name, "--memory", "300M", "--memory-swap", "500M", "busybox", "top") - _, _, err := dockerCmdWithError("update", "--memory-swap", "200M", name) - // Update invalid swap memory should fail. 
- // This will pass docker config validation, but failed at kernel validation - c.Assert(err, check.NotNil) - - // Update invalid swap memory with failure should not change HostConfig - c.Assert(inspectField(c, name, "HostConfig.Memory"), checker.Equals, "314572800") - c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "524288000") - - dockerCmd(c, "update", "--memory-swap", "600M", name) - - c.Assert(inspectField(c, name, "HostConfig.MemorySwap"), checker.Equals, "629145600") - - file := "/sys/fs/cgroup/memory/memory.memsw.limit_in_bytes" - out, _ := dockerCmd(c, "exec", name, "cat", file) - c.Assert(strings.TrimSpace(out), checker.Equals, "629145600") -} - -func (s *DockerSuite) TestUpdateStats(c *check.C) { - testRequires(c, DaemonIsLinux) - testRequires(c, memoryLimitSupport) - testRequires(c, cpuCfsQuota) - name := "foo" - dockerCmd(c, "run", "-d", "-ti", "--name", name, "-m", "500m", "busybox") - - c.Assert(waitRun(name), checker.IsNil) - - getMemLimit := func(id string) uint64 { - resp, body, err := sockRequestRaw("GET", fmt.Sprintf("/containers/%s/stats?stream=false", id), nil, "") - c.Assert(err, checker.IsNil) - c.Assert(resp.Header.Get("Content-Type"), checker.Equals, "application/json") - - var v *types.Stats - err = json.NewDecoder(body).Decode(&v) - c.Assert(err, checker.IsNil) - body.Close() - - return v.MemoryStats.Limit - } - preMemLimit := getMemLimit(name) - - dockerCmd(c, "update", "--cpu-quota", "2000", name) - - curMemLimit := getMemLimit(name) - - c.Assert(preMemLimit, checker.Equals, curMemLimit) - -} diff --git a/integration-cli/docker_cli_userns_test.go b/integration-cli/docker_cli_userns_test.go deleted file mode 100644 index f8b3f77b61..0000000000 --- a/integration-cli/docker_cli_userns_test.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build !windows - -package main - -import ( - "fmt" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/system" - "github.com/go-check/check" -) - -// user namespaces test: run daemon with remapped root setting -// 1. validate uid/gid maps are set properly -// 2. 
verify that files created are owned by remapped root -func (s *DockerDaemonSuite) TestDaemonUserNamespaceRootSetting(c *check.C) { - testRequires(c, DaemonIsLinux, SameHostDaemon, UserNamespaceInKernel) - - c.Assert(s.d.StartWithBusybox("--userns-remap", "default"), checker.IsNil) - - tmpDir, err := ioutil.TempDir("", "userns") - c.Assert(err, checker.IsNil) - - defer os.RemoveAll(tmpDir) - - // we need to find the uid and gid of the remapped root from the daemon's root dir info - uidgid := strings.Split(filepath.Base(s.d.root), ".") - c.Assert(uidgid, checker.HasLen, 2, check.Commentf("Should have gotten uid/gid strings from root dirname: %s", filepath.Base(s.d.root))) - uid, err := strconv.Atoi(uidgid[0]) - c.Assert(err, checker.IsNil, check.Commentf("Can't parse uid")) - gid, err := strconv.Atoi(uidgid[1]) - c.Assert(err, checker.IsNil, check.Commentf("Can't parse gid")) - - // writable by the remapped root UID/GID pair - c.Assert(os.Chown(tmpDir, uid, gid), checker.IsNil) - - out, err := s.d.Cmd("run", "-d", "--name", "userns", "-v", tmpDir+":/goofy", "busybox", "sh", "-c", "touch /goofy/testfile; top") - c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) - user := s.findUser(c, "userns") - c.Assert(uidgid[0], checker.Equals, user) - - pid, err := s.d.Cmd("inspect", "--format='{{.State.Pid}}'", "userns") - c.Assert(err, checker.IsNil, check.Commentf("Could not inspect running container: out: %q", pid)) - // check the uid and gid maps for the PID to ensure root is remapped - // (cmd = cat /proc//uid_map | grep -E '0\s+9999\s+1') - out, rc1, err := runCommandPipelineWithOutput( - exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/uid_map"), - exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", uid))) - c.Assert(rc1, checker.Equals, 0, check.Commentf("Didn't match uid_map: output: %s", out)) - - out, rc2, err := runCommandPipelineWithOutput( - exec.Command("cat", "/proc/"+strings.TrimSpace(pid)+"/gid_map"), - exec.Command("grep", "-E", fmt.Sprintf("0[[:space:]]+%d[[:space:]]+", gid))) - c.Assert(rc2, checker.Equals, 0, check.Commentf("Didn't match gid_map: output: %s", out)) - - // check that the touched file is owned by remapped uid:gid - stat, err := system.Stat(filepath.Join(tmpDir, "testfile")) - c.Assert(err, checker.IsNil) - c.Assert(stat.UID(), checker.Equals, uint32(uid), check.Commentf("Touched file not owned by remapped root UID")) - c.Assert(stat.GID(), checker.Equals, uint32(gid), check.Commentf("Touched file not owned by remapped root GID")) - - // use host usernamespace - out, err = s.d.Cmd("run", "-d", "--name", "userns_skip", "--userns", "host", "busybox", "sh", "-c", "touch /goofy/testfile; top") - c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) - user = s.findUser(c, "userns_skip") - // userns are skipped, user is root - c.Assert(user, checker.Equals, "root") -} - -// findUser finds the uid or name of the user of the first process that runs in a container -func (s *DockerDaemonSuite) findUser(c *check.C, container string) string { - out, err := s.d.Cmd("top", container) - c.Assert(err, checker.IsNil, check.Commentf("Output: %s", out)) - rows := strings.Split(out, "\n") - if len(rows) < 2 { - // No process rows founds - c.FailNow() - } - return strings.Fields(rows[1])[0] -} diff --git a/integration-cli/docker_cli_v2_only_test.go b/integration-cli/docker_cli_v2_only_test.go deleted file mode 100644 index 889936a062..0000000000 --- a/integration-cli/docker_cli_v2_only_test.go +++ /dev/null @@ -1,125 +0,0 @@ -package main - 
-import ( - "fmt" - "io/ioutil" - "net/http" - "os" - - "github.com/go-check/check" -) - -func makefile(contents string) (string, func(), error) { - cleanup := func() { - - } - - f, err := ioutil.TempFile(".", "tmp") - if err != nil { - return "", cleanup, err - } - err = ioutil.WriteFile(f.Name(), []byte(contents), os.ModePerm) - if err != nil { - return "", cleanup, err - } - - cleanup = func() { - err := os.Remove(f.Name()) - if err != nil { - fmt.Println("Error removing tmpfile") - } - } - return f.Name(), cleanup, nil - -} - -// TestV2Only ensures that a daemon in v2-only mode does not -// attempt to contact any v1 registry endpoints. -func (s *DockerRegistrySuite) TestV2Only(c *check.C) { - reg, err := newTestRegistry(c) - c.Assert(err, check.IsNil) - - reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(404) - }) - - reg.registerHandler("/v1/.*", func(w http.ResponseWriter, r *http.Request) { - c.Fatal("V1 registry contacted") - }) - - repoName := fmt.Sprintf("%s/busybox", reg.hostport) - - err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=true") - c.Assert(err, check.IsNil) - - dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) - c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup() - - s.d.Cmd("build", "--file", dockerfileName, ".") - - s.d.Cmd("run", repoName) - s.d.Cmd("login", "-u", "richard", "-p", "testtest", "-e", "testuser@testdomain.com", reg.hostport) - s.d.Cmd("tag", "busybox", repoName) - s.d.Cmd("push", repoName) - s.d.Cmd("pull", repoName) -} - -// TestV1 starts a daemon in 'normal' mode -// and ensure v1 endpoints are hit for the following operations: -// login, push, pull, build & run -func (s *DockerRegistrySuite) TestV1(c *check.C) { - reg, err := newTestRegistry(c) - c.Assert(err, check.IsNil) - - v2Pings := 0 - reg.registerHandler("/v2/", func(w http.ResponseWriter, r *http.Request) { - v2Pings++ - // V2 ping 404 causes fallback to v1 - w.WriteHeader(404) - }) - - v1Pings := 0 - reg.registerHandler("/v1/_ping", func(w http.ResponseWriter, r *http.Request) { - v1Pings++ - }) - - v1Logins := 0 - reg.registerHandler("/v1/users/", func(w http.ResponseWriter, r *http.Request) { - v1Logins++ - }) - - v1Repo := 0 - reg.registerHandler("/v1/repositories/busybox/", func(w http.ResponseWriter, r *http.Request) { - v1Repo++ - }) - - reg.registerHandler("/v1/repositories/busybox/images", func(w http.ResponseWriter, r *http.Request) { - v1Repo++ - }) - - err = s.d.Start("--insecure-registry", reg.hostport, "--disable-legacy-registry=false") - c.Assert(err, check.IsNil) - - dockerfileName, cleanup, err := makefile(fmt.Sprintf("FROM %s/busybox", reg.hostport)) - c.Assert(err, check.IsNil, check.Commentf("Unable to create test dockerfile")) - defer cleanup() - - s.d.Cmd("build", "--file", dockerfileName, ".") - c.Assert(v1Repo, check.Equals, 1, check.Commentf("Expected v1 repository access after build")) - - repoName := fmt.Sprintf("%s/busybox", reg.hostport) - s.d.Cmd("run", repoName) - c.Assert(v1Repo, check.Equals, 2, check.Commentf("Expected v1 repository access after run")) - - s.d.Cmd("login", "-u", "richard", "-p", "testtest", reg.hostport) - c.Assert(v1Logins, check.Equals, 1, check.Commentf("Expected v1 login attempt")) - - s.d.Cmd("tag", "busybox", repoName) - s.d.Cmd("push", repoName) - - c.Assert(v1Repo, check.Equals, 2) - - s.d.Cmd("pull", repoName) - c.Assert(v1Repo, check.Equals, 3, check.Commentf("Expected v1 
repository access after pull"))
-}
diff --git a/integration-cli/docker_cli_version_test.go b/integration-cli/docker_cli_version_test.go
deleted file mode 100644
index 7672beb732..0000000000
--- a/integration-cli/docker_cli_version_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package main
-
-import (
-	"strings"
-
-	"github.com/docker/docker/pkg/integration/checker"
-	"github.com/go-check/check"
-)
-
-// ensure docker version works
-func (s *DockerSuite) TestVersionEnsureSucceeds(c *check.C) {
-	out, _ := dockerCmd(c, "version")
-	stringsToCheck := map[string]int{
-		"Client:":       1,
-		"Server:":       1,
-		" Version:":     2,
-		" API version:": 2,
-		" Go version:":  2,
-		" Git commit:":  2,
-		" OS/Arch:":     2,
-		" Built:":       2,
-	}
-
-	for k, v := range stringsToCheck {
-		c.Assert(strings.Count(out, k), checker.Equals, v, check.Commentf("The count of %v in %s does not match expected", k, out))
-	}
-}
-
-// ensure the Windows daemon returns the correct platform string
-func (s *DockerSuite) TestVersionPlatform_w(c *check.C) {
-	testRequires(c, DaemonIsWindows)
-	testVersionPlatform(c, "windows/amd64")
-}
-
-// ensure the Linux daemon returns the correct platform string
-func (s *DockerSuite) TestVersionPlatform_l(c *check.C) {
-	testRequires(c, DaemonIsLinux)
-	testVersionPlatform(c, "linux")
-}
-
-func testVersionPlatform(c *check.C, platform string) {
-	out, _ := dockerCmd(c, "version")
-	expected := "OS/Arch: " + platform
-
-	split := strings.Split(out, "\n")
-	c.Assert(len(split) >= 14, checker.Equals, true, check.Commentf("got %d lines from version", len(split)))
-
-	// Verify the second 'OS/Arch' matches the platform. Experimental has
-	// more lines of output than 'regular'
-	bFound := false
-	for i := 14; i < len(split); i++ {
-		if strings.Contains(split[i], expected) {
-			bFound = true
-			break
-		}
-	}
-	c.Assert(bFound, checker.Equals, true, check.Commentf("Could not find server '%s' in '%s'", expected, out))
-}
diff --git a/integration-cli/docker_cli_volume_test.go b/integration-cli/docker_cli_volume_test.go
deleted file mode 100644
index 8835e1d60f..0000000000
--- a/integration-cli/docker_cli_volume_test.go
+++ /dev/null
@@ -1,301 +0,0 @@
-package main
-
-import (
-	"os/exec"
-	"strings"
-
-	"github.com/docker/docker/pkg/integration/checker"
-	"github.com/go-check/check"
-)
-
-func (s *DockerSuite) TestVolumeCliCreate(c *check.C) {
-	dockerCmd(c, "volume", "create")
-
-	_, err := runCommand(exec.Command(dockerBinary, "volume", "create", "-d", "nosuchdriver"))
-	c.Assert(err, check.Not(check.IsNil))
-
-	out, _ := dockerCmd(c, "volume", "create", "--name=test")
-	name := strings.TrimSpace(out)
-	c.Assert(name, check.Equals, "test")
-}
-
-func (s *DockerSuite) TestVolumeCliCreateOptionConflict(c *check.C) {
-	dockerCmd(c, "volume", "create", "--name=test")
-	out, _, err := dockerCmdWithError("volume", "create", "--name", "test", "--driver", "nosuchdriver")
-	c.Assert(err, check.NotNil, check.Commentf("volume create should fail when the name is already in use by another driver"))
-	c.Assert(out, checker.Contains, "A volume named test already exists")
-
-	out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Driver }}", "test")
-	_, _, err = dockerCmdWithError("volume", "create", "--name", "test", "--driver", strings.TrimSpace(out))
-	c.Assert(err, check.IsNil)
-}
-
-func (s *DockerSuite) TestVolumeCliInspect(c *check.C) {
-	c.Assert(
-		exec.Command(dockerBinary, "volume", "inspect", "doesntexist").Run(),
-		check.Not(check.IsNil),
-		check.Commentf("volume inspect should error on non-existent volume"),
-	)
-
-	out, _ :=
dockerCmd(c, "volume", "create") - name := strings.TrimSpace(out) - out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", name) - c.Assert(strings.TrimSpace(out), check.Equals, name) - - dockerCmd(c, "volume", "create", "--name", "test") - out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Name }}", "test") - c.Assert(strings.TrimSpace(out), check.Equals, "test") -} - -func (s *DockerSuite) TestVolumeCliInspectMulti(c *check.C) { - dockerCmd(c, "volume", "create", "--name", "test1") - dockerCmd(c, "volume", "create", "--name", "test2") - dockerCmd(c, "volume", "create", "--name", "not-shown") - - out, _, err := dockerCmdWithError("volume", "inspect", "--format='{{ .Name }}'", "test1", "test2", "doesntexist", "not-shown") - c.Assert(err, checker.NotNil) - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 3, check.Commentf("\n%s", out)) - - c.Assert(out, checker.Contains, "test1") - c.Assert(out, checker.Contains, "test2") - c.Assert(out, checker.Contains, "Error: No such volume: doesntexist") - c.Assert(out, checker.Not(checker.Contains), "not-shown") -} - -func (s *DockerSuite) TestVolumeCliLs(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - out, _ := dockerCmd(c, "volume", "create", "--name", "aaa") - - dockerCmd(c, "volume", "create", "--name", "test") - - dockerCmd(c, "volume", "create", "--name", "soo") - dockerCmd(c, "run", "-v", "soo:"+prefix+"/foo", "busybox", "ls", "/") - - out, _ = dockerCmd(c, "volume", "ls") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) - - assertVolList(c, out, []string{"aaa", "soo", "test"}) -} - -// assertVolList checks volume retrieved with ls command -// equals to expected volume list -// note: out should be `volume ls [option]` result -func assertVolList(c *check.C, out string, expectVols []string) { - lines := strings.Split(out, "\n") - var volList []string - for _, line := range lines[1 : len(lines)-1] { - volFields := strings.Fields(line) - // wrap all volume name in volList - volList = append(volList, volFields[1]) - } - - // volume ls should contains all expected volumes - c.Assert(volList, checker.DeepEquals, expectVols) -} - -func (s *DockerSuite) TestVolumeCliLsFilterDangling(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - dockerCmd(c, "volume", "create", "--name", "testnotinuse1") - dockerCmd(c, "volume", "create", "--name", "testisinuse1") - dockerCmd(c, "volume", "create", "--name", "testisinuse2") - - // Make sure both "created" (but not started), and started - // containers are included in reference counting - dockerCmd(c, "run", "--name", "volume-test1", "-v", "testisinuse1:"+prefix+"/foo", "busybox", "true") - dockerCmd(c, "create", "--name", "volume-test2", "-v", "testisinuse2:"+prefix+"/foo", "busybox", "true") - - out, _ := dockerCmd(c, "volume", "ls") - - // No filter, all volumes should show - c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) - c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) - c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=false") - - // Explicitly disabling dangling - c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) - 
c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) - c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=true") - - // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output - c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) - c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) - c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=1") - // Filter "dangling" volumes; only "dangling" (unused) volumes should be in the output, dangling also accept 1 - c.Assert(out, checker.Contains, "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) - c.Assert(out, check.Not(checker.Contains), "testisinuse1\n", check.Commentf("volume 'testisinuse1' in output, but not expected")) - c.Assert(out, check.Not(checker.Contains), "testisinuse2\n", check.Commentf("volume 'testisinuse2' in output, but not expected")) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "dangling=0") - // dangling=0 is same as dangling=false case - c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) - c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("expected volume 'testisinuse1' in output")) - c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "name=testisin") - c.Assert(out, check.Not(checker.Contains), "testnotinuse1\n", check.Commentf("expected volume 'testnotinuse1' in output")) - c.Assert(out, checker.Contains, "testisinuse1\n", check.Commentf("execpeted volume 'testisinuse1' in output")) - c.Assert(out, checker.Contains, "testisinuse2\n", check.Commentf("expected volume 'testisinuse2' in output")) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=invalidDriver") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=local") - outArr = strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) - - out, _ = dockerCmd(c, "volume", "ls", "--filter", "driver=loc") - outArr = strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 4, check.Commentf("\n%s", out)) - -} - -func (s *DockerSuite) TestVolumeCliLsErrorWithInvalidFilterName(c *check.C) { - out, _, err := dockerCmdWithError("volume", "ls", "-f", "FOO=123") - c.Assert(err, checker.NotNil) - c.Assert(out, checker.Contains, "Invalid filter") -} - -func (s *DockerSuite) TestVolumeCliLsWithIncorrectFilterValue(c *check.C) { - out, _, err := dockerCmdWithError("volume", "ls", "-f", "dangling=invalid") - c.Assert(err, check.NotNil) - c.Assert(out, checker.Contains, "Invalid filter") -} - -func (s *DockerSuite) TestVolumeCliRm(c *check.C) { - prefix, _ := getPrefixAndSlashFromDaemonPlatform() - out, _ := dockerCmd(c, "volume", "create") - id := strings.TrimSpace(out) - - dockerCmd(c, "volume", "create", "--name", "test") - dockerCmd(c, "volume", "rm", 
id) - dockerCmd(c, "volume", "rm", "test") - - out, _ = dockerCmd(c, "volume", "ls") - outArr := strings.Split(strings.TrimSpace(out), "\n") - c.Assert(len(outArr), check.Equals, 1, check.Commentf("%s\n", out)) - - volumeID := "testing" - dockerCmd(c, "run", "-v", volumeID+":"+prefix+"/foo", "--name=test", "busybox", "sh", "-c", "echo hello > /foo/bar") - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "volume", "rm", "testing")) - c.Assert( - err, - check.Not(check.IsNil), - check.Commentf("Should not be able to remove volume that is in use by a container\n%s", out)) - - out, _ = dockerCmd(c, "run", "--volumes-from=test", "--name=test2", "busybox", "sh", "-c", "cat /foo/bar") - c.Assert(strings.TrimSpace(out), check.Equals, "hello") - dockerCmd(c, "rm", "-fv", "test2") - dockerCmd(c, "volume", "inspect", volumeID) - dockerCmd(c, "rm", "-f", "test") - - out, _ = dockerCmd(c, "run", "--name=test2", "-v", volumeID+":"+prefix+"/foo", "busybox", "sh", "-c", "cat /foo/bar") - c.Assert(strings.TrimSpace(out), check.Equals, "hello", check.Commentf("volume data was removed")) - dockerCmd(c, "rm", "test2") - - dockerCmd(c, "volume", "rm", volumeID) - c.Assert( - exec.Command("volume", "rm", "doesntexist").Run(), - check.Not(check.IsNil), - check.Commentf("volume rm should fail with non-existent volume"), - ) -} - -func (s *DockerSuite) TestVolumeCliNoArgs(c *check.C) { - out, _ := dockerCmd(c, "volume") - // no args should produce the cmd usage output - usage := "Usage: docker volume COMMAND" - c.Assert(out, checker.Contains, usage) - - // invalid arg should error and show the command usage on stderr - _, stderr, _, err := runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "somearg")) - c.Assert(err, check.NotNil, check.Commentf(stderr)) - c.Assert(stderr, checker.Contains, usage) - - // invalid flag should error and show the flag error and cmd usage - _, stderr, _, err = runCommandWithStdoutStderr(exec.Command(dockerBinary, "volume", "--no-such-flag")) - c.Assert(err, check.NotNil, check.Commentf(stderr)) - c.Assert(stderr, checker.Contains, usage) - c.Assert(stderr, checker.Contains, "unknown flag: --no-such-flag") -} - -func (s *DockerSuite) TestVolumeCliInspectTmplError(c *check.C) { - out, _ := dockerCmd(c, "volume", "create") - name := strings.TrimSpace(out) - - out, exitCode, err := dockerCmdWithError("volume", "inspect", "--format='{{ .FooBar }}'", name) - c.Assert(err, checker.NotNil, check.Commentf("Output: %s", out)) - c.Assert(exitCode, checker.Equals, 1, check.Commentf("Output: %s", out)) - c.Assert(out, checker.Contains, "Template parsing error") -} - -func (s *DockerSuite) TestVolumeCliCreateWithOpts(c *check.C) { - testRequires(c, DaemonIsLinux) - - dockerCmd(c, "volume", "create", "-d", "local", "--name", "test", "--opt=type=tmpfs", "--opt=device=tmpfs", "--opt=o=size=1m,uid=1000") - out, _ := dockerCmd(c, "run", "-v", "test:/foo", "busybox", "mount") - - mounts := strings.Split(out, "\n") - var found bool - for _, m := range mounts { - if strings.Contains(m, "/foo") { - found = true - info := strings.Fields(m) - // tmpfs on type tmpfs (rw,relatime,size=1024k,uid=1000) - c.Assert(info[0], checker.Equals, "tmpfs") - c.Assert(info[2], checker.Equals, "/foo") - c.Assert(info[4], checker.Equals, "tmpfs") - c.Assert(info[5], checker.Contains, "uid=1000") - c.Assert(info[5], checker.Contains, "size=1024k") - } - } - c.Assert(found, checker.Equals, true) -} - -func (s *DockerSuite) TestVolumeCliCreateLabel(c *check.C) { - testVol := "testvolcreatelabel" - 
testLabel := "foo" - testValue := "bar" - - out, _, err := dockerCmdWithError("volume", "create", "--label", testLabel+"="+testValue, "--name", testVol) - c.Assert(err, check.IsNil) - - out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+testLabel+" }}", testVol) - c.Assert(strings.TrimSpace(out), check.Equals, testValue) -} - -func (s *DockerSuite) TestVolumeCliCreateLabelMultiple(c *check.C) { - testVol := "testvolcreatelabel" - - testLabels := map[string]string{ - "foo": "bar", - "baz": "foo", - } - - args := []string{ - "volume", - "create", - "--name", - testVol, - } - - for k, v := range testLabels { - args = append(args, "--label", k+"="+v) - } - - out, _, err := dockerCmdWithError(args...) - c.Assert(err, check.IsNil) - - for k, v := range testLabels { - out, _ = dockerCmd(c, "volume", "inspect", "--format={{ .Labels."+k+" }}", testVol) - c.Assert(strings.TrimSpace(out), check.Equals, v) - } -} diff --git a/integration-cli/docker_cli_wait_test.go b/integration-cli/docker_cli_wait_test.go deleted file mode 100644 index 961aef5525..0000000000 --- a/integration-cli/docker_cli_wait_test.go +++ /dev/null @@ -1,97 +0,0 @@ -package main - -import ( - "bytes" - "os/exec" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// non-blocking wait with 0 exit code -func (s *DockerSuite) TestWaitNonBlockedExitZero(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "true") - containerID := strings.TrimSpace(out) - - err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) - c.Assert(err, checker.IsNil) //Container should have stopped by now - - out, _ = dockerCmd(c, "wait", containerID) - c.Assert(strings.TrimSpace(out), checker.Equals, "0", check.Commentf("failed to set up container, %v", out)) - -} - -// blocking wait with 0 exit code -func (s *DockerSuite) TestWaitBlockedExitZero(c *check.C) { - // Windows busybox does not support trap in this way, not sleep with sub-second - // granularity. It will always exit 0x40010004. 
- testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 0' TERM; while true; do usleep 10; done") - containerID := strings.TrimSpace(out) - - c.Assert(waitRun(containerID), checker.IsNil) - - chWait := make(chan string) - go func() { - chWait <- "" - out, _, _ := runCommandWithOutput(exec.Command(dockerBinary, "wait", containerID)) - chWait <- out - }() - - <-chWait // make sure the goroutine is started - time.Sleep(100 * time.Millisecond) - dockerCmd(c, "stop", containerID) - - select { - case status := <-chWait: - c.Assert(strings.TrimSpace(status), checker.Equals, "0", check.Commentf("expected exit 0, got %s", status)) - case <-time.After(2 * time.Second): - c.Fatal("timeout waiting for `docker wait` to exit") - } - -} - -// non-blocking wait with random exit code -func (s *DockerSuite) TestWaitNonBlockedExitRandom(c *check.C) { - out, _ := dockerCmd(c, "run", "-d", "busybox", "sh", "-c", "exit 99") - containerID := strings.TrimSpace(out) - - err := waitInspect(containerID, "{{.State.Running}}", "false", 30*time.Second) - c.Assert(err, checker.IsNil) // Container should have stopped by now - out, _ = dockerCmd(c, "wait", containerID) - c.Assert(strings.TrimSpace(out), checker.Equals, "99", check.Commentf("failed to set up container, %v", out)) - -} - -// blocking wait with random exit code -func (s *DockerSuite) TestWaitBlockedExitRandom(c *check.C) { - // Cannot run on Windows as Windows busybox does not support trap in this way. - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "run", "-d", "busybox", "/bin/sh", "-c", "trap 'exit 99' TERM; while true; do usleep 10; done") - containerID := strings.TrimSpace(out) - c.Assert(waitRun(containerID), checker.IsNil) - - chWait := make(chan error) - waitCmd := exec.Command(dockerBinary, "wait", containerID) - waitCmdOut := bytes.NewBuffer(nil) - waitCmd.Stdout = waitCmdOut - c.Assert(waitCmd.Start(), checker.IsNil) - go func() { - chWait <- waitCmd.Wait() - }() - - dockerCmd(c, "stop", containerID) - - select { - case err := <-chWait: - c.Assert(err, checker.IsNil, check.Commentf(waitCmdOut.String())) - status, err := waitCmdOut.ReadString('\n') - c.Assert(err, checker.IsNil) - c.Assert(strings.TrimSpace(status), checker.Equals, "99", check.Commentf("expected exit 99, got %s", status)) - case <-time.After(2 * time.Second): - waitCmd.Process.Kill() - c.Fatal("timeout waiting for `docker wait` to exit") - } -} diff --git a/integration-cli/docker_deprecated_api_v124_test.go b/integration-cli/docker_deprecated_api_v124_test.go deleted file mode 100644 index 8e2823477c..0000000000 --- a/integration-cli/docker_deprecated_api_v124_test.go +++ /dev/null @@ -1,227 +0,0 @@ -// This file will be removed when we completely drop support for - passing HostConfig to container start API. 
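For context, "passing HostConfig on start" means a request of the following shape. This is a hedged standard-library sketch against the default unix socket; the container name and bind path are placeholders:

package main

import (
	"context"
	"fmt"
	"net"
	"net/http"
	"strings"
)

func main() {
	// Dial the docker unix socket regardless of the URL's host part.
	client := &http.Client{
		Transport: &http.Transport{
			DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
				return net.Dial("unix", "/var/run/docker.sock")
			},
		},
	}
	// The deprecated form: a HostConfig body POSTed to the start endpoint.
	body := strings.NewReader(`{"Binds": ["/aa:/bb"]}`)
	resp, err := client.Post("http://unix/v1.23/containers/mycontainer/start", "application/json", body)
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	// Newer daemons reject the body ("was deprecated since v1.10").
	fmt.Println("status:", resp.Status)
}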
- -package main - -import ( - "net/http" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func formatV123StartAPIURL(url string) string { - return "/v1.23" + url -} - -func (s *DockerSuite) TestDeprecatedContainerApiStartHostConfig(c *check.C) { - name := "test-deprecated-api-124" - dockerCmd(c, "create", "--name", name, "busybox") - config := map[string]interface{}{ - "Binds": []string{"/aa:/bb"}, - } - status, body, err := sockRequest("POST", "/containers/"+name+"/start", config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusBadRequest) - c.Assert(string(body), checker.Contains, "was deprecated since v1.10") -} - -func (s *DockerSuite) TestDeprecatedContainerApiStartVolumeBinds(c *check.C) { - // TODO Windows CI: Investigate further why this fails on Windows to Windows CI. - testRequires(c, DaemonIsLinux) - path := "/foo" - if daemonPlatform == "windows" { - path = `c:\foo` - } - name := "testing" - config := map[string]interface{}{ - "Image": "busybox", - "Volumes": map[string]struct{}{path: {}}, - } - - status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - bindPath := randomTmpDirPath("test", daemonPlatform) - config = map[string]interface{}{ - "Binds": []string{bindPath + ":" + path}, - } - status, _, err = sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - - pth, err := inspectMountSourceField(name, path) - c.Assert(err, checker.IsNil) - c.Assert(pth, checker.Equals, bindPath, check.Commentf("expected volume host path to be %s, got %s", bindPath, pth)) -} - -// Test for GH#10618 -func (s *DockerSuite) TestDeprecatedContainerApiStartDupVolumeBinds(c *check.C) { - // TODO Windows to Windows CI - Port this - testRequires(c, DaemonIsLinux) - name := "testdups" - config := map[string]interface{}{ - "Image": "busybox", - "Volumes": map[string]struct{}{"/tmp": {}}, - } - - status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusCreated) - - bindPath1 := randomTmpDirPath("test1", daemonPlatform) - bindPath2 := randomTmpDirPath("test2", daemonPlatform) - - config = map[string]interface{}{ - "Binds": []string{bindPath1 + ":/tmp", bindPath2 + ":/tmp"}, - } - status, body, err := sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusInternalServerError) - c.Assert(string(body), checker.Contains, "Duplicate mount point", check.Commentf("Expected failure due to duplicate bind mounts to same path, instead got: %q with error: %v", string(body), err)) -} - -func (s *DockerSuite) TestDeprecatedContainerApiStartVolumesFrom(c *check.C) { - // TODO Windows to Windows CI - Port this - testRequires(c, DaemonIsLinux) - volName := "voltst" - volPath := "/tmp" - - dockerCmd(c, "run", "--name", volName, "-v", volPath, "busybox") - - name := "TestContainerApiStartVolumesFrom" - config := map[string]interface{}{ - "Image": "busybox", - "Volumes": map[string]struct{}{volPath: {}}, - } - - status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/create?name="+name), config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, 
http.StatusCreated) - - config = map[string]interface{}{ - "VolumesFrom": []string{volName}, - } - status, _, err = sockRequest("POST", formatV123StartAPIURL("/containers/"+name+"/start"), config) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - - pth, err := inspectMountSourceField(name, volPath) - c.Assert(err, checker.IsNil) - pth2, err := inspectMountSourceField(volName, volPath) - c.Assert(err, checker.IsNil) - c.Assert(pth, checker.Equals, pth2, check.Commentf("expected volume host path to be %s, got %s", pth, pth2)) -} - -// #9981 - Allow a docker created volume (ie, one in /var/lib/docker/volumes) to be used to overwrite (via passing in Binds on api start) an existing volume -func (s *DockerSuite) TestDeprecatedPostContainerBindNormalVolume(c *check.C) { - // TODO Windows to Windows CI - Port this - testRequires(c, DaemonIsLinux) - dockerCmd(c, "create", "-v", "/foo", "--name=one", "busybox") - - fooDir, err := inspectMountSourceField("one", "/foo") - c.Assert(err, checker.IsNil) - - dockerCmd(c, "create", "-v", "/foo", "--name=two", "busybox") - - bindSpec := map[string][]string{"Binds": {fooDir + ":/foo"}} - status, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/two/start"), bindSpec) - c.Assert(err, checker.IsNil) - c.Assert(status, checker.Equals, http.StatusNoContent) - - fooDir2, err := inspectMountSourceField("two", "/foo") - c.Assert(err, checker.IsNil) - c.Assert(fooDir2, checker.Equals, fooDir, check.Commentf("expected volume path to be %s, got: %s", fooDir, fooDir2)) -} - -func (s *DockerSuite) TestDeprecatedStartWithTooLowMemoryLimit(c *check.C) { - // TODO Windows: Port once memory is supported - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "create", "busybox") - - containerID := strings.TrimSpace(out) - - config := `{ - "CpuShares": 100, - "Memory": 524287 - }` - - res, body, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") - c.Assert(err, checker.IsNil) - b, err2 := readBody(body) - c.Assert(err2, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusInternalServerError) - c.Assert(string(b), checker.Contains, "Minimum memory limit allowed is 4MB") -} - -// #14640 -func (s *DockerSuite) TestDeprecatedPostContainersStartWithoutLinksInHostConfig(c *check.C) { - // TODO Windows: Windows doesn't support supplying a hostconfig on start. - // An alternate test could be written to validate the negative testing aspect of this - testRequires(c, DaemonIsLinux) - name := "test-host-config-links" - dockerCmd(c, append([]string{"create", "--name", name, "busybox"}, defaultSleepCommand...)...) - - hc := inspectFieldJSON(c, name, "HostConfig") - config := `{"HostConfig":` + hc + `}` - - res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) - b.Close() -} - -// #14640 -func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfig(c *check.C) { - // TODO Windows: Windows doesn't support supplying a hostconfig on start. 
- // An alternate test could be written to validate the negative testing aspect of this - testRequires(c, DaemonIsLinux) - name := "test-host-config-links" - dockerCmd(c, "run", "--name", "foo", "-d", "busybox", "top") - dockerCmd(c, "create", "--name", name, "--link", "foo:bar", "busybox", "top") - - hc := inspectFieldJSON(c, name, "HostConfig") - config := `{"HostConfig":` + hc + `}` - - res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) - b.Close() -} - -// #14640 -func (s *DockerSuite) TestDeprecatedPostContainersStartWithLinksInHostConfigIdLinked(c *check.C) { - // Windows does not support links - testRequires(c, DaemonIsLinux) - name := "test-host-config-links" - out, _ := dockerCmd(c, "run", "--name", "link0", "-d", "busybox", "top") - id := strings.TrimSpace(out) - dockerCmd(c, "create", "--name", name, "--link", id, "busybox", "top") - - hc := inspectFieldJSON(c, name, "HostConfig") - config := `{"HostConfig":` + hc + `}` - - res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+name+"/start"), strings.NewReader(config), "application/json") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) - b.Close() -} - -func (s *DockerSuite) TestDeprecatedStartWithNilDNS(c *check.C) { - // TODO Windows: Add once DNS is supported - testRequires(c, DaemonIsLinux) - out, _ := dockerCmd(c, "create", "busybox") - containerID := strings.TrimSpace(out) - - config := `{"HostConfig": {"Dns": null}}` - - res, b, err := sockRequestRaw("POST", formatV123StartAPIURL("/containers/"+containerID+"/start"), strings.NewReader(config), "application/json") - c.Assert(err, checker.IsNil) - c.Assert(res.StatusCode, checker.Equals, http.StatusNoContent) - b.Close() - - dns := inspectFieldJSON(c, containerID, "HostConfig.Dns") - c.Assert(dns, checker.Equals, "[]") -} diff --git a/integration-cli/docker_deprecated_api_v124_unix_test.go b/integration-cli/docker_deprecated_api_v124_unix_test.go deleted file mode 100644 index 94ef9b1a00..0000000000 --- a/integration-cli/docker_deprecated_api_v124_unix_test.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !windows - -package main - -import ( - "fmt" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -// #19100 This is a deprecated feature test, it should be removed in Docker 1.12 -func (s *DockerNetworkSuite) TestDeprecatedDockerNetworkStartAPIWithHostconfig(c *check.C) { - netName := "test" - conName := "foo" - dockerCmd(c, "network", "create", netName) - dockerCmd(c, "create", "--name", conName, "busybox", "top") - - config := map[string]interface{}{ - "HostConfig": map[string]interface{}{ - "NetworkMode": netName, - }, - } - _, _, err := sockRequest("POST", formatV123StartAPIURL("/containers/"+conName+"/start"), config) - c.Assert(err, checker.IsNil) - c.Assert(waitRun(conName), checker.IsNil) - networks := inspectField(c, conName, "NetworkSettings.Networks") - c.Assert(networks, checker.Contains, netName, check.Commentf(fmt.Sprintf("Should contain '%s' network", netName))) - c.Assert(networks, checker.Not(checker.Contains), "bridge", check.Commentf("Should not contain 'bridge' network")) -} diff --git a/integration-cli/docker_experimental_network_test.go b/integration-cli/docker_experimental_network_test.go deleted file mode 100644 index f33dbd1c84..0000000000 --- 
a/integration-cli/docker_experimental_network_test.go +++ /dev/null @@ -1,591 +0,0 @@ -// +build experimental - -package main - -import ( - "os/exec" - "strings" - "time" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/parsers/kernel" - "github.com/go-check/check" -) - -var ( - MacvlanKernelSupport = testRequirement{ - func() bool { - const macvlanKernelVer = 3 // minimum macvlan kernel support - const macvlanMajorVer = 9 // minimum macvlan major kernel support - kv, err := kernel.GetKernelVersion() - if err != nil { - return false - } - // ensure Kernel version is >= v3.9 for macvlan support - if kv.Kernel < macvlanKernelVer || (kv.Kernel == macvlanKernelVer && kv.Major < macvlanMajorVer) { - return false - } - return true - }, - "kernel version failed to meet the minimum macvlan kernel requirement of 3.9", - } - IpvlanKernelSupport = testRequirement{ - func() bool { - const ipvlanKernelVer = 4 // minimum ipvlan kernel support - const ipvlanMajorVer = 2 // minimum ipvlan major kernel support - kv, err := kernel.GetKernelVersion() - if err != nil { - return false - } - // ensure Kernel version is >= v4.2 for ipvlan support - if kv.Kernel < ipvlanKernelVer || (kv.Kernel == ipvlanKernelVer && kv.Major < ipvlanMajorVer) { - return false - } - return true - }, - "kernel version failed to meet the minimum ipvlan kernel requirement of 4.2", - } -) - -func (s *DockerNetworkSuite) TestDockerNetworkMacvlanPersistance(c *check.C) { - // verify the driver automatically provisions the 802.1q link (dm-dummy0.60) - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm) - // master dummy interface 'dm' abbreviation represents 'docker macvlan' - master := "dm-dummy0" - // simulate the master link the vlan tagged subinterface parent link will use - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network specifying the desired sub-interface name - dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.60", "dm-persist") - assertNwIsAvailable(c, "dm-persist") - // Restart docker daemon to test the config has persisted to disk - s.d.Restart() - // verify network is recreated from persistence - assertNwIsAvailable(c, "dm-persist") - // cleanup the master interface that also collects the slave dev - deleteInterface(c, "dm-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanPersistance(c *check.C) { - // verify the driver automatically provisions the 802.1q link (di-dummy0.70) - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm) - // master dummy interface 'di' notation represents 'docker ipvlan' - master := "di-dummy0" - // simulate the master link the vlan tagged subinterface parent link will use - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network specifying the desired sub-interface name - dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.70", "di-persist") - assertNwIsAvailable(c, "di-persist") - // Restart docker daemon to test the config has persisted to disk - s.d.Restart() - // verify network is recreated from persistence - assertNwIsAvailable(c, "di-persist") - // cleanup the master interface that also collects the slave dev - deleteInterface(c, "di-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkMacvlanSubIntCreate(c *check.C) { - // verify the driver automatically provisions the 802.1q link (dm-dummy0.50) - 
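The 802.1q setup these tests simulate reduces to a handful of ip(8) invocations, wrapped further down by createMasterDummy and createVlanInterface. A standalone sketch (requires root; interface names follow the tests' own convention; not part of the deleted file):

package main

import (
	"fmt"
	"os/exec"
)

// run executes a single ip(8) command and surfaces its combined output on error.
func run(args ...string) error {
	out, err := exec.Command("ip", args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("ip %v: %v: %s", args, err, out)
	}
	return nil
}

func main() {
	for _, args := range [][]string{
		// ip link add dm-dummy0 type dummy; ip link set dm-dummy0 up
		{"link", "add", "dm-dummy0", "type", "dummy"},
		{"link", "set", "dm-dummy0", "up"},
		// ip link add link dm-dummy0 name dm-dummy0.50 type vlan id 50
		{"link", "add", "link", "dm-dummy0", "name", "dm-dummy0.50", "type", "vlan", "id", "50"},
		{"link", "set", "dm-dummy0.50", "up"},
	} {
		if err := run(args...); err != nil {
			fmt.Println(err)
			return
		}
	}
}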
testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm) - // master dummy interface 'dm' abbreviation represents 'docker macvlan' - master := "dm-dummy0" - // simulate the master link the vlan tagged subinterface parent link will use - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network specifying the desired sub-interface name - dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.50", "dm-subinterface") - assertNwIsAvailable(c, "dm-subinterface") - // cleanup the master interface which also collects the slave dev - deleteInterface(c, "dm-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanSubIntCreate(c *check.C) { - // verify the driver automatically provisions the 802.1q link (di-dummy0.60) - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm) - // master dummy interface 'di' abbreviation represents 'docker ipvlan' - master := "di-dummy0" - // simulate the master link the vlan tagged subinterface parent link will use - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network specifying the desired sub-interface name - dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.60", "di-subinterface") - assertNwIsAvailable(c, "di-subinterface") - // cleanup the master interface which also collects the slave dev - deleteInterface(c, "di-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkMacvlanOverlapParent(c *check.C) { - // verify the same parent interface cannot be used if already in use by an existing network - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm) - // master dummy interface 'dm' abbreviation represents 'docker macvlan' - master := "dm-dummy0" - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = createVlanInterface(c, master, "dm-dummy0.40", "40") - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network using an existing parent interface - dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-subinterface") - assertNwIsAvailable(c, "dm-subinterface") - // attempt to create another network using the same parent iface that should fail - out, _, err = dockerCmdWithError("network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.40", "dm-parent-net-overlap") - // verify that the overlap returns an error - c.Assert(err, check.NotNil) - // cleanup the master interface which also collects the slave dev - deleteInterface(c, "dm-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanOverlapParent(c *check.C) { - // verify the same parent interface cannot be used if already in use by an existing network - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm) - // master dummy interface 'di' abbreviation represents 'docker ipvlan' - master := "di-dummy0" - out, err := createMasterDummy(c, master) - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = createVlanInterface(c, master, "di-dummy0.30", "30") - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network using an existing parent interface - dockerCmd(c, "network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-subinterface") - assertNwIsAvailable(c, "di-subinterface") - // attempt to create another network using the same parent iface that should fail - out, _, err = 
dockerCmdWithError("network", "create", "--driver=ipvlan", "-o", "parent=di-dummy0.30", "di-parent-net-overlap") - // verify that the overlap returns an error - c.Assert(err, check.NotNil) - // cleanup the master interface which also collects the slave dev - deleteInterface(c, "di-dummy0") -} - -func (s *DockerNetworkSuite) TestDockerNetworkMacvlanMultiSubnet(c *check.C) { - // create a dual stack multi-subnet Macvlan bridge mode network and validate connectivity between four containers, two on each subnet - testRequires(c, DaemonIsLinux, IPv6, MacvlanKernelSupport, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.100.0/24", "--subnet=172.28.102.0/24", "--gateway=172.28.102.254", - "--subnet=2001:db8:abc2::/64", "--subnet=2001:db8:abc4::/64", "--gateway=2001:db8:abc4::254", "dualstackbridge") - // Ensure the network was created - assertNwIsAvailable(c, "dualstackbridge") - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.100.0/24 and 2001:db8:abc2::/64 - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "--ip", "172.28.100.20", "--ip6", "2001:db8:abc2::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=second", "--ip", "172.28.100.21", "--ip6", "2001:db8:abc2::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackbridge - ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackbridge - ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") - - // verify ipv4 connectivity to the explicit --ipv address second to first - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address second to first - c.Skip("Temporarily skipping while invesitigating sporadic v6 CI issues") - _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.102.0/24 and 2001:db8:abc4::/64 - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=third", "--ip", "172.28.102.20", "--ip6", "2001:db8:abc4::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=fourth", "--ip", "172.28.102.21", "--ip6", "2001:db8:abc4::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackbridge - ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackbridge - ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.GlobalIPv6Address") - - // verify ipv4 connectivity to the explicit --ipv address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // Inspect the v4 gateway to ensure the proper default GW was assigned - ip4gw := inspectField(c, "first", 
"NetworkSettings.Networks.dualstackbridge.Gateway") - c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.100.1") - // Inspect the v6 gateway to ensure the proper default GW was assigned - ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") - c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc2::1") - - // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned - ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.Gateway") - c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.102.254") - // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned - ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackbridge.IPv6Gateway") - c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc4::254") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL2MultiSubnet(c *check.C) { - // create a dual stack multi-subnet Ipvlan L2 network and validate connectivity within the subnets, two on each subnet - testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.200.0/24", "--subnet=172.28.202.0/24", "--gateway=172.28.202.254", - "--subnet=2001:db8:abc8::/64", "--subnet=2001:db8:abc6::/64", "--gateway=2001:db8:abc6::254", "dualstackl2") - // Ensure the network was created - assertNwIsAvailable(c, "dualstackl2") - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.200.0/24 and 2001:db8:abc8::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=first", "--ip", "172.28.200.20", "--ip6", "2001:db8:abc8::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "--ip", "172.28.200.21", "--ip6", "2001:db8:abc8::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackl2 - ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackl2 - ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") - - // verify ipv4 connectivity to the explicit --ipv address second to first - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address second to first - _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.202.0/24 and 2001:db8:abc6::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=third", "--ip", "172.28.202.20", "--ip6", "2001:db8:abc6::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=fourth", "--ip", "172.28.202.21", "--ip6", "2001:db8:abc6::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackl2 - ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackl2 - ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.GlobalIPv6Address") - - // verify ipv4 connectivity to the explicit --ipv address from third to fourth - _, _, err = 
dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // Inspect the v4 gateway to ensure the proper default GW was assigned - ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.Gateway") - c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.200.1") - // Inspect the v6 gateway to ensure the proper default GW was assigned - ip6gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") - c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc8::1") - - // Inspect the v4 gateway to ensure the proper explicitly assigned default GW was assigned - ip4gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.Gateway") - c.Assert(strings.TrimSpace(ip4gw), check.Equals, "172.28.202.254") - // Inspect the v6 gateway to ensure the proper explicitly assigned default GW was assigned - ip6gw = inspectField(c, "third", "NetworkSettings.Networks.dualstackl2.IPv6Gateway") - c.Assert(strings.TrimSpace(ip6gw), check.Equals, "2001:db8:abc6::254") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanL3MultiSubnet(c *check.C) { - // create a dual stack multi-subnet Ipvlan L3 network and validate connectivity between all four containers per L3 mode - testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm, IPv6) - dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.10.0/24", "--subnet=172.28.12.0/24", "--gateway=172.28.12.254", - "--subnet=2001:db8:abc9::/64", "--subnet=2001:db8:abc7::/64", "--gateway=2001:db8:abc7::254", "-o", "ipvlan_mode=l3", "dualstackl3") - // Ensure the network was created - assertNwIsAvailable(c, "dualstackl3") - - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.10.0/24 and 2001:db8:abc9::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=first", "--ip", "172.28.10.20", "--ip6", "2001:db8:abc9::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=second", "--ip", "172.28.10.21", "--ip6", "2001:db8:abc9::21", "busybox", "top") - - // Inspect and store the v4 address from specified container on the network dualstackl3 - ip := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackl3 - ip6 := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") - - // verify ipv4 connectivity to the explicit --ipv address second to first - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address second to first - _, _, err = dockerCmdWithError("exec", "second", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // start dual stack containers and verify the user specified --ip and --ip6 addresses on subnets 172.28.12.0/24 and 2001:db8:abc7::/64 - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "--ip", "172.28.12.20", "--ip6", "2001:db8:abc7::20", "busybox", "top") - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=fourth", "--ip", "172.28.12.21", "--ip6", "2001:db8:abc7::21", "busybox", "top") - - // Inspect and store the v4 
address from specified container on the network dualstackl3 - ip = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackl3 - ip6 = inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") - - // verify ipv4 connectivity to the explicit --ipv address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - // verify ipv6 connectivity to the explicit --ipv6 address from third to fourth - _, _, err = dockerCmdWithError("exec", "fourth", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // Inspect and store the v4 address from specified container on the network dualstackl3 - ip = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.IPAddress") - // Inspect and store the v6 address from specified container on the network dualstackl3 - ip6 = inspectField(c, "second", "NetworkSettings.Networks.dualstackl3.GlobalIPv6Address") - - // Verify connectivity across disparate subnets which is unique to L3 mode only - _, _, err = dockerCmdWithError("exec", "third", "ping", "-c", "1", strings.TrimSpace(ip)) - c.Assert(err, check.IsNil) - _, _, err = dockerCmdWithError("exec", "third", "ping6", "-c", "1", strings.TrimSpace(ip6)) - c.Assert(err, check.IsNil) - - // Inspect the v4 gateway to ensure no next hop is assigned in L3 mode - ip4gw := inspectField(c, "first", "NetworkSettings.Networks.dualstackl3.Gateway") - c.Assert(strings.TrimSpace(ip4gw), check.Equals, "") - // Inspect the v6 gateway to ensure the explicitly specified default GW is ignored per L3 mode enabled - ip6gw := inspectField(c, "third", "NetworkSettings.Networks.dualstackl3.IPv6Gateway") - c.Assert(strings.TrimSpace(ip6gw), check.Equals, "") -} - -func (s *DockerNetworkSuite) TestDockerNetworkIpvlanAddressing(c *check.C) { - // Ensure the default gateways, next-hops and default dev devices are properly set - testRequires(c, DaemonIsLinux, IPv6, IpvlanKernelSupport, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "--driver=macvlan", "--ipv6", "--subnet=172.28.130.0/24", - "--subnet=2001:db8:abca::/64", "--gateway=2001:db8:abca::254", "-o", "macvlan_mode=bridge", "dualstackbridge") - assertNwIsAvailable(c, "dualstackbridge") - dockerCmd(c, "run", "-d", "--net=dualstackbridge", "--name=first", "busybox", "top") - // Validate macvlan bridge mode defaults gateway sets the default IPAM next-hop inferred from the subnet - out, _, err := dockerCmdWithError("exec", "first", "ip", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default via 172.28.130.1 dev eth0") - // Validate macvlan bridge mode sets the v6 gateway to the user specified default gateway/next-hop - out, _, err = dockerCmdWithError("exec", "first", "ip", "-6", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default via 2001:db8:abca::254 dev eth0") - - // Verify ipvlan l2 mode sets the proper default gateway routes via netlink - // for either an explicitly set route by the user or inferred via default IPAM - dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.140.0/24", "--gateway=172.28.140.254", - "--subnet=2001:db8:abcb::/64", "-o", "ipvlan_mode=l2", "dualstackl2") - assertNwIsAvailable(c, "dualstackl2") - dockerCmd(c, "run", "-d", "--net=dualstackl2", "--name=second", "busybox", "top") - // Validate ipvlan l2 mode defaults gateway 
sets the default IPAM next-hop inferred from the subnet - out, _, err = dockerCmdWithError("exec", "second", "ip", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default via 172.28.140.254 dev eth0") - // Validate ipvlan l2 mode sets the v6 gateway to the user specified default gateway/next-hop - out, _, err = dockerCmdWithError("exec", "second", "ip", "-6", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default via 2001:db8:abcb::1 dev eth0") - - // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops - dockerCmd(c, "network", "create", "--driver=ipvlan", "--ipv6", "--subnet=172.28.160.0/24", "--gateway=172.28.160.254", - "--subnet=2001:db8:abcd::/64", "--gateway=2001:db8:abcd::254", "-o", "ipvlan_mode=l3", "dualstackl3") - assertNwIsAvailable(c, "dualstackl3") - dockerCmd(c, "run", "-d", "--net=dualstackl3", "--name=third", "busybox", "top") - // Validate ipvlan l3 mode sets the v4 gateway to dev eth0 and disregards any explicit or inferred next-hops - out, _, err = dockerCmdWithError("exec", "third", "ip", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default dev eth0") - // Validate ipvlan l3 mode sets the v6 gateway to dev eth0 and disregards any explicit or inferred next-hops - out, _, err = dockerCmdWithError("exec", "third", "ip", "-6", "route") - c.Assert(err, check.IsNil) - c.Assert(out, checker.Contains, "default dev eth0") -} - -func (s *DockerSuite) TestDockerNetworkMacVlanBridgeNilParent(c *check.C) { - // macvlan bridge mode - dummy parent interface is provisioned dynamically - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "--driver=macvlan", "dm-nil-parent") - assertNwIsAvailable(c, "dm-nil-parent") - - // start two containers on the same subnet - dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=dm-nil-parent", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // intra-network communications should succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkMacVlanBridgeInternalMode(c *check.C) { - // macvlan bridge mode --internal containers can communicate inside the network but not externally - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "--driver=macvlan", "--internal", "dm-internal") - assertNwIsAvailable(c, "dm-internal") - nr := getNetworkResource(c, "dm-internal") - c.Assert(nr.Internal, checker.True) - - // start two containers on the same subnet - dockerCmd(c, "run", "-d", "--net=dm-internal", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=dm-internal", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // access outside of the network should fail - _, _, err := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") - c.Assert(err, check.NotNil) - // intra-network communications should succeed - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkIpvlanL2NilParent(c *check.C) { - // ipvlan l2 mode - dummy parent interface is 
provisioned dynamically - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "--driver=ipvlan", "di-nil-parent") - assertNwIsAvailable(c, "di-nil-parent") - - // start two containers on the same subnet - dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=di-nil-parent", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // intra-network communications should succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkIpvlanL2InternalMode(c *check.C) { - // ipvlan l2 mode --internal containers can communicate inside the network but not externally - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "--driver=ipvlan", "--internal", "di-internal") - assertNwIsAvailable(c, "di-internal") - nr := getNetworkResource(c, "di-internal") - c.Assert(nr.Internal, checker.True) - - // start two containers on the same subnet - dockerCmd(c, "run", "-d", "--net=di-internal", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=di-internal", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // access outside of the network should fail - _, _, err := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") - c.Assert(err, check.NotNil) - // intra-network communications should succeed - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkIpvlanL3NilParent(c *check.C) { - // ipvlan l3 mode - dummy parent interface is provisioned dynamically - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", - "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "di-nil-parent-l3") - assertNwIsAvailable(c, "di-nil-parent-l3") - - // start two containers on separate subnets - dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-nil-parent-l3", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--ip=172.28.230.10", "--net=di-nil-parent-l3", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // intra-network communications should succeed - _, _, err := dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkIpvlanL3InternalMode(c *check.C) { - // ipvlan l3 mode --internal containers can communicate inside the network but not externally - testRequires(c, DaemonIsLinux, IpvlanKernelSupport, NotUserNamespace, NotArm) - dockerCmd(c, "network", "create", "--driver=ipvlan", "--subnet=172.28.230.0/24", - "--subnet=172.28.220.0/24", "-o", "ipvlan_mode=l3", "--internal", "di-internal-l3") - assertNwIsAvailable(c, "di-internal-l3") - nr := getNetworkResource(c, "di-internal-l3") - c.Assert(nr.Internal, checker.True) - - // start two containers on separate subnets - dockerCmd(c, "run", "-d", "--ip=172.28.220.10", "--net=di-internal-l3", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--ip=172.28.230.10", 
"--net=di-internal-l3", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - - // access outside of the network should fail - _, _, err := dockerCmdWithTimeout(time.Second, "exec", "first", "ping", "-c", "1", "-w", "1", "8.8.8.8") - c.Assert(err, check.NotNil) - // intra-network communications should succeed - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) -} - -func (s *DockerSuite) TestDockerNetworkMacVlanExistingParent(c *check.C) { - // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm) - netName := "dm-parent-exists" - out, err := createMasterDummy(c, "dm-dummy0") - //out, err := createVlanInterface(c, "dm-parent", "dm-slave", "macvlan", "bridge") - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network using an existing parent interface - dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0", netName) - assertNwIsAvailable(c, netName) - // delete the network while preserving the parent link - dockerCmd(c, "network", "rm", netName) - assertNwNotAvailable(c, netName) - // verify the network delete did not delete the predefined link - out, err = linkExists(c, "dm-dummy0") - c.Assert(err, check.IsNil, check.Commentf(out)) - deleteInterface(c, "dm-dummy0") - c.Assert(err, check.IsNil, check.Commentf(out)) -} - -func (s *DockerSuite) TestDockerNetworkMacVlanSubinterface(c *check.C) { - // macvlan bridge mode - empty parent interface containers can reach each other internally but not externally - testRequires(c, DaemonIsLinux, MacvlanKernelSupport, NotUserNamespace, NotArm) - netName := "dm-subinterface" - out, err := createMasterDummy(c, "dm-dummy0") - c.Assert(err, check.IsNil, check.Commentf(out)) - out, err = createVlanInterface(c, "dm-dummy0", "dm-dummy0.20", "20") - c.Assert(err, check.IsNil, check.Commentf(out)) - // create a network using an existing parent interface - dockerCmd(c, "network", "create", "--driver=macvlan", "-o", "parent=dm-dummy0.20", netName) - assertNwIsAvailable(c, netName) - - // start containers on 802.1q tagged '-o parent' sub-interface - dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=first", "busybox", "top") - c.Assert(waitRun("first"), check.IsNil) - dockerCmd(c, "run", "-d", "--net=dm-subinterface", "--name=second", "busybox", "top") - c.Assert(waitRun("second"), check.IsNil) - // verify containers can communicate - _, _, err = dockerCmdWithError("exec", "second", "ping", "-c", "1", "first") - c.Assert(err, check.IsNil) - - // remove the containers - dockerCmd(c, "rm", "-f", "first") - dockerCmd(c, "rm", "-f", "second") - // delete the network while preserving the parent link - dockerCmd(c, "network", "rm", netName) - assertNwNotAvailable(c, netName) - // verify the network delete did not delete the predefined sub-interface - out, err = linkExists(c, "dm-dummy0.20") - c.Assert(err, check.IsNil, check.Commentf(out)) - // delete the parent interface which also collects the slave - deleteInterface(c, "dm-dummy0") - c.Assert(err, check.IsNil, check.Commentf(out)) -} - -func createMasterDummy(c *check.C, master string) (string, error) { - // ip link add type dummy - args := []string{"link", "add", master, "type", "dummy"} - ipLinkCmd := exec.Command("ip", args...) 
- out, _, err := runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - // ip link set <master> up - args = []string{"link", "set", master, "up"} - ipLinkCmd = exec.Command("ip", args...) - out, _, err = runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - return out, err -} - -func createVlanInterface(c *check.C, master, slave, id string) (string, error) { - // ip link add link <master> name <slave> type vlan id <id> - args := []string{"link", "add", "link", master, "name", slave, "type", "vlan", "id", id} - ipLinkCmd := exec.Command("ip", args...) - out, _, err := runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - // ip link set <slave> up - args = []string{"link", "set", slave, "up"} - ipLinkCmd = exec.Command("ip", args...) - out, _, err = runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - return out, err -} - -func linkExists(c *check.C, master string) (string, error) { - // verify the specified link exists, ip link show <master> - args := []string{"link", "show", master} - ipLinkCmd := exec.Command("ip", args...) - out, _, err := runCommandWithOutput(ipLinkCmd) - if err != nil { - return out, err - } - return out, err -} diff --git a/integration-cli/docker_hub_pull_suite_test.go b/integration-cli/docker_hub_pull_suite_test.go deleted file mode 100644 index a702b5889b..0000000000 --- a/integration-cli/docker_hub_pull_suite_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package main - -import ( - "os/exec" - "runtime" - "strings" - - "github.com/docker/docker/pkg/integration/checker" - "github.com/go-check/check" -) - -func init() { - // FIXME. Temporarily turning this off for Windows as GH16039 was breaking - // Windows to Linux CI @icecrime - if runtime.GOOS != "windows" { - check.Suite(newDockerHubPullSuite()) - } -} - -// DockerHubPullSuite provides an isolated daemon that doesn't have all the -// images that are baked into our 'global' test environment daemon (e.g., -// busybox, httpserver, ...). -// -// We use it for push/pull tests where we want to start fresh, and measure the -// relative impact of each individual operation. As part of this suite, all -// images are removed after each test. -type DockerHubPullSuite struct { - d *Daemon - ds *DockerSuite -} - -// newDockerHubPullSuite returns a new instance of a DockerHubPullSuite. -func newDockerHubPullSuite() *DockerHubPullSuite { - return &DockerHubPullSuite{ - ds: &DockerSuite{}, - } -} - -// SetUpSuite starts the suite daemon. -func (s *DockerHubPullSuite) SetUpSuite(c *check.C) { - testRequires(c, DaemonIsLinux) - s.d = NewDaemon(c) - err := s.d.Start() - c.Assert(err, checker.IsNil, check.Commentf("starting push/pull test daemon: %v", err)) -} - -// TearDownSuite stops the suite daemon. -func (s *DockerHubPullSuite) TearDownSuite(c *check.C) { - if s.d != nil { - err := s.d.Stop() - c.Assert(err, checker.IsNil, check.Commentf("stopping push/pull test daemon: %v", err)) - } -} - -// SetUpTest declares that all tests of this suite require network. -func (s *DockerHubPullSuite) SetUpTest(c *check.C) { - testRequires(c, Network) -} - -// TearDownTest removes all images from the suite daemon. -func (s *DockerHubPullSuite) TearDownTest(c *check.C) { - out := s.Cmd(c, "images", "-aq") - images := strings.Split(out, "\n") - images = append([]string{"-f"}, images...) - s.d.Cmd("rmi", images...) - s.ds.TearDownTest(c) -} - -// Cmd executes a command against the suite daemon and returns the combined - output. The function fails the test when the command returns an error. 
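Every helper in this suite ultimately goes through MakeCmd below, that is, a docker CLI invocation pinned to the isolated daemon via --host. A hedged standalone equivalent, where the binary name and socket path are placeholders:

package main

import (
	"fmt"
	"os/exec"
)

func main() {
	// Point the CLI at a specific daemon rather than the default socket.
	cmd := exec.Command("docker", "--host", "unix:///var/run/docker-test.sock", "images", "-aq")
	out, err := cmd.CombinedOutput()
	fmt.Printf("err=%v\n%s", err, out)
}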
-func (s *DockerHubPullSuite) Cmd(c *check.C, name string, arg ...string) string { - out, err := s.CmdWithError(name, arg...) - c.Assert(err, checker.IsNil, check.Commentf("%q failed with errors: %s, %v", strings.Join(arg, " "), out, err)) - return out -} - -// CmdWithError executes a command against the suite daemon and returns the -// combined output as well as any error. -func (s *DockerHubPullSuite) CmdWithError(name string, arg ...string) (string, error) { - c := s.MakeCmd(name, arg...) - b, err := c.CombinedOutput() - return string(b), err -} - -// MakeCmd returns an exec.Cmd command to run against the suite daemon. -func (s *DockerHubPullSuite) MakeCmd(name string, arg ...string) *exec.Cmd { - args := []string{"--host", s.d.sock(), name} - args = append(args, arg...) - return exec.Command(dockerBinary, args...) -} diff --git a/integration-cli/docker_test_vars.go b/integration-cli/docker_test_vars.go deleted file mode 100644 index e668a8700c..0000000000 --- a/integration-cli/docker_test_vars.go +++ /dev/null @@ -1,131 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "os" - "os/exec" - - "github.com/docker/docker/pkg/reexec" -) - -var ( - // the docker client binary to use - dockerBinary = "docker" - // the docker daemon binary to use - dockerdBinary = "dockerd" - - // path to containerd's ctr binary - ctrBinary = "docker-containerd-ctr" - - // the private registry image to use for tests involving the registry - registryImageName = "registry" - - // the private registry to use for tests - privateRegistryURL = "127.0.0.1:5000" - - // TODO Windows CI. These are incorrect and need fixing into - // platform specific pieces. - runtimePath = "/var/run/docker" - - workingDirectory string - - // isLocalDaemon is true if the daemon under test is on the same - // host as the CLI. - isLocalDaemon bool - - // daemonPlatform is held globally so that tests can make intelligent - // decisions on how to configure themselves according to the platform - // of the daemon. This is initialized in docker_utils by sending - // a version call to the daemon and examining the response header. - daemonPlatform string - - // windowsDaemonKV is used on Windows to distinguish between different - // versions. This is necessary to enable certain tests based on whether - // the platform supports it. For example, Windows Server 2016 TP3 did - // not support volumes, but TP4 did. - windowsDaemonKV int - - // daemonDefaultImage is the name of the default image to use when running - // tests. This is platform dependent. - daemonDefaultImage string - - // For a local daemon on Linux, these values will be used for testing - // user namespace support as the standard graph path(s) will be - // appended with the root remapped uid.gid prefix - dockerBasePath string - volumesConfigPath string - containerStoragePath string - - // daemonStorageDriver is held globally so that tests can know the storage - // driver of the daemon. This is initialized in docker_utils by sending - // a version call to the daemon and examining the response header. 
- daemonStorageDriver string
-)
-
-const (
- // WindowsBaseImage is the name of the base image for Windows testing
- WindowsBaseImage = "windowsservercore"
-
- // DefaultImage is the name of the base image for the majority of tests that
- // are run across suites
- DefaultImage = "busybox"
-)
-
-func init() {
- reexec.Init()
- if dockerBin := os.Getenv("DOCKER_BINARY"); dockerBin != "" {
- dockerBinary = dockerBin
- }
- var err error
- dockerBinary, err = exec.LookPath(dockerBinary)
- if err != nil {
- fmt.Printf("ERROR: couldn't resolve full path to the Docker binary (%v)", err)
- os.Exit(1)
- }
- if registryImage := os.Getenv("REGISTRY_IMAGE"); registryImage != "" {
- registryImageName = registryImage
- }
- if registry := os.Getenv("REGISTRY_URL"); registry != "" {
- privateRegistryURL = registry
- }
- workingDirectory, _ = os.Getwd()
-
- // Deterministically working out the environment in which CI is running
- // to evaluate whether the daemon is local or remote is not possible through
- // a build tag.
- //
- // For example Windows to Linux CI under Jenkins tests the 64-bit
- // Windows binary build with the daemon build tag, but calls a remote
- // Linux daemon.
- //
- // We can't just say if Windows then assume the daemon is local as at
- // some point, we will be testing the Windows CLI against a Windows daemon.
- //
- // Similarly, it will be perfectly valid to also run CLI tests from
- // a Linux CLI (built with the daemon tag) against a Windows daemon.
- if len(os.Getenv("DOCKER_REMOTE_DAEMON")) > 0 {
- isLocalDaemon = false
- } else {
- isLocalDaemon = true
- }
-
- // TODO Windows CI. These are incorrect and need fixing into
- // platform specific pieces.
- // This is only used for tests with a local daemon (Linux-only today);
- // default is "/var/lib/docker", but we'll try and ask the
- // /info endpoint for the specific root dir
- dockerBasePath = "/var/lib/docker"
- type Info struct {
- DockerRootDir string
- }
- var i Info
- status, b, err := sockRequest("GET", "/info", nil)
- if err == nil && status == 200 {
- if err = json.Unmarshal(b, &i); err == nil {
- dockerBasePath = i.DockerRootDir
- }
- }
- volumesConfigPath = dockerBasePath + "/volumes"
- containerStoragePath = dockerBasePath + "/containers"
-}
diff --git a/integration-cli/docker_utils.go b/integration-cli/docker_utils.go
deleted file mode 100644
index 1141cae2c1..0000000000
--- a/integration-cli/docker_utils.go
+++ /dev/null
@@ -1,1569 +0,0 @@
-package main
-
-import (
- "bufio"
- "bytes"
- "crypto/tls"
- "encoding/json"
- "errors"
- "fmt"
- "io"
- "io/ioutil"
- "net"
- "net/http"
- "net/http/httptest"
- "net/http/httputil"
- "net/url"
- "os"
- "os/exec"
- "path"
- "path/filepath"
- "strconv"
- "strings"
- "time"
-
- "github.com/docker/docker/opts"
- "github.com/docker/docker/pkg/httputils"
- "github.com/docker/docker/pkg/integration"
- "github.com/docker/docker/pkg/ioutils"
- "github.com/docker/docker/pkg/stringutils"
- "github.com/docker/engine-api/types"
- "github.com/docker/go-connections/tlsconfig"
- "github.com/docker/go-units"
- "github.com/go-check/check"
-)
-
-func init() {
- cmd := exec.Command(dockerBinary, "images", "-f", "dangling=false", "--format", "{{.Repository}}:{{.Tag}}")
- cmd.Env = appendBaseEnv(true)
- out, err := cmd.CombinedOutput()
- if err != nil {
- panic(fmt.Errorf("err=%v\nout=%s\n", err, out))
- }
- images := strings.Split(strings.TrimSpace(string(out)), "\n")
- for _, img := range images {
- protectedImages[img] = struct{}{}
- }
-
- res, body, err := sockRequestRaw("GET",
"/info", nil, "application/json") - if err != nil { - panic(fmt.Errorf("Init failed to get /info: %v", err)) - } - defer body.Close() - if res.StatusCode != http.StatusOK { - panic(fmt.Errorf("Init failed to get /info. Res=%v", res)) - } - - svrHeader, _ := httputils.ParseServerHeader(res.Header.Get("Server")) - daemonPlatform = svrHeader.OS - if daemonPlatform != "linux" && daemonPlatform != "windows" { - panic("Cannot run tests against platform: " + daemonPlatform) - } - - // Now we know the daemon platform, can set paths used by tests. - var info types.Info - err = json.NewDecoder(body).Decode(&info) - if err != nil { - panic(fmt.Errorf("Init failed to unmarshal docker info: %v", err)) - } - - daemonStorageDriver = info.Driver - dockerBasePath = info.DockerRootDir - volumesConfigPath = filepath.Join(dockerBasePath, "volumes") - containerStoragePath = filepath.Join(dockerBasePath, "containers") - // Make sure in context of daemon, not the local platform. Note we can't - // use filepath.FromSlash or ToSlash here as they are a no-op on Unix. - if daemonPlatform == "windows" { - volumesConfigPath = strings.Replace(volumesConfigPath, `/`, `\`, -1) - containerStoragePath = strings.Replace(containerStoragePath, `/`, `\`, -1) - // On Windows, extract out the version as we need to make selective - // decisions during integration testing as and when features are implemented. - // eg in "10.0 10550 (10550.1000.amd64fre.branch.date-time)" we want 10550 - windowsDaemonKV, _ = strconv.Atoi(strings.Split(info.KernelVersion, " ")[1]) - } else { - volumesConfigPath = strings.Replace(volumesConfigPath, `\`, `/`, -1) - containerStoragePath = strings.Replace(containerStoragePath, `\`, `/`, -1) - } -} - -func convertBasesize(basesizeBytes int64) (int64, error) { - basesize := units.HumanSize(float64(basesizeBytes)) - basesize = strings.Trim(basesize, " ")[:len(basesize)-3] - basesizeFloat, err := strconv.ParseFloat(strings.Trim(basesize, " "), 64) - if err != nil { - return 0, err - } - return int64(basesizeFloat) * 1024 * 1024 * 1024, nil -} - -func daemonHost() string { - daemonURLStr := "unix://" + opts.DefaultUnixSocket - if daemonHostVar := os.Getenv("DOCKER_HOST"); daemonHostVar != "" { - daemonURLStr = daemonHostVar - } - return daemonURLStr -} - -func getTLSConfig() (*tls.Config, error) { - dockerCertPath := os.Getenv("DOCKER_CERT_PATH") - - if dockerCertPath == "" { - return nil, fmt.Errorf("DOCKER_TLS_VERIFY specified, but no DOCKER_CERT_PATH environment variable") - } - - option := &tlsconfig.Options{ - CAFile: filepath.Join(dockerCertPath, "ca.pem"), - CertFile: filepath.Join(dockerCertPath, "cert.pem"), - KeyFile: filepath.Join(dockerCertPath, "key.pem"), - } - tlsConfig, err := tlsconfig.Client(*option) - if err != nil { - return nil, err - } - - return tlsConfig, nil -} - -func sockConn(timeout time.Duration, daemon string) (net.Conn, error) { - if daemon == "" { - daemon = daemonHost() - } - daemonURL, err := url.Parse(daemon) - if err != nil { - return nil, fmt.Errorf("could not parse url %q: %v", daemon, err) - } - - var c net.Conn - switch daemonURL.Scheme { - case "npipe": - return npipeDial(daemonURL.Path, timeout) - case "unix": - return net.DialTimeout(daemonURL.Scheme, daemonURL.Path, timeout) - case "tcp": - if os.Getenv("DOCKER_TLS_VERIFY") != "" { - // Setup the socket TLS configuration. 
- tlsConfig, err := getTLSConfig() - if err != nil { - return nil, err - } - dialer := &net.Dialer{Timeout: timeout} - return tls.DialWithDialer(dialer, daemonURL.Scheme, daemonURL.Host, tlsConfig) - } - return net.DialTimeout(daemonURL.Scheme, daemonURL.Host, timeout) - default: - return c, fmt.Errorf("unknown scheme %v (%s)", daemonURL.Scheme, daemon) - } -} - -func sockRequest(method, endpoint string, data interface{}) (int, []byte, error) { - jsonData := bytes.NewBuffer(nil) - if err := json.NewEncoder(jsonData).Encode(data); err != nil { - return -1, nil, err - } - - res, body, err := sockRequestRaw(method, endpoint, jsonData, "application/json") - if err != nil { - return -1, nil, err - } - b, err := readBody(body) - return res.StatusCode, b, err -} - -func sockRequestRaw(method, endpoint string, data io.Reader, ct string) (*http.Response, io.ReadCloser, error) { - return sockRequestRawToDaemon(method, endpoint, data, ct, "") -} - -func sockRequestRawToDaemon(method, endpoint string, data io.Reader, ct, daemon string) (*http.Response, io.ReadCloser, error) { - req, client, err := newRequestClient(method, endpoint, data, ct, daemon) - if err != nil { - return nil, nil, err - } - - resp, err := client.Do(req) - if err != nil { - client.Close() - return nil, nil, err - } - body := ioutils.NewReadCloserWrapper(resp.Body, func() error { - defer resp.Body.Close() - return client.Close() - }) - - return resp, body, nil -} - -func sockRequestHijack(method, endpoint string, data io.Reader, ct string) (net.Conn, *bufio.Reader, error) { - req, client, err := newRequestClient(method, endpoint, data, ct, "") - if err != nil { - return nil, nil, err - } - - client.Do(req) - conn, br := client.Hijack() - return conn, br, nil -} - -func newRequestClient(method, endpoint string, data io.Reader, ct, daemon string) (*http.Request, *httputil.ClientConn, error) { - c, err := sockConn(time.Duration(10*time.Second), daemon) - if err != nil { - return nil, nil, fmt.Errorf("could not dial docker daemon: %v", err) - } - - client := httputil.NewClientConn(c, nil) - - req, err := http.NewRequest(method, endpoint, data) - if err != nil { - client.Close() - return nil, nil, fmt.Errorf("could not create new request: %v", err) - } - - if ct != "" { - req.Header.Set("Content-Type", ct) - } - return req, client, nil -} - -func readBody(b io.ReadCloser) ([]byte, error) { - defer b.Close() - return ioutil.ReadAll(b) -} - -func deleteContainer(container string) error { - container = strings.TrimSpace(strings.Replace(container, "\n", " ", -1)) - rmArgs := strings.Split(fmt.Sprintf("rm -fv %v", container), " ") - exitCode, err := runCommand(exec.Command(dockerBinary, rmArgs...)) - // set error manually if not set - if exitCode != 0 && err == nil { - err = fmt.Errorf("failed to remove container: `docker rm` exit is non-zero") - } - - return err -} - -func getAllContainers() (string, error) { - getContainersCmd := exec.Command(dockerBinary, "ps", "-q", "-a") - out, exitCode, err := runCommandWithOutput(getContainersCmd) - if exitCode != 0 && err == nil { - err = fmt.Errorf("failed to get a list of containers: %v\n", out) - } - - return out, err -} - -func deleteAllContainers() error { - containers, err := getAllContainers() - if err != nil { - fmt.Println(containers) - return err - } - - if containers != "" { - if err = deleteContainer(containers); err != nil { - return err - } - } - return nil -} - -func deleteAllNetworks() error { - networks, err := getAllNetworks() - if err != nil { - return err - } - var errors []string 
- for _, n := range networks { - if n.Name == "bridge" || n.Name == "none" || n.Name == "host" { - continue - } - if daemonPlatform == "windows" && strings.ToLower(n.Name) == "nat" { - // nat is a pre-defined network on Windows and cannot be removed - continue - } - status, b, err := sockRequest("DELETE", "/networks/"+n.Name, nil) - if err != nil { - errors = append(errors, err.Error()) - continue - } - if status != http.StatusNoContent { - errors = append(errors, fmt.Sprintf("error deleting network %s: %s", n.Name, string(b))) - } - } - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - return nil -} - -func getAllNetworks() ([]types.NetworkResource, error) { - var networks []types.NetworkResource - _, b, err := sockRequest("GET", "/networks", nil) - if err != nil { - return nil, err - } - if err := json.Unmarshal(b, &networks); err != nil { - return nil, err - } - return networks, nil -} - -func deleteAllVolumes() error { - volumes, err := getAllVolumes() - if err != nil { - return err - } - var errors []string - for _, v := range volumes { - status, b, err := sockRequest("DELETE", "/volumes/"+v.Name, nil) - if err != nil { - errors = append(errors, err.Error()) - continue - } - if status != http.StatusNoContent { - errors = append(errors, fmt.Sprintf("error deleting volume %s: %s", v.Name, string(b))) - } - } - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - return nil -} - -func getAllVolumes() ([]*types.Volume, error) { - var volumes types.VolumesListResponse - _, b, err := sockRequest("GET", "/volumes", nil) - if err != nil { - return nil, err - } - if err := json.Unmarshal(b, &volumes); err != nil { - return nil, err - } - return volumes.Volumes, nil -} - -var protectedImages = map[string]struct{}{} - -func deleteAllImages() error { - cmd := exec.Command(dockerBinary, "images") - cmd.Env = appendBaseEnv(true) - out, err := cmd.CombinedOutput() - if err != nil { - return err - } - lines := strings.Split(string(out), "\n")[1:] - var imgs []string - for _, l := range lines { - if l == "" { - continue - } - fields := strings.Fields(l) - imgTag := fields[0] + ":" + fields[1] - if _, ok := protectedImages[imgTag]; !ok { - if fields[0] == "" { - imgs = append(imgs, fields[2]) - continue - } - imgs = append(imgs, imgTag) - } - } - if len(imgs) == 0 { - return nil - } - args := append([]string{"rmi", "-f"}, imgs...) 
- if err := exec.Command(dockerBinary, args...).Run(); err != nil { - return err - } - return nil -} - -func getPausedContainers() (string, error) { - getPausedContainersCmd := exec.Command(dockerBinary, "ps", "-f", "status=paused", "-q", "-a") - out, exitCode, err := runCommandWithOutput(getPausedContainersCmd) - if exitCode != 0 && err == nil { - err = fmt.Errorf("failed to get a list of paused containers: %v\n", out) - } - - return out, err -} - -func getSliceOfPausedContainers() ([]string, error) { - out, err := getPausedContainers() - if err == nil { - if len(out) == 0 { - return nil, err - } - slice := strings.Split(strings.TrimSpace(out), "\n") - return slice, err - } - return []string{out}, err -} - -func unpauseContainer(container string) error { - unpauseCmd := exec.Command(dockerBinary, "unpause", container) - exitCode, err := runCommand(unpauseCmd) - if exitCode != 0 && err == nil { - err = fmt.Errorf("failed to unpause container") - } - - return err -} - -func unpauseAllContainers() error { - containers, err := getPausedContainers() - if err != nil { - fmt.Println(containers) - return err - } - - containers = strings.Replace(containers, "\n", " ", -1) - containers = strings.Trim(containers, " ") - containerList := strings.Split(containers, " ") - - for _, value := range containerList { - if err = unpauseContainer(value); err != nil { - return err - } - } - - return nil -} - -func deleteImages(images ...string) error { - args := []string{"rmi", "-f"} - args = append(args, images...) - rmiCmd := exec.Command(dockerBinary, args...) - exitCode, err := runCommand(rmiCmd) - // set error manually if not set - if exitCode != 0 && err == nil { - err = fmt.Errorf("failed to remove image: `docker rmi` exit is non-zero") - } - return err -} - -func imageExists(image string) error { - inspectCmd := exec.Command(dockerBinary, "inspect", image) - exitCode, err := runCommand(inspectCmd) - if exitCode != 0 && err == nil { - err = fmt.Errorf("couldn't find image %q", image) - } - return err -} - -func pullImageIfNotExist(image string) error { - if err := imageExists(image); err != nil { - pullCmd := exec.Command(dockerBinary, "pull", image) - _, exitCode, err := runCommandWithOutput(pullCmd) - - if err != nil || exitCode != 0 { - return fmt.Errorf("image %q wasn't found locally and it couldn't be pulled: %s", image, err) - } - } - return nil -} - -func dockerCmdWithError(args ...string) (string, int, error) { - if err := validateArgs(args...); err != nil { - return "", 0, err - } - out, code, err := integration.DockerCmdWithError(dockerBinary, args...) - if err != nil { - err = fmt.Errorf("%v: %s", err, out) - } - return out, code, err -} - -func dockerCmdWithStdoutStderr(c *check.C, args ...string) (string, string, int) { - if err := validateArgs(args...); err != nil { - c.Fatalf(err.Error()) - } - return integration.DockerCmdWithStdoutStderr(dockerBinary, c, args...) -} - -func dockerCmd(c *check.C, args ...string) (string, int) { - if err := validateArgs(args...); err != nil { - c.Fatalf(err.Error()) - } - return integration.DockerCmd(dockerBinary, c, args...) -} - -// execute a docker command with a timeout -func dockerCmdWithTimeout(timeout time.Duration, args ...string) (string, int, error) { - if err := validateArgs(args...); err != nil { - return "", 0, err - } - return integration.DockerCmdWithTimeout(dockerBinary, timeout, args...) 
-} - -// execute a docker command in a directory -func dockerCmdInDir(c *check.C, path string, args ...string) (string, int, error) { - if err := validateArgs(args...); err != nil { - c.Fatalf(err.Error()) - } - return integration.DockerCmdInDir(dockerBinary, path, args...) -} - -// execute a docker command in a directory with a timeout -func dockerCmdInDirWithTimeout(timeout time.Duration, path string, args ...string) (string, int, error) { - if err := validateArgs(args...); err != nil { - return "", 0, err - } - return integration.DockerCmdInDirWithTimeout(dockerBinary, timeout, path, args...) -} - -// validateArgs is a checker to ensure tests are not running commands which are -// not supported on platforms. Specifically on Windows this is 'busybox top'. -func validateArgs(args ...string) error { - if daemonPlatform != "windows" { - return nil - } - foundBusybox := -1 - for key, value := range args { - if strings.ToLower(value) == "busybox" { - foundBusybox = key - } - if (foundBusybox != -1) && (key == foundBusybox+1) && (strings.ToLower(value) == "top") { - return errors.New("Cannot use 'busybox top' in tests on Windows. Use runSleepingContainer()") - } - } - return nil -} - -// find the State.ExitCode in container metadata -func findContainerExitCode(c *check.C, name string, vargs ...string) string { - args := append(vargs, "inspect", "--format='{{ .State.ExitCode }} {{ .State.Error }}'", name) - cmd := exec.Command(dockerBinary, args...) - out, _, err := runCommandWithOutput(cmd) - if err != nil { - c.Fatal(err, out) - } - return out -} - -func findContainerIP(c *check.C, id string, network string) string { - out, _ := dockerCmd(c, "inspect", fmt.Sprintf("--format='{{ .NetworkSettings.Networks.%s.IPAddress }}'", network), id) - return strings.Trim(out, " \r\n'") -} - -func getContainerCount() (int, error) { - const containers = "Containers:" - - cmd := exec.Command(dockerBinary, "info") - out, _, err := runCommandWithOutput(cmd) - if err != nil { - return 0, err - } - - lines := strings.Split(out, "\n") - for _, line := range lines { - if strings.Contains(line, containers) { - output := strings.TrimSpace(line) - output = strings.TrimLeft(output, containers) - output = strings.Trim(output, " ") - containerCount, err := strconv.Atoi(output) - if err != nil { - return 0, err - } - return containerCount, nil - } - } - return 0, fmt.Errorf("couldn't find the Container count in the output") -} - -// FakeContext creates directories that can be used as a build context -type FakeContext struct { - Dir string -} - -// Add a file at a path, creating directories where necessary -func (f *FakeContext) Add(file, content string) error { - return f.addFile(file, []byte(content)) -} - -func (f *FakeContext) addFile(file string, content []byte) error { - filepath := path.Join(f.Dir, file) - dirpath := path.Dir(filepath) - if dirpath != "." 
{ - if err := os.MkdirAll(dirpath, 0755); err != nil { - return err - } - } - return ioutil.WriteFile(filepath, content, 0644) - -} - -// Delete a file at a path -func (f *FakeContext) Delete(file string) error { - filepath := path.Join(f.Dir, file) - return os.RemoveAll(filepath) -} - -// Close deletes the context -func (f *FakeContext) Close() error { - return os.RemoveAll(f.Dir) -} - -func fakeContextFromNewTempDir() (*FakeContext, error) { - tmp, err := ioutil.TempDir("", "fake-context") - if err != nil { - return nil, err - } - if err := os.Chmod(tmp, 0755); err != nil { - return nil, err - } - return fakeContextFromDir(tmp), nil -} - -func fakeContextFromDir(dir string) *FakeContext { - return &FakeContext{dir} -} - -func fakeContextWithFiles(files map[string]string) (*FakeContext, error) { - ctx, err := fakeContextFromNewTempDir() - if err != nil { - return nil, err - } - for file, content := range files { - if err := ctx.Add(file, content); err != nil { - ctx.Close() - return nil, err - } - } - return ctx, nil -} - -func fakeContextAddDockerfile(ctx *FakeContext, dockerfile string) error { - if err := ctx.Add("Dockerfile", dockerfile); err != nil { - ctx.Close() - return err - } - return nil -} - -func fakeContext(dockerfile string, files map[string]string) (*FakeContext, error) { - ctx, err := fakeContextWithFiles(files) - if err != nil { - return nil, err - } - if err := fakeContextAddDockerfile(ctx, dockerfile); err != nil { - return nil, err - } - return ctx, nil -} - -// FakeStorage is a static file server. It might be running locally or remotely -// on test host. -type FakeStorage interface { - Close() error - URL() string - CtxDir() string -} - -func fakeBinaryStorage(archives map[string]*bytes.Buffer) (FakeStorage, error) { - ctx, err := fakeContextFromNewTempDir() - if err != nil { - return nil, err - } - for name, content := range archives { - if err := ctx.addFile(name, content.Bytes()); err != nil { - return nil, err - } - } - return fakeStorageWithContext(ctx) -} - -// fakeStorage returns either a local or remote (at daemon machine) file server -func fakeStorage(files map[string]string) (FakeStorage, error) { - ctx, err := fakeContextWithFiles(files) - if err != nil { - return nil, err - } - return fakeStorageWithContext(ctx) -} - -// fakeStorageWithContext returns either a local or remote (at daemon machine) file server -func fakeStorageWithContext(ctx *FakeContext) (FakeStorage, error) { - if isLocalDaemon { - return newLocalFakeStorage(ctx) - } - return newRemoteFileServer(ctx) -} - -// localFileStorage is a file storage on the running machine -type localFileStorage struct { - *FakeContext - *httptest.Server -} - -func (s *localFileStorage) URL() string { - return s.Server.URL -} - -func (s *localFileStorage) CtxDir() string { - return s.FakeContext.Dir -} - -func (s *localFileStorage) Close() error { - defer s.Server.Close() - return s.FakeContext.Close() -} - -func newLocalFakeStorage(ctx *FakeContext) (*localFileStorage, error) { - handler := http.FileServer(http.Dir(ctx.Dir)) - server := httptest.NewServer(handler) - return &localFileStorage{ - FakeContext: ctx, - Server: server, - }, nil -} - -// remoteFileServer is a containerized static file server started on the remote -// testing machine to be used in URL-accepting docker build functionality. -type remoteFileServer struct { - host string // hostname/port web server is listening to on docker host e.g. 
0.0.0.0:43712 - container string - image string - ctx *FakeContext -} - -func (f *remoteFileServer) URL() string { - u := url.URL{ - Scheme: "http", - Host: f.host} - return u.String() -} - -func (f *remoteFileServer) CtxDir() string { - return f.ctx.Dir -} - -func (f *remoteFileServer) Close() error { - defer func() { - if f.ctx != nil { - f.ctx.Close() - } - if f.image != "" { - deleteImages(f.image) - } - }() - if f.container == "" { - return nil - } - return deleteContainer(f.container) -} - -func newRemoteFileServer(ctx *FakeContext) (*remoteFileServer, error) { - var ( - image = fmt.Sprintf("fileserver-img-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) - container = fmt.Sprintf("fileserver-cnt-%s", strings.ToLower(stringutils.GenerateRandomAlphaOnlyString(10))) - ) - - // Build the image - if err := fakeContextAddDockerfile(ctx, `FROM httpserver -COPY . /static`); err != nil { - return nil, fmt.Errorf("Cannot add Dockerfile to context: %v", err) - } - if _, err := buildImageFromContext(image, ctx, false); err != nil { - return nil, fmt.Errorf("failed building file storage container image: %v", err) - } - - // Start the container - runCmd := exec.Command(dockerBinary, "run", "-d", "-P", "--name", container, image) - if out, ec, err := runCommandWithOutput(runCmd); err != nil { - return nil, fmt.Errorf("failed to start file storage container. ec=%v\nout=%s\nerr=%v", ec, out, err) - } - - // Find out the system assigned port - out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "port", container, "80/tcp")) - if err != nil { - return nil, fmt.Errorf("failed to find container port: err=%v\nout=%s", err, out) - } - - fileserverHostPort := strings.Trim(out, "\n") - _, port, err := net.SplitHostPort(fileserverHostPort) - if err != nil { - return nil, fmt.Errorf("unable to parse file server host:port: %v", err) - } - - dockerHostURL, err := url.Parse(daemonHost()) - if err != nil { - return nil, fmt.Errorf("unable to parse daemon host URL: %v", err) - } - - host, _, err := net.SplitHostPort(dockerHostURL.Host) - if err != nil { - return nil, fmt.Errorf("unable to parse docker daemon host:port: %v", err) - } - - return &remoteFileServer{ - container: container, - image: image, - host: fmt.Sprintf("%s:%s", host, port), - ctx: ctx}, nil -} - -func inspectFieldAndMarshall(c *check.C, name, field string, output interface{}) { - str := inspectFieldJSON(c, name, field) - err := json.Unmarshal([]byte(str), output) - if c != nil { - c.Assert(err, check.IsNil, check.Commentf("failed to unmarshal: %v", err)) - } -} - -func inspectFilter(name, filter string) (string, error) { - format := fmt.Sprintf("{{%s}}", filter) - inspectCmd := exec.Command(dockerBinary, "inspect", "-f", format, name) - out, exitCode, err := runCommandWithOutput(inspectCmd) - if err != nil || exitCode != 0 { - return "", fmt.Errorf("failed to inspect %s: %s", name, out) - } - return strings.TrimSpace(out), nil -} - -func inspectFieldWithError(name, field string) (string, error) { - return inspectFilter(name, fmt.Sprintf(".%s", field)) -} - -func inspectField(c *check.C, name, field string) string { - out, err := inspectFilter(name, fmt.Sprintf(".%s", field)) - if c != nil { - c.Assert(err, check.IsNil) - } - return out -} - -func inspectFieldJSON(c *check.C, name, field string) string { - out, err := inspectFilter(name, fmt.Sprintf("json .%s", field)) - if c != nil { - c.Assert(err, check.IsNil) - } - return out -} - -func inspectFieldMap(c *check.C, name, path, field string) string { - out, err := 
inspectFilter(name, fmt.Sprintf("index .%s %q", path, field)) - if c != nil { - c.Assert(err, check.IsNil) - } - return out -} - -func inspectMountSourceField(name, destination string) (string, error) { - m, err := inspectMountPoint(name, destination) - if err != nil { - return "", err - } - return m.Source, nil -} - -func inspectMountPoint(name, destination string) (types.MountPoint, error) { - out, err := inspectFilter(name, "json .Mounts") - if err != nil { - return types.MountPoint{}, err - } - - return inspectMountPointJSON(out, destination) -} - -var errMountNotFound = errors.New("mount point not found") - -func inspectMountPointJSON(j, destination string) (types.MountPoint, error) { - var mp []types.MountPoint - if err := unmarshalJSON([]byte(j), &mp); err != nil { - return types.MountPoint{}, err - } - - var m *types.MountPoint - for _, c := range mp { - if c.Destination == destination { - m = &c - break - } - } - - if m == nil { - return types.MountPoint{}, errMountNotFound - } - - return *m, nil -} - -func inspectImage(name, filter string) (string, error) { - args := []string{"inspect", "--type", "image"} - if filter != "" { - format := fmt.Sprintf("{{%s}}", filter) - args = append(args, "-f", format) - } - args = append(args, name) - inspectCmd := exec.Command(dockerBinary, args...) - out, exitCode, err := runCommandWithOutput(inspectCmd) - if err != nil || exitCode != 0 { - return "", fmt.Errorf("failed to inspect %s: %s", name, out) - } - return strings.TrimSpace(out), nil -} - -func getIDByName(name string) (string, error) { - return inspectFieldWithError(name, "Id") -} - -// getContainerState returns the exit code of the container -// and true if it's running -// the exit code should be ignored if it's running -func getContainerState(c *check.C, id string) (int, bool, error) { - var ( - exitStatus int - running bool - ) - out, exitCode := dockerCmd(c, "inspect", "--format={{.State.Running}} {{.State.ExitCode}}", id) - if exitCode != 0 { - return 0, false, fmt.Errorf("%q doesn't exist: %s", id, out) - } - - out = strings.Trim(out, "\n") - splitOutput := strings.Split(out, " ") - if len(splitOutput) != 2 { - return 0, false, fmt.Errorf("failed to get container state: output is broken") - } - if splitOutput[0] == "true" { - running = true - } - if n, err := strconv.Atoi(splitOutput[1]); err == nil { - exitStatus = n - } else { - return 0, false, fmt.Errorf("failed to get container state: couldn't parse integer") - } - - return exitStatus, running, nil -} - -func buildImageCmd(name, dockerfile string, useCache bool, buildFlags ...string) *exec.Cmd { - return buildImageCmdWithHost(name, dockerfile, "", useCache, buildFlags...) -} - -func buildImageCmdWithHost(name, dockerfile, host string, useCache bool, buildFlags ...string) *exec.Cmd { - args := []string{} - if host != "" { - args = append(args, "--host", host) - } - args = append(args, "build", "-t", name) - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) - args = append(args, "-") - buildCmd := exec.Command(dockerBinary, args...) - buildCmd.Stdin = strings.NewReader(dockerfile) - return buildCmd -} - -func buildImageWithOut(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, error) { - buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) 
- out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { - return "", out, fmt.Errorf("failed to build the image: %s", out) - } - id, err := getIDByName(name) - if err != nil { - return "", out, err - } - return id, out, nil -} - -func buildImageWithStdoutStderr(name, dockerfile string, useCache bool, buildFlags ...string) (string, string, string, error) { - buildCmd := buildImageCmd(name, dockerfile, useCache, buildFlags...) - stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) - if err != nil || exitCode != 0 { - return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) - } - id, err := getIDByName(name) - if err != nil { - return "", stdout, stderr, err - } - return id, stdout, stderr, nil -} - -func buildImage(name, dockerfile string, useCache bool, buildFlags ...string) (string, error) { - id, _, err := buildImageWithOut(name, dockerfile, useCache, buildFlags...) - return id, err -} - -func buildImageFromContext(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, error) { - id, _, err := buildImageFromContextWithOut(name, ctx, useCache, buildFlags...) - if err != nil { - return "", err - } - return id, nil -} - -func buildImageFromContextWithOut(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, error) { - args := []string{"build", "-t", name} - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) - args = append(args, ".") - buildCmd := exec.Command(dockerBinary, args...) - buildCmd.Dir = ctx.Dir - out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { - return "", "", fmt.Errorf("failed to build the image: %s", out) - } - id, err := getIDByName(name) - if err != nil { - return "", "", err - } - return id, out, nil -} - -func buildImageFromContextWithStdoutStderr(name string, ctx *FakeContext, useCache bool, buildFlags ...string) (string, string, string, error) { - args := []string{"build", "-t", name} - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) - args = append(args, ".") - buildCmd := exec.Command(dockerBinary, args...) - buildCmd.Dir = ctx.Dir - - stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) - if err != nil || exitCode != 0 { - return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) - } - id, err := getIDByName(name) - if err != nil { - return "", stdout, stderr, err - } - return id, stdout, stderr, nil -} - -func buildImageFromGitWithStdoutStderr(name string, ctx *fakeGit, useCache bool, buildFlags ...string) (string, string, string, error) { - args := []string{"build", "-t", name} - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) - args = append(args, ctx.RepoURL) - buildCmd := exec.Command(dockerBinary, args...) - - stdout, stderr, exitCode, err := runCommandWithStdoutStderr(buildCmd) - if err != nil || exitCode != 0 { - return "", stdout, stderr, fmt.Errorf("failed to build the image: %s", stdout) - } - id, err := getIDByName(name) - if err != nil { - return "", stdout, stderr, err - } - return id, stdout, stderr, nil -} - -func buildImageFromPath(name, path string, useCache bool, buildFlags ...string) (string, error) { - args := []string{"build", "-t", name} - if !useCache { - args = append(args, "--no-cache") - } - args = append(args, buildFlags...) - args = append(args, path) - buildCmd := exec.Command(dockerBinary, args...) 
- out, exitCode, err := runCommandWithOutput(buildCmd) - if err != nil || exitCode != 0 { - return "", fmt.Errorf("failed to build the image: %s", out) - } - return getIDByName(name) -} - -type gitServer interface { - URL() string - Close() error -} - -type localGitServer struct { - *httptest.Server -} - -func (r *localGitServer) Close() error { - r.Server.Close() - return nil -} - -func (r *localGitServer) URL() string { - return r.Server.URL -} - -type fakeGit struct { - root string - server gitServer - RepoURL string -} - -func (g *fakeGit) Close() { - g.server.Close() - os.RemoveAll(g.root) -} - -func newFakeGit(name string, files map[string]string, enforceLocalServer bool) (*fakeGit, error) { - ctx, err := fakeContextWithFiles(files) - if err != nil { - return nil, err - } - defer ctx.Close() - curdir, err := os.Getwd() - if err != nil { - return nil, err - } - defer os.Chdir(curdir) - - if output, err := exec.Command("git", "init", ctx.Dir).CombinedOutput(); err != nil { - return nil, fmt.Errorf("error trying to init repo: %s (%s)", err, output) - } - err = os.Chdir(ctx.Dir) - if err != nil { - return nil, err - } - if output, err := exec.Command("git", "config", "user.name", "Fake User").CombinedOutput(); err != nil { - return nil, fmt.Errorf("error trying to set 'user.name': %s (%s)", err, output) - } - if output, err := exec.Command("git", "config", "user.email", "fake.user@example.com").CombinedOutput(); err != nil { - return nil, fmt.Errorf("error trying to set 'user.email': %s (%s)", err, output) - } - if output, err := exec.Command("git", "add", "*").CombinedOutput(); err != nil { - return nil, fmt.Errorf("error trying to add files to repo: %s (%s)", err, output) - } - if output, err := exec.Command("git", "commit", "-a", "-m", "Initial commit").CombinedOutput(); err != nil { - return nil, fmt.Errorf("error trying to commit to repo: %s (%s)", err, output) - } - - root, err := ioutil.TempDir("", "docker-test-git-repo") - if err != nil { - return nil, err - } - repoPath := filepath.Join(root, name+".git") - if output, err := exec.Command("git", "clone", "--bare", ctx.Dir, repoPath).CombinedOutput(); err != nil { - os.RemoveAll(root) - return nil, fmt.Errorf("error trying to clone --bare: %s (%s)", err, output) - } - err = os.Chdir(repoPath) - if err != nil { - os.RemoveAll(root) - return nil, err - } - if output, err := exec.Command("git", "update-server-info").CombinedOutput(); err != nil { - os.RemoveAll(root) - return nil, fmt.Errorf("error trying to git update-server-info: %s (%s)", err, output) - } - err = os.Chdir(curdir) - if err != nil { - os.RemoveAll(root) - return nil, err - } - - var server gitServer - if !enforceLocalServer { - // use fakeStorage server, which might be local or remote (at test daemon) - server, err = fakeStorageWithContext(fakeContextFromDir(root)) - if err != nil { - return nil, fmt.Errorf("cannot start fake storage: %v", err) - } - } else { - // always start a local http server on CLI test machine - httpServer := httptest.NewServer(http.FileServer(http.Dir(root))) - server = &localGitServer{httpServer} - } - return &fakeGit{ - root: root, - server: server, - RepoURL: fmt.Sprintf("%s/%s.git", server.URL(), name), - }, nil -} - -// Write `content` to the file at path `dst`, creating it if necessary, -// as well as any missing directories. -// The file is truncated if it already exists. -// Fail the test when error occurs. 
-func writeFile(dst, content string, c *check.C) {
- // Create subdirectories if necessary
- c.Assert(os.MkdirAll(path.Dir(dst), 0700), check.IsNil)
- f, err := os.OpenFile(dst, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0700)
- c.Assert(err, check.IsNil)
- defer f.Close()
- // Write content (truncate if it exists)
- _, err = io.Copy(f, strings.NewReader(content))
- c.Assert(err, check.IsNil)
-}
-
-// Return the contents of file at path `src`.
-// Fail the test when error occurs.
-func readFile(src string, c *check.C) (content string) {
- data, err := ioutil.ReadFile(src)
- c.Assert(err, check.IsNil)
-
- return string(data)
-}
-
-func containerStorageFile(containerID, basename string) string {
- return filepath.Join(containerStoragePath, containerID, basename)
-}
-
-// docker commands that use this function must be run with the '-d' switch.
-func runCommandAndReadContainerFile(filename string, cmd *exec.Cmd) ([]byte, error) {
- out, _, err := runCommandWithOutput(cmd)
- if err != nil {
- return nil, fmt.Errorf("%v: %q", err, out)
- }
-
- contID := strings.TrimSpace(out)
-
- if err := waitRun(contID); err != nil {
- return nil, fmt.Errorf("%v: %q", contID, err)
- }
-
- return readContainerFile(contID, filename)
-}
-
-func readContainerFile(containerID, filename string) ([]byte, error) {
- f, err := os.Open(containerStorageFile(containerID, filename))
- if err != nil {
- return nil, err
- }
- defer f.Close()
-
- content, err := ioutil.ReadAll(f)
- if err != nil {
- return nil, err
- }
-
- return content, nil
-}
-
-func readContainerFileWithExec(containerID, filename string) ([]byte, error) {
- out, _, err := runCommandWithOutput(exec.Command(dockerBinary, "exec", containerID, "cat", filename))
- return []byte(out), err
-}
-
-// daemonTime provides the current time on the daemon host
-func daemonTime(c *check.C) time.Time {
- if isLocalDaemon {
- return time.Now()
- }
-
- status, body, err := sockRequest("GET", "/info", nil)
- c.Assert(err, check.IsNil)
- c.Assert(status, check.Equals, http.StatusOK)
-
- type infoJSON struct {
- SystemTime string
- }
- var info infoJSON
- err = json.Unmarshal(body, &info)
- c.Assert(err, check.IsNil, check.Commentf("unable to unmarshal GET /info response"))
-
- dt, err := time.Parse(time.RFC3339Nano, info.SystemTime)
- c.Assert(err, check.IsNil, check.Commentf("invalid time format in GET /info response"))
- return dt
-}
-
-// daemonUnixTime returns the current time on the daemon host with nanoseconds precision.
-// It returns the time formatted how the client sends timestamps to the server.
-func daemonUnixTime(c *check.C) string {
- return parseEventTime(daemonTime(c))
-}
-
-func parseEventTime(t time.Time) string {
- return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond()))
-}
-
-func setupRegistry(c *check.C, schema1 bool, auth, tokenURL string) *testRegistryV2 {
- reg, err := newTestRegistryV2(c, schema1, auth, tokenURL)
- c.Assert(err, check.IsNil)
-
- // Wait for registry to be ready to serve requests.
- for i := 0; i != 50; i++ {
- if err = reg.Ping(); err == nil {
- break
- }
- time.Sleep(100 * time.Millisecond)
- }
-
- c.Assert(err, check.IsNil, check.Commentf("Timeout waiting for test registry to become available: %v", err))
- return reg
-}
-
-func setupNotary(c *check.C) *testNotary {
- ts, err := newTestNotary(c)
- c.Assert(err, check.IsNil)
-
- return ts
-}
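[Editor's note, not part of the patch: daemonUnixTime and parseEventTime above
define the timestamp format the client passes via --since/--until -- Unix
seconds, a dot, and zero-padded nanoseconds. A tiny self-contained demonstration,
with parseEventTime copied verbatim from the file being removed:]

    package main

    import (
        "fmt"
        "time"
    )

    // parseEventTime is copied verbatim from the deleted docker_utils.go.
    func parseEventTime(t time.Time) string {
        return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond()))
    }

    func main() {
        t := time.Unix(1475168371, 42) // 42ns past the second
        fmt.Println(parseEventTime(t)) // prints: 1475168371.000000042
    }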
-// appendBaseEnv appends the minimum set of environment variables to exec the
-// docker cli binary for testing with correct configuration to the given env
-// list.
-func appendBaseEnv(isTLS bool, env ...string) []string {
- preserveList := []string{
- // preserve remote test host
- "DOCKER_HOST",
-
- // windows: requires preserving SystemRoot, otherwise dial tcp fails
- // with "GetAddrInfoW: A non-recoverable error occurred during a database lookup."
- "SystemRoot",
-
- // testing help text requires the $PATH to dockerd to be set
- "PATH",
- }
- if isTLS {
- preserveList = append(preserveList, "DOCKER_TLS_VERIFY", "DOCKER_CERT_PATH")
- }
-
- for _, key := range preserveList {
- if val := os.Getenv(key); val != "" {
- env = append(env, fmt.Sprintf("%s=%s", key, val))
- }
- }
- return env
-}
-
-func createTmpFile(c *check.C, content string) string {
- f, err := ioutil.TempFile("", "testfile")
- c.Assert(err, check.IsNil)
-
- filename := f.Name()
-
- err = ioutil.WriteFile(filename, []byte(content), 0644)
- c.Assert(err, check.IsNil)
-
- return filename
-}
-
-func buildImageWithOutInDamon(socket string, name, dockerfile string, useCache bool) (string, error) {
- args := []string{"--host", socket}
- buildCmd := buildImageCmdArgs(args, name, dockerfile, useCache)
- out, exitCode, err := runCommandWithOutput(buildCmd)
- if err != nil || exitCode != 0 {
- return out, fmt.Errorf("failed to build the image: %s, error: %v", out, err)
- }
- return out, nil
-}
-
-func buildImageCmdArgs(args []string, name, dockerfile string, useCache bool) *exec.Cmd {
- args = append(args, []string{"-D", "build", "-t", name}...)
- if !useCache {
- args = append(args, "--no-cache")
- }
- args = append(args, "-")
- buildCmd := exec.Command(dockerBinary, args...)
- buildCmd.Stdin = strings.NewReader(dockerfile)
- return buildCmd
-}
-
-func waitForContainer(contID string, args ...string) error {
- args = append([]string{"run", "--name", contID}, args...)
- cmd := exec.Command(dockerBinary, args...)
- if _, err := runCommand(cmd); err != nil {
- return err
- }
-
- if err := waitRun(contID); err != nil {
- return err
- }
-
- return nil
-}
-
-// waitRun will wait for the specified container to be running, maximum 5 seconds.
-func waitRun(contID string) error {
- return waitInspect(contID, "{{.State.Running}}", "true", 5*time.Second)
-}
-
-// waitExited will wait for the specified container to reach the exited state,
-// subject to a maximum time limit supplied by the caller
-func waitExited(contID string, duration time.Duration) error {
- return waitInspect(contID, "{{.State.Status}}", "exited", duration)
-}
-
-// waitInspect will wait for the specified container to have the specified string
-// in the inspect output. It will wait until the specified timeout
-// is reached.
-func waitInspect(name, expr, expected string, timeout time.Duration) error {
- return waitInspectWithArgs(name, expr, expected, timeout)
-}
-
-func waitInspectWithArgs(name, expr, expected string, timeout time.Duration, arg ...string) error {
- after := time.After(timeout)
-
- args := append(arg, "inspect", "-f", expr, name)
- for {
- cmd := exec.Command(dockerBinary, args...)
- out, _, err := runCommandWithOutput(cmd) - if err != nil { - if !strings.Contains(out, "No such") { - return fmt.Errorf("error executing docker inspect: %v\n%s", err, out) - } - select { - case <-after: - return err - default: - time.Sleep(10 * time.Millisecond) - continue - } - } - - out = strings.TrimSpace(out) - if out == expected { - break - } - - select { - case <-after: - return fmt.Errorf("condition \"%q == %q\" not true in time", out, expected) - default: - } - - time.Sleep(100 * time.Millisecond) - } - return nil -} - -func getInspectBody(c *check.C, version, id string) []byte { - endpoint := fmt.Sprintf("/%s/containers/%s/json", version, id) - status, body, err := sockRequest("GET", endpoint, nil) - c.Assert(err, check.IsNil) - c.Assert(status, check.Equals, http.StatusOK) - return body -} - -// Run a long running idle task in a background container using the -// system-specific default image and command. -func runSleepingContainer(c *check.C, extraArgs ...string) (string, int) { - return runSleepingContainerInImage(c, defaultSleepImage, extraArgs...) -} - -// Run a long running idle task in a background container using the specified -// image and the system-specific command. -func runSleepingContainerInImage(c *check.C, image string, extraArgs ...string) (string, int) { - args := []string{"run", "-d"} - args = append(args, extraArgs...) - args = append(args, image) - args = append(args, defaultSleepCommand...) - return dockerCmd(c, args...) -} - -func getRootUIDGID() (int, int, error) { - uidgid := strings.Split(filepath.Base(dockerBasePath), ".") - if len(uidgid) == 1 { - //user namespace remapping is not turned on; return 0 - return 0, 0, nil - } - uid, err := strconv.Atoi(uidgid[0]) - if err != nil { - return 0, 0, err - } - gid, err := strconv.Atoi(uidgid[1]) - if err != nil { - return 0, 0, err - } - return uid, gid, nil -} - -// minimalBaseImage returns the name of the minimal base image for the current -// daemon platform. 
-func minimalBaseImage() string {
- if daemonPlatform == "windows" {
- return WindowsBaseImage
- }
- return "scratch"
-}
-
-func getGoroutineNumber() (int, error) {
- i := struct {
- NGoroutines int
- }{}
- status, b, err := sockRequest("GET", "/info", nil)
- if err != nil {
- return 0, err
- }
- if status != http.StatusOK {
- return 0, fmt.Errorf("http status code: %d", status)
- }
- if err := json.Unmarshal(b, &i); err != nil {
- return 0, err
- }
- return i.NGoroutines, nil
-}
-
-func waitForGoroutines(expected int) error {
- t := time.After(30 * time.Second)
- for {
- select {
- case <-t:
- n, err := getGoroutineNumber()
- if err != nil {
- return err
- }
- if n > expected {
- return fmt.Errorf("leaked goroutines: expected less than or equal to %d, got: %d", expected, n)
- }
- default:
- n, err := getGoroutineNumber()
- if err != nil {
- return err
- }
- if n <= expected {
- return nil
- }
- time.Sleep(200 * time.Millisecond)
- }
- }
-}
-
-// getErrorMessage returns the error message from an error API response
-func getErrorMessage(c *check.C, body []byte) string {
- var resp types.ErrorResponse
- c.Assert(json.Unmarshal(body, &resp), check.IsNil)
- return strings.TrimSpace(resp.Message)
-}
-
-func waitAndAssert(c *check.C, timeout time.Duration, f checkF, checker check.Checker, args ...interface{}) {
- after := time.After(timeout)
- for {
- v, comment := f(c)
- assert, _ := checker.Check(append([]interface{}{v}, args...), checker.Info().Params)
- select {
- case <-after:
- assert = true
- default:
- }
- if assert {
- if comment != nil {
- args = append(args, comment)
- }
- c.Assert(v, checker, args...)
- return
- }
- time.Sleep(100 * time.Millisecond)
- }
-}
-
-type checkF func(*check.C) (interface{}, check.CommentInterface)
-type reducer func(...interface{}) interface{}
-
-func reducedCheck(r reducer, funcs ...checkF) checkF {
- return func(c *check.C) (interface{}, check.CommentInterface) {
- var values []interface{}
- var comments []string
- for _, f := range funcs {
- v, comment := f(c)
- values = append(values, v)
- if comment != nil {
- comments = append(comments, comment.CheckCommentString())
- }
- }
- return r(values...), check.Commentf("%v", strings.Join(comments, ", "))
- }
-}
-
-func sumAsIntegers(vals ...interface{}) interface{} {
- var s int
- for _, v := range vals {
- s += v.(int)
- }
- return s
-}
diff --git a/integration-cli/events_utils.go b/integration-cli/events_utils.go
deleted file mode 100644
index d0863154b6..0000000000
--- a/integration-cli/events_utils.go
+++ /dev/null
@@ -1,206 +0,0 @@
-package main
-
-import (
- "bufio"
- "bytes"
- "io"
- "os/exec"
- "regexp"
- "strconv"
- "strings"
-
- "github.com/Sirupsen/logrus"
- "github.com/docker/docker/daemon/events/testutils"
- "github.com/docker/docker/pkg/integration/checker"
- "github.com/go-check/check"
-)
-
-// eventMatcher is a function that tries to match an event input.
-// It returns true if the event matches, plus a map of key/value
-// pairs that identify the match.
-type eventMatcher func(text string) (map[string]string, bool)
-
-// eventMatchProcessor is a function to handle an event match.
-// It receives a map of key/value with the information extracted in a match.
-type eventMatchProcessor func(matches map[string]string)
-
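[Editor's note, not part of the patch: the eventObserver defined next pipes
`docker events` through a bufio.Scanner and hands each line to an eventMatcher.
A self-contained sketch of that tail-and-match loop, assuming a local `docker`
binary on PATH and using a plain substring check in place of
eventstestutils.ScanMap; it runs until interrupted:]

    package main

    import (
        "bufio"
        "fmt"
        "os/exec"
        "strings"
    )

    func main() {
        cmd := exec.Command("docker", "events", "--since", "0")
        stdout, err := cmd.StdoutPipe()
        if err != nil {
            panic(err)
        }
        if err := cmd.Start(); err != nil {
            panic(err)
        }
        defer cmd.Process.Kill()

        // Scan the event stream line by line and report container starts,
        // the way matchEventLine filters on an event type and action.
        scanner := bufio.NewScanner(stdout)
        for scanner.Scan() {
            line := scanner.Text()
            if strings.Contains(line, " container start ") {
                fmt.Println("matched:", line)
            }
        }
    }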
-// eventObserver runs an events command and observes its output.
-type eventObserver struct {
- buffer *bytes.Buffer
- command *exec.Cmd
- scanner *bufio.Scanner
- startTime string
- disconnectionError error
-}
-
-// newEventObserver creates the observer and initializes the command
-// without running it. Users must call `eventObserver.Start` to start the command.
-func newEventObserver(c *check.C, args ...string) (*eventObserver, error) {
- since := daemonTime(c).Unix()
- return newEventObserverWithBacklog(c, since, args...)
-}
-
-// newEventObserverWithBacklog creates a new observer, overriding the start time
-// from which the event backlog is returned.
-func newEventObserverWithBacklog(c *check.C, since int64, args ...string) (*eventObserver, error) {
- startTime := strconv.FormatInt(since, 10)
- cmdArgs := []string{"events", "--since", startTime}
- if len(args) > 0 {
- cmdArgs = append(cmdArgs, args...)
- }
- eventsCmd := exec.Command(dockerBinary, cmdArgs...)
- stdout, err := eventsCmd.StdoutPipe()
- if err != nil {
- return nil, err
- }
-
- return &eventObserver{
- buffer: new(bytes.Buffer),
- command: eventsCmd,
- scanner: bufio.NewScanner(stdout),
- startTime: startTime,
- }, nil
-}
-
-// Start starts the events command.
-func (e *eventObserver) Start() error {
- return e.command.Start()
-}
-
-// Stop stops the events command.
-func (e *eventObserver) Stop() {
- e.command.Process.Kill()
- e.command.Process.Release()
-}
-
-// Match tries to match the events output with a given matcher.
-func (e *eventObserver) Match(match eventMatcher, process eventMatchProcessor) {
- for e.scanner.Scan() {
- text := e.scanner.Text()
- e.buffer.WriteString(text)
- e.buffer.WriteString("\n")
-
- if matches, ok := match(text); ok {
- process(matches)
- }
- }
-
- err := e.scanner.Err()
- if err == nil {
- err = io.EOF
- }
-
- logrus.Debugf("EventObserver scanner loop finished: %v", err)
- e.disconnectionError = err
-}
-
-func (e *eventObserver) CheckEventError(c *check.C, id, event string, match eventMatcher) {
- var foundEvent bool
- scannerOut := e.buffer.String()
-
- if e.disconnectionError != nil {
- until := daemonUnixTime(c)
- out, _ := dockerCmd(c, "events", "--since", e.startTime, "--until", until)
- events := strings.Split(strings.TrimSpace(out), "\n")
- for _, e := range events {
- if _, ok := match(e); ok {
- foundEvent = true
- break
- }
- }
- scannerOut = out
- }
- if !foundEvent {
- c.Fatalf("failed to observe event `%s` for %s. Disconnection error: %v\nout:\n%v", event, id, e.disconnectionError, scannerOut)
- }
-}
-
-// matchEventLine matches a text with the event regular expression.
-// It returns the matches and true if the line matches the given id and event type.
-// It returns an empty map and false if there is no match.
-func matchEventLine(id, eventType string, actions map[string]chan bool) eventMatcher {
- return func(text string) (map[string]string, bool) {
- matches := eventstestutils.ScanMap(text)
- if len(matches) == 0 {
- return matches, false
- }
-
- if matchIDAndEventType(matches, id, eventType) {
- if _, ok := actions[matches["action"]]; ok {
- return matches, true
- }
- }
- return matches, false
- }
-}
-
-// processEventMatch signals on an action channel when an event line matches
-// the expected action.
-func processEventMatch(actions map[string]chan bool) eventMatchProcessor {
- return func(matches map[string]string) {
- if ch, ok := actions[matches["action"]]; ok {
- ch <- true
- }
- }
-}
-
-// parseEventAction parses an event line and returns the action.
-// It fails if the text is not in the event format.
-func parseEventAction(c *check.C, text string) string {
- matches := eventstestutils.ScanMap(text)
- return matches["action"]
-}
-
-// eventActionsByIDAndType returns the actions for a given id and type.
-// It fails if the text is not in the event format.
-func eventActionsByIDAndType(c *check.C, events []string, id, eventType string) []string {
- var filtered []string
- for _, event := range events {
- matches := eventstestutils.ScanMap(event)
- c.Assert(matches, checker.Not(checker.IsNil))
- if matchIDAndEventType(matches, id, eventType) {
- filtered = append(filtered, matches["action"])
- }
- }
- return filtered
-}
-
-// matchIDAndEventType returns true if an event matches a given id and type.
-// It also resolves names in the event attributes if the id doesn't match.
-func matchIDAndEventType(matches map[string]string, id, eventType string) bool {
- return matchEventID(matches, id) && matches["eventType"] == eventType
-}
-
-func matchEventID(matches map[string]string, id string) bool {
- matchID := matches["id"] == id || strings.HasPrefix(matches["id"], id)
- if !matchID && matches["attributes"] != "" {
- // try matching a name in the attributes
- attributes := map[string]string{}
- for _, a := range strings.Split(matches["attributes"], ", ") {
- kv := strings.Split(a, "=")
- attributes[kv[0]] = kv[1]
- }
- matchID = attributes["name"] == id
- }
- return matchID
-}
-
-func parseEvents(c *check.C, out, match string) {
- events := strings.Split(strings.TrimSpace(out), "\n")
- for _, event := range events {
- matches := eventstestutils.ScanMap(event)
- matched, err := regexp.MatchString(match, matches["action"])
- c.Assert(err, checker.IsNil)
- c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"]))
- }
-}
-
-func parseEventsWithID(c *check.C, out, match, id string) {
- events := strings.Split(strings.TrimSpace(out), "\n")
- for _, event := range events {
- matches := eventstestutils.ScanMap(event)
- c.Assert(matchEventID(matches, id), checker.True)
-
- matched, err := regexp.MatchString(match, matches["action"])
- c.Assert(err, checker.IsNil)
- c.Assert(matched, checker.True, check.Commentf("Matcher: %s did not match %s", match, matches["action"]))
- }
-}
diff --git a/integration-cli/fixtures/auth/docker-credential-shell-test b/integration-cli/fixtures/auth/docker-credential-shell-test
deleted file mode 100755
index 1980bb1803..0000000000
--- a/integration-cli/fixtures/auth/docker-credential-shell-test
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/bin/bash
-
-set -e
-
-case $1 in
- "store")
- in=$(
[The remainder of this 33-line fixture script, and the GIT binary patch payload
that followed it, are corrupted in this copy of the patch and have been omitted.
The surviving fragments show the script dispatching on $1 ("store", "get", ...),
reading its payload from stdin, and keeping one file per server under
$TEMP/$server.]
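[Editor's note, not part of the patch: the corrupted fixture above is a shell
implementation of Docker's credential-helper protocol. A helper is an
executable named docker-credential-<suffix> that receives "store", "get" or
"erase" as its first argument, reads its input on stdin, and for "get" writes
a JSON body with ServerURL, Username and Secret to stdout. A minimal Go sketch
of that protocol; the one-file-per-server layout under os.TempDir() is an
assumption modeled on the fixture's visible $TEMP/$server fragment, not
Docker's own storage format:]

    package main

    import (
        "encoding/base64"
        "encoding/json"
        "io/ioutil"
        "os"
        "path/filepath"
        "strings"
    )

    type credentials struct {
        ServerURL string
        Username  string
        Secret    string
    }

    // pathFor derives one file per server, standing in for $TEMP/$server.
    func pathFor(server string) string {
        name := base64.URLEncoding.EncodeToString([]byte(server))
        return filepath.Join(os.TempDir(), "cred-"+name+".json")
    }

    func main() {
        if len(os.Args) < 2 {
            os.Exit(1)
        }
        in, _ := ioutil.ReadAll(os.Stdin)
        switch os.Args[1] {
        case "store": // stdin: {"ServerURL":...,"Username":...,"Secret":...}
            var c credentials
            if err := json.Unmarshal(in, &c); err != nil {
                os.Exit(1)
            }
            ioutil.WriteFile(pathFor(c.ServerURL), in, 0600)
        case "get": // stdin: the server URL; stdout: the stored JSON
            b, err := ioutil.ReadFile(pathFor(strings.TrimSpace(string(in))))
            if err != nil {
                os.Exit(1)
            }
            os.Stdout.Write(b)
        case "erase": // stdin: the server URL
            os.Remove(pathFor(strings.TrimSpace(string(in))))
        }
    }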
diff --git a/integration-cli/fixtures/notary/delgkey1.crt b/integration-cli/fixtures/notary/delgkey1.crt
deleted file mode 100644
index 306eeec9a2..0000000000
--- a/integration-cli/fixtures/notary/delgkey1.crt
+++ /dev/null
@@ -1,24 +0,0 @@
------BEGIN CERTIFICATE-----
-MIID8jCCAtqgAwIBAgIJAJkxr+7rAgXbMA0GCSqGSIb3DQEBBQUAMFgxCzAJBgNV
-BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0G
-A1UEChMGRG9ja2VyMRMwEQYDVQQDEwpkZWxlZ2F0aW9uMCAXDTE2MDMwODAyNDEy
-MFoYDzIxMTYwMjEzMDI0MTIwWjBYMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex
-FjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tlcjETMBEGA1UE
-AxMKZGVsZWdhdGlvbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJuz
-To1qoL/RY5pNxdPkP/jiO3f/RTvz20C90EweaKgRdIV/vTUUE+mMRQulpf1vpCP9
-uidGfEoJcq4jM1H59XTYUoUvGbAMP3Iu7Uz0rF5v+Glm82Z0WGI+PkOnwRN2bJi4
-LhAch6QlA/48IOFH/O9jnHYMb45lQFpm+gOvatRyGkPZCftD3ntkhVMk1OJ7EZC4
-LYiwzmuPEYusO/qVgcHkGtIxLWAjGmDzrV3Q5orPVwwUOxNQdRRU1L2bhfUsodcb
-Fgi/LCz4xnGx4YpF0O24Y7/0SPotSyaT0RYyj/j/bIKvYB20g4P7469klde1Ariz
-UEIf12PlaJ/H/PaIlEcCAwEAAaOBvDCBuTAdBgNVHQ4EFgQUXZK4ZGswIq54W4VZ
-OJY7zXvvndwwgYkGA1UdIwSBgTB/gBRdkrhkazAirnhbhVk4ljvNe++d3KFcpFow
-WDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNp
-c2NvMQ8wDQYDVQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb26CCQCZMa/u
-6wIF2zAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQA2ktnjxB2lXF+g
-lTc2qp9LhjofgCWKwLgWEeCwXI2nUBNE4n00hA9or2wer2viWC4IJnG0kTyMzzYT
-m1lBpZ8BP6S3sSkvhohaqS+gBIUVB7U65tAof/SY2UHpeVJ1YpTE4F1GAUfqSY7V
-6IGHZAGiLeUS5kC6pzZA4siBhyCoYKRKEb9R82jSCHeFYS3ntwY1/gqcO/uIidVE
-2hLHlx6vBx9BEfXv31AGLoB3YocSTZLATwlrDHUQG1+oNh5ejQU1x/z+Y62EG5Jb
-u0yLDdJeSgup/DzPEoNpSihtdQZytKMK+KBmh22gDA5h+a6620zTZwCvJYxH9kkM
-IClUWwuD
------END CERTIFICATE-----
diff --git a/integration-cli/fixtures/notary/delgkey1.key b/integration-cli/fixtures/notary/delgkey1.key
deleted file mode 100644
index a0d0a30e6a..0000000000
--- a/integration-cli/fixtures/notary/delgkey1.key
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEowIBAAKCAQEAm7NOjWqgv9Fjmk3F0+Q/+OI7d/9FO/PbQL3QTB5oqBF0hX+9
-NRQT6YxFC6Wl/W+kI/26J0Z8SglyriMzUfn1dNhShS8ZsAw/ci7tTPSsXm/4aWbz
-ZnRYYj4+Q6fBE3ZsmLguEByHpCUD/jwg4Uf872OcdgxvjmVAWmb6A69q1HIaQ9kJ
-+0Pee2SFUyTU4nsRkLgtiLDOa48Ri6w7+pWBweQa0jEtYCMaYPOtXdDmis9XDBQ7
-E1B1FFTUvZuF9Syh1xsWCL8sLPjGcbHhikXQ7bhjv/RI+i1LJpPRFjKP+P9sgq9g
-HbSDg/vjr2SV17UCuLNQQh/XY+Von8f89oiURwIDAQABAoIBAB7DhfDRMaPU5n41
-gbIFNlKhuKhUCsT2wMqA9qgjlgAnOsOp4qObLPgHXBkaCLsTlPX7iw15ktM6HKul
-jt1SqxoEKAHitYugT+Tqur5q1afvLcD9s3f54wC+VaUefzquOnTOZ2ONj4tyOODB
-1qlMhQBzyRVWDbCv9tAl6p5RyaTh+8IULctlER6w9m3upT9NxoRi1PrPBCRiEKKo
-4zDRvfbT/0ucLD20GS6trPv4ihTCTU7ydFujioDkFyNzCzYNGBnImpQ9/xeT5/Ys
-IJQy9Tdn6V0rXMBBb1EhyBQYw5Oxy6d6tzhjvva6LaJBGo9yzX0NHt58Ymhgm1q/
-vscj1pECgYEAyegQFP7dkmUdXdNpdrIdCvKlvni3r/hwB/9H0sJHIJbfTusfzeLL
-5Q8QSZAsaR7tSgJfr9GMdOjntvefYjKLfl3SnG/wF91m05eYfkeiZXc9RGe+XXGu
-wv5u2m/G7a05XpW1JFX+1ORyj2x5KsvF7KDtWJyR5ryIsOwHZNGQpJ8CgYEAxWoo
-r2eJBc9Xj5bhhS0VxUFODXImfeQF2aG2rSeuWMY7k4vmVkJwhBZiPW/dHBu1aMPh
-/SY1W7cgzdVIf2RIF5MgzzkmoisEApZTiSwmP6A2bTx6miXwFCLTCHIDfiXJ0tQA
-Nb+Ln+exks4BfCgKHOqWTcWizKNE/8Gb6SnhB1kCgYAgM1Z9QrhrpJyuXg0v1PA0
-0sYEPpRtCB416Ey4HCvj0qwClhUYbNc/zMs4MDok+b22U/KWw8C21H4/+/X7XzxI
-BwaT1HZiF/lSPZcgbKRFsmKfCjyeAodwqctcIv+C4GGJ6C5fgSeHJHfwz8fzP1Rt
-jKzNuQq71c2nCb2UIqgC2QKBgEieoJDFmVYVy7P6YMNIrnV9bGTt1NMCilRgdH6F
-1lC5uzivge/BSPqN8V2AROoOF1GOnRcucvpmBx8wkhaqoQprCOqxr1CAWl1JRzly
-kC9flCXi1YbW5dXCabb1metRo0h2zAz5hTcxV9UVCt7NK8svUFMTnKuCc+NRKTVA
-PpMhAoGBAJ9rFgZpWHRVuzsangbGslq3fDYGENLJ2fPNjLgfgVLi+YotG9jfgQPW -QCvoSA1CChxzEJEB5hzEOEv9pThnBNg1LWNj+a3N5anW2UBHMEWeCrVFZwJMVdSd -srUFtap7da8iUddc+sHC5hHHFDBdqG4pDck/uTs3CNWRF/ZqzE/G ------END RSA PRIVATE KEY----- diff --git a/integration-cli/fixtures/notary/delgkey2.crt b/integration-cli/fixtures/notary/delgkey2.crt deleted file mode 100644 index 40f2db2cc1..0000000000 --- a/integration-cli/fixtures/notary/delgkey2.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8jCCAtqgAwIBAgIJAMi/AxlwFquJMA0GCSqGSIb3DQEBBQUAMFgxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0G -A1UEChMGRG9ja2VyMRMwEQYDVQQDEwpkZWxlZ2F0aW9uMCAXDTE2MDMwODAyNDEy -MloYDzIxMTYwMjEzMDI0MTIyWjBYMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex -FjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tlcjETMBEGA1UE -AxMKZGVsZWdhdGlvbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL/a -1GO+02jt1p0sME+YGaK4+uZ9jezrpkCXKMsMfItgqCKRTX7YVuR7tnRt/Y1DNVqR -nMeGc77soDag6eW4xrYrv9LwylUsOLanvK1d/8hDxZhzJjqlJBmz6BvLWDZUF9uu -OjULL8yuP2cmRogjn0bqmdeKztrZtDQqQiwsG02nVjfuvVi3rP4G4DhL5fUoHB0R -E6L9Su3/2OWGpdxZqkT7GAbjgLl4/4CXs00493m8xZIHXQ9559PiVlLfk6p6FjEV -7irZp7XXSe1My/0HGebFXkYqEL9+My2od4w+qJmBT23aTduGTo8IZC7g9lwKEykA -hWrYhR5tjkLvOsQIE7ECAwEAAaOBvDCBuTAdBgNVHQ4EFgQUHtEAVcwI3k7W5B6c -L3w+eKQRsIYwgYkGA1UdIwSBgTB/gBQe0QBVzAjeTtbkHpwvfD54pBGwhqFcpFow -WDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNp -c2NvMQ8wDQYDVQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb26CCQDIvwMZ -cBariTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAfjsMtZ+nJ7y5t -rH9xPwWMLmtC5MwzDRvTUAGbNbFwwm8AncfvsDmmERqsr8L2qhY8CZ9vsN4NjjBn -QRmM/ynYA8JTbf/5ZNDnD4D6qTXLgGFqyHcBaorcB9uQ8eiMOFAbhxLYfPrKaYdV -qj+MejcFa3HmzmYCSqsvxRhSje5b4sORe9/3jNheXsX8VZUpWtCHc3k4GiCU6KyS -gpnXkShU4sG92cK72L8pxmGTz8ynNMj/9WKkLxpNIv5u0/D01a3z4wx5k1zfRZiz -IQS+xqxV/ztY844MDknxENlYzcqGj0Fd6hE5OKZxnGaH83A5adldMLlnhG1rscGP -as9uwPYP ------END CERTIFICATE----- diff --git a/integration-cli/fixtures/notary/delgkey2.key b/integration-cli/fixtures/notary/delgkey2.key deleted file mode 100644 index 59e854786e..0000000000 --- a/integration-cli/fixtures/notary/delgkey2.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEAv9rUY77TaO3WnSwwT5gZorj65n2N7OumQJcoywx8i2CoIpFN -fthW5Hu2dG39jUM1WpGcx4ZzvuygNqDp5bjGtiu/0vDKVSw4tqe8rV3/yEPFmHMm -OqUkGbPoG8tYNlQX2646NQsvzK4/ZyZGiCOfRuqZ14rO2tm0NCpCLCwbTadWN+69 -WLes/gbgOEvl9SgcHRETov1K7f/Y5Yal3FmqRPsYBuOAuXj/gJezTTj3ebzFkgdd -D3nn0+JWUt+TqnoWMRXuKtmntddJ7UzL/QcZ5sVeRioQv34zLah3jD6omYFPbdpN -24ZOjwhkLuD2XAoTKQCFatiFHm2OQu86xAgTsQIDAQABAoIBAQCDdASic1WXR58N -AgH4B1dJT0VaOBzOgIfhKbEhruSG+ys4xCY9Cy4+TyWskNBxweMEs1CgxVb5Mlh0 -Fb0tUXWVzFQazDFWOn6BaFy2zPk81nLFCeDfvdcGZWZb5PAECYpvUuk+/vM5Ywq+ -OlOJZB72EDhonwssmI4IUAwXCAGNKjLfC4L+3ZgA3+I1xgxisJ2XWNYSLwHzIDRh -U3zO2NpJi1edTNPltDBTb4iFhajX0SFgbARc+XVTpA3pgQujWo6CNB5YKCPuzIqr -GFsvGSZDVzOUnfOlitaYNW+QIWAQf8VLWULwyFrS5Cb2WR/k7AmojZVuDHvzWrtg -ZMG6b1mBAoGBAOV+3SiX8+khKBpxnOJLq0XlGjFNDWNNB34UIMVkFehRxpUr2261 -HDp4YiC9n7l47sjeFBk4IQf2vG/5MPpuqIixsk2W3siCASdMQypVZMG+zj6xDFfH -8rwQSeZhwjmk2a+A7qgnhqvd/qa7EYOnsn1tLf2iBB2EaHV9lWBJFX0lAoGBANYD -GbAPEiwh4Fns8pf59T3Lp0Q9XvAN3uh4DUU0mFrQ1HQHeXIw1IDCJ9JiRjLX7aHu -79EtDssVPQ9dv0MN5rRULtrutCfRLsomm385PLLBIgBdVApnVvJJIWhQkFFMrhFt -UP+483utiDOcCVXMxAy+1jx23EiWvl2H0xGIwsSdAoGBAMIcM+OJ4vxk1w7G2fNu -HUfZJ/ZbPd+n35Z8X9uVdBI0WMsDdW6GMYIjIJygxuCRsSak8EsEdqvNvkTXeN3Z -iyNTaYTG/1iI3YDnuEeuQrK9OKU+CzqUHHOFM3xxY15uWNFhNHt2MypbcnCD+aRp -y0bbefL1fpWY0OHPfvEZ39shAoGAPbVdJc/irIkEGMni1YGEflIHo/ySMGO/f4aG 
-RQs6Vw1aBS7WjN+ZlprlQpuFpElwwr2TttvoJRS1q4WbjakneZ3AeO5VUhnWBQIG -2jNV1jEsLbC7d9h+UJRXpq18P4T9uBauQV5CDspluIPoiS3m5cntGjgnomKc93kf -mjG1/10CgYA7kgOOva64sjWakL/IgDRiwr0YrJkAfPUZYwxYLHuiW9izUgngpqWd -1wtq+YCsc4l7t8u9Tahb8OE0KSN5RC6QM6b8yW9qFDZ68QAX00+sN6di4qyAZlm+ -rK05W/3JmyvQbvO+JVRQtegZ1ExCj7LGuGOQ5KIpWsBEM3ic9ZP9gw== ------END RSA PRIVATE KEY----- diff --git a/integration-cli/fixtures/notary/delgkey3.crt b/integration-cli/fixtures/notary/delgkey3.crt deleted file mode 100644 index be34eab5c0..0000000000 --- a/integration-cli/fixtures/notary/delgkey3.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8jCCAtqgAwIBAgIJAI3uONxeFQJtMA0GCSqGSIb3DQEBBQUAMFgxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0G -A1UEChMGRG9ja2VyMRMwEQYDVQQDEwpkZWxlZ2F0aW9uMCAXDTE2MDMwODAyNDEy -NFoYDzIxMTYwMjEzMDI0MTI0WjBYMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex -FjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tlcjETMBEGA1UE -AxMKZGVsZWdhdGlvbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOii -Ij01MkSExgurs5owUNxNgRvrZFxNSNGfnscYZiaT/wNcocrOz40vvY29SOBEbCSW -oBlCi0rYu/7LZBqvsP3YItmifpJHGfRiZ6xEQ4rKznY8+8E3FHVChlmVv9x6QPhA -9OpATlSLvcdiXHbohdc+kQsl9qM93+QadRQLmtZ6H5Sv90d1MHNViX+8d/k2WyT0 -8u6fNv0ZHeltnZFYruF82YKJCOPdAJnCLUOXWRSG6xDhhvSewjxz6gFla5n8m+D9 -jvmIUUjoMEhjORUIVeA/lXT0AT3Lx0xE8uyhJQbp+hGtcPCcwYFZdz3yLcrxKO47 -nh6qOygf7I2fiR1ogqECAwEAAaOBvDCBuTAdBgNVHQ4EFgQUUqsFJdVoos2aewDh -m1r66zyXeI4wgYkGA1UdIwSBgTB/gBRSqwUl1WiizZp7AOGbWvrrPJd4jqFcpFow -WDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNp -c2NvMQ8wDQYDVQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb26CCQCN7jjc -XhUCbTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQADcyno8/AwNatf -pjgILCZl1DVrqaKEVbp6ciNgVRCF4cM0bE5W4mjd5tO8d3/yTilry2FPicqxiMps -oGroMFR+X1cZbOf0U32FyEW4EyWm2jjbiuEpnM5J/EeB/QfckqP6whS/QAM7PxDV -Sxd8sKDb9SOGZiickFU4QpG1fdmY/knrrtbzRl7Nk/3tBgRaq+Brg7YNZZKlpUNB -Hp3q0E+MFgVAojpcL7w1oSgoNev+cUNaBdPEmWIEi7F5rosCzmAIhuIY+ghmo9Qg -zy+byAcxLpujl8vZvE1nZKMKZ7oJayOOgjB2Ztk6bO1r+GPtK5VfqEPhKTRDbBlo -xS3tSCDJ ------END CERTIFICATE----- diff --git a/integration-cli/fixtures/notary/delgkey3.key b/integration-cli/fixtures/notary/delgkey3.key deleted file mode 100644 index 4790c95716..0000000000 --- a/integration-cli/fixtures/notary/delgkey3.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA6KIiPTUyRITGC6uzmjBQ3E2BG+tkXE1I0Z+exxhmJpP/A1yh -ys7PjS+9jb1I4ERsJJagGUKLSti7/stkGq+w/dgi2aJ+kkcZ9GJnrERDisrOdjz7 -wTcUdUKGWZW/3HpA+ED06kBOVIu9x2JcduiF1z6RCyX2oz3f5Bp1FAua1noflK/3 -R3Uwc1WJf7x3+TZbJPTy7p82/Rkd6W2dkViu4XzZgokI490AmcItQ5dZFIbrEOGG -9J7CPHPqAWVrmfyb4P2O+YhRSOgwSGM5FQhV4D+VdPQBPcvHTETy7KElBun6Ea1w -8JzBgVl3PfItyvEo7jueHqo7KB/sjZ+JHWiCoQIDAQABAoIBADvh8HpdBTGKFAjR -DAx2v3nWIZP0RgNUiZgcRJzvdOwdUJmm8KbqEZdAYMTpoqbINiY8971I2d5MaCgw -ZvZPn3nYdzAamgZBczbrVdCMSe6iQf9Bt3SHHycIFtlcqOSyO6Mr5V+fagptZk66 -zR52wG0l1+RMw25F8SogfV7JlfP7Qh5Bob0lEN2xpbhwLiNaaB+IHNe0FelmRvmJ -VUonoD0xaos25EXUES7J/9coiBqgRlDVHdUM0oaa/94UnxNPJnoNfte0yd+mC4LZ -JVHo0Zti3x/8SiCYMbLQs5L8AL8VtPu9OPfur/J8+9Rv0Rh+L1Ben+JWzCzUw1Cj -abH1zvkCgYEA9Q06Lu69ZLD31fTv46CphN+dGS/VgvMELkob6VQOhbV3RPhe6vqL -p7D67J53iq4rZY5KX3zuXZ+A5s48atc8gz+hTsrE022QVXmO2ZrE22bEpL+mwpsB -8//ul1UG51XTw6YR9CmLLD3Y4BgMjhSllx4Wwr9e9+PKl+DuSreqhxMCgYEA8wbf -P3zh85jPN92gBG8+VIbVlXYOTK0OllYUoLt4URmLRllmrB6LyRlpODCyb9+JymMg -WvAq5Bc0h8gMbSQEkYaAUq2CfSbyExASUHA+/nZglsTZhPkg5PJImntK6S58KAM7 -RJzyz20gxYA5H4KXFSiF+ONOE9X/cFUPxzF1AfsCgYBfgUY54GYEBkyxIIMWDhnD -ZXtOw6vNG3V3rP5v04jNZ8oSIVKs9fTT6FADREeGzxauv+QQjxo/dtjAG4TEhxpY 
-dMYjdTd8x2jHR1b7TCyI7eaZ5u/RTKRYOlj8tfC43GRqDiFVLZPGLFyIChdqkHVx -DhME15zls+vTgaCdkjNt7QKBgQCfwDywNx8wSZqtVnoBcD7AwYFUpi3wKTIVkLAu -mA0XAnuS2uGq8slgf9uynBAvifnBmDeEj6siFD7roozIkYyPPKLNtlC4hAlMjpv7 -VE2UZ6xGb0+tITaGSN2A7trnPS9P/g/PonvZ7hpEuWzTUbyOo/ytBn4ke99VsBSX -E+OeUQKBgQCgmcwCj2/IH+GOpe9qAG6MTMKK7k22O8fBCrcDybL1pMWIesJEbzpv -T5Atcx9L5ff6Q4Ysghb8ebXsErv4oZ72xyAwWJmbIaPllWn2ffUikzL3grSriWZy -0bz6P9sRqYpbdmX3oVvTfBP5kbv+mtDXOB3h5rGfczKWNMyuZmxDOg== ------END RSA PRIVATE KEY----- diff --git a/integration-cli/fixtures/notary/delgkey4.crt b/integration-cli/fixtures/notary/delgkey4.crt deleted file mode 100644 index 42869c8ef4..0000000000 --- a/integration-cli/fixtures/notary/delgkey4.crt +++ /dev/null @@ -1,24 +0,0 @@ ------BEGIN CERTIFICATE----- -MIID8jCCAtqgAwIBAgIJAKKDRMrryBRKMA0GCSqGSIb3DQEBBQUAMFgxCzAJBgNV -BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEPMA0G -A1UEChMGRG9ja2VyMRMwEQYDVQQDEwpkZWxlZ2F0aW9uMCAXDTE2MDMwODAyNDEy -N1oYDzIxMTYwMjEzMDI0MTI3WjBYMQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0Ex -FjAUBgNVBAcTDVNhbiBGcmFuY2lzY28xDzANBgNVBAoTBkRvY2tlcjETMBEGA1UE -AxMKZGVsZWdhdGlvbjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOCf -Wfff5mX/ko/Y790O04eR7h8/4YtZU3LFItcjhkphMf2V2BRlhWwwW6v96gTN1xsZ -1il6/YXjviWiLjhrtOVLQBE2yK0A7Wwdh9KJg3QgNqwtFrR1MA1LgWto1F7NyEMC -9H6Hc95+bgWx1jN0IflfPh1C1m/sA5xGqHDl+8YzJJUOoa5bh04Yk3aIeecatso/ -z7P5c6KicPcZIjhgjxHYB95It/oj8ZuY0hQZb7B5HEGNyBbT2F0vuElWtp+mXexr -6mzgzvHgaKG36bNCTLxr8BxGA/sbVn01LyI3wpk2uqWzyUFk21M4g2X46OPgKrh7 -2h5b+C0X8DUPi45djHcCAwEAAaOBvDCBuTAdBgNVHQ4EFgQUKcrfRFg+6o2l4xbt -Ll6hV9pjJh8wgYkGA1UdIwSBgTB/gBQpyt9EWD7qjaXjFu0uXqFX2mMmH6FcpFow -WDELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNBMRYwFAYDVQQHEw1TYW4gRnJhbmNp -c2NvMQ8wDQYDVQQKEwZEb2NrZXIxEzARBgNVBAMTCmRlbGVnYXRpb26CCQCig0TK -68gUSjAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQAhdKgYUQ36JSPS -f3Dws09pM5hzSsSae4+nG9XckX6dVQ7sLKmjeeeLrXuzjfygir/+h9cHyShgXFH4 -ZbGpdzf6APG1KRag3/njqEWi+kKZZduxZKvI2EHJhj1xBtf8Qru0TgS7bHPlp9bl -1/61+aIrtj05LQhqzWzehuJFrmSdWP9cnNbvlPdOdgfgkKakAiLGwwGNvMQbqxaO -FIB4UPuPdQgm5bpimd5/CThKbpK9/0nr9K4po/m519nvEKxZzsDw5tefGp9Xqly3 -4pk9uyAxO/E2cL0cVA/WHTVTsHPbO7lXxBi6/EjiTUi0Nj1X+btO8+jCLkJyNY0m -qaiL5k9h ------END CERTIFICATE----- diff --git a/integration-cli/fixtures/notary/delgkey4.key b/integration-cli/fixtures/notary/delgkey4.key deleted file mode 100644 index 7573c2089e..0000000000 --- a/integration-cli/fixtures/notary/delgkey4.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEpAIBAAKCAQEA4J9Z99/mZf+Sj9jv3Q7Th5HuHz/hi1lTcsUi1yOGSmEx/ZXY -FGWFbDBbq/3qBM3XGxnWKXr9heO+JaIuOGu05UtAETbIrQDtbB2H0omDdCA2rC0W -tHUwDUuBa2jUXs3IQwL0fodz3n5uBbHWM3Qh+V8+HULWb+wDnEaocOX7xjMklQ6h -rluHThiTdoh55xq2yj/Ps/lzoqJw9xkiOGCPEdgH3ki3+iPxm5jSFBlvsHkcQY3I -FtPYXS+4SVa2n6Zd7GvqbODO8eBoobfps0JMvGvwHEYD+xtWfTUvIjfCmTa6pbPJ -QWTbUziDZfjo4+AquHvaHlv4LRfwNQ+Ljl2MdwIDAQABAoIBAQCrN2wZsFZr2zK5 -aS/0/Y8poIe01Dm0wWMFcdULzm1XltzHIgfyDCx2ein2YPaXsNtNMhV3yuMiwqU3 -BHdc1GSv/vsX4/11Oea/6YaVafKEeuWRulC7PzRgffRpjh+LICqNQdxh8hfVOePd -fV/8GoKnFf0/yqmv6GQcJBPS8stGmFmjo4rkBGvBBMoiUtMYllQqdfH0DtpI24Jh -nR3lZKAPECkAciV7/Lx6+CUEaNOML2XPbLv6EyRh+J/r80jwE8myzpO7R6I+KCzo -R/xuBb/hrUh5Sd5YmuBMa6WfF9yqawTgmVvkpD9fkRusSPSQCq3oe+AugYWu6Fht -XBiZlvjJAoGBAPPBuUaagaUgHyjIzjbRPBHDhSYJpgYR4l/jcypfrl+m0OFC5acA -QG7Hr8AbStIPGtaJRn2pm8dNVPtFecPoi5jVWux2n5RqYlOnwY0tziuxbhU9GQ/W -oCp+99TJSMHFep0E7IoDk8YSxyA/86qk/Tx7KkUUlXv4sjJts17ZHxstAoGBAOvn -mF9rm8Y+Og17WlUQyf5j7g4soWG/4zMnoGpjocDfHVms/pASKbIBp5aFtDgWCmM5 -H7InptvBUInROHlooK6paJRDLbDgzVa/m+NLHoct7N25J4NiG8xV6Wv7hlrRp+XK 
-zyWL8iL95GnB21HJKvEiVBWvOuZnqfVcnzhbmzyzAoGAYT46jMkcyWRMKfgaFFJa -lXebybX1rtw5pClYC2KKbQxerk8C0SHPkqJFIe2BZtWxzj6LiZw9UkAuk+N+lUJT -VpBfKpCUTyA1w8vb8leAtXueQAjU07W6xdlLQ29dgDgpFzUcrF6K+G0LVXlN2xjh -EdzM2yxACmoHpQiQk1kpCK0CgYAz640Fs1FdmGR+gx+miUNr0eKbDAeY0/rVT2tm -/vai1HhJPGHqo5S5sNOJtXOsxG0U2YW4WDHJPArVyk57qiNzTaXOu9pai5+l8BYH -OIlHhzwSsKWZrQYhOudc9MblRi+Fy9U7lkl8mhSjkh8LKRNibwPCogZ8n2QwtGn2 -pXLNMQKBgQDxvs46CA0M9lGvpl0ggnC7bIYYUEvIlszlBh+o2CgF3IOFlGVcvCia -r18i7hTM5wbcct9OWDzZG4ejBIhtE+gMQ333ofQ64PPJOcfuHxT3Z/fMWfv/yDEj -4e4ZPK44ktcTvuusxAoSe5C5dbcNX2ymAhlRg/F0LyMkhw+qGh4xOQ== ------END RSA PRIVATE KEY----- diff --git a/integration-cli/fixtures/notary/localhost.cert b/integration-cli/fixtures/notary/localhost.cert deleted file mode 100644 index d1233a1b06..0000000000 --- a/integration-cli/fixtures/notary/localhost.cert +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDCTCCAfOgAwIBAgIQTOoFF+ypXwgdXnXHuCTvYDALBgkqhkiG9w0BAQswJjER -MA8GA1UEChMIUXVpY2tUTFMxETAPBgNVBAMTCFF1aWNrVExTMB4XDTE1MDcxNzE5 -NDg1M1oXDTE4MDcwMTE5NDg1M1owJzERMA8GA1UEChMIUXVpY2tUTFMxEjAQBgNV -BAMTCWxvY2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMDO -qvTBAi0ApXLfe90ApJkdkRGwF838Qzt1UFSxomu5fHRV6l3FjX5XCVHiFQ4w3ROh -dMOu9NahfGLJv9VvWU2MV3YoY9Y7lIXpKwnK1v064wuls4nPh13BUWKQKofcY/e2 -qaSPd6/qmSRc/kJUvOI9jZMSX6ZRPu9K4PCqm2CivlbLq9UYuo1AbRGfuqHRvTxg -mQG7WQCzGSvSjuSg5qX3TEh0HckTczJG9ODULNRWNE7ld0W4sfv4VF8R7Uc/G7LO -8QwLCZ9TIl3gYMPCrhUL3Q6z9Jnn1SQS4mhDnPi6ugRYO1X8k3jjdxV9C2sXwUvN -OZI1rLEWl9TJNA7ZXtMCAwEAAaM2MDQwDgYDVR0PAQH/BAQDAgCgMAwGA1UdEwEB -/wQCMAAwFAYDVR0RBA0wC4IJbG9jYWxob3N0MAsGCSqGSIb3DQEBCwOCAQEAH6iq -kM2+UMukGDLEQKHHiauioWJlHDlLXv76bJiNfjSz94B/2XOQMb9PT04//tnGUyPK -K8Dx7RoxSodU6T5VRiz/A36mLOvt2t3bcL/1nHf9sAOHcexGtnCbQbW91V7RKfIL -sjiLNFDkQ9VfVNY+ynQptZoyH1sy07+dplfkIiPzRs5WuVAnEGsX3r6BrhgUITzi -g1B4kpmGZIohP4m6ZEBY5xuo/NQ0+GhjAENQMU38GpuoMyFS0i0dGcbx8weqnI/B -Er/qa0+GE/rBnWY8TiRow8dzpneSFQnUZpJ4EwD9IoOIDHo7k2Nbz2P50HMiCXZf -4RqzctVssRlrRVnO5w== ------END CERTIFICATE----- diff --git a/integration-cli/fixtures/notary/localhost.key b/integration-cli/fixtures/notary/localhost.key deleted file mode 100644 index d7778359a3..0000000000 --- a/integration-cli/fixtures/notary/localhost.key +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIEogIBAAKCAQEAwM6q9MECLQClct973QCkmR2REbAXzfxDO3VQVLGia7l8dFXq -XcWNflcJUeIVDjDdE6F0w6701qF8Ysm/1W9ZTYxXdihj1juUhekrCcrW/TrjC6Wz -ic+HXcFRYpAqh9xj97appI93r+qZJFz+QlS84j2NkxJfplE+70rg8KqbYKK+Vsur -1Ri6jUBtEZ+6odG9PGCZAbtZALMZK9KO5KDmpfdMSHQdyRNzMkb04NQs1FY0TuV3 -Rbix+/hUXxHtRz8bss7xDAsJn1MiXeBgw8KuFQvdDrP0mefVJBLiaEOc+Lq6BFg7 -VfyTeON3FX0LaxfBS805kjWssRaX1Mk0Dtle0wIDAQABAoIBAHbuhNHZROhRn70O -Ui9vOBki/dt1ThnH5AkHQngb4t6kWjrAzILvW2p1cdBKr0ZDqftz+rzCbVD/5+Rg -Iq8bsnB9g23lWEBMHD/GJsAxmRA3hNooamk11IBmwTcVSsbnkdq5mEdkICYphjHC -Ey0DbEf6RBxWlx3WvAWLoNmTw6iFaOCH8IyLavPpe7kLbZc219oNUw2qjCnCXCZE -/NuViADHJBPN8r7g1gmyclJmTumdUK6oHgXEMMPe43vhReGcgcReK9QZjnTcIXPM -4oJOraw+BtoZXVvvIPnC+5ntoLFOzjIzM0kaveReZbdgffqF4zy2vRfCHhWssanc -7a0xR4ECgYEA3Xuvcqy5Xw+v/jVCO0VZj++Z7apA78dY4tWsPx5/0DUTTziTlXkC -ADduEbwX6HgZ/iLvA9j4C3Z4mO8qByby/6UoBU8NEe+PQt6fT7S+dKSP4uy5ZxVM -i5opkEyrJsMbve9Jrlj4bk5CICsydrZ+SBFHnpNGjbduGQick5LORWECgYEA3trt -gepteDGiUYmnnBgjbYtcD11RvpKC8Z/QwGnzN5vk4eBu8r7DkMcLN+SiHjAovlJo -r5j3EbF8sla1zBf/yySdQZFqUGcwtw7MaAKCLdhQl5WsViNMIx6p2OJapu0dzbv2 -KTXrnoRCafcH92k0dUX1ahE9eyc8KX6VhbWwXLMCgYATGCCuEDoC+gVAMzM8jOQF -xrBMjwr+IP+GvskUv/pg5tJ9V/FRR5dmkWDJ4p9lCUWkZTqZ6FCqHFKVTLkg2LjG -VWS34HLOAwskxrCRXJG22KEW/TWWr31j46yFpjZzJwrzOvftMfpo+BI3V8IH/f+x 
-EtxLzYKdoRy6x8VH67YgwQKBgHor2vjV45142FuK83AHa6SqOZXSuvWWrGJ6Ep7p -doSN2jRaLXi2S9AaznOdy6JxFGUCGJHrcccpXgsGrjNtFLXxJKTFa1sYtwQkALsk -ZOltJQF09D1krGC0driHntrUMvqOiKye+sS0DRS6cIuaCUAhUiELwoC5SaoV0zKy -IDUxAoGAOK8Xq+3/sqe79vTpw25RXl+nkAmOAeKjqf3Kh6jbnBhr81rmefyKXB9a -uj0b980tzUnliwA5cCOsyxfN2vASvMnJxFE721QZI04arlcPFHcFqCtmNnUYTcLp -0hgn/yLZptcoxpy+eTBu3eNsxz1Bu/Tx/198+2Wr3MbtGpLNIcA= ------END RSA PRIVATE KEY----- diff --git a/integration-cli/fixtures/registry/cert.pem b/integration-cli/fixtures/registry/cert.pem deleted file mode 100644 index 376054033a..0000000000 --- a/integration-cli/fixtures/registry/cert.pem +++ /dev/null @@ -1,21 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIDfzCCAmegAwIBAgIJAKZjzF7N4zFJMA0GCSqGSIb3DQEBCwUAMFYxCzAJBgNV -BAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0RlZmF1bHQg -Q29tcGFueSBMdGQxEjAQBgNVBAMMCWxvY2FsaG9zdDAeFw0xNjAzMTQxOTAzMDZa -Fw0xNzAzMTQxOTAzMDZaMFYxCzAJBgNVBAYTAlhYMRUwEwYDVQQHDAxEZWZhdWx0 -IENpdHkxHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEjAQBgNVBAMMCWxv -Y2FsaG9zdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMAVEPA6tSNy -MoExHvT8CWvbe0MyYqZjMmUUdGVYyAaoZgmj9HvtGKaUWY/hCtgTond3OKhPq69u -fQSDlHQA/scq4KZovKQJhvBaRb2DqD31KcbcDyh5KUAL1aalbjTLbKmAYSFSoY93 -57KiBei2BmvS55HLhOiO8ccQOq3feH/J/XcszAdAaiGXW3woDOIumYzur6Q8Suyn -cIUEX5Ik7mxS7oGYN1IM++Y+B6aAFT7htAZEvF7RF7sjG7QBfxNPOFg9lBWXzVSv -0vRbVme9OCDD2QOpj8O7XAPuLDwW5b2A8Iex3CJRngBI9vAK5h1Wssst8117bur9 -AiubOrF6cxUCAwEAAaNQME4wHQYDVR0OBBYEFNTGYK7uX19yjCPeGXhmel98amoA -MB8GA1UdIwQYMBaAFNTGYK7uX19yjCPeGXhmel98amoAMAwGA1UdEwQFMAMBAf8w -DQYJKoZIhvcNAQELBQADggEBACW/oF6RgLbTPxb8oPI9424Uv/erYYdxdqIaO3Mz -fQfBEvGu62A0ZLH+av4BTeqBM6iVhN6/Y3hUb8UzbbZAIo/dVJSglW7PXAfUITMM -ca9U2r2cFqgXELZkhde6mTFTYwM3swMCP0HUEo+Hu62NX5gunKr4QMNfTlE3vHEj -jitnkTR0ZVEKHvmdTJC9S92j+NuaJVcwe5UNP1Nj/Ksd/iUUCa2DBnw2N7YwHTDB -jb9cQb8aNVNSrjKP3sknMslVy1JVbUB1LXsth/h+kkVFNP4dsk+dZHn20uIA/VeJ -mJ3Wo54CeTAa3DysiWbIIYsFSASCPvki08ZKI373tCf2RvE= ------END CERTIFICATE----- diff --git a/integration-cli/npipe.go b/integration-cli/npipe.go deleted file mode 100644 index fa531a1b4d..0000000000 --- a/integration-cli/npipe.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows - -package main - -import ( - "net" - "time" -) - -func npipeDial(path string, timeout time.Duration) (net.Conn, error) { - panic("npipe protocol only supported on Windows") -} diff --git a/integration-cli/npipe_windows.go b/integration-cli/npipe_windows.go deleted file mode 100644 index 4fd735f2db..0000000000 --- a/integration-cli/npipe_windows.go +++ /dev/null @@ -1,12 +0,0 @@ -package main - -import ( - "net" - "time" - - "github.com/Microsoft/go-winio" -) - -func npipeDial(path string, timeout time.Duration) (net.Conn, error) { - return winio.DialPipe(path, &timeout) -} diff --git a/integration-cli/registry.go b/integration-cli/registry.go deleted file mode 100644 index e3e16f1cfb..0000000000 --- a/integration-cli/registry.go +++ /dev/null @@ -1,175 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "os/exec" - "path/filepath" - - "github.com/docker/distribution/digest" - "github.com/go-check/check" -) - -const ( - v2binary = "registry-v2" - v2binarySchema1 = "registry-v2-schema1" -) - -type testRegistryV2 struct { - cmd *exec.Cmd - dir string - auth string - username string - password string - email string -} - -func newTestRegistryV2(c *check.C, schema1 bool, auth, tokenURL string) (*testRegistryV2, error) { - tmp, err := ioutil.TempDir("", "registry-test-") - if err != nil { - return nil, err - } - template := `version: 0.1 -loglevel: debug -storage: - 
    filesystem:
-        rootdirectory: %s
-http:
-    addr: %s
-%s`
-	var (
-		authTemplate string
-		username     string
-		password     string
-		email        string
-	)
-	switch auth {
-	case "htpasswd":
-		htpasswdPath := filepath.Join(tmp, "htpasswd")
-		// generated with: htpasswd -Bbn testuser testpassword
-		userpasswd := "testuser:$2y$05$sBsSqk0OpSD1uTZkHXc4FeJ0Z70wLQdAX/82UiHuQOKbNbBrzs63m"
-		username = "testuser"
-		password = "testpassword"
-		email = "test@test.org"
-		if err := ioutil.WriteFile(htpasswdPath, []byte(userpasswd), os.FileMode(0644)); err != nil {
-			return nil, err
-		}
-		authTemplate = fmt.Sprintf(`auth:
-    htpasswd:
-        realm: basic-realm
-        path: %s
-`, htpasswdPath)
-	case "token":
-		authTemplate = fmt.Sprintf(`auth:
-    token:
-        realm: %s
-        service: "registry"
-        issuer: "auth-registry"
-        rootcertbundle: "fixtures/registry/cert.pem"
-`, tokenURL)
-	}
-
-	confPath := filepath.Join(tmp, "config.yaml")
-	config, err := os.Create(confPath)
-	if err != nil {
-		return nil, err
-	}
-	if _, err := fmt.Fprintf(config, template, tmp, privateRegistryURL, authTemplate); err != nil {
-		os.RemoveAll(tmp)
-		return nil, err
-	}
-
-	binary := v2binary
-	if schema1 {
-		binary = v2binarySchema1
-	}
-	cmd := exec.Command(binary, confPath)
-	if err := cmd.Start(); err != nil {
-		os.RemoveAll(tmp)
-		if os.IsNotExist(err) {
-			c.Skip(err.Error())
-		}
-		return nil, err
-	}
-	return &testRegistryV2{
-		cmd:      cmd,
-		dir:      tmp,
-		auth:     auth,
-		username: username,
-		password: password,
-		email:    email,
-	}, nil
-}
-
-func (t *testRegistryV2) Ping() error {
-	// We always ping through HTTP for our test registry.
-	resp, err := http.Get(fmt.Sprintf("http://%s/v2/", privateRegistryURL))
-	if err != nil {
-		return err
-	}
-	resp.Body.Close()
-
-	fail := resp.StatusCode != http.StatusOK
-	if t.auth != "" {
-		// unauthorized is a _good_ status when pinging v2/ and it needs auth
-		fail = fail && resp.StatusCode != http.StatusUnauthorized
-	}
-	if fail {
-		return fmt.Errorf("registry ping replied with an unexpected status code %d", resp.StatusCode)
-	}
-	return nil
-}
-
-func (t *testRegistryV2) Close() {
-	t.cmd.Process.Kill()
-	os.RemoveAll(t.dir)
-}
-
-func (t *testRegistryV2) getBlobFilename(blobDigest digest.Digest) string {
-	// Split the digest into its algorithm and hex components.
-	dgstAlg, dgstHex := blobDigest.Algorithm(), blobDigest.Hex()
-
-	// The path to the target blob data looks something like:
-	//   baseDir + "docker/registry/v2/blobs/sha256/a3/a3ed...46d4/data"
-	return fmt.Sprintf("%s/docker/registry/v2/blobs/%s/%s/%s/data", t.dir, dgstAlg, dgstHex[:2], dgstHex)
-}
-
-func (t *testRegistryV2) readBlobContents(c *check.C, blobDigest digest.Digest) []byte {
-	// Load the target manifest blob.
-	manifestBlob, err := ioutil.ReadFile(t.getBlobFilename(blobDigest))
-	if err != nil {
-		c.Fatalf("unable to read blob: %s", err)
-	}
-
-	return manifestBlob
-}
-
-func (t *testRegistryV2) writeBlobContents(c *check.C, blobDigest digest.Digest, data []byte) {
-	if err := ioutil.WriteFile(t.getBlobFilename(blobDigest), data, os.FileMode(0644)); err != nil {
-		c.Fatalf("unable to write malicious data blob: %s", err)
-	}
-}
-
-func (t *testRegistryV2) tempMoveBlobData(c *check.C, blobDigest digest.Digest) (undo func()) {
-	tempFile, err := ioutil.TempFile("", "registry-temp-blob-")
-	if err != nil {
-		c.Fatalf("unable to get temporary blob file: %s", err)
-	}
-	tempFile.Close()
-
-	blobFilename := t.getBlobFilename(blobDigest)
-
-	// Move the existing data file aside, so that we can replace it with
-	// another blob of data.
- if err := os.Rename(blobFilename, tempFile.Name()); err != nil { - os.Remove(tempFile.Name()) - c.Fatalf("unable to move data blob: %s", err) - } - - return func() { - os.Rename(tempFile.Name(), blobFilename) - os.Remove(tempFile.Name()) - } -} diff --git a/integration-cli/registry_mock.go b/integration-cli/registry_mock.go deleted file mode 100644 index 300bf46425..0000000000 --- a/integration-cli/registry_mock.go +++ /dev/null @@ -1,55 +0,0 @@ -package main - -import ( - "net/http" - "net/http/httptest" - "regexp" - "strings" - "sync" - - "github.com/go-check/check" -) - -type handlerFunc func(w http.ResponseWriter, r *http.Request) - -type testRegistry struct { - server *httptest.Server - hostport string - handlers map[string]handlerFunc - mu sync.Mutex -} - -func (tr *testRegistry) registerHandler(path string, h handlerFunc) { - tr.mu.Lock() - defer tr.mu.Unlock() - tr.handlers[path] = h -} - -func newTestRegistry(c *check.C) (*testRegistry, error) { - testReg := &testRegistry{handlers: make(map[string]handlerFunc)} - - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - url := r.URL.String() - - var matched bool - var err error - for re, function := range testReg.handlers { - matched, err = regexp.MatchString(re, url) - if err != nil { - c.Fatal("Error with handler regexp") - } - if matched { - function(w, r) - break - } - } - - if !matched { - c.Fatalf("Unable to match %s with regexp", url) - } - })) - - testReg.server = ts - testReg.hostport = strings.Replace(ts.URL, "http://", "", 1) - return testReg, nil -} diff --git a/integration-cli/requirements.go b/integration-cli/requirements.go deleted file mode 100644 index 359359dab5..0000000000 --- a/integration-cli/requirements.go +++ /dev/null @@ -1,206 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "net/http" - "os" - "os/exec" - "strings" - "time" - - "github.com/docker/docker/utils" - "github.com/go-check/check" -) - -type testCondition func() bool - -type testRequirement struct { - Condition testCondition - SkipMessage string -} - -// List test requirements -var ( - DaemonIsWindows = testRequirement{ - func() bool { return daemonPlatform == "windows" }, - "Test requires a Windows daemon", - } - DaemonIsLinux = testRequirement{ - func() bool { return daemonPlatform == "linux" }, - "Test requires a Linux daemon", - } - ExperimentalDaemon = testRequirement{ - func() bool { return utils.ExperimentalBuild() }, - "Test requires an experimental daemon", - } - NotExperimentalDaemon = testRequirement{ - func() bool { return !utils.ExperimentalBuild() }, - "Test requires a non experimental daemon", - } - NotArm = testRequirement{ - func() bool { return os.Getenv("DOCKER_ENGINE_GOARCH") != "arm" }, - "Test requires a daemon not running on ARM", - } - NotArm64 = testRequirement{ - func() bool { return os.Getenv("DOCKER_ENGINE_GOARCH") != "arm64" }, - "Test requires a daemon not running on arm64", - } - NotPpc64le = testRequirement{ - func() bool { return os.Getenv("DOCKER_ENGINE_GOARCH") != "ppc64le" }, - "Test requires a daemon not running on ppc64le", - } - NotS390X = testRequirement{ - func() bool { return os.Getenv("DOCKER_ENGINE_GOARCH") != "s390x" }, - "Test requires a daemon not running on s390x", - } - SameHostDaemon = testRequirement{ - func() bool { return isLocalDaemon }, - "Test requires docker daemon to run on the same machine as CLI", - } - UnixCli = testRequirement{ - func() bool { return isUnixCli }, - "Test requires posix utilities or functionality to run.", - } - ExecSupport 
= testRequirement{ - func() bool { return supportsExec }, - "Test requires 'docker exec' capabilities on the tested daemon.", - } - Network = testRequirement{ - func() bool { - // Set a timeout on the GET at 15s - var timeout = time.Duration(15 * time.Second) - var url = "https://hub.docker.com" - - client := http.Client{ - Timeout: timeout, - } - - resp, err := client.Get(url) - if err != nil && strings.Contains(err.Error(), "use of closed network connection") { - panic(fmt.Sprintf("Timeout for GET request on %s", url)) - } - if resp != nil { - resp.Body.Close() - } - return err == nil - }, - "Test requires network availability, environment variable set to none to run in a non-network enabled mode.", - } - Apparmor = testRequirement{ - func() bool { - buf, err := ioutil.ReadFile("/sys/module/apparmor/parameters/enabled") - return err == nil && len(buf) > 1 && buf[0] == 'Y' - }, - "Test requires apparmor is enabled.", - } - RegistryHosting = testRequirement{ - func() bool { - // for now registry binary is built only if we're running inside - // container through `make test`. Figure that out by testing if - // registry binary is in PATH. - _, err := exec.LookPath(v2binary) - return err == nil - }, - fmt.Sprintf("Test requires an environment that can host %s in the same host", v2binary), - } - NotaryHosting = testRequirement{ - func() bool { - // for now notary binary is built only if we're running inside - // container through `make test`. Figure that out by testing if - // notary-server binary is in PATH. - _, err := exec.LookPath(notaryServerBinary) - return err == nil - }, - fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryServerBinary), - } - NotaryServerHosting = testRequirement{ - func() bool { - // for now notary-server binary is built only if we're running inside - // container through `make test`. Figure that out by testing if - // notary-server binary is in PATH. 
-			_, err := exec.LookPath(notaryServerBinary)
-			return err == nil
-		},
-		fmt.Sprintf("Test requires an environment that can host %s in the same host", notaryServerBinary),
-	}
-	NotOverlay = testRequirement{
-		func() bool {
-			return !strings.HasPrefix(daemonStorageDriver, "overlay")
-		},
-		"Test requires underlying root filesystem not to be backed by overlay.",
-	}
-
-	Devicemapper = testRequirement{
-		func() bool {
-			return strings.HasPrefix(daemonStorageDriver, "devicemapper")
-		},
-		"Test requires underlying root filesystem to be backed by devicemapper.",
-	}
-
-	IPv6 = testRequirement{
-		func() bool {
-			// /proc/net/if_inet6 exists only when the kernel has IPv6
-			// support, so the requirement holds when `test -f` succeeds.
-			cmd := exec.Command("test", "-f", "/proc/net/if_inet6")
-			return cmd.Run() == nil
-		},
-		"Test requires support for IPv6",
-	}
-	NotGCCGO = testRequirement{
-		func() bool {
-			out, err := exec.Command("go", "version").Output()
-			if err == nil && strings.Contains(string(out), "gccgo") {
-				return false
-			}
-			return true
-		},
-		"Test requires native Golang compiler instead of GCCGO",
-	}
-	UserNamespaceInKernel = testRequirement{
-		func() bool {
-			if _, err := os.Stat("/proc/self/uid_map"); os.IsNotExist(err) {
-				/*
-				 * This kernel-provided file only exists if user namespaces are
-				 * supported
-				 */
-				return false
-			}
-
-			// We need an extra check on Red Hat based distributions
-			if f, err := os.Open("/sys/module/user_namespace/parameters/enable"); err == nil {
-				defer f.Close()
-				b := make([]byte, 1)
-				_, _ = f.Read(b)
-				if string(b) == "N" {
-					return false
-				}
-				return true
-			}
-
-			return true
-		},
-		"Kernel must have user namespaces configured and enabled.",
-	}
-	NotUserNamespace = testRequirement{
-		func() bool {
-			root := os.Getenv("DOCKER_REMAP_ROOT")
-			if root != "" {
-				return false
-			}
-			return true
-		},
-		"Test cannot be run when remapping root",
-	}
-)
-
-// testRequires checks if the environment satisfies the requirements
-// for the test to run or skips the tests.
-func testRequires(c *check.C, requirements ...testRequirement) {
-	for _, r := range requirements {
-		if !r.Condition() {
-			c.Skip(r.SkipMessage)
-		}
-	}
-}
diff --git a/integration-cli/requirements_unix.go b/integration-cli/requirements_unix.go
deleted file mode 100644
index edc7bc1f91..0000000000
--- a/integration-cli/requirements_unix.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// +build !windows
-
-package main
-
-import (
-	"github.com/docker/docker/pkg/sysinfo"
-)
-
-var (
-	// SysInfo stores information about which features a kernel supports.
- SysInfo *sysinfo.SysInfo - cpuCfsPeriod = testRequirement{ - func() bool { - return SysInfo.CPUCfsPeriod - }, - "Test requires an environment that supports cgroup cfs period.", - } - cpuCfsQuota = testRequirement{ - func() bool { - return SysInfo.CPUCfsQuota - }, - "Test requires an environment that supports cgroup cfs quota.", - } - cpuShare = testRequirement{ - func() bool { - return SysInfo.CPUShares - }, - "Test requires an environment that supports cgroup cpu shares.", - } - oomControl = testRequirement{ - func() bool { - return SysInfo.OomKillDisable - }, - "Test requires Oom control enabled.", - } - pidsLimit = testRequirement{ - func() bool { - return SysInfo.PidsLimit - }, - "Test requires pids limit enabled.", - } - kernelMemorySupport = testRequirement{ - func() bool { - return SysInfo.KernelMemory - }, - "Test requires an environment that supports cgroup kernel memory.", - } - memoryLimitSupport = testRequirement{ - func() bool { - return SysInfo.MemoryLimit - }, - "Test requires an environment that supports cgroup memory limit.", - } - memoryReservationSupport = testRequirement{ - func() bool { - return SysInfo.MemoryReservation - }, - "Test requires an environment that supports cgroup memory reservation.", - } - swapMemorySupport = testRequirement{ - func() bool { - return SysInfo.SwapLimit - }, - "Test requires an environment that supports cgroup swap memory limit.", - } - memorySwappinessSupport = testRequirement{ - func() bool { - return SysInfo.MemorySwappiness - }, - "Test requires an environment that supports cgroup memory swappiness.", - } - blkioWeight = testRequirement{ - func() bool { - return SysInfo.BlkioWeight - }, - "Test requires an environment that supports blkio weight.", - } - cgroupCpuset = testRequirement{ - func() bool { - return SysInfo.Cpuset - }, - "Test requires an environment that supports cgroup cpuset.", - } - seccompEnabled = testRequirement{ - func() bool { - return supportsSeccomp && SysInfo.Seccomp - }, - "Test requires that seccomp support be enabled in the daemon.", - } - bridgeNfIptables = testRequirement{ - func() bool { - return !SysInfo.BridgeNFCallIPTablesDisabled - }, - "Test requires that bridge-nf-call-iptables support be enabled in the daemon.", - } - bridgeNfIP6tables = testRequirement{ - func() bool { - return !SysInfo.BridgeNFCallIP6TablesDisabled - }, - "Test requires that bridge-nf-call-ip6tables support be enabled in the daemon.", - } -) - -func init() { - SysInfo = sysinfo.New(true) -} diff --git a/integration-cli/test_vars_exec.go b/integration-cli/test_vars_exec.go deleted file mode 100644 index 7633b346ba..0000000000 --- a/integration-cli/test_vars_exec.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !test_no_exec - -package main - -const ( - // indicates docker daemon tested supports 'docker exec' - supportsExec = true -) diff --git a/integration-cli/test_vars_noexec.go b/integration-cli/test_vars_noexec.go deleted file mode 100644 index 0845090524..0000000000 --- a/integration-cli/test_vars_noexec.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build test_no_exec - -package main - -const ( - // indicates docker daemon tested supports 'docker exec' - supportsExec = false -) diff --git a/integration-cli/test_vars_noseccomp.go b/integration-cli/test_vars_noseccomp.go deleted file mode 100644 index 2f47ab07a0..0000000000 --- a/integration-cli/test_vars_noseccomp.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !seccomp - -package main - -const ( - // indicates docker daemon built with seccomp support - supportsSeccomp = false -) diff --git 
a/integration-cli/test_vars_seccomp.go b/integration-cli/test_vars_seccomp.go deleted file mode 100644 index 00cf697209..0000000000 --- a/integration-cli/test_vars_seccomp.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build seccomp - -package main - -const ( - // indicates docker daemon built with seccomp support - supportsSeccomp = true -) diff --git a/integration-cli/test_vars_unix.go b/integration-cli/test_vars_unix.go deleted file mode 100644 index 853889abe7..0000000000 --- a/integration-cli/test_vars_unix.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !windows - -package main - -const ( - // identifies if test suite is running on a unix platform - isUnixCli = true - - expectedFileChmod = "-rw-r--r--" - - // On Unix variants, the busybox image comes with the `top` command which - // runs indefinitely while still being interruptible by a signal. - defaultSleepImage = "busybox" -) - -var defaultSleepCommand = []string{"top"} diff --git a/integration-cli/test_vars_windows.go b/integration-cli/test_vars_windows.go deleted file mode 100644 index ff51f89bde..0000000000 --- a/integration-cli/test_vars_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package main - -const ( - // identifies if test suite is running on a unix platform - isUnixCli = false - - // this is the expected file permission set on windows: gh#11395 - expectedFileChmod = "-rwxr-xr-x" - - // On Windows, the busybox image doesn't have the `top` command, so we rely - // on `sleep` with a high duration. - defaultSleepImage = "busybox" -) - -// TODO Windows: In TP5, decrease this sleep time, as performance will be better -var defaultSleepCommand = []string{"sleep", "240"} diff --git a/integration-cli/trust_server.go b/integration-cli/trust_server.go deleted file mode 100644 index 66b53389be..0000000000 --- a/integration-cli/trust_server.go +++ /dev/null @@ -1,320 +0,0 @@ -package main - -import ( - "fmt" - "io/ioutil" - "net" - "net/http" - "os" - "os/exec" - "path/filepath" - "strings" - "time" - - "github.com/docker/docker/cliconfig" - "github.com/docker/docker/pkg/integration/checker" - "github.com/docker/docker/pkg/tlsconfig" - "github.com/go-check/check" -) - -var notaryBinary = "notary" -var notaryServerBinary = "notary-server" - -type keyPair struct { - Public string - Private string -} - -type testNotary struct { - cmd *exec.Cmd - dir string - keys []keyPair -} - -const notaryHost = "localhost:4443" -const notaryURL = "https://" + notaryHost - -func newTestNotary(c *check.C) (*testNotary, error) { - // generate server config - template := `{ - "server": { - "http_addr": "%s", - "tls_key_file": "%s", - "tls_cert_file": "%s" - }, - "trust_service": { - "type": "local", - "hostname": "", - "port": "", - "key_algorithm": "ed25519" - }, - "logging": { - "level": "debug" - }, - "storage": { - "backend": "memory" - } -}` - tmp, err := ioutil.TempDir("", "notary-test-") - if err != nil { - return nil, err - } - confPath := filepath.Join(tmp, "config.json") - config, err := os.Create(confPath) - defer config.Close() - if err != nil { - return nil, err - } - - workingDir, err := os.Getwd() - if err != nil { - return nil, err - } - if _, err := fmt.Fprintf(config, template, notaryHost, filepath.Join(workingDir, "fixtures/notary/localhost.key"), filepath.Join(workingDir, "fixtures/notary/localhost.cert")); err != nil { - os.RemoveAll(tmp) - return nil, err - } - - // generate client config - clientConfPath := filepath.Join(tmp, "client-config.json") - clientConfig, err := os.Create(clientConfPath) - defer clientConfig.Close() 
- if err != nil { - return nil, err - } - template = `{ - "trust_dir" : "%s", - "remote_server": { - "url": "%s", - "skipTLSVerify": true - } -}` - if _, err = fmt.Fprintf(clientConfig, template, filepath.Join(cliconfig.ConfigDir(), "trust"), notaryURL); err != nil { - os.RemoveAll(tmp) - return nil, err - } - - // load key fixture filenames - var keys []keyPair - for i := 1; i < 5; i++ { - keys = append(keys, keyPair{ - Public: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.crt", i)), - Private: filepath.Join(workingDir, fmt.Sprintf("fixtures/notary/delgkey%v.key", i)), - }) - } - - // run notary-server - cmd := exec.Command(notaryServerBinary, "-config", confPath) - if err := cmd.Start(); err != nil { - os.RemoveAll(tmp) - if os.IsNotExist(err) { - c.Skip(err.Error()) - } - return nil, err - } - - testNotary := &testNotary{ - cmd: cmd, - dir: tmp, - keys: keys, - } - - // Wait for notary to be ready to serve requests. - for i := 1; i <= 20; i++ { - if err = testNotary.Ping(); err == nil { - break - } - time.Sleep(10 * time.Millisecond * time.Duration(i*i)) - } - - if err != nil { - c.Fatalf("Timeout waiting for test notary to become available: %s", err) - } - - return testNotary, nil -} - -func (t *testNotary) Ping() error { - tlsConfig := tlsconfig.ClientDefault - tlsConfig.InsecureSkipVerify = true - client := http.Client{ - Transport: &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, - TLSClientConfig: &tlsConfig, - }, - } - resp, err := client.Get(fmt.Sprintf("%s/v2/", notaryURL)) - if err != nil { - return err - } - if resp.StatusCode != 200 { - return fmt.Errorf("notary ping replied with an unexpected status code %d", resp.StatusCode) - } - return nil -} - -func (t *testNotary) Close() { - t.cmd.Process.Kill() - os.RemoveAll(t.dir) -} - -func (s *DockerTrustSuite) trustedCmd(cmd *exec.Cmd) { - pwd := "12345678" - trustCmdEnv(cmd, notaryURL, pwd, pwd) -} - -func (s *DockerTrustSuite) trustedCmdWithServer(cmd *exec.Cmd, server string) { - pwd := "12345678" - trustCmdEnv(cmd, server, pwd, pwd) -} - -func (s *DockerTrustSuite) trustedCmdWithPassphrases(cmd *exec.Cmd, rootPwd, repositoryPwd string) { - trustCmdEnv(cmd, notaryURL, rootPwd, repositoryPwd) -} - -func trustCmdEnv(cmd *exec.Cmd, server, rootPwd, repositoryPwd string) { - env := []string{ - "DOCKER_CONTENT_TRUST=1", - fmt.Sprintf("DOCKER_CONTENT_TRUST_SERVER=%s", server), - fmt.Sprintf("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE=%s", rootPwd), - fmt.Sprintf("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE=%s", repositoryPwd), - } - cmd.Env = append(os.Environ(), env...) 
-} - -func (s *DockerTrustSuite) setupTrustedImage(c *check.C, name string) string { - repoName := fmt.Sprintf("%v/dockercli/%s:latest", privateRegistryURL, name) - // tag the image and upload it to the private registry - dockerCmd(c, "tag", "busybox", repoName) - - pushCmd := exec.Command(dockerBinary, "push", repoName) - s.trustedCmd(pushCmd) - out, _, err := runCommandWithOutput(pushCmd) - - if err != nil { - c.Fatalf("Error running trusted push: %s\n%s", err, out) - } - if !strings.Contains(string(out), "Signing and pushing trust metadata") { - c.Fatalf("Missing expected output on trusted push:\n%s", out) - } - - if out, status := dockerCmd(c, "rmi", repoName); status != 0 { - c.Fatalf("Error removing image %q\n%s", repoName, out) - } - - return repoName -} - -func notaryClientEnv(cmd *exec.Cmd) { - pwd := "12345678" - env := []string{ - fmt.Sprintf("NOTARY_ROOT_PASSPHRASE=%s", pwd), - fmt.Sprintf("NOTARY_TARGETS_PASSPHRASE=%s", pwd), - fmt.Sprintf("NOTARY_SNAPSHOT_PASSPHRASE=%s", pwd), - fmt.Sprintf("NOTARY_DELEGATION_PASSPHRASE=%s", pwd), - } - cmd.Env = append(os.Environ(), env...) -} - -func (s *DockerTrustSuite) notaryInitRepo(c *check.C, repoName string) { - initCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "init", repoName) - notaryClientEnv(initCmd) - out, _, err := runCommandWithOutput(initCmd) - if err != nil { - c.Fatalf("Error initializing notary repository: %s\n", out) - } -} - -func (s *DockerTrustSuite) notaryCreateDelegation(c *check.C, repoName, role string, pubKey string, paths ...string) { - pathsArg := "--all-paths" - if len(paths) > 0 { - pathsArg = "--paths=" + strings.Join(paths, ",") - } - - delgCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), - "delegation", "add", repoName, role, pubKey, pathsArg) - notaryClientEnv(delgCmd) - out, _, err := runCommandWithOutput(delgCmd) - if err != nil { - c.Fatalf("Error adding %s role to notary repository: %s\n", role, out) - } -} - -func (s *DockerTrustSuite) notaryPublish(c *check.C, repoName string) { - pubCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "publish", repoName) - notaryClientEnv(pubCmd) - out, _, err := runCommandWithOutput(pubCmd) - if err != nil { - c.Fatalf("Error publishing notary repository: %s\n", out) - } -} - -func (s *DockerTrustSuite) notaryImportKey(c *check.C, repoName, role string, privKey string) { - impCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "key", - "import", privKey, "-g", repoName, "-r", role) - notaryClientEnv(impCmd) - out, _, err := runCommandWithOutput(impCmd) - if err != nil { - c.Fatalf("Error importing key to notary repository: %s\n", out) - } -} - -func (s *DockerTrustSuite) notaryListTargetsInRole(c *check.C, repoName, role string) map[string]string { - listCmd := exec.Command(notaryBinary, "-c", filepath.Join(s.not.dir, "client-config.json"), "list", - repoName, "-r", role) - notaryClientEnv(listCmd) - out, _, err := runCommandWithOutput(listCmd) - if err != nil { - c.Fatalf("Error listing targets in notary repository: %s\n", out) - } - - // should look something like: - // NAME DIGEST SIZE (BYTES) ROLE - // ------------------------------------------------------------------------------------------------------ - // latest 24a36bbc059b1345b7e8be0df20f1b23caa3602e85d42fff7ecd9d0bd255de56 1377 targets - - targets := make(map[string]string) - - // no target - lines := strings.Split(strings.TrimSpace(out), "\n") - if 
len(lines) == 1 && strings.Contains(out, "No targets present in this repository.") { - return targets - } - - // otherwise, there is at least one target - c.Assert(len(lines), checker.GreaterOrEqualThan, 3) - - for _, line := range lines[2:] { - tokens := strings.Fields(line) - c.Assert(tokens, checker.HasLen, 4) - targets[tokens[0]] = tokens[3] - } - - return targets -} - -func (s *DockerTrustSuite) assertTargetInRoles(c *check.C, repoName, target string, roles ...string) { - // check all the roles - for _, role := range roles { - targets := s.notaryListTargetsInRole(c, repoName, role) - roleName, ok := targets[target] - c.Assert(ok, checker.True) - c.Assert(roleName, checker.Equals, role) - } -} - -func (s *DockerTrustSuite) assertTargetNotInRoles(c *check.C, repoName, target string, roles ...string) { - targets := s.notaryListTargetsInRole(c, repoName, "targets") - - roleName, ok := targets[target] - if ok { - for _, role := range roles { - c.Assert(roleName, checker.Not(checker.Equals), role) - } - } -} diff --git a/integration-cli/utils.go b/integration-cli/utils.go deleted file mode 100644 index 149f1c4540..0000000000 --- a/integration-cli/utils.go +++ /dev/null @@ -1,85 +0,0 @@ -package main - -import ( - "io" - "os" - "os/exec" - "time" - - "github.com/docker/docker/pkg/integration" -) - -func getPrefixAndSlashFromDaemonPlatform() (prefix, slash string) { - if daemonPlatform == "windows" { - return "c:", `\` - } - return "", "/" -} - -func getExitCode(err error) (int, error) { - return integration.GetExitCode(err) -} - -func processExitCode(err error) (exitCode int) { - return integration.ProcessExitCode(err) -} - -func isKilled(err error) bool { - return integration.IsKilled(err) -} - -func runCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) { - return integration.RunCommandWithOutput(cmd) -} - -func runCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) { - return integration.RunCommandWithStdoutStderr(cmd) -} - -func runCommandWithOutputForDuration(cmd *exec.Cmd, duration time.Duration) (output string, exitCode int, timedOut bool, err error) { - return integration.RunCommandWithOutputForDuration(cmd, duration) -} - -func runCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) { - return integration.RunCommandWithOutputAndTimeout(cmd, timeout) -} - -func runCommand(cmd *exec.Cmd) (exitCode int, err error) { - return integration.RunCommand(cmd) -} - -func runCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) { - return integration.RunCommandPipelineWithOutput(cmds...) 
-} - -func unmarshalJSON(data []byte, result interface{}) error { - return integration.UnmarshalJSON(data, result) -} - -func convertSliceOfStringsToMap(input []string) map[string]struct{} { - return integration.ConvertSliceOfStringsToMap(input) -} - -func compareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error { - return integration.CompareDirectoryEntries(e1, e2) -} - -func listTar(f io.Reader) ([]string, error) { - return integration.ListTar(f) -} - -func randomTmpDirPath(s string, platform string) string { - return integration.RandomTmpDirPath(s, platform) -} - -func consumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) { - return integration.ConsumeWithSpeed(reader, chunkSize, interval, stop) -} - -func parseCgroupPaths(procCgroupData string) map[string]string { - return integration.ParseCgroupPaths(procCgroupData) -} - -func runAtDifferentDate(date time.Time, block func()) { - integration.RunAtDifferentDate(date, block) -} diff --git a/layer/empty.go b/layer/empty.go deleted file mode 100644 index 5e1cb184b6..0000000000 --- a/layer/empty.go +++ /dev/null @@ -1,48 +0,0 @@ -package layer - -import ( - "archive/tar" - "bytes" - "io" - "io/ioutil" -) - -// DigestSHA256EmptyTar is the canonical sha256 digest of empty tar file - -// (1024 NULL bytes) -const DigestSHA256EmptyTar = DiffID("sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef") - -type emptyLayer struct{} - -// EmptyLayer is a layer that corresponds to empty tar. -var EmptyLayer = &emptyLayer{} - -func (el *emptyLayer) TarStream() (io.ReadCloser, error) { - buf := new(bytes.Buffer) - tarWriter := tar.NewWriter(buf) - tarWriter.Close() - return ioutil.NopCloser(buf), nil -} - -func (el *emptyLayer) ChainID() ChainID { - return ChainID(DigestSHA256EmptyTar) -} - -func (el *emptyLayer) DiffID() DiffID { - return DigestSHA256EmptyTar -} - -func (el *emptyLayer) Parent() Layer { - return nil -} - -func (el *emptyLayer) Size() (size int64, err error) { - return 0, nil -} - -func (el *emptyLayer) DiffSize() (size int64, err error) { - return 0, nil -} - -func (el *emptyLayer) Metadata() (map[string]string, error) { - return make(map[string]string), nil -} diff --git a/layer/empty_test.go b/layer/empty_test.go deleted file mode 100644 index c22da7665d..0000000000 --- a/layer/empty_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package layer - -import ( - "io" - "testing" - - "github.com/docker/distribution/digest" -) - -func TestEmptyLayer(t *testing.T) { - if EmptyLayer.ChainID() != ChainID(DigestSHA256EmptyTar) { - t.Fatal("wrong ID for empty layer") - } - - if EmptyLayer.DiffID() != DigestSHA256EmptyTar { - t.Fatal("wrong DiffID for empty layer") - } - - if EmptyLayer.Parent() != nil { - t.Fatal("expected no parent for empty layer") - } - - if size, err := EmptyLayer.Size(); err != nil || size != 0 { - t.Fatal("expected zero size for empty layer") - } - - if diffSize, err := EmptyLayer.DiffSize(); err != nil || diffSize != 0 { - t.Fatal("expected zero diffsize for empty layer") - } - - tarStream, err := EmptyLayer.TarStream() - if err != nil { - t.Fatalf("error streaming tar for empty layer: %v", err) - } - - digester := digest.Canonical.New() - _, err = io.Copy(digester.Hash(), tarStream) - - if err != nil { - t.Fatalf("error hashing empty tar layer: %v", err) - } - - if digester.Digest() != digest.Digest(DigestSHA256EmptyTar) { - t.Fatal("empty layer tar stream hashes to wrong value") - } -} diff --git a/layer/filestore.go b/layer/filestore.go deleted 
file mode 100644 index 6361af647c..0000000000 --- a/layer/filestore.go +++ /dev/null @@ -1,354 +0,0 @@ -package layer - -import ( - "compress/gzip" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "regexp" - "strconv" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/ioutils" -) - -var ( - stringIDRegexp = regexp.MustCompile(`^[a-f0-9]{64}(-init)?$`) - supportedAlgorithms = []digest.Algorithm{ - digest.SHA256, - // digest.SHA384, // Currently not used - // digest.SHA512, // Currently not used - } -) - -type fileMetadataStore struct { - root string -} - -type fileMetadataTransaction struct { - store *fileMetadataStore - root string -} - -// NewFSMetadataStore returns an instance of a metadata store -// which is backed by files on disk using the provided root -// as the root of metadata files. -func NewFSMetadataStore(root string) (MetadataStore, error) { - if err := os.MkdirAll(root, 0700); err != nil { - return nil, err - } - return &fileMetadataStore{ - root: root, - }, nil -} - -func (fms *fileMetadataStore) getLayerDirectory(layer ChainID) string { - dgst := digest.Digest(layer) - return filepath.Join(fms.root, string(dgst.Algorithm()), dgst.Hex()) -} - -func (fms *fileMetadataStore) getLayerFilename(layer ChainID, filename string) string { - return filepath.Join(fms.getLayerDirectory(layer), filename) -} - -func (fms *fileMetadataStore) getMountDirectory(mount string) string { - return filepath.Join(fms.root, "mounts", mount) -} - -func (fms *fileMetadataStore) getMountFilename(mount, filename string) string { - return filepath.Join(fms.getMountDirectory(mount), filename) -} - -func (fms *fileMetadataStore) StartTransaction() (MetadataTransaction, error) { - tmpDir := filepath.Join(fms.root, "tmp") - if err := os.MkdirAll(tmpDir, 0755); err != nil { - return nil, err - } - - td, err := ioutil.TempDir(tmpDir, "layer-") - if err != nil { - return nil, err - } - // Create a new tempdir - return &fileMetadataTransaction{ - store: fms, - root: td, - }, nil -} - -func (fm *fileMetadataTransaction) SetSize(size int64) error { - content := fmt.Sprintf("%d", size) - return ioutil.WriteFile(filepath.Join(fm.root, "size"), []byte(content), 0644) -} - -func (fm *fileMetadataTransaction) SetParent(parent ChainID) error { - return ioutil.WriteFile(filepath.Join(fm.root, "parent"), []byte(digest.Digest(parent).String()), 0644) -} - -func (fm *fileMetadataTransaction) SetDiffID(diff DiffID) error { - return ioutil.WriteFile(filepath.Join(fm.root, "diff"), []byte(digest.Digest(diff).String()), 0644) -} - -func (fm *fileMetadataTransaction) SetCacheID(cacheID string) error { - return ioutil.WriteFile(filepath.Join(fm.root, "cache-id"), []byte(cacheID), 0644) -} - -func (fm *fileMetadataTransaction) SetDescriptor(ref distribution.Descriptor) error { - jsonRef, err := json.Marshal(ref) - if err != nil { - return err - } - return ioutil.WriteFile(filepath.Join(fm.root, "descriptor.json"), jsonRef, 0644) -} - -func (fm *fileMetadataTransaction) TarSplitWriter(compressInput bool) (io.WriteCloser, error) { - f, err := os.OpenFile(filepath.Join(fm.root, "tar-split.json.gz"), os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return nil, err - } - var wc io.WriteCloser - if compressInput { - wc = gzip.NewWriter(f) - } else { - wc = f - } - - return ioutils.NewWriteCloserWrapper(wc, func() error { - wc.Close() - return f.Close() - }), nil -} - -func (fm 
*fileMetadataTransaction) Commit(layer ChainID) error { - finalDir := fm.store.getLayerDirectory(layer) - if err := os.MkdirAll(filepath.Dir(finalDir), 0755); err != nil { - return err - } - return os.Rename(fm.root, finalDir) -} - -func (fm *fileMetadataTransaction) Cancel() error { - return os.RemoveAll(fm.root) -} - -func (fm *fileMetadataTransaction) String() string { - return fm.root -} - -func (fms *fileMetadataStore) GetSize(layer ChainID) (int64, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "size")) - if err != nil { - return 0, err - } - - size, err := strconv.ParseInt(string(content), 10, 64) - if err != nil { - return 0, err - } - - return size, nil -} - -func (fms *fileMetadataStore) GetParent(layer ChainID) (ChainID, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "parent")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return ChainID(dgst), nil -} - -func (fms *fileMetadataStore) GetDiffID(layer ChainID) (DiffID, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "diff")) - if err != nil { - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return DiffID(dgst), nil -} - -func (fms *fileMetadataStore) GetCacheID(layer ChainID) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getLayerFilename(layer, "cache-id")) - if err != nil { - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid cache id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetDescriptor(layer ChainID) (distribution.Descriptor, error) { - content, err := ioutil.ReadFile(fms.getLayerFilename(layer, "descriptor.json")) - if err != nil { - if os.IsNotExist(err) { - // only return empty descriptor to represent what is stored - return distribution.Descriptor{}, nil - } - return distribution.Descriptor{}, err - } - - var ref distribution.Descriptor - err = json.Unmarshal(content, &ref) - if err != nil { - return distribution.Descriptor{}, err - } - return ref, err -} - -func (fms *fileMetadataStore) TarSplitReader(layer ChainID) (io.ReadCloser, error) { - fz, err := os.Open(fms.getLayerFilename(layer, "tar-split.json.gz")) - if err != nil { - return nil, err - } - f, err := gzip.NewReader(fz) - if err != nil { - return nil, err - } - - return ioutils.NewReadCloserWrapper(f, func() error { - f.Close() - return fz.Close() - }), nil -} - -func (fms *fileMetadataStore) SetMountID(mount string, mountID string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "mount-id"), []byte(mountID), 0644) -} - -func (fms *fileMetadataStore) SetInitID(mount string, init string) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "init-id"), []byte(init), 0644) -} - -func (fms *fileMetadataStore) SetMountParent(mount string, parent ChainID) error { - if err := os.MkdirAll(fms.getMountDirectory(mount), 0755); err != nil { - return err - } - return ioutil.WriteFile(fms.getMountFilename(mount, "parent"), []byte(digest.Digest(parent).String()), 0644) -} - -func (fms *fileMetadataStore) GetMountID(mount 
string) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "mount-id")) - if err != nil { - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid mount id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetInitID(mount string) (string, error) { - contentBytes, err := ioutil.ReadFile(fms.getMountFilename(mount, "init-id")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - content := strings.TrimSpace(string(contentBytes)) - - if !stringIDRegexp.MatchString(content) { - return "", errors.New("invalid init id value") - } - - return content, nil -} - -func (fms *fileMetadataStore) GetMountParent(mount string) (ChainID, error) { - content, err := ioutil.ReadFile(fms.getMountFilename(mount, "parent")) - if err != nil { - if os.IsNotExist(err) { - return "", nil - } - return "", err - } - - dgst, err := digest.ParseDigest(strings.TrimSpace(string(content))) - if err != nil { - return "", err - } - - return ChainID(dgst), nil -} - -func (fms *fileMetadataStore) List() ([]ChainID, []string, error) { - var ids []ChainID - for _, algorithm := range supportedAlgorithms { - fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, string(algorithm))) - if err != nil { - if os.IsNotExist(err) { - continue - } - return nil, nil, err - } - - for _, fi := range fileInfos { - if fi.IsDir() && fi.Name() != "mounts" { - dgst := digest.NewDigestFromHex(string(algorithm), fi.Name()) - if err := dgst.Validate(); err != nil { - logrus.Debugf("Ignoring invalid digest %s:%s", algorithm, fi.Name()) - } else { - ids = append(ids, ChainID(dgst)) - } - } - } - } - - fileInfos, err := ioutil.ReadDir(filepath.Join(fms.root, "mounts")) - if err != nil { - if os.IsNotExist(err) { - return ids, []string{}, nil - } - return nil, nil, err - } - - var mounts []string - for _, fi := range fileInfos { - if fi.IsDir() { - mounts = append(mounts, fi.Name()) - } - } - - return ids, mounts, nil -} - -func (fms *fileMetadataStore) Remove(layer ChainID) error { - return os.RemoveAll(fms.getLayerDirectory(layer)) -} - -func (fms *fileMetadataStore) RemoveMount(mount string) error { - return os.RemoveAll(fms.getMountDirectory(mount)) -} diff --git a/layer/filestore_test.go b/layer/filestore_test.go deleted file mode 100644 index 55e3b28530..0000000000 --- a/layer/filestore_test.go +++ /dev/null @@ -1,104 +0,0 @@ -package layer - -import ( - "fmt" - "io/ioutil" - "math/rand" - "os" - "path/filepath" - "strings" - "syscall" - "testing" - - "github.com/docker/distribution/digest" -) - -func randomLayerID(seed int64) ChainID { - r := rand.New(rand.NewSource(seed)) - - return ChainID(digest.FromBytes([]byte(fmt.Sprintf("%d", r.Int63())))) -} - -func newFileMetadataStore(t *testing.T) (*fileMetadataStore, string, func()) { - td, err := ioutil.TempDir("", "layers-") - if err != nil { - t.Fatal(err) - } - fms, err := NewFSMetadataStore(td) - if err != nil { - t.Fatal(err) - } - - return fms.(*fileMetadataStore), td, func() { - if err := os.RemoveAll(td); err != nil { - t.Logf("Failed to cleanup %q: %s", td, err) - } - } -} - -func assertNotDirectoryError(t *testing.T, err error) { - perr, ok := err.(*os.PathError) - if !ok { - t.Fatalf("Unexpected error %#v, expected path error", err) - } - - if perr.Err != syscall.ENOTDIR { - t.Fatalf("Unexpected error %s, expected %s", perr.Err, syscall.ENOTDIR) - } -} - -func TestCommitFailure(t *testing.T) { - fms, td, 
cleanup := newFileMetadataStore(t) - defer cleanup() - - if err := ioutil.WriteFile(filepath.Join(td, "sha256"), []byte("was here first!"), 0644); err != nil { - t.Fatal(err) - } - - tx, err := fms.StartTransaction() - if err != nil { - t.Fatal(err) - } - - if err := tx.SetSize(0); err != nil { - t.Fatal(err) - } - - err = tx.Commit(randomLayerID(5)) - if err == nil { - t.Fatalf("Expected error committing with invalid layer parent directory") - } - assertNotDirectoryError(t, err) -} - -func TestStartTransactionFailure(t *testing.T) { - fms, td, cleanup := newFileMetadataStore(t) - defer cleanup() - - if err := ioutil.WriteFile(filepath.Join(td, "tmp"), []byte("was here first!"), 0644); err != nil { - t.Fatal(err) - } - - _, err := fms.StartTransaction() - if err == nil { - t.Fatalf("Expected error starting transaction with invalid layer parent directory") - } - assertNotDirectoryError(t, err) - - if err := os.Remove(filepath.Join(td, "tmp")); err != nil { - t.Fatal(err) - } - - tx, err := fms.StartTransaction() - if err != nil { - t.Fatal(err) - } - - if expected := filepath.Join(td, "tmp"); !strings.HasPrefix(tx.String(), expected) { - t.Fatalf("Unexpected transaction string %q, expected prefix %q", tx.String(), expected) - } - - if err := tx.Cancel(); err != nil { - t.Fatal(err) - } -} diff --git a/layer/layer.go b/layer/layer.go deleted file mode 100644 index 3881447b64..0000000000 --- a/layer/layer.go +++ /dev/null @@ -1,270 +0,0 @@ -// Package layer is a package for managing read-only -// and read-write mounts on the union file system -// driver. Read-only mounts are referenced using a -// content hash and are protected from mutation in -// the exposed interface. The tar format is used -// to create read-only layers and export both -// read-only and writable layers. The exported -// tar data for a read-only layer should match -// the tar used to create the layer. -package layer - -import ( - "errors" - "io" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/pkg/archive" -) - -var ( - // ErrLayerDoesNotExist is used when an operation is - // attempted on a layer which does not exist. - ErrLayerDoesNotExist = errors.New("layer does not exist") - - // ErrLayerNotRetained is used when a release is - // attempted on a layer which is not retained. - ErrLayerNotRetained = errors.New("layer not retained") - - // ErrMountDoesNotExist is used when an operation is - // attempted on a mount layer which does not exist. - ErrMountDoesNotExist = errors.New("mount does not exist") - - // ErrMountNameConflict is used when an attempt is made - // to create a mount but a mount already exists with - // the requested name. - ErrMountNameConflict = errors.New("mount already exists with name") - - // ErrActiveMount is used when an operation on a - // mount is attempted but the layer is still - // mounted and the operation cannot be performed. - ErrActiveMount = errors.New("mount still active") - - // ErrNotMounted is used when requesting an active - // mount but the layer is not mounted. - ErrNotMounted = errors.New("not mounted") - - // ErrMaxDepthExceeded is used when an attempt is made - // to create a layer which would result in a layer depth - // greater than the 125 maximum.
- ErrMaxDepthExceeded = errors.New("max depth exceeded") - - // ErrNotSupported is used when the action is not supported - // on the current platform - ErrNotSupported = errors.New("not supported on this platform") -) - -// ChainID is the content-addressable ID of a layer. -type ChainID digest.Digest - -// String returns a string rendition of a layer ID -func (id ChainID) String() string { - return string(id) -} - -// DiffID is the hash of an individual layer tar. -type DiffID digest.Digest - -// String returns a string rendition of a layer DiffID -func (diffID DiffID) String() string { - return string(diffID) -} - -// TarStreamer represents an object which may -// have its contents exported as a tar stream. -type TarStreamer interface { - // TarStream returns a tar archive stream - // for the contents of a layer. - TarStream() (io.ReadCloser, error) -} - -// Layer represents a read-only layer -type Layer interface { - TarStreamer - - // ChainID returns the content hash of the entire layer chain. The hash - // chain is made up of the DiffIDs of the top layer and all of its parents. - ChainID() ChainID - - // DiffID returns the content hash of the layer - // tar stream used to create this layer. - DiffID() DiffID - - // Parent returns the next layer in the layer chain. - Parent() Layer - - // Size returns the size of the entire layer chain. The size - // is calculated from the total size of all files in the layers. - Size() (int64, error) - - // DiffSize returns the size difference of the top layer - // from its parent layer. - DiffSize() (int64, error) - - // Metadata returns the low level storage metadata associated - // with the layer. - Metadata() (map[string]string, error) -} - -// RWLayer represents a layer which is -// readable and writable -type RWLayer interface { - TarStreamer - - // Name returns the name of the mounted layer - Name() string - - // Parent returns the layer which the writable - // layer was created from. - Parent() Layer - - // Mount mounts the RWLayer and returns the filesystem path - // to the writable layer. - Mount(mountLabel string) (string, error) - - // Unmount unmounts the RWLayer. This should be called - // for every mount. If there are multiple mount calls - // this operation will only decrement the internal mount counter. - Unmount() error - - // Size represents the size of the writable layer - // as calculated by the total size of the files - // changed in the mutable layer. - Size() (int64, error) - - // Changes returns the set of changes for the mutable layer - // from the base layer. - Changes() ([]archive.Change, error) - - // Metadata returns the low level metadata for the mutable layer - Metadata() (map[string]string, error) -} - -// Metadata holds information about a -// read-only layer -type Metadata struct { - // ChainID is the content hash of the layer - ChainID ChainID - - // DiffID is the hash of the tar data used to - // create the layer - DiffID DiffID - - // Size is the size of the layer and all parents - Size int64 - - // DiffSize is the size of the top layer - DiffSize int64 -} - -// MountInit is a function to initialize a -// writable mount. Changes made here will -// not be included in the Tar stream of the -// RWLayer. -type MountInit func(root string) error
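Because MountInit is easy to misread, here is a hedged sketch of what such an initializer might look like. It runs against the mounted root before the writable layer is handed to a caller, and whatever it writes is excluded from the RWLayer's exported tar stream (the changes land in the separate "-init" layer created by initMount in layer_store.go). The helper name and file choice are illustrative only:

    package main

    import (
        "io/ioutil"
        "os"
        "path/filepath"
    )

    // seedEtcFiles is a hypothetical MountInit: it pre-creates files the
    // runtime expects to exist without polluting the exported layer data.
    func seedEtcFiles(root string) error {
        if err := os.MkdirAll(filepath.Join(root, "etc"), 0755); err != nil {
            return err
        }
        // Empty placeholder; real content is typically supplied later.
        return ioutil.WriteFile(filepath.Join(root, "etc", "hosts"), []byte{}, 0644)
    }

    func main() {
        // Exercise the initializer against a scratch directory.
        dir, _ := ioutil.TempDir("", "mount-init-")
        defer os.RemoveAll(dir)
        if err := seedEtcFiles(dir); err != nil {
            panic(err)
        }
    }

A function like this would be passed as the initFunc argument of Store.CreateRWLayer below.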
- -// Store represents a backend for managing both -// read-only and read-write layers. -type Store interface { - Register(io.Reader, ChainID) (Layer, error) - Get(ChainID) (Layer, error) - Release(Layer) ([]Metadata, error) - - CreateRWLayer(id string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) - GetRWLayer(id string) (RWLayer, error) - GetMountID(id string) (string, error) - ReleaseRWLayer(RWLayer) ([]Metadata, error) - - Cleanup() error - DriverStatus() [][2]string - DriverName() string -} - -// DescribableStore represents a layer store capable of storing -// descriptors for layers. -type DescribableStore interface { - RegisterWithDescriptor(io.Reader, ChainID, distribution.Descriptor) (Layer, error) -} - -// MetadataTransaction represents functions for setting layer metadata -// with a single transaction. -type MetadataTransaction interface { - SetSize(int64) error - SetParent(parent ChainID) error - SetDiffID(DiffID) error - SetCacheID(string) error - SetDescriptor(distribution.Descriptor) error - TarSplitWriter(compressInput bool) (io.WriteCloser, error) - - Commit(ChainID) error - Cancel() error - String() string -} - -// MetadataStore represents a backend for persisting -// metadata about layers and providing the metadata -// for restoring a Store. -type MetadataStore interface { - // StartTransaction starts an update for new metadata - // which will be used to represent an ID on commit. - StartTransaction() (MetadataTransaction, error) - - GetSize(ChainID) (int64, error) - GetParent(ChainID) (ChainID, error) - GetDiffID(ChainID) (DiffID, error) - GetCacheID(ChainID) (string, error) - GetDescriptor(ChainID) (distribution.Descriptor, error) - TarSplitReader(ChainID) (io.ReadCloser, error) - - SetMountID(string, string) error - SetInitID(string, string) error - SetMountParent(string, ChainID) error - - GetMountID(string) (string, error) - GetInitID(string) (string, error) - GetMountParent(string) (ChainID, error) - - // List returns the full list of referenced - // read-only and read-write layers - List() ([]ChainID, []string, error) - - Remove(ChainID) error - RemoveMount(string) error -} - -// CreateChainID returns the ChainID for the given slice of DiffIDs -func CreateChainID(dgsts []DiffID) ChainID { - return createChainIDFromParent("", dgsts...) -} - -func createChainIDFromParent(parent ChainID, dgsts ...DiffID) ChainID { - if len(dgsts) == 0 { - return parent - } - if parent == "" { - return createChainIDFromParent(ChainID(dgsts[0]), dgsts[1:]...) - } - // H = "H(n-1) SHA256(n)" - dgst := digest.FromBytes([]byte(string(parent) + " " + string(dgsts[0]))) - return createChainIDFromParent(ChainID(dgst), dgsts[1:]...)
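The recursion in createChainIDFromParent is compact enough to misread, so here is a self-contained sketch of the same computation using plain crypto/sha256 (digest.FromBytes is the canonical SHA-256 digest; the diff IDs below are hypothetical): chain(0) = diffID(0), then chain(n) = SHA256(chain(n-1) + " " + diffID(n)).

    package main

    import (
        "crypto/sha256"
        "fmt"
    )

    func main() {
        // Hypothetical DiffIDs of a two-layer image, bottom layer first.
        diffIDs := []string{
            "sha256:03901b4a2ea88eeaad62dbe59b072b28b6efa00491962b8741081c5df50c65e0",
            "sha256:9d3dd9504c685a304985025df4ed0283e47ac9ffa9bd0326fddf4d59513f0827",
        }
        chain := diffIDs[0] // a single layer's ChainID is its own DiffID
        for _, d := range diffIDs[1:] {
            // Matches createChainIDFromParent: H(n) = SHA256("H(n-1) SHA256(n)")
            sum := sha256.Sum256([]byte(chain + " " + d))
            chain = fmt.Sprintf("sha256:%x", sum)
        }
        fmt.Println("ChainID:", chain)
    }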
-} - -// ReleaseAndLog releases the provided layer from the given layer -// store, logging any error and release metadata -func ReleaseAndLog(ls Store, l Layer) { - metadata, err := ls.Release(l) - if err != nil { - logrus.Errorf("Error releasing layer %s: %v", l.ChainID(), err) - } - LogReleaseMetadata(metadata) -} - -// LogReleaseMetadata logs a metadata array, uses this to -// ensure consistent logging for release metadata -func LogReleaseMetadata(metadatas []Metadata) { - for _, metadata := range metadatas { - logrus.Infof("Layer %s cleaned up", metadata.ChainID) - } -} diff --git a/layer/layer_store.go b/layer/layer_store.go deleted file mode 100644 index 6d5cb2599d..0000000000 --- a/layer/layer_store.go +++ /dev/null @@ -1,659 +0,0 @@ -package layer - -import ( - "errors" - "fmt" - "io" - "io/ioutil" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/stringid" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// maxLayerDepth represents the maximum number of -// layers which can be chained together. 125 was -// chosen to account for the 127 max in some -// graphdrivers plus the 2 additional layers -// used to create a rwlayer. -const maxLayerDepth = 125 - -type layerStore struct { - store MetadataStore - driver graphdriver.Driver - - layerMap map[ChainID]*roLayer - layerL sync.Mutex - - mounts map[string]*mountedLayer - mountL sync.Mutex -} - -// StoreOptions are the options used to create a new Store instance -type StoreOptions struct { - StorePath string - MetadataStorePathTemplate string - GraphDriver string - GraphDriverOptions []string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap -} - -// NewStoreFromOptions creates a new Store instance -func NewStoreFromOptions(options StoreOptions) (Store, error) { - driver, err := graphdriver.New( - options.StorePath, - options.GraphDriver, - options.GraphDriverOptions, - options.UIDMaps, - options.GIDMaps) - if err != nil { - return nil, fmt.Errorf("error initializing graphdriver: %v", err) - } - logrus.Debugf("Using graph driver %s", driver) - - fms, err := NewFSMetadataStore(fmt.Sprintf(options.MetadataStorePathTemplate, driver)) - if err != nil { - return nil, err - } - - return NewStoreFromGraphDriver(fms, driver) -} - -// NewStoreFromGraphDriver creates a new Store instance using the provided -// metadata store and graph driver. The metadata store will be used to restore -// the Store. 
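For orientation, this is roughly how a caller assembles those StoreOptions. The paths are hypothetical, and MetadataStorePathTemplate is a fmt template that receives the resolved graph driver name, as NewStoreFromOptions above shows:

    package main

    import (
        "log"

        "github.com/docker/docker/layer"
    )

    func main() {
        // A hedged construction sketch, not the daemon's actual wiring.
        opts := layer.StoreOptions{
            StorePath:                 "/var/lib/docker",                  // hypothetical root
            MetadataStorePathTemplate: "/var/lib/docker/image/%s/layerdb", // %s = driver name
            GraphDriver:               "vfs",
        }
        ls, err := layer.NewStoreFromOptions(opts)
        if err != nil {
            log.Fatal(err)
        }
        defer ls.Cleanup()
        log.Println("using driver:", ls.DriverName())
    }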
-func NewStoreFromGraphDriver(store MetadataStore, driver graphdriver.Driver) (Store, error) { - ls := &layerStore{ - store: store, - driver: driver, - layerMap: map[ChainID]*roLayer{}, - mounts: map[string]*mountedLayer{}, - } - - ids, mounts, err := store.List() - if err != nil { - return nil, err - } - - for _, id := range ids { - l, err := ls.loadLayer(id) - if err != nil { - logrus.Debugf("Failed to load layer %s: %s", id, err) - continue - } - if l.parent != nil { - l.parent.referenceCount++ - } - } - - for _, mount := range mounts { - if err := ls.loadMount(mount); err != nil { - logrus.Debugf("Failed to load mount %s: %s", mount, err) - } - } - - return ls, nil -} - -func (ls *layerStore) loadLayer(layer ChainID) (*roLayer, error) { - cl, ok := ls.layerMap[layer] - if ok { - return cl, nil - } - - diff, err := ls.store.GetDiffID(layer) - if err != nil { - return nil, fmt.Errorf("failed to get diff id for %s: %s", layer, err) - } - - size, err := ls.store.GetSize(layer) - if err != nil { - return nil, fmt.Errorf("failed to get size for %s: %s", layer, err) - } - - cacheID, err := ls.store.GetCacheID(layer) - if err != nil { - return nil, fmt.Errorf("failed to get cache id for %s: %s", layer, err) - } - - parent, err := ls.store.GetParent(layer) - if err != nil { - return nil, fmt.Errorf("failed to get parent for %s: %s", layer, err) - } - - descriptor, err := ls.store.GetDescriptor(layer) - if err != nil { - return nil, fmt.Errorf("failed to get descriptor for %s: %s", layer, err) - } - - cl = &roLayer{ - chainID: layer, - diffID: diff, - size: size, - cacheID: cacheID, - layerStore: ls, - references: map[Layer]struct{}{}, - descriptor: descriptor, - } - - if parent != "" { - p, err := ls.loadLayer(parent) - if err != nil { - return nil, err - } - cl.parent = p - } - - ls.layerMap[cl.chainID] = cl - - return cl, nil -} - -func (ls *layerStore) loadMount(mount string) error { - if _, ok := ls.mounts[mount]; ok { - return nil - } - - mountID, err := ls.store.GetMountID(mount) - if err != nil { - return err - } - - initID, err := ls.store.GetInitID(mount) - if err != nil { - return err - } - - parent, err := ls.store.GetMountParent(mount) - if err != nil { - return err - } - - ml := &mountedLayer{ - name: mount, - mountID: mountID, - initID: initID, - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - if parent != "" { - p, err := ls.loadLayer(parent) - if err != nil { - return err - } - ml.parent = p - - p.referenceCount++ - } - - ls.mounts[ml.name] = ml - - return nil -} - -func (ls *layerStore) applyTar(tx MetadataTransaction, ts io.Reader, parent string, layer *roLayer) error { - digester := digest.Canonical.New() - tr := io.TeeReader(ts, digester.Hash()) - - tsw, err := tx.TarSplitWriter(true) - if err != nil { - return err - } - metaPacker := storage.NewJSONPacker(tsw) - defer tsw.Close() - - // we're passing nil here for the file putter, because the ApplyDiff will - // handle the extraction of the archive - rdr, err := asm.NewInputTarStream(tr, metaPacker, nil) - if err != nil { - return err - } - - applySize, err := ls.driver.ApplyDiff(layer.cacheID, parent, archive.Reader(rdr)) - if err != nil { - return err - } - - // Discard trailing data but ensure metadata is picked up to reconstruct stream - io.Copy(ioutil.Discard, rdr) // ignore error as reader may be closed - - layer.size = applySize - layer.diffID = DiffID(digester.Digest()) - - logrus.Debugf("Applied tar %s to %s, size: %d", layer.diffID, layer.cacheID, applySize) - - return nil -} - -func (ls 
*layerStore) Register(ts io.Reader, parent ChainID) (Layer, error) { - return ls.registerWithDescriptor(ts, parent, distribution.Descriptor{}) -} - -func (ls *layerStore) registerWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { - // err is used to hold the error which will always trigger - // cleanup of creates sources but may not be an error returned - // to the caller (already exists). - var err error - var pid string - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - pid = p.cacheID - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - if p.depth() >= maxLayerDepth { - err = ErrMaxDepthExceeded - return nil, err - } - } - - // Create new roLayer - layer := &roLayer{ - parent: p, - cacheID: stringid.GenerateRandomID(), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - descriptor: descriptor, - } - - if err = ls.driver.Create(layer.cacheID, pid, "", nil); err != nil { - return nil, err - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - logrus.Debugf("Cleaning up layer %s: %v", layer.cacheID, err) - if err := ls.driver.Remove(layer.cacheID); err != nil { - logrus.Errorf("Error cleaning up cache layer %s: %v", layer.cacheID, err) - } - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - if err = ls.applyTar(tx, ts, pid, layer); err != nil { - return nil, err - } - - if layer.parent == nil { - layer.chainID = ChainID(layer.diffID) - } else { - layer.chainID = createChainIDFromParent(layer.parent.chainID, layer.diffID) - } - - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return the error - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -func (ls *layerStore) getWithoutLock(layer ChainID) *roLayer { - l, ok := ls.layerMap[layer] - if !ok { - return nil - } - - l.referenceCount++ - - return l -} - -func (ls *layerStore) get(l ChainID) *roLayer { - ls.layerL.Lock() - defer ls.layerL.Unlock() - return ls.getWithoutLock(l) -} - -func (ls *layerStore) Get(l ChainID) (Layer, error) { - ls.layerL.Lock() - defer ls.layerL.Unlock() - - layer := ls.getWithoutLock(l) - if layer == nil { - return nil, ErrLayerDoesNotExist - } - - return layer.getReference(), nil -} - -func (ls *layerStore) deleteLayer(layer *roLayer, metadata *Metadata) error { - err := ls.driver.Remove(layer.cacheID) - if err != nil { - return err - } - - err = ls.store.Remove(layer.chainID) - if err != nil { - return err - } - metadata.DiffID = layer.diffID - metadata.ChainID = layer.chainID - metadata.Size, err = layer.Size() - if err != nil { - return err - } - metadata.DiffSize = layer.size - - return nil -} - -func (ls *layerStore) releaseLayer(l *roLayer) ([]Metadata, error) { - depth := 0 - removed := []Metadata{} - for { - if l.referenceCount == 0 { - panic("layer not retained") - } - l.referenceCount-- - if l.referenceCount != 0 { - return removed, nil - } - - if 
len(removed) == 0 && depth > 0 { - panic("cannot remove layer with child") - } - if l.hasReferences() { - panic("cannot delete referenced layer") - } - var metadata Metadata - if err := ls.deleteLayer(l, &metadata); err != nil { - return nil, err - } - - delete(ls.layerMap, l.chainID) - removed = append(removed, metadata) - - if l.parent == nil { - return removed, nil - } - - depth++ - l = l.parent - } -} - -func (ls *layerStore) Release(l Layer) ([]Metadata, error) { - ls.layerL.Lock() - defer ls.layerL.Unlock() - layer, ok := ls.layerMap[l.ChainID()] - if !ok { - return []Metadata{}, nil - } - if !layer.hasReference(l) { - return nil, ErrLayerNotRetained - } - - layer.deleteReference(l) - - return ls.releaseLayer(layer) -} - -func (ls *layerStore) CreateRWLayer(name string, parent ChainID, mountLabel string, initFunc MountInit, storageOpt map[string]string) (RWLayer, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[name] - if ok { - return nil, ErrMountNameConflict - } - - var err error - var pid string - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - pid = p.cacheID - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - m = &mountedLayer{ - name: name, - parent: p, - mountID: ls.mountID(name), - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - if initFunc != nil { - pid, err = ls.initMount(m.mountID, pid, mountLabel, initFunc, storageOpt) - if err != nil { - return nil, err - } - m.initID = pid - } - - if err = ls.driver.CreateReadWrite(m.mountID, pid, "", storageOpt); err != nil { - return nil, err - } - - if err = ls.saveMount(m); err != nil { - return nil, err - } - - return m.getReference(), nil -} - -func (ls *layerStore) GetRWLayer(id string) (RWLayer, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - mount, ok := ls.mounts[id] - if !ok { - return nil, ErrMountDoesNotExist - } - - return mount.getReference(), nil -} - -func (ls *layerStore) GetMountID(id string) (string, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - mount, ok := ls.mounts[id] - if !ok { - return "", ErrMountDoesNotExist - } - logrus.Debugf("GetMountID id: %s -> mountID: %s", id, mount.mountID) - - return mount.mountID, nil -} - -func (ls *layerStore) ReleaseRWLayer(l RWLayer) ([]Metadata, error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[l.Name()] - if !ok { - return []Metadata{}, nil - } - - if err := m.deleteReference(l); err != nil { - return nil, err - } - - if m.hasReferences() { - return []Metadata{}, nil - } - - if err := ls.driver.Remove(m.mountID); err != nil { - logrus.Errorf("Error removing mounted layer %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - - if m.initID != "" { - if err := ls.driver.Remove(m.initID); err != nil { - logrus.Errorf("Error removing init layer %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - } - - if err := ls.store.RemoveMount(m.name); err != nil { - logrus.Errorf("Error removing mount metadata: %s: %s", m.name, err) - m.retakeReference(l) - return nil, err - } - - delete(ls.mounts, m.Name()) - - ls.layerL.Lock() - defer ls.layerL.Unlock() - if m.parent != nil { - return ls.releaseLayer(m.parent) - } - - return []Metadata{}, nil -} - -func (ls *layerStore) saveMount(mount *mountedLayer) error { - if err := ls.store.SetMountID(mount.name, mount.mountID); err != nil { - return err 
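Taken together, CreateRWLayer, GetRWLayer, and ReleaseRWLayer above define the writable-layer lifecycle. A hedged usage sketch, with the helper name and container ID invented for illustration:

    package layerexample

    import "github.com/docker/docker/layer"

    // withScratchLayer is a hypothetical helper showing the full RWLayer
    // lifecycle against a layer.Store.
    func withScratchLayer(ls layer.Store, base layer.Layer) error {
        rw, err := ls.CreateRWLayer("container-1", base.ChainID(), "", nil, nil)
        if err != nil {
            return err
        }
        path, err := rw.Mount("") // filesystem path of the writable layer
        if err != nil {
            return err
        }
        _ = path // ... populate the layer under path here ...
        if err := rw.Unmount(); err != nil {
            return err
        }
        // ReleaseRWLayer returns metadata for any layers it deleted.
        metadata, err := ls.ReleaseRWLayer(rw)
        if err != nil {
            return err
        }
        layer.LogReleaseMetadata(metadata)
        return nil
    }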
- } - - if mount.initID != "" { - if err := ls.store.SetInitID(mount.name, mount.initID); err != nil { - return err - } - } - - if mount.parent != nil { - if err := ls.store.SetMountParent(mount.name, mount.parent.chainID); err != nil { - return err - } - } - - ls.mounts[mount.name] = mount - - return nil -} - -func (ls *layerStore) initMount(graphID, parent, mountLabel string, initFunc MountInit, storageOpt map[string]string) (string, error) { - // Use "-init" to maintain compatibility with graph drivers - // which are expecting this layer with this special name. If all - // graph drivers can be updated to not rely on knowing about this layer - // then the initID should be randomly generated. - initID := fmt.Sprintf("%s-init", graphID) - - if err := ls.driver.Create(initID, parent, mountLabel, storageOpt); err != nil { - return "", err - } - p, err := ls.driver.Get(initID, "") - if err != nil { - return "", err - } - - if err := initFunc(p); err != nil { - ls.driver.Put(initID) - return "", err - } - - if err := ls.driver.Put(initID); err != nil { - return "", err - } - - return initID, nil -} - -func (ls *layerStore) assembleTarTo(graphID string, metadata io.ReadCloser, size *int64, w io.Writer) error { - diffDriver, ok := ls.driver.(graphdriver.DiffGetterDriver) - if !ok { - diffDriver = &naiveDiffPathDriver{ls.driver} - } - - defer metadata.Close() - - // get our relative path to the container - fileGetCloser, err := diffDriver.DiffGetter(graphID) - if err != nil { - return err - } - defer fileGetCloser.Close() - - metaUnpacker := storage.NewJSONUnpacker(metadata) - upackerCounter := &unpackSizeCounter{metaUnpacker, size} - logrus.Debugf("Assembling tar data for %s", graphID) - return asm.WriteOutputTarStream(fileGetCloser, upackerCounter, w) -} - -func (ls *layerStore) Cleanup() error { - return ls.driver.Cleanup() -} - -func (ls *layerStore) DriverStatus() [][2]string { - return ls.driver.Status() -} - -func (ls *layerStore) DriverName() string { - return ls.driver.String() -} - -type naiveDiffPathDriver struct { - graphdriver.Driver -} - -type fileGetPutter struct { - storage.FileGetter - driver graphdriver.Driver - id string -} - -func (w *fileGetPutter) Close() error { - return w.driver.Put(w.id) -} - -func (n *naiveDiffPathDriver) DiffGetter(id string) (graphdriver.FileGetCloser, error) { - p, err := n.Driver.Get(id, "") - if err != nil { - return nil, err - } - return &fileGetPutter{storage.NewPathFileGetter(p), n.Driver, id}, nil -} diff --git a/layer/layer_store_windows.go b/layer/layer_store_windows.go deleted file mode 100644 index 1276a912cc..0000000000 --- a/layer/layer_store_windows.go +++ /dev/null @@ -1,11 +0,0 @@ -package layer - -import ( - "io" - - "github.com/docker/distribution" -) - -func (ls *layerStore) RegisterWithDescriptor(ts io.Reader, parent ChainID, descriptor distribution.Descriptor) (Layer, error) { - return ls.registerWithDescriptor(ts, parent, descriptor) -} diff --git a/layer/layer_test.go b/layer/layer_test.go deleted file mode 100644 index 8e6817c96a..0000000000 --- a/layer/layer_test.go +++ /dev/null @@ -1,768 +0,0 @@ -package layer - -import ( - "bytes" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/daemon/graphdriver/vfs" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/stringid" -) - -func init() { - 
graphdriver.ApplyUncompressedLayer = archive.UnpackLayer - vfs.CopyWithTar = archive.CopyWithTar -} - -func newVFSGraphDriver(td string) (graphdriver.Driver, error) { - uidMap := []idtools.IDMap{ - { - ContainerID: 0, - HostID: os.Getuid(), - Size: 1, - }, - } - gidMap := []idtools.IDMap{ - { - ContainerID: 0, - HostID: os.Getgid(), - Size: 1, - }, - } - - return graphdriver.GetDriver("vfs", td, nil, uidMap, gidMap) -} - -func newTestGraphDriver(t *testing.T) (graphdriver.Driver, func()) { - td, err := ioutil.TempDir("", "graph-") - if err != nil { - t.Fatal(err) - } - - driver, err := newVFSGraphDriver(td) - if err != nil { - t.Fatal(err) - } - - return driver, func() { - os.RemoveAll(td) - } -} - -func newTestStore(t *testing.T) (Store, string, func()) { - td, err := ioutil.TempDir("", "layerstore-") - if err != nil { - t.Fatal(err) - } - - graph, graphcleanup := newTestGraphDriver(t) - fms, err := NewFSMetadataStore(td) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph) - if err != nil { - t.Fatal(err) - } - - return ls, td, func() { - graphcleanup() - os.RemoveAll(td) - } -} - -type layerInit func(root string) error - -func createLayer(ls Store, parent ChainID, layerFunc layerInit) (Layer, error) { - containerID := stringid.GenerateRandomID() - mount, err := ls.CreateRWLayer(containerID, parent, "", nil, nil) - if err != nil { - return nil, err - } - - path, err := mount.Mount("") - if err != nil { - return nil, err - } - - if err := layerFunc(path); err != nil { - return nil, err - } - - ts, err := mount.TarStream() - if err != nil { - return nil, err - } - defer ts.Close() - - layer, err := ls.Register(ts, parent) - if err != nil { - return nil, err - } - - if err := mount.Unmount(); err != nil { - return nil, err - } - - if _, err := ls.ReleaseRWLayer(mount); err != nil { - return nil, err - } - - return layer, nil -} - -type FileApplier interface { - ApplyFile(root string) error -} - -type testFile struct { - name string - content []byte - permission os.FileMode -} - -func newTestFile(name string, content []byte, perm os.FileMode) FileApplier { - return &testFile{ - name: name, - content: content, - permission: perm, - } -} - -func (tf *testFile) ApplyFile(root string) error { - fullPath := filepath.Join(root, tf.name) - if err := os.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { - return err - } - // Check if already exists - if stat, err := os.Stat(fullPath); err == nil && stat.Mode().Perm() != tf.permission { - if err := os.Chmod(fullPath, tf.permission); err != nil { - return err - } - } - if err := ioutil.WriteFile(fullPath, tf.content, tf.permission); err != nil { - return err - } - return nil -} - -func initWithFiles(files ...FileApplier) layerInit { - return func(root string) error { - for _, f := range files { - if err := f.ApplyFile(root); err != nil { - return err - } - } - return nil - } -} - -func getCachedLayer(l Layer) *roLayer { - if rl, ok := l.(*referencedCacheLayer); ok { - return rl.roLayer - } - return l.(*roLayer) -} - -func getMountLayer(l RWLayer) *mountedLayer { - return l.(*referencedRWLayer).mountedLayer -} - -func createMetadata(layers ...Layer) []Metadata { - metadata := make([]Metadata, len(layers)) - for i := range layers { - size, err := layers[i].Size() - if err != nil { - panic(err) - } - - metadata[i].ChainID = layers[i].ChainID() - metadata[i].DiffID = layers[i].DiffID() - metadata[i].Size = size - metadata[i].DiffSize = getCachedLayer(layers[i]).size - } - - return metadata -} - -func assertMetadata(t 
*testing.T, metadata, expectedMetadata []Metadata) { - if len(metadata) != len(expectedMetadata) { - t.Fatalf("Unexpected number of deletes %d, expected %d", len(metadata), len(expectedMetadata)) - } - - for i := range metadata { - if metadata[i] != expectedMetadata[i] { - t.Errorf("Unexpected metadata\n\tExpected: %#v\n\tActual: %#v", expectedMetadata[i], metadata[i]) - } - } - if t.Failed() { - t.FailNow() - } -} - -func releaseAndCheckDeleted(t *testing.T, ls Store, layer Layer, removed ...Layer) { - layerCount := len(ls.(*layerStore).layerMap) - expectedMetadata := createMetadata(removed...) - metadata, err := ls.Release(layer) - if err != nil { - t.Fatal(err) - } - - assertMetadata(t, metadata, expectedMetadata) - - if expected := layerCount - len(removed); len(ls.(*layerStore).layerMap) != expected { - t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) - } -} - -func cacheID(l Layer) string { - return getCachedLayer(l).cacheID -} - -func assertLayerEqual(t *testing.T, l1, l2 Layer) { - if l1.ChainID() != l2.ChainID() { - t.Fatalf("Mismatched ID: %s vs %s", l1.ChainID(), l2.ChainID()) - } - if l1.DiffID() != l2.DiffID() { - t.Fatalf("Mismatched DiffID: %s vs %s", l1.DiffID(), l2.DiffID()) - } - - size1, err := l1.Size() - if err != nil { - t.Fatal(err) - } - - size2, err := l2.Size() - if err != nil { - t.Fatal(err) - } - - if size1 != size2 { - t.Fatalf("Mismatched size: %d vs %d", size1, size2) - } - - if cacheID(l1) != cacheID(l2) { - t.Fatalf("Mismatched cache id: %s vs %s", cacheID(l1), cacheID(l2)) - } - - p1 := l1.Parent() - p2 := l2.Parent() - if p1 != nil && p2 != nil { - assertLayerEqual(t, p1, p2) - } else if p1 != nil || p2 != nil { - t.Fatalf("Mismatched parents: %v vs %v", p1, p2) - } -} - -func TestMountAndRegister(t *testing.T) { - ls, _, cleanup := newTestStore(t) - defer cleanup() - - li := initWithFiles(newTestFile("testfile.txt", []byte("some test data"), 0644)) - layer, err := createLayer(ls, "", li) - if err != nil { - t.Fatal(err) - } - - size, _ := layer.Size() - t.Logf("Layer size: %d", size) - - mount2, err := ls.CreateRWLayer("new-test-mount", layer.ChainID(), "", nil, nil) - if err != nil { - t.Fatal(err) - } - - path2, err := mount2.Mount("") - if err != nil { - t.Fatal(err) - } - - b, err := ioutil.ReadFile(filepath.Join(path2, "testfile.txt")) - if err != nil { - t.Fatal(err) - } - - if expected := "some test data"; string(b) != expected { - t.Fatalf("Wrong file data, expected %q, got %q", expected, string(b)) - } - - if err := mount2.Unmount(); err != nil { - t.Fatal(err) - } - - if _, err := ls.ReleaseRWLayer(mount2); err != nil { - t.Fatal(err) - } -} - -func TestLayerRelease(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) - if err != nil { - t.Fatal(err) - } - - layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) - if err != nil { - t.Fatal(err) - } - - if _, err := ls.Release(layer1); err != nil { - t.Fatal(err) - } - - layer3a, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3a file"), 0644))) - if err != nil { - t.Fatal(err) - } - - layer3b, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3b 
file"), 0644))) - if err != nil { - t.Fatal(err) - } - - if _, err := ls.Release(layer2); err != nil { - t.Fatal(err) - } - - t.Logf("Layer1: %s", layer1.ChainID()) - t.Logf("Layer2: %s", layer2.ChainID()) - t.Logf("Layer3a: %s", layer3a.ChainID()) - t.Logf("Layer3b: %s", layer3b.ChainID()) - - if expected := 4; len(ls.(*layerStore).layerMap) != expected { - t.Fatalf("Unexpected number of layers %d, expected %d", len(ls.(*layerStore).layerMap), expected) - } - - releaseAndCheckDeleted(t, ls, layer3b, layer3b) - releaseAndCheckDeleted(t, ls, layer3a, layer3a, layer2, layer1) -} - -func TestStoreRestore(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - layer1, err := createLayer(ls, "", initWithFiles(newTestFile("layer1.txt", []byte("layer 1 file"), 0644))) - if err != nil { - t.Fatal(err) - } - - layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("layer2.txt", []byte("layer 2 file"), 0644))) - if err != nil { - t.Fatal(err) - } - - if _, err := ls.Release(layer1); err != nil { - t.Fatal(err) - } - - layer3, err := createLayer(ls, layer2.ChainID(), initWithFiles(newTestFile("layer3.txt", []byte("layer 3 file"), 0644))) - if err != nil { - t.Fatal(err) - } - - if _, err := ls.Release(layer2); err != nil { - t.Fatal(err) - } - - m, err := ls.CreateRWLayer("some-mount_name", layer3.ChainID(), "", nil, nil) - if err != nil { - t.Fatal(err) - } - - path, err := m.Mount("") - if err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(path, "testfile.txt"), []byte("nothing here"), 0644); err != nil { - t.Fatal(err) - } - - if err := m.Unmount(); err != nil { - t.Fatal(err) - } - - ls2, err := NewStoreFromGraphDriver(ls.(*layerStore).store, ls.(*layerStore).driver) - if err != nil { - t.Fatal(err) - } - - layer3b, err := ls2.Get(layer3.ChainID()) - if err != nil { - t.Fatal(err) - } - - assertLayerEqual(t, layer3b, layer3) - - // Create again with same name, should return error - if _, err := ls2.CreateRWLayer("some-mount_name", layer3b.ChainID(), "", nil, nil); err == nil { - t.Fatal("Expected error creating mount with same name") - } else if err != ErrMountNameConflict { - t.Fatal(err) - } - - m2, err := ls2.GetRWLayer("some-mount_name") - if err != nil { - t.Fatal(err) - } - - if mountPath, err := m2.Mount(""); err != nil { - t.Fatal(err) - } else if path != mountPath { - t.Fatalf("Unexpected path %s, expected %s", mountPath, path) - } - - if mountPath, err := m2.Mount(""); err != nil { - t.Fatal(err) - } else if path != mountPath { - t.Fatalf("Unexpected path %s, expected %s", mountPath, path) - } - if err := m2.Unmount(); err != nil { - t.Fatal(err) - } - - b, err := ioutil.ReadFile(filepath.Join(path, "testfile.txt")) - if err != nil { - t.Fatal(err) - } - if expected := "nothing here"; string(b) != expected { - t.Fatalf("Unexpected content %q, expected %q", string(b), expected) - } - - if err := m2.Unmount(); err != nil { - t.Fatal(err) - } - - if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { - t.Fatal(err) - } else if len(metadata) != 0 { - t.Fatalf("Unexpectedly deleted layers: %#v", metadata) - } - - if metadata, err := ls2.ReleaseRWLayer(m2); err != nil { - t.Fatal(err) - } else if len(metadata) != 0 { - t.Fatalf("Unexpectedly deleted layers: %#v", metadata) - } - - releaseAndCheckDeleted(t, ls2, layer3b, layer3, layer2, layer1) -} - -func TestTarStreamStability(t *testing.T) { - // TODO Windows: 
Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - files1 := []FileApplier{ - newTestFile("/etc/hosts", []byte("mydomain 10.0.0.1"), 0644), - newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0644), - } - addedFile := newTestFile("/etc/shadow", []byte("root:::::::"), 0644) - files2 := []FileApplier{ - newTestFile("/etc/hosts", []byte("mydomain 10.0.0.2"), 0644), - newTestFile("/etc/profile", []byte("PATH=/usr/bin"), 0664), - newTestFile("/root/.bashrc", []byte("PATH=/usr/sbin:/usr/bin"), 0644), - } - - tar1, err := tarFromFiles(files1...) - if err != nil { - t.Fatal(err) - } - - tar2, err := tarFromFiles(files2...) - if err != nil { - t.Fatal(err) - } - - layer1, err := ls.Register(bytes.NewReader(tar1), "") - if err != nil { - t.Fatal(err) - } - - // hack layer to add file - p, err := ls.(*layerStore).driver.Get(layer1.(*referencedCacheLayer).cacheID, "") - if err != nil { - t.Fatal(err) - } - - if err := addedFile.ApplyFile(p); err != nil { - t.Fatal(err) - } - - if err := ls.(*layerStore).driver.Put(layer1.(*referencedCacheLayer).cacheID); err != nil { - t.Fatal(err) - } - - layer2, err := ls.Register(bytes.NewReader(tar2), layer1.ChainID()) - if err != nil { - t.Fatal(err) - } - - id1 := layer1.ChainID() - t.Logf("Layer 1: %s", layer1.ChainID()) - t.Logf("Layer 2: %s", layer2.ChainID()) - - if _, err := ls.Release(layer1); err != nil { - t.Fatal(err) - } - - assertLayerDiff(t, tar2, layer2) - - layer1b, err := ls.Get(id1) - if err != nil { - t.Logf("Content of layer map: %#v", ls.(*layerStore).layerMap) - t.Fatal(err) - } - - if _, err := ls.Release(layer2); err != nil { - t.Fatal(err) - } - - assertLayerDiff(t, tar1, layer1b) - - if _, err := ls.Release(layer1b); err != nil { - t.Fatal(err) - } -} - -func assertLayerDiff(t *testing.T, expected []byte, layer Layer) { - expectedDigest := digest.FromBytes(expected) - - if digest.Digest(layer.DiffID()) != expectedDigest { - t.Fatalf("Mismatched diff id for %s, got %s, expected %s", layer.ChainID(), layer.DiffID(), expected) - } - - ts, err := layer.TarStream() - if err != nil { - t.Fatal(err) - } - defer ts.Close() - - actual, err := ioutil.ReadAll(ts) - if err != nil { - t.Fatal(err) - } - - if len(actual) != len(expected) { - logByteDiff(t, actual, expected) - t.Fatalf("Mismatched tar stream size for %s, got %d, expected %d", layer.ChainID(), len(actual), len(expected)) - } - - actualDigest := digest.FromBytes(actual) - - if actualDigest != expectedDigest { - logByteDiff(t, actual, expected) - t.Fatalf("Wrong digest of tar stream, got %s, expected %s", actualDigest, expectedDigest) - } -} - -const maxByteLog = 4 * 1024 - -func logByteDiff(t *testing.T, actual, expected []byte) { - d1, d2 := byteDiff(actual, expected) - if len(d1) == 0 && len(d2) == 0 { - return - } - - prefix := len(actual) - len(d1) - if len(d1) > maxByteLog || len(d2) > maxByteLog { - t.Logf("Byte diff after %d matching bytes", prefix) - } else { - t.Logf("Byte diff after %d matching bytes\nActual bytes after prefix:\n%x\nExpected bytes after prefix:\n%x", prefix, d1, d2) - } -} - -// byteDiff returns the differing bytes after the matching prefix -func byteDiff(b1, b2 []byte) ([]byte, []byte) { - i := 0 - for i < len(b1) && i < len(b2) { - if b1[i] != b2[i] { - break - } - i++ - } - - return b1[i:], b2[i:] -} - -func tarFromFiles(files ...FileApplier) ([]byte, error) { - td, err := ioutil.TempDir("", "tar-") - if err != nil { - return nil, err - } - defer 
os.RemoveAll(td) - - for _, f := range files { - if err := f.ApplyFile(td); err != nil { - return nil, err - } - } - - r, err := archive.Tar(td, archive.Uncompressed) - if err != nil { - return nil, err - } - - buf := bytes.NewBuffer(nil) - if _, err := io.Copy(buf, r); err != nil { - return nil, err - } - - return buf.Bytes(), nil -} - -// assertReferences asserts that all the references are to the same -// image and represent the full set of references to that image. -func assertReferences(t *testing.T, references ...Layer) { - if len(references) == 0 { - return - } - base := references[0].(*referencedCacheLayer).roLayer - seenReferences := map[Layer]struct{}{ - references[0]: {}, - } - for i := 1; i < len(references); i++ { - other := references[i].(*referencedCacheLayer).roLayer - if base != other { - t.Fatalf("Unexpected referenced cache layer %s, expecting %s", other.ChainID(), base.ChainID()) - } - if _, ok := base.references[references[i]]; !ok { - t.Fatalf("Reference not part of reference list: %v", references[i]) - } - if _, ok := seenReferences[references[i]]; ok { - t.Fatalf("Duplicated reference %v", references[i]) - } - } - if rc := len(base.references); rc != len(references) { - t.Fatalf("Unexpected number of references %d, expecting %d", rc, len(references)) - } -} - -func TestRegisterExistingLayer(t *testing.T) { - ls, _, cleanup := newTestStore(t) - defer cleanup() - - baseFiles := []FileApplier{ - newTestFile("/etc/profile", []byte("# Base configuration"), 0644), - } - - layerFiles := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Root configuration"), 0644), - } - - li := initWithFiles(baseFiles...) - layer1, err := createLayer(ls, "", li) - if err != nil { - t.Fatal(err) - } - - tar1, err := tarFromFiles(layerFiles...) - if err != nil { - t.Fatal(err) - } - - layer2a, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) - if err != nil { - t.Fatal(err) - } - - layer2b, err := ls.Register(bytes.NewReader(tar1), layer1.ChainID()) - if err != nil { - t.Fatal(err) - } - - assertReferences(t, layer2a, layer2b) -} - -func TestTarStreamVerification(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, tmpdir, cleanup := newTestStore(t) - defer cleanup() - - files1 := []FileApplier{ - newTestFile("/foo", []byte("abc"), 0644), - newTestFile("/bar", []byte("def"), 0644), - } - files2 := []FileApplier{ - newTestFile("/foo", []byte("abc"), 0644), - newTestFile("/bar", []byte("def"), 0600), // different perm - } - - tar1, err := tarFromFiles(files1...) - if err != nil { - t.Fatal(err) - } - - tar2, err := tarFromFiles(files2...) 
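TestRegisterExistingLayer above pins down an important invariant: registering byte-identical tar data under the same parent does not create a second layer; it returns a second reference to the existing one. A sketch of the caller-visible behavior, reusing the test helpers defined above (error handling elided):

    tar, _ := tarFromFiles(newTestFile("/a", []byte("x"), 0644))
    l1, _ := ls.Register(bytes.NewReader(tar), parent.ChainID())
    l2, _ := ls.Register(bytes.NewReader(tar), parent.ChainID())
    // One underlying roLayer, two live references; each reference must
    // still be released independently via ls.Release.
    fmt.Println(l1.ChainID() == l2.ChainID()) // true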
- if err != nil { - t.Fatal(err) - } - - layer1, err := ls.Register(bytes.NewReader(tar1), "") - if err != nil { - t.Fatal(err) - } - - layer2, err := ls.Register(bytes.NewReader(tar2), "") - if err != nil { - t.Fatal(err) - } - id1 := digest.Digest(layer1.ChainID()) - id2 := digest.Digest(layer2.ChainID()) - - // Replace tar data files - src, err := os.Open(filepath.Join(tmpdir, id1.Algorithm().String(), id1.Hex(), "tar-split.json.gz")) - if err != nil { - t.Fatal(err) - } - - dst, err := os.Create(filepath.Join(tmpdir, id2.Algorithm().String(), id2.Hex(), "tar-split.json.gz")) - if err != nil { - t.Fatal(err) - } - - if _, err := io.Copy(dst, src); err != nil { - t.Fatal(err) - } - - src.Close() - dst.Close() - - ts, err := layer2.TarStream() - if err != nil { - t.Fatal(err) - } - _, err = io.Copy(ioutil.Discard, ts) - if err == nil { - t.Fatal("expected data verification to fail") - } - if !strings.Contains(err.Error(), "could not verify layer data") { - t.Fatalf("wrong error returned from tarstream: %q", err) - } -} diff --git a/layer/layer_unix.go b/layer/layer_unix.go deleted file mode 100644 index 776b78ac02..0000000000 --- a/layer/layer_unix.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build linux freebsd darwin openbsd solaris - -package layer - -import "github.com/docker/docker/pkg/stringid" - -func (ls *layerStore) mountID(name string) string { - return stringid.GenerateRandomID() -} diff --git a/layer/layer_unix_test.go b/layer/layer_unix_test.go deleted file mode 100644 index 9aa1afd597..0000000000 --- a/layer/layer_unix_test.go +++ /dev/null @@ -1,71 +0,0 @@ -// +build !windows - -package layer - -import "testing" - -func graphDiffSize(ls Store, l Layer) (int64, error) { - cl := getCachedLayer(l) - var parent string - if cl.parent != nil { - parent = cl.parent.cacheID - } - return ls.(*layerStore).driver.DiffSize(cl.cacheID, parent) -} - -// Unix as Windows graph driver does not support Changes which is indirectly -// invoked by calling DiffSize on the driver -func TestLayerSize(t *testing.T) { - ls, _, cleanup := newTestStore(t) - defer cleanup() - - content1 := []byte("Base contents") - content2 := []byte("Added contents") - - layer1, err := createLayer(ls, "", initWithFiles(newTestFile("file1", content1, 0644))) - if err != nil { - t.Fatal(err) - } - - layer2, err := createLayer(ls, layer1.ChainID(), initWithFiles(newTestFile("file2", content2, 0644))) - if err != nil { - t.Fatal(err) - } - - layer1DiffSize, err := graphDiffSize(ls, layer1) - if err != nil { - t.Fatal(err) - } - - if int(layer1DiffSize) != len(content1) { - t.Fatalf("Unexpected diff size %d, expected %d", layer1DiffSize, len(content1)) - } - - layer1Size, err := layer1.Size() - if err != nil { - t.Fatal(err) - } - - if expected := len(content1); int(layer1Size) != expected { - t.Fatalf("Unexpected size %d, expected %d", layer1Size, expected) - } - - layer2DiffSize, err := graphDiffSize(ls, layer2) - if err != nil { - t.Fatal(err) - } - - if int(layer2DiffSize) != len(content2) { - t.Fatalf("Unexpected diff size %d, expected %d", layer2DiffSize, len(content2)) - } - - layer2Size, err := layer2.Size() - if err != nil { - t.Fatal(err) - } - - if expected := len(content1) + len(content2); int(layer2Size) != expected { - t.Fatalf("Unexpected size %d, expected %d", layer2Size, expected) - } - -} diff --git a/layer/layer_windows.go b/layer/layer_windows.go deleted file mode 100644 index e20311a091..0000000000 --- a/layer/layer_windows.go +++ /dev/null @@ -1,98 +0,0 @@ -package layer - -import ( - "errors" - "fmt" - - 
"github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/daemon/graphdriver" -) - -// GetLayerPath returns the path to a layer -func GetLayerPath(s Store, layer ChainID) (string, error) { - ls, ok := s.(*layerStore) - if !ok { - return "", errors.New("unsupported layer store") - } - ls.layerL.Lock() - defer ls.layerL.Unlock() - - rl, ok := ls.layerMap[layer] - if !ok { - return "", ErrLayerDoesNotExist - } - - path, err := ls.driver.Get(rl.cacheID, "") - if err != nil { - return "", err - } - - if err := ls.driver.Put(rl.cacheID); err != nil { - return "", err - } - - return path, nil -} - -func (ls *layerStore) RegisterDiffID(graphID string, size int64) (Layer, error) { - var err error // this is used for cleanup in existingLayer case - diffID := digest.FromBytes([]byte(graphID)) - - // Create new roLayer - layer := &roLayer{ - cacheID: graphID, - diffID: DiffID(diffID), - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - size: size, - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - defer func() { - if err != nil { - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - layer.chainID = createChainIDFromParent("", layer.diffID) - - if !ls.driver.Exists(layer.cacheID) { - return nil, fmt.Errorf("layer %q is unknown to driver", layer.cacheID) - } - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -func (ls *layerStore) mountID(name string) string { - // windows has issues if container ID doesn't match mount ID - return name -} - -func (ls *layerStore) GraphDriver() graphdriver.Driver { - return ls.driver -} diff --git a/layer/migration.go b/layer/migration.go deleted file mode 100644 index b45c31099d..0000000000 --- a/layer/migration.go +++ /dev/null @@ -1,256 +0,0 @@ -package layer - -import ( - "compress/gzip" - "errors" - "fmt" - "io" - "os" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -// CreateRWLayerByGraphID creates a RWLayer in the layer store using -// the provided name with the given graphID. To get the RWLayer -// after migration the layer may be retrieved by the given name. 
-func (ls *layerStore) CreateRWLayerByGraphID(name string, graphID string, parent ChainID) (err error) { - ls.mountL.Lock() - defer ls.mountL.Unlock() - m, ok := ls.mounts[name] - if ok { - if m.parent.chainID != parent { - return errors.New("name conflict, mismatched parent") - } - if m.mountID != graphID { - return errors.New("mount already exists") - } - - return nil - } - - if !ls.driver.Exists(graphID) { - return fmt.Errorf("graph ID does not exist: %q", graphID) - } - - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return ErrLayerDoesNotExist - } - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - // TODO: Ensure graphID has correct parent - - m = &mountedLayer{ - name: name, - parent: p, - mountID: graphID, - layerStore: ls, - references: map[RWLayer]*referencedRWLayer{}, - } - - // Check for existing init layer - initID := fmt.Sprintf("%s-init", graphID) - if ls.driver.Exists(initID) { - m.initID = initID - } - - if err = ls.saveMount(m); err != nil { - return err - } - - return nil -} - -func (ls *layerStore) ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID DiffID, size int64, err error) { - defer func() { - if err != nil { - logrus.Debugf("could not get checksum for %q with tar-split: %q", id, err) - diffID, size, err = ls.checksumForGraphIDNoTarsplit(id, parent, newTarDataPath) - } - }() - - if oldTarDataPath == "" { - err = errors.New("no tar-split file") - return - } - - tarDataFile, err := os.Open(oldTarDataPath) - if err != nil { - return - } - defer tarDataFile.Close() - uncompressed, err := gzip.NewReader(tarDataFile) - if err != nil { - return - } - - dgst := digest.Canonical.New() - err = ls.assembleTarTo(id, uncompressed, &size, dgst.Hash()) - if err != nil { - return - } - - diffID = DiffID(dgst.Digest()) - err = os.RemoveAll(newTarDataPath) - if err != nil { - return - } - err = os.Link(oldTarDataPath, newTarDataPath) - - return -} - -func (ls *layerStore) checksumForGraphIDNoTarsplit(id, parent, newTarDataPath string) (diffID DiffID, size int64, err error) { - rawarchive, err := ls.driver.Diff(id, parent) - if err != nil { - return - } - defer rawarchive.Close() - - f, err := os.Create(newTarDataPath) - if err != nil { - return - } - defer f.Close() - mfz := gzip.NewWriter(f) - defer mfz.Close() - metaPacker := storage.NewJSONPacker(mfz) - - packerCounter := &packSizeCounter{metaPacker, &size} - - archive, err := asm.NewInputTarStream(rawarchive, packerCounter, nil) - if err != nil { - return - } - dgst, err := digest.FromReader(archive) - if err != nil { - return - } - diffID = DiffID(dgst) - return -} - -func (ls *layerStore) RegisterByGraphID(graphID string, parent ChainID, diffID DiffID, tarDataFile string, size int64) (Layer, error) { - // err is used to hold the error which will always trigger - // cleanup of creates sources but may not be an error returned - // to the caller (already exists). 
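Both checksum paths above reduce to one pattern: stream the layer's tar bytes through a digester exactly once, capturing the size and the DiffID in the same pass. A self-contained sketch of that pattern (the string below stands in for a tar stream):

    package main

    import (
        "crypto/sha256"
        "fmt"
        "io"
        "io/ioutil"
        "strings"
    )

    func main() {
        src := strings.NewReader("stand-in for a tar stream")
        h := sha256.New()
        // Tee the bytes into the digester while they flow to their real
        // consumer (tar-split assembly above; ioutil.Discard here).
        n, err := io.Copy(ioutil.Discard, io.TeeReader(src, h))
        if err != nil {
            panic(err)
        }
        fmt.Printf("size=%d diffID=sha256:%x\n", n, h.Sum(nil))
    }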
- var err error - var p *roLayer - if string(parent) != "" { - p = ls.get(parent) - if p == nil { - return nil, ErrLayerDoesNotExist - } - - // Release parent chain if error - defer func() { - if err != nil { - ls.layerL.Lock() - ls.releaseLayer(p) - ls.layerL.Unlock() - } - }() - } - - // Create new roLayer - layer := &roLayer{ - parent: p, - cacheID: graphID, - referenceCount: 1, - layerStore: ls, - references: map[Layer]struct{}{}, - diffID: diffID, - size: size, - chainID: createChainIDFromParent(parent, diffID), - } - - ls.layerL.Lock() - defer ls.layerL.Unlock() - - if existingLayer := ls.getWithoutLock(layer.chainID); existingLayer != nil { - // Set error for cleanup, but do not return - err = errors.New("layer already exists") - return existingLayer.getReference(), nil - } - - tx, err := ls.store.StartTransaction() - if err != nil { - return nil, err - } - - defer func() { - if err != nil { - logrus.Debugf("Cleaning up transaction after failed migration for %s: %v", graphID, err) - if err := tx.Cancel(); err != nil { - logrus.Errorf("Error canceling metadata transaction %q: %s", tx.String(), err) - } - } - }() - - tsw, err := tx.TarSplitWriter(false) - if err != nil { - return nil, err - } - defer tsw.Close() - tdf, err := os.Open(tarDataFile) - if err != nil { - return nil, err - } - defer tdf.Close() - _, err = io.Copy(tsw, tdf) - if err != nil { - return nil, err - } - - if err = storeLayer(tx, layer); err != nil { - return nil, err - } - - if err = tx.Commit(layer.chainID); err != nil { - return nil, err - } - - ls.layerMap[layer.chainID] = layer - - return layer.getReference(), nil -} - -type unpackSizeCounter struct { - unpacker storage.Unpacker - size *int64 -} - -func (u *unpackSizeCounter) Next() (*storage.Entry, error) { - e, err := u.unpacker.Next() - if err == nil && u.size != nil { - *u.size += e.Size - } - return e, err -} - -type packSizeCounter struct { - packer storage.Packer - size *int64 -} - -func (p *packSizeCounter) AddEntry(e storage.Entry) (int, error) { - n, err := p.packer.AddEntry(e) - if err == nil && p.size != nil { - *p.size += e.Size - } - return n, err -} diff --git a/layer/migration_test.go b/layer/migration_test.go deleted file mode 100644 index 50ea6407bb..0000000000 --- a/layer/migration_test.go +++ /dev/null @@ -1,435 +0,0 @@ -package layer - -import ( - "bytes" - "compress/gzip" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "testing" - - "github.com/docker/docker/daemon/graphdriver" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stringid" - "github.com/vbatts/tar-split/tar/asm" - "github.com/vbatts/tar-split/tar/storage" -) - -func writeTarSplitFile(name string, tarContent []byte) error { - f, err := os.OpenFile(name, os.O_TRUNC|os.O_CREATE|os.O_WRONLY, 0644) - if err != nil { - return err - } - defer f.Close() - - fz := gzip.NewWriter(f) - - metaPacker := storage.NewJSONPacker(fz) - defer fz.Close() - - rdr, err := asm.NewInputTarStream(bytes.NewReader(tarContent), metaPacker, nil) - if err != nil { - return err - } - - if _, err := io.Copy(ioutil.Discard, rdr); err != nil { - return err - } - - return nil -} - -func TestLayerMigration(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - td, err := ioutil.TempDir("", "migration-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(td) - - layer1Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), - 
newTestFile("/etc/profile", []byte("# Base configuration"), 0644), - } - - layer2Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), - } - - tar1, err := tarFromFiles(layer1Files...) - if err != nil { - t.Fatal(err) - } - - tar2, err := tarFromFiles(layer2Files...) - if err != nil { - t.Fatal(err) - } - - graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) - if err != nil { - t.Fatal(err) - } - - graphID1 := stringid.GenerateRandomID() - if err := graph.Create(graphID1, "", "", nil); err != nil { - t.Fatal(err) - } - if _, err := graph.ApplyDiff(graphID1, "", archive.Reader(bytes.NewReader(tar1))); err != nil { - t.Fatal(err) - } - - tf1 := filepath.Join(td, "tar1.json.gz") - if err := writeTarSplitFile(tf1, tar1); err != nil { - t.Fatal(err) - } - - fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph) - if err != nil { - t.Fatal(err) - } - - newTarDataPath := filepath.Join(td, ".migration-tardata") - diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", tf1, newTarDataPath) - if err != nil { - t.Fatal(err) - } - - layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) - if err != nil { - t.Fatal(err) - } - - layer1b, err := ls.Register(bytes.NewReader(tar1), "") - if err != nil { - t.Fatal(err) - } - - assertReferences(t, layer1a, layer1b) - // Attempt register, should be same - layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) - if err != nil { - t.Fatal(err) - } - - graphID2 := stringid.GenerateRandomID() - if err := graph.Create(graphID2, graphID1, "", nil); err != nil { - t.Fatal(err) - } - if _, err := graph.ApplyDiff(graphID2, graphID1, archive.Reader(bytes.NewReader(tar2))); err != nil { - t.Fatal(err) - } - - tf2 := filepath.Join(td, "tar2.json.gz") - if err := writeTarSplitFile(tf2, tar2); err != nil { - t.Fatal(err) - } - diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, tf2, newTarDataPath) - if err != nil { - t.Fatal(err) - } - - layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, tf2, size) - if err != nil { - t.Fatal(err) - } - assertReferences(t, layer2a, layer2b) - - if metadata, err := ls.Release(layer2a); err != nil { - t.Fatal(err) - } else if len(metadata) > 0 { - t.Fatalf("Unexpected layer removal after first release: %#v", metadata) - } - - metadata, err := ls.Release(layer2b) - if err != nil { - t.Fatal(err) - } - - assertMetadata(t, metadata, createMetadata(layer2a)) -} - -func tarFromFilesInGraph(graph graphdriver.Driver, graphID, parentID string, files ...FileApplier) ([]byte, error) { - t, err := tarFromFiles(files...) 
- if err != nil { - return nil, err - } - - if err := graph.Create(graphID, parentID, "", nil); err != nil { - return nil, err - } - if _, err := graph.ApplyDiff(graphID, parentID, archive.Reader(bytes.NewReader(t))); err != nil { - return nil, err - } - - ar, err := graph.Diff(graphID, parentID) - if err != nil { - return nil, err - } - defer ar.Close() - - return ioutil.ReadAll(ar) -} - -func TestLayerMigrationNoTarsplit(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - td, err := ioutil.TempDir("", "migration-test-") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(td) - - layer1Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), - newTestFile("/etc/profile", []byte("# Base configuration"), 0644), - } - - layer2Files := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), - } - - graph, err := newVFSGraphDriver(filepath.Join(td, "graphdriver-")) - if err != nil { - t.Fatal(err) - } - graphID1 := stringid.GenerateRandomID() - graphID2 := stringid.GenerateRandomID() - - tar1, err := tarFromFilesInGraph(graph, graphID1, "", layer1Files...) - if err != nil { - t.Fatal(err) - } - - tar2, err := tarFromFilesInGraph(graph, graphID2, graphID1, layer2Files...) - if err != nil { - t.Fatal(err) - } - - fms, err := NewFSMetadataStore(filepath.Join(td, "layers")) - if err != nil { - t.Fatal(err) - } - ls, err := NewStoreFromGraphDriver(fms, graph) - if err != nil { - t.Fatal(err) - } - - newTarDataPath := filepath.Join(td, ".migration-tardata") - diffID, size, err := ls.(*layerStore).ChecksumForGraphID(graphID1, "", "", newTarDataPath) - if err != nil { - t.Fatal(err) - } - - layer1a, err := ls.(*layerStore).RegisterByGraphID(graphID1, "", diffID, newTarDataPath, size) - if err != nil { - t.Fatal(err) - } - - layer1b, err := ls.Register(bytes.NewReader(tar1), "") - if err != nil { - t.Fatal(err) - } - - assertReferences(t, layer1a, layer1b) - - // Attempt register, should be same - layer2a, err := ls.Register(bytes.NewReader(tar2), layer1a.ChainID()) - if err != nil { - t.Fatal(err) - } - - diffID, size, err = ls.(*layerStore).ChecksumForGraphID(graphID2, graphID1, "", newTarDataPath) - if err != nil { - t.Fatal(err) - } - - layer2b, err := ls.(*layerStore).RegisterByGraphID(graphID2, layer1a.ChainID(), diffID, newTarDataPath, size) - if err != nil { - t.Fatal(err) - } - assertReferences(t, layer2a, layer2b) - - if metadata, err := ls.Release(layer2a); err != nil { - t.Fatal(err) - } else if len(metadata) > 0 { - t.Fatalf("Unexpected layer removal after first release: %#v", metadata) - } - - metadata, err := ls.Release(layer2b) - if err != nil { - t.Fatal(err) - } - - assertMetadata(t, metadata, createMetadata(layer2a)) -} - -func TestMountMigration(t *testing.T) { - // TODO Windows: Figure out why this is failing (obvious - paths... 
needs porting) - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - baseFiles := []FileApplier{ - newTestFile("/root/.bashrc", []byte("# Boring configuration"), 0644), - newTestFile("/etc/profile", []byte("# Base configuration"), 0644), - } - initFiles := []FileApplier{ - newTestFile("/etc/hosts", []byte{}, 0644), - newTestFile("/etc/resolv.conf", []byte{}, 0644), - } - mountFiles := []FileApplier{ - newTestFile("/etc/hosts", []byte("localhost 127.0.0.1"), 0644), - newTestFile("/root/.bashrc", []byte("# Updated configuration"), 0644), - newTestFile("/root/testfile1.txt", []byte("nothing valuable"), 0644), - } - - initTar, err := tarFromFiles(initFiles...) - if err != nil { - t.Fatal(err) - } - - mountTar, err := tarFromFiles(mountFiles...) - if err != nil { - t.Fatal(err) - } - - graph := ls.(*layerStore).driver - - layer1, err := createLayer(ls, "", initWithFiles(baseFiles...)) - if err != nil { - t.Fatal(err) - } - - graphID1 := layer1.(*referencedCacheLayer).cacheID - - containerID := stringid.GenerateRandomID() - containerInit := fmt.Sprintf("%s-init", containerID) - - if err := graph.Create(containerInit, graphID1, "", nil); err != nil { - t.Fatal(err) - } - if _, err := graph.ApplyDiff(containerInit, graphID1, archive.Reader(bytes.NewReader(initTar))); err != nil { - t.Fatal(err) - } - - if err := graph.Create(containerID, containerInit, "", nil); err != nil { - t.Fatal(err) - } - if _, err := graph.ApplyDiff(containerID, containerInit, archive.Reader(bytes.NewReader(mountTar))); err != nil { - t.Fatal(err) - } - - if err := ls.(*layerStore).CreateRWLayerByGraphID("migration-mount", containerID, layer1.ChainID()); err != nil { - t.Fatal(err) - } - - rwLayer1, err := ls.GetRWLayer("migration-mount") - if err != nil { - t.Fatal(err) - } - - if _, err := rwLayer1.Mount(""); err != nil { - t.Fatal(err) - } - - changes, err := rwLayer1.Changes() - if err != nil { - t.Fatal(err) - } - - if expected := 5; len(changes) != expected { - t.Logf("Changes %#v", changes) - t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) - } - - sortChanges(changes) - - assertChange(t, changes[0], archive.Change{ - Path: "/etc", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[1], archive.Change{ - Path: "/etc/hosts", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[2], archive.Change{ - Path: "/root", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[3], archive.Change{ - Path: "/root/.bashrc", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[4], archive.Change{ - Path: "/root/testfile1.txt", - Kind: archive.ChangeAdd, - }) - - if _, err := ls.CreateRWLayer("migration-mount", layer1.ChainID(), "", nil, nil); err == nil { - t.Fatal("Expected error creating mount with same name") - } else if err != ErrMountNameConflict { - t.Fatal(err) - } - - rwLayer2, err := ls.GetRWLayer("migration-mount") - if err != nil { - t.Fatal(err) - } - - if getMountLayer(rwLayer1) != getMountLayer(rwLayer2) { - t.Fatal("Expected same layer from get with same name as from migrate") - } - - if _, err := rwLayer2.Mount(""); err != nil { - t.Fatal(err) - } - - if _, err := rwLayer2.Mount(""); err != nil { - t.Fatal(err) - } - - if metadata, err := ls.Release(layer1); err != nil { - t.Fatal(err) - } else if len(metadata) > 0 { - t.Fatalf("Expected no layers to be deleted, deleted %#v", metadata) - } - - if err := rwLayer1.Unmount(); err != nil { - t.Fatal(err) - } - - if _, err := 
ls.ReleaseRWLayer(rwLayer1); err != nil { - t.Fatal(err) - } - - if err := rwLayer2.Unmount(); err != nil { - t.Fatal(err) - } - if err := rwLayer2.Unmount(); err != nil { - t.Fatal(err) - } - metadata, err := ls.ReleaseRWLayer(rwLayer2) - if err != nil { - t.Fatal(err) - } - if len(metadata) == 0 { - t.Fatal("Expected base layer to be deleted when deleting mount") - } - - assertMetadata(t, metadata, createMetadata(layer1)) -} diff --git a/layer/mount_test.go b/layer/mount_test.go deleted file mode 100644 index 7a8637eae9..0000000000 --- a/layer/mount_test.go +++ /dev/null @@ -1,230 +0,0 @@ -package layer - -import ( - "io/ioutil" - "os" - "path/filepath" - "runtime" - "sort" - "testing" - - "github.com/docker/docker/pkg/archive" -) - -func TestMountInit(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - basefile := newTestFile("testfile.txt", []byte("base data!"), 0644) - initfile := newTestFile("testfile.txt", []byte("init data!"), 0777) - - li := initWithFiles(basefile) - layer, err := createLayer(ls, "", li) - if err != nil { - t.Fatal(err) - } - - mountInit := func(root string) error { - return initfile.ApplyFile(root) - } - - m, err := ls.CreateRWLayer("fun-mount", layer.ChainID(), "", mountInit, nil) - if err != nil { - t.Fatal(err) - } - - path, err := m.Mount("") - if err != nil { - t.Fatal(err) - } - - f, err := os.Open(filepath.Join(path, "testfile.txt")) - if err != nil { - t.Fatal(err) - } - defer f.Close() - - fi, err := f.Stat() - if err != nil { - t.Fatal(err) - } - - b, err := ioutil.ReadAll(f) - if err != nil { - t.Fatal(err) - } - - if expected := "init data!"; string(b) != expected { - t.Fatalf("Unexpected test file contents %q, expected %q", string(b), expected) - } - - if fi.Mode().Perm() != 0777 { - t.Fatalf("Unexpected filemode %o, expecting %o", fi.Mode().Perm(), 0777) - } -} - -func TestMountSize(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - content1 := []byte("Base contents") - content2 := []byte("Mutable contents") - contentInit := []byte("why am I excluded from the size ☹") - - li := initWithFiles(newTestFile("file1", content1, 0644)) - layer, err := createLayer(ls, "", li) - if err != nil { - t.Fatal(err) - } - - mountInit := func(root string) error { - return newTestFile("file-init", contentInit, 0777).ApplyFile(root) - } - - m, err := ls.CreateRWLayer("mount-size", layer.ChainID(), "", mountInit, nil) - if err != nil { - t.Fatal(err) - } - - path, err := m.Mount("") - if err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(path, "file2"), content2, 0755); err != nil { - t.Fatal(err) - } - - mountSize, err := m.Size() - if err != nil { - t.Fatal(err) - } - - if expected := len(content2); int(mountSize) != expected { - t.Fatalf("Unexpected mount size %d, expected %d", int(mountSize), expected) - } -} - -func TestMountChanges(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - ls, _, cleanup := newTestStore(t) - defer cleanup() - - basefiles := []FileApplier{ - newTestFile("testfile1.txt", []byte("base data!"), 0644), - newTestFile("testfile2.txt", []byte("base data!"), 0644), - newTestFile("testfile3.txt", []byte("base data!"), 0644), - } - initfile := 
newTestFile("testfile1.txt", []byte("init data!"), 0777) - - li := initWithFiles(basefiles...) - layer, err := createLayer(ls, "", li) - if err != nil { - t.Fatal(err) - } - - mountInit := func(root string) error { - return initfile.ApplyFile(root) - } - - m, err := ls.CreateRWLayer("mount-changes", layer.ChainID(), "", mountInit, nil) - if err != nil { - t.Fatal(err) - } - - path, err := m.Mount("") - if err != nil { - t.Fatal(err) - } - - if err := os.Chmod(filepath.Join(path, "testfile1.txt"), 0755); err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(path, "testfile1.txt"), []byte("mount data!"), 0755); err != nil { - t.Fatal(err) - } - - if err := os.Remove(filepath.Join(path, "testfile2.txt")); err != nil { - t.Fatal(err) - } - - if err := os.Chmod(filepath.Join(path, "testfile3.txt"), 0755); err != nil { - t.Fatal(err) - } - - if err := ioutil.WriteFile(filepath.Join(path, "testfile4.txt"), []byte("mount data!"), 0644); err != nil { - t.Fatal(err) - } - - changes, err := m.Changes() - if err != nil { - t.Fatal(err) - } - - if expected := 4; len(changes) != expected { - t.Fatalf("Wrong number of changes %d, expected %d", len(changes), expected) - } - - sortChanges(changes) - - assertChange(t, changes[0], archive.Change{ - Path: "/testfile1.txt", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[1], archive.Change{ - Path: "/testfile2.txt", - Kind: archive.ChangeDelete, - }) - assertChange(t, changes[2], archive.Change{ - Path: "/testfile3.txt", - Kind: archive.ChangeModify, - }) - assertChange(t, changes[3], archive.Change{ - Path: "/testfile4.txt", - Kind: archive.ChangeAdd, - }) -} - -func assertChange(t *testing.T, actual, expected archive.Change) { - if actual.Path != expected.Path { - t.Fatalf("Unexpected change path %s, expected %s", actual.Path, expected.Path) - } - if actual.Kind != expected.Kind { - t.Fatalf("Unexpected change type %s, expected %s", actual.Kind, expected.Kind) - } -} - -func sortChanges(changes []archive.Change) { - cs := &changeSorter{ - changes: changes, - } - sort.Sort(cs) -} - -type changeSorter struct { - changes []archive.Change -} - -func (cs *changeSorter) Len() int { - return len(cs.changes) -} - -func (cs *changeSorter) Swap(i, j int) { - cs.changes[i], cs.changes[j] = cs.changes[j], cs.changes[i] -} - -func (cs *changeSorter) Less(i, j int) bool { - return cs.changes[i].Path < cs.changes[j].Path -} diff --git a/layer/mounted_layer.go b/layer/mounted_layer.go deleted file mode 100644 index add33d9f19..0000000000 --- a/layer/mounted_layer.go +++ /dev/null @@ -1,103 +0,0 @@ -package layer - -import ( - "io" - - "github.com/docker/docker/pkg/archive" -) - -type mountedLayer struct { - name string - mountID string - initID string - parent *roLayer - path string - layerStore *layerStore - - references map[RWLayer]*referencedRWLayer -} - -func (ml *mountedLayer) cacheParent() string { - if ml.initID != "" { - return ml.initID - } - if ml.parent != nil { - return ml.parent.cacheID - } - return "" -} - -func (ml *mountedLayer) TarStream() (io.ReadCloser, error) { - archiver, err := ml.layerStore.driver.Diff(ml.mountID, ml.cacheParent()) - if err != nil { - return nil, err - } - return archiver, nil -} - -func (ml *mountedLayer) Name() string { - return ml.name -} - -func (ml *mountedLayer) Parent() Layer { - if ml.parent != nil { - return ml.parent - } - - // Return a nil interface instead of an interface wrapping a nil - // pointer. 
- return nil -} - -func (ml *mountedLayer) Size() (int64, error) { - return ml.layerStore.driver.DiffSize(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Changes() ([]archive.Change, error) { - return ml.layerStore.driver.Changes(ml.mountID, ml.cacheParent()) -} - -func (ml *mountedLayer) Metadata() (map[string]string, error) { - return ml.layerStore.driver.GetMetadata(ml.mountID) -} - -func (ml *mountedLayer) getReference() RWLayer { - ref := &referencedRWLayer{ - mountedLayer: ml, - } - ml.references[ref] = ref - - return ref -} - -func (ml *mountedLayer) hasReferences() bool { - return len(ml.references) > 0 -} - -func (ml *mountedLayer) deleteReference(ref RWLayer) error { - if _, ok := ml.references[ref]; !ok { - return ErrLayerNotRetained - } - delete(ml.references, ref) - return nil -} - -func (ml *mountedLayer) retakeReference(r RWLayer) { - if ref, ok := r.(*referencedRWLayer); ok { - ml.references[ref] = ref - } -} - -type referencedRWLayer struct { - *mountedLayer -} - -func (rl *referencedRWLayer) Mount(mountLabel string) (string, error) { - return rl.layerStore.driver.Get(rl.mountedLayer.mountID, mountLabel) -} - -// Unmount decrements the activity count and unmounts the underlying layer -// Callers should only call `Unmount` once per call to `Mount`, even on error. -func (rl *referencedRWLayer) Unmount() error { - return rl.layerStore.driver.Put(rl.mountedLayer.mountID) -} diff --git a/layer/ro_layer.go b/layer/ro_layer.go deleted file mode 100644 index 9fb1cafebe..0000000000 --- a/layer/ro_layer.go +++ /dev/null @@ -1,172 +0,0 @@ -package layer - -import ( - "fmt" - "io" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" -) - -type roLayer struct { - chainID ChainID - diffID DiffID - parent *roLayer - cacheID string - size int64 - layerStore *layerStore - descriptor distribution.Descriptor - - referenceCount int - references map[Layer]struct{} -} - -func (rl *roLayer) TarStream() (io.ReadCloser, error) { - r, err := rl.layerStore.store.TarSplitReader(rl.chainID) - if err != nil { - return nil, err - } - - pr, pw := io.Pipe() - go func() { - err := rl.layerStore.assembleTarTo(rl.cacheID, r, nil, pw) - if err != nil { - pw.CloseWithError(err) - } else { - pw.Close() - } - }() - rc, err := newVerifiedReadCloser(pr, digest.Digest(rl.diffID)) - if err != nil { - return nil, err - } - return rc, nil -} - -func (rl *roLayer) ChainID() ChainID { - return rl.chainID -} - -func (rl *roLayer) DiffID() DiffID { - return rl.diffID -} - -func (rl *roLayer) Parent() Layer { - if rl.parent == nil { - return nil - } - return rl.parent -} - -func (rl *roLayer) Size() (size int64, err error) { - if rl.parent != nil { - size, err = rl.parent.Size() - if err != nil { - return - } - } - - return size + rl.size, nil -} - -func (rl *roLayer) DiffSize() (size int64, err error) { - return rl.size, nil -} - -func (rl *roLayer) Metadata() (map[string]string, error) { - return rl.layerStore.driver.GetMetadata(rl.cacheID) -} - -type referencedCacheLayer struct { - *roLayer -} - -func (rl *roLayer) getReference() Layer { - ref := &referencedCacheLayer{ - roLayer: rl, - } - rl.references[ref] = struct{}{} - - return ref -} - -func (rl *roLayer) hasReference(ref Layer) bool { - _, ok := rl.references[ref] - return ok -} - -func (rl *roLayer) hasReferences() bool { - return len(rl.references) > 0 -} - -func (rl *roLayer) deleteReference(ref Layer) { - delete(rl.references, ref) -} - -func (rl *roLayer) depth() int { - if rl.parent == nil { - return 1 - } - return 
rl.parent.depth() + 1 -} - -func storeLayer(tx MetadataTransaction, layer *roLayer) error { - if err := tx.SetDiffID(layer.diffID); err != nil { - return err - } - if err := tx.SetSize(layer.size); err != nil { - return err - } - if err := tx.SetCacheID(layer.cacheID); err != nil { - return err - } - // Do not store empty descriptors - if layer.descriptor.Digest != "" { - if err := tx.SetDescriptor(layer.descriptor); err != nil { - return err - } - } - if layer.parent != nil { - if err := tx.SetParent(layer.parent.chainID); err != nil { - return err - } - } - - return nil -} - -func newVerifiedReadCloser(rc io.ReadCloser, dgst digest.Digest) (io.ReadCloser, error) { - verifier, err := digest.NewDigestVerifier(dgst) - if err != nil { - return nil, err - } - return &verifiedReadCloser{ - rc: rc, - dgst: dgst, - verifier: verifier, - }, nil -} - -type verifiedReadCloser struct { - rc io.ReadCloser - dgst digest.Digest - verifier digest.Verifier -} - -func (vrc *verifiedReadCloser) Read(p []byte) (n int, err error) { - n, err = vrc.rc.Read(p) - if n > 0 { - if n, err := vrc.verifier.Write(p[:n]); err != nil { - return n, err - } - } - if err == io.EOF { - if !vrc.verifier.Verified() { - err = fmt.Errorf("could not verify layer data for: %s. This may be because internal files in the layer store were modified. Re-pulling or rebuilding this image may resolve the issue", vrc.dgst) - } - } - return -} -func (vrc *verifiedReadCloser) Close() error { - return vrc.rc.Close() -} diff --git a/layer/ro_layer_windows.go b/layer/ro_layer_windows.go deleted file mode 100644 index 32bd7182a3..0000000000 --- a/layer/ro_layer_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package layer - -import "github.com/docker/distribution" - -var _ distribution.Describable = &roLayer{} - -func (rl *roLayer) Descriptor() distribution.Descriptor { - return rl.descriptor -} diff --git a/libcontainerd/client.go b/libcontainerd/client.go deleted file mode 100644 index 7e8e47bcfa..0000000000 --- a/libcontainerd/client.go +++ /dev/null @@ -1,46 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "sync" - - "github.com/docker/docker/pkg/locker" -) - -// clientCommon contains the platform agnostic fields used in the client structure -type clientCommon struct { - backend Backend - containers map[string]*container - locker *locker.Locker - mapMutex sync.RWMutex // protects read/write oprations from containers map -} - -func (clnt *client) lock(containerID string) { - clnt.locker.Lock(containerID) -} - -func (clnt *client) unlock(containerID string) { - clnt.locker.Unlock(containerID) -} - -// must hold a lock for cont.containerID -func (clnt *client) appendContainer(cont *container) { - clnt.mapMutex.Lock() - clnt.containers[cont.containerID] = cont - clnt.mapMutex.Unlock() -} -func (clnt *client) deleteContainer(friendlyName string) { - clnt.mapMutex.Lock() - delete(clnt.containers, friendlyName) - clnt.mapMutex.Unlock() -} - -func (clnt *client) getContainer(containerID string) (*container, error) { - clnt.mapMutex.RLock() - container, ok := clnt.containers[containerID] - defer clnt.mapMutex.RUnlock() - if !ok { - return nil, fmt.Errorf("invalid container: %s", containerID) // fixme: typed error - } - return container, nil -} diff --git a/libcontainerd/client_linux.go b/libcontainerd/client_linux.go deleted file mode 100644 index b9e7415f38..0000000000 --- a/libcontainerd/client_linux.go +++ /dev/null @@ -1,627 +0,0 @@ -package libcontainerd - -import ( - "encoding/json" - "fmt" - "os" - "path/filepath" - "strings" - "sync" - 
"syscall" - "time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/mount" - "github.com/golang/protobuf/ptypes" - "github.com/golang/protobuf/ptypes/timestamp" - specs "github.com/opencontainers/specs/specs-go" - "golang.org/x/net/context" -) - -type client struct { - clientCommon - - // Platform specific properties below here. - remote *remote - q queue - exitNotifiers map[string]*exitNotifier - liveRestore bool -} - -func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return err - } - - spec, err := container.spec() - if err != nil { - return err - } - sp := spec.Process - sp.Args = specp.Args - sp.Terminal = specp.Terminal - if specp.Env != nil { - sp.Env = specp.Env - } - if specp.Cwd != nil { - sp.Cwd = *specp.Cwd - } - if specp.User != nil { - sp.User = specs.User{ - UID: specp.User.UID, - GID: specp.User.GID, - AdditionalGids: specp.User.AdditionalGids, - } - } - if specp.Capabilities != nil { - sp.Capabilities = specp.Capabilities - } - - p := container.newProcess(processFriendlyName) - - r := &containerd.AddProcessRequest{ - Args: sp.Args, - Cwd: sp.Cwd, - Terminal: sp.Terminal, - Id: containerID, - Env: sp.Env, - User: &containerd.User{ - Uid: sp.User.UID, - Gid: sp.User.GID, - AdditionalGids: sp.User.AdditionalGids, - }, - Pid: processFriendlyName, - Stdin: p.fifo(syscall.Stdin), - Stdout: p.fifo(syscall.Stdout), - Stderr: p.fifo(syscall.Stderr), - Capabilities: sp.Capabilities, - ApparmorProfile: sp.ApparmorProfile, - SelinuxLabel: sp.SelinuxLabel, - NoNewPrivileges: sp.NoNewPrivileges, - Rlimits: convertRlimits(sp.Rlimits), - } - - iopipe, err := p.openFifos(sp.Terminal) - if err != nil { - return err - } - - if _, err := clnt.remote.apiClient.AddProcess(ctx, r); err != nil { - p.closeFifos(iopipe) - return err - } - - container.processes[processFriendlyName] = p - - clnt.unlock(containerID) - - if err := clnt.backend.AttachStreams(processFriendlyName, *iopipe); err != nil { - return err - } - clnt.lock(containerID) - - return nil -} - -func (clnt *client) prepareBundleDir(uid, gid int) (string, error) { - root, err := filepath.Abs(clnt.remote.stateDir) - if err != nil { - return "", err - } - if uid == 0 && gid == 0 { - return root, nil - } - p := string(filepath.Separator) - for _, d := range strings.Split(root, string(filepath.Separator))[1:] { - p = filepath.Join(p, d) - fi, err := os.Stat(p) - if err != nil && !os.IsNotExist(err) { - return "", err - } - if os.IsNotExist(err) || fi.Mode()&1 == 0 { - p = fmt.Sprintf("%s.%d.%d", p, uid, gid) - if err := idtools.MkdirAs(p, 0700, uid, gid); err != nil && !os.IsExist(err) { - return "", err - } - } - } - return p, nil -} - -func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) (err error) { - clnt.lock(containerID) - defer clnt.unlock(containerID) - - if ctr, err := clnt.getContainer(containerID); err == nil { - if ctr.restarting { - ctr.restartManager.Cancel() - ctr.clean() - } else { - return fmt.Errorf("Container %s is already active", containerID) - } - } - - uid, gid, err := getRootIDs(specs.Spec(spec)) - if err != nil { - return err - } - dir, err := clnt.prepareBundleDir(uid, gid) - if err != nil { - return err - } - - container := clnt.newContainer(filepath.Join(dir, containerID), 
options...) - if err := container.clean(); err != nil { - return err - } - - defer func() { - if err != nil { - container.clean() - clnt.deleteContainer(containerID) - } - }() - - if err := idtools.MkdirAllAs(container.dir, 0700, uid, gid); err != nil && !os.IsExist(err) { - return err - } - - f, err := os.Create(filepath.Join(container.dir, configFilename)) - if err != nil { - return err - } - defer f.Close() - if err := json.NewEncoder(f).Encode(spec); err != nil { - return err - } - - return container.start() -} - -func (clnt *client) Signal(containerID string, sig int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ - Id: containerID, - Pid: InitFriendlyName, - Signal: uint32(sig), - }) - return err -} - -func (clnt *client) SignalProcess(containerID string, pid string, sig int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - _, err := clnt.remote.apiClient.Signal(context.Background(), &containerd.SignalRequest{ - Id: containerID, - Pid: pid, - Signal: uint32(sig), - }) - return err -} - -func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - if _, err := clnt.getContainer(containerID); err != nil { - return err - } - _, err := clnt.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ - Id: containerID, - Pid: processFriendlyName, - Width: uint32(width), - Height: uint32(height), - }) - return err -} - -func (clnt *client) Pause(containerID string) error { - return clnt.setState(containerID, StatePause) -} - -func (clnt *client) setState(containerID, state string) error { - clnt.lock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - clnt.unlock(containerID) - return err - } - if container.systemPid == 0 { - clnt.unlock(containerID) - return fmt.Errorf("No active process for container %s", containerID) - } - st := "running" - if state == StatePause { - st = "paused" - } - chstate := make(chan struct{}) - _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ - Id: containerID, - Pid: InitFriendlyName, - Status: st, - }) - if err != nil { - clnt.unlock(containerID) - return err - } - container.pauseMonitor.append(state, chstate) - clnt.unlock(containerID) - <-chstate - return nil -} - -func (clnt *client) Resume(containerID string) error { - return clnt.setState(containerID, StateResume) -} - -func (clnt *client) Stats(containerID string) (*Stats, error) { - resp, err := clnt.remote.apiClient.Stats(context.Background(), &containerd.StatsRequest{containerID}) - if err != nil { - return nil, err - } - return (*Stats)(resp), nil -} - -// Take care of the old 1.11.0 behavior in case the version upgrade -// happened without a clean daemon shutdown -func (clnt *client) cleanupOldRootfs(containerID string) { - // Unmount and delete the bundle folder - if mts, err := mount.GetMounts(); err == nil { - for _, mts := range mts { - if strings.HasSuffix(mts.Mountpoint, containerID+"/rootfs") { - if err := syscall.Unmount(mts.Mountpoint, syscall.MNT_DETACH); err == nil { - os.RemoveAll(strings.TrimSuffix(mts.Mountpoint, "/rootfs")) - } - break - } - } - } -} - -func (clnt *client) setExited(containerID string, exitCode uint32) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - - err := clnt.backend.StateChanged(containerID, StateInfo{ - 
CommonStateInfo: CommonStateInfo{ - State: StateExit, - ExitCode: exitCode, - }}) - - clnt.cleanupOldRootfs(containerID) - - return err -} - -func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { - cont, err := clnt.getContainerdContainer(containerID) - if err != nil { - return nil, err - } - pids := make([]int, len(cont.Pids)) - for i, p := range cont.Pids { - pids[i] = int(p) - } - return pids, nil -} - -// Summary returns a summary of the processes running in a container. -// This is a no-op on Linux. -func (clnt *client) Summary(containerID string) ([]Summary, error) { - return nil, nil -} - -func (clnt *client) getContainerdContainer(containerID string) (*containerd.Container, error) { - resp, err := clnt.remote.apiClient.State(context.Background(), &containerd.StateRequest{Id: containerID}) - if err != nil { - return nil, err - } - for _, cont := range resp.Containers { - if cont.Id == containerID { - return cont, nil - } - } - return nil, fmt.Errorf("invalid state response") -} - -func (clnt *client) newContainer(dir string, options ...CreateOption) *container { - container := &container{ - containerCommon: containerCommon{ - process: process{ - dir: dir, - processCommon: processCommon{ - containerID: filepath.Base(dir), - client: clnt, - friendlyName: InitFriendlyName, - }, - }, - processes: make(map[string]*process), - }, - } - for _, option := range options { - if err := option.Apply(container); err != nil { - logrus.Errorf("libcontainerd: newContainer(): %v", err) - } - } - return container -} - -func (clnt *client) UpdateResources(containerID string, resources Resources) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return err - } - if container.systemPid == 0 { - return fmt.Errorf("No active process for container %s", containerID) - } - _, err = clnt.remote.apiClient.UpdateContainer(context.Background(), &containerd.UpdateContainerRequest{ - Id: containerID, - Pid: InitFriendlyName, - Resources: (*containerd.UpdateResource)(&resources), - }) - if err != nil { - return err - } - return nil -} - -func (clnt *client) getExitNotifier(containerID string) *exitNotifier { - clnt.mapMutex.RLock() - defer clnt.mapMutex.RUnlock() - return clnt.exitNotifiers[containerID] -} - -func (clnt *client) getOrCreateExitNotifier(containerID string) *exitNotifier { - clnt.mapMutex.Lock() - w, ok := clnt.exitNotifiers[containerID] - defer clnt.mapMutex.Unlock() - if !ok { - w = &exitNotifier{c: make(chan struct{}), client: clnt} - clnt.exitNotifiers[containerID] = w - } - return w -} - -func (clnt *client) restore(cont *containerd.Container, lastEvent *containerd.Event, options ...CreateOption) (err error) { - clnt.lock(cont.Id) - defer clnt.unlock(cont.Id) - - logrus.Debugf("libcontainerd: restore container %s state %s", cont.Id, cont.Status) - - containerID := cont.Id - if _, err := clnt.getContainer(containerID); err == nil { - return fmt.Errorf("container %s is already active", containerID) - } - - defer func() { - if err != nil { - clnt.deleteContainer(cont.Id) - } - }() - - container := clnt.newContainer(cont.BundlePath, options...) 
- container.systemPid = systemPid(cont) - - var terminal bool - for _, p := range cont.Processes { - if p.Pid == InitFriendlyName { - terminal = p.Terminal - } - } - - iopipe, err := container.openFifos(terminal) - if err != nil { - return err - } - - if err := clnt.backend.AttachStreams(containerID, *iopipe); err != nil { - return err - } - - clnt.appendContainer(container) - - err = clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateRestore, - Pid: container.systemPid, - }}) - - if err != nil { - return err - } - - if lastEvent != nil { - // This should only be a pause or resume event - if lastEvent.Type == StatePause || lastEvent.Type == StateResume { - return clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: lastEvent.Type, - Pid: container.systemPid, - }}) - } - - logrus.Warnf("libcontainerd: unexpected backlog event: %#v", lastEvent) - } - - return nil -} - -func (clnt *client) getContainerLastEventSinceTime(id string, tsp *timestamp.Timestamp) (*containerd.Event, error) { - er := &containerd.EventsRequest{ - Timestamp: tsp, - StoredOnly: true, - Id: id, - } - events, err := clnt.remote.apiClient.Events(context.Background(), er) - if err != nil { - logrus.Errorf("libcontainerd: failed to get container events stream for %s: %q", er.Id, err) - return nil, err - } - - var ev *containerd.Event - for { - e, err := events.Recv() - if err != nil { - if err.Error() == "EOF" { - break - } - logrus.Errorf("libcontainerd: failed to get container event for %s: %q", id, err) - return nil, err - } - - logrus.Debugf("libcontainerd: received past event %#v", e) - - switch e.Type { - case StateExit, StatePause, StateResume: - ev = e - } - } - - return ev, nil -} - -func (clnt *client) getContainerLastEvent(id string) (*containerd.Event, error) { - ev, err := clnt.getContainerLastEventSinceTime(id, clnt.remote.restoreFromTimestamp) - if err == nil && ev == nil { - // If ev is nil and the container is running in containerd, - // we already consumed all the event of the - // container, included the "exit" one. - // Thus, we request all events containerd has in memory for - // this container in order to get the last one (which should - // be an exit event) - logrus.Warnf("libcontainerd: client is out of sync, restore was called on a fully synced container (%s).", id) - // Request all events since beginning of time - t := time.Unix(0, 0) - tsp, err := ptypes.TimestampProto(t) - if err != nil { - logrus.Errorf("libcontainerd: getLastEventSinceTime() failed to convert timestamp: %q", err) - return nil, err - } - - return clnt.getContainerLastEventSinceTime(id, tsp) - } - - return ev, err -} - -func (clnt *client) Restore(containerID string, options ...CreateOption) error { - // Synchronize with live events - clnt.remote.Lock() - defer clnt.remote.Unlock() - // Check that containerd still knows this container. - // - // In the unlikely event that Restore for this container process - // the its past event before the main loop, the event will be - // processed twice. However, this is not an issue as all those - // events will do is change the state of the container to be - // exactly the same. 
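getContainerLastEventSinceTime above is a reusable pattern: drain a finite gRPC event backlog to EOF and keep only the most recent event of interest. A compact sketch under simplified types (Event and eventStream are stand-ins for the containerd API; note the real code compares err.Error() against "EOF" because the sentinel crosses the gRPC boundary):

```go
package main

import (
	"fmt"
	"io"
)

type Event struct{ Type string }

// eventStream stands in for the containerd Events gRPC stream.
type eventStream interface {
	Recv() (*Event, error) // io.EOF once the stored backlog is drained
}

// lastRelevantEvent scans the whole backlog and remembers the most
// recent exit/pause/resume event, mirroring the loop above.
func lastRelevantEvent(s eventStream) (*Event, error) {
	var last *Event
	for {
		e, err := s.Recv()
		if err == io.EOF {
			return last, nil
		}
		if err != nil {
			return nil, err
		}
		switch e.Type {
		case "exit", "pause", "resume":
			last = e
		}
	}
}

// sliceStream is a trivial in-memory stream for demonstration.
type sliceStream struct{ events []Event }

func (s *sliceStream) Recv() (*Event, error) {
	if len(s.events) == 0 {
		return nil, io.EOF
	}
	e := s.events[0]
	s.events = s.events[1:]
	return &e, nil
}

func main() {
	s := &sliceStream{events: []Event{{"start"}, {"pause"}, {"resume"}, {"exit"}}}
	last, _ := lastRelevantEvent(s)
	fmt.Println(last.Type) // exit
}
```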
- cont, err := clnt.getContainerdContainer(containerID) - // Get its last event - ev, eerr := clnt.getContainerLastEvent(containerID) - if err != nil || cont.Status == "Stopped" { - if err != nil && !strings.Contains(err.Error(), "container not found") { - // Legitimate error - return err - } - - if ev == nil { - if _, err := clnt.getContainer(containerID); err == nil { - // If ev is nil and the container is running in containerd, - // we already consumed all the event of the - // container, included the "exit" one. - // Thus we return to avoid overriding the Exit Code. - logrus.Warnf("libcontainerd: restore was called on a fully synced container (%s)", containerID) - return nil - } - // the container is not running so we need to fix the state within docker - ev = &containerd.Event{ - Type: StateExit, - Status: 1, - } - } - - // get the exit status for this container - ec := uint32(0) - if eerr == nil && ev.Type == StateExit { - ec = ev.Status - } - clnt.setExited(containerID, ec) - - return nil - } - - // container is still alive - if clnt.liveRestore { - if err := clnt.restore(cont, ev, options...); err != nil { - logrus.Errorf("libcontainerd: error restoring %s: %v", containerID, err) - } - return nil - } - - // Kill the container if liveRestore == false - w := clnt.getOrCreateExitNotifier(containerID) - clnt.lock(cont.Id) - container := clnt.newContainer(cont.BundlePath) - container.systemPid = systemPid(cont) - clnt.appendContainer(container) - clnt.unlock(cont.Id) - - container.discardFifos() - - if err := clnt.Signal(containerID, int(syscall.SIGTERM)); err != nil { - logrus.Errorf("libcontainerd: error sending sigterm to %v: %v", containerID, err) - } - // Let the main loop handle the exit event - clnt.remote.Unlock() - select { - case <-time.After(10 * time.Second): - if err := clnt.Signal(containerID, int(syscall.SIGKILL)); err != nil { - logrus.Errorf("libcontainerd: error sending sigkill to %v: %v", containerID, err) - } - select { - case <-time.After(2 * time.Second): - case <-w.wait(): - // relock because of the defer - clnt.remote.Lock() - return nil - } - case <-w.wait(): - // relock because of the defer - clnt.remote.Lock() - return nil - } - // relock because of the defer - clnt.remote.Lock() - - clnt.deleteContainer(containerID) - - return clnt.setExited(containerID, uint32(255)) -} - -type exitNotifier struct { - id string - client *client - c chan struct{} - once sync.Once -} - -func (en *exitNotifier) close() { - en.once.Do(func() { - close(en.c) - en.client.mapMutex.Lock() - if en == en.client.exitNotifiers[en.id] { - delete(en.client.exitNotifiers, en.id) - } - en.client.mapMutex.Unlock() - }) -} -func (en *exitNotifier) wait() <-chan struct{} { - return en.c -} diff --git a/libcontainerd/client_solaris.go b/libcontainerd/client_solaris.go deleted file mode 100644 index 1c14d301b5..0000000000 --- a/libcontainerd/client_solaris.go +++ /dev/null @@ -1,58 +0,0 @@ -package libcontainerd - -import "golang.org/x/net/context" - -type client struct { - clientCommon - - // Platform specific properties below here. 
-} - -func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, specp Process) error { - return nil -} - -func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) (err error) { - return nil -} - -func (clnt *client) Signal(containerID string, sig int) error { - return nil -} - -func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { - return nil -} - -func (clnt *client) Pause(containerID string) error { - return nil -} - -func (clnt *client) Resume(containerID string) error { - return nil -} - -func (clnt *client) Stats(containerID string) (*Stats, error) { - return nil, nil -} - -// Restore is the handler for restoring a container -func (clnt *client) Restore(containerID string, unusedOnWindows ...CreateOption) error { - return nil -} - -func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { - return nil, nil -} - -// Summary returns a summary of the processes running in a container. -func (clnt *client) Summary(containerID string) ([]Summary, error) { - return nil, nil -} - -// UpdateResources updates resources for a running container. -func (clnt *client) UpdateResources(containerID string, resources Resources) error { - // Updating resource isn't supported on Solaris - // but we should return nil for enabling updating container - return nil -} diff --git a/libcontainerd/client_windows.go b/libcontainerd/client_windows.go deleted file mode 100644 index cfaec46c98..0000000000 --- a/libcontainerd/client_windows.go +++ /dev/null @@ -1,436 +0,0 @@ -package libcontainerd - -import ( - "errors" - "fmt" - "io" - "path/filepath" - "strings" - "syscall" - - "golang.org/x/net/context" - - "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" -) - -type client struct { - clientCommon - - // Platform specific properties below here (none presently on Windows) -} - -// Win32 error codes that are used for various workarounds -// These really should be ALL_CAPS to match golangs syscall library and standard -// Win32 error conventions, but golint insists on CamelCase. -const ( - CoEClassstring = syscall.Errno(0x800401F3) // Invalid class string - ErrorNoNetwork = syscall.Errno(1222) // The network is not present or not started - ErrorBadPathname = syscall.Errno(161) // The specified path is invalid - ErrorInvalidObject = syscall.Errno(0x800710D8) // The object identifier does not represent a valid object -) - -// defaultOwner is a tag passed to HCS to allow it to differentiate between -// container creator management stacks. We hard code "docker" in the case -// of docker. -const defaultOwner = "docker" - -// Create is the entrypoint to create a container from a spec, and if successfully -// created, start it too. 
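The Solaris client above satisfies the same client interface as the Linux and Windows implementations entirely with stubs, which keeps the daemon's call sites free of per-platform branching: unsupported operations either return nil (Solaris resource updates) or an explicit error (Windows pause, below). A minimal sketch of that pattern with illustrative names, not the engine's real interfaces:

```go
package main

import (
	"errors"
	"fmt"
)

// containerClient is an illustrative stand-in for the shared client
// interface; the real method set is much larger.
type containerClient interface {
	Pause(containerID string) error
}

// noopClient mirrors client_solaris.go: the operation silently
// succeeds so callers can proceed without platform checks.
type noopClient struct{}

func (noopClient) Pause(string) error { return nil }

// refusingClient mirrors client_windows.go: the operation fails with
// a descriptive error instead.
type refusingClient struct{}

func (refusingClient) Pause(string) error {
	return errors.New("containers cannot be paused on this platform")
}

func main() {
	for _, c := range []containerClient{noopClient{}, refusingClient{}} {
		fmt.Println(c.Pause("abc123"))
	}
}
```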
-func (clnt *client) Create(containerID string, spec Spec, options ...CreateOption) error { - logrus.Debugln("libcontainerd: client.Create() with spec", spec) - - configuration := &hcsshim.ContainerConfig{ - SystemType: "Container", - Name: containerID, - Owner: defaultOwner, - - VolumePath: spec.Root.Path, - IgnoreFlushesDuringBoot: spec.Windows.FirstStart, - LayerFolderPath: spec.Windows.LayerFolder, - HostName: spec.Hostname, - } - - if spec.Windows.Networking != nil { - configuration.EndpointList = spec.Windows.Networking.EndpointList - } - - if spec.Windows.Resources != nil { - if spec.Windows.Resources.CPU != nil { - if spec.Windows.Resources.CPU.Shares != nil { - configuration.ProcessorWeight = *spec.Windows.Resources.CPU.Shares - } - if spec.Windows.Resources.CPU.Percent != nil { - configuration.ProcessorMaximum = *spec.Windows.Resources.CPU.Percent * 100 // ProcessorMaximum is a value between 1 and 10000 - } - } - if spec.Windows.Resources.Memory != nil { - if spec.Windows.Resources.Memory.Limit != nil { - configuration.MemoryMaximumInMB = *spec.Windows.Resources.Memory.Limit / 1024 / 1024 - } - } - if spec.Windows.Resources.Storage != nil { - if spec.Windows.Resources.Storage.Bps != nil { - configuration.StorageBandwidthMaximum = *spec.Windows.Resources.Storage.Bps - } - if spec.Windows.Resources.Storage.Iops != nil { - configuration.StorageIOPSMaximum = *spec.Windows.Resources.Storage.Iops - } - if spec.Windows.Resources.Storage.SandboxSize != nil { - configuration.StorageSandboxSize = *spec.Windows.Resources.Storage.SandboxSize - } - } - } - - if spec.Windows.HvRuntime != nil { - configuration.VolumePath = "" // Always empty for Hyper-V containers - configuration.HvPartition = true - configuration.HvRuntime = &hcsshim.HvRuntime{ - ImagePath: spec.Windows.HvRuntime.ImagePath, - } - } - - if configuration.HvPartition { - configuration.SandboxPath = filepath.Dir(spec.Windows.LayerFolder) - } else { - configuration.VolumePath = spec.Root.Path - configuration.LayerFolderPath = spec.Windows.LayerFolder - } - - for _, option := range options { - if s, ok := option.(*ServicingOption); ok { - configuration.Servicing = s.IsServicing - break - } - } - - for _, layerPath := range spec.Windows.LayerPaths { - _, filename := filepath.Split(layerPath) - g, err := hcsshim.NameToGuid(filename) - if err != nil { - return err - } - configuration.Layers = append(configuration.Layers, hcsshim.Layer{ - ID: g.ToString(), - Path: layerPath, - }) - } - - // Add the mounts (volumes, bind mounts etc) to the structure - mds := make([]hcsshim.MappedDir, len(spec.Mounts)) - for i, mount := range spec.Mounts { - mds[i] = hcsshim.MappedDir{ - HostPath: mount.Source, - ContainerPath: mount.Destination, - ReadOnly: mount.Readonly} - } - configuration.MappedDirectories = mds - - hcsContainer, err := hcsshim.CreateContainer(containerID, configuration) - if err != nil { - return err - } - - // Construct a container object for calling start on it. 
- container := &container{ - containerCommon: containerCommon{ - process: process{ - processCommon: processCommon{ - containerID: containerID, - client: clnt, - friendlyName: InitFriendlyName, - }, - commandLine: strings.Join(spec.Process.Args, " "), - }, - processes: make(map[string]*process), - }, - ociSpec: spec, - hcsContainer: hcsContainer, - } - - container.options = options - for _, option := range options { - if err := option.Apply(container); err != nil { - logrus.Errorf("libcontainerd: %v", err) - } - } - - // Call start, and if it fails, delete the container from our - // internal structure, start will keep HCS in sync by deleting the - // container there. - logrus.Debugf("libcontainerd: Create() id=%s, Calling start()", containerID) - if err := container.start(); err != nil { - clnt.deleteContainer(containerID) - return err - } - - logrus.Debugf("libcontainerd: Create() id=%s completed successfully", containerID) - return nil - -} - -// AddProcess is the handler for adding a process to an already running -// container. It's called through docker exec. -func (clnt *client) AddProcess(ctx context.Context, containerID, processFriendlyName string, procToAdd Process) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - container, err := clnt.getContainer(containerID) - if err != nil { - return err - } - // Note we always tell HCS to - // create stdout as it's required regardless of '-i' or '-t' options, so that - // docker can always grab the output through logs. We also tell HCS to always - // create stdin, even if it's not used - it will be closed shortly. Stderr - // is only created if it we're not -t. - createProcessParms := hcsshim.ProcessConfig{ - EmulateConsole: procToAdd.Terminal, - ConsoleSize: procToAdd.InitialConsoleSize, - CreateStdInPipe: true, - CreateStdOutPipe: true, - CreateStdErrPipe: !procToAdd.Terminal, - } - - // Take working directory from the process to add if it is defined, - // otherwise take from the first process. - if procToAdd.Cwd != "" { - createProcessParms.WorkingDirectory = procToAdd.Cwd - } else { - createProcessParms.WorkingDirectory = container.ociSpec.Process.Cwd - } - - // Configure the environment for the process - createProcessParms.Environment = setupEnvironmentVariables(procToAdd.Env) - createProcessParms.CommandLine = strings.Join(procToAdd.Args, " ") - - logrus.Debugf("libcontainerd: commandLine: %s", createProcessParms.CommandLine) - - // Start the command running in the container. - var stdout, stderr io.ReadCloser - var stdin io.WriteCloser - newProcess, err := container.hcsContainer.CreateProcess(&createProcessParms) - if err != nil { - logrus.Errorf("libcontainerd: AddProcess(%s) CreateProcess() failed %s", containerID, err) - return err - } - - stdin, stdout, stderr, err = newProcess.Stdio() - if err != nil { - logrus.Errorf("libcontainerd: %s getting std pipes failed %s", containerID, err) - return err - } - - iopipe := &IOPipe{Terminal: procToAdd.Terminal} - iopipe.Stdin = createStdInCloser(stdin, newProcess) - - // TEMP: Work around Windows BS/DEL behavior. 
- iopipe.Stdin = fixStdinBackspaceBehavior(iopipe.Stdin, container.ociSpec.Platform.OSVersion, procToAdd.Terminal) - - // Convert io.ReadClosers to io.Readers - if stdout != nil { - iopipe.Stdout = openReaderFromPipe(stdout) - } - if stderr != nil { - iopipe.Stderr = openReaderFromPipe(stderr) - } - - pid := newProcess.Pid() - - proc := &process{ - processCommon: processCommon{ - containerID: containerID, - friendlyName: processFriendlyName, - client: clnt, - systemPid: uint32(pid), - }, - commandLine: createProcessParms.CommandLine, - hcsProcess: newProcess, - } - - // Add the process to the container's list of processes - container.processes[processFriendlyName] = proc - - // Make sure the lock is not held while calling back into the daemon - clnt.unlock(containerID) - - // Tell the engine to attach streams back to the client - if err := clnt.backend.AttachStreams(processFriendlyName, *iopipe); err != nil { - return err - } - - // Lock again so that the defer unlock doesn't fail. (I really don't like this code) - clnt.lock(containerID) - - // Spin up a go routine waiting for exit to handle cleanup - go container.waitExit(proc, false) - - return nil -} - -// Signal handles `docker stop` on Windows. While Linux has support for -// the full range of signals, signals aren't really implemented on Windows. -// We fake supporting regular stop and -9 to force kill. -func (clnt *client) Signal(containerID string, sig int) error { - var ( - cont *container - err error - ) - - // Get the container as we need it to find the pid of the process. - clnt.lock(containerID) - defer clnt.unlock(containerID) - if cont, err = clnt.getContainer(containerID); err != nil { - return err - } - - cont.manualStopRequested = true - - logrus.Debugf("libcontainerd: Signal() containerID=%s sig=%d pid=%d", containerID, sig, cont.systemPid) - - if syscall.Signal(sig) == syscall.SIGKILL { - // Terminate the compute system - if err := cont.hcsContainer.Terminate(); err != nil { - if err != hcsshim.ErrVmcomputeOperationPending { - logrus.Errorf("libcontainerd: failed to terminate %s - %q", containerID, err) - } - } - } else { - // Terminate Process - if err := cont.hcsProcess.Kill(); err != nil { - // ignore errors - logrus.Warnf("libcontainerd: failed to terminate pid %d in %s: %q", cont.systemPid, containerID, err) - } - } - - return nil -} - -// While Linux has support for the full range of signals, signals aren't really implemented on Windows. -// We try to terminate the specified process whatever signal is requested. -func (clnt *client) SignalProcess(containerID string, processFriendlyName string, sig int) error { - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return err - } - - for _, p := range cont.processes { - if p.friendlyName == processFriendlyName { - return hcsshim.TerminateProcessInComputeSystem(containerID, p.systemPid) - } - } - - return fmt.Errorf("SignalProcess could not find process %s in %s", processFriendlyName, containerID) -} - -// Resize handles a CLI event to resize an interactive docker run or docker exec -// window. 
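Resize, below, fans a single CLI event out to the right HCS process handle: the init process when the friendly name is InitFriendlyName, otherwise a lookup over the exec'd processes. A self-contained sketch of that dispatch (consoleResizer and the other names here are illustrative, not hcsshim's API):

```go
package main

import "fmt"

// consoleResizer stands in for the hcsshim process handle that the
// real Resize ultimately calls ResizeConsole on.
type consoleResizer interface {
	ResizeConsole(width, height uint16) error
}

type fakeProc struct{ name string }

func (p fakeProc) ResizeConsole(w, h uint16) error {
	fmt.Printf("%s resized to %dx%d\n", p.name, w, h)
	return nil
}

// resize mirrors the dispatch below: "init" addresses the container's
// first process, anything else is matched by friendly name.
func resize(init consoleResizer, execs map[string]consoleResizer, name string, width, height int) error {
	w, h := uint16(width), uint16(height)
	if name == "init" {
		return init.ResizeConsole(w, h)
	}
	if p, ok := execs[name]; ok {
		return p.ResizeConsole(w, h)
	}
	return fmt.Errorf("no process %q to resize", name)
}

func main() {
	execs := map[string]consoleResizer{"exec-1": fakeProc{"exec-1"}}
	_ = resize(fakeProc{"init"}, execs, "exec-1", 120, 40)
}
```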
-func (clnt *client) Resize(containerID, processFriendlyName string, width, height int) error { - // Get the libcontainerd container object - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return err - } - - h, w := uint16(height), uint16(width) - - if processFriendlyName == InitFriendlyName { - logrus.Debugln("libcontainerd: resizing systemPID in", containerID, cont.process.systemPid) - return cont.process.hcsProcess.ResizeConsole(w, h) - } - - for _, p := range cont.processes { - if p.friendlyName == processFriendlyName { - logrus.Debugln("libcontainerd: resizing exec'd process", containerID, p.systemPid) - return p.hcsProcess.ResizeConsole(w, h) - } - } - - return fmt.Errorf("Resize could not find containerID %s to resize", containerID) - -} - -// Pause handles pause requests for containers -func (clnt *client) Pause(containerID string) error { - return errors.New("Windows: Containers cannot be paused") -} - -// Resume handles resume requests for containers -func (clnt *client) Resume(containerID string) error { - return errors.New("Windows: Containers cannot be paused") -} - -// Stats handles stats requests for containers -func (clnt *client) Stats(containerID string) (*Stats, error) { - return nil, errors.New("Windows: Stats not implemented") -} - -// Restore is the handler for restoring a container -func (clnt *client) Restore(containerID string, unusedOnWindows ...CreateOption) error { - // TODO Windows: Implement this. For now, just tell the backend the container exited. - logrus.Debugf("libcontainerd: Restore(%s)", containerID) - return clnt.backend.StateChanged(containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateExit, - ExitCode: 1 << 31, - }}) -} - -// GetPidsForContainer returns a list of process IDs running in a container. -// Although implemented, this is not used in Windows. -func (clnt *client) GetPidsForContainer(containerID string) ([]int, error) { - var pids []int - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return nil, err - } - - // Add the first process - pids = append(pids, int(cont.containerCommon.systemPid)) - // And add all the exec'd processes - for _, p := range cont.processes { - pids = append(pids, int(p.processCommon.systemPid)) - } - return pids, nil -} - -// Summary returns a summary of the processes running in a container. -// This is present in Windows to support docker top. In linux, the -// engine shells out to ps to get process information. On Windows, as -// the containers could be Hyper-V containers, they would not be -// visible on the container host. However, libcontainerd does have -// that information. -func (clnt *client) Summary(containerID string) ([]Summary, error) { - var s []Summary - clnt.lock(containerID) - defer clnt.unlock(containerID) - cont, err := clnt.getContainer(containerID) - if err != nil { - return nil, err - } - - // Add the first process - s = append(s, Summary{ - Pid: cont.containerCommon.systemPid, - Command: cont.ociSpec.Process.Args[0]}) - // And add all the exec'd processes - for _, p := range cont.processes { - s = append(s, Summary{ - Pid: p.processCommon.systemPid, - Command: p.commandLine}) - } - return s, nil - -} - -// UpdateResources updates resources for a running container. 
-func (clnt *client) UpdateResources(containerID string, resources Resources) error { - // Updating resource isn't supported on Windows - // but we should return nil for enabling updating container - return nil -} diff --git a/libcontainerd/container.go b/libcontainerd/container.go deleted file mode 100644 index 30bc95028c..0000000000 --- a/libcontainerd/container.go +++ /dev/null @@ -1,40 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "time" - - "github.com/docker/docker/restartmanager" -) - -const ( - // InitFriendlyName is the name given in the lookup map of processes - // for the first process started in a container. - InitFriendlyName = "init" - configFilename = "config.json" -) - -type containerCommon struct { - process - restartManager restartmanager.RestartManager - restarting bool - processes map[string]*process - startedAt time.Time -} - -// WithRestartManager sets the restartmanager to be used with the container. -func WithRestartManager(rm restartmanager.RestartManager) CreateOption { - return restartManager{rm} -} - -type restartManager struct { - rm restartmanager.RestartManager -} - -func (rm restartManager) Apply(p interface{}) error { - if pr, ok := p.(*container); ok { - pr.restartManager = rm.rm - return nil - } - return fmt.Errorf("WithRestartManager option not supported for this client") -} diff --git a/libcontainerd/container_linux.go b/libcontainerd/container_linux.go deleted file mode 100644 index 454478b5c2..0000000000 --- a/libcontainerd/container_linux.go +++ /dev/null @@ -1,243 +0,0 @@ -package libcontainerd - -import ( - "encoding/json" - "io" - "io/ioutil" - "os" - "path/filepath" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/restartmanager" - "github.com/opencontainers/specs/specs-go" - "golang.org/x/net/context" -) - -type container struct { - containerCommon - - // Platform specific fields are below here. - pauseMonitor - oom bool - runtime string - runtimeArgs []string -} - -type runtime struct { - path string - args []string -} - -// WithRuntime sets the runtime to be used for the created container -func WithRuntime(path string, args []string) CreateOption { - return runtime{path, args} -} - -func (rt runtime) Apply(p interface{}) error { - if pr, ok := p.(*container); ok { - pr.runtime = rt.path - pr.runtimeArgs = rt.args - } - return nil -} - -func (ctr *container) clean() error { - if os.Getenv("LIBCONTAINERD_NOCLEAN") == "1" { - return nil - } - if _, err := os.Lstat(ctr.dir); err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - if err := os.RemoveAll(ctr.dir); err != nil { - return err - } - return nil -} - -// cleanProcess removes the fifos used by an additional process. -// Caller needs to lock container ID before calling this method. 
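WithRestartManager above, and WithRuntime just after it, both follow libcontainerd's CreateOption pattern: an option is a value whose Apply method type-asserts its target, so platform-agnostic code can thread options through to the one concrete type that understands them. A runnable sketch with an illustrative option (the error branch mirrors WithRestartManager's behavior):

```go
package main

import (
	"errors"
	"fmt"
)

// CreateOption-style configuration: options are opaque to the code
// that carries them and only take effect on the concrete type they
// recognize. Names below are illustrative.
type CreateOption interface {
	Apply(interface{}) error
}

type container struct{ runtime string }

type withRuntime struct{ path string }

func (o withRuntime) Apply(p interface{}) error {
	if c, ok := p.(*container); ok {
		c.runtime = o.path
		return nil
	}
	return errors.New("withRuntime option not supported for this client")
}

func main() {
	c := &container{}
	for _, o := range []CreateOption{withRuntime{"/usr/bin/runc"}} {
		if err := o.Apply(c); err != nil {
			fmt.Println(err)
		}
	}
	fmt.Println(c.runtime)
}
```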
-func (ctr *container) cleanProcess(id string) { - if p, ok := ctr.processes[id]; ok { - for _, i := range []int{syscall.Stdin, syscall.Stdout, syscall.Stderr} { - if err := os.Remove(p.fifo(i)); err != nil { - logrus.Warnf("libcontainerd: failed to remove %v for process %v: %v", p.fifo(i), id, err) - } - } - } - delete(ctr.processes, id) -} - -func (ctr *container) spec() (*specs.Spec, error) { - var spec specs.Spec - dt, err := ioutil.ReadFile(filepath.Join(ctr.dir, configFilename)) - if err != nil { - return nil, err - } - if err := json.Unmarshal(dt, &spec); err != nil { - return nil, err - } - return &spec, nil -} - -func (ctr *container) start() error { - spec, err := ctr.spec() - if err != nil { - return nil - } - iopipe, err := ctr.openFifos(spec.Process.Terminal) - if err != nil { - return err - } - - r := &containerd.CreateContainerRequest{ - Id: ctr.containerID, - BundlePath: ctr.dir, - Stdin: ctr.fifo(syscall.Stdin), - Stdout: ctr.fifo(syscall.Stdout), - Stderr: ctr.fifo(syscall.Stderr), - // check to see if we are running in ramdisk to disable pivot root - NoPivotRoot: os.Getenv("DOCKER_RAMDISK") != "", - Runtime: ctr.runtime, - RuntimeArgs: ctr.runtimeArgs, - } - ctr.client.appendContainer(ctr) - - resp, err := ctr.client.remote.apiClient.CreateContainer(context.Background(), r) - if err != nil { - ctr.closeFifos(iopipe) - return err - } - ctr.startedAt = time.Now() - - if err := ctr.client.backend.AttachStreams(ctr.containerID, *iopipe); err != nil { - return err - } - ctr.systemPid = systemPid(resp.Container) - - return ctr.client.backend.StateChanged(ctr.containerID, StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateStart, - Pid: ctr.systemPid, - }}) -} - -func (ctr *container) newProcess(friendlyName string) *process { - return &process{ - dir: ctr.dir, - processCommon: processCommon{ - containerID: ctr.containerID, - friendlyName: friendlyName, - client: ctr.client, - }, - } -} - -func (ctr *container) handleEvent(e *containerd.Event) error { - ctr.client.lock(ctr.containerID) - defer ctr.client.unlock(ctr.containerID) - switch e.Type { - case StateExit, StatePause, StateResume, StateOOM: - var waitRestart chan error - st := StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: e.Type, - ExitCode: e.Status, - }, - OOMKilled: e.Type == StateExit && ctr.oom, - } - if e.Type == StateOOM { - ctr.oom = true - } - if e.Type == StateExit && e.Pid != InitFriendlyName { - st.ProcessID = e.Pid - st.State = StateExitProcess - } - if st.State == StateExit && ctr.restartManager != nil { - restart, wait, err := ctr.restartManager.ShouldRestart(e.Status, false, time.Since(ctr.startedAt)) - if err != nil { - logrus.Warnf("libcontainerd: container %s %v", ctr.containerID, err) - } else if restart { - st.State = StateRestart - ctr.restarting = true - ctr.client.deleteContainer(e.Id) - waitRestart = wait - } - } - - // Remove process from list if we have exited - // We need to do so here in case the Message Handler decides to restart it. 
- switch st.State { - case StateExit: - ctr.clean() - ctr.client.deleteContainer(e.Id) - case StateExitProcess: - ctr.cleanProcess(st.ProcessID) - } - ctr.client.q.append(e.Id, func() { - if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { - logrus.Errorf("libcontainerd: backend.StateChanged(): %v", err) - } - if st.State == StateRestart { - go func() { - err := <-waitRestart - ctr.client.lock(ctr.containerID) - defer ctr.client.unlock(ctr.containerID) - ctr.restarting = false - if err == nil { - if err = ctr.start(); err != nil { - logrus.Errorf("libcontainerd: error restarting %v", err) - } - } - if err != nil { - st.State = StateExit - ctr.clean() - ctr.client.q.append(e.Id, func() { - if err := ctr.client.backend.StateChanged(e.Id, st); err != nil { - logrus.Errorf("libcontainerd: %v", err) - } - }) - if err != restartmanager.ErrRestartCanceled { - logrus.Errorf("libcontainerd: %v", err) - } - } - }() - } - - if e.Type == StatePause || e.Type == StateResume { - ctr.pauseMonitor.handle(e.Type) - } - if e.Type == StateExit { - if en := ctr.client.getExitNotifier(e.Id); en != nil { - en.close() - } - } - }) - - default: - logrus.Debugf("libcontainerd: event unhandled: %+v", e) - } - return nil -} - -// discardFifos attempts to fully read the container fifos to unblock processes -// that may be blocked on the writer side. -func (ctr *container) discardFifos() { - for _, i := range []int{syscall.Stdout, syscall.Stderr} { - f := ctr.fifo(i) - c := make(chan struct{}) - go func() { - r := openReaderFromFifo(f) - close(c) // this channel is used to not close the writer too early, before readonly open has been called. - io.Copy(ioutil.Discard, r) - }() - <-c - closeReaderFifo(f) // avoid blocking permanently on open if there is no writer side - } -} diff --git a/libcontainerd/container_solaris.go b/libcontainerd/container_solaris.go deleted file mode 100644 index 24ab1de03b..0000000000 --- a/libcontainerd/container_solaris.go +++ /dev/null @@ -1,5 +0,0 @@ -package libcontainerd - -type container struct { - containerCommon -} diff --git a/libcontainerd/container_windows.go b/libcontainerd/container_windows.go deleted file mode 100644 index 31c2227df0..0000000000 --- a/libcontainerd/container_windows.go +++ /dev/null @@ -1,310 +0,0 @@ -package libcontainerd - -import ( - "io" - "strings" - "syscall" - "time" - - "github.com/Microsoft/hcsshim" - "github.com/Sirupsen/logrus" -) - -type container struct { - containerCommon - - // Platform specific fields are below here. There are none presently on Windows. - options []CreateOption - - // The ociSpec is required, as client.Create() needs a spec, - // but can be called from the RestartManager context which does not - // otherwise have access to the Spec - ociSpec Spec - - manualStopRequested bool - hcsContainer hcsshim.Container -} - -func (ctr *container) newProcess(friendlyName string) *process { - return &process{ - processCommon: processCommon{ - containerID: ctr.containerID, - friendlyName: friendlyName, - client: ctr.client, - }, - } -} - -func (ctr *container) start() error { - var err error - isServicing := false - - for _, option := range ctr.options { - if s, ok := option.(*ServicingOption); ok && s.IsServicing { - isServicing = true - } - } - - // Start the container. If this is a servicing container, this call will block - // until the container is done with the servicing execution. 
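`discardFifos` above keeps an exiting container's final writes from blocking forever: a goroutine copies everything from each output fifo into `ioutil.Discard` until EOF, then the fifo is closed. The same drain idiom, demonstrated standalone with an `os.Pipe` standing in for the stdout fifo:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
)

func main() {
	r, w, err := os.Pipe() // stand-in for the container's stdout fifo
	if err != nil {
		panic(err)
	}

	done := make(chan struct{})
	go func() {
		// Drain everything the writer produces so it never blocks on a
		// full pipe buffer, mirroring discardFifos above.
		io.Copy(ioutil.Discard, r)
		close(done)
	}()

	// A writer producing more than the pipe buffer would otherwise block.
	for i := 0; i < 10000; i++ {
		fmt.Fprintln(w, "log line", i)
	}
	w.Close() // EOF for the drain goroutine
	<-done
	fmt.Println("writer finished without blocking")
}
```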
- logrus.Debugln("libcontainerd: starting container ", ctr.containerID) - if err = ctr.hcsContainer.Start(); err != nil { - logrus.Errorf("libcontainerd: failed to start container: %s", err) - if err := ctr.terminate(); err != nil { - logrus.Errorf("libcontainerd: failed to cleanup after a failed Start. %s", err) - } else { - logrus.Debugln("libcontainerd: cleaned up after failed Start by calling Terminate") - } - return err - } - - // Note we always tell HCS to - // create stdout as it's required regardless of '-i' or '-t' options, so that - // docker can always grab the output through logs. We also tell HCS to always - // create stdin, even if it's not used - it will be closed shortly. Stderr - // is only created if it we're not -t. - createProcessParms := &hcsshim.ProcessConfig{ - EmulateConsole: ctr.ociSpec.Process.Terminal, - WorkingDirectory: ctr.ociSpec.Process.Cwd, - ConsoleSize: ctr.ociSpec.Process.InitialConsoleSize, - CreateStdInPipe: !isServicing, - CreateStdOutPipe: !isServicing, - CreateStdErrPipe: !ctr.ociSpec.Process.Terminal && !isServicing, - } - - // Configure the environment for the process - createProcessParms.Environment = setupEnvironmentVariables(ctr.ociSpec.Process.Env) - createProcessParms.CommandLine = strings.Join(ctr.ociSpec.Process.Args, " ") - - // Start the command running in the container. - hcsProcess, err := ctr.hcsContainer.CreateProcess(createProcessParms) - if err != nil { - logrus.Errorf("libcontainerd: CreateProcess() failed %s", err) - if err := ctr.terminate(); err != nil { - logrus.Errorf("libcontainerd: failed to cleanup after a failed CreateProcess. %s", err) - } else { - logrus.Debugln("libcontainerd: cleaned up after failed CreateProcess by calling Terminate") - } - return err - } - ctr.startedAt = time.Now() - - // Save the hcs Process and PID - ctr.process.friendlyName = InitFriendlyName - pid := hcsProcess.Pid() - ctr.process.hcsProcess = hcsProcess - - // If this is a servicing container, wait on the process synchronously here and - // immediately call shutdown/terminate when it returns. - if isServicing { - exitCode := ctr.waitProcessExitCode(&ctr.process) - - if exitCode != 0 { - logrus.Warnf("libcontainerd: servicing container %s returned non-zero exit code %d", ctr.containerID, exitCode) - return ctr.terminate() - } - - return ctr.shutdown() - } - - var stdout, stderr io.ReadCloser - var stdin io.WriteCloser - stdin, stdout, stderr, err = hcsProcess.Stdio() - if err != nil { - logrus.Errorf("libcontainerd: failed to get stdio pipes: %s", err) - if err := ctr.terminate(); err != nil { - logrus.Errorf("libcontainerd: failed to cleanup after a failed Stdio. %s", err) - } - return err - } - - iopipe := &IOPipe{Terminal: ctr.ociSpec.Process.Terminal} - - iopipe.Stdin = createStdInCloser(stdin, hcsProcess) - - // TEMP: Work around Windows BS/DEL behavior. 
- iopipe.Stdin = fixStdinBackspaceBehavior(iopipe.Stdin, ctr.ociSpec.Platform.OSVersion, ctr.ociSpec.Process.Terminal) - - // Convert io.ReadClosers to io.Readers - if stdout != nil { - iopipe.Stdout = openReaderFromPipe(stdout) - } - if stderr != nil { - iopipe.Stderr = openReaderFromPipe(stderr) - } - - // Save the PID - logrus.Debugf("libcontainerd: process started - PID %d", pid) - ctr.systemPid = uint32(pid) - - // Spin up a go routine waiting for exit to handle cleanup - go ctr.waitExit(&ctr.process, true) - - ctr.client.appendContainer(ctr) - - if err := ctr.client.backend.AttachStreams(ctr.containerID, *iopipe); err != nil { - // OK to return the error here, as waitExit will handle tear-down in HCS - return err - } - - // Tell the docker engine that the container has started. - si := StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateStart, - Pid: ctr.systemPid, // Not sure this is needed? Double-check monitor.go in daemon BUGBUG @jhowardmsft - }} - return ctr.client.backend.StateChanged(ctr.containerID, si) - -} - -// waitProcessExitCode will wait for the given process to exit and return its error code. -func (ctr *container) waitProcessExitCode(process *process) int { - // Block indefinitely for the process to exit. - err := process.hcsProcess.Wait() - if err != nil { - if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { - logrus.Warnf("libcontainerd: Wait() failed (container may have been killed): %s", err) - } - // Fall through here, do not return. This ensures we attempt to continue the - // shutdown in HCS and tell the docker engine that the process/container - // has exited to avoid a container being dropped on the floor. - } - - exitCode, err := process.hcsProcess.ExitCode() - if err != nil { - if herr, ok := err.(*hcsshim.ProcessError); ok && herr.Err != syscall.ERROR_BROKEN_PIPE { - logrus.Warnf("libcontainerd: unable to get exit code from container %s", ctr.containerID) - } - // Fall through here, do not return. This ensures we attempt to continue the - // shutdown in HCS and tell the docker engine that the process/container - // has exited to avoid a container being dropped on the floor. - } - - if err := process.hcsProcess.Close(); err != nil { - logrus.Errorf("libcontainerd: hcsProcess.Close(): %v", err) - } - - return exitCode -} - -// waitExit runs as a goroutine waiting for the process to exit. It's -// equivalent to (in the linux containerd world) where events come in for -// state change notifications from containerd. 
-func (ctr *container) waitExit(process *process, isFirstProcessToStart bool) error { - var waitRestart chan error - logrus.Debugln("libcontainerd: waitExit() on pid", process.systemPid) - - exitCode := ctr.waitProcessExitCode(process) - - // Assume the container has exited - si := StateInfo{ - CommonStateInfo: CommonStateInfo{ - State: StateExit, - ExitCode: uint32(exitCode), - Pid: process.systemPid, - ProcessID: process.friendlyName, - }, - UpdatePending: false, - } - - // But it could have been an exec'd process which exited - if !isFirstProcessToStart { - si.State = StateExitProcess - } else { - updatePending, err := ctr.hcsContainer.HasPendingUpdates() - if err != nil { - logrus.Warnf("libcontainerd: HasPendingUpdates() failed (container may have been killed): %s", err) - } else { - si.UpdatePending = updatePending - } - - logrus.Debugf("libcontainerd: shutting down container %s", ctr.containerID) - if err := ctr.shutdown(); err != nil { - logrus.Debugf("libcontainerd: failed to shutdown container %s", ctr.containerID) - } else { - logrus.Debugf("libcontainerd: completed shutting down container %s", ctr.containerID) - } - if err := ctr.hcsContainer.Close(); err != nil { - logrus.Error(err) - } - - if !ctr.manualStopRequested && ctr.restartManager != nil { - restart, wait, err := ctr.restartManager.ShouldRestart(uint32(exitCode), false, time.Since(ctr.startedAt)) - if err != nil { - logrus.Error(err) - } else if restart { - si.State = StateRestart - ctr.restarting = true - waitRestart = wait - } - } - - // Remove process from list if we have exited - // We need to do so here in case the Message Handler decides to restart it. - if si.State == StateExit { - ctr.client.deleteContainer(ctr.friendlyName) - } - } - - // Call into the backend to notify it of the state change. - logrus.Debugf("libcontainerd: waitExit() calling backend.StateChanged %+v", si) - if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil { - logrus.Error(err) - } - if si.State == StateRestart { - go func() { - err := <-waitRestart - ctr.restarting = false - ctr.client.deleteContainer(ctr.friendlyName) - if err == nil { - if err = ctr.client.Create(ctr.containerID, ctr.ociSpec, ctr.options...); err != nil { - logrus.Errorf("libcontainerd: error restarting %v", err) - } - } - if err != nil { - si.State = StateExit - if err := ctr.client.backend.StateChanged(ctr.containerID, si); err != nil { - logrus.Error(err) - } - } - }() - } - - logrus.Debugf("libcontainerd: waitExit() completed OK, %+v", si) - return nil -} - -func (ctr *container) shutdown() error { - const shutdownTimeout = time.Minute * 5 - err := ctr.hcsContainer.Shutdown() - if err == hcsshim.ErrVmcomputeOperationPending { - // Explicit timeout to avoid a (remote) possibility that shutdown hangs indefinitely. 
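Both exit paths in this package share the restart contract visible in `waitExit` above: `ShouldRestart` returns a decision plus a wait channel; the caller marks `StateRestart`, and a goroutine blocks on the channel for the backoff (receiving nil to proceed, or an error such as `restartmanager.ErrRestartCanceled`) before re-creating the container. A toy policy with that contract, assuming a simple retry cap and exponential backoff; the real `restartmanager` package implements considerably more:

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// ErrRestartCanceled mirrors restartmanager.ErrRestartCanceled.
var ErrRestartCanceled = errors.New("restart canceled")

// toyRestartManager approximates the ShouldRestart contract used above:
// decide now, hand back a channel the caller waits on for the backoff.
type toyRestartManager struct {
	maxRetries, retries int
}

func (m *toyRestartManager) ShouldRestart(exitCode uint32, manualStop bool, execDuration time.Duration) (bool, chan error, error) {
	if manualStop || exitCode == 0 || m.retries >= m.maxRetries {
		return false, nil, nil
	}
	m.retries++
	wait := make(chan error, 1)
	go func() {
		time.Sleep(100 * time.Millisecond << uint(m.retries)) // crude backoff
		wait <- nil
	}()
	return true, wait, nil
}

func main() {
	rm := &toyRestartManager{maxRetries: 3}
	restart, wait, _ := rm.ShouldRestart(1, false, time.Second)
	if restart {
		if err := <-wait; err == nil {
			fmt.Println("backoff elapsed, restarting container")
		} else if err == ErrRestartCanceled {
			fmt.Println("restart canceled")
		}
	}
}
```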
- err = ctr.hcsContainer.WaitTimeout(shutdownTimeout) - } - - if err != nil { - logrus.Debugf("libcontainerd: error shutting down container %s %v calling terminate", ctr.containerID, err) - if err := ctr.terminate(); err != nil { - return err - } - return err - } - - return nil -} - -func (ctr *container) terminate() error { - const terminateTimeout = time.Minute * 5 - err := ctr.hcsContainer.Terminate() - - if err == hcsshim.ErrVmcomputeOperationPending { - err = ctr.hcsContainer.WaitTimeout(terminateTimeout) - } - - if err != nil { - logrus.Debugf("libcontainerd: error terminating container %s %v", ctr.containerID, err) - return err - } - - return nil -} diff --git a/libcontainerd/pausemonitor_linux.go b/libcontainerd/pausemonitor_linux.go deleted file mode 100644 index 379cbf1fcb..0000000000 --- a/libcontainerd/pausemonitor_linux.go +++ /dev/null @@ -1,31 +0,0 @@ -package libcontainerd - -// pauseMonitor is helper to get notifications from pause state changes. -type pauseMonitor struct { - waiters map[string][]chan struct{} -} - -func (m *pauseMonitor) handle(t string) { - if m.waiters == nil { - return - } - q, ok := m.waiters[t] - if !ok { - return - } - if len(q) > 0 { - close(q[0]) - m.waiters[t] = q[1:] - } -} - -func (m *pauseMonitor) append(t string, waiter chan struct{}) { - if m.waiters == nil { - m.waiters = make(map[string][]chan struct{}) - } - _, ok := m.waiters[t] - if !ok { - m.waiters[t] = make([]chan struct{}, 0) - } - m.waiters[t] = append(m.waiters[t], waiter) -} diff --git a/libcontainerd/process.go b/libcontainerd/process.go deleted file mode 100644 index 57562c8789..0000000000 --- a/libcontainerd/process.go +++ /dev/null @@ -1,18 +0,0 @@ -package libcontainerd - -// processCommon are the platform common fields as part of the process structure -// which keeps the state for the main container process, as well as any exec -// processes. -type processCommon struct { - client *client - - // containerID is the Container ID - containerID string - - // friendlyName is an identifier for the process (or `InitFriendlyName` - // for the first process) - friendlyName string - - // systemPid is the PID of the main container process - systemPid uint32 -} diff --git a/libcontainerd/process_linux.go b/libcontainerd/process_linux.go deleted file mode 100644 index 3c48576fe2..0000000000 --- a/libcontainerd/process_linux.go +++ /dev/null @@ -1,110 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "io" - "os" - "path/filepath" - "syscall" - - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/pkg/ioutils" - "golang.org/x/net/context" -) - -var fdNames = map[int]string{ - syscall.Stdin: "stdin", - syscall.Stdout: "stdout", - syscall.Stderr: "stderr", -} - -// process keeps the state for both main container process and exec process. -type process struct { - processCommon - - // Platform specific fields are below here. 
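`pauseMonitor` above is a per-event FIFO of one-shot waiter channels: `append` enqueues a channel for an event type and `handle` closes the oldest one when containerd reports that event. A standalone sketch of the same mechanism; unlike the original, whose callers already hold the container lock, this version carries its own mutex so it runs safely on its own:

```go
package main

import (
	"fmt"
	"sync"
)

// waiterQueue mirrors pauseMonitor above: one FIFO of waiter channels
// per event type, each closed exactly once when the event arrives.
type waiterQueue struct {
	mu      sync.Mutex
	waiters map[string][]chan struct{}
}

func (q *waiterQueue) wait(event string) chan struct{} {
	q.mu.Lock()
	defer q.mu.Unlock()
	if q.waiters == nil {
		q.waiters = make(map[string][]chan struct{})
	}
	c := make(chan struct{})
	q.waiters[event] = append(q.waiters[event], c)
	return c
}

func (q *waiterQueue) handle(event string) {
	q.mu.Lock()
	defer q.mu.Unlock()
	if w := q.waiters[event]; len(w) > 0 {
		close(w[0]) // wake the oldest waiter only
		q.waiters[event] = w[1:]
	}
}

func main() {
	var q waiterQueue
	done := q.wait("pause")
	go q.handle("pause") // e.g. containerd reported StatePause
	<-done
	fmt.Println("container is paused")
}
```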
- dir string -} - -func (p *process) openFifos(terminal bool) (*IOPipe, error) { - bundleDir := p.dir - if err := os.MkdirAll(bundleDir, 0700); err != nil { - return nil, err - } - - for i := 0; i < 3; i++ { - f := p.fifo(i) - if err := syscall.Mkfifo(f, 0700); err != nil && !os.IsExist(err) { - return nil, fmt.Errorf("mkfifo: %s %v", f, err) - } - } - - io := &IOPipe{} - stdinf, err := os.OpenFile(p.fifo(syscall.Stdin), syscall.O_RDWR, 0) - if err != nil { - return nil, err - } - - io.Stdout = openReaderFromFifo(p.fifo(syscall.Stdout)) - if !terminal { - io.Stderr = openReaderFromFifo(p.fifo(syscall.Stderr)) - } else { - io.Stderr = emptyReader{} - } - - io.Stdin = ioutils.NewWriteCloserWrapper(stdinf, func() error { - stdinf.Close() - _, err := p.client.remote.apiClient.UpdateProcess(context.Background(), &containerd.UpdateProcessRequest{ - Id: p.containerID, - Pid: p.friendlyName, - CloseStdin: true, - }) - return err - }) - - return io, nil -} - -func (p *process) closeFifos(io *IOPipe) { - io.Stdin.Close() - closeReaderFifo(p.fifo(syscall.Stdout)) - closeReaderFifo(p.fifo(syscall.Stderr)) -} - -type emptyReader struct{} - -func (r emptyReader) Read(b []byte) (int, error) { - return 0, io.EOF -} - -func openReaderFromFifo(fn string) io.Reader { - r, w := io.Pipe() - c := make(chan struct{}) - go func() { - close(c) - stdoutf, err := os.OpenFile(fn, syscall.O_RDONLY, 0) - if err != nil { - r.CloseWithError(err) - } - if _, err := io.Copy(w, stdoutf); err != nil { - r.CloseWithError(err) - } - w.Close() - stdoutf.Close() - }() - <-c // wait for the goroutine to get scheduled and syscall to block - return r -} - -// closeReaderFifo closes fifo that may be blocked on open by opening the write side. -func closeReaderFifo(fn string) { - f, err := os.OpenFile(fn, syscall.O_WRONLY|syscall.O_NONBLOCK, 0) - if err != nil { - return - } - f.Close() -} - -func (p *process) fifo(index int) string { - return filepath.Join(p.dir, p.friendlyName+"-"+fdNames[index]) -} diff --git a/libcontainerd/process_solaris.go b/libcontainerd/process_solaris.go deleted file mode 100644 index 2ee9b25662..0000000000 --- a/libcontainerd/process_solaris.go +++ /dev/null @@ -1,6 +0,0 @@ -package libcontainerd - -// process keeps the state for both main container process and exec process. -type process struct { - processCommon -} diff --git a/libcontainerd/process_windows.go b/libcontainerd/process_windows.go deleted file mode 100644 index ad6143e1de..0000000000 --- a/libcontainerd/process_windows.go +++ /dev/null @@ -1,95 +0,0 @@ -package libcontainerd - -import ( - "io" - "strconv" - "strings" - - "github.com/Microsoft/hcsshim" -) - -// process keeps the state for both main container process and exec process. -type process struct { - processCommon - - // Platform specific fields are below here. - - // commandLine is to support returning summary information for docker top - commandLine string - hcsProcess hcsshim.Process -} - -func openReaderFromPipe(p io.ReadCloser) io.Reader { - r, w := io.Pipe() - go func() { - if _, err := io.Copy(w, p); err != nil { - r.CloseWithError(err) - } - w.Close() - p.Close() - }() - return r -} - -// fixStdinBackspaceBehavior works around a bug in Windows before build 14350 -// where it interpreted DEL as VK_DELETE instead of as VK_BACK. This replaces -// DEL with BS to work around this. 
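`openReaderFromFifo` and `closeReaderFifo` above work around a POSIX fifo property: opening the read side blocks until a writer appears, so a reader stuck in `open(2)` can only be released by briefly opening the write side, and that open must use `O_NONBLOCK` so it cannot itself block when no reader exists. A Linux-only demonstration; the retry loop papers over the race where the write-side open runs before the reader has blocked (a non-blocking write open fails with ENXIO while there is no reader):

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"path/filepath"
	"syscall"
	"time"
)

func main() {
	dir, err := ioutil.TempDir("", "fifo-demo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	fn := filepath.Join(dir, "stdout")
	if err := syscall.Mkfifo(fn, 0700); err != nil {
		panic(err)
	}

	done := make(chan struct{})
	go func() {
		// Blocks inside OpenFile until some writer opens the fifo.
		f, err := os.OpenFile(fn, syscall.O_RDONLY, 0)
		if err == nil {
			io.Copy(ioutil.Discard, f)
			f.Close()
		}
		close(done)
	}()

	// The trick from closeReaderFifo: a non-blocking write-side open
	// pairs with the blocked reader, letting its open() return; closing
	// it immediately delivers EOF to the reader.
	for {
		w, err := os.OpenFile(fn, syscall.O_WRONLY|syscall.O_NONBLOCK, 0)
		if err == nil {
			w.Close()
			break
		}
		time.Sleep(time.Millisecond) // reader not blocked in open() yet
	}
	<-done
	fmt.Println("reader unblocked")
}
```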
-func fixStdinBackspaceBehavior(w io.WriteCloser, osversion string, tty bool) io.WriteCloser { - if !tty { - return w - } - v := strings.Split(osversion, ".") - if len(v) < 3 { - return w - } - - if build, err := strconv.Atoi(v[2]); err != nil || build >= 14350 { - return w - } - - return &delToBsWriter{w} -} - -type delToBsWriter struct { - io.WriteCloser -} - -func (w *delToBsWriter) Write(b []byte) (int, error) { - const ( - backspace = 0x8 - del = 0x7f - ) - bc := make([]byte, len(b)) - for i, c := range b { - if c == del { - bc[i] = backspace - } else { - bc[i] = c - } - } - return w.WriteCloser.Write(bc) -} - -type stdInCloser struct { - io.WriteCloser - hcsshim.Process -} - -func createStdInCloser(pipe io.WriteCloser, process hcsshim.Process) *stdInCloser { - return &stdInCloser{ - WriteCloser: pipe, - Process: process, - } -} - -func (stdin *stdInCloser) Close() error { - if err := stdin.WriteCloser.Close(); err != nil { - return err - } - - return stdin.Process.CloseStdin() -} - -func (stdin *stdInCloser) Write(p []byte) (n int, err error) { - return stdin.WriteCloser.Write(p) -} diff --git a/libcontainerd/queue_linux.go b/libcontainerd/queue_linux.go deleted file mode 100644 index 34bc81d24e..0000000000 --- a/libcontainerd/queue_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -package libcontainerd - -import "sync" - -type queue struct { - sync.Mutex - fns map[string]chan struct{} -} - -func (q *queue) append(id string, f func()) { - q.Lock() - defer q.Unlock() - - if q.fns == nil { - q.fns = make(map[string]chan struct{}) - } - - done := make(chan struct{}) - - fn, ok := q.fns[id] - q.fns[id] = done - go func() { - if ok { - <-fn - } - f() - close(done) - }() -} diff --git a/libcontainerd/remote.go b/libcontainerd/remote.go deleted file mode 100644 index 9031e3ae7d..0000000000 --- a/libcontainerd/remote.go +++ /dev/null @@ -1,20 +0,0 @@ -package libcontainerd - -// Remote on Linux defines the accesspoint to the containerd grpc API. -// Remote on Windows is largely an unimplemented interface as there is -// no remote containerd. -type Remote interface { - // Client returns a new Client instance connected with given Backend. - Client(Backend) (Client, error) - // Cleanup stops containerd if it was started by libcontainerd. - // Note this is not used on Windows as there is no remote containerd. - Cleanup() - // UpdateOptions allows various remote options to be updated at runtime. - UpdateOptions(...RemoteOption) error -} - -// RemoteOption allows to configure parameters of remotes. -// This is unused on Windows. 
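The `queue` type above serializes state-change callbacks per container ID while letting different containers proceed concurrently: each `append` chains the new goroutine onto the previous entry's done channel for that ID. A usage sketch, with the implementation reproduced from the hunk (comments added):

```go
package main

import (
	"fmt"
	"sync"
)

// queue is reproduced from the deleted queue_linux.go above.
type queue struct {
	sync.Mutex
	fns map[string]chan struct{}
}

func (q *queue) append(id string, f func()) {
	q.Lock()
	defer q.Unlock()
	if q.fns == nil {
		q.fns = make(map[string]chan struct{})
	}
	done := make(chan struct{})
	fn, ok := q.fns[id]
	q.fns[id] = done
	go func() {
		if ok {
			<-fn // wait for the previous callback queued for this ID
		}
		f()
		close(done)
	}()
}

func main() {
	var q queue
	var wg sync.WaitGroup
	wg.Add(3)
	for i := 1; i <= 3; i++ {
		i := i
		q.append("ctr-1", func() { // same ID: runs strictly in order 1, 2, 3
			fmt.Println("event", i)
			wg.Done()
		})
	}
	wg.Wait()
}
```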
-type RemoteOption interface { - Apply(Remote) error -} diff --git a/libcontainerd/remote_linux.go b/libcontainerd/remote_linux.go deleted file mode 100644 index 57daa7fc7b..0000000000 --- a/libcontainerd/remote_linux.go +++ /dev/null @@ -1,542 +0,0 @@ -package libcontainerd - -import ( - "fmt" - "io" - "io/ioutil" - "log" - "net" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - "sync" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/docker/docker/pkg/locker" - sysinfo "github.com/docker/docker/pkg/system" - "github.com/docker/docker/utils" - "github.com/golang/protobuf/ptypes" - "github.com/golang/protobuf/ptypes/timestamp" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/grpclog" - "google.golang.org/grpc/transport" -) - -const ( - maxConnectionRetryCount = 3 - connectionRetryDelay = 3 * time.Second - containerdShutdownTimeout = 15 * time.Second - containerdBinary = "docker-containerd" - containerdPidFilename = "docker-containerd.pid" - containerdSockFilename = "docker-containerd.sock" - containerdStateDir = "containerd" - eventTimestampFilename = "event.ts" -) - -type remote struct { - sync.RWMutex - apiClient containerd.APIClient - daemonPid int - stateDir string - rpcAddr string - startDaemon bool - closeManually bool - debugLog bool - rpcConn *grpc.ClientConn - clients []*client - eventTsPath string - runtime string - runtimeArgs []string - daemonWaitCh chan struct{} - liveRestore bool - oomScore int - restoreFromTimestamp *timestamp.Timestamp -} - -// New creates a fresh instance of libcontainerd remote. -func New(stateDir string, options ...RemoteOption) (_ Remote, err error) { - defer func() { - if err != nil { - err = fmt.Errorf("Failed to connect to containerd. Please make sure containerd is installed in your PATH or you have specificed the correct address. Got error: %v", err) - } - }() - r := &remote{ - stateDir: stateDir, - daemonPid: -1, - eventTsPath: filepath.Join(stateDir, eventTimestampFilename), - } - for _, option := range options { - if err := option.Apply(r); err != nil { - return nil, err - } - } - - if err := sysinfo.MkdirAll(stateDir, 0700); err != nil { - return nil, err - } - - if r.rpcAddr == "" { - r.rpcAddr = filepath.Join(stateDir, containerdSockFilename) - } - - if r.startDaemon { - if err := r.runContainerdDaemon(); err != nil { - return nil, err - } - } - - // don't output the grpc reconnect logging - grpclog.SetLogger(log.New(ioutil.Discard, "", log.LstdFlags)) - dialOpts := append([]grpc.DialOption{grpc.WithInsecure()}, - grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) { - return net.DialTimeout("unix", addr, timeout) - }), - ) - conn, err := grpc.Dial(r.rpcAddr, dialOpts...) 
- if err != nil { - return nil, fmt.Errorf("error connecting to containerd: %v", err) - } - - r.rpcConn = conn - r.apiClient = containerd.NewAPIClient(conn) - - // Get the timestamp to restore from - t := r.getLastEventTimestamp() - tsp, err := ptypes.TimestampProto(t) - if err != nil { - logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) - } - r.restoreFromTimestamp = tsp - - go r.handleConnectionChange() - - if err := r.startEventsMonitor(); err != nil { - return nil, err - } - - return r, nil -} - -func (r *remote) UpdateOptions(options ...RemoteOption) error { - for _, option := range options { - if err := option.Apply(r); err != nil { - return err - } - } - return nil -} - -func (r *remote) handleConnectionChange() { - var transientFailureCount = 0 - state := grpc.Idle - for { - s, err := r.rpcConn.WaitForStateChange(context.Background(), state) - if err != nil { - break - } - state = s - logrus.Debugf("libcontainerd: containerd connection state change: %v", s) - - if r.daemonPid != -1 { - switch state { - case grpc.TransientFailure: - // Reset state to be notified of next failure - transientFailureCount++ - if transientFailureCount >= maxConnectionRetryCount { - transientFailureCount = 0 - if utils.IsProcessAlive(r.daemonPid) { - utils.KillProcess(r.daemonPid) - } - <-r.daemonWaitCh - if err := r.runContainerdDaemon(); err != nil { //FIXME: Handle error - logrus.Errorf("libcontainerd: error restarting containerd: %v", err) - } - } else { - state = grpc.Idle - time.Sleep(connectionRetryDelay) - } - case grpc.Shutdown: - // Well, we asked for it to stop, just return - return - } - } - } -} - -func (r *remote) Cleanup() { - if r.daemonPid == -1 { - return - } - r.closeManually = true - r.rpcConn.Close() - // Ask the daemon to quit - syscall.Kill(r.daemonPid, syscall.SIGTERM) - - // Wait up to 15secs for it to stop - for i := time.Duration(0); i < containerdShutdownTimeout; i += time.Second { - if !utils.IsProcessAlive(r.daemonPid) { - break - } - time.Sleep(time.Second) - } - - if utils.IsProcessAlive(r.daemonPid) { - logrus.Warnf("libcontainerd: containerd (%d) didn't stop within 15 secs, killing it\n", r.daemonPid) - syscall.Kill(r.daemonPid, syscall.SIGKILL) - } - - // cleanup some files - os.Remove(filepath.Join(r.stateDir, containerdPidFilename)) - os.Remove(filepath.Join(r.stateDir, containerdSockFilename)) -} - -func (r *remote) Client(b Backend) (Client, error) { - c := &client{ - clientCommon: clientCommon{ - backend: b, - containers: make(map[string]*container), - locker: locker.New(), - }, - remote: r, - exitNotifiers: make(map[string]*exitNotifier), - liveRestore: r.liveRestore, - } - - r.Lock() - r.clients = append(r.clients, c) - r.Unlock() - return c, nil -} - -func (r *remote) updateEventTimestamp(t time.Time) { - f, err := os.OpenFile(r.eventTsPath, syscall.O_CREAT|syscall.O_WRONLY|syscall.O_TRUNC, 0600) - defer f.Close() - if err != nil { - logrus.Warnf("libcontainerd: failed to open event timestamp file: %v", err) - return - } - - b, err := t.MarshalText() - if err != nil { - logrus.Warnf("libcontainerd: failed to encode timestamp: %v", err) - return - } - - n, err := f.Write(b) - if err != nil || n != len(b) { - logrus.Warnf("libcontainerd: failed to update event timestamp file: %v", err) - f.Truncate(0) - return - } -} - -func (r *remote) getLastEventTimestamp() time.Time { - t := time.Now() - - fi, err := os.Stat(r.eventTsPath) - if os.IsNotExist(err) || fi.Size() == 0 { - return t - } - - f, err := os.Open(r.eventTsPath) - defer f.Close() - if 
err != nil { - logrus.Warnf("libcontainerd: Unable to access last event ts: %v", err) - return t - } - - b := make([]byte, fi.Size()) - n, err := f.Read(b) - if err != nil || n != len(b) { - logrus.Warnf("libcontainerd: Unable to read last event ts: %v", err) - return t - } - - t.UnmarshalText(b) - - return t -} - -func (r *remote) startEventsMonitor() error { - // First, get past events - t := r.getLastEventTimestamp() - tsp, err := ptypes.TimestampProto(t) - if err != nil { - logrus.Errorf("libcontainerd: failed to convert timestamp: %q", err) - } - er := &containerd.EventsRequest{ - Timestamp: tsp, - } - events, err := r.apiClient.Events(context.Background(), er) - if err != nil { - return err - } - go r.handleEventStream(events) - return nil -} - -func (r *remote) handleEventStream(events containerd.API_EventsClient) { - for { - e, err := events.Recv() - if err != nil { - if grpc.ErrorDesc(err) == transport.ErrConnClosing.Desc && - r.closeManually { - // ignore error if grpc remote connection is closed manually - return - } - logrus.Errorf("libcontainerd: failed to receive event from containerd: %v", err) - go r.startEventsMonitor() - return - } - - logrus.Debugf("libcontainerd: received containerd event: %#v", e) - - var container *container - var c *client - r.RLock() - for _, c = range r.clients { - container, err = c.getContainer(e.Id) - if err == nil { - break - } - } - r.RUnlock() - if container == nil { - logrus.Warnf("libcontainerd: unknown container %s", e.Id) - continue - } - - if err := container.handleEvent(e); err != nil { - logrus.Errorf("libcontainerd: error processing state change for %s: %v", e.Id, err) - } - - tsp, err := ptypes.Timestamp(e.Timestamp) - if err != nil { - logrus.Errorf("libcontainerd: failed to convert event timestamp: %q", err) - continue - } - - r.updateEventTimestamp(tsp) - } -} - -func (r *remote) runContainerdDaemon() error { - pidFilename := filepath.Join(r.stateDir, containerdPidFilename) - f, err := os.OpenFile(pidFilename, os.O_RDWR|os.O_CREATE, 0600) - defer f.Close() - if err != nil { - return err - } - - // File exist, check if the daemon is alive - b := make([]byte, 8) - n, err := f.Read(b) - if err != nil && err != io.EOF { - return err - } - - if n > 0 { - pid, err := strconv.ParseUint(string(b[:n]), 10, 64) - if err != nil { - return err - } - if utils.IsProcessAlive(int(pid)) { - logrus.Infof("libcontainerd: previous instance of containerd still alive (%d)", pid) - r.daemonPid = int(pid) - return nil - } - } - - // rewind the file - _, err = f.Seek(0, os.SEEK_SET) - if err != nil { - return err - } - - // Truncate it - err = f.Truncate(0) - if err != nil { - return err - } - - // Start a new instance - args := []string{ - "-l", fmt.Sprintf("unix://%s", r.rpcAddr), - "--shim", "docker-containerd-shim", - "--metrics-interval=0", - "--start-timeout", "2m", - "--state-dir", filepath.Join(r.stateDir, containerdStateDir), - } - if r.runtime != "" { - args = append(args, "--runtime") - args = append(args, r.runtime) - } - if r.debugLog { - args = append(args, "--debug") - } - if len(r.runtimeArgs) > 0 { - for _, v := range r.runtimeArgs { - args = append(args, "--runtime-args") - args = append(args, v) - } - logrus.Debugf("libcontainerd: runContainerdDaemon: runtimeArgs: %s", args) - } - - cmd := exec.Command(containerdBinary, args...) 
- // redirect containerd logs to docker logs - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - cmd.SysProcAttr = &syscall.SysProcAttr{Setsid: true, Pdeathsig: syscall.SIGKILL} - cmd.Env = nil - // clear the NOTIFY_SOCKET from the env when starting containerd - for _, e := range os.Environ() { - if !strings.HasPrefix(e, "NOTIFY_SOCKET") { - cmd.Env = append(cmd.Env, e) - } - } - if err := cmd.Start(); err != nil { - return err - } - logrus.Infof("libcontainerd: new containerd process, pid: %d", cmd.Process.Pid) - if err := setOOMScore(cmd.Process.Pid, r.oomScore); err != nil { - utils.KillProcess(cmd.Process.Pid) - return err - } - if _, err := f.WriteString(fmt.Sprintf("%d", cmd.Process.Pid)); err != nil { - utils.KillProcess(cmd.Process.Pid) - return err - } - - r.daemonWaitCh = make(chan struct{}) - go func() { - cmd.Wait() - close(r.daemonWaitCh) - }() // Reap our child when needed - r.daemonPid = cmd.Process.Pid - return nil -} - -func setOOMScore(pid, score int) error { - f, err := os.OpenFile(fmt.Sprintf("/proc/%d/oom_score_adj", pid), os.O_WRONLY, 0) - if err != nil { - return err - } - _, err = f.WriteString(strconv.Itoa(score)) - f.Close() - return err -} - -// WithRemoteAddr sets the external containerd socket to connect to. -func WithRemoteAddr(addr string) RemoteOption { - return rpcAddr(addr) -} - -type rpcAddr string - -func (a rpcAddr) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.rpcAddr = string(a) - return nil - } - return fmt.Errorf("WithRemoteAddr option not supported for this remote") -} - -// WithRuntimePath sets the path of the runtime to be used as the -// default by containerd -func WithRuntimePath(rt string) RemoteOption { - return runtimePath(rt) -} - -type runtimePath string - -func (rt runtimePath) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.runtime = string(rt) - return nil - } - return fmt.Errorf("WithRuntime option not supported for this remote") -} - -// WithRuntimeArgs sets the list of runtime args passed to containerd -func WithRuntimeArgs(args []string) RemoteOption { - return runtimeArgs(args) -} - -type runtimeArgs []string - -func (rt runtimeArgs) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.runtimeArgs = rt - return nil - } - return fmt.Errorf("WithRuntimeArgs option not supported for this remote") -} - -// WithStartDaemon defines if libcontainerd should also run containerd daemon. -func WithStartDaemon(start bool) RemoteOption { - return startDaemon(start) -} - -type startDaemon bool - -func (s startDaemon) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.startDaemon = bool(s) - return nil - } - return fmt.Errorf("WithStartDaemon option not supported for this remote") -} - -// WithDebugLog defines if containerd debug logs will be enabled for daemon. -func WithDebugLog(debug bool) RemoteOption { - return debugLog(debug) -} - -type debugLog bool - -func (d debugLog) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.debugLog = bool(d) - return nil - } - return fmt.Errorf("WithDebugLog option not supported for this remote") -} - -// WithLiveRestore defines if containers are stopped on shutdown or restored. 
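`runContainerdDaemon` above adopts a still-running containerd rather than spawning a second one: it parses the pid recorded in the pidfile and probes the process with `utils.IsProcessAlive`. A sketch of that probe using signal 0 directly as a stand-in for the helper; the pidfile path is illustrative:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strconv"
	"strings"
	"syscall"
)

// isProcessAlive is a stand-in for utils.IsProcessAlive: sending signal 0
// performs the existence and permission checks without delivering anything.
func isProcessAlive(pid int) bool {
	err := syscall.Kill(pid, syscall.Signal(0))
	return err == nil || err == syscall.EPERM
}

// reusablePid reports the pid recorded in pidFile if that process is
// still running, or 0 if a fresh daemon must be started.
func reusablePid(pidFile string) int {
	b, err := ioutil.ReadFile(pidFile)
	if err != nil {
		return 0
	}
	pid, err := strconv.Atoi(strings.TrimSpace(string(b)))
	if err != nil || !isProcessAlive(pid) {
		return 0
	}
	return pid
}

func main() {
	if pid := reusablePid("/tmp/docker-containerd.pid"); pid != 0 {
		fmt.Println("previous containerd still alive:", pid)
	} else {
		fmt.Println("starting a new containerd instance")
	}
}
```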
-func WithLiveRestore(v bool) RemoteOption { - return liveRestore(v) -} - -type liveRestore bool - -func (l liveRestore) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.liveRestore = bool(l) - for _, c := range remote.clients { - c.liveRestore = bool(l) - } - return nil - } - return fmt.Errorf("WithLiveRestore option not supported for this remote") -} - -// WithOOMScore defines the oom_score_adj to set for the containerd process. -func WithOOMScore(score int) RemoteOption { - return oomScore(score) -} - -type oomScore int - -func (o oomScore) Apply(r Remote) error { - if remote, ok := r.(*remote); ok { - remote.oomScore = int(o) - return nil - } - return fmt.Errorf("WithOOMScore option not supported for this remote") -} diff --git a/libcontainerd/remote_solaris.go b/libcontainerd/remote_solaris.go deleted file mode 100644 index e04f192882..0000000000 --- a/libcontainerd/remote_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -package libcontainerd - -import "github.com/docker/docker/pkg/locker" - -type remote struct { -} - -func (r *remote) Client(b Backend) (Client, error) { - c := &client{ - clientCommon: clientCommon{ - backend: b, - containers: make(map[string]*container), - locker: locker.New(), - }, - } - return c, nil -} - -func (r *remote) Cleanup() { -} - -func (r *remote) UpdateOptions(opts ...RemoteOption) error { - return nil -} - -// New creates a fresh instance of libcontainerd remote. -func New(_ string, _ ...RemoteOption) (Remote, error) { - return &remote{}, nil -} - -// WithLiveRestore is a noop on solaris. -func WithLiveRestore(v bool) RemoteOption { - return nil -} diff --git a/libcontainerd/remote_windows.go b/libcontainerd/remote_windows.go deleted file mode 100644 index 74c10447bb..0000000000 --- a/libcontainerd/remote_windows.go +++ /dev/null @@ -1,36 +0,0 @@ -package libcontainerd - -import "github.com/docker/docker/pkg/locker" - -type remote struct { -} - -func (r *remote) Client(b Backend) (Client, error) { - c := &client{ - clientCommon: clientCommon{ - backend: b, - containers: make(map[string]*container), - locker: locker.New(), - }, - } - return c, nil -} - -// Cleanup is a no-op on Windows. It is here to implement the interface. -func (r *remote) Cleanup() { -} - -func (r *remote) UpdateOptions(opts ...RemoteOption) error { - return nil -} - -// New creates a fresh instance of libcontainerd remote. On Windows, -// this is not used as there is no remote containerd process. -func New(_ string, _ ...RemoteOption) (Remote, error) { - return &remote{}, nil -} - -// WithLiveRestore is a noop on windows. -func WithLiveRestore(v bool) RemoteOption { - return nil -} diff --git a/libcontainerd/types.go b/libcontainerd/types.go deleted file mode 100644 index 6f452c1c3b..0000000000 --- a/libcontainerd/types.go +++ /dev/null @@ -1,64 +0,0 @@ -package libcontainerd - -import ( - "io" - - "golang.org/x/net/context" -) - -// State constants used in state change reporting. -const ( - StateStart = "start-container" - StatePause = "pause" - StateResume = "resume" - StateExit = "exit" - StateRestart = "restart" - StateRestore = "restore" - StateStartProcess = "start-process" - StateExitProcess = "exit-process" - StateOOM = "oom" // fake state - stateLive = "live" -) - -// CommonStateInfo contains the state info common to all platforms. -type CommonStateInfo struct { // FIXME: event? - State string - Pid uint32 - ExitCode uint32 - ProcessID string -} - -// Backend defines callbacks that the client of the library needs to implement. 
-type Backend interface { - StateChanged(containerID string, state StateInfo) error - AttachStreams(processFriendlyName string, io IOPipe) error -} - -// Client provides access to containerd features. -type Client interface { - Create(containerID string, spec Spec, options ...CreateOption) error - Signal(containerID string, sig int) error - SignalProcess(containerID string, processFriendlyName string, sig int) error - AddProcess(ctx context.Context, containerID, processFriendlyName string, process Process) error - Resize(containerID, processFriendlyName string, width, height int) error - Pause(containerID string) error - Resume(containerID string) error - Restore(containerID string, options ...CreateOption) error - Stats(containerID string) (*Stats, error) - GetPidsForContainer(containerID string) ([]int, error) - Summary(containerID string) ([]Summary, error) - UpdateResources(containerID string, resources Resources) error -} - -// CreateOption allows to configure parameters of container creation. -type CreateOption interface { - Apply(interface{}) error -} - -// IOPipe contains the stdio streams. -type IOPipe struct { - Stdin io.WriteCloser - Stdout io.Reader - Stderr io.Reader - Terminal bool // Whether stderr is connected on Windows -} diff --git a/libcontainerd/types_linux.go b/libcontainerd/types_linux.go deleted file mode 100644 index 4f714a2329..0000000000 --- a/libcontainerd/types_linux.go +++ /dev/null @@ -1,55 +0,0 @@ -package libcontainerd - -import ( - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/opencontainers/specs/specs-go" -) - -// Spec is the base configuration for the container. It specifies platform -// independent configuration. This information must be included when the -// bundle is packaged for distribution. -type Spec specs.Spec - -// Process contains information to start a specific application inside the container. -type Process struct { - // Terminal creates an interactive terminal for the container. - Terminal bool `json:"terminal"` - // User specifies user information for the process. - User *User `json:"user"` - // Args specifies the binary and arguments for the application to execute. - Args []string `json:"args"` - // Env populates the process environment for the process. - Env []string `json:"env,omitempty"` - // Cwd is the current working directory for the process and must be - // relative to the container's root. - Cwd *string `json:"cwd"` - // Capabilities are linux capabilities that are kept for the container. - Capabilities []string `json:"capabilities,omitempty"` - // Rlimits specifies rlimit options to apply to the process. - Rlimits []specs.Rlimit `json:"rlimits,omitempty"` - // ApparmorProfile specified the apparmor profile for the container. - ApparmorProfile *string `json:"apparmorProfile,omitempty"` - // SelinuxProcessLabel specifies the selinux context that the container process is run as. - SelinuxLabel *string `json:"selinuxLabel,omitempty"` -} - -// StateInfo contains description about the new state container has entered. -type StateInfo struct { - CommonStateInfo - - // Platform specific StateInfo - OOMKilled bool -} - -// Stats contains a stats properties from containerd. -type Stats containerd.StatsResponse - -// Summary container a container summary from containerd -type Summary struct{} - -// User specifies linux specific user and group information for the container's -// main process. -type User specs.User - -// Resources defines updatable container resource values. 
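The `Backend` interface above is the entire upcall surface from libcontainerd into the daemon: state transitions arrive via `StateChanged` and process stdio via `AttachStreams`. A minimal implementation that just logs and drains, with trimmed local restatements of the deleted types so the sketch compiles on its own:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
)

// StateInfo, IOPipe and Backend are trimmed local restatements of the
// deleted types above, just enough for a standalone sketch.
type StateInfo struct {
	State    string
	Pid      uint32
	ExitCode uint32
}

type IOPipe struct {
	Stdin          io.WriteCloser
	Stdout, Stderr io.Reader
	Terminal       bool
}

type Backend interface {
	StateChanged(containerID string, state StateInfo) error
	AttachStreams(processFriendlyName string, io IOPipe) error
}

// loggingBackend prints every upcall; a real daemon would update its
// container store and wire the streams to log drivers and attach clients.
type loggingBackend struct{}

func (loggingBackend) StateChanged(id string, st StateInfo) error {
	fmt.Printf("container %s: state=%s pid=%d exit=%d\n", id, st.State, st.Pid, st.ExitCode)
	return nil
}

func (loggingBackend) AttachStreams(name string, iop IOPipe) error {
	// Drain output so the container side never blocks on full pipes.
	if iop.Stdout != nil {
		go io.Copy(ioutil.Discard, iop.Stdout)
	}
	if iop.Stderr != nil {
		go io.Copy(ioutil.Discard, iop.Stderr)
	}
	return nil
}

func main() {
	var b Backend = loggingBackend{}
	b.StateChanged("abc123", StateInfo{State: "start-container", Pid: 4242})
}
```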
-type Resources containerd.UpdateResource diff --git a/libcontainerd/types_solaris.go b/libcontainerd/types_solaris.go deleted file mode 100644 index 637e54300f..0000000000 --- a/libcontainerd/types_solaris.go +++ /dev/null @@ -1,38 +0,0 @@ -package libcontainerd - -import ( - "github.com/opencontainers/specs/specs-go" -) - -// Spec is the base configuration for the container. It specifies platform -// independent configuration. This information must be included when the -// bundle is packaged for distribution. -type Spec specs.Spec - -// Process contains information to start a specific application inside the container. -type Process struct { - // Terminal creates an interactive terminal for the container. - Terminal bool `json:"terminal"` - // Args specifies the binary and arguments for the application to execute. - Args []string `json:"args"` -} - -// Stats contains a stats properties from containerd. -type Stats struct{} - -// Summary container a container summary from containerd -type Summary struct{} - -// StateInfo contains description about the new state container has entered. -type StateInfo struct { - CommonStateInfo - - // Platform specific StateInfo -} - -// User specifies Solaris specific user and group information for the container's -// main process. -type User specs.User - -// Resources defines updatable container resource values. -type Resources struct{} diff --git a/libcontainerd/types_windows.go b/libcontainerd/types_windows.go deleted file mode 100644 index 1e7014c110..0000000000 --- a/libcontainerd/types_windows.go +++ /dev/null @@ -1,39 +0,0 @@ -package libcontainerd - -import "github.com/docker/docker/libcontainerd/windowsoci" - -// Spec is the base configuration for the container. -type Spec windowsoci.WindowsSpec - -// Process contains information to start a specific application inside the container. -type Process windowsoci.Process - -// User specifies user information for the containers main process. -type User windowsoci.User - -// Summary container a container summary from containerd -type Summary struct { - Pid uint32 - Command string -} - -// StateInfo contains description about the new state container has entered. -type StateInfo struct { - CommonStateInfo - - // Platform specific StateInfo - - UpdatePending bool // Indicates that there are some update operations pending that should be completed by a servicing container. -} - -// Stats contains a stats properties from containerd. -type Stats struct{} - -// Resources defines updatable container resource values. -type Resources struct{} - -// ServicingOption is an empty CreateOption with a no-op application that siginifies -// the container needs to be use for a Windows servicing operation. 
-type ServicingOption struct { - IsServicing bool -} diff --git a/libcontainerd/utils_linux.go b/libcontainerd/utils_linux.go deleted file mode 100644 index 5b67244f0c..0000000000 --- a/libcontainerd/utils_linux.go +++ /dev/null @@ -1,52 +0,0 @@ -package libcontainerd - -import ( - containerd "github.com/docker/containerd/api/grpc/types" - "github.com/opencontainers/specs/specs-go" -) - -func getRootIDs(s specs.Spec) (int, int, error) { - var hasUserns bool - for _, ns := range s.Linux.Namespaces { - if ns.Type == specs.UserNamespace { - hasUserns = true - break - } - } - if !hasUserns { - return 0, 0, nil - } - uid := hostIDFromMap(0, s.Linux.UIDMappings) - gid := hostIDFromMap(0, s.Linux.GIDMappings) - return uid, gid, nil -} - -func hostIDFromMap(id uint32, mp []specs.IDMapping) int { - for _, m := range mp { - if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 { - return int(m.HostID + id - m.ContainerID) - } - } - return 0 -} - -func systemPid(ctr *containerd.Container) uint32 { - var pid uint32 - for _, p := range ctr.Processes { - if p.Pid == InitFriendlyName { - pid = p.SystemPid - } - } - return pid -} - -func convertRlimits(sr []specs.Rlimit) (cr []*containerd.Rlimit) { - for _, r := range sr { - cr = append(cr, &containerd.Rlimit{ - Type: r.Type, - Hard: r.Hard, - Soft: r.Soft, - }) - } - return -} diff --git a/libcontainerd/utils_windows.go b/libcontainerd/utils_windows.go deleted file mode 100644 index 76ad61819d..0000000000 --- a/libcontainerd/utils_windows.go +++ /dev/null @@ -1,21 +0,0 @@ -package libcontainerd - -import "strings" - -// setupEnvironmentVariables convert a string array of environment variables -// into a map as required by the HCS. Source array is in format [v1=k1] [v2=k2] etc. -func setupEnvironmentVariables(a []string) map[string]string { - r := make(map[string]string) - for _, s := range a { - arr := strings.Split(s, "=") - if len(arr) == 2 { - r[arr[0]] = arr[1] - } - } - return r -} - -// Apply for a servicing option is a no-op. -func (s *ServicingOption) Apply(interface{}) error { - return nil -} diff --git a/libcontainerd/windowsoci/oci_windows.go b/libcontainerd/windowsoci/oci_windows.go deleted file mode 100644 index 5f8f1319d2..0000000000 --- a/libcontainerd/windowsoci/oci_windows.go +++ /dev/null @@ -1,179 +0,0 @@ -package windowsoci - -// This file contains the Windows spec for a container. At the time of -// writing, Windows does not have a spec defined in opencontainers/specs, -// hence this is an interim workaround. TODO Windows: FIXME @jhowardmsft - -import "fmt" - -// WindowsSpec is the full specification for Windows containers. -type WindowsSpec struct { - Spec - - // Windows is platform specific configuration for Windows based containers. - Windows Windows `json:"windows"` -} - -// Spec is the base configuration for the container. It specifies platform -// independent configuration. This information must be included when the -// bundle is packaged for distribution. -type Spec struct { - - // Version is the version of the specification that is supported. - Version string `json:"ociVersion"` - // Platform is the host information for OS and Arch. - Platform Platform `json:"platform"` - // Process is the container's main process. - Process Process `json:"process"` - // Root is the root information for the container's filesystem. - Root Root `json:"root"` - // Hostname is the container's host name. - Hostname string `json:"hostname,omitempty"` - // Mounts profile configuration for adding mounts to the container's filesystem. 
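`hostIDFromMap` above translates an in-container ID to its host value: a mapping entry covers IDs in `[ContainerID, ContainerID+Size-1]` and maps them to `HostID + (id - ContainerID)`, with unmapped IDs falling back to 0. A worked standalone example using a local stand-in for the vendored `specs.IDMapping`:

```go
package main

import "fmt"

// idMapping is a local stand-in for specs.IDMapping.
type idMapping struct {
	ContainerID uint32
	HostID      uint32
	Size        uint32
}

// hostIDFromMap mirrors the deleted helper above: find the mapping that
// contains id and translate it; unmapped IDs fall back to 0.
func hostIDFromMap(id uint32, mp []idMapping) int {
	for _, m := range mp {
		if id >= m.ContainerID && id <= m.ContainerID+m.Size-1 {
			return int(m.HostID + id - m.ContainerID)
		}
	}
	return 0
}

func main() {
	// A typical userns remap: container root (0) becomes host uid 100000.
	mp := []idMapping{{ContainerID: 0, HostID: 100000, Size: 65536}}
	fmt.Println(hostIDFromMap(0, mp))    // 100000
	fmt.Println(hostIDFromMap(1000, mp)) // 101000
}
```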
- Mounts []Mount `json:"mounts"` -} - -// Windows contains platform specific configuration for Windows based containers. -type Windows struct { - // Resources contain information for handling resource constraints for the container - Resources *Resources `json:"resources,omitempty"` - // Networking contains the platform specific network settings for the container. - Networking *Networking `json:"networking,omitempty"` - // FirstStart is used for an optimization on first boot of Windows - FirstStart bool `json:"first_start,omitempty"` - // LayerFolder is the path to the current layer folder - LayerFolder string `json:"layer_folder,omitempty"` - // Layer paths of the parent layers - LayerPaths []string `json:"layer_paths,omitempty"` - // HvRuntime contains settings specific to Hyper-V containers, omitted if not using Hyper-V isolation - HvRuntime *HvRuntime `json:"hv_runtime,omitempty"` -} - -// Process contains information to start a specific application inside the container. -type Process struct { - // Terminal indicates if stderr should NOT be attached for the container. - Terminal bool `json:"terminal"` - // ConsoleSize contains the initial h,w of the console size - InitialConsoleSize [2]int `json:"-"` - // User specifies user information for the process. - User User `json:"user"` - // Args specifies the binary and arguments for the application to execute. - Args []string `json:"args"` - // Env populates the process environment for the process. - Env []string `json:"env,omitempty"` - // Cwd is the current working directory for the process and must be - // relative to the container's root. - Cwd string `json:"cwd"` -} - -// User contains the user information for Windows -type User struct { - User string `json:"user,omitempty"` -} - -// Root contains information about the container's root filesystem on the host. -type Root struct { - // Path is the absolute path to the container's root filesystem. - Path string `json:"path"` - // Readonly makes the root filesystem for the container readonly before the process is executed. - Readonly bool `json:"readonly"` -} - -// Platform specifies OS and arch information for the host system that the container -// is created for. -type Platform struct { - // OS is the operating system. - OS string `json:"os"` - // Arch is the architecture - Arch string `json:"arch"` - // OSVersion is the version of the operating system. - OSVersion string `json:"os.version,omitempty"` -} - -// Mount specifies a mount for a container. -type Mount struct { - // Destination is the path where the mount will be placed relative to the container's root. The path and child directories MUST exist, a runtime MUST NOT create directories automatically to a mount point. - Destination string `json:"destination"` - // Type specifies the mount kind. - Type string `json:"type"` - // Source specifies the source path of the mount. In the case of bind mounts - // this would be the file on the host. 
- Source string `json:"source"` - // Readonly specifies if the mount should be read-only - Readonly bool `json:"readonly"` -} - -// HvRuntime contains settings specific to Hyper-V containers -type HvRuntime struct { - // ImagePath is the path to the Utility VM image for this container - ImagePath string `json:"image_path,omitempty"` -} - -// Networking contains the platform specific network settings for the container -type Networking struct { - // List of endpoints to be attached to the container - EndpointList []string `json:"endpoints,omitempty"` -} - -// Storage contains storage resource management settings -type Storage struct { - // Specifies maximum Iops for the system drive - Iops *uint64 `json:"iops,omitempty"` - // Specifies maximum bytes per second for the system drive - Bps *uint64 `json:"bps,omitempty"` - // Sandbox size indicates the size to expand the system drive to if it is currently smaller - SandboxSize *uint64 `json:"sandbox_size,omitempty"` -} - -// Memory contains memory settings for the container -type Memory struct { - // Memory limit (in bytes). - Limit *int64 `json:"limit,omitempty"` - // Memory reservation (in bytes). - Reservation *uint64 `json:"reservation,omitempty"` -} - -// CPU contains information for cpu resource management -type CPU struct { - // Number of CPUs available to the container. This is an appoximation for Windows Server Containers. - Count *uint64 `json:"count,omitempty"` - // CPU shares (relative weight (ratio) vs. other containers with cpu shares). Range is from 1 to 10000. - Shares *uint64 `json:"shares,omitempty"` - // Percent of available CPUs usable by the container. - Percent *int64 `json:"percent,omitempty"` -} - -// Network network resource management information -type Network struct { - // Bandwidth is the maximum egress bandwidth in bytes per second - Bandwidth *uint64 `json:"bandwidth,omitempty"` -} - -// Resources has container runtime resource constraints -// TODO Windows containerd. This structure needs ratifying with the old resources -// structure used on Windows and the latest OCI spec. -type Resources struct { - // Memory restriction configuration - Memory *Memory `json:"memory,omitempty"` - // CPU resource restriction configuration - CPU *CPU `json:"cpu,omitempty"` - // Storage restriction configuration - Storage *Storage `json:"storage,omitempty"` - // Network restriction configuration - Network *Network `json:"network,omitempty"` -} - -const ( - // VersionMajor is for an API incompatible changes - VersionMajor = 0 - // VersionMinor is for functionality in a backwards-compatible manner - VersionMinor = 3 - // VersionPatch is for backwards-compatible bug fixes - VersionPatch = 0 - - // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" -) - -// Version is the specification version that the package types support. 
-var Version = fmt.Sprintf("%d.%d.%d%s (Windows)", VersionMajor, VersionMinor, VersionPatch, VersionDev) diff --git a/libcontainerd/windowsoci/unsupported.go b/libcontainerd/windowsoci/unsupported.go deleted file mode 100644 index a97c282995..0000000000 --- a/libcontainerd/windowsoci/unsupported.go +++ /dev/null @@ -1,3 +0,0 @@ -// +build !windows - -package windowsoci diff --git a/man/Dockerfile b/man/Dockerfile deleted file mode 100644 index 5657d13c92..0000000000 --- a/man/Dockerfile +++ /dev/null @@ -1,24 +0,0 @@ -FROM alpine:3.4 - -RUN apk add -U git go bash curl gcc musl-dev make - -RUN mkdir -p /go/src /go/bin /go/pkg -ENV GOPATH=/go -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/go/src/github.com/docker/docker/vendor:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/man/Dockerfile.5.md b/man/Dockerfile.5.md deleted file mode 100644 index df69935397..0000000000 --- a/man/Dockerfile.5.md +++ /dev/null @@ -1,473 +0,0 @@ -% DOCKERFILE(5) Docker User Manuals -% Zac Dover -% May 2014 -# NAME - -Dockerfile - automate the steps of creating a Docker image - -# INTRODUCTION - -The **Dockerfile** is a configuration file that automates the steps of creating -a Docker image. It is similar to a Makefile. Docker reads instructions from the -**Dockerfile** to automate the steps otherwise performed manually to create an -image. To build an image, create a file called **Dockerfile**. - -The **Dockerfile** describes the steps taken to assemble the image. When the -**Dockerfile** has been created, call the `docker build` command, using the -path of directory that contains **Dockerfile** as the argument. - -# SYNOPSIS - -INSTRUCTION arguments - -For example: - - FROM image - -# DESCRIPTION - -A Dockerfile is a file that automates the steps of creating a Docker image. -A Dockerfile is similar to a Makefile. - -# USAGE - - docker build . - - -- Runs the steps and commits them, building a final image. - The path to the source repository defines where to find the context of the - build. The build is run by the Docker daemon, not the CLI. The whole - context must be transferred to the daemon. The Docker CLI reports - `"Sending build context to Docker daemon"` when the context is sent to the - daemon. - - ``` - docker build -t repository/tag . - ``` - - -- specifies a repository and tag at which to save the new image if the build - succeeds. The Docker daemon runs the steps one-by-one, committing the result - to a new image if necessary, before finally outputting the ID of the new - image. The Docker daemon automatically cleans up the context it is given. - - Docker re-uses intermediate images whenever possible. This significantly - accelerates the *docker build* process. - -# FORMAT - - `FROM image` - - `FROM image:tag` - - `FROM image@digest` - - -- The **FROM** instruction sets the base image for subsequent instructions. A - valid Dockerfile must have **FROM** as its first instruction. The image can be any - valid image. It is easy to start by pulling an image from the public - repositories. 
- - -- **FROM** must be the first non-comment instruction in Dockerfile. - - -- **FROM** may appear multiple times within a single Dockerfile in order to create - multiple images. Make a note of the last image ID output by the commit before - each new **FROM** command. - - -- If no tag is given to the **FROM** instruction, Docker applies the - `latest` tag. If the used tag does not exist, an error is returned. - - -- If no digest is given to the **FROM** instruction, Docker applies the - `latest` tag. If the used tag does not exist, an error is returned. - -**MAINTAINER** - -- **MAINTAINER** sets the Author field for the generated images. - Useful for providing users with an email or url for support. - -**RUN** - -- **RUN** has two forms: - - ``` - # the command is run in a shell - /bin/sh -c - RUN - - # Executable form - RUN ["executable", "param1", "param2"] - ``` - - - -- The **RUN** instruction executes any commands in a new layer on top of the current - image and commits the results. The committed image is used for the next step in - Dockerfile. - - -- Layering **RUN** instructions and generating commits conforms to the core - concepts of Docker where commits are cheap and containers can be created from - any point in the history of an image. This is similar to source control. The - exec form makes it possible to avoid shell string munging. The exec form makes - it possible to **RUN** commands using a base image that does not contain `/bin/sh`. - - Note that the exec form is parsed as a JSON array, which means that you must - use double-quotes (") around words not single-quotes ('). - -**CMD** - -- **CMD** has three forms: - - ``` - # Executable form - CMD ["executable", "param1", "param2"]` - - # Provide default arguments to ENTRYPOINT - CMD ["param1", "param2"]` - - # the command is run in a shell - /bin/sh -c - CMD command param1 param2 - ``` - - -- There should be only one **CMD** in a Dockerfile. If more than one **CMD** is listed, only - the last **CMD** takes effect. - The main purpose of a **CMD** is to provide defaults for an executing container. - These defaults may include an executable, or they can omit the executable. If - they omit the executable, an **ENTRYPOINT** must be specified. - When used in the shell or exec formats, the **CMD** instruction sets the command to - be executed when running the image. - If you use the shell form of the **CMD**, the `` executes in `/bin/sh -c`: - - Note that the exec form is parsed as a JSON array, which means that you must - use double-quotes (") around words not single-quotes ('). - - ``` - FROM ubuntu - CMD echo "This is a test." | wc - - ``` - - -- If you run **command** without a shell, then you must express the command as a - JSON array and give the full path to the executable. This array form is the - preferred form of **CMD**. All additional parameters must be individually expressed - as strings in the array: - - ``` - FROM ubuntu - CMD ["/usr/bin/wc","--help"] - ``` - - -- To make the container run the same executable every time, use **ENTRYPOINT** in - combination with **CMD**. - If the user specifies arguments to `docker run`, the specified commands - override the default in **CMD**. - Do not confuse **RUN** with **CMD**. **RUN** runs a command and commits the result. - **CMD** executes nothing at build time, but specifies the intended command for - the image. - -**LABEL** - -- `LABEL = [= ...]`or - ``` - LABEL [ ] - LABEL [ ] - ... - ``` - The **LABEL** instruction adds metadata to an image. 
A **LABEL** is a - key-value pair. To specify a **LABEL** without a value, simply use an empty - string. To include spaces within a **LABEL** value, use quotes and - backslashes as you would in command-line parsing. - - ``` - LABEL com.example.vendor="ACME Incorporated" - LABEL com.example.vendor "ACME Incorporated" - LABEL com.example.vendor.is-beta "" - LABEL com.example.vendor.is-beta= - LABEL com.example.vendor.is-beta="" - ``` - - An image can have more than one label. To specify multiple labels, separate - each key-value pair by a space. - - Labels are additive, including `LABEL`s in `FROM` images. As the system - encounters and then applies a new label, new `key`s override any previous - labels with identical keys. - - To display an image's labels, use the `docker inspect` command. - -**EXPOSE** - -- `EXPOSE <port> [<port>...]` - The **EXPOSE** instruction informs Docker that the container listens on the - specified network ports at runtime. Docker uses this information to - interconnect containers using links and to set up port redirection on the host - system. - -**ENV** - -- `ENV <key> <value>` - The **ENV** instruction sets the environment variable `<key>` to - the value `<value>`. This value is passed to all future - **RUN**, **ENTRYPOINT**, and **CMD** instructions. This is - functionally equivalent to prefixing the command with `<key>=<value>`. The - environment variables that are set with **ENV** persist when a container is run - from the resulting image. Use `docker inspect` to inspect these values, and - change them using `docker run --env <key>=<value>`. - - Note that setting "`ENV DEBIAN_FRONTEND noninteractive`" may cause - unintended consequences, because it will persist when the container is run - interactively, as with the following command: `docker run -t -i image bash` - -**ADD** - -- **ADD** has two forms: - - ``` - ADD <src> <dest> - - # Required for paths with whitespace - ADD ["<src>",... "<dest>"] - ``` - - The **ADD** instruction copies new files, directories - or remote file URLs to the filesystem of the container at path `<dest>`. - Multiple `<src>` resources may be specified but if they are files or directories - then they must be relative to the source directory that is being built - (the context of the build). The `<dest>` is the absolute path, or path relative - to **WORKDIR**, into which the source is copied inside the target container. - If the `<src>` argument is a local file in a recognized compression format - (tar, gzip, bzip2, etc) then it is unpacked at the specified `<dest>` in the - container's filesystem. Note that only local compressed files will be unpacked, - i.e., the URL download and archive unpacking features cannot be used together. - All new directories are created with mode 0755 and with the uid and gid of **0**. - -**COPY** - -- **COPY** has two forms: - - ``` - COPY <src> <dest> - - # Required for paths with whitespace - COPY ["<src>",... "<dest>"] - ``` - - The **COPY** instruction copies new files from `<src>` and - adds them to the filesystem of the container at path `<dest>`. The `<src>` must be - the path to a file or directory relative to the source directory that is - being built (the context of the build); unlike **ADD**, it may not be a remote file URL. The `<dest>` is an - absolute path, or a path relative to **WORKDIR**, into which the source will - be copied inside the target container. If you **COPY** an archive file it will - land in the container exactly as it appears in the build context without any - attempt to unpack it. All new files and directories are created with mode **0755** - and with the uid and gid of **0**.
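To make the **ADD**/**COPY** distinction concrete, here is a minimal sketch; the archive name is illustrative:

```
FROM alpine:3.4
# COPY places the archive in the image byte-for-byte; it is never unpacked
COPY app.tar.gz /opt/app.tar.gz
# ADD unpacks a local tar archive into the destination directory
ADD app.tar.gz /opt/app/
```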
- -**ENTRYPOINT** - -- **ENTRYPOINT** has two forms: - - ``` - # executable form - ENTRYPOINT ["executable", "param1", "param2"] - - # run command in a shell - /bin/sh -c - ENTRYPOINT command param1 param2 - ``` - - -- An **ENTRYPOINT** helps you configure a - container that can be run as an executable. When you specify an **ENTRYPOINT**, - the whole container runs as if it were only that executable. The **ENTRYPOINT** - instruction adds an entry command that is not overwritten when arguments are - passed to docker run. This is different from the behavior of **CMD**. This allows - arguments to be passed to the entrypoint, for instance `docker run <image> -d` - passes the `-d` argument to the **ENTRYPOINT**. Specify parameters either in the - **ENTRYPOINT** JSON array (as in the preferred exec form above), or by using a **CMD** - statement. Parameters in the **ENTRYPOINT** are not overwritten by the docker run - arguments. Parameters specified via **CMD** are overwritten by docker run - arguments. Specify a plain string for the **ENTRYPOINT**, and it will execute in - `/bin/sh -c`, like a **CMD** instruction: - - ``` - FROM ubuntu - ENTRYPOINT wc -l - - ``` - - This means that the Dockerfile's image always takes stdin as input (that's - what "-" means), and prints the number of lines (that's what "-l" means). To - make this optional but default, use a **CMD**: - - ``` - FROM ubuntu - CMD ["-l", "-"] - ENTRYPOINT ["/usr/bin/wc"] - ``` - -**VOLUME** - -- `VOLUME ["/data"]` - The **VOLUME** instruction creates a mount point with the specified name and marks - it as holding externally-mounted volumes from the native host or from other - containers. - -**USER** - -- `USER daemon` - Sets the username or UID used for running subsequent commands. - - The **USER** instruction can optionally be used to set the group or GID. The - following examples are all valid: - USER [user | user:group | uid | uid:gid | user:gid | uid:group ] - - Until the **USER** instruction is set, instructions will be run as root. The USER - instruction can be used any number of times in a Dockerfile, and will only affect - subsequent commands. - -**WORKDIR** - -- `WORKDIR /path/to/workdir` - The **WORKDIR** instruction sets the working directory for the **RUN**, **CMD**, - **ENTRYPOINT**, **COPY** and **ADD** Dockerfile commands that follow it. It can - be used multiple times in a single Dockerfile. Relative paths are defined - relative to the path of the previous **WORKDIR** instruction. For example: - - ``` - WORKDIR /a - WORKDIR b - WORKDIR c - RUN pwd - ``` - - In the above example, the output of the **pwd** command is **/a/b/c**. - -**ARG** - -- `ARG <name>[=<default value>]` - - The `ARG` instruction defines a variable that users can pass at build-time to - the builder with the `docker build` command using the `--build-arg - <varname>=<value>` flag. If a user specifies a build argument that was not - defined in the Dockerfile, the build outputs an error. - - ``` - One or more build-args were not consumed, failing build. - ``` - - The Dockerfile author can define a single variable by specifying `ARG` once or many - variables by specifying `ARG` more than once. For example, a valid Dockerfile: - - ``` - FROM busybox - ARG user1 - ARG buildno - ... - ``` - - A Dockerfile author may optionally specify a default value for an `ARG` instruction: - - ``` - FROM busybox - ARG user1=someuser - ARG buildno=1 - ... - ``` - - If an `ARG` value has a default and if there is no value passed at build-time, the - builder uses the default.
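For example, a build of the Dockerfile above can override the default or fall back to it; the invocations below are illustrative:

```
$ docker build --build-arg user1=admin .   # user1 is admin for this build
$ docker build .                           # user1 falls back to someuser
```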
- - An `ARG` variable definition comes into effect from the line on which it is - defined in the `Dockerfile` not from the argument's use on the command-line or - elsewhere. For example, consider this Dockerfile: - - ``` - 1 FROM busybox - 2 USER ${user:-some_user} - 3 ARG user - 4 USER $user - ... - ``` - A user builds this file by calling: - - ``` - $ docker build --build-arg user=what_user Dockerfile - ``` - - The `USER` at line 2 evaluates to `some_user` as the `user` variable is defined on the - subsequent line 3. The `USER` at line 4 evaluates to `what_user` as `user` is - defined and the `what_user` value was passed on the command line. Prior to its definition by an - `ARG` instruction, any use of a variable results in an empty string. - - > **Warning:** It is not recommended to use build-time variables for - > passing secrets like github keys, user credentials etc. Build-time variable - > values are visible to any user of the image with the `docker history` command. - - You can use an `ARG` or an `ENV` instruction to specify variables that are - available to the `RUN` instruction. Environment variables defined using the - `ENV` instruction always override an `ARG` instruction of the same name. Consider - this Dockerfile with an `ENV` and `ARG` instruction. - - ``` - 1 FROM ubuntu - 2 ARG CONT_IMG_VER - 3 ENV CONT_IMG_VER v1.0.0 - 4 RUN echo $CONT_IMG_VER - ``` - Then, assume this image is built with this command: - - ``` - $ docker build --build-arg CONT_IMG_VER=v2.0.1 Dockerfile - ``` - - In this case, the `RUN` instruction uses `v1.0.0` instead of the `ARG` setting - passed by the user:`v2.0.1` This behavior is similar to a shell - script where a locally scoped variable overrides the variables passed as - arguments or inherited from environment, from its point of definition. - - Using the example above but a different `ENV` specification you can create more - useful interactions between `ARG` and `ENV` instructions: - - ``` - 1 FROM ubuntu - 2 ARG CONT_IMG_VER - 3 ENV CONT_IMG_VER ${CONT_IMG_VER:-v1.0.0} - 4 RUN echo $CONT_IMG_VER - ``` - - Unlike an `ARG` instruction, `ENV` values are always persisted in the built - image. Consider a docker build without the --build-arg flag: - - ``` - $ docker build Dockerfile - ``` - - Using this Dockerfile example, `CONT_IMG_VER` is still persisted in the image but - its value would be `v1.0.0` as it is the default set in line 3 by the `ENV` instruction. - - The variable expansion technique in this example allows you to pass arguments - from the command line and persist them in the final image by leveraging the - `ENV` instruction. Variable expansion is only supported for [a limited set of - Dockerfile instructions.](#environment-replacement) - - Docker has a set of predefined `ARG` variables that you can use without a - corresponding `ARG` instruction in the Dockerfile. - - * `HTTP_PROXY` - * `http_proxy` - * `HTTPS_PROXY` - * `https_proxy` - * `FTP_PROXY` - * `ftp_proxy` - * `NO_PROXY` - * `no_proxy` - - To use these, simply pass them on the command line using the `--build-arg - =` flag. - -**ONBUILD** - -- `ONBUILD [INSTRUCTION]` - The **ONBUILD** instruction adds a trigger instruction to an image. The - trigger is executed at a later time, when the image is used as the base for - another build. Docker executes the trigger in the context of the downstream - build, as if the trigger existed immediately after the **FROM** instruction in - the downstream Dockerfile. - - You can register any build instruction as a trigger. 
A trigger is useful if - you are defining an image to use as a base for building other images. For - example, if you are defining an application build environment or a daemon that - is customized with a user-specific configuration. - - Consider an image intended as a reusable python application builder. It must - add application source code to a particular directory, and might need a build - script called after that. You can't just call **ADD** and **RUN** now, because - you don't yet have access to the application source code, and it is different - for each application build. - - -- Providing application developers with a boilerplate Dockerfile to copy-paste - into their application is inefficient, error-prone, and - difficult to update because it mixes with application-specific code. - The solution is to use **ONBUILD** to register instructions in advance, to - run later, during the next build stage. - -# HISTORY -*May 2014, Compiled by Zac Dover (zdover at redhat dot com) based on docker.com Dockerfile documentation. -*Feb 2015, updated by Brian Goff (cpuguy83@gmail.com) for readability -*Sept 2015, updated by Sally O'Malley (somalley@redhat.com) diff --git a/man/Dockerfile.armhf b/man/Dockerfile.armhf deleted file mode 100644 index a3552c660b..0000000000 --- a/man/Dockerfile.armhf +++ /dev/null @@ -1,24 +0,0 @@ -FROM armhf/alpine:3.4 - -RUN apk add -U git go bash curl gcc musl-dev make - -RUN mkdir -p /go/src /go/bin /go/pkg -ENV GOPATH=/go -RUN export GLIDE=v0.11.1; \ - export TARGET=/go/src/github.com/Masterminds; \ - mkdir -p ${TARGET} && \ - git clone https://github.com/Masterminds/glide.git ${TARGET}/glide && \ - cd ${TARGET}/glide && \ - make build && \ - cp ./glide /usr/bin/glide && \ - cd / && rm -rf /go/src/* /go/bin/* /go/pkg/* - -COPY glide.yaml /manvendor/ -COPY glide.lock /manvendor/ -WORKDIR /manvendor/ -RUN glide install && mv vendor src -ENV GOPATH=$GOPATH:/go/src/github.com/docker/docker/vendor:/manvendor -RUN go build -o /usr/bin/go-md2man github.com/cpuguy83/go-md2man - -WORKDIR /go/src/github.com/docker/docker/ -ENTRYPOINT ["man/generate.sh"] diff --git a/man/README.md b/man/README.md deleted file mode 100644 index 82dac650f9..0000000000 --- a/man/README.md +++ /dev/null @@ -1,15 +0,0 @@ -Docker Documentation -==================== - -This directory contains scripts for generating the man pages. Many of the man -pages are generated directly from the `spf13/cobra` `Command` definition. Some -legacy pages are still generated from the markdown files in this directory. -Do *not* edit the man pages in the man1 directory. Instead, update the -Cobra command or amend the Markdown files for legacy pages. - - -## Generate the man pages - -From within the project root directory run: - - make manpages diff --git a/man/docker-attach.1.md b/man/docker-attach.1.md deleted file mode 100644 index c39d1c9290..0000000000 --- a/man/docker-attach.1.md +++ /dev/null @@ -1,99 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-attach - Attach to a running container - -# SYNOPSIS -**docker attach** -[**--detach-keys**[=*[]*]] -[**--help**] -[**--no-stdin**] -[**--sig-proxy**[=*true*]] -CONTAINER - -# DESCRIPTION -The **docker attach** command allows you to attach to a running container using -the container's ID or name, either to view its ongoing output or to control it -interactively. You can attach to the same contained process multiple times -simultaneously, screen sharing style, or quickly view the progress of your -detached process. 
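For example, to attach to a container that was started in detached mode (the container name is illustrative):

    $ docker attach my_container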
- -To stop a container, use `CTRL-c`. This key sequence sends `SIGKILL` to the -container. You can detach from the container (and leave it running) using a -configurable key sequence. The default sequence is `CTRL-p CTRL-q`. You -configure the key sequence using the **--detach-keys** option or a configuration -file. See **config-json(5)** for documentation on using a configuration file. - -It is forbidden to redirect the standard input of a `docker attach` command while -attaching to a tty-enabled container (i.e.: launched with `-t`). - -# OPTIONS -**--detach-keys**="" - Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-` where `` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - -**--help** - Print usage statement - -**--no-stdin**=*true*|*false* - Do not attach STDIN. The default is *false*. - -**--sig-proxy**=*true*|*false* - Proxy all received signals to the process (non-TTY mode only). SIGCHLD, SIGKILL, and SIGSTOP are not proxied. The default is *true*. - -# Override the detach sequence - -If you want, you can configure an override the Docker key sequence for detach. -This is useful if the Docker default sequence conflicts with key sequence you -use for other applications. There are two ways to define your own detach key -sequence, as a per-container override or as a configuration property on your -entire configuration. - -To override the sequence for an individual container, use the -`--detach-keys=""` flag with the `docker attach` command. The format of -the `` is either a letter [a-Z], or the `ctrl-` combined with any of -the following: - -* `a-z` (a single lowercase alpha character ) -* `@` (at sign) -* `[` (left bracket) -* `\\` (two backward slashes) -* `_` (underscore) -* `^` (caret) - -These `a`, `ctrl-a`, `X`, or `ctrl-\\` values are all examples of valid key -sequences. To configure a different configuration default key sequence for all -containers, see **docker(1)**. - -# EXAMPLES - -## Attaching to a container - -In this example the top command is run inside a container, from an image called -fedora, in detached mode. The ID from the container is passed into the **docker -attach** command: - - # ID=$(sudo docker run -d fedora /usr/bin/top -b) - # sudo docker attach $ID - top - 02:05:52 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.1%us, 0.2%sy, 0.0%ni, 99.7%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355560k used, 18012k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221740k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17200 1116 912 R 0 0.3 0:00.03 top - - top - 02:05:55 up 3:05, 0 users, load average: 0.01, 0.02, 0.05 - Tasks: 1 total, 1 running, 0 sleeping, 0 stopped, 0 zombie - Cpu(s): 0.0%us, 0.2%sy, 0.0%ni, 99.8%id, 0.0%wa, 0.0%hi, 0.0%si, 0.0%st - Mem: 373572k total, 355244k used, 18328k free, 27872k buffers - Swap: 786428k total, 0k used, 786428k free, 221776k cached - - PID USER PR NI VIRT RES SHR S %CPU %MEM TIME+ COMMAND - 1 root 20 0 17208 1144 932 R 0 0.3 0:00.03 top - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit diff --git a/man/docker-build.1.md b/man/docker-build.1.md deleted file mode 100644 index b654e2d922..0000000000 --- a/man/docker-build.1.md +++ /dev/null @@ -1,313 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-build - Build a new image from the source code at PATH - -# SYNOPSIS -**docker build** -[**--build-arg**[=*[]*]] -[**--cpu-shares**[=*0*]] -[**--cgroup-parent**[=*CGROUP-PARENT*]] -[**--help**] -[**-f**|**--file**[=*PATH/Dockerfile*]] -[**--force-rm**] -[**--isolation**[=*default*]] -[**--label**[=*[]*]] -[**--no-cache**] -[**--pull**] -[**-q**|**--quiet**] -[**--rm**[=*true*]] -[**-t**|**--tag**[=*[]*]] -[**-m**|**--memory**[=*MEMORY*]] -[**--memory-swap**[=*LIMIT*]] -[**--shm-size**[=*SHM-SIZE*]] -[**--cpu-period**[=*0*]] -[**--cpu-quota**[=*0*]] -[**--cpuset-cpus**[=*CPUSET-CPUS*]] -[**--cpuset-mems**[=*CPUSET-MEMS*]] -[**--ulimit**[=*[]*]] -PATH | URL | - - -# DESCRIPTION -This will read the Dockerfile from the directory specified in **PATH**. -It also sends any other files and directories found in the current -directory to the Docker daemon. The contents of this directory would -be used by **ADD** commands found within the Dockerfile. - -Warning, this will send a lot of data to the Docker daemon depending -on the contents of the current directory. The build is run by the Docker -daemon, not by the CLI, so the whole context must be transferred to the daemon. -The Docker CLI reports "Sending build context to Docker daemon" when the context is sent to -the daemon. - -When the URL to a tarball archive or to a single Dockerfile is given, no context is sent from -the client to the Docker daemon. In this case, the Dockerfile at the root of the archive and -the rest of the archive will get used as the context of the build. When a Git repository is -set as the **URL**, the repository is cloned locally and then sent as the context. - -# OPTIONS -**-f**, **--file**=*PATH/Dockerfile* - Path to the Dockerfile to use. If the path is a relative path and you are - building from a local directory, then the path must be relative to that - directory. If you are building from a remote URL pointing to either a - tarball or a Git repository, then the path must be relative to the root of - the remote context. In all cases, the file must be within the build context. - The default is *Dockerfile*. - -**--build-arg**=*variable* - name and value of a **buildarg**. - - For example, if you want to pass a value for `http_proxy`, use - `--build-arg=http_proxy="http://some.proxy.url"` - - Users pass these values at build-time. Docker uses the `buildargs` as the - environment context for command(s) run via the Dockerfile's `RUN` instruction - or for variable expansion in other Dockerfile instructions. This is not meant - for passing secret values. [Read more about the buildargs instruction](/reference/builder/#arg) - -**--force-rm**=*true*|*false* - Always remove intermediate containers, even after unsuccessful builds. The default is *false*. - -**--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. - -**--label**=*label* - Set metadata for an image - -**--no-cache**=*true*|*false* - Do not use cache when building the image. The default is *false*. - -**--help** - Print usage statement - -**--pull**=*true*|*false* - Always attempt to pull a newer version of the image. The default is *false*. 
- -**-q**, **--quiet**=*true*|*false* - Suppress the build output and print image ID on success. The default is *false*. - -**--rm**=*true*|*false* - Remove intermediate containers after a successful build. The default is *true*. - -**-t**, **--tag**="" - Repository names (and optionally tags) to be applied to the resulting - image in case of success. Refer to **docker-tag(1)** for more information - about valid tag names. - -**-m**, **--memory**=*MEMORY* - Memory limit - -**--memory-swap**=*LIMIT* - A limit value equal to memory plus swap. Must be used with the **-m** -(**--memory**) flag. The swap `LIMIT` should always be larger than the **-m** -(**--memory**) value. - - The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes), -`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a -unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. - -**--shm-size**=*SHM-SIZE* - Size of `/dev/shm`. The format is `<number><unit>`. `number` must be greater than `0`. - Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. - If you omit the size entirely, the system uses `64m`. - -**--cpu-shares**=*0* - CPU shares (relative weight). - - By default, all containers get the same proportion of CPU cycles. - CPU shares is a 'relative weight', relative to the default setting of 1024. - This default value is defined here: - ``` - cat /sys/fs/cgroup/cpu/cpu.shares - 1024 - ``` - You can change this proportion by adjusting the container's CPU share - weighting relative to the weighting of all other running containers. - - To modify the proportion from the default of 1024, use the **--cpu-shares** - flag to set the weighting to 2 or higher. - - Container CPU share Flag - {C0} 60% of CPU --cpu-shares=614 (614 is 60% of 1024) - {C1} 40% of CPU --cpu-shares=410 (410 is 40% of 1024) - - The proportion is only applied when CPU-intensive processes are running. - When tasks in one container are idle, the other containers can use the - left-over CPU time. The actual amount of CPU time used varies depending on - the number of containers running on the system. - - For example, consider three containers, where one has **--cpu-shares=1024** and - two others have **--cpu-shares=512**. When processes in all three - containers attempt to use 100% of CPU, the first container would receive - 50% of the total CPU time. If you add a fourth container with **--cpu-shares=1024**, - the first container only gets 33% of the CPU. The remaining containers - receive 16.5%, 16.5% and 33% of the CPU. - - - Container CPU share Flag CPU time - {C0} 100% --cpu-shares=1024 33% - {C1} 50% --cpu-shares=512 16.5% - {C2} 50% --cpu-shares=512 16.5% - {C3} 100% --cpu-shares=1024 33% - - - On a multi-core system, the shares of CPU time are distributed across the CPU - cores. Even if a container is limited to less than 100% of CPU time, it can - use 100% of each individual CPU core. - - For example, consider a system with more than three cores. If you start one - container **{C0}** with **--cpu-shares=512** running one process, and another container - **{C1}** with **--cpu-shares=1024** running two processes, this can result in the following - division of CPU shares: - - PID container CPU CPU share - 100 {C0} 0 100% of CPU0 - 101 {C1} 1 100% of CPU1 - 102 {C1} 2 100% of CPU2 - -**--cpu-period**=*0* - Limit the CPU CFS (Completely Fair Scheduler) period. - - Limit the container's CPU usage.
This flag causes the kernel to restrict the - container's CPU usage to the period you specify. - -**--cpu-quota**=*0* - Limit the CPU CFS (Completely Fair Scheduler) quota. - - By default, containers run with the full CPU resource. This flag causes the -kernel to restrict the container's CPU usage to the quota you specify. - -**--cpuset-cpus**=*CPUSET-CPUS* - CPUs in which to allow execution (0-3, 0,1). - -**--cpuset-mems**=*CPUSET-MEMS* - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on - NUMA systems. - - For example, if you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` -to ensure the processes in your Docker container only use memory from the first -two memory nodes. - -**--cgroup-parent**=*CGROUP-PARENT* - Path to `cgroups` under which the container's `cgroup` are created. - - If the path is not absolute, the path is considered relative to the `cgroups` path of the init process. -Cgroups are created if they do not already exist. - -**--ulimit**=[] - Ulimit options - - For more information about `ulimit` see [Setting ulimits in a -container](https://docs.docker.com/reference/commandline/run/#setting-ulimits-in-a-container) - -# EXAMPLES - -## Building an image using a Dockerfile located inside the current directory - -Docker images can be built using the build command and a Dockerfile: - - docker build . - -During the build process Docker creates intermediate images. In order to -keep them, you must explicitly set `--rm=false`. - - docker build --rm=false . - -A good practice is to make a sub-directory with a related name and create -the Dockerfile in that directory. For example, a directory called mongo may -contain a Dockerfile to create a Docker MongoDB image. Likewise, another -directory called httpd may be used to store Dockerfiles for Apache web -server images. - -It is also a good practice to add the files required for the image to the -sub-directory. These files will then be specified with the `COPY` or `ADD` -instructions in the `Dockerfile`. - -Note: If you include a tar file (a good practice), then Docker will -automatically extract the contents of the tar file specified within the `ADD` -instruction into the specified target. - -## Building an image and naming that image - -A good practice is to give a name to the image you are building. Note that -only a-z0-9-_. should be used for consistency. There are no hard rules here but it is best to give the names consideration. - -The **-t**/**--tag** flag is used to rename an image. Here are some examples: - -Though it is not a good practice, image names can be arbitrary: - - docker build -t myimage . - -A better approach is to provide a fully qualified and meaningful repository, -name, and tag (where the tag in this context means the qualifier after -the ":"). In this example we build a JBoss image for the Fedora repository -and give it the version 1.0: - - docker build -t fedora/jboss:1.0 . - -The next example is for the "whenry" user repository and uses Fedora and -JBoss and gives it the version 2.1 : - - docker build -t whenry/fedora-jboss:v2.1 . - -If you do not provide a version tag then Docker will assign `latest`: - - docker build -t whenry/fedora-jboss . - -When you list the images, the image above will have the tag `latest`. - -You can apply multiple tags to an image. For example, you can apply the `latest` -tag to a newly built image and add another tag that references a specific -version. 
-For example, to tag an image both as `whenry/fedora-jboss:latest` and -`whenry/fedora-jboss:v2.1`, use the following: - - docker build -t whenry/fedora-jboss:latest -t whenry/fedora-jboss:v2.1 . - -So the name you give an image is arbitrary, but consideration should be given to -a useful convention that makes sense for consumers and should also take -into account Docker community conventions. - - -## Building an image using a URL - -This will clone the specified GitHub repository from the URL and use it -as context. The Dockerfile at the root of the repository is used as -Dockerfile. This only works if the GitHub repository is a dedicated -repository. - - docker build github.com/scollier/purpletest - -Note: You can set an arbitrary Git repository via the `git://` scheme. - -## Building an image using a URL to a tarball'ed context - -This will send the URL itself to the Docker daemon. The daemon will fetch the -tarball archive, decompress it and use its contents as the build context. The -Dockerfile at the root of the archive and the rest of the archive will get used -as the context of the build. If you pass an **-f PATH/Dockerfile** option as well, -the system will look for that file inside the contents of the tarball. - - docker build -f dev/Dockerfile https://10.10.10.1/docker/context.tar.gz - -Note: supported compression formats are 'xz', 'bzip2', 'gzip' and 'identity' (no compression). - -## Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation=<value>` option sets a container's isolation -technology. On Linux, the only supported option is `default`, which uses -Linux namespaces. On Microsoft Windows, you can specify these values: - -* `default`: Use the value specified by the Docker daemon's `--exec-opt` . If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. -* `process`: Namespace isolation only. -* `hyperv`: Hyper-V hypervisor partition-based isolation. - -Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`. - -# HISTORY -March 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -June 2015, updated by Sally O'Malley diff --git a/man/docker-commit.1.md b/man/docker-commit.1.md deleted file mode 100644 index d8a4cf8387..0000000000 --- a/man/docker-commit.1.md +++ /dev/null @@ -1,71 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-commit - Create a new image from a container's changes - -# SYNOPSIS -**docker commit** -[**-a**|**--author**[=*AUTHOR*]] -[**-c**|**--change**[=\[*DOCKERFILE INSTRUCTIONS*\]]] -[**--help**] -[**-m**|**--message**[=*MESSAGE*]] -[**-p**|**--pause**[=*true*]] -CONTAINER [REPOSITORY[:TAG]] - -# DESCRIPTION -Create a new image from an existing container specified by name or -container ID. The new image will contain the contents of the -container filesystem, *excluding* any data volumes. Refer to **docker-tag(1)** -for more information about valid image and tag names. - -While the `docker commit` command is a convenient way of extending an -existing image, you should prefer the use of a Dockerfile and `docker build` -for generating images that you intend to share with other -people.
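The EXAMPLES section below shows fuller invocations; as a minimal sketch, with a container name and repository that are purely illustrative:

    $ docker commit -m "Added Apache" -a "Jane Doe" mycontainer jane/httpd:1.0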
- -# OPTIONS -**-a**, **--author**="" - Author (e.g., "John Hannibal Smith ") - -**-c** , **--change**=[] - Apply specified Dockerfile instructions while committing the image - Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`LABEL`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - -**--help** - Print usage statement - -**-m**, **--message**="" - Commit message - -**-p**, **--pause**=*true*|*false* - Pause container during commit. The default is *true*. - -# EXAMPLES - -## Creating a new image from an existing container -An existing Fedora based container has had Apache installed while running -in interactive mode with the bash shell. Apache is also running. To -create a new image run `docker ps` to find the container's ID and then run: - - # docker commit -m="Added Apache to Fedora base image" \ - -a="A D Ministrator" 98bd7fc99854 fedora/fedora_httpd:20 - -Note that only a-z0-9-_. are allowed when naming images from an -existing container. - -## Apply specified Dockerfile instructions while committing the image -If an existing container was created without the DEBUG environment -variable set to "true", you can create a new image based on that -container by first getting the container's ID with `docker ps` and -then running: - - # docker commit -c="ENV DEBUG true" 98bd7fc99854 debug-image - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and in -June 2014, updated by Sven Dowideit -July 2014, updated by Sven Dowideit -Oct 2014, updated by Daniel, Dao Quang Minh -June 2015, updated by Sally O'Malley diff --git a/man/docker-config-json.5.md b/man/docker-config-json.5.md deleted file mode 100644 index 49987f08b8..0000000000 --- a/man/docker-config-json.5.md +++ /dev/null @@ -1,72 +0,0 @@ -% CONFIG.JSON(5) Docker User Manuals -% Docker Community -% JANUARY 2016 -# NAME -HOME/.docker/config.json - Default Docker configuration file - -# INTRODUCTION - -By default, the Docker command line stores its configuration files in a -directory called `.docker` within your `$HOME` directory. Docker manages most of -the files in the configuration directory and you should not modify them. -However, you *can modify* the `config.json` file to control certain aspects of -how the `docker` command behaves. - -Currently, you can modify the `docker` command behavior using environment -variables or command-line options. You can also use options within -`config.json` to modify some of the same behavior. When using these -mechanisms, you must keep in mind the order of precedence among them. Command -line options override environment variables and environment variables override -properties you specify in a `config.json` file. - -The `config.json` file stores a JSON encoding of several properties: - -* The `HttpHeaders` property specifies a set of headers to include in all messages -sent from the Docker client to the daemon. Docker does not try to interpret or -understand these header; it simply puts them into the messages. Docker does not -allow these headers to change any headers it sets for itself. - -* The `psFormat` property specifies the default format for `docker ps` output. -When the `--format` flag is not provided with the `docker ps` command, -Docker's client uses this property. If this property is not set, the client -falls back to the default table format. For a list of supported formatting -directives, see **docker-ps(1)**. - -* The `detachKeys` property specifies the default key sequence which -detaches the container. 
When the `--detach-keys` flag is not provided -with the `docker attach`, `docker exec`, `docker run`, or `docker start` commands, Docker's client uses this property. If this property is not -set, the client falls back to the default sequence `ctrl-p,ctrl-q`. - - -* The `imagesFormat` property specifies the default format for `docker images` -output. When the `--format` flag is not provided with the `docker images` -command, Docker's client uses this property. If this property is not set, the -client falls back to the default table format. For a list of supported -formatting directives, see **docker-images(1)**. - -You can specify a different location for the configuration files via the -`DOCKER_CONFIG` environment variable or the `--config` command line option. If -both are specified, then the `--config` option overrides the `DOCKER_CONFIG` -environment variable: - - docker --config ~/testconfigs/ ps - -This command instructs Docker to use the configuration files in the -`~/testconfigs/` directory when running the `ps` command. - -## Examples - -Following is a sample `config.json` file: - - { - "HttpHeaders": { - "MyHeader": "MyValue" - }, - "psFormat": "table {{.ID}}\\t{{.Image}}\\t{{.Command}}\\t{{.Labels}}", - "imagesFormat": "table {{.ID}}\\t{{.Repository}}\\t{{.Tag}}\\t{{.CreatedAt}}", - "detachKeys": "ctrl-e,e" - } - -# HISTORY -January 2016, created by Moxiegirl diff --git a/man/docker-cp.1.md b/man/docker-cp.1.md deleted file mode 100644 index 949d60bb8b..0000000000 --- a/man/docker-cp.1.md +++ /dev/null @@ -1,175 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-cp - Copy files/folders between a container and the local filesystem. - -# SYNOPSIS -**docker cp** -[**--help**] -CONTAINER:SRC_PATH DEST_PATH|- - -**docker cp** -[**--help**] -SRC_PATH|- CONTAINER:DEST_PATH - -# DESCRIPTION - -The `docker cp` utility copies the contents of `SRC_PATH` to the `DEST_PATH`. -You can copy from the container's file system to the local machine or the -reverse, from the local filesystem to the container. If `-` is specified for -either the `SRC_PATH` or `DEST_PATH`, you can also stream a tar archive from -`STDIN` or to `STDOUT`. The `CONTAINER` can be a running or stopped container. -The `SRC_PATH` or `DEST_PATH` can be a file or directory. - -The `docker cp` command assumes container paths are relative to the container's -`/` (root) directory. This means supplying the initial forward slash is optional; -the command sees `compassionate_darwin:/tmp/foo/myfile.txt` and -`compassionate_darwin:tmp/foo/myfile.txt` as identical. Local machine paths can -be an absolute or relative value. The command interprets a local machine's -relative paths as relative to the current working directory where `docker cp` is -run. - -The `cp` command behaves like the Unix `cp -a` command in that directories are -copied recursively with permissions preserved if possible. Ownership is set to -the user and primary group at the destination. For example, files copied to a -container are created with `UID:GID` of the root user. Files copied to the local -machine are created with the `UID:GID` of the user which invoked the `docker cp` -command. If you specify the `-L` option, `docker cp` follows any symbolic link -in the `SRC_PATH`. `docker cp` does *not* create parent directories for -`DEST_PATH` if they do not exist.
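For instance, to copy a symlink's target out of a container rather than the link itself (the container name is illustrative; `/etc/resolv.conf` is commonly a symlink inside containers):

    $ docker cp -L mycontainer:/etc/resolv.conf ./resolv.conf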
- -Assuming a path separator of `/`, a first argument of `SRC_PATH` and second -argument of `DEST_PATH`, the behavior is as follows: - -- `SRC_PATH` specifies a file - - `DEST_PATH` does not exist - - the file is saved to a file created at `DEST_PATH` - - `DEST_PATH` does not exist and ends with `/` - - Error condition: the destination directory must exist. - - `DEST_PATH` exists and is a file - - the destination is overwritten with the source file's contents - - `DEST_PATH` exists and is a directory - - the file is copied into this directory using the basename from - `SRC_PATH` -- `SRC_PATH` specifies a directory - - `DEST_PATH` does not exist - - `DEST_PATH` is created as a directory and the *contents* of the source - directory are copied into this directory - - `DEST_PATH` exists and is a file - - Error condition: cannot copy a directory to a file - - `DEST_PATH` exists and is a directory - - `SRC_PATH` does not end with `/.` - - the source directory is copied into this directory - - `SRC_PATH` does end with `/.` - - the *content* of the source directory is copied into this - directory - -The command requires `SRC_PATH` and `DEST_PATH` to exist according to the above -rules. If `SRC_PATH` is local and is a symbolic link, the symbolic link, not -the target, is copied by default. To copy the link target and not the link, -specify the `-L` option. - -A colon (`:`) is used as a delimiter between `CONTAINER` and its path. You can -also use `:` when specifying paths to a `SRC_PATH` or `DEST_PATH` on a local -machine, for example `file:name.txt`. If you use a `:` in a local machine path, -you must be explicit with a relative or absolute path, for example: - - `/path/to/file:name.txt` or `./file:name.txt` - -It is not possible to copy certain system files such as resources under -`/proc`, `/sys`, `/dev`, tmpfs, and mounts created by the user in the container. -However, you can still copy such files by manually running `tar` in `docker exec`. -For example (consider `SRC_PATH` and `DEST_PATH` are directories): - - $ docker exec foo tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | tar Cxf DEST_PATH - - -or - - $ tar Ccf $(dirname SRC_PATH) - $(basename SRC_PATH) | docker exec -i foo tar Cxf DEST_PATH - - - -Using `-` as the `SRC_PATH` streams the contents of `STDIN` as a tar archive. -The command extracts the content of the tar to the `DEST_PATH` in container's -filesystem. In this case, `DEST_PATH` must specify a directory. Using `-` as -the `DEST_PATH` streams the contents of the resource as a tar archive to `STDOUT`. - -# OPTIONS -**-L**, **--follow-link**=*true*|*false* - Follow symbol link in SRC_PATH - -**--help** - Print usage statement - -# EXAMPLES - -Suppose a container has finished producing some output as a file it saves -to somewhere in its filesystem. This could be the output of a build job or -some other computation. You can copy these outputs from the container to a -location on your local host. - -If you want to copy the `/tmp/foo` directory from a container to the -existing `/tmp` directory on your host. If you run `docker cp` in your `~` -(home) directory on the local host: - - $ docker cp compassionate_darwin:tmp/foo /tmp - -Docker creates a `/tmp/foo` directory on your host. Alternatively, you can omit -the leading slash in the command. If you execute this command from your home -directory: - - $ docker cp compassionate_darwin:tmp/foo tmp - -If `~/tmp` does not exist, Docker will create it and copy the contents of -`/tmp/foo` from the container into this new directory. 
If `~/tmp` already -exists as a directory, then Docker will copy the contents of `/tmp/foo` from -the container into a directory at `~/tmp/foo`. - -When copying a single file to an existing `LOCALPATH`, the `docker cp` command -will either overwrite the contents of `LOCALPATH` if it is a file or place it -into `LOCALPATH` if it is a directory, overwriting an existing file of the same -name if one exists. For example, this command: - - $ docker cp sharp_ptolemy:/tmp/foo/myfile.txt /test - -If `/test` does not exist on the local machine, it will be created as a file -with the contents of `/tmp/foo/myfile.txt` from the container. If `/test` -exists as a file, it will be overwritten. Lastly, if `/test` exists as a -directory, the file will be copied to `/test/myfile.txt`. - -Next, suppose you want to copy a file or folder into a container. For example, -this could be a configuration file or some other input to a long running -computation that you would like to place into a created container before it -starts. This is useful because it does not require the configuration file or -other input to exist in the container image. - -If you have a file, `config.yml`, in the current directory on your local host -and wish to copy it to an existing directory at `/etc/my-app.d` in a container, -this command can be used: - - $ docker cp config.yml myappcontainer:/etc/my-app.d - -If you have several files in a local directory `/config` which you need to copy -to a directory `/etc/my-app.d` in a container: - - $ docker cp /config/. myappcontainer:/etc/my-app.d - -The above command will copy the contents of the local `/config` directory into -the directory `/etc/my-app.d` in the container. - -Finally, if you want to copy a symbolic link into a container, you typically -want to copy the linked target and not the link itself. To copy the target, use -the `-L` option, for example: - - $ ln -s /tmp/somefile /tmp/somefile.ln - $ docker cp -L /tmp/somefile.ln myappcontainer:/tmp/ - -This command copies content of the local `/tmp/somefile` into the file -`/tmp/somefile.ln` in the container. Without `-L` option, the `/tmp/somefile.ln` -preserves its symbolic link but not its content. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit -May 2015, updated by Josh Hawn diff --git a/man/docker-create.1.md b/man/docker-create.1.md deleted file mode 100644 index 980e09c101..0000000000 --- a/man/docker-create.1.md +++ /dev/null @@ -1,502 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-create - Create a new container - -# SYNOPSIS -**docker create** -[**-a**|**--attach**[=*[]*]] -[**--add-host**[=*[]*]] -[**--blkio-weight**[=*[BLKIO-WEIGHT]*]] -[**--blkio-weight-device**[=*[]*]] -[**--cpu-shares**[=*0*]] -[**--cap-add**[=*[]*]] -[**--cap-drop**[=*[]*]] -[**--cgroup-parent**[=*CGROUP-PATH*]] -[**--cidfile**[=*CIDFILE*]] -[**--cpu-period**[=*0*]] -[**--cpu-quota**[=*0*]] -[**--cpuset-cpus**[=*CPUSET-CPUS*]] -[**--cpuset-mems**[=*CPUSET-MEMS*]] -[**--device**[=*[]*]] -[**--device-read-bps**[=*[]*]] -[**--device-read-iops**[=*[]*]] -[**--device-write-bps**[=*[]*]] -[**--device-write-iops**[=*[]*]] -[**--dns**[=*[]*]] -[**--dns-search**[=*[]*]] -[**--dns-opt**[=*[]*]] -[**-e**|**--env**[=*[]*]] -[**--entrypoint**[=*ENTRYPOINT*]] -[**--env-file**[=*[]*]] -[**--expose**[=*[]*]] -[**--group-add**[=*[]*]] -[**-h**|**--hostname**[=*HOSTNAME*]] -[**--help**] -[**-i**|**--interactive**] -[**--ip**[=*IPv4-ADDRESS*]] -[**--ip6**[=*IPv6-ADDRESS*]] -[**--ipc**[=*IPC*]] -[**--isolation**[=*default*]] -[**--kernel-memory**[=*KERNEL-MEMORY*]] -[**-l**|**--label**[=*[]*]] -[**--label-file**[=*[]*]] -[**--link**[=*[]*]] -[**--link-local-ip**[=*[]*]] -[**--log-driver**[=*[]*]] -[**--log-opt**[=*[]*]] -[**-m**|**--memory**[=*MEMORY*]] -[**--mac-address**[=*MAC-ADDRESS*]] -[**--memory-reservation**[=*MEMORY-RESERVATION*]] -[**--memory-swap**[=*LIMIT*]] -[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]] -[**--name**[=*NAME*]] -[**--network-alias**[=*[]*]] -[**--network**[=*"bridge"*]] -[**--oom-kill-disable**] -[**--oom-score-adj**[=*0*]] -[**-P**|**--publish-all**] -[**-p**|**--publish**[=*[]*]] -[**--pid**[=*[PID]*]] -[**--userns**[=*[]*]] -[**--pids-limit**[=*PIDS_LIMIT*]] -[**--privileged**] -[**--read-only**] -[**--restart**[=*RESTART*]] -[**--security-opt**[=*[]*]] -[**--storage-opt**[=*[]*]] -[**--stop-signal**[=*SIGNAL*]] -[**--shm-size**[=*[]*]] -[**--sysctl**[=*[]*]] -[**-t**|**--tty**] -[**--tmpfs**[=*[CONTAINER-DIR[:]*]] -[**-u**|**--user**[=*USER*]] -[**--ulimit**[=*[]*]] -[**--uts**[=*[]*]] -[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]] -[**--volume-driver**[=*DRIVER*]] -[**--volumes-from**[=*[]*]] -[**-w**|**--workdir**[=*WORKDIR*]] -IMAGE [COMMAND] [ARG...] - -# DESCRIPTION - -Creates a writeable container layer over the specified image and prepares it for -running the specified command. The container ID is then printed to STDOUT. This -is similar to **docker run -d** except the container is never started. You can -then use the **docker start ** command to start the container at -any point. - -The initial status of the container created with **docker create** is 'created'. - -# OPTIONS -**-a**, **--attach**=[] - Attach to STDIN, STDOUT or STDERR. - -**--add-host**=[] - Add a custom host-to-IP mapping (host:ip) - -**--blkio-weight**=*0* - Block IO weight (relative weight) accepts a weight value between 10 and 1000. - -**--blkio-weight-device**=[] - Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`). 
- -**--cpu-shares**=*0* - CPU shares (relative weight) - -**--cap-add**=[] - Add Linux capabilities - -**--cap-drop**=[] - Drop Linux capabilities - -**--cgroup-parent**="" - Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist. - -**--cidfile**="" - Write the container ID to the file - -**--cpu-period**=*0* - Limit the CPU CFS (Completely Fair Scheduler) period - -**--cpuset-cpus**="" - CPUs in which to allow execution (0-3, 0,1) - -**--cpuset-mems**="" - Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems. - - If you have four memory nodes on your system (0-3), use `--cpuset-mems=0,1` -then processes in your Docker container will only use memory from the first -two memory nodes. - -**--cpu-quota**=*0* - Limit the CPU CFS (Completely Fair Scheduler) quota - -**--device**=[] - Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm) - -**--device-read-bps**=[] - Limit read rate (bytes per second) from a device (e.g. --device-read-bps=/dev/sda:1mb) - -**--device-read-iops**=[] - Limit read rate (IO per second) from a device (e.g. --device-read-iops=/dev/sda:1000) - -**--device-write-bps**=[] - Limit write rate (bytes per second) to a device (e.g. --device-write-bps=/dev/sda:1mb) - -**--device-write-iops**=[] - Limit write rate (IO per second) to a device (e.g. --device-write-iops=/dev/sda:1000) - -**--dns**=[] - Set custom DNS servers - -**--dns-opt**=[] - Set custom DNS options - -**--dns-search**=[] - Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain) - -**-e**, **--env**=[] - Set environment variables - -**--entrypoint**="" - Overwrite the default ENTRYPOINT of the image - -**--env-file**=[] - Read in a line-delimited file of environment variables - -**--expose**=[] - Expose a port or a range of ports (e.g. --expose=3300-3310) from the container without publishing it to your host - -**--group-add**=[] - Add additional groups to run as - -**-h**, **--hostname**="" - Container host name - -**--help** - Print usage statement - -**-i**, **--interactive**=*true*|*false* - Keep STDIN open even if not attached. The default is *false*. - -**--ip**="" - Sets the container's interface IPv4 address (e.g. 172.23.0.9) - - It can only be used in conjunction with **--net** for user-defined networks - -**--ip6**="" - Sets the container's interface IPv6 address (e.g. 2001:db8::1b99) - - It can only be used in conjunction with **--net** for user-defined networks - -**--ipc**="" - Default is to create a private IPC namespace (POSIX SysV IPC) for the container - 'container:<name|id>': reuses another container's shared memory, semaphores and message queues - 'host': use the host's shared memory, semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure. - -**--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. Note -that the default on Windows server is `process`, and the default on Windows client -is `hyperv`. Linux only supports `default`. - -**--kernel-memory**="" - Kernel memory limit (format: `<number>[<unit>]`, where unit = b, k, m or g) - - Constrains the kernel memory available to a container. If a limit of 0 -is specified (not using `--kernel-memory`), the container's kernel memory -is not limited. If you specify a limit, it may be rounded up to a multiple -of the operating system's page size and the value can be very large, -millions of trillions. - -**-l**, **--label**=[] - Adds metadata to a container (e.g., --label=com.example.key=value) - -**--label-file**=[] - Read labels from a file. Delimit each label with an EOL. - -**--link**=[] - Add link to another container in the form of <name or id>:alias or just <name or id> -in which case the alias will match the name. - -**--link-local-ip**=[] - Add one or more link-local IPv4/IPv6 addresses to the container's interface - -**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" - Logging driver for the container. Default is defined by daemon `--log-driver` flag. - **Warning**: the `docker logs` command works only for the `json-file` and - `journald` logging drivers. - -**--log-opt**=[] - Logging driver specific options. - -**-m**, **--memory**="" - Memory limit (format: `<number>[<unit>]`, where unit = b, k, m or g) - - Allows you to constrain the memory available to a container. If the host -supports swap memory, then the **-m** memory setting can be larger than physical -RAM. If a limit of 0 is specified (not using **-m**), the container's memory is -not limited. The actual limit may be rounded up to a multiple of the operating -system's page size (the value would be very large, that's millions of trillions). - -**--mac-address**="" - Container MAC address (e.g. 92:d0:c6:0a:29:33) - -**--memory-reservation**="" - Memory soft limit (format: `<number>[<unit>]`, where unit = b, k, m or g) - - After setting memory reservation, when the system detects memory contention -or low memory, containers are forced to restrict their consumption to their -reservation. So you should always set the value below **--memory**, otherwise the -hard limit will take precedence. By default, memory reservation will be the same -as memory limit. - -**--memory-swap**="LIMIT" - A limit value equal to memory plus swap. Must be used with the **-m** -(**--memory**) flag. The swap `LIMIT` should always be larger than the **-m** -(**--memory**) value. - - The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes), -`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a -unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap. - -**--memory-swappiness**="" - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100. - -**--name**="" - Assign a name to the container - -**--net**="*bridge*" - Set the Network mode for the container - 'bridge': create a network stack on the default Docker bridge - 'none': no networking - 'container:<name|id>': reuse another container's network stack - 'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure. - '<network-name>|<network-id>': connect to a user-defined network - -**--network-alias**=[] - Add network-scoped alias for the container - -**--oom-kill-disable**=*true*|*false* - Whether to disable OOM Killer for the container or not. - -**--oom-score-adj**="" - Tune the host's OOM preferences for containers (accepts -1000 to 1000) - -**-P**, **--publish-all**=*true*|*false* - Publish all exposed ports to random ports on the host interfaces. The default is *false*.
- -**-p**, **--publish**=[] - Publish a container's port, or a range of ports, to the host - format: ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort - Both hostPort and containerPort can be specified as a range of ports. - When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range. (e.g., `-p 1234-1236:1234-1236/tcp`) - (use 'docker port' to see the actual mapping) - -**--pid**="" - Set the PID mode for the container - Default is to create a private PID namespace for the container - 'container:': join another container's PID namespace - 'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure. - -**--userns**="" - Set the usernamespace mode for the container when `userns-remap` option is enabled. - **host**: use the host usernamespace and enable all privileged options (e.g., `pid=host` or `--privileged`). - -**--pids-limit**="" - Tune the container's pids limit. Set `-1` to have unlimited pids for the container. - -**--privileged**=*true*|*false* - Give extended privileges to this container. The default is *false*. - -**--read-only**=*true*|*false* - Mount the container's root filesystem as read only. - -**--restart**="*no*" - Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped). - -**--shm-size**="" - Size of `/dev/shm`. The format is ``. `number` must be greater than `0`. - Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you omit the unit, the system uses bytes. - If you omit the size entirely, the system uses `64m`. - -**--security-opt**=[] - Security Options - - "label:user:USER" : Set the label user for the container - "label:role:ROLE" : Set the label role for the container - "label:type:TYPE" : Set the label type for the container - "label:level:LEVEL" : Set the label level for the container - "label:disable" : Turn off label confinement for the container - "no-new-privileges" : Disable container processes from gaining additional privileges - "seccomp:unconfined" : Turn off seccomp confinement for the container - "seccomp:profile.json : White listed syscalls seccomp Json file to be used as a seccomp filter - -**--storage-opt**=[] - Storage driver options per container - - $ docker create -it --storage-opt size=120G fedora /bin/bash - - This (size) will allow to set the container rootfs size to 120G at creation time. User cannot pass a size less than the Default BaseFS Size. - This option is only available for the `devicemapper`, `btrfs`, and `zfs` graph drivers. - -**--stop-signal**=*SIGTERM* - Signal to stop a container. Default is SIGTERM. - -**--sysctl**=SYSCTL - Configure namespaced kernel parameters at runtime - - IPC Namespace - current sysctls allowed: - - kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced - Sysctls beginning with fs.mqueue.* - - Note: if you use --ipc=host using these sysctls will not be allowed. - - Network Namespace - current sysctls allowed: - Sysctls beginning with net.* - - Note: if you use --net=host using these sysctls will not be allowed. - -**-t**, **--tty**=*true*|*false* - Allocate a pseudo-TTY. The default is *false*. 
- -**--tmpfs**=[] Create a tmpfs mount - - Mount a temporary filesystem (`tmpfs`) mount into a container, for example: - - $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image - - This command mounts a `tmpfs` at `/tmp` within the container. The supported mount -options are the same as the Linux default `mount` flags. If you do not specify -any options, the system uses the following options: -`rw,noexec,nosuid,nodev,size=65536k`. - -**-u**, **--user**="" - Username or UID - -**--ulimit**=[] - Ulimit options - -**--uts**=*host* - Set the UTS mode for the container - **host**: use the host's UTS namespace inside the container. - Note: the host mode allows the container to change the host's hostname and is therefore considered insecure. - -**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*] - Create a bind mount. If you specify `-v /HOST-DIR:/CONTAINER-DIR`, Docker - bind mounts `/HOST-DIR` on the host to `/CONTAINER-DIR` in the Docker - container. If 'HOST-DIR' is omitted, Docker automatically creates the new - volume on the host. The `OPTIONS` are a comma delimited list and can be: - - * [rw|ro] - * [z|Z] - * [`[r]shared`|`[r]slave`|`[r]private`] - -The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR` -can be an absolute path or a `name` value. A `name` value must start with an -alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or -`-` (hyphen). An absolute path starts with a `/` (forward slash). - -If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the -path you specify. If you supply a `name`, Docker creates a named volume by that -`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR` -value. If you supply the `/foo` value, Docker creates a bind mount. If you -supply the `foo` specification, Docker creates a named volume. - -You can specify multiple **-v** options to mount one or more volumes into a -container. To use these same volumes in other containers, also specify the -**--volumes-from** option. - -You can add a `:ro` or `:rw` suffix to a volume to mount it in read-only or -read-write mode, respectively. By default, volumes are mounted read-write. -See examples. - -Labeling systems like SELinux require that proper labels are placed on volume -content mounted into a container. Without a label, the security system might -prevent the processes running inside the container from using the content. By -default, Docker does not change the labels set by the OS. - -To change a label in the container context, you can add either of two suffixes -`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file -objects on the shared volumes. The `z` option tells Docker that two containers -share the volume content. As a result, Docker labels the content with a shared -content label. Shared volume labels allow all containers to read/write content. -The `Z` option tells Docker to label the content with a private unshared label. -Only the current container can use a private volume. - -By default, bind-mounted volumes are `private`. That means any mounts done -inside the container will not be visible on the host, and vice versa. You can change -this behavior by specifying a volume mount propagation property. Making a -volume `shared` means that mounts done under that volume inside the container will be -visible on the host, and vice versa.
Making a volume `slave` enables one-way -mount propagation: mounts done on the host under that volume -will be visible inside the container, but not the other way around. - -To control the mount propagation property of a volume, use the `:[r]shared`, -`:[r]slave` or `:[r]private` propagation flags. The propagation property can -be specified only for bind-mounted volumes and not for internal volumes or -named volumes. For mount propagation to work, the source mount point (the mount point -where the source directory is mounted) has to have the right propagation properties. For -shared volumes, the source mount point has to be shared. And for slave volumes, -the source mount has to be either shared or slave. - -Use `df <source-dir>` to figure out the source mount, and then use -`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to figure out the propagation -properties of the source mount. If the `findmnt` utility is not available, you -can look at the mount entry for the source mount point in `/proc/self/mountinfo`. Look -at the `optional fields` and see if any propagation properties are specified. -`shared:X` means the mount is `shared`, `master:X` means the mount is `slave`, and if -nothing is there the mount is `private`. - -To change the propagation properties of a mount point, use the `mount` command. For -example, to bind mount the source directory `/foo`, run -`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This -converts /foo into a `shared` mount point. Alternatively, you can directly -change the propagation properties of the source mount. Say `/` is the source mount for -`/foo`; then use `mount --make-shared /` to convert `/` into a `shared` mount. - -> **Note**: -> When using systemd to manage the Docker daemon's start and stop, in the systemd -> unit file there is an option to control mount propagation for the Docker daemon -> itself, called `MountFlags`. The value of this setting may cause Docker to not -> see mount propagation changes made on the mount point. For example, if this value -> is `slave`, you may not be able to use the `shared` or `rshared` propagation on -> a volume. - - -To disable automatic copying of data from the container path to the volume, use -the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes. - -**--volume-driver**="" - Container's volume driver. This driver creates volumes specified either from - a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag. - See **docker-volume-create(1)** for full details. - -**--volumes-from**=[] - Mount volumes from the specified container(s) - -**-w**, **--workdir**="" - Working directory inside the container - -# EXAMPLES - -## Specify isolation technology for container (--isolation) - -This option is useful in situations where you are running Docker containers on -Windows. The `--isolation=<value>` option sets a container's isolation -technology. On Linux, the only supported value is `default`, which uses -Linux namespaces. On Microsoft Windows, you can specify these values: - -* `default`: Use the value specified by the Docker daemon's `--exec-opt` option. If the `daemon` does not specify an isolation technology, Microsoft Windows uses `process` as its default value. -* `process`: Namespace isolation only. -* `hyperv`: Hyper-V hypervisor partition-based isolation. - -Specifying the `--isolation` flag without a value is the same as setting `--isolation="default"`.
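-For example, a minimal sketch on a Windows daemon (the image name is illustrative, not part of the original text): - - $ docker create --isolation=hyperv my-windows-image -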
- -# HISTORY -August 2014, updated by Sven Dowideit -September 2014, updated by Sven Dowideit -November 2014, updated by Sven Dowideit diff --git a/man/docker-diff.1.md b/man/docker-diff.1.md deleted file mode 100644 index 6c6c502533..0000000000 --- a/man/docker-diff.1.md +++ /dev/null @@ -1,49 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-diff - Inspect changes on a container's filesystem - -# SYNOPSIS -**docker diff** -[**--help**] -CONTAINER - -# DESCRIPTION -Inspect changes on a container's filesystem. You can use the full or -shortened container ID or the container name set using the -**docker run --name** option. - -# OPTIONS -**--help** - Print usage statement - -# EXAMPLES -Inspect the changes to an nginx container: - - # docker diff 1fdfd1f54c1b - C /dev - C /dev/console - C /dev/core - C /dev/stdout - C /dev/fd - C /dev/ptmx - C /dev/stderr - C /dev/stdin - C /run - A /run/nginx.pid - C /var/lib/nginx/tmp - A /var/lib/nginx/tmp/client_body - A /var/lib/nginx/tmp/fastcgi - A /var/lib/nginx/tmp/proxy - A /var/lib/nginx/tmp/scgi - A /var/lib/nginx/tmp/uwsgi - C /var/log/nginx - A /var/log/nginx/access.log - A /var/log/nginx/error.log - - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/man/docker-events.1.md b/man/docker-events.1.md deleted file mode 100644 index 4e38b53687..0000000000 --- a/man/docker-events.1.md +++ /dev/null @@ -1,104 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-events - Get real time events from the server - -# SYNOPSIS -**docker events** -[**--help**] -[**-f**|**--filter**[=*[]*]] -[**--since**[=*SINCE*]] -[**--until**[=*UNTIL*]] - - -# DESCRIPTION -Get event information from the Docker daemon. Information can include historical -information and real-time information. - -Docker containers will report the following events: - - attach, commit, copy, create, destroy, detach, die, exec_create, exec_detach, exec_start, export, kill, oom, pause, rename, resize, restart, start, stop, top, unpause, update - -Docker images report the following events: - - delete, import, load, pull, push, save, tag, untag - -Docker volumes report the following events: - - create, mount, unmount, destroy - -Docker networks report the following events: - - create, connect, disconnect, destroy - -# OPTIONS -**--help** - Print usage statement - -**-f**, **--filter**=[] - Provide filter values (i.e., 'event=stop') - -**--since**="" - Show all events created since timestamp - -**--until**="" - Stream events until this timestamp - -The `--since` and `--until` parameters can be Unix timestamps, date formatted -timestamps, or Go duration strings (e.g. `10m`, `1h30m`) computed -relative to the client machine's time. If you do not provide the `--since` option, -the command returns only new and/or live events. Supported formats for date -formatted time stamps include RFC3339Nano, RFC3339, `2006-01-02T15:04:05`, -`2006-01-02T15:04:05.999999999`, `2006-01-02Z07:00`, and `2006-01-02`. The local -timezone on the client will be used if you do not provide either a `Z` or a -`+-00:00` timezone offset at the end of the timestamp.
When providing Unix -timestamps enter seconds[.nanoseconds], where seconds is the number of seconds -that have elapsed since January 1, 1970 (midnight UTC/GMT), not counting leap -seconds (aka Unix epoch or Unix time), and the optional .nanoseconds field is a -fraction of a second no more than nine digits long. - -# EXAMPLES - -## Listening for Docker events - -After running `docker events`, a container 786d698004576 is started and stopped -(the container ID has been shortened in the output below): - - # docker events - 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) start - 2015-01-28T20:21:31.000000000-08:00 59211849bc10: (from whenry/testimage:latest) die - 2015-01-28T20:21:32.000000000-08:00 59211849bc10: (from whenry/testimage:latest) stop - -## Listening for events since a given date -Again, the output container IDs have been shortened for the purposes of this document: - - # docker events --since '2015-01-28' - 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create - 2015-01-28T20:25:38.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start - 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) create - 2015-01-28T20:25:39.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start - 2015-01-28T20:25:40.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die - 2015-01-28T20:25:42.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop - 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) start - 2015-01-28T20:25:45.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) die - 2015-01-28T20:25:46.000000000-08:00 c21f6c22ba27: (from whenry/testimage:latest) stop - -The following example outputs all events that were generated in the last 3 minutes, -relative to the current time on the client machine: - - # docker events --since '3m' - 2015-05-12T11:51:30.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) die - 2015-05-12T15:52:12.999999999Z07:00 4386fb97867d: (from ubuntu-1:14.04) stop - 2015-05-12T15:53:45.999999999Z07:00 7805c1d35632: (from redis:2.8) die - 2015-05-12T15:54:03.999999999Z07:00 7805c1d35632: (from redis:2.8) stop - -If you do not provide the --since option, the command returns only new and/or -live events. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -June 2015, updated by Brian Goff -October 2015, updated by Mike Brown diff --git a/man/docker-exec.1.md b/man/docker-exec.1.md deleted file mode 100644 index 16a061d069..0000000000 --- a/man/docker-exec.1.md +++ /dev/null @@ -1,64 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-exec - Run a command in a running container - -# SYNOPSIS -**docker exec** -[**-d**|**--detach**] -[**--detach-keys**[=*[]*]] -[**--help**] -[**-i**|**--interactive**] -[**--privileged**] -[**-t**|**--tty**] -[**-u**|**--user**[=*USER*]] -CONTAINER COMMAND [ARG...] - -# DESCRIPTION - -Run a process in a running container. - -The command started using `docker exec` will only run while the container's primary -process (`PID 1`) is running, and will not be restarted if the container is restarted. - -If the container is paused, then the `docker exec` command will wait until the -container is unpaused, and then run the specified command. - -# OPTIONS -**-d**, **--detach**=*true*|*false* - Detached mode: run command in the background.
The default is *false*. - -**--detach-keys**="" - Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`. - -**--help** - Print usage statement - -**-i**, **--interactive**=*true*|*false* - Keep STDIN open even if not attached. The default is *false*. - -**--privileged**=*true*|*false* - Give the process extended [Linux capabilities](http://man7.org/linux/man-pages/man7/capabilities.7.html) -when running in a container. The default is *false*. - - Without this flag, the process run by `docker exec` in a running container has -the same capabilities as the container, which may be limited. Set -`--privileged` to give all capabilities to the process. - -**-t**, **--tty**=*true*|*false* - Allocate a pseudo-TTY. The default is *false*. - -**-u**, **--user**="" - Sets the username or UID used and optionally the groupname or GID for the specified command. - - The following examples are all valid: - --user [user | user:group | uid | uid:gid | user:gid | uid:group ] - - Without this argument, the command will be run as root in the container. - -The **-t** option is incompatible with a redirection of the docker client -standard input. - -# HISTORY -November 2014, updated by Sven Dowideit diff --git a/man/docker-export.1.md b/man/docker-export.1.md deleted file mode 100644 index 3d59e4788e..0000000000 --- a/man/docker-export.1.md +++ /dev/null @@ -1,46 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-export - Export the contents of a container's filesystem as a tar archive - -# SYNOPSIS -**docker export** -[**--help**] -[**-o**|**--output**[=*""*]] -CONTAINER - -# DESCRIPTION -Export the contents of a container's filesystem using the full or shortened -container ID or container name. The output is exported to STDOUT and can be -redirected to a tar file. - -Stream to a file instead of STDOUT by using **-o**. - -# OPTIONS -**--help** - Print usage statement - -**-o**, **--output**="" - Write to a file, instead of STDOUT - -# EXAMPLES -Export the contents of the container called angry_bell to a tar file -called angry_bell.tar: - - # docker export angry_bell > angry_bell.tar - # docker export --output=angry_bell-latest.tar angry_bell - # ls -sh angry_bell.tar - 321M angry_bell.tar - # ls -sh angry_bell-latest.tar - 321M angry_bell-latest.tar - -# See also -**docker-import(1)** to create an empty filesystem image -and import the contents of the tarball into it, then optionally tag it. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -January 2015, updated by Joseph Kern (josephakern at gmail dot com) diff --git a/man/docker-history.1.md b/man/docker-history.1.md deleted file mode 100644 index 91edefe25f..0000000000 --- a/man/docker-history.1.md +++ /dev/null @@ -1,52 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-history - Show the history of an image - -# SYNOPSIS -**docker history** -[**--help**] -[**-H**|**--human**[=*true*]] -[**--no-trunc**] -[**-q**|**--quiet**] -IMAGE - -# DESCRIPTION - -Show the history of when and how an image was created. - -# OPTIONS -**--help** - Print usage statement - -**-H**, **--human**=*true*|*false* - Print sizes and dates in human readable format. The default is *true*. - -**--no-trunc**=*true*|*false* - Don't truncate output. The default is *false*.
- -**-q**, **--quiet**=*true*|*false* - Only show numeric IDs. The default is *false*. - -# EXAMPLES - $ docker history fedora - IMAGE CREATED CREATED BY SIZE COMMENT - 105182bb5e8b 5 days ago /bin/sh -c #(nop) ADD file:71356d2ad59aa3119d 372.7 MB - 73bd853d2ea5 13 days ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B - 511136ea3c5a 10 months ago 0 B Imported from - - -## Display comments in the image history -The `docker commit` command has a **-m** flag for adding comments to the image. These comments will be displayed in the image history. - - $ sudo docker history docker:scm - IMAGE CREATED CREATED BY SIZE COMMENT - 2ac9d1098bf1 3 months ago /bin/bash 241.4 MB Added Apache to Fedora base image - 88b42ffd1f7c 5 months ago /bin/sh -c #(nop) ADD file:1fd8d7f9f6557cafc7 373.7 MB - c69cab00d6ef 5 months ago /bin/sh -c #(nop) MAINTAINER Lokesh Mandvekar 0 B - 511136ea3c5a 19 months ago 0 B Imported from - - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/man/docker-images.1.md b/man/docker-images.1.md deleted file mode 100644 index fd551a58db..0000000000 --- a/man/docker-images.1.md +++ /dev/null @@ -1,115 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-images - List images - -# SYNOPSIS -**docker images** -[**--help**] -[**-a**|**--all**] -[**--digests**] -[**-f**|**--filter**[=*[]*]] -[**--no-trunc**] -[**-q**|**--quiet**] -[REPOSITORY[:TAG]] - -# DESCRIPTION -This command lists the images stored in the local Docker repository. - -By default, intermediate images, used during builds, are not listed. Some of the -output, e.g., image ID, is truncated, for space reasons. However the truncated -image ID, and often the first few characters, are enough to be used in other -Docker commands that use the image ID. The output includes repository, tag, image -ID, date created and the virtual size. - -The title REPOSITORY for the first column may seem confusing. It is essentially -the image name. However, because you can tag a specific image, and multiple tags -(image instances) can be associated with a single name, the name is really a -repository for all tagged images of the same name. For example, consider an image -called fedora. It may be tagged with 18, 19, or 20, etc. to manage different -versions. - -# OPTIONS -**-a**, **--all**=*true*|*false* - Show all images (by default filter out the intermediate image layers). The default is *false*. - -**--digests**=*true*|*false* - Show image digests. The default is *false*. - -**-f**, **--filter**=[] - Filters the output based on these conditions: - - dangling=(true|false) - finds unused images - - label=<key> or label=<key>=<value> - - before=(<image-name>[:tag]|<image-id>|<image@digest>) - - since=(<image-name>[:tag]|<image-id>|<image@digest>) - -**--format**="*TEMPLATE*" - Pretty-print images using a Go template. - Valid placeholders: - .ID - Image ID - .Repository - Image repository - .Tag - Image tag - .Digest - Image digest - .CreatedSince - Elapsed time since the image was created. - .CreatedAt - Time when the image was created. - .Size - Image disk size. - -**--help** - Print usage statement - -**--no-trunc**=*true*|*false* - Don't truncate output. The default is *false*. - -**-q**, **--quiet**=*true*|*false* - Only show numeric IDs. The default is *false*.
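-For instance, a quick sketch that combines the filter and format options described above to list dangling images and then print a custom column layout: - - $ docker images --filter dangling=true - $ docker images --format "{{.ID}}: {{.Repository}}:{{.Tag}}" -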
- -# EXAMPLES - -## Listing the images - -To list the images in a local repository (not the registry) run: - - docker images - -The list contains the image repository name, a tag for the image, an -image ID, when it was created, and its virtual size. Columns: REPOSITORY, TAG, -IMAGE ID, CREATED, and SIZE. - -The `docker images` command takes an optional `[REPOSITORY[:TAG]]` argument -that restricts the list to images that match the argument. If you specify -`REPOSITORY` but no `TAG`, the `docker images` command lists all images in the -given repository. - - docker images java - -The `[REPOSITORY[:TAG]]` value must be an "exact match". This means that, for example, -`docker images jav` does not match the image `java`. - -If both `REPOSITORY` and `TAG` are provided, only images matching that -repository and tag are listed. To find all local images in the "java" -repository with tag "8" you can use: - - docker images java:8 - -To get a verbose list of images which contains all the intermediate images -used in builds, use **-a**: - - docker images -a - -Previously, the docker images command supported the --tree and --dot arguments, -which displayed different visualizations of the image data. Docker core removed -this functionality in the 1.7 version. If you liked this functionality, you can -still find it in the third-party dockviz tool: https://github.com/justone/dockviz. - -## Listing only the shortened image IDs - -List just the shortened image IDs. This can be useful for some automated -tools. - - docker images -q - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/man/docker-import.1.md b/man/docker-import.1.md deleted file mode 100644 index 43d65efe6a..0000000000 --- a/man/docker-import.1.md +++ /dev/null @@ -1,72 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-import - Create an empty filesystem image and import the contents of the tarball (.tar, .tar.gz, .tgz, .bzip, .tar.xz, .txz) into it, then optionally tag it. - -# SYNOPSIS -**docker import** -[**-c**|**--change**[=*[]*]] -[**-m**|**--message**[=*MESSAGE*]] -[**--help**] -file|URL|**-**[REPOSITORY[:TAG]] - -# OPTIONS -**-c**, **--change**=[] - Apply specified Dockerfile instructions while importing the image - Supported Dockerfile instructions: `CMD`|`ENTRYPOINT`|`ENV`|`EXPOSE`|`ONBUILD`|`USER`|`VOLUME`|`WORKDIR` - -**--help** - Print usage statement - -**-m**, **--message**="" - Set commit message for imported image - -# DESCRIPTION -Create a new filesystem image and import the contents of the tarball (`.tar`, -`.tar.gz`, `.tgz`, `.bzip`, `.tar.xz`, `.txz`) into it, then optionally tag it. - - -# EXAMPLES - -## Import from a remote location - - # docker import http://example.com/exampleimage.tgz example/imagerepo - -## Import from a local file - -Import to docker via pipe and stdin: - - # cat exampleimage.tgz | docker import - example/imagelocal - -Import with a commit message: - - # cat exampleimage.tgz | docker import --message "New image imported from tarball" - exampleimagelocal:new - -Import to a Docker image from a local file: - - # docker import /path/to/exampleimage.tgz - - -## Import from a local file and tag - -Import to docker via pipe and stdin: - - # cat exampleimageV2.tgz | docker import - example/imagelocal:V-2.0 - -## Import from a local directory - - # tar -c .
| docker import - exampleimagedir - -## Apply specified Dockerfile instructions while importing the image -This example sets the docker image ENV variable DEBUG to true by default. - - # tar -c . | docker import -c="ENV DEBUG true" - exampleimagedir - -# See also -**docker-export(1)** to export the contents of a filesystem as a tar archive to STDOUT. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/man/docker-info.1.md b/man/docker-info.1.md deleted file mode 100644 index 1d96b562eb..0000000000 --- a/man/docker-info.1.md +++ /dev/null @@ -1,146 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-info - Display system-wide information - -# SYNOPSIS -**docker info** -[**--help**] - - -# DESCRIPTION -This command displays system wide information regarding the Docker installation. -Information displayed includes the kernel version, number of containers and images. -The number of images shown is the number of unique images. The same image tagged -under different names is counted only once. - -Depending on the storage driver in use, additional information can be shown, such -as pool name, data file, metadata file, data space used, total data space, metadata -space used, and total metadata space. - -The data file is where the images are stored and the metadata file is where the -meta data regarding those images are stored. When run for the first time Docker -allocates a certain amount of data space and meta data space from the space -available on the volume where `/var/lib/docker` is mounted. - -# OPTIONS -**--help** - Print usage statement - -# EXAMPLES - -## Display Docker system information - -Here is a sample output for a daemon running on Ubuntu, using the overlay -storage driver: - - $ docker -D info - Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 - Images: 52 - Server Version: 1.12.0-dev - Storage Driver: overlay - Backing Filesystem: extfs - Logging Driver: json-file - Cgroup Driver: cgroupfs - Plugins: - Volume: local - Network: bridge null host overlay - Swarm: - NodeID: 0gac67oclbxq7 - IsManager: YES - Managers: 2 - Nodes: 2 - Runtimes: default - Default Runtime: default - Security Options: apparmor seccomp - Kernel Version: 4.4.0-21-generic - Operating System: Ubuntu 16.04 LTS - OSType: linux - Architecture: x86_64 - CPUs: 24 - Total Memory: 62.86 GiB - Name: docker - ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S - Docker Root Dir: /var/lib/docker - Debug mode (client): true - Debug mode (server): true - File Descriptors: 59 - Goroutines: 159 - System Time: 2016-04-26T10:04:06.14689342-04:00 - EventsListeners: 0 - Http Proxy: http://test:test@localhost:8080 - Https Proxy: https://test:test@localhost:8080 - No Proxy: localhost,127.0.0.1,docker-registry.somecorporation.com - Username: svendowideit - Registry: https://index.docker.io/v1/ - WARNING: No swap limit support - Labels: - storage=ssd - staging=true - Insecure registries: - myinsecurehost:5000 - 127.0.0.0/8 - -The global `-D` option tells all `docker` commands to output debug information. - -The example below shows the output for a daemon running on Red Hat Enterprise Linux, -using the devicemapper storage driver. 
As can be seen in the output, additional -information about the devicemapper storage driver is shown: - - $ docker info - Containers: 14 - Running: 3 - Paused: 1 - Stopped: 10 - Untagged Images: 52 - Server Version: 1.10.3 - Storage Driver: devicemapper - Pool Name: docker-202:2-25583803-pool - Pool Blocksize: 65.54 kB - Base Device Size: 10.74 GB - Backing Filesystem: xfs - Data file: /dev/loop0 - Metadata file: /dev/loop1 - Data Space Used: 1.68 GB - Data Space Total: 107.4 GB - Data Space Available: 7.548 GB - Metadata Space Used: 2.322 MB - Metadata Space Total: 2.147 GB - Metadata Space Available: 2.145 GB - Udev Sync Supported: true - Deferred Removal Enabled: false - Deferred Deletion Enabled: false - Deferred Deleted Device Count: 0 - Data loop file: /var/lib/docker/devicemapper/devicemapper/data - Metadata loop file: /var/lib/docker/devicemapper/devicemapper/metadata - Library Version: 1.02.107-RHEL7 (2015-12-01) - Execution Driver: native-0.2 - Logging Driver: json-file - Plugins: - Volume: local - Network: null host bridge - Kernel Version: 3.10.0-327.el7.x86_64 - Operating System: Red Hat Enterprise Linux Server 7.2 (Maipo) - OSType: linux - Architecture: x86_64 - CPUs: 1 - Total Memory: 991.7 MiB - Name: ip-172-30-0-91.ec2.internal - ID: I54V:OLXT:HVMM:TPKO:JPHQ:CQCD:JNLC:O3BZ:4ZVJ:43XJ:PFHZ:6N2S - Docker Root Dir: /var/lib/docker - Debug mode (client): false - Debug mode (server): false - Username: xyz - Registry: https://index.docker.io/v1/ - Insecure registries: - myinsecurehost:5000 - 127.0.0.0/8 - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/man/docker-inspect.1.md b/man/docker-inspect.1.md deleted file mode 100644 index 6d7a54ad3b..0000000000 --- a/man/docker-inspect.1.md +++ /dev/null @@ -1,322 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-inspect - Return low-level information on a container or image - -# SYNOPSIS -**docker inspect** -[**--help**] -[**-f**|**--format**[=*FORMAT*]] -[**-s**|**--size**] -[**--type**=*container*|*image*] -CONTAINER|IMAGE [CONTAINER|IMAGE...] - -# DESCRIPTION - -This displays all the information available in Docker for a given -container or image. By default, this will render all results in a JSON -array. If the container and image have the same name, this will return -container JSON for unspecified type. If a format is specified, the given -template will be executed for each result. - -# OPTIONS -**--help** - Print usage statement - -**-f**, **--format**="" - Format the output using the given Go template. - -**-s**, **--size** - Display total file sizes if the type is container. - -**--type**="*container*|*image*" - Return JSON for specified type, permissible values are "image" or "container" - -# EXAMPLES - -Get information about an image when image name conflicts with the container name, -e.g. both image and container are named rhel7: - - $ docker inspect --type=image rhel7 - [ - { - "Id": "fe01a428b9d9de35d29531e9994157978e8c48fa693e1bf1d221dffbbb67b170", - "Parent": "10acc31def5d6f249b548e01e8ffbaccfd61af0240c17315a7ad393d022c5ca2", - .... 
- } - ] - -## Getting information on a container - -To get information on a container use its ID or instance name: - - $ docker inspect d2cc496561d6 - [{ - "Id": "d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", - "Created": "2015-06-08T16:18:02.505155285Z", - "Path": "bash", - "Args": [], - "State": { - "Running": false, - "Paused": false, - "Restarting": false, - "OOMKilled": false, - "Dead": false, - "Pid": 0, - "ExitCode": 0, - "Error": "", - "StartedAt": "2015-06-08T16:18:03.643865954Z", - "FinishedAt": "2015-06-08T16:57:06.448552862Z" - }, - "Image": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", - "NetworkSettings": { - "Bridge": "", - "SandboxID": "6b4851d1903e16dd6a567bd526553a86664361f31036eaaa2f8454d6f4611f6f", - "HairpinMode": false, - "LinkLocalIPv6Address": "", - "LinkLocalIPv6PrefixLen": 0, - "Ports": {}, - "SandboxKey": "/var/run/docker/netns/6b4851d1903e", - "SecondaryIPAddresses": null, - "SecondaryIPv6Addresses": null, - "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", - "Gateway": "172.17.0.1", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "IPAddress": "172.17.0.2", - "IPPrefixLen": 16, - "IPv6Gateway": "", - "MacAddress": "02:42:ac:12:00:02", - "Networks": { - "bridge": { - "NetworkID": "7ea29fc1412292a2d7bba362f9253545fecdfa8ce9a6e37dd10ba8bee7129812", - "EndpointID": "7587b82f0dada3656fda26588aee72630c6fab1536d36e394b2bfbcf898c971d", - "Gateway": "172.17.0.1", - "IPAddress": "172.17.0.2", - "IPPrefixLen": 16, - "IPv6Gateway": "", - "GlobalIPv6Address": "", - "GlobalIPv6PrefixLen": 0, - "MacAddress": "02:42:ac:12:00:02" - } - } - - }, - "ResolvConfPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/resolv.conf", - "HostnamePath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hostname", - "HostsPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/hosts", - "LogPath": "/var/lib/docker/containers/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47/d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47-json.log", - "Name": "/adoring_wozniak", - "RestartCount": 0, - "Driver": "devicemapper", - "MountLabel": "", - "ProcessLabel": "", - "Mounts": [ - { - "Source": "/data", - "Destination": "/data", - "Mode": "ro,Z", - "RW": false - "Propagation": "" - } - ], - "AppArmorProfile": "", - "ExecIDs": null, - "HostConfig": { - "Binds": null, - "ContainerIDFile": "", - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 0, - "CpuPeriod": 0, - "CpusetCpus": "", - "CpusetMems": "", - "CpuQuota": 0, - "BlkioWeight": 0, - "OomKillDisable": false, - "Privileged": false, - "PortBindings": {}, - "Links": null, - "PublishAllPorts": false, - "Dns": null, - "DnsSearch": null, - "DnsOptions": null, - "ExtraHosts": null, - "VolumesFrom": null, - "Devices": [], - "NetworkMode": "bridge", - "IpcMode": "", - "PidMode": "", - "UTSMode": "", - "CapAdd": null, - "CapDrop": null, - "RestartPolicy": { - "Name": "no", - "MaximumRetryCount": 0 - }, - "SecurityOpt": null, - "ReadonlyRootfs": false, - "Ulimits": null, - "LogConfig": { - "Type": "json-file", - "Config": {} - }, - "CgroupParent": "" - }, - "GraphDriver": { - "Name": "devicemapper", - "Data": { - "DeviceId": "5", - "DeviceName": "docker-253:1-2763198-d2cc496561d6d520cbc0236b4ba88c362c446a7619992123f11c809cded25b47", - "DeviceSize": "171798691840" - } - }, - "Config": { - "Hostname": "d2cc496561d6", - 
"Domainname": "", - "User": "", - "AttachStdin": true, - "AttachStdout": true, - "AttachStderr": true, - "ExposedPorts": null, - "Tty": true, - "OpenStdin": true, - "StdinOnce": true, - "Env": null, - "Cmd": [ - "bash" - ], - "Image": "fedora", - "Volumes": null, - "VolumeDriver": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - "OnBuild": null, - "Labels": {}, - "Memory": 0, - "MemorySwap": 0, - "CpuShares": 0, - "Cpuset": "", - "StopSignal": "SIGTERM" - } - } - ] -## Getting the IP address of a container instance - -To get the IP address of a container use: - - $ docker inspect --format='{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' d2cc496561d6 - 172.17.0.2 - -## Listing all port bindings - -One can loop over arrays and maps in the results to produce simple text -output: - - $ docker inspect --format='{{range $p, $conf := .NetworkSettings.Ports}} \ - {{$p}} -> {{(index $conf 0).HostPort}} {{end}}' d2cc496561d6 - 80/tcp -> 80 - -You can get more information about how to write a Go template from: -https://golang.org/pkg/text/template/. - -## Getting size information on a container - - $ docker inspect -s d2cc496561d6 - [ - { - .... - "SizeRw": 0, - "SizeRootFs": 972, - .... - } - ] - -## Getting information on an image - -Use an image's ID or name (e.g., repository/name[:tag]) to get information -about the image: - - $ docker inspect ded7cd95e059 - [{ - "Id": "ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", - "Parent": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", - "Comment": "", - "Created": "2015-05-27T16:58:22.937503085Z", - "Container": "76cf7f67d83a7a047454b33007d03e32a8f474ad332c3a03c94537edd22b312b", - "ContainerConfig": { - "Hostname": "76cf7f67d83a", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "ExposedPorts": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "/bin/sh", - "-c", - "#(nop) ADD file:4be46382bcf2b095fcb9fe8334206b584eff60bb3fad8178cbd97697fcb2ea83 in /" - ], - "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", - "Volumes": null, - "VolumeDriver": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - "OnBuild": null, - "Labels": {} - }, - "DockerVersion": "1.6.0", - "Author": "Lokesh Mandvekar \u003clsm5@fedoraproject.org\u003e", - "Config": { - "Hostname": "76cf7f67d83a", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": false, - "AttachStderr": false, - "ExposedPorts": null, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": null, - "Image": "48ecf305d2cf7046c1f5f8fcbcd4994403173441d4a7f125b1bb0ceead9de731", - "Volumes": null, - "VolumeDriver": "", - "WorkingDir": "", - "Entrypoint": null, - "NetworkDisabled": false, - "MacAddress": "", - "OnBuild": null, - "Labels": {} - }, - "Architecture": "amd64", - "Os": "linux", - "Size": 186507296, - "VirtualSize": 186507296, - "GraphDriver": { - "Name": "devicemapper", - "Data": { - "DeviceId": "3", - "DeviceName": "docker-253:1-2763198-ded7cd95e059788f2586a51c275a4f151653779d6a7f4dad77c2bd34601d94e4", - "DeviceSize": "171798691840" - } - } - } - ] - -# HISTORY -April 2014, originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit -April 2015, updated by Qiang Huang -October 2015, updated by Sally O'Malley diff --git a/man/docker-kill.1.md b/man/docker-kill.1.md deleted file mode 100644 index 36cbdb90ea..0000000000 --- a/man/docker-kill.1.md +++ /dev/null @@ -1,28 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-kill - Kill a running container using SIGKILL or a specified signal - -# SYNOPSIS -**docker kill** -[**--help**] -[**-s**|**--signal**[=*"KILL"*]] -CONTAINER [CONTAINER...] - -# DESCRIPTION - -The main process inside each container specified will be sent SIGKILL, -or any signal specified with the **--signal** option. - -# OPTIONS -**--help** - Print usage statement - -**-s**, **--signal**="*KILL*" - Signal to send to the container - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) - based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit diff --git a/man/docker-load.1.md b/man/docker-load.1.md deleted file mode 100644 index b165173047..0000000000 --- a/man/docker-load.1.md +++ /dev/null @@ -1,56 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-load - Load an image from a tar archive or STDIN - -# SYNOPSIS -**docker load** -[**--help**] -[**-i**|**--input**[=*INPUT*]] -[**-q**|**--quiet**] - -# DESCRIPTION - -Loads a tarred repository from a file or the standard input stream. -Restores both images and tags. Writes the names or IDs of the imported -images to the standard output stream. - -# OPTIONS -**--help** - Print usage statement - -**-i**, **--input**="" - Read from a tar archive file, instead of STDIN. The tarball may be compressed with gzip, bzip, or xz. - -**-q**, **--quiet** - Suppress the load progress bar, but still output the imported images. - -# EXAMPLES - - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - $ docker load --input fedora.tar - # […] - Loaded image: fedora:rawhide - # […] - Loaded image: fedora:20 - # […] - $ docker images - REPOSITORY TAG IMAGE ID CREATED SIZE - busybox latest 769b9341d937 7 weeks ago 2.489 MB - fedora rawhide 0d20aec6529d 7 weeks ago 387 MB - fedora 20 58394af37342 7 weeks ago 385.5 MB - fedora heisenbug 58394af37342 7 weeks ago 385.5 MB - fedora latest 58394af37342 7 weeks ago 385.5 MB - -# See also -**docker-save(1)** to save one or more images to a tar archive (streamed to STDOUT by default). - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -July 2015 update by Mary Anthony -June 2016 update by Vincent Demeester diff --git a/man/docker-login.1.md b/man/docker-login.1.md deleted file mode 100644 index 6bb0355946..0000000000 --- a/man/docker-login.1.md +++ /dev/null @@ -1,53 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-login - Log in to a Docker registry. - -# SYNOPSIS -**docker login** -[**--help**] -[**-p**|**--password**[=*PASSWORD*]] -[**-u**|**--username**[=*USERNAME*]] -[SERVER] - -# DESCRIPTION -Log in to a Docker Registry located on the specified -`SERVER`. You can specify a URL or a `hostname` for the `SERVER` value. If you -do not specify a `SERVER`, the command uses Docker's public registry located at -`https://registry-1.docker.io/` by default. To get a username/password for Docker's public registry, create an account on Docker Hub.
- -`docker login` requires the user to use `sudo` or be `root`, except when: - -1. connecting to a remote daemon, such as a `docker-machine` provisioned `docker engine`. -2. the user is added to the `docker` group. This will impact the security of your system; the `docker` group is `root` equivalent. See [Docker Daemon Attack Surface](https://docs.docker.com/articles/security/#docker-daemon-attack-surface) for details. - -You can log into any public or private repository for which you have -credentials. When you log in, the command stores encoded credentials in -`$HOME/.docker/config.json` on Linux or `%USERPROFILE%/.docker/config.json` on Windows. - -# OPTIONS -**--help** - Print usage statement - -**-p**, **--password**="" - Password - -**-u**, **--username**="" - Username - -# EXAMPLES - -## Log in to a registry on your localhost - - # docker login localhost:8080 - -# See also -**docker-logout(1)** to log out from a Docker registry. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -April 2015, updated by Mary Anthony for v2 -November 2015, updated by Sally O'Malley diff --git a/man/docker-logout.1.md b/man/docker-logout.1.md deleted file mode 100644 index a8a4b7c3c0..0000000000 --- a/man/docker-logout.1.md +++ /dev/null @@ -1,32 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-logout - Log out from a Docker registry. - -# SYNOPSIS -**docker logout** -[SERVER] - -# DESCRIPTION -Log out of a Docker Registry located on the specified `SERVER`. You can -specify a URL or a `hostname` for the `SERVER` value. If you do not specify a -`SERVER`, the command attempts to log you out of Docker's public registry -located at `https://registry-1.docker.io/` by default. - -# OPTIONS -There are no available options. - -# EXAMPLES - -## Log out from a registry on your localhost - - # docker logout localhost:8080 - -# See also -**docker-login(1)** to log in to a Docker registry server. - -# HISTORY -June 2014, Originally compiled by Daniel, Dao Quang Minh (daniel at nitrous dot io) -July 2014, updated by Sven Dowideit -April 2015, updated by Mary Anthony for v2 diff --git a/man/docker-logs.1.md b/man/docker-logs.1.md deleted file mode 100644 index e70f796e28..0000000000 --- a/man/docker-logs.1.md +++ /dev/null @@ -1,71 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-logs - Fetch the logs of a container - -# SYNOPSIS -**docker logs** -[**-f**|**--follow**] -[**--help**] -[**--since**[=*SINCE*]] -[**-t**|**--timestamps**] -[**--tail**[=*"all"*]] -CONTAINER - -# DESCRIPTION -The **docker logs** command batch-retrieves whatever logs are present for -a container at the time of execution. This does not guarantee execution -order when combined with a `docker run` (i.e., your run may not have generated -any logs at the time you execute `docker logs`). - -The **docker logs --follow** command combines **docker logs** and -**docker attach**. It will first return all logs from the beginning and -then continue streaming new output from the container's stdout and stderr. - -**Warning**: This command works only for the **json-file** or **journald** -logging drivers. - -# OPTIONS -**--help** - Print usage statement - -**--details**=*true*|*false* - Show extra details provided to logs - -**-f**, **--follow**=*true*|*false* - Follow log output. The default is *false*.
- -**--since**="" - Show logs since timestamp - -**-t**, **--timestamps**=*true*|*false* - Show timestamps. The default is *false*. - -**--tail**="*all*" - Output the specified number of lines at the end of logs (defaults to all logs) - -The `--since` option can be Unix timestamps, date formatted timestamps, or Go -duration strings (e.g. `10m`, `1h30m`) computed relative to the client machine's -time. Supported formats for date formatted time stamps include RFC3339Nano, -RFC3339, `2006-01-02T15:04:05`, `2006-01-02T15:04:05.999999999`, -`2006-01-02Z07:00`, and `2006-01-02`. The local timezone on the client will be -used if you do not provide either a `Z` or a `+-00:00` timezone offset at the -end of the timestamp. When providing Unix timestamps enter -seconds[.nanoseconds], where seconds is the number of seconds that have elapsed -since January 1, 1970 (midnight UTC/GMT), not counting leap seconds (aka Unix -epoch or Unix time), and the optional .nanoseconds field is a fraction of a -second no more than nine digits long. You can combine the `--since` option with -either or both of the `--follow` or `--tail` options. - -The `docker logs --details` command will add on extra attributes, such as -environment variables and labels, provided to `--log-opt` when creating the -container. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -July 2014, updated by Sven Dowideit -April 2015, updated by Ahmet Alp Balkan -October 2015, updated by Mike Brown diff --git a/man/docker-network-connect.1.md b/man/docker-network-connect.1.md deleted file mode 100644 index d6ee159391..0000000000 --- a/man/docker-network-connect.1.md +++ /dev/null @@ -1,69 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCT 2015 -# NAME -docker-network-connect - connect a container to a network - -# SYNOPSIS -**docker network connect** -[**--help**] -NETWORK CONTAINER - -# DESCRIPTION - -Connects a container to a network. You can connect a container by name -or by ID. Once connected, the container can communicate with other containers in -the same network. - -```bash -$ docker network connect multi-host-network container1 -``` - -You can also use the `docker run --net=` option to start a container and immediately connect it to a network. - -```bash -$ docker run -itd --net=multi-host-network --ip 172.20.88.22 --ip6 2001:db8::8822 busybox -``` - -You can pause, restart, and stop containers that are connected to a network. -Paused containers remain connected and can be revealed by a `network inspect`. -When the container is stopped, it does not appear on the network until you restart -it. - -If specified, the container's IP address(es) is reapplied when a stopped -container is restarted. If the IP address is no longer available, the container -fails to start. One way to guarantee that the IP address is available is -to specify an `--ip-range` when creating the network, and choose the static IP -address(es) from outside that range. This ensures that the IP address is not -given to another container while this container is not on the network. - -```bash -$ docker network create --subnet 172.20.0.0/16 --ip-range 172.20.240.0/20 multi-host-network -``` - -```bash -$ docker network connect --ip 172.20.128.2 multi-host-network container2 -``` - -To verify the container is connected, use the `docker network inspect` command. Use `docker network disconnect` to remove a container from the network. 
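-For example, reusing the network and container names from above, a quick verification followed by detaching the container: - -```bash -$ docker network inspect multi-host-network -$ docker network disconnect multi-host-network container2 -```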
- -Once connected to a network, containers can communicate using only another -container's IP address or name. For `overlay` networks or custom plugins that -support multi-host connectivity, containers connected to the same multi-host -network but launched from different Engines can also communicate in this way. - -You can connect a container to one or more networks. The networks need not be the same type. For example, you can connect a single container to both bridge and overlay networks. - - -# OPTIONS -**NETWORK** - Specify network name - -**CONTAINER** - Specify container name - -**--help** - Print usage statement - -# HISTORY -OCT 2015, created by Mary Anthony diff --git a/man/docker-network-create.1.md b/man/docker-network-create.1.md deleted file mode 100644 index 3000bb2135..0000000000 --- a/man/docker-network-create.1.md +++ /dev/null @@ -1,183 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCT 2015 -# NAME -docker-network-create - create a new network - -# SYNOPSIS -**docker network create** -[**--aux-address**=*map[]*] -[**-d**|**--driver**=*DRIVER*] -[**--gateway**=*[]*] -[**--help**] -[**--internal**] -[**--ip-range**=*[]*] -[**--ipam-driver**=*default*] -[**--ipam-opt**=*map[]*] -[**--ipv6**] -[**--label**[=*[]*]] -[**-o**|**--opt**=*map[]*] -[**--subnet**=*[]*] -NETWORK-NAME - -# DESCRIPTION - -Creates a new network. The `DRIVER` accepts `bridge` or `overlay`, which are the -built-in network drivers. If you have installed a third-party or your own custom -network driver, you can specify that `DRIVER` here also. If you don't specify the -`--driver` option, the command automatically creates a `bridge` network for you. -When you install Docker Engine, it creates a `bridge` network automatically. This -network corresponds to the `docker0` bridge that Engine has traditionally relied -on. When you launch a new container with `docker run`, it automatically connects to -this bridge network. You cannot remove this default bridge network, but you can -create new ones using the `network create` command. - -```bash -$ docker network create -d bridge my-bridge-network -``` - -Bridge networks are isolated networks on a single Engine installation. If you -want to create a network that spans multiple Docker hosts each running an -Engine, you must create an `overlay` network. Unlike `bridge` networks, overlay -networks require some pre-existing conditions before you can create one. These -conditions are: - -* Access to a key-value store. Engine supports Consul, Etcd, and Zookeeper (Distributed store) key-value stores. -* A cluster of hosts with connectivity to the key-value store. -* A properly configured Engine `daemon` on each host in the cluster. - -The `dockerd` options that support the `overlay` network are: - -* `--cluster-store` -* `--cluster-store-opt` -* `--cluster-advertise` - -To read more about these options and how to configure them, see ["*Get started -with multi-host -network*"](https://docs.docker.com/engine/userguide/networking/get-started-overlay/). - -It is also a good idea, though not required, to install Docker Swarm to -manage the cluster that makes up your network. Swarm provides sophisticated -discovery and server management that can assist your implementation. - -Once you have prepared the `overlay` network prerequisites, you simply choose a -Docker host in the cluster and issue the following to create the network: - -```bash -$ docker network create -d overlay my-multihost-network -``` - -Network names must be unique.
The Docker daemon attempts to identify naming -conflicts, but this is not guaranteed. It is the user's responsibility to avoid -name conflicts. - -## Connect containers - -When you start a container, use the `--net` flag to connect it to a network. -This adds the `busybox` container to the `mynet` network. - -```bash -$ docker run -itd --net=mynet busybox -``` - -If you want to add a container to a network after the container is already -running, use the `docker network connect` subcommand. - -You can connect multiple containers to the same network. Once connected, the -containers can communicate using only another container's IP address or name. -For `overlay` networks or custom plugins that support multi-host connectivity, -containers connected to the same multi-host network but launched from different -Engines can also communicate in this way. - -You can disconnect a container from a network using the `docker network -disconnect` command. - -## Specifying advanced options - -When you create a network, Engine creates a non-overlapping subnetwork for the -network by default. This subnetwork is not a subdivision of an existing network. -It is purely for IP-addressing purposes. You can override this default and -specify subnetwork values directly using the `--subnet` option. On a -`bridge` network you can only create a single subnet: - -```bash -$ docker network create -d bridge --subnet=192.168.0.0/16 br0 -``` - -Additionally, you can also specify the `--gateway`, `--ip-range`, and `--aux-address` -options. - -```bash -$ docker network create \ - --driver=bridge \ - --subnet=172.28.0.0/16 \ - --ip-range=172.28.5.0/24 \ - --gateway=172.28.5.254 \ - br0 -``` - -If you omit the `--gateway` flag, the Engine selects one for you from inside a -preferred pool. For `overlay` networks and for network driver plugins that -support it, you can create multiple subnetworks. - -```bash -$ docker network create -d overlay \ - --subnet=192.168.0.0/16 \ - --subnet=192.170.0.0/16 \ - --gateway=192.168.0.100 \ - --gateway=192.170.0.100 \ - --ip-range=192.168.1.0/24 \ - --aux-address="my-router=192.168.1.5" --aux-address="my-switch=192.168.1.6" \ - --aux-address="my-printer=192.170.1.5" --aux-address="my-nas=192.170.1.6" \ - my-multihost-network -``` - -Be sure that your subnetworks do not overlap. If they do, the network creation -fails and Engine returns an error. - -### Network internal mode - -By default, when you connect a container to an `overlay` network, Docker also -connects a bridge network to it to provide external connectivity. If you want -to create an externally isolated `overlay` network, you can specify the -`--internal` option. - -# OPTIONS -**--aux-address**=map[] - Auxiliary IPv4 or IPv6 addresses used by the network driver - -**-d**, **--driver**=*DRIVER* - Driver to manage the network (`bridge` or `overlay`). The default is *bridge*.
- -**--gateway**=[] - IPv4 or IPv6 Gateway for the master subnet - -**--help** - Print usage - -**--internal** - Restrict external access to the network - -**--ip-range**=[] - Allocate container ip from a sub-range - -**--ipam-driver**=*default* - IP Address Management Driver - -**--ipam-opt**=map[] - Set custom IPAM driver options - -**--ipv6** - Enable IPv6 networking - -**--label**=*label* - Set metadata for a network - -**-o**, **--opt**=map[] - Set custom driver options - -**--subnet**=[] - Subnet in CIDR format that represents a network segment - -# HISTORY -OCT 2015, created by Mary Anthony diff --git a/man/docker-network-disconnect.1.md b/man/docker-network-disconnect.1.md deleted file mode 100644 index 09bcac51b0..0000000000 --- a/man/docker-network-disconnect.1.md +++ /dev/null @@ -1,36 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCT 2015 -# NAME -docker-network-disconnect - disconnect a container from a network - -# SYNOPSIS -**docker network disconnect** -[**--help**] -[**--force**] -NETWORK CONTAINER - -# DESCRIPTION - -Disconnects a container from a network. - -```bash - $ docker network disconnect multi-host-network container1 -``` - - -# OPTIONS -**NETWORK** - Specify network name - -**CONTAINER** - Specify container name - -**--force** - Force the container to disconnect from a network - -**--help** - Print usage statement - -# HISTORY -OCT 2015, created by Mary Anthony diff --git a/man/docker-network-inspect.1.md b/man/docker-network-inspect.1.md deleted file mode 100644 index da4e7c3550..0000000000 --- a/man/docker-network-inspect.1.md +++ /dev/null @@ -1,112 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% OCT 2015 -# NAME -docker-network-inspect - inspect a network - -# SYNOPSIS -**docker network inspect** -[**-f**|**--format**[=*FORMAT*]] -[**--help**] -NETWORK [NETWORK...] - -# DESCRIPTION - -Returns information about one or more networks. By default, this command renders all results in a JSON object. For example, if you connect two containers to the default `bridge` network: - -```bash -$ sudo docker run -itd --name=container1 busybox -f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27 - -$ sudo docker run -itd --name=container2 busybox -bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727 -``` - -The `network inspect` command shows the containers, by id, in its -results. You can specify an alternate format to execute a given -template for each result. Go's -[text/template](http://golang.org/pkg/text/template/) package -describes all the details of the format. 
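-For instance, a minimal sketch that uses the `--format` option from the synopsis above to pull a single field out of the results: - -```bash -$ docker network inspect --format='{{.Driver}}' bridge -```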
-
-```bash
-$ sudo docker network inspect bridge
-[
-    {
-        "Name": "bridge",
-        "Id": "b2b1a2cba717161d984383fd68218cf70bbbd17d328496885f7c921333228b0f",
-        "Scope": "local",
-        "Driver": "bridge",
-        "IPAM": {
-            "Driver": "default",
-            "Config": [
-                {
-                    "Subnet": "172.17.42.1/16",
-                    "Gateway": "172.17.42.1"
-                }
-            ]
-        },
-        "Internal": false,
-        "Containers": {
-            "bda12f8922785d1f160be70736f26c1e331ab8aaf8ed8d56728508f2e2fd4727": {
-                "Name": "container2",
-                "EndpointID": "0aebb8fcd2b282abe1365979536f21ee4ceaf3ed56177c628eae9f706e00e019",
-                "MacAddress": "02:42:ac:11:00:02",
-                "IPv4Address": "172.17.0.2/16",
-                "IPv6Address": ""
-            },
-            "f2870c98fd504370fb86e59f32cd0753b1ac9b69b7d80566ffc7192a82b3ed27": {
-                "Name": "container1",
-                "EndpointID": "a00676d9c91a96bbe5bcfb34f705387a33d7cc365bac1a29e4e9728df92d10ad",
-                "MacAddress": "02:42:ac:11:00:01",
-                "IPv4Address": "172.17.0.1/16",
-                "IPv6Address": ""
-            }
-        },
-        "Options": {
-            "com.docker.network.bridge.default_bridge": "true",
-            "com.docker.network.bridge.enable_icc": "true",
-            "com.docker.network.bridge.enable_ip_masquerade": "true",
-            "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
-            "com.docker.network.bridge.name": "docker0",
-            "com.docker.network.driver.mtu": "1500"
-        }
-    }
-]
-```
-
-The following returns the information about a user-defined network:
-
-```bash
-$ docker network create simple-network
-69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a
-$ docker network inspect simple-network
-[
-    {
-        "Name": "simple-network",
-        "Id": "69568e6336d8c96bbf57869030919f7c69524f71183b44d80948bd3927c87f6a",
-        "Scope": "local",
-        "Driver": "bridge",
-        "IPAM": {
-            "Driver": "default",
-            "Config": [
-                {
-                    "Subnet": "172.22.0.0/16",
-                    "Gateway": "172.22.0.1/16"
-                }
-            ]
-        },
-        "Containers": {},
-        "Options": {}
-    }
]
-```
-
-# OPTIONS
-**-f**, **--format**=""
-  Format the output using the given Go template.
-
-**--help**
-  Print usage statement
-
-# HISTORY
-OCT 2015, created by Mary Anthony
diff --git a/man/docker-network-ls.1.md b/man/docker-network-ls.1.md
deleted file mode 100644
index bfd87329cc..0000000000
--- a/man/docker-network-ls.1.md
+++ /dev/null
@@ -1,175 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% OCT 2015
-# NAME
-docker-network-ls - list networks
-
-# SYNOPSIS
-**docker network ls**
-[**-f**|**--filter**[=*[]*]]
-[**--no-trunc**[=*true*|*false*]]
-[**-q**|**--quiet**[=*true*|*false*]]
-[**--help**]
-
-# DESCRIPTION
-
-Lists all the networks the Engine `daemon` knows about. This includes the
-networks that span across multiple hosts in a cluster, for example:
-
-```bash
-  $ docker network ls
-  NETWORK ID          NAME                DRIVER
-  7fca4eb8c647        bridge              bridge
-  9f904ee27bf5        none                null
-  cf03ee007fb4        host                host
-  78b03ee04fc4        multi-host          overlay
-```
-
-Use the `--no-trunc` option to display the full network ID:
-
-```bash
-$ docker network ls --no-trunc
-NETWORK ID                                                         NAME                DRIVER
-18a2866682b85619a026c81b98a5e375bd33e1b0936a26cc497c283d27bae9b3   none                null
-c288470c46f6c8949c5f7e5099b5b7947b07eabe8d9a27d79a9cbf111adcbf47   host                host
-7b369448dccbf865d397c8d2be0cda7cf7edc6b0945f77d2529912ae917a0185   bridge              bridge
-95e74588f40db048e86320c6526440c504650a1ff3e9f7d60a497c4d2163e5bd   foo                 bridge
-63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161   dev                 bridge
-```
-
-## Filtering
-
-The filtering flag (`-f` or `--filter`) format is a `key=value` pair. If there
-is more than one filter, then pass multiple flags (e.g. `--filter "foo=bar" --filter "bif=baz"`).
-Multiple filter flags are combined as an `OR` filter. For example,
-`-f type=custom -f type=builtin` returns both `custom` and `builtin` networks.
-
-The currently supported filters are:
-
-* driver
-* id (network's id)
-* label (`label=<key>` or `label=<key>=<value>`)
-* name (network's name)
-* type (custom|builtin)
-
-#### Driver
-
-The `driver` filter matches networks based on their driver.
-
-The following example matches networks with the `bridge` driver:
-
-```bash
-$ docker network ls --filter driver=bridge
-NETWORK ID          NAME                DRIVER
-db9db329f835        test1               bridge
-f6e212da9dfd        test2               bridge
-```
-
-#### ID
-
-The `id` filter matches on all or part of a network's ID.
-
-The following filter matches all networks with an ID containing the
-`63d1ff1f77b0...` string.
-
-```bash
-$ docker network ls --filter id=63d1ff1f77b07ca51070a8c227e962238358bd310bde1529cf62e6c307ade161
-NETWORK ID          NAME                DRIVER
-63d1ff1f77b0        dev                 bridge
-```
-
-You can also filter for a substring in an ID as this shows:
-
-```bash
-$ docker network ls --filter id=95e74588f40d
-NETWORK ID          NAME                DRIVER
-95e74588f40d        foo                 bridge
-
-$ docker network ls --filter id=95e
-NETWORK ID          NAME                DRIVER
-95e74588f40d        foo                 bridge
-```
-
-#### Label
-
-The `label` filter matches networks based on the presence of a `label` alone or a `label` and a
-value.
-
-The following filter matches networks with the `usage` label regardless of its value.
-
-```bash
-$ docker network ls -f "label=usage"
-NETWORK ID          NAME                DRIVER
-db9db329f835        test1               bridge
-f6e212da9dfd        test2               bridge
-```
-
-The following filter matches networks with the `usage` label with the `prod` value.
-
-```bash
-$ docker network ls -f "label=usage=prod"
-NETWORK ID          NAME                DRIVER
-f6e212da9dfd        test2               bridge
-```
-
-#### Name
-
-The `name` filter matches on all or part of a network's name.
-
-The following filter matches all networks with a name containing the `foobar` string.
-
-```bash
-$ docker network ls --filter name=foobar
-NETWORK ID          NAME                DRIVER
-06e7eef0a170        foobar              bridge
-```
-
-You can also filter for a substring in a name as this shows:
-
-```bash
-$ docker network ls --filter name=foo
-NETWORK ID          NAME                DRIVER
-95e74588f40d        foo                 bridge
-06e7eef0a170        foobar              bridge
-```
-
-#### Type
-
-The `type` filter supports two values: `builtin` displays predefined networks
-(`bridge`, `none`, `host`), whereas `custom` displays user-defined networks.
-
-The following filter matches all user-defined networks:
-
-```bash
-$ docker network ls --filter type=custom
-NETWORK ID          NAME                DRIVER
-95e74588f40d        foo                 bridge
-63d1ff1f77b0        dev                 bridge
-```
-
-This flag allows for batch cleanup. For example, use this filter
-to delete all user-defined networks:
-
-```bash
-$ docker network rm `docker network ls --filter type=custom -q`
-```
-
-A warning will be issued when trying to remove a network that has containers
-attached.
-
-# OPTIONS
-
-**-f**, **--filter**=*[]*
-  Filter output based on conditions provided.
-
-**--no-trunc**=*true*|*false*
-  Do not truncate the output
-
-**-q**, **--quiet**=*true*|*false*
-  Only display network IDs
-
-**--help**
-  Print usage statement
-
-# HISTORY
-OCT 2015, created by Mary Anthony
diff --git a/man/docker-network-rm.1.md b/man/docker-network-rm.1.md
deleted file mode 100644
index c094a15286..0000000000
--- a/man/docker-network-rm.1.md
+++ /dev/null
@@ -1,43 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% OCT 2015
-# NAME
-docker-network-rm - remove one or more networks
-
-# SYNOPSIS
-**docker network rm**
-[**--help**]
-NETWORK [NETWORK...]
-
-# DESCRIPTION
-
-Removes one or more networks by name or identifier. To remove a network,
-you must first disconnect any containers connected to it.
-To remove the network named 'my-network':
-
-```bash
-  $ docker network rm my-network
-```
-
-To delete multiple networks in a single `docker network rm` command, provide
-multiple network names or ids. The following example deletes a network with id
-`3695c422697f` and a network named `my-network`:
-
-```bash
-  $ docker network rm 3695c422697f my-network
-```
-
-When you specify multiple networks, the command attempts to delete each in turn.
-If the deletion of one network fails, the command continues to the next network
-in the list and tries to delete it. The command reports success or failure for each
-deletion.
-
-# OPTIONS
-**NETWORK**
-  Specify network name or id
-
-**--help**
-  Print usage statement
-
-# HISTORY
-OCT 2015, created by Mary Anthony
diff --git a/man/docker-pause.1.md b/man/docker-pause.1.md
deleted file mode 100644
index 5d2267af62..0000000000
--- a/man/docker-pause.1.md
+++ /dev/null
@@ -1,30 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-pause - Pause all processes within a container
-
-# SYNOPSIS
-**docker pause**
-CONTAINER [CONTAINER...]
-
-# DESCRIPTION
-
-The `docker pause` command uses the cgroups freezer to suspend all processes in
-a container. Traditionally, when suspending a process, the `SIGSTOP` signal is
-used, which is observable by the process being suspended. With the cgroups freezer,
-the process is unaware of, and unable to capture, the fact that it is being
-suspended and subsequently resumed.
-
-See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt) for
-further details.
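-
-A minimal usage sketch (the container name `my-container` is illustrative):
-
-```bash
-# Suspend all processes in the container, then resume them.
-$ docker pause my-container
-$ docker unpause my-container
-```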
-
-# OPTIONS
-There are no available options.
-
-# See also
-**docker-unpause(1)** to unpause all processes within a container.
-
-# HISTORY
-June 2014, updated by Sven Dowideit
diff --git a/man/docker-port.1.md b/man/docker-port.1.md
deleted file mode 100644
index 83e9cf93b6..0000000000
--- a/man/docker-port.1.md
+++ /dev/null
@@ -1,47 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-port - List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT
-
-# SYNOPSIS
-**docker port**
-[**--help**]
-CONTAINER [PRIVATE_PORT[/PROTO]]
-
-# DESCRIPTION
-List port mappings for the CONTAINER, or look up the public-facing port that is NAT-ed to the PRIVATE_PORT
-
-# OPTIONS
-**--help**
-  Print usage statement
-
-# EXAMPLES
-
-    # docker ps
-    CONTAINER ID        IMAGE               COMMAND             CREATED             STATUS              PORTS                                            NAMES
-    b650456536c7        busybox:latest      top                 54 minutes ago      Up 54 minutes       0.0.0.0:1234->9876/tcp, 0.0.0.0:4321->7890/tcp   test
-
-## Find out all the ports mapped
-
-    # docker port test
-    7890/tcp -> 0.0.0.0:4321
-    9876/tcp -> 0.0.0.0:1234
-
-## Find out a specific mapping
-
-    # docker port test 7890/tcp
-    0.0.0.0:4321
-
-    # docker port test 7890
-    0.0.0.0:4321
-
-## An example showing an error for a non-existent mapping
-
-    # docker port test 7890/udp
-    2014/06/24 11:53:36 Error: No public port '7890/udp' published for test
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-June 2014, updated by Sven Dowideit
-November 2014, updated by Sven Dowideit
diff --git a/man/docker-ps.1.md b/man/docker-ps.1.md
deleted file mode 100644
index 14c770121f..0000000000
--- a/man/docker-ps.1.md
+++ /dev/null
@@ -1,142 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% FEBRUARY 2015
-# NAME
-docker-ps - List containers
-
-# SYNOPSIS
-**docker ps**
-[**-a**|**--all**]
-[**-f**|**--filter**[=*[]*]]
-[**--format**=*"TEMPLATE"*]
-[**--help**]
-[**-l**|**--latest**]
-[**-n**[=*-1*]]
-[**--no-trunc**]
-[**-q**|**--quiet**]
-[**-s**|**--size**]
-
-# DESCRIPTION
-
-List the containers on the host. By default this shows only
-the running containers.
-
-# OPTIONS
-**-a**, **--all**=*true*|*false*
-  Show all containers. Only running containers are shown by default. The default is *false*.
-
-**-f**, **--filter**=[]
-  Filter output based on these conditions:
-  - exited=<int> an exit code of <int>
-  - label=<key> or label=<key>=<value>
-  - status=(created|restarting|running|paused|exited|dead)
-  - name=<string> a container's name
-  - id=<ID> a container's ID
-  - before=(<container-name>|<container-id>)
-  - since=(<container-name>|<container-id>)
-  - ancestor=(<image-name>[:tag]|<image-id>|<image@digest>) - containers created from an image or a descendant.
-  - volume=(<volume-name>|<mount-point-destination>)
-  - network=(<network-name>|<network-id>) - containers connected to the provided network
-
-**--format**="*TEMPLATE*"
-  Pretty-print containers using a Go template.
-  Valid placeholders:
-     .ID - Container ID
-     .Image - Image ID
-     .Command - Quoted command
-     .CreatedAt - Time when the container was created.
-     .RunningFor - Elapsed time since the container was started.
-     .Ports - Exposed ports.
-     .Status - Container status.
-     .Size - Container disk size.
-     .Names - Container names.
-     .Labels - All labels assigned to the container.
-     .Label - Value of a specific label for this container. For example `{{.Label "com.docker.swarm.cpu"}}`
-     .Mounts - Names of the volumes mounted in this container.
-
-**--help**
-  Print usage statement
-
-**-l**, **--latest**=*true*|*false*
-  Show only the latest created container (includes all states). The default is *false*.
-
-**-n**=*-1*
-  Show n last created containers (includes all states).
-
-**--no-trunc**=*true*|*false*
-  Don't truncate output. The default is *false*.
-
-**-q**, **--quiet**=*true*|*false*
-  Only display numeric IDs. The default is *false*.
-
-**-s**, **--size**=*true*|*false*
-  Display total file sizes. The default is *false*.
-
-# EXAMPLES
-# Display all containers, including non-running
-
-    # docker ps -a
-    CONTAINER ID        IMAGE                 COMMAND                CREATED             STATUS      PORTS    NAMES
-    a87ecb4f327c        fedora:20             /bin/sh -c #(nop) MA   20 minutes ago      Exit 0               desperate_brattain
-    01946d9d34d8        vpavlin/rhel7:latest  /bin/sh -c #(nop) MA   33 minutes ago      Exit 0               thirsty_bell
-    c1d3b0166030        acffc0358b9e          /bin/sh -c yum -y up   2 weeks ago         Exit 1               determined_torvalds
-    41d50ecd2f57        fedora:20             /bin/sh -c #(nop) MA   2 weeks ago         Exit 0               drunk_pike
-
-# Display only IDs of all containers, including non-running
-
-    # docker ps -a -q
-    a87ecb4f327c
-    01946d9d34d8
-    c1d3b0166030
-    41d50ecd2f57
-
-# Display only IDs of all containers that have the name `determined_torvalds`
-
-    # docker ps -a -q --filter=name=determined_torvalds
-    c1d3b0166030
-
-# Display containers with their commands
-
-    # docker ps --format "{{.ID}}: {{.Command}}"
-    a87ecb4f327c: /bin/sh -c #(nop) MA
-    01946d9d34d8: /bin/sh -c #(nop) MA
-    c1d3b0166030: /bin/sh -c yum -y up
-    41d50ecd2f57: /bin/sh -c #(nop) MA
-
-# Display containers with their labels in a table
-
-    # docker ps --format "table {{.ID}}\t{{.Labels}}"
-    CONTAINER ID        LABELS
-    a87ecb4f327c        com.docker.swarm.node=ubuntu,com.docker.swarm.storage=ssd
-    01946d9d34d8
-    c1d3b0166030        com.docker.swarm.node=debian,com.docker.swarm.cpu=6
-    41d50ecd2f57        com.docker.swarm.node=fedora,com.docker.swarm.cpu=3,com.docker.swarm.storage=ssd
-
-# Display containers with their node label in a table
-
-    # docker ps --format 'table {{.ID}}\t{{(.Label "com.docker.swarm.node")}}'
-    CONTAINER ID        NODE
-    a87ecb4f327c        ubuntu
-    01946d9d34d8
-    c1d3b0166030        debian
-    41d50ecd2f57        fedora
-
-# Display containers with `remote-volume` mounted
-
-    $ docker ps --filter volume=remote-volume --format "table {{.ID}}\t{{.Mounts}}"
-    CONTAINER ID        MOUNTS
-    9c3527ed70ce        remote-volume
-
-# Display containers with a volume mounted in `/data`
-
-    $ docker ps --filter volume=/data --format "table {{.ID}}\t{{.Mounts}}"
-    CONTAINER ID        MOUNTS
-    9c3527ed70ce        remote-volume
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.com source material and internal work.
-June 2014, updated by Sven Dowideit
-August 2014, updated by Sven Dowideit
-November 2014, updated by Sven Dowideit
-February 2015, updated by André Martins
diff --git a/man/docker-pull.1.md b/man/docker-pull.1.md
deleted file mode 100644
index c61d005308..0000000000
--- a/man/docker-pull.1.md
+++ /dev/null
@@ -1,220 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-pull - Pull an image or a repository from a registry
-
-# SYNOPSIS
-**docker pull**
-[**-a**|**--all-tags**]
-[**--help**]
-NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG]
-
-# DESCRIPTION
-
-This command pulls down an image or a repository from a registry. If
-there is more than one image for a repository (e.g., fedora), then all
-images for that repository name can be pulled down, including any tags
-(see the option **-a** or **--all-tags**).
-
-If you do not specify a `REGISTRY_HOST`, the command uses Docker's public
-registry located at `registry-1.docker.io` by default.
-
-# OPTIONS
-**-a**, **--all-tags**=*true*|*false*
-  Download all tagged images in the repository. The default is *false*.
-
-**--help**
-  Print usage statement
-
-# EXAMPLES
-
-### Pull an image from Docker Hub
-
-To download a particular image, or set of images (i.e., a repository), use
-`docker pull`. If no tag is provided, Docker Engine uses the `:latest` tag as a
-default. This command pulls the `debian:latest` image:
-
-    $ docker pull debian
-
-    Using default tag: latest
-    latest: Pulling from library/debian
-    fdd5d7827f33: Pull complete
-    a3ed95caeb02: Pull complete
-    Digest: sha256:e7d38b3517548a1c71e41bffe9c8ae6d6d29546ce46bf62159837aad072c90aa
-    Status: Downloaded newer image for debian:latest
-
-Docker images can consist of multiple layers. In the example above, the image
-consists of two layers; `fdd5d7827f33` and `a3ed95caeb02`.
-
-Layers can be reused by images. For example, the `debian:jessie` image shares
-both layers with `debian:latest`. Pulling the `debian:jessie` image therefore
-only pulls its metadata, but not its layers, because all layers are already
-present locally:
-
-    $ docker pull debian:jessie
-
-    jessie: Pulling from library/debian
-    fdd5d7827f33: Already exists
-    a3ed95caeb02: Already exists
-    Digest: sha256:a9c958be96d7d40df920e7041608f2f017af81800ca5ad23e327bc402626b58e
-    Status: Downloaded newer image for debian:jessie
-
-To see which images are present locally, use the **docker-images(1)**
-command:
-
-    $ docker images
-
-    REPOSITORY   TAG      IMAGE ID       CREATED      SIZE
-    debian       jessie   f50f9524513f   5 days ago   125.1 MB
-    debian       latest   f50f9524513f   5 days ago   125.1 MB
-
-Docker uses a content-addressable image store, and the image ID is a SHA256
-digest covering the image's configuration and layers. In the example above,
-`debian:jessie` and `debian:latest` have the same image ID because they are
-actually the *same* image tagged with different names. Because they are the
-same image, their layers are stored only once and do not consume extra disk
-space.
-
-For more information about images, layers, and the content-addressable store,
-refer to [understand images, containers, and storage drivers](https://docs.docker.com/engine/userguide/storagedriver/imagesandcontainers/)
-in the online documentation.
-
-
-## Pull an image by digest (immutable identifier)
-
-So far, you've pulled images by their name (and "tag"). Using names and tags is
-a convenient way to work with images. When using tags, you can `docker pull` an
-image again to make sure you have the most up-to-date version of that image.
-For example, `docker pull ubuntu:14.04` pulls the latest version of the Ubuntu
-14.04 image.
-
-In some cases you don't want images to be updated to newer versions, but prefer
-to use a fixed version of an image. Docker enables you to pull an image by its
-*digest*. When pulling an image by digest, you specify *exactly* which version
-of an image to pull. Doing so allows you to "pin" an image to that version,
-and guarantee that the image you're using is always the same.
-
-To find the digest of an image, pull the image first. Let's pull the latest
-`ubuntu:14.04` image from Docker Hub:
-
-    $ docker pull ubuntu:14.04
-
-    14.04: Pulling from library/ubuntu
-    5a132a7e7af1: Pull complete
-    fd2731e4c50c: Pull complete
-    28a2f68d1120: Pull complete
-    a3ed95caeb02: Pull complete
-    Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
-    Status: Downloaded newer image for ubuntu:14.04
-
-Docker prints the digest of the image after the pull has finished. In the example
-above, the digest of the image is:
-
-    sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
-
-Docker also prints the digest of an image when *pushing* to a registry. This
-may be useful if you want to pin to a version of the image you just pushed.
-
-A digest takes the place of the tag when pulling an image. For example, to
-pull the above image by digest, run the following command:
-
-    $ docker pull ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
-
-    sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2: Pulling from library/ubuntu
-    5a132a7e7af1: Already exists
-    fd2731e4c50c: Already exists
-    28a2f68d1120: Already exists
-    a3ed95caeb02: Already exists
-    Digest: sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
-    Status: Downloaded newer image for ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
-
-A digest can also be used in the `FROM` instruction of a Dockerfile, for example:
-
-    FROM ubuntu@sha256:45b23dee08af5e43a7fea6c4cf9c25ccf269ee113168c19722f87876677c5cb2
-    MAINTAINER some maintainer
-
-> **Note**: Using this feature "pins" an image to a specific version in time.
-> Docker will therefore not pull updated versions of an image, which may include
-> security updates. If you want to pull an updated image, you need to change the
-> digest accordingly.
-
-## Pulling from a different registry
-
-By default, `docker pull` pulls images from Docker Hub. It is also possible to
-manually specify the path of a registry to pull from. For example, if you have
-set up a local registry, you can specify its path to pull from it. A registry
-path is similar to a URL, but does not contain a protocol specifier (`https://`).
-
-The following command pulls the `testing/test-image` image from a local registry
-listening on port 5000 (`myregistry.local:5000`):
-
-    $ docker pull myregistry.local:5000/testing/test-image
-
-Registry credentials are managed by **docker-login(1)**.
-
-Docker uses the `https://` protocol to communicate with a registry, unless the
-registry is allowed to be accessed over an insecure connection. Refer to the
-[insecure registries](https://docs.docker.com/engine/reference/commandline/daemon/#insecure-registries)
-section in the online documentation for more information.
-
-
-## Pull a repository with multiple images
-
-By default, `docker pull` pulls a *single* image from the registry. A repository
-can contain multiple images. To pull all images from a repository, provide the
-`-a` (or `--all-tags`) option when using `docker pull`.
-
-This command pulls all images from the `fedora` repository:
-
-    $ docker pull --all-tags fedora
-
-    Pulling repository fedora
-    ad57ef8d78d7: Download complete
-    105182bb5e8b: Download complete
-    511136ea3c5a: Download complete
-    73bd853d2ea5: Download complete
-    ....
-
-    Status: Downloaded newer image for fedora
-
-After the pull has completed, use the `docker images` command to see the
-images that were pulled. The example below shows all the `fedora` images
-that are present locally:
-
-    $ docker images fedora
-
-    REPOSITORY   TAG         IMAGE ID       CREATED      SIZE
-    fedora       rawhide     ad57ef8d78d7   5 days ago   359.3 MB
-    fedora       20          105182bb5e8b   5 days ago   372.7 MB
-    fedora       heisenbug   105182bb5e8b   5 days ago   372.7 MB
-    fedora       latest      105182bb5e8b   5 days ago   372.7 MB
-
-
-## Canceling a pull
-
-Killing the `docker pull` process, for example by pressing `CTRL-c` while it is
-running in a terminal, will terminate the pull operation.
-
-    $ docker pull fedora
-
-    Using default tag: latest
-    latest: Pulling from library/fedora
-    a3ed95caeb02: Pulling fs layer
-    236608c7b546: Pulling fs layer
-    ^C
-
-> **Note**: Technically, the Engine terminates a pull operation when the
-> connection between the Docker Engine daemon and the Docker Engine client
-> initiating the pull is lost. If the connection with the Engine daemon is
-> lost for reasons other than a manual interaction, the pull is also aborted.
-
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.com source material and internal work.
-June 2014, updated by Sven Dowideit
-August 2014, updated by Sven Dowideit
-April 2015, updated by John Willis
-April 2015, updated by Mary Anthony for v2
-September 2015, updated by Sally O'Malley
diff --git a/man/docker-push.1.md b/man/docker-push.1.md
deleted file mode 100644
index 847e66d2e4..0000000000
--- a/man/docker-push.1.md
+++ /dev/null
@@ -1,63 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-push - Push an image or a repository to a registry
-
-# SYNOPSIS
-**docker push**
-[**--help**]
-NAME[:TAG] | [REGISTRY_HOST[:REGISTRY_PORT]/]NAME[:TAG]
-
-# DESCRIPTION
-
-Use `docker push` to push your images to the [Docker Hub](https://hub.docker.com)
-registry or to a self-hosted one.
-
-Refer to **docker-tag(1)** for more information about valid image and tag names.
-
-Killing the **docker push** process, for example by pressing **CTRL-c** while it
-is running in a terminal, terminates the push operation.
-
-Registry credentials are managed by **docker-login(1)**.
-
-
-# OPTIONS
-
-**--disable-content-trust**
-  Skip image verification (default true)
-
-**--help**
-  Print usage statement
-
-# EXAMPLES
-
-## Pushing a new image to a registry
-
-First save the new image by finding the container ID (using **docker ps**)
-and then committing it to a new image name. Note that only the characters
-`a-z0-9-_.` are allowed when naming images:
-
-    # docker commit c16378f943fe rhel-httpd
-
-Now, push the image to the registry using the new image name. In this example the
-registry is on a host named `registry-host` and listening on port `5000`. To do
-this, tag the image with the host name or IP address, and the port of the
-registry:
-
-    # docker tag rhel-httpd registry-host:5000/myadmin/rhel-httpd
-    # docker push registry-host:5000/myadmin/rhel-httpd
-
-Check that this worked by running:
-
-    # docker images
-
-You should see both `rhel-httpd` and `registry-host:5000/myadmin/rhel-httpd`
-listed.
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.com source material and internal work.
-June 2014, updated by Sven Dowideit
-April 2015, updated by Mary Anthony for v2
-June 2015, updated by Sally O'Malley
diff --git a/man/docker-rename.1.md b/man/docker-rename.1.md
deleted file mode 100644
index eaeea5c6e0..0000000000
--- a/man/docker-rename.1.md
+++ /dev/null
@@ -1,15 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% OCTOBER 2014
-# NAME
-docker-rename - Rename a container
-
-# SYNOPSIS
-**docker rename**
-CONTAINER NEW_NAME
-
-# OPTIONS
-There are no available options.
-
-# DESCRIPTION
-Rename a container. The container may be running, paused, or stopped.
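-
-A minimal usage sketch (the names `old-web` and `web` are illustrative):
-
-```bash
-# Give the container currently named "old-web" the new name "web".
-$ docker rename old-web web
-```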
diff --git a/man/docker-restart.1.md b/man/docker-restart.1.md
deleted file mode 100644
index 271c4eee1b..0000000000
--- a/man/docker-restart.1.md
+++ /dev/null
@@ -1,26 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-restart - Restart one or more containers
-
-# SYNOPSIS
-**docker restart**
-[**--help**]
-[**-t**|**--time**[=*10*]]
-CONTAINER [CONTAINER...]
-
-# DESCRIPTION
-Restart each container listed.
-
-# OPTIONS
-**--help**
-  Print usage statement
-
-**-t**, **--time**=*10*
-  Number of seconds to wait for the container to stop before killing it. Once killed, it will then be restarted. The default is 10 seconds.
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.com source material and internal work.
-June 2014, updated by Sven Dowideit
diff --git a/man/docker-rm.1.md b/man/docker-rm.1.md
deleted file mode 100644
index 2105288d0d..0000000000
--- a/man/docker-rm.1.md
+++ /dev/null
@@ -1,72 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-rm - Remove one or more containers
-
-# SYNOPSIS
-**docker rm**
-[**-f**|**--force**]
-[**-l**|**--link**]
-[**-v**|**--volumes**]
-CONTAINER [CONTAINER...]
-
-# DESCRIPTION
-
-**docker rm** will remove one or more containers from the host node. The
-container name or ID can be used. This does not remove images. You cannot
-remove a running container unless you use the **-f** option. To see all
-containers on a host use the **docker ps -a** command.
-
-# OPTIONS
-**--help**
-  Print usage statement
-
-**-f**, **--force**=*true*|*false*
-  Force the removal of a running container (uses SIGKILL). The default is *false*.
-
-**-l**, **--link**=*true*|*false*
-  Remove the specified link and not the underlying container. The default is *false*.
-
-**-v**, **--volumes**=*true*|*false*
-  Remove the volumes associated with the container. The default is *false*.
-
-# EXAMPLES
-
-## Removing a container using its ID
-
-To remove a container using its ID, find it either from a **docker ps -a**
-command, or use the ID returned from the **docker run** command, or retrieve
-it from a file used to store it using the **docker run --cidfile** option:
-
-    docker rm abebf7571666
-
-## Removing a container using the container name
-
-The name of the container can be found using the **docker ps -a**
-command. Then use that name as follows:
-
-    docker rm hopeful_morse
-
-## Removing a container and all associated volumes
-
-    $ docker rm -v redis
-    redis
-
-This command will remove the container and any volumes associated with it.
-Note that if a volume was specified with a name, it will not be removed.
-
-    $ docker create -v awesome:/foo -v /bar --name hello redis
-    hello
-    $ docker rm -v hello
-
-In this example, the volume for `/foo` will remain intact, but the volume for
-`/bar` will be removed. The same behavior holds for volumes inherited with
-`--volumes-from`.
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.com source material and internal work.
-June 2014, updated by Sven Dowideit
-July 2014, updated by Sven Dowideit
-August 2014, updated by Sven Dowideit
diff --git a/man/docker-rmi.1.md b/man/docker-rmi.1.md
deleted file mode 100644
index 35bf8aac6a..0000000000
--- a/man/docker-rmi.1.md
+++ /dev/null
@@ -1,42 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-rmi - Remove one or more images
-
-# SYNOPSIS
-**docker rmi**
-[**-f**|**--force**]
-[**--help**]
-[**--no-prune**]
-IMAGE [IMAGE...]
-
-# DESCRIPTION
-
-Removes one or more images from the host node. This does not remove images from
-a registry. You cannot remove an image of a running container unless you use the
-**-f** option. To see all images on a host use the **docker images** command.
-
-# OPTIONS
-**-f**, **--force**=*true*|*false*
-  Force removal of the image. The default is *false*.
-
-**--help**
-  Print usage statement
-
-**--no-prune**=*true*|*false*
-  Do not delete untagged parents. The default is *false*.
-
-# EXAMPLES
-
-## Removing an image
-
-Here is an example of removing an image:
-
-    docker rmi fedora/httpd
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.com source material and internal work.
-June 2014, updated by Sven Dowideit
-April 2015, updated by Mary Anthony for v2
diff --git a/man/docker-run.1.md b/man/docker-run.1.md
deleted file mode 100644
index 912f34d1e9..0000000000
--- a/man/docker-run.1.md
+++ /dev/null
@@ -1,1005 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-run - Run a command in a new container
-
-# SYNOPSIS
-**docker run**
-[**-a**|**--attach**[=*[]*]]
-[**--add-host**[=*[]*]]
-[**--blkio-weight**[=*[BLKIO-WEIGHT]*]]
-[**--blkio-weight-device**[=*[]*]]
-[**--cpu-shares**[=*0*]]
-[**--cap-add**[=*[]*]]
-[**--cap-drop**[=*[]*]]
-[**--cgroup-parent**[=*CGROUP-PATH*]]
-[**--cidfile**[=*CIDFILE*]]
-[**--cpu-period**[=*0*]]
-[**--cpu-quota**[=*0*]]
-[**--cpuset-cpus**[=*CPUSET-CPUS*]]
-[**--cpuset-mems**[=*CPUSET-MEMS*]]
-[**-d**|**--detach**]
-[**--detach-keys**[=*[]*]]
-[**--device**[=*[]*]]
-[**--device-read-bps**[=*[]*]]
-[**--device-read-iops**[=*[]*]]
-[**--device-write-bps**[=*[]*]]
-[**--device-write-iops**[=*[]*]]
-[**--dns**[=*[]*]]
-[**--dns-opt**[=*[]*]]
-[**--dns-search**[=*[]*]]
-[**-e**|**--env**[=*[]*]]
-[**--entrypoint**[=*ENTRYPOINT*]]
-[**--env-file**[=*[]*]]
-[**--expose**[=*[]*]]
-[**--group-add**[=*[]*]]
-[**-h**|**--hostname**[=*HOSTNAME*]]
-[**--help**]
-[**-i**|**--interactive**]
-[**--ip**[=*IPv4-ADDRESS*]]
-[**--ip6**[=*IPv6-ADDRESS*]]
-[**--ipc**[=*IPC*]]
-[**--isolation**[=*default*]]
-[**--kernel-memory**[=*KERNEL-MEMORY*]]
-[**-l**|**--label**[=*[]*]]
-[**--label-file**[=*[]*]]
-[**--link**[=*[]*]]
-[**--link-local-ip**[=*[]*]]
-[**--log-driver**[=*[]*]]
-[**--log-opt**[=*[]*]]
-[**-m**|**--memory**[=*MEMORY*]]
-[**--mac-address**[=*MAC-ADDRESS*]]
-[**--memory-reservation**[=*MEMORY-RESERVATION*]]
-[**--memory-swap**[=*LIMIT*]]
-[**--memory-swappiness**[=*MEMORY-SWAPPINESS*]]
-[**--name**[=*NAME*]]
-[**--network-alias**[=*[]*]]
-[**--network**[=*"bridge"*]]
-[**--oom-kill-disable**]
-[**--oom-score-adj**[=*0*]]
-[**-P**|**--publish-all**]
-[**-p**|**--publish**[=*[]*]]
-[**--pid**[=*[PID]*]]
-[**--userns**[=*[]*]]
-[**--pids-limit**[=*PIDS_LIMIT*]]
-[**--privileged**]
-[**--read-only**]
-[**--restart**[=*RESTART*]]
-[**--rm**]
-[**--security-opt**[=*[]*]]
-[**--storage-opt**[=*[]*]]
-[**--stop-signal**[=*SIGNAL*]]
-[**--shm-size**[=*[]*]]
-[**--sig-proxy**[=*true*]]
-[**--sysctl**[=*[]*]]
-[**-t**|**--tty**]
-[**--tmpfs**[=*[CONTAINER-DIR[:OPTIONS]]*]]
-[**-u**|**--user**[=*USER*]]
-[**--ulimit**[=*[]*]]
-[**--uts**[=*[]*]]
-[**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]]
-[**--volume-driver**[=*DRIVER*]]
-[**--volumes-from**[=*[]*]]
-[**-w**|**--workdir**[=*WORKDIR*]]
-IMAGE [COMMAND] [ARG...]
-
-# DESCRIPTION
-
-Run a process in a new container. **docker run** starts a process with its own
-file system, its own networking, and its own isolated process tree. The IMAGE
-which starts the process may define defaults related to the process that will be
-run in the container, the networking to expose, and more, but **docker run**
-gives final control to the operator or administrator who starts the container
-from the image. For that reason **docker run** has more options than any other
-Docker command.
-
-If the IMAGE is not already loaded then **docker run** will pull the IMAGE, and
-all image dependencies, from the repository in the same way as running **docker
-pull** IMAGE, before it starts the container from that image.
-
-# OPTIONS
-**-a**, **--attach**=[]
-  Attach to STDIN, STDOUT or STDERR.
-
-  In foreground mode (the default when **-d**
-is not specified), **docker run** can start the process in the container
-and attach the console to the process's standard input, output, and standard
-error. It can even pretend to be a TTY (this is what most command-line
-executables expect) and pass along signals. The **-a** option can be set for
-each of stdin, stdout, and stderr.
-
-**--add-host**=[]
-  Add a custom host-to-IP mapping (host:ip)
-
-  Add a line to /etc/hosts. The format is hostname:ip. The **--add-host**
-option can be set multiple times.
-
-**--blkio-weight**=*0*
-  Block IO weight (relative weight) accepts a weight value between 10 and 1000.
-
-**--blkio-weight-device**=[]
-  Block IO weight (relative device weight, format: `DEVICE_NAME:WEIGHT`).
-
-**--cpu-shares**=*0*
-  CPU shares (relative weight)
-
-  By default, all containers get the same proportion of CPU cycles. This proportion
-can be modified by changing the container's CPU share weighting relative
-to the weighting of all other running containers.
-
-To modify the proportion from the default of 1024, use the **--cpu-shares**
-flag to set the weighting to 2 or higher.
-
-The proportion will only apply when CPU-intensive processes are running.
-When tasks in one container are idle, other containers can use the
-left-over CPU time. The actual amount of CPU time will vary depending on
-the number of containers running on the system.
-
-For example, consider three containers, one has a cpu-share of 1024 and
-two others have a cpu-share setting of 512. When processes in all three
-containers attempt to use 100% of CPU, the first container would receive
-50% of the total CPU time. If you add a fourth container with a cpu-share
-of 1024, the first container only gets 33% of the CPU. The remaining containers
-receive 16.5%, 16.5% and 33% of the CPU.
-
-On a multi-core system, the shares of CPU time are distributed over all CPU
-cores. Even if a container is limited to less than 100% of CPU time, it can
-use 100% of each individual CPU core.
-
-For example, consider a system with more than three cores. If you start one
-container **{C0}** with **-c=512** running one process, and another container
-**{C1}** with **-c=1024** running two processes, this can result in the following
-division of CPU shares:
-
-    PID    container    CPU    CPU share
-    100    {C0}         0      100% of CPU0
-    101    {C1}         1      100% of CPU1
-    102    {C1}         2      100% of CPU2
-
-**--cap-add**=[]
-  Add Linux capabilities
-
-**--cap-drop**=[]
-  Drop Linux capabilities
-
-**--cgroup-parent**=""
-  Path to cgroups under which the cgroup for the container will be created. If the path is not absolute, the path is considered to be relative to the cgroups path of the init process. Cgroups will be created if they do not already exist.
-
-**--cidfile**=""
-  Write the container ID to the file
-
-**--cpu-period**=*0*
-  Limit the CPU CFS (Completely Fair Scheduler) period
-
-  Limit the container's CPU usage. This flag tells the kernel to restrict the container's CPU usage to the period you specify.
-
-**--cpuset-cpus**=""
-  CPUs in which to allow execution (0-3, 0,1)
-
-**--cpuset-mems**=""
-  Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
-
-  If you have four memory nodes on your system (0-3) and use `--cpuset-mems=0,1`,
-then processes in your Docker container will only use memory from the first
-two memory nodes.
-
-**--cpu-quota**=*0*
-  Limit the CPU CFS (Completely Fair Scheduler) quota
-
-  Limit the container's CPU usage. By default, containers run with the full
-CPU resource. This flag tells the kernel to restrict the container's CPU usage
-to the quota you specify.
-
-**-d**, **--detach**=*true*|*false*
-  Detached mode: run the container in the background and print the new container ID. The default is *false*.
-
-  At any time you can run **docker ps** in
-the other shell to view a list of the running containers. You can reattach to a
-detached container with **docker attach**. If you choose to run a container in
-the detached mode, then you cannot use the **--rm** option.
-
-  When attached in TTY mode, you can detach from the container (and leave it
-running) using a configurable key sequence. The default sequence is `CTRL-p CTRL-q`.
-You configure the key sequence using the **--detach-keys** option or a configuration file.
-See **config-json(5)** for documentation on using a configuration file.
-
-**--detach-keys**=""
-  Override the key sequence for detaching a container. Format is a single character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`, `^`, `[`, `,` or `_`.
-
-**--device**=[]
-  Add a host device to the container (e.g. --device=/dev/sdc:/dev/xvdc:rwm)
-
-**--device-read-bps**=[]
-  Limit read rate from a device (e.g. --device-read-bps=/dev/sda:1mb)
-
-**--device-read-iops**=[]
-  Limit read rate from a device (e.g. --device-read-iops=/dev/sda:1000)
-
-**--device-write-bps**=[]
-  Limit write rate to a device (e.g. --device-write-bps=/dev/sda:1mb)
-
-**--device-write-iops**=[]
-  Limit write rate to a device (e.g. --device-write-iops=/dev/sda:1000)
-
-**--dns-search**=[]
-  Set custom DNS search domains (Use --dns-search=. if you don't wish to set the search domain)
-
-**--dns-opt**=[]
-  Set custom DNS options
-
-**--dns**=[]
-  Set custom DNS servers
-
-  This option can be used to override the DNS
-configuration passed to the container. Typically this is necessary when the
-host DNS configuration is invalid for the container (e.g., 127.0.0.1). When this
-is the case the **--dns** flag is necessary for every run.
-
-**-e**, **--env**=[]
-  Set environment variables
-
-  This option allows you to specify arbitrary
-environment variables that are available for the process that will be launched
-inside of the container.
-
-**--entrypoint**=""
-  Overwrite the default ENTRYPOINT of the image
-
-  This option allows you to overwrite the default entrypoint of the image that
-is set in the Dockerfile. The ENTRYPOINT of an image is similar to a COMMAND
-because it specifies what executable to run when the container starts, but it is
-(purposely) more difficult to override. The ENTRYPOINT gives a container its
-default nature or behavior, so that when you set an ENTRYPOINT you can run the
-container as if it were that binary, complete with default options, and you can
-pass in more options via the COMMAND. But, sometimes an operator may want to run
-something else inside the container, so you can override the default ENTRYPOINT
-at runtime by using a **--entrypoint** and a string to specify the new
-ENTRYPOINT.
-
-**--env-file**=[]
-  Read in a line-delimited file of environment variables
-
-**--expose**=[]
-  Expose a port, or a range of ports (e.g. --expose=3300-3310), to inform Docker
-that the container listens on the specified network ports at runtime. Docker
-uses this information to interconnect containers using links and to set up port
-redirection on the host system.
-
-**--group-add**=[]
-  Add additional groups to run as
-
-**-h**, **--hostname**=""
-  Container host name
-
-  Sets the container host name that is available inside the container.
-
-**--help**
-  Print usage statement
-
-**-i**, **--interactive**=*true*|*false*
-  Keep STDIN open even if not attached. The default is *false*.
-
-  When set to true, keep stdin open even if not attached. The default is false.
-
-**--ip**=""
-  Sets the container's interface IPv4 address (e.g. 172.23.0.9)
-
-  It can only be used in conjunction with **--net** for user-defined networks
-
-**--ip6**=""
-  Sets the container's interface IPv6 address (e.g. 2001:db8::1b99)
-
-  It can only be used in conjunction with **--net** for user-defined networks
-
-**--ipc**=""
-  Default is to create a private IPC namespace (POSIX SysV IPC) for the container
-    'container:<name|id>': reuses another container's shared memory, semaphores and message queues
-    'host': use the host's shared memory, semaphores and message queues inside the container. Note: the host mode gives the container full access to local shared memory and is therefore considered insecure.
-
-**--isolation**="*default*"
-  Isolation specifies the type of isolation technology used by containers. Note
-that the default on Windows Server is `process`, and the default on Windows client
-is `hyperv`. Linux only supports `default`.
-
-**-l**, **--label**=[]
-  Set metadata on the container (e.g., --label com.example.key=value)
-
-**--kernel-memory**=""
-  Kernel memory limit (format: `<number>[<unit>]`, where unit = b, k, m or g)
-
-  Constrains the kernel memory available to a container. If a limit of 0
-is specified (not using `--kernel-memory`), the container's kernel memory
-is not limited. If you specify a limit, it may be rounded up to a multiple
-of the operating system's page size and the value can be very large,
-millions of trillions.
-
-**--label-file**=[]
-  Read in a line-delimited file of labels
-
-**--link**=[]
-  Add link to another container in the form of <name or id>:alias, or just <name or id>,
-in which case the alias will match the name
-
-  If the operator
-uses **--link** when starting the new client container, then the client
-container can access the exposed port via a private networking interface. Docker
-will set some environment variables in the client container to help indicate
-which interface and port to use.
-
-**--link-local-ip**=[]
-  Add one or more link-local IPv4/IPv6 addresses to the container's interface
-
-**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*"
-  Logging driver for the container. Default is defined by daemon `--log-driver` flag.
-  **Warning**: the `docker logs` command works only for the `json-file` and
-  `journald` logging drivers.
-
-**--log-opt**=[]
-  Logging driver specific options.
-
-**-m**, **--memory**=""
-  Memory limit (format: <number>[<unit>], where unit = b, k, m or g)
-
-  Allows you to constrain the memory available to a container. If the host
-supports swap memory, then the **-m** memory setting can be larger than physical
-RAM. If a limit of 0 is specified (not using **-m**), the container's memory is
-not limited. The actual limit may be rounded up to a multiple of the operating
-system's page size (the value would be very large, that's millions of trillions).
-
-**--memory-reservation**=""
-  Memory soft limit (format: <number>[<unit>], where unit = b, k, m or g)
-
-  After setting memory reservation, when the system detects memory contention
-or low memory, containers are forced to restrict their consumption to their
-reservation. So you should always set the value below **--memory**, otherwise the
-hard limit will take precedence. By default, memory reservation will be the same
-as memory limit.
-
-**--memory-swap**="LIMIT"
-  A limit value equal to memory plus swap. Must be used with the **-m**
-(**--memory**) flag. The swap `LIMIT` should always be larger than **-m**
-(**--memory**) value. By default, the swap `LIMIT` will be set to double
-the value of --memory.
-
-  The format of `LIMIT` is `<number>[<unit>]`. Unit can be `b` (bytes),
-`k` (kilobytes), `m` (megabytes), or `g` (gigabytes). If you don't specify a
-unit, `b` is used. Set LIMIT to `-1` to enable unlimited swap.
-
-**--mac-address**=""
-  Container MAC address (e.g. 92:d0:c6:0a:29:33)
-
-  Remember that the MAC address in an Ethernet network must be unique.
-The IPv6 link-local address will be based on the device's MAC address
-according to RFC4862.
-
-**--name**=""
-  Assign a name to the container
-
-  The operator can identify a container in three ways:
-    UUID long identifier (“f78375b1c487e03c9438c729345e54db9d20cfa2ac1fc3494b6eb60872e74778”)
-    UUID short identifier (“f78375b1c487”)
-    Name (“jonah”)
-
-  The UUID identifiers come from the Docker daemon, and if a name is not assigned
-to the container with **--name** then the daemon will also generate a random
-string name. The name is useful when defining links (see **--link**) (or any
-other place you need to identify a container). This works for both background
-and foreground Docker containers.
-
-**--net**="*bridge*"
-  Set the Network mode for the container
-    'bridge': create a network stack on the default Docker bridge
-    'none': no networking
-    'container:<name|id>': reuse another container's network stack
-    'host': use the Docker host network stack. Note: the host mode gives the container full access to local system services such as D-bus and is therefore considered insecure.
-    '<network-name>|<network-id>': connect to a user-defined network
-
-**--network-alias**=[]
-  Add network-scoped alias for the container
-
-**--oom-kill-disable**=*true*|*false*
-  Whether to disable OOM Killer for the container or not.
-
-**--oom-score-adj**=""
-  Tune the host's OOM preferences for containers (accepts -1000 to 1000)
-
-**-P**, **--publish-all**=*true*|*false*
-  Publish all exposed ports to random ports on the host interfaces. The default is *false*.
-
-  When set to true, publish all exposed ports to the host interfaces. The
-default is false. If the operator uses -P (or -p) then Docker will make the
-exposed port accessible on the host and the ports will be available to any
-client that can reach the host. When using -P, Docker will bind any exposed
-port to a random port on the host within an *ephemeral port range* defined by
-`/proc/sys/net/ipv4/ip_local_port_range`. To find the mapping between the host
-ports and the exposed ports, use `docker port`.
-
-**-p**, **--publish**=[]
-  Publish a container's port, or range of ports, to the host.
-
-  Format: `ip:hostPort:containerPort | ip::containerPort | hostPort:containerPort | containerPort`
-Both hostPort and containerPort can be specified as a range of ports.
-When specifying ranges for both, the number of container ports in the range must match the number of host ports in the range.
-(e.g., `docker run -p 1234-1236:1222-1224 --name thisWorks -t busybox`
-but not `docker run -p 1230-1236:1230-1240 --name RangeContainerPortsBiggerThanRangeHostPorts -t busybox`)
-With ip: `docker run -p 127.0.0.1:$HOSTPORT:$CONTAINERPORT --name CONTAINER -t someimage`
-Use `docker port` to see the actual mapping: `docker port CONTAINER $CONTAINERPORT`
-
-**--pid**=""
-  Set the PID mode for the container
-    Default is to create a private PID namespace for the container
-    'container:<name|id>': join another container's PID namespace
-    'host': use the host's PID namespace for the container. Note: the host mode gives the container full access to local PID and is therefore considered insecure.
-
-**--userns**=""
-  Set the user namespace mode for the container when the `userns-remap` option is enabled.
-    **host**: use the host's user namespace and enable all privileged options (e.g., `pid=host` or `--privileged`).
-
-**--pids-limit**=""
-  Tune the container's pids limit. Set `-1` to have unlimited pids for the container.
-
-**--uts**=*host*
-  Set the UTS mode for the container
-    **host**: use the host's UTS namespace inside the container.
-    Note: the host mode gives the container access to changing the host's hostname and is therefore considered insecure.
-
-**--privileged**=*true*|*false*
-  Give extended privileges to this container. The default is *false*.
-
-  By default, Docker containers are
-“unprivileged” (=false) and cannot, for example, run a Docker daemon inside the
-Docker container. This is because by default a container is not allowed to
-access any devices. A “privileged” container is given access to all devices.
-
-  When the operator executes **docker run --privileged**, Docker will enable access
-to all devices on the host as well as set some configuration in AppArmor to
-allow the container nearly all the same access to the host as processes running
-outside of a container on the host.
-
-**--read-only**=*true*|*false*
-  Mount the container's root filesystem as read-only.
-
-  By default, a container will have its root filesystem writable, allowing processes
-to write files anywhere. By specifying the `--read-only` flag, the container will have
-its root filesystem mounted as read-only, prohibiting any writes.
-
-**--restart**="*no*"
-  Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped).
-
-**--rm**=*true*|*false*
-  Automatically remove the container when it exits (incompatible with -d). The default is *false*.
-
-**--security-opt**=[]
-  Security Options
-
-  "label=user:USER"   : Set the label user for the container
-  "label=role:ROLE"   : Set the label role for the container
-  "label=type:TYPE"   : Set the label type for the container
-  "label=level:LEVEL" : Set the label level for the container
-  "label=disable"     : Turn off label confinement for the container
-  "no-new-privileges" : Disable container processes from gaining additional privileges
-
-  "seccomp=unconfined" : Turn off seccomp confinement for the container
-  "seccomp=profile.json" : JSON file of whitelisted syscalls to be used as a seccomp filter
-
-  "apparmor=unconfined" : Turn off apparmor confinement for the container
-  "apparmor=your-profile" : Set the apparmor confinement profile for the container
-
-**--storage-opt**=[]
-  Storage driver options per container
-
-    $ docker run -it --storage-opt size=120G fedora /bin/bash
-
-  This (size) allows you to set the container rootfs size to 120G at creation time. You cannot pass a size less than the default BaseFS size.
-  This option is only available for the `devicemapper`, `btrfs`, and `zfs` graph drivers.
-
-**--stop-signal**=*SIGTERM*
-  Signal to stop a container. Default is SIGTERM.
-
-**--shm-size**=""
-  Size of `/dev/shm`. The format is `<number><unit>`.
-  `number` must be greater than `0`. Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g` (gigabytes).
-  If you omit the unit, the system uses bytes. If you omit the size entirely, the system uses `64m`.
-
-**--sysctl**=SYSCTL
-  Configure namespaced kernel parameters at runtime
-
-  IPC Namespace - current sysctls allowed:
-
-  kernel.msgmax, kernel.msgmnb, kernel.msgmni, kernel.sem, kernel.shmall, kernel.shmmax, kernel.shmmni, kernel.shm_rmid_forced
-  Sysctls beginning with fs.mqueue.*
-
-  If you use the `--ipc=host` option these sysctls will not be allowed.
-
-  Network Namespace - current sysctls allowed:
-    Sysctls beginning with net.*
-
-  If you use the `--net=host` option these sysctls will not be allowed.
-
-**--sig-proxy**=*true*|*false*
-  Proxy received signals to the process (non-TTY mode only). SIGCHLD, SIGSTOP, and SIGKILL are not proxied. The default is *true*.
-
-**--memory-swappiness**=""
-  Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
-
-**-t**, **--tty**=*true*|*false*
-  Allocate a pseudo-TTY. The default is *false*.
-
-  When set to true Docker can allocate a pseudo-tty and attach to the standard
-input of any container. This can be used, for example, to run a throwaway
-interactive shell. The default is false.
-
-The **-t** option is incompatible with a redirection of the docker client
-standard input.
-
-**--tmpfs**=[] Create a tmpfs mount
-
-  Mount a temporary filesystem (`tmpfs`) mount into a container, for example:
-
-    $ docker run -d --tmpfs /tmp:rw,size=787448k,mode=1777 my_image
-
-  This command mounts a `tmpfs` at `/tmp` within the container. The supported mount
-options are the same as the Linux default `mount` flags. If you do not specify
-any options, the system uses the following options:
-`rw,noexec,nosuid,nodev,size=65536k`.
-
-**-u**, **--user**=""
-  Sets the username or UID used and optionally the groupname or GID for the specified command.
-
-  The following examples are all valid:
-  --user [user | user:group | uid | uid:gid | user:gid | uid:group ]
-
-  Without this argument, the command will be run as root in the container.
-
-**--ulimit**=[]
-  Ulimit options
-
-**-v**|**--volume**[=*[[HOST-DIR:]CONTAINER-DIR[:OPTIONS]]*]
-  Create a bind mount. If you specify `-v /HOST-DIR:/CONTAINER-DIR`, Docker
-  bind-mounts `/HOST-DIR` in the host to `/CONTAINER-DIR` in the Docker
-  container. If 'HOST-DIR' is omitted, Docker automatically creates the new
-  volume on the host. The `OPTIONS` are a comma delimited list and can be:
-
-   * [rw|ro]
-   * [z|Z]
-   * [`[r]shared`|`[r]slave`|`[r]private`]
-   * [nocopy]
-
-The `CONTAINER-DIR` must be an absolute path such as `/src/docs`. The `HOST-DIR`
-can be an absolute path or a `name` value. A `name` value must start with an
-alphanumeric character, followed by `a-z0-9`, `_` (underscore), `.` (period) or
-`-` (hyphen). An absolute path starts with a `/` (forward slash).
-
-If you supply a `HOST-DIR` that is an absolute path, Docker bind-mounts to the
-path you specify. If you supply a `name`, Docker creates a named volume by that
-`name`. For example, you can specify either `/foo` or `foo` for a `HOST-DIR`
-value. If you supply the `/foo` value, Docker creates a bind-mount. If you
-supply the `foo` specification, Docker creates a named volume.
-
-You can specify multiple **-v** options to mount one or more mounts to a
-container. To use these same mounts in other containers, specify the
-**--volumes-from** option also.
-
-You can add a `:ro` or `:rw` suffix to a volume to mount it in read-only or
-read-write mode, respectively. By default, the volumes are mounted read-write.
-See examples.
-
-Labeling systems like SELinux require that proper labels are placed on volume
-content mounted into a container. Without a label, the security system might
-prevent the processes running inside the container from using the content. By
-default, Docker does not change the labels set by the OS.
-
-To change a label in the container context, you can add either of two suffixes
-`:z` or `:Z` to the volume mount. These suffixes tell Docker to relabel file
-objects on the shared volumes. The `z` option tells Docker that two containers
-share the volume content. As a result, Docker labels the content with a shared
-content label. Shared volume labels allow all containers to read/write content.
-The `Z` option tells Docker to label the content with a private unshared label.
-Only the current container can use a private volume.
-
-By default, bind-mounted volumes are `private`. That means any mounts done
-inside the container will not be visible on the host and vice versa. You can change
-this behavior by specifying a volume mount propagation property. Making a
-volume `shared`, mounts done under that volume inside the container will be
-visible on the host and vice versa. Making a volume `slave` enables only one-way
-mount propagation: mounts done on the host under that volume
-will be visible inside the container, but not the other way around.
-
-To control the mount propagation property of a volume, use the `:[r]shared`,
-`:[r]slave` or `:[r]private` propagation flag. The propagation property can
-be specified only for bind-mounted volumes, not for internal volumes or
-named volumes. For mount propagation to work, the source mount point (the mount point
-where the source directory is mounted) has to have the right propagation properties. For
-shared volumes, the source mount point has to be shared. For slave volumes,
-the source mount has to be either shared or slave.
-
-Use `df <source-dir>` to figure out the source mount, and then use
-`findmnt -o TARGET,PROPAGATION <source-mount-dir>` to figure out the propagation
-properties of the source mount. If the `findmnt` utility is not available, you
-can look at the mount entry for the source mount point in `/proc/self/mountinfo`. Look
-at the `optional fields` and see if any propagation properties are specified.
-`shared:X` means the mount is `shared`, `master:X` means the mount is `slave`, and if
-nothing is there, the mount is `private`.
-
-To change the propagation properties of a mount point, use the `mount` command. For
-example, to bind-mount the source directory `/foo`, you can run
-`mount --bind /foo /foo` and `mount --make-private --make-shared /foo`. This
-will convert `/foo` into a `shared` mount point. Alternatively, you can directly
-change the propagation properties of the source mount. Say `/` is the source mount for
-`/foo`; then use `mount --make-shared /` to convert `/` into a `shared` mount.
-
-> **Note**:
-> When using systemd to manage the Docker daemon's start and stop, in the systemd
-> unit file there is an option to control mount propagation for the Docker daemon
-> itself, called `MountFlags`. The value of this setting may cause Docker to not
-> see mount propagation changes made on the mount point. For example, if this value
-> is `slave`, you may not be able to use the `shared` or `rshared` propagation on
-> a volume.
-
-To disable automatic copying of data from the container path to the volume, use
-the `nocopy` flag. The `nocopy` flag can be set on bind mounts and named volumes.
-
-**--volume-driver**=""
-  Container's volume driver. This driver creates volumes specified either from
-  a Dockerfile's `VOLUME` instruction or from the `docker run -v` flag.
-  See **docker-volume-create(1)** for full details.
-
-**--volumes-from**=[]
-  Mount volumes from the specified container(s)
-
-  Mounts already mounted volumes from a source container onto another
-  container. You must supply the source's container-id. To share
-  a volume, use the **--volumes-from** option when running
-  the target container. You can share volumes even if the source container
-  is not running.
-
-  By default, Docker mounts the volumes in the same mode (read-write or
-  read-only) as it is mounted in the source container. Optionally, you
-  can change this by suffixing the container-id with either the `:ro` or
-  `:rw` keyword.
-
-  If the location of the volume from the source container overlaps with
-  data residing on a target container, then the volume hides
-  that data on the target.
-
-**-w**, **--workdir**=""
-  Working directory inside the container
-
-  The default working directory for
-running binaries within a container is the root directory (/). The developer can
-set a different default with the Dockerfile WORKDIR instruction. The operator
-can override the working directory by using the **-w** option.
-
-# Exit Status
-
-The exit code from `docker run` gives information about why the container
-failed to run or why it exited. When `docker run` exits with a non-zero code,
-the exit codes follow the `chroot` standard, see below:
-
-**_125_** if the error is with Docker daemon **_itself_**
-
-    $ docker run --foo busybox; echo $?
-    # flag provided but not defined: --foo
-    See 'docker run --help'.
-      125
-
-**_126_** if the **_contained command_** cannot be invoked
-
-    $ docker run busybox /etc; echo $?
-    # exec: "/etc": permission denied
-      docker: Error response from daemon: Contained command could not be invoked
-      126
-
-**_127_** if the **_contained command_** cannot be found
-
-    $ docker run busybox foo; echo $?
-    # exec: "foo": executable file not found in $PATH
-      docker: Error response from daemon: Contained command not found or does not exist
-      127
-
-**_Exit code_** of **_contained command_** otherwise
-
-    $ docker run busybox /bin/sh -c 'exit 3'
-    # 3
-
-# EXAMPLES
-
-## Running container in read-only mode
-
-During container image development, containers often need to write to the image
-content, for example when installing packages into /usr. In production,
-applications seldom need to write to the image. Container applications write
-to volumes if they need to write to file systems at all. Applications can be
-made more secure by running them in read-only mode using the **--read-only**
-switch. This protects the container's image from modification. Read-only
-containers may still need to write temporary data. The best way to handle this
-is to mount tmpfs directories on /run and /tmp.
-
-    # docker run --read-only --tmpfs /run --tmpfs /tmp -i -t fedora /bin/bash
-
-## Exposing log messages from the container to the host's log
-
-If you want messages that are logged in your container to show up in the host's
-syslog/journal, then you should bind mount /dev/log as follows.
-
-    # docker run -v /dev/log:/dev/log -i -t fedora /bin/bash
-
-From inside the container you can test this by sending a message to the log.
-
-    (bash)# logger "Hello from my container"
-
-Then exit and check the journal.
-
-    # exit
-
-    # journalctl -b | grep Hello
-
-This should list the message sent to logger.
-
-## Attaching to one or more of STDIN, STDOUT, STDERR
-
-If you do not specify **-a**, then Docker attaches everything
-(stdin, stdout, stderr).
-You can specify to which of the three standard streams (stdin, stdout, stderr)
-you'd like to connect instead, as in:
-
-    # docker run -a stdin -a stdout -i -t fedora /bin/bash
-
-## Sharing IPC between containers
-
-Using shm_server.c available here: https://www.cs.cf.ac.uk/Dave/C/node27.html
-
-Testing `--ipc=host` mode:
-
-The host shows a shared memory segment with 7 PIDs attached, which happens to be from httpd:
-
-```
- $ sudo ipcs -m
-
- ------ Shared Memory Segments --------
- key        shmid      owner      perms      bytes      nattch     status
- 0x01128e25 0          root       600        1000       7
-```
-
-Now run a regular container, and it correctly does NOT see the shared memory segment from the host:
-
-```
- $ docker run -it shm ipcs -m
-
- ------ Shared Memory Segments --------
- key        shmid      owner      perms      bytes      nattch     status
-```
-
-Run a container with the new `--ipc=host` option, and it now sees the shared memory segment from the host httpd:
-
-```
- $ docker run -it --ipc=host shm ipcs -m
-
- ------ Shared Memory Segments --------
- key        shmid      owner      perms      bytes      nattch     status
- 0x01128e25 0          root       600        1000       7
-```
-
-Testing `--ipc=container:CONTAINERID` mode:
-
-Start a container with a program to create a shared memory segment:
-```
- $ docker run -it shm bash
- $ sudo shm/shm_server &
- $ sudo ipcs -m
-
- ------ Shared Memory Segments --------
- key        shmid      owner      perms      bytes      nattch     status
- 0x0000162e 0          root       666        27         1
-```
-A second container correctly shows no shared memory segment from the first container:
-```
- $ docker run shm ipcs -m
-
- ------ Shared Memory Segments --------
- key        shmid      owner      perms      bytes      nattch     status
-```
-
-Create a third container using the `--ipc=container:CONTAINERID` option; it now shows the shared memory segment from the first:
-
-```
- $ docker run -it --ipc=container:ed735b2264ac shm ipcs -m
- $ sudo ipcs -m
-
- ------ Shared Memory Segments --------
- key        shmid      owner      perms      bytes      nattch     status
- 0x0000162e 0          root       666        27         1
-```
-
-## Linking Containers
-
-> **Note**: This section describes linking between containers on the
-> default (bridge) network, also known as "legacy links". Using `--link`
-> on user-defined networks uses the DNS-based discovery, which does not add
-> entries to `/etc/hosts`, and does not set environment variables for
-> discovery.
-
-The link feature allows multiple containers to communicate with each other. For
-example, a container whose Dockerfile has exposed port 80 can be run and named
-as follows:
-
-    # docker run --name=link-test -d -i -t fedora/httpd
-
-A second container, in this case called linker, can communicate with the httpd
-container, named link-test, by running with the **--link=NAME:ALIAS** option:
-
-    # docker run -t -i --link=link-test:lt --name=linker fedora /bin/bash
-
-Now the container linker is linked to the container link-test with the alias lt.
-Running the **env** command in the linker container shows environment variables
-with the LT (alias) context (**LT_**):
-
-    # env
-    HOSTNAME=668231cb0978
-    TERM=xterm
-    LT_PORT_80_TCP=tcp://172.17.0.3:80
-    LT_PORT_80_TCP_PORT=80
-    LT_PORT_80_TCP_PROTO=tcp
-    LT_PORT=tcp://172.17.0.3:80
-    PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
-    PWD=/
-    LT_NAME=/linker/lt
-    SHLVL=1
-    HOME=/
-    LT_PORT_80_TCP_ADDR=172.17.0.3
-    _=/usr/bin/env
-
-When linking two containers, Docker uses the exposed ports of the container
-to create a secure tunnel for the parent to access.
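-
-For example, the linker container can reach the linked service through these
-variables without hard-coding an IP address (a sketch that assumes a tool such
-as curl is installed in the image):
-
-    (bash)# curl http://$LT_PORT_80_TCP_ADDR:$LT_PORT_80_TCP_PORT/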
-
-If a container is connected to the default bridge network and `linked`
-with other containers, then the container's `/etc/hosts` file is updated
-with the linked container's name.
-
-> **Note**: Since Docker may live update the container's `/etc/hosts` file, there
-> may be situations when processes inside the container can end up reading an
-> empty or incomplete `/etc/hosts` file. In most cases, retrying the read again
-> should fix the problem.
-
-## Mapping Ports for External Usage
-
-The exposed port of an application can be mapped to a host port using the **-p**
-flag. For example, an httpd port 80 can be mapped to the host port 8080 using the
-following:
-
-    # docker run -p 8080:80 -d -i -t fedora/httpd
-
-## Creating and Mounting a Data Volume Container
-
-Many applications require the sharing of persistent data across several
-containers. Docker allows you to create a Data Volume Container that other
-containers can mount from. For example, create a named container that contains
-directories /var/volume1 and /tmp/volume2. The image will need to contain these
-directories, so a couple of RUN mkdir instructions might be required for your
-fedora-data image:
-
-    # docker run --name=data -v /var/volume1 -v /tmp/volume2 -i -t fedora-data true
-    # docker run --volumes-from=data --name=fedora-container1 -i -t fedora bash
-
-Multiple **--volumes-from** parameters will bring together multiple data volumes
-from multiple containers. It is also possible to mount the volumes that came from
-the DATA container in yet another container via the fedora-container1 intermediary
-container, allowing you to abstract the actual data source from users of that data:
-
-    # docker run --volumes-from=fedora-container1 --name=fedora-container2 -i -t fedora bash
-
-## Mounting External Volumes
-
-To mount a host directory as a container volume, specify the absolute path to
-the directory and the absolute path for the container directory separated by a
-colon:
-
-    # docker run -v /var/db:/data1 -i -t fedora bash
-
-When using SELinux, be aware that the host has no knowledge of container SELinux
-policy. Therefore, in the above example, if SELinux policy is enforced, the
-`/var/db` directory is not writable to the container. A "Permission Denied"
-message will occur, along with an avc: message in the host's syslog.
-
-To work around this, at the time of writing this man page, the following command
-needs to be run in order for the proper SELinux policy type label to be attached
-to the host directory:
-
-    # chcon -Rt svirt_sandbox_file_t /var/db
-
-Now, writing to the /data1 volume in the container will be allowed and the
-changes will also be reflected on the host in /var/db.
-
-## Using alternative security labeling
-
-You can override the default labeling scheme for each container by specifying
-the `--security-opt` flag. For example, you can specify the MCS/MLS level, a
-requirement for MLS systems. Specifying the level in the following command
-allows you to share the same content between containers.
-
-    # docker run --security-opt label=level:s0:c100,c200 -i -t fedora bash
-
-An MLS example might be:
-
-    # docker run --security-opt label=level:TopSecret -i -t rhel7 bash
-
-To disable security labeling for this container, rather than running with the
-`--permissive` flag, use the following command:
-
-    # docker run --security-opt label=disable -i -t fedora bash
-
-If you want a tighter security policy on the processes within a container,
-you can specify an alternate type for the container.
-You could run a container
-that is only allowed to listen on Apache ports by executing the following
-command:
-
-    # docker run --security-opt label=type:svirt_apache_t -i -t centos bash
-
-Note:
-
-You would have to write a policy defining a `svirt_apache_t` type.
-
-## Setting device weight
-
-If you want to set the `/dev/sda` device weight to `200`, you can specify the
-device weight with the `--blkio-weight-device` flag. Use the following command:
-
-    # docker run -it --blkio-weight-device "/dev/sda:200" ubuntu
-
-## Specify isolation technology for container (--isolation)
-
-This option is useful in situations where you are running Docker containers on
-Microsoft Windows. The `--isolation` option sets a container's isolation
-technology. On Linux, the only supported value is `default`, which uses
-Linux namespaces. These two commands are equivalent on Linux:
-
-```
-$ docker run -d busybox top
-$ docker run -d --isolation default busybox top
-```
-
-On Microsoft Windows, `--isolation` can take any of these values:
-
-* `default`: Use the value specified by the Docker daemon's `--exec-opt` option. If the daemon does not specify an isolation technology, Microsoft Windows uses `process` as its default value.
-* `process`: Namespace isolation only.
-* `hyperv`: Hyper-V hypervisor partition-based isolation.
-
-In practice, when running on Microsoft Windows without a daemon option set, these two commands are equivalent:
-
-```
-$ docker run -d --isolation default busybox top
-$ docker run -d --isolation process busybox top
-```
-
-If you have set the `--exec-opt isolation=hyperv` option on the Docker daemon, either of these commands also results in `hyperv` isolation:
-
-```
-$ docker run -d --isolation default busybox top
-$ docker run -d --isolation hyperv busybox top
-```
-
-## Setting Namespaced Kernel Parameters (Sysctls)
-
-The `--sysctl` option sets namespaced kernel parameters (sysctls) in the
-container. For example, to turn on IP forwarding in the container's
-network namespace, run this command:
-
-    $ docker run --sysctl net.ipv4.ip_forward=1 someimage
-
-Note:
-
-Not all sysctls are namespaced. Docker does not support changing sysctls
-inside a container that also modify the host system. As the kernel
-evolves we expect to see more sysctls become namespaced.
-
-See the definition of the `--sysctl` option above for the current list of
-supported sysctls.
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.com source material and internal work.
-June 2014, updated by Sven Dowideit
-July 2014, updated by Sven Dowideit
-November 2015, updated by Sally O'Malley
diff --git a/man/docker-save.1.md b/man/docker-save.1.md
deleted file mode 100644
index 1d1de8a1df..0000000000
--- a/man/docker-save.1.md
+++ /dev/null
@@ -1,45 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-save - Save one or more images to a tar archive (streamed to STDOUT by default)
-
-# SYNOPSIS
-**docker save**
-[**--help**]
-[**-o**|**--output**[=*OUTPUT*]]
-IMAGE [IMAGE...]
-
-# DESCRIPTION
-Produces a tarred repository to the standard output stream. The archive
-contains all parent layers, and all tags and versions, or the specified
-repo:tag.
-
-Stream to a file instead of STDOUT by using **-o**.
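-
-Because the archive is streamed to STDOUT, it can also be piped straight into
-a compressor. A minimal sketch (the gzip step and output filename are
-illustrative, not part of this page's examples):
-
-    $ docker save fedora:latest | gzip > fedora-latest.tar.gz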
- -# OPTIONS -**--help** - Print usage statement - -**-o**, **--output**="" - Write to a file, instead of STDOUT - -# EXAMPLES - -Save all fedora repository images to a fedora-all.tar and save the latest -fedora image to a fedora-latest.tar: - - $ docker save fedora > fedora-all.tar - $ docker save --output=fedora-latest.tar fedora:latest - $ ls -sh fedora-all.tar - 721M fedora-all.tar - $ ls -sh fedora-latest.tar - 367M fedora-latest.tar - -# See also -**docker-load(1)** to load an image from a tar archive on STDIN. - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -November 2014, updated by Sven Dowideit diff --git a/man/docker-search.1.md b/man/docker-search.1.md deleted file mode 100644 index ad8bbc78b2..0000000000 --- a/man/docker-search.1.md +++ /dev/null @@ -1,70 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-search - Search the Docker Hub for images - -# SYNOPSIS -**docker search** -[**-f**|**--filter**[=*[]*]] -[**--help**] -[**--limit**[=*LIMIT*]] -[**--no-trunc**] -TERM - -# DESCRIPTION - -Search Docker Hub for images that match the specified `TERM`. The table -of images returned displays the name, description (truncated by default), number -of stars awarded, whether the image is official, and whether it is automated. - -*Note* - Search queries will only return up to 25 results - -# OPTIONS - -**-f**, **--filter**=[] - Filter output based on these conditions: - - stars= - - is-automated=(true|false) - - is-official=(true|false) - -**--help** - Print usage statement - -**--limit**=*LIMIT* - Maximum returned search results. The default is 25. - -**--no-trunc**=*true*|*false* - Don't truncate output. The default is *false*. - -# EXAMPLES - -## Search Docker Hub for ranked images - -Search a registry for the term 'fedora' and only display those images -ranked 3 or higher: - - $ docker search --filter=stars=3 fedora - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - mattdm/fedora A basic Fedora image corresponding roughly... 50 - fedora (Semi) Official Fedora base image. 38 - mattdm/fedora-small A small Fedora image on which to build. Co... 8 - goldmann/wildfly A WildFly application server running on a ... 3 [OK] - -## Search Docker Hub for automated images - -Search Docker Hub for the term 'fedora' and only display automated images -ranked 1 or higher: - - $ docker search --filter=is-automated=true --filter=stars=1 fedora - NAME DESCRIPTION STARS OFFICIAL AUTOMATED - goldmann/wildfly A WildFly application server running on a ... 3 [OK] - tutum/fedora-20 Fedora 20 image with SSH access. For the r... 1 [OK] - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -April 2015, updated by Mary Anthony for v2 -April 2016, updated by Vincent Demeester - diff --git a/man/docker-start.1.md b/man/docker-start.1.md deleted file mode 100644 index c00b0a1668..0000000000 --- a/man/docker-start.1.md +++ /dev/null @@ -1,39 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-start - Start one or more containers - -# SYNOPSIS -**docker start** -[**-a**|**--attach**] -[**--detach-keys**[=*[]*]] -[**--help**] -[**-i**|**--interactive**] -CONTAINER [CONTAINER...] - -# DESCRIPTION - -Start one or more containers. 
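-
-For example, a minimal sketch of starting a stopped container and attaching to
-it interactively (the container name `my_container` is a placeholder):
-
-    $ docker start -a -i my_container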
-
-# OPTIONS
-**-a**, **--attach**=*true*|*false*
-   Attach container's STDOUT and STDERR and forward all signals to the
-   process. The default is *false*.
-
-**--detach-keys**=""
-   Override the key sequence for detaching a container. The format is a single
-   character `[a-Z]` or `ctrl-<value>` where `<value>` is one of: `a-z`, `@`,
-   `^`, `[`, `,` or `_`.
-
-**--help**
-  Print usage statement
-
-**-i**, **--interactive**=*true*|*false*
-   Attach container's STDIN. The default is *false*.
-
-# See also
-**docker-stop(1)** to stop a container.
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.com source material and internal work.
-June 2014, updated by Sven Dowideit
diff --git a/man/docker-stats.1.md b/man/docker-stats.1.md
deleted file mode 100644
index 41c4b722a5..0000000000
--- a/man/docker-stats.1.md
+++ /dev/null
@@ -1,43 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-stats - Display a live stream of one or more containers' resource usage statistics
-
-# SYNOPSIS
-**docker stats**
-[**-a**|**--all**]
-[**--help**]
-[**--no-stream**]
-[CONTAINER...]
-
-# DESCRIPTION
-
-Display a live stream of one or more containers' resource usage statistics
-
-# OPTIONS
-**-a**, **--all**=*true*|*false*
-   Show all containers. Only running containers are shown by default. The default is *false*.
-
-**--help**
-  Print usage statement
-
-**--no-stream**=*true*|*false*
-  Disable streaming stats and only pull the first result. The default is *false*.
-
-# EXAMPLES
-
-Running `docker stats` on all running containers:
-
-    $ docker stats
-    CONTAINER           CPU %               MEM USAGE / LIMIT     MEM %               NET I/O             BLOCK I/O
-    1285939c1fd3        0.07%               796 KiB / 64 MiB      1.21%               788 B / 648 B       3.568 MB / 512 KB
-    9c76f7834ae2        0.07%               2.746 MiB / 64 MiB    4.29%               1.266 KB / 648 B    12.4 MB / 0 B
-    d1ea048f04e4        0.03%               4.583 MiB / 64 MiB    6.30%               2.854 KB / 648 B    27.7 MB / 0 B
-
-Running `docker stats` on multiple containers by name and ID:
-
-    $ docker stats fervent_panini 5acfcb1b4fd1
-    CONTAINER           CPU %               MEM USAGE/LIMIT       MEM %               NET I/O
-    5acfcb1b4fd1        0.00%               115.2 MiB/1.045 GiB   11.03%              1.422 kB/648 B
-    fervent_panini      0.02%               11.08 MiB/1.045 GiB   1.06%               648 B/648 B
diff --git a/man/docker-stop.1.md b/man/docker-stop.1.md
deleted file mode 100644
index fa377c92c4..0000000000
--- a/man/docker-stop.1.md
+++ /dev/null
@@ -1,30 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-stop - Stop a container by sending SIGTERM and then SIGKILL after a grace period
-
-# SYNOPSIS
-**docker stop**
-[**--help**]
-[**-t**|**--time**[=*10*]]
-CONTAINER [CONTAINER...]
-
-# DESCRIPTION
-Stop a container (send SIGTERM, and then SIGKILL after a grace period)
-
-# OPTIONS
-**--help**
-  Print usage statement
-
-**-t**, **--time**=*10*
-   Number of seconds to wait for the container to stop before killing it. The default is 10 seconds.
-
-# See also
-**docker-start(1)** to restart a stopped container.
-
-# HISTORY
-April 2014, Originally compiled by William Henry (whenry at redhat dot com)
-based on docker.com source material and internal work.
-June 2014, updated by Sven Dowideit
diff --git a/man/docker-tag.1.md b/man/docker-tag.1.md
deleted file mode 100644
index 9bb252aef0..0000000000
--- a/man/docker-tag.1.md
+++ /dev/null
@@ -1,76 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-tag - Tag an image into a repository
-
-# SYNOPSIS
-**docker tag**
-[**--help**]
-NAME[:TAG] NAME[:TAG]
-
-# DESCRIPTION
-Assigns a new alias to an image in a registry.
An alias refers to the -entire image name including the optional `TAG` after the ':'. - -# "OPTIONS" -**--help** - Print usage statement. - -**NAME** - The image name which is made up of slash-separated name components, - optionally prefixed by a registry hostname. The hostname must comply with - standard DNS rules, but may not contain underscores. If a hostname is - present, it may optionally be followed by a port number in the format - `:8080`. If not present, the command uses Docker's public registry located at - `registry-1.docker.io` by default. Name components may contain lowercase - characters, digits and separators. A separator is defined as a period, one or - two underscores, or one or more dashes. A name component may not start or end - with a separator. - -**TAG** - The tag assigned to the image to version and distinguish images with the same - name. The tag name may contain lowercase and uppercase characters, digits, - underscores, periods and dashes. A tag name may not start with a period or a - dash and may contain a maximum of 128 characters. - -# EXAMPLES - -## Tagging an image referenced by ID - -To tag a local image with ID "0e5574283393" into the "fedora" repository with -"version1.0": - - docker tag 0e5574283393 fedora/httpd:version1.0 - -## Tagging an image referenced by Name - -To tag a local image with name "httpd" into the "fedora" repository with -"version1.0": - - docker tag httpd fedora/httpd:version1.0 - -Note that since the tag name is not specified, the alias is created for an -existing local version `httpd:latest`. - -## Tagging an image referenced by Name and Tag - -To tag a local image with name "httpd" and tag "test" into the "fedora" -repository with "version1.0.test": - - docker tag httpd:test fedora/httpd:version1.0.test - -## Tagging an image for a private repository - -To push an image to a private registry and not the central Docker -registry you must tag it with the registry hostname and port (if needed). - - docker tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0 - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. -June 2014, updated by Sven Dowideit -July 2014, updated by Sven Dowideit -April 2015, updated by Mary Anthony for v2 -June 2015, updated by Sally O'Malley diff --git a/man/docker-top.1.md b/man/docker-top.1.md deleted file mode 100644 index a666f7cd37..0000000000 --- a/man/docker-top.1.md +++ /dev/null @@ -1,36 +0,0 @@ -% DOCKER(1) Docker User Manuals -% Docker Community -% JUNE 2014 -# NAME -docker-top - Display the running processes of a container - -# SYNOPSIS -**docker top** -[**--help**] -CONTAINER [ps OPTIONS] - -# DESCRIPTION - -Display the running process of the container. ps-OPTION can be any of the options you would pass to a Linux ps command. - -All displayed information is from host's point of view. - -# OPTIONS -**--help** - Print usage statement - -# EXAMPLES - -Run **docker top** with the ps option of -x: - - $ docker top 8601afda2b -x - PID TTY STAT TIME COMMAND - 16623 ? Ss 0:00 sleep 99999 - - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) -based on docker.com source material and internal work. 
-June 2014, updated by Sven Dowideit
-June 2015, updated by Ma Shimiao
-December 2015, updated by Pavel Pospisil
diff --git a/man/docker-unpause.1.md b/man/docker-unpause.1.md
deleted file mode 100644
index 466e1bb1a3..0000000000
--- a/man/docker-unpause.1.md
+++ /dev/null
@@ -1,27 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-unpause - Unpause all processes within a container
-
-# SYNOPSIS
-**docker unpause**
-CONTAINER [CONTAINER...]
-
-# DESCRIPTION
-
-The `docker unpause` command uses the cgroups freezer to un-suspend all
-processes in a container.
-
-See the [cgroups freezer documentation](https://www.kernel.org/doc/Documentation/cgroups/freezer-subsystem.txt)
-for further details.
-
-# OPTIONS
-There are no available options.
-
-# See also
-**docker-pause(1)** to pause all processes within a container.
-
-# HISTORY
-June 2014, updated by Sven Dowideit
diff --git a/man/docker-update.1.md b/man/docker-update.1.md
deleted file mode 100644
index 87849ef8d5..0000000000
--- a/man/docker-update.1.md
+++ /dev/null
@@ -1,108 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2014
-# NAME
-docker-update - Update configuration of one or more containers
-
-# SYNOPSIS
-**docker update**
-[**--blkio-weight**[=*[BLKIO-WEIGHT]*]]
-[**--cpu-shares**[=*0*]]
-[**--cpu-period**[=*0*]]
-[**--cpu-quota**[=*0*]]
-[**--cpuset-cpus**[=*CPUSET-CPUS*]]
-[**--cpuset-mems**[=*CPUSET-MEMS*]]
-[**--help**]
-[**--kernel-memory**[=*KERNEL-MEMORY*]]
-[**-m**|**--memory**[=*MEMORY*]]
-[**--memory-reservation**[=*MEMORY-RESERVATION*]]
-[**--memory-swap**[=*MEMORY-SWAP*]]
-[**--restart**[=*""*]]
-CONTAINER [CONTAINER...]
-
-# DESCRIPTION
-
-The `docker update` command dynamically updates container configuration.
-You can use this command to prevent containers from consuming too many
-resources from their Docker host. With a single command, you can place
-limits on a single container or on many. To specify more than one container,
-provide a space-separated list of container names or IDs.
-
-With the exception of the `--kernel-memory` value, you can specify these
-options on a running or a stopped container. You can only update
-`--kernel-memory` on a stopped container. When you run `docker update` on a
-stopped container, the container uses the new values the next time you
-restart it.
-
-Another configuration you can change with this command is the restart policy;
-the new restart policy takes effect instantly after you run `docker update`
-on a container.
-
-# OPTIONS
-**--blkio-weight**=0
-   Block IO weight (relative weight) accepts a weight value between 10 and 1000.
-
-**--cpu-shares**=0
-   CPU shares (relative weight)
-
-**--cpu-period**=0
-   Limit the CPU CFS (Completely Fair Scheduler) period
-
-**--cpu-quota**=0
-   Limit the CPU CFS (Completely Fair Scheduler) quota
-
-**--cpuset-cpus**=""
-   CPUs in which to allow execution (0-3, 0,1)
-
-**--cpuset-mems**=""
-   Memory nodes (MEMs) in which to allow execution (0-3, 0,1). Only effective on NUMA systems.
-
-**--help**
-   Print usage statement
-
-**--kernel-memory**=""
-   Kernel memory limit (format: `<number>[<unit>]`, where unit = b, k, m or g)
-
-   Note that you cannot update kernel memory on a running container; it can
-only be updated on a stopped container, and takes effect the next time the
-container is started.
-
-**-m**, **--memory**=""
-   Memory limit (format: `<number>[<unit>]`, where unit = b, k, m or g)
-
-**--memory-reservation**=""
-   Memory soft limit (format: `<number>[<unit>]`, where unit = b, k, m or g)
-
-**--memory-swap**=""
-   Total memory limit (memory + swap)
-
-**--restart**=""
-   Restart policy to apply when a container exits (no, on-failure[:max-retry], always, unless-stopped).
-
-# EXAMPLES
-
-The following sections illustrate ways to use this command.
-
-### Update a container with cpu-shares=512
-
-To limit a container's cpu-shares to 512, first identify the container
-name or ID. You can use **docker ps** to find these values. You can also
-use the ID returned from the **docker run** command. Then, do the following:
-
-```bash
-$ docker update --cpu-shares 512 abebf7571666
-```
-
-### Update a container with cpu-shares and memory
-
-To update multiple resource configurations for multiple containers:
-
-```bash
-$ docker update --cpu-shares 512 -m 300M abebf7571666 hopeful_morse
-```
-
-### Update a container's restart policy
-
-To update the restart policy for one or more containers:
-
-```bash
-$ docker update --restart=on-failure:3 abebf7571666 hopeful_morse
-```
diff --git a/man/docker-version.1.md b/man/docker-version.1.md
deleted file mode 100644
index 04ae3464f8..0000000000
--- a/man/docker-version.1.md
+++ /dev/null
@@ -1,62 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% Docker Community
-% JUNE 2015
-# NAME
-docker-version - Show the Docker version information.
-
-# SYNOPSIS
-**docker version**
-[**--help**]
-[**-f**|**--format**[=*FORMAT*]]
-
-# DESCRIPTION
-This command displays version information for both the Docker client and
-daemon.
-
-# OPTIONS
-**--help**
-  Print usage statement
-
-**-f**, **--format**=""
-  Format the output using the given Go template.
-
-# EXAMPLES
-
-## Display Docker version information
-
-The default output:
-
-    $ docker version
-    Client:
-     Version:      1.8.0
-     API version:  1.20
-     Go version:   go1.4.2
-     Git commit:   f5bae0a
-     Built:        Tue Jun 23 17:56:00 UTC 2015
-     OS/Arch:      linux/amd64
-
-    Server:
-     Version:      1.8.0
-     API version:  1.20
-     Go version:   go1.4.2
-     Git commit:   f5bae0a
-     Built:        Tue Jun 23 17:56:00 UTC 2015
-     OS/Arch:      linux/amd64
-
-Get the server version:
-
-    $ docker version --format '{{.Server.Version}}'
-    1.8.0
-
-Dump raw data:
-
-To view all available fields, you can use the format `{{json .}}`.
-
-    $ docker version --format '{{json .}}'
-    {"Client":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"},"ServerOK":true,"Server":{"Version":"1.8.0","ApiVersion":"1.20","GitCommit":"f5bae0a","GoVersion":"go1.4.2","Os":"linux","Arch":"amd64","KernelVersion":"3.13.2-gentoo","BuildTime":"Tue Jun 23 17:56:00 UTC 2015"}}
-
-# HISTORY
-June 2014, updated by Sven Dowideit
-June 2015, updated by John Howard
-June 2015, updated by Patrick Hemmer
diff --git a/man/docker.1.md b/man/docker.1.md
deleted file mode 100644
index cf91741c7d..0000000000
--- a/man/docker.1.md
+++ /dev/null
@@ -1,237 +0,0 @@
-% DOCKER(1) Docker User Manuals
-% William Henry
-% APRIL 2014
-# NAME
-docker \- Docker image and container command line interface
-
-# SYNOPSIS
-**docker** [OPTIONS] COMMAND [arg...]
-
-**docker** daemon [--help|...]
-
-**docker** [--help|-v|--version]
-
-# DESCRIPTION
-**docker** is a client for interacting with the daemon (see **dockerd(8)**)
-through the CLI.
-
-The Docker CLI has over 30 commands. The commands are listed below and each has
-its own man page explaining usage and arguments.
-
-To see the man page for a command, run **man docker-COMMAND**.
-
-# OPTIONS
-**--help**
-  Print usage statement
-
-**--config**=""
-  Specifies the location of the Docker client configuration files. The default is '~/.docker'.
-
-**-D**, **--debug**=*true*|*false*
-  Enable debug mode. Default is false.
-
-**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host]:[port][path] to bind or
-unix://[/path/to/socket] to use.
-  The socket(s) to bind to in daemon mode specified using one or more
-  tcp://host:port/path, unix:///path/to/socket, fd://* or fd://socketfd.
-  If the tcp port is not specified, then it will default to either `2375` when
-  `--tls` is off, or `2376` when `--tls` is on or `--tlsverify` is specified.
-
-**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*"
-  Set the logging level. Default is `info`.
-
-**--tls**=*true*|*false*
-  Use TLS; implied by --tlsverify. Default is false.
-
-**--tlscacert**=*~/.docker/ca.pem*
-  Trust certs signed only by this CA.
-
-**--tlscert**=*~/.docker/cert.pem*
-  Path to TLS certificate file.
-
-**--tlskey**=*~/.docker/key.pem*
-  Path to TLS key file.
-
-**--tlsverify**=*true*|*false*
-  Use TLS and verify the remote (daemon: verify client, client: verify daemon).
-  Default is false.
-
-**-v**, **--version**=*true*|*false*
-  Print version information and quit. Default is false.
-
-# COMMANDS
-**attach**
-  Attach to a running container
-  See **docker-attach(1)** for full documentation on the **attach** command.
-
-**build**
-  Build an image from a Dockerfile
-  See **docker-build(1)** for full documentation on the **build** command.
-
-**commit**
-  Create a new image from a container's changes
-  See **docker-commit(1)** for full documentation on the **commit** command.
-
-**cp**
-  Copy files/folders between a container and the local filesystem
-  See **docker-cp(1)** for full documentation on the **cp** command.
-
-**create**
-  Create a new container
-  See **docker-create(1)** for full documentation on the **create** command.
-
-**diff**
-  Inspect changes on a container's filesystem
-  See **docker-diff(1)** for full documentation on the **diff** command.
-
-**events**
-  Get real time events from the server
-  See **docker-events(1)** for full documentation on the **events** command.
-
-**exec**
-  Run a command in a running container
-  See **docker-exec(1)** for full documentation on the **exec** command.
-
-**export**
-  Stream the contents of a container as a tar archive
-  See **docker-export(1)** for full documentation on the **export** command.
-
-**history**
-  Show the history of an image
-  See **docker-history(1)** for full documentation on the **history** command.
-
-**images**
-  List images
-  See **docker-images(1)** for full documentation on the **images** command.
-
-**import**
-  Create a new filesystem image from the contents of a tarball
-  See **docker-import(1)** for full documentation on the **import** command.
-
-**info**
-  Display system-wide information
-  See **docker-info(1)** for full documentation on the **info** command.
-
-**inspect**
-  Return low-level information on a container or image
-  See **docker-inspect(1)** for full documentation on the **inspect** command.
-
-**kill**
-  Kill a running container (which includes the wrapper process and everything
-inside it)
-  See **docker-kill(1)** for full documentation on the **kill** command.
-
-**load**
-  Load an image from a tar archive
-  See **docker-load(1)** for full documentation on the **load** command.
- -**login** - Log in to a Docker Registry - See **docker-login(1)** for full documentation on the **login** command. - -**logout** - Log the user out of a Docker Registry - See **docker-logout(1)** for full documentation on the **logout** command. - -**logs** - Fetch the logs of a container - See **docker-logs(1)** for full documentation on the **logs** command. - -**pause** - Pause all processes within a container - See **docker-pause(1)** for full documentation on the **pause** command. - -**port** - Lookup the public-facing port which is NAT-ed to PRIVATE_PORT - See **docker-port(1)** for full documentation on the **port** command. - -**ps** - List containers - See **docker-ps(1)** for full documentation on the **ps** command. - -**pull** - Pull an image or a repository from a Docker Registry - See **docker-pull(1)** for full documentation on the **pull** command. - -**push** - Push an image or a repository to a Docker Registry - See **docker-push(1)** for full documentation on the **push** command. - -**rename** - Rename a container. - See **docker-rename(1)** for full documentation on the **rename** command. - -**restart** - Restart a container - See **docker-restart(1)** for full documentation on the **restart** command. - -**rm** - Remove one or more containers - See **docker-rm(1)** for full documentation on the **rm** command. - -**rmi** - Remove one or more images - See **docker-rmi(1)** for full documentation on the **rmi** command. - -**run** - Run a command in a new container - See **docker-run(1)** for full documentation on the **run** command. - -**save** - Save an image to a tar archive - See **docker-save(1)** for full documentation on the **save** command. - -**search** - Search for an image in the Docker index - See **docker-search(1)** for full documentation on the **search** command. - -**start** - Start a container - See **docker-start(1)** for full documentation on the **start** command. - -**stats** - Display a live stream of one or more containers' resource usage statistics - See **docker-stats(1)** for full documentation on the **stats** command. - -**stop** - Stop a container - See **docker-stop(1)** for full documentation on the **stop** command. - -**tag** - Tag an image into a repository - See **docker-tag(1)** for full documentation on the **tag** command. - -**top** - Lookup the running processes of a container - See **docker-top(1)** for full documentation on the **top** command. - -**unpause** - Unpause all processes within a container - See **docker-unpause(1)** for full documentation on the **unpause** command. - -**version** - Show the Docker version information - See **docker-version(1)** for full documentation on the **version** command. - -**wait** - Block until a container stops, then print its exit code - See **docker-wait(1)** for full documentation on the **wait** command. - - -# RUNTIME EXECUTION OPTIONS - -Use the **--exec-opt** flags to specify options to the execution driver. -The following options are available: - -#### native.cgroupdriver -Specifies the management of the container's `cgroups`. You can specify `cgroupfs` -or `systemd`. If you specify `systemd` and it is not available, the system errors -out. - -#### Client -For specific client examples please see the man page for the specific Docker -command. For example: - - man docker-run - -# HISTORY -April 2014, Originally compiled by William Henry (whenry at redhat dot com) based on docker.com source material and internal work. 
diff --git a/man/dockerd.8.md b/man/dockerd.8.md deleted file mode 100644 index a098a708a3..0000000000 --- a/man/dockerd.8.md +++ /dev/null @@ -1,605 +0,0 @@ -% DOCKER(8) Docker User Manuals -% Shishir Mahajan -% SEPTEMBER 2015 -# NAME -dockerd - Enable daemon mode - -# SYNOPSIS -**dockerd** -[**--add-runtime**[=*[]*]] -[**--api-cors-header**=[=*API-CORS-HEADER*]] -[**--authorization-plugin**[=*[]*]] -[**-b**|**--bridge**[=*BRIDGE*]] -[**--bip**[=*BIP*]] -[**--cgroup-parent**[=*[]*]] -[**--cluster-store**[=*[]*]] -[**--cluster-advertise**[=*[]*]] -[**--cluster-store-opt**[=*map[]*]] -[**--config-file**[=*/etc/docker/daemon.json*]] -[**--containerd**[=*SOCKET-PATH*]] -[**-D**|**--debug**] -[**--default-gateway**[=*DEFAULT-GATEWAY*]] -[**--default-gateway-v6**[=*DEFAULT-GATEWAY-V6*]] -[**--default-ulimit**[=*[]*]] -[**--disable-legacy-registry**] -[**--dns**[=*[]*]] -[**--dns-opt**[=*[]*]] -[**--dns-search**[=*[]*]] -[**--exec-opt**[=*[]*]] -[**--exec-root**[=*/var/run/docker*]] -[**--fixed-cidr**[=*FIXED-CIDR*]] -[**--fixed-cidr-v6**[=*FIXED-CIDR-V6*]] -[**-G**|**--group**[=*docker*]] -[**-g**|**--graph**[=*/var/lib/docker*]] -[**-H**|**--host**[=*[]*]] -[**--help**] -[**--icc**[=*true*]] -[**--insecure-registry**[=*[]*]] -[**--ip**[=*0.0.0.0*]] -[**--ip-forward**[=*true*]] -[**--ip-masq**[=*true*]] -[**--iptables**[=*true*]] -[**--ipv6**] -[**--isolation**[=*default*]] -[**-l**|**--log-level**[=*info*]] -[**--label**[=*[]*]] -[**--live-restore**[=*false*]] -[**--log-driver**[=*json-file*]] -[**--log-opt**[=*map[]*]] -[**--mtu**[=*0*]] -[**--max-concurrent-downloads**[=*3*]] -[**--max-concurrent-uploads**[=*5*]] -[**-p**|**--pidfile**[=*/var/run/docker.pid*]] -[**--raw-logs**] -[**--registry-mirror**[=*[]*]] -[**-s**|**--storage-driver**[=*STORAGE-DRIVER*]] -[**--selinux-enabled**] -[**--storage-opt**[=*[]*]] -[**--swarm-default-advertise-addr**[=*IP|INTERFACE*]] -[**--tls**] -[**--tlscacert**[=*~/.docker/ca.pem*]] -[**--tlscert**[=*~/.docker/cert.pem*]] -[**--tlskey**[=*~/.docker/key.pem*]] -[**--tlsverify**] -[**--userland-proxy**[=*true*]] -[**--userns-remap**[=*default*]] - -# DESCRIPTION -**dockerd** is used for starting the Docker daemon(i.e., to command the daemon to manage images, -containers etc.) So **dockerd** is a server, as a daemon. - -To run the Docker daemon you can specify **dockerd**. -You can check the daemon options using **dockerd --help**. -Daemon options should be specified after the **dockerd** keyword in the following -format. - -**dockerd [OPTIONS]** - -# OPTIONS - -**--add-runtime**=[] - Set additional OCI compatible runtime. - -**--api-cors-header**="" - Set CORS headers in the remote API. Default is cors disabled. Give urls like "http://foo, http://bar, ...". Give "*" to allow all. - -**--authorization-plugin**="" - Set authorization plugins to load - -**-b**, **--bridge**="" - Attach containers to a pre\-existing network bridge; use 'none' to disable container networking - -**--bip**="" - Use the provided CIDR notation address for the dynamically created bridge (docker0); Mutually exclusive of \-b - -**--cgroup-parent**="" - Set parent cgroup for all containers. Default is "/docker" for fs cgroup driver and "system.slice" for systemd cgroup driver. - -**--cluster-store**="" - URL of the distributed storage backend - -**--cluster-advertise**="" - Specifies the 'host:port' or `interface:port` combination that this particular - daemon instance should use when advertising itself to the cluster. The daemon - is reached through this value. 
- -**--cluster-store-opt**="" - Specifies options for the Key/Value store. - -**--config-file**="/etc/docker/daemon.json" - Specifies the JSON file path to load the configuration from. - -**--containerd**="" - Path to containerd socket. - -**-D**, **--debug**=*true*|*false* - Enable debug mode. Default is false. - -**--default-gateway**="" - IPv4 address of the container default gateway; this address must be part of the bridge subnet (which is defined by \-b or \--bip) - -**--default-gateway-v6**="" - IPv6 address of the container default gateway - -**--default-ulimit**=[] - Default ulimits for containers. - -**--disable-legacy-registry**=*true*|*false* - Disable contacting legacy registries - -**--dns**="" - Force Docker to use specific DNS servers - -**--dns-opt**="" - DNS options to use. - -**--dns-search**=[] - DNS search domains to use. - -**--exec-opt**=[] - Set runtime execution options. See RUNTIME EXECUTION OPTIONS. - -**--exec-root**="" - Path to use as the root of the Docker execution state files. Default is `/var/run/docker`. - -**--fixed-cidr**="" - IPv4 subnet for fixed IPs (e.g., 10.20.0.0/16); this subnet must be nested in the bridge subnet (which is defined by \-b or \-\-bip) - -**--fixed-cidr-v6**="" - IPv6 subnet for global IPv6 addresses (e.g., 2a00:1450::/64) - -**-G**, **--group**="" - Group to assign the unix socket specified by -H when running in daemon mode. - use '' (the empty string) to disable setting of a group. Default is `docker`. - -**-g**, **--graph**="" - Path to use as the root of the Docker runtime. Default is `/var/lib/docker`. - -**-H**, **--host**=[*unix:///var/run/docker.sock*]: tcp://[host:port] to bind or -unix://[/path/to/socket] to use. - The socket(s) to bind to in daemon mode specified using one or more - tcp://host:port, unix:///path/to/socket, fd://* or fd://socketfd. - -**--help** - Print usage statement - -**--icc**=*true*|*false* - Allow unrestricted inter\-container and Docker daemon host communication. If disabled, containers can still be linked together using the **--link** option (see **docker-run(1)**). Default is true. - -**--insecure-registry**=[] - Enable insecure registry communication, i.e., enable un-encrypted and/or untrusted communication. - - List of insecure registries can contain an element with CIDR notation to specify a whole subnet. Insecure registries accept HTTP and/or accept HTTPS with certificates from unknown CAs. - - Enabling `--insecure-registry` is useful when running a local registry. However, because its use creates security vulnerabilities it should ONLY be enabled for testing purposes. For increased security, users should add their CA to their system's list of trusted CAs instead of using `--insecure-registry`. - -**--ip**="" - Default IP address to use when binding container ports. Default is `0.0.0.0`. - -**--ip-forward**=*true*|*false* - Enables IP forwarding on the Docker host. The default is `true`. This flag interacts with the IP forwarding setting on your host system's kernel. If your system has IP forwarding disabled, this setting enables it. If your system has IP forwarding enabled, setting this flag to `--ip-forward=false` has no effect. - - This setting will also enable IPv6 forwarding if you have both `--ip-forward=true` and `--fixed-cidr-v6` set. Note that this may reject Router Advertisements and interfere with the host's existing IPv6 configuration. For more information, please consult the documentation about "Advanced Networking - IPv6". 
- -**--ip-masq**=*true*|*false* - Enable IP masquerading for bridge's IP range. Default is true. - -**--iptables**=*true*|*false* - Enable Docker's addition of iptables rules. Default is true. - -**--ipv6**=*true*|*false* - Enable IPv6 support. Default is false. Docker will create an IPv6-enabled bridge with address fe80::1 which will allow you to create IPv6-enabled containers. Use together with `--fixed-cidr-v6` to provide globally routable IPv6 addresses. IPv6 forwarding will be enabled if not used with `--ip-forward=false`. This may collide with your host's current IPv6 settings. For more information please consult the documentation about "Advanced Networking - IPv6". - -**--isolation**="*default*" - Isolation specifies the type of isolation technology used by containers. Note -that the default on Windows server is `process`, and the default on Windows client -is `hyperv`. Linux only supports `default`. - -**-l**, **--log-level**="*debug*|*info*|*warn*|*error*|*fatal*" - Set the logging level. Default is `info`. - -**--label**="[]" - Set key=value labels to the daemon (displayed in `docker info`) - -**--live-restore**=*false* - Enable live restore of running containers when the daemon starts so that they are not restarted. - -**--log-driver**="*json-file*|*syslog*|*journald*|*gelf*|*fluentd*|*awslogs*|*splunk*|*etwlogs*|*gcplogs*|*none*" - Default driver for container logs. Default is `json-file`. - **Warning**: `docker logs` command works only for `json-file` logging driver. - -**--log-opt**=[] - Logging driver specific options. - -**--mtu**=*0* - Set the containers network mtu. Default is `0`. - -**--max-concurrent-downloads**=*3* - Set the max concurrent downloads for each pull. Default is `3`. - -**--max-concurrent-uploads**=*5* - Set the max concurrent uploads for each push. Default is `5`. - -**-p**, **--pidfile**="" - Path to use for daemon PID file. Default is `/var/run/docker.pid` - -**--raw-logs** -Output daemon logs in full timestamp format without ANSI coloring. If this flag is not set, -the daemon outputs condensed, colorized logs if a terminal is detected, or full ("raw") -output otherwise. - -**--registry-mirror**=*://* - Prepend a registry mirror to be used for image pulls. May be specified multiple times. - -**-s**, **--storage-driver**="" - Force the Docker runtime to use a specific storage driver. - -**--selinux-enabled**=*true*|*false* - Enable selinux support. Default is false. SELinux does not presently support either of the overlay storage drivers. - -**--storage-opt**=[] - Set storage driver options. See STORAGE DRIVER OPTIONS. - -**--swarm-default-advertise-addr**=*IP|INTERFACE* - Set default address or interface for swarm to advertise as its externally-reachable address to other cluster - members. This can be a hostname, an IP address, or an interface such as `eth0`. A port cannot be specified with - this option. - -**--tls**=*true*|*false* - Use TLS; implied by --tlsverify. Default is false. - -**--tlscacert**=*~/.docker/ca.pem* - Trust certs signed only by this CA. - -**--tlscert**=*~/.docker/cert.pem* - Path to TLS certificate file. - -**--tlskey**=*~/.docker/key.pem* - Path to TLS key file. - -**--tlsverify**=*true*|*false* - Use TLS and verify the remote (daemon: verify client, client: verify daemon). - Default is false. - -**--userland-proxy**=*true*|*false* - Rely on a userland proxy implementation for inter-container and outside-to-container loopback communications. Default is true. 
- -**--userns-remap**=*default*|*uid:gid*|*user:group*|*user*|*uid* - Enable user namespaces for containers on the daemon. Specifying "default" will cause a new user and group to be created to handle UID and GID range remapping for the user namespace mappings used for contained processes. Specifying a user (or uid) and optionally a group (or gid) will cause the daemon to lookup the user and group's subordinate ID ranges for use as the user namespace mappings for contained processes. - -# STORAGE DRIVER OPTIONS - -Docker uses storage backends (known as "graphdrivers" in the Docker -internals) to create writable containers from images. Many of these -backends use operating system level technologies and can be -configured. - -Specify options to the storage backend with **--storage-opt** flags. The -backends that currently take options are *devicemapper*, *zfs* and *btrfs*. -Options for *devicemapper* are prefixed with *dm*, options for *zfs* -start with *zfs* and options for *btrfs* start with *btrfs*. - -Specifically for devicemapper, the default is a "loopback" model which -requires no pre-configuration, but is extremely inefficient. Do not -use it in production. - -To make the best use of Docker with the devicemapper backend, you must -have a recent version of LVM. Use `lvm` to create a thin pool; for -more information see `man lvmthin`. Then, use `--storage-opt -dm.thinpooldev` to tell the Docker engine to use that pool for -allocating images and container snapshots. - -## Devicemapper options - -#### dm.thinpooldev - -Specifies a custom block storage device to use for the thin pool. - -If using a block device for device mapper storage, it is best to use `lvm` -to create and manage the thin-pool volume. This volume is then handed to Docker -to exclusively create snapshot volumes needed for images and containers. - -Managing the thin-pool outside of Engine makes for the most feature-rich -method of having Docker utilize device mapper thin provisioning as the -backing storage for Docker containers. The highlights of the lvm-based -thin-pool management feature include: automatic or interactive thin-pool -resize support, dynamically changing thin-pool features, automatic thinp -metadata checking when lvm activates the thin-pool, etc. - -As a fallback if no thin pool is provided, loopback files are -created. Loopback is very slow, but can be used without any -pre-configuration of storage. It is strongly recommended that you do -not use loopback in production. Ensure your Engine daemon has a -`--storage-opt dm.thinpooldev` argument provided. - -Example use: - - $ dockerd \ - --storage-opt dm.thinpooldev=/dev/mapper/thin-pool - -#### dm.basesize - -Specifies the size to use when creating the base device, which limits -the size of images and containers. The default value is 10G. Note, -thin devices are inherently "sparse", so a 10G device which is mostly -empty doesn't use 10 GB of space on the pool. However, the filesystem -will use more space for base images the larger the device -is. - -The base device size can be increased at daemon restart which will allow -all future images and containers (based on those new images) to be of the -new base device size. - -Example use: `dockerd --storage-opt dm.basesize=50G` - -This will increase the base device size to 50G. The Docker daemon will throw an -error if existing base device size is larger than 50G. A user can use -this option to expand the base device size however shrinking is not permitted. 
- -This value affects the system-wide "base" empty filesystem that may already -be initialized and inherited by pulled images. Typically, a change to this -value requires additional steps to take effect: - - $ sudo service docker stop - $ sudo rm -rf /var/lib/docker - $ sudo service docker start - -Example use: `dockerd --storage-opt dm.basesize=20G` - -#### dm.fs - -Specifies the filesystem type to use for the base device. The -supported options are `ext4` and `xfs`. The default is `ext4`. - -Example use: `dockerd --storage-opt dm.fs=xfs` - -#### dm.mkfsarg - -Specifies extra mkfs arguments to be used when creating the base device. - -Example use: `dockerd --storage-opt "dm.mkfsarg=-O ^has_journal"` - -#### dm.mountopt - -Specifies extra mount options used when mounting the thin devices. - -Example use: `dockerd --storage-opt dm.mountopt=nodiscard` - -#### dm.use_deferred_removal - -Enables use of deferred device removal if `libdm` and the kernel driver -support the mechanism. - -Deferred device removal means that if device is busy when devices are -being removed/deactivated, then a deferred removal is scheduled on -device. And devices automatically go away when last user of the device -exits. - -For example, when a container exits, its associated thin device is removed. If -that device has leaked into some other mount namespace and can't be removed, -the container exit still succeeds and this option causes the system to schedule -the device for deferred removal. It does not wait in a loop trying to remove a busy -device. - -Example use: `dockerd --storage-opt dm.use_deferred_removal=true` - -#### dm.use_deferred_deletion - -Enables use of deferred device deletion for thin pool devices. By default, -thin pool device deletion is synchronous. Before a container is deleted, the -Docker daemon removes any associated devices. If the storage driver can not -remove a device, the container deletion fails and daemon returns. - -`Error deleting container: Error response from daemon: Cannot destroy container` - -To avoid this failure, enable both deferred device deletion and deferred -device removal on the daemon. - -`dockerd --storage-opt dm.use_deferred_deletion=true --storage-opt dm.use_deferred_removal=true` - -With these two options enabled, if a device is busy when the driver is -deleting a container, the driver marks the device as deleted. Later, when the -device isn't in use, the driver deletes it. - -In general it should be safe to enable this option by default. It will help -when unintentional leaking of mount point happens across multiple mount -namespaces. - -#### dm.loopdatasize - -**Note**: This option configures devicemapper loopback, which should not be used in production. - -Specifies the size to use when creating the loopback file for the -"data" device which is used for the thin pool. The default size is -100G. The file is sparse, so it will not initially take up -this much space. - -Example use: `dockerd --storage-opt dm.loopdatasize=200G` - -#### dm.loopmetadatasize - -**Note**: This option configures devicemapper loopback, which should not be used in production. - -Specifies the size to use when creating the loopback file for the -"metadata" device which is used for the thin pool. The default size -is 2G. The file is sparse, so it will not initially take up -this much space. - -Example use: `dockerd --storage-opt dm.loopmetadatasize=4G` - -#### dm.datadev - -(Deprecated, use `dm.thinpooldev`) - -Specifies a custom blockdevice to use for data for a -Docker-managed thin pool. 
It is better to use `dm.thinpooldev`; see
-its documentation above for a discussion of the advantages.
-
-#### dm.metadatadev
-
-(Deprecated, use `dm.thinpooldev`)
-
-Specifies a custom blockdevice to use for metadata for a
-Docker-managed thin pool. See `dm.datadev` for why this is
-deprecated.
-
-#### dm.blocksize
-
-Specifies a custom blocksize to use for the thin pool. The default
-blocksize is 64K.
-
-Example use: `dockerd --storage-opt dm.blocksize=512K`
-
-#### dm.blkdiscard
-
-Enables or disables the use of `blkdiscard` when removing devicemapper
-devices. This is disabled by default due to the additional latency,
-but as a special case with loopback devices it will be enabled, in
-order to re-sparsify the loopback file on image/container removal.
-
-Disabling this on loopback can lead to *much* faster container removal
-times, but it also prevents the space used in the `/var/lib/docker` directory
-from being returned to the system for other use when containers are
-removed.
-
-Example use: `dockerd --storage-opt dm.blkdiscard=false`
-
-#### dm.override_udev_sync_check
-
-By default, the devicemapper backend attempts to synchronize with the
-`udev` device manager for the Linux kernel. This option allows
-disabling that synchronization, to continue even though the
-configuration may be buggy.
-
-To view the `udev` sync support of a Docker daemon that is using the
-`devicemapper` driver, run:
-
-    $ docker info
-    [...]
-    Udev Sync Supported: true
-    [...]
-
-When `udev` sync support is `true`, then `devicemapper` and `udev` can
-coordinate the activation and deactivation of devices for containers.
-
-When `udev` sync support is `false`, a race condition occurs between
-the `devicemapper` and `udev` during create and cleanup. The race
-condition results in errors and failures. (For information on these
-failures, see
-[docker#4036](https://github.com/docker/docker/issues/4036))
-
-To allow the `docker` daemon to start, regardless of whether `udev` sync is
-`false`, set `dm.override_udev_sync_check` to true:
-
-    $ dockerd --storage-opt dm.override_udev_sync_check=true
-
-When this value is `true`, the driver continues and simply warns you that
-errors are happening.
-
-**Note**: The ideal is to pursue a `docker` daemon and environment
-that does support synchronizing with `udev`. For further discussion on
-this topic, see
-[docker#4036](https://github.com/docker/docker/issues/4036).
-Otherwise, set this flag for migrating existing Docker daemons to a
-daemon with a supported environment.
-
-#### dm.min_free_space
-
-Specifies the minimum free space percentage in a thin pool required for new
-device creation to succeed. This check applies to both free data space and
-free metadata space. Valid values are from 0% to 99%. A value of 0% disables
-the free space checking logic. If the user does not specify a value for this
-option, the Engine uses a default value of 10%.
-
-Whenever a new thin pool device is created (during `docker pull` or during
-container creation), the Engine checks if the minimum free space is
-available. If the space is unavailable, then device creation fails and any
-relevant `docker` operation fails.
-
-To recover from this error, you must create more free space in the thin pool.
-You can create free space by deleting some images and containers from the
-thin pool. You can also add more storage to the thin pool.
## ZFS options

#### zfs.fsname

Sets the ZFS filesystem under which Docker creates its own datasets. By
default, Docker picks up the ZFS filesystem where the Docker graph
(`/var/lib/docker`) is located.

Example use: `dockerd -s zfs --storage-opt zfs.fsname=zroot/docker`

## Btrfs options

#### btrfs.min_space

Specifies the minimum size to use when creating the subvolume which is used
for containers. If the user uses disk quotas for btrfs when creating or
running a container with the **--storage-opt size** option, Docker ensures
the **size** cannot be smaller than **btrfs.min_space**.

Example use: `docker daemon -s btrfs --storage-opt btrfs.min_space=10G`

# CLUSTER STORE OPTIONS

The daemon uses libkv to advertise the node within the cluster. Some
Key/Value backends support mutual TLS, and the client TLS settings used by
the daemon can be configured using the **--cluster-store-opt** flag,
specifying the paths to PEM encoded files.

#### kv.cacertfile

Specifies the path to a local file with PEM encoded CA certificates to trust.

#### kv.certfile

Specifies the path to a local file with a PEM encoded certificate. This
certificate is used as the client cert for communication with the
Key/Value store.

#### kv.keyfile

Specifies the path to a local file with a PEM encoded private key. This
private key is used as the client key for communication with the
Key/Value store.

# Access authorization

Docker's access authorization can be extended by authorization plugins that
your organization can purchase or build themselves. You can install one or
more authorization plugins when you start the Docker `daemon` using the
`--authorization-plugin=PLUGIN_ID` option.

```bash
dockerd --authorization-plugin=plugin1 --authorization-plugin=plugin2,...
```

The `PLUGIN_ID` value is either the plugin's name or a path to its
specification file. The plugin's implementation determines whether you can
specify a name or path. Consult with your Docker administrator to get
information about the plugins available to you.

Once a plugin is installed, requests made to the `daemon` through the
command line or Docker's remote API are allowed or denied by the plugin. If
you have multiple plugins installed, at least one must allow the request for
it to complete.

For information about how to create an authorization plugin, see the
[authorization plugin](https://docs.docker.com/engine/extend/authorization/)
section in the Docker extend section of this documentation.


# HISTORY
Sept 2015, Originally compiled by Shishir Mahajan
based on docker.com source material and internal work.
diff --git a/man/generate.go b/man/generate.go deleted file mode 100644 index 7bcc57009e..0000000000 --- a/man/generate.go +++ /dev/null @@ -1,39 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/docker/docker/cli/cobraadaptor" - cliflags "github.com/docker/docker/cli/flags" - "github.com/spf13/cobra/doc" -) - -func generateManPages(path string) error { - header := &doc.GenManHeader{ - Title: "DOCKER", - Section: "1", - Source: "Docker Community", - } - flags := &cliflags.ClientFlags{ - Common: cliflags.InitCommonFlags(), - } - cmd := cobraadaptor.NewCobraAdaptor(flags).GetRootCommand() - cmd.DisableAutoGenTag = true - return doc.GenManTreeFromOpts(cmd, doc.GenManTreeOptions{ - Header: header, - Path: path, - CommandSeparator: "-", - }) -} - -func main() { - path := "/tmp" - if len(os.Args) > 1 { - path = os.Args[1] - } - fmt.Printf("Generating man pages into %s\n", path) - if err := generateManPages(path); err != nil { - fmt.Fprintf(os.Stderr, "Failed to generate man pages: %s\n", err.Error()) - } -} diff --git a/man/generate.sh b/man/generate.sh deleted file mode 100755 index e4126ba4ac..0000000000 --- a/man/generate.sh +++ /dev/null @@ -1,15 +0,0 @@ -#!/bin/bash -# -# Generate man pages for docker/docker -# - -set -eu - -mkdir -p ./man/man1 - -# Generate man pages from cobra commands -go build -o /tmp/gen-manpages ./man -/tmp/gen-manpages ./man/man1 - -# Generate legacy pages from markdown -./man/md2man-all.sh -q diff --git a/man/glide.lock b/man/glide.lock deleted file mode 100644 index 40c5f5a64a..0000000000 --- a/man/glide.lock +++ /dev/null @@ -1,52 +0,0 @@ -hash: ead3ea293a6143fe41069ebec814bf197d8c43a92cc7666b1f7e21a419b46feb -updated: 2016-06-20T21:53:35.420817456Z -imports: -- name: github.com/BurntSushi/toml - version: f0aeabca5a127c4078abb8c8d64298b147264b55 -- name: github.com/cpuguy83/go-md2man - version: 2724a9c9051aa62e9cca11304e7dd518e9e41599 - subpackages: - - md2man -- name: github.com/fsnotify/fsnotify - version: 30411dbcefb7a1da7e84f75530ad3abe4011b4f8 -- name: github.com/hashicorp/hcl - version: da486364306ed66c218be9b7953e19173447c18b - subpackages: - - hcl/ast - - hcl/parser - - hcl/token - - json/parser - - hcl/scanner - - hcl/strconv - - json/scanner - - json/token -- name: github.com/inconshreveable/mousetrap - version: 76626ae9c91c4f2a10f34cad8ce83ea42c93bb75 -- name: github.com/magiconair/properties - version: c265cfa48dda6474e208715ca93e987829f572f8 -- name: github.com/mitchellh/mapstructure - version: d2dd0262208475919e1a362f675cfc0e7c10e905 -- name: github.com/russross/blackfriday - version: 1d6b8e9301e720b08a8938b8c25c018285885438 -- name: github.com/shurcooL/sanitized_anchor_name - version: 10ef21a441db47d8b13ebcc5fd2310f636973c77 -- name: github.com/spf13/cast - version: 27b586b42e29bec072fe7379259cc719e1289da6 -- name: github.com/spf13/jwalterweatherman - version: 33c24e77fb80341fe7130ee7c594256ff08ccc46 -- name: github.com/spf13/pflag - version: 367864438f1b1a3c7db4da06a2f55b144e6784e0 -- name: github.com/spf13/viper - version: c1ccc378a054ea8d4e38d8c67f6938d4760b53dd -- name: golang.org/x/sys - version: 62bee037599929a6e9146f29d10dd5208c43507d - subpackages: - - unix -- name: gopkg.in/yaml.v2 - version: a83829b6f1293c91addabc89d0571c246397bbf4 -- name: github.com/spf13/cobra - repo: https://github.com/dnephin/cobra - subpackages: - - doc - version: v1.3 -devImports: [] diff --git a/man/glide.yaml b/man/glide.yaml deleted file mode 100644 index e99b2670d8..0000000000 --- a/man/glide.yaml +++ /dev/null @@ -1,12 +0,0 @@ -package: 
github.com/docker/docker/man -import: -- package: github.com/cpuguy83/go-md2man - subpackages: - - md2man -- package: github.com/inconshreveable/mousetrap -- package: github.com/spf13/pflag -- package: github.com/spf13/viper -- package: github.com/spf13/cobra - repo: https://github.com/dnephin/cobra - subpackages: - - doc diff --git a/man/md2man-all.sh b/man/md2man-all.sh deleted file mode 100755 index 97c65c93bc..0000000000 --- a/man/md2man-all.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/bin/bash -set -e - -# get into this script's directory -cd "$(dirname "$(readlink -f "$BASH_SOURCE")")" - -[ "$1" = '-q' ] || { - set -x - pwd -} - -for FILE in *.md; do - base="$(basename "$FILE")" - name="${base%.md}" - num="${name##*.}" - if [ -z "$num" -o "$name" = "$num" ]; then - # skip files that aren't of the format xxxx.N.md (like README.md) - continue - fi - mkdir -p "./man${num}" - go-md2man -in "$FILE" -out "./man${num}/${name}" -done diff --git a/migrate/v1/migratev1.go b/migrate/v1/migratev1.go deleted file mode 100644 index 3c0e550d0a..0000000000 --- a/migrate/v1/migratev1.go +++ /dev/null @@ -1,504 +0,0 @@ -package v1 - -import ( - "errors" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strconv" - "sync" - "time" - - "encoding/json" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/digest" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/image" - imagev1 "github.com/docker/docker/image/v1" - "github.com/docker/docker/layer" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/reference" -) - -type graphIDRegistrar interface { - RegisterByGraphID(string, layer.ChainID, layer.DiffID, string, int64) (layer.Layer, error) - Release(layer.Layer) ([]layer.Metadata, error) -} - -type graphIDMounter interface { - CreateRWLayerByGraphID(string, string, layer.ChainID) error -} - -type checksumCalculator interface { - ChecksumForGraphID(id, parent, oldTarDataPath, newTarDataPath string) (diffID layer.DiffID, size int64, err error) -} - -const ( - graphDirName = "graph" - tarDataFileName = "tar-data.json.gz" - migrationFileName = ".migration-v1-images.json" - migrationTagsFileName = ".migration-v1-tags" - migrationDiffIDFileName = ".migration-diffid" - migrationSizeFileName = ".migration-size" - migrationTarDataFileName = ".migration-tardata" - containersDirName = "containers" - configFileNameLegacy = "config.json" - configFileName = "config.v2.json" - repositoriesFilePrefixLegacy = "repositories-" -) - -var ( - errUnsupported = errors.New("migration is not supported") -) - -// Migrate takes an old graph directory and transforms the metadata into the -// new format. 
-func Migrate(root, driverName string, ls layer.Store, is image.Store, rs reference.Store, ms metadata.Store) error { - graphDir := filepath.Join(root, graphDirName) - if _, err := os.Lstat(graphDir); os.IsNotExist(err) { - return nil - } - - mappings, err := restoreMappings(root) - if err != nil { - return err - } - - if cc, ok := ls.(checksumCalculator); ok { - CalculateLayerChecksums(root, cc, mappings) - } - - if registrar, ok := ls.(graphIDRegistrar); !ok { - return errUnsupported - } else if err := migrateImages(root, registrar, is, ms, mappings); err != nil { - return err - } - - err = saveMappings(root, mappings) - if err != nil { - return err - } - - if mounter, ok := ls.(graphIDMounter); !ok { - return errUnsupported - } else if err := migrateContainers(root, mounter, is, mappings); err != nil { - return err - } - - if err := migrateRefs(root, driverName, rs, mappings); err != nil { - return err - } - - return nil -} - -// CalculateLayerChecksums walks an old graph directory and calculates checksums -// for each layer. These checksums are later used for migration. -func CalculateLayerChecksums(root string, ls checksumCalculator, mappings map[string]image.ID) { - graphDir := filepath.Join(root, graphDirName) - // spawn some extra workers also for maximum performance because the process is bounded by both cpu and io - workers := runtime.NumCPU() * 3 - workQueue := make(chan string, workers) - - wg := sync.WaitGroup{} - - for i := 0; i < workers; i++ { - wg.Add(1) - go func() { - for id := range workQueue { - start := time.Now() - if err := calculateLayerChecksum(graphDir, id, ls); err != nil { - logrus.Errorf("could not calculate checksum for %q, %q", id, err) - } - elapsed := time.Since(start) - logrus.Debugf("layer %s took %.2f seconds", id, elapsed.Seconds()) - } - wg.Done() - }() - } - - dir, err := ioutil.ReadDir(graphDir) - if err != nil { - logrus.Errorf("could not read directory %q", graphDir) - return - } - for _, v := range dir { - v1ID := v.Name() - if err := imagev1.ValidateID(v1ID); err != nil { - continue - } - if _, ok := mappings[v1ID]; ok { // support old migrations without helper files - continue - } - workQueue <- v1ID - } - close(workQueue) - wg.Wait() -} - -func calculateLayerChecksum(graphDir, id string, ls checksumCalculator) error { - diffIDFile := filepath.Join(graphDir, id, migrationDiffIDFileName) - if _, err := os.Lstat(diffIDFile); err == nil { - return nil - } else if !os.IsNotExist(err) { - return err - } - - parent, err := getParent(filepath.Join(graphDir, id)) - if err != nil { - return err - } - - diffID, size, err := ls.ChecksumForGraphID(id, parent, filepath.Join(graphDir, id, tarDataFileName), filepath.Join(graphDir, id, migrationTarDataFileName)) - if err != nil { - return err - } - - if err := ioutil.WriteFile(filepath.Join(graphDir, id, migrationSizeFileName), []byte(strconv.Itoa(int(size))), 0600); err != nil { - return err - } - - if err := ioutils.AtomicWriteFile(filepath.Join(graphDir, id, migrationDiffIDFileName), []byte(diffID), 0600); err != nil { - return err - } - - logrus.Infof("calculated checksum for layer %s: %s", id, diffID) - return nil -} - -func restoreMappings(root string) (map[string]image.ID, error) { - mappings := make(map[string]image.ID) - - mfile := filepath.Join(root, migrationFileName) - f, err := os.Open(mfile) - if err != nil && !os.IsNotExist(err) { - return nil, err - } else if err == nil { - err := json.NewDecoder(f).Decode(&mappings) - if err != nil { - f.Close() - return nil, err - } - f.Close() - } - - return 
mappings, nil -} - -func saveMappings(root string, mappings map[string]image.ID) error { - mfile := filepath.Join(root, migrationFileName) - f, err := os.OpenFile(mfile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0600) - if err != nil { - return err - } - defer f.Close() - if err := json.NewEncoder(f).Encode(mappings); err != nil { - return err - } - return nil -} - -func migrateImages(root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) error { - graphDir := filepath.Join(root, graphDirName) - - dir, err := ioutil.ReadDir(graphDir) - if err != nil { - return err - } - for _, v := range dir { - v1ID := v.Name() - if err := imagev1.ValidateID(v1ID); err != nil { - continue - } - if _, exists := mappings[v1ID]; exists { - continue - } - if err := migrateImage(v1ID, root, ls, is, ms, mappings); err != nil { - continue - } - } - - return nil -} - -func migrateContainers(root string, ls graphIDMounter, is image.Store, imageMappings map[string]image.ID) error { - containersDir := filepath.Join(root, containersDirName) - dir, err := ioutil.ReadDir(containersDir) - if err != nil { - return err - } - for _, v := range dir { - id := v.Name() - - if _, err := os.Stat(filepath.Join(containersDir, id, configFileName)); err == nil { - continue - } - - containerJSON, err := ioutil.ReadFile(filepath.Join(containersDir, id, configFileNameLegacy)) - if err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - var c map[string]*json.RawMessage - if err := json.Unmarshal(containerJSON, &c); err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - imageStrJSON, ok := c["Image"] - if !ok { - return fmt.Errorf("invalid container configuration for %v", id) - } - - var image string - if err := json.Unmarshal([]byte(*imageStrJSON), &image); err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - imageID, ok := imageMappings[image] - if !ok { - logrus.Errorf("image not migrated %v", imageID) // non-fatal error - continue - } - - c["Image"] = rawJSON(imageID) - - containerJSON, err = json.Marshal(c) - if err != nil { - return err - } - - if err := ioutil.WriteFile(filepath.Join(containersDir, id, configFileName), containerJSON, 0600); err != nil { - return err - } - - img, err := is.Get(imageID) - if err != nil { - return err - } - - if err := ls.CreateRWLayerByGraphID(id, id, img.RootFS.ChainID()); err != nil { - logrus.Errorf("migrate container error: %v", err) - continue - } - - logrus.Infof("migrated container %s to point to %s", id, imageID) - - } - return nil -} - -type refAdder interface { - AddTag(ref reference.Named, id image.ID, force bool) error - AddDigest(ref reference.Canonical, id image.ID, force bool) error -} - -func migrateRefs(root, driverName string, rs refAdder, mappings map[string]image.ID) error { - migrationFile := filepath.Join(root, migrationTagsFileName) - if _, err := os.Lstat(migrationFile); !os.IsNotExist(err) { - return err - } - - type repositories struct { - Repositories map[string]map[string]string - } - - var repos repositories - - f, err := os.Open(filepath.Join(root, repositoriesFilePrefixLegacy+driverName)) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - defer f.Close() - if err := json.NewDecoder(f).Decode(&repos); err != nil { - return err - } - - for name, repo := range repos.Repositories { - for tag, id := range repo { - if strongID, exists := mappings[id]; exists { - ref, err := reference.WithName(name) - if err != nil { - 
logrus.Errorf("migrate tags: invalid name %q, %q", name, err) - continue - } - if dgst, err := digest.ParseDigest(tag); err == nil { - canonical, err := reference.WithDigest(ref, dgst) - if err != nil { - logrus.Errorf("migrate tags: invalid digest %q, %q", dgst, err) - continue - } - if err := rs.AddDigest(canonical, strongID, false); err != nil { - logrus.Errorf("can't migrate digest %q for %q, err: %q", ref.String(), strongID, err) - } - } else { - tagRef, err := reference.WithTag(ref, tag) - if err != nil { - logrus.Errorf("migrate tags: invalid tag %q, %q", tag, err) - continue - } - if err := rs.AddTag(tagRef, strongID, false); err != nil { - logrus.Errorf("can't migrate tag %q for %q, err: %q", ref.String(), strongID, err) - } - } - logrus.Infof("migrated tag %s:%s to point to %s", name, tag, strongID) - } - } - } - - mf, err := os.Create(migrationFile) - if err != nil { - return err - } - mf.Close() - - return nil -} - -func getParent(confDir string) (string, error) { - jsonFile := filepath.Join(confDir, "json") - imageJSON, err := ioutil.ReadFile(jsonFile) - if err != nil { - return "", err - } - var parent struct { - Parent string - ParentID digest.Digest `json:"parent_id"` - } - if err := json.Unmarshal(imageJSON, &parent); err != nil { - return "", err - } - if parent.Parent == "" && parent.ParentID != "" { // v1.9 - parent.Parent = parent.ParentID.Hex() - } - // compatibilityID for parent - parentCompatibilityID, err := ioutil.ReadFile(filepath.Join(confDir, "parent")) - if err == nil && len(parentCompatibilityID) > 0 { - parent.Parent = string(parentCompatibilityID) - } - return parent.Parent, nil -} - -func migrateImage(id, root string, ls graphIDRegistrar, is image.Store, ms metadata.Store, mappings map[string]image.ID) (err error) { - defer func() { - if err != nil { - logrus.Errorf("migration failed for %v, err: %v", id, err) - } - }() - - parent, err := getParent(filepath.Join(root, graphDirName, id)) - if err != nil { - return err - } - - var parentID image.ID - if parent != "" { - var exists bool - if parentID, exists = mappings[parent]; !exists { - if err := migrateImage(parent, root, ls, is, ms, mappings); err != nil { - // todo: fail or allow broken chains? 
- return err - } - parentID = mappings[parent] - } - } - - rootFS := image.NewRootFS() - var history []image.History - - if parentID != "" { - parentImg, err := is.Get(parentID) - if err != nil { - return err - } - - rootFS = parentImg.RootFS - history = parentImg.History - } - - diffIDData, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationDiffIDFileName)) - if err != nil { - return err - } - diffID, err := digest.ParseDigest(string(diffIDData)) - if err != nil { - return err - } - - sizeStr, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, migrationSizeFileName)) - if err != nil { - return err - } - size, err := strconv.ParseInt(string(sizeStr), 10, 64) - if err != nil { - return err - } - - layer, err := ls.RegisterByGraphID(id, rootFS.ChainID(), layer.DiffID(diffID), filepath.Join(root, graphDirName, id, migrationTarDataFileName), size) - if err != nil { - return err - } - logrus.Infof("migrated layer %s to %s", id, layer.DiffID()) - - jsonFile := filepath.Join(root, graphDirName, id, "json") - imageJSON, err := ioutil.ReadFile(jsonFile) - if err != nil { - return err - } - - h, err := imagev1.HistoryFromConfig(imageJSON, false) - if err != nil { - return err - } - history = append(history, h) - - rootFS.Append(layer.DiffID()) - - config, err := imagev1.MakeConfigFromV1Config(imageJSON, rootFS, history) - if err != nil { - return err - } - strongID, err := is.Create(config) - if err != nil { - return err - } - logrus.Infof("migrated image %s to %s", id, strongID) - - if parentID != "" { - if err := is.SetParent(strongID, parentID); err != nil { - return err - } - } - - checksum, err := ioutil.ReadFile(filepath.Join(root, graphDirName, id, "checksum")) - if err == nil { // best effort - dgst, err := digest.ParseDigest(string(checksum)) - if err == nil { - V2MetadataService := metadata.NewV2MetadataService(ms) - V2MetadataService.Add(layer.DiffID(), metadata.V2Metadata{Digest: dgst}) - } - } - _, err = ls.Release(layer) - if err != nil { - return err - } - - mappings[id] = strongID - return -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} diff --git a/migrate/v1/migratev1_test.go b/migrate/v1/migratev1_test.go deleted file mode 100644 index 88f6bbdfad..0000000000 --- a/migrate/v1/migratev1_test.go +++ /dev/null @@ -1,435 +0,0 @@ -package v1 - -import ( - "crypto/rand" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "runtime" - "testing" - - "github.com/docker/distribution/digest" - "github.com/docker/docker/distribution/metadata" - "github.com/docker/docker/image" - "github.com/docker/docker/layer" - "github.com/docker/docker/reference" -) - -func TestMigrateRefs(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "migrate-tags") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108","sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"},"registry":{"2":"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d","latest":"8d5547a9f329b1d3f93198cd661fb5117e5a96b721c5cf9a2c389e7dd4877128"}}}`), 0600) - - ta := &mockTagAdder{} - err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ - 
"5d165b8e4b203685301c815e95663231691d383fd5e3d3185d1ce3f8dddead3d": image.ID("sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), - "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), - "abcdef3434c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:56434342345ae68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"), - }) - if err != nil { - t.Fatal(err) - } - - expected := map[string]string{ - "busybox:latest": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", - "busybox@sha256:16a2a52884c2a9481ed267c2d46483eac7693b813a63132368ab098a71303f8a": "sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9", - "registry:2": "sha256:2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae", - } - - if !reflect.DeepEqual(expected, ta.refs) { - t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) - } - - // second migration is no-op - ioutil.WriteFile(filepath.Join(tmpdir, "repositories-generic"), []byte(`{"Repositories":{"busybox":{"latest":"b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108"`), 0600) - err = migrateRefs(tmpdir, "generic", ta, map[string]image.ID{ - "b3ca410aa2c115c05969a7b2c8cf8a9fcf62c1340ed6a601c9ee50df337ec108": image.ID("sha256:fcde2b2edba56bf408601fb721fe9b5c338d10ee429ea04fae5511b68fbf8fb9"), - }) - if err != nil { - t.Fatal(err) - } - if !reflect.DeepEqual(expected, ta.refs) { - t.Fatalf("Invalid migrated tags: expected %q, got %q", expected, ta.refs) - } -} - -func TestMigrateContainers(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - if runtime.GOARCH != "amd64" { - t.Skip("Test tailored to amd64 architecture") - } - tmpdir, err := ioutil.TempDir("", "migrate-containers") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - err = addContainer(tmpdir, 
`{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) - if err != nil { - t.Fatal(err) - } - - // container with invalid image - err = addContainer(tmpdir, 
`{"State":{"Running":false,"Paused":false,"Restarting":false,"OOMKilled":false,"Dead":false,"Pid":0,"ExitCode":0,"Error":"","StartedAt":"2015-11-10T21:42:40.604267436Z","FinishedAt":"2015-11-10T21:42:41.869265487Z"},"ID":"e780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c","Created":"2015-11-10T21:42:40.433831551Z","Path":"sh","Args":[],"Config":{"Hostname":"f780ee3f80e6","Domainname":"","User":"","AttachStdin":true,"AttachStdout":true,"AttachStderr":true,"Tty":true,"OpenStdin":true,"StdinOnce":true,"Env":null,"Cmd":["sh"],"Image":"busybox","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":{}},"Image":"4c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093","NetworkSettings":{"Bridge":"","EndpointID":"","Gateway":"","GlobalIPv6Address":"","GlobalIPv6PrefixLen":0,"HairpinMode":false,"IPAddress":"","IPPrefixLen":0,"IPv6Gateway":"","LinkLocalIPv6Address":"","LinkLocalIPv6PrefixLen":0,"MacAddress":"","NetworkID":"","PortMapping":null,"Ports":null,"SandboxKey":"","SecondaryIPAddresses":null,"SecondaryIPv6Addresses":null},"ResolvConfPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/resolv.conf","HostnamePath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hostname","HostsPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/hosts","LogPath":"/var/lib/docker/containers/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c/f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c-json.log","Name":"/determined_euclid","Driver":"overlay","ExecDriver":"native-0.2","MountLabel":"","ProcessLabel":"","RestartCount":0,"UpdateDns":false,"HasBeenStartedBefore":false,"MountPoints":{},"Volumes":{},"VolumesRW":{},"AppArmorProfile":""}`) - if err != nil { - t.Fatal(err) - } - - ls := &mockMounter{} - - ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) - if err != nil { - t.Fatal(err) - } - - is, err := image.NewImageStore(ifs, ls) - if err != nil { - t.Fatal(err) - } - - imgID, err := is.Create([]byte(`{"architecture":"amd64","config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["sh"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Entrypoint":null,"Env":null,"Hostname":"23304fc829f9","Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Labels":null,"OnBuild":null,"OpenStdin":false,"StdinOnce":false,"Tty":false,"Volumes":null,"WorkingDir":"","Domainname":"","User":""},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","history":[{"created":"2015-10-31T22:22:54.690851953Z","created_by":"/bin/sh -c #(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"},{"created":"2015-10-31T22:22:55.613815829Z","created_by":"/bin/sh -c #(nop) CMD [\"sh\"]"}],"os":"linux","rootfs":{"type":"layers","diff_ids":["sha256:c6f988f4874bb0add23a778f753c65efe992244e148a1d2ec2a8b664fb66bbd1","sha256:5f70bf18a086007016e948b04aed3b82103a36bea41755b6cddfaf10ace3c6ef"]}}`)) - if err != nil { - t.Fatal(err) - } 
- - err = migrateContainers(tmpdir, ls, is, map[string]image.ID{ - "2c5ac3f849df8627fcf2822727f87c57f38b7129d3604fbc11d861fe856ff093": imgID, - }) - if err != nil { - t.Fatal(err) - } - - expected := []mountInfo{{ - "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", - "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", - "sha256:c3191d32a37d7159b2e30830937d2e30268ad6c375a773a8994911a3aba9b93f", - }} - if !reflect.DeepEqual(expected, ls.mounts) { - t.Fatalf("invalid mounts: expected %q, got %q", expected, ls.mounts) - } - - if actual, expected := ls.count, 0; actual != expected { - t.Fatalf("invalid active mounts: expected %d, got %d", expected, actual) - } - - config2, err := ioutil.ReadFile(filepath.Join(tmpdir, "containers", "f780ee3f80e66e9b432a57049597118a66aab8932be88e5628d4c824edbee37c", "config.v2.json")) - if err != nil { - t.Fatal(err) - } - var config struct{ Image string } - err = json.Unmarshal(config2, &config) - if err != nil { - t.Fatal(err) - } - - if actual, expected := config.Image, string(imgID); actual != expected { - t.Fatalf("invalid image pointer in migrated config: expected %q, got %q", expected, actual) - } - -} - -func TestMigrateImages(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - if runtime.GOARCH != "amd64" { - t.Skip("Test tailored to amd64 architecture") - } - tmpdir, err := ioutil.TempDir("", "migrate-images") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - // busybox from 1.9 - id1, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in /"],"Image":"","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:54.690851953Z","docker_version":"1.8.2","layer_id":"sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57","os":"linux"}`, "", "") - if err != nil { - t.Fatal(err) - } - - id2, err := addImage(tmpdir, `{"architecture":"amd64","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD 
[\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"WorkingDir":"","Entrypoint":null,"OnBuild":null,"Labels":null},"created":"2015-10-31T22:22:55.613815829Z","docker_version":"1.8.2","layer_id":"sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4","os":"linux","parent_id":"sha256:039b63dd2cbaa10d6015ea574392530571ed8d7b174090f032211285a71881d0"}`, id1, "") - if err != nil { - t.Fatal(err) - } - - ls := &mockRegistrar{} - - ifs, err := image.NewFSStoreBackend(filepath.Join(tmpdir, "imagedb")) - if err != nil { - t.Fatal(err) - } - - is, err := image.NewImageStore(ifs, ls) - if err != nil { - t.Fatal(err) - } - - ms, err := metadata.NewFSMetadataStore(filepath.Join(tmpdir, "distribution")) - if err != nil { - t.Fatal(err) - } - mappings := make(map[string]image.ID) - - err = migrateImages(tmpdir, ls, is, ms, mappings) - if err != nil { - t.Fatal(err) - } - - expected := map[string]image.ID{ - id1: image.ID("sha256:ca406eaf9c26898414ff5b7b3a023c33310759d6203be0663dbf1b3a712f432d"), - id2: image.ID("sha256:a488bec94bb96b26a968f913d25ef7d8d204d727ca328b52b4b059c7d03260b6"), - } - - if !reflect.DeepEqual(mappings, expected) { - t.Fatalf("invalid image mappings: expected %q, got %q", expected, mappings) - } - - if actual, expected := ls.count, 2; actual != expected { - t.Fatalf("invalid register count: expected %q, got %q", expected, actual) - } - ls.count = 0 - - // next images are busybox from 1.8.2 - _, err = addImage(tmpdir, `{"id":"17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2","parent":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:55.613815829Z","container":"349b014153779e30093d94f6df2a43c7a0a164e05aa207389917b540add39b51","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) CMD [\"sh\"]"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["sh"],"Image":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":0}`, "", "sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - if err != nil { - t.Fatal(err) - } - - _, err = addImage(tmpdir, `{"id":"d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498","created":"2015-10-31T22:22:54.690851953Z","container":"23304fc829f9b9349416f6eb1afec162907eba3a328f51d53a17f8986f865d65","container_config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":["/bin/sh","-c","#(nop) ADD file:a3bc1e842b69636f9df5256c49c5374fb4eef1e281fe3f282c65fb853ee171c5 in 
/"],"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"docker_version":"1.8.2","config":{"Hostname":"23304fc829f9","Domainname":"","User":"","AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"PublishService":"","Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":null,"Cmd":null,"Image":"","Volumes":null,"VolumeDriver":"","WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"MacAddress":"","OnBuild":null,"Labels":null},"architecture":"amd64","os":"linux","Size":1108935}`, "", "sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57") - if err != nil { - t.Fatal(err) - } - - err = migrateImages(tmpdir, ls, is, ms, mappings) - if err != nil { - t.Fatal(err) - } - - expected["d1592a710ac323612bd786fa8ac20727c58d8a67847e5a65177c594f43919498"] = image.ID("sha256:c091bb33854e57e6902b74c08719856d30b5593c7db6143b2b48376b8a588395") - expected["17583c7dd0dae6244203b8029733bdb7d17fccbb2b5d93e2b24cf48b8bfd06e2"] = image.ID("sha256:d963020e755ff2715b936065949472c1f8a6300144b922992a1a421999e71f07") - - if actual, expected := ls.count, 2; actual != expected { - t.Fatalf("invalid register count: expected %q, got %q", expected, actual) - } - - v2MetadataService := metadata.NewV2MetadataService(ms) - receivedMetadata, err := v2MetadataService.GetMetadata(layer.EmptyLayer.DiffID()) - if err != nil { - t.Fatal(err) - } - - expectedMetadata := []metadata.V2Metadata{ - {Digest: digest.Digest("sha256:55dc925c23d1ed82551fd018c27ac3ee731377b6bad3963a2a4e76e753d70e57")}, - {Digest: digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")}, - } - - if !reflect.DeepEqual(expectedMetadata, receivedMetadata) { - t.Fatalf("invalid metadata: expected %q, got %q", expectedMetadata, receivedMetadata) - } - -} - -func TestMigrateUnsupported(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "migrate-empty") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - err = os.MkdirAll(filepath.Join(tmpdir, "graph"), 0700) - if err != nil { - t.Fatal(err) - } - - err = Migrate(tmpdir, "generic", nil, nil, nil, nil) - if err != errUnsupported { - t.Fatalf("expected unsupported error, got %q", err) - } -} - -func TestMigrateEmptyDir(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "migrate-empty") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - err = Migrate(tmpdir, "generic", nil, nil, nil, nil) - if err != nil { - t.Fatal(err) - } -} - -func addImage(dest, jsonConfig, parent, checksum string) (string, error) { - var config struct{ ID string } - if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { - return "", err - } - if config.ID == "" { - b := make([]byte, 32) - rand.Read(b) - config.ID = hex.EncodeToString(b) - } - contDir := filepath.Join(dest, "graph", config.ID) - if err := os.MkdirAll(contDir, 0700); err != nil { - return "", err - } - if err := ioutil.WriteFile(filepath.Join(contDir, "json"), []byte(jsonConfig), 0600); err != nil { - return "", err - } - if checksum != "" { - if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { - return "", err - } - } - if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-diffid"), []byte(layer.EmptyLayer.DiffID()), 0600); err != nil { - return "", err - } - if err := ioutil.WriteFile(filepath.Join(contDir, ".migration-size"), []byte("0"), 0600); err != nil { - return "", err - } - if parent != 
"" { - if err := ioutil.WriteFile(filepath.Join(contDir, "parent"), []byte(parent), 0600); err != nil { - return "", err - } - } - if checksum != "" { - if err := ioutil.WriteFile(filepath.Join(contDir, "checksum"), []byte(checksum), 0600); err != nil { - return "", err - } - } - return config.ID, nil -} - -func addContainer(dest, jsonConfig string) error { - var config struct{ ID string } - if err := json.Unmarshal([]byte(jsonConfig), &config); err != nil { - return err - } - contDir := filepath.Join(dest, "containers", config.ID) - if err := os.MkdirAll(contDir, 0700); err != nil { - return err - } - if err := ioutil.WriteFile(filepath.Join(contDir, "config.json"), []byte(jsonConfig), 0600); err != nil { - return err - } - return nil -} - -type mockTagAdder struct { - refs map[string]string -} - -func (t *mockTagAdder) AddTag(ref reference.Named, id image.ID, force bool) error { - if t.refs == nil { - t.refs = make(map[string]string) - } - t.refs[ref.String()] = id.String() - return nil -} -func (t *mockTagAdder) AddDigest(ref reference.Canonical, id image.ID, force bool) error { - return t.AddTag(ref, id, force) -} - -type mockRegistrar struct { - layers map[layer.ChainID]*mockLayer - count int -} - -func (r *mockRegistrar) RegisterByGraphID(graphID string, parent layer.ChainID, diffID layer.DiffID, tarDataFile string, size int64) (layer.Layer, error) { - r.count++ - l := &mockLayer{} - if parent != "" { - p, exists := r.layers[parent] - if !exists { - return nil, fmt.Errorf("invalid parent %q", parent) - } - l.parent = p - l.diffIDs = append(l.diffIDs, p.diffIDs...) - } - l.diffIDs = append(l.diffIDs, diffID) - if r.layers == nil { - r.layers = make(map[layer.ChainID]*mockLayer) - } - r.layers[l.ChainID()] = l - return l, nil -} -func (r *mockRegistrar) Release(l layer.Layer) ([]layer.Metadata, error) { - return nil, nil -} -func (r *mockRegistrar) Get(layer.ChainID) (layer.Layer, error) { - return nil, nil -} - -type mountInfo struct { - name, graphID, parent string -} -type mockMounter struct { - mounts []mountInfo - count int -} - -func (r *mockMounter) CreateRWLayerByGraphID(name string, graphID string, parent layer.ChainID) error { - r.mounts = append(r.mounts, mountInfo{name, graphID, string(parent)}) - return nil -} -func (r *mockMounter) Unmount(string) error { - r.count-- - return nil -} -func (r *mockMounter) Get(layer.ChainID) (layer.Layer, error) { - return nil, nil -} - -func (r *mockMounter) Release(layer.Layer) ([]layer.Metadata, error) { - return nil, nil -} - -type mockLayer struct { - diffIDs []layer.DiffID - parent *mockLayer -} - -func (l *mockLayer) TarStream() (io.ReadCloser, error) { - return nil, nil -} - -func (l *mockLayer) ChainID() layer.ChainID { - return layer.CreateChainID(l.diffIDs) -} - -func (l *mockLayer) DiffID() layer.DiffID { - return l.diffIDs[len(l.diffIDs)-1] -} - -func (l *mockLayer) Parent() layer.Layer { - if l.parent == nil { - return nil - } - return l.parent -} - -func (l *mockLayer) Size() (int64, error) { - return 0, nil -} - -func (l *mockLayer) DiffSize() (int64, error) { - return 0, nil -} - -func (l *mockLayer) Metadata() (map[string]string, error) { - return nil, nil -} diff --git a/oci/defaults_linux.go b/oci/defaults_linux.go deleted file mode 100644 index 5f93a9af94..0000000000 --- a/oci/defaults_linux.go +++ /dev/null @@ -1,178 +0,0 @@ -package oci - -import ( - "os" - "runtime" - - "github.com/opencontainers/specs/specs-go" -) - -func sPtr(s string) *string { return &s } -func iPtr(i int64) *int64 { return &i } -func u32Ptr(i 
int64) *uint32 { u := uint32(i); return &u } -func fmPtr(i int64) *os.FileMode { fm := os.FileMode(i); return &fm } - -// DefaultSpec returns default oci spec used by docker. -func DefaultSpec() specs.Spec { - s := specs.Spec{ - Version: specs.Version, - Platform: specs.Platform{ - OS: runtime.GOOS, - Arch: runtime.GOARCH, - }, - } - s.Mounts = []specs.Mount{ - { - Destination: "/proc", - Type: "proc", - Source: "proc", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev", - Type: "tmpfs", - Source: "tmpfs", - Options: []string{"nosuid", "strictatime", "mode=755"}, - }, - { - Destination: "/dev/pts", - Type: "devpts", - Source: "devpts", - Options: []string{"nosuid", "noexec", "newinstance", "ptmxmode=0666", "mode=0620", "gid=5"}, - }, - { - Destination: "/sys", - Type: "sysfs", - Source: "sysfs", - Options: []string{"nosuid", "noexec", "nodev", "ro"}, - }, - { - Destination: "/sys/fs/cgroup", - Type: "cgroup", - Source: "cgroup", - Options: []string{"ro", "nosuid", "noexec", "nodev"}, - }, - { - Destination: "/dev/mqueue", - Type: "mqueue", - Source: "mqueue", - Options: []string{"nosuid", "noexec", "nodev"}, - }, - } - - s.Process.Capabilities = []string{ - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_FSETID", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETFCAP", - "CAP_SETPCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_KILL", - "CAP_AUDIT_WRITE", - } - - s.Linux = specs.Linux{ - MaskedPaths: []string{ - "/proc/kcore", - "/proc/latency_stats", - "/proc/timer_list", - "/proc/timer_stats", - "/proc/sched_debug", - }, - ReadonlyPaths: []string{ - "/proc/asound", - "/proc/bus", - "/proc/fs", - "/proc/irq", - "/proc/sys", - "/proc/sysrq-trigger", - }, - Namespaces: []specs.Namespace{ - {Type: "mount"}, - {Type: "network"}, - {Type: "uts"}, - {Type: "pid"}, - {Type: "ipc"}, - }, - // Devices implicitly contains the following devices: - // null, zero, full, random, urandom, tty, console, and ptmx. - // ptmx is a bind-mount or symlink of the container's ptmx. - // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices - Devices: []specs.Device{ - { - Type: "c", - Path: "/dev/fuse", - Major: 10, - Minor: 229, - FileMode: fmPtr(0666), - UID: u32Ptr(0), - GID: u32Ptr(0), - }, - }, - Resources: &specs.Resources{ - Devices: []specs.DeviceCgroup{ - { - Allow: false, - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(5), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(3), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(9), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(1), - Minor: iPtr(8), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(5), - Minor: iPtr(0), - Access: sPtr("rwm"), - }, - { - Allow: true, - Type: sPtr("c"), - Major: iPtr(5), - Minor: iPtr(1), - Access: sPtr("rwm"), - }, - { - Allow: false, - Type: sPtr("c"), - Major: iPtr(10), - Minor: iPtr(229), - Access: sPtr("rwm"), - }, - }, - }, - } - - return s -} diff --git a/oci/defaults_solaris.go b/oci/defaults_solaris.go deleted file mode 100644 index f3ed5c9c77..0000000000 --- a/oci/defaults_solaris.go +++ /dev/null @@ -1,11 +0,0 @@ -package oci - -import ( - "github.com/opencontainers/specs/specs-go" -) - -// DefaultSpec returns default oci spec used by docker. 
-func DefaultSpec() specs.Spec { - s := specs.Spec{} - return s -} diff --git a/oci/defaults_windows.go b/oci/defaults_windows.go deleted file mode 100644 index 03dc942eb1..0000000000 --- a/oci/defaults_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -package oci - -import ( - "runtime" - - "github.com/docker/docker/libcontainerd/windowsoci" -) - -// DefaultSpec returns default spec used by docker. -func DefaultSpec() windowsoci.WindowsSpec { - s := windowsoci.Spec{ - Version: windowsoci.Version, - Platform: windowsoci.Platform{ - OS: runtime.GOOS, - Arch: runtime.GOARCH, - }, - } - - return windowsoci.WindowsSpec{ - Spec: s, - Windows: windowsoci.Windows{}, - } -} diff --git a/opts/hosts.go b/opts/hosts.go deleted file mode 100644 index 266df1e537..0000000000 --- a/opts/hosts.go +++ /dev/null @@ -1,151 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "net/url" - "strconv" - "strings" -) - -var ( - // DefaultHTTPPort Default HTTP Port used if only the protocol is provided to -H flag e.g. docker daemon -H tcp:// - // These are the IANA registered port numbers for use with Docker - // see http://www.iana.org/assignments/service-names-port-numbers/service-names-port-numbers.xhtml?search=docker - DefaultHTTPPort = 2375 // Default HTTP Port - // DefaultTLSHTTPPort Default HTTP Port used when TLS enabled - DefaultTLSHTTPPort = 2376 // Default TLS encrypted HTTP Port - // DefaultUnixSocket Path for the unix socket. - // Docker daemon by default always listens on the default unix socket - DefaultUnixSocket = "/var/run/docker.sock" - // DefaultTCPHost constant defines the default host string used by docker on Windows - DefaultTCPHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultHTTPPort) - // DefaultTLSHost constant defines the default host string used by docker for TLS sockets - DefaultTLSHost = fmt.Sprintf("tcp://%s:%d", DefaultHTTPHost, DefaultTLSHTTPPort) - // DefaultNamedPipe defines the default named pipe used by docker on Windows - DefaultNamedPipe = `//./pipe/docker_engine` -) - -// ValidateHost validates that the specified string is a valid host and returns it. -func ValidateHost(val string) (string, error) { - host := strings.TrimSpace(val) - // The empty string means default and is not handled by parseDockerDaemonHost - if host != "" { - _, err := parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - // Note: unlike most flag validators, we don't return the mutated value here - // we need to know what the user entered later (using ParseHost) to adjust for tls - return val, nil -} - -// ParseHost and set defaults for a Daemon host string -func ParseHost(defaultToTLS bool, val string) (string, error) { - host := strings.TrimSpace(val) - if host == "" { - if defaultToTLS { - host = DefaultTLSHost - } else { - host = DefaultHost - } - } else { - var err error - host, err = parseDockerDaemonHost(host) - if err != nil { - return val, err - } - } - return host, nil -} - -// parseDockerDaemonHost parses the specified address and returns an address that will be used as the host. -// Depending of the address specified, this may return one of the global Default* strings defined in hosts.go. 
-func parseDockerDaemonHost(addr string) (string, error) { - addrParts := strings.SplitN(addr, "://", 2) - if len(addrParts) == 1 && addrParts[0] != "" { - addrParts = []string{"tcp", addrParts[0]} - } - - switch addrParts[0] { - case "tcp": - return ParseTCPAddr(addrParts[1], DefaultTCPHost) - case "unix": - return parseSimpleProtoAddr("unix", addrParts[1], DefaultUnixSocket) - case "npipe": - return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe) - case "fd": - return addr, nil - default: - return "", fmt.Errorf("Invalid bind address format: %s", addr) - } -} - -// parseSimpleProtoAddr parses and validates that the specified address is a valid -// socket address for simple protocols like unix and npipe. It returns a formatted -// socket address, either using the address parsed from addr, or the contents of -// defaultAddr if addr is a blank string. -func parseSimpleProtoAddr(proto, addr, defaultAddr string) (string, error) { - addr = strings.TrimPrefix(addr, proto+"://") - if strings.Contains(addr, "://") { - return "", fmt.Errorf("Invalid proto, expected %s: %s", proto, addr) - } - if addr == "" { - addr = defaultAddr - } - return fmt.Sprintf("%s://%s", proto, addr), nil -} - -// ParseTCPAddr parses and validates that the specified address is a valid TCP -// address. It returns a formatted TCP address, either using the address parsed -// from tryAddr, or the contents of defaultAddr if tryAddr is a blank string. -// tryAddr is expected to have already been Trim()'d -// defaultAddr must be in the full `tcp://host:port` form -func ParseTCPAddr(tryAddr string, defaultAddr string) (string, error) { - if tryAddr == "" || tryAddr == "tcp://" { - return defaultAddr, nil - } - addr := strings.TrimPrefix(tryAddr, "tcp://") - if strings.Contains(addr, "://") || addr == "" { - return "", fmt.Errorf("Invalid proto, expected tcp: %s", tryAddr) - } - - defaultAddr = strings.TrimPrefix(defaultAddr, "tcp://") - defaultHost, defaultPort, err := net.SplitHostPort(defaultAddr) - if err != nil { - return "", err - } - // url.Parse fails for trailing colon on IPv6 brackets on Go 1.5, but - // not 1.4. See https://github.com/golang/go/issues/12200 and - // https://github.com/golang/go/issues/6530. 
- if strings.HasSuffix(addr, "]:") { - addr += defaultPort - } - - u, err := url.Parse("tcp://" + addr) - if err != nil { - return "", err - } - host, port, err := net.SplitHostPort(u.Host) - if err != nil { - // try port addition once - host, port, err = net.SplitHostPort(net.JoinHostPort(u.Host, defaultPort)) - } - if err != nil { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - if host == "" { - host = defaultHost - } - if port == "" { - port = defaultPort - } - p, err := strconv.Atoi(port) - if err != nil && p == 0 { - return "", fmt.Errorf("Invalid bind address format: %s", tryAddr) - } - - return fmt.Sprintf("tcp://%s%s", net.JoinHostPort(host, port), u.Path), nil -} diff --git a/opts/hosts_test.go b/opts/hosts_test.go deleted file mode 100644 index a5bec30d4c..0000000000 --- a/opts/hosts_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package opts - -import ( - "fmt" - "testing" -) - -func TestParseHost(t *testing.T) { - invalid := []string{ - "something with spaces", - "://", - "unknown://", - "tcp://:port", - "tcp://invalid:port", - } - - valid := map[string]string{ - "": DefaultHost, - " ": DefaultHost, - " ": DefaultHost, - "fd://": "fd://", - "fd://something": "fd://something", - "tcp://host:": fmt.Sprintf("tcp://host:%d", DefaultHTTPPort), - "tcp://": DefaultTCPHost, - "tcp://:2375": fmt.Sprintf("tcp://%s:2375", DefaultHTTPHost), - "tcp://:2376": fmt.Sprintf("tcp://%s:2376", DefaultHTTPHost), - "tcp://0.0.0.0:8080": "tcp://0.0.0.0:8080", - "tcp://192.168.0.0:12000": "tcp://192.168.0.0:12000", - "tcp://192.168:8080": "tcp://192.168:8080", - "tcp://0.0.0.0:1234567890": "tcp://0.0.0.0:1234567890", // yeah it's valid :P - " tcp://:7777/path ": fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), - "tcp://docker.com:2375": "tcp://docker.com:2375", - "unix://": "unix://" + DefaultUnixSocket, - "unix://path/to/socket": "unix://path/to/socket", - "npipe://": "npipe://" + DefaultNamedPipe, - "npipe:////./pipe/foo": "npipe:////./pipe/foo", - } - - for _, value := range invalid { - if _, err := ParseHost(false, value); err == nil { - t.Errorf("Expected an error for %v, got [nil]", value) - } - } - - for value, expected := range valid { - if actual, err := ParseHost(false, value); err != nil || actual != expected { - t.Errorf("Expected for %v [%v], got [%v, %v]", value, expected, actual, err) - } - } -} - -func TestParseDockerDaemonHost(t *testing.T) { - invalids := map[string]string{ - - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid bind address format: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid bind address format: udp://127.0.0.1:2375", - "tcp://unix:///run/docker.sock": "Invalid proto, expected tcp: unix:///run/docker.sock", - " tcp://:7777/path ": "Invalid bind address format: tcp://:7777/path ", - "": "Invalid bind address format: ", - } - valids := map[string]string{ - "0.0.0.1:": "tcp://0.0.0.1:2375", - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - "[::1]:": "tcp://[::1]:2375", - "[::1]:5555/path": "tcp://[::1]:5555/path", - "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2375", - "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", - ":6666": fmt.Sprintf("tcp://%s:6666", DefaultHTTPHost), - ":6666/path": fmt.Sprintf("tcp://%s:6666/path", DefaultHTTPHost), - "tcp://": DefaultTCPHost, - "tcp://:7777": fmt.Sprintf("tcp://%s:7777", DefaultHTTPHost), - "tcp://:7777/path": 
fmt.Sprintf("tcp://%s:7777/path", DefaultHTTPHost), - "unix:///run/docker.sock": "unix:///run/docker.sock", - "unix://": "unix://" + DefaultUnixSocket, - "fd://": "fd://", - "fd://something": "fd://something", - "localhost:": "tcp://localhost:2375", - "localhost:5555": "tcp://localhost:5555", - "localhost:5555/path": "tcp://localhost:5555/path", - } - for invalidAddr, expectedError := range invalids { - if addr, err := parseDockerDaemonHost(invalidAddr); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %q return, got %q and addr %v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := parseDockerDaemonHost(validAddr); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got (%v) addr (%v)", validAddr, expectedAddr, err, addr) - } - } -} - -func TestParseTCP(t *testing.T) { - var ( - defaultHTTPHost = "tcp://127.0.0.1:2376" - ) - invalids := map[string]string{ - "tcp:a.b.c.d": "Invalid bind address format: tcp:a.b.c.d", - "tcp:a.b.c.d/path": "Invalid bind address format: tcp:a.b.c.d/path", - "udp://127.0.0.1": "Invalid proto, expected tcp: udp://127.0.0.1", - "udp://127.0.0.1:2375": "Invalid proto, expected tcp: udp://127.0.0.1:2375", - } - valids := map[string]string{ - "": defaultHTTPHost, - "tcp://": defaultHTTPHost, - "0.0.0.1:": "tcp://0.0.0.1:2376", - "0.0.0.1:5555": "tcp://0.0.0.1:5555", - "0.0.0.1:5555/path": "tcp://0.0.0.1:5555/path", - ":6666": "tcp://127.0.0.1:6666", - ":6666/path": "tcp://127.0.0.1:6666/path", - "tcp://:7777": "tcp://127.0.0.1:7777", - "tcp://:7777/path": "tcp://127.0.0.1:7777/path", - "[::1]:": "tcp://[::1]:2376", - "[::1]:5555": "tcp://[::1]:5555", - "[::1]:5555/path": "tcp://[::1]:5555/path", - "[0:0:0:0:0:0:0:1]:": "tcp://[0:0:0:0:0:0:0:1]:2376", - "[0:0:0:0:0:0:0:1]:5555": "tcp://[0:0:0:0:0:0:0:1]:5555", - "[0:0:0:0:0:0:0:1]:5555/path": "tcp://[0:0:0:0:0:0:0:1]:5555/path", - "localhost:": "tcp://localhost:2376", - "localhost:5555": "tcp://localhost:5555", - "localhost:5555/path": "tcp://localhost:5555/path", - } - for invalidAddr, expectedError := range invalids { - if addr, err := ParseTCPAddr(invalidAddr, defaultHTTPHost); err == nil || err.Error() != expectedError { - t.Errorf("tcp %v address expected error %v return, got %s and addr %v", invalidAddr, expectedError, err, addr) - } - } - for validAddr, expectedAddr := range valids { - if addr, err := ParseTCPAddr(validAddr, defaultHTTPHost); err != nil || addr != expectedAddr { - t.Errorf("%v -> expected %v, got %v and addr %v", validAddr, expectedAddr, err, addr) - } - } -} - -func TestParseInvalidUnixAddrInvalid(t *testing.T) { - if _, err := parseSimpleProtoAddr("unix", "tcp://127.0.0.1", "unix:///var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } - if _, err := parseSimpleProtoAddr("unix", "unix://tcp://127.0.0.1", "/var/run/docker.sock"); err == nil || err.Error() != "Invalid proto, expected unix: tcp://127.0.0.1" { - t.Fatalf("Expected an error, got %v", err) - } - if v, err := parseSimpleProtoAddr("unix", "", "/var/run/docker.sock"); err != nil || v != "unix:///var/run/docker.sock" { - t.Fatalf("Expected an %v, got %v", v, "unix:///var/run/docker.sock") - } -} diff --git a/opts/hosts_unix.go b/opts/hosts_unix.go deleted file mode 100644 index 611407a9d9..0000000000 --- a/opts/hosts_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows - -package opts - -import "fmt" - -// DefaultHost 
constant defines the default host string used by docker on other hosts than Windows -var DefaultHost = fmt.Sprintf("unix://%s", DefaultUnixSocket) diff --git a/opts/hosts_windows.go b/opts/hosts_windows.go deleted file mode 100644 index 7c239e00f1..0000000000 --- a/opts/hosts_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build windows - -package opts - -// DefaultHost constant defines the default host string used by docker on Windows -var DefaultHost = "npipe://" + DefaultNamedPipe diff --git a/opts/ip.go b/opts/ip.go deleted file mode 100644 index c7b0dc9947..0000000000 --- a/opts/ip.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts - -import ( - "fmt" - "net" -) - -// IPOpt holds an IP. It is used to store values from CLI flags. -type IPOpt struct { - *net.IP -} - -// NewIPOpt creates a new IPOpt from a reference net.IP and a -// string representation of an IP. If the string is not a valid -// IP it will fallback to the specified reference. -func NewIPOpt(ref *net.IP, defaultVal string) *IPOpt { - o := &IPOpt{ - IP: ref, - } - o.Set(defaultVal) - return o -} - -// Set sets an IPv4 or IPv6 address from a given string. If the given -// string is not parseable as an IP address it returns an error. -func (o *IPOpt) Set(val string) error { - ip := net.ParseIP(val) - if ip == nil { - return fmt.Errorf("%s is not an ip address", val) - } - *o.IP = ip - return nil -} - -// String returns the IP address stored in the IPOpt. If stored IP is a -// nil pointer, it returns an empty string. -func (o *IPOpt) String() string { - if *o.IP == nil { - return "" - } - return o.IP.String() -} diff --git a/opts/ip_test.go b/opts/ip_test.go deleted file mode 100644 index 1027d84a05..0000000000 --- a/opts/ip_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package opts - -import ( - "net" - "testing" -) - -func TestIpOptString(t *testing.T) { - addresses := []string{"", "0.0.0.0"} - var ip net.IP - - for _, address := range addresses { - stringAddress := NewIPOpt(&ip, address).String() - if stringAddress != address { - t.Fatalf("IpOpt string should be `%s`, not `%s`", address, stringAddress) - } - } -} - -func TestNewIpOptInvalidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "Not an ip" - - ipOpt := NewIPOpt(&ip, defaultVal) - - expected := "127.0.0.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestNewIpOptValidDefaultVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - defaultVal := "192.168.1.1" - - ipOpt := NewIPOpt(&ip, defaultVal) - - expected := "192.168.1.1" - if ipOpt.String() != expected { - t.Fatalf("Expected [%v], got [%v]", expected, ipOpt.String()) - } -} - -func TestIpOptSetInvalidVal(t *testing.T) { - ip := net.IPv4(127, 0, 0, 1) - ipOpt := &IPOpt{IP: &ip} - - invalidIP := "invalid ip" - expectedError := "invalid ip is not an ip address" - err := ipOpt.Set(invalidIP) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an Error with [%v], got [%v]", expectedError, err.Error()) - } -} diff --git a/opts/opts.go b/opts/opts.go deleted file mode 100644 index 1b9d6b294a..0000000000 --- a/opts/opts.go +++ /dev/null @@ -1,321 +0,0 @@ -package opts - -import ( - "fmt" - "net" - "regexp" - "strings" - - "github.com/docker/engine-api/types/filters" -) - -var ( - alphaRegexp = regexp.MustCompile(`[a-zA-Z]`) - domainRegexp = regexp.MustCompile(`^(:?(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9]))(:?\.(:?[a-zA-Z0-9]|(:?[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])))*)\.?\s*$`) -) - -// ListOpts holds a list 
of values and a validation function. -type ListOpts struct { - values *[]string - validator ValidatorFctType -} - -// NewListOpts creates a new ListOpts with the specified validator. -func NewListOpts(validator ValidatorFctType) ListOpts { - var values []string - return *NewListOptsRef(&values, validator) -} - -// NewListOptsRef creates a new ListOpts with the specified values and validator. -func NewListOptsRef(values *[]string, validator ValidatorFctType) *ListOpts { - return &ListOpts{ - values: values, - validator: validator, - } -} - -func (opts *ListOpts) String() string { - return fmt.Sprintf("%v", []string((*opts.values))) -} - -// Set validates if needed the input value and adds it to the -// internal slice. -func (opts *ListOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - (*opts.values) = append((*opts.values), value) - return nil -} - -// Delete removes the specified element from the slice. -func (opts *ListOpts) Delete(key string) { - for i, k := range *opts.values { - if k == key { - (*opts.values) = append((*opts.values)[:i], (*opts.values)[i+1:]...) - return - } - } -} - -// GetMap returns the content of values in a map in order to avoid -// duplicates. -func (opts *ListOpts) GetMap() map[string]struct{} { - ret := make(map[string]struct{}) - for _, k := range *opts.values { - ret[k] = struct{}{} - } - return ret -} - -// GetAll returns the values of slice. -func (opts *ListOpts) GetAll() []string { - return (*opts.values) -} - -// GetAllOrEmpty returns the values of the slice -// or an empty slice when there are no values. -func (opts *ListOpts) GetAllOrEmpty() []string { - v := *opts.values - if v == nil { - return make([]string, 0) - } - return v -} - -// Get checks the existence of the specified key. -func (opts *ListOpts) Get(key string) bool { - for _, k := range *opts.values { - if k == key { - return true - } - } - return false -} - -// Len returns the amount of element in the slice. -func (opts *ListOpts) Len() int { - return len((*opts.values)) -} - -// Type returns a string name for this Option type -func (opts *ListOpts) Type() string { - return "list" -} - -// NamedOption is an interface that list and map options -// with names implement. -type NamedOption interface { - Name() string -} - -// NamedListOpts is a ListOpts with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedListOpts struct { - name string - ListOpts -} - -var _ NamedOption = &NamedListOpts{} - -// NewNamedListOptsRef creates a reference to a new NamedListOpts struct. -func NewNamedListOptsRef(name string, values *[]string, validator ValidatorFctType) *NamedListOpts { - return &NamedListOpts{ - name: name, - ListOpts: *NewListOptsRef(values, validator), - } -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *NamedListOpts) Name() string { - return o.name -} - -//MapOpts holds a map of values and a validation function. -type MapOpts struct { - values map[string]string - validator ValidatorFctType -} - -// Set validates if needed the input value and add it to the -// internal map, by splitting on '='. 
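Both option types plug into the flag machinery the same way: Set validates, then stores. ListOpts appends each value; MapOpts, per the comment above, splits on the first `=`. A minimal sketch, assuming this repository's `opts/` package imports as `github.com/docker/docker/opts`:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts" // import path assumed for this repo's opts/ package
)

func main() {
	// ListOpts: a repeatable flag; every Set runs the validator first.
	dns := opts.NewListOpts(opts.ValidateIPAddress)
	_ = dns.Set("8.8.8.8")
	if err := dns.Set("not-an-ip"); err != nil {
		fmt.Println(err) // "not-an-ip is not an ip address" — rejected, never stored
	}
	fmt.Println(dns.GetAll()) // [8.8.8.8]

	// MapOpts: splits on the first '=' only, so values may themselves contain '='.
	m := make(map[string]string)
	labels := opts.NewMapOpts(m, opts.ValidateLabel)
	_ = labels.Set("env=prod") // m["env"] = "prod"
	_ = labels.Set("cmd=a=b")  // m["cmd"] = "a=b"
	fmt.Println(m)             // map[cmd:a=b env:prod]
}
```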
-func (opts *MapOpts) Set(value string) error { - if opts.validator != nil { - v, err := opts.validator(value) - if err != nil { - return err - } - value = v - } - vals := strings.SplitN(value, "=", 2) - if len(vals) == 1 { - (opts.values)[vals[0]] = "" - } else { - (opts.values)[vals[0]] = vals[1] - } - return nil -} - -// GetAll returns the values of MapOpts as a map. -func (opts *MapOpts) GetAll() map[string]string { - return opts.values -} - -func (opts *MapOpts) String() string { - return fmt.Sprintf("%v", map[string]string((opts.values))) -} - -// Type returns a string name for this Option type -func (opts *MapOpts) Type() string { - return "map" -} - -// NewMapOpts creates a new MapOpts with the specified map of values and a validator. -func NewMapOpts(values map[string]string, validator ValidatorFctType) *MapOpts { - if values == nil { - values = make(map[string]string) - } - return &MapOpts{ - values: values, - validator: validator, - } -} - -// NamedMapOpts is a MapOpts struct with a configuration name. -// This struct is useful to keep reference to the assigned -// field name in the internal configuration struct. -type NamedMapOpts struct { - name string - MapOpts -} - -var _ NamedOption = &NamedMapOpts{} - -// NewNamedMapOpts creates a reference to a new NamedMapOpts struct. -func NewNamedMapOpts(name string, values map[string]string, validator ValidatorFctType) *NamedMapOpts { - return &NamedMapOpts{ - name: name, - MapOpts: *NewMapOpts(values, validator), - } -} - -// Name returns the name of the NamedMapOpts in the configuration. -func (o *NamedMapOpts) Name() string { - return o.name -} - -// ValidatorFctType defines a validator function that returns a validated string and/or an error. -type ValidatorFctType func(val string) (string, error) - -// ValidatorFctListType defines a validator function that returns a validated list of string and/or an error -type ValidatorFctListType func(val string) ([]string, error) - -// ValidateIPAddress validates an Ip address. -func ValidateIPAddress(val string) (string, error) { - var ip = net.ParseIP(strings.TrimSpace(val)) - if ip != nil { - return ip.String(), nil - } - return "", fmt.Errorf("%s is not an ip address", val) -} - -// ValidateDNSSearch validates domain for resolvconf search configuration. -// A zero length domain is represented by a dot (.). -func ValidateDNSSearch(val string) (string, error) { - if val = strings.Trim(val, " "); val == "." { - return val, nil - } - return validateDomain(val) -} - -func validateDomain(val string) (string, error) { - if alphaRegexp.FindString(val) == "" { - return "", fmt.Errorf("%s is not a valid domain", val) - } - ns := domainRegexp.FindSubmatch([]byte(val)) - if len(ns) > 0 && len(ns[1]) < 255 { - return string(ns[1]), nil - } - return "", fmt.Errorf("%s is not a valid domain", val) -} - -// ValidateLabel validates that the specified string is a valid label, and returns it. -// Labels are in the form on key=value. -func ValidateLabel(val string) (string, error) { - if strings.Count(val, "=") < 1 { - return "", fmt.Errorf("bad attribute format: %s", val) - } - return val, nil -} - -// ValidateSysctl validates a sysctl and returns it. 
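The whitelist check that follows accepts either an exact key match or one of the allowed prefixes; a quick sketch of the three outcomes, under the same assumed import path:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/opts" // import path assumed
)

func main() {
	v, _ := opts.ValidateSysctl("kernel.shmmax=65536") // exact whitelist hit
	fmt.Println(v)

	v, _ = opts.ValidateSysctl("net.ipv4.ip_forward=1") // allowed via the "net." prefix
	fmt.Println(v)

	_, err := opts.ValidateSysctl("vm.swappiness=10") // neither: rejected
	fmt.Println(err) // sysctl 'vm.swappiness=10' is not whitelisted
}
```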
-func ValidateSysctl(val string) (string, error) { - validSysctlMap := map[string]bool{ - "kernel.msgmax": true, - "kernel.msgmnb": true, - "kernel.msgmni": true, - "kernel.sem": true, - "kernel.shmall": true, - "kernel.shmmax": true, - "kernel.shmmni": true, - "kernel.shm_rmid_forced": true, - } - validSysctlPrefixes := []string{ - "net.", - "fs.mqueue.", - } - arr := strings.Split(val, "=") - if len(arr) < 2 { - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) - } - if validSysctlMap[arr[0]] { - return val, nil - } - - for _, vp := range validSysctlPrefixes { - if strings.HasPrefix(arr[0], vp) { - return val, nil - } - } - return "", fmt.Errorf("sysctl '%s' is not whitelisted", val) -} - -// FilterOpt is a flag type for validating filters -type FilterOpt struct { - filter filters.Args -} - -// NewFilterOpt returns a new FilterOpt -func NewFilterOpt() FilterOpt { - return FilterOpt{filter: filters.NewArgs()} -} - -func (o *FilterOpt) String() string { - repr, err := filters.ToParam(o.filter) - if err != nil { - return "invalid filters" - } - return repr -} - -// Set sets the value of the opt by parsing the command line value -func (o *FilterOpt) Set(value string) error { - var err error - o.filter, err = filters.ParseFlag(value, o.filter) - return err -} - -// Type returns the option type -func (o *FilterOpt) Type() string { - return "filter" -} - -// Value returns the value of this option -func (o *FilterOpt) Value() filters.Args { - return o.filter -} diff --git a/opts/opts_test.go b/opts/opts_test.go deleted file mode 100644 index 9f41e47864..0000000000 --- a/opts/opts_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package opts - -import ( - "fmt" - "strings" - "testing" -) - -func TestValidateIPAddress(t *testing.T) { - if ret, err := ValidateIPAddress(`1.2.3.4`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`1.2.3.4`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`127.0.0.1`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`127.0.0.1`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`::1`); err != nil || ret == "" { - t.Fatalf("ValidateIPAddress(`::1`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`127`); err == nil || ret != "" { - t.Fatalf("ValidateIPAddress(`127`) got %s %s", ret, err) - } - - if ret, err := ValidateIPAddress(`random invalid string`); err == nil || ret != "" { - t.Fatalf("ValidateIPAddress(`random invalid string`) got %s %s", ret, err) - } - -} - -func TestMapOpts(t *testing.T) { - tmpMap := make(map[string]string) - o := NewMapOpts(tmpMap, logOptsValidator) - o.Set("max-size=1") - if o.String() != "map[max-size:1]" { - t.Errorf("%s != [map[max-size:1]", o.String()) - } - - o.Set("max-file=2") - if len(tmpMap) != 2 { - t.Errorf("map length %d != 2", len(tmpMap)) - } - - if tmpMap["max-file"] != "2" { - t.Errorf("max-file = %s != 2", tmpMap["max-file"]) - } - - if tmpMap["max-size"] != "1" { - t.Errorf("max-size = %s != 1", tmpMap["max-size"]) - } - if o.Set("dummy-val=3") == nil { - t.Errorf("validator is not being called") - } -} - -func TestListOptsWithoutValidator(t *testing.T) { - o := NewListOpts(nil) - o.Set("foo") - if o.String() != "[foo]" { - t.Errorf("%s != [foo]", o.String()) - } - o.Set("bar") - if o.Len() != 2 { - t.Errorf("%d != 2", o.Len()) - } - o.Set("bar") - if o.Len() != 3 { - t.Errorf("%d != 3", o.Len()) - } - if !o.Get("bar") { - t.Error("o.Get(\"bar\") == false") - } - if o.Get("baz") { - t.Error("o.Get(\"baz\") == true") - } - o.Delete("foo") - if o.String() != "[bar 
bar]" { - t.Errorf("%s != [bar bar]", o.String()) - } - listOpts := o.GetAll() - if len(listOpts) != 2 || listOpts[0] != "bar" || listOpts[1] != "bar" { - t.Errorf("Expected [[bar bar]], got [%v]", listOpts) - } - mapListOpts := o.GetMap() - if len(mapListOpts) != 1 { - t.Errorf("Expected [map[bar:{}]], got [%v]", mapListOpts) - } - -} - -func TestListOptsWithValidator(t *testing.T) { - // Re-using logOptsvalidator (used by MapOpts) - o := NewListOpts(logOptsValidator) - o.Set("foo") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } - o.Set("foo=bar") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } - o.Set("max-file=2") - if o.Len() != 1 { - t.Errorf("%d != 1", o.Len()) - } - if !o.Get("max-file=2") { - t.Error("o.Get(\"max-file=2\") == false") - } - if o.Get("baz") { - t.Error("o.Get(\"baz\") == true") - } - o.Delete("max-file=2") - if o.String() != "[]" { - t.Errorf("%s != []", o.String()) - } -} - -func TestValidateDNSSearch(t *testing.T) { - valid := []string{ - `.`, - `a`, - `a.`, - `1.foo`, - `17.foo`, - `foo.bar`, - `foo.bar.baz`, - `foo.bar.`, - `foo.bar.baz`, - `foo1.bar2`, - `foo1.bar2.baz`, - `1foo.2bar.`, - `1foo.2bar.baz`, - `foo-1.bar-2`, - `foo-1.bar-2.baz`, - `foo-1.bar-2.`, - `foo-1.bar-2.baz`, - `1-foo.2-bar`, - `1-foo.2-bar.baz`, - `1-foo.2-bar.`, - `1-foo.2-bar.baz`, - } - - invalid := []string{ - ``, - ` `, - ` `, - `17`, - `17.`, - `.17`, - `17-.`, - `17-.foo`, - `.foo`, - `foo-.bar`, - `-foo.bar`, - `foo.bar-`, - `foo.bar-.baz`, - `foo.-bar`, - `foo.-bar.baz`, - `foo.bar.baz.this.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbethis.should.fail.on.long.name.beause.it.is.longer.thanisshouldbe`, - } - - for _, domain := range valid { - if ret, err := ValidateDNSSearch(domain); err != nil || ret == "" { - t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) - } - } - - for _, domain := range invalid { - if ret, err := ValidateDNSSearch(domain); err == nil || ret != "" { - t.Fatalf("ValidateDNSSearch(`"+domain+"`) got %s %s", ret, err) - } - } -} - -func TestValidateLabel(t *testing.T) { - if _, err := ValidateLabel("label"); err == nil || err.Error() != "bad attribute format: label" { - t.Fatalf("Expected an error [bad attribute format: label], go %v", err) - } - if actual, err := ValidateLabel("key1=value1"); err != nil || actual != "key1=value1" { - t.Fatalf("Expected [key1=value1], got [%v,%v]", actual, err) - } - // Validate it's working with more than one = - if actual, err := ValidateLabel("key1=value1=value2"); err != nil { - t.Fatalf("Expected [key1=value1=value2], got [%v,%v]", actual, err) - } - // Validate it's working with one more - if actual, err := ValidateLabel("key1=value1=value2=value3"); err != nil { - t.Fatalf("Expected [key1=value1=value2=value2], got [%v,%v]", actual, err) - } -} - -func logOptsValidator(val string) (string, error) { - allowedKeys := map[string]string{"max-size": "1", "max-file": "2"} - vals := strings.Split(val, "=") - if allowedKeys[vals[0]] != "" { - return val, nil - } - return "", fmt.Errorf("invalid key %s", vals[0]) -} - -func TestNamedListOpts(t *testing.T) { - var v []string - o := NewNamedListOptsRef("foo-name", &v, nil) - - o.Set("foo") - if o.String() != "[foo]" { - t.Errorf("%s != [foo]", o.String()) - } - if o.Name() != "foo-name" { - t.Errorf("%s != foo-name", o.Name()) - } - if len(v) != 1 { - t.Errorf("expected foo to be in the values, 
got %v", v) - } -} - -func TestNamedMapOpts(t *testing.T) { - tmpMap := make(map[string]string) - o := NewNamedMapOpts("max-name", tmpMap, nil) - - o.Set("max-size=1") - if o.String() != "map[max-size:1]" { - t.Errorf("%s != [map[max-size:1]", o.String()) - } - if o.Name() != "max-name" { - t.Errorf("%s != max-name", o.Name()) - } - if _, exist := tmpMap["max-size"]; !exist { - t.Errorf("expected map-size to be in the values, got %v", tmpMap) - } -} diff --git a/opts/opts_unix.go b/opts/opts_unix.go deleted file mode 100644 index f1ce844a8f..0000000000 --- a/opts/opts_unix.go +++ /dev/null @@ -1,6 +0,0 @@ -// +build !windows - -package opts - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 -const DefaultHTTPHost = "localhost" diff --git a/opts/opts_windows.go b/opts/opts_windows.go deleted file mode 100644 index ebe40c969c..0000000000 --- a/opts/opts_windows.go +++ /dev/null @@ -1,56 +0,0 @@ -package opts - -// TODO Windows. Identify bug in GOLang 1.5.1+ and/or Windows Server 2016 TP5. -// @jhowardmsft, @swernli. -// -// On Windows, this mitigates a problem with the default options of running -// a docker client against a local docker daemon on TP5. -// -// What was found that if the default host is "localhost", even if the client -// (and daemon as this is local) is not physically on a network, and the DNS -// cache is flushed (ipconfig /flushdns), then the client will pause for -// exactly one second when connecting to the daemon for calls. For example -// using docker run windowsservercore cmd, the CLI will send a create followed -// by an attach. You see the delay between the attach finishing and the attach -// being seen by the daemon. -// -// Here's some daemon debug logs with additional debug spew put in. The -// AfterWriteJSON log is the very last thing the daemon does as part of the -// create call. The POST /attach is the second CLI call. Notice the second -// time gap. -// -// time="2015-11-06T13:38:37.259627400-08:00" level=debug msg="After createRootfs" -// time="2015-11-06T13:38:37.263626300-08:00" level=debug msg="After setHostConfig" -// time="2015-11-06T13:38:37.267631200-08:00" level=debug msg="before createContainerPl...." -// time="2015-11-06T13:38:37.271629500-08:00" level=debug msg=ToDiskLocking.... -// time="2015-11-06T13:38:37.275643200-08:00" level=debug msg="loggin event...." -// time="2015-11-06T13:38:37.277627600-08:00" level=debug msg="logged event...." -// time="2015-11-06T13:38:37.279631800-08:00" level=debug msg="In defer func" -// time="2015-11-06T13:38:37.282628100-08:00" level=debug msg="After daemon.create" -// time="2015-11-06T13:38:37.286651700-08:00" level=debug msg="return 2" -// time="2015-11-06T13:38:37.289629500-08:00" level=debug msg="Returned from daemon.ContainerCreate" -// time="2015-11-06T13:38:37.311629100-08:00" level=debug msg="After WriteJSON" -// ... 1 second gap here.... -// time="2015-11-06T13:38:38.317866200-08:00" level=debug msg="Calling POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach" -// time="2015-11-06T13:38:38.326882500-08:00" level=info msg="POST /v1.22/containers/984758282b842f779e805664b2c95d563adc9a979c8a3973e68c807843ee4757/attach?stderr=1&stdin=1&stdout=1&stream=1" -// -// We suspect this is either a bug introduced in GOLang 1.5.1, or that a change -// in GOLang 1.5.1 (from 1.4.3) is exposing a bug in Windows. 
In theory, -// the Windows networking stack is supposed to resolve "localhost" internally, -// without hitting DNS, or even reading the hosts file (which is why localhost -// is commented out in the hosts file on Windows). -// -// We have validated that working around this using the actual IPv4 localhost -// address does not cause the delay. -// -// This does not occur with the docker client built with 1.4.3 on the same -// Windows build, regardless of whether the daemon is built using 1.5.1 -// or 1.4.3. It does not occur on Linux. We also verified we see the same thing -// on a cross-compiled Windows binary (from Linux). -// -// Final note: This is a mitigation, not a 'real' fix. It is still susceptible -// to the delay if a user were to do 'docker run -H=tcp://localhost:2375...' -// explicitly. - -// DefaultHTTPHost Default HTTP Host used if only port is provided to -H flag e.g. docker daemon -H tcp://:8080 -const DefaultHTTPHost = "127.0.0.1" diff --git a/pkg/README.md b/pkg/README.md deleted file mode 100644 index c4b78a8ad8..0000000000 --- a/pkg/README.md +++ /dev/null @@ -1,11 +0,0 @@ -pkg/ is a collection of utility packages used by the Docker project without being specific to its internals. - -Utility packages are kept separate from the docker core codebase to keep it as small and concise as possible. -If some utilities grow larger and their APIs stabilize, they may be moved to their own repository under the -Docker organization, to facilitate re-use by other projects. However that is not the priority. - -The directory `pkg` is named after the same directory in the camlistore project. Since Brad is a core -Go maintainer, we thought it made sense to copy his methods for organizing Go code :) Thanks Brad! - -Because utility packages are small and neatly separated from the rest of the codebase, they are a good -place to start for aspiring maintainers and contributors. Get in touch if you want to help maintain them! diff --git a/pkg/aaparser/aaparser.go b/pkg/aaparser/aaparser.go deleted file mode 100644 index 507298f42a..0000000000 --- a/pkg/aaparser/aaparser.go +++ /dev/null @@ -1,92 +0,0 @@ -// Package aaparser is a convenience package interacting with `apparmor_parser`. -package aaparser - -import ( - "fmt" - "os/exec" - "path/filepath" - "strconv" - "strings" -) - -const ( - binary = "apparmor_parser" -) - -// GetVersion returns the major and minor version of apparmor_parser. -func GetVersion() (int, error) { - output, err := cmd("", "--version") - if err != nil { - return -1, err - } - - return parseVersion(output) -} - -// LoadProfile runs `apparmor_parser -r -W` on a specified apparmor profile to -// replace and write it to disk. -func LoadProfile(profilePath string) error { - _, err := cmd(filepath.Dir(profilePath), "-r", "-W", filepath.Base(profilePath)) - if err != nil { - return err - } - return nil -} - -// cmd runs `apparmor_parser` with the passed arguments. -func cmd(dir string, arg ...string) (string, error) { - c := exec.Command(binary, arg...) - c.Dir = dir - - output, err := c.CombinedOutput() - if err != nil { - return "", fmt.Errorf("running `%s %s` failed with output: %s\nerror: %v", c.Path, strings.Join(c.Args, " "), string(output), err) - } - - return string(output), nil -} - -// parseVersion takes the output from `apparmor_parser --version` and returns -// a representation of the {major, minor, patch} version as a single number of -// the form MMmmPPP {major, minor, patch}. 
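A worked instance of that encoding, matching the expectations in the test file further down: version 2.9.95 packs to 209095.

```go
package main

import "fmt"

func main() {
	// From "AppArmor parser version 2.9.95": major 2, minor 9, patch 95.
	major, minor, patch := 2, 9, 95
	// Same arithmetic as parseVersion: major*1e5 + minor*1e3 + patch.
	fmt.Println(major*1e5 + minor*1e3 + patch) // 209095
}
```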
-func parseVersion(output string) (int, error) { - // output is in the form of the following: - // AppArmor parser version 2.9.1 - // Copyright (C) 1999-2008 Novell Inc. - // Copyright 2009-2012 Canonical Ltd. - - lines := strings.SplitN(output, "\n", 2) - words := strings.Split(lines[0], " ") - version := words[len(words)-1] - - // split by major minor version - v := strings.Split(version, ".") - if len(v) == 0 || len(v) > 3 { - return -1, fmt.Errorf("parsing version failed for output: `%s`", output) - } - - // Default the versions to 0. - var majorVersion, minorVersion, patchLevel int - - majorVersion, err := strconv.Atoi(v[0]) - if err != nil { - return -1, err - } - - if len(v) > 1 { - minorVersion, err = strconv.Atoi(v[1]) - if err != nil { - return -1, err - } - } - if len(v) > 2 { - patchLevel, err = strconv.Atoi(v[2]) - if err != nil { - return -1, err - } - } - - // major*10^5 + minor*10^3 + patch*10^0 - numericVersion := majorVersion*1e5 + minorVersion*1e3 + patchLevel - return numericVersion, nil -} diff --git a/pkg/aaparser/aaparser_test.go b/pkg/aaparser/aaparser_test.go deleted file mode 100644 index 69bc8d2fd8..0000000000 --- a/pkg/aaparser/aaparser_test.go +++ /dev/null @@ -1,73 +0,0 @@ -package aaparser - -import ( - "testing" -) - -type versionExpected struct { - output string - version int -} - -func TestParseVersion(t *testing.T) { - versions := []versionExpected{ - { - output: `AppArmor parser version 2.10 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 210000, - }, - { - output: `AppArmor parser version 2.8 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 208000, - }, - { - output: `AppArmor parser version 2.20 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 220000, - }, - { - output: `AppArmor parser version 2.05 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 205000, - }, - { - output: `AppArmor parser version 2.9.95 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 209095, - }, - { - output: `AppArmor parser version 3.14.159 -Copyright (C) 1999-2008 Novell Inc. -Copyright 2009-2012 Canonical Ltd. - -`, - version: 314159, - }, - } - - for _, v := range versions { - version, err := parseVersion(v.output) - if err != nil { - t.Fatalf("expected error to be nil for %#v, got: %v", v, err) - } - if version != v.version { - t.Fatalf("expected version to be %d, was %d, for: %#v\n", v.version, version, v) - } - } -} diff --git a/pkg/archive/README.md b/pkg/archive/README.md deleted file mode 100644 index 7307d9694f..0000000000 --- a/pkg/archive/README.md +++ /dev/null @@ -1 +0,0 @@ -This code provides helper functions for dealing with archive files. 
diff --git a/pkg/archive/archive.go b/pkg/archive/archive.go deleted file mode 100644 index ad3d65b2fc..0000000000 --- a/pkg/archive/archive.go +++ /dev/null @@ -1,1147 +0,0 @@ -package archive - -import ( - "archive/tar" - "bufio" - "bytes" - "compress/bzip2" - "compress/gzip" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/promise" - "github.com/docker/docker/pkg/system" -) - -type ( - // Archive is a type of io.ReadCloser which has two interfaces Read and Closer. - Archive io.ReadCloser - // Reader is a type of io.Reader. - Reader io.Reader - // Compression is the state represents if compressed or not. - Compression int - // WhiteoutFormat is the format of whiteouts unpacked - WhiteoutFormat int - // TarChownOptions wraps the chown options UID and GID. - TarChownOptions struct { - UID, GID int - } - // TarOptions wraps the tar options. - TarOptions struct { - IncludeFiles []string - ExcludePatterns []string - Compression Compression - NoLchown bool - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - ChownOpts *TarChownOptions - IncludeSourceDir bool - // WhiteoutFormat is the expected on disk format for whiteout files. - // This format will be converted to the standard format on pack - // and from the standard format on unpack. - WhiteoutFormat WhiteoutFormat - // When unpacking, specifies whether overwriting a directory with a - // non-directory is allowed and vice versa. - NoOverwriteDirNonDir bool - // For each include when creating an archive, the included name will be - // replaced with the matching name from this map. - RebaseNames map[string]string - } - - // Archiver allows the reuse of most utility functions of this package - // with a pluggable Untar function. Also, to facilitate the passing of - // specific id mappings for untar, an archiver can be created with maps - // which will then be passed to Untar operations - Archiver struct { - Untar func(io.Reader, string, *TarOptions) error - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - } - - // breakoutError is used to differentiate errors related to breaking out - // When testing archive breakout in the unit tests, this error is expected - // in order for the test to pass. - breakoutError error -) - -var ( - // ErrNotImplemented is the error message of function not implemented. - ErrNotImplemented = errors.New("Function not implemented") - defaultArchiver = &Archiver{Untar: Untar, UIDMaps: nil, GIDMaps: nil} -) - -const ( - // HeaderSize is the size in bytes of a tar header - HeaderSize = 512 -) - -const ( - // Uncompressed represents the uncompressed. - Uncompressed Compression = iota - // Bzip2 is bzip2 compression algorithm. - Bzip2 - // Gzip is gzip compression algorithm. - Gzip - // Xz is xz compression algorithm. - Xz -) - -const ( - // AUFSWhiteoutFormat is the default format for whiteouts - AUFSWhiteoutFormat WhiteoutFormat = iota - // OverlayWhiteoutFormat formats whiteout according to the overlay - // standard. - OverlayWhiteoutFormat -) - -// IsArchive checks for the magic bytes of a tar or any supported compression -// algorithm. 
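That check is a straight magic-byte comparison (see DetectCompression below); a sketch, assuming the package imports as `github.com/docker/docker/pkg/archive`:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/archive" // import path assumed
)

func main() {
	// Gzip streams start with 0x1F 0x8B 0x08.
	fmt.Println(archive.DetectCompression([]byte{0x1F, 0x8B, 0x08, 0x00}) == archive.Gzip) // true
	// Bzip2 streams start with "BZh".
	fmt.Println(archive.DetectCompression([]byte("BZh91AY")) == archive.Bzip2) // true
	// Anything unrecognized is reported as Uncompressed.
	fmt.Println(archive.DetectCompression([]byte("plain tar or junk")) == archive.Uncompressed) // true
}
```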
-func IsArchive(header []byte) bool { - compression := DetectCompression(header) - if compression != Uncompressed { - return true - } - r := tar.NewReader(bytes.NewBuffer(header)) - _, err := r.Next() - return err == nil -} - -// IsArchivePath checks if the (possibly compressed) file at the given path -// starts with a tar file header. -func IsArchivePath(path string) bool { - file, err := os.Open(path) - if err != nil { - return false - } - defer file.Close() - rdr, err := DecompressStream(file) - if err != nil { - return false - } - r := tar.NewReader(rdr) - _, err = r.Next() - return err == nil -} - -// DetectCompression detects the compression algorithm of the source. -func DetectCompression(source []byte) Compression { - for compression, m := range map[Compression][]byte{ - Bzip2: {0x42, 0x5A, 0x68}, - Gzip: {0x1F, 0x8B, 0x08}, - Xz: {0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, - } { - if len(source) < len(m) { - logrus.Debug("Len too short") - continue - } - if bytes.Compare(m, source[:len(m)]) == 0 { - return compression - } - } - return Uncompressed -} - -func xzDecompress(archive io.Reader) (io.ReadCloser, <-chan struct{}, error) { - args := []string{"xz", "-d", "-c", "-q"} - - return cmdStream(exec.Command(args[0], args[1:]...), archive) -} - -// DecompressStream decompresses the archive and returns a ReaderCloser with the decompressed archive. -func DecompressStream(archive io.Reader) (io.ReadCloser, error) { - p := pools.BufioReader32KPool - buf := p.Get(archive) - bs, err := buf.Peek(10) - if err != nil && err != io.EOF { - // Note: we'll ignore any io.EOF error because there are some odd - // cases where the layer.tar file will be empty (zero bytes) and - // that results in an io.EOF from the Peek() call. So, in those - // cases we'll just treat it as a non-compressed stream and - // that means just create an empty layer. - // See Issue 18170 - return nil, err - } - - compression := DetectCompression(bs) - switch compression { - case Uncompressed: - readBufWrapper := p.NewReadCloserWrapper(buf, buf) - return readBufWrapper, nil - case Gzip: - gzReader, err := gzip.NewReader(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, gzReader) - return readBufWrapper, nil - case Bzip2: - bz2Reader := bzip2.NewReader(buf) - readBufWrapper := p.NewReadCloserWrapper(buf, bz2Reader) - return readBufWrapper, nil - case Xz: - xzReader, chdone, err := xzDecompress(buf) - if err != nil { - return nil, err - } - readBufWrapper := p.NewReadCloserWrapper(buf, xzReader) - return ioutils.NewReadCloserWrapper(readBufWrapper, func() error { - <-chdone - return readBufWrapper.Close() - }), nil - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// CompressStream compresseses the dest with specified compression algorithm. 
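CompressStream (below) and DecompressStream are symmetric, with the caveat the Bzip2/Xz branch notes: only gzip can actually be written. A round-trip sketch, same assumed import path:

```go
package main

import (
	"bytes"
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/archive" // import path assumed
)

func main() {
	var buf bytes.Buffer

	w, err := archive.CompressStream(&buf, archive.Gzip)
	if err != nil {
		panic(err)
	}
	w.Write([]byte("hello, layer"))
	w.Close() // flushes the gzip footer

	// DecompressStream sniffs the format itself; no hint is needed.
	r, err := archive.DecompressStream(&buf)
	if err != nil {
		panic(err)
	}
	defer r.Close()
	out, _ := ioutil.ReadAll(r)
	fmt.Println(string(out)) // hello, layer
}
```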
-func CompressStream(dest io.Writer, compression Compression) (io.WriteCloser, error) { - p := pools.BufioWriter32KPool - buf := p.Get(dest) - switch compression { - case Uncompressed: - writeBufWrapper := p.NewWriteCloserWrapper(buf, buf) - return writeBufWrapper, nil - case Gzip: - gzWriter := gzip.NewWriter(dest) - writeBufWrapper := p.NewWriteCloserWrapper(buf, gzWriter) - return writeBufWrapper, nil - case Bzip2, Xz: - // archive/bzip2 does not support writing, and there is no xz support at all - // However, this is not a problem as docker only currently generates gzipped tars - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - default: - return nil, fmt.Errorf("Unsupported compression format %s", (&compression).Extension()) - } -} - -// Extension returns the extension of a file that uses the specified compression algorithm. -func (compression *Compression) Extension() string { - switch *compression { - case Uncompressed: - return "tar" - case Bzip2: - return "tar.bz2" - case Gzip: - return "tar.gz" - case Xz: - return "tar.xz" - } - return "" -} - -type tarWhiteoutConverter interface { - ConvertWrite(*tar.Header, string, os.FileInfo) error - ConvertRead(*tar.Header, string) (bool, error) -} - -type tarAppender struct { - TarWriter *tar.Writer - Buffer *bufio.Writer - - // for hardlink mapping - SeenFiles map[uint64]string - UIDMaps []idtools.IDMap - GIDMaps []idtools.IDMap - - // For packing and unpacking whiteout files in the - // non standard format. The whiteout files defined - // by the AUFS standard are used as the tar whiteout - // standard. - WhiteoutConverter tarWhiteoutConverter -} - -// canonicalTarName provides a platform-independent and consistent posix-style -//path for files and directories to be archived regardless of the platform. -func canonicalTarName(name string, isDir bool) (string, error) { - name, err := CanonicalTarNameForPath(name) - if err != nil { - return "", err - } - - // suffix with '/' for directories - if isDir && !strings.HasSuffix(name, "/") { - name += "/" - } - return name, nil -} - -// addTarFile adds to the tar archive a file from `path` as `name` -func (ta *tarAppender) addTarFile(path, name string) error { - fi, err := os.Lstat(path) - if err != nil { - return err - } - - link := "" - if fi.Mode()&os.ModeSymlink != 0 { - if link, err = os.Readlink(path); err != nil { - return err - } - } - - hdr, err := tar.FileInfoHeader(fi, link) - if err != nil { - return err - } - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - name, err = canonicalTarName(name, fi.IsDir()) - if err != nil { - return fmt.Errorf("tar: cannot canonicalize path: %v", err) - } - hdr.Name = name - - inode, err := setHeaderForSpecialDevice(hdr, ta, name, fi.Sys()) - if err != nil { - return err - } - - // if it's not a directory and has more than 1 link, - // it's hardlinked, so set the type flag accordingly - if !fi.IsDir() && hasHardlinks(fi) { - // a link should have a name that it links too - // and that linked name should be first in the tar archive - if oldpath, ok := ta.SeenFiles[inode]; ok { - hdr.Typeflag = tar.TypeLink - hdr.Linkname = oldpath - hdr.Size = 0 // This Must be here for the writer math to add up! 
- } else { - ta.SeenFiles[inode] = name - } - } - - capability, _ := system.Lgetxattr(path, "security.capability") - if capability != nil { - hdr.Xattrs = make(map[string]string) - hdr.Xattrs["security.capability"] = string(capability) - } - - //handle re-mapping container ID mappings back to host ID mappings before - //writing tar headers/files. We skip whiteout files because they were written - //by the kernel and already have proper ownership relative to the host - if !strings.HasPrefix(filepath.Base(hdr.Name), WhiteoutPrefix) && (ta.UIDMaps != nil || ta.GIDMaps != nil) { - uid, gid, err := getFileUIDGID(fi.Sys()) - if err != nil { - return err - } - xUID, err := idtools.ToContainer(uid, ta.UIDMaps) - if err != nil { - return err - } - xGID, err := idtools.ToContainer(gid, ta.GIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - hdr.Gid = xGID - } - - if ta.WhiteoutConverter != nil { - if err := ta.WhiteoutConverter.ConvertWrite(hdr, path, fi); err != nil { - return err - } - } - - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - return err - } - - if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 { - file, err := os.Open(path) - if err != nil { - return err - } - - ta.Buffer.Reset(ta.TarWriter) - defer ta.Buffer.Reset(nil) - _, err = io.Copy(ta.Buffer, file) - file.Close() - if err != nil { - return err - } - err = ta.Buffer.Flush() - if err != nil { - return err - } - } - - return nil -} - -func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, Lchown bool, chownOpts *TarChownOptions) error { - // hdr.Mode is in linux format, which we can use for sycalls, - // but for os.Foo() calls we need the mode converted to os.FileMode, - // so use hdrInfo.Mode() (they differ for e.g. setuid bits) - hdrInfo := hdr.FileInfo() - - switch hdr.Typeflag { - case tar.TypeDir: - // Create directory unless it exists as a directory already. - // In that case we just want to merge the two - if fi, err := os.Lstat(path); !(err == nil && fi.IsDir()) { - if err := os.Mkdir(path, hdrInfo.Mode()); err != nil { - return err - } - } - - case tar.TypeReg, tar.TypeRegA: - // Source is regular file - file, err := os.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode()) - if err != nil { - return err - } - if _, err := io.Copy(file, reader); err != nil { - file.Close() - return err - } - file.Close() - - case tar.TypeBlock, tar.TypeChar, tar.TypeFifo: - // Handle this is an OS-specific way - if err := handleTarTypeBlockCharFifo(hdr, path); err != nil { - return err - } - - case tar.TypeLink: - targetPath := filepath.Join(extractDir, hdr.Linkname) - // check for hardlink breakout - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid hardlink %q -> %q", targetPath, hdr.Linkname)) - } - if err := os.Link(targetPath, path); err != nil { - return err - } - - case tar.TypeSymlink: - // path -> hdr.Linkname = targetPath - // e.g. 
/extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file - targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) - - // the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because - // that symlink would first have to be created, which would be caught earlier, at this very check: - if !strings.HasPrefix(targetPath, extractDir) { - return breakoutError(fmt.Errorf("invalid symlink %q -> %q", path, hdr.Linkname)) - } - if err := os.Symlink(hdr.Linkname, path); err != nil { - return err - } - - case tar.TypeXGlobalHeader: - logrus.Debug("PAX Global Extended Headers found and ignored") - return nil - - default: - return fmt.Errorf("Unhandled tar header type %d\n", hdr.Typeflag) - } - - // Lchown is not supported on Windows. - if Lchown && runtime.GOOS != "windows" { - if chownOpts == nil { - chownOpts = &TarChownOptions{UID: hdr.Uid, GID: hdr.Gid} - } - if err := os.Lchown(path, chownOpts.UID, chownOpts.GID); err != nil { - return err - } - } - - var errors []string - for key, value := range hdr.Xattrs { - if err := system.Lsetxattr(path, key, []byte(value), 0); err != nil { - if err == syscall.ENOTSUP { - // We ignore errors here because not all graphdrivers support - // xattrs *cough* old versions of AUFS *cough*. However only - // ENOTSUP should be emitted in that case, otherwise we still - // bail. - errors = append(errors, err.Error()) - continue - } - return err - } - - } - - if len(errors) > 0 { - logrus.WithFields(logrus.Fields{ - "errors": errors, - }).Warn("ignored xattrs in archive: underlying filesystem doesn't support them") - } - - // There is no LChmod, so ignore mode for symlink. Also, this - // must happen after chown, as that can modify the file mode - if err := handleLChmod(hdr, path, hdrInfo); err != nil { - return err - } - - aTime := hdr.AccessTime - if aTime.Before(hdr.ModTime) { - // Last access time should never be before last modified time. - aTime = hdr.ModTime - } - - // system.Chtimes doesn't support a NOFOLLOW flag atm - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := system.Chtimes(path, aTime, hdr.ModTime); err != nil { - return err - } - } else { - ts := []syscall.Timespec{timeToTimespec(aTime), timeToTimespec(hdr.ModTime)} - if err := system.LUtimesNano(path, ts); err != nil && err != system.ErrNotSupportedPlatform { - return err - } - } - return nil -} - -// Tar creates an archive from the directory at `path`, and returns it as a -// stream of bytes. -func Tar(path string, compression Compression) (io.ReadCloser, error) { - return TarWithOptions(path, &TarOptions{Compression: compression}) -} - -// TarWithOptions creates an archive from the directory at `path`, only including files whose relative -// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`. -func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) { - - // Fix the source path to work with long path names. This is a no-op - // on platforms other than Windows. 
- srcPath = fixVolumePathPrefix(srcPath) - - patterns, patDirs, exceptions, err := fileutils.CleanPatterns(options.ExcludePatterns) - - if err != nil { - return nil, err - } - - pipeReader, pipeWriter := io.Pipe() - - compressWriter, err := CompressStream(pipeWriter, options.Compression) - if err != nil { - return nil, err - } - - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(compressWriter), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: options.UIDMaps, - GIDMaps: options.GIDMaps, - WhiteoutConverter: getWhiteoutConverter(options.WhiteoutFormat), - } - - defer func() { - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Errorf("Can't close tar writer: %s", err) - } - if err := compressWriter.Close(); err != nil { - logrus.Errorf("Can't close compress writer: %s", err) - } - if err := pipeWriter.Close(); err != nil { - logrus.Errorf("Can't close pipe writer: %s", err) - } - }() - - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - - stat, err := os.Lstat(srcPath) - if err != nil { - return - } - - if !stat.IsDir() { - // We can't later join a non-dir with any includes because the - // 'walk' will error if "file/." is stat-ed and "file" is not a - // directory. So, we must split the source path and use the - // basename as the include. - if len(options.IncludeFiles) > 0 { - logrus.Warn("Tar: Can't archive a file with includes") - } - - dir, base := SplitPathDirEntry(srcPath) - srcPath = dir - options.IncludeFiles = []string{base} - } - - if len(options.IncludeFiles) == 0 { - options.IncludeFiles = []string{"."} - } - - seen := make(map[string]bool) - - for _, include := range options.IncludeFiles { - rebaseName := options.RebaseNames[include] - - walkRoot := getWalkRoot(srcPath, include) - filepath.Walk(walkRoot, func(filePath string, f os.FileInfo, err error) error { - if err != nil { - logrus.Errorf("Tar: Can't stat file %s to tar: %s", srcPath, err) - return nil - } - - relFilePath, err := filepath.Rel(srcPath, filePath) - if err != nil || (!options.IncludeSourceDir && relFilePath == "." && f.IsDir()) { - // Error getting relative path OR we are looking - // at the source directory path. Skip in both situations. - return nil - } - - if options.IncludeSourceDir && include == "." && relFilePath != "." { - relFilePath = strings.Join([]string{".", relFilePath}, string(filepath.Separator)) - } - - skip := false - - // If "include" is an exact match for the current file - // then even if there's an "excludePatterns" pattern that - // matches it, don't skip it. IOW, assume an explicit 'include' - // is asking for that file no matter what - which is true - // for some files, like .dockerignore and Dockerfile (sometimes) - if include != relFilePath { - skip, err = fileutils.OptimizedMatches(relFilePath, patterns, patDirs) - if err != nil { - logrus.Errorf("Error matching %s: %v", relFilePath, err) - return err - } - } - - if skip { - // If we want to skip this file and its a directory - // then we should first check to see if there's an - // excludes pattern (eg !dir/file) that starts with this - // dir. If so then we can't skip this dir. - - // Its not a dir then so we can just return/skip. 
- if !f.IsDir() { - return nil - } - - // No exceptions (!...) in patterns so just skip dir - if !exceptions { - return filepath.SkipDir - } - - dirSlash := relFilePath + string(filepath.Separator) - - for _, pat := range patterns { - if pat[0] != '!' { - continue - } - pat = pat[1:] + string(filepath.Separator) - if strings.HasPrefix(pat, dirSlash) { - // found a match - so can't skip this dir - return nil - } - } - - // No matching exclusion dir so just skip dir - return filepath.SkipDir - } - - if seen[relFilePath] { - return nil - } - seen[relFilePath] = true - - // Rename the base resource. - if rebaseName != "" { - var replacement string - if rebaseName != string(filepath.Separator) { - // Special case the root directory to replace with an - // empty string instead so that we don't end up with - // double slashes in the paths. - replacement = rebaseName - } - - relFilePath = strings.Replace(relFilePath, include, replacement, 1) - } - - if err := ta.addTarFile(filePath, relFilePath); err != nil { - logrus.Errorf("Can't add file %s to tar: %s", filePath, err) - // if pipe is broken, stop writing tar stream to it - if err == io.ErrClosedPipe { - return err - } - } - return nil - }) - } - }() - - return pipeReader, nil -} - -// Unpack unpacks the decompressedArchive to dest with options. -func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) error { - tr := tar.NewReader(decompressedArchive) - trBuf := pools.BufioReader32KPool.Get(nil) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } - whiteoutConverter := getWhiteoutConverter(options.WhiteoutFormat) - - // Iterate through the files in the archive. -loop: - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return err - } - - // Normalize name, for safety and for a simple is-root check - // This keeps "../" as-is, but normalizes "/../" to "/". Or Windows: - // This keeps "..\" as-is, but normalizes "\..\" to "\". - hdr.Name = filepath.Clean(hdr.Name) - - for _, exclude := range options.ExcludePatterns { - if strings.HasPrefix(hdr.Name, exclude) { - continue loop - } - } - - // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in - // the filepath format for the OS on which the daemon is running. Hence - // the check for a slash-suffix MUST be done in an OS-agnostic way. - if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) { - // Not the root directory, ensure that the parent directory exists - parent := filepath.Dir(hdr.Name) - parentPath := filepath.Join(dest, parent) - if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) { - err = idtools.MkdirAllNewAs(parentPath, 0777, remappedRootUID, remappedRootGID) - if err != nil { - return err - } - } - } - - path := filepath.Join(dest, hdr.Name) - rel, err := filepath.Rel(dest, path) - if err != nil { - return err - } - if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) { - return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest)) - } - - // If path exits we almost always just want to remove and replace it - // The only exception is when it is a directory *and* the file from - // the layer is also a directory. Then we want to merge them (i.e. - // just apply the metadata from the layer). 
- if fi, err := os.Lstat(path); err == nil { - if options.NoOverwriteDirNonDir && fi.IsDir() && hdr.Typeflag != tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing directory with a non-directory from the archive. - return fmt.Errorf("cannot overwrite directory %q with non-directory %q", path, dest) - } - - if options.NoOverwriteDirNonDir && !fi.IsDir() && hdr.Typeflag == tar.TypeDir { - // If NoOverwriteDirNonDir is true then we cannot replace - // an existing non-directory with a directory from the archive. - return fmt.Errorf("cannot overwrite non-directory %q with directory %q", path, dest) - } - - if fi.IsDir() && hdr.Name == "." { - continue - } - - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return err - } - } - } - trBuf.Reset(tr) - - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. - if hdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(hdr.Uid, options.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if hdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(hdr.Gid, options.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID - } - - if whiteoutConverter != nil { - writeFile, err := whiteoutConverter.ConvertRead(hdr, path) - if err != nil { - return err - } - if !writeFile { - continue - } - } - - if err := createTarFile(path, dest, hdr, trBuf, !options.NoLchown, options.ChownOpts); err != nil { - return err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return err - } - } - return nil -} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -// FIXME: specify behavior when target path exists vs. doesn't exist. -func Untar(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. 
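Tar and Untar above are the package's usual entry points; a round-trip sketch with hypothetical /tmp paths:

```go
package main

import "github.com/docker/docker/pkg/archive" // import path assumed

func main() {
	// Pack a directory, compressing on the fly.
	rc, err := archive.Tar("/tmp/src", archive.Gzip) // paths are placeholders
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	// Untar sniffs and strips the compression itself; nil options mean
	// defaults (lchown enabled, no exclude patterns).
	if err := archive.Untar(rc, "/tmp/dst", nil); err != nil {
		panic(err)
	}
}
```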
-func UntarUncompressed(tarArchive io.Reader, dest string, options *TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *TarOptions, decompress bool) error { - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - dest = filepath.Clean(dest) - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - r := tarArchive - if decompress { - decompressedArchive, err := DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return Unpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func (archiver *Archiver) TarUntar(src, dst string) error { - logrus.Debugf("TarUntar(%s %s)", src, dst) - archive, err := TarWithOptions(src, &TarOptions{Compression: Uncompressed}) - if err != nil { - return err - } - defer archive.Close() - - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } - } - return archiver.Untar(archive, dst, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return defaultArchiver.TarUntar(src, dst) -} - -// UntarPath untar a file from path to a destination, src is the source tar file path. -func (archiver *Archiver) UntarPath(src, dst string) error { - archive, err := os.Open(src) - if err != nil { - return err - } - defer archive.Close() - var options *TarOptions - if archiver.UIDMaps != nil || archiver.GIDMaps != nil { - options = &TarOptions{ - UIDMaps: archiver.UIDMaps, - GIDMaps: archiver.GIDMaps, - } - } - return archiver.Untar(archive, dst, options) -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. -func UntarPath(src, dst string) error { - return defaultArchiver.UntarPath(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func (archiver *Archiver) CopyWithTar(src, dst string) error { - srcSt, err := os.Stat(src) - if err != nil { - return err - } - if !srcSt.IsDir() { - return archiver.CopyFileWithTar(src, dst) - } - - // if this archiver is set up with ID mapping we need to create - // the new destination directory with the remapped root UID/GID pair - // as owner - rootUID, rootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { - return err - } - // Create dst, copy src's content into it - logrus.Debugf("Creating dest directory: %s", dst) - if err := idtools.MkdirAllNewAs(dst, 0755, rootUID, rootGID); err != nil { - return err - } - logrus.Debugf("Calling TarUntar(%s, %s)", src, dst) - return archiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. 
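The copy helpers dispatch on the source: directories go through TarUntar, single files through CopyFileWithTar, where a trailing separator on dst means "copy into". A sketch, again with placeholder paths:

```go
package main

import "github.com/docker/docker/pkg/archive" // import path assumed

func main() {
	// Directory source: contents of /tmp/src are recreated under /tmp/dst.
	if err := archive.CopyWithTar("/tmp/src", "/tmp/dst"); err != nil {
		panic(err)
	}

	// Single file: the trailing separator yields /tmp/dir/file.txt;
	// without it the copy would be written as /tmp/dir itself.
	if err := archive.CopyFileWithTar("/tmp/file.txt", "/tmp/dir/"); err != nil {
		panic(err)
	}
}
```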
-func CopyWithTar(src, dst string) error { - return defaultArchiver.CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) { - logrus.Debugf("CopyFileWithTar(%s, %s)", src, dst) - srcSt, err := os.Stat(src) - if err != nil { - return err - } - - if srcSt.IsDir() { - return fmt.Errorf("Can't copy a directory") - } - - // Clean up the trailing slash. This must be done in an operating - // system specific manner. - if dst[len(dst)-1] == os.PathSeparator { - dst = filepath.Join(dst, filepath.Base(src)) - } - // Create the holding directory if necessary - if err := system.MkdirAll(filepath.Dir(dst), 0700); err != nil { - return err - } - - r, w := io.Pipe() - errC := promise.Go(func() error { - defer w.Close() - - srcF, err := os.Open(src) - if err != nil { - return err - } - defer srcF.Close() - - hdr, err := tar.FileInfoHeader(srcSt, "") - if err != nil { - return err - } - hdr.Name = filepath.Base(dst) - hdr.Mode = int64(chmodTarEntry(os.FileMode(hdr.Mode))) - - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(archiver.UIDMaps, archiver.GIDMaps) - if err != nil { - return err - } - - // only perform mapping if the file being copied isn't already owned by the - // uid or gid of the remapped root in the container - if remappedRootUID != hdr.Uid { - xUID, err := idtools.ToHost(hdr.Uid, archiver.UIDMaps) - if err != nil { - return err - } - hdr.Uid = xUID - } - if remappedRootGID != hdr.Gid { - xGID, err := idtools.ToHost(hdr.Gid, archiver.GIDMaps) - if err != nil { - return err - } - hdr.Gid = xGID - } - - tw := tar.NewWriter(w) - defer tw.Close() - if err := tw.WriteHeader(hdr); err != nil { - return err - } - if _, err := io.Copy(tw, srcF); err != nil { - return err - } - return nil - }) - defer func() { - if er := <-errC; err != nil { - err = er - } - }() - - err = archiver.Untar(r, filepath.Dir(dst), nil) - if err != nil { - r.CloseWithError(err) - } - return err -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// Destination handling is in an operating specific manner depending -// where the daemon is running. If `dst` ends with a trailing slash -// the final destination path will be `dst/base(src)` (Linux) or -// `dst\base(src)` (Windows). -func CopyFileWithTar(src, dst string) (err error) { - return defaultArchiver.CopyFileWithTar(src, dst) -} - -// cmdStream executes a command, and returns its stdout as a stream. -// If the command fails to run or doesn't complete successfully, an error -// will be returned, including anything written on stderr. 
-func cmdStream(cmd *exec.Cmd, input io.Reader) (io.ReadCloser, <-chan struct{}, error) { - chdone := make(chan struct{}) - cmd.Stdin = input - pipeR, pipeW := io.Pipe() - cmd.Stdout = pipeW - var errBuf bytes.Buffer - cmd.Stderr = &errBuf - - // Run the command and return the pipe - if err := cmd.Start(); err != nil { - return nil, nil, err - } - - // Copy stdout to the returned pipe - go func() { - if err := cmd.Wait(); err != nil { - pipeW.CloseWithError(fmt.Errorf("%s: %s", err, errBuf.String())) - } else { - pipeW.Close() - } - close(chdone) - }() - - return pipeR, chdone, nil -} - -// NewTempArchive reads the content of src into a temporary file, and returns the contents -// of that file as an archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -func NewTempArchive(src Archive, dir string) (*TempArchive, error) { - f, err := ioutil.TempFile(dir, "") - if err != nil { - return nil, err - } - if _, err := io.Copy(f, src); err != nil { - return nil, err - } - if _, err := f.Seek(0, 0); err != nil { - return nil, err - } - st, err := f.Stat() - if err != nil { - return nil, err - } - size := st.Size() - return &TempArchive{File: f, Size: size}, nil -} - -// TempArchive is a temporary archive. The archive can only be read once - as soon as reading completes, -// the file will be deleted. -type TempArchive struct { - *os.File - Size int64 // Pre-computed from Stat().Size() as a convenience - read int64 - closed bool -} - -// Close closes the underlying file if it's still open, or does a no-op -// to allow callers to try to close the TempArchive multiple times safely. -func (archive *TempArchive) Close() error { - if archive.closed { - return nil - } - - archive.closed = true - - return archive.File.Close() -} - -func (archive *TempArchive) Read(data []byte) (int, error) { - n, err := archive.File.Read(data) - archive.read += int64(n) - if err != nil || archive.read == archive.Size { - archive.Close() - os.Remove(archive.File.Name()) - } - return n, err -} diff --git a/pkg/archive/archive_linux.go b/pkg/archive/archive_linux.go deleted file mode 100644 index 5ec3ae1622..0000000000 --- a/pkg/archive/archive_linux.go +++ /dev/null @@ -1,91 +0,0 @@ -package archive - -import ( - "archive/tar" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/docker/pkg/system" -) - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - if format == OverlayWhiteoutFormat { - return overlayWhiteoutConverter{} - } - return nil -} - -type overlayWhiteoutConverter struct{} - -func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os.FileInfo) error { - // convert whiteouts to AUFS format - if fi.Mode()&os.ModeCharDevice != 0 && hdr.Devmajor == 0 && hdr.Devminor == 0 { - // we just rename the file and make it normal - dir, filename := filepath.Split(hdr.Name) - hdr.Name = filepath.Join(dir, WhiteoutPrefix+filename) - hdr.Mode = 0600 - hdr.Typeflag = tar.TypeReg - hdr.Size = 0 - } - - if fi.Mode()&os.ModeDir != 0 { - // convert opaque dirs to AUFS format by writing an empty file with the prefix - opaque, err := system.Lgetxattr(path, "trusted.overlay.opaque") - if err != nil { - return err - } - if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' { - // create a header for the whiteout file - // it should inherit some properties from the parent, but be a regular file - *hdr = tar.Header{ - Typeflag: tar.TypeReg, - Mode: hdr.Mode & int64(os.ModePerm), - Name: filepath.Join(hdr.Name, 
WhiteoutOpaqueDir), - Size: 0, - Uid: hdr.Uid, - Uname: hdr.Uname, - Gid: hdr.Gid, - Gname: hdr.Gname, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - } - } - - return nil -} - -func (overlayWhiteoutConverter) ConvertRead(hdr *tar.Header, path string) (bool, error) { - base := filepath.Base(path) - dir := filepath.Dir(path) - - // if a directory is marked as opaque by the AUFS special file, we need to translate that to overlay - if base == WhiteoutOpaqueDir { - if err := syscall.Setxattr(dir, "trusted.overlay.opaque", []byte{'y'}, 0); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - // if a file was deleted and we are using overlay, we need to create a character device - if strings.HasPrefix(base, WhiteoutPrefix) { - originalBase := base[len(WhiteoutPrefix):] - originalPath := filepath.Join(dir, originalBase) - - if err := syscall.Mknod(originalPath, syscall.S_IFCHR, 0); err != nil { - return false, err - } - if err := os.Chown(originalPath, hdr.Uid, hdr.Gid); err != nil { - return false, err - } - - // don't write the file itself - return false, nil - } - - return true, nil -} diff --git a/pkg/archive/archive_other.go b/pkg/archive/archive_other.go deleted file mode 100644 index 54acbf2856..0000000000 --- a/pkg/archive/archive_other.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux - -package archive - -func getWhiteoutConverter(format WhiteoutFormat) tarWhiteoutConverter { - return nil -} diff --git a/pkg/archive/archive_test.go b/pkg/archive/archive_test.go deleted file mode 100644 index 85e41227c0..0000000000 --- a/pkg/archive/archive_test.go +++ /dev/null @@ -1,1148 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "os/exec" - "path/filepath" - "runtime" - "strings" - "testing" - "time" -) - -var tmp string - -func init() { - tmp = "/tmp/" - if runtime.GOOS == "windows" { - tmp = os.Getenv("TEMP") + `\` - } -} - -func TestIsArchiveNilHeader(t *testing.T) { - out := IsArchive(nil) - if out { - t.Fatalf("isArchive should return false as nil is not a valid archive header") - } -} - -func TestIsArchiveInvalidHeader(t *testing.T) { - header := []byte{0x00, 0x01, 0x02} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is not a valid archive header", header) - } -} - -func TestIsArchiveBzip2(t *testing.T) { - header := []byte{0x42, 0x5A, 0x68} - out := IsArchive(header) - if !out { - t.Fatalf("isArchive should return true as %s is a bz2 header", header) - } -} - -func TestIsArchive7zip(t *testing.T) { - header := []byte{0x50, 0x4b, 0x03, 0x04} - out := IsArchive(header) - if out { - t.Fatalf("isArchive should return false as %s is a 7z header and it is not supported", header) - } -} - -func TestIsArchivePathDir(t *testing.T) { - cmd := exec.Command("sh", "-c", "mkdir -p /tmp/archivedir") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - if IsArchivePath(tmp + "archivedir") { - t.Fatalf("Incorrectly recognised directory as an archive") - } -} - -func TestIsArchivePathInvalidFile(t *testing.T) { - cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1K count=1 of=/tmp/archive && gzip --stdout /tmp/archive > /tmp/archive.gz") - output, err := cmd.CombinedOutput() - if err != nil { - t.Fatalf("Fail to create an archive file for test : %s.", output) - } - if IsArchivePath(tmp + "archive") { - t.Fatalf("Incorrectly recognised invalid tar path as archive") - } - if 
IsArchivePath(tmp + "archive.gz") {
-		t.Fatalf("Incorrectly recognised invalid compressed tar path as archive")
-	}
-}
-
-func TestIsArchivePathTar(t *testing.T) {
-	cmd := exec.Command("sh", "-c", "touch /tmp/archivedata && tar -cf /tmp/archive /tmp/archivedata && gzip --stdout /tmp/archive > /tmp/archive.gz")
-	output, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("Failed to create an archive file for test: %s.", output)
-	}
-	if !IsArchivePath(tmp + "/archive") {
-		t.Fatalf("Did not recognise valid tar path as archive")
-	}
-	if !IsArchivePath(tmp + "archive.gz") {
-		t.Fatalf("Did not recognise valid compressed tar path as archive")
-	}
-}
-
-func TestDecompressStreamGzip(t *testing.T) {
-	cmd := exec.Command("sh", "-c", "touch /tmp/archive && gzip -f /tmp/archive")
-	output, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("Failed to create an archive file for test: %s.", output)
-	}
-	archive, err := os.Open(tmp + "archive.gz")
-	if err != nil {
-		t.Fatalf("Failed to open the archive file: %s", err)
-	}
-	defer archive.Close()
-	_, err = DecompressStream(archive)
-	if err != nil {
-		t.Fatalf("Failed to decompress a gzip file.")
-	}
-}
-
-func TestDecompressStreamBzip2(t *testing.T) {
-	cmd := exec.Command("sh", "-c", "touch /tmp/archive && bzip2 -f /tmp/archive")
-	output, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("Failed to create an archive file for test: %s.", output)
-	}
-	archive, err := os.Open(tmp + "archive.bz2")
-	if err != nil {
-		t.Fatalf("Failed to open the archive file: %s", err)
-	}
-	defer archive.Close()
-	_, err = DecompressStream(archive)
-	if err != nil {
-		t.Fatalf("Failed to decompress a bzip2 file.")
-	}
-}
-
-func TestDecompressStreamXz(t *testing.T) {
-	if runtime.GOOS == "windows" {
-		t.Skip("Xz not present in msys2")
-	}
-	cmd := exec.Command("sh", "-c", "touch /tmp/archive && xz -f /tmp/archive")
-	output, err := cmd.CombinedOutput()
-	if err != nil {
-		t.Fatalf("Failed to create an archive file for test: %s.", output)
-	}
-	archive, err := os.Open(tmp + "archive.xz")
-	if err != nil {
-		t.Fatalf("Failed to open the archive file: %s", err)
-	}
-	defer archive.Close()
-	_, err = DecompressStream(archive)
-	if err != nil {
-		t.Fatalf("Failed to decompress an xz file.")
-	}
-}
-
-func TestCompressStreamXzUnsupported(t *testing.T) {
-	dest, err := os.Create(tmp + "dest")
-	if err != nil {
-		t.Fatalf("Failed to create the destination file")
-	}
-	_, err = CompressStream(dest, Xz)
-	if err == nil {
-		t.Fatalf("Should fail as xz is unsupported for compression format.")
-	}
-}
-
-func TestCompressStreamBzip2Unsupported(t *testing.T) {
-	dest, err := os.Create(tmp + "dest")
-	if err != nil {
-		t.Fatalf("Failed to create the destination file")
-	}
-	_, err = CompressStream(dest, Bzip2)
-	if err == nil {
-		t.Fatalf("Should fail as bzip2 is unsupported for compression format.")
-	}
-}
-
-func TestCompressStreamInvalid(t *testing.T) {
-	dest, err := os.Create(tmp + "dest")
-	if err != nil {
-		t.Fatalf("Failed to create the destination file")
-	}
-	_, err = CompressStream(dest, -1)
-	if err == nil {
-		t.Fatalf("Should fail as -1 is an invalid compression format.")
-	}
-}
-
-func TestExtensionInvalid(t *testing.T) {
-	compression := Compression(-1)
-	output := compression.Extension()
-	if output != "" {
-		t.Fatalf("The extension of an invalid compression should be an empty string.")
-	}
-}
-
-func TestExtensionUncompressed(t *testing.T) {
-	compression := Uncompressed
-	output := compression.Extension()
-	if output != "tar" {
-		t.Fatalf("The extension of an uncompressed archive should be 'tar'.")
-	}
-}
-
-func TestExtensionBzip2(t *testing.T) {
-	compression := Bzip2
-	output := compression.Extension()
-	if output != "tar.bz2" {
-		t.Fatalf("The extension of a bzip2 archive should be 'tar.bz2'")
-	}
-}
-
-func TestExtensionGzip(t *testing.T) {
-	compression := Gzip
-	output := compression.Extension()
-	if output != "tar.gz" {
-		t.Fatalf("The extension of a gzip archive should be 'tar.gz'")
-	}
-}
-
-func TestExtensionXz(t *testing.T) {
-	compression := Xz
-	output := compression.Extension()
-	if output != "tar.xz" {
-		t.Fatalf("The extension of an xz archive should be 'tar.xz'")
-	}
-}
-
-func TestCmdStreamLargeStderr(t *testing.T) {
-	cmd := exec.Command("sh", "-c", "dd if=/dev/zero bs=1k count=1000 of=/dev/stderr; echo hello")
-	out, _, err := cmdStream(cmd, nil)
-	if err != nil {
-		t.Fatalf("Failed to start command: %s", err)
-	}
-	errCh := make(chan error)
-	go func() {
-		_, err := io.Copy(ioutil.Discard, out)
-		errCh <- err
-	}()
-	select {
-	case err := <-errCh:
-		if err != nil {
-			t.Fatalf("Command should not have failed (err=%.100s...)", err)
-		}
-	case <-time.After(5 * time.Second):
-		t.Fatalf("Command did not complete in 5 seconds; probable deadlock")
-	}
-}
-
-func TestCmdStreamBad(t *testing.T) {
-	// TODO Windows: Figure out why this is failing in CI but not locally
-	if runtime.GOOS == "windows" {
-		t.Skip("Failing on Windows CI machines")
-	}
-	badCmd := exec.Command("sh", "-c", "echo hello; echo >&2 error couldn\\'t reverse the phase pulser; exit 1")
-	out, _, err := cmdStream(badCmd, nil)
-	if err != nil {
-		t.Fatalf("Failed to start command: %s", err)
-	}
-	if output, err := ioutil.ReadAll(out); err == nil {
-		t.Fatalf("Command should have failed")
-	} else if err.Error() != "exit status 1: error couldn't reverse the phase pulser\n" {
-		t.Fatalf("Wrong error value (%s)", err)
-	} else if s := string(output); s != "hello\n" {
-		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
-	}
-}
-
-func TestCmdStreamGood(t *testing.T) {
-	cmd := exec.Command("sh", "-c", "echo hello; exit 0")
-	out, _, err := cmdStream(cmd, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if output, err := ioutil.ReadAll(out); err != nil {
-		t.Fatalf("Command should not have failed (err=%s)", err)
-	} else if s := string(output); s != "hello\n" {
-		t.Fatalf("Command output should be '%s', not '%s'", "hello\\n", output)
-	}
-}
-
-func TestUntarPathWithInvalidDest(t *testing.T) {
-	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tempFolder)
-	invalidDestFolder := filepath.Join(tempFolder, "invalidDest")
-	// Create a src file
-	srcFile := filepath.Join(tempFolder, "src")
-	tarFile := filepath.Join(tempFolder, "src.tar")
-	os.Create(srcFile)
-	os.Create(invalidDestFolder) // being a file (not dir) should cause an error
-
-	// Translate back to Unix semantics as next exec.Command is run under sh
-	srcFileU := srcFile
-	tarFileU := tarFile
-	if runtime.GOOS == "windows" {
-		tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
-		srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
-	}
-
-	cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
-	_, err = cmd.CombinedOutput()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = UntarPath(tarFile, invalidDestFolder)
-	if err == nil {
-		t.Fatalf("UntarPath with invalid destination path should throw an error.")
-	}
-}
-
-func TestUntarPathWithInvalidSrc(t *testing.T) {
-	dest, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatalf("Failed to create the destination folder")
-	}
-	defer os.RemoveAll(dest)
-	err = UntarPath("/invalid/path", dest)
-	if err == nil {
-		t.Fatalf("UntarPath with invalid src path should throw an error.")
-	}
-}
-
-func TestUntarPath(t *testing.T) {
-	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpFolder)
-	srcFile := filepath.Join(tmpFolder, "src")
-	tarFile := filepath.Join(tmpFolder, "src.tar")
-	os.Create(srcFile)
-
-	destFolder := filepath.Join(tmpFolder, "dest")
-	err = os.MkdirAll(destFolder, 0740)
-	if err != nil {
-		t.Fatalf("Failed to create the destination folder")
-	}
-
-	// Translate back to Unix semantics as next exec.Command is run under sh
-	srcFileU := srcFile
-	tarFileU := tarFile
-	if runtime.GOOS == "windows" {
-		tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
-		srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
-	}
-	cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
-	_, err = cmd.CombinedOutput()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	err = UntarPath(tarFile, destFolder)
-	if err != nil {
-		t.Fatalf("UntarPath shouldn't throw an error, %s.", err)
-	}
-	expectedFile := filepath.Join(destFolder, srcFileU)
-	_, err = os.Stat(expectedFile)
-	if err != nil {
-		t.Fatalf("Destination folder should contain the source file but did not.")
-	}
-}
-
-// Do the same test as above but with the destination as file, it should fail
-func TestUntarPathWithDestinationFile(t *testing.T) {
-	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpFolder)
-	srcFile := filepath.Join(tmpFolder, "src")
-	tarFile := filepath.Join(tmpFolder, "src.tar")
-	os.Create(srcFile)
-
-	// Translate back to Unix semantics as next exec.Command is run under sh
-	srcFileU := srcFile
-	tarFileU := tarFile
-	if runtime.GOOS == "windows" {
-		tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
-		srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
-	}
-	cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
-	_, err = cmd.CombinedOutput()
-	if err != nil {
-		t.Fatal(err)
-	}
-	destFile := filepath.Join(tmpFolder, "dest")
-	_, err = os.Create(destFile)
-	if err != nil {
-		t.Fatalf("Failed to create the destination file")
-	}
-	err = UntarPath(tarFile, destFile)
-	if err == nil {
-		t.Fatalf("UntarPath should throw an error if the destination is a file")
-	}
-}
-
-// Do the same test as above but with the destination folder already exists
-// and the destination file is a directory
-// It's working, see https://github.com/docker/docker/issues/10040
-func TestUntarPathWithDestinationSrcFileAsFolder(t *testing.T) {
-	tmpFolder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpFolder)
-	srcFile := filepath.Join(tmpFolder, "src")
-	tarFile := filepath.Join(tmpFolder, "src.tar")
-	os.Create(srcFile)
-
-	// Translate back to Unix semantics as next exec.Command is run under sh
-	srcFileU := srcFile
-	tarFileU := tarFile
-	if runtime.GOOS == "windows" {
-		tarFileU = "/tmp/" + filepath.Base(filepath.Dir(tarFile)) + "/src.tar"
-		srcFileU = "/tmp/" + filepath.Base(filepath.Dir(srcFile)) + "/src"
-	}
-
-	cmd := exec.Command("sh", "-c", "tar cf "+tarFileU+" "+srcFileU)
-	_, err = cmd.CombinedOutput()
-	if err != nil {
-		t.Fatal(err)
-	}
-	destFolder := filepath.Join(tmpFolder, "dest")
-	err = os.MkdirAll(destFolder, 0740)
-	if err != nil {
-		t.Fatalf("Failed to create the destination folder")
-	}
-	// Let's create a folder that has the same path as the extracted file (from tar)
-	destSrcFileAsFolder := filepath.Join(destFolder, srcFileU)
-	err = os.MkdirAll(destSrcFileAsFolder, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = UntarPath(tarFile, destFolder)
-	if err != nil {
-		t.Fatalf("UntarPath should not throw an error if the extracted file already exists and is a folder")
-	}
-}
-
-func TestCopyWithTarInvalidSrc(t *testing.T) {
-	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	destFolder := filepath.Join(tempFolder, "dest")
-	invalidSrc := filepath.Join(tempFolder, "doesnotexists")
-	err = os.MkdirAll(destFolder, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = CopyWithTar(invalidSrc, destFolder)
-	if err == nil {
-		t.Fatalf("archiver.CopyWithTar with invalid src path should throw an error.")
-	}
-}
-
-func TestCopyWithTarInexistentDestWillCreateIt(t *testing.T) {
-	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	srcFolder := filepath.Join(tempFolder, "src")
-	inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists")
-	err = os.MkdirAll(srcFolder, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = CopyWithTar(srcFolder, inexistentDestFolder)
-	if err != nil {
-		t.Fatalf("CopyWithTar with an inexistent folder shouldn't fail.")
-	}
-	_, err = os.Stat(inexistentDestFolder)
-	if err != nil {
-		t.Fatalf("CopyWithTar with an inexistent folder should create it.")
-	}
-}
-
-// Test CopyWithTar with a file as src
-func TestCopyWithTarSrcFile(t *testing.T) {
-	folder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(folder)
-	dest := filepath.Join(folder, "dest")
-	srcFolder := filepath.Join(folder, "src")
-	src := filepath.Join(folder, filepath.Join("src", "src"))
-	err = os.MkdirAll(srcFolder, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = os.MkdirAll(dest, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ioutil.WriteFile(src, []byte("content"), 0777)
-	err = CopyWithTar(src, dest)
-	if err != nil {
-		t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
-	}
-	_, err = os.Stat(dest)
-	// FIXME Check the content
-	if err != nil {
-		t.Fatalf("Destination file should be the same as the source.")
-	}
-}
-
-// Test CopyWithTar with a folder as src
-func TestCopyWithTarSrcFolder(t *testing.T) {
-	folder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(folder)
-	dest := filepath.Join(folder, "dest")
-	src := filepath.Join(folder, filepath.Join("src", "folder"))
-	err = os.MkdirAll(src, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = os.MkdirAll(dest, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ioutil.WriteFile(filepath.Join(src, "file"), []byte("content"), 0777)
-	err = CopyWithTar(src, dest)
-	if err != nil {
-		t.Fatalf("archiver.CopyWithTar shouldn't throw an error, %s.", err)
-	}
-	_, err = os.Stat(dest)
-	// FIXME Check the content (the file inside)
-	if err != nil {
-		t.Fatalf("Destination folder should contain the source file but did not.")
-	}
-}
-
-func TestCopyFileWithTarInvalidSrc(t *testing.T) {
-	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tempFolder)
-	destFolder := filepath.Join(tempFolder, "dest")
-	err = os.MkdirAll(destFolder, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	invalidFile := filepath.Join(tempFolder, "doesnotexists")
-	err = CopyFileWithTar(invalidFile, destFolder)
-	if err == nil {
-		t.Fatalf("archiver.CopyFileWithTar with invalid src path should throw an error.")
-	}
-}
-
-func TestCopyFileWithTarInexistentDestWillCreateIt(t *testing.T) {
-	tempFolder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tempFolder)
-	srcFile := filepath.Join(tempFolder, "src")
-	inexistentDestFolder := filepath.Join(tempFolder, "doesnotexists")
-	_, err = os.Create(srcFile)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = CopyFileWithTar(srcFile, inexistentDestFolder)
-	if err != nil {
-		t.Fatalf("CopyFileWithTar with an inexistent folder shouldn't fail.")
-	}
-	_, err = os.Stat(inexistentDestFolder)
-	if err != nil {
-		t.Fatalf("CopyFileWithTar with an inexistent folder should create it.")
-	}
-	// FIXME Test the src file and content
-}
-
-func TestCopyFileWithTarSrcFolder(t *testing.T) {
-	folder, err := ioutil.TempDir("", "docker-archive-copyfilewithtar-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(folder)
-	dest := filepath.Join(folder, "dest")
-	src := filepath.Join(folder, "srcfolder")
-	err = os.MkdirAll(src, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = os.MkdirAll(dest, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = CopyFileWithTar(src, dest)
-	if err == nil {
-		t.Fatalf("CopyFileWithTar should throw an error with a folder.")
-	}
-}
-
-func TestCopyFileWithTarSrcFile(t *testing.T) {
-	folder, err := ioutil.TempDir("", "docker-archive-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(folder)
-	dest := filepath.Join(folder, "dest")
-	srcFolder := filepath.Join(folder, "src")
-	src := filepath.Join(folder, filepath.Join("src", "src"))
-	err = os.MkdirAll(srcFolder, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	err = os.MkdirAll(dest, 0740)
-	if err != nil {
-		t.Fatal(err)
-	}
-	ioutil.WriteFile(src, []byte("content"), 0777)
-	err = CopyFileWithTar(src, dest+"/")
-	if err != nil {
-		t.Fatalf("archiver.CopyFileWithTar shouldn't throw an error, %s.", err)
-	}
-	_, err = os.Stat(dest)
-	if err != nil {
-		t.Fatalf("Destination folder should contain the source file but did not.")
-	}
-}
-
-func TestTarFiles(t *testing.T) {
-	// TODO Windows: Figure out how to port this test.
- if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - // try without hardlinks - if err := checkNoChanges(1000, false); err != nil { - t.Fatal(err) - } - // try with hardlinks - if err := checkNoChanges(1000, true); err != nil { - t.Fatal(err) - } -} - -func checkNoChanges(fileNum int, hardlinks bool) error { - srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - if err != nil { - return err - } - defer os.RemoveAll(srcDir) - - destDir, err := ioutil.TempDir("", "docker-test-destDir") - if err != nil { - return err - } - defer os.RemoveAll(destDir) - - _, err = prepareUntarSourceDirectory(fileNum, srcDir, hardlinks) - if err != nil { - return err - } - - err = TarUntar(srcDir, destDir) - if err != nil { - return err - } - - changes, err := ChangesDirs(destDir, srcDir) - if err != nil { - return err - } - if len(changes) > 0 { - return fmt.Errorf("with %d files and %v hardlinks: expected 0 changes, got %d", fileNum, hardlinks, len(changes)) - } - return nil -} - -func tarUntar(t *testing.T, origin string, options *TarOptions) ([]Change, error) { - archive, err := TarWithOptions(origin, options) - if err != nil { - t.Fatal(err) - } - defer archive.Close() - - buf := make([]byte, 10) - if _, err := archive.Read(buf); err != nil { - return nil, err - } - wrap := io.MultiReader(bytes.NewReader(buf), archive) - - detectedCompression := DetectCompression(buf) - compression := options.Compression - if detectedCompression.Extension() != compression.Extension() { - return nil, fmt.Errorf("Wrong compression detected. Actual compression: %s, found %s", compression.Extension(), detectedCompression.Extension()) - } - - tmp, err := ioutil.TempDir("", "docker-test-untar") - if err != nil { - return nil, err - } - defer os.RemoveAll(tmp) - if err := Untar(wrap, tmp, nil); err != nil { - return nil, err - } - if _, err := os.Stat(tmp); err != nil { - return nil, err - } - - return ChangesDirs(origin, tmp) -} - -func TestTarUntar(t *testing.T) { - // TODO Windows: Figure out how to fix this test. - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil { - t.Fatal(err) - } - - for _, c := range []Compression{ - Uncompressed, - Gzip, - } { - changes, err := tarUntar(t, origin, &TarOptions{ - Compression: c, - ExcludePatterns: []string{"3"}, - }) - - if err != nil { - t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err) - } - - if len(changes) != 1 || changes[0].Path != "/3" { - t.Fatalf("Unexpected differences after tarUntar: %v", changes) - } - } -} - -func TestTarWithOptions(t *testing.T) { - // TODO Windows: Figure out how to fix this test. 
- if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - t.Fatal(err) - } - if _, err := ioutil.TempDir(origin, "folder"); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(origin) - if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil { - t.Fatal(err) - } - - cases := []struct { - opts *TarOptions - numChanges int - }{ - {&TarOptions{IncludeFiles: []string{"1"}}, 2}, - {&TarOptions{ExcludePatterns: []string{"2"}}, 1}, - {&TarOptions{ExcludePatterns: []string{"1", "folder*"}}, 2}, - {&TarOptions{IncludeFiles: []string{"1", "1"}}, 2}, - {&TarOptions{IncludeFiles: []string{"1"}, RebaseNames: map[string]string{"1": "test"}}, 4}, - } - for _, testCase := range cases { - changes, err := tarUntar(t, origin, testCase.opts) - if err != nil { - t.Fatalf("Error tar/untar when testing inclusion/exclusion: %s", err) - } - if len(changes) != testCase.numChanges { - t.Errorf("Expected %d changes, got %d for %+v:", - testCase.numChanges, len(changes), testCase.opts) - } - } -} - -// Some tar archives such as http://haproxy.1wt.eu/download/1.5/src/devel/haproxy-1.5-dev21.tar.gz -// use PAX Global Extended Headers. -// Failing prevents the archives from being uncompressed during ADD -func TestTypeXGlobalHeaderDoesNotFail(t *testing.T) { - hdr := tar.Header{Typeflag: tar.TypeXGlobalHeader} - tmpDir, err := ioutil.TempDir("", "docker-test-archive-pax-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpDir) - err = createTarFile(filepath.Join(tmpDir, "pax_global_header"), tmpDir, &hdr, nil, true, nil) - if err != nil { - t.Fatal(err) - } -} - -// Some tar have both GNU specific (huge uid) and Ustar specific (long name) things. -// Not supposed to happen (should use PAX instead of Ustar for long name) but it does and it should still work. -func TestUntarUstarGnuConflict(t *testing.T) { - f, err := os.Open("testdata/broken.tar") - if err != nil { - t.Fatal(err) - } - found := false - tr := tar.NewReader(f) - // Iterate through the files in the archive. 
- for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - t.Fatal(err) - } - if hdr.Name == "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm" { - found = true - break - } - } - if !found { - t.Fatalf("%s not found in the archive", "root/.cpanm/work/1395823785.24209/Plack-1.0030/blib/man3/Plack::Middleware::LighttpdScriptNameFix.3pm") - } -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} - -func BenchmarkTarUntar(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := filepath.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, false) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func BenchmarkTarUntarWithLinks(b *testing.B) { - origin, err := ioutil.TempDir("", "docker-test-untar-origin") - if err != nil { - b.Fatal(err) - } - tempDir, err := ioutil.TempDir("", "docker-test-untar-destination") - if err != nil { - b.Fatal(err) - } - target := filepath.Join(tempDir, "dest") - n, err := prepareUntarSourceDirectory(100, origin, true) - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(origin) - defer os.RemoveAll(tempDir) - - b.ResetTimer() - b.SetBytes(int64(n)) - for n := 0; n < b.N; n++ { - err := TarUntar(origin, target) - if err != nil { - b.Fatal(err) - } - os.RemoveAll(target) - } -} - -func TestUntarInvalidFilenames(t *testing.T) { - // TODO Windows: Figure out how to fix this test. - if runtime.GOOS == "windows" { - t.Skip("Passes but hits breakoutError: platform and architecture is not supported") - } - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarHardlinkToSymlink(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - if runtime.GOOS == "windows" { - t.Skip("hardlinks on Windows") - } - for i, headers := range [][]*tar.Header{ - { - { - Name: "symlink1", - Typeflag: tar.TypeSymlink, - Linkname: "regfile", - Mode: 0644, - }, - { - Name: "symlink2", - Typeflag: tar.TypeLink, - Linkname: "symlink1", - Mode: 0644, - }, - { - Name: "regfile", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarHardlinkToSymlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestUntarInvalidHardlink(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - if runtime.GOOS == "windows" { - t.Skip("hardlinks on Windows") - } - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestUntarInvalidSymlink(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - if runtime.GOOS == "windows" { - t.Skip("hardlinks on Windows") - } - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try writing to victim/newdir/newfile with a symlink in the path - { - // this header needs to be before the next one, or else there is an error - Name: "dir/loophole", - Typeflag: tar.TypeSymlink, - Linkname: "../../victim", - Mode: 0755, - }, - { - Name: "dir/loophole/newdir/newfile", - 
Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("untar", "docker-TestUntarInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestTempArchiveCloseMultipleTimes(t *testing.T) { - reader := ioutil.NopCloser(strings.NewReader("hello")) - tempArchive, err := NewTempArchive(reader, "") - buf := make([]byte, 10) - n, err := tempArchive.Read(buf) - if n != 5 { - t.Fatalf("Expected to read 5 bytes. Read %d instead", n) - } - for i := 0; i < 3; i++ { - if err = tempArchive.Close(); err != nil { - t.Fatalf("i=%d. Unexpected error closing temp archive: %v", i, err) - } - } -} diff --git a/pkg/archive/archive_unix.go b/pkg/archive/archive_unix.go deleted file mode 100644 index fbc3bb8c4d..0000000000 --- a/pkg/archive/archive_unix.go +++ /dev/null @@ -1,112 +0,0 @@ -// +build !windows - -package archive - -import ( - "archive/tar" - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/system" -) - -// fixVolumePathPrefix does platform specific processing to ensure that if -// the path being passed in is not in a volume path format, convert it to one. -func fixVolumePathPrefix(srcPath string) string { - return srcPath -} - -// getWalkRoot calculates the root path when performing a TarWithOptions. -// We use a separate function as this is platform specific. On Linux, we -// can't use filepath.Join(srcPath,include) because this will clean away -// a trailing "." or "/" which may be important. -func getWalkRoot(srcPath string, include string) string { - return srcPath + string(filepath.Separator) + include -} - -// CanonicalTarNameForPath returns platform-specific filepath -// to canonical posix-style path for tar archival. p is relative -// path. -func CanonicalTarNameForPath(p string) (string, error) { - return p, nil // already unix-style -} - -// chmodTarEntry is used to adjust the file permissions used in tar header based -// on the platform the archival is done. 
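The major/minor helpers further down decode a Linux dev_t: the major number lives in bits 8-19, and the minor number is split across bits 0-7 and 20-31. A small worked sketch using the same shifts (splitDev is an illustrative name, not part of the package):

    // splitDev mirrors the major()/minor() arithmetic defined below.
    func splitDev(rdev uint64) (major, minor uint64) {
        major = (rdev >> 8) & 0xfff
        minor = (rdev & 0xff) | ((rdev >> 12) & 0xfff00)
        return major, minor
    }

    // splitDev(0x0801) yields (8, 1), the conventional encoding of
    // block device 8:1 (/dev/sda1 on most systems).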
- -func chmodTarEntry(perm os.FileMode) os.FileMode { - return perm // noop for unix as golang APIs provide perm bits correctly -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - err = errors.New("cannot convert stat value to syscall.Stat_t") - return - } - - inode = uint64(s.Ino) - - // Currently go does not fill in the major/minors - if s.Mode&syscall.S_IFBLK != 0 || - s.Mode&syscall.S_IFCHR != 0 { - hdr.Devmajor = int64(major(uint64(s.Rdev))) - hdr.Devminor = int64(minor(uint64(s.Rdev))) - } - - return -} - -func getFileUIDGID(stat interface{}) (int, int, error) { - s, ok := stat.(*syscall.Stat_t) - - if !ok { - return -1, -1, errors.New("cannot convert stat value to syscall.Stat_t") - } - return int(s.Uid), int(s.Gid), nil -} - -func major(device uint64) uint64 { - return (device >> 8) & 0xfff -} - -func minor(device uint64) uint64 { - return (device & 0xff) | ((device >> 12) & 0xfff00) -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - mode := uint32(hdr.Mode & 07777) - switch hdr.Typeflag { - case tar.TypeBlock: - mode |= syscall.S_IFBLK - case tar.TypeChar: - mode |= syscall.S_IFCHR - case tar.TypeFifo: - mode |= syscall.S_IFIFO - } - - if err := system.Mknod(path, mode, int(system.Mkdev(hdr.Devmajor, hdr.Devminor))); err != nil { - return err - } - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - if hdr.Typeflag == tar.TypeLink { - if fi, err := os.Lstat(hdr.Linkname); err == nil && (fi.Mode()&os.ModeSymlink == 0) { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - } else if hdr.Typeflag != tar.TypeSymlink { - if err := os.Chmod(path, hdrInfo.Mode()); err != nil { - return err - } - } - return nil -} diff --git a/pkg/archive/archive_unix_test.go b/pkg/archive/archive_unix_test.go deleted file mode 100644 index 548391b35d..0000000000 --- a/pkg/archive/archive_unix_test.go +++ /dev/null @@ -1,245 +0,0 @@ -// +build !windows - -package archive - -import ( - "bytes" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" - - "github.com/docker/docker/pkg/system" -) - -func TestCanonicalTarNameForPath(t *testing.T) { - cases := []struct{ in, expected string }{ - {"foo", "foo"}, - {"foo/bar", "foo/bar"}, - {"foo/dir/", "foo/dir/"}, - } - for _, v := range cases { - if out, err := CanonicalTarNameForPath(v.in); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestCanonicalTarName(t *testing.T) { - cases := []struct { - in string - isDir bool - expected string - }{ - {"foo", false, "foo"}, - {"foo", true, "foo/"}, - {"foo/bar", false, "foo/bar"}, - {"foo/bar", true, "foo/bar/"}, - } - for _, v := range cases { - if out, err := canonicalTarName(v.in, v.isDir); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. 
expected:%s got:%s", v.expected, out)
-		}
-	}
-}
-
-func TestChmodTarEntry(t *testing.T) {
-	cases := []struct {
-		in, expected os.FileMode
-	}{
-		{0000, 0000},
-		{0777, 0777},
-		{0644, 0644},
-		{0755, 0755},
-		{0444, 0444},
-	}
-	for _, v := range cases {
-		if out := chmodTarEntry(v.in); out != v.expected {
-			t.Fatalf("wrong chmod. expected:%v got:%v", v.expected, out)
-		}
-	}
-}
-
-func TestTarWithHardLink(t *testing.T) {
-	origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(origin)
-	if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
-		t.Fatal(err)
-	}
-	if err := os.Link(filepath.Join(origin, "1"), filepath.Join(origin, "2")); err != nil {
-		t.Fatal(err)
-	}
-
-	var i1, i2 uint64
-	if i1, err = getNlink(filepath.Join(origin, "1")); err != nil {
-		t.Fatal(err)
-	}
-	// sanity check that we can hardlink
-	if i1 != 2 {
-		t.Skipf("skipping since hardlinks don't work here; expected 2 links, got %d", i1)
-	}
-
-	dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(dest)
-
-	// we'll do this in two steps to separate failure
-	fh, err := Tar(origin, Uncompressed)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// ensure we can read the whole thing with no error, before writing back out
-	buf, err := ioutil.ReadAll(fh)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	bRdr := bytes.NewReader(buf)
-	err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if i1, err = getInode(filepath.Join(dest, "1")); err != nil {
-		t.Fatal(err)
-	}
-	if i2, err = getInode(filepath.Join(dest, "2")); err != nil {
-		t.Fatal(err)
-	}
-
-	if i1 != i2 {
-		t.Errorf("expected matching inodes, but got %d and %d", i1, i2)
-	}
-}
-
-func getNlink(path string) (uint64, error) {
-	stat, err := os.Stat(path)
-	if err != nil {
-		return 0, err
-	}
-	statT, ok := stat.Sys().(*syscall.Stat_t)
-	if !ok {
-		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
-	}
-	// We need this conversion on ARM64
-	return uint64(statT.Nlink), nil
-}
-
-func getInode(path string) (uint64, error) {
-	stat, err := os.Stat(path)
-	if err != nil {
-		return 0, err
-	}
-	statT, ok := stat.Sys().(*syscall.Stat_t)
-	if !ok {
-		return 0, fmt.Errorf("expected type *syscall.Stat_t, got %T", stat.Sys())
-	}
-	return statT.Ino, nil
-}
-
-func TestTarWithBlockCharFifo(t *testing.T) {
-	origin, err := ioutil.TempDir("", "docker-test-tar-hardlink")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(origin)
-	if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
-		t.Fatal(err)
-	}
-	if err := system.Mknod(filepath.Join(origin, "2"), syscall.S_IFBLK, int(system.Mkdev(int64(12), int64(5)))); err != nil {
-		t.Fatal(err)
-	}
-	if err := system.Mknod(filepath.Join(origin, "3"), syscall.S_IFCHR, int(system.Mkdev(int64(12), int64(5)))); err != nil {
-		t.Fatal(err)
-	}
-	if err := system.Mknod(filepath.Join(origin, "4"), syscall.S_IFIFO, int(system.Mkdev(int64(12), int64(5)))); err != nil {
-		t.Fatal(err)
-	}
-
-	dest, err := ioutil.TempDir("", "docker-test-tar-hardlink-dest")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(dest)
-
-	// we'll do this in two steps to separate failure
-	fh, err := Tar(origin, Uncompressed)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	// ensure we can read the whole thing with no error, before writing back out
-	buf, err := ioutil.ReadAll(fh)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	bRdr := bytes.NewReader(buf)
-	err = Untar(bRdr, dest, &TarOptions{Compression: Uncompressed})
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	changes, err := ChangesDirs(origin, dest)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if len(changes) > 0 {
-		t.Fatalf("Tar with special device (block, char, fifo) should keep them (recreate them when untar) : %v", changes)
-	}
-}
-
-// TestTarUntarWithXattr is Unix-only as Lsetxattr is not supported on Windows
-func TestTarUntarWithXattr(t *testing.T) {
-	origin, err := ioutil.TempDir("", "docker-test-untar-origin")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(origin)
-	if err := ioutil.WriteFile(filepath.Join(origin, "1"), []byte("hello world"), 0700); err != nil {
-		t.Fatal(err)
-	}
-	if err := ioutil.WriteFile(filepath.Join(origin, "2"), []byte("welcome!"), 0700); err != nil {
-		t.Fatal(err)
-	}
-	if err := ioutil.WriteFile(filepath.Join(origin, "3"), []byte("will be ignored"), 0700); err != nil {
-		t.Fatal(err)
-	}
-	if err := system.Lsetxattr(filepath.Join(origin, "2"), "security.capability", []byte{0x00}, 0); err != nil {
-		t.Fatal(err)
-	}
-
-	for _, c := range []Compression{
-		Uncompressed,
-		Gzip,
-	} {
-		changes, err := tarUntar(t, origin, &TarOptions{
-			Compression:     c,
-			ExcludePatterns: []string{"3"},
-		})
-
-		if err != nil {
-			t.Fatalf("Error tar/untar for compression %s: %s", c.Extension(), err)
-		}
-
-		if len(changes) != 1 || changes[0].Path != "/3" {
-			t.Fatalf("Unexpected differences after tarUntar: %v", changes)
-		}
-		capability, _ := system.Lgetxattr(filepath.Join(origin, "2"), "security.capability")
-		if capability == nil || capability[0] != 0x00 {
-			t.Fatalf("Untar should have kept the 'security.capability' xattr.")
-		}
-	}
-}
diff --git a/pkg/archive/archive_windows.go b/pkg/archive/archive_windows.go
deleted file mode 100644
index 5c3a1be340..0000000000
--- a/pkg/archive/archive_windows.go
+++ /dev/null
@@ -1,70 +0,0 @@
-// +build windows
-
-package archive
-
-import (
-	"archive/tar"
-	"fmt"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/docker/docker/pkg/longpath"
-)
-
-// fixVolumePathPrefix does platform specific processing to ensure that if
-// the path being passed in is not in a volume path format, convert it to one.
-func fixVolumePathPrefix(srcPath string) string {
-	return longpath.AddPrefix(srcPath)
-}
-
-// getWalkRoot calculates the root path when performing a TarWithOptions.
-// We use a separate function as this is platform specific.
-func getWalkRoot(srcPath string, include string) string {
-	return filepath.Join(srcPath, include)
-}
-
-// CanonicalTarNameForPath returns platform-specific filepath
-// to canonical posix-style path for tar archival. p is relative
-// path.
-func CanonicalTarNameForPath(p string) (string, error) {
-	// windows: convert windows style relative path with backslashes
-	// into forward slashes. Since windows does not allow '/' or '\'
-	// in file names, it is mostly safe to replace however we must
-	// check just in case
-	if strings.Contains(p, "/") {
-		return "", fmt.Errorf("Windows path contains forward slash: %s", p)
-	}
-	return strings.Replace(p, string(os.PathSeparator), "/", -1), nil
-}
-
-// chmodTarEntry is used to adjust the file permissions used in tar header based
-// on the platform the archival is done.
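On Windows there are no POSIX permission bits to preserve, so the chmodTarEntry below synthesizes them: the mode is masked to 0755 and the execute bits are forced on. The resulting mapping (the same one TestChmodTarEntry in archive_windows_test.go checks) can be sketched as:

    // windowsTarPerm reproduces the masking below (illustrative name,
    // not part of the package).
    func windowsTarPerm(perm os.FileMode) os.FileMode {
        perm &= 0755 // drop group/other write bits
        perm |= 0111 // everything coming from Windows is marked executable
        return perm
    }

    // windowsTarPerm(0000) == 0111
    // windowsTarPerm(0644) == 0755
    // windowsTarPerm(0777) == 0755
    // windowsTarPerm(0444) == 0555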
-func chmodTarEntry(perm os.FileMode) os.FileMode { - perm &= 0755 - // Add the x bit: make everything +x from windows - perm |= 0111 - - return perm -} - -func setHeaderForSpecialDevice(hdr *tar.Header, ta *tarAppender, name string, stat interface{}) (inode uint64, err error) { - // do nothing. no notion of Rdev, Inode, Nlink in stat on Windows - return -} - -// handleTarTypeBlockCharFifo is an OS-specific helper function used by -// createTarFile to handle the following types of header: Block; Char; Fifo -func handleTarTypeBlockCharFifo(hdr *tar.Header, path string) error { - return nil -} - -func handleLChmod(hdr *tar.Header, path string, hdrInfo os.FileInfo) error { - return nil -} - -func getFileUIDGID(stat interface{}) (int, int, error) { - // no notion of file ownership mapping yet on Windows - return 0, 0, nil -} diff --git a/pkg/archive/archive_windows_test.go b/pkg/archive/archive_windows_test.go deleted file mode 100644 index 0c6733d6bd..0000000000 --- a/pkg/archive/archive_windows_test.go +++ /dev/null @@ -1,91 +0,0 @@ -// +build windows - -package archive - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestCopyFileWithInvalidDest(t *testing.T) { - // TODO Windows: This is currently failing. Not sure what has - // recently changed in CopyWithTar as used to pass. Further investigation - // is required. - t.Skip("Currently fails") - folder, err := ioutil.TempDir("", "docker-archive-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(folder) - dest := "c:dest" - srcFolder := filepath.Join(folder, "src") - src := filepath.Join(folder, "src", "src") - err = os.MkdirAll(srcFolder, 0740) - if err != nil { - t.Fatal(err) - } - ioutil.WriteFile(src, []byte("content"), 0777) - err = CopyWithTar(src, dest) - if err == nil { - t.Fatalf("archiver.CopyWithTar should throw an error on invalid dest.") - } -} - -func TestCanonicalTarNameForPath(t *testing.T) { - cases := []struct { - in, expected string - shouldFail bool - }{ - {"foo", "foo", false}, - {"foo/bar", "___", true}, // unix-styled windows path must fail - {`foo\bar`, "foo/bar", false}, - } - for _, v := range cases { - if out, err := CanonicalTarNameForPath(v.in); err != nil && !v.shouldFail { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if v.shouldFail && err == nil { - t.Fatalf("canonical path call should have failed with error. in=%s out=%s", v.in, out) - } else if !v.shouldFail && out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestCanonicalTarName(t *testing.T) { - cases := []struct { - in string - isDir bool - expected string - }{ - {"foo", false, "foo"}, - {"foo", true, "foo/"}, - {`foo\bar`, false, "foo/bar"}, - {`foo\bar`, true, "foo/bar/"}, - } - for _, v := range cases { - if out, err := canonicalTarName(v.in, v.isDir); err != nil { - t.Fatalf("cannot get canonical name for path: %s: %v", v.in, err) - } else if out != v.expected { - t.Fatalf("wrong canonical tar name. expected:%s got:%s", v.expected, out) - } - } -} - -func TestChmodTarEntry(t *testing.T) { - cases := []struct { - in, expected os.FileMode - }{ - {0000, 0111}, - {0777, 0755}, - {0644, 0755}, - {0755, 0755}, - {0444, 0555}, - } - for _, v := range cases { - if out := chmodTarEntry(v.in); out != v.expected { - t.Fatalf("wrong chmod. 
expected:%v got:%v", v.expected, out) - } - } -} diff --git a/pkg/archive/changes.go b/pkg/archive/changes.go deleted file mode 100644 index 4e2d8e54f7..0000000000 --- a/pkg/archive/changes.go +++ /dev/null @@ -1,446 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "sort" - "strings" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -// ChangeType represents the change type. -type ChangeType int - -const ( - // ChangeModify represents the modify operation. - ChangeModify = iota - // ChangeAdd represents the add operation. - ChangeAdd - // ChangeDelete represents the delete operation. - ChangeDelete -) - -func (c ChangeType) String() string { - switch c { - case ChangeModify: - return "C" - case ChangeAdd: - return "A" - case ChangeDelete: - return "D" - } - return "" -} - -// Change represents a change, it wraps the change type and path. -// It describes changes of the files in the path respect to the -// parent layers. The change could be modify, add, delete. -// This is used for layer diff. -type Change struct { - Path string - Kind ChangeType -} - -func (change *Change) String() string { - return fmt.Sprintf("%s %s", change.Kind, change.Path) -} - -// for sort.Sort -type changesByPath []Change - -func (c changesByPath) Less(i, j int) bool { return c[i].Path < c[j].Path } -func (c changesByPath) Len() int { return len(c) } -func (c changesByPath) Swap(i, j int) { c[j], c[i] = c[i], c[j] } - -// Gnu tar and the go tar writer don't have sub-second mtime -// precision, which is problematic when we apply changes via tar -// files, we handle this by comparing for exact times, *or* same -// second count and either a or b having exactly 0 nanoseconds -func sameFsTime(a, b time.Time) bool { - return a == b || - (a.Unix() == b.Unix() && - (a.Nanosecond() == 0 || b.Nanosecond() == 0)) -} - -func sameFsTimeSpec(a, b syscall.Timespec) bool { - return a.Sec == b.Sec && - (a.Nsec == b.Nsec || a.Nsec == 0 || b.Nsec == 0) -} - -// Changes walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func Changes(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, aufsDeletedFile, aufsMetadataSkip) -} - -func aufsMetadataSkip(path string) (skip bool, err error) { - skip, err = filepath.Match(string(os.PathSeparator)+WhiteoutMetaPrefix+"*", path) - if err != nil { - skip = true - } - return -} - -func aufsDeletedFile(root, path string, fi os.FileInfo) (string, error) { - f := filepath.Base(path) - - // If there is a whiteout, then the file was removed - if strings.HasPrefix(f, WhiteoutPrefix) { - originalFile := f[len(WhiteoutPrefix):] - return filepath.Join(filepath.Dir(path), originalFile), nil - } - - return "", nil -} - -type skipChange func(string) (bool, error) -type deleteChange func(string, string, os.FileInfo) (string, error) - -func changes(layers []string, rw string, dc deleteChange, sc skipChange) ([]Change, error) { - var ( - changes []Change - changedDirs = make(map[string]struct{}) - ) - - err := filepath.Walk(rw, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - path, err = filepath.Rel(rw, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. 
- path = filepath.Join(string(os.PathSeparator), path) - - // Skip root - if path == string(os.PathSeparator) { - return nil - } - - if sc != nil { - if skip, err := sc(path); skip { - return err - } - } - - change := Change{ - Path: path, - } - - deletedFile, err := dc(rw, path, f) - if err != nil { - return err - } - - // Find out what kind of modification happened - if deletedFile != "" { - change.Path = deletedFile - change.Kind = ChangeDelete - } else { - // Otherwise, the file was added - change.Kind = ChangeAdd - - // ...Unless it already existed in a top layer, in which case, it's a modification - for _, layer := range layers { - stat, err := os.Stat(filepath.Join(layer, path)) - if err != nil && !os.IsNotExist(err) { - return err - } - if err == nil { - // The file existed in the top layer, so that's a modification - - // However, if it's a directory, maybe it wasn't actually modified. - // If you modify /foo/bar/baz, then /foo will be part of the changed files only because it's the parent of bar - if stat.IsDir() && f.IsDir() { - if f.Size() == stat.Size() && f.Mode() == stat.Mode() && sameFsTime(f.ModTime(), stat.ModTime()) { - // Both directories are the same, don't record the change - return nil - } - } - change.Kind = ChangeModify - break - } - } - } - - // If /foo/bar/file.txt is modified, then /foo/bar must be part of the changed files. - // This block is here to ensure the change is recorded even if the - // modify time, mode and size of the parent directory in the rw and ro layers are all equal. - // Check https://github.com/docker/docker/pull/13590 for details. - if f.IsDir() { - changedDirs[path] = struct{}{} - } - if change.Kind == ChangeAdd || change.Kind == ChangeDelete { - parent := filepath.Dir(path) - if _, ok := changedDirs[parent]; !ok && parent != "/" { - changes = append(changes, Change{Path: parent, Kind: ChangeModify}) - changedDirs[parent] = struct{}{} - } - } - - // Record change - changes = append(changes, change) - return nil - }) - if err != nil && !os.IsNotExist(err) { - return nil, err - } - return changes, nil -} - -// FileInfo describes the information of a file. -type FileInfo struct { - parent *FileInfo - name string - stat *system.StatT - children map[string]*FileInfo - capability []byte - added bool -} - -// LookUp looks up the file information of a file. -func (info *FileInfo) LookUp(path string) *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - parent := info - if path == string(os.PathSeparator) { - return info - } - - pathElements := strings.Split(path, string(os.PathSeparator)) - for _, elem := range pathElements { - if elem != "" { - child := parent.children[elem] - if child == nil { - return nil - } - parent = child - } - } - return parent -} - -func (info *FileInfo) path() string { - if info.parent == nil { - // As this runs on the daemon side, file paths are OS specific. 
- return string(os.PathSeparator) - } - return filepath.Join(info.parent.path(), info.name) -} - -func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) { - - sizeAtEntry := len(*changes) - - if oldInfo == nil { - // add - change := Change{ - Path: info.path(), - Kind: ChangeAdd, - } - *changes = append(*changes, change) - info.added = true - } - - // We make a copy so we can modify it to detect additions - // also, we only recurse on the old dir if the new info is a directory - // otherwise any previous delete/change is considered recursive - oldChildren := make(map[string]*FileInfo) - if oldInfo != nil && info.isDir() { - for k, v := range oldInfo.children { - oldChildren[k] = v - } - } - - for name, newChild := range info.children { - oldChild, _ := oldChildren[name] - if oldChild != nil { - // change? - oldStat := oldChild.stat - newStat := newChild.stat - // Note: We can't compare inode or ctime or blocksize here, because these change - // when copying a file into a container. However, that is not generally a problem - // because any content change will change mtime, and any status change should - // be visible when actually comparing the stat fields. The only time this - // breaks down is if some code intentionally hides a change by setting - // back mtime - if statDifferent(oldStat, newStat) || - bytes.Compare(oldChild.capability, newChild.capability) != 0 { - change := Change{ - Path: newChild.path(), - Kind: ChangeModify, - } - *changes = append(*changes, change) - newChild.added = true - } - - // Remove from copy so we can detect deletions - delete(oldChildren, name) - } - - newChild.addChanges(oldChild, changes) - } - for _, oldChild := range oldChildren { - // delete - change := Change{ - Path: oldChild.path(), - Kind: ChangeDelete, - } - *changes = append(*changes, change) - } - - // If there were changes inside this directory, we need to add it, even if the directory - // itself wasn't changed. This is needed to properly save and restore filesystem permissions. - // As this runs on the daemon side, file paths are OS specific. - if len(*changes) > sizeAtEntry && info.isDir() && !info.added && info.path() != string(os.PathSeparator) { - change := Change{ - Path: info.path(), - Kind: ChangeModify, - } - // Let's insert the directory entry before the recently added entries located inside this dir - *changes = append(*changes, change) // just to resize the slice, will be overwritten - copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:]) - (*changes)[sizeAtEntry] = change - } - -} - -// Changes add changes to file information. -func (info *FileInfo) Changes(oldInfo *FileInfo) []Change { - var changes []Change - - info.addChanges(oldInfo, &changes) - - return changes -} - -func newRootFileInfo() *FileInfo { - // As this runs on the daemon side, file paths are OS specific. - root := &FileInfo{ - name: string(os.PathSeparator), - children: make(map[string]*FileInfo), - } - return root -} - -// ChangesDirs compares two directories and generates an array of Change objects describing the changes. -// If oldDir is "", then all files in newDir will be Add-Changes. 
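// The insert-before-children step in addChanges above uses the standard
// append-then-shift idiom for inserting into a slice at an index; a
// minimal hedged sketch of the same move with plain ints:
//
//   s := []int{1, 2, 4, 5}
//   s = append(s, 0)    // grow by one; the value is overwritten below
//   copy(s[3:], s[2:])  // shift the tail right by one
//   s[2] = 3            // s is now [1 2 3 4 5]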
-func ChangesDirs(newDir, oldDir string) ([]Change, error) { - var ( - oldRoot, newRoot *FileInfo - ) - if oldDir == "" { - emptyDir, err := ioutil.TempDir("", "empty") - if err != nil { - return nil, err - } - defer os.Remove(emptyDir) - oldDir = emptyDir - } - oldRoot, newRoot, err := collectFileInfoForChanges(oldDir, newDir) - if err != nil { - return nil, err - } - - return newRoot.Changes(oldRoot), nil -} - -// ChangesSize calculates the size in bytes of the provided changes, based on newDir. -func ChangesSize(newDir string, changes []Change) int64 { - var ( - size int64 - sf = make(map[uint64]struct{}) - ) - for _, change := range changes { - if change.Kind == ChangeModify || change.Kind == ChangeAdd { - file := filepath.Join(newDir, change.Path) - fileInfo, err := os.Lstat(file) - if err != nil { - logrus.Errorf("Can not stat %q: %s", file, err) - continue - } - - if fileInfo != nil && !fileInfo.IsDir() { - if hasHardlinks(fileInfo) { - inode := getIno(fileInfo) - if _, ok := sf[inode]; !ok { - size += fileInfo.Size() - sf[inode] = struct{}{} - } - } else { - size += fileInfo.Size() - } - } - } - } - return size -} - -// ExportChanges produces an Archive from the provided changes, relative to dir. -func ExportChanges(dir string, changes []Change, uidMaps, gidMaps []idtools.IDMap) (Archive, error) { - reader, writer := io.Pipe() - go func() { - ta := &tarAppender{ - TarWriter: tar.NewWriter(writer), - Buffer: pools.BufioWriter32KPool.Get(nil), - SeenFiles: make(map[uint64]string), - UIDMaps: uidMaps, - GIDMaps: gidMaps, - } - // this buffer is needed for the duration of this piped stream - defer pools.BufioWriter32KPool.Put(ta.Buffer) - - sort.Sort(changesByPath(changes)) - - // In general we log errors here but ignore them because - // during e.g. a diff operation the container can continue - // mutating the filesystem and we can see transient errors - // from this - for _, change := range changes { - if change.Kind == ChangeDelete { - whiteOutDir := filepath.Dir(change.Path) - whiteOutBase := filepath.Base(change.Path) - whiteOut := filepath.Join(whiteOutDir, WhiteoutPrefix+whiteOutBase) - timestamp := time.Now() - hdr := &tar.Header{ - Name: whiteOut[1:], - Size: 0, - ModTime: timestamp, - AccessTime: timestamp, - ChangeTime: timestamp, - } - if err := ta.TarWriter.WriteHeader(hdr); err != nil { - logrus.Debugf("Can't write whiteout header: %s", err) - } - } else { - path := filepath.Join(dir, change.Path) - if err := ta.addTarFile(path, change.Path[1:]); err != nil { - logrus.Debugf("Can't add file %s to tar: %s", path, err) - } - } - } - - // Make sure to check the error on Close. - if err := ta.TarWriter.Close(); err != nil { - logrus.Debugf("Can't close layer: %s", err) - } - if err := writer.Close(); err != nil { - logrus.Debugf("failed close Changes writer: %s", err) - } - }() - return reader, nil -} diff --git a/pkg/archive/changes_linux.go b/pkg/archive/changes_linux.go deleted file mode 100644 index a4cc0c65d3..0000000000 --- a/pkg/archive/changes_linux.go +++ /dev/null @@ -1,312 +0,0 @@ -package archive - -import ( - "bytes" - "fmt" - "os" - "path/filepath" - "sort" - "syscall" - "unsafe" - - "github.com/docker/docker/pkg/system" -) - -// walker is used to implement collectFileInfoForChanges on linux. Where this -// method in general returns the entire contents of two directory trees, we -// optimize some FS calls out on linux. 
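// A hedged usage sketch of the diff pipeline defined above (the layer
// paths are hypothetical and error handling is elided):
//
//   changes, _ := ChangesDirs("/layers/top", "/layers/parent")
//   arch, _ := ExportChanges("/layers/top", changes, nil, nil)
//   io.Copy(os.Stdout, arch) // tar stream; deletions appear as .wh.* entries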
In particular, we take advantage of the -// fact that getdents(2) returns the inode of each file in the directory being -// walked, which, when walking two trees in parallel to generate a list of -// changes, can be used to prune subtrees without ever having to lstat(2) them -// directly. Eliminating stat calls in this way can save up to seconds on large -// images. -type walker struct { - dir1 string - dir2 string - root1 *FileInfo - root2 *FileInfo -} - -// collectFileInfoForChanges returns a complete representation of the trees -// rooted at dir1 and dir2, with one important exception: any subtree or -// leaf where the inode and device numbers are an exact match between dir1 -// and dir2 will be pruned from the results. This method is *only* to be used -// to generating a list of changes between the two directories, as it does not -// reflect the full contents. -func collectFileInfoForChanges(dir1, dir2 string) (*FileInfo, *FileInfo, error) { - w := &walker{ - dir1: dir1, - dir2: dir2, - root1: newRootFileInfo(), - root2: newRootFileInfo(), - } - - i1, err := os.Lstat(w.dir1) - if err != nil { - return nil, nil, err - } - i2, err := os.Lstat(w.dir2) - if err != nil { - return nil, nil, err - } - - if err := w.walk("/", i1, i2); err != nil { - return nil, nil, err - } - - return w.root1, w.root2, nil -} - -// Given a FileInfo, its path info, and a reference to the root of the tree -// being constructed, register this file with the tree. -func walkchunk(path string, fi os.FileInfo, dir string, root *FileInfo) error { - if fi == nil { - return nil - } - parent := root.LookUp(filepath.Dir(path)) - if parent == nil { - return fmt.Errorf("collectFileInfoForChanges: Unexpectedly no parent for %s", path) - } - info := &FileInfo{ - name: filepath.Base(path), - children: make(map[string]*FileInfo), - parent: parent, - } - cpath := filepath.Join(dir, path) - stat, err := system.FromStatT(fi.Sys().(*syscall.Stat_t)) - if err != nil { - return err - } - info.stat = stat - info.capability, _ = system.Lgetxattr(cpath, "security.capability") // lgetxattr(2): fs access - parent.children[info.name] = info - return nil -} - -// Walk a subtree rooted at the same path in both trees being iterated. For -// example, /docker/overlay/1234/a/b/c/d and /docker/overlay/8888/a/b/c/d -func (w *walker) walk(path string, i1, i2 os.FileInfo) (err error) { - // Register these nodes with the return trees, unless we're still at the - // (already-created) roots: - if path != "/" { - if err := walkchunk(path, i1, w.dir1, w.root1); err != nil { - return err - } - if err := walkchunk(path, i2, w.dir2, w.root2); err != nil { - return err - } - } - - is1Dir := i1 != nil && i1.IsDir() - is2Dir := i2 != nil && i2.IsDir() - - sameDevice := false - if i1 != nil && i2 != nil { - si1 := i1.Sys().(*syscall.Stat_t) - si2 := i2.Sys().(*syscall.Stat_t) - if si1.Dev == si2.Dev { - sameDevice = true - } - } - - // If these files are both non-existent, or leaves (non-dirs), we are done. - if !is1Dir && !is2Dir { - return nil - } - - // Fetch the names of all the files contained in both directories being walked: - var names1, names2 []nameIno - if is1Dir { - names1, err = readdirnames(filepath.Join(w.dir1, path)) // getdents(2): fs access - if err != nil { - return err - } - } - if is2Dir { - names2, err = readdirnames(filepath.Join(w.dir2, path)) // getdents(2): fs access - if err != nil { - return err - } - } - - // We have lists of the files contained in both parallel directories, sorted - // in the same order. 
Walk them in parallel, generating a unique merged list - // of all items present in either or both directories. - var names []string - ix1 := 0 - ix2 := 0 - - for { - if ix1 >= len(names1) { - break - } - if ix2 >= len(names2) { - break - } - - ni1 := names1[ix1] - ni2 := names2[ix2] - - switch bytes.Compare([]byte(ni1.name), []byte(ni2.name)) { - case -1: // ni1 < ni2 -- advance ni1 - // we will not encounter ni1 in names2 - names = append(names, ni1.name) - ix1++ - case 0: // ni1 == ni2 - if ni1.ino != ni2.ino || !sameDevice { - names = append(names, ni1.name) - } - ix1++ - ix2++ - case 1: // ni1 > ni2 -- advance ni2 - // we will not encounter ni2 in names1 - names = append(names, ni2.name) - ix2++ - } - } - for ix1 < len(names1) { - names = append(names, names1[ix1].name) - ix1++ - } - for ix2 < len(names2) { - names = append(names, names2[ix2].name) - ix2++ - } - - // For each of the names present in either or both of the directories being - // iterated, stat the name under each root, and recurse the pair of them: - for _, name := range names { - fname := filepath.Join(path, name) - var cInfo1, cInfo2 os.FileInfo - if is1Dir { - cInfo1, err = os.Lstat(filepath.Join(w.dir1, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if is2Dir { - cInfo2, err = os.Lstat(filepath.Join(w.dir2, fname)) // lstat(2): fs access - if err != nil && !os.IsNotExist(err) { - return err - } - } - if err = w.walk(fname, cInfo1, cInfo2); err != nil { - return err - } - } - return nil -} - -// {name,inode} pairs used to support the early-pruning logic of the walker type -type nameIno struct { - name string - ino uint64 -} - -type nameInoSlice []nameIno - -func (s nameInoSlice) Len() int { return len(s) } -func (s nameInoSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nameInoSlice) Less(i, j int) bool { return s[i].name < s[j].name } - -// readdirnames is a hacked-apart version of the Go stdlib code, exposing inode -// numbers further up the stack when reading directory contents. Unlike -// os.Readdirnames, which returns a list of filenames, this function returns a -// list of {filename,inode} pairs. -func readdirnames(dirname string) (names []nameIno, err error) { - var ( - size = 100 - buf = make([]byte, 4096) - nbuf int - bufp int - nb int - ) - - f, err := os.Open(dirname) - if err != nil { - return nil, err - } - defer f.Close() - - names = make([]nameIno, 0, size) // Empty with room to grow. - for { - // Refill the buffer if necessary - if bufp >= nbuf { - bufp = 0 - nbuf, err = syscall.ReadDirent(int(f.Fd()), buf) // getdents on linux - if nbuf < 0 { - nbuf = 0 - } - if err != nil { - return nil, os.NewSyscallError("readdirent", err) - } - if nbuf <= 0 { - break // EOF - } - } - - // Drain the buffer - nb, names = parseDirent(buf[bufp:nbuf], names) - bufp += nb - } - - sl := nameInoSlice(names) - sort.Sort(sl) - return sl, nil -} - -// parseDirent is a minor modification of syscall.ParseDirent (linux version) -// which returns {name,inode} pairs instead of just names. -func parseDirent(buf []byte, names []nameIno) (consumed int, newnames []nameIno) { - origlen := len(buf) - for len(buf) > 0 { - dirent := (*syscall.Dirent)(unsafe.Pointer(&buf[0])) - buf = buf[dirent.Reclen:] - if dirent.Ino == 0 { // File absent in directory. - continue - } - bytes := (*[10000]byte)(unsafe.Pointer(&dirent.Name[0])) - var name = string(bytes[0:clen(bytes[:])]) - if name == "." || name == ".." 
{ // Useless names - continue - } - names = append(names, nameIno{name, dirent.Ino}) - } - return origlen - len(buf), names -} - -func clen(n []byte) int { - for i := 0; i < len(n); i++ { - if n[i] == 0 { - return i - } - } - return len(n) -} - -// OverlayChanges walks the path rw and determines changes for the files in the path, -// with respect to the parent layers -func OverlayChanges(layers []string, rw string) ([]Change, error) { - return changes(layers, rw, overlayDeletedFile, nil) -} - -func overlayDeletedFile(root, path string, fi os.FileInfo) (string, error) { - if fi.Mode()&os.ModeCharDevice != 0 { - s := fi.Sys().(*syscall.Stat_t) - if major(uint64(s.Rdev)) == 0 && minor(uint64(s.Rdev)) == 0 { - return path, nil - } - } - if fi.Mode()&os.ModeDir != 0 { - opaque, err := system.Lgetxattr(filepath.Join(root, path), "trusted.overlay.opaque") - if err != nil { - return "", err - } - if opaque != nil && len(opaque) == 1 && opaque[0] == 'y' { - return path, nil - } - } - - return "", nil - -} diff --git a/pkg/archive/changes_other.go b/pkg/archive/changes_other.go deleted file mode 100644 index da70ed37c4..0000000000 --- a/pkg/archive/changes_other.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build !linux - -package archive - -import ( - "fmt" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/docker/docker/pkg/system" -) - -func collectFileInfoForChanges(oldDir, newDir string) (*FileInfo, *FileInfo, error) { - var ( - oldRoot, newRoot *FileInfo - err1, err2 error - errs = make(chan error, 2) - ) - go func() { - oldRoot, err1 = collectFileInfo(oldDir) - errs <- err1 - }() - go func() { - newRoot, err2 = collectFileInfo(newDir) - errs <- err2 - }() - - // block until both routines have returned - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - return nil, nil, err - } - } - - return oldRoot, newRoot, nil -} - -func collectFileInfo(sourceDir string) (*FileInfo, error) { - root := newRootFileInfo() - - err := filepath.Walk(sourceDir, func(path string, f os.FileInfo, err error) error { - if err != nil { - return err - } - - // Rebase path - relPath, err := filepath.Rel(sourceDir, path) - if err != nil { - return err - } - - // As this runs on the daemon side, file paths are OS specific. - relPath = filepath.Join(string(os.PathSeparator), relPath) - - // See https://github.com/golang/go/issues/9168 - bug in filepath.Join. - // Temporary workaround. If the returned path starts with two backslashes, - // trim it down to a single backslash. Only relevant on Windows. 
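// For contrast with the aufs ".wh." markers earlier, a hedged summary of
// what overlayDeletedFile above treats as a deletion in an overlay rw
// layer:
//
//   - a character device with device number 0:0 at the path
//     (the whiteout overlayfs creates with mknod), or
//   - a directory whose trusted.overlay.opaque xattr is "y",
//     which hides the corresponding lower-layer directory.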
- if runtime.GOOS == "windows" { - if strings.HasPrefix(relPath, `\\`) { - relPath = relPath[1:] - } - } - - if relPath == string(os.PathSeparator) { - return nil - } - - parent := root.LookUp(filepath.Dir(relPath)) - if parent == nil { - return fmt.Errorf("collectFileInfo: Unexpectedly no parent for %s", relPath) - } - - info := &FileInfo{ - name: filepath.Base(relPath), - children: make(map[string]*FileInfo), - parent: parent, - } - - s, err := system.Lstat(path) - if err != nil { - return err - } - info.stat = s - - info.capability, _ = system.Lgetxattr(path, "security.capability") - - parent.children[info.name] = info - - return nil - }) - if err != nil { - return nil, err - } - return root, nil -} diff --git a/pkg/archive/changes_posix_test.go b/pkg/archive/changes_posix_test.go deleted file mode 100644 index 5a3282b5a8..0000000000 --- a/pkg/archive/changes_posix_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "sort" - "testing" -) - -func TestHardLinkOrder(t *testing.T) { - names := []string{"file1.txt", "file2.txt", "file3.txt"} - msg := []byte("Hey y'all") - - // Create dir - src, err := ioutil.TempDir("", "docker-hardlink-test-src-") - if err != nil { - t.Fatal(err) - } - //defer os.RemoveAll(src) - for _, name := range names { - func() { - fh, err := os.Create(path.Join(src, name)) - if err != nil { - t.Fatal(err) - } - defer fh.Close() - if _, err = fh.Write(msg); err != nil { - t.Fatal(err) - } - }() - } - // Create dest, with changes that includes hardlinks - dest, err := ioutil.TempDir("", "docker-hardlink-test-dest-") - if err != nil { - t.Fatal(err) - } - os.RemoveAll(dest) // we just want the name, at first - if err := copyDir(src, dest); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dest) - for _, name := range names { - for i := 0; i < 5; i++ { - if err := os.Link(path.Join(dest, name), path.Join(dest, fmt.Sprintf("%s.link%d", name, i))); err != nil { - t.Fatal(err) - } - } - } - - // get changes - changes, err := ChangesDirs(dest, src) - if err != nil { - t.Fatal(err) - } - - // sort - sort.Sort(changesByPath(changes)) - - // ExportChanges - ar, err := ExportChanges(dest, changes, nil, nil) - if err != nil { - t.Fatal(err) - } - hdrs, err := walkHeaders(ar) - if err != nil { - t.Fatal(err) - } - - // reverse sort - sort.Sort(sort.Reverse(changesByPath(changes))) - // ExportChanges - arRev, err := ExportChanges(dest, changes, nil, nil) - if err != nil { - t.Fatal(err) - } - hdrsRev, err := walkHeaders(arRev) - if err != nil { - t.Fatal(err) - } - - // line up the two sets - sort.Sort(tarHeaders(hdrs)) - sort.Sort(tarHeaders(hdrsRev)) - - // compare Size and LinkName - for i := range hdrs { - if hdrs[i].Name != hdrsRev[i].Name { - t.Errorf("headers - expected name %q; but got %q", hdrs[i].Name, hdrsRev[i].Name) - } - if hdrs[i].Size != hdrsRev[i].Size { - t.Errorf("headers - %q expected size %d; but got %d", hdrs[i].Name, hdrs[i].Size, hdrsRev[i].Size) - } - if hdrs[i].Typeflag != hdrsRev[i].Typeflag { - t.Errorf("headers - %q expected type %d; but got %d", hdrs[i].Name, hdrs[i].Typeflag, hdrsRev[i].Typeflag) - } - if hdrs[i].Linkname != hdrsRev[i].Linkname { - t.Errorf("headers - %q expected linkname %q; but got %q", hdrs[i].Name, hdrs[i].Linkname, hdrsRev[i].Linkname) - } - } - -} - -type tarHeaders []tar.Header - -func (th tarHeaders) Len() int { return len(th) } -func (th tarHeaders) Swap(i, j int) { th[j], th[i] = th[i], th[j] } -func (th tarHeaders) Less(i, j int) bool { 
return th[i].Name < th[j].Name } - -func walkHeaders(r io.Reader) ([]tar.Header, error) { - t := tar.NewReader(r) - headers := []tar.Header{} - for { - hdr, err := t.Next() - if err != nil { - if err == io.EOF { - break - } - return headers, err - } - headers = append(headers, *hdr) - } - return headers, nil -} diff --git a/pkg/archive/changes_test.go b/pkg/archive/changes_test.go deleted file mode 100644 index 8a2d0e8b15..0000000000 --- a/pkg/archive/changes_test.go +++ /dev/null @@ -1,565 +0,0 @@ -package archive - -import ( - "io/ioutil" - "os" - "os/exec" - "path" - "runtime" - "sort" - "testing" - "time" - - "github.com/docker/docker/pkg/system" -) - -func max(x, y int) int { - if x >= y { - return x - } - return y -} - -func copyDir(src, dst string) error { - cmd := exec.Command("cp", "-a", src, dst) - if err := cmd.Run(); err != nil { - return err - } - return nil -} - -type FileType uint32 - -const ( - Regular FileType = iota - Dir - Symlink -) - -type FileData struct { - filetype FileType - path string - contents string - permissions os.FileMode -} - -func createSampleDir(t *testing.T, root string) { - files := []FileData{ - {Regular, "file1", "file1\n", 0600}, - {Regular, "file2", "file2\n", 0666}, - {Regular, "file3", "file3\n", 0404}, - {Regular, "file4", "file4\n", 0600}, - {Regular, "file5", "file5\n", 0600}, - {Regular, "file6", "file6\n", 0600}, - {Regular, "file7", "file7\n", 0600}, - {Dir, "dir1", "", 0740}, - {Regular, "dir1/file1-1", "file1-1\n", 01444}, - {Regular, "dir1/file1-2", "file1-2\n", 0666}, - {Dir, "dir2", "", 0700}, - {Regular, "dir2/file2-1", "file2-1\n", 0666}, - {Regular, "dir2/file2-2", "file2-2\n", 0666}, - {Dir, "dir3", "", 0700}, - {Regular, "dir3/file3-1", "file3-1\n", 0666}, - {Regular, "dir3/file3-2", "file3-2\n", 0666}, - {Dir, "dir4", "", 0700}, - {Regular, "dir4/file3-1", "file4-1\n", 0666}, - {Regular, "dir4/file3-2", "file4-2\n", 0666}, - {Symlink, "symlink1", "target1", 0666}, - {Symlink, "symlink2", "target2", 0666}, - {Symlink, "symlink3", root + "/file1", 0666}, - {Symlink, "symlink4", root + "/symlink3", 0666}, - {Symlink, "dirSymlink", root + "/dir1", 0740}, - } - - now := time.Now() - for _, info := range files { - p := path.Join(root, info.path) - if info.filetype == Dir { - if err := os.MkdirAll(p, info.permissions); err != nil { - t.Fatal(err) - } - } else if info.filetype == Regular { - if err := ioutil.WriteFile(p, []byte(info.contents), info.permissions); err != nil { - t.Fatal(err) - } - } else if info.filetype == Symlink { - if err := os.Symlink(info.contents, p); err != nil { - t.Fatal(err) - } - } - - if info.filetype != Symlink { - // Set a consistent ctime, atime for all files and dirs - if err := system.Chtimes(p, now, now); err != nil { - t.Fatal(err) - } - } - } -} - -func TestChangeString(t *testing.T) { - modifiyChange := Change{"change", ChangeModify} - toString := modifiyChange.String() - if toString != "C change" { - t.Fatalf("String() of a change with ChangeModifiy Kind should have been %s but was %s", "C change", toString) - } - addChange := Change{"change", ChangeAdd} - toString = addChange.String() - if toString != "A change" { - t.Fatalf("String() of a change with ChangeAdd Kind should have been %s but was %s", "A change", toString) - } - deleteChange := Change{"change", ChangeDelete} - toString = deleteChange.String() - if toString != "D change" { - t.Fatalf("String() of a change with ChangeDelete Kind should have been %s but was %s", "D change", toString) - } -} - -func TestChangesWithNoChanges(t *testing.T) { 
- // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. - if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - rwLayer, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rwLayer) - layer, err := ioutil.TempDir("", "docker-changes-test-layer") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(layer) - createSampleDir(t, layer) - changes, err := Changes([]string{layer}, rwLayer) - if err != nil { - t.Fatal(err) - } - if len(changes) != 0 { - t.Fatalf("Changes with no difference should have detect no changes, but detected %d", len(changes)) - } -} - -func TestChangesWithChanges(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. - if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - // Mock the readonly layer - layer, err := ioutil.TempDir("", "docker-changes-test-layer") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(layer) - createSampleDir(t, layer) - os.MkdirAll(path.Join(layer, "dir1/subfolder"), 0740) - - // Mock the RW layer - rwLayer, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(rwLayer) - - // Create a folder in RW layer - dir1 := path.Join(rwLayer, "dir1") - os.MkdirAll(dir1, 0740) - deletedFile := path.Join(dir1, ".wh.file1-2") - ioutil.WriteFile(deletedFile, []byte{}, 0600) - modifiedFile := path.Join(dir1, "file1-1") - ioutil.WriteFile(modifiedFile, []byte{0x00}, 01444) - // Let's add a subfolder for a newFile - subfolder := path.Join(dir1, "subfolder") - os.MkdirAll(subfolder, 0740) - newFile := path.Join(subfolder, "newFile") - ioutil.WriteFile(newFile, []byte{}, 0740) - - changes, err := Changes([]string{layer}, rwLayer) - if err != nil { - t.Fatal(err) - } - - expectedChanges := []Change{ - {"/dir1", ChangeModify}, - {"/dir1/file1-1", ChangeModify}, - {"/dir1/file1-2", ChangeDelete}, - {"/dir1/subfolder", ChangeModify}, - {"/dir1/subfolder/newFile", ChangeAdd}, - } - checkChanges(expectedChanges, changes, t) -} - -// See https://github.com/docker/docker/pull/13590 -func TestChangesWithChangesGH13590(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. 
- if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - baseLayer, err := ioutil.TempDir("", "docker-changes-test.") - defer os.RemoveAll(baseLayer) - - dir3 := path.Join(baseLayer, "dir1/dir2/dir3") - os.MkdirAll(dir3, 07400) - - file := path.Join(dir3, "file.txt") - ioutil.WriteFile(file, []byte("hello"), 0666) - - layer, err := ioutil.TempDir("", "docker-changes-test2.") - defer os.RemoveAll(layer) - - // Test creating a new file - if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { - t.Fatalf("Cmd failed: %q", err) - } - - os.Remove(path.Join(layer, "dir1/dir2/dir3/file.txt")) - file = path.Join(layer, "dir1/dir2/dir3/file1.txt") - ioutil.WriteFile(file, []byte("bye"), 0666) - - changes, err := Changes([]string{baseLayer}, layer) - if err != nil { - t.Fatal(err) - } - - expectedChanges := []Change{ - {"/dir1/dir2/dir3", ChangeModify}, - {"/dir1/dir2/dir3/file1.txt", ChangeAdd}, - } - checkChanges(expectedChanges, changes, t) - - // Now test changing a file - layer, err = ioutil.TempDir("", "docker-changes-test3.") - defer os.RemoveAll(layer) - - if err := copyDir(baseLayer+"/dir1", layer+"/"); err != nil { - t.Fatalf("Cmd failed: %q", err) - } - - file = path.Join(layer, "dir1/dir2/dir3/file.txt") - ioutil.WriteFile(file, []byte("bye"), 0666) - - changes, err = Changes([]string{baseLayer}, layer) - if err != nil { - t.Fatal(err) - } - - expectedChanges = []Change{ - {"/dir1/dir2/dir3/file.txt", ChangeModify}, - } - checkChanges(expectedChanges, changes, t) -} - -// Create a directory, copy it, make sure we report no changes between the two -func TestChangesDirsEmpty(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. - if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(src) - createSampleDir(t, src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(dst) - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - if len(changes) != 0 { - t.Fatalf("Reported changes for identical dirs: %v", changes) - } - os.RemoveAll(src) - os.RemoveAll(dst) -} - -func mutateSampleDir(t *testing.T, root string) { - // Remove a regular file - if err := os.RemoveAll(path.Join(root, "file1")); err != nil { - t.Fatal(err) - } - - // Remove a directory - if err := os.RemoveAll(path.Join(root, "dir1")); err != nil { - t.Fatal(err) - } - - // Remove a symlink - if err := os.RemoveAll(path.Join(root, "symlink1")); err != nil { - t.Fatal(err) - } - - // Rewrite a file - if err := ioutil.WriteFile(path.Join(root, "file2"), []byte("fileNN\n"), 0777); err != nil { - t.Fatal(err) - } - - // Replace a file - if err := os.RemoveAll(path.Join(root, "file3")); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(root, "file3"), []byte("fileMM\n"), 0404); err != nil { - t.Fatal(err) - } - - // Touch file - if err := system.Chtimes(path.Join(root, "file4"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } - - // Replace file with dir - if err := os.RemoveAll(path.Join(root, "file5")); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(root, "file5"), 0666); err != nil { - t.Fatal(err) - } - - // Create new file - if err := ioutil.WriteFile(path.Join(root, "filenew"), []byte("filenew\n"), 0777); err != nil { - t.Fatal(err) - } - - // Create 
new dir - if err := os.MkdirAll(path.Join(root, "dirnew"), 0766); err != nil { - t.Fatal(err) - } - - // Create a new symlink - if err := os.Symlink("targetnew", path.Join(root, "symlinknew")); err != nil { - t.Fatal(err) - } - - // Change a symlink - if err := os.RemoveAll(path.Join(root, "symlink2")); err != nil { - t.Fatal(err) - } - if err := os.Symlink("target2change", path.Join(root, "symlink2")); err != nil { - t.Fatal(err) - } - - // Replace dir with file - if err := os.RemoveAll(path.Join(root, "dir2")); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(path.Join(root, "dir2"), []byte("dir2\n"), 0777); err != nil { - t.Fatal(err) - } - - // Touch dir - if err := system.Chtimes(path.Join(root, "dir3"), time.Now().Add(time.Second), time.Now().Add(time.Second)); err != nil { - t.Fatal(err) - } -} - -func TestChangesDirsMutated(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. - if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - createSampleDir(t, src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(src) - defer os.RemoveAll(dst) - - mutateSampleDir(t, dst) - - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - sort.Sort(changesByPath(changes)) - - expectedChanges := []Change{ - {"/dir1", ChangeDelete}, - {"/dir2", ChangeModify}, - {"/dirnew", ChangeAdd}, - {"/file1", ChangeDelete}, - {"/file2", ChangeModify}, - {"/file3", ChangeModify}, - {"/file4", ChangeModify}, - {"/file5", ChangeModify}, - {"/filenew", ChangeAdd}, - {"/symlink1", ChangeDelete}, - {"/symlink2", ChangeModify}, - {"/symlinknew", ChangeAdd}, - } - - for i := 0; i < max(len(changes), len(expectedChanges)); i++ { - if i >= len(expectedChanges) { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } - if i >= len(changes) { - t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) - } - if changes[i].Path == expectedChanges[i].Path { - if changes[i] != expectedChanges[i] { - t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) - } - } else if changes[i].Path < expectedChanges[i].Path { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } else { - t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) - } - } -} - -func TestApplyLayer(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. 
- if runtime.GOOS == "windows" { - t.Skip("symlinks on Windows") - } - src, err := ioutil.TempDir("", "docker-changes-test") - if err != nil { - t.Fatal(err) - } - createSampleDir(t, src) - defer os.RemoveAll(src) - dst := src + "-copy" - if err := copyDir(src, dst); err != nil { - t.Fatal(err) - } - mutateSampleDir(t, dst) - defer os.RemoveAll(dst) - - changes, err := ChangesDirs(dst, src) - if err != nil { - t.Fatal(err) - } - - layer, err := ExportChanges(dst, changes, nil, nil) - if err != nil { - t.Fatal(err) - } - - layerCopy, err := NewTempArchive(layer, "") - if err != nil { - t.Fatal(err) - } - - if _, err := ApplyLayer(src, layerCopy); err != nil { - t.Fatal(err) - } - - changes2, err := ChangesDirs(src, dst) - if err != nil { - t.Fatal(err) - } - - if len(changes2) != 0 { - t.Fatalf("Unexpected differences after reapplying mutation: %v", changes2) - } -} - -func TestChangesSizeWithHardlinks(t *testing.T) { - // TODO Windows. There may be a way of running this, but turning off for now - // as createSampleDir uses symlinks. - if runtime.GOOS == "windows" { - t.Skip("hardlinks on Windows") - } - srcDir, err := ioutil.TempDir("", "docker-test-srcDir") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(srcDir) - - destDir, err := ioutil.TempDir("", "docker-test-destDir") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(destDir) - - creationSize, err := prepareUntarSourceDirectory(100, destDir, true) - if err != nil { - t.Fatal(err) - } - - changes, err := ChangesDirs(destDir, srcDir) - if err != nil { - t.Fatal(err) - } - - got := ChangesSize(destDir, changes) - if got != int64(creationSize) { - t.Errorf("Expected %d bytes of changes, got %d", creationSize, got) - } -} - -func TestChangesSizeWithNoChanges(t *testing.T) { - size := ChangesSize("/tmp", nil) - if size != 0 { - t.Fatalf("ChangesSizes with no changes should be 0, was %d", size) - } -} - -func TestChangesSizeWithOnlyDeleteChanges(t *testing.T) { - changes := []Change{ - {Path: "deletedPath", Kind: ChangeDelete}, - } - size := ChangesSize("/tmp", changes) - if size != 0 { - t.Fatalf("ChangesSizes with only delete changes should be 0, was %d", size) - } -} - -func TestChangesSize(t *testing.T) { - parentPath, err := ioutil.TempDir("", "docker-changes-test") - defer os.RemoveAll(parentPath) - addition := path.Join(parentPath, "addition") - if err := ioutil.WriteFile(addition, []byte{0x01, 0x01, 0x01}, 0744); err != nil { - t.Fatal(err) - } - modification := path.Join(parentPath, "modification") - if err = ioutil.WriteFile(modification, []byte{0x01, 0x01, 0x01}, 0744); err != nil { - t.Fatal(err) - } - changes := []Change{ - {Path: "addition", Kind: ChangeAdd}, - {Path: "modification", Kind: ChangeModify}, - } - size := ChangesSize(parentPath, changes) - if size != 6 { - t.Fatalf("Expected 6 bytes of changes, got %d", size) - } -} - -func checkChanges(expectedChanges, changes []Change, t *testing.T) { - sort.Sort(changesByPath(expectedChanges)) - sort.Sort(changesByPath(changes)) - for i := 0; i < max(len(changes), len(expectedChanges)); i++ { - if i >= len(expectedChanges) { - t.Fatalf("unexpected change %s\n", changes[i].String()) - } - if i >= len(changes) { - t.Fatalf("no change for expected change %s\n", expectedChanges[i].String()) - } - if changes[i].Path == expectedChanges[i].Path { - if changes[i] != expectedChanges[i] { - t.Fatalf("Wrong change for %s, expected %s, got %s\n", changes[i].Path, changes[i].String(), expectedChanges[i].String()) - } - } else if changes[i].Path < expectedChanges[i].Path { 
- t.Fatalf("unexpected change %s\n", changes[i].String()) - } else { - t.Fatalf("no change for expected change %s != %s\n", expectedChanges[i].String(), changes[i].String()) - } - } -} diff --git a/pkg/archive/changes_unix.go b/pkg/archive/changes_unix.go deleted file mode 100644 index 3778b732cf..0000000000 --- a/pkg/archive/changes_unix.go +++ /dev/null @@ -1,36 +0,0 @@ -// +build !windows - -package archive - -import ( - "os" - "syscall" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - // Don't look at size for dirs, its not a good measure of change - if oldStat.Mode() != newStat.Mode() || - oldStat.UID() != newStat.UID() || - oldStat.GID() != newStat.GID() || - oldStat.Rdev() != newStat.Rdev() || - // Don't look at size for dirs, its not a good measure of change - (oldStat.Mode()&syscall.S_IFDIR != syscall.S_IFDIR && - (!sameFsTimeSpec(oldStat.Mtim(), newStat.Mtim()) || (oldStat.Size() != newStat.Size()))) { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.Mode()&syscall.S_IFDIR != 0 -} - -func getIno(fi os.FileInfo) uint64 { - return uint64(fi.Sys().(*syscall.Stat_t).Ino) -} - -func hasHardlinks(fi os.FileInfo) bool { - return fi.Sys().(*syscall.Stat_t).Nlink > 1 -} diff --git a/pkg/archive/changes_windows.go b/pkg/archive/changes_windows.go deleted file mode 100644 index af94243fc4..0000000000 --- a/pkg/archive/changes_windows.go +++ /dev/null @@ -1,30 +0,0 @@ -package archive - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -func statDifferent(oldStat *system.StatT, newStat *system.StatT) bool { - - // Don't look at size for dirs, its not a good measure of change - if oldStat.ModTime() != newStat.ModTime() || - oldStat.Mode() != newStat.Mode() || - oldStat.Size() != newStat.Size() && !oldStat.IsDir() { - return true - } - return false -} - -func (info *FileInfo) isDir() bool { - return info.parent == nil || info.stat.IsDir() -} - -func getIno(fi os.FileInfo) (inode uint64) { - return -} - -func hasHardlinks(fi os.FileInfo) bool { - return false -} diff --git a/pkg/archive/copy.go b/pkg/archive/copy.go deleted file mode 100644 index a60c948d0d..0000000000 --- a/pkg/archive/copy.go +++ /dev/null @@ -1,458 +0,0 @@ -package archive - -import ( - "archive/tar" - "errors" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/system" -) - -// Errors used or returned by this file. -var ( - ErrNotDirectory = errors.New("not a directory") - ErrDirNotExists = errors.New("no such directory") - ErrCannotCopyDir = errors.New("cannot copy directory") - ErrInvalidCopySource = errors.New("invalid copy source content") -) - -// PreserveTrailingDotOrSeparator returns the given cleaned path (after -// processing using any utility functions from the path or filepath stdlib -// packages) and appends a trailing `/.` or `/` if its corresponding original -// path (from before being processed by utility functions from the path or -// filepath stdlib packages) ends with a trailing `/.` or `/`. If the cleaned -// path already ends in a `.` path segment, then another is not added. If the -// clean path already ends in a path separator, then another is not added. 
-func PreserveTrailingDotOrSeparator(cleanedPath, originalPath string) string { - // Ensure paths are in platform semantics - cleanedPath = normalizePath(cleanedPath) - originalPath = normalizePath(originalPath) - - if !specifiesCurrentDir(cleanedPath) && specifiesCurrentDir(originalPath) { - if !hasTrailingPathSeparator(cleanedPath) { - // Add a separator if it doesn't already end with one (a cleaned - // path would only end in a separator if it is the root). - cleanedPath += string(filepath.Separator) - } - cleanedPath += "." - } - - if !hasTrailingPathSeparator(cleanedPath) && hasTrailingPathSeparator(originalPath) { - cleanedPath += string(filepath.Separator) - } - - return cleanedPath -} - -// assertsDirectory returns whether the given path is -// asserted to be a directory, i.e., the path ends with -// a trailing '/' or `/.`, assuming a path separator of `/`. -func assertsDirectory(path string) bool { - return hasTrailingPathSeparator(path) || specifiesCurrentDir(path) -} - -// hasTrailingPathSeparator returns whether the given -// path ends with the system's path separator character. -func hasTrailingPathSeparator(path string) bool { - return len(path) > 0 && os.IsPathSeparator(path[len(path)-1]) -} - -// specifiesCurrentDir returns whether the given path specifies -// a "current directory", i.e., the last path segment is `.`. -func specifiesCurrentDir(path string) bool { - return filepath.Base(path) == "." -} - -// SplitPathDirEntry splits the given path between its directory name and its -// basename by first cleaning the path but preserves a trailing "." if the -// original path specified the current directory. -func SplitPathDirEntry(path string) (dir, base string) { - cleanedPath := filepath.Clean(normalizePath(path)) - - if specifiesCurrentDir(path) { - cleanedPath += string(filepath.Separator) + "." - } - - return filepath.Dir(cleanedPath), filepath.Base(cleanedPath) -} - -// TarResource archives the resource described by the given CopyInfo to a Tar -// archive. A non-nil error is returned if sourcePath does not exist or is -// asserted to be a directory but exists as another type of file. -// -// This function acts as a convenient wrapper around TarWithOptions, which -// requires a directory as the source path. TarResource accepts either a -// directory or a file path and correctly sets the Tar options. -func TarResource(sourceInfo CopyInfo) (content Archive, err error) { - return TarResourceRebase(sourceInfo.Path, sourceInfo.RebaseName) -} - -// TarResourceRebase is like TarResource but renames the first path element of -// items in the resulting tar archive to match the given rebaseName if not "". -func TarResourceRebase(sourcePath, rebaseName string) (content Archive, err error) { - sourcePath = normalizePath(sourcePath) - if _, err = os.Lstat(sourcePath); err != nil { - // Catches the case where the source does not exist or is not a - // directory if asserted to be a directory, as this also causes an - // error. - return - } - - // Separate the source path between its directory and - // the entry in that directory which we are archiving. 
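// Hedged examples of what SplitPathDirEntry above returns, assuming "/"
// as the separator:
//
//   SplitPathDirEntry("/foo/bar")   => ("/foo", "bar")
//   SplitPathDirEntry("/foo/bar/")  => ("/foo", "bar")
//   SplitPathDirEntry("/foo/bar/.") => ("/foo/bar", ".")
//   SplitPathDirEntry("/")          => ("/", "/")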
- sourceDir, sourceBase := SplitPathDirEntry(sourcePath) - - filter := []string{sourceBase} - - logrus.Debugf("copying %q from %q", sourceBase, sourceDir) - - return TarWithOptions(sourceDir, &TarOptions{ - Compression: Uncompressed, - IncludeFiles: filter, - IncludeSourceDir: true, - RebaseNames: map[string]string{ - sourceBase: rebaseName, - }, - }) -} - -// CopyInfo holds basic info about the source -// or destination path of a copy operation. -type CopyInfo struct { - Path string - Exists bool - IsDir bool - RebaseName string -} - -// CopyInfoSourcePath stats the given path to create a CopyInfo -// struct representing that resource for the source of an archive copy -// operation. The given path should be an absolute local path. A source path -// has all symlinks evaluated that appear before the last path separator ("/" -// on Unix). As it is to be a copy source, the path must exist. -func CopyInfoSourcePath(path string, followLink bool) (CopyInfo, error) { - // normalize the file path and then evaluate the symbol link - // we will use the target file instead of the symbol link if - // followLink is set - path = normalizePath(path) - - resolvedPath, rebaseName, err := ResolveHostSourcePath(path, followLink) - if err != nil { - return CopyInfo{}, err - } - - stat, err := os.Lstat(resolvedPath) - if err != nil { - return CopyInfo{}, err - } - - return CopyInfo{ - Path: resolvedPath, - Exists: true, - IsDir: stat.IsDir(), - RebaseName: rebaseName, - }, nil -} - -// CopyInfoDestinationPath stats the given path to create a CopyInfo -// struct representing that resource for the destination of an archive copy -// operation. The given path should be an absolute local path. -func CopyInfoDestinationPath(path string) (info CopyInfo, err error) { - maxSymlinkIter := 10 // filepath.EvalSymlinks uses 255, but 10 already seems like a lot. - path = normalizePath(path) - originalPath := path - - stat, err := os.Lstat(path) - - if err == nil && stat.Mode()&os.ModeSymlink == 0 { - // The path exists and is not a symlink. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil - } - - // While the path is a symlink. - for n := 0; err == nil && stat.Mode()&os.ModeSymlink != 0; n++ { - if n > maxSymlinkIter { - // Don't follow symlinks more than this arbitrary number of times. - return CopyInfo{}, errors.New("too many symlinks in " + originalPath) - } - - // The path is a symbolic link. We need to evaluate it so that the - // destination of the copy operation is the link target and not the - // link itself. This is notably different than CopyInfoSourcePath which - // only evaluates symlinks before the last appearing path separator. - // Also note that it is okay if the last path element is a broken - // symlink as the copy operation should create the target. - var linkTarget string - - linkTarget, err = os.Readlink(path) - if err != nil { - return CopyInfo{}, err - } - - if !system.IsAbs(linkTarget) { - // Join with the parent directory. - dstParent, _ := SplitPathDirEntry(path) - linkTarget = filepath.Join(dstParent, linkTarget) - } - - path = linkTarget - stat, err = os.Lstat(path) - } - - if err != nil { - // It's okay if the destination path doesn't exist. We can still - // continue the copy operation if the parent directory exists. - if !os.IsNotExist(err) { - return CopyInfo{}, err - } - - // Ensure destination parent dir exists. 
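// A hedged contrast between the two stat helpers in this file (the paths
// are hypothetical):
//
//   CopyInfoSourcePath("/a/link", false) resolves symlinks inside "/a"
//   but keeps "link" itself unresolved, so the link file is what gets
//   archived; CopyInfoDestinationPath("/a/link") follows "link" (up to
//   maxSymlinkIter hops) because the copy should land at the target,
//   and a dangling final hop is fine since the copy creates it.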
- dstParent, _ := SplitPathDirEntry(path) - - parentDirStat, err := os.Lstat(dstParent) - if err != nil { - return CopyInfo{}, err - } - if !parentDirStat.IsDir() { - return CopyInfo{}, ErrNotDirectory - } - - return CopyInfo{Path: path}, nil - } - - // The path exists after resolving symlinks. - return CopyInfo{ - Path: path, - Exists: true, - IsDir: stat.IsDir(), - }, nil -} - -// PrepareArchiveCopy prepares the given srcContent archive, which should -// contain the archived resource described by srcInfo, to the destination -// described by dstInfo. Returns the possibly modified content archive along -// with the path to the destination directory which it should be extracted to. -func PrepareArchiveCopy(srcContent Reader, srcInfo, dstInfo CopyInfo) (dstDir string, content Archive, err error) { - // Ensure in platform semantics - srcInfo.Path = normalizePath(srcInfo.Path) - dstInfo.Path = normalizePath(dstInfo.Path) - - // Separate the destination path between its directory and base - // components in case the source archive contents need to be rebased. - dstDir, dstBase := SplitPathDirEntry(dstInfo.Path) - _, srcBase := SplitPathDirEntry(srcInfo.Path) - - switch { - case dstInfo.Exists && dstInfo.IsDir: - // The destination exists as a directory. No alteration - // to srcContent is needed as its contents can be - // simply extracted to the destination directory. - return dstInfo.Path, ioutil.NopCloser(srcContent), nil - case dstInfo.Exists && srcInfo.IsDir: - // The destination exists as some type of file and the source - // content is a directory. This is an error condition since - // you cannot copy a directory to an existing file location. - return "", nil, ErrCannotCopyDir - case dstInfo.Exists: - // The destination exists as some type of file and the source content - // is also a file. The source content entry will have to be renamed to - // have a basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case srcInfo.IsDir: - // The destination does not exist and the source content is an archive - // of a directory. The archive should be extracted to the parent of - // the destination path instead, and when it is, the directory that is - // created as a result should take the name of the destination path. - // The source content entries will have to be renamed to have a - // basename which matches the destination path's basename. - if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - case assertsDirectory(dstInfo.Path): - // The destination does not exist and is asserted to be created as a - // directory, but the source content is not a directory. This is an - // error condition since you cannot create a directory from a file - // source. - return "", nil, ErrDirNotExists - default: - // The last remaining case is when the destination does not exist, is - // not asserted to be a directory, and the source content is not an - // archive of a directory. It this case, the destination file will need - // to be created when the archive is extracted and the source content - // entry will have to be renamed to have a basename which matches the - // destination path's basename. 
- if len(srcInfo.RebaseName) != 0 { - srcBase = srcInfo.RebaseName - } - return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil - } - -} - -// RebaseArchiveEntries rewrites the given srcContent archive replacing -// an occurrence of oldBase with newBase at the beginning of entry names. -func RebaseArchiveEntries(srcContent Reader, oldBase, newBase string) Archive { - if oldBase == string(os.PathSeparator) { - // If oldBase specifies the root directory, use an empty string as - // oldBase instead so that newBase doesn't replace the path separator - // that all paths will start with. - oldBase = "" - } - - rebased, w := io.Pipe() - - go func() { - srcTar := tar.NewReader(srcContent) - rebasedTar := tar.NewWriter(w) - - for { - hdr, err := srcTar.Next() - if err == io.EOF { - // Signals end of archive. - rebasedTar.Close() - w.Close() - return - } - if err != nil { - w.CloseWithError(err) - return - } - - hdr.Name = strings.Replace(hdr.Name, oldBase, newBase, 1) - - if err = rebasedTar.WriteHeader(hdr); err != nil { - w.CloseWithError(err) - return - } - - if _, err = io.Copy(rebasedTar, srcTar); err != nil { - w.CloseWithError(err) - return - } - } - }() - - return rebased -} - -// CopyResource performs an archive copy from the given source path to the -// given destination path. The source path MUST exist and the destination -// path's parent directory must exist. -func CopyResource(srcPath, dstPath string, followLink bool) error { - var ( - srcInfo CopyInfo - err error - ) - - // Ensure in platform semantics - srcPath = normalizePath(srcPath) - dstPath = normalizePath(dstPath) - - // Clean the source and destination paths. - srcPath = PreserveTrailingDotOrSeparator(filepath.Clean(srcPath), srcPath) - dstPath = PreserveTrailingDotOrSeparator(filepath.Clean(dstPath), dstPath) - - if srcInfo, err = CopyInfoSourcePath(srcPath, followLink); err != nil { - return err - } - - content, err := TarResource(srcInfo) - if err != nil { - return err - } - defer content.Close() - - return CopyTo(content, srcInfo, dstPath) -} - -// CopyTo handles extracting the given content whose -// entries should be sourced from srcInfo to dstPath. -func CopyTo(content Reader, srcInfo CopyInfo, dstPath string) error { - // The destination path need not exist, but CopyInfoDestinationPath will - // ensure that at least the parent directory exists. - dstInfo, err := CopyInfoDestinationPath(normalizePath(dstPath)) - if err != nil { - return err - } - - dstDir, copyArchive, err := PrepareArchiveCopy(content, srcInfo, dstInfo) - if err != nil { - return err - } - defer copyArchive.Close() - - options := &TarOptions{ - NoLchown: true, - NoOverwriteDirNonDir: true, - } - - return Untar(copyArchive, dstDir, options) -} - -// ResolveHostSourcePath decides real path need to be copied with parameters such as -// whether to follow symbol link or not, if followLink is true, resolvedPath will return -// link target of any symbol link file, else it will only resolve symlink of directory -// but return symbol link file itself without resolving. 
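// A hedged end-to-end sketch of the copy helpers above (hypothetical
// paths, error handling elided):
//
//   // Copy /src/data into the directory /dst, following symlinks:
//   err := CopyResource("/src/data", "/dst/", true)
//
// The trailing "/" on the destination asserts that it is a directory.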
-func ResolveHostSourcePath(path string, followLink bool) (resolvedPath, rebaseName string, err error) { - if followLink { - resolvedPath, err = filepath.EvalSymlinks(path) - if err != nil { - return - } - - resolvedPath, rebaseName = GetRebaseName(path, resolvedPath) - } else { - dirPath, basePath := filepath.Split(path) - - // if not follow symbol link, then resolve symbol link of parent dir - var resolvedDirPath string - resolvedDirPath, err = filepath.EvalSymlinks(dirPath) - if err != nil { - return - } - // resolvedDirPath will have been cleaned (no trailing path separators) so - // we can manually join it with the base path element. - resolvedPath = resolvedDirPath + string(filepath.Separator) + basePath - if hasTrailingPathSeparator(path) && filepath.Base(path) != filepath.Base(resolvedPath) { - rebaseName = filepath.Base(path) - } - } - return resolvedPath, rebaseName, nil -} - -// GetRebaseName normalizes and compares path and resolvedPath, -// return completed resolved path and rebased file name -func GetRebaseName(path, resolvedPath string) (string, string) { - // linkTarget will have been cleaned (no trailing path separators and dot) so - // we can manually join it with them - var rebaseName string - if specifiesCurrentDir(path) && !specifiesCurrentDir(resolvedPath) { - resolvedPath += string(filepath.Separator) + "." - } - - if hasTrailingPathSeparator(path) && !hasTrailingPathSeparator(resolvedPath) { - resolvedPath += string(filepath.Separator) - } - - if filepath.Base(path) != filepath.Base(resolvedPath) { - // In the case where the path had a trailing separator and a symlink - // evaluation has changed the last path component, we will need to - // rebase the name in the archive that is being copied to match the - // originally requested name. - rebaseName = filepath.Base(path) - } - return resolvedPath, rebaseName -} diff --git a/pkg/archive/copy_unix.go b/pkg/archive/copy_unix.go deleted file mode 100644 index e305b5e4af..0000000000 --- a/pkg/archive/copy_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.ToSlash(path) -} diff --git a/pkg/archive/copy_unix_test.go b/pkg/archive/copy_unix_test.go deleted file mode 100644 index ecbfc172b0..0000000000 --- a/pkg/archive/copy_unix_test.go +++ /dev/null @@ -1,978 +0,0 @@ -// +build !windows - -// TODO Windows: Some of these tests may be salvagable and portable to Windows. - -package archive - -import ( - "bytes" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" -) - -func removeAllPaths(paths ...string) { - for _, path := range paths { - os.RemoveAll(path) - } -} - -func getTestTempDirs(t *testing.T) (tmpDirA, tmpDirB string) { - var err error - - if tmpDirA, err = ioutil.TempDir("", "archive-copy-test"); err != nil { - t.Fatal(err) - } - - if tmpDirB, err = ioutil.TempDir("", "archive-copy-test"); err != nil { - t.Fatal(err) - } - - return -} - -func isNotDir(err error) bool { - return strings.Contains(err.Error(), "not a directory") -} - -func joinTrailingSep(pathElements ...string) string { - joined := filepath.Join(pathElements...) 
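// Hedged examples of GetRebaseName, defined above (paths hypothetical):
//
//   GetRebaseName("/a/b/", "/a/c") => ("/a/c/", "b") // "b" was a symlink to "c"
//   GetRebaseName("/a/b", "/a/b")  => ("/a/b", "")   // nothing to rebase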
- - return fmt.Sprintf("%s%c", joined, filepath.Separator) -} - -func fileContentsEqual(t *testing.T, filenameA, filenameB string) (err error) { - t.Logf("checking for equal file contents: %q and %q\n", filenameA, filenameB) - - fileA, err := os.Open(filenameA) - if err != nil { - return - } - defer fileA.Close() - - fileB, err := os.Open(filenameB) - if err != nil { - return - } - defer fileB.Close() - - hasher := sha256.New() - - if _, err = io.Copy(hasher, fileA); err != nil { - return - } - - hashA := hasher.Sum(nil) - hasher.Reset() - - if _, err = io.Copy(hasher, fileB); err != nil { - return - } - - hashB := hasher.Sum(nil) - - if !bytes.Equal(hashA, hashB) { - err = fmt.Errorf("file content hashes not equal - expected %s, got %s", hex.EncodeToString(hashA), hex.EncodeToString(hashB)) - } - - return -} - -func dirContentsEqual(t *testing.T, newDir, oldDir string) (err error) { - t.Logf("checking for equal directory contents: %q and %q\n", newDir, oldDir) - - var changes []Change - - if changes, err = ChangesDirs(newDir, oldDir); err != nil { - return - } - - if len(changes) != 0 { - err = fmt.Errorf("expected no changes between directories, but got: %v", changes) - } - - return -} - -func logDirContents(t *testing.T, dirPath string) { - logWalkedPaths := filepath.WalkFunc(func(path string, info os.FileInfo, err error) error { - if err != nil { - t.Errorf("stat error for path %q: %s", path, err) - return nil - } - - if info.IsDir() { - path = joinTrailingSep(path) - } - - t.Logf("\t%s", path) - - return nil - }) - - t.Logf("logging directory contents: %q", dirPath) - - if err := filepath.Walk(dirPath, logWalkedPaths); err != nil { - t.Fatal(err) - } -} - -func testCopyHelper(t *testing.T, srcPath, dstPath string) (err error) { - t.Logf("copying from %q to %q (not follow symbol link)", srcPath, dstPath) - - return CopyResource(srcPath, dstPath, false) -} - -func testCopyHelperFSym(t *testing.T, srcPath, dstPath string) (err error) { - t.Logf("copying from %q to %q (follow symbol link)", srcPath, dstPath) - - return CopyResource(srcPath, dstPath, true) -} - -// Basic assumptions about SRC and DST: -// 1. SRC must exist. -// 2. If SRC ends with a trailing separator, it must be a directory. -// 3. DST parent directory must exist. -// 4. If DST exists as a file, it must not end with a trailing separator. - -// First get these easy error cases out of the way. - -// Test for error when SRC does not exist. -func TestCopyErrSrcNotExists(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - if _, err := CopyInfoSourcePath(filepath.Join(tmpDirA, "file1"), false); !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } -} - -// Test for error when SRC ends in a trailing -// path separator but it exists as a file. -func TestCopyErrSrcNotDir(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - if _, err := CopyInfoSourcePath(joinTrailingSep(tmpDirA, "file1"), false); !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } -} - -// Test for error when SRC is a valid file or directory, -// but the DST parent directory does not exist. -func TestCopyErrDstParentNotExists(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. 
- createSampleDir(t, tmpDirA) - - srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - - // Try with a file source. - content, err := TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - // Copy to a file whose parent does not exist. - if err = CopyTo(content, srcInfo, filepath.Join(tmpDirB, "fakeParentDir", "file1")); err == nil { - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } - - // Try with a directory source. - srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - - content, err = TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - // Copy to a directory whose parent does not exist. - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "fakeParentDir", "fakeDstDir")); err == nil { - t.Fatal("expected IsNotExist error, but got nil instead") - } - - if !os.IsNotExist(err) { - t.Fatalf("expected IsNotExist error, but got %T: %s", err, err) - } -} - -// Test for error when DST ends in a trailing -// path separator but exists as a file. -func TestCopyErrDstNotDir(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - // Try with a file source. - srcInfo := CopyInfo{Path: filepath.Join(tmpDirA, "file1"), Exists: true, IsDir: false} - - content, err := TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } - - // Try with a directory source. - srcInfo = CopyInfo{Path: filepath.Join(tmpDirA, "dir1"), Exists: true, IsDir: true} - - content, err = TarResource(srcInfo) - if err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - defer content.Close() - - if err = CopyTo(content, srcInfo, joinTrailingSep(tmpDirB, "file1")); err == nil { - t.Fatal("expected IsNotDir error, but got nil instead") - } - - if !isNotDir(err) { - t.Fatalf("expected IsNotDir error, but got %T: %s", err, err) - } -} - -// Possibilities are reduced to the remaining 10 cases: -// -// case | srcIsDir | onlyDirContents | dstExists | dstIsDir | dstTrSep | action -// =================================================================================================== -// A | no | - | no | - | no | create file -// B | no | - | no | - | yes | error -// C | no | - | yes | no | - | overwrite file -// D | no | - | yes | yes | - | create file in dst dir -// E | yes | no | no | - | - | create dir, copy contents -// F | yes | no | yes | no | - | error -// G | yes | no | yes | yes | - | copy dir and contents -// H | yes | yes | no | - | - | create dir, copy contents -// I | yes | yes | yes | no | - | error -// J | yes | yes | yes | yes | - | copy dir contents -// - -// A. SRC specifies a file and DST (no trailing path separator) doesn't -// exist. This should create a file with the name DST and copy the -// contents of the source file into it. 
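Before the per-case tests, a short sketch of how the first two rows of the matrix behave at the CopyResource level (the import path matches this tree; the /tmp paths are assumptions for illustration):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // Row A: DST does not exist and has no trailing separator, so a
        // file named "itWorks.txt" is created from the source file.
        err := archive.CopyResource("/tmp/src/file1", "/tmp/dst/itWorks.txt", false)
        fmt.Println("case A:", err) // expected: <nil>

        // Row B: DST does not exist but ends in a separator. A single-file
        // source cannot create a directory, so this fails.
        err = archive.CopyResource("/tmp/src/file1", "/tmp/dst/testDir/", false)
        fmt.Println("case B:", err) // expected: archive.ErrDirNotExists
    }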
-func TestCopyCaseA(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcPath := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "itWorks.txt") - - var err error - - if err = testCopyHelper(t, srcPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } - os.Remove(dstPath) - - symlinkPath := filepath.Join(tmpDirA, "symlink3") - symlinkPath1 := filepath.Join(tmpDirA, "symlink4") - linkTarget := filepath.Join(tmpDirA, "file1") - - if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { - t.Fatal(err) - } - os.Remove(dstPath) - if err = testCopyHelperFSym(t, symlinkPath1, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { - t.Fatal(err) - } -} - -// B. SRC specifies a file and DST (with trailing path separator) doesn't -// exist. This should cause an error because the copy operation cannot -// create a directory when copying a single file. -func TestCopyCaseB(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcPath := filepath.Join(tmpDirA, "file1") - dstDir := joinTrailingSep(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcPath, dstDir); err == nil { - t.Fatal("expected ErrDirNotExists error, but got nil instead") - } - - if err != ErrDirNotExists { - t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) - } - - symlinkPath := filepath.Join(tmpDirA, "symlink3") - - if err = testCopyHelperFSym(t, symlinkPath, dstDir); err == nil { - t.Fatal("expected ErrDirNotExists error, but got nil instead") - } - if err != ErrDirNotExists { - t.Fatalf("expected ErrDirNotExists error, but got %T: %s", err, err) - } - -} - -// C. SRC specifies a file and DST exists as a file. This should overwrite -// the file at DST with the contents of the source file. -func TestCopyCaseC(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "file2") - - var err error - - // Ensure they start out different. - if err = fileContentsEqual(t, srcPath, dstPath); err == nil { - t.Fatal("expected different file contents") - } - - if err = testCopyHelper(t, srcPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// C. Symbol link following version: -// SRC specifies a file and DST exists as a file. This should overwrite -// the file at DST with the contents of the source file. -func TestCopyCaseCFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. 
- createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - symlinkPathBad := filepath.Join(tmpDirA, "symlink1") - symlinkPath := filepath.Join(tmpDirA, "symlink3") - linkTarget := filepath.Join(tmpDirA, "file1") - dstPath := filepath.Join(tmpDirB, "file2") - - var err error - - // first to test broken link - if err = testCopyHelperFSym(t, symlinkPathBad, dstPath); err == nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - // test symbol link -> symbol link -> target - // Ensure they start out different. - if err = fileContentsEqual(t, linkTarget, dstPath); err == nil { - t.Fatal("expected different file contents") - } - - if err = testCopyHelperFSym(t, symlinkPath, dstPath); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { - t.Fatal(err) - } -} - -// D. SRC specifies a file and DST exists as a directory. This should place -// a copy of the source file inside it using the basename from SRC. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseD(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "file1") - dstDir := filepath.Join(tmpDirB, "dir1") - dstPath := filepath.Join(dstDir, "file1") - - var err error - - // Ensure that dstPath doesn't exist. - if _, err = os.Stat(dstPath); !os.IsNotExist(err) { - t.Fatalf("did not expect dstPath %q to exist", dstPath) - } - - if err = testCopyHelper(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir1") - - if err = testCopyHelper(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, srcPath, dstPath); err != nil { - t.Fatal(err) - } -} - -// D. Symbol link following version: -// SRC specifies a file and DST exists as a directory. This should place -// a copy of the source file inside it using the basename from SRC. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseDFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcPath := filepath.Join(tmpDirA, "symlink4") - linkTarget := filepath.Join(tmpDirA, "file1") - dstDir := filepath.Join(tmpDirB, "dir1") - dstPath := filepath.Join(dstDir, "symlink4") - - var err error - - // Ensure that dstPath doesn't exist. - if _, err = os.Stat(dstPath); !os.IsNotExist(err) { - t.Fatalf("did not expect dstPath %q to exist", dstPath) - } - - if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. 
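All of the FSym test variants hinge on CopyResource's third argument. A contrast sketch with assumed paths (the symlink name echoes the fixtures used above):

    package main

    import (
        "log"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // followLink=true: the symlink is resolved first, so the target's
        // contents are what end up at the destination (compare linkTarget
        // in the tests above).
        if err := archive.CopyResource("/tmp/src/symlink3", "/tmp/dst", true); err != nil {
            log.Fatal(err)
        }

        // followLink=false: the link itself is the resource being copied.
        if err := archive.CopyResource("/tmp/src/symlink3", "/tmp/dst", false); err != nil {
            log.Fatal(err)
        }
    }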
- - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir1") - - if err = testCopyHelperFSym(t, srcPath, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = fileContentsEqual(t, linkTarget, dstPath); err != nil { - t.Fatal(err) - } -} - -// E. SRC specifies a directory and DST does not exist. This should create a -// directory at DST and copy the contents of the SRC directory into the DST -// directory. Ensure this works whether DST has a trailing path separator or -// not. -func TestCopyCaseE(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } -} - -// E. Symbol link following version: -// SRC specifies a directory and DST does not exist. This should create a -// directory at DST and copy the contents of the SRC directory into the DST -// directory. Ensure this works whether DST has a trailing path separator or -// not. -func TestCopyCaseEFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := filepath.Join(tmpDirA, "dirSymlink") - linkTarget := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Fatal(err) - } -} - -// F. SRC specifies a directory and DST exists as a file. This should cause an -// error as it is not possible to overwrite a file with a directory. -func TestCopyCaseF(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. 
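Rows F and G of the matrix, which TestCopyCaseF and TestCopyCaseG below cover, reduce to two calls (assumed import path and paths):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // Row F: a directory source cannot overwrite an existing file.
        err := archive.CopyResource("/tmp/src/dir1", "/tmp/dst/file1", false)
        fmt.Println("case F:", err) // expected: archive.ErrCannotCopyDir

        // Row G: copying into an existing directory nests the source, so
        // the result lands at /tmp/dst/dir2/dir1.
        err = archive.CopyResource("/tmp/src/dir1", "/tmp/dst/dir2", false)
        fmt.Println("case G:", err) // expected: <nil>
    }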
- createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dir1") - symSrcDir := filepath.Join(tmpDirA, "dirSymlink") - dstFile := filepath.Join(tmpDirB, "file1") - - var err error - - if err = testCopyHelper(t, srcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } - - // now test with symbol link - if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } -} - -// G. SRC specifies a directory and DST exists as a directory. This should copy -// the SRC directory and all its contents to the DST directory. Ensure this -// works whether DST has a trailing path separator or not. -func TestCopyCaseG(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "dir2") - resultDir := filepath.Join(dstDir, "dir1") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, resultDir, srcDir); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir2") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, resultDir, srcDir); err != nil { - t.Fatal(err) - } -} - -// G. Symbol link version: -// SRC specifies a directory and DST exists as a directory. This should copy -// the SRC directory and all its contents to the DST directory. Ensure this -// works whether DST has a trailing path separator or not. -func TestCopyCaseGFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := filepath.Join(tmpDirA, "dirSymlink") - linkTarget := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "dir2") - resultDir := filepath.Join(dstDir, "dirSymlink") - - var err error - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir2") - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, resultDir, linkTarget); err != nil { - t.Fatal(err) - } -} - -// H. 
SRC specifies a directory's contents only and DST does not exist. This -// should create a directory at DST and copy the contents of the SRC -// directory (but not the directory itself) into the DST directory. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseH(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } -} - -// H. Symbol link following version: -// SRC specifies a directory's contents only and DST does not exist. This -// should create a directory at DST and copy the contents of the SRC -// directory (but not the directory itself) into the DST directory. Ensure -// this works whether DST has a trailing path separator or not. -func TestCopyCaseHFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A with some sample files and directories. - createSampleDir(t, tmpDirA) - - srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." - linkTarget := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "testDir") - - var err error - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "testDir") - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Log("dir contents not equal") - logDirContents(t, tmpDirA) - logDirContents(t, tmpDirB) - t.Fatal(err) - } -} - -// I. SRC specifies a directory's contents only and DST exists as a file. This -// should cause an error as it is not possible to overwrite a file with a -// directory. -func TestCopyCaseI(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." 
- symSrcDir := filepath.Join(tmpDirB, "dirSymlink") - dstFile := filepath.Join(tmpDirB, "file1") - - var err error - - if err = testCopyHelper(t, srcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } - - // now try with symbol link of dir - if err = testCopyHelperFSym(t, symSrcDir, dstFile); err == nil { - t.Fatal("expected ErrCannotCopyDir error, but got nil instead") - } - - if err != ErrCannotCopyDir { - t.Fatalf("expected ErrCannotCopyDir error, but got %T: %s", err, err) - } -} - -// J. SRC specifies a directory's contents only and DST exists as a directory. -// This should copy the contents of the SRC directory (but not the directory -// itself) into the DST directory. Ensure this works whether DST has a -// trailing path separator or not. -func TestCopyCaseJ(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dir1") + "." - dstDir := filepath.Join(tmpDirB, "dir5") - - var err error - - // first to create an empty dir - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. - - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir5") - - if err = testCopyHelper(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, srcDir); err != nil { - t.Fatal(err) - } -} - -// J. Symbol link following version: -// SRC specifies a directory's contents only and DST exists as a directory. -// This should copy the contents of the SRC directory (but not the directory -// itself) into the DST directory. Ensure this works whether DST has a -// trailing path separator or not. -func TestCopyCaseJFSym(t *testing.T) { - tmpDirA, tmpDirB := getTestTempDirs(t) - defer removeAllPaths(tmpDirA, tmpDirB) - - // Load A and B with some sample files and directories. - createSampleDir(t, tmpDirA) - createSampleDir(t, tmpDirB) - - srcDir := joinTrailingSep(tmpDirA, "dirSymlink") + "." - linkTarget := filepath.Join(tmpDirA, "dir1") - dstDir := filepath.Join(tmpDirB, "dir5") - - var err error - - // first to create an empty dir - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Fatal(err) - } - - // Now try again but using a trailing path separator for dstDir. 
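The "/." suffix these tests build with joinTrailingSep(...) + "." is the contents-only marker from rows H-J: it copies a directory's entries without recreating the directory itself at the destination. A one-call sketch with assumed paths:

    package main

    import (
        "log"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // Copies the entries inside dir1 directly into /tmp/dst without
        // creating /tmp/dst/dir1 (rows H-J of the matrix above).
        if err := archive.CopyResource("/tmp/src/dir1/.", "/tmp/dst", false); err != nil {
            log.Fatal(err)
        }
    }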
- - if err = os.RemoveAll(dstDir); err != nil { - t.Fatalf("unable to remove dstDir: %s", err) - } - - if err = os.MkdirAll(dstDir, os.FileMode(0755)); err != nil { - t.Fatalf("unable to make dstDir: %s", err) - } - - dstDir = joinTrailingSep(tmpDirB, "dir5") - - if err = testCopyHelperFSym(t, srcDir, dstDir); err != nil { - t.Fatalf("unexpected error %T: %s", err, err) - } - - if err = dirContentsEqual(t, dstDir, linkTarget); err != nil { - t.Fatal(err) - } -} diff --git a/pkg/archive/copy_windows.go b/pkg/archive/copy_windows.go deleted file mode 100644 index 2b775b45c4..0000000000 --- a/pkg/archive/copy_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -package archive - -import ( - "path/filepath" -) - -func normalizePath(path string) string { - return filepath.FromSlash(path) -} diff --git a/pkg/archive/diff.go b/pkg/archive/diff.go deleted file mode 100644 index 1b08ad33ab..0000000000 --- a/pkg/archive/diff.go +++ /dev/null @@ -1,279 +0,0 @@ -package archive - -import ( - "archive/tar" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/idtools" - "github.com/docker/docker/pkg/pools" - "github.com/docker/docker/pkg/system" -) - -// UnpackLayer unpack `layer` to a `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func UnpackLayer(dest string, layer Reader, options *TarOptions) (size int64, err error) { - tr := tar.NewReader(layer) - trBuf := pools.BufioReader32KPool.Get(tr) - defer pools.BufioReader32KPool.Put(trBuf) - - var dirs []*tar.Header - unpackedPaths := make(map[string]struct{}) - - if options == nil { - options = &TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - remappedRootUID, remappedRootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return 0, err - } - - aufsTempdir := "" - aufsHardlinks := make(map[string]*tar.Header) - - if options == nil { - options = &TarOptions{} - } - // Iterate through the files in the archive. - for { - hdr, err := tr.Next() - if err == io.EOF { - // end of tar archive - break - } - if err != nil { - return 0, err - } - - size += hdr.Size - - // Normalize name, for safety and for a simple is-root check - hdr.Name = filepath.Clean(hdr.Name) - - // Windows does not support filenames with colons in them. Ignore - // these files. This is not a problem though (although it might - // appear that it is). Let's suppose a client is running docker pull. - // The daemon it points to is Windows. Would it make sense for the - // client to be doing a docker pull Ubuntu for example (which has files - // with colons in the name under /usr/share/man/man3)? No, absolutely - // not as it would really only make sense that they were pulling a - // Windows image. However, for development, it is necessary to be able - // to pull Linux images which are in the repository. - // - // TODO Windows. Once the registry is aware of what images are Windows- - // specific or Linux-specific, this warning should be changed to an error - // to cater for the situation where someone does manage to upload a Linux - // image but have it tagged as Windows inadvertently. - if runtime.GOOS == "windows" { - if strings.Contains(hdr.Name, ":") { - logrus.Warnf("Windows: Ignoring %s (is this a Linux image?)", hdr.Name) - continue - } - } - - // Note as these operations are platform specific, so must the slash be. 
-		if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
-			// Not the root directory, ensure that the parent directory exists.
-			// This happened in some tests where an image had a tarfile without any
-			// parent directories.
-			parent := filepath.Dir(hdr.Name)
-			parentPath := filepath.Join(dest, parent)
-
-			if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
-				err = system.MkdirAll(parentPath, 0600)
-				if err != nil {
-					return 0, err
-				}
-			}
-		}
-
-		// Skip AUFS metadata dirs
-		if strings.HasPrefix(hdr.Name, WhiteoutMetaPrefix) {
-			// Regular files inside /.wh..wh.plnk can be used as hardlink targets.
-			// We don't want this directory, but we need the files in it so that
-			// such hardlinks can be resolved.
-			if strings.HasPrefix(hdr.Name, WhiteoutLinkDir) && hdr.Typeflag == tar.TypeReg {
-				basename := filepath.Base(hdr.Name)
-				aufsHardlinks[basename] = hdr
-				if aufsTempdir == "" {
-					if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
-						return 0, err
-					}
-					defer os.RemoveAll(aufsTempdir)
-				}
-				if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true, nil); err != nil {
-					return 0, err
-				}
-			}
-
-			if hdr.Name != WhiteoutOpaqueDir {
-				continue
-			}
-		}
-		path := filepath.Join(dest, hdr.Name)
-		rel, err := filepath.Rel(dest, path)
-		if err != nil {
-			return 0, err
-		}
-
-		// Note as these operations are platform specific, so must the slash be.
-		if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
-			return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
-		}
-		base := filepath.Base(path)
-
-		if strings.HasPrefix(base, WhiteoutPrefix) {
-			dir := filepath.Dir(path)
-			if base == WhiteoutOpaqueDir {
-				_, err := os.Lstat(dir)
-				if err != nil {
-					return 0, err
-				}
-				err = filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
-					if err != nil {
-						if os.IsNotExist(err) {
-							err = nil // parent was deleted
-						}
-						return err
-					}
-					if path == dir {
-						return nil
-					}
-					if _, exists := unpackedPaths[path]; !exists {
-						err := os.RemoveAll(path)
-						return err
-					}
-					return nil
-				})
-				if err != nil {
-					return 0, err
-				}
-			} else {
-				originalBase := base[len(WhiteoutPrefix):]
-				originalPath := filepath.Join(dir, originalBase)
-				if err := os.RemoveAll(originalPath); err != nil {
-					return 0, err
-				}
-			}
-		} else {
-			// If path exists we almost always just want to remove and replace it.
-			// The only exception is when it is a directory *and* the file from
-			// the layer is also a directory. Then we want to merge them (i.e.
-			// just apply the metadata from the layer).
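The whiteout handling here is easiest to see end to end: a layer entry named ".wh.<name>" deletes <name> from the unpacked tree, and a ".wh..wh..opq" entry makes a directory opaque, removing anything beneath it that this layer did not itself unpack. A hedged usage sketch, assuming the pkg/archive import path and an illustrative layer tarball on disk:

    package main

    import (
        "log"
        "os"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // layer.tar is assumed to contain, e.g., an entry ".wh.file1";
        // after the call, /tmp/rootfs/file1 is gone.
        f, err := os.Open("/tmp/layer.tar")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        size, err := archive.UnpackLayer("/tmp/rootfs", f, nil)
        if err != nil {
            log.Fatal(err)
        }
        log.Printf("unpacked %d bytes", size)
    }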
- if fi, err := os.Lstat(path); err == nil { - if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) { - if err := os.RemoveAll(path); err != nil { - return 0, err - } - } - } - - trBuf.Reset(tr) - srcData := io.Reader(trBuf) - srcHdr := hdr - - // Hard links into /.wh..wh.plnk don't work, as we don't extract that directory, so - // we manually retarget these into the temporary files we extracted them into - if hdr.Typeflag == tar.TypeLink && strings.HasPrefix(filepath.Clean(hdr.Linkname), WhiteoutLinkDir) { - linkBasename := filepath.Base(hdr.Linkname) - srcHdr = aufsHardlinks[linkBasename] - if srcHdr == nil { - return 0, fmt.Errorf("Invalid aufs hardlink") - } - tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename)) - if err != nil { - return 0, err - } - defer tmpFile.Close() - srcData = tmpFile - } - - // if the options contain a uid & gid maps, convert header uid/gid - // entries using the maps such that lchown sets the proper mapped - // uid/gid after writing the file. We only perform this mapping if - // the file isn't already owned by the remapped root UID or GID, as - // that specific uid/gid has no mapping from container -> host, and - // those files already have the proper ownership for inside the - // container. - if srcHdr.Uid != remappedRootUID { - xUID, err := idtools.ToHost(srcHdr.Uid, options.UIDMaps) - if err != nil { - return 0, err - } - srcHdr.Uid = xUID - } - if srcHdr.Gid != remappedRootGID { - xGID, err := idtools.ToHost(srcHdr.Gid, options.GIDMaps) - if err != nil { - return 0, err - } - srcHdr.Gid = xGID - } - if err := createTarFile(path, dest, srcHdr, srcData, true, nil); err != nil { - return 0, err - } - - // Directory mtimes must be handled at the end to avoid further - // file creation in them to modify the directory mtime - if hdr.Typeflag == tar.TypeDir { - dirs = append(dirs, hdr) - } - unpackedPaths[path] = struct{}{} - } - } - - for _, hdr := range dirs { - path := filepath.Join(dest, hdr.Name) - if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil { - return 0, err - } - } - - return size, nil -} - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer Reader) (int64, error) { - return applyLayerHandler(dest, layer, &TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. 
-func ApplyUncompressedLayer(dest string, layer Reader, options *TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} - -// do the bulk load of ApplyLayer, but allow for not calling DecompressStream -func applyLayerHandler(dest string, layer Reader, options *TarOptions, decompress bool) (int64, error) { - dest = filepath.Clean(dest) - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - if err != nil { - return 0, err - } - defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform - - if decompress { - layer, err = DecompressStream(layer) - if err != nil { - return 0, err - } - } - return UnpackLayer(dest, layer, options) -} diff --git a/pkg/archive/diff_test.go b/pkg/archive/diff_test.go deleted file mode 100644 index 8167941ac0..0000000000 --- a/pkg/archive/diff_test.go +++ /dev/null @@ -1,386 +0,0 @@ -package archive - -import ( - "archive/tar" - "io" - "io/ioutil" - "os" - "path/filepath" - "reflect" - "runtime" - "testing" - - "github.com/docker/docker/pkg/ioutils" -) - -func TestApplyLayerInvalidFilenames(t *testing.T) { - // TODO Windows: Figure out how to fix this test. - if runtime.GOOS == "windows" { - t.Skip("Passes but hits breakoutError: platform and architecture is not supported") - } - for i, headers := range [][]*tar.Header{ - { - { - Name: "../victim/dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { - { - // Note the leading slash - Name: "/../victim/slash-dotdot", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidFilenames", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestApplyLayerInvalidHardlink(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("TypeLink support on Windows") - } - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeLink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeLink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (hardlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try reading victim/hello (hardlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // Try removing victim directory (hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeLink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidHardlink", headers); err != nil { - t.Fatalf("i=%d. 
%v", i, err) - } - } -} - -func TestApplyLayerInvalidSymlink(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("TypeSymLink support on Windows") - } - for i, headers := range [][]*tar.Header{ - { // try reading victim/hello (../) - { - Name: "dotdot", - Typeflag: tar.TypeSymlink, - Linkname: "../victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (/../) - { - Name: "slash-dotdot", - Typeflag: tar.TypeSymlink, - // Note the leading slash - Linkname: "/../victim/hello", - Mode: 0644, - }, - }, - { // try writing victim/file - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim/file", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "symlink", - Typeflag: tar.TypeSymlink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try reading victim/hello (symlink, hardlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "hardlink", - Typeflag: tar.TypeLink, - Linkname: "loophole-victim/hello", - Mode: 0644, - }, - }, - { // try removing victim directory (symlink) - { - Name: "loophole-victim", - Typeflag: tar.TypeSymlink, - Linkname: "../victim", - Mode: 0755, - }, - { - Name: "loophole-victim", - Typeflag: tar.TypeReg, - Mode: 0644, - }, - }, - } { - if err := testBreakout("applylayer", "docker-TestApplyLayerInvalidSymlink", headers); err != nil { - t.Fatalf("i=%d. %v", i, err) - } - } -} - -func TestApplyLayerWhiteouts(t *testing.T) { - // TODO Windows: Figure out why this test fails - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - - wd, err := ioutil.TempDir("", "graphdriver-test-whiteouts") - if err != nil { - return - } - defer os.RemoveAll(wd) - - base := []string{ - ".baz", - "bar/", - "bar/bax", - "bar/bay/", - "baz", - "foo/", - "foo/.abc", - "foo/.bcd/", - "foo/.bcd/a", - "foo/cde/", - "foo/cde/def", - "foo/cde/efg", - "foo/fgh", - "foobar", - } - - type tcase struct { - change, expected []string - } - - tcases := []tcase{ - { - base, - base, - }, - { - []string{ - ".bay", - ".wh.baz", - "foo/", - "foo/.bce", - "foo/.wh..wh..opq", - "foo/cde/", - "foo/cde/efg", - }, - []string{ - ".bay", - ".baz", - "bar/", - "bar/bax", - "bar/bay/", - "foo/", - "foo/.bce", - "foo/cde/", - "foo/cde/efg", - "foobar", - }, - }, - { - []string{ - ".bay", - ".wh..baz", - ".wh.foobar", - "foo/", - "foo/.abc", - "foo/.wh.cde", - "bar/", - }, - []string{ - ".bay", - "bar/", - "bar/bax", - "bar/bay/", - "foo/", - "foo/.abc", - "foo/.bce", - }, - }, - { - []string{ - ".abc", - ".wh..wh..opq", - "foobar", - }, - []string{ - ".abc", - "foobar", - }, - }, - } - - for i, tc := range tcases { - l, err := makeTestLayer(tc.change) - if err != nil { - t.Fatal(err) - } - - _, err = UnpackLayer(wd, l, nil) - if err != nil { - t.Fatal(err) - } - err = l.Close() - if err != nil { - t.Fatal(err) - } - - paths, err := readDirContents(wd) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(tc.expected, paths) { - t.Fatalf("invalid files for layer %d: expected %q, got %q", i, tc.expected, paths) - } - } - -} - -func makeTestLayer(paths []string) (rc io.ReadCloser, err error) { - tmpDir, err := ioutil.TempDir("", "graphdriver-test-mklayer") - if err != nil { - return - } - defer func() { - if err != nil { - os.RemoveAll(tmpDir) - } - }() - for _, p 
:= range paths { - if p[len(p)-1] == filepath.Separator { - if err = os.MkdirAll(filepath.Join(tmpDir, p), 0700); err != nil { - return - } - } else { - if err = ioutil.WriteFile(filepath.Join(tmpDir, p), nil, 0600); err != nil { - return - } - } - } - archive, err := Tar(tmpDir, Uncompressed) - if err != nil { - return - } - return ioutils.NewReadCloserWrapper(archive, func() error { - err := archive.Close() - os.RemoveAll(tmpDir) - return err - }), nil -} - -func readDirContents(root string) ([]string, error) { - var files []string - err := filepath.Walk(root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if path == root { - return nil - } - rel, err := filepath.Rel(root, path) - if err != nil { - return err - } - if info.IsDir() { - rel = rel + "/" - } - files = append(files, rel) - return nil - }) - if err != nil { - return nil, err - } - return files, nil -} diff --git a/pkg/archive/example_changes.go b/pkg/archive/example_changes.go deleted file mode 100644 index cedd46a408..0000000000 --- a/pkg/archive/example_changes.go +++ /dev/null @@ -1,97 +0,0 @@ -// +build ignore - -// Simple tool to create an archive stream from an old and new directory -// -// By default it will stream the comparison of two temporary directories with junk files -package main - -import ( - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/archive" -) - -var ( - flDebug = flag.Bool("D", false, "debugging output") - flNewDir = flag.String("newdir", "", "") - flOldDir = flag.String("olddir", "", "") - log = logrus.New() -) - -func main() { - flag.Usage = func() { - fmt.Println("Produce a tar from comparing two directory paths. By default a demo tar is created of around 200 files (including hardlinks)") - fmt.Printf("%s [OPTIONS]\n", os.Args[0]) - flag.PrintDefaults() - } - flag.Parse() - log.Out = os.Stderr - if (len(os.Getenv("DEBUG")) > 0) || *flDebug { - logrus.SetLevel(logrus.DebugLevel) - } - var newDir, oldDir string - - if len(*flNewDir) == 0 { - var err error - newDir, err = ioutil.TempDir("", "docker-test-newDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(newDir) - if _, err := prepareUntarSourceDirectory(100, newDir, true); err != nil { - log.Fatal(err) - } - } else { - newDir = *flNewDir - } - - if len(*flOldDir) == 0 { - oldDir, err := ioutil.TempDir("", "docker-test-oldDir") - if err != nil { - log.Fatal(err) - } - defer os.RemoveAll(oldDir) - } else { - oldDir = *flOldDir - } - - changes, err := archive.ChangesDirs(newDir, oldDir) - if err != nil { - log.Fatal(err) - } - - a, err := archive.ExportChanges(newDir, changes) - if err != nil { - log.Fatal(err) - } - defer a.Close() - - i, err := io.Copy(os.Stdout, a) - if err != nil && err != io.EOF { - log.Fatal(err) - } - fmt.Fprintf(os.Stderr, "wrote archive of %d bytes", i) -} - -func prepareUntarSourceDirectory(numberOfFiles int, targetPath string, makeLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(path.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeLinks { - if err := os.Link(path.Join(targetPath, fileName), path.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} diff --git a/pkg/archive/testdata/broken.tar b/pkg/archive/testdata/broken.tar deleted file mode 100644 
index 8f10ea6b87d3eb4fed572349dfe87695603b10a5..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 13824 zcmeHN>rxv>7UtLfn5Q@^l8gXrG&7O_li)0oQBai)6v9rjo&-ixOPXbFo(r;aaqT1Q zi|o_vJM6y3ey8Um2^?(fm~vH66==Hq^tqqYr_U$~f~3CkaX-4=)VFkfMbAE0zj=1W zFdGeXOK)!KtrgwSO|!8=t&huAhCPiFI|54|O6#g{AByje_D5`gZ4lbN_tD%y+P?+6 zW}mCyJbT6dM$<6v?SB_8uxS5j5M6u>C%C=+&BoS!{NIK7SFYLLXgq9fL;u??&1{)C_QVb?f0pB4xfD_C1pX2f z=LE&>$4O)llEszRik&8tAi~^>9~IXb2tQsXkop&XF!hz8gWXO)O@R9>nS~7H1w&*U zWf1ryXPidjED|qMClc|F!YuB;N}eT-8}IBqwJ!w!F&$m$r;a;(N7!YIEb7h<=ej}& zT~f;Cd!ZOC&mX2n zv4)UvkOa{z8}jxVC6bTq+3^R;Sok8c6EQsN&k9^`&h(Hc32JVwt-Hrj<{`vG3V< zCk?#){6BW>!9@+(L2u}{Jos}CZh!u_HaA;$dH(--^ZzaF-*=tS5&i^O)@Me!3BwBQ`@=VE zIl)Fp0MG z@%2K`G+^8HA?T&;xGZB%_q<@Vt&(_!w-gfXxk@mb9|fb)1BuBGk_ptuvx%G~pq0Kb zb&?6Szj_3#ClOiI_3vu1e+mOX z9k`Og2B5RmN7LGZ)c;3%E%Ip__9KKUf&G&zD9jkJNr-{ibNby{ds> zUrSU_0z^Wf<)}gE{Jb22kgArW_I#nO79{eFvL6rZP*4oJ7H%7}fn5i&1ZT@5hDK4~ z(U`5S#`Fws86Z{2P=gP6usiI=mKaOr@4W|(?6Ye5$Oayf(LUxEb zaN*HO8gZBg{sZJ1)pg4>36^kmC*dQ2;oE@^#)cw_*aI^!cM=y1Rqga(?Ey`Mja44@ zco?Vs7`J_y5ir%m6vXp*y&Gb{4lfBvR0R>wjxNBA^zHAzdc;~eK6(s=AB|{$OM8p} zp9LwiIkAyG5Q$+F3`7h$CPJbL(j-h1h61!ZViYo4dBXOg@lop12w4VYz!&$vL+Po-n0lE6B8Y;6$Ar89(FQ zU43m0VVC)g+}A0GY(H3=vGXH;5|6sFnZk+NN-WF&+)64KnDBNmlR?P<{j247c6ZGs zY`hF!K4&Hi(0r~#=6sH0f#>;~|6uT_GuPArovwt~PT&t2-pNh;x9aMe7i;!lK!(<$ z?d`g5*7a@bJ?(y(Y4ln98)|Cinp8V=gdKs-N$TT&k8N344C6y&*H}a~{9Pg&%cB8( zs3gwCMEH-=;aI?u+)#>TQj}R!`jyO-QsK*KZS|lK9+9#7oV0B(la+@sRbyfJf~*mY z#+u;OA2B@66aq^nOW6`=t5qYdRV{oFkE8T+GhJI-*NldTtcr!I|PQf({z2i zZs;`}x~m6ks)bXh@+($$(s>pJ`5X6~16{UfoJC(mW1b(MtJcpN$ZBT3r1B`&Cx9{-iF=!{A}z(ob033DW~d!*9$cfm zVNC%z6l$8Qz0LiPv&`A!8a*yd3zi-in+*e-!2$MiQNyE>1xX!65{vsnGKkf9!|0+OGBAb= z5*&U!Rl91sZq^%6Di#9<<87G)rv;99!{p6oE&}gq)LXeeJT)kYlsjz{ehkbMY(O`q zGvc6vviAh-6>EFt+I|*)$Z&%o;(ob2LAmI= zd);1Ux&vAHF3sW+ZYtInM5`7V!gWe@@A3}gzBN4OzKHcFXhsnBZ62vkM}c;c8?C16|}T)I>F_`E4y<`7O_Uv z_IIGuK3}j6k8x0(NE^)|N^6ztuoF5wcqyCPP4-b>1H5)kQM(q_kYzo37tjs2w1@@5 z)pou5q*BNKlggS#-4TOxF*--bZwQgZIP>8>Wh4R6qJg1trGj7P+M9C-U$bgV0-Bbc zM}8SyaI1`5o3Hn=gK~dij~yq2v7>PXETRIqq!En36W>+P9az*N;)5;FK054lzkPPH zcY4hR*Orc{l5us$Y*nZ!(@__9wdDn6|B~BL+;v!B^Cr(N`)UtH54-56s#rGO&e@Q}~KNYPdQ94MZxA|gP9PSIqe@Ff$9bNNvws)xH zUYfZ#^MIJly?f4ly_CL`QQoB~o&>3jKAlL=*#tHX$;*%#;^sVnJHGU0={L0dh$?du z$V*u|2o=sbG6HQV;$?~-5Xh?Gjf~m#{@1wY+1@T!Us<#xZ;2Rn{Y@!B=|jZ;TY#GL zQet9G=4h_z5?#7$NWf6BJyZ3f$1aFp02S_lpyVtB;|niLX54VbZP`xU1YMSiGnf#! 
zBhWBJBLfCg3eCtIG~av^x3Yo4twnBx#0a&E>6G9&~+z{;Wn%CtG>DYD1(pjqYiYL oJsf9Rk?Q4-IWqA2mih3}{ZBUT=3UD@m3s}`Yv5i3pOOat4?XSI`2YX_ diff --git a/pkg/archive/time_linux.go b/pkg/archive/time_linux.go deleted file mode 100644 index 3448569b1e..0000000000 --- a/pkg/archive/time_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - if time.IsZero() { - // Return UTIME_OMIT special value - ts.Sec = 0 - ts.Nsec = ((1 << 30) - 2) - return - } - return syscall.NsecToTimespec(time.UnixNano()) -} diff --git a/pkg/archive/time_unsupported.go b/pkg/archive/time_unsupported.go deleted file mode 100644 index e85aac0540..0000000000 --- a/pkg/archive/time_unsupported.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !linux - -package archive - -import ( - "syscall" - "time" -) - -func timeToTimespec(time time.Time) (ts syscall.Timespec) { - nsec := int64(0) - if !time.IsZero() { - nsec = time.UnixNano() - } - return syscall.NsecToTimespec(nsec) -} diff --git a/pkg/archive/utils_test.go b/pkg/archive/utils_test.go deleted file mode 100644 index 98719032f3..0000000000 --- a/pkg/archive/utils_test.go +++ /dev/null @@ -1,166 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - "time" -) - -var testUntarFns = map[string]func(string, io.Reader) error{ - "untar": func(dest string, r io.Reader) error { - return Untar(r, dest, nil) - }, - "applylayer": func(dest string, r io.Reader) error { - _, err := ApplyLayer(dest, Reader(r)) - return err - }, -} - -// testBreakout is a helper function that, within the provided `tmpdir` directory, -// creates a `victim` folder with a generated `hello` file in it. -// `untar` extracts to a directory named `dest`, the tar file created from `headers`. -// -// Here are the tested scenarios: -// - removed `victim` folder (write) -// - removed files from `victim` folder (write) -// - new files in `victim` folder (write) -// - modified files in `victim` folder (write) -// - file in `dest` with same content as `victim/hello` (read) -// -// When using testBreakout make sure you cover one of the scenarios listed above. -func testBreakout(untarFn string, tmpdir string, headers []*tar.Header) error { - tmpdir, err := ioutil.TempDir("", tmpdir) - if err != nil { - return err - } - defer os.RemoveAll(tmpdir) - - dest := filepath.Join(tmpdir, "dest") - if err := os.Mkdir(dest, 0755); err != nil { - return err - } - - victim := filepath.Join(tmpdir, "victim") - if err := os.Mkdir(victim, 0755); err != nil { - return err - } - hello := filepath.Join(victim, "hello") - helloData, err := time.Now().MarshalText() - if err != nil { - return err - } - if err := ioutil.WriteFile(hello, helloData, 0644); err != nil { - return err - } - helloStat, err := os.Stat(hello) - if err != nil { - return err - } - - reader, writer := io.Pipe() - go func() { - t := tar.NewWriter(writer) - for _, hdr := range headers { - t.WriteHeader(hdr) - } - t.Close() - }() - - untar := testUntarFns[untarFn] - if untar == nil { - return fmt.Errorf("could not find untar function %q in testUntarFns", untarFn) - } - if err := untar(dest, reader); err != nil { - if _, ok := err.(breakoutError); !ok { - // If untar returns an error unrelated to an archive breakout, - // then consider this an unexpected error and abort. - return err - } - // Here, untar detected the breakout. - // Let's move on verifying that indeed there was no breakout. 
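A note on the timeToTimespec helpers shown earlier: the magic value (1<<30)-2 is Linux's UTIME_OMIT sentinel, which tells utimensat(2) to leave that timestamp unchanged. A quick Linux-only check, assuming the golang.org/x/sys/unix module is available:

    package main

    import (
        "fmt"

        "golang.org/x/sys/unix"
    )

    func main() {
        // UTIME_OMIT means "do not touch this timestamp" (atime or mtime).
        fmt.Println(unix.UTIME_OMIT == (1<<30)-2) // true
    }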
- fmt.Printf("breakoutError: %v\n", err) - } - - // Check victim folder - f, err := os.Open(victim) - if err != nil { - // codepath taken if victim folder was removed - return fmt.Errorf("archive breakout: error reading %q: %v", victim, err) - } - defer f.Close() - - // Check contents of victim folder - // - // We are only interested in getting 2 files from the victim folder, because if all is well - // we expect only one result, the `hello` file. If there is a second result, it cannot - // hold the same name `hello` and we assume that a new file got created in the victim folder. - // That is enough to detect an archive breakout. - names, err := f.Readdirnames(2) - if err != nil { - // codepath taken if victim is not a folder - return fmt.Errorf("archive breakout: error reading directory content of %q: %v", victim, err) - } - for _, name := range names { - if name != "hello" { - // codepath taken if new file was created in victim folder - return fmt.Errorf("archive breakout: new file %q", name) - } - } - - // Check victim/hello - f, err = os.Open(hello) - if err != nil { - // codepath taken if read permissions were removed - return fmt.Errorf("archive breakout: could not lstat %q: %v", hello, err) - } - defer f.Close() - b, err := ioutil.ReadAll(f) - if err != nil { - return err - } - fi, err := f.Stat() - if err != nil { - return err - } - if helloStat.IsDir() != fi.IsDir() || - // TODO: cannot check for fi.ModTime() change - helloStat.Mode() != fi.Mode() || - helloStat.Size() != fi.Size() || - !bytes.Equal(helloData, b) { - // codepath taken if hello has been modified - return fmt.Errorf("archive breakout: file %q has been modified. Contents: expected=%q, got=%q. FileInfo: expected=%#v, got=%#v", hello, helloData, b, helloStat, fi) - } - - // Check that nothing in dest/ has the same content as victim/hello. - // Since victim/hello was generated with time.Now(), it is safe to assume - // that any file whose content matches exactly victim/hello, managed somehow - // to access victim/hello. - return filepath.Walk(dest, func(path string, info os.FileInfo, err error) error { - if info.IsDir() { - if err != nil { - // skip directory if error - return filepath.SkipDir - } - // enter directory - return nil - } - if err != nil { - // skip file if error - return nil - } - b, err := ioutil.ReadFile(path) - if err != nil { - // Houston, we have a problem. Aborting (space)walk. - return err - } - if bytes.Equal(helloData, b) { - return fmt.Errorf("archive breakout: file %q has been accessed via %q", hello, path) - } - return nil - }) -} diff --git a/pkg/archive/whiteouts.go b/pkg/archive/whiteouts.go deleted file mode 100644 index d20478a10d..0000000000 --- a/pkg/archive/whiteouts.go +++ /dev/null @@ -1,23 +0,0 @@ -package archive - -// Whiteouts are files with a special meaning for the layered filesystem. -// Docker uses AUFS whiteout files inside exported archives. In other -// filesystems these files are generated/handled on tar creation/extraction. - -// WhiteoutPrefix prefix means file is a whiteout. If this is followed by a -// filename this means that file has been removed from the base layer. -const WhiteoutPrefix = ".wh." - -// WhiteoutMetaPrefix prefix means whiteout has a special meaning and is not -// for removing an actual file. Normally these files are excluded from exported -// archives. -const WhiteoutMetaPrefix = WhiteoutPrefix + WhiteoutPrefix - -// WhiteoutLinkDir is a directory AUFS uses for storing hardlink links to other -// layers. 
Normally these should not go into exported archives and all changed -// hardlinks should be copied to the top layer. -const WhiteoutLinkDir = WhiteoutMetaPrefix + "plnk" - -// WhiteoutOpaqueDir file means directory has been made opaque - meaning -// readdir calls to this directory do not follow to lower layers. -const WhiteoutOpaqueDir = WhiteoutMetaPrefix + ".opq" diff --git a/pkg/archive/wrap.go b/pkg/archive/wrap.go deleted file mode 100644 index dfb335c0b6..0000000000 --- a/pkg/archive/wrap.go +++ /dev/null @@ -1,59 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io/ioutil" -) - -// Generate generates a new archive from the content provided -// as input. -// -// `files` is a sequence of path/content pairs. A new file is -// added to the archive for each pair. -// If the last pair is incomplete, the file is created with an -// empty content. For example: -// -// Generate("foo.txt", "hello world", "emptyfile") -// -// The above call will return an archive with 2 files: -// * ./foo.txt with content "hello world" -// * ./empty with empty content -// -// FIXME: stream content instead of buffering -// FIXME: specify permissions and other archive metadata -func Generate(input ...string) (Archive, error) { - files := parseStringPairs(input...) - buf := new(bytes.Buffer) - tw := tar.NewWriter(buf) - for _, file := range files { - name, content := file[0], file[1] - hdr := &tar.Header{ - Name: name, - Size: int64(len(content)), - } - if err := tw.WriteHeader(hdr); err != nil { - return nil, err - } - if _, err := tw.Write([]byte(content)); err != nil { - return nil, err - } - } - if err := tw.Close(); err != nil { - return nil, err - } - return ioutil.NopCloser(buf), nil -} - -func parseStringPairs(input ...string) (output [][2]string) { - output = make([][2]string, 0, len(input)/2+1) - for i := 0; i < len(input); i += 2 { - var pair [2]string - pair[0] = input[i] - if i+1 < len(input) { - pair[1] = input[i+1] - } - output = append(output, pair) - } - return -} diff --git a/pkg/archive/wrap_test.go b/pkg/archive/wrap_test.go deleted file mode 100644 index 46ab36697a..0000000000 --- a/pkg/archive/wrap_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package archive - -import ( - "archive/tar" - "bytes" - "io" - "testing" -) - -func TestGenerateEmptyFile(t *testing.T) { - archive, err := Generate("emptyFile") - if err != nil { - t.Fatal(err) - } - if archive == nil { - t.Fatal("The generated archive should not be nil.") - } - - expectedFiles := [][]string{ - {"emptyFile", ""}, - } - - tr := tar.NewReader(archive) - actualFiles := make([][]string, 0, 10) - i := 0 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - buf := new(bytes.Buffer) - buf.ReadFrom(tr) - content := buf.String() - actualFiles = append(actualFiles, []string{hdr.Name, content}) - i++ - } - if len(actualFiles) != len(expectedFiles) { - t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) - } - for i := 0; i < len(expectedFiles); i++ { - actual := actualFiles[i] - expected := expectedFiles[i] - if actual[0] != expected[0] { - t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) - } - if actual[1] != expected[1] { - t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) - } - } -} - -func TestGenerateWithContent(t *testing.T) { - archive, err := Generate("file", "content") - if err != nil { - t.Fatal(err) - } - if archive == nil { - t.Fatal("The generated archive should not be nil.") - } - - 
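Generate (from wrap.go above) is useful beyond these tests for fabricating small fixture archives. A runnable sketch, assuming the pkg/archive import path, that builds a two-entry archive and lists it back:

    package main

    import (
        "archive/tar"
        "fmt"
        "io"

        "github.com/docker/docker/pkg/archive"
    )

    func main() {
        // An odd trailing name yields an empty file, per the Generate
        // doc comment.
        rc, err := archive.Generate("foo.txt", "hello world", "empty")
        if err != nil {
            panic(err)
        }
        tr := tar.NewReader(rc)
        for {
            hdr, err := tr.Next()
            if err == io.EOF {
                break
            }
            if err != nil {
                panic(err)
            }
            fmt.Println(hdr.Name, hdr.Size) // foo.txt 11, then empty 0
        }
    }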
expectedFiles := [][]string{ - {"file", "content"}, - } - - tr := tar.NewReader(archive) - actualFiles := make([][]string, 0, 10) - i := 0 - for { - hdr, err := tr.Next() - if err == io.EOF { - break - } - if err != nil { - t.Fatal(err) - } - buf := new(bytes.Buffer) - buf.ReadFrom(tr) - content := buf.String() - actualFiles = append(actualFiles, []string{hdr.Name, content}) - i++ - } - if len(actualFiles) != len(expectedFiles) { - t.Fatalf("Number of expected file %d, got %d.", len(expectedFiles), len(actualFiles)) - } - for i := 0; i < len(expectedFiles); i++ { - actual := actualFiles[i] - expected := expectedFiles[i] - if actual[0] != expected[0] { - t.Fatalf("Expected name '%s', Actual name '%s'", expected[0], actual[0]) - } - if actual[1] != expected[1] { - t.Fatalf("Expected content '%s', Actual content '%s'", expected[1], actual[1]) - } - } -} diff --git a/pkg/authorization/api.go b/pkg/authorization/api.go deleted file mode 100644 index fc82c46b01..0000000000 --- a/pkg/authorization/api.go +++ /dev/null @@ -1,54 +0,0 @@ -package authorization - -const ( - // AuthZApiRequest is the url for daemon request authorization - AuthZApiRequest = "AuthZPlugin.AuthZReq" - - // AuthZApiResponse is the url for daemon response authorization - AuthZApiResponse = "AuthZPlugin.AuthZRes" - - // AuthZApiImplements is the name of the interface all AuthZ plugins implement - AuthZApiImplements = "authz" -) - -// Request holds data required for authZ plugins -type Request struct { - // User holds the user extracted by AuthN mechanism - User string `json:"User,omitempty"` - - // UserAuthNMethod holds the mechanism used to extract user details (e.g., krb) - UserAuthNMethod string `json:"UserAuthNMethod,omitempty"` - - // RequestMethod holds the HTTP method (GET/POST/PUT) - RequestMethod string `json:"RequestMethod,omitempty"` - - // RequestUri holds the full HTTP uri (e.g., /v1.21/version) - RequestURI string `json:"RequestUri,omitempty"` - - // RequestBody stores the raw request body sent to the docker daemon - RequestBody []byte `json:"RequestBody,omitempty"` - - // RequestHeaders stores the raw request headers sent to the docker daemon - RequestHeaders map[string]string `json:"RequestHeaders,omitempty"` - - // ResponseStatusCode stores the status code returned from docker daemon - ResponseStatusCode int `json:"ResponseStatusCode,omitempty"` - - // ResponseBody stores the raw response body sent from docker daemon - ResponseBody []byte `json:"ResponseBody,omitempty"` - - // ResponseHeaders stores the response headers sent to the docker daemon - ResponseHeaders map[string]string `json:"ResponseHeaders,omitempty"` -} - -// Response represents authZ plugin response -type Response struct { - // Allow indicating whether the user is allowed or not - Allow bool `json:"Allow"` - - // Msg stores the authorization message - Msg string `json:"Msg,omitempty"` - - // Err stores a message in case there's an error - Err string `json:"Err,omitempty"` -} diff --git a/pkg/authorization/authz.go b/pkg/authorization/authz.go deleted file mode 100644 index 1f960289ad..0000000000 --- a/pkg/authorization/authz.go +++ /dev/null @@ -1,179 +0,0 @@ -package authorization - -import ( - "bufio" - "bytes" - "fmt" - "io" - "net/http" - "strings" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/ioutils" -) - -const maxBodySize = 1048576 // 1MB - -// NewCtx creates new authZ context, it is used to store authorization information related to a specific docker -// REST http session -// A context provides two method: -// 
Authenticate Request: -// Call authZ plugins with current REST request and AuthN response -// Request contains full HTTP packet sent to the docker daemon -// https://docs.docker.com/reference/api/docker_remote_api/ -// -// Authenticate Response: -// Call authZ plugins with full info about current REST request, REST response and AuthN response -// The response from this method may contain content that overrides the daemon response -// This allows authZ plugins to filter privileged content -// -// If multiple authZ plugins are specified, the block/allow decision is based on ANDing all plugin results -// For response manipulation, the response from each plugin is piped between plugins. Plugin execution order -// is determined according to daemon parameters -func NewCtx(authZPlugins []Plugin, user, userAuthNMethod, requestMethod, requestURI string) *Ctx { - return &Ctx{ - plugins: authZPlugins, - user: user, - userAuthNMethod: userAuthNMethod, - requestMethod: requestMethod, - requestURI: requestURI, - } -} - -// Ctx stores a single request-response interaction context -type Ctx struct { - user string - userAuthNMethod string - requestMethod string - requestURI string - plugins []Plugin - // authReq stores the cached request object for the current transaction - authReq *Request -} - -// AuthZRequest authorizes the request to the docker daemon using authZ plugins -func (ctx *Ctx) AuthZRequest(w http.ResponseWriter, r *http.Request) error { - var body []byte - if sendBody(ctx.requestURI, r.Header) && r.ContentLength > 0 && r.ContentLength < maxBodySize { - var err error - body, r.Body, err = drainBody(r.Body) - if err != nil { - return err - } - } - - var h bytes.Buffer - if err := r.Header.Write(&h); err != nil { - return err - } - - ctx.authReq = &Request{ - User: ctx.user, - UserAuthNMethod: ctx.userAuthNMethod, - RequestMethod: ctx.requestMethod, - RequestURI: ctx.requestURI, - RequestBody: body, - RequestHeaders: headers(r.Header), - } - - for _, plugin := range ctx.plugins { - logrus.Debugf("AuthZ request using plugin %s", plugin.Name()) - - authRes, err := plugin.AuthZRequest(ctx.authReq) - if err != nil { - return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) - } - - if !authRes.Allow { - return newAuthorizationError(plugin.Name(), authRes.Msg) - } - } - - return nil -} - -// AuthZResponse authorizes and manipulates the response from docker daemon using authZ plugins -func (ctx *Ctx) AuthZResponse(rm ResponseModifier, r *http.Request) error { - ctx.authReq.ResponseStatusCode = rm.StatusCode() - ctx.authReq.ResponseHeaders = headers(rm.Header()) - - if sendBody(ctx.requestURI, rm.Header()) { - ctx.authReq.ResponseBody = rm.RawBody() - } - - for _, plugin := range ctx.plugins { - logrus.Debugf("AuthZ response using plugin %s", plugin.Name()) - - authRes, err := plugin.AuthZResponse(ctx.authReq) - if err != nil { - return fmt.Errorf("plugin %s failed with error: %s", plugin.Name(), err) - } - - if !authRes.Allow { - return newAuthorizationError(plugin.Name(), authRes.Msg) - } - } - - rm.FlushAll() - - return nil -} - -// drainBody dumps the body (if its length is less than 1MB) without modifying the request state -func drainBody(body io.ReadCloser) ([]byte, io.ReadCloser, error) { - bufReader := bufio.NewReaderSize(body, maxBodySize) - newBody := ioutils.NewReadCloserWrapper(bufReader, func() error { return body.Close() }) - - data, err := bufReader.Peek(maxBodySize) - // Body size exceeds max body size - if err == nil { - logrus.Warnf("Request body is larger than: '%d' 
skipping body", maxBodySize) - return nil, newBody, nil - } - // Body size is less than maximum size - if err == io.EOF { - return data, newBody, nil - } - // Unknown error - return nil, newBody, err -} - -// sendBody returns true when request/response body should be sent to AuthZPlugin -func sendBody(url string, header http.Header) bool { - // Skip body for auth endpoint - if strings.HasSuffix(url, "/auth") { - return false - } - - // body is sent only for text or json messages - return header.Get("Content-Type") == "application/json" -} - -// headers returns a flattened version of the http headers excluding authorization -func headers(header http.Header) map[string]string { - v := make(map[string]string, 0) - for k, values := range header { - // Skip authorization headers - if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "X-Registry-Config") || strings.EqualFold(k, "X-Registry-Auth") { - continue - } - for _, val := range values { - v[k] = val - } - } - return v -} - -// authorizationError represents an authorization deny error -type authorizationError struct { - error -} - -// HTTPErrorStatusCode returns the authorization error status code (forbidden) -func (e authorizationError) HTTPErrorStatusCode() int { - return http.StatusForbidden -} - -func newAuthorizationError(plugin, msg string) authorizationError { - return authorizationError{error: fmt.Errorf("authorization denied by plugin %s: %s", plugin, msg)} -} diff --git a/pkg/authorization/authz_unix_test.go b/pkg/authorization/authz_unix_test.go deleted file mode 100644 index e13303f7a5..0000000000 --- a/pkg/authorization/authz_unix_test.go +++ /dev/null @@ -1,282 +0,0 @@ -// +build !windows - -// TODO Windows: This uses a Unix socket for testing. This might be possible -// to port to Windows using a named pipe instead. 
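For orientation, the plugin side of the protocol defined by api.go and authz.go above can be served by any HTTP endpoint that answers /Plugin.Activate plus the two AuthZPlugin routes with the JSON shapes shown. A minimal sketch; the deny policy, listen address, and the subset of Request fields are illustrative, not part of this package:

package main

import (
	"encoding/json"
	"net/http"
)

// A subset of the Request fields defined in api.go above.
type authZRequest struct {
	User          string `json:"User,omitempty"`
	RequestMethod string `json:"RequestMethod,omitempty"`
	RequestURI    string `json:"RequestUri,omitempty"`
}

// Mirrors the Response struct defined in api.go above.
type authZResponse struct {
	Allow bool   `json:"Allow"`
	Msg   string `json:"Msg,omitempty"`
	Err   string `json:"Err,omitempty"`
}

func main() {
	// Activation: tell the daemon this plugin implements the "authz" interface.
	http.HandleFunc("/Plugin.Activate", func(w http.ResponseWriter, r *http.Request) {
		json.NewEncoder(w).Encode(map[string][]string{"Implements": {"authz"}})
	})

	// Request-phase hook: block DELETE requests, allow everything else.
	http.HandleFunc("/AuthZPlugin.AuthZReq", func(w http.ResponseWriter, r *http.Request) {
		var req authZRequest
		if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
			http.Error(w, err.Error(), http.StatusBadRequest)
			return
		}
		res := authZResponse{Allow: true}
		if req.RequestMethod == "DELETE" {
			res = authZResponse{Allow: false, Msg: "DELETE is blocked by policy"}
		}
		json.NewEncoder(w).Encode(res)
	})

	// Response-phase hook: pass the daemon's response through unmodified.
	http.HandleFunc("/AuthZPlugin.AuthZRes", func(w http.ResponseWriter, r *http.Request) {
		json.NewEncoder(w).Encode(authZResponse{Allow: true})
	})

	// A real plugin would listen on a unix socket under /run/docker/plugins.
	http.ListenAndServe("127.0.0.1:8080", nil)
}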
- -package authorization - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net" - "net/http" - "net/http/httptest" - "os" - "path" - "reflect" - "strings" - "testing" - - "github.com/docker/docker/pkg/plugins" - "github.com/docker/go-connections/tlsconfig" - "github.com/gorilla/mux" -) - -const ( - pluginAddress = "authz-test-plugin.sock" -) - -func TestAuthZRequestPluginError(t *testing.T) { - server := authZPluginTestServer{t: t} - server.start() - defer server.stop() - - authZPlugin := createTestPlugin(t) - - request := Request{ - User: "user", - RequestBody: []byte("sample body"), - RequestURI: "www.authz.com/auth", - RequestMethod: "GET", - RequestHeaders: map[string]string{"header": "value"}, - } - server.replayResponse = Response{ - Err: "an error", - } - - actualResponse, err := authZPlugin.AuthZRequest(&request) - if err != nil { - t.Fatalf("Failed to authorize request %v", err) - } - - if !reflect.DeepEqual(server.replayResponse, *actualResponse) { - t.Fatal("Response must be equal") - } - if !reflect.DeepEqual(request, server.recordedRequest) { - t.Fatal("Requests must be equal") - } -} - -func TestAuthZRequestPlugin(t *testing.T) { - server := authZPluginTestServer{t: t} - server.start() - defer server.stop() - - authZPlugin := createTestPlugin(t) - - request := Request{ - User: "user", - RequestBody: []byte("sample body"), - RequestURI: "www.authz.com/auth", - RequestMethod: "GET", - RequestHeaders: map[string]string{"header": "value"}, - } - server.replayResponse = Response{ - Allow: true, - Msg: "Sample message", - } - - actualResponse, err := authZPlugin.AuthZRequest(&request) - if err != nil { - t.Fatalf("Failed to authorize request %v", err) - } - - if !reflect.DeepEqual(server.replayResponse, *actualResponse) { - t.Fatal("Response must be equal") - } - if !reflect.DeepEqual(request, server.recordedRequest) { - t.Fatal("Requests must be equal") - } -} - -func TestAuthZResponsePlugin(t *testing.T) { - server := authZPluginTestServer{t: t} - server.start() - defer server.stop() - - authZPlugin := createTestPlugin(t) - - request := Request{ - User: "user", - RequestURI: "someting.com/auth", - RequestBody: []byte("sample body"), - } - server.replayResponse = Response{ - Allow: true, - Msg: "Sample message", - } - - actualResponse, err := authZPlugin.AuthZResponse(&request) - if err != nil { - t.Fatalf("Failed to authorize request %v", err) - } - - if !reflect.DeepEqual(server.replayResponse, *actualResponse) { - t.Fatal("Response must be equal") - } - if !reflect.DeepEqual(request, server.recordedRequest) { - t.Fatal("Requests must be equal") - } -} - -func TestResponseModifier(t *testing.T) { - r := httptest.NewRecorder() - m := NewResponseModifier(r) - m.Header().Set("h1", "v1") - m.Write([]byte("body")) - m.WriteHeader(500) - - m.FlushAll() - if r.Header().Get("h1") != "v1" { - t.Fatalf("Header value must exists %s", r.Header().Get("h1")) - } - if !reflect.DeepEqual(r.Body.Bytes(), []byte("body")) { - t.Fatalf("Body value must exists %s", r.Body.Bytes()) - } - if r.Code != 500 { - t.Fatalf("Status code must be correct %d", r.Code) - } -} - -func TestDrainBody(t *testing.T) { - tests := []struct { - length int // length is the message length send to drainBody - expectedBodyLength int // expectedBodyLength is the expected body length after drainBody is called - }{ - {10, 10}, // Small message size - {maxBodySize - 1, maxBodySize - 1}, // Max message size - {maxBodySize * 2, 0}, // Large message size (skip copying body) - - } - - for _, test := range tests { - msg := 
strings.Repeat("a", test.length) - body, closer, err := drainBody(ioutil.NopCloser(bytes.NewReader([]byte(msg)))) - if err != nil { - t.Fatal(err) - } - if len(body) != test.expectedBodyLength { - t.Fatalf("Body must be copied, actual length: '%d'", len(body)) - } - if closer == nil { - t.Fatal("Closer must not be nil") - } - modified, err := ioutil.ReadAll(closer) - if err != nil { - t.Fatalf("Error must not be nil: '%v'", err) - } - if len(modified) != len(msg) { - t.Fatalf("Result should not be truncated. Original length: '%d', new length: '%d'", len(msg), len(modified)) - } - } -} - -func TestResponseModifierOverride(t *testing.T) { - r := httptest.NewRecorder() - m := NewResponseModifier(r) - m.Header().Set("h1", "v1") - m.Write([]byte("body")) - m.WriteHeader(500) - - overrideHeader := make(http.Header) - overrideHeader.Add("h1", "v2") - overrideHeaderBytes, err := json.Marshal(overrideHeader) - if err != nil { - t.Fatalf("override header failed %v", err) - } - - m.OverrideHeader(overrideHeaderBytes) - m.OverrideBody([]byte("override body")) - m.OverrideStatusCode(404) - m.FlushAll() - if r.Header().Get("h1") != "v2" { - t.Fatalf("Header value must exists %s", r.Header().Get("h1")) - } - if !reflect.DeepEqual(r.Body.Bytes(), []byte("override body")) { - t.Fatalf("Body value must exists %s", r.Body.Bytes()) - } - if r.Code != 404 { - t.Fatalf("Status code must be correct %d", r.Code) - } -} - -// createTestPlugin creates a new sample authorization plugin -func createTestPlugin(t *testing.T) *authorizationPlugin { - pwd, err := os.Getwd() - if err != nil { - t.Fatal(err) - } - - client, err := plugins.NewClient("unix:///"+path.Join(pwd, pluginAddress), &tlsconfig.Options{InsecureSkipVerify: true}) - if err != nil { - t.Fatalf("Failed to create client %v", err) - } - - return &authorizationPlugin{name: "plugin", plugin: client} -} - -// authZPluginTestServer is a simple server that implements the authZ plugin interface -type authZPluginTestServer struct { - listener net.Listener - t *testing.T - // recordedRequest stores the request sent from the daemon to the plugin - recordedRequest Request - // replayResponse stores the response sent from the plugin to the daemon - replayResponse Response - server *httptest.Server -} - -// start starts the test server that implements the plugin -func (t *authZPluginTestServer) start() { - r := mux.NewRouter() - l, err := net.Listen("unix", pluginAddress) - if err != nil { - t.t.Fatal(err) - } - t.listener = l - r.HandleFunc("/Plugin.Activate", t.activate) - r.HandleFunc("/"+AuthZApiRequest, t.auth) - r.HandleFunc("/"+AuthZApiResponse, t.auth) - t.server = &httptest.Server{ - Listener: l, - Config: &http.Server{ - Handler: r, - Addr: pluginAddress, - }, - } - t.server.Start() -} - -// stop stops the test server that implements the plugin -func (t *authZPluginTestServer) stop() { - t.server.Close() - os.Remove(pluginAddress) - if t.listener != nil { - t.listener.Close() - } -} - -// auth is used to record/replay the authentication api messages -func (t *authZPluginTestServer) auth(w http.ResponseWriter, r *http.Request) { - t.recordedRequest = Request{} - body, err := ioutil.ReadAll(r.Body) - if err != nil { - t.t.Fatal(err) - } - r.Body.Close() - json.Unmarshal(body, &t.recordedRequest) - b, err := json.Marshal(t.replayResponse) - if err != nil { - t.t.Fatal(err) - } - w.Write(b) -} - -func (t *authZPluginTestServer) activate(w http.ResponseWriter, r *http.Request) { - b, err := json.Marshal(plugins.Manifest{Implements: []string{AuthZApiImplements}}) - if err != 
nil { - t.t.Fatal(err) - } - w.Write(b) -} diff --git a/pkg/authorization/middleware.go b/pkg/authorization/middleware.go deleted file mode 100644 index 73511a8148..0000000000 --- a/pkg/authorization/middleware.go +++ /dev/null @@ -1,60 +0,0 @@ -package authorization - -import ( - "net/http" - - "github.com/Sirupsen/logrus" - "golang.org/x/net/context" -) - -// Middleware uses a list of plugins to -// handle authorization in the API requests. -type Middleware struct { - plugins []Plugin -} - -// NewMiddleware creates a new Middleware -// with a slice of plugins. -func NewMiddleware(p []Plugin) Middleware { - return Middleware{ - plugins: p, - } -} - -// WrapHandler returns a new handler function wrapping the previous one in the request chain. -func (m Middleware) WrapHandler(handler func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error) func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - return func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error { - - user := "" - userAuthNMethod := "" - - // Default authorization using existing TLS connection credentials - // FIXME: Non trivial authorization mechanisms (such as advanced certificate validations, kerberos support - // and ldap) will be extracted using AuthN feature, which is tracked under: - // https://github.com/docker/docker/pull/20883 - if r.TLS != nil && len(r.TLS.PeerCertificates) > 0 { - user = r.TLS.PeerCertificates[0].Subject.CommonName - userAuthNMethod = "TLS" - } - - authCtx := NewCtx(m.plugins, user, userAuthNMethod, r.Method, r.RequestURI) - - if err := authCtx.AuthZRequest(w, r); err != nil { - logrus.Errorf("AuthZRequest for %s %s returned error: %s", r.Method, r.RequestURI, err) - return err - } - - rw := NewResponseModifier(w) - - if err := handler(ctx, rw, r, vars); err != nil { - logrus.Errorf("Handler for %s %s returned error: %s", r.Method, r.RequestURI, err) - return err - } - - if err := authCtx.AuthZResponse(rw, r); err != nil { - logrus.Errorf("AuthZResponse for %s %s returned error: %s", r.Method, r.RequestURI, err) - return err - } - return nil - } -} diff --git a/pkg/authorization/plugin.go b/pkg/authorization/plugin.go deleted file mode 100644 index fc5c7efb4b..0000000000 --- a/pkg/authorization/plugin.go +++ /dev/null @@ -1,92 +0,0 @@ -package authorization - -import ( - "sync" - - "github.com/docker/docker/pkg/plugins" -) - -// Plugin allows third party plugins to authorize requests and responses -// in the context of docker API -type Plugin interface { - // Name returns the registered plugin name - Name() string - - // AuthZRequest authorizes the request from the client to the daemon - AuthZRequest(*Request) (*Response, error) - - // AuthZResponse authorizes the response from the daemon to the client - AuthZResponse(*Request) (*Response, error) -} - -// NewPlugins constructs and initializes the authorization plugins based on plugin names -func NewPlugins(names []string) []Plugin { - plugins := []Plugin{} - pluginsMap := make(map[string]struct{}) - for _, name := range names { - if _, ok := pluginsMap[name]; ok { - continue - } - pluginsMap[name] = struct{}{} - plugins = append(plugins, newAuthorizationPlugin(name)) - } - return plugins -} - -// authorizationPlugin is an internal adapter to docker plugin system -type authorizationPlugin struct { - plugin *plugins.Client - name string - once sync.Once -} - -func newAuthorizationPlugin(name string) Plugin { - return 
&authorizationPlugin{name: name} -} - -func (a *authorizationPlugin) Name() string { - return a.name -} - -func (a *authorizationPlugin) AuthZRequest(authReq *Request) (*Response, error) { - if err := a.initPlugin(); err != nil { - return nil, err - } - - authRes := &Response{} - if err := a.plugin.Call(AuthZApiRequest, authReq, authRes); err != nil { - return nil, err - } - - return authRes, nil -} - -func (a *authorizationPlugin) AuthZResponse(authReq *Request) (*Response, error) { - if err := a.initPlugin(); err != nil { - return nil, err - } - - authRes := &Response{} - if err := a.plugin.Call(AuthZApiResponse, authReq, authRes); err != nil { - return nil, err - } - - return authRes, nil -} - -// initPlugin initializes the authorization plugin if needed -func (a *authorizationPlugin) initPlugin() error { - // Lazy loading of plugins - var err error - a.once.Do(func() { - if a.plugin == nil { - plugin, e := plugins.Get(a.name, AuthZApiImplements) - if e != nil { - err = e - return - } - a.plugin = plugin.Client() - } - }) - return err -} diff --git a/pkg/authorization/response.go b/pkg/authorization/response.go deleted file mode 100644 index f7ce7364ce..0000000000 --- a/pkg/authorization/response.go +++ /dev/null @@ -1,203 +0,0 @@ -package authorization - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "net" - "net/http" - - "github.com/Sirupsen/logrus" -) - -// ResponseModifier allows authorization plugins to read and modify the content of the http.response -type ResponseModifier interface { - http.ResponseWriter - http.Flusher - http.CloseNotifier - - // RawBody returns the current http content - RawBody() []byte - - // RawHeaders returns the current content of the http headers - RawHeaders() ([]byte, error) - - // StatusCode returns the current status code - StatusCode() int - - // OverrideBody replaces the body of the HTTP reply - OverrideBody(b []byte) - - // OverrideHeader replaces the headers of the HTTP reply - OverrideHeader(b []byte) error - - // OverrideStatusCode replaces the status code of the HTTP reply - OverrideStatusCode(statusCode int) - - // FlushAll flushes all data to the HTTP response - FlushAll() error - - // Hijacked indicates the response has been hijacked by the Docker daemon - Hijacked() bool -} - -// NewResponseModifier creates a wrapper to an http.ResponseWriter to allow inspecting and modifying the content -func NewResponseModifier(rw http.ResponseWriter) ResponseModifier { - return &responseModifier{rw: rw, header: make(http.Header)} -} - -// responseModifier is used as an adapter to http.ResponseWriter in order to manipulate and explore -// the http request/response from docker daemon -type responseModifier struct { - // The original response writer - rw http.ResponseWriter - // body holds the response body - body []byte - // header holds the response header - header http.Header - // statusCode holds the response status code - statusCode int - // hijacked indicates the request has been hijacked - hijacked bool -} - -func (rm *responseModifier) Hijacked() bool { - return rm.hijacked -} - -// WriteHeader stores the http status code -func (rm *responseModifier) WriteHeader(s int) { - - // Use original request if hijacked - if rm.hijacked { - rm.rw.WriteHeader(s) - return - } - - rm.statusCode = s -} - -// Header returns the internal http header -func (rm *responseModifier) Header() http.Header { - - // Use original header if hijacked - if rm.hijacked { - return rm.rw.Header() - } - - return rm.header -} - -// StatusCode returns the http status code 
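Stepping back from the plumbing for a moment: the NewPlugins, NewMiddleware, and WrapHandler pieces above compose on the daemon side roughly as follows. A sketch assuming the pre-move import path github.com/docker/docker/pkg/authorization; the plugin name and the stand-in handler are illustrative:

package main

import (
	"net/http"

	"github.com/docker/docker/pkg/authorization"
	"golang.org/x/net/context"
)

func main() {
	// Resolve the configured plugin names into lazily-initialized clients.
	mw := authorization.NewMiddleware(authorization.NewPlugins([]string{"sample-authz-plugin"}))

	// A stand-in for one of the daemon's API handlers.
	versionHandler := func(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
		_, err := w.Write([]byte(`{"Version":"1.12"}`))
		return err
	}

	// WrapHandler runs AuthZRequest before, and AuthZResponse after, the
	// wrapped handler, buffering the response through a ResponseModifier.
	wrapped := mw.WrapHandler(versionHandler)
	_ = wrapped // registered on the daemon's router in real code
}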
-func (rm *responseModifier) StatusCode() int { - return rm.statusCode -} - -// OverrideBody replaces the body of the HTTP response -func (rm *responseModifier) OverrideBody(b []byte) { - rm.body = b -} - -// OverrideStatusCode replaces the status code of the HTTP response -func (rm *responseModifier) OverrideStatusCode(statusCode int) { - rm.statusCode = statusCode -} - -// OverrideHeader replaces the headers of the HTTP response -func (rm *responseModifier) OverrideHeader(b []byte) error { - header := http.Header{} - if err := json.Unmarshal(b, &header); err != nil { - return err - } - rm.header = header - return nil -} - -// Write stores the byte array inside content -func (rm *responseModifier) Write(b []byte) (int, error) { - - if rm.hijacked { - return rm.rw.Write(b) - } - - rm.body = append(rm.body, b...) - return len(b), nil -} - -// RawBody returns the response body -func (rm *responseModifier) RawBody() []byte { - return rm.body -} - -func (rm *responseModifier) RawHeaders() ([]byte, error) { - var b bytes.Buffer - if err := rm.header.Write(&b); err != nil { - return nil, err - } - return b.Bytes(), nil -} - -// Hijack returns the internal connection of the wrapped http.ResponseWriter -func (rm *responseModifier) Hijack() (net.Conn, *bufio.ReadWriter, error) { - - rm.hijacked = true - rm.FlushAll() - - hijacker, ok := rm.rw.(http.Hijacker) - if !ok { - return nil, nil, fmt.Errorf("Internal response writer doesn't support the Hijacker interface") - } - return hijacker.Hijack() -} - -// CloseNotify uses the internal close notify API of the wrapped http.ResponseWriter -func (rm *responseModifier) CloseNotify() <-chan bool { - closeNotifier, ok := rm.rw.(http.CloseNotifier) - if !ok { - logrus.Error("Internal response writer doesn't support the CloseNotifier interface") - return nil - } - return closeNotifier.CloseNotify() -} - -// Flush uses the internal flush API of the wrapped http.ResponseWriter -func (rm *responseModifier) Flush() { - flusher, ok := rm.rw.(http.Flusher) - if !ok { - logrus.Error("Internal response writer doesn't support the Flusher interface") - return - } - - rm.FlushAll() - flusher.Flush() -} - -// FlushAll flushes all data to the HTTP response -func (rm *responseModifier) FlushAll() error { - // Copy the header - for k, vv := range rm.header { - for _, v := range vv { - rm.rw.Header().Add(k, v) - } - } - - // Copy the status code - // Also WriteHeader needs to be done after all the headers - // have been copied (above). - if rm.statusCode > 0 { - rm.rw.WriteHeader(rm.statusCode) - } - - var err error - if len(rm.body) > 0 { - // Write body - _, err = rm.rw.Write(rm.body) - } - - // Clean previous data - rm.body = nil - rm.statusCode = 0 - rm.header = http.Header{} - return err -} diff --git a/pkg/broadcaster/unbuffered.go b/pkg/broadcaster/unbuffered.go deleted file mode 100644 index 784d65d6fe..0000000000 --- a/pkg/broadcaster/unbuffered.go +++ /dev/null @@ -1,49 +0,0 @@ -package broadcaster - -import ( - "io" - "sync" -) - -// Unbuffered accumulates multiple io.WriteCloser by stream. -type Unbuffered struct { - mu sync.Mutex - writers []io.WriteCloser -} - -// Add adds new io.WriteCloser. -func (w *Unbuffered) Add(writer io.WriteCloser) { - w.mu.Lock() - w.writers = append(w.writers, writer) - w.mu.Unlock() -} - -// Write writes bytes to all writers. Failed writers will be evicted during -// this call. 
-func (w *Unbuffered) Write(p []byte) (n int, err error) { - w.mu.Lock() - var evict []int - for i, sw := range w.writers { - if n, err := sw.Write(p); err != nil || n != len(p) { - // On error, evict the writer - evict = append(evict, i) - } - } - for n, i := range evict { - w.writers = append(w.writers[:i-n], w.writers[i-n+1:]...) - } - w.mu.Unlock() - return len(p), nil -} - -// Clean closes and removes all writers. -func (w *Unbuffered) Clean() error { - w.mu.Lock() - for _, sw := range w.writers { - sw.Close() - } - w.writers = nil - w.mu.Unlock() - return nil -} diff --git a/pkg/broadcaster/unbuffered_test.go b/pkg/broadcaster/unbuffered_test.go deleted file mode 100644 index 9f8e72bc0f..0000000000 --- a/pkg/broadcaster/unbuffered_test.go +++ /dev/null @@ -1,162 +0,0 @@ -package broadcaster - -import ( - "bytes" - "errors" - "strings" - - "testing" -) - -type dummyWriter struct { - buffer bytes.Buffer - failOnWrite bool -} - -func (dw *dummyWriter) Write(p []byte) (n int, err error) { - if dw.failOnWrite { - return 0, errors.New("Fake fail") - } - return dw.buffer.Write(p) -} - -func (dw *dummyWriter) String() string { - return dw.buffer.String() -} - -func (dw *dummyWriter) Close() error { - return nil -} - -func TestUnbuffered(t *testing.T) { - writer := new(Unbuffered) - - // Test 1: Both bufferA and bufferB should contain "foo" - bufferA := &dummyWriter{} - writer.Add(bufferA) - bufferB := &dummyWriter{} - writer.Add(bufferB) - writer.Write([]byte("foo")) - - if bufferA.String() != "foo" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - - if bufferB.String() != "foo" { - t.Errorf("Buffer contains %v", bufferB.String()) - } - - // Test2: bufferA and bufferB should contain "foobar", - // while bufferC should only contain "bar" - bufferC := &dummyWriter{} - writer.Add(bufferC) - writer.Write([]byte("bar")) - - if bufferA.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - - if bufferB.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferB.String()) - } - - if bufferC.String() != "bar" { - t.Errorf("Buffer contains %v", bufferC.String()) - } - - // Test3: Test eviction on failure - bufferA.failOnWrite = true - writer.Write([]byte("fail")) - if bufferA.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - if bufferC.String() != "barfail" { - t.Errorf("Buffer contains %v", bufferC.String()) - } - // Even though we reset the flag, no more writes should go in there - bufferA.failOnWrite = false - writer.Write([]byte("test")) - if bufferA.String() != "foobar" { - t.Errorf("Buffer contains %v", bufferA.String()) - } - if bufferC.String() != "barfailtest" { - t.Errorf("Buffer contains %v", bufferC.String()) - } - - // Test4: Test eviction on multiple simultaneous failures - bufferB.failOnWrite = true - bufferC.failOnWrite = true - bufferD := &dummyWriter{} - writer.Add(bufferD) - writer.Write([]byte("yo")) - writer.Write([]byte("ink")) - if strings.Contains(bufferB.String(), "yoink") { - t.Errorf("bufferB received write. contents: %q", bufferB) - } - if strings.Contains(bufferC.String(), "yoink") { - t.Errorf("bufferC received write. 
contents: %q", bufferC) - } - if g, w := bufferD.String(), "yoink"; g != w { - t.Errorf("bufferD = %q, want %q", g, w) - } - - writer.Clean() -} - -type devNullCloser int - -func (d devNullCloser) Close() error { - return nil -} - -func (d devNullCloser) Write(buf []byte) (int, error) { - return len(buf), nil -} - -// This test checks for races. It is only useful when run with the race detector. -func TestRaceUnbuffered(t *testing.T) { - writer := new(Unbuffered) - c := make(chan bool) - go func() { - writer.Add(devNullCloser(0)) - c <- true - }() - writer.Write([]byte("hello")) - <-c -} - -func BenchmarkUnbuffered(b *testing.B) { - writer := new(Unbuffered) - setUpWriter := func() { - for i := 0; i < 100; i++ { - writer.Add(devNullCloser(0)) - writer.Add(devNullCloser(0)) - writer.Add(devNullCloser(0)) - } - } - testLine := "Line that thinks that it is log line from docker" - var buf bytes.Buffer - for i := 0; i < 100; i++ { - buf.Write([]byte(testLine + "\n")) - } - // line without eol - buf.Write([]byte(testLine)) - testText := buf.Bytes() - b.SetBytes(int64(5 * len(testText))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - setUpWriter() - b.StartTimer() - - for j := 0; j < 5; j++ { - if _, err := writer.Write(testText); err != nil { - b.Fatal(err) - } - } - - b.StopTimer() - writer.Clean() - b.StartTimer() - } -} diff --git a/pkg/chrootarchive/archive.go b/pkg/chrootarchive/archive.go deleted file mode 100644 index a7814f5b90..0000000000 --- a/pkg/chrootarchive/archive.go +++ /dev/null @@ -1,97 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/idtools" -) - -var chrootArchiver = &archive.Archiver{Untar: Untar} - -// Untar reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive may be compressed with one of the following algorithms: -// identity (uncompressed), gzip, bzip2, xz. -func Untar(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, true) -} - -// UntarUncompressed reads a stream of bytes from `archive`, parses it as a tar archive, -// and unpacks it into the directory at `dest`. -// The archive must be an uncompressed stream. 
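Before moving on: the Unbuffered broadcaster above is a fan-out io.Writer whose Write never reports failure to the producer; destinations that error are silently evicted, as the tests demonstrate. A minimal usage sketch, assuming the pre-move import path github.com/docker/docker/pkg/broadcaster:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/broadcaster"
)

func main() {
	w := new(broadcaster.Unbuffered)
	// *os.File satisfies io.WriteCloser, so both streams can be added directly.
	w.Add(os.Stdout)
	w.Add(os.Stderr)

	// Every registered writer receives the payload; a writer that fails is
	// evicted instead of surfacing an error to the producer.
	fmt.Fprintln(w, "container log line")

	// Clean closes and drops all registered writers (including stdout here).
	w.Clean()
}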
-func UntarUncompressed(tarArchive io.Reader, dest string, options *archive.TarOptions) error { - return untarHandler(tarArchive, dest, options, false) -} - -// Handler for teasing out the automatic decompression -func untarHandler(tarArchive io.Reader, dest string, options *archive.TarOptions, decompress bool) error { - - if tarArchive == nil { - return fmt.Errorf("Empty archive") - } - if options == nil { - options = &archive.TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - rootUID, rootGID, err := idtools.GetRootUIDGID(options.UIDMaps, options.GIDMaps) - if err != nil { - return err - } - - dest = filepath.Clean(dest) - if _, err := os.Stat(dest); os.IsNotExist(err) { - if err := idtools.MkdirAllNewAs(dest, 0755, rootUID, rootGID); err != nil { - return err - } - } - - r := ioutil.NopCloser(tarArchive) - if decompress { - decompressedArchive, err := archive.DecompressStream(tarArchive) - if err != nil { - return err - } - defer decompressedArchive.Close() - r = decompressedArchive - } - - return invokeUnpack(r, dest, options) -} - -// TarUntar is a convenience function which calls Tar and Untar, with the output of one piped into the other. -// If either Tar or Untar fails, TarUntar aborts and returns the error. -func TarUntar(src, dst string) error { - return chrootArchiver.TarUntar(src, dst) -} - -// CopyWithTar creates a tar archive of filesystem path `src`, and -// unpacks it at filesystem path `dst`. -// The archive is streamed directly with fixed buffering and no -// intermediary disk IO. -func CopyWithTar(src, dst string) error { - return chrootArchiver.CopyWithTar(src, dst) -} - -// CopyFileWithTar emulates the behavior of the 'cp' command-line -// for a single file. It copies a regular file from path `src` to -// path `dst`, and preserves all its metadata. -// -// If `dst` ends with a trailing slash '/' ('\' on Windows), the final -// destination path will be `dst/base(src)` or `dst\base(src)` -func CopyFileWithTar(src, dst string) (err error) { - return chrootArchiver.CopyFileWithTar(src, dst) -} - -// UntarPath is a convenience function which looks for an archive -// at filesystem path `src`, and unpacks it at `dst`. 
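A caller-side sketch for these convenience helpers, with illustrative paths and options; on Linux the calling binary must also let reexec take over in the re-executed "docker-untar" child, as the test file below does via reexec.Init:

package main

import (
	"os"

	"github.com/docker/docker/pkg/archive"
	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/reexec"
)

func main() {
	// Give the re-exec machinery a chance to run when this binary is
	// re-invoked as "docker-untar"; returns true in the child process.
	if reexec.Init() {
		return
	}

	// Unpack an on-disk tarball into a directory, excluding one pattern.
	f, err := os.Open("/tmp/layer.tar") // illustrative path
	if err != nil {
		panic(err)
	}
	defer f.Close()

	opts := &archive.TarOptions{ExcludePatterns: []string{"tmp/*"}}
	if err := chrootarchive.Untar(f, "/var/lib/demo/rootfs", opts); err != nil {
		panic(err)
	}

	// The trailing separator on the destination means "place the file inside
	// this directory", i.e. /etc/demo/conf.d/app.conf on Linux.
	if err := chrootarchive.CopyFileWithTar("/tmp/app.conf", "/etc/demo/conf.d/"); err != nil {
		panic(err)
	}
}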
-func UntarPath(src, dst string) error { - return chrootArchiver.UntarPath(src, dst) -} diff --git a/pkg/chrootarchive/archive_test.go b/pkg/chrootarchive/archive_test.go deleted file mode 100644 index 5fbe20843f..0000000000 --- a/pkg/chrootarchive/archive_test.go +++ /dev/null @@ -1,394 +0,0 @@ -package chrootarchive - -import ( - "bytes" - "fmt" - "hash/crc32" - "io" - "io/ioutil" - "os" - "path/filepath" - "runtime" - "strings" - "testing" - "time" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/system" -) - -func init() { - reexec.Init() -} - -func TestChrootTarUntar(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntar") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(src, "lolo"), []byte("hello lolo"), 0644); err != nil { - t.Fatal(err) - } - stream, err := archive.Tar(src, archive.Uncompressed) - if err != nil { - t.Fatal(err) - } - dest := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(dest, 0700); err != nil { - t.Fatal(err) - } - if err := Untar(stream, dest, &archive.TarOptions{ExcludePatterns: []string{"lolo"}}); err != nil { - t.Fatal(err) - } -} - -// gh#10426: Verify the fix for having a huge excludes list (like on `docker load` with large # of -// local images) -func TestChrootUntarWithHugeExcludesList(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarHugeExcludes") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(src, "toto"), []byte("hello toto"), 0644); err != nil { - t.Fatal(err) - } - stream, err := archive.Tar(src, archive.Uncompressed) - if err != nil { - t.Fatal(err) - } - dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { - t.Fatal(err) - } - options := &archive.TarOptions{} - //65534 entries of 64-byte strings ~= 4MB of environment space which should overflow - //on most systems when passed via environment or command line arguments - excludes := make([]string, 65534, 65534) - for i := 0; i < 65534; i++ { - excludes[i] = strings.Repeat(string(i), 64) - } - options.ExcludePatterns = excludes - if err := Untar(stream, dest, options); err != nil { - t.Fatal(err) - } -} - -func TestChrootUntarEmptyArchive(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchive") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := Untar(nil, tmpdir, nil); err == nil { - t.Fatal("expected error on empty archive") - } -} - -func prepareSourceDirectory(numberOfFiles int, targetPath string, makeSymLinks bool) (int, error) { - fileData := []byte("fooo") - for n := 0; n < numberOfFiles; n++ { - fileName := fmt.Sprintf("file-%d", n) - if err := ioutil.WriteFile(filepath.Join(targetPath, fileName), fileData, 0700); err != nil { - return 0, err - } - if makeSymLinks { - if err := os.Symlink(filepath.Join(targetPath, fileName), filepath.Join(targetPath, fileName+"-link")); err != nil { - return 0, err - } - } - } - totalSize := numberOfFiles * len(fileData) - return totalSize, nil -} - -func getHash(filename string) (uint32, 
error) { - stream, err := ioutil.ReadFile(filename) - if err != nil { - return 0, err - } - hash := crc32.NewIEEE() - hash.Write(stream) - return hash.Sum32(), nil -} - -func compareDirectories(src string, dest string) error { - changes, err := archive.ChangesDirs(dest, src) - if err != nil { - return err - } - if len(changes) > 0 { - return fmt.Errorf("Unexpected differences after untar: %v", changes) - } - return nil -} - -func compareFiles(src string, dest string) error { - srcHash, err := getHash(src) - if err != nil { - return err - } - destHash, err := getHash(dest) - if err != nil { - return err - } - if srcHash != destHash { - return fmt.Errorf("%s is different from %s", src, dest) - } - return nil -} - -func TestChrootTarUntarWithSymlink(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - tmpdir, err := ioutil.TempDir("", "docker-TestChrootTarUntarWithSymlink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { - t.Fatal(err) - } - if _, err := prepareSourceDirectory(10, src, true); err != nil { - t.Fatal(err) - } - dest := filepath.Join(tmpdir, "dest") - if err := TarUntar(src, dest); err != nil { - t.Fatal(err) - } - if err := compareDirectories(src, dest); err != nil { - t.Fatal(err) - } -} - -func TestChrootCopyWithTar(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyWithTar") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { - t.Fatal(err) - } - if _, err := prepareSourceDirectory(10, src, true); err != nil { - t.Fatal(err) - } - - // Copy directory - dest := filepath.Join(tmpdir, "dest") - if err := CopyWithTar(src, dest); err != nil { - t.Fatal(err) - } - if err := compareDirectories(src, dest); err != nil { - t.Fatal(err) - } - - // Copy file - srcfile := filepath.Join(src, "file-1") - dest = filepath.Join(tmpdir, "destFile") - destfile := filepath.Join(dest, "file-1") - if err := CopyWithTar(srcfile, destfile); err != nil { - t.Fatal(err) - } - if err := compareFiles(srcfile, destfile); err != nil { - t.Fatal(err) - } - - // Copy symbolic link - srcLinkfile := filepath.Join(src, "file-1-link") - dest = filepath.Join(tmpdir, "destSymlink") - destLinkfile := filepath.Join(dest, "file-1-link") - if err := CopyWithTar(srcLinkfile, destLinkfile); err != nil { - t.Fatal(err) - } - if err := compareFiles(srcLinkfile, destLinkfile); err != nil { - t.Fatal(err) - } -} - -func TestChrootCopyFileWithTar(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "docker-TestChrootCopyFileWithTar") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { - t.Fatal(err) - } - if _, err := prepareSourceDirectory(10, src, true); err != nil { - t.Fatal(err) - } - - // Copy directory - dest := filepath.Join(tmpdir, "dest") - if err := CopyFileWithTar(src, dest); err == nil { - t.Fatal("Expected error on copying directory") - } - - // Copy file - srcfile := filepath.Join(src, "file-1") - dest = filepath.Join(tmpdir, "destFile") - destfile := filepath.Join(dest, "file-1") - if err := CopyFileWithTar(srcfile, destfile); err != nil { - t.Fatal(err) - } - if err := 
compareFiles(srcfile, destfile); err != nil { - t.Fatal(err) - } - - // Copy symbolic link - srcLinkfile := filepath.Join(src, "file-1-link") - dest = filepath.Join(tmpdir, "destSymlink") - destLinkfile := filepath.Join(dest, "file-1-link") - if err := CopyFileWithTar(srcLinkfile, destLinkfile); err != nil { - t.Fatal(err) - } - if err := compareFiles(srcLinkfile, destLinkfile); err != nil { - t.Fatal(err) - } -} - -func TestChrootUntarPath(t *testing.T) { - // TODO Windows: Figure out why this is failing - if runtime.GOOS == "windows" { - t.Skip("Failing on Windows") - } - tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarPath") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { - t.Fatal(err) - } - if _, err := prepareSourceDirectory(10, src, true); err != nil { - t.Fatal(err) - } - dest := filepath.Join(tmpdir, "dest") - // Untar a directory - if err := UntarPath(src, dest); err == nil { - t.Fatal("Expected error on untaring a directory") - } - - // Untar a tar file - stream, err := archive.Tar(src, archive.Uncompressed) - if err != nil { - t.Fatal(err) - } - buf := new(bytes.Buffer) - buf.ReadFrom(stream) - tarfile := filepath.Join(tmpdir, "src.tar") - if err := ioutil.WriteFile(tarfile, buf.Bytes(), 0644); err != nil { - t.Fatal(err) - } - if err := UntarPath(tarfile, dest); err != nil { - t.Fatal(err) - } - if err := compareDirectories(src, dest); err != nil { - t.Fatal(err) - } -} - -type slowEmptyTarReader struct { - size int - offset int - chunkSize int -} - -// Read is a slow reader of an empty tar (like the output of "tar c --files-from /dev/null") -func (s *slowEmptyTarReader) Read(p []byte) (int, error) { - time.Sleep(100 * time.Millisecond) - count := s.chunkSize - if len(p) < s.chunkSize { - count = len(p) - } - for i := 0; i < count; i++ { - p[i] = 0 - } - s.offset += count - if s.offset > s.size { - return count, io.EOF - } - return count, nil -} - -func TestChrootUntarEmptyArchiveFromSlowReader(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "docker-TestChrootUntarEmptyArchiveFromSlowReader") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { - t.Fatal(err) - } - stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} - if err := Untar(stream, dest, nil); err != nil { - t.Fatal(err) - } -} - -func TestChrootApplyEmptyArchiveFromSlowReader(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyEmptyArchiveFromSlowReader") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { - t.Fatal(err) - } - stream := &slowEmptyTarReader{size: 10240, chunkSize: 1024} - if _, err := ApplyLayer(dest, stream); err != nil { - t.Fatal(err) - } -} - -func TestChrootApplyDotDotFile(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "docker-TestChrootApplyDotDotFile") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - src := filepath.Join(tmpdir, "src") - if err := system.MkdirAll(src, 0700); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(filepath.Join(src, "..gitme"), []byte(""), 0644); err != nil { - t.Fatal(err) - } - stream, err := archive.Tar(src, archive.Uncompressed) - if err != nil { - t.Fatal(err) - } - dest := filepath.Join(tmpdir, "dest") - if err := system.MkdirAll(dest, 0700); err != nil { - 
t.Fatal(err) - } - if _, err := ApplyLayer(dest, stream); err != nil { - t.Fatal(err) - } -} diff --git a/pkg/chrootarchive/archive_unix.go b/pkg/chrootarchive/archive_unix.go deleted file mode 100644 index f2325abd74..0000000000 --- a/pkg/chrootarchive/archive_unix.go +++ /dev/null @@ -1,86 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" -) - -// untar is the entry-point for docker-untar on re-exec. This is not used on -// Windows as it does not support chroot, hence no point sandboxing through -// chroot and rexec. -func untar() { - runtime.LockOSThread() - flag.Parse() - - var options *archive.TarOptions - - //read the options from the pipe "ExtraFiles" - if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&options); err != nil { - fatal(err) - } - - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - if err := archive.Unpack(os.Stdin, "/", options); err != nil { - fatal(err) - } - // fully consume stdin in case it is zero padded - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -func invokeUnpack(decompressedArchive io.Reader, dest string, options *archive.TarOptions) error { - - // We can't pass a potentially large exclude list directly via cmd line - // because we easily overrun the kernel's max argument/environment size - // when the full image list is passed (e.g. when this is used by - // `docker load`). We will marshall the options via a pipe to the - // child - r, w, err := os.Pipe() - if err != nil { - return fmt.Errorf("Untar pipe failure: %v", err) - } - - cmd := reexec.Command("docker-untar", dest) - cmd.Stdin = decompressedArchive - - cmd.ExtraFiles = append(cmd.ExtraFiles, r) - output := bytes.NewBuffer(nil) - cmd.Stdout = output - cmd.Stderr = output - - if err := cmd.Start(); err != nil { - return fmt.Errorf("Untar error on re-exec cmd: %v", err) - } - //write the options to the pipe for the untar exec to read - if err := json.NewEncoder(w).Encode(options); err != nil { - return fmt.Errorf("Untar json encode to pipe failed: %v", err) - } - w.Close() - - if err := cmd.Wait(); err != nil { - // when `xz -d -c -q | docker-untar ...` failed on docker-untar side, - // we need to exhaust `xz`'s output, otherwise the `xz` side will be - // pending on write pipe forever - io.Copy(ioutil.Discard, decompressedArchive) - - return fmt.Errorf("Error processing tar file(%v): %s", err, output) - } - return nil -} diff --git a/pkg/chrootarchive/archive_windows.go b/pkg/chrootarchive/archive_windows.go deleted file mode 100644 index 0a500ed5c2..0000000000 --- a/pkg/chrootarchive/archive_windows.go +++ /dev/null @@ -1,22 +0,0 @@ -package chrootarchive - -import ( - "io" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// chroot is not supported by Windows -func chroot(path string) error { - return nil -} - -func invokeUnpack(decompressedArchive io.ReadCloser, - dest string, - options *archive.TarOptions) error { - // Windows is different to Linux here because Windows does not support - // chroot. Hence there is no point sandboxing a chrooted process to - // do the unpack. We call inline instead within the daemon process. 
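The unix path above re-execs the daemon binary under a registered name and ships TarOptions over an extra pipe rather than argv, sidestepping the kernel's argument/environment size limit. A condensed sketch of that same pattern with a hypothetical process name "demo-child" and an illustrative payload:

package main

import (
	"encoding/json"
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Runs when the binary is re-invoked under the registered name.
	reexec.Register("demo-child", func() {
		// ExtraFiles[0] in the parent shows up as fd 3 in the child.
		var opts map[string]string
		if err := json.NewDecoder(os.NewFile(3, "options")).Decode(&opts); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		fmt.Println("child got options:", opts)
		os.Exit(0)
	})
}

func main() {
	if reexec.Init() { // child path: the registered function already ran
		return
	}

	r, w, err := os.Pipe()
	if err != nil {
		panic(err)
	}
	cmd := reexec.Command("demo-child")
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	cmd.ExtraFiles = append(cmd.ExtraFiles, r)

	if err := cmd.Start(); err != nil {
		panic(err)
	}
	// Marshal the options into the pipe, exactly as invokeUnpack does above.
	if err := json.NewEncoder(w).Encode(map[string]string{"dest": "/tmp/x"}); err != nil {
		panic(err)
	}
	w.Close()
	if err := cmd.Wait(); err != nil {
		panic(err)
	}
}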
- return archive.Unpack(decompressedArchive, longpath.AddPrefix(dest), options) -} diff --git a/pkg/chrootarchive/chroot_linux.go b/pkg/chrootarchive/chroot_linux.go deleted file mode 100644 index cefbef9df4..0000000000 --- a/pkg/chrootarchive/chroot_linux.go +++ /dev/null @@ -1,103 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" - - "github.com/docker/docker/pkg/mount" -) - -// chroot on linux uses pivot_root instead of chroot -// pivot_root takes a new root and an old root. -// Old root must be a sub-dir of new root, it is where the current rootfs will reside after the call to pivot_root. -// New root is where the new rootfs is set to. -// Old root is removed after the call to pivot_root so it is no longer available under the new root. -// This is similar to how libcontainer sets up a container's rootfs -func chroot(path string) (err error) { - if err := syscall.Unshare(syscall.CLONE_NEWNS); err != nil { - return fmt.Errorf("Error creating mount namespace before pivot: %v", err) - } - - if err := mount.MakeRPrivate(path); err != nil { - return err - } - - // setup oldRoot for pivot_root - pivotDir, err := ioutil.TempDir(path, ".pivot_root") - if err != nil { - return fmt.Errorf("Error setting up pivot dir: %v", err) - } - - var mounted bool - defer func() { - if mounted { - // make sure pivotDir is not mounted before we try to remove it - if errCleanup := syscall.Unmount(pivotDir, syscall.MNT_DETACH); errCleanup != nil { - if err == nil { - err = errCleanup - } - return - } - } - - errCleanup := os.Remove(pivotDir) - // pivotDir doesn't exist if pivot_root failed and chroot+chdir was successful - // because we already cleaned it up on failed pivot_root - if errCleanup != nil && !os.IsNotExist(errCleanup) { - errCleanup = fmt.Errorf("Error cleaning up after pivot: %v", errCleanup) - if err == nil { - err = errCleanup - } - } - - if errCleanup := syscall.Unmount("/", syscall.MNT_DETACH); errCleanup != nil { - if err == nil { - err = fmt.Errorf("error unmounting root: %v", errCleanup) - } - return - } - }() - - if err := syscall.PivotRoot(path, pivotDir); err != nil { - // If pivot fails, fall back to the normal chroot after cleaning up temp dir - if err := os.Remove(pivotDir); err != nil { - return fmt.Errorf("Error cleaning up after failed pivot: %v", err) - } - return realChroot(path) - } - mounted = true - - // This is the new path for where the old root (prior to the pivot) has been moved to - // This dir contains the rootfs of the caller, which we need to remove so it is not visible during extraction - pivotDir = filepath.Join("/", filepath.Base(pivotDir)) - - if err := syscall.Chdir("/"); err != nil { - return fmt.Errorf("Error changing to new root: %v", err) - } - - // Make the pivotDir (where the old root lives) private so it can be unmounted without propagating to the host - if err := syscall.Mount("", pivotDir, "", syscall.MS_PRIVATE|syscall.MS_REC, ""); err != nil { - return fmt.Errorf("Error making old root private after pivot: %v", err) - } - - // Now unmount the old root so it's no longer visible from the new root - if err := syscall.Unmount(pivotDir, syscall.MNT_DETACH); err != nil { - return fmt.Errorf("Error while unmounting old root after pivot: %v", err) - } - mounted = false - - return nil -} - -func realChroot(path string) error { - if err := syscall.Chroot(path); err != nil { - return fmt.Errorf("Error after fallback to chroot: %v", err) - } - if err := syscall.Chdir("/"); err != nil { - return fmt.Errorf("Error 
changing to new root after chroot: %v", err) - } - return nil -} diff --git a/pkg/chrootarchive/chroot_unix.go b/pkg/chrootarchive/chroot_unix.go deleted file mode 100644 index 16354bf648..0000000000 --- a/pkg/chrootarchive/chroot_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows,!linux - -package chrootarchive - -import "syscall" - -func chroot(path string) error { - if err := syscall.Chroot(path); err != nil { - return err - } - return syscall.Chdir("/") -} diff --git a/pkg/chrootarchive/diff.go b/pkg/chrootarchive/diff.go deleted file mode 100644 index 94131a6eb8..0000000000 --- a/pkg/chrootarchive/diff.go +++ /dev/null @@ -1,19 +0,0 @@ -package chrootarchive - -import "github.com/docker/docker/pkg/archive" - -// ApplyLayer parses a diff in the standard layer format from `layer`, -// and applies it to the directory `dest`. The stream `layer` can be -// compressed or uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyLayer(dest string, layer archive.Reader) (size int64, err error) { - return applyLayerHandler(dest, layer, &archive.TarOptions{}, true) -} - -// ApplyUncompressedLayer parses a diff in the standard layer format from -// `layer`, and applies it to the directory `dest`. The stream `layer` -// can only be uncompressed. -// Returns the size in bytes of the contents of the layer. -func ApplyUncompressedLayer(dest string, layer archive.Reader, options *archive.TarOptions) (int64, error) { - return applyLayerHandler(dest, layer, options, false) -} diff --git a/pkg/chrootarchive/diff_unix.go b/pkg/chrootarchive/diff_unix.go deleted file mode 100644 index a4adb74d58..0000000000 --- a/pkg/chrootarchive/diff_unix.go +++ /dev/null @@ -1,120 +0,0 @@ -//+build !windows - -package chrootarchive - -import ( - "bytes" - "encoding/json" - "flag" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/reexec" - "github.com/docker/docker/pkg/system" -) - -type applyLayerResponse struct { - LayerSize int64 `json:"layerSize"` -} - -// applyLayer is the entry-point for docker-applylayer on re-exec. This is not -// used on Windows as it does not support chroot, hence no point sandboxing -// through chroot and rexec. -func applyLayer() { - - var ( - tmpDir = "" - err error - options *archive.TarOptions - ) - runtime.LockOSThread() - flag.Parse() - - if err := chroot(flag.Arg(0)); err != nil { - fatal(err) - } - - // We need to be able to set any perms - oldmask, err := system.Umask(0) - defer system.Umask(oldmask) - if err != nil { - fatal(err) - } - - if err := json.Unmarshal([]byte(os.Getenv("OPT")), &options); err != nil { - fatal(err) - } - - if tmpDir, err = ioutil.TempDir("/", "temp-docker-extract"); err != nil { - fatal(err) - } - - os.Setenv("TMPDIR", tmpDir) - size, err := archive.UnpackLayer("/", os.Stdin, options) - os.RemoveAll(tmpDir) - if err != nil { - fatal(err) - } - - encoder := json.NewEncoder(os.Stdout) - if err := encoder.Encode(applyLayerResponse{size}); err != nil { - fatal(fmt.Errorf("unable to encode layerSize JSON: %s", err)) - } - - if _, err := flush(os.Stdin); err != nil { - fatal(err) - } - - os.Exit(0) -} - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. 
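A caller-side sketch for ApplyLayer, with illustrative paths; as with Untar, the binary must call reexec.Init first so the re-executed "docker-applyLayer" child can take over:

package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/chrootarchive"
	"github.com/docker/docker/pkg/reexec"
)

func main() {
	if reexec.Init() { // child path of the "docker-applyLayer" re-exec
		return
	}

	layer, err := os.Open("/tmp/layer.tar") // illustrative path
	if err != nil {
		panic(err)
	}
	defer layer.Close()

	// Returns the number of bytes the layer's contents added to the rootfs.
	size, err := chrootarchive.ApplyLayer("/var/lib/demo/rootfs", layer)
	if err != nil {
		panic(err)
	}
	fmt.Printf("applied layer: %d bytes\n", size)
}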
-func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - if options == nil { - options = &archive.TarOptions{} - } - if options.ExcludePatterns == nil { - options.ExcludePatterns = []string{} - } - - data, err := json.Marshal(options) - if err != nil { - return 0, fmt.Errorf("ApplyLayer json encode: %v", err) - } - - cmd := reexec.Command("docker-applyLayer", dest) - cmd.Stdin = layer - cmd.Env = append(cmd.Env, fmt.Sprintf("OPT=%s", data)) - - outBuf, errBuf := new(bytes.Buffer), new(bytes.Buffer) - cmd.Stdout, cmd.Stderr = outBuf, errBuf - - if err = cmd.Run(); err != nil { - return 0, fmt.Errorf("ApplyLayer %s stdout: %s stderr: %s", err, outBuf, errBuf) - } - - // Stdout should be a valid JSON struct representing an applyLayerResponse. - response := applyLayerResponse{} - decoder := json.NewDecoder(outBuf) - if err = decoder.Decode(&response); err != nil { - return 0, fmt.Errorf("unable to decode ApplyLayer JSON response: %s", err) - } - - return response.LayerSize, nil -} diff --git a/pkg/chrootarchive/diff_windows.go b/pkg/chrootarchive/diff_windows.go deleted file mode 100644 index 8e1830cb83..0000000000 --- a/pkg/chrootarchive/diff_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package chrootarchive - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/longpath" -) - -// applyLayerHandler parses a diff in the standard layer format from `layer`, and -// applies it to the directory `dest`. Returns the size in bytes of the -// contents of the layer. -func applyLayerHandler(dest string, layer archive.Reader, options *archive.TarOptions, decompress bool) (size int64, err error) { - dest = filepath.Clean(dest) - - // Ensure it is a Windows-style volume path - dest = longpath.AddPrefix(dest) - - if decompress { - decompressed, err := archive.DecompressStream(layer) - if err != nil { - return 0, err - } - defer decompressed.Close() - - layer = decompressed - } - - tmpDir, err := ioutil.TempDir(os.Getenv("temp"), "temp-docker-extract") - if err != nil { - return 0, fmt.Errorf("ApplyLayer failed to create temp-docker-extract under %s. 
%s", dest, err) - } - - s, err := archive.UnpackLayer(dest, layer, nil) - os.RemoveAll(tmpDir) - if err != nil { - return 0, fmt.Errorf("ApplyLayer %s failed UnpackLayer to %s", err, dest) - } - - return s, nil -} diff --git a/pkg/chrootarchive/init_unix.go b/pkg/chrootarchive/init_unix.go deleted file mode 100644 index 4f637f17b8..0000000000 --- a/pkg/chrootarchive/init_unix.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build !windows - -package chrootarchive - -import ( - "fmt" - "io" - "io/ioutil" - "os" - - "github.com/docker/docker/pkg/reexec" -) - -func init() { - reexec.Register("docker-applyLayer", applyLayer) - reexec.Register("docker-untar", untar) -} - -func fatal(err error) { - fmt.Fprint(os.Stderr, err) - os.Exit(1) -} - -// flush consumes all the bytes from the reader discarding -// any errors -func flush(r io.Reader) (bytes int64, err error) { - return io.Copy(ioutil.Discard, r) -} diff --git a/pkg/chrootarchive/init_windows.go b/pkg/chrootarchive/init_windows.go deleted file mode 100644 index fa17c9bf83..0000000000 --- a/pkg/chrootarchive/init_windows.go +++ /dev/null @@ -1,4 +0,0 @@ -package chrootarchive - -func init() { -} diff --git a/pkg/devicemapper/devmapper.go b/pkg/devicemapper/devmapper.go deleted file mode 100644 index 724ef63bf4..0000000000 --- a/pkg/devicemapper/devmapper.go +++ /dev/null @@ -1,828 +0,0 @@ -// +build linux - -package devicemapper - -import ( - "errors" - "fmt" - "os" - "runtime" - "syscall" - "unsafe" - - "github.com/Sirupsen/logrus" -) - -// DevmapperLogger defines methods for logging with devicemapper. -type DevmapperLogger interface { - DMLog(level int, file string, line int, dmError int, message string) -} - -const ( - deviceCreate TaskType = iota - deviceReload - deviceRemove - deviceRemoveAll - deviceSuspend - deviceResume - deviceInfo - deviceDeps - deviceRename - deviceVersion - deviceStatus - deviceTable - deviceWaitevent - deviceList - deviceClear - deviceMknodes - deviceListVersions - deviceTargetMsg - deviceSetGeometry -) - -const ( - addNodeOnResume AddNodeType = iota - addNodeOnCreate -) - -// List of errors returned when using devicemapper. 
-var ( - ErrTaskRun = errors.New("dm_task_run failed") - ErrTaskSetName = errors.New("dm_task_set_name failed") - ErrTaskSetMessage = errors.New("dm_task_set_message failed") - ErrTaskSetAddNode = errors.New("dm_task_set_add_node failed") - ErrTaskSetRo = errors.New("dm_task_set_ro failed") - ErrTaskAddTarget = errors.New("dm_task_add_target failed") - ErrTaskSetSector = errors.New("dm_task_set_sector failed") - ErrTaskGetDeps = errors.New("dm_task_get_deps failed") - ErrTaskGetInfo = errors.New("dm_task_get_info failed") - ErrTaskGetDriverVersion = errors.New("dm_task_get_driver_version failed") - ErrTaskDeferredRemove = errors.New("dm_task_deferred_remove failed") - ErrTaskSetCookie = errors.New("dm_task_set_cookie failed") - ErrNilCookie = errors.New("cookie ptr can't be nil") - ErrGetBlockSize = errors.New("Can't get block size") - ErrUdevWait = errors.New("wait on udev cookie failed") - ErrSetDevDir = errors.New("dm_set_dev_dir failed") - ErrGetLibraryVersion = errors.New("dm_get_library_version failed") - ErrCreateRemoveTask = errors.New("Can't create task of type deviceRemove") - ErrRunRemoveDevice = errors.New("running RemoveDevice failed") - ErrInvalidAddNode = errors.New("Invalid AddNode type") - ErrBusy = errors.New("Device is Busy") - ErrDeviceIDExists = errors.New("Device Id Exists") - ErrEnxio = errors.New("No such device or address") -) - -var ( - dmSawBusy bool - dmSawExist bool - dmSawEnxio bool // No Such Device or Address -) - -type ( - // Task represents a devicemapper task (like lvcreate, etc.); a task is needed for each ioctl - // command to execute. - Task struct { - unmanaged *cdmTask - } - // Deps represents dependents (layer) of a device. - Deps struct { - Count uint32 - Filler uint32 - Device []uint64 - } - // Info represents information about a device. - Info struct { - Exists int - Suspended int - LiveTable int - InactiveTable int - OpenCount int32 - EventNr uint32 - Major uint32 - Minor uint32 - ReadOnly int - TargetCount int32 - DeferredRemove int - } - // TaskType represents a type of task - TaskType int - // AddNodeType represents a type of node to be added - AddNodeType int -) - -// DeviceIDExists returns whether the error indicates that a device ID already -// exists. This is true when a device-create or snapshot-create operation fails -// because the device or snapshot already exists in the pool. -// The current implementation is a little crude, as it scans the error string -// for an exact pattern match. Replacing it with a more robust implementation -// is desirable. 
-func DeviceIDExists(err error) bool { - return fmt.Sprint(err) == fmt.Sprint(ErrDeviceIDExists) -} - -func (t *Task) destroy() { - if t != nil { - DmTaskDestroy(t.unmanaged) - runtime.SetFinalizer(t, nil) - } -} - -// TaskCreateNamed is a convenience function for TaskCreate when a name -// will be set on the task as well -func TaskCreateNamed(t TaskType, name string) (*Task, error) { - task := TaskCreate(t) - if task == nil { - return nil, fmt.Errorf("devicemapper: Can't create task of type %d", int(t)) - } - if err := task.setName(name); err != nil { - return nil, fmt.Errorf("devicemapper: Can't set task name %s", name) - } - return task, nil -} - -// TaskCreate initializes a devicemapper task of tasktype -func TaskCreate(tasktype TaskType) *Task { - Ctask := DmTaskCreate(int(tasktype)) - if Ctask == nil { - return nil - } - task := &Task{unmanaged: Ctask} - runtime.SetFinalizer(task, (*Task).destroy) - return task -} - -func (t *Task) run() error { - if res := DmTaskRun(t.unmanaged); res != 1 { - return ErrTaskRun - } - return nil -} - -func (t *Task) setName(name string) error { - if res := DmTaskSetName(t.unmanaged, name); res != 1 { - return ErrTaskSetName - } - return nil -} - -func (t *Task) setMessage(message string) error { - if res := DmTaskSetMessage(t.unmanaged, message); res != 1 { - return ErrTaskSetMessage - } - return nil -} - -func (t *Task) setSector(sector uint64) error { - if res := DmTaskSetSector(t.unmanaged, sector); res != 1 { - return ErrTaskSetSector - } - return nil -} - -func (t *Task) setCookie(cookie *uint, flags uint16) error { - if cookie == nil { - return ErrNilCookie - } - if res := DmTaskSetCookie(t.unmanaged, cookie, flags); res != 1 { - return ErrTaskSetCookie - } - return nil -} - -func (t *Task) setAddNode(addNode AddNodeType) error { - if addNode != addNodeOnResume && addNode != addNodeOnCreate { - return ErrInvalidAddNode - } - if res := DmTaskSetAddNode(t.unmanaged, addNode); res != 1 { - return ErrTaskSetAddNode - } - return nil -} - -func (t *Task) setRo() error { - if res := DmTaskSetRo(t.unmanaged); res != 1 { - return ErrTaskSetRo - } - return nil -} - -func (t *Task) addTarget(start, size uint64, ttype, params string) error { - if res := DmTaskAddTarget(t.unmanaged, start, size, - ttype, params); res != 1 { - return ErrTaskAddTarget - } - return nil -} - -func (t *Task) getDeps() (*Deps, error) { - var deps *Deps - if deps = DmTaskGetDeps(t.unmanaged); deps == nil { - return nil, ErrTaskGetDeps - } - return deps, nil -} - -func (t *Task) getInfo() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfo(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) getInfoWithDeferred() (*Info, error) { - info := &Info{} - if res := DmTaskGetInfoWithDeferred(t.unmanaged, info); res != 1 { - return nil, ErrTaskGetInfo - } - return info, nil -} - -func (t *Task) getDriverVersion() (string, error) { - res := DmTaskGetDriverVersion(t.unmanaged) - if res == "" { - return "", ErrTaskGetDriverVersion - } - return res, nil -} - -func (t *Task) getNextTarget(next unsafe.Pointer) (nextPtr unsafe.Pointer, start uint64, - length uint64, targetType string, params string) { - - return DmGetNextTarget(t.unmanaged, next, &start, &length, - &targetType, ¶ms), - start, length, targetType, params -} - -// UdevWait waits for any processes that are waiting for udev to complete the specified cookie. 
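For reference while reading the hunks that follow: the task helpers above and `UdevWait` below are always used as a pair. `setCookie` registers a System V semaphore with udev, and `UdevWait` both blocks on and cleans up that semaphore. A minimal sketch of the skeleton, assuming the package-internal helpers from this hunk (`runWithUdevCookie` itself is hypothetical, for illustration only):

```go
// runWithUdevCookie (hypothetical) shows the canonical pattern used by
// RemoveDevice, ResumeDevice, and CreatePool further down in this file.
func runWithUdevCookie(task *Task) error {
	var cookie uint
	if err := task.setCookie(&cookie, 0); err != nil {
		return fmt.Errorf("devicemapper: Can not set cookie: %s", err)
	}
	// Must pair with setCookie above, or the System V semaphore leaks.
	defer UdevWait(&cookie)

	return task.run()
}
```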
-func UdevWait(cookie *uint) error { - if res := DmUdevWait(*cookie); res != 1 { - logrus.Debugf("devicemapper: Failed to wait on udev cookie %d", *cookie) - return ErrUdevWait - } - return nil -} - -// LogInitVerbose initializes the verbose logger for the device mapper library. -func LogInitVerbose(level int) { - DmLogInitVerbose(level) -} - -var dmLogger DevmapperLogger - -// LogInit initializes the logger for the device mapper library. -func LogInit(logger DevmapperLogger) { - dmLogger = logger - LogWithErrnoInit() -} - -// SetDevDir sets the dev folder for the device mapper library (usually /dev). -func SetDevDir(dir string) error { - if res := DmSetDevDir(dir); res != 1 { - logrus.Debug("devicemapper: Error dm_set_dev_dir") - return ErrSetDevDir - } - return nil -} - -// GetLibraryVersion returns the device mapper library version. -func GetLibraryVersion() (string, error) { - var version string - if res := DmGetLibraryVersion(&version); res != 1 { - return "", ErrGetLibraryVersion - } - return version, nil -} - -// UdevSyncSupported returns whether device-mapper is able to sync with udev -// -// This is essential, as otherwise race conditions can arise where both udev and -// device-mapper attempt to create and destroy devices. -func UdevSyncSupported() bool { - return DmUdevGetSyncSupport() != 0 -} - -// UdevSetSyncSupport allows setting whether udev sync should be enabled. -// The returned bool indicates whether sync is enabled afterwards. -func UdevSetSyncSupport(enable bool) bool { - if enable { - DmUdevSetSyncSupport(1) - } else { - DmUdevSetSyncSupport(0) - } - - return UdevSyncSupported() -} - -// CookieSupported returns whether the version of device-mapper supports the -// use of cookies in tasks. -// This is largely a lower-level call that other functions use. -func CookieSupported() bool { - return DmCookieSupported() != 0 -} - -// RemoveDevice is a useful helper for cleaning up a device. -func RemoveDevice(name string) error { - task, err := TaskCreateNamed(deviceRemove, name) - if task == nil { - return err - } - - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - defer UdevWait(&cookie) - - dmSawBusy = false // reset before the task is run - if err = task.run(); err != nil { - if dmSawBusy { - return ErrBusy - } - return fmt.Errorf("devicemapper: Error running RemoveDevice %s", err) - } - - return nil -} - -// RemoveDeviceDeferred is a useful helper for cleaning up a device, but deferred. -func RemoveDeviceDeferred(name string) error { - logrus.Debugf("devicemapper: RemoveDeviceDeferred START(%s)", name) - defer logrus.Debugf("devicemapper: RemoveDeviceDeferred END(%s)", name) - task, err := TaskCreateNamed(deviceRemove, name) - if task == nil { - return err - } - - if err := DmTaskDeferredRemove(task.unmanaged); err != 1 { - return ErrTaskDeferredRemove - } - - // set a task cookie and disable library fallback, or else libdevmapper will - // disable udev dm rules and delete the symlink under /dev/mapper by itself, - // even if the removal is deferred by the kernel. - var cookie uint - var flags uint16 - flags = DmUdevDisableLibraryFallback - if err := task.setCookie(&cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can not set cookie: %s", err) - } - - // libdevmapper and udev rely on System V semaphores for synchronization; - // semaphores created in `task.setCookie` will be cleaned up in `UdevWait`.
- // So these two function calls must come in pairs; otherwise semaphores will - // be leaked, and the limit on the number of semaphores defined in `/proc/sys/kernel/sem` - // will be reached, which will eventually make all following calls to `task.setCookie` - // fail. - // This call will not wait for the deferred removal to finish executing, since no - // udev event will be generated and the semaphore's value will not be incremented - // by udev; here UdevWait only cleans up the semaphore. - defer UdevWait(&cookie) - - if err = task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running RemoveDeviceDeferred %s", err) - } - - return nil -} - -// CancelDeferredRemove cancels a deferred remove for a device. -func CancelDeferredRemove(deviceName string) error { - task, err := TaskCreateNamed(deviceTargetMsg, deviceName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage("@cancel_deferred_remove"); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawBusy = false - dmSawEnxio = false - if err := task.run(); err != nil { - // The device might already be in the process of being deleted - if dmSawBusy { - return ErrBusy - } else if dmSawEnxio { - return ErrEnxio - } - return fmt.Errorf("devicemapper: Error running CancelDeferredRemove %s", err) - - } - return nil -} - -// GetBlockDeviceSize returns the size of a block device identified by the specified file. -func GetBlockDeviceSize(file *os.File) (uint64, error) { - size, err := ioctlBlkGetSize64(file.Fd()) - if err != nil { - logrus.Errorf("devicemapper: Error getblockdevicesize: %s", err) - return 0, ErrGetBlockSize - } - return uint64(size), nil -} - -// BlockDeviceDiscard runs discard for the given path. -// This is used as a workaround for the kernel not discarding blocks on -// the thin pool when we remove a thin device, so we do it -// manually. -func BlockDeviceDiscard(path string) error { - file, err := os.OpenFile(path, os.O_RDWR, 0) - if err != nil { - return err - } - defer file.Close() - - size, err := GetBlockDeviceSize(file) - if err != nil { - return err - } - - if err := ioctlBlkDiscard(file.Fd(), 0, size); err != nil { - return err - } - - // Without this, the remove of the device that happens after the - // discard sometimes fails with EBUSY. - syscall.Sync() - - return nil -} - -// CreatePool is the programmatic example of "dmsetup create". -// It creates a device with the specified poolName, data and metadata files, and block size.
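A caller-side sketch of CreatePool, assuming two pre-configured loop devices (the paths and pool name here are hypothetical; errors are simply returned):

```go
func createPoolExample() error {
	// Open the data and metadata devices that will back the thin pool
	// (the loop devices are assumed to already be set up).
	data, err := os.OpenFile("/dev/loop0", os.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer data.Close()

	meta, err := os.OpenFile("/dev/loop1", os.O_RDWR, 0)
	if err != nil {
		return err
	}
	defer meta.Close()

	// 128 sectors * 512 bytes = 64KiB thin-pool block size.
	return devicemapper.CreatePool("docker-pool", data, meta, 128)
}
```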
-func CreatePool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := TaskCreateNamed(deviceCreate, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("devicemapper: Can't get data size %s", err) - } - - params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) - if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - - var cookie uint - var flags uint16 - flags = DmUdevDisableSubsystemRulesFlag | DmUdevDisableDiskRulesFlag | DmUdevDisableOtherRulesFlag - if err := task.setCookie(&cookie, flags); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - defer UdevWait(&cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate (CreatePool) %s", err) - } - - return nil -} - -// ReloadPool is the programmatic example of "dmsetup reload". -// It reloads the table with the specified poolName, data and metadata files, and block size. -func ReloadPool(poolName string, dataFile, metadataFile *os.File, poolBlockSize uint32) error { - task, err := TaskCreateNamed(deviceReload, poolName) - if task == nil { - return err - } - - size, err := GetBlockDeviceSize(dataFile) - if err != nil { - return fmt.Errorf("devicemapper: Can't get data size %s", err) - } - - params := fmt.Sprintf("%s %s %d 32768 1 skip_block_zeroing", metadataFile.Name(), dataFile.Name(), poolBlockSize) - if err := task.addTarget(0, size/512, "thin-pool", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceReload %s", err) - } - - return nil -} - -// GetDeps is the programmatic example of "dmsetup deps". -// It outputs a list of devices referenced by the live table for the specified device. -func GetDeps(name string) (*Deps, error) { - task, err := TaskCreateNamed(deviceDeps, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getDeps() -} - -// GetInfo is the programmatic example of "dmsetup info". -// It outputs some brief information about the device. -func GetInfo(name string) (*Info, error) { - task, err := TaskCreateNamed(deviceInfo, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getInfo() -} - -// GetInfoWithDeferred is the programmatic example of "dmsetup info", but it also -// reports the device's deferred-remove state alongside the brief device information. -func GetInfoWithDeferred(name string) (*Info, error) { - task, err := TaskCreateNamed(deviceInfo, name) - if task == nil { - return nil, err - } - if err := task.run(); err != nil { - return nil, err - } - return task.getInfoWithDeferred() -} - -// GetDriverVersion is the programmatic example of "dmsetup version". -// It outputs version information of the driver. -func GetDriverVersion() (string, error) { - task := TaskCreate(deviceVersion) - if task == nil { - return "", fmt.Errorf("devicemapper: Can't create deviceVersion task") - } - if err := task.run(); err != nil { - return "", err - } - return task.getDriverVersion() -} - -// GetStatus is the programmatic example of "dmsetup status". -// It outputs status information for the specified device name.
-func GetStatus(name string) (uint64, uint64, string, string, error) { - task, err := TaskCreateNamed(deviceStatus, name) - if task == nil { - logrus.Debugf("devicemapper: GetStatus() Error TaskCreateNamed: %s", err) - return 0, 0, "", "", err - } - if err := task.run(); err != nil { - logrus.Debugf("devicemapper: GetStatus() Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.getInfo() - if err != nil { - logrus.Debugf("devicemapper: GetStatus() Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - logrus.Debugf("devicemapper: GetStatus() Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) - } - - _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) - return start, length, targetType, params, nil -} - -// GetTable is the programmatic example for "dmsetup table". -// It outputs the current table for the specified device name. -func GetTable(name string) (uint64, uint64, string, string, error) { - task, err := TaskCreateNamed(deviceTable, name) - if task == nil { - logrus.Debugf("devicemapper: GetTable() Error TaskCreateNamed: %s", err) - return 0, 0, "", "", err - } - if err := task.run(); err != nil { - logrus.Debugf("devicemapper: GetTable() Error Run: %s", err) - return 0, 0, "", "", err - } - - devinfo, err := task.getInfo() - if err != nil { - logrus.Debugf("devicemapper: GetTable() Error GetInfo: %s", err) - return 0, 0, "", "", err - } - if devinfo.Exists == 0 { - logrus.Debugf("devicemapper: GetTable() Non existing device %s", name) - return 0, 0, "", "", fmt.Errorf("devicemapper: Non existing device %s", name) - } - - _, start, length, targetType, params := task.getNextTarget(unsafe.Pointer(nil)) - return start, length, targetType, params, nil -} - -// SetTransactionID sets a transaction id for the specified device name. -func SetTransactionID(poolName string, oldID uint64, newID uint64) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("set_transaction_id %d %d", oldID, newID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running SetTransactionID %s", err) - } - return nil -} - -// SuspendDevice is the programmatic example of "dmsetup suspend". -// It suspends the specified device. -func SuspendDevice(name string) error { - task, err := TaskCreateNamed(deviceSuspend, name) - if task == nil { - return err - } - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceSuspend %s", err) - } - return nil -} - -// ResumeDevice is the programmatic example of "dmsetup resume". -// It un-suspends the specified device. -func ResumeDevice(name string) error { - task, err := TaskCreateNamed(deviceResume, name) - if task == nil { - return err - } - - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - defer UdevWait(&cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceResume %s", err) - } - - return nil -} - -// CreateDevice creates a device with the specified poolName with the specified device id. 
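CreateDevice below sends a "create_thin" message to the pool's message interface (roughly the equivalent of `dmsetup message`), which only registers the device in the pool's metadata; the device becomes visible once ActivateDevice maps it. A sketch of that two-step flow, with hypothetical pool and device names:

```go
func createAndActivateExample() error {
	// Register thin device 1 in the pool's metadata...
	if err := devicemapper.CreateDevice("docker-pool", 1); err != nil {
		return err
	}
	// ...then map it as /dev/mapper/thin-1 with a 10GiB virtual size.
	return devicemapper.ActivateDevice("docker-pool", "thin-1", 1, 10*1024*1024*1024)
}
```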
-func CreateDevice(poolName string, deviceID int) error { - logrus.Debugf("devicemapper: CreateDevice(poolName=%v, deviceID=%v)", poolName, deviceID) - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("create_thin %d", deviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.run(); err != nil { - // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. - if dmSawExist { - return ErrDeviceIDExists - } - - return fmt.Errorf("devicemapper: Error running CreateDevice %s", err) - - } - return nil -} - -// DeleteDevice deletes a device with the specified poolName with the specified device id. -func DeleteDevice(poolName string, deviceID int) error { - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - return err - } - - if err := task.setSector(0); err != nil { - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("delete %d", deviceID)); err != nil { - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawBusy = false - if err := task.run(); err != nil { - if dmSawBusy { - return ErrBusy - } - return fmt.Errorf("devicemapper: Error running DeleteDevice %s", err) - } - return nil -} - -// ActivateDevice activates the device identified by the specified -// poolName, name and deviceID with the specified size. -func ActivateDevice(poolName string, name string, deviceID int, size uint64) error { - return activateDevice(poolName, name, deviceID, size, "") -} - -// ActivateDeviceWithExternal activates the device identified by the specified -// poolName, name and deviceID with the specified size. 
-func ActivateDeviceWithExternal(poolName string, name string, deviceID int, size uint64, external string) error { - return activateDevice(poolName, name, deviceID, size, external) -} - -func activateDevice(poolName string, name string, deviceID int, size uint64, external string) error { - task, err := TaskCreateNamed(deviceCreate, name) - if task == nil { - return err - } - - var params string - if len(external) > 0 { - params = fmt.Sprintf("%s %d %s", poolName, deviceID, external) - } else { - params = fmt.Sprintf("%s %d", poolName, deviceID) - } - if err := task.addTarget(0, size/512, "thin", params); err != nil { - return fmt.Errorf("devicemapper: Can't add target %s", err) - } - if err := task.setAddNode(addNodeOnCreate); err != nil { - return fmt.Errorf("devicemapper: Can't add node %s", err) - } - - var cookie uint - if err := task.setCookie(&cookie, 0); err != nil { - return fmt.Errorf("devicemapper: Can't set cookie %s", err) - } - - defer UdevWait(&cookie) - - if err := task.run(); err != nil { - return fmt.Errorf("devicemapper: Error running deviceCreate (ActivateDevice) %s", err) - } - - return nil -} - -// CreateSnapDevice creates a snapshot based on the device identified by the baseName and baseDeviceID. -func CreateSnapDevice(poolName string, deviceID int, baseName string, baseDeviceID int) error { - devinfo, _ := GetInfo(baseName) - doSuspend := devinfo != nil && devinfo.Exists != 0 - - if doSuspend { - if err := SuspendDevice(baseName); err != nil { - return err - } - } - - task, err := TaskCreateNamed(deviceTargetMsg, poolName) - if task == nil { - if doSuspend { - ResumeDevice(baseName) - } - return err - } - - if err := task.setSector(0); err != nil { - if doSuspend { - ResumeDevice(baseName) - } - return fmt.Errorf("devicemapper: Can't set sector %s", err) - } - - if err := task.setMessage(fmt.Sprintf("create_snap %d %d", deviceID, baseDeviceID)); err != nil { - if doSuspend { - ResumeDevice(baseName) - } - return fmt.Errorf("devicemapper: Can't set message %s", err) - } - - dmSawExist = false // reset before the task is run - if err := task.run(); err != nil { - if doSuspend { - ResumeDevice(baseName) - } - // Caller wants to know about ErrDeviceIDExists so that it can try with a different device id. - if dmSawExist { - return ErrDeviceIDExists - } - - return fmt.Errorf("devicemapper: Error running deviceCreate (createSnapDevice) %s", err) - - } - - if doSuspend { - if err := ResumeDevice(baseName); err != nil { - return err - } - } - - return nil -} diff --git a/pkg/devicemapper/devmapper_log.go b/pkg/devicemapper/devmapper_log.go deleted file mode 100644 index 8477e36fec..0000000000 --- a/pkg/devicemapper/devmapper_log.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build linux - -package devicemapper - -import "C" - -import ( - "strings" -) - -// Due to the way cgo works this has to be in a separate file, as devmapper.go has -// definitions in the cgo block, which is incompatible with using "//export" - -// DevmapperLogCallback exports the devmapper log callback for cgo.
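The callback below routes libdevmapper's messages to whatever DevmapperLogger was installed with LogInit (defined earlier in this patch). A minimal logger sketch, assuming logrus as the sink (`debugLogger` is hypothetical):

```go
// debugLogger (hypothetical) forwards libdevmapper messages to logrus.
type debugLogger struct{}

func (debugLogger) DMLog(level int, file string, line int, dmError int, message string) {
	logrus.Debugf("libdevmapper(%d): %s:%d (%d) %s", level, file, line, dmError, message)
}

// Wiring it up: devicemapper.LogInit(debugLogger{})
```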
-//export DevmapperLogCallback -func DevmapperLogCallback(level C.int, file *C.char, line C.int, dmErrnoOrClass C.int, message *C.char) { - msg := C.GoString(message) - if level < 7 { - if strings.Contains(msg, "busy") { - dmSawBusy = true - } - - if strings.Contains(msg, "File exists") { - dmSawExist = true - } - - if strings.Contains(msg, "No such device or address") { - dmSawEnxio = true - } - } - - if dmLogger != nil { - dmLogger.DMLog(int(level), C.GoString(file), int(line), int(dmErrnoOrClass), msg) - } -} diff --git a/pkg/devicemapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go deleted file mode 100644 index 91fbc85b3a..0000000000 --- a/pkg/devicemapper/devmapper_wrapper.go +++ /dev/null @@ -1,251 +0,0 @@ -// +build linux - -package devicemapper - -/* -#cgo LDFLAGS: -L. -ldevmapper -#include <libdevmapper.h> -#include <linux/fs.h> // FIXME: present only for BLKGETSIZE64, maybe we can remove it? - -// FIXME: Can't we find a way to do the logging in pure Go? -extern void DevmapperLogCallback(int level, char *file, int line, int dm_errno_or_class, char *str); - -static void log_cb(int level, const char *file, int line, int dm_errno_or_class, const char *f, ...) -{ - char buffer[256]; - va_list ap; - - va_start(ap, f); - vsnprintf(buffer, 256, f, ap); - va_end(ap); - - DevmapperLogCallback(level, (char *)file, line, dm_errno_or_class, buffer); -} - -static void log_with_errno_init() -{ - dm_log_with_errno_init(log_cb); -} -*/ -import "C" - -import ( - "reflect" - "unsafe" -) - -type ( - cdmTask C.struct_dm_task -) - -// IOCTL consts -const ( - BlkGetSize64 = C.BLKGETSIZE64 - BlkDiscard = C.BLKDISCARD -) - -// Devicemapper cookie flags. -const ( - DmUdevDisableSubsystemRulesFlag = C.DM_UDEV_DISABLE_SUBSYSTEM_RULES_FLAG - DmUdevDisableDiskRulesFlag = C.DM_UDEV_DISABLE_DISK_RULES_FLAG - DmUdevDisableOtherRulesFlag = C.DM_UDEV_DISABLE_OTHER_RULES_FLAG - DmUdevDisableLibraryFallback = C.DM_UDEV_DISABLE_LIBRARY_FALLBACK -) - -// DeviceMapper mapped functions.
-var ( - DmGetLibraryVersion = dmGetLibraryVersionFct - DmGetNextTarget = dmGetNextTargetFct - DmLogInitVerbose = dmLogInitVerboseFct - DmSetDevDir = dmSetDevDirFct - DmTaskAddTarget = dmTaskAddTargetFct - DmTaskCreate = dmTaskCreateFct - DmTaskDestroy = dmTaskDestroyFct - DmTaskGetDeps = dmTaskGetDepsFct - DmTaskGetInfo = dmTaskGetInfoFct - DmTaskGetDriverVersion = dmTaskGetDriverVersionFct - DmTaskRun = dmTaskRunFct - DmTaskSetAddNode = dmTaskSetAddNodeFct - DmTaskSetCookie = dmTaskSetCookieFct - DmTaskSetMessage = dmTaskSetMessageFct - DmTaskSetName = dmTaskSetNameFct - DmTaskSetRo = dmTaskSetRoFct - DmTaskSetSector = dmTaskSetSectorFct - DmUdevWait = dmUdevWaitFct - DmUdevSetSyncSupport = dmUdevSetSyncSupportFct - DmUdevGetSyncSupport = dmUdevGetSyncSupportFct - DmCookieSupported = dmCookieSupportedFct - LogWithErrnoInit = logWithErrnoInitFct - DmTaskDeferredRemove = dmTaskDeferredRemoveFct - DmTaskGetInfoWithDeferred = dmTaskGetInfoWithDeferredFct -) - -func free(p *C.char) { - C.free(unsafe.Pointer(p)) -} - -func dmTaskDestroyFct(task *cdmTask) { - C.dm_task_destroy((*C.struct_dm_task)(task)) -} - -func dmTaskCreateFct(taskType int) *cdmTask { - return (*cdmTask)(C.dm_task_create(C.int(taskType))) -} - -func dmTaskRunFct(task *cdmTask) int { - ret, _ := C.dm_task_run((*C.struct_dm_task)(task)) - return int(ret) -} - -func dmTaskSetNameFct(task *cdmTask, name string) int { - Cname := C.CString(name) - defer free(Cname) - - return int(C.dm_task_set_name((*C.struct_dm_task)(task), Cname)) -} - -func dmTaskSetMessageFct(task *cdmTask, message string) int { - Cmessage := C.CString(message) - defer free(Cmessage) - - return int(C.dm_task_set_message((*C.struct_dm_task)(task), Cmessage)) -} - -func dmTaskSetSectorFct(task *cdmTask, sector uint64) int { - return int(C.dm_task_set_sector((*C.struct_dm_task)(task), C.uint64_t(sector))) -} - -func dmTaskSetCookieFct(task *cdmTask, cookie *uint, flags uint16) int { - cCookie := C.uint32_t(*cookie) - defer func() { - *cookie = uint(cCookie) - }() - return int(C.dm_task_set_cookie((*C.struct_dm_task)(task), &cCookie, C.uint16_t(flags))) -} - -func dmTaskSetAddNodeFct(task *cdmTask, addNode AddNodeType) int { - return int(C.dm_task_set_add_node((*C.struct_dm_task)(task), C.dm_add_node_t(addNode))) -} - -func dmTaskSetRoFct(task *cdmTask) int { - return int(C.dm_task_set_ro((*C.struct_dm_task)(task))) -} - -func dmTaskAddTargetFct(task *cdmTask, - start, size uint64, ttype, params string) int { - - Cttype := C.CString(ttype) - defer free(Cttype) - - Cparams := C.CString(params) - defer free(Cparams) - - return int(C.dm_task_add_target((*C.struct_dm_task)(task), C.uint64_t(start), C.uint64_t(size), Cttype, Cparams)) -} - -func dmTaskGetDepsFct(task *cdmTask) *Deps { - Cdeps := C.dm_task_get_deps((*C.struct_dm_task)(task)) - if Cdeps == nil { - return nil - } - - // golang issue: https://github.com/golang/go/issues/11925 - hdr := reflect.SliceHeader{ - Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))), - Len: int(Cdeps.count), - Cap: int(Cdeps.count), - } - devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr)) - - deps := &Deps{ - Count: uint32(Cdeps.count), - Filler: uint32(Cdeps.filler), - } - for _, device := range devices { - deps.Device = append(deps.Device, uint64(device)) - } - return deps -} - -func dmTaskGetInfoFct(task *cdmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - 
info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} - -func dmTaskGetDriverVersionFct(task *cdmTask) string { - buffer := C.malloc(128) - defer C.free(buffer) - res := C.dm_task_get_driver_version((*C.struct_dm_task)(task), (*C.char)(buffer), 128) - if res == 0 { - return "" - } - return C.GoString((*C.char)(buffer)) -} - -func dmGetNextTargetFct(task *cdmTask, next unsafe.Pointer, start, length *uint64, target, params *string) unsafe.Pointer { - var ( - Cstart, Clength C.uint64_t - CtargetType, Cparams *C.char - ) - defer func() { - *start = uint64(Cstart) - *length = uint64(Clength) - *target = C.GoString(CtargetType) - *params = C.GoString(Cparams) - }() - - nextp := C.dm_get_next_target((*C.struct_dm_task)(task), next, &Cstart, &Clength, &CtargetType, &Cparams) - return nextp -} - -func dmUdevSetSyncSupportFct(syncWithUdev int) { - (C.dm_udev_set_sync_support(C.int(syncWithUdev))) -} - -func dmUdevGetSyncSupportFct() int { - return int(C.dm_udev_get_sync_support()) -} - -func dmUdevWaitFct(cookie uint) int { - return int(C.dm_udev_wait(C.uint32_t(cookie))) -} - -func dmCookieSupportedFct() int { - return int(C.dm_cookie_supported()) -} - -func dmLogInitVerboseFct(level int) { - C.dm_log_init_verbose(C.int(level)) -} - -func logWithErrnoInitFct() { - C.log_with_errno_init() -} - -func dmSetDevDirFct(dir string) int { - Cdir := C.CString(dir) - defer free(Cdir) - - return int(C.dm_set_dev_dir(Cdir)) -} - -func dmGetLibraryVersionFct(version *string) int { - buffer := C.CString(string(make([]byte, 128))) - defer free(buffer) - defer func() { - *version = C.GoString(buffer) - }() - return int(C.dm_get_library_version(buffer, 128)) -} diff --git a/pkg/devicemapper/devmapper_wrapper_deferred_remove.go b/pkg/devicemapper/devmapper_wrapper_deferred_remove.go deleted file mode 100644 index dc361eab76..0000000000 --- a/pkg/devicemapper/devmapper_wrapper_deferred_remove.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build linux,!libdm_no_deferred_remove - -package devicemapper - -/* -#cgo LDFLAGS: -L. -ldevmapper -#include <libdevmapper.h> -*/ -import "C" - -// LibraryDeferredRemovalSupport is supported when statically linked.
-const LibraryDeferredRemovalSupport = true - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - return int(C.dm_task_deferred_remove((*C.struct_dm_task)(task))) -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - Cinfo := C.struct_dm_info{} - defer func() { - info.Exists = int(Cinfo.exists) - info.Suspended = int(Cinfo.suspended) - info.LiveTable = int(Cinfo.live_table) - info.InactiveTable = int(Cinfo.inactive_table) - info.OpenCount = int32(Cinfo.open_count) - info.EventNr = uint32(Cinfo.event_nr) - info.Major = uint32(Cinfo.major) - info.Minor = uint32(Cinfo.minor) - info.ReadOnly = int(Cinfo.read_only) - info.TargetCount = int32(Cinfo.target_count) - info.DeferredRemove = int(Cinfo.deferred_remove) - }() - return int(C.dm_task_get_info((*C.struct_dm_task)(task), &Cinfo)) -} diff --git a/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go b/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go deleted file mode 100644 index 4a6665de86..0000000000 --- a/pkg/devicemapper/devmapper_wrapper_no_deferred_remove.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build linux,libdm_no_deferred_remove - -package devicemapper - -// LibraryDeferredRemovalSupport is not supported when statically linked. -const LibraryDeferredRemovalSupport = false - -func dmTaskDeferredRemoveFct(task *cdmTask) int { - // Error. Nobody should be calling it. - return -1 -} - -func dmTaskGetInfoWithDeferredFct(task *cdmTask, info *Info) int { - return -1 -} diff --git a/pkg/devicemapper/ioctl.go b/pkg/devicemapper/ioctl.go deleted file mode 100644 index 581b57eb86..0000000000 --- a/pkg/devicemapper/ioctl.go +++ /dev/null @@ -1,27 +0,0 @@ -// +build linux - -package devicemapper - -import ( - "syscall" - "unsafe" -) - -func ioctlBlkGetSize64(fd uintptr) (int64, error) { - var size int64 - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkGetSize64, uintptr(unsafe.Pointer(&size))); err != 0 { - return 0, err - } - return size, nil -} - -func ioctlBlkDiscard(fd uintptr, offset, length uint64) error { - var r [2]uint64 - r[0] = offset - r[1] = length - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, BlkDiscard, uintptr(unsafe.Pointer(&r[0]))); err != 0 { - return err - } - return nil -} diff --git a/pkg/devicemapper/log.go b/pkg/devicemapper/log.go deleted file mode 100644 index cee5e54549..0000000000 --- a/pkg/devicemapper/log.go +++ /dev/null @@ -1,11 +0,0 @@ -package devicemapper - -// definitions from lvm2 lib/log/log.h -const ( - LogLevelFatal = 2 + iota // _LOG_FATAL - LogLevelErr // _LOG_ERR - LogLevelWarn // _LOG_WARN - LogLevelNotice // _LOG_NOTICE - LogLevelInfo // _LOG_INFO - LogLevelDebug // _LOG_DEBUG -) diff --git a/pkg/directory/directory.go b/pkg/directory/directory.go deleted file mode 100644 index 1715ef45d9..0000000000 --- a/pkg/directory/directory.go +++ /dev/null @@ -1,26 +0,0 @@ -package directory - -import ( - "io/ioutil" - "os" - "path/filepath" -) - -// MoveToSubdir moves all contents of a directory to a subdirectory underneath the original path -func MoveToSubdir(oldpath, subdir string) error { - - infos, err := ioutil.ReadDir(oldpath) - if err != nil { - return err - } - for _, info := range infos { - if info.Name() != subdir { - oldName := filepath.Join(oldpath, info.Name()) - newName := filepath.Join(oldpath, subdir, info.Name()) - if err := os.Rename(oldName, newName); err != nil { - return err - } - } - } - return nil -} diff --git a/pkg/directory/directory_test.go b/pkg/directory/directory_test.go deleted file mode 100644 index 2b7a4657be..0000000000
--- a/pkg/directory/directory_test.go +++ /dev/null @@ -1,192 +0,0 @@ -package directory - -import ( - "io/ioutil" - "os" - "path/filepath" - "reflect" - "sort" - "testing" -) - -// Size of an empty directory should be 0 -func TestSizeEmpty(t *testing.T) { - var dir string - var err error - if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyDirectory"); err != nil { - t.Fatalf("failed to create directory: %s", err) - } - - var size int64 - if size, _ = Size(dir); size != 0 { - t.Fatalf("empty directory has size: %d", size) - } -} - -// Size of a directory with one empty file should be 0 -func TestSizeEmptyFile(t *testing.T) { - var dir string - var err error - if dir, err = ioutil.TempDir(os.TempDir(), "testSizeEmptyFile"); err != nil { - t.Fatalf("failed to create directory: %s", err) - } - - var file *os.File - if file, err = ioutil.TempFile(dir, "file"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - var size int64 - if size, _ = Size(file.Name()); size != 0 { - t.Fatalf("directory with one file has size: %d", size) - } -} - -// Size of a directory with one 5-byte file should be 5 -func TestSizeNonemptyFile(t *testing.T) { - var dir string - var err error - if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNonemptyFile"); err != nil { - t.Fatalf("failed to create directory: %s", err) - } - - var file *os.File - if file, err = ioutil.TempFile(dir, "file"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - d := []byte{97, 98, 99, 100, 101} - file.Write(d) - - var size int64 - if size, _ = Size(file.Name()); size != 5 { - t.Fatalf("directory with one 5-byte file has size: %d", size) - } -} - -// Size of a directory with one empty directory should be 0 -func TestSizeNestedDirectoryEmpty(t *testing.T) { - var dir string - var err error - if dir, err = ioutil.TempDir(os.TempDir(), "testSizeNestedDirectoryEmpty"); err != nil { - t.Fatalf("failed to create directory: %s", err) - } - if dir, err = ioutil.TempDir(dir, "nested"); err != nil { - t.Fatalf("failed to create nested directory: %s", err) - } - - var size int64 - if size, _ = Size(dir); size != 0 { - t.Fatalf("directory with one empty directory has size: %d", size) - } -} - -// Test directory with 1 file and 1 empty directory -func TestSizeFileAndNestedDirectoryEmpty(t *testing.T) { - var dir string - var err error - if dir, err = ioutil.TempDir(os.TempDir(), "testSizeFileAndNestedDirectoryEmpty"); err != nil { - t.Fatalf("failed to create directory: %s", err) - } - if dir, err = ioutil.TempDir(dir, "nested"); err != nil { - t.Fatalf("failed to create nested directory: %s", err) - } - - var file *os.File - if file, err = ioutil.TempFile(dir, "file"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - d := []byte{100, 111, 99, 107, 101, 114} - file.Write(d) - - var size int64 - if size, _ = Size(dir); size != 6 { - t.Fatalf("directory with 6-byte file and empty directory has size: %d", size) - } -} - -// Test directory with 1 file and 1 non-empty directory -func TestSizeFileAndNestedDirectoryNonempty(t *testing.T) { - var dir, dirNested string - var err error - if dir, err = ioutil.TempDir(os.TempDir(), "TestSizeFileAndNestedDirectoryNonempty"); err != nil { - t.Fatalf("failed to create directory: %s", err) - } - if dirNested, err = ioutil.TempDir(dir, "nested"); err != nil { - t.Fatalf("failed to create nested directory: %s", err) - } - - var file *os.File - if file, err = ioutil.TempFile(dir, "file"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - data := 
[]byte{100, 111, 99, 107, 101, 114} - file.Write(data) - - var nestedFile *os.File - if nestedFile, err = ioutil.TempFile(dirNested, "file"); err != nil { - t.Fatalf("failed to create file in nested directory: %s", err) - } - - nestedData := []byte{100, 111, 99, 107, 101, 114} - nestedFile.Write(nestedData) - - var size int64 - if size, _ = Size(dir); size != 12 { - t.Fatalf("directory with 6-byte file and nested directory with 6-byte file has size: %d", size) - } -} - -// Test migration of directory to a subdir underneath itself -func TestMoveToSubdir(t *testing.T) { - var outerDir, subDir string - var err error - - if outerDir, err = ioutil.TempDir(os.TempDir(), "TestMoveToSubdir"); err != nil { - t.Fatalf("failed to create directory: %v", err) - } - - if subDir, err = ioutil.TempDir(outerDir, "testSub"); err != nil { - t.Fatalf("failed to create subdirectory: %v", err) - } - - // write 4 temp files in the outer dir to get moved - filesList := []string{"a", "b", "c", "d"} - for _, fName := range filesList { - if file, err := os.Create(filepath.Join(outerDir, fName)); err != nil { - t.Fatalf("couldn't create temp file %q: %v", fName, err) - } else { - file.WriteString(fName) - file.Close() - } - } - - if err = MoveToSubdir(outerDir, filepath.Base(subDir)); err != nil { - t.Fatalf("Error during migration of content to subdirectory: %v", err) - } - // validate that the files were moved to the subdirectory - infos, err := ioutil.ReadDir(subDir) - if err != nil { - t.Fatal(err) - } - if len(infos) != 4 { - t.Fatalf("Should be four files in the subdir after the migration: actual length: %d", len(infos)) - } - var results []string - for _, info := range infos { - results = append(results, info.Name()) - } - sort.Sort(sort.StringSlice(results)) - if !reflect.DeepEqual(filesList, results) { - t.Fatalf("Results after migration do not equal list of files: expected: %v, got: %v", filesList, results) - } -} - -// Test a non-existing directory -func TestSizeNonExistingDirectory(t *testing.T) { - if _, err := Size("/thisdirectoryshouldnotexist/TestSizeNonExistingDirectory"); err == nil { - t.Fatalf("error is expected") - } -} diff --git a/pkg/directory/directory_unix.go b/pkg/directory/directory_unix.go deleted file mode 100644 index 397251bdb8..0000000000 --- a/pkg/directory/directory_unix.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build linux freebsd solaris - -package directory - -import ( - "os" - "path/filepath" - "syscall" -) - -// Size walks a directory tree and returns its total size in bytes. -func Size(dir string) (size int64, err error) { - data := make(map[uint64]struct{}) - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { - if err != nil { - // if dir does not exist, Size() returns the error. - // if dir/x disappeared while walking, Size() ignores dir/x. - if os.IsNotExist(err) && d != dir { - return nil - } - return err - } - - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - // Check inode to handle hard links correctly - inode := fileInfo.Sys().(*syscall.Stat_t).Ino - // inode is not a uint64 on all platforms. Cast it to avoid issues. - if _, exists := data[uint64(inode)]; exists { - return nil - } - // inode is not a uint64 on all platforms. Cast it to avoid issues. 
- data[uint64(inode)] = struct{}{} - - size += s - - return nil - }) - return -} diff --git a/pkg/directory/directory_windows.go b/pkg/directory/directory_windows.go deleted file mode 100644 index 6fb0917c4c..0000000000 --- a/pkg/directory/directory_windows.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build windows - -package directory - -import ( - "os" - "path/filepath" -) - -// Size walks a directory tree and returns its total size in bytes. -func Size(dir string) (size int64, err error) { - err = filepath.Walk(dir, func(d string, fileInfo os.FileInfo, err error) error { - if err != nil { - // if dir does not exist, Size() returns the error. - // if dir/x disappeared while walking, Size() ignores dir/x. - if os.IsNotExist(err) && d != dir { - return nil - } - return err - } - - // Ignore directory sizes - if fileInfo == nil { - return nil - } - - s := fileInfo.Size() - if fileInfo.IsDir() || s == 0 { - return nil - } - - size += s - - return nil - }) - return -} diff --git a/pkg/discovery/README.md b/pkg/discovery/README.md deleted file mode 100644 index 39777c2171..0000000000 --- a/pkg/discovery/README.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -page_title: Docker discovery -page_description: discovery -page_keywords: docker, clustering, discovery ---- - -# Discovery - -Docker comes with multiple Discovery backends. - -## Backends - -### Using etcd - -Point your Docker Engine instances to a common etcd instance. You can specify -the address Docker uses to advertise the node using the `--cluster-advertise` -flag. - -```bash -$ docker daemon -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store etcd://<etcd_ip1>,<etcd_ip2>/<path> -``` - -### Using consul - -Point your Docker Engine instances to a common Consul instance. You can specify -the address Docker uses to advertise the node using the `--cluster-advertise` -flag. - -```bash -$ docker daemon -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store consul://<consul_addr>/<path> -``` - -### Using zookeeper - -Point your Docker Engine instances to a common Zookeeper instance. You can specify -the address Docker uses to advertise the node using the `--cluster-advertise` -flag. - -```bash -$ docker daemon -H=<node_ip:2376> --cluster-advertise=<node_ip:2376> --cluster-store zk://<zk_addr1>,<zk_addr2>/<path> -``` diff --git a/pkg/discovery/backends.go b/pkg/discovery/backends.go deleted file mode 100644 index edfa4fd3a8..0000000000 --- a/pkg/discovery/backends.go +++ /dev/null @@ -1,107 +0,0 @@ -package discovery - -import ( - "fmt" - "net" - "strings" - "time" - - log "github.com/Sirupsen/logrus" -) - -var ( - // Backends is a global map of discovery backends indexed by their - // associated scheme. - backends = make(map[string]Backend) -) - -// Register makes a discovery backend available by the provided scheme. -// If Register is called twice with the same scheme an error is returned.
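A sketch of how a custom backend would use the Register function below, mirroring the init pattern the file and kv backends use later in this patch (the "static" scheme and the StaticDiscovery type are hypothetical):

```go
func init() {
	// StaticDiscovery (hypothetical) must implement discovery.Backend.
	if err := discovery.Register("static", &StaticDiscovery{}); err != nil {
		log.Error(err)
	}
}
```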
-func Register(scheme string, d Backend) error { - if _, exists := backends[scheme]; exists { - return fmt.Errorf("scheme already registered %s", scheme) - } - log.WithField("name", scheme).Debug("Registering discovery service") - backends[scheme] = d - return nil -} - -func parse(rawurl string) (string, string) { - parts := strings.SplitN(rawurl, "://", 2) - - // node1:port,node2:port => nodes://node1:port,node2:port - if len(parts) == 1 { - return "nodes", parts[0] - } - return parts[0], parts[1] -} - -// ParseAdvertise parses the --cluster-advertise daemon config, which accepts -// <ip-address>:<port> or <interface-name>:<port> -func ParseAdvertise(advertise string) (string, error) { - var ( - iface *net.Interface - addrs []net.Addr - err error - ) - - addr, port, err := net.SplitHostPort(advertise) - - if err != nil { - return "", fmt.Errorf("invalid --cluster-advertise configuration: %s: %v", advertise, err) - } - - ip := net.ParseIP(addr) - // If it is a valid ip-address, use it as is - if ip != nil { - return advertise, nil - } - - // If advertise is a valid interface name, get the valid IPv4 address and use it to advertise - ifaceName := addr - iface, err = net.InterfaceByName(ifaceName) - if err != nil { - return "", fmt.Errorf("invalid cluster advertise IP address or interface name (%s) : %v", advertise, err) - } - - addrs, err = iface.Addrs() - if err != nil { - return "", fmt.Errorf("unable to get advertise IP address from interface (%s) : %v", advertise, err) - } - - if len(addrs) == 0 { - return "", fmt.Errorf("no available advertise IP address in interface (%s)", advertise) - } - - addr = "" - for _, a := range addrs { - ip, _, err := net.ParseCIDR(a.String()) - if err != nil { - return "", fmt.Errorf("error deriving advertise ip-address in interface (%s) : %v", advertise, err) - } - if ip.To4() == nil || ip.IsLoopback() { - continue - } - addr = ip.String() - break - } - if addr == "" { - return "", fmt.Errorf("couldn't find a valid ip-address in interface %s", advertise) - } - - addr = net.JoinHostPort(addr, port) - return addr, nil -} - -// New returns a new Discovery given a URL, heartbeat and ttl settings. -// Returns an error if the URL scheme is not supported. -func New(rawurl string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) (Backend, error) { - scheme, uri := parse(rawurl) - if backend, exists := backends[scheme]; exists { - log.WithFields(log.Fields{"name": scheme, "uri": uri}).Debug("Initializing discovery service") - err := backend.Initialize(uri, heartbeat, ttl, clusterOpts) - return backend, err - } - - return nil, ErrNotSupported -} diff --git a/pkg/discovery/discovery.go b/pkg/discovery/discovery.go deleted file mode 100644 index ca7f587458..0000000000 --- a/pkg/discovery/discovery.go +++ /dev/null @@ -1,35 +0,0 @@ -package discovery - -import ( - "errors" - "time" -) - -var ( - // ErrNotSupported is returned when a discovery service is not supported. - ErrNotSupported = errors.New("discovery service not supported") - - // ErrNotImplemented is returned when a discovery feature is not implemented - // by the discovery backend. - ErrNotImplemented = errors.New("not implemented in this discovery service") -) - -// Watcher provides watching over a cluster for nodes joining and leaving. -type Watcher interface { - // Watch the discovery for entry changes. - // Returns a channel that will receive changes or an error. - // Providing a non-nil stopCh can be used to stop watching.
- Watch(stopCh <-chan struct{}) (<-chan Entries, <-chan error) -} - -// Backend is implemented by discovery backends which manage cluster entries. -type Backend interface { - // Watcher must be provided by every backend. - Watcher - - // Initialize the discovery with URIs, a heartbeat, a ttl and optional settings. - Initialize(string, time.Duration, time.Duration, map[string]string) error - - // Register to the discovery. - Register(string) error -} diff --git a/pkg/discovery/discovery_test.go b/pkg/discovery/discovery_test.go deleted file mode 100644 index 6084f3ef0d..0000000000 --- a/pkg/discovery/discovery_test.go +++ /dev/null @@ -1,137 +0,0 @@ -package discovery - -import ( - "testing" - - "github.com/go-check/check" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -type DiscoverySuite struct{} - -var _ = check.Suite(&DiscoverySuite{}) - -func (s *DiscoverySuite) TestNewEntry(c *check.C) { - entry, err := NewEntry("127.0.0.1:2375") - c.Assert(err, check.IsNil) - c.Assert(entry.Equals(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) - c.Assert(entry.String(), check.Equals, "127.0.0.1:2375") - - entry, err = NewEntry("[2001:db8:0:f101::2]:2375") - c.Assert(err, check.IsNil) - c.Assert(entry.Equals(&Entry{Host: "2001:db8:0:f101::2", Port: "2375"}), check.Equals, true) - c.Assert(entry.String(), check.Equals, "[2001:db8:0:f101::2]:2375") - - _, err = NewEntry("127.0.0.1") - c.Assert(err, check.NotNil) -} - -func (s *DiscoverySuite) TestParse(c *check.C) { - scheme, uri := parse("127.0.0.1:2375") - c.Assert(scheme, check.Equals, "nodes") - c.Assert(uri, check.Equals, "127.0.0.1:2375") - - scheme, uri = parse("localhost:2375") - c.Assert(scheme, check.Equals, "nodes") - c.Assert(uri, check.Equals, "localhost:2375") - - scheme, uri = parse("scheme://127.0.0.1:2375") - c.Assert(scheme, check.Equals, "scheme") - c.Assert(uri, check.Equals, "127.0.0.1:2375") - - scheme, uri = parse("scheme://localhost:2375") - c.Assert(scheme, check.Equals, "scheme") - c.Assert(uri, check.Equals, "localhost:2375") - - scheme, uri = parse("") - c.Assert(scheme, check.Equals, "nodes") - c.Assert(uri, check.Equals, "") -} - -func (s *DiscoverySuite) TestCreateEntries(c *check.C) { - entries, err := CreateEntries(nil) - c.Assert(entries, check.DeepEquals, Entries{}) - c.Assert(err, check.IsNil) - - entries, err = CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", "[2001:db8:0:f101::2]:2375", ""}) - c.Assert(err, check.IsNil) - expected := Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - &Entry{Host: "2001:db8:0:f101::2", Port: "2375"}, - } - c.Assert(entries.Equals(expected), check.Equals, true) - - _, err = CreateEntries([]string{"127.0.0.1", "127.0.0.2"}) - c.Assert(err, check.NotNil) -} - -func (s *DiscoverySuite) TestContainsEntry(c *check.C) { - entries, err := CreateEntries([]string{"127.0.0.1:2375", "127.0.0.2:2375", ""}) - c.Assert(err, check.IsNil) - c.Assert(entries.Contains(&Entry{Host: "127.0.0.1", Port: "2375"}), check.Equals, true) - c.Assert(entries.Contains(&Entry{Host: "127.0.0.3", Port: "2375"}), check.Equals, false) -} - -func (s *DiscoverySuite) TestEntriesEquality(c *check.C) { - entries := Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - } - - // Same - c.Assert(entries.Equals(Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - }), check. 
- Equals, true) - - // Different size - c.Assert(entries.Equals(Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.2", Port: "2375"}, - &Entry{Host: "127.0.0.3", Port: "2375"}, - }), check. - Equals, false) - - // Different content - c.Assert(entries.Equals(Entries{ - &Entry{Host: "127.0.0.1", Port: "2375"}, - &Entry{Host: "127.0.0.42", Port: "2375"}, - }), check. - Equals, false) - -} - -func (s *DiscoverySuite) TestEntriesDiff(c *check.C) { - entry1 := &Entry{Host: "1.1.1.1", Port: "1111"} - entry2 := &Entry{Host: "2.2.2.2", Port: "2222"} - entry3 := &Entry{Host: "3.3.3.3", Port: "3333"} - entries := Entries{entry1, entry2} - - // No diff - added, removed := entries.Diff(Entries{entry2, entry1}) - c.Assert(added, check.HasLen, 0) - c.Assert(removed, check.HasLen, 0) - - // Add - added, removed = entries.Diff(Entries{entry2, entry3, entry1}) - c.Assert(added, check.HasLen, 1) - c.Assert(added.Contains(entry3), check.Equals, true) - c.Assert(removed, check.HasLen, 0) - - // Remove - added, removed = entries.Diff(Entries{entry2}) - c.Assert(added, check.HasLen, 0) - c.Assert(removed, check.HasLen, 1) - c.Assert(removed.Contains(entry1), check.Equals, true) - - // Add and remove - added, removed = entries.Diff(Entries{entry1, entry3}) - c.Assert(added, check.HasLen, 1) - c.Assert(added.Contains(entry3), check.Equals, true) - c.Assert(removed, check.HasLen, 1) - c.Assert(removed.Contains(entry2), check.Equals, true) -} diff --git a/pkg/discovery/entry.go b/pkg/discovery/entry.go deleted file mode 100644 index ce23bbf89b..0000000000 --- a/pkg/discovery/entry.go +++ /dev/null @@ -1,94 +0,0 @@ -package discovery - -import "net" - -// NewEntry creates a new entry. -func NewEntry(url string) (*Entry, error) { - host, port, err := net.SplitHostPort(url) - if err != nil { - return nil, err - } - return &Entry{host, port}, nil -} - -// An Entry represents a host. -type Entry struct { - Host string - Port string -} - -// Equals returns true if cmp contains the same data. -func (e *Entry) Equals(cmp *Entry) bool { - return e.Host == cmp.Host && e.Port == cmp.Port -} - -// String returns the string form of an entry. -func (e *Entry) String() string { - return net.JoinHostPort(e.Host, e.Port) -} - -// Entries is a list of *Entry with some helpers. -type Entries []*Entry - -// Equals returns true if cmp contains the same data. -func (e Entries) Equals(cmp Entries) bool { - // Check if the file has really changed. - if len(e) != len(cmp) { - return false - } - for i := range e { - if !e[i].Equals(cmp[i]) { - return false - } - } - return true -} - -// Contains returns true if the Entries contain a given Entry. -func (e Entries) Contains(entry *Entry) bool { - for _, curr := range e { - if curr.Equals(entry) { - return true - } - } - return false -} - -// Diff compares two entries and returns the added and removed entries. -func (e Entries) Diff(cmp Entries) (Entries, Entries) { - added := Entries{} - for _, entry := range cmp { - if !e.Contains(entry) { - added = append(added, entry) - } - } - - removed := Entries{} - for _, entry := range e { - if !cmp.Contains(entry) { - removed = append(removed, entry) - } - } - - return added, removed -} - -// CreateEntries returns an array of entries based on the given addresses. 
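A sketch of CreateEntries (defined below) together with the Diff helper above; the addresses are illustrative and error handling is elided:

```go
func diffExample() {
	oldEntries, _ := discovery.CreateEntries([]string{"10.0.0.1:2375", "10.0.0.2:2375"})
	curEntries, _ := discovery.CreateEntries([]string{"10.0.0.2:2375", "10.0.0.3:2375"})

	// Diff is relative to the receiver: added holds 10.0.0.3:2375,
	// removed holds 10.0.0.1:2375.
	added, removed := oldEntries.Diff(curEntries)
	fmt.Println(added, removed)
}
```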
-func CreateEntries(addrs []string) (Entries, error) { - entries := Entries{} - if addrs == nil { - return entries, nil - } - - for _, addr := range addrs { - if len(addr) == 0 { - continue - } - entry, err := NewEntry(addr) - if err != nil { - return nil, err - } - entries = append(entries, entry) - } - return entries, nil -} diff --git a/pkg/discovery/file/file.go b/pkg/discovery/file/file.go deleted file mode 100644 index b4f870b864..0000000000 --- a/pkg/discovery/file/file.go +++ /dev/null @@ -1,109 +0,0 @@ -package file - -import ( - "fmt" - "io/ioutil" - "strings" - "time" - - "github.com/docker/docker/pkg/discovery" -) - -// Discovery is exported -type Discovery struct { - heartbeat time.Duration - path string -} - -func init() { - Init() -} - -// Init is exported -func Init() { - discovery.Register("file", &Discovery{}) -} - -// Initialize is exported -func (s *Discovery) Initialize(path string, heartbeat time.Duration, ttl time.Duration, _ map[string]string) error { - s.path = path - s.heartbeat = heartbeat - return nil -} - -func parseFileContent(content []byte) []string { - var result []string - for _, line := range strings.Split(strings.TrimSpace(string(content)), "\n") { - line = strings.TrimSpace(line) - // Ignore lines that start with # - if strings.HasPrefix(line, "#") { - continue - } - // Inline # comments are also ignored. - if strings.Contains(line, "#") { - line = line[0:strings.Index(line, "#")] - // Trim additional spaces caused by above stripping. - line = strings.TrimSpace(line) - } - for _, ip := range discovery.Generate(line) { - result = append(result, ip) - } - } - return result -} - -func (s *Discovery) fetch() (discovery.Entries, error) { - fileContent, err := ioutil.ReadFile(s.path) - if err != nil { - return nil, fmt.Errorf("failed to read '%s': %v", s.path, err) - } - return discovery.CreateEntries(parseFileContent(fileContent)) -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - errCh := make(chan error) - ticker := time.NewTicker(s.heartbeat) - - go func() { - defer close(errCh) - defer close(ch) - - // Send the initial entries if available. - currentEntries, err := s.fetch() - if err != nil { - errCh <- err - } else { - ch <- currentEntries - } - - // Periodically send updates. - for { - select { - case <-ticker.C: - newEntries, err := s.fetch() - if err != nil { - errCh <- err - continue - } - - // Check if the file has really changed. - if !newEntries.Equals(currentEntries) { - ch <- newEntries - } - currentEntries = newEntries - case <-stopCh: - ticker.Stop() - return - } - } - }() - - return ch, errCh -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - return discovery.ErrNotImplemented -} diff --git a/pkg/discovery/file/file_test.go b/pkg/discovery/file/file_test.go deleted file mode 100644 index 667f00ba0d..0000000000 --- a/pkg/discovery/file/file_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package file - -import ( - "io/ioutil" - "os" - "testing" - - "github.com/docker/docker/pkg/discovery" - - "github.com/go-check/check" -) - -// Hook up gocheck into the "go test" runner.
-func Test(t *testing.T) { check.TestingT(t) } - -type DiscoverySuite struct{} - -var _ = check.Suite(&DiscoverySuite{}) - -func (s *DiscoverySuite) TestInitialize(c *check.C) { - d := &Discovery{} - d.Initialize("/path/to/file", 1000, 0, nil) - c.Assert(d.path, check.Equals, "/path/to/file") -} - -func (s *DiscoverySuite) TestNew(c *check.C) { - d, err := discovery.New("file:///path/to/file", 0, 0, nil) - c.Assert(err, check.IsNil) - c.Assert(d.(*Discovery).path, check.Equals, "/path/to/file") -} - -func (s *DiscoverySuite) TestContent(c *check.C) { - data := ` -1.1.1.[1:2]:1111 -2.2.2.[2:4]:2222 -` - ips := parseFileContent([]byte(data)) - c.Assert(ips, check.HasLen, 5) - c.Assert(ips[0], check.Equals, "1.1.1.1:1111") - c.Assert(ips[1], check.Equals, "1.1.1.2:1111") - c.Assert(ips[2], check.Equals, "2.2.2.2:2222") - c.Assert(ips[3], check.Equals, "2.2.2.3:2222") - c.Assert(ips[4], check.Equals, "2.2.2.4:2222") -} - -func (s *DiscoverySuite) TestRegister(c *check.C) { - discovery := &Discovery{path: "/path/to/file"} - c.Assert(discovery.Register("0.0.0.0"), check.NotNil) -} - -func (s *DiscoverySuite) TestParsingContentsWithComments(c *check.C) { - data := ` -### test ### -1.1.1.1:1111 # inline comment -# 2.2.2.2:2222 - ### empty line with comment - 3.3.3.3:3333 -### test ### -` - ips := parseFileContent([]byte(data)) - c.Assert(ips, check.HasLen, 2) - c.Assert("1.1.1.1:1111", check.Equals, ips[0]) - c.Assert("3.3.3.3:3333", check.Equals, ips[1]) -} - -func (s *DiscoverySuite) TestWatch(c *check.C) { - data := ` -1.1.1.1:1111 -2.2.2.2:2222 -` - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - - // Create a temporary file and remove it. - tmp, err := ioutil.TempFile(os.TempDir(), "discovery-file-test") - c.Assert(err, check.IsNil) - c.Assert(tmp.Close(), check.IsNil) - c.Assert(os.Remove(tmp.Name()), check.IsNil) - - // Set up file discovery. - d := &Discovery{} - d.Initialize(tmp.Name(), 1000, 0, nil) - stopCh := make(chan struct{}) - ch, errCh := d.Watch(stopCh) - - // Make sure it fires errors since the file doesn't exist. - c.Assert(<-errCh, check.NotNil) - // We have to drain the error channel otherwise Watch will get stuck. - go func() { - for range errCh { - } - }() - - // Write the file and make sure we get the expected value back. - c.Assert(ioutil.WriteFile(tmp.Name(), []byte(data), 0600), check.IsNil) - c.Assert(<-ch, check.DeepEquals, expected) - - // Add a new entry and look it up. - expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) - f, err := os.OpenFile(tmp.Name(), os.O_APPEND|os.O_WRONLY, 0600) - c.Assert(err, check.IsNil) - c.Assert(f, check.NotNil) - _, err = f.WriteString("\n3.3.3.3:3333\n") - c.Assert(err, check.IsNil) - f.Close() - c.Assert(<-ch, check.DeepEquals, expected) - - // Stop and make sure it closes all channels. 
- close(stopCh) - c.Assert(<-ch, check.IsNil) - c.Assert(<-errCh, check.IsNil) -} diff --git a/pkg/discovery/generator.go b/pkg/discovery/generator.go deleted file mode 100644 index d22298298f..0000000000 --- a/pkg/discovery/generator.go +++ /dev/null @@ -1,35 +0,0 @@ -package discovery - -import ( - "fmt" - "regexp" - "strconv" -) - -// Generate takes care of IP generation -func Generate(pattern string) []string { - re, _ := regexp.Compile(`\[(.+):(.+)\]`) - submatch := re.FindStringSubmatch(pattern) - if submatch == nil { - return []string{pattern} - } - - from, err := strconv.Atoi(submatch[1]) - if err != nil { - return []string{pattern} - } - to, err := strconv.Atoi(submatch[2]) - if err != nil { - return []string{pattern} - } - - template := re.ReplaceAllString(pattern, "%d") - - var result []string - for val := from; val <= to; val++ { - entry := fmt.Sprintf(template, val) - result = append(result, entry) - } - - return result -} diff --git a/pkg/discovery/generator_test.go b/pkg/discovery/generator_test.go deleted file mode 100644 index 6281c46665..0000000000 --- a/pkg/discovery/generator_test.go +++ /dev/null @@ -1,53 +0,0 @@ -package discovery - -import ( - "github.com/go-check/check" -) - -func (s *DiscoverySuite) TestGeneratorNotGenerate(c *check.C) { - ips := Generate("127.0.0.1") - c.Assert(len(ips), check.Equals, 1) - c.Assert(ips[0], check.Equals, "127.0.0.1") -} - -func (s *DiscoverySuite) TestGeneratorWithPortNotGenerate(c *check.C) { - ips := Generate("127.0.0.1:8080") - c.Assert(len(ips), check.Equals, 1) - c.Assert(ips[0], check.Equals, "127.0.0.1:8080") -} - -func (s *DiscoverySuite) TestGeneratorMatchFailedNotGenerate(c *check.C) { - ips := Generate("127.0.0.[1]") - c.Assert(len(ips), check.Equals, 1) - c.Assert(ips[0], check.Equals, "127.0.0.[1]") -} - -func (s *DiscoverySuite) TestGeneratorWithPort(c *check.C) { - ips := Generate("127.0.0.[1:11]:2375") - c.Assert(len(ips), check.Equals, 11) - c.Assert(ips[0], check.Equals, "127.0.0.1:2375") - c.Assert(ips[1], check.Equals, "127.0.0.2:2375") - c.Assert(ips[2], check.Equals, "127.0.0.3:2375") - c.Assert(ips[3], check.Equals, "127.0.0.4:2375") - c.Assert(ips[4], check.Equals, "127.0.0.5:2375") - c.Assert(ips[5], check.Equals, "127.0.0.6:2375") - c.Assert(ips[6], check.Equals, "127.0.0.7:2375") - c.Assert(ips[7], check.Equals, "127.0.0.8:2375") - c.Assert(ips[8], check.Equals, "127.0.0.9:2375") - c.Assert(ips[9], check.Equals, "127.0.0.10:2375") - c.Assert(ips[10], check.Equals, "127.0.0.11:2375") -} - -func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeStart(c *check.C) { - malformedInput := "127.0.0.[x:11]:2375" - ips := Generate(malformedInput) - c.Assert(len(ips), check.Equals, 1) - c.Assert(ips[0], check.Equals, malformedInput) -} - -func (s *DiscoverySuite) TestGenerateWithMalformedInputAtRangeEnd(c *check.C) { - malformedInput := "127.0.0.[1:x]:2375" - ips := Generate(malformedInput) - c.Assert(len(ips), check.Equals, 1) - c.Assert(ips[0], check.Equals, malformedInput) -} diff --git a/pkg/discovery/kv/kv.go b/pkg/discovery/kv/kv.go deleted file mode 100644 index f371c0cba0..0000000000 --- a/pkg/discovery/kv/kv.go +++ /dev/null @@ -1,192 +0,0 @@ -package kv - -import ( - "fmt" - "path" - "strings" - "time" - - log "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/discovery" - "github.com/docker/go-connections/tlsconfig" - "github.com/docker/libkv" - "github.com/docker/libkv/store" - "github.com/docker/libkv/store/consul" - "github.com/docker/libkv/store/etcd" - 
"github.com/docker/libkv/store/zookeeper" -) - -const ( - defaultDiscoveryPath = "docker/nodes" -) - -// Discovery is exported -type Discovery struct { - backend store.Backend - store store.Store - heartbeat time.Duration - ttl time.Duration - prefix string - path string -} - -func init() { - Init() -} - -// Init is exported -func Init() { - // Register to libkv - zookeeper.Register() - consul.Register() - etcd.Register() - - // Register to internal discovery service - discovery.Register("zk", &Discovery{backend: store.ZK}) - discovery.Register("consul", &Discovery{backend: store.CONSUL}) - discovery.Register("etcd", &Discovery{backend: store.ETCD}) -} - -// Initialize is exported -func (s *Discovery) Initialize(uris string, heartbeat time.Duration, ttl time.Duration, clusterOpts map[string]string) error { - var ( - parts = strings.SplitN(uris, "/", 2) - addrs = strings.Split(parts[0], ",") - err error - ) - - // A custom prefix to the path can be optionally used. - if len(parts) == 2 { - s.prefix = parts[1] - } - - s.heartbeat = heartbeat - s.ttl = ttl - - // Use a custom path if specified in discovery options - dpath := defaultDiscoveryPath - if clusterOpts["kv.path"] != "" { - dpath = clusterOpts["kv.path"] - } - - s.path = path.Join(s.prefix, dpath) - - var config *store.Config - if clusterOpts["kv.cacertfile"] != "" && clusterOpts["kv.certfile"] != "" && clusterOpts["kv.keyfile"] != "" { - log.Info("Initializing discovery with TLS") - tlsConfig, err := tlsconfig.Client(tlsconfig.Options{ - CAFile: clusterOpts["kv.cacertfile"], - CertFile: clusterOpts["kv.certfile"], - KeyFile: clusterOpts["kv.keyfile"], - }) - if err != nil { - return err - } - config = &store.Config{ - // Set ClientTLS to trigger https (bug in libkv/etcd) - ClientTLS: &store.ClientTLSConfig{ - CACertFile: clusterOpts["kv.cacertfile"], - CertFile: clusterOpts["kv.certfile"], - KeyFile: clusterOpts["kv.keyfile"], - }, - // The actual TLS config that will be used - TLS: tlsConfig, - } - } else { - log.Info("Initializing discovery without TLS") - } - - // Creates a new store, will ignore options given - // if not supported by the chosen store - s.store, err = libkv.NewStore(s.backend, addrs, config) - return err -} - -// Watch the store until either there's a store error or we receive a stop request. -// Returns false if we shouldn't attempt watching the store anymore (stop request received). -func (s *Discovery) watchOnce(stopCh <-chan struct{}, watchCh <-chan []*store.KVPair, discoveryCh chan discovery.Entries, errCh chan error) bool { - for { - select { - case pairs := <-watchCh: - if pairs == nil { - return true - } - - log.WithField("discovery", s.backend).Debugf("Watch triggered with %d nodes", len(pairs)) - - // Convert `KVPair` into `discovery.Entry`. - addrs := make([]string, len(pairs)) - for _, pair := range pairs { - addrs = append(addrs, string(pair.Value)) - } - - entries, err := discovery.CreateEntries(addrs) - if err != nil { - errCh <- err - } else { - discoveryCh <- entries - } - case <-stopCh: - // We were requested to stop watching. - return false - } - } -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - errCh := make(chan error) - - go func() { - defer close(ch) - defer close(errCh) - - // Forever: Create a store watch, watch until we get an error and then try again. - // Will only stop if we receive a stopCh request. 
- for { - // Create the path to watch if it does not exist yet - exists, err := s.store.Exists(s.path) - if err != nil { - errCh <- err - } - if !exists { - if err := s.store.Put(s.path, []byte(""), &store.WriteOptions{IsDir: true}); err != nil { - errCh <- err - } - } - - // Set up a watch. - watchCh, err := s.store.WatchTree(s.path, stopCh) - if err != nil { - errCh <- err - } else { - if !s.watchOnce(stopCh, watchCh, ch, errCh) { - return - } - } - - // If we get here it means the store watch channel was closed. This - // is unexpected so let's retry later. - errCh <- fmt.Errorf("Unexpected watch error") - time.Sleep(s.heartbeat) - } - }() - return ch, errCh -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - opts := &store.WriteOptions{TTL: s.ttl} - return s.store.Put(path.Join(s.path, addr), []byte(addr), opts) -} - -// Store returns the underlying store used by KV discovery. -func (s *Discovery) Store() store.Store { - return s.store -} - -// Prefix returns the store prefix -func (s *Discovery) Prefix() string { - return s.prefix -} diff --git a/pkg/discovery/kv/kv_test.go b/pkg/discovery/kv/kv_test.go deleted file mode 100644 index dab3939dd0..0000000000 --- a/pkg/discovery/kv/kv_test.go +++ /dev/null @@ -1,324 +0,0 @@ -package kv - -import ( - "errors" - "io/ioutil" - "os" - "path" - "testing" - "time" - - "github.com/docker/docker/pkg/discovery" - "github.com/docker/libkv" - "github.com/docker/libkv/store" - - "github.com/go-check/check" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -type DiscoverySuite struct{} - -var _ = check.Suite(&DiscoverySuite{}) - -func (ds *DiscoverySuite) TestInitialize(c *check.C) { - storeMock := &FakeStore{ - Endpoints: []string{"127.0.0.1"}, - } - d := &Discovery{backend: store.CONSUL} - d.Initialize("127.0.0.1", 0, 0, nil) - d.store = storeMock - - s := d.store.(*FakeStore) - c.Assert(s.Endpoints, check.HasLen, 1) - c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1") - c.Assert(d.path, check.Equals, defaultDiscoveryPath) - - storeMock = &FakeStore{ - Endpoints: []string{"127.0.0.1:1234"}, - } - d = &Discovery{backend: store.CONSUL} - d.Initialize("127.0.0.1:1234/path", 0, 0, nil) - d.store = storeMock - - s = d.store.(*FakeStore) - c.Assert(s.Endpoints, check.HasLen, 1) - c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") - c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) - - storeMock = &FakeStore{ - Endpoints: []string{"127.0.0.1:1234", "127.0.0.2:1234", "127.0.0.3:1234"}, - } - d = &Discovery{backend: store.CONSUL} - d.Initialize("127.0.0.1:1234,127.0.0.2:1234,127.0.0.3:1234/path", 0, 0, nil) - d.store = storeMock - - s = d.store.(*FakeStore) - c.Assert(s.Endpoints, check.HasLen, 3) - c.Assert(s.Endpoints[0], check.Equals, "127.0.0.1:1234") - c.Assert(s.Endpoints[1], check.Equals, "127.0.0.2:1234") - c.Assert(s.Endpoints[2], check.Equals, "127.0.0.3:1234") - - c.Assert(d.path, check.Equals, "path/"+defaultDiscoveryPath) -} - -// Extremely limited mock store so we can test initialization -type Mock struct { - // Endpoints passed to InitializeMock - Endpoints []string - - // Options passed to InitializeMock - Options *store.Config -} - -func NewMock(endpoints []string, options *store.Config) (store.Store, error) { - s := &Mock{} - s.Endpoints = endpoints - s.Options = options - return s, nil -} -func (s *Mock) Put(key string, value []byte, opts *store.WriteOptions) error { - return errors.New("Put not supported") -} -func (s *Mock) Get(key 
string) (*store.KVPair, error) { - return nil, errors.New("Get not supported") -} -func (s *Mock) Delete(key string) error { - return errors.New("Delete not supported") -} - -// Exists mock -func (s *Mock) Exists(key string) (bool, error) { - return false, errors.New("Exists not supported") -} - -// Watch mock -func (s *Mock) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { - return nil, errors.New("Watch not supported") -} - -// WatchTree mock -func (s *Mock) WatchTree(prefix string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { - return nil, errors.New("WatchTree not supported") -} - -// NewLock mock -func (s *Mock) NewLock(key string, options *store.LockOptions) (store.Locker, error) { - return nil, errors.New("NewLock not supported") -} - -// List mock -func (s *Mock) List(prefix string) ([]*store.KVPair, error) { - return nil, errors.New("List not supported") -} - -// DeleteTree mock -func (s *Mock) DeleteTree(prefix string) error { - return errors.New("DeleteTree not supported") -} - -// AtomicPut mock -func (s *Mock) AtomicPut(key string, value []byte, previous *store.KVPair, opts *store.WriteOptions) (bool, *store.KVPair, error) { - return false, nil, errors.New("AtomicPut not supported") -} - -// AtomicDelete mock -func (s *Mock) AtomicDelete(key string, previous *store.KVPair) (bool, error) { - return false, errors.New("AtomicDelete not supported") -} - -// Close mock -func (s *Mock) Close() { - return -} - -func (ds *DiscoverySuite) TestInitializeWithCerts(c *check.C) { - cert := `-----BEGIN CERTIFICATE----- -MIIDCDCCAfKgAwIBAgIICifG7YeiQOEwCwYJKoZIhvcNAQELMBIxEDAOBgNVBAMT -B1Rlc3QgQ0EwHhcNMTUxMDAxMjMwMDAwWhcNMjAwOTI5MjMwMDAwWjASMRAwDgYD -VQQDEwdUZXN0IENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1wRC -O+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4+zE9h80aC4hz+6caRpds -+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhRSoSi3nY+B7F2E8cuz14q -V2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZrpXUyXxAvzXfpFXo1RhSb -UywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUerVYrCPq8vqfn//01qz55 -Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHojxOpXTBepUCIJLbtNnWFT -V44t9gh5IqIWtoBReQIDAQABo2YwZDAOBgNVHQ8BAf8EBAMCAAYwEgYDVR0TAQH/ -BAgwBgEB/wIBAjAdBgNVHQ4EFgQUZKUI8IIjIww7X/6hvwggQK4bD24wHwYDVR0j -BBgwFoAUZKUI8IIjIww7X/6hvwggQK4bD24wCwYJKoZIhvcNAQELA4IBAQDES2cz -7sCQfDCxCIWH7X8kpi/JWExzUyQEJ0rBzN1m3/x8ySRxtXyGekimBqQwQdFqlwMI -xzAQKkh3ue8tNSzRbwqMSyH14N1KrSxYS9e9szJHfUasoTpQGPmDmGIoRJuq1h6M -ej5x1SCJ7GWCR6xEXKUIE9OftXm9TdFzWa7Ja3OHz/mXteii8VXDuZ5ACq6EE5bY -8sP4gcICfJ5fTrpTlk9FIqEWWQrCGa5wk95PGEj+GJpNogjXQ97wVoo/Y3p1brEn -t5zjN9PAq4H1fuCMdNNA+p1DHNwd+ELTxcMAnb2ajwHvV6lKPXutrTFc4umJToBX -FpTxDmJHEV4bzUzh ------END CERTIFICATE----- -` - key := `-----BEGIN RSA PRIVATE KEY----- -MIIEpQIBAAKCAQEA1wRCO+flnLTK5ImjTurNRHwSejuqGbc4CAvpB0hS+z0QlSs4 -+zE9h80aC4hz+6caRpds+J908Q+RvAittMHbpc7VjbZP72G6fiXk7yPPl6C10HhR -SoSi3nY+B7F2E8cuz14qV2e+ejhWhSrBb/keyXpcyjoW1BOAAJ2TIclRRkICSCZr -pXUyXxAvzXfpFXo1RhSbUywN11pfiCQzDUN7sPww9UzFHuAHZHoyfTr27XnJYVUe -rVYrCPq8vqfn//01qz55Xs0hvzGdlTFXhuabFtQnKFH5SNwo/fcznhB7rePOwHoj -xOpXTBepUCIJLbtNnWFTV44t9gh5IqIWtoBReQIDAQABAoIBAHSWipORGp/uKFXj -i/mut776x8ofsAxhnLBARQr93ID+i49W8H7EJGkOfaDjTICYC1dbpGrri61qk8sx -qX7p3v/5NzKwOIfEpirgwVIqSNYe/ncbxnhxkx6tXtUtFKmEx40JskvSpSYAhmmO -1XSx0E/PWaEN/nLgX/f1eWJIlxlQkk3QeqL+FGbCXI48DEtlJ9+MzMu4pAwZTpj5 -5qtXo5JJ0jRGfJVPAOznRsYqv864AhMdMIWguzk6EGnbaCWwPcfcn+h9a5LMdony -MDHfBS7bb5tkF3+AfnVY3IBMVx7YlsD9eAyajlgiKu4zLbwTRHjXgShy+4Oussz0 -ugNGnkECgYEA/hi+McrZC8C4gg6XqK8+9joD8tnyDZDz88BQB7CZqABUSwvjDqlP 
-L8hcwo/lzvjBNYGkqaFPUICGWKjeCtd8pPS2DCVXxDQX4aHF1vUur0uYNncJiV3N -XQz4Iemsa6wnKf6M67b5vMXICw7dw0HZCdIHD1hnhdtDz0uVpeevLZ8CgYEA2KCT -Y43lorjrbCgMqtlefkr3GJA9dey+hTzCiWEOOqn9RqGoEGUday0sKhiLofOgmN2B -LEukpKIey8s+Q/cb6lReajDVPDsMweX8i7hz3Wa4Ugp4Xa5BpHqu8qIAE2JUZ7bU -t88aQAYE58pUF+/Lq1QzAQdrjjzQBx6SrBxieecCgYEAvukoPZEC8mmiN1VvbTX+ -QFHmlZha3QaDxChB+QUe7bMRojEUL/fVnzkTOLuVFqSfxevaI/km9n0ac5KtAchV -xjp2bTnBb5EUQFqjopYktWA+xO07JRJtMfSEmjZPbbay1kKC7rdTfBm961EIHaRj -xZUf6M+rOE8964oGrdgdLlECgYEA046GQmx6fh7/82FtdZDRQp9tj3SWQUtSiQZc -qhO59Lq8mjUXz+MgBuJXxkiwXRpzlbaFB0Bca1fUoYw8o915SrDYf/Zu2OKGQ/qa -V81sgiVmDuEgycR7YOlbX6OsVUHrUlpwhY3hgfMe6UtkMvhBvHF/WhroBEIJm1pV -PXZ/CbMCgYEApNWVktFBjOaYfY6SNn4iSts1jgsQbbpglg3kT7PLKjCAhI6lNsbk -dyT7ut01PL6RaW4SeQWtrJIVQaM6vF3pprMKqlc5XihOGAmVqH7rQx9rtQB5TicL -BFrwkQE4HQtQBV60hYQUzzlSk44VFDz+jxIEtacRHaomDRh2FtOTz+I= ------END RSA PRIVATE KEY----- -` - certFile, err := ioutil.TempFile("", "cert") - c.Assert(err, check.IsNil) - defer os.Remove(certFile.Name()) - certFile.Write([]byte(cert)) - certFile.Close() - keyFile, err := ioutil.TempFile("", "key") - c.Assert(err, check.IsNil) - defer os.Remove(keyFile.Name()) - keyFile.Write([]byte(key)) - keyFile.Close() - - libkv.AddStore("mock", NewMock) - d := &Discovery{backend: "mock"} - err = d.Initialize("127.0.0.3:1234", 0, 0, map[string]string{ - "kv.cacertfile": certFile.Name(), - "kv.certfile": certFile.Name(), - "kv.keyfile": keyFile.Name(), - }) - c.Assert(err, check.IsNil) - s := d.store.(*Mock) - c.Assert(s.Options.TLS, check.NotNil) - c.Assert(s.Options.TLS.RootCAs, check.NotNil) - c.Assert(s.Options.TLS.Certificates, check.HasLen, 1) -} - -func (ds *DiscoverySuite) TestWatch(c *check.C) { - mockCh := make(chan []*store.KVPair) - - storeMock := &FakeStore{ - Endpoints: []string{"127.0.0.1:1234"}, - mockKVChan: mockCh, - } - - d := &Discovery{backend: store.CONSUL} - d.Initialize("127.0.0.1:1234/path", 0, 0, nil) - d.store = storeMock - - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - kvs := []*store.KVPair{ - {Key: path.Join("path", defaultDiscoveryPath, "1.1.1.1"), Value: []byte("1.1.1.1:1111")}, - {Key: path.Join("path", defaultDiscoveryPath, "2.2.2.2"), Value: []byte("2.2.2.2:2222")}, - } - - stopCh := make(chan struct{}) - ch, errCh := d.Watch(stopCh) - - // It should fire an error since the first WatchTree call failed. - c.Assert(<-errCh, check.ErrorMatches, "test error") - // We have to drain the error channel otherwise Watch will get stuck. - go func() { - for range errCh { - } - }() - - // Push the entries into the store channel and make sure discovery emits. - mockCh <- kvs - c.Assert(<-ch, check.DeepEquals, expected) - - // Add a new entry. - expected = append(expected, &discovery.Entry{Host: "3.3.3.3", Port: "3333"}) - kvs = append(kvs, &store.KVPair{Key: path.Join("path", defaultDiscoveryPath, "3.3.3.3"), Value: []byte("3.3.3.3:3333")}) - mockCh <- kvs - c.Assert(<-ch, check.DeepEquals, expected) - - close(mockCh) - // Give it enough time to call WatchTree. - time.Sleep(3 * time.Second) - - // Stop and make sure it closes all channels. - close(stopCh) - c.Assert(<-ch, check.IsNil) - c.Assert(<-errCh, check.IsNil) -} - -// FakeStore implements store.Store methods. It mocks all store -// function in a simple, naive way. 
-type FakeStore struct { - Endpoints []string - Options *store.Config - mockKVChan <-chan []*store.KVPair - - watchTreeCallCount int -} - -func (s *FakeStore) Put(key string, value []byte, options *store.WriteOptions) error { - return nil -} - -func (s *FakeStore) Get(key string) (*store.KVPair, error) { - return nil, nil -} - -func (s *FakeStore) Delete(key string) error { - return nil -} - -func (s *FakeStore) Exists(key string) (bool, error) { - return true, nil -} - -func (s *FakeStore) Watch(key string, stopCh <-chan struct{}) (<-chan *store.KVPair, error) { - return nil, nil -} - -// WatchTree fails the first time and returns the mockKVChan afterwards. -// This is the behavior the tests need; update it if they ever need more. -func (s *FakeStore) WatchTree(directory string, stopCh <-chan struct{}) (<-chan []*store.KVPair, error) { - if s.watchTreeCallCount == 0 { - s.watchTreeCallCount = 1 - return nil, errors.New("test error") - } - // Subsequent calls return the mock channel. - return s.mockKVChan, nil -} - -func (s *FakeStore) NewLock(key string, options *store.LockOptions) (store.Locker, error) { - return nil, nil -} - -func (s *FakeStore) List(directory string) ([]*store.KVPair, error) { - return []*store.KVPair{}, nil -} - -func (s *FakeStore) DeleteTree(directory string) error { - return nil -} - -func (s *FakeStore) AtomicPut(key string, value []byte, previous *store.KVPair, options *store.WriteOptions) (bool, *store.KVPair, error) { - return true, nil, nil -} - -func (s *FakeStore) AtomicDelete(key string, previous *store.KVPair) (bool, error) { - return true, nil -} - -func (s *FakeStore) Close() { -} diff --git a/pkg/discovery/memory/memory.go b/pkg/discovery/memory/memory.go deleted file mode 100644 index ba8b1f55f3..0000000000 --- a/pkg/discovery/memory/memory.go +++ /dev/null @@ -1,93 +0,0 @@ -package memory - -import ( - "sync" - "time" - - "github.com/docker/docker/pkg/discovery" -) - -// Discovery implements a discovery backend that keeps -// data in memory. -type Discovery struct { - heartbeat time.Duration - values []string - mu sync.Mutex -} - -func init() { - Init() -} - -// Init registers the memory backend on demand. -func Init() { - discovery.Register("memory", &Discovery{}) -} - -// Initialize sets the heartbeat for the memory backend. -func (s *Discovery) Initialize(_ string, heartbeat time.Duration, _ time.Duration, _ map[string]string) error { - s.heartbeat = heartbeat - s.values = make([]string, 0) - return nil -} - -// Watch sends periodic discovery updates to a channel. -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - errCh := make(chan error) - ticker := time.NewTicker(s.heartbeat) - - go func() { - defer close(errCh) - defer close(ch) - - // Send the initial entries if available. - var currentEntries discovery.Entries - var err error - - s.mu.Lock() - if len(s.values) > 0 { - currentEntries, err = discovery.CreateEntries(s.values) - } - s.mu.Unlock() - - if err != nil { - errCh <- err - } else if currentEntries != nil { - ch <- currentEntries - } - - // Periodically send updates. - for { - select { - case <-ticker.C: - s.mu.Lock() - newEntries, err := discovery.CreateEntries(s.values) - s.mu.Unlock() - if err != nil { - errCh <- err - continue - } - - // Check if the entries have really changed.
- if !newEntries.Equals(currentEntries) { - ch <- newEntries - } - currentEntries = newEntries - case <-stopCh: - ticker.Stop() - return - } - } - }() - - return ch, errCh -} - -// Register adds a new address to the discovery. -func (s *Discovery) Register(addr string) error { - s.mu.Lock() - s.values = append(s.values, addr) - s.mu.Unlock() - return nil -} diff --git a/pkg/discovery/memory/memory_test.go b/pkg/discovery/memory/memory_test.go deleted file mode 100644 index c2da0a068e..0000000000 --- a/pkg/discovery/memory/memory_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package memory - -import ( - "testing" - - "github.com/docker/docker/pkg/discovery" - "github.com/go-check/check" -) - -// Hook up gocheck into the "go test" runner. -func Test(t *testing.T) { check.TestingT(t) } - -type discoverySuite struct{} - -var _ = check.Suite(&discoverySuite{}) - -func (s *discoverySuite) TestWatch(c *check.C) { - d := &Discovery{} - d.Initialize("foo", 1000, 0, nil) - stopCh := make(chan struct{}) - ch, errCh := d.Watch(stopCh) - - // We have to drain the error channel otherwise Watch will get stuck. - go func() { - for range errCh { - } - }() - - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - } - - c.Assert(d.Register("1.1.1.1:1111"), check.IsNil) - c.Assert(<-ch, check.DeepEquals, expected) - - expected = discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - - c.Assert(d.Register("2.2.2.2:2222"), check.IsNil) - c.Assert(<-ch, check.DeepEquals, expected) - - // Stop and make sure it closes all channels. - close(stopCh) - c.Assert(<-ch, check.IsNil) - c.Assert(<-errCh, check.IsNil) -} diff --git a/pkg/discovery/nodes/nodes.go b/pkg/discovery/nodes/nodes.go deleted file mode 100644 index c0e3c07b22..0000000000 --- a/pkg/discovery/nodes/nodes.go +++ /dev/null @@ -1,54 +0,0 @@ -package nodes - -import ( - "fmt" - "strings" - "time" - - "github.com/docker/docker/pkg/discovery" -) - -// Discovery is exported -type Discovery struct { - entries discovery.Entries -} - -func init() { - Init() -} - -// Init is exported -func Init() { - discovery.Register("nodes", &Discovery{}) -} - -// Initialize is exported -func (s *Discovery) Initialize(uris string, _ time.Duration, _ time.Duration, _ map[string]string) error { - for _, input := range strings.Split(uris, ",") { - for _, ip := range discovery.Generate(input) { - entry, err := discovery.NewEntry(ip) - if err != nil { - return fmt.Errorf("%s, please check you are using the correct discovery (missing token:// ?)", err.Error()) - } - s.entries = append(s.entries, entry) - } - } - - return nil -} - -// Watch is exported -func (s *Discovery) Watch(stopCh <-chan struct{}) (<-chan discovery.Entries, <-chan error) { - ch := make(chan discovery.Entries) - go func() { - defer close(ch) - ch <- s.entries - <-stopCh - }() - return ch, nil -} - -// Register is exported -func (s *Discovery) Register(addr string) error { - return discovery.ErrNotImplemented -} diff --git a/pkg/discovery/nodes/nodes_test.go b/pkg/discovery/nodes/nodes_test.go deleted file mode 100644 index e26568cf54..0000000000 --- a/pkg/discovery/nodes/nodes_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package nodes - -import ( - "testing" - - "github.com/docker/docker/pkg/discovery" - - "github.com/go-check/check" -) - -// Hook up gocheck into the "go test" runner. 
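The nodes backend above is fully static: the URI is a comma-separated address list, and each element passes through the same discovery.Generate range expansion defined in generator.go. A small sketch of that expansion, mirroring the generator tests; the addresses are examples:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/discovery"
)

func main() {
	// A [start:end] range expands into one address per value.
	fmt.Println(discovery.Generate("10.0.0.[1:3]:2375"))
	// -> [10.0.0.1:2375 10.0.0.2:2375 10.0.0.3:2375]

	// Anything without a well-formed range is returned as-is.
	fmt.Println(discovery.Generate("10.0.0.1:2375"))
	// -> [10.0.0.1:2375]
}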
-func Test(t *testing.T) { check.TestingT(t) } - -type DiscoverySuite struct{} - -var _ = check.Suite(&DiscoverySuite{}) - -func (s *DiscoverySuite) TestInitialize(c *check.C) { - d := &Discovery{} - d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) - c.Assert(len(d.entries), check.Equals, 2) - c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") - c.Assert(d.entries[1].String(), check.Equals, "2.2.2.2:2222") -} - -func (s *DiscoverySuite) TestInitializeWithPattern(c *check.C) { - d := &Discovery{} - d.Initialize("1.1.1.[1:2]:1111,2.2.2.[2:4]:2222", 0, 0, nil) - c.Assert(len(d.entries), check.Equals, 5) - c.Assert(d.entries[0].String(), check.Equals, "1.1.1.1:1111") - c.Assert(d.entries[1].String(), check.Equals, "1.1.1.2:1111") - c.Assert(d.entries[2].String(), check.Equals, "2.2.2.2:2222") - c.Assert(d.entries[3].String(), check.Equals, "2.2.2.3:2222") - c.Assert(d.entries[4].String(), check.Equals, "2.2.2.4:2222") -} - -func (s *DiscoverySuite) TestWatch(c *check.C) { - d := &Discovery{} - d.Initialize("1.1.1.1:1111,2.2.2.2:2222", 0, 0, nil) - expected := discovery.Entries{ - &discovery.Entry{Host: "1.1.1.1", Port: "1111"}, - &discovery.Entry{Host: "2.2.2.2", Port: "2222"}, - } - ch, _ := d.Watch(nil) - c.Assert(expected.Equals(<-ch), check.Equals, true) -} - -func (s *DiscoverySuite) TestRegister(c *check.C) { - d := &Discovery{} - c.Assert(d.Register("0.0.0.0"), check.NotNil) -} diff --git a/pkg/filenotify/filenotify.go b/pkg/filenotify/filenotify.go deleted file mode 100644 index 23befae678..0000000000 --- a/pkg/filenotify/filenotify.go +++ /dev/null @@ -1,40 +0,0 @@ -// Package filenotify provides a mechanism for watching file(s) for changes. -// It generally leans on fsnotify, but also provides a poll-based notifier for cases where fsnotify cannot be used. -// These are wrapped up in a common interface so that either can be used interchangeably in your code.
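A hedged sketch of how a caller consumes the FileWatcher interface defined in the file below, letting New fall back from fsnotify to polling transparently; the watched path is hypothetical:

package main

import (
	"log"

	"github.com/docker/docker/pkg/filenotify"
)

func main() {
	w, err := filenotify.New() // prefers fsnotify, silently falls back to polling
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close()

	if err := w.Add("/var/log/app.log"); err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case e := <-w.Events():
			log.Printf("event %v on %s", e.Op, e.Name)
		case err := <-w.Errors():
			log.Printf("watch error: %v", err)
		}
	}
}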
-package filenotify - -import "gopkg.in/fsnotify.v1" - -// FileWatcher is an interface for implementing file notification watchers -type FileWatcher interface { - Events() <-chan fsnotify.Event - Errors() <-chan error - Add(name string) error - Remove(name string) error - Close() error -} - -// New tries to use an fs-event watcher, and falls back to the poller if there is an error -func New() (FileWatcher, error) { - if watcher, err := NewEventWatcher(); err == nil { - return watcher, nil - } - return NewPollingWatcher(), nil -} - -// NewPollingWatcher returns a poll-based file watcher -func NewPollingWatcher() FileWatcher { - return &filePoller{ - events: make(chan fsnotify.Event), - errors: make(chan error), - } -} - -// NewEventWatcher returns an fs-event based file watcher -func NewEventWatcher() (FileWatcher, error) { - watcher, err := fsnotify.NewWatcher() - if err != nil { - return nil, err - } - return &fsNotifyWatcher{watcher}, nil -} diff --git a/pkg/filenotify/fsnotify.go b/pkg/filenotify/fsnotify.go deleted file mode 100644 index 4203883585..0000000000 --- a/pkg/filenotify/fsnotify.go +++ /dev/null @@ -1,18 +0,0 @@ -package filenotify - -import "gopkg.in/fsnotify.v1" - -// fsNotifyWatcher wraps the fsnotify package to satisfy the FileWatcher interface -type fsNotifyWatcher struct { - *fsnotify.Watcher -} - -// Events returns the fsnotify event channel receiver -func (w *fsNotifyWatcher) Events() <-chan fsnotify.Event { - return w.Watcher.Events -} - -// Errors returns the fsnotify error channel receiver -func (w *fsNotifyWatcher) Errors() <-chan error { - return w.Watcher.Errors -} diff --git a/pkg/filenotify/poller.go b/pkg/filenotify/poller.go deleted file mode 100644 index 5261085346..0000000000 --- a/pkg/filenotify/poller.go +++ /dev/null @@ -1,204 +0,0 @@ -package filenotify - -import ( - "errors" - "fmt" - "os" - "sync" - "time" - - "github.com/Sirupsen/logrus" - - "gopkg.in/fsnotify.v1" -) - -var ( - // errPollerClosed is returned when the poller is closed - errPollerClosed = errors.New("poller is closed") - // errNoSuchWatch is returned when trying to remove a watch that doesn't exist - errNoSuchWatch = errors.New("watch does not exist") -) - -// watchWaitTime is the time to wait between file poll loops -const watchWaitTime = 200 * time.Millisecond - -// filePoller is used to poll files for changes, especially in cases where fsnotify -// can't be run (e.g.
when inotify handles are exhausted) -// filePoller satisfies the FileWatcher interface -type filePoller struct { - // watches is the list of files currently being polled, close the associated channel to stop the watch - watches map[string]chan struct{} - // events is the channel to listen to for watch events - events chan fsnotify.Event - // errors is the channel to listen to for watch errors - errors chan error - // mu locks the poller for modification - mu sync.Mutex - // closed is used to specify when the poller has already closed - closed bool -} - -// Add adds a filename to the list of watches -// once added the file is polled for changes in a separate goroutine -func (w *filePoller) Add(name string) error { - w.mu.Lock() - defer w.mu.Unlock() - - if w.closed { - return errPollerClosed - } - - f, err := os.Open(name) - if err != nil { - return err - } - fi, err := os.Stat(name) - if err != nil { - f.Close() // don't leak the handle if the stat fails - return err - } - - if w.watches == nil { - w.watches = make(map[string]chan struct{}) - } - if _, exists := w.watches[name]; exists { - f.Close() - return fmt.Errorf("watch exists") - } - chClose := make(chan struct{}) - w.watches[name] = chClose - - go w.watch(f, fi, chClose) - return nil -} - -// Remove stops and removes watch with the specified name -func (w *filePoller) Remove(name string) error { - w.mu.Lock() - defer w.mu.Unlock() - return w.remove(name) -} - -func (w *filePoller) remove(name string) error { - if w.closed { - return errPollerClosed - } - - chClose, exists := w.watches[name] - if !exists { - return errNoSuchWatch - } - close(chClose) - delete(w.watches, name) - return nil -} - -// Events returns the event channel -// This is used for notifications on events about watched files -func (w *filePoller) Events() <-chan fsnotify.Event { - return w.events -} - -// Errors returns the errors channel -// This is used for notifications about errors on watched files -func (w *filePoller) Errors() <-chan error { - return w.errors -} - -// Close closes the poller -// All watches are stopped, removed, and the poller cannot be added to -func (w *filePoller) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - - if w.closed { - return nil - } - - w.closed = true - // Close the watch channels directly: remove() refuses to run once - // w.closed is set, which previously leaked the polling goroutines. - for name, chClose := range w.watches { - close(chClose) - delete(w.watches, name) - } - return nil -} - -// sendEvent publishes the specified event to the events channel -func (w *filePoller) sendEvent(e fsnotify.Event, chClose <-chan struct{}) error { - select { - case w.events <- e: - case <-chClose: - return fmt.Errorf("closed") - } - return nil -} - -// sendErr publishes the specified error to the errors channel -func (w *filePoller) sendErr(e error, chClose <-chan struct{}) error { - select { - case w.errors <- e: - case <-chClose: - return fmt.Errorf("closed") - } - return nil -} - -// watch is responsible for polling the specified file for changes -// upon finding changes to a file or errors, sendEvent/sendErr is called -func (w *filePoller) watch(f *os.File, lastFi os.FileInfo, chClose chan struct{}) { - defer f.Close() - for { - time.Sleep(watchWaitTime) - select { - case <-chClose: - logrus.Debugf("watch for %s closed", f.Name()) - return - default: - } - - fi, err := os.Stat(f.Name()) - if err != nil { - // if we got an error here and lastFi is not set, we can presume that nothing has changed - // This should be safe: a stat is performed before `watch()` is called, and if that stat errors, `watch` is never called - if lastFi == nil { - continue - } - // If it doesn't exist at this point, it must have been removed - // no
need to send the error here since this is a valid operation - if os.IsNotExist(err) { - if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Remove, Name: f.Name()}, chClose); err != nil { - return - } - lastFi = nil - continue - } - // at this point, send the error - if err := w.sendErr(err, chClose); err != nil { - return - } - continue - } - - if lastFi == nil { - if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Create, Name: fi.Name()}, chClose); err != nil { - return - } - lastFi = fi - continue - } - - if fi.Mode() != lastFi.Mode() { - if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Chmod, Name: fi.Name()}, chClose); err != nil { - return - } - lastFi = fi - continue - } - - if fi.ModTime() != lastFi.ModTime() || fi.Size() != lastFi.Size() { - if err := w.sendEvent(fsnotify.Event{Op: fsnotify.Write, Name: fi.Name()}, chClose); err != nil { - return - } - lastFi = fi - continue - } - } -} diff --git a/pkg/filenotify/poller_test.go b/pkg/filenotify/poller_test.go deleted file mode 100644 index 4f5026237c..0000000000 --- a/pkg/filenotify/poller_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package filenotify - -import ( - "fmt" - "io/ioutil" - "os" - "runtime" - "testing" - "time" - - "gopkg.in/fsnotify.v1" -) - -func TestPollerAddRemove(t *testing.T) { - w := NewPollingWatcher() - - if err := w.Add("no-such-file"); err == nil { - t.Fatal("should have gotten error when adding a non-existent file") - } - if err := w.Remove("no-such-file"); err == nil { - t.Fatal("should have gotten error when removing non-existent watch") - } - - f, err := ioutil.TempFile("", "asdf") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(f.Name()) - - if err := w.Add(f.Name()); err != nil { - t.Fatal(err) - } - - if err := w.Remove(f.Name()); err != nil { - t.Fatal(err) - } -} - -func TestPollerEvent(t *testing.T) { - if runtime.GOOS == "windows" { - t.Skip("No chmod on Windows") - } - w := NewPollingWatcher() - - f, err := ioutil.TempFile("", "test-poller") - if err != nil { - t.Fatal("error creating temp file") - } - defer os.RemoveAll(f.Name()) - f.Close() - - if err := w.Add(f.Name()); err != nil { - t.Fatal(err) - } - - select { - case <-w.Events(): - t.Fatal("got event before anything happened") - case <-w.Errors(): - t.Fatal("got error before anything happened") - default: - } - - if err := ioutil.WriteFile(f.Name(), []byte("hello"), 0644); err != nil { - t.Fatal(err) - } - if err := assertEvent(w, fsnotify.Write); err != nil { - t.Fatal(err) - } - - if err := os.Chmod(f.Name(), 0600); err != nil { - t.Fatal(err) - } - if err := assertEvent(w, fsnotify.Chmod); err != nil { - t.Fatal(err) - } - - if err := os.Remove(f.Name()); err != nil { - t.Fatal(err) - } - if err := assertEvent(w, fsnotify.Remove); err != nil { - t.Fatal(err) - } -} - -func TestPollerClose(t *testing.T) { - w := NewPollingWatcher() - if err := w.Close(); err != nil { - t.Fatal(err) - } - // test double-close - if err := w.Close(); err != nil { - t.Fatal(err) - } - - f, err := ioutil.TempFile("", "asdf") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(f.Name()) - if err := w.Add(f.Name()); err == nil { - t.Fatal("should have gotten error adding watch for closed watcher") - } -} - -func assertEvent(w FileWatcher, eType fsnotify.Op) error { - var err error - select { - case e := <-w.Events(): - if e.Op != eType { - err = fmt.Errorf("got wrong event type, expected %q: %v", eType, e) - } - case e := <-w.Errors(): - err = fmt.Errorf("got unexpected error waiting for events %v: %v", eType, e) - case <-time.After(watchWaitTime *
3): - err = fmt.Errorf("timeout waiting for event %v", eType) - } - return err -} diff --git a/pkg/fileutils/fileutils.go b/pkg/fileutils/fileutils.go deleted file mode 100644 index c00a0cdee3..0000000000 --- a/pkg/fileutils/fileutils.go +++ /dev/null @@ -1,283 +0,0 @@ -package fileutils - -import ( - "errors" - "fmt" - "io" - "os" - "path/filepath" - "regexp" - "strings" - "text/scanner" - - "github.com/Sirupsen/logrus" -) - -// exclusion returns true if the specified pattern is an exclusion -func exclusion(pattern string) bool { - return pattern[0] == '!' -} - -// empty returns true if the specified pattern is empty -func empty(pattern string) bool { - return pattern == "" -} - -// CleanPatterns takes a slice of patterns returns a new -// slice of patterns cleaned with filepath.Clean, stripped -// of any empty patterns and lets the caller know whether the -// slice contains any exception patterns (prefixed with !). -func CleanPatterns(patterns []string) ([]string, [][]string, bool, error) { - // Loop over exclusion patterns and: - // 1. Clean them up. - // 2. Indicate whether we are dealing with any exception rules. - // 3. Error if we see a single exclusion marker on its own (!). - cleanedPatterns := []string{} - patternDirs := [][]string{} - exceptions := false - for _, pattern := range patterns { - // Eliminate leading and trailing whitespace. - pattern = strings.TrimSpace(pattern) - if empty(pattern) { - continue - } - if exclusion(pattern) { - if len(pattern) == 1 { - return nil, nil, false, errors.New("Illegal exclusion pattern: !") - } - exceptions = true - } - pattern = filepath.Clean(pattern) - cleanedPatterns = append(cleanedPatterns, pattern) - if exclusion(pattern) { - pattern = pattern[1:] - } - patternDirs = append(patternDirs, strings.Split(pattern, string(os.PathSeparator))) - } - - return cleanedPatterns, patternDirs, exceptions, nil -} - -// Matches returns true if file matches any of the patterns -// and isn't excluded by any of the subsequent patterns. -func Matches(file string, patterns []string) (bool, error) { - file = filepath.Clean(file) - - if file == "." { - // Don't let them exclude everything, kind of silly. - return false, nil - } - - patterns, patDirs, _, err := CleanPatterns(patterns) - if err != nil { - return false, err - } - - return OptimizedMatches(file, patterns, patDirs) -} - -// OptimizedMatches is basically the same as fileutils.Matches() but optimized for archive.go. -// It will assume that the inputs have been preprocessed and therefore the function -// doesn't need to do as much error checking and clean-up. This was done to avoid -// repeating these steps on each file being checked during the archive process. -// The more generic fileutils.Matches() can't make these assumptions. -func OptimizedMatches(file string, patterns []string, patDirs [][]string) (bool, error) { - matched := false - file = filepath.FromSlash(file) - parentPath := filepath.Dir(file) - parentPathDirs := strings.Split(parentPath, string(os.PathSeparator)) - - for i, pattern := range patterns { - negative := false - - if exclusion(pattern) { - negative = true - pattern = pattern[1:] - } - - match, err := regexpMatch(pattern, file) - if err != nil { - return false, fmt.Errorf("Error in pattern (%s): %s", pattern, err) - } - - if !match && parentPath != "." { - // Check to see if the pattern matches one of our parent dirs. 
- if len(patDirs[i]) <= len(parentPathDirs) { - match, _ = regexpMatch(strings.Join(patDirs[i], string(os.PathSeparator)), - strings.Join(parentPathDirs[:len(patDirs[i])], string(os.PathSeparator))) - } - } - - if match { - matched = !negative - } - } - - if matched { - logrus.Debugf("Skipping excluded path: %s", file) - } - - return matched, nil -} - -// regexpMatch tries to match the logic of filepath.Match but -// does so using regexp logic. We do this so that we can expand the -// wildcard set to include other things, like "**" to mean any number -// of directories. This means that we should be backwards compatible -// with filepath.Match(). We'll end up supporting more stuff, due to -// the fact that we're using regexp, but that's ok - it does no harm. -// -// As per the comment in golangs filepath.Match, on Windows, escaping -// is disabled. Instead, '\\' is treated as path separator. -func regexpMatch(pattern, path string) (bool, error) { - regStr := "^" - - // Do some syntax checking on the pattern. - // filepath's Match() has some really weird rules that are inconsistent - // so instead of trying to dup their logic, just call Match() for its - // error state and if there is an error in the pattern return it. - // If this becomes an issue we can remove this since its really only - // needed in the error (syntax) case - which isn't really critical. - if _, err := filepath.Match(pattern, path); err != nil { - return false, err - } - - // Go through the pattern and convert it to a regexp. - // We use a scanner so we can support utf-8 chars. - var scan scanner.Scanner - scan.Init(strings.NewReader(pattern)) - - sl := string(os.PathSeparator) - escSL := sl - if sl == `\` { - escSL += `\` - } - - for scan.Peek() != scanner.EOF { - ch := scan.Next() - - if ch == '*' { - if scan.Peek() == '*' { - // is some flavor of "**" - scan.Next() - - if scan.Peek() == scanner.EOF { - // is "**EOF" - to align with .gitignore just accept all - regStr += ".*" - } else { - // is "**" - regStr += "((.*" + escSL + ")|([^" + escSL + "]*))" - } - - // Treat **/ as ** so eat the "/" - if string(scan.Peek()) == sl { - scan.Next() - } - } else { - // is "*" so map it to anything but "/" - regStr += "[^" + escSL + "]*" - } - } else if ch == '?' { - // "?" is any char except "/" - regStr += "[^" + escSL + "]" - } else if strings.Index(".$", string(ch)) != -1 { - // Escape some regexp special chars that have no meaning - // in golang's filepath.Match - regStr += `\` + string(ch) - } else if ch == '\\' { - // escape next char. Note that a trailing \ in the pattern - // will be left alone (but need to escape it) - if sl == `\` { - // On windows map "\" to "\\", meaning an escaped backslash, - // and then just continue because filepath.Match on - // Windows doesn't allow escaping at all - regStr += escSL - continue - } - if scan.Peek() != scanner.EOF { - regStr += `\` + string(scan.Next()) - } else { - regStr += `\` - } - } else { - regStr += string(ch) - } - } - - regStr += "$" - - res, err := regexp.MatchString(regStr, path) - - // Map regexp's error to filepath's so no one knows we're not using filepath - if err != nil { - err = filepath.ErrBadPattern - } - - return res, err -} - -// CopyFile copies from src to dst until either EOF is reached -// on src or an error occurs. It verifies src exists and removes -// the dst if it exists. 
-func CopyFile(src, dst string) (int64, error) { - cleanSrc := filepath.Clean(src) - cleanDst := filepath.Clean(dst) - if cleanSrc == cleanDst { - return 0, nil - } - sf, err := os.Open(cleanSrc) - if err != nil { - return 0, err - } - defer sf.Close() - if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) { - return 0, err - } - df, err := os.Create(cleanDst) - if err != nil { - return 0, err - } - defer df.Close() - return io.Copy(df, sf) -} - -// ReadSymlinkedDirectory returns the target directory of a symlink. -// The target of the symbolic link may not be a file. -func ReadSymlinkedDirectory(path string) (string, error) { - var realPath string - var err error - if realPath, err = filepath.Abs(path); err != nil { - return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err) - } - if realPath, err = filepath.EvalSymlinks(realPath); err != nil { - return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err) - } - realPathInfo, err := os.Stat(realPath) - if err != nil { - return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err) - } - if !realPathInfo.Mode().IsDir() { - return "", fmt.Errorf("canonical path points to a file '%s'", realPath) - } - return realPath, nil -} - -// CreateIfNotExists creates a file or a directory only if it does not already exist. -func CreateIfNotExists(path string, isDir bool) error { - if _, err := os.Stat(path); err != nil { - if os.IsNotExist(err) { - if isDir { - return os.MkdirAll(path, 0755) - } - if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil { - return err - } - f, err := os.OpenFile(path, os.O_CREATE, 0755) - if err != nil { - return err - } - f.Close() - } - } - return nil -} diff --git a/pkg/fileutils/fileutils_solaris.go b/pkg/fileutils/fileutils_solaris.go deleted file mode 100644 index 0f2cb7ab93..0000000000 --- a/pkg/fileutils/fileutils_solaris.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. 
-// On Solaris these limits are per process and not systemwide -func GetTotalUsedFds() int { - return -1 -} diff --git a/pkg/fileutils/fileutils_test.go b/pkg/fileutils/fileutils_test.go deleted file mode 100644 index 6df1be89bb..0000000000 --- a/pkg/fileutils/fileutils_test.go +++ /dev/null @@ -1,585 +0,0 @@ -package fileutils - -import ( - "io/ioutil" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "testing" -) - -// CopyFile with invalid src -func TestCopyFileWithInvalidSrc(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile("/invalid/file/path", path.Join(tempFolder, "dest")) - if err == nil { - t.Fatal("Should have failed to copy an invalid src file") - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes") - } - -} - -// CopyFile with invalid dest -func TestCopyFileWithInvalidDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - src := path.Join(tempFolder, "file") - err = ioutil.WriteFile(src, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(src, path.Join(tempFolder, "/invalid/dest/path")) - if err == nil { - t.Fatal("Should have failed to copy to an invalid dest path") - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes") - } - -} - -// CopyFile with same src and dest -func TestCopyFileWithSameSrcAndDest(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - file := path.Join(tempFolder, "file") - err = ioutil.WriteFile(file, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(file, file) - if err != nil { - t.Fatal(err) - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes as it is the same file.") - } -} - -// CopyFile with same src and dest but path is different and not clean -func TestCopyFileWithSameSrcAndDestWithPathNameDifferent(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - testFolder := path.Join(tempFolder, "test") - err = os.MkdirAll(testFolder, 0740) - if err != nil { - t.Fatal(err) - } - file := path.Join(testFolder, "file") - sameFile := testFolder + "/../test/file" - err = ioutil.WriteFile(file, []byte("content"), 0740) - if err != nil { - t.Fatal(err) - } - bytes, err := CopyFile(file, sameFile) - if err != nil { - t.Fatal(err) - } - if bytes != 0 { - t.Fatal("Should have written 0 bytes as it is the same file.") - } -} - -func TestCopyFile(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - defer os.RemoveAll(tempFolder) - if err != nil { - t.Fatal(err) - } - src := path.Join(tempFolder, "src") - dest := path.Join(tempFolder, "dest") - ioutil.WriteFile(src, []byte("content"), 0777) - ioutil.WriteFile(dest, []byte("destContent"), 0777) - bytes, err := CopyFile(src, dest) - if err != nil { - t.Fatal(err) - } - if bytes != 7 { - t.Fatalf("Should have written %d bytes but wrote %d", 7, bytes) - } - actual, err := ioutil.ReadFile(dest) - if err != nil { - t.Fatal(err) - } - if string(actual) != "content" { - t.Fatalf("Dest content was '%s', expected '%s'", string(actual), "content") - } -} - -// Reading a symlink to a directory must return the directory -func TestReadSymlinkedDirectoryExistingDirectory(t *testing.T) {
- // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - var err error - if err = os.Mkdir("/tmp/testReadSymlinkToExistingDirectory", 0777); err != nil { - t.Errorf("failed to create directory: %s", err) - } - - if err = os.Symlink("/tmp/testReadSymlinkToExistingDirectory", "/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/dirLinkTest"); err != nil { - t.Fatalf("failed to read symlink to directory: %s", err) - } - - if path != "/tmp/testReadSymlinkToExistingDirectory" { - t.Fatalf("symlink returned unexpected directory: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToExistingDirectory"); err != nil { - t.Errorf("failed to remove temporary directory: %s", err) - } - - if err = os.Remove("/tmp/dirLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -// Reading a non-existing symlink must fail -func TestReadSymlinkedDirectoryNonExistingSymlink(t *testing.T) { - var path string - var err error - if path, err = ReadSymlinkedDirectory("/tmp/test/foo/Non/ExistingPath"); err == nil { - t.Fatalf("error expected for non-existing symlink") - } - - if path != "" { - t.Fatalf("expected empty path, but '%s' was returned", path) - } -} - -// Reading a symlink to a file must fail -func TestReadSymlinkedDirectoryToFile(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - var err error - var file *os.File - - if file, err = os.Create("/tmp/testReadSymlinkToFile"); err != nil { - t.Fatalf("failed to create file: %s", err) - } - - file.Close() - - if err = os.Symlink("/tmp/testReadSymlinkToFile", "/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to create symlink: %s", err) - } - - var path string - if path, err = ReadSymlinkedDirectory("/tmp/fileLinkTest"); err == nil { - t.Fatalf("ReadSymlinkedDirectory on a symlink to a file should've failed") - } - - if path != "" { - t.Fatalf("path should've been empty: %s", path) - } - - if err = os.Remove("/tmp/testReadSymlinkToFile"); err != nil { - t.Errorf("failed to remove file: %s", err) - } - - if err = os.Remove("/tmp/fileLinkTest"); err != nil { - t.Errorf("failed to remove symlink: %s", err) - } -} - -func TestWildcardMatches(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*"}) - if match != true { - t.Errorf("failed to get a wildcard match, got %v", match) - } -} - -// A simple pattern match should return true. -func TestPatternMatches(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*.go"}) - if match != true { - t.Errorf("failed to get a match, got %v", match) - } -} - -// An exclusion followed by an inclusion should return true. -func TestExclusionPatternMatchesPatternBefore(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"!fileutils.go", "*.go"}) - if match != true { - t.Errorf("failed to get true match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. 
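These tests pin down the .dockerignore-style semantics of Matches: later patterns override earlier ones, and a ! exception re-includes a path an earlier pattern excluded. A condensed sketch of that behavior, mirroring the cases below:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	m, _ := fileutils.Matches("docs/README.md", []string{"docs", "!docs/README.md"})
	fmt.Println(m) // false: the ! exception re-includes the file

	m, _ = fileutils.Matches("fileutils.go", []string{"*.go", "!fileutils.go"})
	fmt.Println(m) // false: the later negated pattern wins
}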
-func TestPatternMatchesFolderWithSlashExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs/", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A folder pattern followed by an exception should return false. -func TestPatternMatchesFolderWildcardExclusions(t *testing.T) { - match, _ := Matches("docs/README.md", []string{"docs/*", "!docs/README.md"}) - if match != false { - t.Errorf("failed to get a false match on exclusion pattern, got %v", match) - } -} - -// A pattern followed by an exclusion should return false. -func TestExclusionPatternMatchesPatternAfter(t *testing.T) { - match, _ := Matches("fileutils.go", []string{"*.go", "!fileutils.go"}) - if match != false { - t.Errorf("failed to get false match on exclusion pattern, got %v", match) - } -} - -// A filename evaluating to . should return false. -func TestExclusionPatternMatchesWholeDirectory(t *testing.T) { - match, _ := Matches(".", []string{"*.go"}) - if match != false { - t.Errorf("failed to get false match on ., got %v", match) - } -} - -// A single ! pattern should return an error. -func TestSingleExclamationError(t *testing.T) { - _, err := Matches("fileutils.go", []string{"!"}) - if err == nil { - t.Errorf("failed to get an error for a single exclamation point, got %v", err) - } -} - -// A string preceded with a ! should return true from Exclusion. -func TestExclusion(t *testing.T) { - exclusion := exclusion("!") - if !exclusion { - t.Errorf("failed to get true for a single !, got %v", exclusion) - } -} - -// Matches with no patterns -func TestMatchesWithNoPatterns(t *testing.T) { - matches, err := Matches("/any/path/there", []string{}) - if err != nil { - t.Fatal(err) - } - if matches { - t.Fatalf("Should not have match anything") - } -} - -// Matches with malformed patterns -func TestMatchesWithMalformedPatterns(t *testing.T) { - matches, err := Matches("/any/path/there", []string{"["}) - if err == nil { - t.Fatal("Should have failed because of a malformed syntax in the pattern") - } - if matches { - t.Fatalf("Should not have match anything") - } -} - -// Test lots of variants of patterns & strings -func TestMatches(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - tests := []struct { - pattern string - text string - pass bool - }{ - {"**", "file", true}, - {"**", "file/", true}, - {"**/", "file", true}, // weird one - {"**/", "file/", true}, - {"**", "/", true}, - {"**/", "/", true}, - {"**", "dir/file", true}, - {"**/", "dir/file", false}, - {"**", "dir/file/", true}, - {"**/", "dir/file/", true}, - {"**/**", "dir/file", true}, - {"**/**", "dir/file/", true}, - {"dir/**", "dir/file", true}, - {"dir/**", "dir/file/", true}, - {"dir/**", "dir/dir2/file", true}, - {"dir/**", "dir/dir2/file/", true}, - {"**/dir2/*", "dir/dir2/file", true}, - {"**/dir2/*", "dir/dir2/file/", false}, - {"**/dir2/**", "dir/dir2/dir3/file", true}, - {"**/dir2/**", "dir/dir2/dir3/file/", true}, - {"**file", "file", true}, - {"**file", "dir/file", true}, - {"**/file", "dir/file", true}, - {"**file", "dir/dir/file", true}, - {"**/file", "dir/dir/file", true}, - {"**/file*", "dir/dir/file", true}, - {"**/file*", "dir/dir/file.txt", true}, - {"**/file*txt", "dir/dir/file.txt", true}, - {"**/file*.txt", "dir/dir/file.txt", true}, - {"**/file*.txt*", "dir/dir/file.txt", true}, - {"**/**/*.txt", "dir/dir/file.txt", true}, - {"**/**/*.txt2", "dir/dir/file.txt", false}, 
- {"**/*.txt", "file.txt", true}, - {"**/**/*.txt", "file.txt", true}, - {"a**/*.txt", "a/file.txt", true}, - {"a**/*.txt", "a/dir/file.txt", true}, - {"a**/*.txt", "a/dir/dir/file.txt", true}, - {"a/*.txt", "a/dir/file.txt", false}, - {"a/*.txt", "a/file.txt", true}, - {"a/*.txt**", "a/file.txt", true}, - {"a[b-d]e", "ae", false}, - {"a[b-d]e", "ace", true}, - {"a[b-d]e", "aae", false}, - {"a[^b-d]e", "aze", true}, - {".*", ".foo", true}, - {".*", "foo", false}, - {"abc.def", "abcdef", false}, - {"abc.def", "abc.def", true}, - {"abc.def", "abcZdef", false}, - {"abc?def", "abcZdef", true}, - {"abc?def", "abcdef", false}, - {"a\\*b", "a*b", true}, - {"a\\", "a", false}, - {"a\\", "a\\", false}, - {"a\\\\", "a\\", true}, - {"**/foo/bar", "foo/bar", true}, - {"**/foo/bar", "dir/foo/bar", true}, - {"**/foo/bar", "dir/dir2/foo/bar", true}, - {"abc/**", "abc", false}, - {"abc/**", "abc/def", true}, - {"abc/**", "abc/def/ghi", true}, - } - - for _, test := range tests { - res, _ := regexpMatch(test.pattern, test.text) - if res != test.pass { - t.Fatalf("Failed: %v - res:%v", test, res) - } - } -} - -// An empty string should return true from Empty. -func TestEmpty(t *testing.T) { - empty := empty("") - if !empty { - t.Errorf("failed to get true for an empty string, got %v", empty) - } -} - -func TestCleanPatterns(t *testing.T) { - cleaned, _, _, _ := CleanPatterns([]string{"docs", "config"}) - if len(cleaned) != 2 { - t.Errorf("expected 2 element slice, got %v", len(cleaned)) - } -} - -func TestCleanPatternsStripEmptyPatterns(t *testing.T) { - cleaned, _, _, _ := CleanPatterns([]string{"docs", "config", ""}) - if len(cleaned) != 2 { - t.Errorf("expected 2 element slice, got %v", len(cleaned)) - } -} - -func TestCleanPatternsExceptionFlag(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md"}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsLeadingSpaceTrimmed(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", " !docs/README.md"}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsTrailingSpaceTrimmed(t *testing.T) { - _, _, exceptions, _ := CleanPatterns([]string{"docs", "!docs/README.md "}) - if !exceptions { - t.Errorf("expected exceptions to be true, got %v", exceptions) - } -} - -func TestCleanPatternsErrorSingleException(t *testing.T) { - _, _, _, err := CleanPatterns([]string{"!"}) - if err == nil { - t.Errorf("expected error on single exclamation point, got %v", err) - } -} - -func TestCleanPatternsFolderSplit(t *testing.T) { - _, dirs, _, _ := CleanPatterns([]string{"docs/config/CONFIG.md"}) - if dirs[0][0] != "docs" { - t.Errorf("expected first element in dirs slice to be docs, got %v", dirs[0][1]) - } - if dirs[0][1] != "config" { - t.Errorf("expected first element in dirs slice to be config, got %v", dirs[0][1]) - } -} - -func TestCreateIfNotExistsDir(t *testing.T) { - tempFolder, err := ioutil.TempDir("", "docker-fileutils-test") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tempFolder) - - folderToCreate := filepath.Join(tempFolder, "tocreate") - - if err := CreateIfNotExists(folderToCreate, true); err != nil { - t.Fatal(err) - } - fileinfo, err := os.Stat(folderToCreate) - if err != nil { - t.Fatalf("Should have create a folder, got %v", err) - } - - if !fileinfo.IsDir() { - t.Fatalf("Should have been a dir, seems it's not") - } -} - -func TestCreateIfNotExistsFile(t *testing.T) { - 
-	tempFolder, err := ioutil.TempDir("", "docker-fileutils-test")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tempFolder)
-
-	fileToCreate := filepath.Join(tempFolder, "file/to/create")
-
-	if err := CreateIfNotExists(fileToCreate, false); err != nil {
-		t.Fatal(err)
-	}
-	fileinfo, err := os.Stat(fileToCreate)
-	if err != nil {
-		t.Fatalf("Should have created a file, got %v", err)
-	}
-
-	if fileinfo.IsDir() {
-		t.Fatalf("Should have been a file, but it's not")
-	}
-}
-
-// These matchTests are stolen from go's filepath Match tests.
-type matchTest struct {
-	pattern, s string
-	match      bool
-	err        error
-}
-
-var matchTests = []matchTest{
-	{"abc", "abc", true, nil},
-	{"*", "abc", true, nil},
-	{"*c", "abc", true, nil},
-	{"a*", "a", true, nil},
-	{"a*", "abc", true, nil},
-	{"a*", "ab/c", false, nil},
-	{"a*/b", "abc/b", true, nil},
-	{"a*/b", "a/c/b", false, nil},
-	{"a*b*c*d*e*/f", "axbxcxdxe/f", true, nil},
-	{"a*b*c*d*e*/f", "axbxcxdxexxx/f", true, nil},
-	{"a*b*c*d*e*/f", "axbxcxdxe/xxx/f", false, nil},
-	{"a*b*c*d*e*/f", "axbxcxdxexxx/fff", false, nil},
-	{"a*b?c*x", "abxbbxdbxebxczzx", true, nil},
-	{"a*b?c*x", "abxbbxdbxebxczzy", false, nil},
-	{"ab[c]", "abc", true, nil},
-	{"ab[b-d]", "abc", true, nil},
-	{"ab[e-g]", "abc", false, nil},
-	{"ab[^c]", "abc", false, nil},
-	{"ab[^b-d]", "abc", false, nil},
-	{"ab[^e-g]", "abc", true, nil},
-	{"a\\*b", "a*b", true, nil},
-	{"a\\*b", "ab", false, nil},
-	{"a?b", "a☺b", true, nil},
-	{"a[^a]b", "a☺b", true, nil},
-	{"a???b", "a☺b", false, nil},
-	{"a[^a][^a][^a]b", "a☺b", false, nil},
-	{"[a-ζ]*", "α", true, nil},
-	{"*[a-ζ]", "A", false, nil},
-	{"a?b", "a/b", false, nil},
-	{"a*b", "a/b", false, nil},
-	{"[\\]a]", "]", true, nil},
-	{"[\\-]", "-", true, nil},
-	{"[x\\-]", "x", true, nil},
-	{"[x\\-]", "-", true, nil},
-	{"[x\\-]", "z", false, nil},
-	{"[\\-x]", "x", true, nil},
-	{"[\\-x]", "-", true, nil},
-	{"[\\-x]", "a", false, nil},
-	{"[]a]", "]", false, filepath.ErrBadPattern},
-	{"[-]", "-", false, filepath.ErrBadPattern},
-	{"[x-]", "x", false, filepath.ErrBadPattern},
-	{"[x-]", "-", false, filepath.ErrBadPattern},
-	{"[x-]", "z", false, filepath.ErrBadPattern},
-	{"[-x]", "x", false, filepath.ErrBadPattern},
-	{"[-x]", "-", false, filepath.ErrBadPattern},
-	{"[-x]", "a", false, filepath.ErrBadPattern},
-	{"\\", "a", false, filepath.ErrBadPattern},
-	{"[a-b-c]", "a", false, filepath.ErrBadPattern},
-	{"[", "a", false, filepath.ErrBadPattern},
-	{"[^", "a", false, filepath.ErrBadPattern},
-	{"[^bc", "a", false, filepath.ErrBadPattern},
-	{"a[", "a", false, filepath.ErrBadPattern}, // was nil but IMO it's wrong
-	{"a[", "ab", false, filepath.ErrBadPattern},
-	{"*x", "xxx", true, nil},
-}
-
-func errp(e error) string {
-	if e == nil {
-		return ""
-	}
-	return e.Error()
-}
-
-// TestMatch tests our version of filepath.Match, called regexpMatch.
-func TestMatch(t *testing.T) {
-	for _, tt := range matchTests {
-		pattern := tt.pattern
-		s := tt.s
-		if runtime.GOOS == "windows" {
-			if strings.Index(pattern, "\\") >= 0 {
-				// no escape allowed on windows.
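The exclusion tests above exercise .dockerignore-style semantics: patterns apply in order, and a leading "!" re-includes a previously matched path. A minimal sketch of that rule using the standard library's filepath.Match; the package's own Matches/regexpMatch additionally handle "**" and directory prefixes, which this sketch omits:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// matchesSketch is a hypothetical helper approximating the ordered
// pattern/exclusion logic tested above.
func matchesSketch(file string, patterns []string) (bool, error) {
	matched := false
	for _, pattern := range patterns {
		negative := strings.HasPrefix(pattern, "!")
		if negative {
			pattern = strings.TrimPrefix(pattern, "!")
			if pattern == "" {
				// Mirrors the single-"!" error case above.
				return false, fmt.Errorf("illegal exclusion pattern: \"!\"")
			}
		}
		m, err := filepath.Match(pattern, file)
		if err != nil {
			return false, err
		}
		if m {
			// Later patterns win: a match turns the state on,
			// an exclusion match turns it back off.
			matched = !negative
		}
	}
	return matched, nil
}

func main() {
	m, _ := matchesSketch("fileutils.go", []string{"*.go", "!fileutils.go"})
	fmt.Println(m) // false: the trailing exclusion wins
}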
- continue - } - pattern = filepath.Clean(pattern) - s = filepath.Clean(s) - } - ok, err := regexpMatch(pattern, s) - if ok != tt.match || err != tt.err { - t.Fatalf("Match(%#q, %#q) = %v, %q want %v, %q", pattern, s, ok, errp(err), tt.match, errp(tt.err)) - } - } -} diff --git a/pkg/fileutils/fileutils_unix.go b/pkg/fileutils/fileutils_unix.go deleted file mode 100644 index d5c3abf568..0000000000 --- a/pkg/fileutils/fileutils_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux freebsd - -package fileutils - -import ( - "fmt" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -// GetTotalUsedFds Returns the number of used File Descriptors by -// reading it via /proc filesystem. -func GetTotalUsedFds() int { - if fds, err := ioutil.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil { - logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err) - } else { - return len(fds) - } - return -1 -} diff --git a/pkg/fileutils/fileutils_windows.go b/pkg/fileutils/fileutils_windows.go deleted file mode 100644 index 5ec21cace5..0000000000 --- a/pkg/fileutils/fileutils_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -package fileutils - -// GetTotalUsedFds Returns the number of used File Descriptors. Not supported -// on Windows. -func GetTotalUsedFds() int { - return -1 -} diff --git a/pkg/gitutils/gitutils.go b/pkg/gitutils/gitutils.go deleted file mode 100644 index ded091f2a2..0000000000 --- a/pkg/gitutils/gitutils.go +++ /dev/null @@ -1,100 +0,0 @@ -package gitutils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/url" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/symlink" - "github.com/docker/docker/pkg/urlutil" -) - -// Clone clones a repository into a newly created directory which -// will be under "docker-build-git" -func Clone(remoteURL string) (string, error) { - if !urlutil.IsGitTransport(remoteURL) { - remoteURL = "https://" + remoteURL - } - root, err := ioutil.TempDir("", "docker-build-git") - if err != nil { - return "", err - } - - u, err := url.Parse(remoteURL) - if err != nil { - return "", err - } - - fragment := u.Fragment - clone := cloneArgs(u, root) - - if output, err := git(clone...); err != nil { - return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - - return checkoutGit(fragment, root) -} - -func cloneArgs(remoteURL *url.URL, root string) []string { - args := []string{"clone", "--recursive"} - shallow := len(remoteURL.Fragment) == 0 - - if shallow && strings.HasPrefix(remoteURL.Scheme, "http") { - res, err := http.Head(fmt.Sprintf("%s/info/refs?service=git-upload-pack", remoteURL)) - if err != nil || res.Header.Get("Content-Type") != "application/x-git-upload-pack-advertisement" { - shallow = false - } - } - - if shallow { - args = append(args, "--depth", "1") - } - - if remoteURL.Fragment != "" { - remoteURL.Fragment = "" - } - - return append(args, remoteURL.String(), root) -} - -func checkoutGit(fragment, root string) (string, error) { - refAndDir := strings.SplitN(fragment, ":", 2) - - if len(refAndDir[0]) != 0 { - if output, err := gitWithinDir(root, "checkout", refAndDir[0]); err != nil { - return "", fmt.Errorf("Error trying to use git: %s (%s)", err, output) - } - } - - if len(refAndDir) > 1 && len(refAndDir[1]) != 0 { - newCtx, err := symlink.FollowSymlinkInScope(filepath.Join(root, refAndDir[1]), root) - if err != nil { - return "", fmt.Errorf("Error setting git context, %q not within git root: %s", refAndDir[1], err) - } - - fi, err := os.Stat(newCtx) - if err != nil { - return "", 
err - } - if !fi.IsDir() { - return "", fmt.Errorf("Error setting git context, not a directory: %s", newCtx) - } - root = newCtx - } - - return root, nil -} - -func gitWithinDir(dir string, args ...string) ([]byte, error) { - a := []string{"--work-tree", dir, "--git-dir", filepath.Join(dir, ".git")} - return git(append(a, args...)...) -} - -func git(args ...string) ([]byte, error) { - return exec.Command("git", args...).CombinedOutput() -} diff --git a/pkg/gitutils/gitutils_test.go b/pkg/gitutils/gitutils_test.go deleted file mode 100644 index d197058d20..0000000000 --- a/pkg/gitutils/gitutils_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package gitutils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "net/url" - "os" - "path/filepath" - "reflect" - "runtime" - "strings" - "testing" -) - -func TestCloneArgsSmartHttp(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/repo.git" - gitURL := serverURL.String() - - mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { - q := r.URL.Query().Get("service") - w.Header().Set("Content-Type", fmt.Sprintf("application/x-%s-advertisement", q)) - }) - - args := cloneArgs(serverURL, "/tmp") - exp := []string{"clone", "--recursive", "--depth", "1", gitURL, "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsDumbHttp(t *testing.T) { - mux := http.NewServeMux() - server := httptest.NewServer(mux) - serverURL, _ := url.Parse(server.URL) - - serverURL.Path = "/repo.git" - gitURL := serverURL.String() - - mux.HandleFunc("/repo.git/info/refs", func(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "text/plain") - }) - - args := cloneArgs(serverURL, "/tmp") - exp := []string{"clone", "--recursive", gitURL, "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsGit(t *testing.T) { - u, _ := url.Parse("git://github.com/docker/docker") - args := cloneArgs(u, "/tmp") - exp := []string{"clone", "--recursive", "--depth", "1", "git://github.com/docker/docker", "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func TestCloneArgsStripFragment(t *testing.T) { - u, _ := url.Parse("git://github.com/docker/docker#test") - args := cloneArgs(u, "/tmp") - exp := []string{"clone", "--recursive", "git://github.com/docker/docker", "/tmp"} - if !reflect.DeepEqual(args, exp) { - t.Fatalf("Expected %v, got %v", exp, args) - } -} - -func gitGetConfig(name string) string { - b, err := git([]string{"config", "--get", name}...) - if err != nil { - // since we are interested in empty or non empty string, - // we can safely ignore the err here. 
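The smart-vs-dumb HTTP tests above hinge on one probe: before adding --depth 1, cloneArgs issues a HEAD request to the info/refs endpoint and only treats the remote as a smart server when it advertises git-upload-pack. A self-contained sketch of that check; supportsShallowClone is a hypothetical name, the real logic lives inline in cloneArgs:

package main

import (
	"fmt"
	"net/http"
)

// supportsShallowClone reports whether the remote speaks the smart HTTP
// protocol, i.e. whether a shallow (--depth 1) clone is safe to attempt.
func supportsShallowClone(remote string) bool {
	res, err := http.Head(remote + "/info/refs?service=git-upload-pack")
	if err != nil {
		return false
	}
	defer res.Body.Close()
	return res.Header.Get("Content-Type") == "application/x-git-upload-pack-advertisement"
}

func main() {
	fmt.Println(supportsShallowClone("https://github.com/docker/docker"))
}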
- return "" - } - return strings.TrimSpace(string(b)) -} - -func TestCheckoutGit(t *testing.T) { - root, err := ioutil.TempDir("", "docker-build-git-checkout") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(root) - - autocrlf := gitGetConfig("core.autocrlf") - if !(autocrlf == "true" || autocrlf == "false" || - autocrlf == "input" || autocrlf == "") { - t.Logf("unknown core.autocrlf value: \"%s\"", autocrlf) - } - eol := "\n" - if autocrlf == "true" { - eol = "\r\n" - } - - gitDir := filepath.Join(root, "repo") - _, err = git("init", gitDir) - if err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "config", "user.email", "test@docker.com"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "config", "user.name", "Docker test"); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch"), 0644); err != nil { - t.Fatal(err) - } - - subDir := filepath.Join(gitDir, "subdir") - if err = os.Mkdir(subDir, 0755); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 5000"), 0644); err != nil { - t.Fatal(err) - } - - if runtime.GOOS != "windows" { - if err = os.Symlink("../subdir", filepath.Join(gitDir, "parentlink")); err != nil { - t.Fatal(err) - } - - if err = os.Symlink("/subdir", filepath.Join(gitDir, "absolutelink")); err != nil { - t.Fatal(err) - } - } - - if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "commit", "-am", "First commit"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "checkout", "-b", "test"); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(gitDir, "Dockerfile"), []byte("FROM scratch\nEXPOSE 3000"), 0644); err != nil { - t.Fatal(err) - } - - if err = ioutil.WriteFile(filepath.Join(subDir, "Dockerfile"), []byte("FROM busybox\nEXPOSE 5000"), 0644); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "add", "-A"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "commit", "-am", "Branch commit"); err != nil { - t.Fatal(err) - } - - if _, err = gitWithinDir(gitDir, "checkout", "master"); err != nil { - t.Fatal(err) - } - - type singleCase struct { - frag string - exp string - fail bool - } - - cases := []singleCase{ - {"", "FROM scratch", false}, - {"master", "FROM scratch", false}, - {":subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, - {":nosubdir", "", true}, // missing directory error - {":Dockerfile", "", true}, // not a directory error - {"master:nosubdir", "", true}, - {"master:subdir", "FROM scratch" + eol + "EXPOSE 5000", false}, - {"master:../subdir", "", true}, - {"test", "FROM scratch" + eol + "EXPOSE 3000", false}, - {"test:", "FROM scratch" + eol + "EXPOSE 3000", false}, - {"test:subdir", "FROM busybox" + eol + "EXPOSE 5000", false}, - } - - if runtime.GOOS != "windows" { - // Windows GIT (2.7.1 x64) does not support parentlink/absolutelink. 
Sample output below - // git --work-tree .\repo --git-dir .\repo\.git add -A - // error: readlink("absolutelink"): Function not implemented - // error: unable to index file absolutelink - // fatal: adding files failed - cases = append(cases, singleCase{frag: "master:absolutelink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) - cases = append(cases, singleCase{frag: "master:parentlink", exp: "FROM scratch" + eol + "EXPOSE 5000", fail: false}) - } - - for _, c := range cases { - r, err := checkoutGit(c.frag, gitDir) - - fail := err != nil - if fail != c.fail { - t.Fatalf("Expected %v failure, error was %v\n", c.fail, err) - } - if c.fail { - continue - } - - b, err := ioutil.ReadFile(filepath.Join(r, "Dockerfile")) - if err != nil { - t.Fatal(err) - } - - if string(b) != c.exp { - t.Fatalf("Expected %v, was %v\n", c.exp, string(b)) - } - } -} diff --git a/pkg/graphdb/conn_sqlite3.go b/pkg/graphdb/conn_sqlite3.go deleted file mode 100644 index dbcf44c256..0000000000 --- a/pkg/graphdb/conn_sqlite3.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build cgo - -package graphdb - -import "database/sql" - -// NewSqliteConn opens a connection to a sqlite -// database. -func NewSqliteConn(root string) (*Database, error) { - conn, err := sql.Open("sqlite3", root) - if err != nil { - return nil, err - } - return NewDatabase(conn) -} diff --git a/pkg/graphdb/conn_sqlite3_unix.go b/pkg/graphdb/conn_sqlite3_unix.go deleted file mode 100644 index f932fff286..0000000000 --- a/pkg/graphdb/conn_sqlite3_unix.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build cgo,!windows - -package graphdb - -import ( - _ "github.com/mattn/go-sqlite3" // registers sqlite -) diff --git a/pkg/graphdb/conn_sqlite3_windows.go b/pkg/graphdb/conn_sqlite3_windows.go deleted file mode 100644 index 52590303d4..0000000000 --- a/pkg/graphdb/conn_sqlite3_windows.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build cgo,windows - -package graphdb - -import ( - _ "github.com/mattn/go-sqlite3" // registers sqlite -) diff --git a/pkg/graphdb/conn_unsupported.go b/pkg/graphdb/conn_unsupported.go deleted file mode 100644 index cf977050da..0000000000 --- a/pkg/graphdb/conn_unsupported.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !cgo - -package graphdb - -// NewSqliteConn return a new sqlite connection. -func NewSqliteConn(root string) (*Database, error) { - panic("Not implemented") -} diff --git a/pkg/graphdb/graphdb.go b/pkg/graphdb/graphdb.go deleted file mode 100644 index eca433fa85..0000000000 --- a/pkg/graphdb/graphdb.go +++ /dev/null @@ -1,551 +0,0 @@ -package graphdb - -import ( - "database/sql" - "fmt" - "path" - "strings" - "sync" -) - -const ( - createEntityTable = ` - CREATE TABLE IF NOT EXISTS entity ( - id text NOT NULL PRIMARY KEY - );` - - createEdgeTable = ` - CREATE TABLE IF NOT EXISTS edge ( - "entity_id" text NOT NULL, - "parent_id" text NULL, - "name" text NOT NULL, - CONSTRAINT "parent_fk" FOREIGN KEY ("parent_id") REFERENCES "entity" ("id"), - CONSTRAINT "entity_fk" FOREIGN KEY ("entity_id") REFERENCES "entity" ("id") - ); - ` - - createEdgeIndices = ` - CREATE UNIQUE INDEX IF NOT EXISTS "name_parent_ix" ON "edge" (parent_id, name); - ` -) - -// Entity with a unique id. -type Entity struct { - id string -} - -// An Edge connects two entities together. -type Edge struct { - EntityID string - Name string - ParentID string -} - -// Entities stores the list of entities. -type Entities map[string]*Entity - -// Edges stores the relationships between entities. 
-type Edges []*Edge - -// WalkFunc is a function invoked to process an individual entity. -type WalkFunc func(fullPath string, entity *Entity) error - -// Database is a graph database for storing entities and their relationships. -type Database struct { - conn *sql.DB - mux sync.RWMutex -} - -// IsNonUniqueNameError processes the error to check if it's caused by -// a constraint violation. -// This is necessary because the error isn't the same across various -// sqlite versions. -func IsNonUniqueNameError(err error) bool { - str := err.Error() - // sqlite 3.7.17-1ubuntu1 returns: - // Set failure: Abort due to constraint violation: columns parent_id, name are not unique - if strings.HasSuffix(str, "name are not unique") { - return true - } - // sqlite-3.8.3-1.fc20 returns: - // Set failure: Abort due to constraint violation: UNIQUE constraint failed: edge.parent_id, edge.name - if strings.Contains(str, "UNIQUE constraint failed") && strings.Contains(str, "edge.name") { - return true - } - // sqlite-3.6.20-1.el6 returns: - // Set failure: Abort due to constraint violation: constraint failed - if strings.HasSuffix(str, "constraint failed") { - return true - } - return false -} - -// NewDatabase creates a new graph database initialized with a root entity. -func NewDatabase(conn *sql.DB) (*Database, error) { - if conn == nil { - return nil, fmt.Errorf("Database connection cannot be nil") - } - db := &Database{conn: conn} - - // Create root entities - tx, err := conn.Begin() - if err != nil { - return nil, err - } - - if _, err := tx.Exec(createEntityTable); err != nil { - return nil, err - } - if _, err := tx.Exec(createEdgeTable); err != nil { - return nil, err - } - if _, err := tx.Exec(createEdgeIndices); err != nil { - return nil, err - } - - if _, err := tx.Exec("DELETE FROM entity where id = ?", "0"); err != nil { - tx.Rollback() - return nil, err - } - - if _, err := tx.Exec("INSERT INTO entity (id) VALUES (?);", "0"); err != nil { - tx.Rollback() - return nil, err - } - - if _, err := tx.Exec("DELETE FROM edge where entity_id=? and name=?", "0", "/"); err != nil { - tx.Rollback() - return nil, err - } - - if _, err := tx.Exec("INSERT INTO edge (entity_id, name) VALUES(?,?);", "0", "/"); err != nil { - tx.Rollback() - return nil, err - } - - if err := tx.Commit(); err != nil { - return nil, err - } - - return db, nil -} - -// Close the underlying connection to the database. -func (db *Database) Close() error { - return db.conn.Close() -} - -// Set the entity id for a given path. -func (db *Database) Set(fullPath, id string) (*Entity, error) { - db.mux.Lock() - defer db.mux.Unlock() - - tx, err := db.conn.Begin() - if err != nil { - return nil, err - } - - var entityID string - if err := tx.QueryRow("SELECT id FROM entity WHERE id = ?;", id).Scan(&entityID); err != nil { - if err == sql.ErrNoRows { - if _, err := tx.Exec("INSERT INTO entity (id) VALUES(?);", id); err != nil { - tx.Rollback() - return nil, err - } - } else { - tx.Rollback() - return nil, err - } - } - e := &Entity{id} - - parentPath, name := splitPath(fullPath) - if err := db.setEdge(parentPath, name, e, tx); err != nil { - tx.Rollback() - return nil, err - } - - if err := tx.Commit(); err != nil { - return nil, err - } - return e, nil -} - -// Exists returns true if a name already exists in the database. 
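For orientation, a hedged usage sketch of the Set/Get/Exists API being removed here, assuming the cgo sqlite3 build from conn_sqlite3.go above; paths and ids are illustrative and most error handling is elided:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/graphdb"
)

func main() {
	// NewSqliteConn comes from conn_sqlite3.go above (requires cgo).
	db, err := graphdb.NewSqliteConn("/tmp/graph.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Entities are registered by path; the id is caller-chosen, and the
	// parent path must already exist.
	if _, err := db.Set("/webapp", "1"); err != nil {
		panic(err)
	}
	if _, err := db.Set("/webapp/db", "2"); err != nil {
		panic(err)
	}

	fmt.Println(db.Exists("/webapp/db"))   // true
	fmt.Println(db.Get("/webapp/db").ID()) // 2
}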
-func (db *Database) Exists(name string) bool {
-	db.mux.RLock()
-	defer db.mux.RUnlock()
-
-	e, err := db.get(name)
-	if err != nil {
-		return false
-	}
-	return e != nil
-}
-
-func (db *Database) setEdge(parentPath, name string, e *Entity, tx *sql.Tx) error {
-	parent, err := db.get(parentPath)
-	if err != nil {
-		return err
-	}
-	if parent.id == e.id {
-		return fmt.Errorf("Cannot set self as child")
-	}
-
-	if _, err := tx.Exec("INSERT INTO edge (parent_id, name, entity_id) VALUES (?,?,?);", parent.id, name, e.id); err != nil {
-		return err
-	}
-	return nil
-}
-
-// RootEntity returns the root "/" entity for the database.
-func (db *Database) RootEntity() *Entity {
-	return &Entity{
-		id: "0",
-	}
-}
-
-// Get returns the entity for a given path.
-func (db *Database) Get(name string) *Entity {
-	db.mux.RLock()
-	defer db.mux.RUnlock()
-
-	e, err := db.get(name)
-	if err != nil {
-		return nil
-	}
-	return e
-}
-
-func (db *Database) get(name string) (*Entity, error) {
-	e := db.RootEntity()
-	// We always know the root name so return it if
-	// it is requested
-	if name == "/" {
-		return e, nil
-	}
-
-	parts := split(name)
-	for i := 1; i < len(parts); i++ {
-		p := parts[i]
-		if p == "" {
-			continue
-		}
-
-		next := db.child(e, p)
-		if next == nil {
-			return nil, fmt.Errorf("Cannot find child for %s", name)
-		}
-		e = next
-	}
-	return e, nil
-
-}
-
-// List all entities starting from the name.
-// The key will be the full path of the entity.
-func (db *Database) List(name string, depth int) Entities {
-	db.mux.RLock()
-	defer db.mux.RUnlock()
-
-	out := Entities{}
-	e, err := db.get(name)
-	if err != nil {
-		return out
-	}
-
-	children, err := db.children(e, name, depth, nil)
-	if err != nil {
-		return out
-	}
-
-	for _, c := range children {
-		out[c.FullPath] = c.Entity
-	}
-	return out
-}
-
-// Walk through the child graph of an entity, calling walkFunc for each child entity.
-// It is safe for walkFunc to call graph functions.
-func (db *Database) Walk(name string, walkFunc WalkFunc, depth int) error {
-	children, err := db.Children(name, depth)
-	if err != nil {
-		return err
-	}
-
-	// Note: the database lock must not be held while calling walkFunc
-	for _, c := range children {
-		if err := walkFunc(c.FullPath, c.Entity); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Children returns the children of the specified entity.
-func (db *Database) Children(name string, depth int) ([]WalkMeta, error) {
-	db.mux.RLock()
-	defer db.mux.RUnlock()
-
-	e, err := db.get(name)
-	if err != nil {
-		return nil, err
-	}
-
-	return db.children(e, name, depth, nil)
-}
-
-// Parents returns the parents of a specified entity.
-func (db *Database) Parents(name string) ([]string, error) {
-	db.mux.RLock()
-	defer db.mux.RUnlock()
-
-	e, err := db.get(name)
-	if err != nil {
-		return nil, err
-	}
-	return db.parents(e)
-}
-
-// Refs returns the reference count for a specified id.
-func (db *Database) Refs(id string) int {
-	db.mux.RLock()
-	defer db.mux.RUnlock()
-
-	var count int
-	if err := db.conn.QueryRow("SELECT COUNT(*) FROM edge WHERE entity_id = ?;", id).Scan(&count); err != nil {
-		return 0
-	}
-	return count
-}
-
-// RefPaths returns all the id's path references.
-func (db *Database) RefPaths(id string) Edges { - db.mux.RLock() - defer db.mux.RUnlock() - - refs := Edges{} - - rows, err := db.conn.Query("SELECT name, parent_id FROM edge WHERE entity_id = ?;", id) - if err != nil { - return refs - } - defer rows.Close() - - for rows.Next() { - var name string - var parentID string - if err := rows.Scan(&name, &parentID); err != nil { - return refs - } - refs = append(refs, &Edge{ - EntityID: id, - Name: name, - ParentID: parentID, - }) - } - return refs -} - -// Delete the reference to an entity at a given path. -func (db *Database) Delete(name string) error { - db.mux.Lock() - defer db.mux.Unlock() - - if name == "/" { - return fmt.Errorf("Cannot delete root entity") - } - - parentPath, n := splitPath(name) - parent, err := db.get(parentPath) - if err != nil { - return err - } - - if _, err := db.conn.Exec("DELETE FROM edge WHERE parent_id = ? AND name = ?;", parent.id, n); err != nil { - return err - } - return nil -} - -// Purge removes the entity with the specified id -// Walk the graph to make sure all references to the entity -// are removed and return the number of references removed -func (db *Database) Purge(id string) (int, error) { - db.mux.Lock() - defer db.mux.Unlock() - - tx, err := db.conn.Begin() - if err != nil { - return -1, err - } - - // Delete all edges - rows, err := tx.Exec("DELETE FROM edge WHERE entity_id = ?;", id) - if err != nil { - tx.Rollback() - return -1, err - } - changes, err := rows.RowsAffected() - if err != nil { - return -1, err - } - - // Clear who's using this id as parent - refs, err := tx.Exec("DELETE FROM edge WHERE parent_id = ?;", id) - if err != nil { - tx.Rollback() - return -1, err - } - refsCount, err := refs.RowsAffected() - if err != nil { - return -1, err - } - - // Delete entity - if _, err := tx.Exec("DELETE FROM entity where id = ?;", id); err != nil { - tx.Rollback() - return -1, err - } - - if err := tx.Commit(); err != nil { - return -1, err - } - - return int(changes + refsCount), nil -} - -// Rename an edge for a given path -func (db *Database) Rename(currentName, newName string) error { - db.mux.Lock() - defer db.mux.Unlock() - - parentPath, name := splitPath(currentName) - newParentPath, newEdgeName := splitPath(newName) - - if parentPath != newParentPath { - return fmt.Errorf("Cannot rename when root paths do not match %s != %s", parentPath, newParentPath) - } - - parent, err := db.get(parentPath) - if err != nil { - return err - } - - rows, err := db.conn.Exec("UPDATE edge SET name = ? WHERE parent_id = ? AND name = ?;", newEdgeName, parent.id, name) - if err != nil { - return err - } - i, err := rows.RowsAffected() - if err != nil { - return err - } - if i == 0 { - return fmt.Errorf("Cannot locate edge for %s %s", parent.id, name) - } - return nil -} - -// WalkMeta stores the walk metadata. 
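Purge and Refs above implement simple reference counting over the edge table: every path pointing at an id is one edge, and Purge deletes all of them, plus the entity row itself, in one transaction. A small standalone sketch of that contract, with illustrative paths and ids:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/graphdb"
)

func main() {
	db, err := graphdb.NewSqliteConn("/tmp/graph-refs.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Two paths pointing at entity "2" mean two edges, so Refs reports 2.
	db.Set("/webapp", "1")
	db.Set("/db", "2")
	db.Set("/webapp/db", "2")
	fmt.Println(db.Refs("2")) // 2

	// Purge removes both edges (and the entity row) and reports how many
	// references were deleted.
	removed, err := db.Purge("2")
	if err != nil {
		panic(err)
	}
	fmt.Println(removed) // 2
}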
-type WalkMeta struct { - Parent *Entity - Entity *Entity - FullPath string - Edge *Edge -} - -func (db *Database) children(e *Entity, name string, depth int, entities []WalkMeta) ([]WalkMeta, error) { - if e == nil { - return entities, nil - } - - rows, err := db.conn.Query("SELECT entity_id, name FROM edge where parent_id = ?;", e.id) - if err != nil { - return nil, err - } - defer rows.Close() - - for rows.Next() { - var entityID, entityName string - if err := rows.Scan(&entityID, &entityName); err != nil { - return nil, err - } - child := &Entity{entityID} - edge := &Edge{ - ParentID: e.id, - Name: entityName, - EntityID: child.id, - } - - meta := WalkMeta{ - Parent: e, - Entity: child, - FullPath: path.Join(name, edge.Name), - Edge: edge, - } - - entities = append(entities, meta) - - if depth != 0 { - nDepth := depth - if depth != -1 { - nDepth-- - } - entities, err = db.children(child, meta.FullPath, nDepth, entities) - if err != nil { - return nil, err - } - } - } - - return entities, nil -} - -func (db *Database) parents(e *Entity) (parents []string, err error) { - if e == nil { - return parents, nil - } - - rows, err := db.conn.Query("SELECT parent_id FROM edge where entity_id = ?;", e.id) - if err != nil { - return nil, err - } - defer rows.Close() - - for rows.Next() { - var parentID string - if err := rows.Scan(&parentID); err != nil { - return nil, err - } - parents = append(parents, parentID) - } - - return parents, nil -} - -// Return the entity based on the parent path and name. -func (db *Database) child(parent *Entity, name string) *Entity { - var id string - if err := db.conn.QueryRow("SELECT entity_id FROM edge WHERE parent_id = ? AND name = ?;", parent.id, name).Scan(&id); err != nil { - return nil - } - return &Entity{id} -} - -// ID returns the id used to reference this entity. -func (e *Entity) ID() string { - return e.id -} - -// Paths returns the paths sorted by depth. 
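The depth bookkeeping in children() above is easy to misread: the direct children of the start name are always listed, -1 disables the limit, and each positive increment descends one more level. A hedged sketch showing the effect through Walk, with illustrative data:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/graphdb"
)

func main() {
	db, err := graphdb.NewSqliteConn("/tmp/graph-walk.db")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	db.Set("/a", "1")
	db.Set("/a/b", "2")
	db.Set("/a/b/c", "3")

	// depth 0 would visit only /a; depth 1 adds one more level.
	err = db.Walk("/", func(p string, e *graphdb.Entity) error {
		fmt.Printf("%s -> %s\n", p, e.ID())
		return nil
	}, 1) // visits /a and /a/b, but not /a/b/c
	if err != nil {
		panic(err)
	}
}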
-func (e Entities) Paths() []string { - out := make([]string, len(e)) - var i int - for k := range e { - out[i] = k - i++ - } - sortByDepth(out) - - return out -} diff --git a/pkg/graphdb/graphdb_test.go b/pkg/graphdb/graphdb_test.go deleted file mode 100644 index f0fb074b4d..0000000000 --- a/pkg/graphdb/graphdb_test.go +++ /dev/null @@ -1,721 +0,0 @@ -package graphdb - -import ( - "database/sql" - "fmt" - "os" - "path" - "runtime" - "strconv" - "testing" - - _ "github.com/mattn/go-sqlite3" -) - -func newTestDb(t *testing.T) (*Database, string) { - p := path.Join(os.TempDir(), "sqlite.db") - conn, err := sql.Open("sqlite3", p) - db, err := NewDatabase(conn) - if err != nil { - t.Fatal(err) - } - return db, p -} - -func destroyTestDb(dbPath string) { - os.Remove(dbPath) -} - -func TestNewDatabase(t *testing.T) { - db, dbpath := newTestDb(t) - if db == nil { - t.Fatal("Database should not be nil") - } - db.Close() - defer destroyTestDb(dbpath) -} - -func TestCreateRootEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - root := db.RootEntity() - if root == nil { - t.Fatal("Root entity should not be nil") - } -} - -func TestGetRootEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - e := db.Get("/") - if e == nil { - t.Fatal("Entity should not be nil") - } - if e.ID() != "0" { - t.Fatalf("Entity id should be 0, got %s", e.ID()) - } -} - -func TestSetEntityWithDifferentName(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/test", "1") - if _, err := db.Set("/other", "1"); err != nil { - t.Fatal(err) - } -} - -func TestSetDuplicateEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - if _, err := db.Set("/foo", "42"); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/foo", "43"); err == nil { - t.Fatalf("Creating an entry with a duplicate path did not cause an error") - } -} - -func TestCreateChild(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - child, err := db.Set("/db", "1") - if err != nil { - t.Fatal(err) - } - if child == nil { - t.Fatal("Child should not be nil") - } - if child.ID() != "1" { - t.Fail() - } -} - -func TestParents(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - for i := 1; i < 6; i++ { - a := strconv.Itoa(i) - if _, err := db.Set("/"+a, a); err != nil { - t.Fatal(err) - } - } - - for i := 6; i < 11; i++ { - a := strconv.Itoa(i) - p := strconv.Itoa(i - 5) - - key := fmt.Sprintf("/%s/%s", p, a) - - if _, err := db.Set(key, a); err != nil { - t.Fatal(err) - } - - parents, err := db.Parents(key) - if err != nil { - t.Fatal(err) - } - - if len(parents) != 1 { - t.Fatalf("Expected 1 entry for %s got %d", key, len(parents)) - } - - if parents[0] != p { - t.Fatalf("ID %s received, %s expected", parents[0], p) - } - } -} - -func TestChildren(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - str := "/" - for i := 1; i < 6; i++ { - a := strconv.Itoa(i) - if _, err := db.Set(str+a, a); err != nil { - t.Fatal(err) - } - - str = str + a + "/" - } - - str = "/" - for i := 10; i < 30; i++ { // 20 entities - a := strconv.Itoa(i) - if _, err := db.Set(str+a, a); err != nil { - t.Fatal(err) - } - - str = str + a + "/" - } - entries, err := db.Children("/", 5) - if err != nil { - t.Fatal(err) - } - - if len(entries) != 11 { - t.Fatalf("Expect 11 entries for / 
got %d", len(entries)) - } - - entries, err = db.Children("/", 20) - if err != nil { - t.Fatal(err) - } - - if len(entries) != 25 { - t.Fatalf("Expect 25 entries for / got %d", len(entries)) - } -} - -func TestListAllRootChildren(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - for i := 1; i < 6; i++ { - a := strconv.Itoa(i) - if _, err := db.Set("/"+a, a); err != nil { - t.Fatal(err) - } - } - entries := db.List("/", -1) - if len(entries) != 5 { - t.Fatalf("Expect 5 entries for / got %d", len(entries)) - } -} - -func TestListAllSubChildren(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - entries := db.List("/webapp", 1) - if len(entries) != 3 { - t.Fatalf("Expect 3 entries for / got %d", len(entries)) - } - - entries = db.List("/webapp", 0) - if len(entries) != 2 { - t.Fatalf("Expect 2 entries for / got %d", len(entries)) - } -} - -func TestAddSelfAsChild(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - child, err := db.Set("/test", "1") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/test/other", child.ID()); err == nil { - t.Fatal("Error should not be nil") - } -} - -func TestAddChildToNonExistentRoot(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - if _, err := db.Set("/myapp", "1"); err != nil { - t.Fatal(err) - } - - if _, err := db.Set("/myapp/proxy/db", "2"); err == nil { - t.Fatal("Error should not be nil") - } -} - -func TestWalkAll(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/db/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - if err := db.Walk("/", func(p string, e *Entity) error { - t.Logf("Path: %s Entity: %s", p, e.ID()) - return nil - }, -1); err != nil { - t.Fatal(err) - } -} - -func TestGetEntityByPath(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == 
"windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - entity := db.Get("/webapp/db/logs") - if entity == nil { - t.Fatal("Entity should not be nil") - } - if entity.ID() != "4" { - t.Fatalf("Expected to get entity with id 4, got %s", entity.ID()) - } -} - -func TestEnitiesPaths(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - out := db.List("/", -1) - for _, p := range out.Paths() { - t.Log(p) - } -} - -func TestDeleteRootEntity(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - if err := db.Delete("/"); err == nil { - t.Fatal("Error should not be nil") - } -} - -func TestDeleteEntity(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - _, err := db.Set("/webapp", "1") - if err != nil { - t.Fatal(err) - } - child2, err := db.Set("/db", "2") - if err != nil { - t.Fatal(err) - } - child4, err := db.Set("/logs", "4") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/db/logs", child4.ID()); err != nil { - t.Fatal(err) - } - - child3, err := db.Set("/sentry", "3") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/sentry", child3.ID()); err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/db", child2.ID()); err != nil { - t.Fatal(err) - } - - child5, err := db.Set("/gograph", "5") - if err != nil { - t.Fatal(err) - } - if _, err := db.Set("/webapp/same-ref-diff-name", child5.ID()); err != nil { - t.Fatal(err) - } - - if err := db.Delete("/webapp/sentry"); err != nil { - t.Fatal(err) - } - entity := db.Get("/webapp/sentry") - if entity != nil { - t.Fatal("Entity /webapp/sentry should be nil") - } -} - -func TestCountRefs(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer 
destroyTestDb(dbpath)
-
-	db.Set("/webapp", "1")
-
-	if db.Refs("1") != 1 {
-		t.Fatal("Expected reference count to be 1")
-	}
-
-	db.Set("/db", "2")
-	db.Set("/webapp/db", "2")
-	if db.Refs("2") != 2 {
-		t.Fatal("Expected reference count to be 2")
-	}
-}
-
-func TestPurgeId(t *testing.T) {
-	// TODO Windows: Port this test
-	if runtime.GOOS == "windows" {
-		t.Skip("Needs porting to Windows")
-	}
-
-	db, dbpath := newTestDb(t)
-	defer destroyTestDb(dbpath)
-
-	db.Set("/webapp", "1")
-
-	if c := db.Refs("1"); c != 1 {
-		t.Fatalf("Expected reference count to be 1, got %d", c)
-	}
-
-	db.Set("/db", "2")
-	db.Set("/webapp/db", "2")
-
-	count, err := db.Purge("2")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if count != 2 {
-		t.Fatalf("Expected 2 references to be removed, got %d", count)
-	}
-}
-
-// Regression test https://github.com/docker/docker/issues/12334
-func TestPurgeIdRefPaths(t *testing.T) {
-	// TODO Windows: Port this test
-	if runtime.GOOS == "windows" {
-		t.Skip("Needs porting to Windows")
-	}
-	db, dbpath := newTestDb(t)
-	defer destroyTestDb(dbpath)
-
-	db.Set("/webapp", "1")
-	db.Set("/db", "2")
-
-	db.Set("/db/webapp", "1")
-
-	if c := db.Refs("1"); c != 2 {
-		t.Fatalf("Expected 2 references for webapp, got %d", c)
-	}
-	if c := db.Refs("2"); c != 1 {
-		t.Fatalf("Expected 1 reference for db, got %d", c)
-	}
-
-	if rp := db.RefPaths("2"); len(rp) != 1 {
-		t.Fatalf("Expected 1 reference path for db, got %d", len(rp))
-	}
-
-	count, err := db.Purge("2")
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if count != 2 {
-		t.Fatalf("Expected 2 rows to be removed, got %d", count)
-	}
-
-	if c := db.Refs("2"); c != 0 {
-		t.Fatalf("Expected 0 references for db, got %d", c)
-	}
-	if c := db.Refs("1"); c != 1 {
-		t.Fatalf("Expected 1 reference for webapp, got %d", c)
-	}
-}
-
-func TestRename(t *testing.T) {
-	// TODO Windows: Port this test
-	if runtime.GOOS == "windows" {
-		t.Skip("Needs porting to Windows")
-	}
-	db, dbpath := newTestDb(t)
-	defer destroyTestDb(dbpath)
-
-	db.Set("/webapp", "1")
-
-	if db.Refs("1") != 1 {
-		t.Fatal("Expected reference count to be 1")
-	}
-
-	db.Set("/db", "2")
-	db.Set("/webapp/db", "2")
-
-	if db.Get("/webapp/db") == nil {
-		t.Fatal("Cannot find entity at path /webapp/db")
-	}
-
-	if err := db.Rename("/webapp/db", "/webapp/newdb"); err != nil {
-		t.Fatal(err)
-	}
-	if db.Get("/webapp/db") != nil {
-		t.Fatal("Entity should not exist at /webapp/db")
-	}
-	if db.Get("/webapp/newdb") == nil {
-		t.Fatal("Cannot find entity at path /webapp/newdb")
-	}
-
-}
-
-func TestCreateMultipleNames(t *testing.T) {
-	// TODO Windows: Port this test
-	if runtime.GOOS == "windows" {
-		t.Skip("Needs porting to Windows")
-	}
-
-	db, dbpath := newTestDb(t)
-	defer destroyTestDb(dbpath)
-
-	db.Set("/db", "1")
-	if _, err := db.Set("/myapp", "1"); err != nil {
-		t.Fatal(err)
-	}
-
-	db.Walk("/", func(p string, e *Entity) error {
-		t.Logf("%s\n", p)
-		return nil
-	}, -1)
-}
-
-func TestRefPaths(t *testing.T) {
-	db, dbpath := newTestDb(t)
-	defer destroyTestDb(dbpath)
-
-	db.Set("/webapp", "1")
-
-	db.Set("/db", "2")
-	db.Set("/webapp/db", "2")
-
-	refs := db.RefPaths("2")
-	if len(refs) != 2 {
-		t.Fatalf("Expected reference count to be 2, got %d", len(refs))
-	}
-}
-
-func TestExistsTrue(t *testing.T) {
-	db, dbpath := newTestDb(t)
-	defer destroyTestDb(dbpath)
-
-	db.Set("/testing", "1")
-
-	if !db.Exists("/testing") {
-		t.Fatalf("/testing should exist")
-	}
-}
-
-func TestExistsFalse(t *testing.T) {
-	// TODO Windows: Port this test
-	if runtime.GOOS == "windows" {
Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/toerhe", "1") - - if db.Exists("/testing") { - t.Fatalf("/tesing should not exist") - } - -} - -func TestGetNameWithTrailingSlash(t *testing.T) { - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - db.Set("/todo", "1") - - e := db.Get("/todo/") - if e == nil { - t.Fatalf("Entity should not be nil") - } -} - -func TestConcurrentWrites(t *testing.T) { - // TODO Windows: Port this test - if runtime.GOOS == "windows" { - t.Skip("Needs porting to Windows") - } - db, dbpath := newTestDb(t) - defer destroyTestDb(dbpath) - - errs := make(chan error, 2) - - save := func(name string, id string) { - if _, err := db.Set(fmt.Sprintf("/%s", name), id); err != nil { - errs <- err - } - errs <- nil - } - purge := func(id string) { - if _, err := db.Purge(id); err != nil { - errs <- err - } - errs <- nil - } - - save("/1", "1") - - go purge("1") - go save("/2", "2") - - any := false - for i := 0; i < 2; i++ { - if err := <-errs; err != nil { - any = true - t.Log(err) - } - } - if any { - t.Fail() - } -} diff --git a/pkg/graphdb/sort.go b/pkg/graphdb/sort.go deleted file mode 100644 index c07df077d8..0000000000 --- a/pkg/graphdb/sort.go +++ /dev/null @@ -1,27 +0,0 @@ -package graphdb - -import "sort" - -type pathSorter struct { - paths []string - by func(i, j string) bool -} - -func sortByDepth(paths []string) { - s := &pathSorter{paths, func(i, j string) bool { - return PathDepth(i) > PathDepth(j) - }} - sort.Sort(s) -} - -func (s *pathSorter) Len() int { - return len(s.paths) -} - -func (s *pathSorter) Swap(i, j int) { - s.paths[i], s.paths[j] = s.paths[j], s.paths[i] -} - -func (s *pathSorter) Less(i, j int) bool { - return s.by(s.paths[i], s.paths[j]) -} diff --git a/pkg/graphdb/sort_test.go b/pkg/graphdb/sort_test.go deleted file mode 100644 index ddf2266f60..0000000000 --- a/pkg/graphdb/sort_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package graphdb - -import ( - "testing" -) - -func TestSort(t *testing.T) { - paths := []string{ - "/", - "/myreallylongname", - "/app/db", - } - - sortByDepth(paths) - - if len(paths) != 3 { - t.Fatalf("Expected 3 parts got %d", len(paths)) - } - - if paths[0] != "/app/db" { - t.Fatalf("Expected /app/db got %s", paths[0]) - } - if paths[1] != "/myreallylongname" { - t.Fatalf("Expected /myreallylongname got %s", paths[1]) - } - if paths[2] != "/" { - t.Fatalf("Expected / got %s", paths[2]) - } -} diff --git a/pkg/graphdb/utils.go b/pkg/graphdb/utils.go deleted file mode 100644 index 9edd79c35e..0000000000 --- a/pkg/graphdb/utils.go +++ /dev/null @@ -1,32 +0,0 @@ -package graphdb - -import ( - "path" - "strings" -) - -// Split p on / -func split(p string) []string { - return strings.Split(p, "/") -} - -// PathDepth returns the depth or number of / in a given path -func PathDepth(p string) int { - parts := split(p) - if len(parts) == 2 && parts[1] == "" { - return 1 - } - return len(parts) -} - -func splitPath(p string) (parent, name string) { - if p[0] != '/' { - p = "/" + p - } - parent, name = path.Split(p) - l := len(parent) - if parent[l-1] == '/' { - parent = parent[:l-1] - } - return -} diff --git a/pkg/homedir/homedir.go b/pkg/homedir/homedir.go deleted file mode 100644 index 8154e83f0c..0000000000 --- a/pkg/homedir/homedir.go +++ /dev/null @@ -1,39 +0,0 @@ -package homedir - -import ( - "os" - "runtime" - - "github.com/opencontainers/runc/libcontainer/user" -) - -// Key returns the env var name for the user's home dir based on -// the platform being run on -func Key() 
string { - if runtime.GOOS == "windows" { - return "USERPROFILE" - } - return "HOME" -} - -// Get returns the home directory of the current user with the help of -// environment variables depending on the target operating system. -// Returned path should be used with "path/filepath" to form new paths. -func Get() string { - home := os.Getenv(Key()) - if home == "" && runtime.GOOS != "windows" { - if u, err := user.CurrentUser(); err == nil { - return u.Home - } - } - return home -} - -// GetShortcutString returns the string that is shortcut to user's home directory -// in the native shell of the platform running on. -func GetShortcutString() string { - if runtime.GOOS == "windows" { - return "%USERPROFILE%" // be careful while using in format functions - } - return "~" -} diff --git a/pkg/homedir/homedir_test.go b/pkg/homedir/homedir_test.go deleted file mode 100644 index 7a95cb2bd7..0000000000 --- a/pkg/homedir/homedir_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package homedir - -import ( - "path/filepath" - "testing" -) - -func TestGet(t *testing.T) { - home := Get() - if home == "" { - t.Fatal("returned home directory is empty") - } - - if !filepath.IsAbs(home) { - t.Fatalf("returned path is not absolute: %s", home) - } -} - -func TestGetShortcutString(t *testing.T) { - shortcut := GetShortcutString() - if shortcut == "" { - t.Fatal("returned shortcut string is empty") - } -} diff --git a/pkg/httputils/httputils.go b/pkg/httputils/httputils.go deleted file mode 100644 index d7dc43877d..0000000000 --- a/pkg/httputils/httputils.go +++ /dev/null @@ -1,56 +0,0 @@ -package httputils - -import ( - "errors" - "fmt" - "net/http" - "regexp" - "strings" - - "github.com/docker/docker/pkg/jsonmessage" -) - -var ( - headerRegexp = regexp.MustCompile(`^(?:(.+)/(.+?))\((.+)\).*$`) - errInvalidHeader = errors.New("Bad header, should be in format `docker/version (platform)`") -) - -// Download requests a given URL and returns an io.Reader. -func Download(url string) (resp *http.Response, err error) { - if resp, err = http.Get(url); err != nil { - return nil, err - } - if resp.StatusCode >= 400 { - return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status) - } - return resp, nil -} - -// NewHTTPRequestError returns a JSON response error. -func NewHTTPRequestError(msg string, res *http.Response) error { - return &jsonmessage.JSONError{ - Message: msg, - Code: res.StatusCode, - } -} - -// ServerHeader contains the server information. -type ServerHeader struct { - App string // docker - Ver string // 1.8.0-dev - OS string // windows or linux -} - -// ParseServerHeader extracts pieces from an HTTP server header -// which is in the format "docker/version (os)" eg docker/1.8.0-dev (windows). -func ParseServerHeader(hdr string) (*ServerHeader, error) { - matches := headerRegexp.FindStringSubmatch(hdr) - if len(matches) != 4 { - return nil, errInvalidHeader - } - return &ServerHeader{ - App: strings.TrimSpace(matches[1]), - Ver: strings.TrimSpace(matches[2]), - OS: strings.TrimSpace(matches[3]), - }, nil -} diff --git a/pkg/httputils/httputils_test.go b/pkg/httputils/httputils_test.go deleted file mode 100644 index d35d082156..0000000000 --- a/pkg/httputils/httputils_test.go +++ /dev/null @@ -1,115 +0,0 @@ -package httputils - -import ( - "fmt" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestDownload(t *testing.T) { - expected := "Hello, docker !" 
- ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, expected) - })) - defer ts.Close() - response, err := Download(ts.URL) - if err != nil { - t.Fatal(err) - } - - actual, err := ioutil.ReadAll(response.Body) - response.Body.Close() - - if err != nil || string(actual) != expected { - t.Fatalf("Expected the response %q, got err:%v, response:%v, actual:%s", expected, err, response, string(actual)) - } -} - -func TestDownload400Errors(t *testing.T) { - expectedError := "Got HTTP status code >= 400: 403 Forbidden" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // 403 - http.Error(w, "something failed (forbidden)", http.StatusForbidden) - })) - defer ts.Close() - // Expected status code = 403 - if _, err := Download(ts.URL); err == nil || err.Error() != expectedError { - t.Fatalf("Expected the the error %q, got %v", expectedError, err) - } -} - -func TestDownloadOtherErrors(t *testing.T) { - if _, err := Download("I'm not an url.."); err == nil || !strings.Contains(err.Error(), "unsupported protocol scheme") { - t.Fatalf("Expected an error with 'unsupported protocol scheme', got %v", err) - } -} - -func TestNewHTTPRequestError(t *testing.T) { - errorMessage := "Some error message" - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // 403 - http.Error(w, errorMessage, http.StatusForbidden) - })) - defer ts.Close() - httpResponse, err := http.Get(ts.URL) - if err != nil { - t.Fatal(err) - } - if err := NewHTTPRequestError(errorMessage, httpResponse); err.Error() != errorMessage { - t.Fatalf("Expected err to be %q, got %v", errorMessage, err) - } -} - -func TestParseServerHeader(t *testing.T) { - inputs := map[string][]string{ - "bad header": {"error"}, - "(bad header)": {"error"}, - "(without/spaces)": {"error"}, - "(header/with spaces)": {"error"}, - "foo/bar (baz)": {"foo", "bar", "baz"}, - "foo/bar": {"error"}, - "foo": {"error"}, - "foo/bar (baz space)": {"foo", "bar", "baz space"}, - " f f / b b ( b s ) ": {"f f", "b b", "b s"}, - "foo/bar (baz) ignore": {"foo", "bar", "baz"}, - "foo/bar ()": {"error"}, - "foo/bar()": {"error"}, - "foo/bar(baz)": {"foo", "bar", "baz"}, - "foo/bar/zzz(baz)": {"foo/bar", "zzz", "baz"}, - "foo/bar(baz/abc)": {"foo", "bar", "baz/abc"}, - "foo/bar(baz (abc))": {"foo", "bar", "baz (abc)"}, - } - - for header, values := range inputs { - serverHeader, err := ParseServerHeader(header) - if err != nil { - if err != errInvalidHeader { - t.Fatalf("Failed to parse %q, and got some unexpected error: %q", header, err) - } - if values[0] == "error" { - continue - } - t.Fatalf("Header %q failed to parse when it shouldn't have", header) - } - if values[0] == "error" { - t.Fatalf("Header %q parsed ok when it should have failed(%q).", header, serverHeader) - } - - if serverHeader.App != values[0] { - t.Fatalf("Expected serverHeader.App for %q to equal %q, got %q", header, values[0], serverHeader.App) - } - - if serverHeader.Ver != values[1] { - t.Fatalf("Expected serverHeader.Ver for %q to equal %q, got %q", header, values[1], serverHeader.Ver) - } - - if serverHeader.OS != values[2] { - t.Fatalf("Expected serverHeader.OS for %q to equal %q, got %q", header, values[2], serverHeader.OS) - } - - } - -} diff --git a/pkg/httputils/mimetype.go b/pkg/httputils/mimetype.go deleted file mode 100644 index d5cf34e4f2..0000000000 --- a/pkg/httputils/mimetype.go +++ /dev/null @@ -1,30 +0,0 @@ -package httputils - -import ( - "mime" - "net/http" -) 
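The table-driven test above pins down the grammar ParseServerHeader accepts: "app/version (os)", with anything after the closing parenthesis ignored. A short usage sketch; the header value is illustrative:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/httputils"
)

func main() {
	hdr, err := httputils.ParseServerHeader("docker/1.8.0-dev (windows)")
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr.App, hdr.Ver, hdr.OS) // docker 1.8.0-dev windows
}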
- -// MimeTypes stores the MIME content type. -var MimeTypes = struct { - TextPlain string - Tar string - OctetStream string -}{"text/plain", "application/tar", "application/octet-stream"} - -// DetectContentType returns a best guess representation of the MIME -// content type for the bytes at c. The value detected by -// http.DetectContentType is guaranteed not be nil, defaulting to -// application/octet-stream when a better guess cannot be made. The -// result of this detection is then run through mime.ParseMediaType() -// which separates the actual MIME string from any parameters. -func DetectContentType(c []byte) (string, map[string]string, error) { - - ct := http.DetectContentType(c) - contentType, args, err := mime.ParseMediaType(ct) - if err != nil { - return "", nil, err - } - - return contentType, args, nil -} diff --git a/pkg/httputils/mimetype_test.go b/pkg/httputils/mimetype_test.go deleted file mode 100644 index 9de433ee8c..0000000000 --- a/pkg/httputils/mimetype_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package httputils - -import ( - "testing" -) - -func TestDetectContentType(t *testing.T) { - input := []byte("That is just a plain text") - - if contentType, _, err := DetectContentType(input); err != nil || contentType != "text/plain" { - t.Errorf("TestDetectContentType failed") - } -} diff --git a/pkg/httputils/resumablerequestreader.go b/pkg/httputils/resumablerequestreader.go deleted file mode 100644 index bebc8608cd..0000000000 --- a/pkg/httputils/resumablerequestreader.go +++ /dev/null @@ -1,95 +0,0 @@ -package httputils - -import ( - "fmt" - "io" - "net/http" - "time" - - "github.com/Sirupsen/logrus" -) - -type resumableRequestReader struct { - client *http.Client - request *http.Request - lastRange int64 - totalSize int64 - currentResponse *http.Response - failures uint32 - maxFailures uint32 -} - -// ResumableRequestReader makes it possible to resume reading a request's body transparently -// maxfail is the number of times we retry to make requests again (not resumes) -// totalsize is the total length of the body; auto detect if not provided -func ResumableRequestReader(c *http.Client, r *http.Request, maxfail uint32, totalsize int64) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize} -} - -// ResumableRequestReaderWithInitialResponse makes it possible to resume -// reading the body of an already initiated request. 
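A hedged sketch of how a caller might use the resumable reader defined below: wrap a GET request so transient read failures are resumed via Range requests, with a totalsize of 0 meaning the reader takes the length from the first response's Content-Length. The URL is hypothetical:

package main

import (
	"io"
	"net/http"
	"os"

	"github.com/docker/docker/pkg/httputils"
)

func main() {
	req, err := http.NewRequest("GET", "https://example.com/layer.tar", nil)
	if err != nil {
		panic(err)
	}

	// Retry transient failures up to 5 times; 0 auto-detects the size.
	body := httputils.ResumableRequestReader(http.DefaultClient, req, 5, 0)
	defer body.Close()

	if _, err := io.Copy(os.Stdout, body); err != nil {
		panic(err)
	}
}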
-func ResumableRequestReaderWithInitialResponse(c *http.Client, r *http.Request, maxfail uint32, totalsize int64, initialResponse *http.Response) io.ReadCloser { - return &resumableRequestReader{client: c, request: r, maxFailures: maxfail, totalSize: totalsize, currentResponse: initialResponse} -} - -func (r *resumableRequestReader) Read(p []byte) (n int, err error) { - if r.client == nil || r.request == nil { - return 0, fmt.Errorf("client and request can't be nil\n") - } - isFreshRequest := false - if r.lastRange != 0 && r.currentResponse == nil { - readRange := fmt.Sprintf("bytes=%d-%d", r.lastRange, r.totalSize) - r.request.Header.Set("Range", readRange) - time.Sleep(5 * time.Second) - } - if r.currentResponse == nil { - r.currentResponse, err = r.client.Do(r.request) - isFreshRequest = true - } - if err != nil && r.failures+1 != r.maxFailures { - r.cleanUpResponse() - r.failures++ - time.Sleep(5 * time.Duration(r.failures) * time.Second) - return 0, nil - } else if err != nil { - r.cleanUpResponse() - return 0, err - } - if r.currentResponse.StatusCode == 416 && r.lastRange == r.totalSize && r.currentResponse.ContentLength == 0 { - r.cleanUpResponse() - return 0, io.EOF - } else if r.currentResponse.StatusCode != 206 && r.lastRange != 0 && isFreshRequest { - r.cleanUpResponse() - return 0, fmt.Errorf("the server doesn't support byte ranges") - } - if r.totalSize == 0 { - r.totalSize = r.currentResponse.ContentLength - } else if r.totalSize <= 0 { - r.cleanUpResponse() - return 0, fmt.Errorf("failed to auto detect content length") - } - n, err = r.currentResponse.Body.Read(p) - r.lastRange += int64(n) - if err != nil { - r.cleanUpResponse() - } - if err != nil && err != io.EOF { - logrus.Infof("encountered error during pull and clearing it before resume: %s", err) - err = nil - } - return n, err -} - -func (r *resumableRequestReader) Close() error { - r.cleanUpResponse() - r.client = nil - r.request = nil - return nil -} - -func (r *resumableRequestReader) cleanUpResponse() { - if r.currentResponse != nil { - r.currentResponse.Body.Close() - r.currentResponse = nil - } -} diff --git a/pkg/httputils/resumablerequestreader_test.go b/pkg/httputils/resumablerequestreader_test.go deleted file mode 100644 index 5a2906db77..0000000000 --- a/pkg/httputils/resumablerequestreader_test.go +++ /dev/null @@ -1,307 +0,0 @@ -package httputils - -import ( - "fmt" - "io" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func TestResumableRequestHeaderSimpleErrors(t *testing.T) { - ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintln(w, "Hello, world !") - })) - defer ts.Close() - - client := &http.Client{} - - var req *http.Request - req, err := http.NewRequest("GET", ts.URL, nil) - if err != nil { - t.Fatal(err) - } - - expectedError := "client and request can't be nil\n" - resreq := &resumableRequestReader{} - _, err = resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) - } - - resreq = &resumableRequestReader{ - client: client, - request: req, - totalSize: -1, - } - expectedError = "failed to auto detect content length" - _, err = resreq.Read([]byte{}) - if err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with '%s', got %v.", expectedError, err) - } - -} - -// Not too much failures, bails out after some wait -func TestResumableRequestHeaderNotTooMuchFailures(t *testing.T) { - client := &http.Client{} - - 
-	var badReq *http.Request
-	badReq, err := http.NewRequest("GET", "I'm not an url", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resreq := &resumableRequestReader{
-		client:      client,
-		request:     badReq,
-		failures:    0,
-		maxFailures: 2,
-	}
-	read, err := resreq.Read([]byte{})
-	if err != nil || read != 0 {
-		t.Fatalf("Expected no error and no byte read, got err:%v, read:%v.", err, read)
-	}
-}
-
-// Too many failures: returns the error
-func TestResumableRequestHeaderTooMuchFailures(t *testing.T) {
-	client := &http.Client{}
-
-	var badReq *http.Request
-	badReq, err := http.NewRequest("GET", "I'm not an url", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resreq := &resumableRequestReader{
-		client:      client,
-		request:     badReq,
-		failures:    0,
-		maxFailures: 1,
-	}
-	defer resreq.Close()
-
-	expectedError := `Get I%27m%20not%20an%20url: unsupported protocol scheme ""`
-	read, err := resreq.Read([]byte{})
-	if err == nil || err.Error() != expectedError || read != 0 {
-		t.Fatalf("Expected the error '%s', got err:%v, read:%v.", expectedError, err, read)
-	}
-}
-
-type errorReaderCloser struct{}
-
-func (errorReaderCloser) Close() error { return nil }
-
-func (errorReaderCloser) Read(p []byte) (n int, err error) {
-	return 0, fmt.Errorf("An error occurred")
-}
-
-// If an unknown error is encountered, return 0, nil and log it
-func TestResumableRequestReaderWithReadError(t *testing.T) {
-	var req *http.Request
-	req, err := http.NewRequest("GET", "", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	client := &http.Client{}
-
-	response := &http.Response{
-		Status:        "500 Internal Server",
-		StatusCode:    500,
-		ContentLength: 0,
-		Close:         true,
-		Body:          errorReaderCloser{},
-	}
-
-	resreq := &resumableRequestReader{
-		client:          client,
-		request:         req,
-		currentResponse: response,
-		lastRange:       1,
-		totalSize:       1,
-	}
-	defer resreq.Close()
-
-	buf := make([]byte, 1)
-	read, err := resreq.Read(buf)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if read != 0 {
-		t.Fatalf("Expected to have read nothing, but read %v", read)
-	}
-}
-
-func TestResumableRequestReaderWithEOFWith416Response(t *testing.T) {
-	var req *http.Request
-	req, err := http.NewRequest("GET", "", nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	client := &http.Client{}
-
-	response := &http.Response{
-		Status:        "416 Requested Range Not Satisfiable",
-		StatusCode:    416,
-		ContentLength: 0,
-		Close:         true,
-		Body:          ioutil.NopCloser(strings.NewReader("")),
-	}
-
-	resreq := &resumableRequestReader{
-		client:          client,
-		request:         req,
-		currentResponse: response,
-		lastRange:       1,
-		totalSize:       1,
-	}
-	defer resreq.Close()
-
-	buf := make([]byte, 1)
-	_, err = resreq.Read(buf)
-	if err == nil || err != io.EOF {
-		t.Fatalf("Expected an io.EOF error, got %v", err)
-	}
-}
-
-func TestResumableRequestReaderWithServerDoesntSupportByteRanges(t *testing.T) {
-	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if r.Header.Get("Range") == "" {
-			t.Fatalf("Expected a Range HTTP header, got nothing")
-		}
-	}))
-	defer ts.Close()
-
-	var req *http.Request
-	req, err := http.NewRequest("GET", ts.URL, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	client := &http.Client{}
-
-	resreq := &resumableRequestReader{
-		client:    client,
-		request:   req,
-		lastRange: 1,
-	}
-	defer resreq.Close()
-
-	buf := make([]byte, 2)
-	_, err = resreq.Read(buf)
-	if err == nil || err.Error() != "the server doesn't support byte ranges" {
-		t.Fatalf("Expected an error 'the server doesn't support byte ranges', got %v", err)
-	}
-}
-
-func TestResumableRequestReaderWithZeroTotalSize(t *testing.T) {
-
-	srvtxt := "some response text data"
-
-	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		fmt.Fprintln(w, srvtxt)
-	}))
-	defer ts.Close()
-
-	var req *http.Request
-	req, err := http.NewRequest("GET", ts.URL, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	client := &http.Client{}
-	retries := uint32(5)
-
-	resreq := ResumableRequestReader(client, req, retries, 0)
-	defer resreq.Close()
-
-	data, err := ioutil.ReadAll(resreq)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resstr := strings.TrimSuffix(string(data), "\n")
-
-	if resstr != srvtxt {
-		t.Errorf("resstr != srvtxt")
-	}
-}
-
-func TestResumableRequestReader(t *testing.T) {
-
-	srvtxt := "some response text data"
-
-	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		fmt.Fprintln(w, srvtxt)
-	}))
-	defer ts.Close()
-
-	var req *http.Request
-	req, err := http.NewRequest("GET", ts.URL, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	client := &http.Client{}
-	retries := uint32(5)
-	imgSize := int64(len(srvtxt))
-
-	resreq := ResumableRequestReader(client, req, retries, imgSize)
-	defer resreq.Close()
-
-	data, err := ioutil.ReadAll(resreq)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resstr := strings.TrimSuffix(string(data), "\n")
-
-	if resstr != srvtxt {
-		t.Errorf("resstr != srvtxt")
-	}
-}
-
-func TestResumableRequestReaderWithInitialResponse(t *testing.T) {
-
-	srvtxt := "some response text data"
-
-	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		fmt.Fprintln(w, srvtxt)
-	}))
-	defer ts.Close()
-
-	var req *http.Request
-	req, err := http.NewRequest("GET", ts.URL, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	client := &http.Client{}
-	retries := uint32(5)
-	imgSize := int64(len(srvtxt))
-
-	res, err := client.Do(req)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resreq := ResumableRequestReaderWithInitialResponse(client, req, retries, imgSize, res)
-	defer resreq.Close()
-
-	data, err := ioutil.ReadAll(resreq)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	resstr := strings.TrimSuffix(string(data), "\n")
-
-	if resstr != srvtxt {
-		t.Errorf("resstr != srvtxt")
-	}
-}
diff --git a/pkg/idtools/idtools.go b/pkg/idtools/idtools.go
deleted file mode 100644
index 6bca466286..0000000000
--- a/pkg/idtools/idtools.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package idtools
-
-import (
-	"bufio"
-	"fmt"
-	"os"
-	"sort"
-	"strconv"
-	"strings"
-)
-
-// IDMap contains a single entry for user namespace range remapping. An array
-// of IDMap entries represents the structure that will be provided to the Linux
-// kernel for creating a user namespace.
-type IDMap struct {
-	ContainerID int `json:"container_id"`
-	HostID      int `json:"host_id"`
-	Size        int `json:"size"`
-}
-
-type subIDRange struct {
-	Start  int
-	Length int
-}
-
-type ranges []subIDRange
-
-func (e ranges) Len() int           { return len(e) }
-func (e ranges) Swap(i, j int)      { e[i], e[j] = e[j], e[i] }
-func (e ranges) Less(i, j int) bool { return e[i].Start < e[j].Start }
-
-const (
-	subuidFileName string = "/etc/subuid"
-	subgidFileName string = "/etc/subgid"
-)
-
-// MkdirAllAs creates a directory (including any along the path) and then modifies
-// ownership to the requested uid/gid. If the directory already exists, this
-// function will still change ownership to the requested uid/gid pair.
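-// An illustrative sketch of how the three variants below differ (paths and
-// IDs are hypothetical; assume /data exists and is owned by 0:0):
-//
-//	MkdirAllAs("/data/a/b", 0755, 1000, 1000) // creates a and b, chowns both; /data untouched
-//	MkdirAllNewAs("/data", 0755, 1000, 1000)  // /data already exists: nothing created, no chown
-//	MkdirAs("/data", 0755, 1000, 1000)        // /data already exists: it is chowned to 1000:1000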
-func MkdirAllAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
-	return mkdirAs(path, mode, ownerUID, ownerGID, true, true)
-}
-
-// MkdirAllNewAs creates a directory (including any along the path) and then modifies
-// ownership ONLY of newly created directories to the requested uid/gid. If the
-// directories along the path exist, no change of ownership will be performed
-func MkdirAllNewAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
-	return mkdirAs(path, mode, ownerUID, ownerGID, true, false)
-}
-
-// MkdirAs creates a directory and then modifies ownership to the requested uid/gid.
-// If the directory already exists, this function still changes ownership
-func MkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int) error {
-	return mkdirAs(path, mode, ownerUID, ownerGID, false, true)
-}
-
-// GetRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
-// If the maps are empty, then the root uid/gid will default to "real" 0/0
-func GetRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
-	var uid, gid int
-
-	if uidMap != nil {
-		xUID, err := ToHost(0, uidMap)
-		if err != nil {
-			return -1, -1, err
-		}
-		uid = xUID
-	}
-	if gidMap != nil {
-		xGID, err := ToHost(0, gidMap)
-		if err != nil {
-			return -1, -1, err
-		}
-		gid = xGID
-	}
-	return uid, gid, nil
-}
-
-// ToContainer takes an id mapping, and uses it to translate a
-// host ID to the remapped ID. If no map is provided, then the translation
-// assumes a 1-to-1 mapping and returns the passed in id
-func ToContainer(hostID int, idMap []IDMap) (int, error) {
-	if idMap == nil {
-		return hostID, nil
-	}
-	for _, m := range idMap {
-		if (hostID >= m.HostID) && (hostID <= (m.HostID + m.Size - 1)) {
-			contID := m.ContainerID + (hostID - m.HostID)
-			return contID, nil
-		}
-	}
-	return -1, fmt.Errorf("Host ID %d cannot be mapped to a container ID", hostID)
-}
-
-// ToHost takes an id mapping and a remapped ID, and translates the
-// ID to the mapped host ID.
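-// For example, given the single hypothetical mapping
-// m = []IDMap{{ContainerID: 0, HostID: 100000, Size: 65536}},
-// ToHost(5, m) returns 100005 and ToContainer(100005, m) returns 5.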
-// If no map is provided, then the translation assumes a 1-to-1
-// mapping and returns the passed in id
-func ToHost(contID int, idMap []IDMap) (int, error) {
-	if idMap == nil {
-		return contID, nil
-	}
-	for _, m := range idMap {
-		if (contID >= m.ContainerID) && (contID <= (m.ContainerID + m.Size - 1)) {
-			hostID := m.HostID + (contID - m.ContainerID)
-			return hostID, nil
-		}
-	}
-	return -1, fmt.Errorf("Container ID %d cannot be mapped to a host ID", contID)
-}
-
-// CreateIDMappings takes a requested user and group name and,
-// using the data from /etc/sub{uid,gid} ranges, creates the
-// proper uid and gid remapping ranges for that user/group pair
-func CreateIDMappings(username, groupname string) ([]IDMap, []IDMap, error) {
-	subuidRanges, err := parseSubuid(username)
-	if err != nil {
-		return nil, nil, err
-	}
-	subgidRanges, err := parseSubgid(groupname)
-	if err != nil {
-		return nil, nil, err
-	}
-	if len(subuidRanges) == 0 {
-		return nil, nil, fmt.Errorf("No subuid ranges found for user %q", username)
-	}
-	if len(subgidRanges) == 0 {
-		return nil, nil, fmt.Errorf("No subgid ranges found for group %q", groupname)
-	}
-
-	return createIDMap(subuidRanges), createIDMap(subgidRanges), nil
-}
-
-func createIDMap(subidRanges ranges) []IDMap {
-	idMap := []IDMap{}
-
-	// sort the ranges by lowest ID first
-	sort.Sort(subidRanges)
-	containerID := 0
-	for _, idrange := range subidRanges {
-		idMap = append(idMap, IDMap{
-			ContainerID: containerID,
-			HostID:      idrange.Start,
-			Size:        idrange.Length,
-		})
-		containerID = containerID + idrange.Length
-	}
-	return idMap
-}
-
-func parseSubuid(username string) (ranges, error) {
-	return parseSubidFile(subuidFileName, username)
-}
-
-func parseSubgid(username string) (ranges, error) {
-	return parseSubidFile(subgidFileName, username)
-}
-
-// parseSubidFile will read the appropriate file (/etc/subuid or /etc/subgid)
-// and return all found ranges for a specified username.
If the special value -// "ALL" is supplied for username, then all ranges in the file will be returned -func parseSubidFile(path, username string) (ranges, error) { - var rangeList ranges - - subidFile, err := os.Open(path) - if err != nil { - return rangeList, err - } - defer subidFile.Close() - - s := bufio.NewScanner(subidFile) - for s.Scan() { - if err := s.Err(); err != nil { - return rangeList, err - } - - text := strings.TrimSpace(s.Text()) - if text == "" || strings.HasPrefix(text, "#") { - continue - } - parts := strings.Split(text, ":") - if len(parts) != 3 { - return rangeList, fmt.Errorf("Cannot parse subuid/gid information: Format not correct for %s file", path) - } - if parts[0] == username || username == "ALL" { - startid, err := strconv.Atoi(parts[1]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - length, err := strconv.Atoi(parts[2]) - if err != nil { - return rangeList, fmt.Errorf("String to int conversion failed during subuid/gid parsing of %s: %v", path, err) - } - rangeList = append(rangeList, subIDRange{startid, length}) - } - } - return rangeList, nil -} diff --git a/pkg/idtools/idtools_unix.go b/pkg/idtools/idtools_unix.go deleted file mode 100644 index b57d6ef125..0000000000 --- a/pkg/idtools/idtools_unix.go +++ /dev/null @@ -1,60 +0,0 @@ -// +build !windows - -package idtools - -import ( - "os" - "path/filepath" - - "github.com/docker/docker/pkg/system" -) - -func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - // make an array containing the original path asked for, plus (for mkAll == true) - // all path components leading up to the complete path that don't exist before we MkdirAll - // so that we can chown all of them properly at the end. 
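-	// For example (hypothetical paths), if /a exists but /a/b and /a/b/c do
-	// not, a call for /a/b/c collects paths = [/a/b/c, /a/b] and chowns
-	// exactly those two after the MkdirAll.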
If chownExisting is false, we won't - // chown the full directory path if it exists - var paths []string - if _, err := os.Stat(path); err != nil && os.IsNotExist(err) { - paths = []string{path} - } else if err == nil && chownExisting { - if err := os.Chown(path, ownerUID, ownerGID); err != nil { - return err - } - // short-circuit--we were called with an existing directory and chown was requested - return nil - } else if err == nil { - // nothing to do; directory path fully exists already and chown was NOT requested - return nil - } - - if mkAll { - // walk back to "/" looking for directories which do not exist - // and add them to the paths array for chown after creation - dirPath := path - for { - dirPath = filepath.Dir(dirPath) - if dirPath == "/" { - break - } - if _, err := os.Stat(dirPath); err != nil && os.IsNotExist(err) { - paths = append(paths, dirPath) - } - } - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - } else { - if err := os.Mkdir(path, mode); err != nil && !os.IsExist(err) { - return err - } - } - // even if it existed, we will chown the requested path + any subpaths that - // didn't exist when we called MkdirAll - for _, pathComponent := range paths { - if err := os.Chown(pathComponent, ownerUID, ownerGID); err != nil { - return err - } - } - return nil -} diff --git a/pkg/idtools/idtools_unix_test.go b/pkg/idtools/idtools_unix_test.go deleted file mode 100644 index 540d3079ee..0000000000 --- a/pkg/idtools/idtools_unix_test.go +++ /dev/null @@ -1,271 +0,0 @@ -// +build !windows - -package idtools - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "syscall" - "testing" -) - -type node struct { - uid int - gid int -} - -func TestMkdirAllAs(t *testing.T) { - dirName, err := ioutil.TempDir("", "mkdirall") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - "usr/bin": {0, 0}, - "lib": {33, 33}, - "lib/x86_64": {45, 45}, - "lib/x86_64/share": {1, 1}, - } - - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - if err := MkdirAllAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { - t.Fatal(err) - } - testTree["usr/share"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test 2-deep new directories--both should be owned by the uid/gid pair - if err := MkdirAllAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { - t.Fatal(err) - } - testTree["lib/some"] = node{101, 101} - testTree["lib/some/other"] = node{101, 101} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should be chowned, but nothing else - if err := MkdirAllAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { - t.Fatal(err) - } - testTree["usr"] = node{102, 102} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func TestMkdirAllNewAs(t *testing.T) { - - dirName, err := ioutil.TempDir("", "mkdirnew") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - 
- testTree := map[string]node{ - "usr": {0, 0}, - "usr/bin": {0, 0}, - "lib": {33, 33}, - "lib/x86_64": {45, 45}, - "lib/x86_64/share": {1, 1}, - } - - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test adding a directory to a pre-existing dir; only the new dir is owned by the uid/gid - if err := MkdirAllNewAs(filepath.Join(dirName, "usr", "share"), 0755, 99, 99); err != nil { - t.Fatal(err) - } - testTree["usr/share"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test 2-deep new directories--both should be owned by the uid/gid pair - if err := MkdirAllNewAs(filepath.Join(dirName, "lib", "some", "other"), 0755, 101, 101); err != nil { - t.Fatal(err) - } - testTree["lib/some"] = node{101, 101} - testTree["lib/some/other"] = node{101, 101} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should NOT be chowned - if err := MkdirAllNewAs(filepath.Join(dirName, "usr"), 0755, 102, 102); err != nil { - t.Fatal(err) - } - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func TestMkdirAs(t *testing.T) { - - dirName, err := ioutil.TempDir("", "mkdir") - if err != nil { - t.Fatalf("Couldn't create temp dir: %v", err) - } - defer os.RemoveAll(dirName) - - testTree := map[string]node{ - "usr": {0, 0}, - } - if err := buildTree(dirName, testTree); err != nil { - t.Fatal(err) - } - - // test a directory that already exists; should just chown to the requested uid/gid - if err := MkdirAs(filepath.Join(dirName, "usr"), 0755, 99, 99); err != nil { - t.Fatal(err) - } - testTree["usr"] = node{99, 99} - verifyTree, err := readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } - - // create a subdir under a dir which doesn't exist--should fail - if err := MkdirAs(filepath.Join(dirName, "usr", "bin", "subdir"), 0755, 102, 102); err == nil { - t.Fatalf("Trying to create a directory with Mkdir where the parent doesn't exist should have failed") - } - - // create a subdir under an existing dir; should only change the ownership of the new subdir - if err := MkdirAs(filepath.Join(dirName, "usr", "bin"), 0755, 102, 102); err != nil { - t.Fatal(err) - } - testTree["usr/bin"] = node{102, 102} - verifyTree, err = readTree(dirName, "") - if err != nil { - t.Fatal(err) - } - if err := compareTrees(testTree, verifyTree); err != nil { - t.Fatal(err) - } -} - -func buildTree(base string, tree map[string]node) error { - for path, node := range tree { - fullPath := filepath.Join(base, path) - if err := os.MkdirAll(fullPath, 0755); err != nil { - return fmt.Errorf("Couldn't create path: %s; error: %v", fullPath, err) - } - if err := os.Chown(fullPath, node.uid, node.gid); err != nil { - return fmt.Errorf("Couldn't chown path: %s; error: %v", fullPath, err) - } - } - return nil -} - -func readTree(base, root string) (map[string]node, error) { - tree := make(map[string]node) - - dirInfos, err := ioutil.ReadDir(base) - if err != nil { - return nil, fmt.Errorf("Couldn't read directory entries for %q: %v", base, err) - } - - for _, info := range dirInfos { - s := &syscall.Stat_t{} - if err := 
syscall.Stat(filepath.Join(base, info.Name()), s); err != nil { - return nil, fmt.Errorf("Can't stat file %q: %v", filepath.Join(base, info.Name()), err) - } - tree[filepath.Join(root, info.Name())] = node{int(s.Uid), int(s.Gid)} - if info.IsDir() { - // read the subdirectory - subtree, err := readTree(filepath.Join(base, info.Name()), filepath.Join(root, info.Name())) - if err != nil { - return nil, err - } - for path, nodeinfo := range subtree { - tree[path] = nodeinfo - } - } - } - return tree, nil -} - -func compareTrees(left, right map[string]node) error { - if len(left) != len(right) { - return fmt.Errorf("Trees aren't the same size") - } - for path, nodeLeft := range left { - if nodeRight, ok := right[path]; ok { - if nodeRight.uid != nodeLeft.uid || nodeRight.gid != nodeLeft.gid { - // mismatch - return fmt.Errorf("mismatched ownership for %q: expected: %d:%d, got: %d:%d", path, - nodeLeft.uid, nodeLeft.gid, nodeRight.uid, nodeRight.gid) - } - continue - } - return fmt.Errorf("right tree didn't contain path %q", path) - } - return nil -} - -func TestParseSubidFileWithNewlinesAndComments(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "parsesubid") - if err != nil { - t.Fatal(err) - } - fnamePath := filepath.Join(tmpDir, "testsubuid") - fcontent := `tss:100000:65536 -# empty default subuid/subgid file - -dockremap:231072:65536` - if err := ioutil.WriteFile(fnamePath, []byte(fcontent), 0644); err != nil { - t.Fatal(err) - } - ranges, err := parseSubidFile(fnamePath, "dockremap") - if err != nil { - t.Fatal(err) - } - if len(ranges) != 1 { - t.Fatalf("wanted 1 element in ranges, got %d instead", len(ranges)) - } - if ranges[0].Start != 231072 { - t.Fatalf("wanted 231072, got %d instead", ranges[0].Start) - } - if ranges[0].Length != 65536 { - t.Fatalf("wanted 65536, got %d instead", ranges[0].Length) - } -} diff --git a/pkg/idtools/idtools_windows.go b/pkg/idtools/idtools_windows.go deleted file mode 100644 index c9e3c937cd..0000000000 --- a/pkg/idtools/idtools_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package idtools - -import ( - "os" - - "github.com/docker/docker/pkg/system" -) - -// Platforms such as Windows do not support the UID/GID concept. So make this -// just a wrapper around system.MkdirAll. 
-func mkdirAs(path string, mode os.FileMode, ownerUID, ownerGID int, mkAll, chownExisting bool) error { - if err := system.MkdirAll(path, mode); err != nil && !os.IsExist(err) { - return err - } - return nil -} diff --git a/pkg/idtools/usergroupadd_linux.go b/pkg/idtools/usergroupadd_linux.go deleted file mode 100644 index 4a4aaed04d..0000000000 --- a/pkg/idtools/usergroupadd_linux.go +++ /dev/null @@ -1,188 +0,0 @@ -package idtools - -import ( - "fmt" - "os/exec" - "path/filepath" - "regexp" - "sort" - "strconv" - "strings" - "sync" -) - -// add a user and/or group to Linux /etc/passwd, /etc/group using standard -// Linux distribution commands: -// adduser --system --shell /bin/false --disabled-login --disabled-password --no-create-home --group -// useradd -r -s /bin/false - -var ( - once sync.Once - userCommand string - - cmdTemplates = map[string]string{ - "adduser": "--system --shell /bin/false --no-create-home --disabled-login --disabled-password --group %s", - "useradd": "-r -s /bin/false %s", - "usermod": "-%s %d-%d %s", - } - - idOutRegexp = regexp.MustCompile(`uid=([0-9]+).*gid=([0-9]+)`) - // default length for a UID/GID subordinate range - defaultRangeLen = 65536 - defaultRangeStart = 100000 - userMod = "usermod" -) - -func resolveBinary(binname string) (string, error) { - binaryPath, err := exec.LookPath(binname) - if err != nil { - return "", err - } - resolvedPath, err := filepath.EvalSymlinks(binaryPath) - if err != nil { - return "", err - } - //only return no error if the final resolved binary basename - //matches what was searched for - if filepath.Base(resolvedPath) == binname { - return resolvedPath, nil - } - return "", fmt.Errorf("Binary %q does not resolve to a binary of that name in $PATH (%q)", binname, resolvedPath) -} - -// AddNamespaceRangesUser takes a username and uses the standard system -// utility to create a system user/group pair used to hold the -// /etc/sub{uid,gid} ranges which will be used for user namespace -// mapping ranges in containers. 
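-// A minimal usage sketch (the username is conventional for the daemon, but
-// treat it as hypothetical here):
-//
-//	uid, gid, err := AddNamespaceRangesUser("dockremap")
-//	if err != nil {
-//		// handle the error
-//	}
-//	// uid:gid is now backed by 65536-wide ranges in /etc/subuid and /etc/subgid
-//	fmt.Printf("dockremap mapped to %d:%d\n", uid, gid)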
-func AddNamespaceRangesUser(name string) (int, int, error) { - if err := addUser(name); err != nil { - return -1, -1, fmt.Errorf("Error adding user %q: %v", name, err) - } - - // Query the system for the created uid and gid pair - out, err := execCmd("id", name) - if err != nil { - return -1, -1, fmt.Errorf("Error trying to find uid/gid for new user %q: %v", name, err) - } - matches := idOutRegexp.FindStringSubmatch(strings.TrimSpace(string(out))) - if len(matches) != 3 { - return -1, -1, fmt.Errorf("Can't find uid, gid from `id` output: %q", string(out)) - } - uid, err := strconv.Atoi(matches[1]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found uid (%s) to int: %v", matches[1], err) - } - gid, err := strconv.Atoi(matches[2]) - if err != nil { - return -1, -1, fmt.Errorf("Can't convert found gid (%s) to int: %v", matches[2], err) - } - - // Now we need to create the subuid/subgid ranges for our new user/group (system users - // do not get auto-created ranges in subuid/subgid) - - if err := createSubordinateRanges(name); err != nil { - return -1, -1, fmt.Errorf("Couldn't create subordinate ID ranges: %v", err) - } - return uid, gid, nil -} - -func addUser(userName string) error { - once.Do(func() { - // set up which commands are used for adding users/groups dependent on distro - if _, err := resolveBinary("adduser"); err == nil { - userCommand = "adduser" - } else if _, err := resolveBinary("useradd"); err == nil { - userCommand = "useradd" - } - }) - if userCommand == "" { - return fmt.Errorf("Cannot add user; no useradd/adduser binary found") - } - args := fmt.Sprintf(cmdTemplates[userCommand], userName) - out, err := execCmd(userCommand, args) - if err != nil { - return fmt.Errorf("Failed to add user with error: %v; output: %q", err, string(out)) - } - return nil -} - -func createSubordinateRanges(name string) error { - - // first, we should verify that ranges weren't automatically created - // by the distro tooling - ranges, err := parseSubuid(name) - if err != nil { - return fmt.Errorf("Error while looking for subuid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no UID ranges; let's create one - startID, err := findNextUIDRange() - if err != nil { - return fmt.Errorf("Can't find available subuid range: %v", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "v", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("Unable to add subuid range to user: %q; output: %s, err: %v", name, out, err) - } - } - - ranges, err = parseSubgid(name) - if err != nil { - return fmt.Errorf("Error while looking for subgid ranges for user %q: %v", name, err) - } - if len(ranges) == 0 { - // no GID ranges; let's create one - startID, err := findNextGIDRange() - if err != nil { - return fmt.Errorf("Can't find available subgid range: %v", err) - } - out, err := execCmd(userMod, fmt.Sprintf(cmdTemplates[userMod], "w", startID, startID+defaultRangeLen-1, name)) - if err != nil { - return fmt.Errorf("Unable to add subgid range to user: %q; output: %s, err: %v", name, out, err) - } - } - return nil -} - -func findNextUIDRange() (int, error) { - ranges, err := parseSubuid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subuid file: %v", err) - } - sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextGIDRange() (int, error) { - ranges, err := parseSubgid("ALL") - if err != nil { - return -1, fmt.Errorf("Couldn't parse all ranges in /etc/subgid file: %v", err) - } - 
sort.Sort(ranges) - return findNextRangeStart(ranges) -} - -func findNextRangeStart(rangeList ranges) (int, error) { - startID := defaultRangeStart - for _, arange := range rangeList { - if wouldOverlap(arange, startID) { - startID = arange.Start + arange.Length - } - } - return startID, nil -} - -func wouldOverlap(arange subIDRange, ID int) bool { - low := ID - high := ID + defaultRangeLen - if (low >= arange.Start && low <= arange.Start+arange.Length) || - (high <= arange.Start+arange.Length && high >= arange.Start) { - return true - } - return false -} - -func execCmd(cmd, args string) ([]byte, error) { - execCmd := exec.Command(cmd, strings.Split(args, " ")...) - return execCmd.CombinedOutput() -} diff --git a/pkg/idtools/usergroupadd_unsupported.go b/pkg/idtools/usergroupadd_unsupported.go deleted file mode 100644 index d98b354cbd..0000000000 --- a/pkg/idtools/usergroupadd_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux - -package idtools - -import "fmt" - -// AddNamespaceRangesUser takes a name and finds an unused uid, gid pair -// and calls the appropriate helper function to add the group and then -// the user to the group in /etc/group and /etc/passwd respectively. -func AddNamespaceRangesUser(name string) (int, int, error) { - return -1, -1, fmt.Errorf("No support for adding users or groups on this OS") -} diff --git a/pkg/integration/checker/checker.go b/pkg/integration/checker/checker.go deleted file mode 100644 index d1b703a599..0000000000 --- a/pkg/integration/checker/checker.go +++ /dev/null @@ -1,46 +0,0 @@ -// Package checker provides Docker specific implementations of the go-check.Checker interface. -package checker - -import ( - "github.com/go-check/check" - "github.com/vdemeester/shakers" -) - -// As a commodity, we bring all check.Checker variables into the current namespace to avoid having -// to think about check.X versus checker.X. -var ( - DeepEquals = check.DeepEquals - ErrorMatches = check.ErrorMatches - FitsTypeOf = check.FitsTypeOf - HasLen = check.HasLen - Implements = check.Implements - IsNil = check.IsNil - Matches = check.Matches - Not = check.Not - NotNil = check.NotNil - PanicMatches = check.PanicMatches - Panics = check.Panics - - Contains = shakers.Contains - ContainsAny = shakers.ContainsAny - Count = shakers.Count - Equals = shakers.Equals - EqualFold = shakers.EqualFold - False = shakers.False - GreaterOrEqualThan = shakers.GreaterOrEqualThan - GreaterThan = shakers.GreaterThan - HasPrefix = shakers.HasPrefix - HasSuffix = shakers.HasSuffix - Index = shakers.Index - IndexAny = shakers.IndexAny - IsAfter = shakers.IsAfter - IsBefore = shakers.IsBefore - IsBetween = shakers.IsBetween - IsLower = shakers.IsLower - IsUpper = shakers.IsUpper - LessOrEqualThan = shakers.LessOrEqualThan - LessThan = shakers.LessThan - TimeEquals = shakers.TimeEquals - True = shakers.True - TimeIgnore = shakers.TimeIgnore -) diff --git a/pkg/integration/dockerCmd_utils.go b/pkg/integration/dockerCmd_utils.go deleted file mode 100644 index fab3e062dd..0000000000 --- a/pkg/integration/dockerCmd_utils.go +++ /dev/null @@ -1,78 +0,0 @@ -package integration - -import ( - "fmt" - "os/exec" - "strings" - "time" - - "github.com/go-check/check" -) - -// We use the elongated quote mechanism for quoting error returns as -// the use of strconv.Quote or %q in fmt.Errorf will escape characters. This -// has a big downside on Windows where the args include paths, so instead -// of something like c:\directory\file.txt, the output would be -// c:\\directory\\file.txt. 
This is highly misleading. -const quote = `"` - -var execCommand = exec.Command - -// DockerCmdWithError executes a docker command that is supposed to fail and returns -// the output, the exit code and the error. -func DockerCmdWithError(dockerBinary string, args ...string) (string, int, error) { - return RunCommandWithOutput(execCommand(dockerBinary, args...)) -} - -// DockerCmdWithStdoutStderr executes a docker command and returns the content of the -// stdout, stderr and the exit code. If a check.C is passed, it will fail and stop tests -// if the error is not nil. -func DockerCmdWithStdoutStderr(dockerBinary string, c *check.C, args ...string) (string, string, int) { - stdout, stderr, status, err := RunCommandWithStdoutStderr(execCommand(dockerBinary, args...)) - if c != nil { - c.Assert(err, check.IsNil, check.Commentf(quote+"%v"+quote+" failed with errors: %s, %v", strings.Join(args, " "), stderr, err)) - } - return stdout, stderr, status -} - -// DockerCmd executes a docker command and returns the output and the exit code. If the -// command returns an error, it will fail and stop the tests. -func DockerCmd(dockerBinary string, c *check.C, args ...string) (string, int) { - out, status, err := RunCommandWithOutput(execCommand(dockerBinary, args...)) - c.Assert(err, check.IsNil, check.Commentf(quote+"%v"+quote+" failed with errors: %s, %v", strings.Join(args, " "), out, err)) - return out, status -} - -// DockerCmdWithTimeout executes a docker command with a timeout, and returns the output, -// the exit code and the error (if any). -func DockerCmdWithTimeout(dockerBinary string, timeout time.Duration, args ...string) (string, int, error) { - out, status, err := RunCommandWithOutputAndTimeout(execCommand(dockerBinary, args...), timeout) - if err != nil { - return out, status, fmt.Errorf(quote+"%v"+quote+" failed with errors: %v : %q", strings.Join(args, " "), err, out) - } - return out, status, err -} - -// DockerCmdInDir executes a docker command in a directory and returns the output, the -// exit code and the error (if any). -func DockerCmdInDir(dockerBinary string, path string, args ...string) (string, int, error) { - dockerCommand := execCommand(dockerBinary, args...) - dockerCommand.Dir = path - out, status, err := RunCommandWithOutput(dockerCommand) - if err != nil { - return out, status, fmt.Errorf(quote+"%v"+quote+" failed with errors: %v : %q", strings.Join(args, " "), err, out) - } - return out, status, err -} - -// DockerCmdInDirWithTimeout executes a docker command in a directory with a timeout and -// returns the output, the exit code and the error (if any). -func DockerCmdInDirWithTimeout(dockerBinary string, timeout time.Duration, path string, args ...string) (string, int, error) { - dockerCommand := execCommand(dockerBinary, args...) 
- dockerCommand.Dir = path - out, status, err := RunCommandWithOutputAndTimeout(dockerCommand, timeout) - if err != nil { - return out, status, fmt.Errorf(quote+"%v"+quote+" failed with errors: %v : %q", strings.Join(args, " "), err, out) - } - return out, status, err -} diff --git a/pkg/integration/dockerCmd_utils_test.go b/pkg/integration/dockerCmd_utils_test.go deleted file mode 100644 index 3dd5d11461..0000000000 --- a/pkg/integration/dockerCmd_utils_test.go +++ /dev/null @@ -1,405 +0,0 @@ -package integration - -import ( - "fmt" - "os" - "os/exec" - "testing" - - "io/ioutil" - "strings" - "time" - - "github.com/go-check/check" -) - -const dockerBinary = "docker" - -// Setup go-check for this test -func Test(t *testing.T) { - check.TestingT(t) -} - -func init() { - check.Suite(&DockerCmdSuite{}) -} - -type DockerCmdSuite struct{} - -// Fake the exec.Command to use our mock. -func (s *DockerCmdSuite) SetUpTest(c *check.C) { - execCommand = fakeExecCommand -} - -// And bring it back to normal after the test. -func (s *DockerCmdSuite) TearDownTest(c *check.C) { - execCommand = exec.Command -} - -// DockerCmdWithError tests - -func (s *DockerCmdSuite) TestDockerCmdWithError(c *check.C) { - cmds := []struct { - binary string - args []string - expectedOut string - expectedExitCode int - expectedError error - }{ - { - "doesnotexists", - []string{}, - "Command doesnotexists not found.", - 1, - fmt.Errorf("exit status 1"), - }, - { - dockerBinary, - []string{"an", "error"}, - "an error has occurred", - 1, - fmt.Errorf("exit status 1"), - }, - { - dockerBinary, - []string{"an", "exitCode", "127"}, - "an error has occurred with exitCode 127", - 127, - fmt.Errorf("exit status 127"), - }, - { - dockerBinary, - []string{"run", "-ti", "ubuntu", "echo", "hello"}, - "hello", - 0, - nil, - }, - } - for _, cmd := range cmds { - out, exitCode, error := DockerCmdWithError(cmd.binary, cmd.args...) - c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) - c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) - if cmd.expectedError != nil { - c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) - c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) - } else { - c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) - } - } -} - -// DockerCmdWithStdoutStderr tests - -type dockerCmdWithStdoutStderrErrorSuite struct{} - -func (s *dockerCmdWithStdoutStderrErrorSuite) Test(c *check.C) { - // Should fail, the test too - DockerCmdWithStdoutStderr(dockerBinary, c, "an", "error") -} - -type dockerCmdWithStdoutStderrSuccessSuite struct{} - -func (s *dockerCmdWithStdoutStderrSuccessSuite) Test(c *check.C) { - stdout, stderr, exitCode := DockerCmdWithStdoutStderr(dockerBinary, c, "run", "-ti", "ubuntu", "echo", "hello") - c.Assert(stdout, check.Equals, "hello") - c.Assert(stderr, check.Equals, "") - c.Assert(exitCode, check.Equals, 0) - -} - -func (s *DockerCmdSuite) TestDockerCmdWithStdoutStderrError(c *check.C) { - // Run error suite, should fail. 
-	output := String{}
-	result := check.Run(&dockerCmdWithStdoutStderrErrorSuite{}, &check.RunConf{Output: &output})
-	c.Check(result.Succeeded, check.Equals, 0)
-	c.Check(result.Failed, check.Equals, 1)
-}
-
-func (s *DockerCmdSuite) TestDockerCmdWithStdoutStderrSuccess(c *check.C) {
-	// Run success suite, should succeed.
-	output := String{}
-	result := check.Run(&dockerCmdWithStdoutStderrSuccessSuite{}, &check.RunConf{Output: &output})
-	c.Check(result.Succeeded, check.Equals, 1)
-	c.Check(result.Failed, check.Equals, 0)
-}
-
-// DockerCmd tests
-
-type dockerCmdErrorSuite struct{}
-
-func (s *dockerCmdErrorSuite) Test(c *check.C) {
-	// Should fail, the test too
-	DockerCmd(dockerBinary, c, "an", "error")
-}
-
-type dockerCmdSuccessSuite struct{}
-
-func (s *dockerCmdSuccessSuite) Test(c *check.C) {
-	stdout, exitCode := DockerCmd(dockerBinary, c, "run", "-ti", "ubuntu", "echo", "hello")
-	c.Assert(stdout, check.Equals, "hello")
-	c.Assert(exitCode, check.Equals, 0)
-
-}
-
-func (s *DockerCmdSuite) TestDockerCmdError(c *check.C) {
-	// Run error suite, should fail.
-	output := String{}
-	result := check.Run(&dockerCmdErrorSuite{}, &check.RunConf{Output: &output})
-	c.Check(result.Succeeded, check.Equals, 0)
-	c.Check(result.Failed, check.Equals, 1)
-}
-
-func (s *DockerCmdSuite) TestDockerCmdSuccess(c *check.C) {
-	// Run success suite, should succeed.
-	output := String{}
-	result := check.Run(&dockerCmdSuccessSuite{}, &check.RunConf{Output: &output})
-	c.Check(result.Succeeded, check.Equals, 1)
-	c.Check(result.Failed, check.Equals, 0)
-}
-
-// DockerCmdWithTimeout tests
-
-func (s *DockerCmdSuite) TestDockerCmdWithTimeout(c *check.C) {
-	cmds := []struct {
-		binary           string
-		args             []string
-		timeout          time.Duration
-		expectedOut      string
-		expectedExitCode int
-		expectedError    error
-	}{
-		{
-			"doesnotexists",
-			[]string{},
-			200 * time.Millisecond,
-			`Command doesnotexists not found.`,
-			1,
-			fmt.Errorf(`"" failed with errors: exit status 1 : "Command doesnotexists not found."`),
-		},
-		{
-			dockerBinary,
-			[]string{"an", "error"},
-			200 * time.Millisecond,
-			`an error has occurred`,
-			1,
-			fmt.Errorf(`"an error" failed with errors: exit status 1 : "an error has occurred"`),
-		},
-		{
-			dockerBinary,
-			[]string{"a", "command", "that", "times", "out"},
-			5 * time.Millisecond,
-			"",
-			0,
-			fmt.Errorf(`"a command that times out" failed with errors: command timed out : ""`),
-		},
-		{
-			dockerBinary,
-			[]string{"run", "-ti", "ubuntu", "echo", "hello"},
-			200 * time.Millisecond,
-			"hello",
-			0,
-			nil,
-		},
-	}
-	for _, cmd := range cmds {
-		out, exitCode, error := DockerCmdWithTimeout(cmd.binary, cmd.timeout, cmd.args...)
- c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) - c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) - if cmd.expectedError != nil { - c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) - c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) - } else { - c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) - } - } -} - -// DockerCmdInDir tests - -func (s *DockerCmdSuite) TestDockerCmdInDir(c *check.C) { - tempFolder, err := ioutil.TempDir("", "test-docker-cmd-in-dir") - c.Assert(err, check.IsNil) - - cmds := []struct { - binary string - args []string - expectedOut string - expectedExitCode int - expectedError error - }{ - { - "doesnotexists", - []string{}, - `Command doesnotexists not found.`, - 1, - fmt.Errorf(`"dir:%s" failed with errors: exit status 1 : "Command doesnotexists not found."`, tempFolder), - }, - { - dockerBinary, - []string{"an", "error"}, - `an error has occurred`, - 1, - fmt.Errorf(`"dir:%s an error" failed with errors: exit status 1 : "an error has occurred"`, tempFolder), - }, - { - dockerBinary, - []string{"run", "-ti", "ubuntu", "echo", "hello"}, - "hello", - 0, - nil, - }, - } - for _, cmd := range cmds { - // We prepend the arguments with dir:thefolder.. the fake command will check - // that the current workdir is the same as the one we are passing. - args := append([]string{"dir:" + tempFolder}, cmd.args...) - out, exitCode, error := DockerCmdInDir(cmd.binary, tempFolder, args...) 
- c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) - c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) - if cmd.expectedError != nil { - c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) - c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) - } else { - c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) - } - } -} - -// DockerCmdInDirWithTimeout tests - -func (s *DockerCmdSuite) TestDockerCmdInDirWithTimeout(c *check.C) { - tempFolder, err := ioutil.TempDir("", "test-docker-cmd-in-dir") - c.Assert(err, check.IsNil) - - cmds := []struct { - binary string - args []string - timeout time.Duration - expectedOut string - expectedExitCode int - expectedError error - }{ - { - "doesnotexists", - []string{}, - 200 * time.Millisecond, - `Command doesnotexists not found.`, - 1, - fmt.Errorf(`"dir:%s" failed with errors: exit status 1 : "Command doesnotexists not found."`, tempFolder), - }, - { - dockerBinary, - []string{"an", "error"}, - 200 * time.Millisecond, - `an error has occurred`, - 1, - fmt.Errorf(`"dir:%s an error" failed with errors: exit status 1 : "an error has occurred"`, tempFolder), - }, - { - dockerBinary, - []string{"a", "command", "that", "times", "out"}, - 5 * time.Millisecond, - "", - 0, - fmt.Errorf(`"dir:%s a command that times out" failed with errors: command timed out : ""`, tempFolder), - }, - { - dockerBinary, - []string{"run", "-ti", "ubuntu", "echo", "hello"}, - 200 * time.Millisecond, - "hello", - 0, - nil, - }, - } - for _, cmd := range cmds { - // We prepend the arguments with dir:thefolder.. the fake command will check - // that the current workdir is the same as the one we are passing. - args := append([]string{"dir:" + tempFolder}, cmd.args...) - out, exitCode, error := DockerCmdInDirWithTimeout(cmd.binary, cmd.timeout, tempFolder, args...) - c.Assert(out, check.Equals, cmd.expectedOut, check.Commentf("Expected output %q for arguments %v, got %q", cmd.expectedOut, cmd.args, out)) - c.Assert(exitCode, check.Equals, cmd.expectedExitCode, check.Commentf("Expected exitCode %q for arguments %v, got %q", cmd.expectedExitCode, cmd.args, exitCode)) - if cmd.expectedError != nil { - c.Assert(error, check.NotNil, check.Commentf("Expected an error %q, got nothing", cmd.expectedError)) - c.Assert(error.Error(), check.Equals, cmd.expectedError.Error(), check.Commentf("Expected error %q for arguments %v, got %q", cmd.expectedError.Error(), cmd.args, error.Error())) - } else { - c.Assert(error, check.IsNil, check.Commentf("Expected no error, got %v", error)) - } - } -} - -// Helpers :) - -// Type implementing the io.Writer interface for analyzing output. -type String struct { - value string -} - -// The only function required by the io.Writer interface. Will append -// written data to the String.value string. -func (s *String) Write(p []byte) (n int, err error) { - s.value += string(p) - return len(p), nil -} - -// Helper function that mock the exec.Command call (and call the test binary) -func fakeExecCommand(command string, args ...string) *exec.Cmd { - cs := []string{"-test.run=TestHelperProcess", "--", command} - cs = append(cs, args...) 
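-	// Re-invoke the currently running test binary (os.Args[0]), restricted to
-	// TestHelperProcess; the GO_WANT_HELPER_PROCESS guard below turns that
-	// test into a stand-in for the real command being faked.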
-	cmd := exec.Command(os.Args[0], cs...)
-	cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
-	return cmd
-}
-
-func TestHelperProcess(t *testing.T) {
-	if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
-		return
-	}
-	args := os.Args
-
-	// The preceding arguments are test-harness flags, which look like:
-	// /tmp/go-build970079519/…/_test/integration.test -test.run=TestHelperProcess --
-	cmd, args := args[3], args[4:]
-	// Handle the case where args[0] is dir:...
-	if len(args) > 0 && strings.HasPrefix(args[0], "dir:") {
-		expectedCwd := args[0][4:]
-		if len(args) > 1 {
-			args = args[1:]
-		}
-		cwd, err := os.Getwd()
-		if err != nil {
-			fmt.Fprintf(os.Stderr, "Failed to get workingdir: %v", err)
-			os.Exit(1)
-		}
-		// This checks that the given path is the same as the current working directory
-		if expectedCwd != cwd {
-			fmt.Fprintf(os.Stderr, "Current workdir should be %q, but is %q", expectedCwd, cwd)
-		}
-	}
-	switch cmd {
-	case dockerBinary:
-		argsStr := strings.Join(args, " ")
-		switch argsStr {
-		case "an exitCode 127":
-			fmt.Fprintf(os.Stderr, "an error has occurred with exitCode 127")
-			os.Exit(127)
-		case "an error":
-			fmt.Fprintf(os.Stderr, "an error has occurred")
-			os.Exit(1)
-		case "a command that times out":
-			time.Sleep(10 * time.Second)
-			fmt.Fprintf(os.Stdout, "too long, should be killed")
-			// A random exit code (that should never happen in tests)
-			os.Exit(7)
-		case "run -ti ubuntu echo hello":
-			fmt.Fprintf(os.Stdout, "hello")
-		default:
-			fmt.Fprintf(os.Stdout, "no arguments")
-		}
-	default:
-		fmt.Fprintf(os.Stderr, "Command %s not found.", cmd)
-		os.Exit(1)
-	}
-	// some code here to check arguments perhaps?
-	os.Exit(0)
-}
diff --git a/pkg/integration/utils.go b/pkg/integration/utils.go
deleted file mode 100644
index cfccc80143..0000000000
--- a/pkg/integration/utils.go
+++ /dev/null
@@ -1,361 +0,0 @@
-package integration
-
-import (
-	"archive/tar"
-	"bytes"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"reflect"
-	"strings"
-	"syscall"
-	"time"
-
-	"github.com/docker/docker/pkg/stringutils"
-)
-
-// GetExitCode returns the ExitStatus of the specified error if its type is
-// exec.ExitError; it returns 0 and an error otherwise.
-func GetExitCode(err error) (int, error) {
-	exitCode := 0
-	if exiterr, ok := err.(*exec.ExitError); ok {
-		if procExit, ok := exiterr.Sys().(syscall.WaitStatus); ok {
-			return procExit.ExitStatus(), nil
-		}
-	}
-	return exitCode, fmt.Errorf("failed to get exit code")
-}
-
-// ProcessExitCode processes the specified error and returns the exit status code
-// if the error was of type exec.ExitError; it returns 0 otherwise.
-func ProcessExitCode(err error) (exitCode int) {
-	if err != nil {
-		var exiterr error
-		if exitCode, exiterr = GetExitCode(err); exiterr != nil {
-			// TODO: Fix this so we check the error's text.
-			// we've failed to retrieve exit code, so we set it to 127
-			exitCode = 127
-		}
-	}
-	return
-}
-
-// IsKilled processes the specified error and returns whether the process was killed or not.
-func IsKilled(err error) bool {
-	if exitErr, ok := err.(*exec.ExitError); ok {
-		status, ok := exitErr.Sys().(syscall.WaitStatus)
-		if !ok {
-			return false
-		}
-		// status.ExitStatus() is required on Windows because it does not
-		// implement Signal() nor Signaled().
-		// Simply checking for a bad exit status is enough here: a bad exit
-		// status could mean the process was killed (and in tests we do kill).
-		return (status.Signaled() && status.Signal() == os.Kill) || status.ExitStatus() != 0
-	}
-	return false
-}
-
-// RunCommandWithOutput runs the specified command and returns the combined output (stdout/stderr),
-// a non-zero exit code and the error if something bad happened
-func RunCommandWithOutput(cmd *exec.Cmd) (output string, exitCode int, err error) {
-	exitCode = 0
-	out, err := cmd.CombinedOutput()
-	exitCode = ProcessExitCode(err)
-	output = string(out)
-	return
-}
-
-// RunCommandWithStdoutStderr runs the specified command and returns stdout and stderr separately,
-// a non-zero exit code and the error if something bad happened
-func RunCommandWithStdoutStderr(cmd *exec.Cmd) (stdout string, stderr string, exitCode int, err error) {
-	var (
-		stderrBuffer, stdoutBuffer bytes.Buffer
-	)
-	exitCode = 0
-	cmd.Stderr = &stderrBuffer
-	cmd.Stdout = &stdoutBuffer
-	err = cmd.Run()
-	exitCode = ProcessExitCode(err)
-
-	stdout = stdoutBuffer.String()
-	stderr = stderrBuffer.String()
-	return
-}
-
-// RunCommandWithOutputForDuration runs the specified command "timeboxed" by the specified duration.
-// If the process is still running when the timebox is finished, the process will be killed.
-// It returns the output, a non-zero exit code and the error if something bad happened,
-// and a boolean indicating whether the process has been killed or not.
-func RunCommandWithOutputForDuration(cmd *exec.Cmd, duration time.Duration) (output string, exitCode int, timedOut bool, err error) {
-	var outputBuffer bytes.Buffer
-	if cmd.Stdout != nil {
-		err = errors.New("cmd.Stdout already set")
-		return
-	}
-	cmd.Stdout = &outputBuffer
-
-	if cmd.Stderr != nil {
-		err = errors.New("cmd.Stderr already set")
-		return
-	}
-	cmd.Stderr = &outputBuffer
-
-	// Start the command in the main goroutine.
-	err = cmd.Start()
-	if err != nil {
-		err = fmt.Errorf("Failed to start command %v : %v", cmd, err)
-	}
-
-	type exitInfo struct {
-		exitErr  error
-		exitCode int
-	}
-
-	done := make(chan exitInfo, 1)
-
-	go func() {
-		// And wait for it to exit in the goroutine :)
-		info := exitInfo{}
-		info.exitErr = cmd.Wait()
-		info.exitCode = ProcessExitCode(info.exitErr)
-		done <- info
-	}()
-
-	select {
-	case <-time.After(duration):
-		killErr := cmd.Process.Kill()
-		if killErr != nil {
-			fmt.Printf("failed to kill (pid=%d): %v\n", cmd.Process.Pid, killErr)
-		}
-		timedOut = true
-	case info := <-done:
-		err = info.exitErr
-		exitCode = info.exitCode
-	}
-	output = outputBuffer.String()
-	return
-}
-
-var errCmdTimeout = fmt.Errorf("command timed out")
-
-// RunCommandWithOutputAndTimeout runs the specified command "timeboxed" by the specified duration.
-// It returns the output, a non-zero exit code and the error if something bad happened or
-// if the process timed out (and has been killed).
-func RunCommandWithOutputAndTimeout(cmd *exec.Cmd, timeout time.Duration) (output string, exitCode int, err error) {
-	var timedOut bool
-	output, exitCode, timedOut, err = RunCommandWithOutputForDuration(cmd, timeout)
-	if timedOut {
-		err = errCmdTimeout
-	}
-	return
-}
-
-// RunCommand runs the specified command and returns a non-zero exit code
-// and the error if something bad happened.
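-// A minimal usage sketch of the timeboxed variant above (the command and
-// timeout are hypothetical):
-//
-//	out, code, err := RunCommandWithOutputAndTimeout(exec.Command("sleep", "60"), time.Second)
-//	if err == errCmdTimeout {
-//		// the process was killed after ~1s; any partial output is still in out
-//	}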
-func RunCommand(cmd *exec.Cmd) (exitCode int, err error) {
-	exitCode = 0
-	err = cmd.Run()
-	exitCode = ProcessExitCode(err)
-	return
-}
-
-// RunCommandPipelineWithOutput runs the array of commands, piping the output
-// of each into the next (as cmd1 | cmd2 | cmd3 would do).
-// It returns the final output, a non-zero exit code and the error
-// if something bad happened.
-func RunCommandPipelineWithOutput(cmds ...*exec.Cmd) (output string, exitCode int, err error) {
-	if len(cmds) < 2 {
-		return "", 0, errors.New("pipeline does not have multiple cmds")
-	}
-
-	// connect stdin of each cmd to stdout pipe of previous cmd
-	for i, cmd := range cmds {
-		if i > 0 {
-			prevCmd := cmds[i-1]
-			cmd.Stdin, err = prevCmd.StdoutPipe()
-
-			if err != nil {
-				return "", 0, fmt.Errorf("cannot set stdout pipe for %s: %v", cmd.Path, err)
-			}
-		}
-	}
-
-	// start all cmds except the last
-	for _, cmd := range cmds[:len(cmds)-1] {
-		if err = cmd.Start(); err != nil {
-			return "", 0, fmt.Errorf("starting %s failed with error: %v", cmd.Path, err)
-		}
-	}
-
-	var pipelineError error
-	defer func() {
-		// wait all cmds except the last to release their resources
-		for _, cmd := range cmds[:len(cmds)-1] {
-			if err := cmd.Wait(); err != nil {
-				pipelineError = fmt.Errorf("command %s failed with error: %v", cmd.Path, err)
-				break
-			}
-		}
-	}()
-	if pipelineError != nil {
-		return "", 0, pipelineError
-	}
-
-	// wait on last cmd
-	return RunCommandWithOutput(cmds[len(cmds)-1])
-}
-
-// UnmarshalJSON deserializes JSON data into the given interface.
-func UnmarshalJSON(data []byte, result interface{}) error {
-	if err := json.Unmarshal(data, result); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// ConvertSliceOfStringsToMap converts a slice of strings into a map
-// with the strings as keys and empty structs as values.
-func ConvertSliceOfStringsToMap(input []string) map[string]struct{} {
-	output := make(map[string]struct{})
-	for _, v := range input {
-		output[v] = struct{}{}
-	}
-	return output
-}
-
-// CompareDirectoryEntries compares two sets of FileInfo (usually taken from a directory)
-// and returns an error if different.
-func CompareDirectoryEntries(e1 []os.FileInfo, e2 []os.FileInfo) error {
-	var (
-		e1Entries = make(map[string]struct{})
-		e2Entries = make(map[string]struct{})
-	)
-	for _, e := range e1 {
-		e1Entries[e.Name()] = struct{}{}
-	}
-	for _, e := range e2 {
-		e2Entries[e.Name()] = struct{}{}
-	}
-	if !reflect.DeepEqual(e1Entries, e2Entries) {
-		return fmt.Errorf("entries differ")
-	}
-	return nil
-}
-
-// ListTar lists the entries of a tar.
-func ListTar(f io.Reader) ([]string, error) {
-	tr := tar.NewReader(f)
-	var entries []string
-
-	for {
-		th, err := tr.Next()
-		if err == io.EOF {
-			// end of tar archive
-			return entries, nil
-		}
-		if err != nil {
-			return entries, err
-		}
-		entries = append(entries, th.Name)
-	}
-}
-
-// RandomTmpDirPath provides a temporary path with a random string appended.
-// It does not create the path or check whether it exists.
-func RandomTmpDirPath(s string, platform string) string {
-	tmp := "/tmp"
-	if platform == "windows" {
-		tmp = os.Getenv("TEMP")
-	}
-	path := filepath.Join(tmp, fmt.Sprintf("%s.%s", s, stringutils.GenerateRandomAlphaOnlyString(10)))
-	if platform == "windows" {
-		return filepath.FromSlash(path) // Using \
-	}
-	return filepath.ToSlash(path) // Using /
-}
-
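-// A minimal usage sketch of ConsumeWithSpeed, defined below (numbers are
-// hypothetical): reading 1024-byte chunks with a 100ms pause between them
-// throttles consumption to roughly 10 KiB/s:
-//
-//	stop := make(chan bool)
-//	n, err := ConsumeWithSpeed(reader, 1024, 100*time.Millisecond, stop)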
-// ConsumeWithSpeed reads chunkSize bytes from reader before sleeping
-// for interval duration. Returns total read bytes. Send true to the
-// stop channel to return before reading to EOF on the reader.
-func ConsumeWithSpeed(reader io.Reader, chunkSize int, interval time.Duration, stop chan bool) (n int, err error) {
-	buffer := make([]byte, chunkSize)
-	for {
-		var readBytes int
-		readBytes, err = reader.Read(buffer)
-		n += readBytes
-		if err != nil {
-			if err == io.EOF {
-				err = nil
-			}
-			return
-		}
-		select {
-		case <-stop:
-			return
-		case <-time.After(interval):
-		}
-	}
-}
-
-// ParseCgroupPaths parses 'procCgroupData', which is output of '/proc/<pid>/cgroup', and returns
-// a map with the cgroup name as key and the path as value.
-func ParseCgroupPaths(procCgroupData string) map[string]string {
-	cgroupPaths := map[string]string{}
-	for _, line := range strings.Split(procCgroupData, "\n") {
-		parts := strings.Split(line, ":")
-		if len(parts) != 3 {
-			continue
-		}
-		cgroupPaths[parts[1]] = parts[2]
-	}
-	return cgroupPaths
-}
-
-// ChannelBuffer holds a chan of byte array that can be populated in a goroutine.
-type ChannelBuffer struct {
-	C chan []byte
-}
-
-// Write implements Writer.
-func (c *ChannelBuffer) Write(b []byte) (int, error) {
-	c.C <- b
-	return len(b), nil
-}
-
-// Close closes the go channel.
-func (c *ChannelBuffer) Close() error {
-	close(c.C)
-	return nil
-}
-
-// ReadTimeout reads the content of the channel into the specified byte array with
-// the specified duration as timeout.
-func (c *ChannelBuffer) ReadTimeout(p []byte, n time.Duration) (int, error) {
-	select {
-	case b := <-c.C:
-		return copy(p[0:], b), nil
-	case <-time.After(n):
-		return -1, fmt.Errorf("timeout reading from channel")
-	}
-}
-
-// RunAtDifferentDate runs the specified function with the given time.
-// It changes the date of the system, which can lead to weird behaviors.
-func RunAtDifferentDate(date time.Time, block func()) {
-	// Layout for date.
-
-// RunAtDifferentDate runs the specified function with the given time.
-// It changes the date of the system, which can lead to weird behaviors.
-func RunAtDifferentDate(date time.Time, block func()) {
-	// Layout for date. MMDDhhmmYYYY
-	const timeLayout = "010203042006"
-	// Ensure we bring time back to now
-	now := time.Now().Format(timeLayout)
-	dateReset := exec.Command("date", now)
-	defer RunCommand(dateReset)
-
-	dateChange := exec.Command("date", date.Format(timeLayout))
-	RunCommand(dateChange)
-	block()
-}
diff --git a/pkg/integration/utils_test.go b/pkg/integration/utils_test.go
deleted file mode 100644
index b354ab932d..0000000000
--- a/pkg/integration/utils_test.go
+++ /dev/null
@@ -1,572 +0,0 @@
-package integration
-
-import (
-	"io"
-	"io/ioutil"
-	"os"
-	"os/exec"
-	"path/filepath"
-	"runtime"
-	"strconv"
-	"strings"
-	"testing"
-	"time"
-)
-
-func TestIsKilledFalseWithNonKilledProcess(t *testing.T) {
-	var lsCmd *exec.Cmd
-	if runtime.GOOS != "windows" {
-		lsCmd = exec.Command("ls")
-	} else {
-		lsCmd = exec.Command("cmd", "/c", "dir")
-	}
-
-	err := lsCmd.Run()
-	if IsKilled(err) {
-		t.Fatalf("Expected the ls command not to be killed, but it was.")
-	}
-}
-
-func TestIsKilledTrueWithKilledProcess(t *testing.T) {
-	var longCmd *exec.Cmd
-	if runtime.GOOS != "windows" {
-		longCmd = exec.Command("top")
-	} else {
-		longCmd = exec.Command("powershell", "while ($true) { sleep 1 }")
-	}
-
-	// Start a command
-	err := longCmd.Start()
-	if err != nil {
-		t.Fatal(err)
-	}
-	// Capture the error when the process dies
-	done := make(chan error, 1)
-	go func() {
-		done <- longCmd.Wait()
-	}()
-	// Then kill it
-	longCmd.Process.Kill()
-	// Get the error
-	err = <-done
-	if !IsKilled(err) {
-		t.Fatalf("Expected the command to be killed, but it was not.")
-	}
-}
-
-func TestRunCommandWithOutput(t *testing.T) {
-	var (
-		echoHelloWorldCmd *exec.Cmd
-		expected          string
-	)
-	if runtime.GOOS != "windows" {
-		echoHelloWorldCmd = exec.Command("echo", "hello", "world")
-		expected = "hello world\n"
-	} else {
-		echoHelloWorldCmd = exec.Command("cmd", "/s", "/c", "echo", "hello", "world")
-		expected = "hello world\r\n"
-	}
-
-	out, exitCode, err := RunCommandWithOutput(echoHelloWorldCmd)
-	if out != expected || exitCode != 0 || err != nil {
-		t.Fatalf("Expected command to output %s, got %s, %v with exitCode %v", expected, out, err, exitCode)
-	}
-}
-
-func TestRunCommandWithOutputError(t *testing.T) {
-	var (
-		p                string
-		wrongCmd         *exec.Cmd
-		expected         string
-		expectedExitCode int
-	)
-
-	if runtime.GOOS != "windows" {
-		p = "$PATH"
-		wrongCmd = exec.Command("ls", "-z")
-		expected = `ls: invalid option -- 'z'
-Try 'ls --help' for more information.
-` - expectedExitCode = 2 - } else { - p = "%PATH%" - wrongCmd = exec.Command("cmd", "/s", "/c", "dir", "/Z") - expected = "Invalid switch - " + strconv.Quote("Z") + ".\r\n" - expectedExitCode = 1 - } - cmd := exec.Command("doesnotexists") - out, exitCode, err := RunCommandWithOutput(cmd) - expectedError := `exec: "doesnotexists": executable file not found in ` + p - if out != "" || exitCode != 127 || err == nil || err.Error() != expectedError { - t.Fatalf("Expected command to output %s, got %s, %v with exitCode %v", expectedError, out, err, exitCode) - } - - out, exitCode, err = RunCommandWithOutput(wrongCmd) - - if out != expected || exitCode != expectedExitCode || err == nil || !strings.Contains(err.Error(), "exit status "+strconv.Itoa(expectedExitCode)) { - t.Fatalf("Expected command to output %s, got out:xxx%sxxx, err:%v with exitCode %v", expected, out, err, exitCode) - } -} - -func TestRunCommandWithStdoutStderr(t *testing.T) { - echoHelloWorldCmd := exec.Command("echo", "hello", "world") - stdout, stderr, exitCode, err := RunCommandWithStdoutStderr(echoHelloWorldCmd) - expected := "hello world\n" - if stdout != expected || stderr != "" || exitCode != 0 || err != nil { - t.Fatalf("Expected command to output %s, got stdout:%s, stderr:%s, err:%v with exitCode %v", expected, stdout, stderr, err, exitCode) - } -} - -func TestRunCommandWithStdoutStderrError(t *testing.T) { - p := "$PATH" - if runtime.GOOS == "windows" { - p = "%PATH%" - } - cmd := exec.Command("doesnotexists") - stdout, stderr, exitCode, err := RunCommandWithStdoutStderr(cmd) - expectedError := `exec: "doesnotexists": executable file not found in ` + p - if stdout != "" || stderr != "" || exitCode != 127 || err == nil || err.Error() != expectedError { - t.Fatalf("Expected command to output out:%s, stderr:%s, got stdout:%s, stderr:%s, err:%v with exitCode %v", "", "", stdout, stderr, err, exitCode) - } - - wrongLsCmd := exec.Command("ls", "-z") - expected := `ls: invalid option -- 'z' -Try 'ls --help' for more information. 
-`
-
-	stdout, stderr, exitCode, err = RunCommandWithStdoutStderr(wrongLsCmd)
-	if stdout != "" || stderr != expected || exitCode != 2 || err == nil || err.Error() != "exit status 2" {
-		t.Fatalf("Expected command to output out:%s, stderr:%s, got stdout:%s, stderr:%s, err:%v with exitCode %v", "", expected, stdout, stderr, err, exitCode)
-	}
-}
-
-func TestRunCommandWithOutputForDurationFinished(t *testing.T) {
-	// TODO Windows: Port this test
-	if runtime.GOOS == "windows" {
-		t.Skip("Needs porting to Windows")
-	}
-
-	cmd := exec.Command("ls")
-	out, exitCode, timedOut, err := RunCommandWithOutputForDuration(cmd, 50*time.Millisecond)
-	if out == "" || exitCode != 0 || timedOut || err != nil {
-		t.Fatalf("Expected the command to run in under 50 milliseconds and thus not time out, but it did not: out:[%s], exitCode:[%d], timedOut:[%v], err:[%v]", out, exitCode, timedOut, err)
-	}
-}
-
-func TestRunCommandWithOutputForDurationKilled(t *testing.T) {
-	// TODO Windows: Port this test
-	if runtime.GOOS == "windows" {
-		t.Skip("Needs porting to Windows")
-	}
-	cmd := exec.Command("sh", "-c", "while true ; do echo 1 ; sleep .1 ; done")
-	out, exitCode, timedOut, err := RunCommandWithOutputForDuration(cmd, 500*time.Millisecond)
-	ones := strings.Split(out, "\n")
-	if len(ones) != 6 || exitCode != 0 || !timedOut || err != nil {
-		t.Fatalf("Expected the command to run for 500 milliseconds (and thus print six lines: five with 1, one empty) and time out, but it did not: out:[%s], exitCode:%d, timedOut:%v, err:%v", out, exitCode, timedOut, err)
-	}
-}
-
-func TestRunCommandWithOutputForDurationErrors(t *testing.T) {
-	cmd := exec.Command("ls")
-	cmd.Stdout = os.Stdout
-	if _, _, _, err := RunCommandWithOutputForDuration(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stdout already set" {
-		t.Fatalf("Expected an error as cmd.Stdout was already set, did not (err:%s).", err)
-	}
-	cmd = exec.Command("ls")
-	cmd.Stderr = os.Stderr
-	if _, _, _, err := RunCommandWithOutputForDuration(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stderr already set" {
-		t.Fatalf("Expected an error as cmd.Stderr was already set, did not (err:%s).", err)
-	}
-}
-
-func TestRunCommandWithOutputAndTimeoutFinished(t *testing.T) {
-	// TODO Windows: Port this test
-	if runtime.GOOS == "windows" {
-		t.Skip("Needs porting to Windows")
-	}
-
-	cmd := exec.Command("ls")
-	out, exitCode, err := RunCommandWithOutputAndTimeout(cmd, 50*time.Millisecond)
-	if out == "" || exitCode != 0 || err != nil {
-		t.Fatalf("Expected the command to run in under 50 milliseconds and thus not time out, but it did not: out:[%s], exitCode:[%d], err:[%v]", out, exitCode, err)
-	}
-}
-
-func TestRunCommandWithOutputAndTimeoutKilled(t *testing.T) {
-	// TODO Windows: Port this test
-	if runtime.GOOS == "windows" {
-		t.Skip("Needs porting to Windows")
-	}
-
-	cmd := exec.Command("sh", "-c", "while true ; do echo 1 ; sleep .1 ; done")
-	out, exitCode, err := RunCommandWithOutputAndTimeout(cmd, 500*time.Millisecond)
-	ones := strings.Split(out, "\n")
-	if len(ones) != 6 || exitCode != 0 || err == nil || err.Error() != "command timed out" {
-		t.Fatalf("Expected the command to run for 500 milliseconds (and thus print six lines: five with 1, one empty) and time out with an error 'command timed out', but it did not: out:[%s], exitCode:%d, err:%v", out, exitCode, err)
-	}
-}
-
-func TestRunCommandWithOutputAndTimeoutErrors(t *testing.T) {
-	cmd := exec.Command("ls")
-	cmd.Stdout = os.Stdout
-	if _, _, err := 
RunCommandWithOutputAndTimeout(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stdout already set" {
-		t.Fatalf("Expected an error as cmd.Stdout was already set, did not (err:%s).", err)
-	}
-	cmd = exec.Command("ls")
-	cmd.Stderr = os.Stderr
-	if _, _, err := RunCommandWithOutputAndTimeout(cmd, 1*time.Millisecond); err == nil || err.Error() != "cmd.Stderr already set" {
-		t.Fatalf("Expected an error as cmd.Stderr was already set, did not (err:%s).", err)
-	}
-}
-
-func TestRunCommand(t *testing.T) {
-	// TODO Windows: Port this test
-	if runtime.GOOS == "windows" {
-		t.Skip("Needs porting to Windows")
-	}
-
-	p := "$PATH"
-	if runtime.GOOS == "windows" {
-		p = "%PATH%"
-	}
-	lsCmd := exec.Command("ls")
-	exitCode, err := RunCommand(lsCmd)
-	if exitCode != 0 || err != nil {
-		t.Fatalf("Expected runCommand to run the command successfully, got: exitCode:%d, err:%v", exitCode, err)
-	}
-
-	var expectedError string
-
-	exitCode, err = RunCommand(exec.Command("doesnotexists"))
-	expectedError = `exec: "doesnotexists": executable file not found in ` + p
-	if exitCode != 127 || err == nil || err.Error() != expectedError {
-		t.Fatalf("Expected runCommand to fail with exit code 127 and error %q, got: exitCode:%d, err:%v", expectedError, exitCode, err)
-	}
-	wrongLsCmd := exec.Command("ls", "-z")
-	expected := 2
-	expectedError = `exit status 2`
-	exitCode, err = RunCommand(wrongLsCmd)
-	if exitCode != expected || err == nil || err.Error() != expectedError {
-		t.Fatalf("Expected runCommand to fail with exit code %d and error %q, got: exitCode:%d, err:%v", expected, expectedError, exitCode, err)
-	}
-}
-
-func TestRunCommandPipelineWithOutputWithNotEnoughCmds(t *testing.T) {
-	_, _, err := RunCommandPipelineWithOutput(exec.Command("ls"))
-	expectedError := "pipeline does not have multiple cmds"
-	if err == nil || err.Error() != expectedError {
-		t.Fatalf("Expected an error with %s, got err:%s", expectedError, err)
-	}
-}
-
-func TestRunCommandPipelineWithOutputErrors(t *testing.T) {
-	p := "$PATH"
-	if runtime.GOOS == "windows" {
-		p = "%PATH%"
-	}
-	cmd1 := exec.Command("ls")
-	cmd1.Stdout = os.Stdout
-	cmd2 := exec.Command("anything really")
-	_, _, err := RunCommandPipelineWithOutput(cmd1, cmd2)
-	if err == nil || err.Error() != "cannot set stdout pipe for anything really: exec: Stdout already set" {
-		t.Fatalf("Expected an error, got %v", err)
-	}
-
-	cmdWithError := exec.Command("doesnotexists")
-	cmdCat := exec.Command("cat")
-	_, _, err = RunCommandPipelineWithOutput(cmdWithError, cmdCat)
-	if err == nil || err.Error() != `starting doesnotexists failed with error: exec: "doesnotexists": executable file not found in `+p {
-		t.Fatalf("Expected an error, got %v", err)
-	}
-}
-
-func TestRunCommandPipelineWithOutput(t *testing.T) {
-	cmds := []*exec.Cmd{
-		// Print 2 characters
-		exec.Command("echo", "-n", "11"),
-		// Count the number of chars from stdin (the previous command)
-		exec.Command("wc", "-m"),
-	}
-	out, exitCode, err := RunCommandPipelineWithOutput(cmds...)
-	expectedOutput := "2\n"
-	if out != expectedOutput || exitCode != 0 || err != nil {
-		t.Fatalf("Expected %s for commands %v, got out:%s, exitCode:%d, err:%v", expectedOutput, cmds, out, exitCode, err)
-	}
-}
-
-// A simple test, as UnmarshalJSON is just a passthrough for json.Unmarshal
-func TestUnmarshalJSON(t *testing.T) {
-	emptyResult := struct{}{}
-	if err := UnmarshalJSON([]byte(""), &emptyResult); err == nil {
-		t.Fatalf("Expected an error, got nothing")
-	}
-	result := struct{ Name string }{}
-	if err := UnmarshalJSON([]byte(`{"name": "name"}`), &result); err != nil {
-		t.Fatal(err)
-	}
-	if result.Name != "name" {
-		t.Fatalf("Expected result.name to be 'name', was '%s'", result.Name)
-	}
-}
-
-func TestConvertSliceOfStringsToMap(t *testing.T) {
-	input := []string{"a", "b"}
-	actual := ConvertSliceOfStringsToMap(input)
-	for _, key := range input {
-		if _, ok := actual[key]; !ok {
-			t.Fatalf("Expected output to contain key %s, did not: %v", key, actual)
-		}
-	}
-}
-
-func TestCompareDirectoryEntries(t *testing.T) {
-	tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-compare-directories")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpFolder)
-
-	file1 := filepath.Join(tmpFolder, "file1")
-	file2 := filepath.Join(tmpFolder, "file2")
-	os.Create(file1)
-	os.Create(file2)
-
-	fi1, err := os.Stat(file1)
-	if err != nil {
-		t.Fatal(err)
-	}
-	fi1bis, err := os.Stat(file1)
-	if err != nil {
-		t.Fatal(err)
-	}
-	fi2, err := os.Stat(file2)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	cases := []struct {
-		e1          []os.FileInfo
-		e2          []os.FileInfo
-		shouldError bool
-	}{
-		// Empty directories
-		{
-			[]os.FileInfo{},
-			[]os.FileInfo{},
-			false,
-		},
-		// Same FileInfos
-		{
-			[]os.FileInfo{fi1},
-			[]os.FileInfo{fi1},
-			false,
-		},
-		// Different FileInfos but same names
-		{
-			[]os.FileInfo{fi1},
-			[]os.FileInfo{fi1bis},
-			false,
-		},
-		// Different FileInfos, different names
-		{
-			[]os.FileInfo{fi1},
-			[]os.FileInfo{fi2},
-			true,
-		},
-	}
-	for _, elt := range cases {
-		err := CompareDirectoryEntries(elt.e1, elt.e2)
-		if elt.shouldError && err == nil {
-			t.Fatalf("Should have returned an error, did not, with %v and %v", elt.e1, elt.e2)
-		}
-		if !elt.shouldError && err != nil {
-			t.Fatalf("Should not have returned an error, but did: %v with %v and %v", err, elt.e1, elt.e2)
-		}
-	}
-}
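CompareDirectoryEntries only compares entry names, which makes it a cheap way to assert that two directory trees have the same top-level contents. A hypothetical sketch (the directory paths are invented for illustration, and the import path is assumed as above):

package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/integration" // assumed import path
)

func main() {
	// ReadDir returns []os.FileInfo, matching CompareDirectoryEntries' signature.
	e1, err := ioutil.ReadDir("/tmp/dir-a") // hypothetical path
	if err != nil {
		panic(err)
	}
	e2, err := ioutil.ReadDir("/tmp/dir-b") // hypothetical path
	if err != nil {
		panic(err)
	}
	if err := integration.CompareDirectoryEntries(e1, e2); err != nil {
		fmt.Println("directories differ:", err)
		return
	}
	fmt.Println("directories contain the same entry names")
}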
-
-// FIXME make an "unhappy path" test for ListTar without "panicking" :-)
-func TestListTar(t *testing.T) {
-	// TODO Windows: Figure out why this fails. Should be portable.
-	if runtime.GOOS == "windows" {
-		t.Skip("Failing on Windows - needs further investigation")
-	}
-	tmpFolder, err := ioutil.TempDir("", "integration-cli-utils-list-tar")
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer os.RemoveAll(tmpFolder)
-
-	// Let's create a Tar file
-	srcFile := filepath.Join(tmpFolder, "src")
-	tarFile := filepath.Join(tmpFolder, "src.tar")
-	os.Create(srcFile)
-	cmd := exec.Command("sh", "-c", "tar cf "+tarFile+" "+srcFile)
-	_, err = cmd.CombinedOutput()
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	reader, err := os.Open(tarFile)
-	if err != nil {
-		t.Fatal(err)
-	}
-	defer reader.Close()
-
-	entries, err := ListTar(reader)
-	if err != nil {
-		t.Fatal(err)
-	}
-	// tar strips the leading '/' from member names
-	if len(entries) != 1 || entries[0] != strings.TrimPrefix(srcFile, "/") {
-		t.Fatalf("Expected a tar file with 1 entry (%s), got %v", srcFile, entries)
-	}
-}
-
-func TestRandomTmpDirPath(t *testing.T) {
-	path := RandomTmpDirPath("something", runtime.GOOS)
-
-	prefix := "/tmp/something"
-	if runtime.GOOS == "windows" {
-		prefix = os.Getenv("TEMP") + `\something`
-	}
-	expectedSize := len(prefix) + 11
-
-	if !strings.HasPrefix(path, prefix) {
-		t.Fatalf("Expected generated path to have '%s' as prefix, got '%s'", prefix, path)
-	}
-	if len(path) != expectedSize {
-		t.Fatalf("Expected generated path to be %d characters long, got %d", expectedSize, len(path))
-	}
-}
-
-func TestConsumeWithSpeed(t *testing.T) {
-	reader := strings.NewReader("1234567890")
-	chunksize := 2
-
-	bytes1, err := ConsumeWithSpeed(reader, chunksize, 1*time.Second, nil)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if bytes1 != 10 {
-		t.Fatalf("Expected to have read 10 bytes, got %d", bytes1)
-	}
-}
-
-func TestConsumeWithSpeedWithStop(t *testing.T) {
-	reader := strings.NewReader("1234567890")
-	chunksize := 2
-
-	stopIt := make(chan bool)
-
-	go func() {
-		time.Sleep(1 * time.Millisecond)
-		stopIt <- true
-	}()
-
-	bytes1, err := ConsumeWithSpeed(reader, chunksize, 20*time.Millisecond, stopIt)
-	if err != nil {
-		t.Fatal(err)
-	}
-
-	if bytes1 != 2 {
-		t.Fatalf("Expected to have read 2 bytes, got %d", bytes1)
-	}
-}
-
-func TestParseCgroupPathsEmpty(t *testing.T) {
-	cgroupMap := ParseCgroupPaths("")
-	if len(cgroupMap) != 0 {
-		t.Fatalf("Expected an empty map, got %v", cgroupMap)
-	}
-	cgroupMap = ParseCgroupPaths("\n")
-	if len(cgroupMap) != 0 {
-		t.Fatalf("Expected an empty map, got %v", cgroupMap)
-	}
-	cgroupMap = ParseCgroupPaths("something:else\nagain:here")
-	if len(cgroupMap) != 0 {
-		t.Fatalf("Expected an empty map, got %v", cgroupMap)
-	}
-}
-
-func TestParseCgroupPaths(t *testing.T) {
-	cgroupMap := ParseCgroupPaths("2:memory:/a\n1:cpuset:/b")
-	if len(cgroupMap) != 2 {
-		t.Fatalf("Expected a map with 2 entries, got %v", cgroupMap)
-	}
-	if value, ok := cgroupMap["memory"]; !ok || value != "/a" {
-		t.Fatalf("Expected cgroupMap to contain an entry for 'memory' with value '/a', got %v", cgroupMap)
-	}
-	if value, ok := cgroupMap["cpuset"]; !ok || value != "/b" {
-		t.Fatalf("Expected cgroupMap to contain an entry for 'cpuset' with value '/b', got %v", cgroupMap)
-	}
-}
-
-func TestChannelBufferTimeout(t *testing.T) {
-	expected := "11"
-
-	buf := &ChannelBuffer{make(chan []byte, 1)}
-	defer buf.Close()
-
-	done := make(chan struct{}, 1)
-	go func() {
-		time.Sleep(100 * time.Millisecond)
-		io.Copy(buf, strings.NewReader(expected))
-		done <- struct{}{}
-	}()
-
-	// Wait for less time than the writer sleeps, so the read must time out
-	b := make([]byte, 2)
-	_, err := buf.ReadTimeout(b, 50*time.Millisecond)
-	if err == nil || err.Error() != "timeout reading from channel" {
-		t.Fatalf("Expected a timeout error, got %v", err)
-	}
- 
<-done -} - -func TestChannelBuffer(t *testing.T) { - expected := "11" - - buf := &ChannelBuffer{make(chan []byte, 1)} - defer buf.Close() - - go func() { - time.Sleep(100 * time.Millisecond) - io.Copy(buf, strings.NewReader(expected)) - }() - - // Wait long enough - b := make([]byte, 2) - _, err := buf.ReadTimeout(b, 200*time.Millisecond) - if err != nil { - t.Fatal(err) - } - - if string(b) != expected { - t.Fatalf("Expected '%s', got '%s'", expected, string(b)) - } -} - -// FIXME doesn't work -// func TestRunAtDifferentDate(t *testing.T) { -// var date string - -// // Layout for date. MMDDhhmmYYYY -// const timeLayout = "20060102" -// expectedDate := "20100201" -// theDate, err := time.Parse(timeLayout, expectedDate) -// if err != nil { -// t.Fatal(err) -// } - -// RunAtDifferentDate(theDate, func() { -// cmd := exec.Command("date", "+%Y%M%d") -// out, err := cmd.Output() -// if err != nil { -// t.Fatal(err) -// } -// date = string(out) -// }) -// } diff --git a/pkg/ioutils/buffer.go b/pkg/ioutils/buffer.go deleted file mode 100644 index 3d737b3e19..0000000000 --- a/pkg/ioutils/buffer.go +++ /dev/null @@ -1,51 +0,0 @@ -package ioutils - -import ( - "errors" - "io" -) - -var errBufferFull = errors.New("buffer is full") - -type fixedBuffer struct { - buf []byte - pos int - lastRead int -} - -func (b *fixedBuffer) Write(p []byte) (int, error) { - n := copy(b.buf[b.pos:cap(b.buf)], p) - b.pos += n - - if n < len(p) { - if b.pos == cap(b.buf) { - return n, errBufferFull - } - return n, io.ErrShortWrite - } - return n, nil -} - -func (b *fixedBuffer) Read(p []byte) (int, error) { - n := copy(p, b.buf[b.lastRead:b.pos]) - b.lastRead += n - return n, nil -} - -func (b *fixedBuffer) Len() int { - return b.pos - b.lastRead -} - -func (b *fixedBuffer) Cap() int { - return cap(b.buf) -} - -func (b *fixedBuffer) Reset() { - b.pos = 0 - b.lastRead = 0 - b.buf = b.buf[:0] -} - -func (b *fixedBuffer) String() string { - return string(b.buf[b.lastRead:b.pos]) -} diff --git a/pkg/ioutils/buffer_test.go b/pkg/ioutils/buffer_test.go deleted file mode 100644 index 41098fa6e7..0000000000 --- a/pkg/ioutils/buffer_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package ioutils - -import ( - "bytes" - "testing" -) - -func TestFixedBufferWrite(t *testing.T) { - buf := &fixedBuffer{buf: make([]byte, 0, 64)} - n, err := buf.Write([]byte("hello")) - if err != nil { - t.Fatal(err) - } - - if n != 5 { - t.Fatalf("expected 5 bytes written, got %d", n) - } - - if string(buf.buf[:5]) != "hello" { - t.Fatalf("expected \"hello\", got %q", string(buf.buf[:5])) - } - - n, err = buf.Write(bytes.Repeat([]byte{1}, 64)) - if err != errBufferFull { - t.Fatalf("expected errBufferFull, got %v - %v", err, buf.buf[:64]) - } -} - -func TestFixedBufferRead(t *testing.T) { - buf := &fixedBuffer{buf: make([]byte, 0, 64)} - if _, err := buf.Write([]byte("hello world")); err != nil { - t.Fatal(err) - } - - b := make([]byte, 5) - n, err := buf.Read(b) - if err != nil { - t.Fatal(err) - } - - if n != 5 { - t.Fatalf("expected 5 bytes read, got %d - %s", n, buf.String()) - } - - if string(b) != "hello" { - t.Fatalf("expected \"hello\", got %q", string(b)) - } - - n, err = buf.Read(b) - if err != nil { - t.Fatal(err) - } - - if n != 5 { - t.Fatalf("expected 5 bytes read, got %d", n) - } - - if string(b) != " worl" { - t.Fatalf("expected \" worl\", got %s", string(b)) - } - - b = b[:1] - n, err = buf.Read(b) - if err != nil { - t.Fatal(err) - } - - if n != 1 { - t.Fatalf("expected 1 byte read, got %d - %s", n, buf.String()) - } - - if string(b) != "d" 
{ - t.Fatalf("expected \"d\", got %s", string(b)) - } -} diff --git a/pkg/ioutils/bytespipe.go b/pkg/ioutils/bytespipe.go deleted file mode 100644 index 72a04f3491..0000000000 --- a/pkg/ioutils/bytespipe.go +++ /dev/null @@ -1,186 +0,0 @@ -package ioutils - -import ( - "errors" - "io" - "sync" -) - -// maxCap is the highest capacity to use in byte slices that buffer data. -const maxCap = 1e6 - -// minCap is the lowest capacity to use in byte slices that buffer data -const minCap = 64 - -// blockThreshold is the minimum number of bytes in the buffer which will cause -// a write to BytesPipe to block when allocating a new slice. -const blockThreshold = 1e6 - -var ( - // ErrClosed is returned when Write is called on a closed BytesPipe. - ErrClosed = errors.New("write to closed BytesPipe") - - bufPools = make(map[int]*sync.Pool) - bufPoolsLock sync.Mutex -) - -// BytesPipe is io.ReadWriteCloser which works similarly to pipe(queue). -// All written data may be read at most once. Also, BytesPipe allocates -// and releases new byte slices to adjust to current needs, so the buffer -// won't be overgrown after peak loads. -type BytesPipe struct { - mu sync.Mutex - wait *sync.Cond - buf []*fixedBuffer - bufLen int - closeErr error // error to return from next Read. set to nil if not closed. -} - -// NewBytesPipe creates new BytesPipe, initialized by specified slice. -// If buf is nil, then it will be initialized with slice which cap is 64. -// buf will be adjusted in a way that len(buf) == 0, cap(buf) == cap(buf). -func NewBytesPipe() *BytesPipe { - bp := &BytesPipe{} - bp.buf = append(bp.buf, getBuffer(minCap)) - bp.wait = sync.NewCond(&bp.mu) - return bp -} - -// Write writes p to BytesPipe. -// It can allocate new []byte slices in a process of writing. -func (bp *BytesPipe) Write(p []byte) (int, error) { - bp.mu.Lock() - - written := 0 -loop0: - for { - if bp.closeErr != nil { - bp.mu.Unlock() - return written, ErrClosed - } - - if len(bp.buf) == 0 { - bp.buf = append(bp.buf, getBuffer(64)) - } - // get the last buffer - b := bp.buf[len(bp.buf)-1] - - n, err := b.Write(p) - written += n - bp.bufLen += n - - // errBufferFull is an error we expect to get if the buffer is full - if err != nil && err != errBufferFull { - bp.wait.Broadcast() - bp.mu.Unlock() - return written, err - } - - // if there was enough room to write all then break - if len(p) == n { - break - } - - // more data: write to the next slice - p = p[n:] - - // make sure the buffer doesn't grow too big from this write - for bp.bufLen >= blockThreshold { - bp.wait.Wait() - if bp.closeErr != nil { - continue loop0 - } - } - - // add new byte slice to the buffers slice and continue writing - nextCap := b.Cap() * 2 - if nextCap > maxCap { - nextCap = maxCap - } - bp.buf = append(bp.buf, getBuffer(nextCap)) - } - bp.wait.Broadcast() - bp.mu.Unlock() - return written, nil -} - -// CloseWithError causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) CloseWithError(err error) error { - bp.mu.Lock() - if err != nil { - bp.closeErr = err - } else { - bp.closeErr = io.EOF - } - bp.wait.Broadcast() - bp.mu.Unlock() - return nil -} - -// Close causes further reads from a BytesPipe to return immediately. -func (bp *BytesPipe) Close() error { - return bp.CloseWithError(nil) -} - -// Read reads bytes from BytesPipe. -// Data could be read only once. 
-func (bp *BytesPipe) Read(p []byte) (n int, err error) { - bp.mu.Lock() - if bp.bufLen == 0 { - if bp.closeErr != nil { - bp.mu.Unlock() - return 0, bp.closeErr - } - bp.wait.Wait() - if bp.bufLen == 0 && bp.closeErr != nil { - err := bp.closeErr - bp.mu.Unlock() - return 0, err - } - } - - for bp.bufLen > 0 { - b := bp.buf[0] - read, _ := b.Read(p) // ignore error since fixedBuffer doesn't really return an error - n += read - bp.bufLen -= read - - if b.Len() == 0 { - // it's empty so return it to the pool and move to the next one - returnBuffer(b) - bp.buf[0] = nil - bp.buf = bp.buf[1:] - } - - if len(p) == read { - break - } - - p = p[read:] - } - - bp.wait.Broadcast() - bp.mu.Unlock() - return -} - -func returnBuffer(b *fixedBuffer) { - b.Reset() - bufPoolsLock.Lock() - pool := bufPools[b.Cap()] - bufPoolsLock.Unlock() - if pool != nil { - pool.Put(b) - } -} - -func getBuffer(size int) *fixedBuffer { - bufPoolsLock.Lock() - pool, ok := bufPools[size] - if !ok { - pool = &sync.Pool{New: func() interface{} { return &fixedBuffer{buf: make([]byte, 0, size)} }} - bufPools[size] = pool - } - bufPoolsLock.Unlock() - return pool.Get().(*fixedBuffer) -} diff --git a/pkg/ioutils/bytespipe_test.go b/pkg/ioutils/bytespipe_test.go deleted file mode 100644 index 300fb5f6d5..0000000000 --- a/pkg/ioutils/bytespipe_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package ioutils - -import ( - "crypto/sha1" - "encoding/hex" - "math/rand" - "testing" - "time" -) - -func TestBytesPipeRead(t *testing.T) { - buf := NewBytesPipe() - buf.Write([]byte("12")) - buf.Write([]byte("34")) - buf.Write([]byte("56")) - buf.Write([]byte("78")) - buf.Write([]byte("90")) - rd := make([]byte, 4) - n, err := buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 4 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) - } - if string(rd) != "1234" { - t.Fatalf("Read %s, but must be %s", rd, "1234") - } - n, err = buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 4 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 4) - } - if string(rd) != "5678" { - t.Fatalf("Read %s, but must be %s", rd, "5679") - } - n, err = buf.Read(rd) - if err != nil { - t.Fatal(err) - } - if n != 2 { - t.Fatalf("Wrong number of bytes read: %d, should be %d", n, 2) - } - if string(rd[:n]) != "90" { - t.Fatalf("Read %s, but must be %s", rd, "90") - } -} - -func TestBytesPipeWrite(t *testing.T) { - buf := NewBytesPipe() - buf.Write([]byte("12")) - buf.Write([]byte("34")) - buf.Write([]byte("56")) - buf.Write([]byte("78")) - buf.Write([]byte("90")) - if buf.buf[0].String() != "1234567890" { - t.Fatalf("Buffer %q, must be %q", buf.buf[0].String(), "1234567890") - } -} - -// Write and read in different speeds/chunk sizes and check valid data is read. 
-func TestBytesPipeWriteRandomChunks(t *testing.T) { - cases := []struct{ iterations, writesPerLoop, readsPerLoop int }{ - {100, 10, 1}, - {1000, 10, 5}, - {1000, 100, 0}, - {1000, 5, 6}, - {10000, 50, 25}, - } - - testMessage := []byte("this is a random string for testing") - // random slice sizes to read and write - writeChunks := []int{25, 35, 15, 20} - readChunks := []int{5, 45, 20, 25} - - for _, c := range cases { - // first pass: write directly to hash - hash := sha1.New() - for i := 0; i < c.iterations*c.writesPerLoop; i++ { - if _, err := hash.Write(testMessage[:writeChunks[i%len(writeChunks)]]); err != nil { - t.Fatal(err) - } - } - expected := hex.EncodeToString(hash.Sum(nil)) - - // write/read through buffer - buf := NewBytesPipe() - hash.Reset() - - done := make(chan struct{}) - - go func() { - // random delay before read starts - <-time.After(time.Duration(rand.Intn(10)) * time.Millisecond) - for i := 0; ; i++ { - p := make([]byte, readChunks[(c.iterations*c.readsPerLoop+i)%len(readChunks)]) - n, _ := buf.Read(p) - if n == 0 { - break - } - hash.Write(p[:n]) - } - - close(done) - }() - - for i := 0; i < c.iterations; i++ { - for w := 0; w < c.writesPerLoop; w++ { - buf.Write(testMessage[:writeChunks[(i*c.writesPerLoop+w)%len(writeChunks)]]) - } - } - buf.Close() - <-done - - actual := hex.EncodeToString(hash.Sum(nil)) - - if expected != actual { - t.Fatalf("BytesPipe returned invalid data. Expected checksum %v, got %v", expected, actual) - } - - } -} - -func BenchmarkBytesPipeWrite(b *testing.B) { - testData := []byte("pretty short line, because why not?") - for i := 0; i < b.N; i++ { - readBuf := make([]byte, 1024) - buf := NewBytesPipe() - go func() { - var err error - for err == nil { - _, err = buf.Read(readBuf) - } - }() - for j := 0; j < 1000; j++ { - buf.Write(testData) - } - buf.Close() - } -} - -func BenchmarkBytesPipeRead(b *testing.B) { - rd := make([]byte, 512) - for i := 0; i < b.N; i++ { - b.StopTimer() - buf := NewBytesPipe() - for j := 0; j < 500; j++ { - buf.Write(make([]byte, 1024)) - } - b.StartTimer() - for j := 0; j < 1000; j++ { - if n, _ := buf.Read(rd); n != 512 { - b.Fatalf("Wrong number of bytes: %d", n) - } - } - } -} diff --git a/pkg/ioutils/fmt.go b/pkg/ioutils/fmt.go deleted file mode 100644 index 0b04b0ba3e..0000000000 --- a/pkg/ioutils/fmt.go +++ /dev/null @@ -1,22 +0,0 @@ -package ioutils - -import ( - "fmt" - "io" -) - -// FprintfIfNotEmpty prints the string value if it's not empty -func FprintfIfNotEmpty(w io.Writer, format, value string) (int, error) { - if value != "" { - return fmt.Fprintf(w, format, value) - } - return 0, nil -} - -// FprintfIfTrue prints the boolean value if it's true -func FprintfIfTrue(w io.Writer, format string, ok bool) (int, error) { - if ok { - return fmt.Fprintf(w, format, ok) - } - return 0, nil -} diff --git a/pkg/ioutils/fmt_test.go b/pkg/ioutils/fmt_test.go deleted file mode 100644 index 8968863296..0000000000 --- a/pkg/ioutils/fmt_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package ioutils - -import "testing" - -func TestFprintfIfNotEmpty(t *testing.T) { - wc := NewWriteCounter(&NopWriter{}) - n, _ := FprintfIfNotEmpty(wc, "foo%s", "") - - if wc.Count != 0 || n != 0 { - t.Errorf("Wrong count: %v vs. %v vs. 0", wc.Count, n) - } - - n, _ = FprintfIfNotEmpty(wc, "foo%s", "bar") - if wc.Count != 6 || n != 6 { - t.Errorf("Wrong count: %v vs. %v vs. 
6", wc.Count, n) - } -} diff --git a/pkg/ioutils/fswriters.go b/pkg/ioutils/fswriters.go deleted file mode 100644 index 6dc50a03dc..0000000000 --- a/pkg/ioutils/fswriters.go +++ /dev/null @@ -1,82 +0,0 @@ -package ioutils - -import ( - "io" - "io/ioutil" - "os" - "path/filepath" -) - -// NewAtomicFileWriter returns WriteCloser so that writing to it writes to a -// temporary file and closing it atomically changes the temporary file to -// destination path. Writing and closing concurrently is not allowed. -func NewAtomicFileWriter(filename string, perm os.FileMode) (io.WriteCloser, error) { - f, err := ioutil.TempFile(filepath.Dir(filename), ".tmp-"+filepath.Base(filename)) - if err != nil { - return nil, err - } - - abspath, err := filepath.Abs(filename) - if err != nil { - return nil, err - } - return &atomicFileWriter{ - f: f, - fn: abspath, - perm: perm, - }, nil -} - -// AtomicWriteFile atomically writes data to a file named by filename. -func AtomicWriteFile(filename string, data []byte, perm os.FileMode) error { - f, err := NewAtomicFileWriter(filename, perm) - if err != nil { - return err - } - n, err := f.Write(data) - if err == nil && n < len(data) { - err = io.ErrShortWrite - f.(*atomicFileWriter).writeErr = err - } - if err1 := f.Close(); err == nil { - err = err1 - } - return err -} - -type atomicFileWriter struct { - f *os.File - fn string - writeErr error - perm os.FileMode -} - -func (w *atomicFileWriter) Write(dt []byte) (int, error) { - n, err := w.f.Write(dt) - if err != nil { - w.writeErr = err - } - return n, err -} - -func (w *atomicFileWriter) Close() (retErr error) { - defer func() { - if retErr != nil || w.writeErr != nil { - os.Remove(w.f.Name()) - } - }() - if err := w.f.Sync(); err != nil { - w.f.Close() - return err - } - if err := w.f.Close(); err != nil { - return err - } - if err := os.Chmod(w.f.Name(), w.perm); err != nil { - return err - } - if w.writeErr == nil { - return os.Rename(w.f.Name(), w.fn) - } - return nil -} diff --git a/pkg/ioutils/fswriters_test.go b/pkg/ioutils/fswriters_test.go deleted file mode 100644 index 470ca1a6f4..0000000000 --- a/pkg/ioutils/fswriters_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package ioutils - -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestAtomicWriteToFile(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "atomic-writers-test") - if err != nil { - t.Fatalf("Error when creating temporary directory: %s", err) - } - defer os.RemoveAll(tmpDir) - - expected := []byte("barbaz") - if err := AtomicWriteFile(filepath.Join(tmpDir, "foo"), expected, 0666); err != nil { - t.Fatalf("Error writing to file: %v", err) - } - - actual, err := ioutil.ReadFile(filepath.Join(tmpDir, "foo")) - if err != nil { - t.Fatalf("Error reading from file: %v", err) - } - - if bytes.Compare(actual, expected) != 0 { - t.Fatalf("Data mismatch, expected %q, got %q", expected, actual) - } - - st, err := os.Stat(filepath.Join(tmpDir, "foo")) - if err != nil { - t.Fatalf("Error statting file: %v", err) - } - if expected := os.FileMode(0666); st.Mode() != expected { - t.Fatalf("Mode mismatched, expected %o, got %o", expected, st.Mode()) - } -} diff --git a/pkg/ioutils/multireader.go b/pkg/ioutils/multireader.go deleted file mode 100644 index 0d2d76b479..0000000000 --- a/pkg/ioutils/multireader.go +++ /dev/null @@ -1,226 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "os" -) - -type pos struct { - idx int - offset int64 -} - -type multiReadSeeker struct { - readers []io.ReadSeeker - pos *pos - posIdx 
map[io.ReadSeeker]int -} - -func (r *multiReadSeeker) Seek(offset int64, whence int) (int64, error) { - var tmpOffset int64 - switch whence { - case os.SEEK_SET: - for i, rdr := range r.readers { - // get size of the current reader - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - if offset > tmpOffset+s { - if i == len(r.readers)-1 { - rdrOffset := s + (offset - tmpOffset) - if _, err := rdr.Seek(rdrOffset, os.SEEK_SET); err != nil { - return -1, err - } - r.pos = &pos{i, rdrOffset} - return offset, nil - } - - tmpOffset += s - continue - } - - rdrOffset := offset - tmpOffset - idx := i - - rdr.Seek(rdrOffset, os.SEEK_SET) - // make sure all following readers are at 0 - for _, rdr := range r.readers[i+1:] { - rdr.Seek(0, os.SEEK_SET) - } - - if rdrOffset == s && i != len(r.readers)-1 { - idx++ - rdrOffset = 0 - } - r.pos = &pos{idx, rdrOffset} - return offset, nil - } - case os.SEEK_END: - for _, rdr := range r.readers { - s, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - tmpOffset += s - } - r.Seek(tmpOffset+offset, os.SEEK_SET) - return tmpOffset + offset, nil - case os.SEEK_CUR: - if r.pos == nil { - return r.Seek(offset, os.SEEK_SET) - } - // Just return the current offset - if offset == 0 { - return r.getCurOffset() - } - - curOffset, err := r.getCurOffset() - if err != nil { - return -1, err - } - rdr, rdrOffset, err := r.getReaderForOffset(curOffset + offset) - if err != nil { - return -1, err - } - - r.pos = &pos{r.posIdx[rdr], rdrOffset} - return curOffset + offset, nil - default: - return -1, fmt.Errorf("Invalid whence: %d", whence) - } - - return -1, fmt.Errorf("Error seeking for whence: %d, offset: %d", whence, offset) -} - -func (r *multiReadSeeker) getReaderForOffset(offset int64) (io.ReadSeeker, int64, error) { - var rdr io.ReadSeeker - var rdrOffset int64 - - for i, rdr := range r.readers { - offsetTo, err := r.getOffsetToReader(rdr) - if err != nil { - return nil, -1, err - } - if offsetTo > offset { - rdr = r.readers[i-1] - rdrOffset = offsetTo - offset - break - } - - if rdr == r.readers[len(r.readers)-1] { - rdrOffset = offsetTo + offset - break - } - } - - return rdr, rdrOffset, nil -} - -func (r *multiReadSeeker) getCurOffset() (int64, error) { - var totalSize int64 - for _, rdr := range r.readers[:r.pos.idx+1] { - if r.posIdx[rdr] == r.pos.idx { - totalSize += r.pos.offset - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, fmt.Errorf("error getting seeker size: %v", err) - } - totalSize += size - } - return totalSize, nil -} - -func (r *multiReadSeeker) getOffsetToReader(rdr io.ReadSeeker) (int64, error) { - var offset int64 - for _, r := range r.readers { - if r == rdr { - break - } - - size, err := getReadSeekerSize(rdr) - if err != nil { - return -1, err - } - offset += size - } - return offset, nil -} - -func (r *multiReadSeeker) Read(b []byte) (int, error) { - if r.pos == nil { - r.pos = &pos{0, 0} - } - - bCap := int64(cap(b)) - buf := bytes.NewBuffer(nil) - var rdr io.ReadSeeker - - for _, rdr = range r.readers[r.pos.idx:] { - readBytes, err := io.CopyN(buf, rdr, bCap) - if err != nil && err != io.EOF { - return -1, err - } - bCap -= readBytes - - if bCap == 0 { - break - } - } - - rdrPos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - return -1, err - } - r.pos = &pos{r.posIdx[rdr], rdrPos} - return buf.Read(b) -} - -func getReadSeekerSize(rdr io.ReadSeeker) (int64, error) { - // save the current position - pos, err := rdr.Seek(0, os.SEEK_CUR) - if err != nil { - 
return -1, err - } - - // get the size - size, err := rdr.Seek(0, os.SEEK_END) - if err != nil { - return -1, err - } - - // reset the position - if _, err := rdr.Seek(pos, os.SEEK_SET); err != nil { - return -1, err - } - return size, nil -} - -// MultiReadSeeker returns a ReadSeeker that's the logical concatenation of the provided -// input readseekers. After calling this method the initial position is set to the -// beginning of the first ReadSeeker. At the end of a ReadSeeker, Read always advances -// to the beginning of the next ReadSeeker and returns EOF at the end of the last ReadSeeker. -// Seek can be used over the sum of lengths of all readseekers. -// -// When a MultiReadSeeker is used, no Read and Seek operations should be made on -// its ReadSeeker components. Also, users should make no assumption on the state -// of individual readseekers while the MultiReadSeeker is used. -func MultiReadSeeker(readers ...io.ReadSeeker) io.ReadSeeker { - if len(readers) == 1 { - return readers[0] - } - idx := make(map[io.ReadSeeker]int) - for i, rdr := range readers { - idx[rdr] = i - } - return &multiReadSeeker{ - readers: readers, - posIdx: idx, - } -} diff --git a/pkg/ioutils/multireader_test.go b/pkg/ioutils/multireader_test.go deleted file mode 100644 index de495b56da..0000000000 --- a/pkg/ioutils/multireader_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package ioutils - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "testing" -) - -func TestMultiReadSeekerReadAll(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - expectedSize := int64(s1.Len() + s2.Len() + s3.Len()) - - b, err := ioutil.ReadAll(mr) - if err != nil { - t.Fatal(err) - } - - expected := "hello world 1hello world 2hello world 3" - if string(b) != expected { - t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) - } - - size, err := mr.Seek(0, os.SEEK_END) - if err != nil { - t.Fatal(err) - } - if size != expectedSize { - t.Fatalf("reader size does not match, got %d, expected %d", size, expectedSize) - } - - // Reset the position and read again - pos, err := mr.Seek(0, os.SEEK_SET) - if err != nil { - t.Fatal(err) - } - if pos != 0 { - t.Fatalf("expected position to be set to 0, got %d", pos) - } - - b, err = ioutil.ReadAll(mr) - if err != nil { - t.Fatal(err) - } - - if string(b) != expected { - t.Fatalf("ReadAll failed, got: %q, expected %q", string(b), expected) - } -} - -func TestMultiReadSeekerReadEach(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - var totalBytes int64 - for i, s := range []*strings.Reader{s1, s2, s3} { - sLen := int64(s.Len()) - buf := make([]byte, s.Len()) - expected := []byte(fmt.Sprintf("%s %d", str, i+1)) - - if _, err := mr.Read(buf); err != nil && err != io.EOF { - t.Fatal(err) - } - - if !bytes.Equal(buf, expected) { - t.Fatalf("expected %q to be %q", string(buf), string(expected)) - } - - pos, err := mr.Seek(0, os.SEEK_CUR) - if err != nil { - t.Fatalf("iteration: %d, error: %v", i+1, err) - } - - // check that the total bytes read is the current position of the seeker - totalBytes += sLen - if pos != totalBytes { - t.Fatalf("expected current position to be: %d, got: %d, iteration: %d", totalBytes, pos, i+1) - } - - // This tests not only that SEEK_SET and SEEK_CUR give the 
same values, but that the next iteration is in the expected position as well - newPos, err := mr.Seek(pos, os.SEEK_SET) - if err != nil { - t.Fatal(err) - } - if newPos != pos { - t.Fatalf("expected to get same position when calling SEEK_SET with value from SEEK_CUR, cur: %d, set: %d", pos, newPos) - } - } -} - -func TestMultiReadSeekerReadSpanningChunks(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - buf := make([]byte, s1.Len()+3) - _, err := mr.Read(buf) - if err != nil { - t.Fatal(err) - } - - // expected is the contents of s1 + 3 bytes from s2, ie, the `hel` at the end of this string - expected := "hello world 1hel" - if string(buf) != expected { - t.Fatalf("expected %s to be %s", string(buf), expected) - } -} - -func TestMultiReadSeekerNegativeSeek(t *testing.T) { - str := "hello world" - s1 := strings.NewReader(str + " 1") - s2 := strings.NewReader(str + " 2") - s3 := strings.NewReader(str + " 3") - mr := MultiReadSeeker(s1, s2, s3) - - s1Len := s1.Len() - s2Len := s2.Len() - s3Len := s3.Len() - - s, err := mr.Seek(int64(-1*s3.Len()), os.SEEK_END) - if err != nil { - t.Fatal(err) - } - if s != int64(s1Len+s2Len) { - t.Fatalf("expected %d to be %d", s, s1.Len()+s2.Len()) - } - - buf := make([]byte, s3Len) - if _, err := mr.Read(buf); err != nil && err != io.EOF { - t.Fatal(err) - } - expected := fmt.Sprintf("%s %d", str, 3) - if string(buf) != fmt.Sprintf("%s %d", str, 3) { - t.Fatalf("expected %q to be %q", string(buf), expected) - } -} diff --git a/pkg/ioutils/readers.go b/pkg/ioutils/readers.go deleted file mode 100644 index 63f3c07f46..0000000000 --- a/pkg/ioutils/readers.go +++ /dev/null @@ -1,154 +0,0 @@ -package ioutils - -import ( - "crypto/sha256" - "encoding/hex" - "io" - - "golang.org/x/net/context" -) - -type readCloserWrapper struct { - io.Reader - closer func() error -} - -func (r *readCloserWrapper) Close() error { - return r.closer() -} - -// NewReadCloserWrapper returns a new io.ReadCloser. -func NewReadCloserWrapper(r io.Reader, closer func() error) io.ReadCloser { - return &readCloserWrapper{ - Reader: r, - closer: closer, - } -} - -type readerErrWrapper struct { - reader io.Reader - closer func() -} - -func (r *readerErrWrapper) Read(p []byte) (int, error) { - n, err := r.reader.Read(p) - if err != nil { - r.closer() - } - return n, err -} - -// NewReaderErrWrapper returns a new io.Reader. -func NewReaderErrWrapper(r io.Reader, closer func()) io.Reader { - return &readerErrWrapper{ - reader: r, - closer: closer, - } -} - -// HashData returns the sha256 sum of src. -func HashData(src io.Reader) (string, error) { - h := sha256.New() - if _, err := io.Copy(h, src); err != nil { - return "", err - } - return "sha256:" + hex.EncodeToString(h.Sum(nil)), nil -} - -// OnEOFReader wraps an io.ReadCloser and a function -// the function will run at the end of file or close the file. -type OnEOFReader struct { - Rc io.ReadCloser - Fn func() -} - -func (r *OnEOFReader) Read(p []byte) (n int, err error) { - n, err = r.Rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -// Close closes the file and run the function. -func (r *OnEOFReader) Close() error { - err := r.Rc.Close() - r.runFunc() - return err -} - -func (r *OnEOFReader) runFunc() { - if fn := r.Fn; fn != nil { - fn() - r.Fn = nil - } -} - -// cancelReadCloser wraps an io.ReadCloser with a context for cancelling read -// operations. 
-type cancelReadCloser struct { - cancel func() - pR *io.PipeReader // Stream to read from - pW *io.PipeWriter -} - -// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the -// context is cancelled. The returned io.ReadCloser must be closed when it is -// no longer needed. -func NewCancelReadCloser(ctx context.Context, in io.ReadCloser) io.ReadCloser { - pR, pW := io.Pipe() - - // Create a context used to signal when the pipe is closed - doneCtx, cancel := context.WithCancel(context.Background()) - - p := &cancelReadCloser{ - cancel: cancel, - pR: pR, - pW: pW, - } - - go func() { - _, err := io.Copy(pW, in) - select { - case <-ctx.Done(): - // If the context was closed, p.closeWithError - // was already called. Calling it again would - // change the error that Read returns. - default: - p.closeWithError(err) - } - in.Close() - }() - go func() { - for { - select { - case <-ctx.Done(): - p.closeWithError(ctx.Err()) - case <-doneCtx.Done(): - return - } - } - }() - - return p -} - -// Read wraps the Read method of the pipe that provides data from the wrapped -// ReadCloser. -func (p *cancelReadCloser) Read(buf []byte) (n int, err error) { - return p.pR.Read(buf) -} - -// closeWithError closes the wrapper and its underlying reader. It will -// cause future calls to Read to return err. -func (p *cancelReadCloser) closeWithError(err error) { - p.pW.CloseWithError(err) - p.cancel() -} - -// Close closes the wrapper its underlying reader. It will cause -// future calls to Read to return io.EOF. -func (p *cancelReadCloser) Close() error { - p.closeWithError(io.EOF) - return nil -} diff --git a/pkg/ioutils/readers_test.go b/pkg/ioutils/readers_test.go deleted file mode 100644 index 9abc1054df..0000000000 --- a/pkg/ioutils/readers_test.go +++ /dev/null @@ -1,94 +0,0 @@ -package ioutils - -import ( - "fmt" - "io/ioutil" - "strings" - "testing" - "time" - - "golang.org/x/net/context" -) - -// Implement io.Reader -type errorReader struct{} - -func (r *errorReader) Read(p []byte) (int, error) { - return 0, fmt.Errorf("Error reader always fail.") -} - -func TestReadCloserWrapperClose(t *testing.T) { - reader := strings.NewReader("A string reader") - wrapper := NewReadCloserWrapper(reader, func() error { - return fmt.Errorf("This will be called when closing") - }) - err := wrapper.Close() - if err == nil || !strings.Contains(err.Error(), "This will be called when closing") { - t.Fatalf("readCloserWrapper should have call the anonymous func and thus, fail.") - } -} - -func TestReaderErrWrapperReadOnError(t *testing.T) { - called := false - reader := &errorReader{} - wrapper := NewReaderErrWrapper(reader, func() { - called = true - }) - _, err := wrapper.Read([]byte{}) - if err == nil || !strings.Contains(err.Error(), "Error reader always fail.") { - t.Fatalf("readErrWrapper should returned an error") - } - if !called { - t.Fatalf("readErrWrapper should have call the anonymous function on failure") - } -} - -func TestReaderErrWrapperRead(t *testing.T) { - reader := strings.NewReader("a string reader.") - wrapper := NewReaderErrWrapper(reader, func() { - t.Fatalf("readErrWrapper should not have called the anonymous function") - }) - // Read 20 byte (should be ok with the string above) - num, err := wrapper.Read(make([]byte, 20)) - if err != nil { - t.Fatal(err) - } - if num != 16 { - t.Fatalf("readerErrWrapper should have read 16 byte, but read %d", num) - } -} - -func TestHashData(t *testing.T) { - reader := strings.NewReader("hash-me") - actual, err := HashData(reader) - if err != nil 
{ - t.Fatal(err) - } - expected := "sha256:4d11186aed035cc624d553e10db358492c84a7cd6b9670d92123c144930450aa" - if actual != expected { - t.Fatalf("Expecting %s, got %s", expected, actual) - } -} - -type perpetualReader struct{} - -func (p *perpetualReader) Read(buf []byte) (n int, err error) { - for i := 0; i != len(buf); i++ { - buf[i] = 'a' - } - return len(buf), nil -} - -func TestCancelReadCloser(t *testing.T) { - ctx, _ := context.WithTimeout(context.Background(), 100*time.Millisecond) - cancelReadCloser := NewCancelReadCloser(ctx, ioutil.NopCloser(&perpetualReader{})) - for { - var buf [128]byte - _, err := cancelReadCloser.Read(buf[:]) - if err == context.DeadlineExceeded { - break - } else if err != nil { - t.Fatalf("got unexpected error: %v", err) - } - } -} diff --git a/pkg/ioutils/temp_unix.go b/pkg/ioutils/temp_unix.go deleted file mode 100644 index 1539ad21b5..0000000000 --- a/pkg/ioutils/temp_unix.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !windows - -package ioutils - -import "io/ioutil" - -// TempDir on Unix systems is equivalent to ioutil.TempDir. -func TempDir(dir, prefix string) (string, error) { - return ioutil.TempDir(dir, prefix) -} diff --git a/pkg/ioutils/temp_windows.go b/pkg/ioutils/temp_windows.go deleted file mode 100644 index c258e5fdd8..0000000000 --- a/pkg/ioutils/temp_windows.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build windows - -package ioutils - -import ( - "io/ioutil" - - "github.com/docker/docker/pkg/longpath" -) - -// TempDir is the equivalent of ioutil.TempDir, except that the result is in Windows longpath format. -func TempDir(dir, prefix string) (string, error) { - tempDir, err := ioutil.TempDir(dir, prefix) - if err != nil { - return "", err - } - return longpath.AddPrefix(tempDir), nil -} diff --git a/pkg/ioutils/writeflusher.go b/pkg/ioutils/writeflusher.go deleted file mode 100644 index 52a4901ade..0000000000 --- a/pkg/ioutils/writeflusher.go +++ /dev/null @@ -1,92 +0,0 @@ -package ioutils - -import ( - "io" - "sync" -) - -// WriteFlusher wraps the Write and Flush operation ensuring that every write -// is a flush. In addition, the Close method can be called to intercept -// Read/Write calls if the targets lifecycle has already ended. -type WriteFlusher struct { - w io.Writer - flusher flusher - flushed chan struct{} - flushedOnce sync.Once - closed chan struct{} - closeLock sync.Mutex -} - -type flusher interface { - Flush() -} - -var errWriteFlusherClosed = io.EOF - -func (wf *WriteFlusher) Write(b []byte) (n int, err error) { - select { - case <-wf.closed: - return 0, errWriteFlusherClosed - default: - } - - n, err = wf.w.Write(b) - wf.Flush() // every write is a flush. - return n, err -} - -// Flush the stream immediately. -func (wf *WriteFlusher) Flush() { - select { - case <-wf.closed: - return - default: - } - - wf.flushedOnce.Do(func() { - close(wf.flushed) - }) - wf.flusher.Flush() -} - -// Flushed returns the state of flushed. -// If it's flushed, return true, or else it return false. -func (wf *WriteFlusher) Flushed() bool { - // BUG(stevvooe): Remove this method. Its use is inherently racy. Seems to - // be used to detect whether or a response code has been issued or not. - // Another hook should be used instead. - var flushed bool - select { - case <-wf.flushed: - flushed = true - default: - } - return flushed -} - -// Close closes the write flusher, disallowing any further writes to the -// target. After the flusher is closed, all calls to write or flush will -// result in an error. 
-func (wf *WriteFlusher) Close() error { - wf.closeLock.Lock() - defer wf.closeLock.Unlock() - - select { - case <-wf.closed: - return errWriteFlusherClosed - default: - close(wf.closed) - } - return nil -} - -// NewWriteFlusher returns a new WriteFlusher. -func NewWriteFlusher(w io.Writer) *WriteFlusher { - var fl flusher - if f, ok := w.(flusher); ok { - fl = f - } else { - fl = &NopFlusher{} - } - return &WriteFlusher{w: w, flusher: fl, closed: make(chan struct{}), flushed: make(chan struct{})} -} diff --git a/pkg/ioutils/writers.go b/pkg/ioutils/writers.go deleted file mode 100644 index ccc7f9c23e..0000000000 --- a/pkg/ioutils/writers.go +++ /dev/null @@ -1,66 +0,0 @@ -package ioutils - -import "io" - -// NopWriter represents a type which write operation is nop. -type NopWriter struct{} - -func (*NopWriter) Write(buf []byte) (int, error) { - return len(buf), nil -} - -type nopWriteCloser struct { - io.Writer -} - -func (w *nopWriteCloser) Close() error { return nil } - -// NopWriteCloser returns a nopWriteCloser. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &nopWriteCloser{w} -} - -// NopFlusher represents a type which flush operation is nop. -type NopFlusher struct{} - -// Flush is a nop operation. -func (f *NopFlusher) Flush() {} - -type writeCloserWrapper struct { - io.Writer - closer func() error -} - -func (r *writeCloserWrapper) Close() error { - return r.closer() -} - -// NewWriteCloserWrapper returns a new io.WriteCloser. -func NewWriteCloserWrapper(r io.Writer, closer func() error) io.WriteCloser { - return &writeCloserWrapper{ - Writer: r, - closer: closer, - } -} - -// WriteCounter wraps a concrete io.Writer and hold a count of the number -// of bytes written to the writer during a "session". -// This can be convenient when write return is masked -// (e.g., json.Encoder.Encode()) -type WriteCounter struct { - Count int64 - Writer io.Writer -} - -// NewWriteCounter returns a new WriteCounter. -func NewWriteCounter(w io.Writer) *WriteCounter { - return &WriteCounter{ - Writer: w, - } -} - -func (wc *WriteCounter) Write(p []byte) (count int, err error) { - count, err = wc.Writer.Write(p) - wc.Count += int64(count) - return -} diff --git a/pkg/ioutils/writers_test.go b/pkg/ioutils/writers_test.go deleted file mode 100644 index 564b1cd4f5..0000000000 --- a/pkg/ioutils/writers_test.go +++ /dev/null @@ -1,65 +0,0 @@ -package ioutils - -import ( - "bytes" - "strings" - "testing" -) - -func TestWriteCloserWrapperClose(t *testing.T) { - called := false - writer := bytes.NewBuffer([]byte{}) - wrapper := NewWriteCloserWrapper(writer, func() error { - called = true - return nil - }) - if err := wrapper.Close(); err != nil { - t.Fatal(err) - } - if !called { - t.Fatalf("writeCloserWrapper should have call the anonymous function.") - } -} - -func TestNopWriteCloser(t *testing.T) { - writer := bytes.NewBuffer([]byte{}) - wrapper := NopWriteCloser(writer) - if err := wrapper.Close(); err != nil { - t.Fatal("NopWriteCloser always return nil on Close.") - } - -} - -func TestNopWriter(t *testing.T) { - nw := &NopWriter{} - l, err := nw.Write([]byte{'c'}) - if err != nil { - t.Fatal(err) - } - if l != 1 { - t.Fatalf("Expected 1 got %d", l) - } -} - -func TestWriteCounter(t *testing.T) { - dummy1 := "This is a dummy string." - dummy2 := "This is another dummy string." 
- totalLength := int64(len(dummy1) + len(dummy2)) - - reader1 := strings.NewReader(dummy1) - reader2 := strings.NewReader(dummy2) - - var buffer bytes.Buffer - wc := NewWriteCounter(&buffer) - - reader1.WriteTo(wc) - reader2.WriteTo(wc) - - if wc.Count != totalLength { - t.Errorf("Wrong count: %d vs. %d", wc.Count, totalLength) - } - - if buffer.String() != dummy1+dummy2 { - t.Error("Wrong message written") - } -} diff --git a/pkg/jsonlog/jsonlog.go b/pkg/jsonlog/jsonlog.go deleted file mode 100644 index 4734c31119..0000000000 --- a/pkg/jsonlog/jsonlog.go +++ /dev/null @@ -1,42 +0,0 @@ -package jsonlog - -import ( - "encoding/json" - "fmt" - "time" -) - -// JSONLog represents a log message, typically a single entry from a given log stream. -// JSONLogs can be easily serialized to and from JSON and support custom formatting. -type JSONLog struct { - // Log is the log message - Log string `json:"log,omitempty"` - // Stream is the log source - Stream string `json:"stream,omitempty"` - // Created is the created timestamp of log - Created time.Time `json:"time"` - // Attrs is the list of extra attributes provided by the user - Attrs map[string]string `json:"attrs,omitempty"` -} - -// Format returns the log formatted according to format -// If format is nil, returns the log message -// If format is json, returns the log marshaled in json format -// By default, returns the log with the log time formatted according to format. -func (jl *JSONLog) Format(format string) (string, error) { - if format == "" { - return jl.Log, nil - } - if format == "json" { - m, err := json.Marshal(jl) - return string(m), err - } - return fmt.Sprintf("%s %s", jl.Created.Format(format), jl.Log), nil -} - -// Reset resets the log to nil. -func (jl *JSONLog) Reset() { - jl.Log = "" - jl.Stream = "" - jl.Created = time.Time{} -} diff --git a/pkg/jsonlog/jsonlog_marshalling.go b/pkg/jsonlog/jsonlog_marshalling.go deleted file mode 100644 index 83ce684a8e..0000000000 --- a/pkg/jsonlog/jsonlog_marshalling.go +++ /dev/null @@ -1,178 +0,0 @@ -// This code was initially generated by ffjson -// This code was generated via the following steps: -// $ go get -u github.com/pquerna/ffjson -// $ make BIND_DIR=. shell -// $ ffjson pkg/jsonlog/jsonlog.go -// $ mv pkg/jsonglog/jsonlog_ffjson.go pkg/jsonlog/jsonlog_marshalling.go -// -// It has been modified to improve the performance of time marshalling to JSON -// and to clean it up. 
-// Should this code need to be regenerated when the JSONLog struct is changed, -// the relevant changes which have been made are: -// import ( -// "bytes" -//- -// "unicode/utf8" -// ) -// -// func (mj *JSONLog) MarshalJSON() ([]byte, error) { -//@@ -20,13 +16,13 @@ func (mj *JSONLog) MarshalJSON() ([]byte, error) { -// } -// return buf.Bytes(), nil -// } -//+ -// func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -//- var err error -//- var obj []byte -//- var first bool = true -//- _ = obj -//- _ = err -//- _ = first -//+ var ( -//+ err error -//+ timestamp string -//+ first bool = true -//+ ) -// buf.WriteString(`{`) -// if len(mj.Log) != 0 { -// if first == true { -//@@ -52,11 +48,11 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// buf.WriteString(`,`) -// } -// buf.WriteString(`"time":`) -//- obj, err = mj.Created.MarshalJSON() -//+ timestamp, err = FastTimeMarshalJSON(mj.Created) -// if err != nil { -// return err -// } -//- buf.Write(obj) -//+ buf.WriteString(timestamp) -// buf.WriteString(`}`) -// return nil -// } -// @@ -81,9 +81,10 @@ func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { -// if len(mj.Log) != 0 { -// - if first == true { -// - first = false -// - } else { -// - buf.WriteString(`,`) -// - } -// + first = false -// buf.WriteString(`"log":`) -// ffjsonWriteJSONString(buf, mj.Log) -// } - -package jsonlog - -import ( - "bytes" - "unicode/utf8" -) - -// MarshalJSON marshals the JSONLog. -func (mj *JSONLog) MarshalJSON() ([]byte, error) { - var buf bytes.Buffer - buf.Grow(1024) - if err := mj.MarshalJSONBuf(&buf); err != nil { - return nil, err - } - return buf.Bytes(), nil -} - -// MarshalJSONBuf marshals the JSONLog and stores the result to a bytes.Buffer. -func (mj *JSONLog) MarshalJSONBuf(buf *bytes.Buffer) error { - var ( - err error - timestamp string - first = true - ) - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - timestamp, err = FastTimeMarshalJSON(mj.Created) - if err != nil { - return err - } - buf.WriteString(timestamp) - buf.WriteString(`}`) - return nil -} - -func ffjsonWriteJSONString(buf *bytes.Buffer, s string) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.WriteString(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRuneInString(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.WriteString(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.WriteString(s[start:]) - } - buf.WriteByte('"') -} diff --git 
a/pkg/jsonlog/jsonlog_marshalling_test.go b/pkg/jsonlog/jsonlog_marshalling_test.go deleted file mode 100644 index 3edb271410..0000000000 --- a/pkg/jsonlog/jsonlog_marshalling_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package jsonlog - -import ( - "regexp" - "testing" -) - -func TestJSONLogMarshalJSON(t *testing.T) { - logs := map[*JSONLog]string{ - &JSONLog{Log: `"A log line with \\"`}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line"}: `^{\"log\":\"A log line\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with \r"}: `^{\"log\":\"A log line with \\r\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with & < >"}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: "A log line with utf8 : 🚀 ψ ω β"}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":\".{20,}\"}$`, - &JSONLog{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":\".{20,}\"}$`, - &JSONLog{}: `^{\"time\":\".{20,}\"}$`, - // These ones are a little weird - &JSONLog{Log: "\u2028 \u2029"}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: string([]byte{0xaF})}: `^{\"log\":\"\\ufffd\",\"time\":\".{20,}\"}$`, - &JSONLog{Log: string([]byte{0x7F})}: `^{\"log\":\"\x7f\",\"time\":\".{20,}\"}$`, - } - for jsonLog, expression := range logs { - data, err := jsonLog.MarshalJSON() - if err != nil { - t.Fatal(err) - } - res := string(data) - t.Logf("Result of WriteLog: %q", res) - logRe := regexp.MustCompile(expression) - if !logRe.MatchString(res) { - t.Fatalf("Log line not in expected format [%v]: %q", expression, res) - } - } -} diff --git a/pkg/jsonlog/jsonlogbytes.go b/pkg/jsonlog/jsonlogbytes.go deleted file mode 100644 index df522c0d66..0000000000 --- a/pkg/jsonlog/jsonlogbytes.go +++ /dev/null @@ -1,122 +0,0 @@ -package jsonlog - -import ( - "bytes" - "encoding/json" - "unicode/utf8" -) - -// JSONLogs is based on JSONLog. -// It allows marshalling JSONLog from Log as []byte -// and an already marshalled Created timestamp. -type JSONLogs struct { - Log []byte `json:"log,omitempty"` - Stream string `json:"stream,omitempty"` - Created string `json:"time"` - - // json-encoded bytes - RawAttrs json.RawMessage `json:"attrs,omitempty"` -} - -// MarshalJSONBuf is based on the same method from JSONLog -// It has been modified to take into account the necessary changes. -func (mj *JSONLogs) MarshalJSONBuf(buf *bytes.Buffer) error { - var first = true - - buf.WriteString(`{`) - if len(mj.Log) != 0 { - first = false - buf.WriteString(`"log":`) - ffjsonWriteJSONBytesAsString(buf, mj.Log) - } - if len(mj.Stream) != 0 { - if first == true { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"stream":`) - ffjsonWriteJSONString(buf, mj.Stream) - } - if len(mj.RawAttrs) > 0 { - if first { - first = false - } else { - buf.WriteString(`,`) - } - buf.WriteString(`"attrs":`) - buf.Write(mj.RawAttrs) - } - if !first { - buf.WriteString(`,`) - } - buf.WriteString(`"time":`) - buf.WriteString(mj.Created) - buf.WriteString(`}`) - return nil -} - -// This is based on ffjsonWriteJSONBytesAsString. It has been changed -// to accept a string passed as a slice of bytes. 
-func ffjsonWriteJSONBytesAsString(buf *bytes.Buffer, s []byte) { - const hex = "0123456789abcdef" - - buf.WriteByte('"') - start := 0 - for i := 0; i < len(s); { - if b := s[i]; b < utf8.RuneSelf { - if 0x20 <= b && b != '\\' && b != '"' && b != '<' && b != '>' && b != '&' { - i++ - continue - } - if start < i { - buf.Write(s[start:i]) - } - switch b { - case '\\', '"': - buf.WriteByte('\\') - buf.WriteByte(b) - case '\n': - buf.WriteByte('\\') - buf.WriteByte('n') - case '\r': - buf.WriteByte('\\') - buf.WriteByte('r') - default: - - buf.WriteString(`\u00`) - buf.WriteByte(hex[b>>4]) - buf.WriteByte(hex[b&0xF]) - } - i++ - start = i - continue - } - c, size := utf8.DecodeRune(s[i:]) - if c == utf8.RuneError && size == 1 { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\ufffd`) - i += size - start = i - continue - } - - if c == '\u2028' || c == '\u2029' { - if start < i { - buf.Write(s[start:i]) - } - buf.WriteString(`\u202`) - buf.WriteByte(hex[c&0xF]) - i += size - start = i - continue - } - i += size - } - if start < len(s) { - buf.Write(s[start:]) - } - buf.WriteByte('"') -} diff --git a/pkg/jsonlog/jsonlogbytes_test.go b/pkg/jsonlog/jsonlogbytes_test.go deleted file mode 100644 index 6d6ad21583..0000000000 --- a/pkg/jsonlog/jsonlogbytes_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package jsonlog - -import ( - "bytes" - "regexp" - "testing" -) - -func TestJSONLogsMarshalJSONBuf(t *testing.T) { - logs := map[*JSONLogs]string{ - &JSONLogs{Log: []byte(`"A log line with \\"`)}: `^{\"log\":\"\\\"A log line with \\\\\\\\\\\"\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with \r")}: `^{\"log\":\"A log line with \\r\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with & < >")}: `^{\"log\":\"A log line with \\u0026 \\u003c \\u003e\",\"time\":}$`, - &JSONLogs{Log: []byte("A log line with utf8 : 🚀 ψ ω β")}: `^{\"log\":\"A log line with utf8 : 🚀 ψ ω β\",\"time\":}$`, - &JSONLogs{Stream: "stdout"}: `^{\"stream\":\"stdout\",\"time\":}$`, - &JSONLogs{Stream: "stdout", Log: []byte("A log line")}: `^{\"log\":\"A log line\",\"stream\":\"stdout\",\"time\":}$`, - &JSONLogs{Created: "time"}: `^{\"time\":time}$`, - &JSONLogs{}: `^{\"time\":}$`, - // These ones are a little weird - &JSONLogs{Log: []byte("\u2028 \u2029")}: `^{\"log\":\"\\u2028 \\u2029\",\"time\":}$`, - &JSONLogs{Log: []byte{0xaF}}: `^{\"log\":\"\\ufffd\",\"time\":}$`, - &JSONLogs{Log: []byte{0x7F}}: `^{\"log\":\"\x7f\",\"time\":}$`, - // with raw attributes - &JSONLogs{Log: []byte("A log line"), RawAttrs: []byte(`{"hello":"world","value":1234}`)}: `^{\"log\":\"A log line\",\"attrs\":{\"hello\":\"world\",\"value\":1234},\"time\":}$`, - } - for jsonLog, expression := range logs { - var buf bytes.Buffer - if err := jsonLog.MarshalJSONBuf(&buf); err != nil { - t.Fatal(err) - } - res := buf.String() - t.Logf("Result of WriteLog: %q", res) - logRe := regexp.MustCompile(expression) - if !logRe.MatchString(res) { - t.Fatalf("Log line not in expected format [%v]: %q", expression, res) - } - } -} diff --git a/pkg/jsonlog/time_marshalling.go b/pkg/jsonlog/time_marshalling.go deleted file mode 100644 index 2117338149..0000000000 --- a/pkg/jsonlog/time_marshalling.go +++ /dev/null @@ -1,27 +0,0 @@ -// Package jsonlog provides helper functions to parse and print time (time.Time) as JSON. 
-package jsonlog - -import ( - "errors" - "time" -) - -const ( - // RFC3339NanoFixed is our own version of RFC3339Nano because we want one - // that pads the nanoseconds part with zeros to ensure - // the timestamps are aligned in the logs. - RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00" - // JSONFormat is the format used by FastTimeMarshalJSON - JSONFormat = `"` + time.RFC3339Nano + `"` -) - -// FastTimeMarshalJSON avoids one of the extra allocations that -// time.MarshalJSON is making. -func FastTimeMarshalJSON(t time.Time) (string, error) { - if y := t.Year(); y < 0 || y >= 10000 { - // RFC 3339 is clear that years are 4 digits exactly. - // See golang.org/issue/4556#c15 for more discussion. - return "", errors.New("time.MarshalJSON: year outside of range [0,9999]") - } - return t.Format(JSONFormat), nil -} diff --git a/pkg/jsonlog/time_marshalling_test.go b/pkg/jsonlog/time_marshalling_test.go deleted file mode 100644 index 02d0302c4a..0000000000 --- a/pkg/jsonlog/time_marshalling_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package jsonlog - -import ( - "testing" - "time" -) - -// Testing to ensure the 'year' field is between 0 and 9999 -func TestFastTimeMarshalJSONWithInvalidDate(t *testing.T) { - aTime := time.Date(-1, 1, 1, 0, 0, 0, 0, time.Local) - json, err := FastTimeMarshalJSON(aTime) - if err == nil { - t.Fatalf("FastTimeMarshalJSON should return an error, but got '%v'", json) - } - anotherTime := time.Date(10000, 1, 1, 0, 0, 0, 0, time.Local) - json, err = FastTimeMarshalJSON(anotherTime) - if err == nil { - t.Fatalf("FastTimeMarshalJSON should return an error, but got '%v'", json) - } - -} - -func TestFastTimeMarshalJSON(t *testing.T) { - aTime := time.Date(2015, 5, 29, 11, 1, 2, 3, time.UTC) - json, err := FastTimeMarshalJSON(aTime) - if err != nil { - t.Fatal(err) - } - expected := "\"2015-05-29T11:01:02.000000003Z\"" - if json != expected { - t.Fatalf("Expected %v, got %v", expected, json) - } - - location, err := time.LoadLocation("Europe/Paris") - if err != nil { - t.Fatal(err) - } - aTime = time.Date(2015, 5, 29, 11, 1, 2, 3, location) - json, err = FastTimeMarshalJSON(aTime) - if err != nil { - t.Fatal(err) - } - expected = "\"2015-05-29T11:01:02.000000003+02:00\"" - if json != expected { - t.Fatalf("Expected %v, got %v", expected, json) - } -} diff --git a/pkg/jsonmessage/jsonmessage.go b/pkg/jsonmessage/jsonmessage.go deleted file mode 100644 index 91b073b731..0000000000 --- a/pkg/jsonmessage/jsonmessage.go +++ /dev/null @@ -1,221 +0,0 @@ -package jsonmessage - -import ( - "encoding/json" - "fmt" - "io" - "strings" - "time" - - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/term" - "github.com/docker/go-units" -) - -// JSONError wraps a concrete Code and Message, `Code` is -// an integer error code, `Message` is the error message. -type JSONError struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func (e *JSONError) Error() string { - return e.Message -} - -// JSONProgress describes a Progress. terminalFd is the fd of the current terminal, -// Start is the initial value for the operation. Current is the current status and -// value of the progress made towards Total. Total is the end value describing when -// we made 100% progress for an operation.
-type JSONProgress struct { - terminalFd uintptr - Current int64 `json:"current,omitempty"` - Total int64 `json:"total,omitempty"` - Start int64 `json:"start,omitempty"` -} - -func (p *JSONProgress) String() string { - var ( - width = 200 - pbBox string - numbersBox string - timeLeftBox string - ) - - ws, err := term.GetWinsize(p.terminalFd) - if err == nil { - width = int(ws.Width) - } - - if p.Current <= 0 && p.Total <= 0 { - return "" - } - current := units.HumanSize(float64(p.Current)) - if p.Total <= 0 { - return fmt.Sprintf("%8v", current) - } - total := units.HumanSize(float64(p.Total)) - percentage := int(float64(p.Current)/float64(p.Total)*100) / 2 - if percentage > 50 { - percentage = 50 - } - if width > 110 { - // this number can't be negative gh#7136 - numSpaces := 0 - if 50-percentage > 0 { - numSpaces = 50 - percentage - } - pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces)) - } - - numbersBox = fmt.Sprintf("%8v/%v", current, total) - - if p.Current > p.Total { - // remove total display if the reported current is wonky. - numbersBox = fmt.Sprintf("%8v", current) - } - - if p.Current > 0 && p.Start > 0 && percentage < 50 { - fromStart := time.Now().UTC().Sub(time.Unix(p.Start, 0)) - perEntry := fromStart / time.Duration(p.Current) - left := time.Duration(p.Total-p.Current) * perEntry - left = (left / time.Second) * time.Second - - if width > 50 { - timeLeftBox = " " + left.String() - } - } - return pbBox + numbersBox + timeLeftBox -} - -// JSONMessage defines a message struct. It describes -// the created time, where it from, status, ID of the -// message. It's used for docker events. -type JSONMessage struct { - Stream string `json:"stream,omitempty"` - Status string `json:"status,omitempty"` - Progress *JSONProgress `json:"progressDetail,omitempty"` - ProgressMessage string `json:"progress,omitempty"` //deprecated - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` - Error *JSONError `json:"errorDetail,omitempty"` - ErrorMessage string `json:"error,omitempty"` //deprecated - // Aux contains out-of-band data, such as digests for push signing. - Aux *json.RawMessage `json:"aux,omitempty"` -} - -// Display displays the JSONMessage to `out`. `isTerminal` describes if `out` -// is a terminal. If this is the case, it will erase the entire current line -// when displaying the progressbar. 
-func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error { - if jm.Error != nil { - if jm.Error.Code == 401 { - return fmt.Errorf("Authentication is required.") - } - return jm.Error - } - var endl string - if isTerminal && jm.Stream == "" && jm.Progress != nil { - // [2K = erase entire current line - fmt.Fprintf(out, "%c[2K\r", 27) - endl = "\r" - } else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal - return nil - } - if jm.TimeNano != 0 { - fmt.Fprintf(out, "%s ", time.Unix(0, jm.TimeNano).Format(jsonlog.RFC3339NanoFixed)) - } else if jm.Time != 0 { - fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(jsonlog.RFC3339NanoFixed)) - } - if jm.ID != "" { - fmt.Fprintf(out, "%s: ", jm.ID) - } - if jm.From != "" { - fmt.Fprintf(out, "(from %s) ", jm.From) - } - if jm.Progress != nil && isTerminal { - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl) - } else if jm.ProgressMessage != "" { //deprecated - fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl) - } else if jm.Stream != "" { - fmt.Fprintf(out, "%s%s", jm.Stream, endl) - } else { - fmt.Fprintf(out, "%s%s\n", jm.Status, endl) - } - return nil -} - -// DisplayJSONMessagesStream displays a json message stream from `in` to `out`, `isTerminal` -// describes if `out` is a terminal. If this is the case, it will print `\n` at the end of -// each line and move the cursor while displaying. -func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool, auxCallback func(*json.RawMessage)) error { - var ( - dec = json.NewDecoder(in) - ids = make(map[string]int) - ) - for { - diff := 0 - var jm JSONMessage - if err := dec.Decode(&jm); err != nil { - if err == io.EOF { - break - } - return err - } - - if jm.Aux != nil { - if auxCallback != nil { - auxCallback(jm.Aux) - } - continue - } - - if jm.Progress != nil { - jm.Progress.terminalFd = terminalFd - } - if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") { - line, ok := ids[jm.ID] - if !ok { - // NOTE: This approach of using len(id) to - // figure out the number of lines of history - // only works as long as we clear the history - // when we output something that's not - // accounted for in the map, such as a line - // with no ID. - line = len(ids) - ids[jm.ID] = line - if isTerminal { - fmt.Fprintf(out, "\n") - } - } else { - diff = len(ids) - line - } - if isTerminal { - // NOTE: this appears to be necessary even if - // diff == 0. - // [{diff}A = move cursor up diff rows - fmt.Fprintf(out, "%c[%dA", 27, diff) - } - } else { - // When outputting something that isn't progress - // output, clear the history of previous lines. We - // don't want progress entries from some previous - // operation to be updated (for example, pull -a - // with multiple tags). - ids = make(map[string]int) - } - err := jm.Display(out, isTerminal) - if jm.ID != "" && isTerminal { - // NOTE: this appears to be necessary even if - // diff == 0. 
- // [{diff}B = move cursor down diff rows - fmt.Fprintf(out, "%c[%dB", 27, diff) - } - if err != nil { - return err - } - } - return nil -} diff --git a/pkg/jsonmessage/jsonmessage_test.go b/pkg/jsonmessage/jsonmessage_test.go deleted file mode 100644 index 479857d904..0000000000 --- a/pkg/jsonmessage/jsonmessage_test.go +++ /dev/null @@ -1,245 +0,0 @@ -package jsonmessage - -import ( - "bytes" - "fmt" - "strings" - "testing" - "time" - - "github.com/docker/docker/pkg/jsonlog" - "github.com/docker/docker/pkg/term" -) - -func TestError(t *testing.T) { - je := JSONError{404, "Not found"} - if je.Error() != "Not found" { - t.Fatalf("Expected 'Not found' got '%s'", je.Error()) - } -} - -func TestProgress(t *testing.T) { - termsz, err := term.GetWinsize(0) - if err != nil { - // we can safely ignore the err here - termsz = nil - } - jp := JSONProgress{} - if jp.String() != "" { - t.Fatalf("Expected empty string, got '%s'", jp.String()) - } - - expected := " 1 B" - jp2 := JSONProgress{Current: 1} - if jp2.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp2.String()) - } - - expectedStart := "[==========> ] 20 B/100 B" - if termsz != nil && termsz.Width <= 110 { - expectedStart = " 20 B/100 B" - } - jp3 := JSONProgress{Current: 20, Total: 100, Start: time.Now().Unix()} - // Just look at the start of the string - // (the remaining time is really hard to test -_-) - if jp3.String()[:len(expectedStart)] != expectedStart { - t.Fatalf("Expected to start with %q, got %q", expectedStart, jp3.String()) - } - - expected = "[=========================> ] 50 B/100 B" - if termsz != nil && termsz.Width <= 110 { - expected = " 50 B/100 B" - } - jp4 := JSONProgress{Current: 50, Total: 100} - if jp4.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp4.String()) - } - - // this number can't be negative gh#7136 - expected = "[==================================================>] 50 B" - if termsz != nil && termsz.Width <= 110 { - expected = " 50 B" - } - jp5 := JSONProgress{Current: 50, Total: 40} - if jp5.String() != expected { - t.Fatalf("Expected %q, got %q", expected, jp5.String()) - } -} - -func TestJSONMessageDisplay(t *testing.T) { - now := time.Now() - messages := map[JSONMessage][]string{ - // Empty - JSONMessage{}: {"\n", "\n"}, - // Status - JSONMessage{ - Status: "status", - }: { - "status\n", - "status\n", - }, - // General - JSONMessage{ - Time: now.Unix(), - ID: "ID", - From: "From", - Status: "status", - }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(now.Unix(), 0).Format(jsonlog.RFC3339NanoFixed)), - }, - // General, with nano precision time - JSONMessage{ - TimeNano: now.UnixNano(), - ID: "ID", - From: "From", - Status: "status", - }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), - }, - // General, with both times Nano is preferred - JSONMessage{ - Time: now.Unix(), - TimeNano: now.UnixNano(), - ID: "ID", - From: "From", - Status: "status", - }: { - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), - fmt.Sprintf("%v ID: (from From) status\n", time.Unix(0, now.UnixNano()).Format(jsonlog.RFC3339NanoFixed)), - }, - // Stream over status - JSONMessage{ - Status: "status", - Stream: "stream", - }: { - "stream", - "stream", 
- }, - // With progress message - JSONMessage{ - Status: "status", - ProgressMessage: "progressMessage", - }: { - "status progressMessage", - "status progressMessage", - }, - // With progress, stream empty - JSONMessage{ - Status: "status", - Stream: "", - Progress: &JSONProgress{Current: 1}, - }: { - "", - fmt.Sprintf("%c[2K\rstatus 1 B\r", 27), - }, - } - - // The tests :) - for jsonMessage, expectedMessages := range messages { - // Without terminal - data := bytes.NewBuffer([]byte{}) - if err := jsonMessage.Display(data, false); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[0] { - t.Fatalf("Expected [%v], got [%v]", expectedMessages[0], data.String()) - } - // With terminal - data = bytes.NewBuffer([]byte{}) - if err := jsonMessage.Display(data, true); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[1] { - t.Fatalf("Expected [%v], got [%v]", expectedMessages[1], data.String()) - } - } -} - -// Test JSONMessage with an Error. It will return an error with the text as error, not the meaning of the HTTP code. -func TestJSONMessageDisplayWithJSONError(t *testing.T) { - data := bytes.NewBuffer([]byte{}) - jsonMessage := JSONMessage{Error: &JSONError{404, "Can't find it"}} - - err := jsonMessage.Display(data, true) - if err == nil || err.Error() != "Can't find it" { - t.Fatalf("Expected a JSONError 404, got [%v]", err) - } - - jsonMessage = JSONMessage{Error: &JSONError{401, "Anything"}} - err = jsonMessage.Display(data, true) - if err == nil || err.Error() != "Authentication is required." { - t.Fatalf("Expected an error [Authentication is required.], got [%v]", err) - } -} - -func TestDisplayJSONMessagesStreamInvalidJSON(t *testing.T) { - var ( - inFd uintptr - ) - data := bytes.NewBuffer([]byte{}) - reader := strings.NewReader("This is not a 'valid' JSON []") - inFd, _ = term.GetFdInfo(reader) - - if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err == nil && err.Error()[:17] != "invalid character" { - t.Fatalf("Should have thrown an error (invalid character in ..), got [%v]", err) - } -} - -func TestDisplayJSONMessagesStream(t *testing.T) { - var ( - inFd uintptr - ) - - messages := map[string][]string{ - // empty string - "": { - "", - ""}, - // Without progress & ID - "{ \"status\": \"status\" }": { - "status\n", - "status\n", - }, - // Without progress, with ID - "{ \"id\": \"ID\",\"status\": \"status\" }": { - "ID: status\n", - fmt.Sprintf("ID: status\n%c[%dB", 27, 0), - }, - // With progress - "{ \"id\": \"ID\", \"status\": \"status\", \"progress\": \"ProgressMessage\" }": { - "ID: status ProgressMessage", - fmt.Sprintf("\n%c[%dAID: status ProgressMessage%c[%dB", 27, 0, 27, 0), - }, - // With progressDetail - "{ \"id\": \"ID\", \"status\": \"status\", \"progressDetail\": { \"Current\": 1} }": { - "", // progressbar is disabled in non-terminal - fmt.Sprintf("\n%c[%dA%c[2K\rID: status 1 B\r%c[%dB", 27, 0, 27, 27, 0), - }, - } - for jsonMessage, expectedMessages := range messages { - data := bytes.NewBuffer([]byte{}) - reader := strings.NewReader(jsonMessage) - inFd, _ = term.GetFdInfo(reader) - - // Without terminal - if err := DisplayJSONMessagesStream(reader, data, inFd, false, nil); err != nil { - t.Fatal(err) - } - if data.String() != expectedMessages[0] { - t.Fatalf("Expected an [%v], got [%v]", expectedMessages[0], data.String()) - } - - // With terminal - data = bytes.NewBuffer([]byte{}) - reader = strings.NewReader(jsonMessage) - if err := DisplayJSONMessagesStream(reader, data, inFd, true, nil); err != nil { - 
t.Fatal(err) - } - if data.String() != expectedMessages[1] { - t.Fatalf("Expected an [%v], got [%v]", expectedMessages[1], data.String()) - } - } - -} diff --git a/pkg/listeners/listeners_solaris.go b/pkg/listeners/listeners_solaris.go deleted file mode 100644 index ff833e3741..0000000000 --- a/pkg/listeners/listeners_solaris.go +++ /dev/null @@ -1,31 +0,0 @@ -package listeners - -import ( - "crypto/tls" - "fmt" - "net" - - "github.com/docker/go-connections/sockets" -) - -// Init creates new listeners for the server. -func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) (ls []net.Listener, err error) { - switch proto { - case "tcp": - l, err := sockets.NewTCPSocket(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, l) - case "unix": - l, err := sockets.NewUnixSocket(addr, socketGroup) - if err != nil { - return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) - } - ls = append(ls, l) - default: - return nil, fmt.Errorf("Invalid protocol format: %q", proto) - } - - return -} diff --git a/pkg/listeners/listeners_unix.go b/pkg/listeners/listeners_unix.go deleted file mode 100644 index 1bcae7aa3e..0000000000 --- a/pkg/listeners/listeners_unix.go +++ /dev/null @@ -1,94 +0,0 @@ -// +build !windows,!solaris - -package listeners - -import ( - "crypto/tls" - "fmt" - "net" - "strconv" - - "github.com/Sirupsen/logrus" - "github.com/coreos/go-systemd/activation" - "github.com/docker/go-connections/sockets" -) - -// Init creates new listeners for the server. -// TODO: Clean up the fact that socketGroup and tlsConfig aren't always used. -func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { - ls := []net.Listener{} - - switch proto { - case "fd": - fds, err := listenFD(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, fds...) - case "tcp": - l, err := sockets.NewTCPSocket(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, l) - case "unix": - l, err := sockets.NewUnixSocket(addr, socketGroup) - if err != nil { - return nil, fmt.Errorf("can't create unix socket %s: %v", addr, err) - } - ls = append(ls, l) - default: - return nil, fmt.Errorf("invalid protocol format: %q", proto) - } - - return ls, nil -} - -// listenFD returns the specified socket activated files as a slice of -// net.Listeners or all of the activated files if "*" is given. 
-func listenFD(addr string, tlsConfig *tls.Config) ([]net.Listener, error) { - var ( - err error - listeners []net.Listener - ) - // socket activation - if tlsConfig != nil { - listeners, err = activation.TLSListeners(false, tlsConfig) - } else { - listeners, err = activation.Listeners(false) - } - if err != nil { - return nil, err - } - - if len(listeners) == 0 { - return nil, fmt.Errorf("no sockets found via socket activation: make sure the service was started by systemd") - } - - // default to all fds just like unix:// and tcp:// - if addr == "" || addr == "*" { - return listeners, nil - } - - fdNum, err := strconv.Atoi(addr) - if err != nil { - return nil, fmt.Errorf("failed to parse systemd fd address: should be a number: %v", addr) - } - fdOffset := fdNum - 3 - if len(listeners) < int(fdOffset)+1 { - return nil, fmt.Errorf("too few socket activated files passed in by systemd") - } - if listeners[fdOffset] == nil { - return nil, fmt.Errorf("failed to listen on systemd activated file: fd %d", fdOffset+3) - } - for i, ls := range listeners { - if i == fdOffset || ls == nil { - continue - } - if err := ls.Close(); err != nil { - // TODO: We shouldn't log inside a library. Remove this or error out. - logrus.Errorf("failed to close systemd activated file: fd %d: %v", fdOffset+3, err) - } - } - return []net.Listener{listeners[fdOffset]}, nil -} diff --git a/pkg/listeners/listeners_windows.go b/pkg/listeners/listeners_windows.go deleted file mode 100644 index 5b5a470fc6..0000000000 --- a/pkg/listeners/listeners_windows.go +++ /dev/null @@ -1,54 +0,0 @@ -package listeners - -import ( - "crypto/tls" - "fmt" - "net" - "strings" - - "github.com/Microsoft/go-winio" - "github.com/docker/go-connections/sockets" -) - -// Init creates new listeners for the server. -func Init(proto, addr, socketGroup string, tlsConfig *tls.Config) ([]net.Listener, error) { - ls := []net.Listener{} - - switch proto { - case "tcp": - l, err := sockets.NewTCPSocket(addr, tlsConfig) - if err != nil { - return nil, err - } - ls = append(ls, l) - - case "npipe": - // allow Administrators and SYSTEM, plus whatever additional users or groups were specified - sddl := "D:P(A;;GA;;;BA)(A;;GA;;;SY)" - if socketGroup != "" { - for _, g := range strings.Split(socketGroup, ",") { - sid, err := winio.LookupSidByName(g) - if err != nil { - return nil, err - } - sddl += fmt.Sprintf("(A;;GRGW;;;%s)", sid) - } - } - c := winio.PipeConfig{ - SecurityDescriptor: sddl, - MessageMode: true, // Use message mode so that CloseWrite() is supported - InputBufferSize: 65536, // Use 64KB buffers to improve performance - OutputBufferSize: 65536, - } - l, err := winio.ListenPipe(addr, &c) - if err != nil { - return nil, err - } - ls = append(ls, l) - - default: - return nil, fmt.Errorf("invalid protocol format: windows only supports tcp and npipe") - } - - return ls, nil -} diff --git a/pkg/locker/README.md b/pkg/locker/README.md deleted file mode 100644 index e84a815cc5..0000000000 --- a/pkg/locker/README.md +++ /dev/null @@ -1,65 +0,0 @@ -Locker -===== - -locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex, however the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. 
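For illustration, only callers that pass the same name contend with each other; locks for different names are fully independent. Below is a minimal sketch of that property (the volume names are hypothetical, chosen only for the example), using the `New`/`Lock`/`Unlock` API defined in this package:

```go
package main

import (
	"fmt"
	"sync"

	"github.com/docker/docker/pkg/locker"
)

func main() {
	l := locker.New()
	var wg sync.WaitGroup
	for _, name := range []string{"volume-a", "volume-a", "volume-b"} {
		wg.Add(1)
		go func(name string) {
			defer wg.Done()
			// The two "volume-a" goroutines serialize here;
			// the "volume-b" goroutine never waits on them.
			l.Lock(name)
			defer l.Unlock(name)
			fmt.Println("holding", name)
		}(name)
	}
	wg.Wait()
}
```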
- - -## Usage - -```go -package important - -import ( - "sync" - "time" - - "github.com/docker/docker/pkg/locker" -) - -type important struct { - locks *locker.Locker - data map[string]interface{} - mu sync.Mutex -} - -func (i *important) Get(name string) interface{} { - i.locks.Lock(name) - defer i.locks.Unlock(name) - return i.data[name] -} - -func (i *important) Create(name string, data interface{}) { - i.locks.Lock(name) - defer i.locks.Unlock(name) - - i.createImportant(data) - - i.mu.Lock() - i.data[name] = data - i.mu.Unlock() -} - -func (i *important) createImportant(data interface{}) { - time.Sleep(10 * time.Second) -} -``` - -For functions dealing with a given name, always lock at the beginning of the -function (or before doing anything with the underlying state); this ensures any -other function that is dealing with the same name will block. - -When needing to modify the underlying data, use the global lock to ensure nothing -else is modifying it at the same time. -Since the name lock is already in place, no reads will occur while the modification -is being performed. - diff --git a/pkg/locker/locker.go b/pkg/locker/locker.go deleted file mode 100644 index 0b22ddfab8..0000000000 --- a/pkg/locker/locker.go +++ /dev/null @@ -1,112 +0,0 @@ -/* -Package locker provides a mechanism for creating finer-grained locking to help -free up more global locks to handle other tasks. - -The implementation looks close to a sync.Mutex, however the user must provide a -reference to use to refer to the underlying lock when locking and unlocking, -and unlock may generate an error. - -If a lock with a given name does not exist when `Lock` is called, one is -created. -Lock references are automatically cleaned up on `Unlock` if nothing else is -waiting for the lock. -*/ -package locker - -import ( - "errors" - "sync" - "sync/atomic" -) - -// ErrNoSuchLock is returned when the requested lock does not exist -var ErrNoSuchLock = errors.New("no such lock") - -// Locker provides a locking mechanism based on the passed in reference name -type Locker struct { - mu sync.Mutex - locks map[string]*lockCtr -} - -// lockCtr is used by Locker to represent a lock with a given name. -type lockCtr struct { - mu sync.Mutex - // waiters is the number of waiters waiting to acquire the lock - // this is int32 instead of uint32 so we can add `-1` in `dec()` - waiters int32 -} - -// inc increments the number of waiters waiting for the lock -func (l *lockCtr) inc() { - atomic.AddInt32(&l.waiters, 1) -} - -// dec decrements the number of waiters waiting on the lock -func (l *lockCtr) dec() { - atomic.AddInt32(&l.waiters, -1) -} - -// count gets the current number of waiters -func (l *lockCtr) count() int32 { - return atomic.LoadInt32(&l.waiters) -} - -// Lock locks the mutex -func (l *lockCtr) Lock() { - l.mu.Lock() -} - -// Unlock unlocks the mutex -func (l *lockCtr) Unlock() { - l.mu.Unlock() -} - -// New creates a new Locker -func New() *Locker { - return &Locker{ - locks: make(map[string]*lockCtr), - } -} - -// Lock locks a mutex with the given name.
If it doesn't exist, one is created -func (l *Locker) Lock(name string) { - l.mu.Lock() - if l.locks == nil { - l.locks = make(map[string]*lockCtr) - } - - nameLock, exists := l.locks[name] - if !exists { - nameLock = &lockCtr{} - l.locks[name] = nameLock - } - - // increment the nameLock waiters while inside the main mutex - // this makes sure that the lock isn't deleted if `Lock` and `Unlock` are called concurrently - nameLock.inc() - l.mu.Unlock() - - // Lock the nameLock outside the main mutex so we don't block other operations - // once locked then we can decrement the number of waiters for this lock - nameLock.Lock() - nameLock.dec() -} - -// Unlock unlocks the mutex with the given name -// If the given lock is not being waited on by any other callers, it is deleted -func (l *Locker) Unlock(name string) error { - l.mu.Lock() - nameLock, exists := l.locks[name] - if !exists { - l.mu.Unlock() - return ErrNoSuchLock - } - - if nameLock.count() == 0 { - delete(l.locks, name) - } - nameLock.Unlock() - - l.mu.Unlock() - return nil -} diff --git a/pkg/locker/locker_test.go b/pkg/locker/locker_test.go deleted file mode 100644 index 5a297dd47b..0000000000 --- a/pkg/locker/locker_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package locker - -import ( - "sync" - "testing" - "time" -) - -func TestLockCounter(t *testing.T) { - l := &lockCtr{} - l.inc() - - if l.waiters != 1 { - t.Fatal("counter inc failed") - } - - l.dec() - if l.waiters != 0 { - t.Fatal("counter dec failed") - } -} - -func TestLockerLock(t *testing.T) { - l := New() - l.Lock("test") - ctr := l.locks["test"] - - if ctr.count() != 0 { - t.Fatalf("expected waiters to be 0, got :%d", ctr.waiters) - } - - chDone := make(chan struct{}) - go func() { - l.Lock("test") - close(chDone) - }() - - chWaiting := make(chan struct{}) - go func() { - for range time.Tick(1 * time.Millisecond) { - if ctr.count() == 1 { - close(chWaiting) - break - } - } - }() - - select { - case <-chWaiting: - case <-time.After(3 * time.Second): - t.Fatal("timed out waiting for lock waiters to be incremented") - } - - select { - case <-chDone: - t.Fatal("lock should not have returned while it was still held") - default: - } - - if err := l.Unlock("test"); err != nil { - t.Fatal(err) - } - - select { - case <-chDone: - case <-time.After(3 * time.Second): - t.Fatalf("lock should have completed") - } - - if ctr.count() != 0 { - t.Fatalf("expected waiters to be 0, got: %d", ctr.count()) - } -} - -func TestLockerUnlock(t *testing.T) { - l := New() - - l.Lock("test") - l.Unlock("test") - - chDone := make(chan struct{}) - go func() { - l.Lock("test") - close(chDone) - }() - - select { - case <-chDone: - case <-time.After(3 * time.Second): - t.Fatalf("lock should not be blocked") - } -} - -func TestLockerConcurrency(t *testing.T) { - l := New() - - var wg sync.WaitGroup - for i := 0; i <= 10000; i++ { - wg.Add(1) - go func() { - l.Lock("test") - // if there is a concurrency issue, will very likely panic here - l.Unlock("test") - wg.Done() - }() - } - - chDone := make(chan struct{}) - go func() { - wg.Wait() - close(chDone) - }() - - select { - case <-chDone: - case <-time.After(10 * time.Second): - t.Fatal("timeout waiting for locks to complete") - } - - // Since everything has unlocked this should not exist anymore - if ctr, exists := l.locks["test"]; exists { - t.Fatalf("lock should not exist: %v", ctr) - } -} diff --git a/pkg/longpath/longpath.go b/pkg/longpath/longpath.go deleted file mode 100644 index 9b15bfff4c..0000000000 --- a/pkg/longpath/longpath.go +++ /dev/null @@ 
-1,26 +0,0 @@ -// longpath introduces some constants and helper functions for handling long paths -// in Windows, which are expected to be prepended with `\\?\` and followed by either -// a drive letter, a UNC server\share, or a volume identifier. - -package longpath - -import ( - "strings" -) - -// Prefix is the longpath prefix for Windows file paths. -const Prefix = `\\?\` - -// AddPrefix will add the Windows long path prefix to the path provided if -// it does not already have it. -func AddPrefix(path string) string { - if !strings.HasPrefix(path, Prefix) { - if strings.HasPrefix(path, `\\`) { - // This is a UNC path, so we need to add 'UNC' to the path as well. - path = Prefix + `UNC` + path[1:] - } else { - path = Prefix + path - } - } - return path -} diff --git a/pkg/longpath/longpath_test.go b/pkg/longpath/longpath_test.go deleted file mode 100644 index 01865eff09..0000000000 --- a/pkg/longpath/longpath_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package longpath - -import ( - "strings" - "testing" -) - -func TestStandardLongPath(t *testing.T) { - c := `C:\simple\path` - longC := AddPrefix(c) - if !strings.EqualFold(longC, `\\?\C:\simple\path`) { - t.Errorf("Wrong long path returned. Original = %s ; Long = %s", c, longC) - } -} - -func TestUNCLongPath(t *testing.T) { - c := `\\server\share\path` - longC := AddPrefix(c) - if !strings.EqualFold(longC, `\\?\UNC\server\share\path`) { - t.Errorf("Wrong UNC long path returned. Original = %s ; Long = %s", c, longC) - } -} diff --git a/pkg/loopback/attach_loopback.go b/pkg/loopback/attach_loopback.go deleted file mode 100644 index 971f45eb48..0000000000 --- a/pkg/loopback/attach_loopback.go +++ /dev/null @@ -1,137 +0,0 @@ -// +build linux - -package loopback - -import ( - "errors" - "fmt" - "os" - "syscall" - - "github.com/Sirupsen/logrus" -) - -// Loopback related errors -var ( - ErrAttachLoopbackDevice = errors.New("loopback attach failed") - ErrGetLoopbackBackingFile = errors.New("Unable to get loopback backing file") - ErrSetCapacity = errors.New("Unable set loopback capacity") -) - -func stringToLoopName(src string) [LoNameSize]uint8 { - var dst [LoNameSize]uint8 - copy(dst[:], src[:]) - return dst -} - -func getNextFreeLoopbackIndex() (int, error) { - f, err := os.OpenFile("/dev/loop-control", os.O_RDONLY, 0644) - if err != nil { - return 0, err - } - defer f.Close() - - index, err := ioctlLoopCtlGetFree(f.Fd()) - if index < 0 { - index = 0 - } - return index, err -} - -func openNextAvailableLoopback(index int, sparseFile *os.File) (loopFile *os.File, err error) { - // Start looking for a free /dev/loop - for { - target := fmt.Sprintf("/dev/loop%d", index) - index++ - - fi, err := os.Stat(target) - if err != nil { - if os.IsNotExist(err) { - logrus.Error("There are no more loopback devices available.") - } - return nil, ErrAttachLoopbackDevice - } - - if fi.Mode()&os.ModeDevice != os.ModeDevice { - logrus.Errorf("Loopback device %s is not a block device.", target) - continue - } - - // OpenFile adds O_CLOEXEC - loopFile, err = os.OpenFile(target, os.O_RDWR, 0644) - if err != nil { - logrus.Errorf("Error opening loopback device: %s", err) - return nil, ErrAttachLoopbackDevice - } - - // Try to attach to the loop file - if err := ioctlLoopSetFd(loopFile.Fd(), sparseFile.Fd()); err != nil { - loopFile.Close() - - // If the error is EBUSY, then try the next loopback - if err != syscall.EBUSY { - logrus.Errorf("Cannot set up loopback device %s: %s", target, err) - return nil, ErrAttachLoopbackDevice - } - - // Otherwise, we keep going with 
the loop - continue - } - // In case of success, we finished. Break the loop. - break - } - - // This can't happen, but let's be sure - if loopFile == nil { - logrus.Errorf("Unreachable code reached! Error attaching %s to a loopback device.", sparseFile.Name()) - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil -} - -// AttachLoopDevice attaches the given sparse file to the next -// available loopback device. It returns an opened *os.File. -func AttachLoopDevice(sparseName string) (loop *os.File, err error) { - - // Try to retrieve the next available loopback device via syscall. - // If it fails, we discard the error and start looping for a - // loopback from index 0. - startIndex, err := getNextFreeLoopbackIndex() - if err != nil { - logrus.Debugf("Error retrieving the next available loopback: %s", err) - } - - // OpenFile adds O_CLOEXEC - sparseFile, err := os.OpenFile(sparseName, os.O_RDWR, 0644) - if err != nil { - logrus.Errorf("Error opening sparse file %s: %s", sparseName, err) - return nil, ErrAttachLoopbackDevice - } - defer sparseFile.Close() - - loopFile, err := openNextAvailableLoopback(startIndex, sparseFile) - if err != nil { - return nil, err - } - - // Set the status of the loopback device - loopInfo := &loopInfo64{ - loFileName: stringToLoopName(loopFile.Name()), - loOffset: 0, - loFlags: LoFlagsAutoClear, - } - - if err := ioctlLoopSetStatus64(loopFile.Fd(), loopInfo); err != nil { - logrus.Errorf("Cannot set up loopback device info: %s", err) - - // If the call failed, then free the loopback device - if err := ioctlLoopClrFd(loopFile.Fd()); err != nil { - logrus.Error("Error while cleaning up the loopback device") - } - loopFile.Close() - return nil, ErrAttachLoopbackDevice - } - - return loopFile, nil -} diff --git a/pkg/loopback/ioctl.go b/pkg/loopback/ioctl.go deleted file mode 100644 index 0714eb5f87..0000000000 --- a/pkg/loopback/ioctl.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build linux - -package loopback - -import ( - "syscall" - "unsafe" -) - -func ioctlLoopCtlGetFree(fd uintptr) (int, error) { - index, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, LoopCtlGetFree, 0) - if err != 0 { - return 0, err - } - return int(index), nil -} - -func ioctlLoopSetFd(loopFd, sparseFd uintptr) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetFd, sparseFd); err != 0 { - return err - } - return nil -} - -func ioctlLoopSetStatus64(loopFd uintptr, loopInfo *loopInfo64) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { - return err - } - return nil -} - -func ioctlLoopClrFd(loopFd uintptr) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopClrFd, 0); err != 0 { - return err - } - return nil -} - -func ioctlLoopGetStatus64(loopFd uintptr) (*loopInfo64, error) { - loopInfo := &loopInfo64{} - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopGetStatus64, uintptr(unsafe.Pointer(loopInfo))); err != 0 { - return nil, err - } - return loopInfo, nil -} - -func ioctlLoopSetCapacity(loopFd uintptr, value int) error { - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, loopFd, LoopSetCapacity, uintptr(value)); err != 0 { - return err - } - return nil -} diff --git a/pkg/loopback/loop_wrapper.go b/pkg/loopback/loop_wrapper.go deleted file mode 100644 index e1100ce156..0000000000 --- a/pkg/loopback/loop_wrapper.go +++ /dev/null @@ -1,52 +0,0 @@ -// +build linux - -package loopback - -/* -#include <linux/loop.h> // FIXME: present only for defines, maybe
we can remove it? - -#ifndef LOOP_CTL_GET_FREE - #define LOOP_CTL_GET_FREE 0x4C82 -#endif - -#ifndef LO_FLAGS_PARTSCAN - #define LO_FLAGS_PARTSCAN 8 -#endif - -*/ -import "C" - -type loopInfo64 struct { - loDevice uint64 /* ioctl r/o */ - loInode uint64 /* ioctl r/o */ - loRdevice uint64 /* ioctl r/o */ - loOffset uint64 - loSizelimit uint64 /* bytes, 0 == max available */ - loNumber uint32 /* ioctl r/o */ - loEncryptType uint32 - loEncryptKeySize uint32 /* ioctl w/o */ - loFlags uint32 /* ioctl r/o */ - loFileName [LoNameSize]uint8 - loCryptName [LoNameSize]uint8 - loEncryptKey [LoKeySize]uint8 /* ioctl w/o */ - loInit [2]uint64 -} - -// IOCTL consts -const ( - LoopSetFd = C.LOOP_SET_FD - LoopCtlGetFree = C.LOOP_CTL_GET_FREE - LoopGetStatus64 = C.LOOP_GET_STATUS64 - LoopSetStatus64 = C.LOOP_SET_STATUS64 - LoopClrFd = C.LOOP_CLR_FD - LoopSetCapacity = C.LOOP_SET_CAPACITY -) - -// LOOP consts. -const ( - LoFlagsAutoClear = C.LO_FLAGS_AUTOCLEAR - LoFlagsReadOnly = C.LO_FLAGS_READ_ONLY - LoFlagsPartScan = C.LO_FLAGS_PARTSCAN - LoKeySize = C.LO_KEY_SIZE - LoNameSize = C.LO_NAME_SIZE -) diff --git a/pkg/loopback/loopback.go b/pkg/loopback/loopback.go deleted file mode 100644 index bc0479284c..0000000000 --- a/pkg/loopback/loopback.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build linux - -package loopback - -import ( - "fmt" - "os" - "syscall" - - "github.com/Sirupsen/logrus" -) - -func getLoopbackBackingFile(file *os.File) (uint64, uint64, error) { - loopInfo, err := ioctlLoopGetStatus64(file.Fd()) - if err != nil { - logrus.Errorf("Error get loopback backing file: %s", err) - return 0, 0, ErrGetLoopbackBackingFile - } - return loopInfo.loDevice, loopInfo.loInode, nil -} - -// SetCapacity reloads the size for the loopback device. -func SetCapacity(file *os.File) error { - if err := ioctlLoopSetCapacity(file.Fd(), 0); err != nil { - logrus.Errorf("Error loopbackSetCapacity: %s", err) - return ErrSetCapacity - } - return nil -} - -// FindLoopDeviceFor returns a loopback device file for the specified file which -// is backing file of a loop back device. -func FindLoopDeviceFor(file *os.File) *os.File { - stat, err := file.Stat() - if err != nil { - return nil - } - targetInode := stat.Sys().(*syscall.Stat_t).Ino - targetDevice := stat.Sys().(*syscall.Stat_t).Dev - - for i := 0; true; i++ { - path := fmt.Sprintf("/dev/loop%d", i) - - file, err := os.OpenFile(path, os.O_RDWR, 0) - if err != nil { - if os.IsNotExist(err) { - return nil - } - - // Ignore all errors until the first not-exist - // we want to continue looking for the file - continue - } - - dev, inode, err := getLoopbackBackingFile(file) - if err == nil && dev == targetDevice && inode == targetInode { - return file - } - file.Close() - } - - return nil -} diff --git a/pkg/mflag/LICENSE b/pkg/mflag/LICENSE deleted file mode 100644 index 9b4f4a294e..0000000000 --- a/pkg/mflag/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2016 The Docker & Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/mflag/README.md b/pkg/mflag/README.md deleted file mode 100644 index 5e81bb2a36..0000000000 --- a/pkg/mflag/README.md +++ /dev/null @@ -1,40 +0,0 @@ -Package mflag (aka multiple-flag) implements command-line flag parsing. -It's a **hacky** fork of the [official golang package](http://golang.org/pkg/flag/) - -It adds: - -* both short and long flag version -`./example -s red` `./example --string blue` - -* multiple names for the same option -``` -$>./example -h -Usage of example: - -s, --string="": a simple string -``` - -___ -It is very flexible on purpose, so you can do things like: -``` -$>./example -h -Usage of example: - -s, -string, --string="": a simple string -``` - -Or: -``` -$>./example -h -Usage of example: - -oldflag, --newflag="": a simple string -``` - -You can also hide some flags from the usage, so if we want only `--newflag`: -``` -$>./example -h -Usage of example: - --newflag="": a simple string -$>./example -oldflag str -str -``` - -See [example.go](example/example.go) for more details. diff --git a/pkg/mflag/example/example.go b/pkg/mflag/example/example.go deleted file mode 100644 index 2e766dd1e5..0000000000 --- a/pkg/mflag/example/example.go +++ /dev/null @@ -1,36 +0,0 @@ -package main - -import ( - "fmt" - - flag "github.com/docker/docker/pkg/mflag" -) - -var ( - i int - str string - b, b2, h bool -) - -func init() { - flag.Bool([]string{"#hp", "#-halp"}, false, "display the halp") - flag.BoolVar(&b, []string{"b", "#bal", "#bol", "-bal"}, false, "a simple bool") - flag.BoolVar(&b, []string{"g", "#gil"}, false, "a simple bool") - flag.BoolVar(&b2, []string{"#-bool"}, false, "a simple bool") - flag.IntVar(&i, []string{"-integer", "-number"}, -1, "a simple integer") - flag.StringVar(&str, []string{"s", "#hidden", "-string"}, "", "a simple string") //-s -hidden and --string will work, but -hidden won't be in the usage - flag.BoolVar(&h, []string{"h", "#help", "-help"}, false, "display the help") - flag.StringVar(&str, []string{"mode"}, "mode1", "set the mode\nmode1: use the mode1\nmode2: use the mode2\nmode3: use the mode3") - flag.Parse() -} -func main() { - if h { - flag.PrintDefaults() - } else { - fmt.Printf("s/#hidden/-string: %s\n", str) - fmt.Printf("b: %t\n", b) - fmt.Printf("-bool: %t\n", b2) - fmt.Printf("s/#hidden/-string(via lookup): %s\n", flag.Lookup("s").Value.String()) - fmt.Printf("ARGS: %v\n", flag.Args()) - } -} diff --git a/pkg/mflag/flag.go b/pkg/mflag/flag.go deleted file mode 100644 index 5014157da6..0000000000 --- a/pkg/mflag/flag.go +++ /dev/null @@ -1,1280 +0,0 @@ -// Copyright 2014-2016 The Docker & Go Authors. 
All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package mflag implements command-line flag parsing. -// -// Usage: -// -// Define flags using flag.String(), Bool(), Int(), etc. -// -// This declares an integer flag, -f or --flagname, stored in the pointer ip, with type *int. -// import "flag /github.com/docker/docker/pkg/mflag" -// var ip = flag.Int([]string{"f", "-flagname"}, 1234, "help message for flagname") -// If you like, you can bind the flag to a variable using the Var() functions. -// var flagvar int -// func init() { -// // -flaghidden will work, but will be hidden from the usage -// flag.IntVar(&flagvar, []string{"f", "#flaghidden", "-flagname"}, 1234, "help message for flagname") -// } -// Or you can create custom flags that satisfy the Value interface (with -// pointer receivers) and couple them to flag parsing by -// flag.Var(&flagVal, []string{"name"}, "help message for flagname") -// For such flags, the default value is just the initial value of the variable. -// -// You can also add "deprecated" flags, they are still usable, but are not shown -// in the usage and will display a warning when you try to use them. `#` before -// an option means this option is deprecated, if there is a following option -// without `#` ahead, then that's the replacement, if not, it will just be removed: -// var ip = flag.Int([]string{"#f", "#flagname", "-flagname"}, 1234, "help message for flagname") -// this will display: `Warning: '-f' is deprecated, it will be replaced by '--flagname' soon. See usage.` or -// this will display: `Warning: '-flagname' is deprecated, it will be replaced by '--flagname' soon. See usage.` -// var ip = flag.Int([]string{"f", "#flagname"}, 1234, "help message for flagname") -// will display: `Warning: '-flagname' is deprecated, it will be removed soon. See usage.` -// so you can only use `-f`. -// -// You can also group one letter flags, bif you declare -// var v = flag.Bool([]string{"v", "-verbose"}, false, "help message for verbose") -// var s = flag.Bool([]string{"s", "-slow"}, false, "help message for slow") -// you will be able to use the -vs or -sv -// -// After all flags are defined, call -// flag.Parse() -// to parse the command line into the defined flags. -// -// Flags may then be used directly. If you're using the flags themselves, -// they are all pointers; if you bind to variables, they're values. -// fmt.Println("ip has value ", *ip) -// fmt.Println("flagvar has value ", flagvar) -// -// After parsing, the arguments after the flag are available as the -// slice flag.Args() or individually as flag.Arg(i). -// The arguments are indexed from 0 through flag.NArg()-1. -// -// Command line flag syntax: -// -flag -// -flag=x -// -flag="x" -// -flag='x' -// -flag x // non-boolean flags only -// One or two minus signs may be used; they are equivalent. -// The last form is not permitted for boolean flags because the -// meaning of the command -// cmd -x * -// will change if there is a file called 0, false, etc. You must -// use the -flag=false form to turn off a boolean flag. -// -// Flag parsing stops just before the first non-flag argument -// ("-" is a non-flag argument) or after the terminator "--". -// -// Integer flags accept 1234, 0664, 0x1234 and may be negative. -// Boolean flags may be 1, 0, t, f, true, false, TRUE, FALSE, True, False. -// Duration flags accept any input valid for time.ParseDuration. 
-// -// The default set of command-line flags is controlled by -// top-level functions. The FlagSet type allows one to define -// independent sets of flags, such as to implement subcommands -// in a command-line interface. The methods of FlagSet are -// analogous to the top-level functions for the command-line -// flag set. - -package mflag - -import ( - "errors" - "fmt" - "io" - "os" - "runtime" - "sort" - "strconv" - "strings" - "text/tabwriter" - "time" - - "github.com/docker/docker/pkg/homedir" -) - -// ErrHelp is the error returned if the flag -help is invoked but no such flag is defined. -var ErrHelp = errors.New("flag: help requested") - -// ErrRetry is the error returned if you need to try letter by letter -var ErrRetry = errors.New("flag: retry") - -// -- bool Value -type boolValue bool - -func newBoolValue(val bool, p *bool) *boolValue { - *p = val - return (*boolValue)(p) -} - -func (b *boolValue) Set(s string) error { - v, err := strconv.ParseBool(s) - *b = boolValue(v) - return err -} - -func (b *boolValue) Get() interface{} { return bool(*b) } - -func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) } - -func (b *boolValue) IsBoolFlag() bool { return true } - -// optional interface to indicate boolean flags that can be -// supplied without "=value" text -type boolFlag interface { - Value - IsBoolFlag() bool -} - -// -- int Value -type intValue int - -func newIntValue(val int, p *int) *intValue { - *p = val - return (*intValue)(p) -} - -func (i *intValue) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = intValue(v) - return err -} - -func (i *intValue) Get() interface{} { return int(*i) } - -func (i *intValue) String() string { return fmt.Sprintf("%v", *i) } - -// -- int64 Value -type int64Value int64 - -func newInt64Value(val int64, p *int64) *int64Value { - *p = val - return (*int64Value)(p) -} - -func (i *int64Value) Set(s string) error { - v, err := strconv.ParseInt(s, 0, 64) - *i = int64Value(v) - return err -} - -func (i *int64Value) Get() interface{} { return int64(*i) } - -func (i *int64Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint Value -type uintValue uint - -func newUintValue(val uint, p *uint) *uintValue { - *p = val - return (*uintValue)(p) -} - -func (i *uintValue) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uintValue(v) - return err -} - -func (i *uintValue) Get() interface{} { return uint(*i) } - -func (i *uintValue) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint64 Value -type uint64Value uint64 - -func newUint64Value(val uint64, p *uint64) *uint64Value { - *p = val - return (*uint64Value)(p) -} - -func (i *uint64Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 64) - *i = uint64Value(v) - return err -} - -func (i *uint64Value) Get() interface{} { return uint64(*i) } - -func (i *uint64Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- uint16 Value -type uint16Value uint16 - -func newUint16Value(val uint16, p *uint16) *uint16Value { - *p = val - return (*uint16Value)(p) -} - -func (i *uint16Value) Set(s string) error { - v, err := strconv.ParseUint(s, 0, 16) - *i = uint16Value(v) - return err -} - -func (i *uint16Value) Get() interface{} { return uint16(*i) } - -func (i *uint16Value) String() string { return fmt.Sprintf("%v", *i) } - -// -- string Value -type stringValue string - -func newStringValue(val string, p *string) *stringValue { - *p = val - return (*stringValue)(p) -} - -func (s *stringValue) Set(val string) error { - *s = 
stringValue(val) - return nil -} - -func (s *stringValue) Get() interface{} { return string(*s) } - -func (s *stringValue) String() string { return fmt.Sprintf("%s", *s) } - -// -- float64 Value -type float64Value float64 - -func newFloat64Value(val float64, p *float64) *float64Value { - *p = val - return (*float64Value)(p) -} - -func (f *float64Value) Set(s string) error { - v, err := strconv.ParseFloat(s, 64) - *f = float64Value(v) - return err -} - -func (f *float64Value) Get() interface{} { return float64(*f) } - -func (f *float64Value) String() string { return fmt.Sprintf("%v", *f) } - -// -- time.Duration Value -type durationValue time.Duration - -func newDurationValue(val time.Duration, p *time.Duration) *durationValue { - *p = val - return (*durationValue)(p) -} - -func (d *durationValue) Set(s string) error { - v, err := time.ParseDuration(s) - *d = durationValue(v) - return err -} - -func (d *durationValue) Get() interface{} { return time.Duration(*d) } - -func (d *durationValue) String() string { return (*time.Duration)(d).String() } - -// Value is the interface to the dynamic value stored in a flag. -// (The default value is represented as a string.) -// -// If a Value has an IsBoolFlag() bool method returning true, -// the command-line parser makes -name equivalent to -name=true -// rather than using the next command-line argument. -type Value interface { - String() string - Set(string) error -} - -// Getter is an interface that allows the contents of a Value to be retrieved. -// It wraps the Value interface, rather than being part of it, because it -// appeared after Go 1 and its compatibility rules. All Value types provided -// by this package satisfy the Getter interface. -type Getter interface { - Value - Get() interface{} -} - -// ErrorHandling defines how to handle flag parsing errors. -type ErrorHandling int - -// ErrorHandling strategies available when a flag parsing error occurs -const ( - ContinueOnError ErrorHandling = iota - ExitOnError - PanicOnError -) - -// A FlagSet represents a set of defined flags. The zero value of a FlagSet -// has no name and has ContinueOnError error handling. -type FlagSet struct { - // Usage is the function called when an error occurs while parsing flags. - // The field is a function (not a method) that may be changed to point to - // a custom error handler. - Usage func() - ShortUsage func() - - name string - parsed bool - actual map[string]*Flag - formal map[string]*Flag - args []string // arguments after flags - errorHandling ErrorHandling - output io.Writer // nil means stderr; use Out() accessor - nArgRequirements []nArgRequirement -} - -// A Flag represents the state of a flag. -type Flag struct { - Names []string // name as it appears on command line - Usage string // help message - Value Value // value as set - DefValue string // default value (as text); for usage message -} - -type flagSlice []string - -func (p flagSlice) Len() int { return len(p) } -func (p flagSlice) Less(i, j int) bool { - pi, pj := strings.TrimPrefix(p[i], "-"), strings.TrimPrefix(p[j], "-") - lpi, lpj := strings.ToLower(pi), strings.ToLower(pj) - if lpi != lpj { - return lpi < lpj - } - return pi < pj -} -func (p flagSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -// sortFlags returns the flags as a slice in lexicographical sorted order. -func sortFlags(flags map[string]*Flag) []*Flag { - var list flagSlice - - // The sorted list is based on the first name, when flag map might use the other names. 
- nameMap := make(map[string]string) - - for n, f := range flags { - fName := strings.TrimPrefix(f.Names[0], "#") - nameMap[fName] = n - if len(f.Names) == 1 { - list = append(list, fName) - continue - } - - found := false - for _, name := range list { - if name == fName { - found = true - break - } - } - if !found { - list = append(list, fName) - } - } - sort.Sort(list) - result := make([]*Flag, len(list)) - for i, name := range list { - result[i] = flags[nameMap[name]] - } - return result -} - -// Name returns the name of the FlagSet. -func (fs *FlagSet) Name() string { - return fs.name -} - -// Out returns the destination for usage and error messages. -func (fs *FlagSet) Out() io.Writer { - if fs.output == nil { - return os.Stderr - } - return fs.output -} - -// SetOutput sets the destination for usage and error messages. -// If output is nil, os.Stderr is used. -func (fs *FlagSet) SetOutput(output io.Writer) { - fs.output = output -} - -// VisitAll visits the flags in lexicographical order, calling fn for each. -// It visits all flags, even those not set. -func (fs *FlagSet) VisitAll(fn func(*Flag)) { - for _, flag := range sortFlags(fs.formal) { - fn(flag) - } -} - -// VisitAll visits the command-line flags in lexicographical order, calling -// fn for each. It visits all flags, even those not set. -func VisitAll(fn func(*Flag)) { - CommandLine.VisitAll(fn) -} - -// Visit visits the flags in lexicographical order, calling fn for each. -// It visits only those flags that have been set. -func (fs *FlagSet) Visit(fn func(*Flag)) { - for _, flag := range sortFlags(fs.actual) { - fn(flag) - } -} - -// Visit visits the command-line flags in lexicographical order, calling fn -// for each. It visits only those flags that have been set. -func Visit(fn func(*Flag)) { - CommandLine.Visit(fn) -} - -// Lookup returns the Flag structure of the named flag, returning nil if none exists. -func (fs *FlagSet) Lookup(name string) *Flag { - return fs.formal[name] -} - -// IsSet indicates whether the specified flag is set in the given FlagSet. -func (fs *FlagSet) IsSet(name string) bool { - return fs.actual[name] != nil -} - -// Lookup returns the Flag structure of the named command-line flag, -// returning nil if none exists. -func Lookup(name string) *Flag { - return CommandLine.formal[name] -} - -// IsSet indicates whether the specified flag was specified at all on the command line. -func IsSet(name string) bool { - return CommandLine.IsSet(name) -} - -type nArgRequirementType int - -// Indicators passed to FlagSet.Require to describe an argument-count requirement; -// the check itself is performed by FlagSet.CheckArgs. -const ( - Exact nArgRequirementType = iota - Max - Min -) - -type nArgRequirement struct { - Type nArgRequirementType - N int -} - -// Require adds a requirement about the number of arguments for the FlagSet. -// The first parameter can be Exact, Max, or Min to respectively specify the exact, -// the maximum, or the minimum number of arguments required. -// The actual check is done in FlagSet.CheckArgs(). -func (fs *FlagSet) Require(nArgRequirementType nArgRequirementType, nArg int) { - fs.nArgRequirements = append(fs.nArgRequirements, nArgRequirement{nArgRequirementType, nArg}) -} - -// CheckArgs uses the requirements set by FlagSet.Require() to validate -// the number of arguments. If the requirements are not met, -// an error message string is returned.
-func (fs *FlagSet) CheckArgs() (message string) { - for _, req := range fs.nArgRequirements { - var arguments string - if req.N == 1 { - arguments = "1 argument" - } else { - arguments = fmt.Sprintf("%d arguments", req.N) - } - - str := func(kind string) string { - return fmt.Sprintf("%q requires %s%s", fs.name, kind, arguments) - } - - switch req.Type { - case Exact: - if fs.NArg() != req.N { - return str("") - } - case Max: - if fs.NArg() > req.N { - return str("a maximum of ") - } - case Min: - if fs.NArg() < req.N { - return str("a minimum of ") - } - } - } - return "" -} - -// Set sets the value of the named flag. -func (fs *FlagSet) Set(name, value string) error { - flag, ok := fs.formal[name] - if !ok { - return fmt.Errorf("no such flag -%v", name) - } - if err := flag.Value.Set(value); err != nil { - return err - } - if fs.actual == nil { - fs.actual = make(map[string]*Flag) - } - fs.actual[name] = flag - return nil -} - -// Set sets the value of the named command-line flag. -func Set(name, value string) error { - return CommandLine.Set(name, value) -} - -// isZeroValue guesses whether the string represents the zero -// value for a flag. It is not accurate but in practice works OK. -func isZeroValue(value string) bool { - switch value { - case "false": - return true - case "": - return true - case "0": - return true - } - return false -} - -// PrintDefaults prints, to standard error unless configured -// otherwise, the default values of all defined flags in the set. -func (fs *FlagSet) PrintDefaults() { - writer := tabwriter.NewWriter(fs.Out(), 20, 1, 3, ' ', 0) - home := homedir.Get() - - // Don't substitute when HOME is / - if runtime.GOOS != "windows" && home == "/" { - home = "" - } - - // Add a blank line between cmd description and list of options - if fs.FlagCount() > 0 { - fmt.Fprintln(writer, "") - } - - fs.VisitAll(func(flag *Flag) { - names := []string{} - for _, name := range flag.Names { - if name[0] != '#' { - names = append(names, name) - } - } - if len(names) > 0 && len(flag.Usage) > 0 { - val := flag.DefValue - - if home != "" && strings.HasPrefix(val, home) { - val = homedir.GetShortcutString() + val[len(home):] - } - - if isZeroValue(val) { - format := " -%s" - fmt.Fprintf(writer, format, strings.Join(names, ", -")) - } else { - format := " -%s=%s" - fmt.Fprintf(writer, format, strings.Join(names, ", -"), val) - } - for _, line := range strings.Split(flag.Usage, "\n") { - fmt.Fprintln(writer, "\t", line) - } - } - }) - writer.Flush() -} - -// PrintDefaults prints to standard error the default values of all defined command-line flags. -func PrintDefaults() { - CommandLine.PrintDefaults() -} - -// defaultUsage is the default function to print a usage message. -func defaultUsage(fs *FlagSet) { - if fs.name == "" { - fmt.Fprintf(fs.Out(), "Usage:\n") - } else { - fmt.Fprintf(fs.Out(), "Usage of %s:\n", fs.name) - } - fs.PrintDefaults() -} - -// NOTE: Usage is not just defaultUsage(CommandLine) -// because it serves (via godoc flag Usage) as the example -// for how to write your own usage function. - -// Usage prints to standard error a usage message documenting all defined command-line flags. -// The function is a variable that may be changed to point to a custom function. -var Usage = func() { - fmt.Fprintf(CommandLine.Out(), "Usage of %s:\n", os.Args[0]) - PrintDefaults() -} - -// ShortUsage prints to standard error a usage message documenting the standard command layout -// The function is a variable that may be changed to point to a custom function. 
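A hypothetical sketch of how the Require/CheckArgs pair implemented above composes; the subcommand name and arguments are invented for illustration:

    package main

    import (
        "fmt"

        flag "github.com/docker/docker/pkg/mflag"
    )

    func main() {
        fs := flag.NewFlagSet("inspect", flag.ContinueOnError)
        fs.Require(flag.Exact, 1) // demand exactly one positional argument
        if err := fs.Parse([]string{"container1", "container2"}); err != nil {
            return
        }
        if msg := fs.CheckArgs(); msg != "" {
            fmt.Fprintln(fs.Out(), msg) // prints: "inspect" requires 1 argument
        }
    }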
-var ShortUsage = func() { - fmt.Fprintf(CommandLine.output, "Usage of %s:\n", os.Args[0]) -} - -// FlagCount returns the number of flags that have been defined. -func (fs *FlagSet) FlagCount() int { return len(sortFlags(fs.formal)) } - -// FlagCountUndeprecated returns the number of undeprecated flags that have been defined. -func (fs *FlagSet) FlagCountUndeprecated() int { - count := 0 - for _, flag := range sortFlags(fs.formal) { - for _, name := range flag.Names { - if name[0] != '#' { - count++ - break - } - } - } - return count -} - -// NFlag returns the number of flags that have been set. -func (fs *FlagSet) NFlag() int { return len(fs.actual) } - -// NFlag returns the number of command-line flags that have been set. -func NFlag() int { return len(CommandLine.actual) } - -// Arg returns the i'th argument. Arg(0) is the first remaining argument -// after flags have been processed. -func (fs *FlagSet) Arg(i int) string { - if i < 0 || i >= len(fs.args) { - return "" - } - return fs.args[i] -} - -// Arg returns the i'th command-line argument. Arg(0) is the first remaining argument -// after flags have been processed. -func Arg(i int) string { - return CommandLine.Arg(i) -} - -// NArg is the number of arguments remaining after flags have been processed. -func (fs *FlagSet) NArg() int { return len(fs.args) } - -// NArg is the number of arguments remaining after flags have been processed. -func NArg() int { return len(CommandLine.args) } - -// Args returns the non-flag arguments. -func (fs *FlagSet) Args() []string { return fs.args } - -// Args returns the non-flag command-line arguments. -func Args() []string { return CommandLine.args } - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func (fs *FlagSet) BoolVar(p *bool, names []string, value bool, usage string) { - fs.Var(newBoolValue(value, p), names, usage) -} - -// BoolVar defines a bool flag with specified name, default value, and usage string. -// The argument p points to a bool variable in which to store the value of the flag. -func BoolVar(p *bool, names []string, value bool, usage string) { - CommandLine.Var(newBoolValue(value, p), names, usage) -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func (fs *FlagSet) Bool(names []string, value bool, usage string) *bool { - p := new(bool) - fs.BoolVar(p, names, value, usage) - return p -} - -// Bool defines a bool flag with specified name, default value, and usage string. -// The return value is the address of a bool variable that stores the value of the flag. -func Bool(names []string, value bool, usage string) *bool { - return CommandLine.Bool(names, value, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func (fs *FlagSet) IntVar(p *int, names []string, value int, usage string) { - fs.Var(newIntValue(value, p), names, usage) -} - -// IntVar defines an int flag with specified name, default value, and usage string. -// The argument p points to an int variable in which to store the value of the flag. -func IntVar(p *int, names []string, value int, usage string) { - CommandLine.Var(newIntValue(value, p), names, usage) -} - -// Int defines an int flag with specified name, default value, and usage string. 
-// The return value is the address of an int variable that stores the value of the flag. -func (fs *FlagSet) Int(names []string, value int, usage string) *int { - p := new(int) - fs.IntVar(p, names, value, usage) - return p -} - -// Int defines an int flag with specified name, default value, and usage string. -// The return value is the address of an int variable that stores the value of the flag. -func Int(names []string, value int, usage string) *int { - return CommandLine.Int(names, value, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func (fs *FlagSet) Int64Var(p *int64, names []string, value int64, usage string) { - fs.Var(newInt64Value(value, p), names, usage) -} - -// Int64Var defines an int64 flag with specified name, default value, and usage string. -// The argument p points to an int64 variable in which to store the value of the flag. -func Int64Var(p *int64, names []string, value int64, usage string) { - CommandLine.Var(newInt64Value(value, p), names, usage) -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func (fs *FlagSet) Int64(names []string, value int64, usage string) *int64 { - p := new(int64) - fs.Int64Var(p, names, value, usage) - return p -} - -// Int64 defines an int64 flag with specified name, default value, and usage string. -// The return value is the address of an int64 variable that stores the value of the flag. -func Int64(names []string, value int64, usage string) *int64 { - return CommandLine.Int64(names, value, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func (fs *FlagSet) UintVar(p *uint, names []string, value uint, usage string) { - fs.Var(newUintValue(value, p), names, usage) -} - -// UintVar defines a uint flag with specified name, default value, and usage string. -// The argument p points to a uint variable in which to store the value of the flag. -func UintVar(p *uint, names []string, value uint, usage string) { - CommandLine.Var(newUintValue(value, p), names, usage) -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func (fs *FlagSet) Uint(names []string, value uint, usage string) *uint { - p := new(uint) - fs.UintVar(p, names, value, usage) - return p -} - -// Uint defines a uint flag with specified name, default value, and usage string. -// The return value is the address of a uint variable that stores the value of the flag. -func Uint(names []string, value uint, usage string) *uint { - return CommandLine.Uint(names, value, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. -func (fs *FlagSet) Uint64Var(p *uint64, names []string, value uint64, usage string) { - fs.Var(newUint64Value(value, p), names, usage) -} - -// Uint64Var defines a uint64 flag with specified name, default value, and usage string. -// The argument p points to a uint64 variable in which to store the value of the flag. 
-func Uint64Var(p *uint64, names []string, value uint64, usage string) { - CommandLine.Var(newUint64Value(value, p), names, usage) -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func (fs *FlagSet) Uint64(names []string, value uint64, usage string) *uint64 { - p := new(uint64) - fs.Uint64Var(p, names, value, usage) - return p -} - -// Uint64 defines a uint64 flag with specified name, default value, and usage string. -// The return value is the address of a uint64 variable that stores the value of the flag. -func Uint64(names []string, value uint64, usage string) *uint64 { - return CommandLine.Uint64(names, value, usage) -} - -// Uint16Var defines a uint16 flag with specified name, default value, and usage string. -// The argument p points to a uint16 variable in which to store the value of the flag. -func (fs *FlagSet) Uint16Var(p *uint16, names []string, value uint16, usage string) { - fs.Var(newUint16Value(value, p), names, usage) -} - -// Uint16Var defines a uint16 flag with specified name, default value, and usage string. -// The argument p points to a uint16 variable in which to store the value of the flag. -func Uint16Var(p *uint16, names []string, value uint16, usage string) { - CommandLine.Var(newUint16Value(value, p), names, usage) -} - -// Uint16 defines a uint16 flag with specified name, default value, and usage string. -// The return value is the address of a uint16 variable that stores the value of the flag. -func (fs *FlagSet) Uint16(names []string, value uint16, usage string) *uint16 { - p := new(uint16) - fs.Uint16Var(p, names, value, usage) - return p -} - -// Uint16 defines a uint16 flag with specified name, default value, and usage string. -// The return value is the address of a uint16 variable that stores the value of the flag. -func Uint16(names []string, value uint16, usage string) *uint16 { - return CommandLine.Uint16(names, value, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func (fs *FlagSet) StringVar(p *string, names []string, value string, usage string) { - fs.Var(newStringValue(value, p), names, usage) -} - -// StringVar defines a string flag with specified name, default value, and usage string. -// The argument p points to a string variable in which to store the value of the flag. -func StringVar(p *string, names []string, value string, usage string) { - CommandLine.Var(newStringValue(value, p), names, usage) -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func (fs *FlagSet) String(names []string, value string, usage string) *string { - p := new(string) - fs.StringVar(p, names, value, usage) - return p -} - -// String defines a string flag with specified name, default value, and usage string. -// The return value is the address of a string variable that stores the value of the flag. -func String(names []string, value string, usage string) *string { - return CommandLine.String(names, value, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. 
-func (fs *FlagSet) Float64Var(p *float64, names []string, value float64, usage string) { - fs.Var(newFloat64Value(value, p), names, usage) -} - -// Float64Var defines a float64 flag with specified name, default value, and usage string. -// The argument p points to a float64 variable in which to store the value of the flag. -func Float64Var(p *float64, names []string, value float64, usage string) { - CommandLine.Var(newFloat64Value(value, p), names, usage) -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func (fs *FlagSet) Float64(names []string, value float64, usage string) *float64 { - p := new(float64) - fs.Float64Var(p, names, value, usage) - return p -} - -// Float64 defines a float64 flag with specified name, default value, and usage string. -// The return value is the address of a float64 variable that stores the value of the flag. -func Float64(names []string, value float64, usage string) *float64 { - return CommandLine.Float64(names, value, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func (fs *FlagSet) DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - fs.Var(newDurationValue(value, p), names, usage) -} - -// DurationVar defines a time.Duration flag with specified name, default value, and usage string. -// The argument p points to a time.Duration variable in which to store the value of the flag. -func DurationVar(p *time.Duration, names []string, value time.Duration, usage string) { - CommandLine.Var(newDurationValue(value, p), names, usage) -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func (fs *FlagSet) Duration(names []string, value time.Duration, usage string) *time.Duration { - p := new(time.Duration) - fs.DurationVar(p, names, value, usage) - return p -} - -// Duration defines a time.Duration flag with specified name, default value, and usage string. -// The return value is the address of a time.Duration variable that stores the value of the flag. -func Duration(names []string, value time.Duration, usage string) *time.Duration { - return CommandLine.Duration(names, value, usage) -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func (fs *FlagSet) Var(value Value, names []string, usage string) { - // Remember the default value as a string; it won't change. 
- flag := &Flag{names, usage, value, value.String()} - for _, name := range names { - name = strings.TrimPrefix(name, "#") - _, alreadythere := fs.formal[name] - if alreadythere { - var msg string - if fs.name == "" { - msg = fmt.Sprintf("flag redefined: %s", name) - } else { - msg = fmt.Sprintf("%s flag redefined: %s", fs.name, name) - } - fmt.Fprintln(fs.Out(), msg) - panic(msg) // Happens only if flags are declared with identical names - } - if fs.formal == nil { - fs.formal = make(map[string]*Flag) - } - fs.formal[name] = flag - } -} - -// Var defines a flag with the specified name and usage string. The type and -// value of the flag are represented by the first argument, of type Value, which -// typically holds a user-defined implementation of Value. For instance, the -// caller could create a flag that turns a comma-separated string into a slice -// of strings by giving the slice the methods of Value; in particular, Set would -// decompose the comma-separated string into the slice. -func Var(value Value, names []string, usage string) { - CommandLine.Var(value, names, usage) -} - -// failf prints to standard error a formatted error and usage message and -// returns the error. -func (fs *FlagSet) failf(format string, a ...interface{}) error { - err := fmt.Errorf(format, a...) - fmt.Fprintln(fs.Out(), err) - if os.Args[0] == fs.name { - fmt.Fprintf(fs.Out(), "See '%s --help'.\n", os.Args[0]) - } else { - fmt.Fprintf(fs.Out(), "See '%s %s --help'.\n", os.Args[0], fs.name) - } - return err -} - -// usage calls the Usage method for the flag set, or the usage function if -// the flag set is CommandLine. -func (fs *FlagSet) usage() { - if fs == CommandLine { - Usage() - } else if fs.Usage == nil { - defaultUsage(fs) - } else { - fs.Usage() - } -} - -func trimQuotes(str string) string { - if len(str) == 0 { - return str - } - type quote struct { - start, end byte - } - - // All valid quote types. - quotes := []quote{ - // Double quotes - { - start: '"', - end: '"', - }, - - // Single quotes - { - start: '\'', - end: '\'', - }, - } - - for _, quote := range quotes { - // Only strip if outermost match. - if str[0] == quote.start && str[len(str)-1] == quote.end { - str = str[1 : len(str)-1] - break - } - } - - return str -} - -// parseOne parses one flag. It reports whether a flag was seen. -func (fs *FlagSet) parseOne() (bool, string, error) { - if len(fs.args) == 0 { - return false, "", nil - } - s := fs.args[0] - if len(s) == 0 || s[0] != '-' || len(s) == 1 { - return false, "", nil - } - if s[1] == '-' && len(s) == 2 { // "--" terminates the flags - fs.args = fs.args[1:] - return false, "", nil - } - name := s[1:] - if len(name) == 0 || name[0] == '=' { - return false, "", fs.failf("bad flag syntax: %s", s) - } - - // it's a flag. does it have an argument? - fs.args = fs.args[1:] - hasValue := false - value := "" - if i := strings.Index(name, "="); i != -1 { - value = trimQuotes(name[i+1:]) - hasValue = true - name = name[:i] - } - - m := fs.formal - flag, alreadythere := m[name] // BUG - if !alreadythere { - if name == "-help" || name == "help" || name == "h" { // special case for nice help message. 
- fs.usage() - return false, "", ErrHelp - } - if len(name) > 0 && name[0] == '-' { - return false, "", fs.failf("flag provided but not defined: -%s", name) - } - return false, name, ErrRetry - } - if fv, ok := flag.Value.(boolFlag); ok && fv.IsBoolFlag() { // special case: doesn't need an arg - if hasValue { - if err := fv.Set(value); err != nil { - return false, "", fs.failf("invalid boolean value %q for -%s: %v", value, name, err) - } - } else { - fv.Set("true") - } - } else { - // It must have a value, which might be the next argument. - if !hasValue && len(fs.args) > 0 { - // value is the next arg - hasValue = true - value, fs.args = fs.args[0], fs.args[1:] - } - if !hasValue { - return false, "", fs.failf("flag needs an argument: -%s", name) - } - if err := flag.Value.Set(value); err != nil { - return false, "", fs.failf("invalid value %q for flag -%s: %v", value, name, err) - } - } - if fs.actual == nil { - fs.actual = make(map[string]*Flag) - } - fs.actual[name] = flag - for i, n := range flag.Names { - if n == fmt.Sprintf("#%s", name) { - replacement := "" - for j := i; j < len(flag.Names); j++ { - if flag.Names[j][0] != '#' { - replacement = flag.Names[j] - break - } - } - if replacement != "" { - fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be replaced by '-%s' soon. See usage.\n", name, replacement) - } else { - fmt.Fprintf(fs.Out(), "Warning: '-%s' is deprecated, it will be removed soon. See usage.\n", name) - } - } - } - return true, "", nil -} - -// Parse parses flag definitions from the argument list, which should not -// include the command name. Must be called after all flags in the FlagSet -// are defined and before flags are accessed by the program. -// The return value will be ErrHelp if -help was set but not defined. -func (fs *FlagSet) Parse(arguments []string) error { - fs.parsed = true - fs.args = arguments - for { - seen, name, err := fs.parseOne() - if seen { - continue - } - if err == nil { - break - } - if err == ErrRetry { - if len(name) > 1 { - err = nil - for _, letter := range strings.Split(name, "") { - fs.args = append([]string{"-" + letter}, fs.args...) - seen2, _, err2 := fs.parseOne() - if seen2 { - continue - } - if err2 != nil { - err = fs.failf("flag provided but not defined: -%s", name) - break - } - } - if err == nil { - continue - } - } else { - err = fs.failf("flag provided but not defined: -%s", name) - } - } - switch fs.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(125) - case PanicOnError: - panic(err) - } - } - return nil -} - -// ParseFlags is a utility function that adds a help flag if withHelp is true, -// calls fs.Parse(args) and prints a relevant error message if the number of -// arguments is incorrect. It returns an error only if error handling is -// set to ContinueOnError and parsing fails. If error handling is set to -// ExitOnError, it's safe to ignore the return value.
-func (fs *FlagSet) ParseFlags(args []string, withHelp bool) error { - var help *bool - if withHelp { - help = fs.Bool([]string{"#help", "-help"}, false, "Print usage") - } - if err := fs.Parse(args); err != nil { - return err - } - if help != nil && *help { - fs.SetOutput(os.Stdout) - fs.Usage() - os.Exit(0) - } - if str := fs.CheckArgs(); str != "" { - fs.SetOutput(os.Stderr) - fs.ReportError(str, withHelp) - fs.ShortUsage() - os.Exit(1) - } - return nil -} - -// ReportError is a utility method that prints a user-friendly message -// containing the error that occurred during parsing and a suggestion to get help. -func (fs *FlagSet) ReportError(str string, withHelp bool) { - if withHelp { - if os.Args[0] == fs.Name() { - str += ".\nSee '" + os.Args[0] + " --help'" - } else { - str += ".\nSee '" + os.Args[0] + " " + fs.Name() + " --help'" - } - } - fmt.Fprintf(fs.Out(), "%s: %s.\n", os.Args[0], str) -} - -// Parsed reports whether fs.Parse has been called. -func (fs *FlagSet) Parsed() bool { - return fs.parsed -} - -// Parse parses the command-line flags from os.Args[1:]. Must be called -// after all flags are defined and before flags are accessed by the program. -func Parse() { - // Ignore errors; CommandLine is set for ExitOnError. - CommandLine.Parse(os.Args[1:]) -} - -// Parsed returns true if the command-line flags have been parsed. -func Parsed() bool { - return CommandLine.Parsed() -} - -// CommandLine is the default set of command-line flags, parsed from os.Args. -// The top-level functions such as BoolVar, Arg, and so on are wrappers for the -// methods of CommandLine. -var CommandLine = NewFlagSet(os.Args[0], ExitOnError) - -// NewFlagSet returns a new, empty flag set with the specified name and -// error handling property. -func NewFlagSet(name string, errorHandling ErrorHandling) *FlagSet { - f := &FlagSet{ - name: name, - errorHandling: errorHandling, - } - return f -} - -// Init sets the name and error handling property for a flag set. -// By default, the zero FlagSet uses an empty name and the -// ContinueOnError error handling policy. -func (fs *FlagSet) Init(name string, errorHandling ErrorHandling) { - fs.name = name - fs.errorHandling = errorHandling -} - -type mergeVal struct { - Value - key string - fset *FlagSet -} - -func (v mergeVal) Set(s string) error { - return v.fset.Set(v.key, s) -} - -func (v mergeVal) IsBoolFlag() bool { - if b, ok := v.Value.(boolFlag); ok { - return b.IsBoolFlag() - } - return false -} - -// Name returns the name of a mergeVal. -// If the original value had a name, return the original name; -// otherwise, return the key assigned to this mergeVal. -func (v mergeVal) Name() string { - type namedValue interface { - Name() string - } - if nVal, ok := v.Value.(namedValue); ok { - return nVal.Name() - } - return v.key -} - -// Merge is a helper function that merges n FlagSets into a single dest FlagSet. -// In case of a name collision between the flagsets, it applies -// the destination FlagSet's errorHandling behavior.
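Hypothetically, using Merge to fold a shared base set into a command-specific one could look like this; the set and flag names are invented for illustration:

    package main

    import (
        flag "github.com/docker/docker/pkg/mflag"
    )

    func main() {
        base := flag.NewFlagSet("", flag.ContinueOnError)
        debug := base.Bool([]string{"D", "-debug"}, false, "enable debug output")

        cmd := flag.NewFlagSet("run", flag.ContinueOnError)
        name := cmd.String([]string{"-name"}, "", "assign a name")

        // After merging, "run" accepts -D/--debug in addition to --name;
        // values set through cmd are forwarded to base by the mergeVal wrapper.
        if err := flag.Merge(cmd, base); err != nil {
            panic(err)
        }
        _ = cmd.Parse([]string{"-D", "--name", "test"})
        _, _ = *debug, *name
    }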
-func Merge(dest *FlagSet, flagsets ...*FlagSet) error { - for _, fset := range flagsets { - if fset.formal == nil { - continue - } - for k, f := range fset.formal { - if _, ok := dest.formal[k]; ok { - var err error - if fset.name == "" { - err = fmt.Errorf("flag redefined: %s", k) - } else { - err = fmt.Errorf("%s flag redefined: %s", fset.name, k) - } - fmt.Fprintln(fset.Out(), err.Error()) - // Happens only if flags are declared with identical names - switch dest.errorHandling { - case ContinueOnError: - return err - case ExitOnError: - os.Exit(2) - case PanicOnError: - panic(err) - } - } - newF := *f - newF.Value = mergeVal{f.Value, k, fset} - if dest.formal == nil { - dest.formal = make(map[string]*Flag) - } - dest.formal[k] = &newF - } - } - return nil -} - -// IsEmpty reports if the FlagSet is actually empty. -func (fs *FlagSet) IsEmpty() bool { - return len(fs.actual) == 0 -} diff --git a/pkg/mflag/flag_test.go b/pkg/mflag/flag_test.go deleted file mode 100644 index 138355546e..0000000000 --- a/pkg/mflag/flag_test.go +++ /dev/null @@ -1,527 +0,0 @@ -// Copyright 2014-2016 The Docker & Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package mflag - -import ( - "bytes" - "fmt" - "os" - "sort" - "strings" - "testing" - "time" -) - -// ResetForTesting clears all flag state and sets the usage function as directed. -// After calling ResetForTesting, parse errors in flag handling will not -// exit the program. -func ResetForTesting(usage func()) { - CommandLine = NewFlagSet(os.Args[0], ContinueOnError) - Usage = usage -} -func boolString(s string) string { - if s == "0" { - return "false" - } - return "true" -} - -func TestEverything(t *testing.T) { - ResetForTesting(nil) - Bool([]string{"test_bool"}, false, "bool value") - Int([]string{"test_int"}, 0, "int value") - Int64([]string{"test_int64"}, 0, "int64 value") - Uint([]string{"test_uint"}, 0, "uint value") - Uint64([]string{"test_uint64"}, 0, "uint64 value") - String([]string{"test_string"}, "0", "string value") - Float64([]string{"test_float64"}, 0, "float64 value") - Duration([]string{"test_duration"}, 0, "time.Duration value") - - m := make(map[string]*Flag) - desired := "0" - visitor := func(f *Flag) { - for _, name := range f.Names { - if len(name) > 5 && name[0:5] == "test_" { - m[name] = f - ok := false - switch { - case f.Value.String() == desired: - ok = true - case name == "test_bool" && f.Value.String() == boolString(desired): - ok = true - case name == "test_duration" && f.Value.String() == desired+"s": - ok = true - } - if !ok { - t.Error("Visit: bad value", f.Value.String(), "for", name) - } - } - } - } - VisitAll(visitor) - if len(m) != 8 { - t.Error("VisitAll misses some flags") - for k, v := range m { - t.Log(k, *v) - } - } - m = make(map[string]*Flag) - Visit(visitor) - if len(m) != 0 { - t.Errorf("Visit sees unset flags") - for k, v := range m { - t.Log(k, *v) - } - } - // Now set all flags - Set("test_bool", "true") - Set("test_int", "1") - Set("test_int64", "1") - Set("test_uint", "1") - Set("test_uint64", "1") - Set("test_string", "1") - Set("test_float64", "1") - Set("test_duration", "1s") - desired = "1" - Visit(visitor) - if len(m) != 8 { - t.Error("Visit fails after set") - for k, v := range m { - t.Log(k, *v) - } - } - // Now test they're visited in sort order. 
- var flagNames []string - Visit(func(f *Flag) { - for _, name := range f.Names { - flagNames = append(flagNames, name) - } - }) - if !sort.StringsAreSorted(flagNames) { - t.Errorf("flag names not sorted: %v", flagNames) - } -} - -func TestGet(t *testing.T) { - ResetForTesting(nil) - Bool([]string{"test_bool"}, true, "bool value") - Int([]string{"test_int"}, 1, "int value") - Int64([]string{"test_int64"}, 2, "int64 value") - Uint([]string{"test_uint"}, 3, "uint value") - Uint64([]string{"test_uint64"}, 4, "uint64 value") - String([]string{"test_string"}, "5", "string value") - Float64([]string{"test_float64"}, 6, "float64 value") - Duration([]string{"test_duration"}, 7, "time.Duration value") - - visitor := func(f *Flag) { - for _, name := range f.Names { - if len(name) > 5 && name[0:5] == "test_" { - g, ok := f.Value.(Getter) - if !ok { - t.Errorf("Visit: value does not satisfy Getter: %T", f.Value) - return - } - switch name { - case "test_bool": - ok = g.Get() == true - case "test_int": - ok = g.Get() == int(1) - case "test_int64": - ok = g.Get() == int64(2) - case "test_uint": - ok = g.Get() == uint(3) - case "test_uint64": - ok = g.Get() == uint64(4) - case "test_string": - ok = g.Get() == "5" - case "test_float64": - ok = g.Get() == float64(6) - case "test_duration": - ok = g.Get() == time.Duration(7) - } - if !ok { - t.Errorf("Visit: bad value %T(%v) for %s", g.Get(), g.Get(), name) - } - } - } - } - VisitAll(visitor) -} - -func testParse(f *FlagSet, t *testing.T) { - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - boolFlag := f.Bool([]string{"bool"}, false, "bool value") - bool2Flag := f.Bool([]string{"bool2"}, false, "bool2 value") - f.Bool([]string{"bool3"}, false, "bool3 value") - bool4Flag := f.Bool([]string{"bool4"}, false, "bool4 value") - intFlag := f.Int([]string{"-int"}, 0, "int value") - int64Flag := f.Int64([]string{"-int64"}, 0, "int64 value") - uintFlag := f.Uint([]string{"uint"}, 0, "uint value") - uint64Flag := f.Uint64([]string{"-uint64"}, 0, "uint64 value") - stringFlag := f.String([]string{"string"}, "0", "string value") - f.String([]string{"string2"}, "0", "string2 value") - singleQuoteFlag := f.String([]string{"squote"}, "", "single quoted value") - doubleQuoteFlag := f.String([]string{"dquote"}, "", "double quoted value") - mixedQuoteFlag := f.String([]string{"mquote"}, "", "mixed quoted value") - mixed2QuoteFlag := f.String([]string{"mquote2"}, "", "mixed2 quoted value") - nestedQuoteFlag := f.String([]string{"nquote"}, "", "nested quoted value") - nested2QuoteFlag := f.String([]string{"nquote2"}, "", "nested2 quoted value") - float64Flag := f.Float64([]string{"float64"}, 0, "float64 value") - durationFlag := f.Duration([]string{"duration"}, 5*time.Second, "time.Duration value") - extra := "one-extra-argument" - args := []string{ - "-bool", - "-bool2=true", - "-bool4=false", - "--int", "22", - "--int64", "0x23", - "-uint", "24", - "--uint64", "25", - "-string", "hello", - "-squote='single'", - `-dquote="double"`, - `-mquote='mixed"`, - `-mquote2="mixed2'`, - `-nquote="'single nested'"`, - `-nquote2='"double nested"'`, - "-float64", "2718e28", - "-duration", "2m", - extra, - } - if err := f.Parse(args); err != nil { - t.Fatal(err) - } - if !f.Parsed() { - t.Error("f.Parse() = false after Parse") - } - if *boolFlag != true { - t.Error("bool flag should be true, is ", *boolFlag) - } - if *bool2Flag != true { - t.Error("bool2 flag should be true, is ", *bool2Flag) - } - if !f.IsSet("bool2") { - t.Error("bool2 should be marked as set") - } - if 
f.IsSet("bool3") { - t.Error("bool3 should not be marked as set") - } - if !f.IsSet("bool4") { - t.Error("bool4 should be marked as set") - } - if *bool4Flag != false { - t.Error("bool4 flag should be false, is ", *bool4Flag) - } - if *intFlag != 22 { - t.Error("int flag should be 22, is ", *intFlag) - } - if *int64Flag != 0x23 { - t.Error("int64 flag should be 0x23, is ", *int64Flag) - } - if *uintFlag != 24 { - t.Error("uint flag should be 24, is ", *uintFlag) - } - if *uint64Flag != 25 { - t.Error("uint64 flag should be 25, is ", *uint64Flag) - } - if *stringFlag != "hello" { - t.Error("string flag should be `hello`, is ", *stringFlag) - } - if !f.IsSet("string") { - t.Error("string flag should be marked as set") - } - if f.IsSet("string2") { - t.Error("string2 flag should not be marked as set") - } - if *singleQuoteFlag != "single" { - t.Error("single quote string flag should be `single`, is ", *singleQuoteFlag) - } - if *doubleQuoteFlag != "double" { - t.Error("double quote string flag should be `double`, is ", *doubleQuoteFlag) - } - if *mixedQuoteFlag != `'mixed"` { - t.Error("mixed quote string flag should be `'mixed\"`, is ", *mixedQuoteFlag) - } - if *mixed2QuoteFlag != `"mixed2'` { - t.Error("mixed2 quote string flag should be `\"mixed2'`, is ", *mixed2QuoteFlag) - } - if *nestedQuoteFlag != "'single nested'" { - t.Error("nested quote string flag should be `'single nested'`, is ", *nestedQuoteFlag) - } - if *nested2QuoteFlag != `"double nested"` { - t.Error("double quote string flag should be `\"double nested\"`, is ", *nested2QuoteFlag) - } - if *float64Flag != 2718e28 { - t.Error("float64 flag should be 2718e28, is ", *float64Flag) - } - if *durationFlag != 2*time.Minute { - t.Error("duration flag should be 2m, is ", *durationFlag) - } - if len(f.Args()) != 1 { - t.Error("expected one argument, got", len(f.Args())) - } else if f.Args()[0] != extra { - t.Errorf("expected argument %q got %q", extra, f.Args()[0]) - } -} - -func testPanic(f *FlagSet, t *testing.T) { - f.Int([]string{"-int"}, 0, "int value") - if f.Parsed() { - t.Error("f.Parse() = true before Parse") - } - args := []string{ - "-int", "21", - } - f.Parse(args) -} - -func TestParsePanic(t *testing.T) { - ResetForTesting(func() {}) - testPanic(CommandLine, t) -} - -func TestParse(t *testing.T) { - ResetForTesting(func() { t.Error("bad parse") }) - testParse(CommandLine, t) -} - -func TestFlagSetParse(t *testing.T) { - testParse(NewFlagSet("test", ContinueOnError), t) -} - -// Declare a user-defined flag type. -type flagVar []string - -func (f *flagVar) String() string { - return fmt.Sprint([]string(*f)) -} - -func (f *flagVar) Set(value string) error { - *f = append(*f, value) - return nil -} - -func TestUserDefined(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var v flagVar - flags.Var(&v, []string{"v"}, "usage") - if err := flags.Parse([]string{"-v", "1", "-v", "2", "-v=3"}); err != nil { - t.Error(err) - } - if len(v) != 3 { - t.Fatal("expected 3 args; got ", len(v)) - } - expect := "[1 2 3]" - if v.String() != expect { - t.Errorf("expected value %q got %q", expect, v.String()) - } -} - -// Declare a user-defined boolean flag type. 
-type boolFlagVar struct { - count int -} - -func (b *boolFlagVar) String() string { - return fmt.Sprintf("%d", b.count) -} - -func (b *boolFlagVar) Set(value string) error { - if value == "true" { - b.count++ - } - return nil -} - -func (b *boolFlagVar) IsBoolFlag() bool { - return b.count < 4 -} - -func TestUserDefinedBool(t *testing.T) { - var flags FlagSet - flags.Init("test", ContinueOnError) - var b boolFlagVar - var err error - flags.Var(&b, []string{"b"}, "usage") - if err = flags.Parse([]string{"-b", "-b", "-b", "-b=true", "-b=false", "-b", "barg", "-b"}); err != nil { - if b.count < 4 { - t.Error(err) - } - } - - if b.count != 4 { - t.Errorf("want: %d; got: %d", 4, b.count) - } - - if err == nil { - t.Error("expected error; got none") - } -} - -func TestSetOutput(t *testing.T) { - var flags FlagSet - var buf bytes.Buffer - flags.SetOutput(&buf) - flags.Init("test", ContinueOnError) - flags.Parse([]string{"-unknown"}) - if out := buf.String(); !strings.Contains(out, "-unknown") { - t.Logf("expected output mentioning unknown; got %q", out) - } -} - -// This tests that one can reset the flags. This still works but not well, and is -// superseded by FlagSet. -func TestChangingArgs(t *testing.T) { - ResetForTesting(func() { t.Fatal("bad parse") }) - oldArgs := os.Args - defer func() { os.Args = oldArgs }() - os.Args = []string{"cmd", "-before", "subcmd", "-after", "args"} - before := Bool([]string{"before"}, false, "") - if err := CommandLine.Parse(os.Args[1:]); err != nil { - t.Fatal(err) - } - cmd := Arg(0) - os.Args = Args() - after := Bool([]string{"after"}, false, "") - Parse() - args := Args() - - if !*before || cmd != "subcmd" || !*after || len(args) != 1 || args[0] != "args" { - t.Fatalf("expected true subcmd true [args] got %v %v %v %v", *before, cmd, *after, args) - } -} - -// Test that -help invokes the usage message and returns ErrHelp. -func TestHelp(t *testing.T) { - var helpCalled = false - fs := NewFlagSet("help test", ContinueOnError) - fs.Usage = func() { helpCalled = true } - var flag bool - fs.BoolVar(&flag, []string{"flag"}, false, "regular flag") - // Regular flag invocation should work - err := fs.Parse([]string{"-flag=true"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - if !flag { - t.Error("flag was not set by -flag") - } - if helpCalled { - t.Error("help called for regular flag") - helpCalled = false // reset for next test - } - // Help flag should work as expected. - err = fs.Parse([]string{"-help"}) - if err == nil { - t.Fatal("error expected") - } - if err != ErrHelp { - t.Fatal("expected ErrHelp; got ", err) - } - if !helpCalled { - t.Fatal("help was not called") - } - // If we define a help flag, that should override. - var help bool - fs.BoolVar(&help, []string{"help"}, false, "help flag") - helpCalled = false - err = fs.Parse([]string{"-help"}) - if err != nil { - t.Fatal("expected no error for defined -help; got ", err) - } - if helpCalled { - t.Fatal("help was called; should not have been for defined help flag") - } -} - -// Test the flag count functions. 
-func TestFlagCounts(t *testing.T) { - fs := NewFlagSet("help test", ContinueOnError) - var flag bool - fs.BoolVar(&flag, []string{"flag1"}, false, "regular flag") - fs.BoolVar(&flag, []string{"#deprecated1"}, false, "regular flag") - fs.BoolVar(&flag, []string{"f", "flag2"}, false, "regular flag") - fs.BoolVar(&flag, []string{"#d", "#deprecated2"}, false, "regular flag") - fs.BoolVar(&flag, []string{"flag3"}, false, "regular flag") - fs.BoolVar(&flag, []string{"g", "#flag4", "-flag4"}, false, "regular flag") - - if fs.FlagCount() != 6 { - t.Fatal("FlagCount wrong. ", fs.FlagCount()) - } - if fs.FlagCountUndeprecated() != 4 { - t.Fatal("FlagCountUndeprecated wrong. ", fs.FlagCountUndeprecated()) - } - if fs.NFlag() != 0 { - t.Fatal("NFlag wrong. ", fs.NFlag()) - } - err := fs.Parse([]string{"-fd", "-g", "-flag4"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - if fs.NFlag() != 4 { - t.Fatal("NFlag wrong. ", fs.NFlag()) - } -} - -// Exercise a bug in sortFlags -func TestSortFlags(t *testing.T) { - fs := NewFlagSet("help TestSortFlags", ContinueOnError) - - var err error - - var b bool - fs.BoolVar(&b, []string{"b", "-banana"}, false, "usage") - - err = fs.Parse([]string{"--banana=true"}) - if err != nil { - t.Fatal("expected no error; got ", err) - } - - count := 0 - - fs.VisitAll(func(flag *Flag) { - count++ - if flag == nil { - t.Fatal("VisitAll should not return a nil flag") - } - }) - flagcount := fs.FlagCount() - if flagcount != count { - t.Fatalf("FlagCount (%d) != number (%d) of elements visited", flagcount, count) - } - // Make sure it's idempotent - if flagcount != fs.FlagCount() { - t.Fatalf("FlagCount (%d) != fs.FlagCount() (%d) of elements visited", flagcount, fs.FlagCount()) - } - - count = 0 - fs.Visit(func(flag *Flag) { - count++ - if flag == nil { - t.Fatal("Visit should not return a nil flag") - } - }) - nflag := fs.NFlag() - if nflag != count { - t.Fatalf("NFlag (%d) != number (%d) of elements visited", nflag, count) - } - if nflag != fs.NFlag() { - t.Fatalf("NFlag (%d) != fs.NFlag() (%d) of elements visited", nflag, fs.NFlag()) - } -} - -func TestMergeFlags(t *testing.T) { - base := NewFlagSet("base", ContinueOnError) - base.String([]string{"f"}, "", "") - - fs := NewFlagSet("test", ContinueOnError) - Merge(fs, base) - if len(fs.formal) != 1 { - t.Fatalf("FlagCount (%d) != number (1) of elements merged", len(fs.formal)) - } -} diff --git a/pkg/mount/flags.go b/pkg/mount/flags.go deleted file mode 100644 index 607dbed43a..0000000000 --- a/pkg/mount/flags.go +++ /dev/null @@ -1,149 +0,0 @@ -package mount - -import ( - "fmt" - "strings" -) - -var flags = map[string]struct { - clear bool - flag int -}{ - "defaults": {false, 0}, - "ro": {false, RDONLY}, - "rw": {true, RDONLY}, - "suid": {true, NOSUID}, - "nosuid": {false, NOSUID}, - "dev": {true, NODEV}, - "nodev": {false, NODEV}, - "exec": {true, NOEXEC}, - "noexec": {false, NOEXEC}, - "sync": {false, SYNCHRONOUS}, - "async": {true, SYNCHRONOUS}, - "dirsync": {false, DIRSYNC}, - "remount": {false, REMOUNT}, - "mand": {false, MANDLOCK}, - "nomand": {true, MANDLOCK}, - "atime": {true, NOATIME}, - "noatime": {false, NOATIME}, - "diratime": {true, NODIRATIME}, - "nodiratime": {false, NODIRATIME}, - "bind": {false, BIND}, - "rbind": {false, RBIND}, - "unbindable": {false, UNBINDABLE}, - "runbindable": {false, RUNBINDABLE}, - "private": {false, PRIVATE}, - "rprivate": {false, RPRIVATE}, - "shared": {false, SHARED}, - "rshared": {false, RSHARED}, - "slave": {false, SLAVE}, - "rslave": {false,
RSLAVE}, - "relatime": {false, RELATIME}, - "norelatime": {true, RELATIME}, - "strictatime": {false, STRICTATIME}, - "nostrictatime": {true, STRICTATIME}, -} - -var validFlags = map[string]bool{ - "": true, - "size": true, - "mode": true, - "uid": true, - "gid": true, - "nr_inodes": true, - "nr_blocks": true, - "mpol": true, -} - -var propagationFlags = map[string]bool{ - "bind": true, - "rbind": true, - "unbindable": true, - "runbindable": true, - "private": true, - "rprivate": true, - "shared": true, - "rshared": true, - "slave": true, - "rslave": true, -} - -// MergeTmpfsOptions merges mount options to make sure there are no duplicates. -func MergeTmpfsOptions(options []string) ([]string, error) { - // We use collision maps to remove duplicates. - // For a flag, the key is the flag value (the key for a propagation flag is -1) - // For data=value, the key is the data - flagCollisions := map[int]bool{} - dataCollisions := map[string]bool{} - - var newOptions []string - // We process in reverse order - for i := len(options) - 1; i >= 0; i-- { - option := options[i] - if option == "defaults" { - continue - } - if f, ok := flags[option]; ok && f.flag != 0 { - // There is only one propagation mode - key := f.flag - if propagationFlags[option] { - key = -1 - } - // Check to see if there is a collision for the flag - if !flagCollisions[key] { - // We prepend the option and add to the collision map - newOptions = append([]string{option}, newOptions...) - flagCollisions[key] = true - } - continue - } - opt := strings.SplitN(option, "=", 2) - if len(opt) != 2 || !validFlags[opt[0]] { - return nil, fmt.Errorf("Invalid tmpfs option %q", option) - } - if !dataCollisions[opt[0]] { - // We prepend the option and add to the collision map - newOptions = append([]string{option}, newOptions...) - dataCollisions[opt[0]] = true - } - } - - return newOptions, nil -} - -// parseOptions parses fstab-style mount options into mount() flags -// and device-specific data -func parseOptions(options string) (int, string) { - var ( - flag int - data []string - ) - - for _, o := range strings.Split(options, ",") { - // If the option does not exist in the flags table or the flag - // is not supported on the platform, - // then it is a data value for a specific fs type - if f, exists := flags[o]; exists && f.flag != 0 { - if f.clear { - flag &= ^f.flag - } else { - flag |= f.flag - } - } else { - data = append(data, o) - } - } - return flag, strings.Join(data, ",") -} - -// ParseTmpfsOptions parses fstab-style mount options into flags and data -func ParseTmpfsOptions(options string) (int, string, error) { - flags, data := parseOptions(options) - for _, o := range strings.Split(data, ",") { - opt := strings.SplitN(o, "=", 2) - if !validFlags[opt[0]] { - return 0, "", fmt.Errorf("Invalid tmpfs option %q", o) - } - } - return flags, data, nil -} diff --git a/pkg/mount/flags_freebsd.go b/pkg/mount/flags_freebsd.go deleted file mode 100644 index f166cb2f77..0000000000 --- a/pkg/mount/flags_freebsd.go +++ /dev/null @@ -1,48 +0,0 @@ -// +build freebsd,cgo - -package mount - -/* -#include <sys/mount.h> -*/ -import "C" - -const ( - // RDONLY will mount the filesystem as read-only. - RDONLY = C.MNT_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = C.MNT_NOSUID - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = C.MNT_NOEXEC - - // SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
- SYNCHRONOUS = C.MNT_SYNCHRONOUS - - // NOATIME will not update the file access time when reading from a file. - NOATIME = C.MNT_NOATIME -) - -// These flags are unsupported. -const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NODEV = 0 - NODIRATIME = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIVE = 0 - RELATIME = 0 - REMOUNT = 0 - STRICTATIME = 0 -) diff --git a/pkg/mount/flags_linux.go b/pkg/mount/flags_linux.go deleted file mode 100644 index dc696dce90..0000000000 --- a/pkg/mount/flags_linux.go +++ /dev/null @@ -1,85 +0,0 @@ -package mount - -import ( - "syscall" -) - -const ( - // RDONLY will mount the file system read-only. - RDONLY = syscall.MS_RDONLY - - // NOSUID will not allow set-user-identifier or set-group-identifier bits to - // take effect. - NOSUID = syscall.MS_NOSUID - - // NODEV will not interpret character or block special devices on the file - // system. - NODEV = syscall.MS_NODEV - - // NOEXEC will not allow execution of any binaries on the mounted file system. - NOEXEC = syscall.MS_NOEXEC - - // SYNCHRONOUS will allow I/O to the file system to be done synchronously. - SYNCHRONOUS = syscall.MS_SYNCHRONOUS - - // DIRSYNC will force all directory updates within the file system to be done - // synchronously. This affects the following system calls: create, link, - // unlink, symlink, mkdir, rmdir, mknod and rename. - DIRSYNC = syscall.MS_DIRSYNC - - // REMOUNT will attempt to remount an already-mounted file system. This is - // commonly used to change the mount flags for a file system, especially to - // make a readonly file system writeable. It does not change device or mount - // point. - REMOUNT = syscall.MS_REMOUNT - - // MANDLOCK will force mandatory locks on a filesystem. - MANDLOCK = syscall.MS_MANDLOCK - - // NOATIME will not update the file access time when reading from a file. - NOATIME = syscall.MS_NOATIME - - // NODIRATIME will not update the directory access time. - NODIRATIME = syscall.MS_NODIRATIME - - // BIND remounts a subtree somewhere else. - BIND = syscall.MS_BIND - - // RBIND remounts a subtree and all possible submounts somewhere else. - RBIND = syscall.MS_BIND | syscall.MS_REC - - // UNBINDABLE creates a mount which cannot be cloned through a bind operation. - UNBINDABLE = syscall.MS_UNBINDABLE - - // RUNBINDABLE marks the entire mount tree as UNBINDABLE. - RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC - - // PRIVATE creates a mount which carries no propagation abilities. - PRIVATE = syscall.MS_PRIVATE - - // RPRIVATE marks the entire mount tree as PRIVATE. - RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC - - // SLAVE creates a mount which receives propagation from its master, but not - // vice versa. - SLAVE = syscall.MS_SLAVE - - // RSLAVE marks the entire mount tree as SLAVE. - RSLAVE = syscall.MS_SLAVE | syscall.MS_REC - - // SHARED creates a mount which provides the ability to create mirrors of - // that mount such that mounts and unmounts within any of the mirrors - // propagate to the other mirrors. - SHARED = syscall.MS_SHARED - - // RSHARED marks the entire mount tree as SHARED. - RSHARED = syscall.MS_SHARED | syscall.MS_REC - - // RELATIME updates inode access times relative to modify or change time. - RELATIME = syscall.MS_RELATIME - - // STRICTATIME allows to explicitly request full atime updates. This makes - // it possible for the kernel to default to relatime or noatime but still - // allow userspace to override it. 
- STRICTATIME = syscall.MS_STRICTATIME -) diff --git a/pkg/mount/flags_unsupported.go b/pkg/mount/flags_unsupported.go deleted file mode 100644 index 5564f7b3cd..0000000000 --- a/pkg/mount/flags_unsupported.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !linux,!freebsd freebsd,!cgo solaris,!cgo - -package mount - -// These flags are unsupported. -const ( - BIND = 0 - DIRSYNC = 0 - MANDLOCK = 0 - NOATIME = 0 - NODEV = 0 - NODIRATIME = 0 - NOEXEC = 0 - NOSUID = 0 - UNBINDABLE = 0 - RUNBINDABLE = 0 - PRIVATE = 0 - RPRIVATE = 0 - SHARED = 0 - RSHARED = 0 - SLAVE = 0 - RSLAVE = 0 - RBIND = 0 - RELATIME = 0 - RELATIVE = 0 - REMOUNT = 0 - STRICTATIME = 0 - SYNCHRONOUS = 0 - RDONLY = 0 -) diff --git a/pkg/mount/mount.go b/pkg/mount/mount.go deleted file mode 100644 index 66ac4bf472..0000000000 --- a/pkg/mount/mount.go +++ /dev/null @@ -1,74 +0,0 @@ -package mount - -import ( - "time" -) - -// GetMounts retrieves a list of mounts for the current running process. -func GetMounts() ([]*Info, error) { - return parseMountTable() -} - -// Mounted determines if a specified mountpoint has been mounted. -// On Linux it looks at /proc/self/mountinfo and on Solaris at mnttab. -func Mounted(mountpoint string) (bool, error) { - entries, err := parseMountTable() - if err != nil { - return false, err - } - - // Search the table for the mountpoint - for _, e := range entries { - if e.Mountpoint == mountpoint { - return true, nil - } - } - return false, nil -} - -// Mount will mount the filesystem according to the specified configuration, on the -// condition that the target path is *not* already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func Mount(device, target, mType, options string) error { - flag, _ := parseOptions(options) - if flag&REMOUNT != REMOUNT { - if mounted, err := Mounted(target); err != nil || mounted { - return err - } - } - return ForceMount(device, target, mType, options) -} - -// ForceMount will mount a filesystem according to the specified configuration, -// *regardless* of whether the target path is already mounted. Options must be -// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See -// flags.go for supported option flags. -func ForceMount(device, target, mType, options string) error { - flag, data := parseOptions(options) - if err := mount(device, target, mType, uintptr(flag), data); err != nil { - return err - } - return nil -} - -// Unmount will unmount the target filesystem, so long as it is mounted. -func Unmount(target string) error { - if mounted, err := Mounted(target); err != nil || !mounted { - return err - } - return ForceUnmount(target) -} - -// ForceUnmount will force an unmount of the target filesystem, regardless of -// whether it is mounted or not.
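To make the mount API above concrete, a hypothetical bind-mount round trip; the paths are invented, and running this requires root privileges on Linux:

    package main

    import (
        "log"

        "github.com/docker/docker/pkg/mount"
    )

    func main() {
        // Bind-mount /src onto /dst read-only, verify it, then undo it.
        if err := mount.Mount("/src", "/dst", "none", "bind,ro"); err != nil {
            log.Fatal(err)
        }
        mounted, err := mount.Mounted("/dst")
        if err != nil || !mounted {
            log.Fatalf("expected /dst to be mounted: %v", err)
        }
        if err := mount.Unmount("/dst"); err != nil {
            log.Fatal(err)
        }
    }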
-func ForceUnmount(target string) (err error) { - // Simple retry logic for unmount - for i := 0; i < 10; i++ { - if err = unmount(target, 0); err == nil { - return nil - } - time.Sleep(100 * time.Millisecond) - } - return -} diff --git a/pkg/mount/mount_unix_test.go b/pkg/mount/mount_unix_test.go deleted file mode 100644 index 90fa348b22..0000000000 --- a/pkg/mount/mount_unix_test.go +++ /dev/null @@ -1,162 +0,0 @@ -// +build !windows - -package mount - -import ( - "os" - "path" - "testing" -) - -func TestMountOptionsParsing(t *testing.T) { - options := "noatime,ro,size=10k" - - flag, data := parseOptions(options) - - if data != "size=10k" { - t.Fatalf("Expected size=10 got %s", data) - } - - expectedFlag := NOATIME | RDONLY - - if flag != expectedFlag { - t.Fatalf("Expected %d got %d", expectedFlag, flag) - } -} - -func TestMounted(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - sourcePath = path.Join(sourceDir, "file.txt") - targetPath = path.Join(targetDir, "file.txt") - ) - - os.Mkdir(sourceDir, 0777) - os.Mkdir(targetDir, 0777) - - f, err := os.Create(sourcePath) - if err != nil { - t.Fatal(err) - } - f.WriteString("hello") - f.Close() - - f, err = os.Create(targetPath) - if err != nil { - t.Fatal(err) - } - f.Close() - - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - mounted, err := Mounted(targetDir) - if err != nil { - t.Fatal(err) - } - if !mounted { - t.Fatalf("Expected %s to be mounted", targetDir) - } - if _, err := os.Stat(targetDir); err != nil { - t.Fatal(err) - } -} - -func TestMountReadonly(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - sourcePath = path.Join(sourceDir, "file.txt") - targetPath = path.Join(targetDir, "file.txt") - ) - - os.Mkdir(sourceDir, 0777) - os.Mkdir(targetDir, 0777) - - f, err := os.Create(sourcePath) - if err != nil { - t.Fatal(err) - } - f.WriteString("hello") - f.Close() - - f, err = os.Create(targetPath) - if err != nil { - t.Fatal(err) - } - f.Close() - - if err := Mount(sourceDir, targetDir, "none", "bind,ro"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - f, err = os.OpenFile(targetPath, os.O_RDWR, 0777) - if err == nil { - t.Fatal("Should not be able to open a ro file as rw") - } -} - -func TestGetMounts(t *testing.T) { - mounts, err := GetMounts() - if err != nil { - t.Fatal(err) - } - - root := false - for _, entry := range mounts { - if entry.Mountpoint == "/" { - root = true - } - } - - if !root { - t.Fatal("/ should be mounted at least") - } -} - -func TestMergeTmpfsOptions(t *testing.T) { - options := []string{"noatime", "ro", "size=10k", "defaults", "atime", "defaults", "rw", "rprivate", "size=1024k", "slave"} - expected := []string{"atime", "rw", "size=1024k", "slave"} - merged, err := MergeTmpfsOptions(options) - if err != nil { - t.Fatal(err) - } - if len(expected) != len(merged) { - t.Fatalf("Expected %s got %s", expected, merged) - } - for index := range merged { - if merged[index] != expected[index] { - t.Fatalf("Expected %s 
for the %dth option, got %s", expected, index, merged) - } - } - - options = []string{"noatime", "ro", "size=10k", "atime", "rw", "rprivate", "size=1024k", "slave", "size"} - _, err = MergeTmpfsOptions(options) - if err == nil { - t.Fatal("Expected error got nil") - } -} diff --git a/pkg/mount/mounter_freebsd.go b/pkg/mount/mounter_freebsd.go deleted file mode 100644 index bb870e6f59..0000000000 --- a/pkg/mount/mounter_freebsd.go +++ /dev/null @@ -1,59 +0,0 @@ -package mount - -/* -#include <errno.h> -#include <stdlib.h> -#include <string.h> -#include <sys/_iovec.h> -#include <sys/mount.h> -#include <sys/param.h> -*/ -import "C" - -import ( - "fmt" - "strings" - "syscall" - "unsafe" -) - -func allocateIOVecs(options []string) []C.struct_iovec { - out := make([]C.struct_iovec, len(options)) - for i, option := range options { - out[i].iov_base = unsafe.Pointer(C.CString(option)) - out[i].iov_len = C.size_t(len(option) + 1) - } - return out -} - -func mount(device, target, mType string, flag uintptr, data string) error { - isNullFS := false - - xs := strings.Split(data, ",") - for _, x := range xs { - if x == "bind" { - isNullFS = true - } - } - - options := []string{"fspath", target} - if isNullFS { - options = append(options, "fstype", "nullfs", "target", device) - } else { - options = append(options, "fstype", mType, "from", device) - } - rawOptions := allocateIOVecs(options) - for _, rawOption := range rawOptions { - defer C.free(rawOption.iov_base) - } - - if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 { - reason := C.GoString(C.strerror(*C.__error())) - return fmt.Errorf("Failed to call nmount: %s", reason) - } - return nil -} - -func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) -} diff --git a/pkg/mount/mounter_linux.go b/pkg/mount/mounter_linux.go deleted file mode 100644 index dd4280c777..0000000000 --- a/pkg/mount/mounter_linux.go +++ /dev/null @@ -1,21 +0,0 @@ -package mount - -import ( - "syscall" -) - -func mount(device, target, mType string, flag uintptr, data string) error { - if err := syscall.Mount(device, target, mType, flag, data); err != nil { - return err - } - - // If we have a bind mount or remount, remount...
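
The trailing comment above marks the subtle part of mounter_linux.go: the kernel ignores per-mount flags such as MS_RDONLY on the initial MS_BIND call, so the function issues a second Mount with MS_REMOUNT added to apply them. A standalone sketch of that two-step, with hypothetical paths; not part of the removed sources:

    package main

    import (
        "log"
        "syscall"
    )

    func main() {
        src, dst := "/srv/data", "/mnt/data-ro" // hypothetical; both directories must exist
        // Pass 1: create the bind mount. Per-mount flags like MS_RDONLY are
        // silently ignored on this call.
        if err := syscall.Mount(src, dst, "", syscall.MS_BIND|syscall.MS_RDONLY, ""); err != nil {
            log.Fatal(err)
        }
        // Pass 2: remount the bind read-only, keeping MS_BIND in the flag set.
        if err := syscall.Mount(src, dst, "", syscall.MS_BIND|syscall.MS_REMOUNT|syscall.MS_RDONLY, ""); err != nil {
            log.Fatal(err)
        }
    }
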
- if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY { - return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data) - } - return nil -} - -func unmount(target string, flag int) error { - return syscall.Unmount(target, flag) -} diff --git a/pkg/mount/mounter_solaris.go b/pkg/mount/mounter_solaris.go deleted file mode 100644 index c684aa81fc..0000000000 --- a/pkg/mount/mounter_solaris.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build solaris,cgo - -package mount - -import ( - "golang.org/x/sys/unix" - "unsafe" -) - -// #include <stdlib.h> -// #include <stdio.h> -// #include <sys/mount.h> -// int Mount(const char *spec, const char *dir, int mflag, -// char *fstype, char *dataptr, int datalen, char *optptr, int optlen) { -// return mount(spec, dir, mflag, fstype, dataptr, datalen, optptr, optlen); -// } -import "C" - -func mount(device, target, mType string, flag uintptr, data string) error { - spec := C.CString(device) - dir := C.CString(target) - fstype := C.CString(mType) - _, err := C.Mount(spec, dir, C.int(flag), fstype, nil, 0, nil, 0) - C.free(unsafe.Pointer(spec)) - C.free(unsafe.Pointer(dir)) - C.free(unsafe.Pointer(fstype)) - return err -} - -func unmount(target string, flag int) error { - err := unix.Unmount(target, flag) - return err -} diff --git a/pkg/mount/mounter_unsupported.go b/pkg/mount/mounter_unsupported.go deleted file mode 100644 index a2a3bb457f..0000000000 --- a/pkg/mount/mounter_unsupported.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo - -package mount - -func mount(device, target, mType string, flag uintptr, data string) error { - panic("Not implemented") -} - -func unmount(target string, flag int) error { - panic("Not implemented") -} diff --git a/pkg/mount/mountinfo.go b/pkg/mount/mountinfo.go deleted file mode 100644 index e3fc3535e9..0000000000 --- a/pkg/mount/mountinfo.go +++ /dev/null @@ -1,40 +0,0 @@ -package mount - -// Info reveals information about a particular mounted filesystem. This -// struct is populated from the content in the /proc/<pid>/mountinfo file. -type Info struct { - // ID is a unique identifier of the mount (may be reused after umount). - ID int - - // Parent indicates the ID of the mount parent (or of self for the top of the - // mount tree). - Parent int - - // Major indicates one half of the device ID which identifies the device class. - Major int - - // Minor indicates one half of the device ID which identifies a specific - // instance of device. - Minor int - - // Root of the mount within the filesystem. - Root string - - // Mountpoint indicates the mount point relative to the process's root. - Mountpoint string - - // Opts represents mount-specific options. - Opts string - - // Optional represents optional fields. - Optional string - - // Fstype indicates the type of filesystem, such as EXT3. - Fstype string - - // Source indicates filesystem specific information or "none". - Source string - - // VfsOpts represents per super block options. - VfsOpts string -} diff --git a/pkg/mount/mountinfo_freebsd.go b/pkg/mount/mountinfo_freebsd.go deleted file mode 100644 index 4f32edcd90..0000000000 --- a/pkg/mount/mountinfo_freebsd.go +++ /dev/null @@ -1,41 +0,0 @@ -package mount - -/* -#include <sys/param.h> -#include <sys/ucred.h> -#include <sys/mount.h> -*/ -import "C" - -import ( - "fmt" - "reflect" - "unsafe" -) - -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts.
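
The Info struct defined in mountinfo.go above is the package's portable view of one mount-table entry; the per-platform parseMountTable implementations fill it in (getmntinfo on FreeBSD below, /proc/self/mountinfo on Linux). A short sketch of consuming it through GetMounts, again assuming the github.com/docker/docker/pkg/mount import path; illustration only:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/docker/pkg/mount"
    )

    func main() {
        infos, err := mount.GetMounts()
        if err != nil {
            log.Fatal(err)
        }
        for _, m := range infos {
            // Optional carries propagation tags such as "shared:1" or "master:2";
            // it is empty for private mounts.
            fmt.Printf("%-30s %-8s opts=%s optional=%s\n", m.Mountpoint, m.Fstype, m.Opts, m.Optional)
        }
    }
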
-func parseMountTable() ([]*Info, error) { - var rawEntries *C.struct_statfs - - count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT)) - if count == 0 { - return nil, fmt.Errorf("Failed to call getmntinfo") - } - - var entries []C.struct_statfs - header := (*reflect.SliceHeader)(unsafe.Pointer(&entries)) - header.Cap = count - header.Len = count - header.Data = uintptr(unsafe.Pointer(rawEntries)) - - var out []*Info - for _, entry := range entries { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0]) - mountinfo.Source = C.GoString(&entry.f_mntfromname[0]) - mountinfo.Fstype = C.GoString(&entry.f_fstypename[0]) - out = append(out, &mountinfo) - } - return out, nil -} diff --git a/pkg/mount/mountinfo_linux.go b/pkg/mount/mountinfo_linux.go deleted file mode 100644 index be69fee1d7..0000000000 --- a/pkg/mount/mountinfo_linux.go +++ /dev/null @@ -1,95 +0,0 @@ -// +build linux - -package mount - -import ( - "bufio" - "fmt" - "io" - "os" - "strings" -) - -const ( - /* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue - (1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11) - - (1) mount ID: unique identifier of the mount (may be reused after umount) - (2) parent ID: ID of parent (or of self for the top of the mount tree) - (3) major:minor: value of st_dev for files on filesystem - (4) root: root of the mount within the filesystem - (5) mount point: mount point relative to the process's root - (6) mount options: per mount options - (7) optional fields: zero or more fields of the form "tag[:value]" - (8) separator: marks the end of the optional fields - (9) filesystem type: name of filesystem of the form "type[.subtype]" - (10) mount source: filesystem specific information or "none" - (11) super options: per super block options*/ - mountinfoFormat = "%d %d %d:%d %s %s %s %s" -) - -// Parse /proc/self/mountinfo because comparing Dev and ino does not work from -// bind mounts -func parseMountTable() ([]*Info, error) { - f, err := os.Open("/proc/self/mountinfo") - if err != nil { - return nil, err - } - defer f.Close() - - return parseInfoFile(f) -} - -func parseInfoFile(r io.Reader) ([]*Info, error) { - var ( - s = bufio.NewScanner(r) - out = []*Info{} - ) - - for s.Scan() { - if err := s.Err(); err != nil { - return nil, err - } - - var ( - p = &Info{} - text = s.Text() - optionalFields string - ) - - if _, err := fmt.Sscanf(text, mountinfoFormat, - &p.ID, &p.Parent, &p.Major, &p.Minor, - &p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil { - return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err) - } - // Safe as mountinfo encodes mountpoints with spaces as \040. - index := strings.Index(text, " - ") - postSeparatorFields := strings.Fields(text[index+3:]) - if len(postSeparatorFields) < 3 { - return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text) - } - - if optionalFields != "-" { - p.Optional = optionalFields - } - - p.Fstype = postSeparatorFields[0] - p.Source = postSeparatorFields[1] - p.VfsOpts = strings.Join(postSeparatorFields[2:], " ") - out = append(out, p) - } - return out, nil -} - -// PidMountInfo collects the mounts for a specific process ID. If the process -// ID is unknown, it is better to use `GetMounts` which will inspect -// "/proc/self/mountinfo" instead. 
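
parseInfoFile above parses each line in two phases: fmt.Sscanf extracts the eight fixed-position fields, and everything after the " - " separator is split on whitespace into filesystem type, mount source, and super-block options. The same technique in isolation, using the sample line from the file's own format comment; illustration only:

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        // Sample line from the format comment in mountinfo_linux.go above.
        line := "36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue"
        var (
            id, parent, major, minor         int
            root, mountpoint, opts, optional string
        )
        // Phase 1: the eight fixed-position fields, as parseInfoFile scans them.
        if _, err := fmt.Sscanf(line, "%d %d %d:%d %s %s %s %s",
            &id, &parent, &major, &minor, &root, &mountpoint, &opts, &optional); err != nil {
            panic(err)
        }
        // Phase 2: everything past the " - " separator, split on whitespace.
        post := strings.Fields(line[strings.Index(line, " - ")+3:])
        fmt.Println(id, parent, major, minor, root) // 36 35 98 0 /mnt1
        fmt.Println(mountpoint, opts, optional)     // /mnt2 rw,noatime master:1
        fmt.Println(post[0], post[1], post[2])      // ext3 /dev/root rw,errors=continue
    }
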
-func PidMountInfo(pid int) ([]*Info, error) { - f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid)) - if err != nil { - return nil, err - } - defer f.Close() - - return parseInfoFile(f) -} diff --git a/pkg/mount/mountinfo_linux_test.go b/pkg/mount/mountinfo_linux_test.go deleted file mode 100644 index bd100e1d49..0000000000 --- a/pkg/mount/mountinfo_linux_test.go +++ /dev/null @@ -1,476 +0,0 @@ -// +build linux - -package mount - -import ( - "bytes" - "testing" -) - -const ( - fedoraMountinfo = `15 35 0:3 / /proc rw,nosuid,nodev,noexec,relatime shared:5 - proc proc rw - 16 35 0:14 / /sys rw,nosuid,nodev,noexec,relatime shared:6 - sysfs sysfs rw,seclabel - 17 35 0:5 / /dev rw,nosuid shared:2 - devtmpfs devtmpfs rw,seclabel,size=8056484k,nr_inodes=2014121,mode=755 - 18 16 0:15 / /sys/kernel/security rw,nosuid,nodev,noexec,relatime shared:7 - securityfs securityfs rw - 19 16 0:13 / /sys/fs/selinux rw,relatime shared:8 - selinuxfs selinuxfs rw - 20 17 0:16 / /dev/shm rw,nosuid,nodev shared:3 - tmpfs tmpfs rw,seclabel - 21 17 0:10 / /dev/pts rw,nosuid,noexec,relatime shared:4 - devpts devpts rw,seclabel,gid=5,mode=620,ptmxmode=000 - 22 35 0:17 / /run rw,nosuid,nodev shared:21 - tmpfs tmpfs rw,seclabel,mode=755 - 23 16 0:18 / /sys/fs/cgroup rw,nosuid,nodev,noexec shared:9 - tmpfs tmpfs rw,seclabel,mode=755 - 24 23 0:19 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime shared:10 - cgroup cgroup rw,xattr,release_agent=/usr/lib/systemd/systemd-cgroups-agent,name=systemd - 25 16 0:20 / /sys/fs/pstore rw,nosuid,nodev,noexec,relatime shared:20 - pstore pstore rw - 26 23 0:21 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime shared:11 - cgroup cgroup rw,cpuset,clone_children - 27 23 0:22 / /sys/fs/cgroup/cpu,cpuacct rw,nosuid,nodev,noexec,relatime shared:12 - cgroup cgroup rw,cpuacct,cpu,clone_children - 28 23 0:23 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime shared:13 - cgroup cgroup rw,memory,clone_children - 29 23 0:24 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime shared:14 - cgroup cgroup rw,devices,clone_children - 30 23 0:25 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime shared:15 - cgroup cgroup rw,freezer,clone_children - 31 23 0:26 / /sys/fs/cgroup/net_cls rw,nosuid,nodev,noexec,relatime shared:16 - cgroup cgroup rw,net_cls,clone_children - 32 23 0:27 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime shared:17 - cgroup cgroup rw,blkio,clone_children - 33 23 0:28 / /sys/fs/cgroup/perf_event rw,nosuid,nodev,noexec,relatime shared:18 - cgroup cgroup rw,perf_event,clone_children - 34 23 0:29 / /sys/fs/cgroup/hugetlb rw,nosuid,nodev,noexec,relatime shared:19 - cgroup cgroup rw,hugetlb,clone_children - 35 1 253:2 / / rw,relatime shared:1 - ext4 /dev/mapper/ssd-root--f20 rw,seclabel,data=ordered - 36 15 0:30 / /proc/sys/fs/binfmt_misc rw,relatime shared:22 - autofs systemd-1 rw,fd=38,pgrp=1,timeout=300,minproto=5,maxproto=5,direct - 37 17 0:12 / /dev/mqueue rw,relatime shared:23 - mqueue mqueue rw,seclabel - 38 35 0:31 / /tmp rw shared:24 - tmpfs tmpfs rw,seclabel - 39 17 0:32 / /dev/hugepages rw,relatime shared:25 - hugetlbfs hugetlbfs rw,seclabel - 40 16 0:7 / /sys/kernel/debug rw,relatime shared:26 - debugfs debugfs rw - 41 16 0:33 / /sys/kernel/config rw,relatime shared:27 - configfs configfs rw - 42 35 0:34 / /var/lib/nfs/rpc_pipefs rw,relatime shared:28 - rpc_pipefs sunrpc rw - 43 15 0:35 / /proc/fs/nfsd rw,relatime shared:29 - nfsd sunrpc rw - 45 35 8:17 / /boot rw,relatime shared:30 - ext4 /dev/sdb1 rw,seclabel,data=ordered - 46 35 
253:4 / /home rw,relatime shared:31 - ext4 /dev/mapper/ssd-home rw,seclabel,data=ordered - 47 35 253:5 / /var/lib/libvirt/images rw,noatime,nodiratime shared:32 - ext4 /dev/mapper/ssd-virt rw,seclabel,discard,data=ordered - 48 35 253:12 / /mnt/old rw,relatime shared:33 - ext4 /dev/mapper/HelpDeskRHEL6-FedoraRoot rw,seclabel,data=ordered - 121 22 0:36 / /run/user/1000/gvfs rw,nosuid,nodev,relatime shared:104 - fuse.gvfsd-fuse gvfsd-fuse rw,user_id=1000,group_id=1000 - 124 16 0:37 / /sys/fs/fuse/connections rw,relatime shared:107 - fusectl fusectl rw - 165 38 253:3 / /tmp/mnt rw,relatime shared:147 - ext4 /dev/mapper/ssd-root rw,seclabel,data=ordered - 167 35 253:15 / /var/lib/docker/devicemapper/mnt/aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,relatime shared:149 - ext4 /dev/mapper/docker-253:2-425882-aae4076022f0e2b80a2afbf8fc6df450c52080191fcef7fb679a73e6f073e5c2 rw,seclabel,discard,stripe=16,data=ordered - 171 35 253:16 / /var/lib/docker/devicemapper/mnt/c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,relatime shared:153 - ext4 /dev/mapper/docker-253:2-425882-c71be651f114db95180e472f7871b74fa597ee70a58ccc35cb87139ddea15373 rw,seclabel,discard,stripe=16,data=ordered - 175 35 253:17 / /var/lib/docker/devicemapper/mnt/1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,relatime shared:157 - ext4 /dev/mapper/docker-253:2-425882-1bac6ab72862d2d5626560df6197cf12036b82e258c53d981fa29adce6f06c3c rw,seclabel,discard,stripe=16,data=ordered - 179 35 253:18 / /var/lib/docker/devicemapper/mnt/d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,relatime shared:161 - ext4 /dev/mapper/docker-253:2-425882-d710a357d77158e80d5b2c55710ae07c94e76d34d21ee7bae65ce5418f739b09 rw,seclabel,discard,stripe=16,data=ordered - 183 35 253:19 / /var/lib/docker/devicemapper/mnt/6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,relatime shared:165 - ext4 /dev/mapper/docker-253:2-425882-6479f52366114d5f518db6837254baab48fab39f2ac38d5099250e9a6ceae6c7 rw,seclabel,discard,stripe=16,data=ordered - 187 35 253:20 / /var/lib/docker/devicemapper/mnt/8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,relatime shared:169 - ext4 /dev/mapper/docker-253:2-425882-8d9df91c4cca5aef49eeb2725292aab324646f723a7feab56be34c2ad08268e1 rw,seclabel,discard,stripe=16,data=ordered - 191 35 253:21 / /var/lib/docker/devicemapper/mnt/c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,relatime shared:173 - ext4 /dev/mapper/docker-253:2-425882-c8240b768603d32e920d365dc9d1dc2a6af46cd23e7ae819947f969e1b4ec661 rw,seclabel,discard,stripe=16,data=ordered - 195 35 253:22 / /var/lib/docker/devicemapper/mnt/2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,relatime shared:177 - ext4 /dev/mapper/docker-253:2-425882-2eb3a01278380bbf3ed12d86ac629eaa70a4351301ee307a5cabe7b5f3b1615f rw,seclabel,discard,stripe=16,data=ordered - 199 35 253:23 / /var/lib/docker/devicemapper/mnt/37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,relatime shared:181 - ext4 /dev/mapper/docker-253:2-425882-37a17fb7c9d9b80821235d5f2662879bd3483915f245f9b49cdaa0e38779b70b rw,seclabel,discard,stripe=16,data=ordered - 203 35 253:24 / /var/lib/docker/devicemapper/mnt/aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,relatime shared:185 - ext4 /dev/mapper/docker-253:2-425882-aea459ae930bf1de913e2f29428fd80ee678a1e962d4080019d9f9774331ee2b rw,seclabel,discard,stripe=16,data=ordered - 207 35 253:25 / 
/var/lib/docker/devicemapper/mnt/928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,relatime shared:189 - ext4 /dev/mapper/docker-253:2-425882-928ead0bc06c454bd9f269e8585aeae0a6bd697f46dc8754c2a91309bc810882 rw,seclabel,discard,stripe=16,data=ordered - 211 35 253:26 / /var/lib/docker/devicemapper/mnt/0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,relatime shared:193 - ext4 /dev/mapper/docker-253:2-425882-0f284d18481d671644706e7a7244cbcf63d590d634cc882cb8721821929d0420 rw,seclabel,discard,stripe=16,data=ordered - 215 35 253:27 / /var/lib/docker/devicemapper/mnt/d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,relatime shared:197 - ext4 /dev/mapper/docker-253:2-425882-d9dd16722ab34c38db2733e23f69e8f4803ce59658250dd63e98adff95d04919 rw,seclabel,discard,stripe=16,data=ordered - 219 35 253:28 / /var/lib/docker/devicemapper/mnt/bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,relatime shared:201 - ext4 /dev/mapper/docker-253:2-425882-bc4500479f18c2c08c21ad5282e5f826a016a386177d9874c2764751c031d634 rw,seclabel,discard,stripe=16,data=ordered - 223 35 253:29 / /var/lib/docker/devicemapper/mnt/7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,relatime shared:205 - ext4 /dev/mapper/docker-253:2-425882-7770c8b24eb3d5cc159a065910076938910d307ab2f5d94e1dc3b24c06ee2c8a rw,seclabel,discard,stripe=16,data=ordered - 227 35 253:30 / /var/lib/docker/devicemapper/mnt/c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,relatime shared:209 - ext4 /dev/mapper/docker-253:2-425882-c280cd3d0bf0aa36b478b292279671624cceafc1a67eaa920fa1082601297adf rw,seclabel,discard,stripe=16,data=ordered - 231 35 253:31 / /var/lib/docker/devicemapper/mnt/8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,relatime shared:213 - ext4 /dev/mapper/docker-253:2-425882-8b59a7d9340279f09fea67fd6ad89ddef711e9e7050eb647984f8b5ef006335f rw,seclabel,discard,stripe=16,data=ordered - 235 35 253:32 / /var/lib/docker/devicemapper/mnt/1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,relatime shared:217 - ext4 /dev/mapper/docker-253:2-425882-1a28059f29eda821578b1bb27a60cc71f76f846a551abefabce6efd0146dce9f rw,seclabel,discard,stripe=16,data=ordered - 239 35 253:33 / /var/lib/docker/devicemapper/mnt/e9aa60c60128cad1 rw,relatime shared:221 - ext4 /dev/mapper/docker-253:2-425882-e9aa60c60128cad1 rw,seclabel,discard,stripe=16,data=ordered - 243 35 253:34 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,relatime shared:225 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d-init rw,seclabel,discard,stripe=16,data=ordered - 247 35 253:35 / /var/lib/docker/devicemapper/mnt/5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,relatime shared:229 - ext4 /dev/mapper/docker-253:2-425882-5fec11304b6f4713fea7b6ccdcc1adc0a1966187f590fe25a8227428a8df275d rw,seclabel,discard,stripe=16,data=ordered - 31 21 0:23 / /DATA/foo_bla_bla rw,relatime - cifs //foo/BLA\040BLA\040BLA/ rw,sec=ntlm,cache=loose,unc=\\foo\BLA BLA BLA,username=my_login,domain=mydomain.com,uid=12345678,forceuid,gid=12345678,forcegid,addr=10.1.30.10,file_mode=0755,dir_mode=0755,nounix,rsize=61440,wsize=65536,actimeo=1` - - ubuntuMountInfo = `15 20 0:14 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -16 20 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 20 0:5 / /dev rw,relatime - devtmpfs udev 
rw,size=1015140k,nr_inodes=253785,mode=755 -18 17 0:11 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -19 20 0:15 / /run rw,nosuid,noexec,relatime - tmpfs tmpfs rw,size=205044k,mode=755 -20 1 253:0 / / rw,relatime - ext4 /dev/disk/by-label/DOROOT rw,errors=remount-ro,data=ordered -21 15 0:16 / /sys/fs/cgroup rw,relatime - tmpfs none rw,size=4k,mode=755 -22 15 0:17 / /sys/fs/fuse/connections rw,relatime - fusectl none rw -23 15 0:6 / /sys/kernel/debug rw,relatime - debugfs none rw -24 15 0:10 / /sys/kernel/security rw,relatime - securityfs none rw -25 19 0:18 / /run/lock rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=5120k -26 21 0:19 / /sys/fs/cgroup/cpuset rw,relatime - cgroup cgroup rw,cpuset,clone_children -27 19 0:20 / /run/shm rw,nosuid,nodev,relatime - tmpfs none rw -28 21 0:21 / /sys/fs/cgroup/cpu rw,relatime - cgroup cgroup rw,cpu -29 19 0:22 / /run/user rw,nosuid,nodev,noexec,relatime - tmpfs none rw,size=102400k,mode=755 -30 15 0:23 / /sys/fs/pstore rw,relatime - pstore none rw -31 21 0:24 / /sys/fs/cgroup/cpuacct rw,relatime - cgroup cgroup rw,cpuacct -32 21 0:25 / /sys/fs/cgroup/memory rw,relatime - cgroup cgroup rw,memory -33 21 0:26 / /sys/fs/cgroup/devices rw,relatime - cgroup cgroup rw,devices -34 21 0:27 / /sys/fs/cgroup/freezer rw,relatime - cgroup cgroup rw,freezer -35 21 0:28 / /sys/fs/cgroup/blkio rw,relatime - cgroup cgroup rw,blkio -36 21 0:29 / /sys/fs/cgroup/perf_event rw,relatime - cgroup cgroup rw,perf_event -37 21 0:30 / /sys/fs/cgroup/hugetlb rw,relatime - cgroup cgroup rw,hugetlb -38 21 0:31 / /sys/fs/cgroup/systemd rw,nosuid,nodev,noexec,relatime - cgroup systemd rw,name=systemd -39 20 0:32 / /var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=caafa54fdc06525 -40 20 0:33 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8-init rw,relatime - aufs none rw,si=caafa54f882b525 -41 20 0:34 / /var/lib/docker/aufs/mnt/2eed44ac7ce7c75af04f088ed6cb4ce9d164801e91d78c6db65d7ef6d572bba8 rw,relatime - aufs none rw,si=caafa54f8829525 -42 20 0:35 / /var/lib/docker/aufs/mnt/16f4d7e96dd612903f425bfe856762f291ff2e36a8ecd55a2209b7d7cd81c30b rw,relatime - aufs none rw,si=caafa54f882d525 -43 20 0:36 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e-init rw,relatime - aufs none rw,si=caafa54f882f525 -44 20 0:37 / /var/lib/docker/aufs/mnt/63ca08b75d7438a9469a5954e003f48ffede73541f6286ce1cb4d7dd4811da7e rw,relatime - aufs none rw,si=caafa54f88ba525 -45 20 0:38 / /var/lib/docker/aufs/mnt/283f35a910233c756409313be71ecd8fcfef0df57108b8d740b61b3e88860452 rw,relatime - aufs none rw,si=caafa54f88b8525 -46 20 0:39 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1-init rw,relatime - aufs none rw,si=caafa54f88be525 -47 20 0:40 / /var/lib/docker/aufs/mnt/2c6c7253d4090faa3886871fb21bd660609daeb0206588c0602007f7d0f254b1 rw,relatime - aufs none rw,si=caafa54f882c525 -48 20 0:41 / /var/lib/docker/aufs/mnt/de2b538c97d6366cc80e8658547c923ea1d042f85580df379846f36a4df7049d rw,relatime - aufs none rw,si=caafa54f85bb525 -49 20 0:42 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49-init rw,relatime - aufs none rw,si=caafa54fdc00525 -50 20 0:43 / /var/lib/docker/aufs/mnt/94a3d8ed7c27e5b0aa71eba46c736bfb2742afda038e74f2dd6035fb28415b49 rw,relatime - aufs none rw,si=caafa54fbaec525 -51 20 0:44 / 
/var/lib/docker/aufs/mnt/6ac1cace985c9fc9bea32234de8b36dba49bdd5e29a2972b327ff939d78a6274 rw,relatime - aufs none rw,si=caafa54f8e1a525 -52 20 0:45 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b-init rw,relatime - aufs none rw,si=caafa54f8e1d525 -53 20 0:46 / /var/lib/docker/aufs/mnt/dff147033e3a0ef061e1de1ad34256b523d4a8c1fa6bba71a0ab538e8628ff0b rw,relatime - aufs none rw,si=caafa54f8e1b525 -54 20 0:47 / /var/lib/docker/aufs/mnt/cabb117d997f0f93519185aea58389a9762770b7496ed0b74a3e4a083fa45902 rw,relatime - aufs none rw,si=caafa54f810a525 -55 20 0:48 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33-init rw,relatime - aufs none rw,si=caafa54f8529525 -56 20 0:49 / /var/lib/docker/aufs/mnt/e1c8a94ffaa9d532bbbdc6ef771ce8a6c2c06757806ecaf8b68e9108fec65f33 rw,relatime - aufs none rw,si=caafa54f852f525 -57 20 0:50 / /var/lib/docker/aufs/mnt/16a1526fa445b84ce84f89506d219e87fa488a814063baf045d88b02f21166b3 rw,relatime - aufs none rw,si=caafa54f9e1d525 -58 20 0:51 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f-init rw,relatime - aufs none rw,si=caafa54f854d525 -59 20 0:52 / /var/lib/docker/aufs/mnt/57b9c92e1e368fa7dbe5079f7462e917777829caae732828b003c355fe49da9f rw,relatime - aufs none rw,si=caafa54f854e525 -60 20 0:53 / /var/lib/docker/aufs/mnt/e370c3e286bea027917baa0e4d251262681a472a87056e880dfd0513516dffd9 rw,relatime - aufs none rw,si=caafa54f840a525 -61 20 0:54 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e-init rw,relatime - aufs none rw,si=caafa54f8408525 -62 20 0:55 / /var/lib/docker/aufs/mnt/6b00d3b4f32b41997ec07412b5e18204f82fbe643e7122251cdeb3582abd424e rw,relatime - aufs none rw,si=caafa54f8409525 -63 20 0:56 / /var/lib/docker/aufs/mnt/abd0b5ea5d355a67f911475e271924a5388ee60c27185fcd60d095afc4a09dc7 rw,relatime - aufs none rw,si=caafa54f9eb1525 -64 20 0:57 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2-init rw,relatime - aufs none rw,si=caafa54f85bf525 -65 20 0:58 / /var/lib/docker/aufs/mnt/336222effc3f7b89867bb39ff7792ae5412c35c749f127c29159d046b6feedd2 rw,relatime - aufs none rw,si=caafa54f85b8525 -66 20 0:59 / /var/lib/docker/aufs/mnt/912e1bf28b80a09644503924a8a1a4fb8ed10b808ca847bda27a369919aa52fa rw,relatime - aufs none rw,si=caafa54fbaea525 -67 20 0:60 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576-init rw,relatime - aufs none rw,si=caafa54f8472525 -68 20 0:61 / /var/lib/docker/aufs/mnt/386f722875013b4a875118367abc783fc6617a3cb7cf08b2b4dcf550b4b9c576 rw,relatime - aufs none rw,si=caafa54f8474525 -69 20 0:62 / /var/lib/docker/aufs/mnt/5aaebb79ef3097dfca377889aeb61a0c9d5e3795117d2b08d0751473c671dfb2 rw,relatime - aufs none rw,si=caafa54f8c5e525 -70 20 0:63 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2-init rw,relatime - aufs none rw,si=caafa54f8c3b525 -71 20 0:64 / /var/lib/docker/aufs/mnt/5ba3e493279d01277d583600b81c7c079e691b73c3a2bdea8e4b12a35a418be2 rw,relatime - aufs none rw,si=caafa54f8c3d525 -72 20 0:65 / /var/lib/docker/aufs/mnt/2777f0763da4de93f8bebbe1595cc77f739806a158657b033eca06f827b6028a rw,relatime - aufs none rw,si=caafa54f8c3e525 -73 20 0:66 / /var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e-init rw,relatime - aufs none rw,si=caafa54f8c39525 -74 20 0:67 / 
/var/lib/docker/aufs/mnt/5d7445562acf73c6f0ae34c3dd0921d7457de1ba92a587d9e06a44fa209eeb3e rw,relatime - aufs none rw,si=caafa54f854f525 -75 20 0:68 / /var/lib/docker/aufs/mnt/06400b526ec18b66639c96efc41a84f4ae0b117cb28dafd56be420651b4084a0 rw,relatime - aufs none rw,si=caafa54f840b525 -76 20 0:69 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785-init rw,relatime - aufs none rw,si=caafa54fdddf525 -77 20 0:70 / /var/lib/docker/aufs/mnt/e051d45ec42d8e3e1cc57bb39871a40de486dc123522e9c067fbf2ca6a357785 rw,relatime - aufs none rw,si=caafa54f854b525 -78 20 0:71 / /var/lib/docker/aufs/mnt/1ff414fa93fd61ec81b0ab7b365a841ff6545accae03cceac702833aaeaf718f rw,relatime - aufs none rw,si=caafa54f8d85525 -79 20 0:72 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8-init rw,relatime - aufs none rw,si=caafa54f8da3525 -80 20 0:73 / /var/lib/docker/aufs/mnt/c661b2f871dd5360e46a2aebf8f970f6d39a2ff64e06979aa0361227c88128b8 rw,relatime - aufs none rw,si=caafa54f8da2525 -81 20 0:74 / /var/lib/docker/aufs/mnt/b68b1d4fe4d30016c552398e78b379a39f651661d8e1fa5f2460c24a5e723420 rw,relatime - aufs none rw,si=caafa54f8d81525 -82 20 0:75 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739-init rw,relatime - aufs none rw,si=caafa54f8da1525 -83 20 0:76 / /var/lib/docker/aufs/mnt/c5c5979c936cd0153a4c626fa9d69ce4fce7d924cc74fa68b025d2f585031739 rw,relatime - aufs none rw,si=caafa54f8da0525 -84 20 0:77 / /var/lib/docker/aufs/mnt/53e10b0329afc0e0d3322d31efaed4064139dc7027fe6ae445cffd7104bcc94f rw,relatime - aufs none rw,si=caafa54f8c35525 -85 20 0:78 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494-init rw,relatime - aufs none rw,si=caafa54f8db8525 -86 20 0:79 / /var/lib/docker/aufs/mnt/3bfafd09ff2603e2165efacc2215c1f51afabba6c42d04a68cc2df0e8cc31494 rw,relatime - aufs none rw,si=caafa54f8dba525 -87 20 0:80 / /var/lib/docker/aufs/mnt/90fdd2c03eeaf65311f88f4200e18aef6d2772482712d9aea01cd793c64781b5 rw,relatime - aufs none rw,si=caafa54f8315525 -88 20 0:81 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f-init rw,relatime - aufs none rw,si=caafa54f8fc6525 -89 20 0:82 / /var/lib/docker/aufs/mnt/7bdf2591c06c154ceb23f5e74b1d03b18fbf6fe96e35fbf539b82d446922442f rw,relatime - aufs none rw,si=caafa54f8468525 -90 20 0:83 / /var/lib/docker/aufs/mnt/8cf9a993f50f3305abad3da268c0fc44ff78a1e7bba595ef9de963497496c3f9 rw,relatime - aufs none rw,si=caafa54f8c59525 -91 20 0:84 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173-init rw,relatime - aufs none rw,si=caafa54f846a525 -92 20 0:85 / /var/lib/docker/aufs/mnt/ecc896fd74b21840a8d35e8316b92a08b1b9c83d722a12acff847e9f0ff17173 rw,relatime - aufs none rw,si=caafa54f846b525 -93 20 0:86 / /var/lib/docker/aufs/mnt/d8c8288ec920439a48b5796bab5883ee47a019240da65e8d8f33400c31bac5df rw,relatime - aufs none rw,si=caafa54f8dbf525 -94 20 0:87 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6-init rw,relatime - aufs none rw,si=caafa54f810f525 -95 20 0:88 / /var/lib/docker/aufs/mnt/ecba66710bcd03199b9398e46c005cd6b68d0266ec81dc8b722a29cc417997c6 rw,relatime - aufs none rw,si=caafa54fbae9525 -96 20 0:89 / /var/lib/docker/aufs/mnt/befc1c67600df449dddbe796c0d06da7caff1d2bbff64cde1f0ba82d224996b5 rw,relatime - aufs none rw,si=caafa54f8dab525 -97 20 0:90 / 
/var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562-init rw,relatime - aufs none rw,si=caafa54fdc02525 -98 20 0:91 / /var/lib/docker/aufs/mnt/c9f470e73d2742629cdc4084a1b2c1a8302914f2aa0d0ec4542371df9a050562 rw,relatime - aufs none rw,si=caafa54f9eb0525 -99 20 0:92 / /var/lib/docker/aufs/mnt/2a31f10029f04ff9d4381167a9b739609853d7220d55a56cb654779a700ee246 rw,relatime - aufs none rw,si=caafa54f8c37525 -100 20 0:93 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927-init rw,relatime - aufs none rw,si=caafa54fd173525 -101 20 0:94 / /var/lib/docker/aufs/mnt/8c4261b8e3e4b21ebba60389bd64b6261217e7e6b9fd09e201d5a7f6760f6927 rw,relatime - aufs none rw,si=caafa54f8108525 -102 20 0:95 / /var/lib/docker/aufs/mnt/eaa0f57403a3dc685268f91df3fbcd7a8423cee50e1a9ee5c3e1688d9d676bb4 rw,relatime - aufs none rw,si=caafa54f852d525 -103 20 0:96 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b-init rw,relatime - aufs none rw,si=caafa54f8d80525 -104 20 0:97 / /var/lib/docker/aufs/mnt/9cfe69a2cbffd9bfc7f396d4754f6fe5cc457ef417b277797be3762dfe955a6b rw,relatime - aufs none rw,si=caafa54f8fc3525 -105 20 0:98 / /var/lib/docker/aufs/mnt/d1b322ae17613c6adee84e709641a9244ac56675244a89a64dc0075075fcbb83 rw,relatime - aufs none rw,si=caafa54f8c58525 -106 20 0:99 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd-init rw,relatime - aufs none rw,si=caafa54f8c63525 -107 20 0:100 / /var/lib/docker/aufs/mnt/d46c2a8e9da7e91ab34fd9c192851c246a4e770a46720bda09e55c7554b9dbbd rw,relatime - aufs none rw,si=caafa54f8c67525 -108 20 0:101 / /var/lib/docker/aufs/mnt/bc9d2a264158f83a617a069bf17cbbf2a2ba453db7d3951d9dc63cc1558b1c2b rw,relatime - aufs none rw,si=caafa54f8dbe525 -109 20 0:102 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99-init rw,relatime - aufs none rw,si=caafa54f9e0d525 -110 20 0:103 / /var/lib/docker/aufs/mnt/9e6abb8d72bbeb4d5cf24b96018528015ba830ce42b4859965bd482cbd034e99 rw,relatime - aufs none rw,si=caafa54f9e1b525 -111 20 0:104 / /var/lib/docker/aufs/mnt/d4dca7b02569c732e740071e1c654d4ad282de5c41edb619af1f0aafa618be26 rw,relatime - aufs none rw,si=caafa54f8dae525 -112 20 0:105 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7-init rw,relatime - aufs none rw,si=caafa54f8c5c525 -113 20 0:106 / /var/lib/docker/aufs/mnt/fea63da40fa1c5ffbad430dde0bc64a8fc2edab09a051fff55b673c40a08f6b7 rw,relatime - aufs none rw,si=caafa54fd172525 -114 20 0:107 / /var/lib/docker/aufs/mnt/e60c57499c0b198a6734f77f660cdbbd950a5b78aa23f470ca4f0cfcc376abef rw,relatime - aufs none rw,si=caafa54909c4525 -115 20 0:108 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35-init rw,relatime - aufs none rw,si=caafa54909c3525 -116 20 0:109 / /var/lib/docker/aufs/mnt/099c78e7ccd9c8717471bb1bbfff838c0a9913321ba2f214fbeaf92c678e5b35 rw,relatime - aufs none rw,si=caafa54909c7525 -117 20 0:110 / /var/lib/docker/aufs/mnt/2997be666d58b9e71469759bcb8bd9608dad0e533a1a7570a896919ba3388825 rw,relatime - aufs none rw,si=caafa54f8557525 -118 20 0:111 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93-init rw,relatime - aufs none rw,si=caafa54c6e88525 -119 20 0:112 / /var/lib/docker/aufs/mnt/730694eff438ef20569df38dfb38a920969d7ff2170cc9aa7cb32a7ed8147a93 rw,relatime - aufs none rw,si=caafa54c6e8e525 -120 20 0:113 / 
/var/lib/docker/aufs/mnt/a672a1e2f2f051f6e19ed1dfbe80860a2d774174c49f7c476695f5dd1d5b2f67 rw,relatime - aufs none rw,si=caafa54c6e15525 -121 20 0:114 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420-init rw,relatime - aufs none rw,si=caafa54f8dad525 -122 20 0:115 / /var/lib/docker/aufs/mnt/aba3570e17859f76cf29d282d0d150659c6bd80780fdc52a465ba05245c2a420 rw,relatime - aufs none rw,si=caafa54f8d84525 -123 20 0:116 / /var/lib/docker/aufs/mnt/2abc86007aca46fb4a817a033e2a05ccacae40b78ea4b03f8ea616b9ada40e2e rw,relatime - aufs none rw,si=caafa54c6e8b525 -124 20 0:117 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374-init rw,relatime - aufs none rw,si=caafa54c6e8d525 -125 20 0:118 / /var/lib/docker/aufs/mnt/36352f27f7878e648367a135bd1ec3ed497adcb8ac13577ee892a0bd921d2374 rw,relatime - aufs none rw,si=caafa54f8c34525 -126 20 0:119 / /var/lib/docker/aufs/mnt/2f95ca1a629cea8363b829faa727dd52896d5561f2c96ddee4f697ea2fc872c2 rw,relatime - aufs none rw,si=caafa54c6e8a525 -127 20 0:120 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2-init rw,relatime - aufs none rw,si=caafa54f8e19525 -128 20 0:121 / /var/lib/docker/aufs/mnt/f108c8291654f179ef143a3e07de2b5a34adbc0b28194a0ab17742b6db9a7fb2 rw,relatime - aufs none rw,si=caafa54fa8c6525 -129 20 0:122 / /var/lib/docker/aufs/mnt/c1d04dfdf8cccb3676d5a91e84e9b0781ce40623d127d038bcfbe4c761b27401 rw,relatime - aufs none rw,si=caafa54f8c30525 -130 20 0:123 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a-init rw,relatime - aufs none rw,si=caafa54c6e1a525 -131 20 0:124 / /var/lib/docker/aufs/mnt/3f4898ffd0e1239aeebf1d1412590cdb7254207fa3883663e2c40cf772e5f05a rw,relatime - aufs none rw,si=caafa54c6e1c525 -132 20 0:125 / /var/lib/docker/aufs/mnt/5ae3b6fccb1539fc02d420e86f3e9637bef5b711fed2ca31a2f426c8f5deddbf rw,relatime - aufs none rw,si=caafa54c4fea525 -133 20 0:126 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0-init rw,relatime - aufs none rw,si=caafa54c6e1e525 -134 20 0:127 / /var/lib/docker/aufs/mnt/310bfaf80d57020f2e73b06aeffb0b9b0ca2f54895f88bf5e4d1529ccac58fe0 rw,relatime - aufs none rw,si=caafa54fa8c0525 -135 20 0:128 / /var/lib/docker/aufs/mnt/f382bd5aaccaf2d04a59089ac7cb12ec87efd769fd0c14d623358fbfd2a3f896 rw,relatime - aufs none rw,si=caafa54c4fec525 -136 20 0:129 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735-init rw,relatime - aufs none rw,si=caafa54c4fef525 -137 20 0:130 / /var/lib/docker/aufs/mnt/50d45e9bb2d779bc6362824085564c7578c231af5ae3b3da116acf7e17d00735 rw,relatime - aufs none rw,si=caafa54c4feb525 -138 20 0:131 / /var/lib/docker/aufs/mnt/a9c5ee0854dc083b6bf62b7eb1e5291aefbb10702289a446471ce73aba0d5d7d rw,relatime - aufs none rw,si=caafa54909c6525 -139 20 0:134 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0-init rw,relatime - aufs none rw,si=caafa54804fe525 -140 20 0:135 / /var/lib/docker/aufs/mnt/03a613e7bd5078819d1fd92df4e671c0127559a5e0b5a885cc8d5616875162f0 rw,relatime - aufs none rw,si=caafa54804fa525 -141 20 0:136 / /var/lib/docker/aufs/mnt/7ec3277e5c04c907051caf9c9c35889f5fcd6463e5485971b25404566830bb70 rw,relatime - aufs none rw,si=caafa54804f9525 -142 20 0:139 / /var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8-init rw,relatime - aufs none rw,si=caafa54c6ef6525 -143 20 0:140 / 
/var/lib/docker/aufs/mnt/26b5b5d71d79a5b2bfcf8bc4b2280ee829f261eb886745dd90997ed410f7e8b8 rw,relatime - aufs none rw,si=caafa54c6ef5525 -144 20 0:356 / /var/lib/docker/aufs/mnt/e6ecde9e2c18cd3c75f424c67b6d89685cfee0fc67abf2cb6bdc0867eb998026 rw,relatime - aufs none rw,si=caafa548068e525` - - gentooMountinfo = `15 1 8:6 / / rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -16 15 0:3 / /proc rw,nosuid,nodev,noexec,relatime - proc proc rw -17 15 0:14 / /run rw,nosuid,nodev,relatime - tmpfs tmpfs rw,size=3292172k,mode=755 -18 15 0:5 / /dev rw,nosuid,relatime - devtmpfs udev rw,size=10240k,nr_inodes=4106451,mode=755 -19 18 0:12 / /dev/mqueue rw,nosuid,nodev,noexec,relatime - mqueue mqueue rw -20 18 0:10 / /dev/pts rw,nosuid,noexec,relatime - devpts devpts rw,gid=5,mode=620,ptmxmode=000 -21 18 0:15 / /dev/shm rw,nosuid,nodev,noexec,relatime - tmpfs shm rw -22 15 0:16 / /sys rw,nosuid,nodev,noexec,relatime - sysfs sysfs rw -23 22 0:7 / /sys/kernel/debug rw,nosuid,nodev,noexec,relatime - debugfs debugfs rw -24 22 0:17 / /sys/fs/cgroup rw,nosuid,nodev,noexec,relatime - tmpfs cgroup_root rw,size=10240k,mode=755 -25 24 0:18 / /sys/fs/cgroup/openrc rw,nosuid,nodev,noexec,relatime - cgroup openrc rw,release_agent=/lib64/rc/sh/cgroup-release-agent.sh,name=openrc -26 24 0:19 / /sys/fs/cgroup/cpuset rw,nosuid,nodev,noexec,relatime - cgroup cpuset rw,cpuset,clone_children -27 24 0:20 / /sys/fs/cgroup/cpu rw,nosuid,nodev,noexec,relatime - cgroup cpu rw,cpu,clone_children -28 24 0:21 / /sys/fs/cgroup/cpuacct rw,nosuid,nodev,noexec,relatime - cgroup cpuacct rw,cpuacct,clone_children -29 24 0:22 / /sys/fs/cgroup/memory rw,nosuid,nodev,noexec,relatime - cgroup memory rw,memory,clone_children -30 24 0:23 / /sys/fs/cgroup/devices rw,nosuid,nodev,noexec,relatime - cgroup devices rw,devices,clone_children -31 24 0:24 / /sys/fs/cgroup/freezer rw,nosuid,nodev,noexec,relatime - cgroup freezer rw,freezer,clone_children -32 24 0:25 / /sys/fs/cgroup/blkio rw,nosuid,nodev,noexec,relatime - cgroup blkio rw,blkio,clone_children -33 15 8:1 / /boot rw,noatime,nodiratime - vfat /dev/sda1 rw,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro -34 15 8:18 / /mnt/xfs rw,noatime,nodiratime - xfs /dev/sdb2 rw,attr2,inode64,noquota -35 15 0:26 / /tmp rw,relatime - tmpfs tmpfs rw -36 16 0:27 / /proc/sys/fs/binfmt_misc rw,nosuid,nodev,noexec,relatime - binfmt_misc binfmt_misc rw -42 15 0:33 / /var/lib/nfs/rpc_pipefs rw,relatime - rpc_pipefs rpc_pipefs rw -43 16 0:34 / /proc/fs/nfsd rw,nosuid,nodev,noexec,relatime - nfsd nfsd rw -44 15 0:35 / /home/tianon/.gvfs rw,nosuid,nodev,relatime - fuse.gvfs-fuse-daemon gvfs-fuse-daemon rw,user_id=1000,group_id=1000 -68 15 0:3336 / /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd rw,relatime - aufs none rw,si=9b4a7640128db39c -86 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/config.env /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/.dockerenv rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -87 68 8:6 /etc/resolv.conf /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/resolv.conf rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -88 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hostname /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hostname rw,noatime,nodiratime - 
ext4 /dev/sda6 rw,data=ordered -89 68 8:6 /var/lib/docker/containers/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/hosts /var/lib/docker/aufs/mnt/3597a1a6d6298c1decc339ebb90aad6f7d6ba2e15af3131b1f85e7ee4787a0cd/etc/hosts rw,noatime,nodiratime - ext4 /dev/sda6 rw,data=ordered -38 15 0:3384 / /var/lib/docker/aufs/mnt/0292005a9292401bb5197657f2b682d97d8edcb3b72b5e390d2a680139985b55 rw,relatime - aufs none rw,si=9b4a7642b584939c -39 15 0:3385 / /var/lib/docker/aufs/mnt/59db98c889de5f71b70cfb82c40cbe47b64332f0f56042a2987a9e5df6e5e3aa rw,relatime - aufs none rw,si=9b4a7642b584e39c -40 15 0:3386 / /var/lib/docker/aufs/mnt/0545f0f2b6548eb9601d08f35a08f5a0a385407d36027a28f58e06e9f61e0278 rw,relatime - aufs none rw,si=9b4a7642b584b39c -41 15 0:3387 / /var/lib/docker/aufs/mnt/d882cfa16d1aa8fe0331a36e79be3d80b151e49f24fc39a39c3fed1735d5feb5 rw,relatime - aufs none rw,si=9b4a76453040039c -45 15 0:3388 / /var/lib/docker/aufs/mnt/055ca3befcb1626e74f5344b3398724ff05c0de0e20021683d04305c9e70a3f6 rw,relatime - aufs none rw,si=9b4a76453040739c -46 15 0:3389 / /var/lib/docker/aufs/mnt/b899e4567a351745d4285e7f1c18fdece75d877deb3041981cd290be348b7aa6 rw,relatime - aufs none rw,si=9b4a7647def4039c -47 15 0:3390 / /var/lib/docker/aufs/mnt/067ca040292c58954c5129f953219accfae0d40faca26b4d05e76ca76a998f16 rw,relatime - aufs none rw,si=9b4a7647def4239c -48 15 0:3391 / /var/lib/docker/aufs/mnt/8c995e7cb6e5082742daeea720e340b021d288d25d92e0412c03d200df308a11 rw,relatime - aufs none rw,si=9b4a764479c1639c -49 15 0:3392 / /var/lib/docker/aufs/mnt/07cc54dfae5b45300efdacdd53cc72c01b9044956a86ce7bff42d087e426096d rw,relatime - aufs none rw,si=9b4a764479c1739c -50 15 0:3393 / /var/lib/docker/aufs/mnt/0a9c95cf4c589c05b06baa79150b0cc1d8e7102759fe3ce4afaabb8247ca4f85 rw,relatime - aufs none rw,si=9b4a7644059c839c -51 15 0:3394 / /var/lib/docker/aufs/mnt/468fa98cececcf4e226e8370f18f4f848d63faf287fb8321a07f73086441a3a0 rw,relatime - aufs none rw,si=9b4a7644059ca39c -52 15 0:3395 / /var/lib/docker/aufs/mnt/0b826192231c5ce066fffb5beff4397337b5fc19a377aa7c6282c7c0ce7f111f rw,relatime - aufs none rw,si=9b4a764479c1339c -53 15 0:3396 / /var/lib/docker/aufs/mnt/93b8ba1b772fbe79709b909c43ea4b2c30d712e53548f467db1ffdc7a384f196 rw,relatime - aufs none rw,si=9b4a7640798a739c -54 15 0:3397 / /var/lib/docker/aufs/mnt/0c0d0acfb506859b12ef18cdfef9ebed0b43a611482403564224bde9149d373c rw,relatime - aufs none rw,si=9b4a7640798a039c -55 15 0:3398 / /var/lib/docker/aufs/mnt/33648c39ab6c7c74af0243d6d6a81b052e9e25ad1e04b19892eb2dde013e358b rw,relatime - aufs none rw,si=9b4a7644b439b39c -56 15 0:3399 / /var/lib/docker/aufs/mnt/0c12bea97a1c958a3c739fb148536c1c89351d48e885ecda8f0499b5cc44407e rw,relatime - aufs none rw,si=9b4a7640798a239c -57 15 0:3400 / /var/lib/docker/aufs/mnt/ed443988ce125f172d7512e84a4de2627405990fd767a16adefa8ce700c19ce8 rw,relatime - aufs none rw,si=9b4a7644c8ed339c -59 15 0:3402 / /var/lib/docker/aufs/mnt/f61612c324ff3c924d3f7a82fb00a0f8d8f73c248c41897061949e9f5ab7e3b1 rw,relatime - aufs none rw,si=9b4a76442810c39c -60 15 0:3403 / /var/lib/docker/aufs/mnt/0f1ee55c6c4e25027b80de8e64b8b6fb542b3b41aa0caab9261da75752e22bfd rw,relatime - aufs none rw,si=9b4a76442810e39c -61 15 0:3404 / /var/lib/docker/aufs/mnt/956f6cc4af5785cb3ee6963dcbca668219437d9b28f513290b1453ac64a34f97 rw,relatime - aufs none rw,si=9b4a7644303ec39c -62 15 0:3405 / /var/lib/docker/aufs/mnt/1099769158c4b4773e2569e38024e8717e400f87a002c41d8cf47cb81b051ba6 rw,relatime - aufs none rw,si=9b4a7644303ee39c -63 15 0:3406 / 
/var/lib/docker/aufs/mnt/11890ceb98d4442595b676085cd7b21550ab85c5df841e0fba997ff54e3d522d rw,relatime - aufs none rw,si=9b4a7644303ed39c -64 15 0:3407 / /var/lib/docker/aufs/mnt/acdb90dc378e8ed2420b43a6d291f1c789a081cd1904018780cc038fcd7aae53 rw,relatime - aufs none rw,si=9b4a76434be2139c -65 15 0:3408 / /var/lib/docker/aufs/mnt/120e716f19d4714fbe63cc1ed246204f2c1106eefebc6537ba2587d7e7711959 rw,relatime - aufs none rw,si=9b4a76434be2339c -66 15 0:3409 / /var/lib/docker/aufs/mnt/b197b7fffb61d89e0ba1c40de9a9fc0d912e778b3c1bd828cf981ff37c1963bc rw,relatime - aufs none rw,si=9b4a76434be2039c -70 15 0:3412 / /var/lib/docker/aufs/mnt/1434b69d2e1bb18a9f0b96b9cdac30132b2688f5d1379f68a39a5e120c2f93eb rw,relatime - aufs none rw,si=9b4a76434be2639c -71 15 0:3413 / /var/lib/docker/aufs/mnt/16006e83caf33ab5eb0cd6afc92ea2ee8edeff897496b0bb3ec3a75b767374b3 rw,relatime - aufs none rw,si=9b4a7644d790439c -72 15 0:3414 / /var/lib/docker/aufs/mnt/55bfa5f44e94d27f91f79ba901b118b15098449165c87abf1b53ffff147ff164 rw,relatime - aufs none rw,si=9b4a7644d790239c -73 15 0:3415 / /var/lib/docker/aufs/mnt/1912b97a07ab21ccd98a2a27bc779bf3cf364a3138afa3c3e6f7f169a3c3eab5 rw,relatime - aufs none rw,si=9b4a76441822739c -76 15 0:3418 / /var/lib/docker/aufs/mnt/1a7c3292e8879bd91ffd9282e954f643b1db5683093574c248ff14a9609f2f56 rw,relatime - aufs none rw,si=9b4a76438cb7239c -77 15 0:3419 / /var/lib/docker/aufs/mnt/bb1faaf0d076ddba82c2318305a85f490dafa4e8a8640a8db8ed657c439120cc rw,relatime - aufs none rw,si=9b4a76438cb7339c -78 15 0:3420 / /var/lib/docker/aufs/mnt/1ab869f21d2241a73ac840c7f988490313f909ac642eba71d092204fec66dd7c rw,relatime - aufs none rw,si=9b4a76438cb7639c -79 15 0:3421 / /var/lib/docker/aufs/mnt/fd7245b2cfe3890fa5f5b452260e4edf9e7fb7746532ed9d83f7a0d7dbaa610e rw,relatime - aufs none rw,si=9b4a7644bdc0139c -80 15 0:3422 / /var/lib/docker/aufs/mnt/1e5686c5301f26b9b3cd24e322c608913465cc6c5d0dcd7c5e498d1314747d61 rw,relatime - aufs none rw,si=9b4a7644bdc0639c -81 15 0:3423 / /var/lib/docker/aufs/mnt/52edf6ee6e40bfec1e9301a4d4a92ab83d144e2ae4ce5099e99df6138cb844bf rw,relatime - aufs none rw,si=9b4a7644bdc0239c -82 15 0:3424 / /var/lib/docker/aufs/mnt/1ea10fb7085d28cda4904657dff0454e52598d28e1d77e4f2965bbc3666e808f rw,relatime - aufs none rw,si=9b4a76438cb7139c -83 15 0:3425 / /var/lib/docker/aufs/mnt/9c03e98c3593946dbd4087f8d83f9ca262f4a2efdc952ce60690838b9ba6c526 rw,relatime - aufs none rw,si=9b4a76443020639c -84 15 0:3426 / /var/lib/docker/aufs/mnt/220a2344d67437602c6d2cee9a98c46be13f82c2a8063919dd2fad52bf2fb7dd rw,relatime - aufs none rw,si=9b4a76434bff339c -94 15 0:3427 / /var/lib/docker/aufs/mnt/3b32876c5b200312c50baa476ff342248e88c8ea96e6a1032cd53a88738a1cf2 rw,relatime - aufs none rw,si=9b4a76434bff139c -95 15 0:3428 / /var/lib/docker/aufs/mnt/23ee2b8b0d4ae8db6f6d1e168e2c6f79f8a18f953b09f65e0d22cc1e67a3a6fa rw,relatime - aufs none rw,si=9b4a7646c305c39c -96 15 0:3429 / /var/lib/docker/aufs/mnt/e86e6daa70b61b57945fa178222615f3c3d6bcef12c9f28e9f8623d44dc2d429 rw,relatime - aufs none rw,si=9b4a7646c305f39c -97 15 0:3430 / /var/lib/docker/aufs/mnt/2413d07623e80860bb2e9e306fbdee699afd07525785c025c591231e864aa162 rw,relatime - aufs none rw,si=9b4a76434bff039c -98 15 0:3431 / /var/lib/docker/aufs/mnt/adfd622eb22340fc80b429e5564b125668e260bf9068096c46dd59f1386a4b7d rw,relatime - aufs none rw,si=9b4a7646a7a1039c -102 15 0:3435 / /var/lib/docker/aufs/mnt/27cd92e7a91d02e2d6b44d16679a00fb6d169b19b88822891084e7fd1a84882d rw,relatime - aufs none rw,si=9b4a7646f25ec39c -103 15 0:3436 / 
/var/lib/docker/aufs/mnt/27dfdaf94cfbf45055c748293c37dd68d9140240bff4c646cb09216015914a88 rw,relatime - aufs none rw,si=9b4a7646732f939c -104 15 0:3437 / /var/lib/docker/aufs/mnt/5ed7524aff68dfbf0fc601cbaeac01bab14391850a973dabf3653282a627920f rw,relatime - aufs none rw,si=9b4a7646732f839c -105 15 0:3438 / /var/lib/docker/aufs/mnt/2a0d4767e536beb5785b60e071e3ac8e5e812613ab143a9627bee77d0c9ab062 rw,relatime - aufs none rw,si=9b4a7646732fe39c -106 15 0:3439 / /var/lib/docker/aufs/mnt/dea3fc045d9f4ae51ba952450b948a822cf85c39411489ca5224f6d9a8d02bad rw,relatime - aufs none rw,si=9b4a764012ad839c -107 15 0:3440 / /var/lib/docker/aufs/mnt/2d140a787160798da60cb67c21b1210054ad4dafecdcf832f015995b9aa99cfd rw,relatime - aufs none rw,si=9b4a764012add39c -108 15 0:3441 / /var/lib/docker/aufs/mnt/cb190b2a8e984475914430fbad2382e0d20b9b659f8ef83ae8d170cc672e519c rw,relatime - aufs none rw,si=9b4a76454d9c239c -109 15 0:3442 / /var/lib/docker/aufs/mnt/2f4a012d5a7ffd90256a6e9aa479054b3dddbc3c6a343f26dafbf3196890223b rw,relatime - aufs none rw,si=9b4a76454d9c439c -110 15 0:3443 / /var/lib/docker/aufs/mnt/63cc77904b80c4ffbf49cb974c5d8733dc52ad7640d3ae87554b325d7312d87f rw,relatime - aufs none rw,si=9b4a76454d9c339c -111 15 0:3444 / /var/lib/docker/aufs/mnt/30333e872c451482ea2d235ff2192e875bd234006b238ae2bdde3b91a86d7522 rw,relatime - aufs none rw,si=9b4a76422cebf39c -112 15 0:3445 / /var/lib/docker/aufs/mnt/6c54fc1125da3925cae65b5c9a98f3be55b0a2c2666082e5094a4ba71beb5bff rw,relatime - aufs none rw,si=9b4a7646dd5a439c -113 15 0:3446 / /var/lib/docker/aufs/mnt/3087d48cb01cda9d0a83a9ca301e6ea40e8593d18c4921be4794c91a420ab9a3 rw,relatime - aufs none rw,si=9b4a7646dd5a739c -114 15 0:3447 / /var/lib/docker/aufs/mnt/cc2607462a8f55b179a749b144c3fdbb50678e1a4f3065ea04e283e9b1f1d8e2 rw,relatime - aufs none rw,si=9b4a7646dd5a239c -117 15 0:3450 / /var/lib/docker/aufs/mnt/310c5e8392b29e8658a22e08d96d63936633b7e2c38e8d220047928b00a03d24 rw,relatime - aufs none rw,si=9b4a7647932d739c -118 15 0:3451 / /var/lib/docker/aufs/mnt/38a1f0029406ba9c3b6058f2f406d8a1d23c855046cf355c91d87d446fcc1460 rw,relatime - aufs none rw,si=9b4a76445abc939c -119 15 0:3452 / /var/lib/docker/aufs/mnt/42e109ab7914ae997a11ccd860fd18e4d488c50c044c3240423ce15774b8b62e rw,relatime - aufs none rw,si=9b4a76445abca39c -120 15 0:3453 / /var/lib/docker/aufs/mnt/365d832af0402d052b389c1e9c0d353b48487533d20cd4351df8e24ec4e4f9d8 rw,relatime - aufs none rw,si=9b4a7644066aa39c -121 15 0:3454 / /var/lib/docker/aufs/mnt/d3fa8a24d695b6cda9b64f96188f701963d28bef0473343f8b212df1a2cf1d2b rw,relatime - aufs none rw,si=9b4a7644066af39c -122 15 0:3455 / /var/lib/docker/aufs/mnt/37d4f491919abc49a15d0c7a7cc8383f087573525d7d288accd14f0b4af9eae0 rw,relatime - aufs none rw,si=9b4a7644066ad39c -123 15 0:3456 / /var/lib/docker/aufs/mnt/93902707fe12cbdd0068ce73f2baad4b3a299189b1b19cb5f8a2025e106ae3f5 rw,relatime - aufs none rw,si=9b4a76444445f39c -126 15 0:3459 / /var/lib/docker/aufs/mnt/3b49291670a625b9bbb329ffba99bf7fa7abff80cefef040f8b89e2b3aad4f9f rw,relatime - aufs none rw,si=9b4a7640798a339c -127 15 0:3460 / /var/lib/docker/aufs/mnt/8d9c7b943cc8f854f4d0d4ec19f7c16c13b0cc4f67a41472a072648610cecb59 rw,relatime - aufs none rw,si=9b4a76427383039c -128 15 0:3461 / /var/lib/docker/aufs/mnt/3b6c90036526c376307df71d49c9f5fce334c01b926faa6a78186842de74beac rw,relatime - aufs none rw,si=9b4a7644badd439c -130 15 0:3463 / /var/lib/docker/aufs/mnt/7b24158eeddfb5d31b7e932e406ea4899fd728344335ff8e0765e89ddeb351dd rw,relatime - aufs none rw,si=9b4a7644badd539c -131 15 0:3464 / 
/var/lib/docker/aufs/mnt/3ead6dd5773765c74850cf6c769f21fe65c29d622ffa712664f9f5b80364ce27 rw,relatime - aufs none rw,si=9b4a7642f469939c -132 15 0:3465 / /var/lib/docker/aufs/mnt/3f825573b29547744a37b65597a9d6d15a8350be4429b7038d126a4c9a8e178f rw,relatime - aufs none rw,si=9b4a7642f469c39c -133 15 0:3466 / /var/lib/docker/aufs/mnt/f67aaaeb3681e5dcb99a41f847087370bd1c206680cb8c7b6a9819fd6c97a331 rw,relatime - aufs none rw,si=9b4a7647cc25939c -134 15 0:3467 / /var/lib/docker/aufs/mnt/41afe6cfb3c1fc2280b869db07699da88552786e28793f0bc048a265c01bd942 rw,relatime - aufs none rw,si=9b4a7647cc25c39c -135 15 0:3468 / /var/lib/docker/aufs/mnt/b8092ea59da34a40b120e8718c3ae9fa8436996edc4fc50e4b99c72dfd81e1af rw,relatime - aufs none rw,si=9b4a76445abc439c -136 15 0:3469 / /var/lib/docker/aufs/mnt/42c69d2cc179e2684458bb8596a9da6dad182c08eae9b74d5f0e615b399f75a5 rw,relatime - aufs none rw,si=9b4a76455ddbe39c -137 15 0:3470 / /var/lib/docker/aufs/mnt/ea0871954acd2d62a211ac60e05969622044d4c74597870c4f818fbb0c56b09b rw,relatime - aufs none rw,si=9b4a76455ddbf39c -138 15 0:3471 / /var/lib/docker/aufs/mnt/4307906b275ab3fc971786b3841ae3217ac85b6756ddeb7ad4ba09cd044c2597 rw,relatime - aufs none rw,si=9b4a76455ddb839c -139 15 0:3472 / /var/lib/docker/aufs/mnt/4390b872928c53500a5035634f3421622ed6299dc1472b631fc45de9f56dc180 rw,relatime - aufs none rw,si=9b4a76402f2fd39c -140 15 0:3473 / /var/lib/docker/aufs/mnt/6bb41e78863b85e4aa7da89455314855c8c3bda64e52a583bab15dc1fa2e80c2 rw,relatime - aufs none rw,si=9b4a76402f2fa39c -141 15 0:3474 / /var/lib/docker/aufs/mnt/4444f583c2a79c66608f4673a32c9c812154f027045fbd558c2d69920c53f835 rw,relatime - aufs none rw,si=9b4a764479dbd39c -142 15 0:3475 / /var/lib/docker/aufs/mnt/6f11883af4a05ea362e0c54df89058da4859f977efd07b6f539e1f55c1d2a668 rw,relatime - aufs none rw,si=9b4a76402f30b39c -143 15 0:3476 / /var/lib/docker/aufs/mnt/453490dd32e7c2e9ef906f995d8fb3c2753923d1a5e0ba3fd3296e2e4dc238e7 rw,relatime - aufs none rw,si=9b4a76402f30c39c -144 15 0:3477 / /var/lib/docker/aufs/mnt/45e5945735ee102b5e891c91650c57ec4b52bb53017d68f02d50ea8a6e230610 rw,relatime - aufs none rw,si=9b4a76423260739c -147 15 0:3480 / /var/lib/docker/aufs/mnt/4727a64a5553a1125f315b96bed10d3073d6988225a292cce732617c925b56ab rw,relatime - aufs none rw,si=9b4a76443030339c -150 15 0:3483 / /var/lib/docker/aufs/mnt/4e348b5187b9a567059306afc72d42e0ec5c893b0d4abd547526d5f9b6fb4590 rw,relatime - aufs none rw,si=9b4a7644f5d8c39c -151 15 0:3484 / /var/lib/docker/aufs/mnt/4efc616bfbc3f906718b052da22e4335f8e9f91ee9b15866ed3a8029645189ef rw,relatime - aufs none rw,si=9b4a7644f5d8939c -152 15 0:3485 / /var/lib/docker/aufs/mnt/83e730ae9754d5adb853b64735472d98dfa17136b8812ac9cfcd1eba7f4e7d2d rw,relatime - aufs none rw,si=9b4a76469aa7139c -153 15 0:3486 / /var/lib/docker/aufs/mnt/4fc5ba8a5b333be2b7eefacccb626772eeec0ae8a6975112b56c9fb36c0d342f rw,relatime - aufs none rw,si=9b4a7640128dc39c -154 15 0:3487 / /var/lib/docker/aufs/mnt/50200d5edff5dfe8d1ef3c78b0bbd709793ac6e936aa16d74ff66f7ea577b6f9 rw,relatime - aufs none rw,si=9b4a7640128da39c -155 15 0:3488 / /var/lib/docker/aufs/mnt/51e5e51604361448f0b9777f38329f414bc5ba9cf238f26d465ff479bd574b61 rw,relatime - aufs none rw,si=9b4a76444f68939c -156 15 0:3489 / /var/lib/docker/aufs/mnt/52a142149aa98bba83df8766bbb1c629a97b9799944ead90dd206c4bdf0b8385 rw,relatime - aufs none rw,si=9b4a76444f68b39c -157 15 0:3490 / /var/lib/docker/aufs/mnt/52dd21a94a00f58a1ed489312fcfffb91578089c76c5650364476f1d5de031bc rw,relatime - aufs none rw,si=9b4a76444f68f39c -158 15 0:3491 / 
/var/lib/docker/aufs/mnt/ee562415ddaad353ed22c88d0ca768a0c74bfba6333b6e25c46849ee22d990da rw,relatime - aufs none rw,si=9b4a7640128d839c -159 15 0:3492 / /var/lib/docker/aufs/mnt/db47a9e87173f7554f550c8a01891de79cf12acdd32e01f95c1a527a08bdfb2c rw,relatime - aufs none rw,si=9b4a764405a1d39c -160 15 0:3493 / /var/lib/docker/aufs/mnt/55e827bf6d44d930ec0b827c98356eb8b68c3301e2d60d1429aa72e05b4c17df rw,relatime - aufs none rw,si=9b4a764405a1a39c -162 15 0:3495 / /var/lib/docker/aufs/mnt/578dc4e0a87fc37ec081ca098430499a59639c09f6f12a8f48de29828a091aa6 rw,relatime - aufs none rw,si=9b4a76406d7d439c -163 15 0:3496 / /var/lib/docker/aufs/mnt/728cc1cb04fa4bc6f7bf7a90980beda6d8fc0beb71630874c0747b994efb0798 rw,relatime - aufs none rw,si=9b4a76444f20e39c -164 15 0:3497 / /var/lib/docker/aufs/mnt/5850cc4bd9b55aea46c7ad598f1785117607974084ea643580f58ce3222e683a rw,relatime - aufs none rw,si=9b4a7644a824239c -165 15 0:3498 / /var/lib/docker/aufs/mnt/89443b3f766d5a37bc8b84e29da8b84e6a3ea8486d3cf154e2aae1816516e4a8 rw,relatime - aufs none rw,si=9b4a7644a824139c -166 15 0:3499 / /var/lib/docker/aufs/mnt/f5ae8fd5a41a337907d16515bc3162525154b59c32314c695ecd092c3b47943d rw,relatime - aufs none rw,si=9b4a7644a824439c -167 15 0:3500 / /var/lib/docker/aufs/mnt/5a430854f2a03a9e5f7cbc9f3fb46a8ebca526a5b3f435236d8295e5998798f5 rw,relatime - aufs none rw,si=9b4a7647fc82439c -168 15 0:3501 / /var/lib/docker/aufs/mnt/eda16901ae4cead35070c39845cbf1e10bd6b8cb0ffa7879ae2d8a186e460f91 rw,relatime - aufs none rw,si=9b4a76441e0df39c -169 15 0:3502 / /var/lib/docker/aufs/mnt/5a593721430c2a51b119ff86a7e06ea2b37e3b4131f8f1344d402b61b0c8d868 rw,relatime - aufs none rw,si=9b4a764248bad39c -170 15 0:3503 / /var/lib/docker/aufs/mnt/d662ad0a30fbfa902e0962108685b9330597e1ee2abb16dc9462eb5a67fdd23f rw,relatime - aufs none rw,si=9b4a764248bae39c -171 15 0:3504 / /var/lib/docker/aufs/mnt/5bc9de5c79812843fb36eee96bef1ddba812407861f572e33242f4ee10da2c15 rw,relatime - aufs none rw,si=9b4a764248ba839c -172 15 0:3505 / /var/lib/docker/aufs/mnt/5e763de8e9b0f7d58d2e12a341e029ab4efb3b99788b175090d8209e971156c1 rw,relatime - aufs none rw,si=9b4a764248baa39c -173 15 0:3506 / /var/lib/docker/aufs/mnt/b4431dc2739936f1df6387e337f5a0c99cf051900c896bd7fd46a870ce61c873 rw,relatime - aufs none rw,si=9b4a76401263539c -174 15 0:3507 / /var/lib/docker/aufs/mnt/5f37830e5a02561ab8c67ea3113137ba69f67a60e41c05cb0e7a0edaa1925b24 rw,relatime - aufs none rw,si=9b4a76401263639c -184 15 0:3508 / /var/lib/docker/aufs/mnt/62ea10b957e6533538a4633a1e1d678502f50ddcdd354b2ca275c54dd7a7793a rw,relatime - aufs none rw,si=9b4a76401263039c -187 15 0:3509 / /var/lib/docker/aufs/mnt/d56ee9d44195fe390e042fda75ec15af5132adb6d5c69468fa8792f4e54a6953 rw,relatime - aufs none rw,si=9b4a76401263239c -188 15 0:3510 / /var/lib/docker/aufs/mnt/6a300930673174549c2b62f36c933f0332a20735978c007c805a301f897146c5 rw,relatime - aufs none rw,si=9b4a76455d4c539c -189 15 0:3511 / /var/lib/docker/aufs/mnt/64496c45c84d348c24d410015456d101601c30cab4d1998c395591caf7e57a70 rw,relatime - aufs none rw,si=9b4a76455d4c639c -190 15 0:3512 / /var/lib/docker/aufs/mnt/65a6a645883fe97a7422cd5e71ebe0bc17c8e6302a5361edf52e89747387e908 rw,relatime - aufs none rw,si=9b4a76455d4c039c -191 15 0:3513 / /var/lib/docker/aufs/mnt/672be40695f7b6e13b0a3ed9fc996c73727dede3481f58155950fcfad57ed616 rw,relatime - aufs none rw,si=9b4a76455d4c239c -192 15 0:3514 / /var/lib/docker/aufs/mnt/d42438acb2bfb2169e1c0d8e917fc824f7c85d336dadb0b0af36dfe0f001b3ba rw,relatime - aufs none rw,si=9b4a7642bfded39c -193 15 0:3515 / 
/var/lib/docker/aufs/mnt/b48a54abf26d01cb2ddd908b1ed6034d17397c1341bf0eb2b251a3e5b79be854 rw,relatime - aufs none rw,si=9b4a7642bfdee39c -194 15 0:3516 / /var/lib/docker/aufs/mnt/76f27134491f052bfb87f59092126e53ef875d6851990e59195a9da16a9412f8 rw,relatime - aufs none rw,si=9b4a7642bfde839c -195 15 0:3517 / /var/lib/docker/aufs/mnt/6bd626a5462b4f8a8e1cc7d10351326dca97a59b2758e5ea549a4f6350ce8a90 rw,relatime - aufs none rw,si=9b4a7642bfdea39c -196 15 0:3518 / /var/lib/docker/aufs/mnt/f1fe3549dbd6f5ca615e9139d9b53f0c83a3b825565df37628eacc13e70cbd6d rw,relatime - aufs none rw,si=9b4a7642bfdf539c -197 15 0:3519 / /var/lib/docker/aufs/mnt/6d0458c8426a9e93d58d0625737e6122e725c9408488ed9e3e649a9984e15c34 rw,relatime - aufs none rw,si=9b4a7642bfdf639c -198 15 0:3520 / /var/lib/docker/aufs/mnt/6e4c97db83aa82145c9cf2bafc20d500c0b5389643b689e3ae84188c270a48c5 rw,relatime - aufs none rw,si=9b4a7642bfdf039c -199 15 0:3521 / /var/lib/docker/aufs/mnt/eb94d6498f2c5969eaa9fa11ac2934f1ab90ef88e2d002258dca08e5ba74ea27 rw,relatime - aufs none rw,si=9b4a7642bfdf239c -200 15 0:3522 / /var/lib/docker/aufs/mnt/fe3f88f0c511608a2eec5f13a98703aa16e55dbf930309723d8a37101f539fe1 rw,relatime - aufs none rw,si=9b4a7642bfc3539c -201 15 0:3523 / /var/lib/docker/aufs/mnt/6f40c229fb9cad85fabf4b64a2640a5403ec03fe5ac1a57d0609fb8b606b9c83 rw,relatime - aufs none rw,si=9b4a7642bfc3639c -202 15 0:3524 / /var/lib/docker/aufs/mnt/7513e9131f7a8acf58ff15248237feb767c78732ca46e159f4d791e6ef031dbc rw,relatime - aufs none rw,si=9b4a7642bfc3039c -203 15 0:3525 / /var/lib/docker/aufs/mnt/79f48b00aa713cdf809c6bb7c7cb911b66e9a8076c81d6c9d2504139984ea2da rw,relatime - aufs none rw,si=9b4a7642bfc3239c -204 15 0:3526 / /var/lib/docker/aufs/mnt/c3680418350d11358f0a96c676bc5aa74fa00a7c89e629ef5909d3557b060300 rw,relatime - aufs none rw,si=9b4a7642f47cd39c -205 15 0:3527 / /var/lib/docker/aufs/mnt/7a1744dd350d7fcc0cccb6f1757ca4cbe5453f203a5888b0f1014d96ad5a5ef9 rw,relatime - aufs none rw,si=9b4a7642f47ce39c -206 15 0:3528 / /var/lib/docker/aufs/mnt/7fa99662db046be9f03c33c35251afda9ccdc0085636bbba1d90592cec3ff68d rw,relatime - aufs none rw,si=9b4a7642f47c839c -207 15 0:3529 / /var/lib/docker/aufs/mnt/f815021ef20da9c9b056bd1d52d8aaf6e2c0c19f11122fc793eb2b04eb995e35 rw,relatime - aufs none rw,si=9b4a7642f47ca39c -208 15 0:3530 / /var/lib/docker/aufs/mnt/801086ae3110192d601dfcebdba2db92e86ce6b6a9dba6678ea04488e4513669 rw,relatime - aufs none rw,si=9b4a7642dc6dd39c -209 15 0:3531 / /var/lib/docker/aufs/mnt/822ba7db69f21daddda87c01cfbfbf73013fc03a879daf96d16cdde6f9b1fbd6 rw,relatime - aufs none rw,si=9b4a7642dc6de39c -210 15 0:3532 / /var/lib/docker/aufs/mnt/834227c1a950fef8cae3827489129d0dd220541e60c6b731caaa765bf2e6a199 rw,relatime - aufs none rw,si=9b4a7642dc6d839c -211 15 0:3533 / /var/lib/docker/aufs/mnt/83dccbc385299bd1c7cf19326e791b33a544eea7b4cdfb6db70ea94eed4389fb rw,relatime - aufs none rw,si=9b4a7642dc6da39c -212 15 0:3534 / /var/lib/docker/aufs/mnt/f1b8e6f0e7c8928b5dcdab944db89306ebcae3e0b32f9ff40d2daa8329f21600 rw,relatime - aufs none rw,si=9b4a7645a126039c -213 15 0:3535 / /var/lib/docker/aufs/mnt/970efb262c7a020c2404cbcc5b3259efba0d110a786079faeef05bc2952abf3a rw,relatime - aufs none rw,si=9b4a7644c8ed139c -214 15 0:3536 / /var/lib/docker/aufs/mnt/84b6d73af7450f3117a77e15a5ca1255871fea6182cd8e8a7be6bc744be18c2c rw,relatime - aufs none rw,si=9b4a76406559139c -215 15 0:3537 / /var/lib/docker/aufs/mnt/88be2716e026bc681b5e63fe7942068773efbd0b6e901ca7ba441412006a96b6 rw,relatime - aufs none rw,si=9b4a76406559339c -216 15 0:3538 / 
/var/lib/docker/aufs/mnt/c81939aa166ce50cd8bca5cfbbcc420a78e0318dd5cd7c755209b9166a00a752 rw,relatime - aufs none rw,si=9b4a76406559239c -217 15 0:3539 / /var/lib/docker/aufs/mnt/e0f241645d64b7dc5ff6a8414087cca226be08fb54ce987d1d1f6350c57083aa rw,relatime - aufs none rw,si=9b4a7647cfc0f39c -218 15 0:3540 / /var/lib/docker/aufs/mnt/e10e2bf75234ed51d8a6a4bb39e465404fecbe318e54400d3879cdb2b0679c78 rw,relatime - aufs none rw,si=9b4a7647cfc0939c -219 15 0:3541 / /var/lib/docker/aufs/mnt/8f71d74c8cfc3228b82564aa9f09b2e576cff0083ddfb6aa5cb350346063f080 rw,relatime - aufs none rw,si=9b4a7647cfc0a39c -220 15 0:3542 / /var/lib/docker/aufs/mnt/9159f1eba2aef7f5205cc18d015cda7f5933cd29bba3b1b8aed5ccb5824c69ee rw,relatime - aufs none rw,si=9b4a76468cedd39c -221 15 0:3543 / /var/lib/docker/aufs/mnt/932cad71e652e048e500d9fbb5b8ea4fc9a269d42a3134ce527ceef42a2be56b rw,relatime - aufs none rw,si=9b4a76468cede39c -222 15 0:3544 / /var/lib/docker/aufs/mnt/bf1e1b5f529e8943cc0144ee86dbaaa37885c1ddffcef29537e0078ee7dd316a rw,relatime - aufs none rw,si=9b4a76468ced839c -223 15 0:3545 / /var/lib/docker/aufs/mnt/949d93ecf3322e09f858ce81d5f4b434068ec44ff84c375de03104f7b45ee955 rw,relatime - aufs none rw,si=9b4a76468ceda39c -224 15 0:3546 / /var/lib/docker/aufs/mnt/d65c6087f92dc2a3841b5251d2fe9ca07d4c6e5b021597692479740816e4e2a1 rw,relatime - aufs none rw,si=9b4a7645a126239c -225 15 0:3547 / /var/lib/docker/aufs/mnt/98a0153119d0651c193d053d254f6e16a68345a141baa80c87ae487e9d33f290 rw,relatime - aufs none rw,si=9b4a7640787cf39c -226 15 0:3548 / /var/lib/docker/aufs/mnt/99daf7fe5847c017392f6e59aa9706b3dfdd9e6d1ba11dae0f7fffde0a60b5e5 rw,relatime - aufs none rw,si=9b4a7640787c839c -227 15 0:3549 / /var/lib/docker/aufs/mnt/9ad1f2fe8a5599d4e10c5a6effa7f03d932d4e92ee13149031a372087a359079 rw,relatime - aufs none rw,si=9b4a7640787ca39c -228 15 0:3550 / /var/lib/docker/aufs/mnt/c26d64494da782ddac26f8370d86ac93e7c1666d88a7b99110fc86b35ea6a85d rw,relatime - aufs none rw,si=9b4a7642fc6b539c -229 15 0:3551 / /var/lib/docker/aufs/mnt/a49e4a8275133c230ec640997f35f172312eb0ea5bd2bbe10abf34aae98f30eb rw,relatime - aufs none rw,si=9b4a7642fc6b639c -230 15 0:3552 / /var/lib/docker/aufs/mnt/b5e2740c867ed843025f49d84e8d769de9e8e6039b3c8cb0735b5bf358994bc7 rw,relatime - aufs none rw,si=9b4a7642fc6b039c -231 15 0:3553 / /var/lib/docker/aufs/mnt/a826fdcf3a7039b30570054579b65763db605a314275d7aef31b872c13311b4b rw,relatime - aufs none rw,si=9b4a7642fc6b239c -232 15 0:3554 / /var/lib/docker/aufs/mnt/addf3025babf5e43b5a3f4a0da7ad863dda3c01fb8365c58fd8d28bb61dc11bc rw,relatime - aufs none rw,si=9b4a76407871d39c -233 15 0:3555 / /var/lib/docker/aufs/mnt/c5b6c6813ab3e5ebdc6d22cb2a3d3106a62095f2c298be52b07a3b0fa20ff690 rw,relatime - aufs none rw,si=9b4a76407871e39c -234 15 0:3556 / /var/lib/docker/aufs/mnt/af0609eaaf64e2392060cb46f5a9f3d681a219bb4c651d4f015bf573fbe6c4cf rw,relatime - aufs none rw,si=9b4a76407871839c -235 15 0:3557 / /var/lib/docker/aufs/mnt/e7f20e3c37ecad39cd90a97cd3549466d0d106ce4f0a930b8495442634fa4a1f rw,relatime - aufs none rw,si=9b4a76407871a39c -237 15 0:3559 / /var/lib/docker/aufs/mnt/b57a53d440ffd0c1295804fa68cdde35d2fed5409484627e71b9c37e4249fd5c rw,relatime - aufs none rw,si=9b4a76444445a39c -238 15 0:3560 / /var/lib/docker/aufs/mnt/b5e7d7b8f35e47efbba3d80c5d722f5e7bd43e54c824e54b4a4b351714d36d42 rw,relatime - aufs none rw,si=9b4a7647932d439c -239 15 0:3561 / /var/lib/docker/aufs/mnt/f1b136def157e9465640658f277f3347de593c6ae76412a2e79f7002f091cae2 rw,relatime - aufs none rw,si=9b4a76445abcd39c -240 15 0:3562 / 
/var/lib/docker/aufs/mnt/b750fe79269d2ec9a3c593ef05b4332b1d1a02a62b4accb2c21d589ff2f5f2dc rw,relatime - aufs none rw,si=9b4a7644403b339c -241 15 0:3563 / /var/lib/docker/aufs/mnt/b89b140cdbc95063761864e0a23346207fa27ee4c5c63a1ae85c9069a9d9cf1d rw,relatime - aufs none rw,si=9b4a7644aa19739c -242 15 0:3564 / /var/lib/docker/aufs/mnt/bc6a69ed51c07f5228f6b4f161c892e6a949c0e7e86a9c3432049d4c0e5cd298 rw,relatime - aufs none rw,si=9b4a7644aa19139c -243 15 0:3565 / /var/lib/docker/aufs/mnt/be4e2ba3f136933e239f7cf3d136f484fb9004f1fbdfee24a62a2c7b0ab30670 rw,relatime - aufs none rw,si=9b4a7644aa19339c -244 15 0:3566 / /var/lib/docker/aufs/mnt/e04ca1a4a5171e30d20f0c92f90a50b8b6f8600af5459c4b4fb25e42e864dfe1 rw,relatime - aufs none rw,si=9b4a7647932d139c -245 15 0:3567 / /var/lib/docker/aufs/mnt/be61576b31db893129aaffcd3dcb5ce35e49c4b71b30c392a78609a45c7323d8 rw,relatime - aufs none rw,si=9b4a7642d85f739c -246 15 0:3568 / /var/lib/docker/aufs/mnt/dda42c191e56becf672327658ab84fcb563322db3764b91c2fefe4aaef04c624 rw,relatime - aufs none rw,si=9b4a7642d85f139c -247 15 0:3569 / /var/lib/docker/aufs/mnt/c0a7995053330f3d88969247a2e72b07e2dd692133f5668a4a35ea3905561072 rw,relatime - aufs none rw,si=9b4a7642d85f339c -249 15 0:3571 / /var/lib/docker/aufs/mnt/c3594b2e5f08c59ff5ed338a1ba1eceeeb1f7fc5d180068338110c00b1eb8502 rw,relatime - aufs none rw,si=9b4a7642738c739c -250 15 0:3572 / /var/lib/docker/aufs/mnt/c58dce03a0ab0a7588393880379dc3bce9f96ec08ed3f99cf1555260ff0031e8 rw,relatime - aufs none rw,si=9b4a7642738c139c -251 15 0:3573 / /var/lib/docker/aufs/mnt/c73e9f1d109c9d14cb36e1c7489df85649be3911116d76c2fd3648ec8fd94e23 rw,relatime - aufs none rw,si=9b4a7642738c339c -252 15 0:3574 / /var/lib/docker/aufs/mnt/c9eef28c344877cd68aa09e543c0710ab2b305a0ff96dbb859bfa7808c3e8d01 rw,relatime - aufs none rw,si=9b4a7642d85f439c -253 15 0:3575 / /var/lib/docker/aufs/mnt/feb67148f548d70cb7484f2aaad2a86051cd6867a561741a2f13b552457d666e rw,relatime - aufs none rw,si=9b4a76468c55739c -254 15 0:3576 / /var/lib/docker/aufs/mnt/cdf1f96c36d35a96041a896bf398ec0f7dc3b0fb0643612a0f4b6ff96e04e1bb rw,relatime - aufs none rw,si=9b4a76468c55139c -255 15 0:3577 / /var/lib/docker/aufs/mnt/ec6e505872353268451ac4bc034c1df00f3bae4a3ea2261c6e48f7bd5417c1b3 rw,relatime - aufs none rw,si=9b4a76468c55339c -256 15 0:3578 / /var/lib/docker/aufs/mnt/d6dc8aca64efd90e0bc10274001882d0efb310d42ccbf5712b99b169053b8b1a rw,relatime - aufs none rw,si=9b4a7642738c439c -257 15 0:3579 / /var/lib/docker/aufs/mnt/d712594e2ff6eaeb895bfd150d694bd1305fb927e7a186b2dab7df2ea95f8f81 rw,relatime - aufs none rw,si=9b4a76401268f39c -259 15 0:3581 / /var/lib/docker/aufs/mnt/dbfa1174cd78cde2d7410eae442af0b416c4a0e6f87ed4ff1e9f169a0029abc0 rw,relatime - aufs none rw,si=9b4a76401268b39c -260 15 0:3582 / /var/lib/docker/aufs/mnt/e883f5a82316d7856fbe93ee8c0af5a920b7079619dd95c4ffd88bbd309d28dd rw,relatime - aufs none rw,si=9b4a76468c55439c -261 15 0:3583 / /var/lib/docker/aufs/mnt/fdec3eff581c4fc2b09f87befa2fa021f3f2d373bea636a87f1fb5b367d6347a rw,relatime - aufs none rw,si=9b4a7644aa1af39c -262 15 0:3584 / /var/lib/docker/aufs/mnt/ef764e26712184653067ecf7afea18a80854c41331ca0f0ef03e1bacf90a6ffc rw,relatime - aufs none rw,si=9b4a7644aa1a939c -263 15 0:3585 / /var/lib/docker/aufs/mnt/f3176b40c41fce8ce6942936359a2001a6f1b5c1bb40ee224186db0789ec2f76 rw,relatime - aufs none rw,si=9b4a7644aa1ab39c -264 15 0:3586 / /var/lib/docker/aufs/mnt/f5daf06785d3565c6dd18ea7d953d9a8b9606107781e63270fe0514508736e6a rw,relatime - aufs none rw,si=9b4a76401268c39c -58 15 0:3587 / 
/var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8-init rw,relatime - aufs none rw,si=9b4a76444445839c -67 15 0:3588 / /var/lib/docker/aufs/mnt/cde8c40f6524b7361af4f5ad05bb857dc9ee247c20852ba666195c0739e3a2b8 rw,relatime - aufs none rw,si=9b4a7644badd339c -265 15 0:3610 / /var/lib/docker/aufs/mnt/e812472cd2c8c4748d1ef71fac4e77e50d661b9349abe66ce3e23511ed44f414 rw,relatime - aufs none rw,si=9b4a76427937d39c -270 15 0:3615 / /var/lib/docker/aufs/mnt/997636e7c5c9d0d1376a217e295c14c205350b62bc12052804fb5f90abe6f183 rw,relatime - aufs none rw,si=9b4a76406540739c -273 15 0:3618 / /var/lib/docker/aufs/mnt/d5794d080417b6e52e69227c3873e0e4c1ff0d5a845ebe3860ec2f89a47a2a1e rw,relatime - aufs none rw,si=9b4a76454814039c -278 15 0:3623 / /var/lib/docker/aufs/mnt/586bdd48baced671bb19bc4d294ec325f26c55545ae267db426424f157d59c48 rw,relatime - aufs none rw,si=9b4a7644b439f39c -281 15 0:3626 / /var/lib/docker/aufs/mnt/69739d022f89f8586908bbd5edbbdd95ea5256356f177f9ffcc6ef9c0ea752d2 rw,relatime - aufs none rw,si=9b4a7644a0f1b39c -286 15 0:3631 / /var/lib/docker/aufs/mnt/ff28c27d5f894363993622de26d5dd352dba072f219e4691d6498c19bbbc15a9 rw,relatime - aufs none rw,si=9b4a7642265b339c -289 15 0:3634 / /var/lib/docker/aufs/mnt/aa128fe0e64fdede333aa48fd9de39530c91a9244a0f0649a3c411c61e372daa rw,relatime - aufs none rw,si=9b4a764012ada39c -99 15 8:33 / /media/REMOVE\040ME rw,nosuid,nodev,relatime - fuseblk /dev/sdc1 rw,user_id=0,group_id=0,allow_other,blksize=4096` -) - -func TestParseFedoraMountinfo(t *testing.T) { - r := bytes.NewBuffer([]byte(fedoraMountinfo)) - _, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } -} - -func TestParseUbuntuMountinfo(t *testing.T) { - r := bytes.NewBuffer([]byte(ubuntuMountInfo)) - _, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } -} - -func TestParseGentooMountinfo(t *testing.T) { - r := bytes.NewBuffer([]byte(gentooMountinfo)) - _, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } -} - -func TestParseFedoraMountinfoFields(t *testing.T) { - r := bytes.NewBuffer([]byte(fedoraMountinfo)) - infos, err := parseInfoFile(r) - if err != nil { - t.Fatal(err) - } - expectedLength := 58 - if len(infos) != expectedLength { - t.Fatalf("Expected %d entries, got %d", expectedLength, len(infos)) - } - mi := Info{ - ID: 15, - Parent: 35, - Major: 0, - Minor: 3, - Root: "/", - Mountpoint: "/proc", - Opts: "rw,nosuid,nodev,noexec,relatime", - Optional: "shared:5", - Fstype: "proc", - Source: "proc", - VfsOpts: "rw", - } - - if *infos[0] != mi { - t.Fatalf("expected %#v, got %#v", mi, infos[0]) - } -} diff --git a/pkg/mount/mountinfo_solaris.go b/pkg/mount/mountinfo_solaris.go deleted file mode 100644 index ad9ab57f8b..0000000000 --- a/pkg/mount/mountinfo_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build solaris,cgo - -package mount - -/* -#include -#include -*/ -import "C" - -import ( - "fmt" -) - -func parseMountTable() ([]*Info, error) { - mnttab := C.fopen(C.CString(C.MNTTAB), C.CString("r")) - if mnttab == nil { - return nil, fmt.Errorf("Failed to open %s", C.MNTTAB) - } - - var out []*Info - var mp C.struct_mnttab - - ret := C.getmntent(mnttab, &mp) - for ret == 0 { - var mountinfo Info - mountinfo.Mountpoint = C.GoString(mp.mnt_mountp) - mountinfo.Source = C.GoString(mp.mnt_special) - mountinfo.Fstype = C.GoString(mp.mnt_fstype) - mountinfo.Opts = C.GoString(mp.mnt_mntopts) - out = append(out, &mountinfo) - ret = C.getmntent(mnttab, &mp) - } - - C.fclose(mnttab) - return out, nil -} diff --git 
a/pkg/mount/mountinfo_unsupported.go b/pkg/mount/mountinfo_unsupported.go deleted file mode 100644 index 7fbcf19214..0000000000 --- a/pkg/mount/mountinfo_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows,!linux,!freebsd,!solaris freebsd,!cgo solaris,!cgo - -package mount - -import ( - "fmt" - "runtime" -) - -func parseMountTable() ([]*Info, error) { - return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH) -} diff --git a/pkg/mount/mountinfo_windows.go b/pkg/mount/mountinfo_windows.go deleted file mode 100644 index dab8a37ed0..0000000000 --- a/pkg/mount/mountinfo_windows.go +++ /dev/null @@ -1,6 +0,0 @@ -package mount - -func parseMountTable() ([]*Info, error) { - // Do NOT return an error! - return nil, nil -} diff --git a/pkg/mount/sharedsubtree_linux.go b/pkg/mount/sharedsubtree_linux.go deleted file mode 100644 index 8ceec84bc6..0000000000 --- a/pkg/mount/sharedsubtree_linux.go +++ /dev/null @@ -1,69 +0,0 @@ -// +build linux - -package mount - -// MakeShared ensures a mounted filesystem has the SHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "shared") -} - -// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRShared(mountPoint string) error { - return ensureMountedAs(mountPoint, "rshared") -} - -// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled. -// See the supported options in flags.go for further reference. -func MakePrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "private") -} - -// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeRPrivate(mountPoint string) error { - return ensureMountedAs(mountPoint, "rprivate") -} - -// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "slave") -} - -// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled. -// See the supported options in flags.go for further reference. -func MakeRSlave(mountPoint string) error { - return ensureMountedAs(mountPoint, "rslave") -} - -// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option -// enabled. See the supported options in flags.go for further reference. -func MakeUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "unbindable") -} - -// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount -// option enabled. See the supported options in flags.go for further reference. 
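Editor's aside on the file being removed here: each of the Make* helpers above delegates to ensureMountedAs, shown just below, which bind-mounts the path over itself if it is not already a mount point and then remounts it with the requested propagation option. As a rough illustration, and not part of this patch, the equivalent raw mount(2) calls on Linux look like the following; the helper name makeShared and the minimal error handling are invented for the sketch.

package main

import "syscall"

// makeShared marks mountPoint as a shared subtree, mirroring what
// MakeShared/ensureMountedAs do. Passing syscall.MS_REC in the second
// call would give the recursive "rshared" behavior instead.
func makeShared(mountPoint string) error {
	// Bind the directory over itself so it is guaranteed to be a mount point.
	if err := syscall.Mount(mountPoint, mountPoint, "none", syscall.MS_BIND, ""); err != nil {
		return err
	}
	// Now change the propagation type of that mount.
	return syscall.Mount("", mountPoint, "none", syscall.MS_SHARED, "")
}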
-func MakeRUnbindable(mountPoint string) error { - return ensureMountedAs(mountPoint, "runbindable") -} - -func ensureMountedAs(mountPoint, options string) error { - mounted, err := Mounted(mountPoint) - if err != nil { - return err - } - - if !mounted { - if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil { - return err - } - } - if _, err = Mounted(mountPoint); err != nil { - return err - } - - return ForceMount("", mountPoint, "none", options) -} diff --git a/pkg/mount/sharedsubtree_linux_test.go b/pkg/mount/sharedsubtree_linux_test.go deleted file mode 100644 index c1837942e3..0000000000 --- a/pkg/mount/sharedsubtree_linux_test.go +++ /dev/null @@ -1,331 +0,0 @@ -// +build linux - -package mount - -import ( - "os" - "path" - "syscall" - "testing" -) - -// nothing is propagated in or out -func TestSubtreePrivate(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - outside1Dir = path.Join(tmp, "outside1") - outside2Dir = path.Join(tmp, "outside2") - - outside1Path = path.Join(outside1Dir, "file.txt") - outside2Path = path.Join(outside2Dir, "file.txt") - outside1CheckPath = path.Join(targetDir, "a", "file.txt") - outside2CheckPath = path.Join(sourceDir, "b", "file.txt") - ) - if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(targetDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside1Dir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside2Dir, 0777); err != nil { - t.Fatal(err) - } - - if err := createFile(outside1Path); err != nil { - t.Fatal(err) - } - if err := createFile(outside2Path); err != nil { - t.Fatal(err) - } - - // mount the shared directory to a target - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // next, make the target private - if err := MakePrivate(targetDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // mount in an outside path to a mounted path inside the _source_ - if err := Mount(outside1Dir, path.Join(sourceDir, "a"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(sourceDir, "a")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_not_ show in the _target_ - if _, err := os.Stat(outside1CheckPath); err != nil && !os.IsNotExist(err) { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not be visible, but is", outside1CheckPath) - } - - // next mount outside2Dir into the _target_ - if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(targetDir, "b")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_not_ show in the _source_ - if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not be visible, but is", outside2CheckPath) - } -} - -// Testing that when a target is a shared mount, -// then child mounts propagate to the source -func TestSubtreeShared(t *testing.T) { - tmp 
:= path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - outsideDir = path.Join(tmp, "outside") - - outsidePath = path.Join(outsideDir, "file.txt") - sourceCheckPath = path.Join(sourceDir, "a", "file.txt") - ) - - if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(targetDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outsideDir, 0777); err != nil { - t.Fatal(err) - } - - if err := createFile(outsidePath); err != nil { - t.Fatal(err) - } - - // mount the source as shared - if err := MakeShared(sourceDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(sourceDir); err != nil { - t.Fatal(err) - } - }() - - // mount the shared directory to a target - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // mount in an outside path to a mounted path inside the target - if err := Mount(outsideDir, path.Join(targetDir, "a"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(targetDir, "a")); err != nil { - t.Fatal(err) - } - }() - - // NOW, check that the file from the outside directory is available in the source directory - if _, err := os.Stat(sourceCheckPath); err != nil { - t.Fatal(err) - } -} - -// testing that mounts to a shared source show up in the slave target, -// and that mounts into a slave target do _not_ show up in the shared source -func TestSubtreeSharedSlave(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - outside1Dir = path.Join(tmp, "outside1") - outside2Dir = path.Join(tmp, "outside2") - - outside1Path = path.Join(outside1Dir, "file.txt") - outside2Path = path.Join(outside2Dir, "file.txt") - outside1CheckPath = path.Join(targetDir, "a", "file.txt") - outside2CheckPath = path.Join(sourceDir, "b", "file.txt") - ) - if err := os.MkdirAll(path.Join(sourceDir, "a"), 0777); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(path.Join(sourceDir, "b"), 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(targetDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside1Dir, 0777); err != nil { - t.Fatal(err) - } - if err := os.Mkdir(outside2Dir, 0777); err != nil { - t.Fatal(err) - } - - if err := createFile(outside1Path); err != nil { - t.Fatal(err) - } - if err := createFile(outside2Path); err != nil { - t.Fatal(err) - } - - // mount the source as shared - if err := MakeShared(sourceDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(sourceDir); err != nil { - t.Fatal(err) - } - }() - - // mount the shared directory to a target - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // next, make the target slave - if err := MakeSlave(targetDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() - - // mount in an outside path to a mounted path inside the _source_ - if err := Mount(outside1Dir, path.Join(sourceDir, 
"a"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(sourceDir, "a")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_ show in the _target_ - if _, err := os.Stat(outside1CheckPath); err != nil { - t.Fatal(err) - } - - // next mount outside2Dir into the _target_ - if err := Mount(outside2Dir, path.Join(targetDir, "b"), "none", "bind,rw"); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(path.Join(targetDir, "b")); err != nil { - t.Fatal(err) - } - }() - - // check that this file _does_not_ show in the _source_ - if _, err := os.Stat(outside2CheckPath); err != nil && !os.IsNotExist(err) { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not be visible, but is", outside2CheckPath) - } -} - -func TestSubtreeUnbindable(t *testing.T) { - tmp := path.Join(os.TempDir(), "mount-tests") - if err := os.MkdirAll(tmp, 0777); err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmp) - - var ( - sourceDir = path.Join(tmp, "source") - targetDir = path.Join(tmp, "target") - ) - if err := os.MkdirAll(sourceDir, 0777); err != nil { - t.Fatal(err) - } - if err := os.MkdirAll(targetDir, 0777); err != nil { - t.Fatal(err) - } - - // next, make the source unbindable - if err := MakeUnbindable(sourceDir); err != nil { - t.Fatal(err) - } - defer func() { - if err := Unmount(sourceDir); err != nil { - t.Fatal(err) - } - }() - - // then attempt to mount it to target. It should fail - if err := Mount(sourceDir, targetDir, "none", "bind,rw"); err != nil && err != syscall.EINVAL { - t.Fatal(err) - } else if err == nil { - t.Fatalf("%q should not have been bindable", sourceDir) - } - defer func() { - if err := Unmount(targetDir); err != nil { - t.Fatal(err) - } - }() -} - -func createFile(path string) error { - f, err := os.Create(path) - if err != nil { - return err - } - f.WriteString("hello world!") - return f.Close() -} diff --git a/pkg/namesgenerator/cmd/names-generator/main.go b/pkg/namesgenerator/cmd/names-generator/main.go deleted file mode 100644 index 18a939b70b..0000000000 --- a/pkg/namesgenerator/cmd/names-generator/main.go +++ /dev/null @@ -1,11 +0,0 @@ -package main - -import ( - "fmt" - - "github.com/docker/docker/pkg/namesgenerator" -) - -func main() { - fmt.Println(namesgenerator.GetRandomName(0)) -} diff --git a/pkg/namesgenerator/names-generator.go b/pkg/namesgenerator/names-generator.go deleted file mode 100644 index 4577ef8551..0000000000 --- a/pkg/namesgenerator/names-generator.go +++ /dev/null @@ -1,543 +0,0 @@ -package namesgenerator - -import ( - "fmt" - - "github.com/docker/docker/pkg/random" -) - -var ( - left = [...]string{ - "admiring", - "adoring", - "agitated", - "amazing", - "angry", - "awesome", - "backstabbing", - "berserk", - "big", - "boring", - "clever", - "cocky", - "compassionate", - "condescending", - "cranky", - "desperate", - "determined", - "distracted", - "dreamy", - "drunk", - "ecstatic", - "elated", - "elegant", - "evil", - "fervent", - "focused", - "furious", - "gigantic", - "gloomy", - "goofy", - "grave", - "happy", - "high", - "hopeful", - "hungry", - "infallible", - "jolly", - "jovial", - "kickass", - "lonely", - "loving", - "mad", - "modest", - "naughty", - "nauseous", - "nostalgic", - "peaceful", - "pedantic", - "pensive", - "prickly", - "reverent", - "romantic", - "sad", - "serene", - "sharp", - "sick", - "silly", - "sleepy", - "small", - "stoic", - "stupefied", - "suspicious", - "tender", - "thirsty", - "tiny", - "trusting", - "zen", - } - - // Docker, 
starting from 0.7.x, generates names from notable scientists and hackers. - // Please, for any amazing man that you add to the list, consider adding an equally amazing woman to it, and vice versa. - right = [...]string{ - // Muhammad ibn Jābir al-Ḥarrānī al-Battānī was a founding father of astronomy. https://en.wikipedia.org/wiki/Mu%E1%B8%A5ammad_ibn_J%C4%81bir_al-%E1%B8%A4arr%C4%81n%C4%AB_al-Batt%C4%81n%C4%AB - "albattani", - - // Frances E. Allen became the first female IBM Fellow in 1989. In 2006, she became the first female recipient of the ACM's Turing Award. https://en.wikipedia.org/wiki/Frances_E._Allen - "allen", - - // June Almeida - Scottish virologist who took the first pictures of the rubella virus - https://en.wikipedia.org/wiki/June_Almeida - "almeida", - - // Maria Gaetana Agnesi - Italian mathematician, philosopher, theologian and humanitarian. She was the first woman to write a mathematics handbook and the first woman appointed as a Mathematics Professor at a University. https://en.wikipedia.org/wiki/Maria_Gaetana_Agnesi - "agnesi", - - // Archimedes was a physicist, engineer and mathematician who invented too many things to list them here. https://en.wikipedia.org/wiki/Archimedes - "archimedes", - - // Maria Ardinghelli - Italian translator, mathematician and physicist - https://en.wikipedia.org/wiki/Maria_Ardinghelli - "ardinghelli", - - // Aryabhata - Ancient Indian mathematician-astronomer during 476-550 CE https://en.wikipedia.org/wiki/Aryabhata - "aryabhata", - - // Wanda Austin - President and CEO of The Aerospace Corporation, a leading architect for the US security space programs. https://en.wikipedia.org/wiki/Wanda_Austin - "austin", - - // Charles Babbage invented the concept of a programmable computer. https://en.wikipedia.org/wiki/Charles_Babbage. - "babbage", - - // Stefan Banach - Polish mathematician, one of the founders of modern functional analysis. https://en.wikipedia.org/wiki/Stefan_Banach - "banach", - - // John Bardeen co-invented the transistor - https://en.wikipedia.org/wiki/John_Bardeen - "bardeen", - - // Jean Bartik, born Betty Jean Jennings, was one of the original programmers for the ENIAC computer. https://en.wikipedia.org/wiki/Jean_Bartik - "bartik", - - // Laura Bassi, the world's first female professor https://en.wikipedia.org/wiki/Laura_Bassi - "bassi", - - // Alexander Graham Bell - an eminent Scottish-born scientist, inventor, engineer and innovator who is credited with inventing the first practical telephone - https://en.wikipedia.org/wiki/Alexander_Graham_Bell - "bell", - - // Homi J Bhabha - Indian nuclear physicist, founding director, and professor of physics at the Tata Institute of Fundamental Research. Colloquially known as "father of Indian nuclear programme" - https://en.wikipedia.org/wiki/Homi_J._Bhabha - "bhabha", - - // Bhaskara II - Ancient Indian mathematician-astronomer whose work on calculus predates Newton and Leibniz by over half a millennium - https://en.wikipedia.org/wiki/Bh%C4%81skara_II#Calculus - "bhaskara", - - // Elizabeth Blackwell - American doctor and first American woman to receive a medical degree - https://en.wikipedia.org/wiki/Elizabeth_Blackwell - "blackwell", - - // Niels Bohr is the father of quantum theory. https://en.wikipedia.org/wiki/Niels_Bohr. - "bohr", - - // Kathleen Booth is credited with writing the first assembly language.
https://en.wikipedia.org/wiki/Kathleen_Booth - "booth", - - // Anita Borg - founding director of the Institute for Women and Technology (IWT). https://en.wikipedia.org/wiki/Anita_Borg - "borg", - - // Satyendra Nath Bose - He provided the foundation for Bose–Einstein statistics and the theory of the Bose–Einstein condensate. - https://en.wikipedia.org/wiki/Satyendra_Nath_Bose - "bose", - - // Evelyn Boyd Granville - She was one of the first African-American women to receive a Ph.D. in mathematics; she earned it in 1949 from Yale University. https://en.wikipedia.org/wiki/Evelyn_Boyd_Granville - "boyd", - - // Brahmagupta - Ancient Indian mathematician during 598-670 CE who gave rules to compute with zero - https://en.wikipedia.org/wiki/Brahmagupta#Zero - "brahmagupta", - - // Walter Houser Brattain co-invented the transistor - https://en.wikipedia.org/wiki/Walter_Houser_Brattain - "brattain", - - // Emmett Brown invented time travel. https://en.wikipedia.org/wiki/Emmett_Brown (thanks Brian Goff) - "brown", - - // Rachel Carson - American marine biologist and conservationist whose book Silent Spring and other writings are credited with advancing the global environmental movement. https://en.wikipedia.org/wiki/Rachel_Carson - "carson", - - // Subrahmanyan Chandrasekhar - Astrophysicist known for his mathematical theory on different stages and evolution in structures of the stars. He won the Nobel Prize in Physics - https://en.wikipedia.org/wiki/Subrahmanyan_Chandrasekhar - "chandrasekhar", - - // Claude Shannon - The father of information theory and founder of digital circuit design theory. (https://en.wikipedia.org/wiki/Claude_Shannon) - "shannon", - - // Jane Colden - American botanist widely considered the first female American botanist - https://en.wikipedia.org/wiki/Jane_Colden - "colden", - - // Gerty Theresa Cori - American biochemist who became the third woman—and first American woman—to win a Nobel Prize in science, and the first woman to be awarded the Nobel Prize in Physiology or Medicine. Cori was born in Prague. https://en.wikipedia.org/wiki/Gerty_Cori - "cori", - - // Seymour Roger Cray was an American electrical engineer and supercomputer architect who designed a series of computers that were the fastest in the world for decades. https://en.wikipedia.org/wiki/Seymour_Cray - "cray", - - // This entry reflects a husband and wife team who worked together: - // Joan Curran was a Welsh scientist who developed radar and invented chaff, a radar countermeasure. https://en.wikipedia.org/wiki/Joan_Curran - // Samuel Curran was an Irish physicist who worked alongside his wife during WWII and invented the proximity fuse. https://en.wikipedia.org/wiki/Samuel_Curran - "curran", - - // Marie Curie discovered radioactivity. https://en.wikipedia.org/wiki/Marie_Curie. - "curie", - - // Charles Darwin established the principles of natural evolution. https://en.wikipedia.org/wiki/Charles_Darwin. - "darwin", - - // Leonardo Da Vinci invented too many things to list here. https://en.wikipedia.org/wiki/Leonardo_da_Vinci. - "davinci", - - // Edsger Wybe Dijkstra was a Dutch computer scientist and mathematical scientist. https://en.wikipedia.org/wiki/Edsger_W._Dijkstra. - "dijkstra", - - // Donna Dubinsky - played an integral role in the development of personal digital assistants (PDAs), serving as CEO of Palm, Inc. and co-founding Handspring.
https://en.wikipedia.org/wiki/Donna_Dubinsky - "dubinsky", - - // Annie Easley - She was a leading member of the team which developed software for the Centaur rocket stage and one of the first African-Americans in her field. https://en.wikipedia.org/wiki/Annie_Easley - "easley", - - // Albert Einstein invented the general theory of relativity. https://en.wikipedia.org/wiki/Albert_Einstein - "einstein", - - // Gertrude Elion - American biochemist, pharmacologist and the 1988 recipient of the Nobel Prize in Medicine - https://en.wikipedia.org/wiki/Gertrude_Elion - "elion", - - // Douglas Engelbart gave the mother of all demos: https://en.wikipedia.org/wiki/Douglas_Engelbart - "engelbart", - - // Euclid invented geometry. https://en.wikipedia.org/wiki/Euclid - "euclid", - - // Leonhard Euler invented large parts of modern mathematics. https://de.wikipedia.org/wiki/Leonhard_Euler - "euler", - - // Pierre de Fermat pioneered several aspects of modern mathematics. https://en.wikipedia.org/wiki/Pierre_de_Fermat - "fermat", - - // Enrico Fermi invented the first nuclear reactor. https://en.wikipedia.org/wiki/Enrico_Fermi. - "fermi", - - // Richard Feynman was a key contributor to quantum mechanics and particle physics. https://en.wikipedia.org/wiki/Richard_Feynman - "feynman", - - // Benjamin Franklin is famous for his experiments in electricity and the invention of the lightning rod. - "franklin", - - // Galileo was a founding father of modern astronomy, and faced politics and obscurantism to establish scientific truth. https://en.wikipedia.org/wiki/Galileo_Galilei - "galileo", - - // William Henry "Bill" Gates III is an American business magnate, philanthropist, investor, computer programmer, and inventor. https://en.wikipedia.org/wiki/Bill_Gates - "gates", - - // Adele Goldberg, was one of the designers and developers of the Smalltalk language. https://en.wikipedia.org/wiki/Adele_Goldberg_(computer_scientist) - "goldberg", - - // Adele Goldstine, born Adele Katz, wrote the complete technical description for the first electronic digital computer, ENIAC. https://en.wikipedia.org/wiki/Adele_Goldstine - "goldstine", - - // Shafi Goldwasser is a computer scientist known for creating theoretical foundations of modern cryptography. Winner of 2012 ACM Turing Award. https://en.wikipedia.org/wiki/Shafi_Goldwasser - "goldwasser", - - // James Golick, all around gangster. - "golick", - - // Jane Goodall - British primatologist, ethologist, and anthropologist who is considered to be the world's foremost expert on chimpanzees - https://en.wikipedia.org/wiki/Jane_Goodall - "goodall", - - // Margaret Hamilton - Director of the Software Engineering Division of the MIT Instrumentation Laboratory, which developed on-board flight software for the Apollo space program. https://en.wikipedia.org/wiki/Margaret_Hamilton_(scientist) - "hamilton", - - // Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. https://en.wikipedia.org/wiki/Stephen_Hawking - "hawking", - - // Werner Heisenberg was a founding father of quantum mechanics. https://en.wikipedia.org/wiki/Werner_Heisenberg - "heisenberg", - - // Jaroslav Heyrovský was the inventor of the polarographic method, father of the electroanalytical method, and recipient of the Nobel Prize in 1959. His main field of work was polarography. https://en.wikipedia.org/wiki/Jaroslav_Heyrovsk%C3%BD - "heyrovsky", - - // Dorothy Hodgkin was a British biochemist, credited with the development of protein crystallography. 
She was awarded the Nobel Prize in Chemistry in 1964. https://en.wikipedia.org/wiki/Dorothy_Hodgkin - "hodgkin", - - // Erna Schneider Hoover revolutionized modern communication by inventing a computerized telephone switching method. https://en.wikipedia.org/wiki/Erna_Schneider_Hoover - "hoover", - - // Grace Hopper developed the first compiler for a computer programming language and is credited with popularizing the term "debugging" for fixing computer glitches. https://en.wikipedia.org/wiki/Grace_Hopper - "hopper", - - // Frances Hugle was an American scientist, engineer, and inventor who contributed to the understanding of semiconductors, integrated circuitry, and the unique electrical principles of microscopic materials. https://en.wikipedia.org/wiki/Frances_Hugle - "hugle", - - // Hypatia - Greek Alexandrine Neoplatonist philosopher in Egypt who was one of the earliest mothers of mathematics - https://en.wikipedia.org/wiki/Hypatia - "hypatia", - - // Yeong-Sil Jang was a Korean scientist and astronomer during the Joseon Dynasty; he invented the first metal printing press and water gauge. https://en.wikipedia.org/wiki/Jang_Yeong-sil - "jang", - - // Betty Jennings - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Jean_Bartik - "jennings", - - // Mary Lou Jepsen was the founder and chief technology officer of One Laptop Per Child (OLPC), and the founder of Pixel Qi. https://en.wikipedia.org/wiki/Mary_Lou_Jepsen - "jepsen", - - // Irène Joliot-Curie - French scientist who was awarded the Nobel Prize for Chemistry in 1935. Daughter of Marie and Pierre Curie. https://en.wikipedia.org/wiki/Ir%C3%A8ne_Joliot-Curie - "joliot", - - // Karen Spärck Jones came up with the concept of inverse document frequency, which is used in most search engines today. https://en.wikipedia.org/wiki/Karen_Sp%C3%A4rck_Jones - "jones", - - // A. P. J. Abdul Kalam - is an Indian scientist aka Missile Man of India for his work on the development of ballistic missile and launch vehicle technology - https://en.wikipedia.org/wiki/A._P._J._Abdul_Kalam - "kalam", - - // Susan Kare created the icons and many of the interface elements for the original Apple Macintosh in the 1980s, and was an original employee of NeXT, working as the Creative Director. https://en.wikipedia.org/wiki/Susan_Kare - "kare", - - // Sister Mary Kenneth Keller became the first American woman to earn a PhD in Computer Science in 1965. https://en.wikipedia.org/wiki/Mary_Kenneth_Keller - "keller", - - // Har Gobind Khorana - Indian-American biochemist who shared the 1968 Nobel Prize in Physiology or Medicine - https://en.wikipedia.org/wiki/Har_Gobind_Khorana - "khorana", - - // Jack Kilby invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Jack_Kilby - "kilby", - - // Maria Kirch - German astronomer and first woman to discover a comet - https://en.wikipedia.org/wiki/Maria_Margarethe_Kirch - "kirch", - - // Donald Knuth - American computer scientist, author of "The Art of Computer Programming" and creator of the TeX typesetting system.
https://en.wikipedia.org/wiki/Donald_Knuth - "knuth", - - // Sophie Kowalevski - Russian mathematician responsible for important original contributions to analysis, differential equations and mechanics - https://en.wikipedia.org/wiki/Sofia_Kovalevskaya - "kowalevski", - - // Marie-Jeanne de Lalande - French astronomer, mathematician and cataloguer of stars - https://en.wikipedia.org/wiki/Marie-Jeanne_de_Lalande - "lalande", - - // Hedy Lamarr - Actress and inventor. The principles of her work are now incorporated into modern Wi-Fi, CDMA and Bluetooth technology. https://en.wikipedia.org/wiki/Hedy_Lamarr - "lamarr", - - // Leslie B. Lamport - American computer scientist. Lamport is best known for his seminal work in distributed systems and was the winner of the 2013 Turing Award. https://en.wikipedia.org/wiki/Leslie_Lamport - "lamport", - - // Mary Leakey - British paleoanthropologist who discovered the first fossilized Proconsul skull - https://en.wikipedia.org/wiki/Mary_Leakey - "leakey", - - // Henrietta Swan Leavitt - she was an American astronomer who discovered the relation between the luminosity and the period of Cepheid variable stars. https://en.wikipedia.org/wiki/Henrietta_Swan_Leavitt - "leavitt", - - // Ruth Lichterman - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Ruth_Teitelbaum - "lichterman", - - // Barbara Liskov - co-developed the Liskov substitution principle. Liskov was also the winner of the Turing Award in 2008. - https://en.wikipedia.org/wiki/Barbara_Liskov - "liskov", - - // Ada Lovelace invented the first algorithm. https://en.wikipedia.org/wiki/Ada_Lovelace (thanks James Turnbull) - "lovelace", - - // Auguste and Louis Lumière - the first filmmakers in history - https://en.wikipedia.org/wiki/Auguste_and_Louis_Lumi%C3%A8re - "lumiere", - - // Mahavira - Ancient Indian mathematician during 9th century AD who discovered basic algebraic identities - https://en.wikipedia.org/wiki/Mah%C4%81v%C4%ABra_(mathematician) - "mahavira", - - // Maria Mayer - American theoretical physicist and Nobel laureate in Physics for proposing the nuclear shell model of the atomic nucleus - https://en.wikipedia.org/wiki/Maria_Mayer - "mayer", - - // John McCarthy invented LISP: https://en.wikipedia.org/wiki/John_McCarthy_(computer_scientist) - "mccarthy", - - // Barbara McClintock - a distinguished American cytogeneticist, 1983 Nobel Laureate in Physiology or Medicine for discovering transposons. https://en.wikipedia.org/wiki/Barbara_McClintock - "mcclintock", - - // Malcolm McLean invented the modern shipping container: https://en.wikipedia.org/wiki/Malcom_McLean - "mclean", - - // Kay McNulty - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Kathleen_Antonelli - "mcnulty", - - // Lise Meitner - Austrian/Swedish physicist who was involved in the discovery of nuclear fission. The element meitnerium is named after her - https://en.wikipedia.org/wiki/Lise_Meitner - "meitner", - - // Carla Meninsky was the game designer and programmer for Atari 2600 games Dodge 'Em and Warlords. https://en.wikipedia.org/wiki/Carla_Meninsky - "meninsky", - - // Johanna Mestorf - German prehistoric archaeologist and first female museum director in Germany - https://en.wikipedia.org/wiki/Johanna_Mestorf - "mestorf", - - // Marvin Minsky - Pioneer in Artificial Intelligence, co-founder of MIT's AI Lab, won the Turing Award in 1969.
https://en.wikipedia.org/wiki/Marvin_Minsky - "minsky", - - // Maryam Mirzakhani - an Iranian mathematician and the first woman to win the Fields Medal. https://en.wikipedia.org/wiki/Maryam_Mirzakhani - "mirzakhani", - - // Samuel Morse - contributed to the invention of a single-wire telegraph system based on European telegraphs and was a co-developer of the Morse code - https://en.wikipedia.org/wiki/Samuel_Morse - "morse", - - // Ian Murdock - founder of the Debian project - https://en.wikipedia.org/wiki/Ian_Murdock - "murdock", - - // Isaac Newton invented classical mechanics and modern optics. https://en.wikipedia.org/wiki/Isaac_Newton - "newton", - - // Alfred Nobel - a Swedish chemist, engineer, innovator, and armaments manufacturer (inventor of dynamite) - https://en.wikipedia.org/wiki/Alfred_Nobel - "nobel", - - // Emmy Noether, German mathematician. Noether's Theorem is named after her. https://en.wikipedia.org/wiki/Emmy_Noether - "noether", - - // Poppy Northcutt was the first woman to work as part of NASA’s Mission Control. http://www.businessinsider.com/poppy-northcutt-helped-apollo-astronauts-2014-12?op=1 - "northcutt", - - // Robert Noyce invented silicon integrated circuits and gave Silicon Valley its name. - https://en.wikipedia.org/wiki/Robert_Noyce - "noyce", - - // Panini - Ancient Indian linguist and grammarian from the 4th century BCE who worked on the world's first formal system - https://en.wikipedia.org/wiki/P%C4%81%E1%B9%87ini#Comparison_with_modern_formal_systems - "panini", - - // Ambroise Pare invented modern surgery. https://en.wikipedia.org/wiki/Ambroise_Par%C3%A9 - "pare", - - // Louis Pasteur discovered vaccination, fermentation and pasteurization. https://en.wikipedia.org/wiki/Louis_Pasteur. - "pasteur", - - // Cecilia Payne-Gaposchkin was an astronomer and astrophysicist who, in 1925, proposed in her Ph.D. thesis an explanation for the composition of stars in terms of the relative abundances of hydrogen and helium. https://en.wikipedia.org/wiki/Cecilia_Payne-Gaposchkin - "payne", - - // Radia Perlman is a software designer and network engineer, most famous for her invention of the spanning-tree protocol (STP). https://en.wikipedia.org/wiki/Radia_Perlman - "perlman", - - // Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, UTF-8, and the Go programming language. https://en.wikipedia.org/wiki/Rob_Pike - "pike", - - // Henri Poincaré made fundamental contributions in several fields of mathematics. https://en.wikipedia.org/wiki/Henri_Poincar%C3%A9 - "poincare", - - // Laura Poitras is a director and producer whose work, made possible by open source crypto tools, advances the causes of truth and freedom of information by reporting disclosures by whistleblowers such as Edward Snowden. https://en.wikipedia.org/wiki/Laura_Poitras - "poitras", - - // Claudius Ptolemy - a Greco-Egyptian writer of Alexandria, known as a mathematician, astronomer, geographer, astrologer, and poet of a single epigram in the Greek Anthology - https://en.wikipedia.org/wiki/Ptolemy - "ptolemy", - - // C. V. Raman - Indian physicist who won the Nobel Prize in 1930 for proposing the Raman effect. - https://en.wikipedia.org/wiki/C._V._Raman - "raman", - - // Srinivasa Ramanujan - Indian mathematician and autodidact who made extraordinary contributions to mathematical analysis, number theory, infinite series, and continued fractions.
- https://en.wikipedia.org/wiki/Srinivasa_Ramanujan - "ramanujan", - - // Sally Kristen Ride was an American physicist and astronaut. She was the first American woman in space, and the youngest American astronaut. https://en.wikipedia.org/wiki/Sally_Ride - "ride", - - // Rita Levi-Montalcini - Won Nobel Prize in Physiology or Medicine jointly with colleague Stanley Cohen for the discovery of nerve growth factor (https://en.wikipedia.org/wiki/Rita_Levi-Montalcini) - "montalcini", - - // Dennis Ritchie - co-creator of UNIX and the C programming language. - https://en.wikipedia.org/wiki/Dennis_Ritchie - "ritchie", - - // Wilhelm Conrad Röntgen - German physicist who was awarded the first Nobel Prize in Physics in 1901 for the discovery of X-rays (Röntgen rays). https://en.wikipedia.org/wiki/Wilhelm_R%C3%B6ntgen - "roentgen", - - // Rosalind Franklin - British biophysicist and X-ray crystallographer whose research was critical to the understanding of DNA - https://en.wikipedia.org/wiki/Rosalind_Franklin - "rosalind", - - // Meghnad Saha - Indian astrophysicist best known for his development of the Saha equation, used to describe chemical and physical conditions in stars - https://en.wikipedia.org/wiki/Meghnad_Saha - "saha", - - // Jean E. Sammet developed FORMAC, the first widely used computer language for symbolic manipulation of mathematical formulas. https://en.wikipedia.org/wiki/Jean_E._Sammet - "sammet", - - // Carol Shaw - Originally an Atari employee, Carol Shaw is said to be the first female video game designer. https://en.wikipedia.org/wiki/Carol_Shaw_(video_game_designer) - "shaw", - - // Dame Stephanie "Steve" Shirley - Founded a software company in 1962 employing women working from home. https://en.wikipedia.org/wiki/Steve_Shirley - "shirley", - - // William Shockley co-invented the transistor - https://en.wikipedia.org/wiki/William_Shockley - "shockley", - - // Françoise Barré-Sinoussi - French virologist and Nobel Prize Laureate in Physiology or Medicine; her work was fundamental in identifying HIV as the cause of AIDS. https://en.wikipedia.org/wiki/Fran%C3%A7oise_Barr%C3%A9-Sinoussi - "sinoussi", - - // Betty Snyder - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Betty_Holberton - "snyder", - - // Frances Spence - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Frances_Spence - "spence", - - // Richard Matthew Stallman - the founder of the Free Software movement, the GNU project, the Free Software Foundation, and the League for Programming Freedom. He also invented the concept of copyleft to protect the ideals of this movement, and enshrined this concept in the widely-used GPL (General Public License) for software. https://en.wikiquote.org/wiki/Richard_Stallman - "stallman", - - // Michael Stonebraker is a database research pioneer and architect of Ingres, Postgres, VoltDB and SciDB. Winner of 2014 ACM Turing Award. https://en.wikipedia.org/wiki/Michael_Stonebraker - "stonebraker", - - // Janese Swanson (with others) developed the first of the Carmen Sandiego games. She went on to found Girl Tech. https://en.wikipedia.org/wiki/Janese_Swanson - "swanson", - - // Aaron Swartz was influential in creating RSS, Markdown, Creative Commons, Reddit, and much of the internet as we know it today. He was devoted to freedom of information on the web. 
https://en.wikiquote.org/wiki/Aaron_Swartz - "swartz", - - // Bertha Swirles was a theoretical physicist who made a number of contributions to early quantum theory. https://en.wikipedia.org/wiki/Bertha_Swirles - "swirles", - - // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla - "tesla", - - // Ken Thompson - co-creator of UNIX and the C programming language - https://en.wikipedia.org/wiki/Ken_Thompson - "thompson", - - // Linus Torvalds invented Linux and Git. https://en.wikipedia.org/wiki/Linus_Torvalds - "torvalds", - - // Alan Turing was a founding father of computer science. https://en.wikipedia.org/wiki/Alan_Turing. - "turing", - - // Varahamihira - Ancient Indian mathematician who discovered trigonometric formulae during 505-587 CE - https://en.wikipedia.org/wiki/Var%C4%81hamihira#Contributions - "varahamihira", - - // Sir Mokshagundam Visvesvaraya - is a notable Indian engineer. He is a recipient of the Indian Republic's highest honour, the Bharat Ratna, in 1955. His birthday, 15 September, is celebrated as Engineer's Day in India in his memory - https://en.wikipedia.org/wiki/Visvesvaraya - "visvesvaraya", - - // Christiane Nüsslein-Volhard - German biologist, won Nobel Prize in Physiology or Medicine in 1995 for research on the genetic control of embryonic development. https://en.wikipedia.org/wiki/Christiane_N%C3%BCsslein-Volhard - "volhard", - - // Marlyn Wescoff - one of the original programmers of the ENIAC. https://en.wikipedia.org/wiki/ENIAC - https://en.wikipedia.org/wiki/Marlyn_Meltzer - "wescoff", - - // Roberta Williams did pioneering work in graphical adventure games for personal computers, particularly the King's Quest series. https://en.wikipedia.org/wiki/Roberta_Williams - "williams", - - // Sophie Wilson designed the first Acorn Micro-Computer and the instruction set for ARM processors. https://en.wikipedia.org/wiki/Sophie_Wilson - "wilson", - - // Jeannette Wing - co-developed the Liskov substitution principle. - https://en.wikipedia.org/wiki/Jeannette_Wing - "wing", - - // Steve Wozniak invented the Apple I and Apple II. https://en.wikipedia.org/wiki/Steve_Wozniak - "wozniak", - - // The Wright brothers, Orville and Wilbur - credited with inventing and building the world's first successful airplane and making the first controlled, powered and sustained heavier-than-air human flight - https://en.wikipedia.org/wiki/Wright_brothers - "wright", - - // Rosalyn Sussman Yalow - American medical physicist, and a co-winner of the 1977 Nobel Prize in Physiology or Medicine for development of the radioimmunoassay technique. https://en.wikipedia.org/wiki/Rosalyn_Sussman_Yalow - "yalow", - - // Ada Yonath - an Israeli crystallographer, the first woman from the Middle East to win a Nobel prize in the sciences. https://en.wikipedia.org/wiki/Ada_Yonath - "yonath", - } -) - -// GetRandomName generates a random name from the list of adjectives and surnames in this package -// formatted as "adjective_surname". For example 'focused_turing'.
-// If retry is non-zero, a random integer between 0 and 9 is appended to the
-// name, e.g. `focused_turing3`.
-func GetRandomName(retry int) string {
-	rnd := random.Rand
-begin:
-	name := fmt.Sprintf("%s_%s", left[rnd.Intn(len(left))], right[rnd.Intn(len(right))])
-	if name == "boring_wozniak" /* Steve Wozniak is not boring */ {
-		goto begin
-	}
-
-	if retry > 0 {
-		name = fmt.Sprintf("%s%d", name, rnd.Intn(10))
-	}
-	return name
-}
diff --git a/pkg/namesgenerator/names-generator_test.go b/pkg/namesgenerator/names-generator_test.go
deleted file mode 100644
index d1a94977d7..0000000000
--- a/pkg/namesgenerator/names-generator_test.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package namesgenerator
-
-import (
-	"strings"
-	"testing"
-)
-
-func TestNameFormat(t *testing.T) {
-	name := GetRandomName(0)
-	if !strings.Contains(name, "_") {
-		t.Fatalf("Generated name does not contain an underscore")
-	}
-	if strings.ContainsAny(name, "0123456789") {
-		t.Fatalf("Generated name contains numbers!")
-	}
-}
-
-func TestNameRetries(t *testing.T) {
-	name := GetRandomName(1)
-	if !strings.Contains(name, "_") {
-		t.Fatalf("Generated name does not contain an underscore")
-	}
-	if !strings.ContainsAny(name, "0123456789") {
-		t.Fatalf("Generated name doesn't contain a number")
-	}
-
-}
diff --git a/pkg/parsers/kernel/kernel.go b/pkg/parsers/kernel/kernel.go
deleted file mode 100644
index 7738fc7411..0000000000
--- a/pkg/parsers/kernel/kernel.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// +build !windows
-
-// Package kernel provides helper functions to get, parse and compare kernel
-// versions for different platforms.
-package kernel
-
-import (
-	"errors"
-	"fmt"
-)
-
-// VersionInfo holds information about the kernel.
-type VersionInfo struct {
-	Kernel int    // Version of the kernel (e.g. 4.1.2-generic -> 4)
-	Major  int    // Major part of the kernel version (e.g. 4.1.2-generic -> 1)
-	Minor  int    // Minor part of the kernel version (e.g. 4.1.2-generic -> 2)
-	Flavor string // Flavor of the kernel version (e.g. 4.1.2-generic -> generic)
-}
-
-func (k *VersionInfo) String() string {
-	return fmt.Sprintf("%d.%d.%d%s", k.Kernel, k.Major, k.Minor, k.Flavor)
-}
-
-// CompareKernelVersion compares two kernel.VersionInfo structs.
-// Returns -1 if a < b, 0 if a == b, 1 if a > b.
-func CompareKernelVersion(a, b VersionInfo) int {
-	if a.Kernel < b.Kernel {
-		return -1
-	} else if a.Kernel > b.Kernel {
-		return 1
-	}
-
-	if a.Major < b.Major {
-		return -1
-	} else if a.Major > b.Major {
-		return 1
-	}
-
-	if a.Minor < b.Minor {
-		return -1
-	} else if a.Minor > b.Minor {
-		return 1
-	}
-
-	return 0
-}
-
-// ParseRelease parses a string and creates a VersionInfo based on it.
-func ParseRelease(release string) (*VersionInfo, error) {
-	var (
-		kernel, major, minor, parsed int
-		flavor, partial              string
-	)
-
-	// Ignore error from Sscanf to allow an empty flavor. Instead, just
-	// make sure we got all the version numbers.
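An aside on `GetRandomName` above: a minimal usage sketch, assuming the package's historical import path `github.com/docker/docker/pkg/namesgenerator`:

```go
package main

import (
	"fmt"

	// Assumed import path; the package was vendored at pkg/namesgenerator.
	"github.com/docker/docker/pkg/namesgenerator"
)

func main() {
	// No retry suffix: plain "adjective_surname", e.g. "focused_turing".
	fmt.Println(namesgenerator.GetRandomName(0))

	// With retry > 0 a single digit is appended, e.g. "focused_turing3",
	// which callers use to dodge name collisions on a second attempt.
	fmt.Println(namesgenerator.GetRandomName(1))
}
```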
- parsed, _ = fmt.Sscanf(release, "%d.%d%s", &kernel, &major, &partial) - if parsed < 2 { - return nil, errors.New("Can't parse kernel version " + release) - } - - // sometimes we have 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64 - parsed, _ = fmt.Sscanf(partial, ".%d%s", &minor, &flavor) - if parsed < 1 { - flavor = partial - } - - return &VersionInfo{ - Kernel: kernel, - Major: major, - Minor: minor, - Flavor: flavor, - }, nil -} diff --git a/pkg/parsers/kernel/kernel_darwin.go b/pkg/parsers/kernel/kernel_darwin.go deleted file mode 100644 index 71f205b285..0000000000 --- a/pkg/parsers/kernel/kernel_darwin.go +++ /dev/null @@ -1,56 +0,0 @@ -// +build darwin - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel - -import ( - "fmt" - "os/exec" - "strings" - - "github.com/mattn/go-shellwords" -) - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - release, err := getRelease() - if err != nil { - return nil, err - } - - return ParseRelease(release) -} - -// getRelease uses `system_profiler SPSoftwareDataType` to get OSX kernel version -func getRelease() (string, error) { - cmd := exec.Command("system_profiler", "SPSoftwareDataType") - osName, err := cmd.Output() - if err != nil { - return "", err - } - - var release string - data := strings.Split(string(osName), "\n") - for _, line := range data { - if strings.Contains(line, "Kernel Version") { - // It has the format like ' Kernel Version: Darwin 14.5.0' - content := strings.SplitN(line, ":", 2) - if len(content) != 2 { - return "", fmt.Errorf("Kernel Version is invalid") - } - - prettyNames, err := shellwords.Parse(content[1]) - if err != nil { - return "", fmt.Errorf("Kernel Version is invalid: %s", err.Error()) - } - - if len(prettyNames) != 2 { - return "", fmt.Errorf("Kernel Version needs to be 'Darwin x.x.x' ") - } - release = prettyNames[1] - } - } - - return release, nil -} diff --git a/pkg/parsers/kernel/kernel_unix.go b/pkg/parsers/kernel/kernel_unix.go deleted file mode 100644 index 54a89d28c6..0000000000 --- a/pkg/parsers/kernel/kernel_unix.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build linux freebsd solaris - -// Package kernel provides helper function to get, parse and compare kernel -// versions for different platforms. -package kernel - -import ( - "bytes" -) - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - uts, err := uname() - if err != nil { - return nil, err - } - - release := make([]byte, len(uts.Release)) - - i := 0 - for _, c := range uts.Release { - release[i] = byte(c) - i++ - } - - // Remove the \x00 from the release for Atoi to parse correctly - release = release[:bytes.IndexByte(release, 0)] - - return ParseRelease(string(release)) -} diff --git a/pkg/parsers/kernel/kernel_unix_test.go b/pkg/parsers/kernel/kernel_unix_test.go deleted file mode 100644 index dc8c0e307b..0000000000 --- a/pkg/parsers/kernel/kernel_unix_test.go +++ /dev/null @@ -1,96 +0,0 @@ -// +build !windows - -package kernel - -import ( - "fmt" - "testing" -) - -func assertParseRelease(t *testing.T, release string, b *VersionInfo, result int) { - var ( - a *VersionInfo - ) - a, _ = ParseRelease(release) - - if r := CompareKernelVersion(*a, *b); r != result { - t.Fatalf("Unexpected kernel version comparison result for (%v,%v). Found %d, expected %d", release, b, r, result) - } - if a.Flavor != b.Flavor { - t.Fatalf("Unexpected parsed kernel flavor. 
Found %s, expected %s", a.Flavor, b.Flavor)
-	}
-}
-
-// TestParseRelease tests the ParseRelease() function
-func TestParseRelease(t *testing.T) {
-	assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 3, Major: 8, Minor: 0}, 0)
-	assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
-	assertParseRelease(t, "3.4.54.longterm-1", &VersionInfo{Kernel: 3, Major: 4, Minor: 54, Flavor: ".longterm-1"}, 0)
-	assertParseRelease(t, "3.8.0-19-generic", &VersionInfo{Kernel: 3, Major: 8, Minor: 0, Flavor: "-19-generic"}, 0)
-	assertParseRelease(t, "3.12.8tag", &VersionInfo{Kernel: 3, Major: 12, Minor: 8, Flavor: "tag"}, 0)
-	assertParseRelease(t, "3.12-1-amd64", &VersionInfo{Kernel: 3, Major: 12, Minor: 0, Flavor: "-1-amd64"}, 0)
-	assertParseRelease(t, "3.8.0", &VersionInfo{Kernel: 4, Major: 8, Minor: 0}, -1)
-	// Errors
-	invalids := []string{
-		"3",
-		"a",
-		"a.a",
-		"a.a.a-a",
-	}
-	for _, invalid := range invalids {
-		expectedMessage := fmt.Sprintf("Can't parse kernel version %v", invalid)
-		if _, err := ParseRelease(invalid); err == nil || err.Error() != expectedMessage {
-			t.Fatalf("Expected error %q when parsing %q, got %v", expectedMessage, invalid, err)
-		}
-	}
-}
-
-func assertKernelVersion(t *testing.T, a, b VersionInfo, result int) {
-	if r := CompareKernelVersion(a, b); r != result {
-		t.Fatalf("Unexpected kernel version comparison result. Found %d, expected %d", r, result)
-	}
-}
-
-// TestCompareKernelVersion tests the CompareKernelVersion() function
-func TestCompareKernelVersion(t *testing.T) {
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		0)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 2, Major: 6, Minor: 0},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		-1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		VersionInfo{Kernel: 2, Major: 6, Minor: 0},
-		1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		0)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 5},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 0, Minor: 20},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		-1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 7, Minor: 20},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		-1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 20},
-		VersionInfo{Kernel: 3, Major: 7, Minor: 0},
-		1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 20},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		1)
-	assertKernelVersion(t,
-		VersionInfo{Kernel: 3, Major: 8, Minor: 0},
-		VersionInfo{Kernel: 3, Major: 8, Minor: 20},
-		-1)
-}
diff --git a/pkg/parsers/kernel/kernel_windows.go b/pkg/parsers/kernel/kernel_windows.go
deleted file mode 100644
index 80fab8ff64..0000000000
--- a/pkg/parsers/kernel/kernel_windows.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// +build windows
-
-package kernel
-
-import (
-	"fmt"
-	"syscall"
-	"unsafe"
-)
-
-// VersionInfo holds information about the kernel.
-type VersionInfo struct {
-	kvi   string // Version of the kernel (e.g. 6.1.7601.17592 -> 6)
-	major int    // Major part of the kernel version (e.g. 6.1.7601.17592 -> 1)
-	minor int    // Minor part of the kernel version (e.g. 6.1.7601.17592 -> 7601)
-	build int    // Build number of the kernel version (e.g.
6.1.7601.17592 -> 17592) -} - -func (k *VersionInfo) String() string { - return fmt.Sprintf("%d.%d %d (%s)", k.major, k.minor, k.build, k.kvi) -} - -// GetKernelVersion gets the current kernel version. -func GetKernelVersion() (*VersionInfo, error) { - - var ( - h syscall.Handle - dwVersion uint32 - err error - ) - - KVI := &VersionInfo{"Unknown", 0, 0, 0} - - if err = syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE, - syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`), - 0, - syscall.KEY_READ, - &h); err != nil { - return KVI, err - } - defer syscall.RegCloseKey(h) - - var buf [1 << 10]uint16 - var typ uint32 - n := uint32(len(buf) * 2) // api expects array of bytes, not uint16 - - if err = syscall.RegQueryValueEx(h, - syscall.StringToUTF16Ptr("BuildLabEx"), - nil, - &typ, - (*byte)(unsafe.Pointer(&buf[0])), - &n); err != nil { - return KVI, err - } - - KVI.kvi = syscall.UTF16ToString(buf[:]) - - // Important - docker.exe MUST be manifested for this API to return - // the correct information. - if dwVersion, err = syscall.GetVersion(); err != nil { - return KVI, err - } - - KVI.major = int(dwVersion & 0xFF) - KVI.minor = int((dwVersion & 0XFF00) >> 8) - KVI.build = int((dwVersion & 0xFFFF0000) >> 16) - - return KVI, nil -} diff --git a/pkg/parsers/kernel/uname_linux.go b/pkg/parsers/kernel/uname_linux.go deleted file mode 100644 index bb9b32641e..0000000000 --- a/pkg/parsers/kernel/uname_linux.go +++ /dev/null @@ -1,19 +0,0 @@ -package kernel - -import ( - "syscall" -) - -// Utsname represents the system name structure. -// It is passthrough for syscall.Utsname in order to make it portable with -// other platforms where it is not available. -type Utsname syscall.Utsname - -func uname() (*syscall.Utsname, error) { - uts := &syscall.Utsname{} - - if err := syscall.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/pkg/parsers/kernel/uname_solaris.go b/pkg/parsers/kernel/uname_solaris.go deleted file mode 100644 index 49370bd3dd..0000000000 --- a/pkg/parsers/kernel/uname_solaris.go +++ /dev/null @@ -1,14 +0,0 @@ -package kernel - -import ( - "golang.org/x/sys/unix" -) - -func uname() (*unix.Utsname, error) { - uts := &unix.Utsname{} - - if err := unix.Uname(uts); err != nil { - return nil, err - } - return uts, nil -} diff --git a/pkg/parsers/kernel/uname_unsupported.go b/pkg/parsers/kernel/uname_unsupported.go deleted file mode 100644 index 1da3f239fa..0000000000 --- a/pkg/parsers/kernel/uname_unsupported.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !linux,!solaris - -package kernel - -import ( - "errors" -) - -// Utsname represents the system name structure. -// It is defined here to make it portable as it is available on linux but not -// on windows. -type Utsname struct { - Release [65]byte -} - -func uname() (*Utsname, error) { - return nil, errors.New("Kernel version detection is available only on linux") -} diff --git a/pkg/parsers/operatingsystem/operatingsystem_linux.go b/pkg/parsers/operatingsystem/operatingsystem_linux.go deleted file mode 100644 index e04a3499af..0000000000 --- a/pkg/parsers/operatingsystem/operatingsystem_linux.go +++ /dev/null @@ -1,77 +0,0 @@ -// Package operatingsystem provides helper function to get the operating system -// name for different platforms. 
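To make the two-step `Sscanf` scheme in `ParseRelease` above concrete, here is a small sketch, assuming the import path `github.com/docker/docker/pkg/parsers/kernel`:

```go
package main

import (
	"fmt"

	// Assumed import path for the kernel package removed above.
	"github.com/docker/docker/pkg/parsers/kernel"
)

func main() {
	// "3.12-1-amd64" carries no third number, so Minor stays 0 and the
	// remainder becomes the flavor, matching the comment in ParseRelease.
	a, _ := kernel.ParseRelease("3.12-1-amd64") // errors elided for brevity
	b, _ := kernel.ParseRelease("3.8.0-19-generic")

	fmt.Println(a) // 3.12.0-1-amd64
	fmt.Println(b) // 3.8.0-19-generic

	// Prints 1, meaning a > b: the kernel numbers tie and 12 > 8 on Major.
	fmt.Println(kernel.CompareKernelVersion(*a, *b))
}
```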
-package operatingsystem - -import ( - "bufio" - "bytes" - "fmt" - "io/ioutil" - "os" - "strings" - - "github.com/mattn/go-shellwords" -) - -var ( - // file to use to detect if the daemon is running in a container - proc1Cgroup = "/proc/1/cgroup" - - // file to check to determine Operating System - etcOsRelease = "/etc/os-release" - - // used by stateless systems like Clear Linux - altOsRelease = "/usr/lib/os-release" -) - -// GetOperatingSystem gets the name of the current operating system. -func GetOperatingSystem() (string, error) { - osReleaseFile, err := os.Open(etcOsRelease) - if err != nil { - if !os.IsNotExist(err) { - return "", fmt.Errorf("Error opening %s: %v", etcOsRelease, err) - } - osReleaseFile, err = os.Open(altOsRelease) - if err != nil { - return "", fmt.Errorf("Error opening %s: %v", altOsRelease, err) - } - } - defer osReleaseFile.Close() - - var prettyName string - scanner := bufio.NewScanner(osReleaseFile) - for scanner.Scan() { - line := scanner.Text() - if strings.HasPrefix(line, "PRETTY_NAME=") { - data := strings.SplitN(line, "=", 2) - prettyNames, err := shellwords.Parse(data[1]) - if err != nil { - return "", fmt.Errorf("PRETTY_NAME is invalid: %s", err.Error()) - } - if len(prettyNames) != 1 { - return "", fmt.Errorf("PRETTY_NAME needs to be enclosed by quotes if they have spaces: %s", data[1]) - } - prettyName = prettyNames[0] - } - } - if prettyName != "" { - return prettyName, nil - } - // If not set, defaults to PRETTY_NAME="Linux" - // c.f. http://www.freedesktop.org/software/systemd/man/os-release.html - return "Linux", nil -} - -// IsContainerized returns true if we are running inside a container. -func IsContainerized() (bool, error) { - b, err := ioutil.ReadFile(proc1Cgroup) - if err != nil { - return false, err - } - for _, line := range bytes.Split(b, []byte{'\n'}) { - if len(line) > 0 && !bytes.HasSuffix(line, []byte{'/'}) && !bytes.HasSuffix(line, []byte("init.scope")) { - return true, nil - } - } - return false, nil -} diff --git a/pkg/parsers/operatingsystem/operatingsystem_solaris.go b/pkg/parsers/operatingsystem/operatingsystem_solaris.go deleted file mode 100644 index d08ad14860..0000000000 --- a/pkg/parsers/operatingsystem/operatingsystem_solaris.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build solaris,cgo - -package operatingsystem - -/* -#include -*/ -import "C" - -import ( - "bytes" - "errors" - "io/ioutil" -) - -var etcOsRelease = "/etc/release" - -// GetOperatingSystem gets the name of the current operating system. -func GetOperatingSystem() (string, error) { - b, err := ioutil.ReadFile(etcOsRelease) - if err != nil { - return "", err - } - if i := bytes.Index(b, []byte("\n")); i >= 0 { - b = bytes.Trim(b[:i], " ") - return string(b), nil - } - return "", errors.New("release not found") -} - -// IsContainerized returns true if we are running inside a container. -func IsContainerized() (bool, error) { - if C.getzoneid() != 0 { - return true, nil - } - return false, nil -} diff --git a/pkg/parsers/operatingsystem/operatingsystem_unix.go b/pkg/parsers/operatingsystem/operatingsystem_unix.go deleted file mode 100644 index bc91c3c533..0000000000 --- a/pkg/parsers/operatingsystem/operatingsystem_unix.go +++ /dev/null @@ -1,25 +0,0 @@ -// +build freebsd darwin - -package operatingsystem - -import ( - "errors" - "os/exec" -) - -// GetOperatingSystem gets the name of the current operating system. 
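The container check in `IsContainerized` above reduces to one heuristic: the process is assumed to be containerized unless every non-empty line of `/proc/1/cgroup` ends in `/` or in `init.scope`. A self-contained sketch of that rule:

```go
package main

import (
	"bytes"
	"fmt"
)

// looksContainerized applies the same suffix rule as IsContainerized above
// to an already-read copy of /proc/1/cgroup.
func looksContainerized(proc1Cgroup []byte) bool {
	for _, line := range bytes.Split(proc1Cgroup, []byte{'\n'}) {
		if len(line) > 0 &&
			!bytes.HasSuffix(line, []byte{'/'}) &&
			!bytes.HasSuffix(line, []byte("init.scope")) {
			return true
		}
	}
	return false
}

func main() {
	host := []byte("2:cpuset:/\n1:name=systemd:/init.scope\n")
	ctr := []byte("2:cpuset:/docker/3cef1b53c50b\n")
	fmt.Println(looksContainerized(host)) // false
	fmt.Println(looksContainerized(ctr))  // true
}
```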
-func GetOperatingSystem() (string, error) { - cmd := exec.Command("uname", "-s") - osName, err := cmd.Output() - if err != nil { - return "", err - } - return string(osName), nil -} - -// IsContainerized returns true if we are running inside a container. -// No-op on FreeBSD and Darwin, always returns false. -func IsContainerized() (bool, error) { - // TODO: Implement jail detection for freeBSD - return false, errors.New("Cannot detect if we are in container") -} diff --git a/pkg/parsers/operatingsystem/operatingsystem_unix_test.go b/pkg/parsers/operatingsystem/operatingsystem_unix_test.go deleted file mode 100644 index e7120c65c4..0000000000 --- a/pkg/parsers/operatingsystem/operatingsystem_unix_test.go +++ /dev/null @@ -1,247 +0,0 @@ -// +build linux freebsd - -package operatingsystem - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestGetOperatingSystem(t *testing.T) { - var backup = etcOsRelease - - invalids := []struct { - content string - errorExpected string - }{ - { - `PRETTY_NAME=Source Mage GNU/Linux -PRETTY_NAME=Ubuntu 14.04.LTS`, - "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Source Mage GNU/Linux", - }, - { - `PRETTY_NAME="Ubuntu Linux -PRETTY_NAME=Ubuntu 14.04.LTS`, - "PRETTY_NAME is invalid: invalid command line string", - }, - { - `PRETTY_NAME=Ubuntu' -PRETTY_NAME=Ubuntu 14.04.LTS`, - "PRETTY_NAME is invalid: invalid command line string", - }, - { - `PRETTY_NAME' -PRETTY_NAME=Ubuntu 14.04.LTS`, - "PRETTY_NAME needs to be enclosed by quotes if they have spaces: Ubuntu 14.04.LTS", - }, - } - - valids := []struct { - content string - expected string - }{ - { - `NAME="Ubuntu" -PRETTY_NAME_AGAIN="Ubuntu 14.04.LTS" -VERSION="14.04, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -VERSION_ID="14.04" -HOME_URL="http://www.ubuntu.com/" -SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, - "Linux", - }, - { - `NAME="Ubuntu" -VERSION="14.04, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -VERSION_ID="14.04" -HOME_URL="http://www.ubuntu.com/" -SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, - "Linux", - }, - { - `NAME=Gentoo -ID=gentoo -PRETTY_NAME="Gentoo/Linux" -ANSI_COLOR="1;32" -HOME_URL="http://www.gentoo.org/" -SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" -BUG_REPORT_URL="https://bugs.gentoo.org/" -`, - "Gentoo/Linux", - }, - { - `NAME="Ubuntu" -VERSION="14.04, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -PRETTY_NAME="Ubuntu 14.04 LTS" -VERSION_ID="14.04" -HOME_URL="http://www.ubuntu.com/" -SUPPORT_URL="http://help.ubuntu.com/" -BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"`, - "Ubuntu 14.04 LTS", - }, - { - `NAME="Ubuntu" -VERSION="14.04, Trusty Tahr" -ID=ubuntu -ID_LIKE=debian -PRETTY_NAME='Ubuntu 14.04 LTS'`, - "Ubuntu 14.04 LTS", - }, - { - `PRETTY_NAME=Source -NAME="Source Mage"`, - "Source", - }, - { - `PRETTY_NAME=Source -PRETTY_NAME="Source Mage"`, - "Source Mage", - }, - } - - dir := os.TempDir() - etcOsRelease = filepath.Join(dir, "etcOsRelease") - - defer func() { - os.Remove(etcOsRelease) - etcOsRelease = backup - }() - - for _, elt := range invalids { - if err := ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { - t.Fatalf("failed to write to %s: %v", etcOsRelease, err) - } - s, err := GetOperatingSystem() - if err == nil || err.Error() != elt.errorExpected { - t.Fatalf("Expected an error %q, got %q (err: %v)", elt.errorExpected, s, err) - } - } - - for _, elt := range valids { - if err := 
ioutil.WriteFile(etcOsRelease, []byte(elt.content), 0600); err != nil { - t.Fatalf("failed to write to %s: %v", etcOsRelease, err) - } - s, err := GetOperatingSystem() - if err != nil || s != elt.expected { - t.Fatalf("Expected %q, got %q (err: %v)", elt.expected, s, err) - } - } -} - -func TestIsContainerized(t *testing.T) { - var ( - backup = proc1Cgroup - nonContainerizedProc1Cgroupsystemd226 = []byte(`9:memory:/init.scope -8:net_cls,net_prio:/ -7:cpuset:/ -6:freezer:/ -5:devices:/init.scope -4:blkio:/init.scope -3:cpu,cpuacct:/init.scope -2:perf_event:/ -1:name=systemd:/init.scope -`) - nonContainerizedProc1Cgroup = []byte(`14:name=systemd:/ -13:hugetlb:/ -12:net_prio:/ -11:perf_event:/ -10:bfqio:/ -9:blkio:/ -8:net_cls:/ -7:freezer:/ -6:devices:/ -5:memory:/ -4:cpuacct:/ -3:cpu:/ -2:cpuset:/ -`) - containerizedProc1Cgroup = []byte(`9:perf_event:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -8:blkio:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -7:net_cls:/ -6:freezer:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -5:devices:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -4:memory:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -3:cpuacct:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -2:cpu:/docker/3cef1b53c50b0fa357d994f8a1a8cd783c76bbf4f5dd08b226e38a8bd331338d -1:cpuset:/`) - ) - - dir := os.TempDir() - proc1Cgroup = filepath.Join(dir, "proc1Cgroup") - - defer func() { - os.Remove(proc1Cgroup) - proc1Cgroup = backup - }() - - if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroup, 0600); err != nil { - t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) - } - inContainer, err := IsContainerized() - if err != nil { - t.Fatal(err) - } - if inContainer { - t.Fatal("Wrongly assuming containerized") - } - - if err := ioutil.WriteFile(proc1Cgroup, nonContainerizedProc1Cgroupsystemd226, 0600); err != nil { - t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) - } - inContainer, err = IsContainerized() - if err != nil { - t.Fatal(err) - } - if inContainer { - t.Fatal("Wrongly assuming containerized for systemd /init.scope cgroup layout") - } - - if err := ioutil.WriteFile(proc1Cgroup, containerizedProc1Cgroup, 0600); err != nil { - t.Fatalf("failed to write to %s: %v", proc1Cgroup, err) - } - inContainer, err = IsContainerized() - if err != nil { - t.Fatal(err) - } - if !inContainer { - t.Fatal("Wrongly assuming non-containerized") - } -} - -func TestOsReleaseFallback(t *testing.T) { - var backup = etcOsRelease - var altBackup = altOsRelease - dir := os.TempDir() - etcOsRelease = filepath.Join(dir, "etcOsRelease") - altOsRelease = filepath.Join(dir, "altOsRelease") - - defer func() { - os.Remove(dir) - etcOsRelease = backup - altOsRelease = altBackup - }() - content := `NAME=Gentoo -ID=gentoo -PRETTY_NAME="Gentoo/Linux" -ANSI_COLOR="1;32" -HOME_URL="http://www.gentoo.org/" -SUPPORT_URL="http://www.gentoo.org/main/en/support.xml" -BUG_REPORT_URL="https://bugs.gentoo.org/" -` - if err := ioutil.WriteFile(altOsRelease, []byte(content), 0600); err != nil { - t.Fatalf("failed to write to %s: %v", etcOsRelease, err) - } - s, err := GetOperatingSystem() - if err != nil || s != "Gentoo/Linux" { - t.Fatalf("Expected %q, got %q (err: %v)", "Gentoo/Linux", s, err) - } -} diff --git a/pkg/parsers/operatingsystem/operatingsystem_windows.go b/pkg/parsers/operatingsystem/operatingsystem_windows.go deleted file mode 100644 index 
3c86b6af9c..0000000000
--- a/pkg/parsers/operatingsystem/operatingsystem_windows.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package operatingsystem
-
-import (
-	"syscall"
-	"unsafe"
-)
-
-// See https://code.google.com/p/go/source/browse/src/pkg/mime/type_windows.go?r=d14520ac25bf6940785aabb71f5be453a286f58c
-// for a similar sample
-
-// GetOperatingSystem gets the name of the current operating system.
-func GetOperatingSystem() (string, error) {
-
-	var h syscall.Handle
-
-	// Default return value
-	ret := "Unknown Operating System"
-
-	if err := syscall.RegOpenKeyEx(syscall.HKEY_LOCAL_MACHINE,
-		syscall.StringToUTF16Ptr(`SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\`),
-		0,
-		syscall.KEY_READ,
-		&h); err != nil {
-		return ret, err
-	}
-	defer syscall.RegCloseKey(h)
-
-	var buf [1 << 10]uint16
-	var typ uint32
-	n := uint32(len(buf) * 2) // api expects array of bytes, not uint16
-
-	if err := syscall.RegQueryValueEx(h,
-		syscall.StringToUTF16Ptr("ProductName"),
-		nil,
-		&typ,
-		(*byte)(unsafe.Pointer(&buf[0])),
-		&n); err != nil {
-		return ret, err
-	}
-	ret = syscall.UTF16ToString(buf[:])
-
-	return ret, nil
-}
-
-// IsContainerized returns true if we are running inside a container.
-// No-op on Windows, always returns false.
-func IsContainerized() (bool, error) {
-	return false, nil
-}
diff --git a/pkg/parsers/parsers.go b/pkg/parsers/parsers.go
deleted file mode 100644
index acc897168f..0000000000
--- a/pkg/parsers/parsers.go
+++ /dev/null
@@ -1,69 +0,0 @@
-// Package parsers provides helper functions to parse and validate different
-// types of strings, such as hosts, unix addresses, tcp addresses, filters,
-// and kernel and operating system versions.
-package parsers
-
-import (
-	"fmt"
-	"strconv"
-	"strings"
-)
-
-// ParseKeyValueOpt parses and validates the specified string as a key/value pair (key=value)
-func ParseKeyValueOpt(opt string) (string, string, error) {
-	parts := strings.SplitN(opt, "=", 2)
-	if len(parts) != 2 {
-		return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
-	}
-	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
-}
-
-// ParseUintList parses and validates the specified string as the value
-// found in some cgroup file (e.g. `cpuset.cpus`, `cpuset.mems`), which could be
-// one of the formats below. Note that duplicates are actually allowed in the
-// input string. It returns a `map[int]bool` with available elements from `val`
-// set to `true`.
-// Supported formats: -// 7 -// 1-6 -// 0,3-4,7,8-10 -// 0-0,0,1-7 -// 03,1-3 <- this is gonna get parsed as [1,2,3] -// 3,2,1 -// 0-2,3,1 -func ParseUintList(val string) (map[int]bool, error) { - if val == "" { - return map[int]bool{}, nil - } - - availableInts := make(map[int]bool) - split := strings.Split(val, ",") - errInvalidFormat := fmt.Errorf("invalid format: %s", val) - - for _, r := range split { - if !strings.Contains(r, "-") { - v, err := strconv.Atoi(r) - if err != nil { - return nil, errInvalidFormat - } - availableInts[v] = true - } else { - split := strings.SplitN(r, "-", 2) - min, err := strconv.Atoi(split[0]) - if err != nil { - return nil, errInvalidFormat - } - max, err := strconv.Atoi(split[1]) - if err != nil { - return nil, errInvalidFormat - } - if max < min { - return nil, errInvalidFormat - } - for i := min; i <= max; i++ { - availableInts[i] = true - } - } - } - return availableInts, nil -} diff --git a/pkg/parsers/parsers_test.go b/pkg/parsers/parsers_test.go deleted file mode 100644 index 7f19e90279..0000000000 --- a/pkg/parsers/parsers_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package parsers - -import ( - "reflect" - "testing" -) - -func TestParseKeyValueOpt(t *testing.T) { - invalids := map[string]string{ - "": "Unable to parse key/value option: ", - "key": "Unable to parse key/value option: key", - } - for invalid, expectedError := range invalids { - if _, _, err := ParseKeyValueOpt(invalid); err == nil || err.Error() != expectedError { - t.Fatalf("Expected error %v for %v, got %v", expectedError, invalid, err) - } - } - valids := map[string][]string{ - "key=value": {"key", "value"}, - " key = value ": {"key", "value"}, - "key=value1=value2": {"key", "value1=value2"}, - " key = value1 = value2 ": {"key", "value1 = value2"}, - } - for valid, expectedKeyValue := range valids { - key, value, err := ParseKeyValueOpt(valid) - if err != nil { - t.Fatal(err) - } - if key != expectedKeyValue[0] || value != expectedKeyValue[1] { - t.Fatalf("Expected {%v: %v} got {%v: %v}", expectedKeyValue[0], expectedKeyValue[1], key, value) - } - } -} - -func TestParseUintList(t *testing.T) { - valids := map[string]map[int]bool{ - "": {}, - "7": {7: true}, - "1-6": {1: true, 2: true, 3: true, 4: true, 5: true, 6: true}, - "0-7": {0: true, 1: true, 2: true, 3: true, 4: true, 5: true, 6: true, 7: true}, - "0,3-4,7,8-10": {0: true, 3: true, 4: true, 7: true, 8: true, 9: true, 10: true}, - "0-0,0,1-4": {0: true, 1: true, 2: true, 3: true, 4: true}, - "03,1-3": {1: true, 2: true, 3: true}, - "3,2,1": {1: true, 2: true, 3: true}, - "0-2,3,1": {0: true, 1: true, 2: true, 3: true}, - } - for k, v := range valids { - out, err := ParseUintList(k) - if err != nil { - t.Fatalf("Expected not to fail, got %v", err) - } - if !reflect.DeepEqual(out, v) { - t.Fatalf("Expected %v, got %v", v, out) - } - } - - invalids := []string{ - "this", - "1--", - "1-10,,10", - "10-1", - "-1", - "-1,0", - } - for _, v := range invalids { - if out, err := ParseUintList(v); err == nil { - t.Fatalf("Expected failure with %s but got %v", v, out) - } - } -} diff --git a/pkg/pidfile/pidfile.go b/pkg/pidfile/pidfile.go deleted file mode 100644 index e1ac6bee35..0000000000 --- a/pkg/pidfile/pidfile.go +++ /dev/null @@ -1,49 +0,0 @@ -// Package pidfile provides structure and helper functions to create and remove -// PID file. A PID file is usually a file used to store the process ID of a -// running process. 
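A usage sketch for `ParseUintList` above, assuming the import path `github.com/docker/docker/pkg/parsers`:

```go
package main

import (
	"fmt"

	// Assumed import path for the parsers package removed above.
	"github.com/docker/docker/pkg/parsers"
)

func main() {
	// Ranges expand and duplicates collapse: the cpuset-style string
	// "0,3-4,7" yields the set {0, 3, 4, 7}.
	set, err := parsers.ParseUintList("0,3-4,7")
	if err != nil {
		panic(err)
	}
	fmt.Println(set) // map[0:true 3:true 4:true 7:true]

	// Reversed ranges are rejected, matching the tests above.
	if _, err := parsers.ParseUintList("10-1"); err != nil {
		fmt.Println(err) // invalid format: 10-1
	}
}
```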
-package pidfile - -import ( - "fmt" - "io/ioutil" - "os" - "strconv" - "strings" -) - -// PIDFile is a file used to store the process ID of a running process. -type PIDFile struct { - path string -} - -func checkPIDFileAlreadyExists(path string) error { - if pidByte, err := ioutil.ReadFile(path); err == nil { - pidString := strings.TrimSpace(string(pidByte)) - if pid, err := strconv.Atoi(pidString); err == nil { - if processExists(pid) { - return fmt.Errorf("pid file found, ensure docker is not running or delete %s", path) - } - } - } - return nil -} - -// New creates a PIDfile using the specified path. -func New(path string) (*PIDFile, error) { - if err := checkPIDFileAlreadyExists(path); err != nil { - return nil, err - } - if err := ioutil.WriteFile(path, []byte(fmt.Sprintf("%d", os.Getpid())), 0644); err != nil { - return nil, err - } - - return &PIDFile{path: path}, nil -} - -// Remove removes the PIDFile. -func (file PIDFile) Remove() error { - if err := os.Remove(file.path); err != nil { - return err - } - return nil -} diff --git a/pkg/pidfile/pidfile_test.go b/pkg/pidfile/pidfile_test.go deleted file mode 100644 index 73e8af76db..0000000000 --- a/pkg/pidfile/pidfile_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package pidfile - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func TestNewAndRemove(t *testing.T) { - dir, err := ioutil.TempDir(os.TempDir(), "test-pidfile") - if err != nil { - t.Fatal("Could not create test directory") - } - - path := filepath.Join(dir, "testfile") - file, err := New(path) - if err != nil { - t.Fatal("Could not create test file", err) - } - - _, err = New(path) - if err == nil { - t.Fatal("Test file creation not blocked") - } - - if err := file.Remove(); err != nil { - t.Fatal("Could not delete created test file") - } -} - -func TestRemoveInvalidPath(t *testing.T) { - file := PIDFile{path: filepath.Join("foo", "bar")} - - if err := file.Remove(); err == nil { - t.Fatal("Non-existing file doesn't give an error on delete") - } -} diff --git a/pkg/pidfile/pidfile_unix.go b/pkg/pidfile/pidfile_unix.go deleted file mode 100644 index 28f3deca91..0000000000 --- a/pkg/pidfile/pidfile_unix.go +++ /dev/null @@ -1,16 +0,0 @@ -// +build !windows - -package pidfile - -import ( - "os" - "path/filepath" - "strconv" -) - -func processExists(pid int) bool { - if _, err := os.Stat(filepath.Join("/proc", strconv.Itoa(pid))); err == nil { - return true - } - return false -} diff --git a/pkg/pidfile/pidfile_windows.go b/pkg/pidfile/pidfile_windows.go deleted file mode 100644 index ae489c627a..0000000000 --- a/pkg/pidfile/pidfile_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -package pidfile - -import "syscall" - -const ( - processQueryLimitedInformation = 0x1000 - - stillActive = 259 -) - -func processExists(pid int) bool { - h, err := syscall.OpenProcess(processQueryLimitedInformation, false, uint32(pid)) - if err != nil { - return false - } - var c uint32 - err = syscall.GetExitCodeProcess(h, &c) - syscall.Close(h) - if err != nil { - return c == stillActive - } - return true -} diff --git a/pkg/platform/architecture_linux.go b/pkg/platform/architecture_linux.go deleted file mode 100644 index 2cdc2c5918..0000000000 --- a/pkg/platform/architecture_linux.go +++ /dev/null @@ -1,16 +0,0 @@ -// Package platform provides helper function to get the runtime architecture -// for different platforms. 
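The `pidfile` package above is used as a create-then-remove pair; a minimal sketch, assuming the import path `github.com/docker/docker/pkg/pidfile` (the PID-file path below is hypothetical):

```go
package main

import (
	"log"

	// Assumed import path for the pidfile package removed above.
	"github.com/docker/docker/pkg/pidfile"
)

func main() {
	// New fails if the file already names a live process, which is how a
	// daemon guards against being started twice.
	pf, err := pidfile.New("/var/run/example-daemon.pid") // hypothetical path
	if err != nil {
		log.Fatal(err)
	}
	defer pf.Remove() // best-effort cleanup on shutdown

	// ... daemon work ...
}
```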
-package platform - -import ( - "syscall" -) - -// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) -func runtimeArchitecture() (string, error) { - utsname := &syscall.Utsname{} - if err := syscall.Uname(utsname); err != nil { - return "", err - } - return charsToString(utsname.Machine), nil -} diff --git a/pkg/platform/architecture_unix.go b/pkg/platform/architecture_unix.go deleted file mode 100644 index 45bbcf1535..0000000000 --- a/pkg/platform/architecture_unix.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build freebsd solaris darwin - -// Package platform provides helper function to get the runtime architecture -// for different platforms. -package platform - -import ( - "os/exec" - "strings" -) - -// runtimeArchitecture gets the name of the current architecture (x86, x86_64, i86pc, sun4v, ...) -func runtimeArchitecture() (string, error) { - cmd := exec.Command("/usr/bin/uname", "-m") - machine, err := cmd.Output() - if err != nil { - return "", err - } - return strings.TrimSpace(string(machine)), nil -} diff --git a/pkg/platform/architecture_windows.go b/pkg/platform/architecture_windows.go deleted file mode 100644 index 0dd8a2e416..0000000000 --- a/pkg/platform/architecture_windows.go +++ /dev/null @@ -1,52 +0,0 @@ -package platform - -import ( - "fmt" - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procGetSystemInfo = modkernel32.NewProc("GetSystemInfo") -) - -// see http://msdn.microsoft.com/en-us/library/windows/desktop/ms724958(v=vs.85).aspx -type systeminfo struct { - wProcessorArchitecture uint16 - wReserved uint16 - dwPageSize uint32 - lpMinimumApplicationAddress uintptr - lpMaximumApplicationAddress uintptr - dwActiveProcessorMask uintptr - dwNumberOfProcessors uint32 - dwProcessorType uint32 - dwAllocationGranularity uint32 - wProcessorLevel uint16 - wProcessorRevision uint16 -} - -// Constants -const ( - ProcessorArchitecture64 = 9 // PROCESSOR_ARCHITECTURE_AMD64 - ProcessorArchitectureIA64 = 6 // PROCESSOR_ARCHITECTURE_IA64 - ProcessorArchitecture32 = 0 // PROCESSOR_ARCHITECTURE_INTEL - ProcessorArchitectureArm = 5 // PROCESSOR_ARCHITECTURE_ARM -) - -var sysinfo systeminfo - -// runtimeArchitecture gets the name of the current architecture (x86, x86_64, …) -func runtimeArchitecture() (string, error) { - syscall.Syscall(procGetSystemInfo.Addr(), 1, uintptr(unsafe.Pointer(&sysinfo)), 0, 0) - switch sysinfo.wProcessorArchitecture { - case ProcessorArchitecture64, ProcessorArchitectureIA64: - return "x86_64", nil - case ProcessorArchitecture32: - return "i686", nil - case ProcessorArchitectureArm: - return "arm", nil - default: - return "", fmt.Errorf("Unknown processor architecture") - } -} diff --git a/pkg/platform/platform.go b/pkg/platform/platform.go deleted file mode 100644 index e4b03122f4..0000000000 --- a/pkg/platform/platform.go +++ /dev/null @@ -1,23 +0,0 @@ -package platform - -import ( - "runtime" - - "github.com/Sirupsen/logrus" -) - -var ( - // Architecture holds the runtime architecture of the process. - Architecture string - // OSType holds the runtime operating system type (Linux, …) of the process. 
- OSType string -) - -func init() { - var err error - Architecture, err = runtimeArchitecture() - if err != nil { - logrus.Errorf("Could not read system architecture info: %v", err) - } - OSType = runtime.GOOS -} diff --git a/pkg/platform/utsname_int8.go b/pkg/platform/utsname_int8.go deleted file mode 100644 index 5dcbadfdfe..0000000000 --- a/pkg/platform/utsname_int8.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux,386 linux,amd64 linux,arm64 -// see golang's sources src/syscall/ztypes_linux_*.go that use int8 - -package platform - -// Convert the OS/ARCH-specific utsname.Machine to string -// given as an array of signed int8 -func charsToString(ca [65]int8) string { - s := make([]byte, len(ca)) - var lens int - for ; lens < len(ca); lens++ { - if ca[lens] == 0 { - break - } - s[lens] = uint8(ca[lens]) - } - return string(s[0:lens]) -} diff --git a/pkg/platform/utsname_uint8.go b/pkg/platform/utsname_uint8.go deleted file mode 100644 index c9875cf6e6..0000000000 --- a/pkg/platform/utsname_uint8.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build linux,arm linux,ppc64 linux,ppc64le s390x -// see golang's sources src/syscall/ztypes_linux_*.go that use uint8 - -package platform - -// Convert the OS/ARCH-specific utsname.Machine to string -// given as an array of unsigned uint8 -func charsToString(ca [65]uint8) string { - s := make([]byte, len(ca)) - var lens int - for ; lens < len(ca); lens++ { - if ca[lens] == 0 { - break - } - s[lens] = ca[lens] - } - return string(s[0:lens]) -} diff --git a/pkg/plugins/client.go b/pkg/plugins/client.go deleted file mode 100644 index a778677f7c..0000000000 --- a/pkg/plugins/client.go +++ /dev/null @@ -1,188 +0,0 @@ -package plugins - -import ( - "bytes" - "encoding/json" - "io" - "io/ioutil" - "net/http" - "net/url" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/plugins/transport" - "github.com/docker/go-connections/sockets" - "github.com/docker/go-connections/tlsconfig" -) - -const ( - defaultTimeOut = 30 -) - -// NewClient creates a new plugin client (http). -func NewClient(addr string, tlsConfig *tlsconfig.Options) (*Client, error) { - tr := &http.Transport{} - - if tlsConfig != nil { - c, err := tlsconfig.Client(*tlsConfig) - if err != nil { - return nil, err - } - tr.TLSClientConfig = c - } - - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - socket := u.Host - if socket == "" { - // valid local socket addresses have the host empty. - socket = u.Path - } - if err := sockets.ConfigureTransport(tr, u.Scheme, socket); err != nil { - return nil, err - } - scheme := httpScheme(u) - - clientTransport := transport.NewHTTPTransport(tr, scheme, socket) - return NewClientWithTransport(clientTransport), nil -} - -// NewClientWithTransport creates a new plugin client with a given transport. -func NewClientWithTransport(tr transport.Transport) *Client { - return &Client{ - http: &http.Client{ - Transport: tr, - }, - requestFactory: tr, - } -} - -// Client represents a plugin client. -type Client struct { - http *http.Client // http client to use - requestFactory transport.RequestFactory -} - -// Call calls the specified method with the specified arguments for the plugin. -// It will retry for 30 seconds if a failure occurs when calling. 
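The 30-second retry window described in this comment is driven by the `backoff` and `abort` helpers further down in client.go; a standalone sketch of the schedule they produce (delays double from 1s and cap at `defaultTimeOut`, i.e. 30s):

```go
package main

import (
	"fmt"
	"time"
)

// backoffDelay mirrors the backoff helper below: the delay doubles for each
// retry already made, capped at max seconds.
func backoffDelay(retries, max int) time.Duration {
	b := 1
	for b < max && retries > 0 {
		b *= 2
		retries--
	}
	if b > max {
		b = max
	}
	return time.Duration(b) * time.Second
}

func main() {
	for _, r := range []int{0, 1, 2, 4, 6, 10} {
		fmt.Printf("retry %2d -> wait %v\n", r, backoffDelay(r, 30))
	}
	// retry 0 -> 1s, 1 -> 2s, 2 -> 4s, 4 -> 16s, 6 -> 30s, 10 -> 30s,
	// matching the TestBackoff cases below.
}
```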
-func (c *Client) Call(serviceMethod string, args interface{}, ret interface{}) error { - var buf bytes.Buffer - if args != nil { - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return err - } - } - body, err := c.callWithRetry(serviceMethod, &buf, true) - if err != nil { - return err - } - defer body.Close() - if ret != nil { - if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) - return err - } - } - return nil -} - -// Stream calls the specified method with the specified arguments for the plugin and returns the response body -func (c *Client) Stream(serviceMethod string, args interface{}) (io.ReadCloser, error) { - var buf bytes.Buffer - if err := json.NewEncoder(&buf).Encode(args); err != nil { - return nil, err - } - return c.callWithRetry(serviceMethod, &buf, true) -} - -// SendFile calls the specified method, and passes through the IO stream -func (c *Client) SendFile(serviceMethod string, data io.Reader, ret interface{}) error { - body, err := c.callWithRetry(serviceMethod, data, true) - if err != nil { - return err - } - defer body.Close() - if err := json.NewDecoder(body).Decode(&ret); err != nil { - logrus.Errorf("%s: error reading plugin resp: %v", serviceMethod, err) - return err - } - return nil -} - -func (c *Client) callWithRetry(serviceMethod string, data io.Reader, retry bool) (io.ReadCloser, error) { - req, err := c.requestFactory.NewRequest(serviceMethod, data) - if err != nil { - return nil, err - } - - var retries int - start := time.Now() - - for { - resp, err := c.http.Do(req) - if err != nil { - if !retry { - return nil, err - } - - timeOff := backoff(retries) - if abort(start, timeOff) { - return nil, err - } - retries++ - logrus.Warnf("Unable to connect to plugin: %s%s: %v, retrying in %v", req.URL.Host, req.URL.Path, err, timeOff) - time.Sleep(timeOff) - continue - } - - if resp.StatusCode != http.StatusOK { - b, err := ioutil.ReadAll(resp.Body) - resp.Body.Close() - if err != nil { - return nil, &statusError{resp.StatusCode, serviceMethod, err.Error()} - } - - // Plugins' Response(s) should have an Err field indicating what went - // wrong. Try to unmarshal into ResponseErr. Otherwise fallback to just - // return the string(body) - type responseErr struct { - Err string - } - remoteErr := responseErr{} - if err := json.Unmarshal(b, &remoteErr); err == nil { - if remoteErr.Err != "" { - return nil, &statusError{resp.StatusCode, serviceMethod, remoteErr.Err} - } - } - // old way... 
- return nil, &statusError{resp.StatusCode, serviceMethod, string(b)} - } - return resp.Body, nil - } -} - -func backoff(retries int) time.Duration { - b, max := 1, defaultTimeOut - for b < max && retries > 0 { - b *= 2 - retries-- - } - if b > max { - b = max - } - return time.Duration(b) * time.Second -} - -func abort(start time.Time, timeOff time.Duration) bool { - return timeOff+time.Since(start) >= time.Duration(defaultTimeOut)*time.Second -} - -func httpScheme(u *url.URL) string { - scheme := u.Scheme - if scheme != "https" { - scheme = "http" - } - return scheme -} diff --git a/pkg/plugins/client_test.go b/pkg/plugins/client_test.go deleted file mode 100644 index 9faad86a15..0000000000 --- a/pkg/plugins/client_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package plugins - -import ( - "io" - "net/http" - "net/http/httptest" - "net/url" - "reflect" - "testing" - "time" - - "github.com/docker/docker/pkg/plugins/transport" - "github.com/docker/go-connections/tlsconfig" -) - -var ( - mux *http.ServeMux - server *httptest.Server -) - -func setupRemotePluginServer() string { - mux = http.NewServeMux() - server = httptest.NewServer(mux) - return server.URL -} - -func teardownRemotePluginServer() { - if server != nil { - server.Close() - } -} - -func TestFailedConnection(t *testing.T) { - c, _ := NewClient("tcp://127.0.0.1:1", &tlsconfig.Options{InsecureSkipVerify: true}) - _, err := c.callWithRetry("Service.Method", nil, false) - if err == nil { - t.Fatal("Unexpected successful connection") - } -} - -func TestEchoInputOutput(t *testing.T) { - addr := setupRemotePluginServer() - defer teardownRemotePluginServer() - - m := Manifest{[]string{"VolumeDriver", "NetworkDriver"}} - - mux.HandleFunc("/Test.Echo", func(w http.ResponseWriter, r *http.Request) { - if r.Method != "POST" { - t.Fatalf("Expected POST, got %s\n", r.Method) - } - - header := w.Header() - header.Set("Content-Type", transport.VersionMimetype) - - io.Copy(w, r.Body) - }) - - c, _ := NewClient(addr, &tlsconfig.Options{InsecureSkipVerify: true}) - var output Manifest - err := c.Call("Test.Echo", m, &output) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(output, m) { - t.Fatalf("Expected %v, was %v\n", m, output) - } - err = c.Call("Test.Echo", nil, nil) - if err != nil { - t.Fatal(err) - } -} - -func TestBackoff(t *testing.T) { - cases := []struct { - retries int - expTimeOff time.Duration - }{ - {0, time.Duration(1)}, - {1, time.Duration(2)}, - {2, time.Duration(4)}, - {4, time.Duration(16)}, - {6, time.Duration(30)}, - {10, time.Duration(30)}, - } - - for _, c := range cases { - s := c.expTimeOff * time.Second - if d := backoff(c.retries); d != s { - t.Fatalf("Retry %v, expected %v, was %v\n", c.retries, s, d) - } - } -} - -func TestAbortRetry(t *testing.T) { - cases := []struct { - timeOff time.Duration - expAbort bool - }{ - {time.Duration(1), false}, - {time.Duration(2), false}, - {time.Duration(10), false}, - {time.Duration(30), true}, - {time.Duration(40), true}, - } - - for _, c := range cases { - s := c.timeOff * time.Second - if a := abort(time.Now(), s); a != c.expAbort { - t.Fatalf("Duration %v, expected %v, was %v\n", c.timeOff, s, a) - } - } -} - -func TestClientScheme(t *testing.T) { - cases := map[string]string{ - "tcp://127.0.0.1:8080": "http", - "unix:///usr/local/plugins/foo": "http", - "http://127.0.0.1:8080": "http", - "https://127.0.0.1:8080": "https", - } - - for addr, scheme := range cases { - u, err := url.Parse(addr) - if err != nil { - t.Fatal(err) - } - s := httpScheme(u) - - if s != scheme 
{ - t.Fatalf("URL scheme mismatch, expected %s, got %s", scheme, s) - } - } -} diff --git a/pkg/plugins/discovery.go b/pkg/plugins/discovery.go deleted file mode 100644 index 4297612be0..0000000000 --- a/pkg/plugins/discovery.go +++ /dev/null @@ -1,132 +0,0 @@ -package plugins - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "net/url" - "os" - "path/filepath" - "strings" - "sync" -) - -var ( - // ErrNotFound plugin not found - ErrNotFound = errors.New("plugin not found") - socketsPath = "/run/docker/plugins" - specsPaths = []string{"/etc/docker/plugins", "/usr/lib/docker/plugins"} -) - -// localRegistry defines a registry that is local (using unix socket). -type localRegistry struct{} - -func newLocalRegistry() localRegistry { - return localRegistry{} -} - -// Scan scans all the plugin paths and returns all the names it found -func Scan() ([]string, error) { - var names []string - if err := filepath.Walk(socketsPath, func(path string, fi os.FileInfo, err error) error { - if err != nil { - return nil - } - - if fi.Mode()&os.ModeSocket != 0 { - name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) - names = append(names, name) - } - return nil - }); err != nil { - return nil, err - } - - for _, path := range specsPaths { - if err := filepath.Walk(path, func(p string, fi os.FileInfo, err error) error { - if err != nil || fi.IsDir() { - return nil - } - name := strings.TrimSuffix(fi.Name(), filepath.Ext(fi.Name())) - names = append(names, name) - return nil - }); err != nil { - return nil, err - } - } - return names, nil -} - -// Plugin returns the plugin registered with the given name (or returns an error). -func (l *localRegistry) Plugin(name string) (*Plugin, error) { - socketpaths := pluginPaths(socketsPath, name, ".sock") - - for _, p := range socketpaths { - if fi, err := os.Stat(p); err == nil && fi.Mode()&os.ModeSocket != 0 { - return NewLocalPlugin(name, "unix://"+p), nil - } - } - - var txtspecpaths []string - for _, p := range specsPaths { - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".spec")...) - txtspecpaths = append(txtspecpaths, pluginPaths(p, name, ".json")...) 
- } - - for _, p := range txtspecpaths { - if _, err := os.Stat(p); err == nil { - if strings.HasSuffix(p, ".json") { - return readPluginJSONInfo(name, p) - } - return readPluginInfo(name, p) - } - } - return nil, ErrNotFound -} - -func readPluginInfo(name, path string) (*Plugin, error) { - content, err := ioutil.ReadFile(path) - if err != nil { - return nil, err - } - addr := strings.TrimSpace(string(content)) - - u, err := url.Parse(addr) - if err != nil { - return nil, err - } - - if len(u.Scheme) == 0 { - return nil, fmt.Errorf("Unknown protocol") - } - - return NewLocalPlugin(name, addr), nil -} - -func readPluginJSONInfo(name, path string) (*Plugin, error) { - f, err := os.Open(path) - if err != nil { - return nil, err - } - defer f.Close() - - var p Plugin - if err := json.NewDecoder(f).Decode(&p); err != nil { - return nil, err - } - p.name = name - if p.TLSConfig != nil && len(p.TLSConfig.CAFile) == 0 { - p.TLSConfig.InsecureSkipVerify = true - } - p.activateWait = sync.NewCond(&sync.Mutex{}) - - return &p, nil -} - -func pluginPaths(base, name, ext string) []string { - return []string{ - filepath.Join(base, name+ext), - filepath.Join(base, name, name+ext), - } -} diff --git a/pkg/plugins/discovery_test.go b/pkg/plugins/discovery_test.go deleted file mode 100644 index 203b048eed..0000000000 --- a/pkg/plugins/discovery_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package plugins - -import ( - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -func Setup(t *testing.T) (string, func()) { - tmpdir, err := ioutil.TempDir("", "docker-test") - if err != nil { - t.Fatal(err) - } - backup := socketsPath - socketsPath = tmpdir - specsPaths = []string{tmpdir} - - return tmpdir, func() { - socketsPath = backup - os.RemoveAll(tmpdir) - } -} - -func TestFileSpecPlugin(t *testing.T) { - tmpdir, unregister := Setup(t) - defer unregister() - - cases := []struct { - path string - name string - addr string - fail bool - }{ - // TODO Windows: Factor out the unix:// variants. 
- {filepath.Join(tmpdir, "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, - {filepath.Join(tmpdir, "echo", "echo.spec"), "echo", "unix://var/lib/docker/plugins/echo.sock", false}, - {filepath.Join(tmpdir, "foo.spec"), "foo", "tcp://localhost:8080", false}, - {filepath.Join(tmpdir, "foo", "foo.spec"), "foo", "tcp://localhost:8080", false}, - {filepath.Join(tmpdir, "bar.spec"), "bar", "localhost:8080", true}, // unknown transport - } - - for _, c := range cases { - if err := os.MkdirAll(filepath.Dir(c.path), 0755); err != nil { - t.Fatal(err) - } - if err := ioutil.WriteFile(c.path, []byte(c.addr), 0644); err != nil { - t.Fatal(err) - } - - r := newLocalRegistry() - p, err := r.Plugin(c.name) - if c.fail && err == nil { - continue - } - - if err != nil { - t.Fatal(err) - } - - if p.name != c.name { - t.Fatalf("Expected plugin `%s`, got %s\n", c.name, p.Name) - } - - if p.Addr != c.addr { - t.Fatalf("Expected plugin addr `%s`, got %s\n", c.addr, p.Addr) - } - - if p.TLSConfig.InsecureSkipVerify != true { - t.Fatalf("Expected TLS verification to be skipped") - } - } -} - -func TestFileJSONSpecPlugin(t *testing.T) { - tmpdir, unregister := Setup(t) - defer unregister() - - p := filepath.Join(tmpdir, "example.json") - spec := `{ - "Name": "plugin-example", - "Addr": "https://example.com/docker/plugin", - "TLSConfig": { - "CAFile": "/usr/shared/docker/certs/example-ca.pem", - "CertFile": "/usr/shared/docker/certs/example-cert.pem", - "KeyFile": "/usr/shared/docker/certs/example-key.pem" - } -}` - - if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { - t.Fatal(err) - } - - r := newLocalRegistry() - plugin, err := r.Plugin("example") - if err != nil { - t.Fatal(err) - } - - if plugin.name != "example" { - t.Fatalf("Expected plugin `plugin-example`, got %s\n", plugin.Name) - } - - if plugin.Addr != "https://example.com/docker/plugin" { - t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) - } - - if plugin.TLSConfig.CAFile != "/usr/shared/docker/certs/example-ca.pem" { - t.Fatalf("Expected plugin CA `/usr/shared/docker/certs/example-ca.pem`, got %s\n", plugin.TLSConfig.CAFile) - } - - if plugin.TLSConfig.CertFile != "/usr/shared/docker/certs/example-cert.pem" { - t.Fatalf("Expected plugin Certificate `/usr/shared/docker/certs/example-cert.pem`, got %s\n", plugin.TLSConfig.CertFile) - } - - if plugin.TLSConfig.KeyFile != "/usr/shared/docker/certs/example-key.pem" { - t.Fatalf("Expected plugin Key `/usr/shared/docker/certs/example-key.pem`, got %s\n", plugin.TLSConfig.KeyFile) - } -} - -func TestFileJSONSpecPluginWithoutTLSConfig(t *testing.T) { - tmpdir, unregister := Setup(t) - defer unregister() - - p := filepath.Join(tmpdir, "example.json") - spec := `{ - "Name": "plugin-example", - "Addr": "https://example.com/docker/plugin" -}` - - if err := ioutil.WriteFile(p, []byte(spec), 0644); err != nil { - t.Fatal(err) - } - - r := newLocalRegistry() - plugin, err := r.Plugin("example") - if err != nil { - t.Fatal(err) - } - - if plugin.name != "example" { - t.Fatalf("Expected plugin `plugin-example`, got %s\n", plugin.Name) - } - - if plugin.Addr != "https://example.com/docker/plugin" { - t.Fatalf("Expected plugin addr `https://example.com/docker/plugin`, got %s\n", plugin.Addr) - } - - if plugin.TLSConfig != nil { - t.Fatalf("Expected plugin TLSConfig nil, got %v\n", plugin.TLSConfig) - } -} diff --git a/pkg/plugins/discovery_unix_test.go b/pkg/plugins/discovery_unix_test.go deleted file mode 100644 index 53e02d2858..0000000000 
--- a/pkg/plugins/discovery_unix_test.go
+++ /dev/null
@@ -1,61 +0,0 @@
-// +build !windows
-
-package plugins
-
-import (
-	"fmt"
-	"net"
-	"os"
-	"path/filepath"
-	"reflect"
-	"testing"
-)
-
-func TestLocalSocket(t *testing.T) {
-	// TODO Windows: Enable a similar version for Windows named pipes
-	tmpdir, unregister := Setup(t)
-	defer unregister()
-
-	cases := []string{
-		filepath.Join(tmpdir, "echo.sock"),
-		filepath.Join(tmpdir, "echo", "echo.sock"),
-	}
-
-	for _, c := range cases {
-		if err := os.MkdirAll(filepath.Dir(c), 0755); err != nil {
-			t.Fatal(err)
-		}
-
-		l, err := net.Listen("unix", c)
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		r := newLocalRegistry()
-		p, err := r.Plugin("echo")
-		if err != nil {
-			t.Fatal(err)
-		}
-
-		pp, err := r.Plugin("echo")
-		if err != nil {
-			t.Fatal(err)
-		}
-		if !reflect.DeepEqual(p, pp) {
-			t.Fatalf("Expected %v, was %v\n", p, pp)
-		}
-
-		if p.name != "echo" {
-			t.Fatalf("Expected plugin `echo`, got %s\n", p.Name)
-		}
-
-		addr := fmt.Sprintf("unix://%s", c)
-		if p.Addr != addr {
-			t.Fatalf("Expected plugin addr `%s`, got %s\n", addr, p.Addr)
-		}
-		if p.TLSConfig.InsecureSkipVerify != true {
-			t.Fatalf("Expected TLS verification to be skipped")
-		}
-		l.Close()
-	}
-}
diff --git a/pkg/plugins/errors.go b/pkg/plugins/errors.go
deleted file mode 100644
index 7988471026..0000000000
--- a/pkg/plugins/errors.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package plugins
-
-import (
-	"fmt"
-	"net/http"
-)
-
-type statusError struct {
-	status int
-	method string
-	err    string
-}
-
-// Error returns a formatted string for this error type
-func (e *statusError) Error() string {
-	return fmt.Sprintf("%s: %v", e.method, e.err)
-}
-
-// IsNotFound indicates whether the passed-in error is an http.StatusNotFound response from the plugin
-func IsNotFound(err error) bool {
-	return isStatusError(err, http.StatusNotFound)
-}
-
-func isStatusError(err error, status int) bool {
-	if err == nil {
-		return false
-	}
-	e, ok := err.(*statusError)
-	if !ok {
-		return false
-	}
-	return e.status == status
-}
diff --git a/pkg/plugins/pluginrpc-gen/README.md b/pkg/plugins/pluginrpc-gen/README.md
deleted file mode 100644
index 0418a3e00a..0000000000
--- a/pkg/plugins/pluginrpc-gen/README.md
+++ /dev/null
@@ -1,58 +0,0 @@
-Plugin RPC Generator
-====================
-
-Generates Go code from a Go interface definition for proxying between the plugin
-API and the subsystem being extended.
-
-## Usage
-
-Given an interface definition:
-
-```go
-type volumeDriver interface {
-	Create(name string, opts opts) (err error)
-	Remove(name string) (err error)
-	Path(name string) (mountpoint string, err error)
-	Mount(name string) (mountpoint string, err error)
-	Unmount(name string) (err error)
-}
-```
-
-**Note**: All function arguments and return values must be named in the definition.
-
-Run the generator:
-
-```bash
-$ pluginrpc-gen --type volumeDriver --name VolumeDriver -i volumes/drivers/extpoint.go -o volumes/drivers/proxy.go
-```
-
-Where:
-- `--type` is the name of the interface to use
-- `--name` is the subsystem that the plugin "Implements"
-- `-i` is the input file containing the interface definition
-- `-o` is the output file where the generated code should go
-
-**Note**: The generated code will use the same package name as the one defined in the input file
-
-Optionally, you can skip functions on the interface that should not be
-implemented in the generated proxy code by passing in the function name to `--skip`.
-This flag can be specified multiple times.
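For context on what the generator emits: each interface method becomes a proxy method that packs its named arguments into a request struct and forwards it via `Client.Call` from client.go above. An illustrative, hand-written approximation (not the generator's literal output; the `opts` type is a stand-in for the one in the example interface):

```go
package volumedrivers

import (
	"errors"

	"github.com/docker/docker/pkg/plugins"
)

// opts stands in for the opts type used by the example interface.
type opts map[string]string

// volumeDriverProxy is a hand-written approximation of the generated code.
type volumeDriverProxy struct {
	client *plugins.Client
}

type volumeDriverProxyCreateRequest struct {
	Name string
	Opts opts
}

type volumeDriverProxyCreateResponse struct {
	Err string
}

// Create forwards the call to the plugin as "VolumeDriver.Create" and turns a
// non-empty Err field in the response back into a Go error.
func (pp *volumeDriverProxy) Create(name string, opts opts) error {
	req := volumeDriverProxyCreateRequest{Name: name, Opts: opts}
	var resp volumeDriverProxyCreateResponse
	if err := pp.client.Call("VolumeDriver.Create", req, &resp); err != nil {
		return err
	}
	if resp.Err != "" {
		return errors.New(resp.Err)
	}
	return nil
}
```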
- -You can also add build tags that should be prepended to the generated code by -supplying `--tag`. This flag can be specified multiple times. - -## Known issues - -## go-generate - -You can also use this with go-generate, which is pretty awesome. -To do so, place the code at the top of the file which contains the interface -definition (i.e., the input file): - -```go -//go:generate pluginrpc-gen -i $GOFILE -o proxy.go -type volumeDriver -name VolumeDriver -``` - -Then cd to the package dir and run `go generate` - -**Note**: the `pluginrpc-gen` binary must be within your `$PATH` diff --git a/pkg/plugins/pluginrpc-gen/fixtures/foo.go b/pkg/plugins/pluginrpc-gen/fixtures/foo.go deleted file mode 100644 index 5695dcc2d4..0000000000 --- a/pkg/plugins/pluginrpc-gen/fixtures/foo.go +++ /dev/null @@ -1,89 +0,0 @@ -package foo - -import ( - "fmt" - - aliasedio "io" - - "github.com/docker/docker/pkg/plugins/pluginrpc-gen/fixtures/otherfixture" -) - -var ( - errFakeImport = fmt.Errorf("just to import fmt for imports tests") -) - -type wobble struct { - Some string - Val string - Inception *wobble -} - -// Fooer is an empty interface used for tests. -type Fooer interface{} - -// Fooer2 is an interface used for tests. -type Fooer2 interface { - Foo() -} - -// Fooer3 is an interface used for tests. -type Fooer3 interface { - Foo() - Bar(a string) - Baz(a string) (err error) - Qux(a, b string) (val string, err error) - Wobble() (w *wobble) - Wiggle() (w wobble) - WiggleWobble(a []*wobble, b []wobble, c map[string]*wobble, d map[*wobble]wobble, e map[string][]wobble, f []*otherfixture.Spaceship) (g map[*wobble]wobble, h [][]*wobble, i otherfixture.Spaceship, j *otherfixture.Spaceship, k map[*otherfixture.Spaceship]otherfixture.Spaceship, l []otherfixture.Spaceship) -} - -// Fooer4 is an interface used for tests. -type Fooer4 interface { - Foo() error -} - -// Bar is an interface used for tests. -type Bar interface { - Boo(a string, b string) (s string, err error) -} - -// Fooer5 is an interface used for tests. -type Fooer5 interface { - Foo() - Bar -} - -// Fooer6 is an interface used for tests. -type Fooer6 interface { - Foo(a otherfixture.Spaceship) -} - -// Fooer7 is an interface used for tests. -type Fooer7 interface { - Foo(a *otherfixture.Spaceship) -} - -// Fooer8 is an interface used for tests. -type Fooer8 interface { - Foo(a map[string]otherfixture.Spaceship) -} - -// Fooer9 is an interface used for tests. -type Fooer9 interface { - Foo(a map[string]*otherfixture.Spaceship) -} - -// Fooer10 is an interface used for tests. -type Fooer10 interface { - Foo(a []otherfixture.Spaceship) -} - -// Fooer11 is an interface used for tests. -type Fooer11 interface { - Foo(a []*otherfixture.Spaceship) -} - -// Fooer12 is an interface used for tests. 
-type Fooer12 interface { - Foo(a aliasedio.Reader) -} diff --git a/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go b/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go deleted file mode 100644 index 1937d1786c..0000000000 --- a/pkg/plugins/pluginrpc-gen/fixtures/otherfixture/spaceship.go +++ /dev/null @@ -1,4 +0,0 @@ -package otherfixture - -// Spaceship is a fixture for tests -type Spaceship struct{} diff --git a/pkg/plugins/pluginrpc-gen/main.go b/pkg/plugins/pluginrpc-gen/main.go deleted file mode 100644 index e77a7d45ff..0000000000 --- a/pkg/plugins/pluginrpc-gen/main.go +++ /dev/null @@ -1,91 +0,0 @@ -package main - -import ( - "bytes" - "flag" - "fmt" - "go/format" - "io/ioutil" - "os" - "unicode" - "unicode/utf8" -) - -type stringSet struct { - values map[string]struct{} -} - -func (s stringSet) String() string { - return "" -} - -func (s stringSet) Set(value string) error { - s.values[value] = struct{}{} - return nil -} -func (s stringSet) GetValues() map[string]struct{} { - return s.values -} - -var ( - typeName = flag.String("type", "", "interface type to generate plugin rpc proxy for") - rpcName = flag.String("name", *typeName, "RPC name, set if different from type") - inputFile = flag.String("i", "", "input file path") - outputFile = flag.String("o", *inputFile+"_proxy.go", "output file path") - - skipFuncs map[string]struct{} - flSkipFuncs = stringSet{make(map[string]struct{})} - - flBuildTags = stringSet{make(map[string]struct{})} -) - -func errorOut(msg string, err error) { - if err == nil { - return - } - fmt.Fprintf(os.Stderr, "%s: %v\n", msg, err) - os.Exit(1) -} - -func checkFlags() error { - if *outputFile == "" { - return fmt.Errorf("missing required flag `-o`") - } - if *inputFile == "" { - return fmt.Errorf("missing required flag `-i`") - } - return nil -} - -func main() { - flag.Var(flSkipFuncs, "skip", "skip parsing for function") - flag.Var(flBuildTags, "tag", "build tags to add to generated files") - flag.Parse() - skipFuncs = flSkipFuncs.GetValues() - - errorOut("error", checkFlags()) - - pkg, err := Parse(*inputFile, *typeName) - errorOut(fmt.Sprintf("error parsing requested type %s", *typeName), err) - - var analysis = struct { - InterfaceType string - RPCName string - BuildTags map[string]struct{} - *ParsedPkg - }{toLower(*typeName), *rpcName, flBuildTags.GetValues(), pkg} - var buf bytes.Buffer - - errorOut("parser error", generatedTempl.Execute(&buf, analysis)) - src, err := format.Source(buf.Bytes()) - errorOut("error formatting generated source:\n"+buf.String(), err) - errorOut("error writing file", ioutil.WriteFile(*outputFile, src, 0644)) -} - -func toLower(s string) string { - if s == "" { - return "" - } - r, n := utf8.DecodeRuneInString(s) - return string(unicode.ToLower(r)) + s[n:] -} diff --git a/pkg/plugins/pluginrpc-gen/parser.go b/pkg/plugins/pluginrpc-gen/parser.go deleted file mode 100644 index 6c547e18cf..0000000000 --- a/pkg/plugins/pluginrpc-gen/parser.go +++ /dev/null @@ -1,263 +0,0 @@ -package main - -import ( - "errors" - "fmt" - "go/ast" - "go/parser" - "go/token" - "path" - "reflect" - "strings" -) - -var errBadReturn = errors.New("found return arg with no name: all args must be named") - -type errUnexpectedType struct { - expected string - actual interface{} -} - -func (e errUnexpectedType) Error() string { - return fmt.Sprintf("got wrong type expecting %s, got: %v", e.expected, reflect.TypeOf(e.actual)) -} - -// ParsedPkg holds information about a package that has been parsed, -// its name and the list of 
functions. -type ParsedPkg struct { - Name string - Functions []function - Imports []importSpec -} - -type function struct { - Name string - Args []arg - Returns []arg - Doc string -} - -type arg struct { - Name string - ArgType string - PackageSelector string -} - -func (a *arg) String() string { - return a.Name + " " + a.ArgType -} - -type importSpec struct { - Name string - Path string -} - -func (s *importSpec) String() string { - var ss string - if len(s.Name) != 0 { - ss += s.Name - } - ss += s.Path - return ss -} - -// Parse parses the given file for an interface definition with the given name. -func Parse(filePath string, objName string) (*ParsedPkg, error) { - fs := token.NewFileSet() - pkg, err := parser.ParseFile(fs, filePath, nil, parser.AllErrors) - if err != nil { - return nil, err - } - p := &ParsedPkg{} - p.Name = pkg.Name.Name - obj, exists := pkg.Scope.Objects[objName] - if !exists { - return nil, fmt.Errorf("could not find object %s in %s", objName, filePath) - } - if obj.Kind != ast.Typ { - return nil, fmt.Errorf("exected type, got %s", obj.Kind) - } - spec, ok := obj.Decl.(*ast.TypeSpec) - if !ok { - return nil, errUnexpectedType{"*ast.TypeSpec", obj.Decl} - } - iface, ok := spec.Type.(*ast.InterfaceType) - if !ok { - return nil, errUnexpectedType{"*ast.InterfaceType", spec.Type} - } - - p.Functions, err = parseInterface(iface) - if err != nil { - return nil, err - } - - // figure out what imports will be needed - imports := make(map[string]importSpec) - for _, f := range p.Functions { - args := append(f.Args, f.Returns...) - for _, arg := range args { - if len(arg.PackageSelector) == 0 { - continue - } - - for _, i := range pkg.Imports { - if i.Name != nil { - if i.Name.Name != arg.PackageSelector { - continue - } - imports[i.Path.Value] = importSpec{Name: arg.PackageSelector, Path: i.Path.Value} - break - } - - _, name := path.Split(i.Path.Value) - splitName := strings.Split(name, "-") - if len(splitName) > 1 { - name = splitName[len(splitName)-1] - } - // import paths have quotes already added in, so need to remove them for name comparison - name = strings.TrimPrefix(name, `"`) - name = strings.TrimSuffix(name, `"`) - if name == arg.PackageSelector { - imports[i.Path.Value] = importSpec{Path: i.Path.Value} - break - } - } - } - } - - for _, spec := range imports { - p.Imports = append(p.Imports, spec) - } - - return p, nil -} - -func parseInterface(iface *ast.InterfaceType) ([]function, error) { - var functions []function - for _, field := range iface.Methods.List { - switch f := field.Type.(type) { - case *ast.FuncType: - method, err := parseFunc(field) - if err != nil { - return nil, err - } - if method == nil { - continue - } - functions = append(functions, *method) - case *ast.Ident: - spec, ok := f.Obj.Decl.(*ast.TypeSpec) - if !ok { - return nil, errUnexpectedType{"*ast.TypeSpec", f.Obj.Decl} - } - iface, ok := spec.Type.(*ast.InterfaceType) - if !ok { - return nil, errUnexpectedType{"*ast.TypeSpec", spec.Type} - } - funcs, err := parseInterface(iface) - if err != nil { - fmt.Println(err) - continue - } - functions = append(functions, funcs...) 
- default: - return nil, errUnexpectedType{"*astFuncType or *ast.Ident", f} - } - } - return functions, nil -} - -func parseFunc(field *ast.Field) (*function, error) { - f := field.Type.(*ast.FuncType) - method := &function{Name: field.Names[0].Name} - if _, exists := skipFuncs[method.Name]; exists { - fmt.Println("skipping:", method.Name) - return nil, nil - } - if f.Params != nil { - args, err := parseArgs(f.Params.List) - if err != nil { - return nil, err - } - method.Args = args - } - if f.Results != nil { - returns, err := parseArgs(f.Results.List) - if err != nil { - return nil, fmt.Errorf("error parsing function returns for %q: %v", method.Name, err) - } - method.Returns = returns - } - return method, nil -} - -func parseArgs(fields []*ast.Field) ([]arg, error) { - var args []arg - for _, f := range fields { - if len(f.Names) == 0 { - return nil, errBadReturn - } - for _, name := range f.Names { - p, err := parseExpr(f.Type) - if err != nil { - return nil, err - } - args = append(args, arg{name.Name, p.value, p.pkg}) - } - } - return args, nil -} - -type parsedExpr struct { - value string - pkg string -} - -func parseExpr(e ast.Expr) (parsedExpr, error) { - var parsed parsedExpr - switch i := e.(type) { - case *ast.Ident: - parsed.value += i.Name - case *ast.StarExpr: - p, err := parseExpr(i.X) - if err != nil { - return parsed, err - } - parsed.value += "*" - parsed.value += p.value - parsed.pkg = p.pkg - case *ast.SelectorExpr: - p, err := parseExpr(i.X) - if err != nil { - return parsed, err - } - parsed.pkg = p.value - parsed.value += p.value + "." - parsed.value += i.Sel.Name - case *ast.MapType: - parsed.value += "map[" - p, err := parseExpr(i.Key) - if err != nil { - return parsed, err - } - parsed.value += p.value - parsed.value += "]" - p, err = parseExpr(i.Value) - if err != nil { - return parsed, err - } - parsed.value += p.value - parsed.pkg = p.pkg - case *ast.ArrayType: - parsed.value += "[]" - p, err := parseExpr(i.Elt) - if err != nil { - return parsed, err - } - parsed.value += p.value - parsed.pkg = p.pkg - default: - return parsed, errUnexpectedType{"*ast.Ident or *ast.StarExpr", i} - } - return parsed, nil -} diff --git a/pkg/plugins/pluginrpc-gen/parser_test.go b/pkg/plugins/pluginrpc-gen/parser_test.go deleted file mode 100644 index a1b1ac9567..0000000000 --- a/pkg/plugins/pluginrpc-gen/parser_test.go +++ /dev/null @@ -1,222 +0,0 @@ -package main - -import ( - "fmt" - "path/filepath" - "runtime" - "strings" - "testing" -) - -const testFixture = "fixtures/foo.go" - -func TestParseEmptyInterface(t *testing.T) { - pkg, err := Parse(testFixture, "Fooer") - if err != nil { - t.Fatal(err) - } - - assertName(t, "foo", pkg.Name) - assertNum(t, 0, len(pkg.Functions)) -} - -func TestParseNonInterfaceType(t *testing.T) { - _, err := Parse(testFixture, "wobble") - if _, ok := err.(errUnexpectedType); !ok { - t.Fatal("expected type error when parsing non-interface type") - } -} - -func TestParseWithOneFunction(t *testing.T) { - pkg, err := Parse(testFixture, "Fooer2") - if err != nil { - t.Fatal(err) - } - - assertName(t, "foo", pkg.Name) - assertNum(t, 1, len(pkg.Functions)) - assertName(t, "Foo", pkg.Functions[0].Name) - assertNum(t, 0, len(pkg.Functions[0].Args)) - assertNum(t, 0, len(pkg.Functions[0].Returns)) -} - -func TestParseWithMultipleFuncs(t *testing.T) { - pkg, err := Parse(testFixture, "Fooer3") - if err != nil { - t.Fatal(err) - } - - assertName(t, "foo", pkg.Name) - assertNum(t, 7, len(pkg.Functions)) - - f := pkg.Functions[0] - assertName(t, "Foo", f.Name) 
- assertNum(t, 0, len(f.Args)) - assertNum(t, 0, len(f.Returns)) - - f = pkg.Functions[1] - assertName(t, "Bar", f.Name) - assertNum(t, 1, len(f.Args)) - assertNum(t, 0, len(f.Returns)) - arg := f.Args[0] - assertName(t, "a", arg.Name) - assertName(t, "string", arg.ArgType) - - f = pkg.Functions[2] - assertName(t, "Baz", f.Name) - assertNum(t, 1, len(f.Args)) - assertNum(t, 1, len(f.Returns)) - arg = f.Args[0] - assertName(t, "a", arg.Name) - assertName(t, "string", arg.ArgType) - arg = f.Returns[0] - assertName(t, "err", arg.Name) - assertName(t, "error", arg.ArgType) - - f = pkg.Functions[3] - assertName(t, "Qux", f.Name) - assertNum(t, 2, len(f.Args)) - assertNum(t, 2, len(f.Returns)) - arg = f.Args[0] - assertName(t, "a", f.Args[0].Name) - assertName(t, "string", f.Args[0].ArgType) - arg = f.Args[1] - assertName(t, "b", arg.Name) - assertName(t, "string", arg.ArgType) - arg = f.Returns[0] - assertName(t, "val", arg.Name) - assertName(t, "string", arg.ArgType) - arg = f.Returns[1] - assertName(t, "err", arg.Name) - assertName(t, "error", arg.ArgType) - - f = pkg.Functions[4] - assertName(t, "Wobble", f.Name) - assertNum(t, 0, len(f.Args)) - assertNum(t, 1, len(f.Returns)) - arg = f.Returns[0] - assertName(t, "w", arg.Name) - assertName(t, "*wobble", arg.ArgType) - - f = pkg.Functions[5] - assertName(t, "Wiggle", f.Name) - assertNum(t, 0, len(f.Args)) - assertNum(t, 1, len(f.Returns)) - arg = f.Returns[0] - assertName(t, "w", arg.Name) - assertName(t, "wobble", arg.ArgType) - - f = pkg.Functions[6] - assertName(t, "WiggleWobble", f.Name) - assertNum(t, 6, len(f.Args)) - assertNum(t, 6, len(f.Returns)) - expectedArgs := [][]string{ - {"a", "[]*wobble"}, - {"b", "[]wobble"}, - {"c", "map[string]*wobble"}, - {"d", "map[*wobble]wobble"}, - {"e", "map[string][]wobble"}, - {"f", "[]*otherfixture.Spaceship"}, - } - for i, arg := range f.Args { - assertName(t, expectedArgs[i][0], arg.Name) - assertName(t, expectedArgs[i][1], arg.ArgType) - } - expectedReturns := [][]string{ - {"g", "map[*wobble]wobble"}, - {"h", "[][]*wobble"}, - {"i", "otherfixture.Spaceship"}, - {"j", "*otherfixture.Spaceship"}, - {"k", "map[*otherfixture.Spaceship]otherfixture.Spaceship"}, - {"l", "[]otherfixture.Spaceship"}, - } - for i, ret := range f.Returns { - assertName(t, expectedReturns[i][0], ret.Name) - assertName(t, expectedReturns[i][1], ret.ArgType) - } -} - -func TestParseWithUnamedReturn(t *testing.T) { - _, err := Parse(testFixture, "Fooer4") - if !strings.HasSuffix(err.Error(), errBadReturn.Error()) { - t.Fatalf("expected ErrBadReturn, got %v", err) - } -} - -func TestEmbeddedInterface(t *testing.T) { - pkg, err := Parse(testFixture, "Fooer5") - if err != nil { - t.Fatal(err) - } - - assertName(t, "foo", pkg.Name) - assertNum(t, 2, len(pkg.Functions)) - - f := pkg.Functions[0] - assertName(t, "Foo", f.Name) - assertNum(t, 0, len(f.Args)) - assertNum(t, 0, len(f.Returns)) - - f = pkg.Functions[1] - assertName(t, "Boo", f.Name) - assertNum(t, 2, len(f.Args)) - assertNum(t, 2, len(f.Returns)) - - arg := f.Args[0] - assertName(t, "a", arg.Name) - assertName(t, "string", arg.ArgType) - - arg = f.Args[1] - assertName(t, "b", arg.Name) - assertName(t, "string", arg.ArgType) - - arg = f.Returns[0] - assertName(t, "s", arg.Name) - assertName(t, "string", arg.ArgType) - - arg = f.Returns[1] - assertName(t, "err", arg.Name) - assertName(t, "error", arg.ArgType) -} - -func TestParsedImports(t *testing.T) { - cases := []string{"Fooer6", "Fooer7", "Fooer8", "Fooer9", "Fooer10", "Fooer11"} - for _, testCase := range cases { 
- pkg, err := Parse(testFixture, testCase) - if err != nil { - t.Fatal(err) - } - - assertNum(t, 1, len(pkg.Imports)) - importPath := strings.Split(pkg.Imports[0].Path, "/") - assertName(t, "otherfixture\"", importPath[len(importPath)-1]) - assertName(t, "", pkg.Imports[0].Name) - } -} - -func TestAliasedImports(t *testing.T) { - pkg, err := Parse(testFixture, "Fooer12") - if err != nil { - t.Fatal(err) - } - - assertNum(t, 1, len(pkg.Imports)) - assertName(t, "aliasedio", pkg.Imports[0].Name) -} - -func assertName(t *testing.T, expected, actual string) { - if expected != actual { - fatalOut(t, fmt.Sprintf("expected name to be `%s`, got: %s", expected, actual)) - } -} - -func assertNum(t *testing.T, expected, actual int) { - if expected != actual { - fatalOut(t, fmt.Sprintf("expected number to be %d, got: %d", expected, actual)) - } -} - -func fatalOut(t *testing.T, msg string) { - _, file, ln, _ := runtime.Caller(2) - t.Fatalf("%s:%d: %s", filepath.Base(file), ln, msg) -} diff --git a/pkg/plugins/pluginrpc-gen/template.go b/pkg/plugins/pluginrpc-gen/template.go deleted file mode 100644 index 50ed9293c1..0000000000 --- a/pkg/plugins/pluginrpc-gen/template.go +++ /dev/null @@ -1,118 +0,0 @@ -package main - -import ( - "strings" - "text/template" -) - -func printArgs(args []arg) string { - var argStr []string - for _, arg := range args { - argStr = append(argStr, arg.String()) - } - return strings.Join(argStr, ", ") -} - -func buildImports(specs []importSpec) string { - if len(specs) == 0 { - return `import "errors"` - } - imports := "import(\n" - imports += "\t\"errors\"\n" - for _, i := range specs { - imports += "\t" + i.String() + "\n" - } - imports += ")" - return imports -} - -func marshalType(t string) string { - switch t { - case "error": - // convert error types to plain strings to ensure the values are encoded/decoded properly - return "string" - default: - return t - } -} - -func isErr(t string) bool { - switch t { - case "error": - return true - default: - return false - } -} - -// Need to use this helper due to issues with go-vet -func buildTag(s string) string { - return "+build " + s -} - -var templFuncs = template.FuncMap{ - "printArgs": printArgs, - "marshalType": marshalType, - "isErr": isErr, - "lower": strings.ToLower, - "title": title, - "tag": buildTag, - "imports": buildImports, -} - -func title(s string) string { - if strings.ToLower(s) == "id" { - return "ID" - } - return strings.Title(s) -} - -var generatedTempl = template.Must(template.New("rpc_cient").Funcs(templFuncs).Parse(` -// generated code - DO NOT EDIT -{{ range $k, $v := .BuildTags }} - // {{ tag $k }} {{ end }} - -package {{ .Name }} - -{{ imports .Imports }} - -type client interface{ - Call(string, interface{}, interface{}) error -} - -type {{ .InterfaceType }}Proxy struct { - client -} - -{{ range .Functions }} - type {{ $.InterfaceType }}Proxy{{ .Name }}Request struct{ - {{ range .Args }} - {{ title .Name }} {{ .ArgType }} {{ end }} - } - - type {{ $.InterfaceType }}Proxy{{ .Name }}Response struct{ - {{ range .Returns }} - {{ title .Name }} {{ marshalType .ArgType }} {{ end }} - } - - func (pp *{{ $.InterfaceType }}Proxy) {{ .Name }}({{ printArgs .Args }}) ({{ printArgs .Returns }}) { - var( - req {{ $.InterfaceType }}Proxy{{ .Name }}Request - ret {{ $.InterfaceType }}Proxy{{ .Name }}Response - ) - {{ range .Args }} - req.{{ title .Name }} = {{ lower .Name }} {{ end }} - if err = pp.Call("{{ $.RPCName }}.{{ .Name }}", req, &ret); err != nil { - return - } - {{ range $r := .Returns }} - {{ if isErr 
.ArgType }} - if ret.{{ title .Name }} != "" { - {{ lower .Name }} = errors.New(ret.{{ title .Name }}) - } {{ end }} - {{ if isErr .ArgType | not }} {{ lower .Name }} = ret.{{ title .Name }} {{ end }} {{ end }} - - return - } -{{ end }} -`)) diff --git a/pkg/plugins/plugins.go b/pkg/plugins/plugins.go deleted file mode 100644 index debcd087c9..0000000000 --- a/pkg/plugins/plugins.go +++ /dev/null @@ -1,274 +0,0 @@ -// Package plugins provides structures and helper functions to manage Docker -// plugins. -// -// Docker discovers plugins by looking for them in the plugin directory whenever -// a user or container tries to use one by name. UNIX domain socket files must -// be located under /run/docker/plugins, whereas spec files can be located -// either under /etc/docker/plugins or /usr/lib/docker/plugins. This is handled -// by the Registry interface, which lets you list all plugins or get a plugin by -// its name if it exists. -// -// The plugins need to implement an HTTP server and bind this to the UNIX socket -// or the address specified in the spec files. -// A handshake is send at /Plugin.Activate, and plugins are expected to return -// a Manifest with a list of of Docker subsystems which this plugin implements. -// -// In order to use a plugins, you can use the ``Get`` with the name of the -// plugin and the subsystem it implements. -// -// plugin, err := plugins.Get("example", "VolumeDriver") -// if err != nil { -// return fmt.Errorf("Error looking up volume plugin example: %v", err) -// } -package plugins - -import ( - "errors" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/go-connections/tlsconfig" -) - -var ( - // ErrNotImplements is returned if the plugin does not implement the requested driver. - ErrNotImplements = errors.New("Plugin does not implement the requested driver") -) - -type plugins struct { - sync.Mutex - plugins map[string]*Plugin -} - -var ( - storage = plugins{plugins: make(map[string]*Plugin)} - extpointHandlers = make(map[string]func(string, *Client)) -) - -// Manifest lists what a plugin implements. -type Manifest struct { - // List of subsystem the plugin implements. - Implements []string -} - -// Plugin is the definition of a docker plugin. -type Plugin struct { - // Name of the plugin - name string - // Address of the plugin - Addr string - // TLS configuration of the plugin - TLSConfig *tlsconfig.Options - // Client attached to the plugin - client *Client - // Manifest of the plugin (see above) - Manifest *Manifest `json:"-"` - - // error produced by activation - activateErr error - // specifies if the activation sequence is completed (not if it is successful or not) - activated bool - // wait for activation to finish - activateWait *sync.Cond -} - -// Name returns the name of the plugin. -func (p *Plugin) Name() string { - return p.name -} - -// Client returns a ready-to-use plugin client that can be used to communicate with the plugin. -func (p *Plugin) Client() *Client { - return p.client -} - -// IsLegacy returns true for legacy plugins and false otherwise. -func (p *Plugin) IsLegacy() bool { - return true -} - -// NewLocalPlugin creates a new local plugin. 
-func NewLocalPlugin(name, addr string) *Plugin { - return &Plugin{ - name: name, - Addr: addr, - // TODO: change to nil - TLSConfig: &tlsconfig.Options{InsecureSkipVerify: true}, - activateWait: sync.NewCond(&sync.Mutex{}), - } -} - -func (p *Plugin) activate() error { - p.activateWait.L.Lock() - if p.activated { - p.activateWait.L.Unlock() - return p.activateErr - } - - p.activateErr = p.activateWithLock() - p.activated = true - - p.activateWait.L.Unlock() - p.activateWait.Broadcast() - return p.activateErr -} - -func (p *Plugin) activateWithLock() error { - c, err := NewClient(p.Addr, p.TLSConfig) - if err != nil { - return err - } - p.client = c - - m := new(Manifest) - if err = p.client.Call("Plugin.Activate", nil, m); err != nil { - return err - } - - p.Manifest = m - - for _, iface := range m.Implements { - handler, handled := extpointHandlers[iface] - if !handled { - continue - } - handler(p.name, p.client) - } - return nil -} - -func (p *Plugin) waitActive() error { - p.activateWait.L.Lock() - for !p.activated { - p.activateWait.Wait() - } - p.activateWait.L.Unlock() - return p.activateErr -} - -func (p *Plugin) implements(kind string) bool { - if err := p.waitActive(); err != nil { - return false - } - for _, driver := range p.Manifest.Implements { - if driver == kind { - return true - } - } - return false -} - -func load(name string) (*Plugin, error) { - return loadWithRetry(name, true) -} - -func loadWithRetry(name string, retry bool) (*Plugin, error) { - registry := newLocalRegistry() - start := time.Now() - - var retries int - for { - pl, err := registry.Plugin(name) - if err != nil { - if !retry { - return nil, err - } - - timeOff := backoff(retries) - if abort(start, timeOff) { - return nil, err - } - retries++ - logrus.Warnf("Unable to locate plugin: %s, retrying in %v", name, timeOff) - time.Sleep(timeOff) - continue - } - - storage.Lock() - storage.plugins[name] = pl - storage.Unlock() - - err = pl.activate() - - if err != nil { - storage.Lock() - delete(storage.plugins, name) - storage.Unlock() - } - - return pl, err - } -} - -func get(name string) (*Plugin, error) { - storage.Lock() - pl, ok := storage.plugins[name] - storage.Unlock() - if ok { - return pl, pl.activate() - } - return load(name) -} - -// Get returns the plugin given the specified name and requested implementation. -func Get(name, imp string) (*Plugin, error) { - pl, err := get(name) - if err != nil { - return nil, err - } - if pl.implements(imp) { - logrus.Debugf("%s implements: %s", name, imp) - return pl, nil - } - return nil, ErrNotImplements -} - -// Handle adds the specified function to the extpointHandlers. 
-func Handle(iface string, fn func(string, *Client)) { - extpointHandlers[iface] = fn -} - -// GetAll returns all the plugins for the specified implementation -func GetAll(imp string) ([]*Plugin, error) { - pluginNames, err := Scan() - if err != nil { - return nil, err - } - - type plLoad struct { - pl *Plugin - err error - } - - chPl := make(chan *plLoad, len(pluginNames)) - var wg sync.WaitGroup - for _, name := range pluginNames { - if pl, ok := storage.plugins[name]; ok { - chPl <- &plLoad{pl, nil} - continue - } - - wg.Add(1) - go func(name string) { - defer wg.Done() - pl, err := loadWithRetry(name, false) - chPl <- &plLoad{pl, err} - }(name) - } - - wg.Wait() - close(chPl) - - var out []*Plugin - for pl := range chPl { - if pl.err != nil { - logrus.Error(pl.err) - continue - } - if pl.pl.implements(imp) { - out = append(out, pl.pl) - } - } - return out, nil -} diff --git a/pkg/plugins/transport/http.go b/pkg/plugins/transport/http.go deleted file mode 100644 index 5be146af65..0000000000 --- a/pkg/plugins/transport/http.go +++ /dev/null @@ -1,36 +0,0 @@ -package transport - -import ( - "io" - "net/http" -) - -// httpTransport holds an http.RoundTripper -// and information about the scheme and address the transport -// sends request to. -type httpTransport struct { - http.RoundTripper - scheme string - addr string -} - -// NewHTTPTransport creates a new httpTransport. -func NewHTTPTransport(r http.RoundTripper, scheme, addr string) Transport { - return httpTransport{ - RoundTripper: r, - scheme: scheme, - addr: addr, - } -} - -// NewRequest creates a new http.Request and sets the URL -// scheme and address with the transport's fields. -func (t httpTransport) NewRequest(path string, data io.Reader) (*http.Request, error) { - req, err := newHTTPRequest(path, data) - if err != nil { - return nil, err - } - req.URL.Scheme = t.scheme - req.URL.Host = t.addr - return req, nil -} diff --git a/pkg/plugins/transport/transport.go b/pkg/plugins/transport/transport.go deleted file mode 100644 index d7f1e2100c..0000000000 --- a/pkg/plugins/transport/transport.go +++ /dev/null @@ -1,36 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "strings" -) - -// VersionMimetype is the Content-Type the engine sends to plugins. -const VersionMimetype = "application/vnd.docker.plugins.v1.2+json" - -// RequestFactory defines an interface that -// transports can implement to create new requests. -type RequestFactory interface { - NewRequest(path string, data io.Reader) (*http.Request, error) -} - -// Transport defines an interface that plugin transports -// must implement. -type Transport interface { - http.RoundTripper - RequestFactory -} - -// newHTTPRequest creates a new request with a path and a body. -func newHTTPRequest(path string, data io.Reader) (*http.Request, error) { - if !strings.HasPrefix(path, "/") { - path = "/" + path - } - req, err := http.NewRequest("POST", path, data) - if err != nil { - return nil, err - } - req.Header.Add("Accept", VersionMimetype) - return req, nil -} diff --git a/pkg/pools/pools.go b/pkg/pools/pools.go deleted file mode 100644 index 6f5988e267..0000000000 --- a/pkg/pools/pools.go +++ /dev/null @@ -1,119 +0,0 @@ -// Package pools provides a collection of pools which provide various -// data types with buffers. These can be used to lower the number of -// memory allocations and reuse buffers. -// -// New pools should be added to this package to allow them to be -// shared across packages. 
-// -// Utility functions which operate on pools should be added to this -// package to allow them to be reused. -package pools - -import ( - "bufio" - "io" - "sync" - - "github.com/docker/docker/pkg/ioutils" -) - -var ( - // BufioReader32KPool is a pool which returns bufio.Reader with a 32K buffer. - BufioReader32KPool *BufioReaderPool - // BufioWriter32KPool is a pool which returns bufio.Writer with a 32K buffer. - BufioWriter32KPool *BufioWriterPool -) - -const buffer32K = 32 * 1024 - -// BufioReaderPool is a bufio reader that uses sync.Pool. -type BufioReaderPool struct { - pool *sync.Pool -} - -func init() { - BufioReader32KPool = newBufioReaderPoolWithSize(buffer32K) - BufioWriter32KPool = newBufioWriterPoolWithSize(buffer32K) -} - -// newBufioReaderPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioReaderPoolWithSize(size int) *BufioReaderPool { - pool := &sync.Pool{ - New: func() interface{} { return bufio.NewReaderSize(nil, size) }, - } - return &BufioReaderPool{pool: pool} -} - -// Get returns a bufio.Reader which reads from r. The buffer size is that of the pool. -func (bufPool *BufioReaderPool) Get(r io.Reader) *bufio.Reader { - buf := bufPool.pool.Get().(*bufio.Reader) - buf.Reset(r) - return buf -} - -// Put puts the bufio.Reader back into the pool. -func (bufPool *BufioReaderPool) Put(b *bufio.Reader) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// Copy is a convenience wrapper which uses a buffer to avoid allocation in io.Copy. -func Copy(dst io.Writer, src io.Reader) (written int64, err error) { - buf := BufioReader32KPool.Get(src) - written, err = io.Copy(dst, buf) - BufioReader32KPool.Put(buf) - return -} - -// NewReadCloserWrapper returns a wrapper which puts the bufio.Reader back -// into the pool and closes the reader if it's an io.ReadCloser. -func (bufPool *BufioReaderPool) NewReadCloserWrapper(buf *bufio.Reader, r io.Reader) io.ReadCloser { - return ioutils.NewReadCloserWrapper(r, func() error { - if readCloser, ok := r.(io.ReadCloser); ok { - readCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} - -// BufioWriterPool is a bufio writer that uses sync.Pool. -type BufioWriterPool struct { - pool *sync.Pool -} - -// newBufioWriterPoolWithSize is unexported because new pools should be -// added here to be shared where required. -func newBufioWriterPoolWithSize(size int) *BufioWriterPool { - pool := &sync.Pool{ - New: func() interface{} { return bufio.NewWriterSize(nil, size) }, - } - return &BufioWriterPool{pool: pool} -} - -// Get returns a bufio.Writer which writes to w. The buffer size is that of the pool. -func (bufPool *BufioWriterPool) Get(w io.Writer) *bufio.Writer { - buf := bufPool.pool.Get().(*bufio.Writer) - buf.Reset(w) - return buf -} - -// Put puts the bufio.Writer back into the pool. -func (bufPool *BufioWriterPool) Put(b *bufio.Writer) { - b.Reset(nil) - bufPool.pool.Put(b) -} - -// NewWriteCloserWrapper returns a wrapper which puts the bufio.Writer back -// into the pool and closes the writer if it's an io.Writecloser. 
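[Editor's note] To make the pooling pattern concrete, a minimal usage sketch of the package being removed here, assuming its pre-move import path; `pools.Copy` and the `Get`/`Put` pair are the two entry points shown in the source above.

```go
package main

import (
	"bytes"
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/pools"
)

func main() {
	src := strings.NewReader("hello, pools")
	var dst bytes.Buffer

	// Copy borrows a pooled 32K bufio.Reader for the duration of the copy,
	// avoiding a fresh buffer allocation per call.
	n, err := pools.Copy(&dst, src)
	if err != nil {
		panic(err)
	}
	fmt.Printf("copied %d bytes: %q\n", n, dst.String())

	// Get/Put can also wrap a writer directly around a hot path.
	bw := pools.BufioWriter32KPool.Get(&dst)
	bw.WriteString(" (and again)")
	bw.Flush()                       // flush before returning the buffer
	pools.BufioWriter32KPool.Put(bw) // resets it and returns it to the pool
	fmt.Println(dst.String())
}
```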
-func (bufPool *BufioWriterPool) NewWriteCloserWrapper(buf *bufio.Writer, w io.Writer) io.WriteCloser { - return ioutils.NewWriteCloserWrapper(w, func() error { - buf.Flush() - if writeCloser, ok := w.(io.WriteCloser); ok { - writeCloser.Close() - } - bufPool.Put(buf) - return nil - }) -} diff --git a/pkg/pools/pools_test.go b/pkg/pools/pools_test.go deleted file mode 100644 index 1661b780c9..0000000000 --- a/pkg/pools/pools_test.go +++ /dev/null @@ -1,161 +0,0 @@ -package pools - -import ( - "bufio" - "bytes" - "io" - "strings" - "testing" -) - -func TestBufioReaderPoolGetWithNoReaderShouldCreateOne(t *testing.T) { - reader := BufioReader32KPool.Get(nil) - if reader == nil { - t.Fatalf("BufioReaderPool should have create a bufio.Reader but did not.") - } -} - -func TestBufioReaderPoolPutAndGet(t *testing.T) { - sr := bufio.NewReader(strings.NewReader("foobar")) - reader := BufioReader32KPool.Get(sr) - if reader == nil { - t.Fatalf("BufioReaderPool should not return a nil reader.") - } - // verify the first 3 byte - buf1 := make([]byte, 3) - _, err := reader.Read(buf1) - if err != nil { - t.Fatal(err) - } - if actual := string(buf1); actual != "foo" { - t.Fatalf("The first letter should have been 'foo' but was %v", actual) - } - BufioReader32KPool.Put(reader) - // Try to read the next 3 bytes - _, err = sr.Read(make([]byte, 3)) - if err == nil || err != io.EOF { - t.Fatalf("The buffer should have been empty, issue an EOF error.") - } -} - -type simpleReaderCloser struct { - io.Reader - closed bool -} - -func (r *simpleReaderCloser) Close() error { - r.closed = true - return nil -} - -func TestNewReadCloserWrapperWithAReadCloser(t *testing.T) { - br := bufio.NewReader(strings.NewReader("")) - sr := &simpleReaderCloser{ - Reader: strings.NewReader("foobar"), - closed: false, - } - reader := BufioReader32KPool.NewReadCloserWrapper(br, sr) - if reader == nil { - t.Fatalf("NewReadCloserWrapper should not return a nil reader.") - } - // Verify the content of reader - buf := make([]byte, 3) - _, err := reader.Read(buf) - if err != nil { - t.Fatal(err) - } - if actual := string(buf); actual != "foo" { - t.Fatalf("The first 3 letter should have been 'foo' but were %v", actual) - } - reader.Close() - // Read 3 more bytes "bar" - _, err = reader.Read(buf) - if err != nil { - t.Fatal(err) - } - if actual := string(buf); actual != "bar" { - t.Fatalf("The first 3 letter should have been 'bar' but were %v", actual) - } - if !sr.closed { - t.Fatalf("The ReaderCloser should have been closed, it is not.") - } -} - -func TestBufioWriterPoolGetWithNoReaderShouldCreateOne(t *testing.T) { - writer := BufioWriter32KPool.Get(nil) - if writer == nil { - t.Fatalf("BufioWriterPool should have create a bufio.Writer but did not.") - } -} - -func TestBufioWriterPoolPutAndGet(t *testing.T) { - buf := new(bytes.Buffer) - bw := bufio.NewWriter(buf) - writer := BufioWriter32KPool.Get(bw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } - written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } - // Make sure we Flush all the way ? 
- writer.Flush() - bw.Flush() - if len(buf.Bytes()) != 6 { - t.Fatalf("The buffer should contain 6 bytes ('foobar') but contains %v ('%v')", buf.Bytes(), string(buf.Bytes())) - } - // Reset the buffer - buf.Reset() - BufioWriter32KPool.Put(writer) - // Try to write something - if _, err = writer.Write([]byte("barfoo")); err != nil { - t.Fatal(err) - } - // If we now try to flush it, it should panic (the writer is nil) - // recover it - defer func() { - if r := recover(); r == nil { - t.Fatal("Trying to flush the writter should have 'paniced', did not.") - } - }() - writer.Flush() -} - -type simpleWriterCloser struct { - io.Writer - closed bool -} - -func (r *simpleWriterCloser) Close() error { - r.closed = true - return nil -} - -func TestNewWriteCloserWrapperWithAWriteCloser(t *testing.T) { - buf := new(bytes.Buffer) - bw := bufio.NewWriter(buf) - sw := &simpleWriterCloser{ - Writer: new(bytes.Buffer), - closed: false, - } - bw.Flush() - writer := BufioWriter32KPool.NewWriteCloserWrapper(bw, sw) - if writer == nil { - t.Fatalf("BufioReaderPool should not return a nil writer.") - } - written, err := writer.Write([]byte("foobar")) - if err != nil { - t.Fatal(err) - } - if written != 6 { - t.Fatalf("Should have written 6 bytes, but wrote %v bytes", written) - } - writer.Close() - if !sw.closed { - t.Fatalf("The ReaderCloser should have been closed, it is not.") - } -} diff --git a/pkg/progress/progress.go b/pkg/progress/progress.go deleted file mode 100644 index df3c2ba91a..0000000000 --- a/pkg/progress/progress.go +++ /dev/null @@ -1,73 +0,0 @@ -package progress - -import ( - "fmt" -) - -// Progress represents the progress of a transfer. -type Progress struct { - ID string - - // Progress contains a Message or... - Message string - - // ...progress of an action - Action string - Current int64 - Total int64 - - // Aux contains extra information not presented to the user, such as - // digests for push signing. - Aux interface{} - - LastUpdate bool -} - -// Output is an interface for writing progress information. It's -// like a writer for progress, but we don't call it Writer because -// that would be confusing next to ProgressReader (also, because it -// doesn't implement the io.Writer interface). -type Output interface { - WriteProgress(Progress) error -} - -type chanOutput chan<- Progress - -func (out chanOutput) WriteProgress(p Progress) error { - out <- p - return nil -} - -// ChanOutput returns an Output that writes progress updates to the -// supplied channel. -func ChanOutput(progressChan chan<- Progress) Output { - return chanOutput(progressChan) -} - -// Update is a convenience function to write a progress update to the channel. -func Update(out Output, id, action string) { - out.WriteProgress(Progress{ID: id, Action: action}) -} - -// Updatef is a convenience function to write a printf-formatted progress update -// to the channel. -func Updatef(out Output, id, format string, a ...interface{}) { - Update(out, id, fmt.Sprintf(format, a...)) -} - -// Message is a convenience function to write a progress message to the channel. -func Message(out Output, id, message string) { - out.WriteProgress(Progress{ID: id, Message: message}) -} - -// Messagef is a convenience function to write a printf-formatted progress -// message to the channel. -func Messagef(out Output, id, format string, a ...interface{}) { - Message(out, id, fmt.Sprintf(format, a...)) -} - -// Aux sends auxiliary information over a progress interface, which will not be -// formatted for the UI. 
This is used for things such as push signing. -func Aux(out Output, a interface{}) { - out.WriteProgress(Progress{Aux: a}) -} diff --git a/pkg/progress/progressreader.go b/pkg/progress/progressreader.go deleted file mode 100644 index c39e2b69fb..0000000000 --- a/pkg/progress/progressreader.go +++ /dev/null @@ -1,59 +0,0 @@ -package progress - -import ( - "io" -) - -// Reader is a Reader with progress bar. -type Reader struct { - in io.ReadCloser // Stream to read from - out Output // Where to send progress bar to - size int64 - current int64 - lastUpdate int64 - id string - action string -} - -// NewProgressReader creates a new ProgressReader. -func NewProgressReader(in io.ReadCloser, out Output, size int64, id, action string) *Reader { - return &Reader{ - in: in, - out: out, - size: size, - id: id, - action: action, - } -} - -func (p *Reader) Read(buf []byte) (n int, err error) { - read, err := p.in.Read(buf) - p.current += int64(read) - updateEvery := int64(1024 * 512) //512kB - if p.size > 0 { - // Update progress for every 1% read if 1% < 512kB - if increment := int64(0.01 * float64(p.size)); increment < updateEvery { - updateEvery = increment - } - } - if p.current-p.lastUpdate > updateEvery || err != nil { - p.updateProgress(err != nil && read == 0) - p.lastUpdate = p.current - } - - return read, err -} - -// Close closes the progress reader and its underlying reader. -func (p *Reader) Close() error { - if p.current < p.size { - // print a full progress bar when closing prematurely - p.current = p.size - p.updateProgress(false) - } - return p.in.Close() -} - -func (p *Reader) updateProgress(last bool) { - p.out.WriteProgress(Progress{ID: p.id, Action: p.action, Current: p.current, Total: p.size, LastUpdate: last}) -} diff --git a/pkg/progress/progressreader_test.go b/pkg/progress/progressreader_test.go deleted file mode 100644 index b14d401561..0000000000 --- a/pkg/progress/progressreader_test.go +++ /dev/null @@ -1,75 +0,0 @@ -package progress - -import ( - "bytes" - "io" - "io/ioutil" - "testing" -) - -func TestOutputOnPrematureClose(t *testing.T) { - content := []byte("TESTING") - reader := ioutil.NopCloser(bytes.NewReader(content)) - progressChan := make(chan Progress, 10) - - pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") - - part := make([]byte, 4, 4) - _, err := io.ReadFull(pr, part) - if err != nil { - pr.Close() - t.Fatal(err) - } - -drainLoop: - for { - select { - case <-progressChan: - default: - break drainLoop - } - } - - pr.Close() - - select { - case <-progressChan: - default: - t.Fatalf("Expected some output when closing prematurely") - } -} - -func TestCompleteSilently(t *testing.T) { - content := []byte("TESTING") - reader := ioutil.NopCloser(bytes.NewReader(content)) - progressChan := make(chan Progress, 10) - - pr := NewProgressReader(reader, ChanOutput(progressChan), int64(len(content)), "Test", "Read") - - out, err := ioutil.ReadAll(pr) - if err != nil { - pr.Close() - t.Fatal(err) - } - if string(out) != "TESTING" { - pr.Close() - t.Fatalf("Unexpected output %q from reader", string(out)) - } - -drainLoop: - for { - select { - case <-progressChan: - default: - break drainLoop - } - } - - pr.Close() - - select { - case <-progressChan: - t.Fatalf("Should have closed silently when read is complete") - default: - } -} diff --git a/pkg/promise/promise.go b/pkg/promise/promise.go deleted file mode 100644 index dd52b9082f..0000000000 --- a/pkg/promise/promise.go +++ /dev/null @@ -1,11 +0,0 @@ -package promise - -// Go is 
a basic promise implementation: it wraps calls a function in a goroutine, -// and returns a channel which will later return the function's return value. -func Go(f func() error) chan error { - ch := make(chan error, 1) - go func() { - ch <- f() - }() - return ch -} diff --git a/pkg/pubsub/publisher.go b/pkg/pubsub/publisher.go deleted file mode 100644 index 09364617e4..0000000000 --- a/pkg/pubsub/publisher.go +++ /dev/null @@ -1,111 +0,0 @@ -package pubsub - -import ( - "sync" - "time" -) - -var wgPool = sync.Pool{New: func() interface{} { return new(sync.WaitGroup) }} - -// NewPublisher creates a new pub/sub publisher to broadcast messages. -// The duration is used as the send timeout as to not block the publisher publishing -// messages to other clients if one client is slow or unresponsive. -// The buffer is used when creating new channels for subscribers. -func NewPublisher(publishTimeout time.Duration, buffer int) *Publisher { - return &Publisher{ - buffer: buffer, - timeout: publishTimeout, - subscribers: make(map[subscriber]topicFunc), - } -} - -type subscriber chan interface{} -type topicFunc func(v interface{}) bool - -// Publisher is basic pub/sub structure. Allows to send events and subscribe -// to them. Can be safely used from multiple goroutines. -type Publisher struct { - m sync.RWMutex - buffer int - timeout time.Duration - subscribers map[subscriber]topicFunc -} - -// Len returns the number of subscribers for the publisher -func (p *Publisher) Len() int { - p.m.RLock() - i := len(p.subscribers) - p.m.RUnlock() - return i -} - -// Subscribe adds a new subscriber to the publisher returning the channel. -func (p *Publisher) Subscribe() chan interface{} { - return p.SubscribeTopic(nil) -} - -// SubscribeTopic adds a new subscriber that filters messages sent by a topic. -func (p *Publisher) SubscribeTopic(topic topicFunc) chan interface{} { - ch := make(chan interface{}, p.buffer) - p.m.Lock() - p.subscribers[ch] = topic - p.m.Unlock() - return ch -} - -// Evict removes the specified subscriber from receiving any more messages. -func (p *Publisher) Evict(sub chan interface{}) { - p.m.Lock() - delete(p.subscribers, sub) - close(sub) - p.m.Unlock() -} - -// Publish sends the data in v to all subscribers currently registered with the publisher. -func (p *Publisher) Publish(v interface{}) { - p.m.RLock() - if len(p.subscribers) == 0 { - p.m.RUnlock() - return - } - - wg := wgPool.Get().(*sync.WaitGroup) - for sub, topic := range p.subscribers { - wg.Add(1) - go p.sendTopic(sub, topic, v, wg) - } - wg.Wait() - wgPool.Put(wg) - p.m.RUnlock() -} - -// Close closes the channels to all subscribers registered with the publisher. 
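[Editor's note] A minimal sketch of driving the Publisher defined above (pre-move import path assumed). The topic filter passed to SubscribeTopic is an ordinary func literal, which is assignable to the unexported topicFunc type.

```go
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/pubsub"
)

func main() {
	// 100ms send timeout per subscriber, channel buffer of 10.
	p := pubsub.NewPublisher(100*time.Millisecond, 10)

	all := p.Subscribe()
	// Only receives values the filter accepts.
	strOnly := p.SubscribeTopic(func(v interface{}) bool {
		_, ok := v.(string)
		return ok
	})

	p.Publish("hi")
	fmt.Println(<-all, <-strOnly) // hi hi

	p.Evict(strOnly) // closes strOnly and stops deliveries to it
	p.Close()        // closes every remaining subscriber channel
}
```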
-func (p *Publisher) Close() { - p.m.Lock() - for sub := range p.subscribers { - delete(p.subscribers, sub) - close(sub) - } - p.m.Unlock() -} - -func (p *Publisher) sendTopic(sub subscriber, topic topicFunc, v interface{}, wg *sync.WaitGroup) { - defer wg.Done() - if topic != nil && !topic(v) { - return - } - - // send under a select as to not block if the receiver is unavailable - if p.timeout > 0 { - select { - case sub <- v: - case <-time.After(p.timeout): - } - return - } - - select { - case sub <- v: - default: - } -} diff --git a/pkg/pubsub/publisher_test.go b/pkg/pubsub/publisher_test.go deleted file mode 100644 index d6b0a1d59a..0000000000 --- a/pkg/pubsub/publisher_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package pubsub - -import ( - "fmt" - "testing" - "time" -) - -func TestSendToOneSub(t *testing.T) { - p := NewPublisher(100*time.Millisecond, 10) - c := p.Subscribe() - - p.Publish("hi") - - msg := <-c - if msg.(string) != "hi" { - t.Fatalf("expected message hi but received %v", msg) - } -} - -func TestSendToMultipleSubs(t *testing.T) { - p := NewPublisher(100*time.Millisecond, 10) - subs := []chan interface{}{} - subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) - - p.Publish("hi") - - for _, c := range subs { - msg := <-c - if msg.(string) != "hi" { - t.Fatalf("expected message hi but received %v", msg) - } - } -} - -func TestEvictOneSub(t *testing.T) { - p := NewPublisher(100*time.Millisecond, 10) - s1 := p.Subscribe() - s2 := p.Subscribe() - - p.Evict(s1) - p.Publish("hi") - if _, ok := <-s1; ok { - t.Fatal("expected s1 to not receive the published message") - } - - msg := <-s2 - if msg.(string) != "hi" { - t.Fatalf("expected message hi but received %v", msg) - } -} - -func TestClosePublisher(t *testing.T) { - p := NewPublisher(100*time.Millisecond, 10) - subs := []chan interface{}{} - subs = append(subs, p.Subscribe(), p.Subscribe(), p.Subscribe()) - p.Close() - - for _, c := range subs { - if _, ok := <-c; ok { - t.Fatal("expected all subscriber channels to be closed") - } - } -} - -const sampleText = "test" - -type testSubscriber struct { - dataCh chan interface{} - ch chan error -} - -func (s *testSubscriber) Wait() error { - return <-s.ch -} - -func newTestSubscriber(p *Publisher) *testSubscriber { - ts := &testSubscriber{ - dataCh: p.Subscribe(), - ch: make(chan error), - } - go func() { - for data := range ts.dataCh { - s, ok := data.(string) - if !ok { - ts.ch <- fmt.Errorf("Unexpected type %T", data) - break - } - if s != sampleText { - ts.ch <- fmt.Errorf("Unexpected text %s", s) - break - } - } - close(ts.ch) - }() - return ts -} - -// for testing with -race -func TestPubSubRace(t *testing.T) { - p := NewPublisher(0, 1024) - var subs [](*testSubscriber) - for j := 0; j < 50; j++ { - subs = append(subs, newTestSubscriber(p)) - } - for j := 0; j < 1000; j++ { - p.Publish(sampleText) - } - time.AfterFunc(1*time.Second, func() { - for _, s := range subs { - p.Evict(s.dataCh) - } - }) - for _, s := range subs { - s.Wait() - } -} - -func BenchmarkPubSub(b *testing.B) { - for i := 0; i < b.N; i++ { - b.StopTimer() - p := NewPublisher(0, 1024) - var subs [](*testSubscriber) - for j := 0; j < 50; j++ { - subs = append(subs, newTestSubscriber(p)) - } - b.StartTimer() - for j := 0; j < 1000; j++ { - p.Publish(sampleText) - } - time.AfterFunc(1*time.Second, func() { - for _, s := range subs { - p.Evict(s.dataCh) - } - }) - for _, s := range subs { - if err := s.Wait(); err != nil { - b.Fatal(err) - } - } - } -} diff --git a/pkg/random/random.go 
b/pkg/random/random.go deleted file mode 100644 index 70de4d1304..0000000000 --- a/pkg/random/random.go +++ /dev/null @@ -1,71 +0,0 @@ -package random - -import ( - cryptorand "crypto/rand" - "io" - "math" - "math/big" - "math/rand" - "sync" - "time" -) - -// Rand is a global *rand.Rand instance, which initialized with NewSource() source. -var Rand = rand.New(NewSource()) - -// Reader is a global, shared instance of a pseudorandom bytes generator. -// It doesn't consume entropy. -var Reader io.Reader = &reader{rnd: Rand} - -// copypaste from standard math/rand -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} - -// NewSource returns math/rand.Source safe for concurrent use and initialized -// with current unix-nano timestamp -func NewSource() rand.Source { - var seed int64 - if cryptoseed, err := cryptorand.Int(cryptorand.Reader, big.NewInt(math.MaxInt64)); err != nil { - // This should not happen, but worst-case fallback to time-based seed. - seed = time.Now().UnixNano() - } else { - seed = cryptoseed.Int64() - } - return &lockedSource{ - src: rand.NewSource(seed), - } -} - -type reader struct { - rnd *rand.Rand -} - -func (r *reader) Read(b []byte) (int, error) { - i := 0 - for { - val := r.rnd.Int63() - for val > 0 { - b[i] = byte(val) - i++ - if i == len(b) { - return i, nil - } - val >>= 8 - } - } -} diff --git a/pkg/random/random_test.go b/pkg/random/random_test.go deleted file mode 100644 index cf405f78cb..0000000000 --- a/pkg/random/random_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package random - -import ( - "math/rand" - "sync" - "testing" -) - -// for go test -v -race -func TestConcurrency(t *testing.T) { - rnd := rand.New(NewSource()) - var wg sync.WaitGroup - - for i := 0; i < 10; i++ { - wg.Add(1) - go func() { - rnd.Int63() - wg.Done() - }() - } - wg.Wait() -} diff --git a/pkg/reexec/README.md b/pkg/reexec/README.md deleted file mode 100644 index 45592ce85a..0000000000 --- a/pkg/reexec/README.md +++ /dev/null @@ -1,5 +0,0 @@ -## reexec - -The `reexec` package facilitates the busybox style reexec of the docker binary that we require because -of the forking limitations of using Go. Handlers can be registered with a name and the argv 0 of -the exec of the binary will be used to find and execute custom init paths. diff --git a/pkg/reexec/command_linux.go b/pkg/reexec/command_linux.go deleted file mode 100644 index 3c3a73a9d5..0000000000 --- a/pkg/reexec/command_linux.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build linux - -package reexec - -import ( - "os/exec" - "syscall" -) - -// Self returns the path to the current process's binary. -// Returns "/proc/self/exe". -func Self() string { - return "/proc/self/exe" -} - -// Command returns *exec.Cmd which have Path as current binary. Also it setting -// SysProcAttr.Pdeathsig to SIGTERM. -// This will use the in-memory version (/proc/self/exe) of the current binary, -// it is thus safe to delete or replace the on-disk binary (os.Args[0]). 
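[Editor's note] The reexec flow is easiest to see end to end. Below is a sketch for Linux, where Command re-execs /proc/self/exe; the handler name `my-init` is an illustrative assumption.

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Runs in the child: matched against argv[0] by reexec.Init.
	reexec.Register("my-init", func() {
		fmt.Println("hello from the re-exec'd child")
		os.Exit(0)
	})
}

func main() {
	// In the child, Init finds the handler registered under os.Args[0],
	// runs it, and returns true; the parent falls through.
	if reexec.Init() {
		return
	}

	cmd := reexec.Command("my-init") // argv[0] selects the handler
	cmd.Stdout = os.Stdout
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, "re-exec failed:", err)
	}
}
```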
-func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - SysProcAttr: &syscall.SysProcAttr{ - Pdeathsig: syscall.SIGTERM, - }, - } -} diff --git a/pkg/reexec/command_unix.go b/pkg/reexec/command_unix.go deleted file mode 100644 index b70edcb316..0000000000 --- a/pkg/reexec/command_unix.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build freebsd solaris - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which have Path as current binary. -// For example if current binary is "docker" at "/usr/bin/", then cmd.Path will -// be set to "/usr/bin/docker". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/pkg/reexec/command_unsupported.go b/pkg/reexec/command_unsupported.go deleted file mode 100644 index 9aed004e86..0000000000 --- a/pkg/reexec/command_unsupported.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris - -package reexec - -import ( - "os/exec" -) - -// Command is unsupported on operating systems apart from Linux and Windows. -func Command(args ...string) *exec.Cmd { - return nil -} diff --git a/pkg/reexec/command_windows.go b/pkg/reexec/command_windows.go deleted file mode 100644 index 8d65e0ae1a..0000000000 --- a/pkg/reexec/command_windows.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build windows - -package reexec - -import ( - "os/exec" -) - -// Self returns the path to the current process's binary. -// Uses os.Args[0]. -func Self() string { - return naiveSelf() -} - -// Command returns *exec.Cmd which have Path as current binary. -// For example if current binary is "docker.exe" at "C:\", then cmd.Path will -// be set to "C:\docker.exe". -func Command(args ...string) *exec.Cmd { - return &exec.Cmd{ - Path: Self(), - Args: args, - } -} diff --git a/pkg/reexec/reexec.go b/pkg/reexec/reexec.go deleted file mode 100644 index c56671d919..0000000000 --- a/pkg/reexec/reexec.go +++ /dev/null @@ -1,47 +0,0 @@ -package reexec - -import ( - "fmt" - "os" - "os/exec" - "path/filepath" -) - -var registeredInitializers = make(map[string]func()) - -// Register adds an initialization func under the specified name -func Register(name string, initializer func()) { - if _, exists := registeredInitializers[name]; exists { - panic(fmt.Sprintf("reexec func already registered under name %q", name)) - } - - registeredInitializers[name] = initializer -} - -// Init is called as the first part of the exec process and returns true if an -// initialization function was called. -func Init() bool { - initializer, exists := registeredInitializers[os.Args[0]] - if exists { - initializer() - - return true - } - return false -} - -func naiveSelf() string { - name := os.Args[0] - if filepath.Base(name) == name { - if lp, err := exec.LookPath(name); err == nil { - return lp - } - } - // handle conversion of relative paths to absolute - if absName, err := filepath.Abs(name); err == nil { - return absName - } - // if we couldn't get absolute name, return original - // (NOTE: Go only errors on Abs() if os.Getwd fails) - return name -} diff --git a/pkg/registrar/registrar.go b/pkg/registrar/registrar.go deleted file mode 100644 index 1e75ee995b..0000000000 --- a/pkg/registrar/registrar.go +++ /dev/null @@ -1,127 +0,0 @@ -// Package registrar provides name registration. It reserves a name to a given key. 
-package registrar - -import ( - "errors" - "sync" -) - -var ( - // ErrNameReserved is an error which is returned when a name is requested to be reserved that already is reserved - ErrNameReserved = errors.New("name is reserved") - // ErrNameNotReserved is an error which is returned when trying to find a name that is not reserved - ErrNameNotReserved = errors.New("name is not reserved") - // ErrNoSuchKey is returned when trying to find the names for a key which is not known - ErrNoSuchKey = errors.New("provided key does not exist") -) - -// Registrar stores indexes a list of keys and their registered names as well as indexes names and the key that they are registered to -// Names must be unique. -// Registrar is safe for concurrent access. -type Registrar struct { - idx map[string][]string - names map[string]string - mu sync.Mutex -} - -// NewRegistrar creates a new Registrar with the an empty index -func NewRegistrar() *Registrar { - return &Registrar{ - idx: make(map[string][]string), - names: make(map[string]string), - } -} - -// Reserve registers a key to a name -// Reserve is idempotent -// Attempting to reserve a key to a name that already exists results in an `ErrNameReserved` -// A name reservation is globally unique -func (r *Registrar) Reserve(name, key string) error { - r.mu.Lock() - defer r.mu.Unlock() - - if k, exists := r.names[name]; exists { - if k != key { - return ErrNameReserved - } - return nil - } - - r.idx[key] = append(r.idx[key], name) - r.names[name] = key - return nil -} - -// Release releases the reserved name -// Once released, a name can be reserved again -func (r *Registrar) Release(name string) { - r.mu.Lock() - defer r.mu.Unlock() - - key, exists := r.names[name] - if !exists { - return - } - - for i, n := range r.idx[key] { - if n != name { - continue - } - r.idx[key] = append(r.idx[key][:i], r.idx[key][i+1:]...) - break - } - - delete(r.names, name) - - if len(r.idx[key]) == 0 { - delete(r.idx, key) - } -} - -// Delete removes all reservations for the passed in key. -// All names reserved to this key are released. 
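[Editor's note] A short sketch of the reservation semantics defined above (pre-move import path; the names and keys are illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/registrar"
)

func main() {
	r := registrar.NewRegistrar()

	// Reserve is idempotent for the same key...
	_ = r.Reserve("web", "container-1")
	_ = r.Reserve("web", "container-1")
	_ = r.Reserve("web-alias", "container-1")

	// ...but a second key cannot claim a reserved name.
	if err := r.Reserve("web", "container-2"); err == registrar.ErrNameReserved {
		fmt.Println("name 'web' is already taken")
	}

	names, _ := r.GetNames("container-1")
	fmt.Println(names) // [web web-alias]

	r.Release("web-alias")  // frees a single name
	r.Delete("container-1") // drops every name held by the key
}
```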
-func (r *Registrar) Delete(key string) { - r.mu.Lock() - for _, name := range r.idx[key] { - delete(r.names, name) - } - delete(r.idx, key) - r.mu.Unlock() -} - -// GetNames lists all the reserved names for the given key -func (r *Registrar) GetNames(key string) ([]string, error) { - r.mu.Lock() - defer r.mu.Unlock() - - names, exists := r.idx[key] - if !exists { - return nil, ErrNoSuchKey - } - return names, nil -} - -// Get returns the key that the passed in name is reserved to -func (r *Registrar) Get(name string) (string, error) { - r.mu.Lock() - key, exists := r.names[name] - r.mu.Unlock() - - if !exists { - return "", ErrNameNotReserved - } - return key, nil -} - -// GetAll returns all registered names -func (r *Registrar) GetAll() map[string][]string { - out := make(map[string][]string) - - r.mu.Lock() - // copy index into out - for id, names := range r.idx { - out[id] = names - } - r.mu.Unlock() - return out -} diff --git a/pkg/registrar/registrar_test.go b/pkg/registrar/registrar_test.go deleted file mode 100644 index 0c1ef312ae..0000000000 --- a/pkg/registrar/registrar_test.go +++ /dev/null @@ -1,119 +0,0 @@ -package registrar - -import ( - "reflect" - "testing" -) - -func TestReserve(t *testing.T) { - r := NewRegistrar() - - obj := "test1" - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - - obj2 := "test2" - err := r.Reserve("test", obj2) - if err == nil { - t.Fatalf("expected error when reserving an already reserved name to another object") - } - if err != ErrNameReserved { - t.Fatal("expected `ErrNameReserved` error when attempting to reserve an already reserved name") - } -} - -func TestRelease(t *testing.T) { - r := NewRegistrar() - obj := "testing" - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } - r.Release("test") - r.Release("test") // Ensure there is no panic here - - if err := r.Reserve("test", obj); err != nil { - t.Fatal(err) - } -} - -func TestGetNames(t *testing.T) { - r := NewRegistrar() - obj := "testing" - names := []string{"test1", "test2"} - - for _, name := range names { - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - } - r.Reserve("test3", "other") - - names2, err := r.GetNames(obj) - if err != nil { - t.Fatal(err) - } - - if !reflect.DeepEqual(names, names2) { - t.Fatalf("Exepected: %v, Got: %v", names, names2) - } -} - -func TestDelete(t *testing.T) { - r := NewRegistrar() - obj := "testing" - names := []string{"test1", "test2"} - for _, name := range names { - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - } - - r.Reserve("test3", "other") - r.Delete(obj) - - _, err := r.GetNames(obj) - if err == nil { - t.Fatal("expected error getting names for deleted key") - } - - if err != ErrNoSuchKey { - t.Fatal("expected `ErrNoSuchKey`") - } -} - -func TestGet(t *testing.T) { - r := NewRegistrar() - obj := "testing" - name := "test" - - _, err := r.Get(name) - if err == nil { - t.Fatal("expected error when key does not exist") - } - if err != ErrNameNotReserved { - t.Fatal(err) - } - - if err := r.Reserve(name, obj); err != nil { - t.Fatal(err) - } - - if _, err = r.Get(name); err != nil { - t.Fatal(err) - } - - r.Delete(obj) - _, err = r.Get(name) - if err == nil { - t.Fatal("expected error when key does not exist") - } - if err != ErrNameNotReserved { - t.Fatal(err) - } -} diff --git a/pkg/signal/README.md b/pkg/signal/README.md deleted file mode 100644 index 2b237a5942..0000000000 --- a/pkg/signal/README.md +++ 
/dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with signals across various operating systems \ No newline at end of file diff --git a/pkg/signal/signal.go b/pkg/signal/signal.go deleted file mode 100644 index 68bb77cf58..0000000000 --- a/pkg/signal/signal.go +++ /dev/null @@ -1,54 +0,0 @@ -// Package signal provides helper functions for dealing with signals across -// various operating systems. -package signal - -import ( - "fmt" - "os" - "os/signal" - "strconv" - "strings" - "syscall" -) - -// CatchAll catches all signals and relays them to the specified channel. -func CatchAll(sigc chan os.Signal) { - handledSigs := []os.Signal{} - for _, s := range SignalMap { - handledSigs = append(handledSigs, s) - } - signal.Notify(sigc, handledSigs...) -} - -// StopCatch stops catching the signals and closes the specified channel. -func StopCatch(sigc chan os.Signal) { - signal.Stop(sigc) - close(sigc) -} - -// ParseSignal translates a string to a valid syscall signal. -// It returns an error if the signal map doesn't include the given signal. -func ParseSignal(rawSignal string) (syscall.Signal, error) { - s, err := strconv.Atoi(rawSignal) - if err == nil { - if s == 0 { - return -1, fmt.Errorf("Invalid signal: %s", rawSignal) - } - return syscall.Signal(s), nil - } - signal, ok := SignalMap[strings.TrimPrefix(strings.ToUpper(rawSignal), "SIG")] - if !ok { - return -1, fmt.Errorf("Invalid signal: %s", rawSignal) - } - return signal, nil -} - -// ValidSignalForPlatform returns true if a signal is valid on the platform -func ValidSignalForPlatform(sig syscall.Signal) bool { - for _, v := range SignalMap { - if v == sig { - return true - } - } - return false -} diff --git a/pkg/signal/signal_darwin.go b/pkg/signal/signal_darwin.go deleted file mode 100644 index 946de87e94..0000000000 --- a/pkg/signal/signal_darwin.go +++ /dev/null @@ -1,41 +0,0 @@ -package signal - -import ( - "syscall" -) - -// SignalMap is a map of Darwin signals. -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUS": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/pkg/signal/signal_freebsd.go b/pkg/signal/signal_freebsd.go deleted file mode 100644 index 6b9569bb75..0000000000 --- a/pkg/signal/signal_freebsd.go +++ /dev/null @@ -1,43 +0,0 @@ -package signal - -import ( - "syscall" -) - -// SignalMap is a map of FreeBSD signals.
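// A short sketch of the signal helpers above, assuming only ParseSignal,
// CatchAll, and StopCatch as shown in this hunk; the program itself is
// hypothetical. ParseSignal accepts numeric, bare, and SIG-prefixed forms.
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/signal"
)

func main() {
	// All three spellings resolve to the same syscall.Signal.
	for _, s := range []string{"TERM", "SIGTERM", "15"} {
		sig, err := signal.ParseSignal(s)
		fmt.Println(s, "->", sig, err)
	}

	// Relay every signal in the platform's SignalMap to a channel,
	// then stop relaying and close it on the way out.
	sigc := make(chan os.Signal, 128)
	signal.CatchAll(sigc)
	defer signal.StopCatch(sigc)
}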
-var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUS": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INFO": syscall.SIGINFO, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "LWP": syscall.SIGLWP, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "THR": syscall.SIGTHR, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/pkg/signal/signal_linux.go b/pkg/signal/signal_linux.go deleted file mode 100644 index d418cbe9e3..0000000000 --- a/pkg/signal/signal_linux.go +++ /dev/null @@ -1,80 +0,0 @@ -package signal - -import ( - "syscall" -) - -const ( - sigrtmin = 34 - sigrtmax = 64 -) - -// SignalMap is a map of Linux signals. -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUS": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CLD": syscall.SIGCLD, - "CONT": syscall.SIGCONT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "PIPE": syscall.SIGPIPE, - "POLL": syscall.SIGPOLL, - "PROF": syscall.SIGPROF, - "PWR": syscall.SIGPWR, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STKFLT": syscall.SIGSTKFLT, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "UNUSED": syscall.SIGUNUSED, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, - "RTMIN": sigrtmin, - "RTMIN+1": sigrtmin + 1, - "RTMIN+2": sigrtmin + 2, - "RTMIN+3": sigrtmin + 3, - "RTMIN+4": sigrtmin + 4, - "RTMIN+5": sigrtmin + 5, - "RTMIN+6": sigrtmin + 6, - "RTMIN+7": sigrtmin + 7, - "RTMIN+8": sigrtmin + 8, - "RTMIN+9": sigrtmin + 9, - "RTMIN+10": sigrtmin + 10, - "RTMIN+11": sigrtmin + 11, - "RTMIN+12": sigrtmin + 12, - "RTMIN+13": sigrtmin + 13, - "RTMIN+14": sigrtmin + 14, - "RTMIN+15": sigrtmin + 15, - "RTMAX-14": sigrtmax - 14, - "RTMAX-13": sigrtmax - 13, - "RTMAX-12": sigrtmax - 12, - "RTMAX-11": sigrtmax - 11, - "RTMAX-10": sigrtmax - 10, - "RTMAX-9": sigrtmax - 9, - "RTMAX-8": sigrtmax - 8, - "RTMAX-7": sigrtmax - 7, - "RTMAX-6": sigrtmax - 6, - "RTMAX-5": sigrtmax - 5, - "RTMAX-4": sigrtmax - 4, - "RTMAX-3": sigrtmax - 3, - "RTMAX-2": sigrtmax - 2, - "RTMAX-1": sigrtmax - 1, - "RTMAX": sigrtmax, -} diff --git a/pkg/signal/signal_solaris.go b/pkg/signal/signal_solaris.go deleted file mode 100644 index 89576b9e3b..0000000000 --- a/pkg/signal/signal_solaris.go +++ /dev/null @@ -1,42 +0,0 @@ -package signal - -import ( - "syscall" -) - -// SignalMap is a map of Solaris signals.
-// SIGINFO and SIGTHR not defined for Solaris -var SignalMap = map[string]syscall.Signal{ - "ABRT": syscall.SIGABRT, - "ALRM": syscall.SIGALRM, - "BUS": syscall.SIGBUS, - "CHLD": syscall.SIGCHLD, - "CONT": syscall.SIGCONT, - "EMT": syscall.SIGEMT, - "FPE": syscall.SIGFPE, - "HUP": syscall.SIGHUP, - "ILL": syscall.SIGILL, - "INT": syscall.SIGINT, - "IO": syscall.SIGIO, - "IOT": syscall.SIGIOT, - "KILL": syscall.SIGKILL, - "LWP": syscall.SIGLWP, - "PIPE": syscall.SIGPIPE, - "PROF": syscall.SIGPROF, - "QUIT": syscall.SIGQUIT, - "SEGV": syscall.SIGSEGV, - "STOP": syscall.SIGSTOP, - "SYS": syscall.SIGSYS, - "TERM": syscall.SIGTERM, - "TRAP": syscall.SIGTRAP, - "TSTP": syscall.SIGTSTP, - "TTIN": syscall.SIGTTIN, - "TTOU": syscall.SIGTTOU, - "URG": syscall.SIGURG, - "USR1": syscall.SIGUSR1, - "USR2": syscall.SIGUSR2, - "VTALRM": syscall.SIGVTALRM, - "WINCH": syscall.SIGWINCH, - "XCPU": syscall.SIGXCPU, - "XFSZ": syscall.SIGXFSZ, -} diff --git a/pkg/signal/signal_unix.go b/pkg/signal/signal_unix.go deleted file mode 100644 index 6621d37184..0000000000 --- a/pkg/signal/signal_unix.go +++ /dev/null @@ -1,21 +0,0 @@ -// +build !windows - -package signal - -import ( - "syscall" -) - -// Signals used in api/client. - -const ( - // SIGCHLD is a signal sent to a process when a child process terminates, is interrupted, or resumes after being interrupted. - SIGCHLD = syscall.SIGCHLD - // SIGWINCH is a signal sent to a process when its controlling terminal changes its size - SIGWINCH = syscall.SIGWINCH - // SIGPIPE is a signal sent to a process when a pipe is written to before the other end is open for reading - SIGPIPE = syscall.SIGPIPE - // DefaultStopSignal is the syscall signal used to stop a container in unix systems. - DefaultStopSignal = "SIGTERM" -) diff --git a/pkg/signal/signal_unsupported.go b/pkg/signal/signal_unsupported.go deleted file mode 100644 index c592d37dfe..0000000000 --- a/pkg/signal/signal_unsupported.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build !linux,!darwin,!freebsd,!windows,!solaris - -package signal - -import ( - "syscall" -) - -// SignalMap is an empty map of signals for unsupported platforms. -var SignalMap = map[string]syscall.Signal{} diff --git a/pkg/signal/signal_windows.go b/pkg/signal/signal_windows.go deleted file mode 100644 index 698cbf2dc8..0000000000 --- a/pkg/signal/signal_windows.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build windows - -package signal - -import ( - "syscall" -) - -// Signals used in api/client (no windows equivalent, use -// invalid signals so they don't get handled) -const ( - SIGCHLD = syscall.Signal(0xff) - SIGWINCH = syscall.Signal(0xff) - SIGPIPE = syscall.Signal(0xff) - // DefaultStopSignal is the syscall signal used to stop a container in windows systems. - DefaultStopSignal = "15" -) - -// SignalMap is a map of "supported" signals. As per the comment in Go's -// ztypes_windows.go: "More invented values for signals". Windows doesn't -// really support signals in any way, shape or form that Unix does. -// -// We have these so that docker kill can be used to gracefully (TERM) and -// forcibly (KILL) terminate a container on Windows.
-var SignalMap = map[string]syscall.Signal{ - "KILL": syscall.SIGKILL, - "TERM": syscall.SIGTERM, -} diff --git a/pkg/signal/trap.go b/pkg/signal/trap.go deleted file mode 100644 index d35ef0e862..0000000000 --- a/pkg/signal/trap.go +++ /dev/null @@ -1,81 +0,0 @@ -package signal - -import ( - "os" - gosignal "os/signal" - "runtime" - "sync/atomic" - "syscall" - - "github.com/Sirupsen/logrus" -) - -// Trap sets up a simplified signal "trap", appropriate for common -// behavior expected from a vanilla unix command-line tool in general -// (and the Docker engine in particular). -// -// * If SIGINT or SIGTERM is received, `cleanup` is called, then the process is terminated. -// * If SIGINT or SIGTERM is received 3 times before cleanup is complete, then cleanup is -// skipped and the process is terminated immediately (allows force quit of a stuck daemon). -// * A SIGQUIT always causes an exit without cleanup, with a goroutine dump preceding exit. -// * SIGPIPE events are ignored. These are generated by systemd when journald is restarted -// while the docker daemon, itself running under systemd, is not restarted. -// Fixes https://github.com/docker/docker/issues/19728 -// -func Trap(cleanup func()) { - c := make(chan os.Signal, 1) - // we will handle INT, TERM, QUIT, SIGPIPE here - signals := []os.Signal{os.Interrupt, syscall.SIGTERM, syscall.SIGQUIT, syscall.SIGPIPE} - gosignal.Notify(c, signals...) - go func() { - interruptCount := uint32(0) - for sig := range c { - if sig == syscall.SIGPIPE { - continue - } - - go func(sig os.Signal) { - logrus.Infof("Processing signal '%v'", sig) - switch sig { - case os.Interrupt, syscall.SIGTERM: - if atomic.LoadUint32(&interruptCount) < 3 { - // Initiate the cleanup only once - if atomic.AddUint32(&interruptCount, 1) == 1 { - // Call the provided cleanup handler - cleanup() - os.Exit(0) - } else { - return - } - } else { - // 3 SIGTERM/INT signals received; force exit without cleanup - logrus.Info("Forcing docker daemon shutdown without cleanup; 3 interrupts received") - } - case syscall.SIGQUIT: - DumpStacks() - logrus.Info("Forcing docker daemon shutdown without cleanup on SIGQUIT") - } - // for the SIGINT/TERM and SIGQUIT non-clean shutdown cases, exit with 128 + signal # - os.Exit(128 + int(sig.(syscall.Signal))) - }(sig) - } - }() -} - -// DumpStacks dumps the runtime stack. -func DumpStacks() { - var ( - buf []byte - stackSize int - ) - bufferLen := 16384 - for stackSize == len(buf) { - buf = make([]byte, bufferLen) - stackSize = runtime.Stack(buf, true) - bufferLen *= 2 - } - buf = buf[:stackSize] - // Note that if the daemon is started with a less-verbose log-level than "info" (the default), the goroutine - // traces won't show up in the log. - logrus.Infof("=== BEGIN goroutine stack dump ===\n%s\n=== END goroutine stack dump ===", buf) -} diff --git a/pkg/stdcopy/stdcopy.go b/pkg/stdcopy/stdcopy.go deleted file mode 100644 index 8f67ece949..0000000000 --- a/pkg/stdcopy/stdcopy.go +++ /dev/null @@ -1,185 +0,0 @@ -package stdcopy - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "sync" - - "github.com/Sirupsen/logrus" -) - -// StdType is the type of standard stream -// a writer can multiplex to. -type StdType byte - -const ( - // Stdin represents standard input stream type. - Stdin StdType = iota - // Stdout represents standard output stream type. - Stdout - // Stderr represents standard error stream type.
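// An illustrative sketch of signal.Trap from the trap.go hunk above; the
// program and its cleanup body are hypothetical. Trap runs cleanup once on
// the first SIGINT/SIGTERM and exits; a third interrupt before cleanup
// finishes forces an immediate exit, and SIGQUIT dumps goroutine stacks.
package main

import (
	"fmt"
	"time"

	"github.com/docker/docker/pkg/signal"
)

func main() {
	signal.Trap(func() {
		fmt.Println("flushing state before shutdown")
	})
	time.Sleep(time.Hour) // stand-in for the daemon's main loop
}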
- Stderr - - stdWriterPrefixLen = 8 - stdWriterFdIndex = 0 - stdWriterSizeIndex = 4 - - startingBufLen = 32*1024 + stdWriterPrefixLen + 1 -) - -var bufPool = &sync.Pool{New: func() interface{} { return bytes.NewBuffer(nil) }} - -// stdWriter is a wrapper around io.Writer that carries the stream prefix byte. -type stdWriter struct { - io.Writer - prefix byte -} - -// Write sends the buffer to the underlying writer. -// It inserts the prefix header before the buffer, -// so stdcopy.StdCopy knows where to multiplex the output. -// It makes stdWriter implement io.Writer. -func (w *stdWriter) Write(p []byte) (n int, err error) { - if w == nil || w.Writer == nil { - return 0, errors.New("Writer not instantiated") - } - if p == nil { - return 0, nil - } - - header := [stdWriterPrefixLen]byte{stdWriterFdIndex: w.prefix} - binary.BigEndian.PutUint32(header[stdWriterSizeIndex:], uint32(len(p))) - buf := bufPool.Get().(*bytes.Buffer) - buf.Write(header[:]) - buf.Write(p) - - n, err = w.Writer.Write(buf.Bytes()) - n -= stdWriterPrefixLen - if n < 0 { - n = 0 - } - - buf.Reset() - bufPool.Put(buf) - return -} - -// NewStdWriter instantiates a new Writer. -// Everything written to it will be encapsulated using a custom format, -// and written to the underlying `w` stream. -// This allows multiple write streams (e.g. stdout and stderr) to be muxed into a single connection. -// `t` indicates the id of the stream to encapsulate. -// It can be stdcopy.Stdin, stdcopy.Stdout, stdcopy.Stderr. -func NewStdWriter(w io.Writer, t StdType) io.Writer { - return &stdWriter{ - Writer: w, - prefix: byte(t), - } -} - -// StdCopy is a modified version of io.Copy. -// -// StdCopy will demultiplex `src`, assuming that it contains two streams, -// previously multiplexed together using a StdWriter instance. -// As it reads from `src`, StdCopy will write to `dstout` and `dsterr`. -// -// StdCopy will read until it hits EOF on `src`. It will then return a nil error. -// In other words: if `err` is non-nil, it indicates a real underlying error. -// -// `written` will hold the total number of bytes written to `dstout` and `dsterr`. -func StdCopy(dstout, dsterr io.Writer, src io.Reader) (written int64, err error) { - var ( - buf = make([]byte, startingBufLen) - bufLen = len(buf) - nr, nw int - er, ew error - out io.Writer - frameSize int - ) - - for { - // Make sure we have at least a full header - for nr < stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < stdWriterPrefixLen { - logrus.Debugf("Corrupted prefix: %v", buf[:nr]) - return written, nil - } - break - } - if er != nil { - logrus.Debugf("Error reading header: %s", er) - return 0, er - } - } - - // Check the first byte to know where to write - switch StdType(buf[stdWriterFdIndex]) { - case Stdin: - fallthrough - case Stdout: - // Write on stdout - out = dstout - case Stderr: - // Write on stderr - out = dsterr - default: - logrus.Debugf("Error selecting output fd: (%d)", buf[stdWriterFdIndex]) - return 0, fmt.Errorf("Unrecognized input header: %d", buf[stdWriterFdIndex]) - } - - // Retrieve the size of the frame - frameSize = int(binary.BigEndian.Uint32(buf[stdWriterSizeIndex : stdWriterSizeIndex+4])) - logrus.Debugf("framesize: %d", frameSize) - - // Check if the buffer is big enough to read the frame. - // Extend it if necessary.
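// A round-trip sketch of the stdcopy package above, assuming only the
// NewStdWriter and StdCopy APIs shown in this hunk (the program itself is
// hypothetical). It multiplexes stdout- and stderr-tagged frames into one
// buffer, then demultiplexes them back onto the real streams.
package main

import (
	"bytes"
	"fmt"
	"os"

	"github.com/docker/docker/pkg/stdcopy"
)

func main() {
	var mux bytes.Buffer
	outW := stdcopy.NewStdWriter(&mux, stdcopy.Stdout)
	errW := stdcopy.NewStdWriter(&mux, stdcopy.Stderr)
	outW.Write([]byte("to stdout\n")) // framed with the Stdout prefix
	errW.Write([]byte("to stderr\n")) // framed with the Stderr prefix

	// Demultiplex: stdout frames go to os.Stdout, stderr frames to os.Stderr.
	if _, err := stdcopy.StdCopy(os.Stdout, os.Stderr, &mux); err != nil {
		fmt.Println("demux error:", err)
	}
}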
- if frameSize+stdWriterPrefixLen > bufLen { - logrus.Debugf("Extending buffer cap by %d (was %d)", frameSize+stdWriterPrefixLen-bufLen+1, len(buf)) - buf = append(buf, make([]byte, frameSize+stdWriterPrefixLen-bufLen+1)...) - bufLen = len(buf) - } - - // While the number of bytes read is less than the size of the frame + header, we keep reading - for nr < frameSize+stdWriterPrefixLen { - var nr2 int - nr2, er = src.Read(buf[nr:]) - nr += nr2 - if er == io.EOF { - if nr < frameSize+stdWriterPrefixLen { - logrus.Debugf("Corrupted frame: %v", buf[stdWriterPrefixLen:nr]) - return written, nil - } - break - } - if er != nil { - logrus.Debugf("Error reading frame: %s", er) - return 0, er - } - } - - // Write the retrieved frame (without header) - nw, ew = out.Write(buf[stdWriterPrefixLen : frameSize+stdWriterPrefixLen]) - if ew != nil { - logrus.Debugf("Error writing frame: %s", ew) - return 0, ew - } - // If the frame has not been fully written: error - if nw != frameSize { - logrus.Debugf("Error Short Write: (%d on %d)", nw, frameSize) - return 0, io.ErrShortWrite - } - written += int64(nw) - - // Move the rest of the buffer to the beginning - copy(buf, buf[frameSize+stdWriterPrefixLen:]) - // Move the index - nr -= frameSize + stdWriterPrefixLen - } -} diff --git a/pkg/stdcopy/stdcopy_test.go b/pkg/stdcopy/stdcopy_test.go deleted file mode 100644 index 3137a75239..0000000000 --- a/pkg/stdcopy/stdcopy_test.go +++ /dev/null @@ -1,260 +0,0 @@ -package stdcopy - -import ( - "bytes" - "errors" - "io" - "io/ioutil" - "strings" - "testing" -) - -func TestNewStdWriter(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - if writer == nil { - t.Fatalf("NewStdWriter should not return nil.") - } -} - -func TestWriteWithUninitializedStdWriter(t *testing.T) { - writer := stdWriter{ - Writer: nil, - prefix: byte(Stdout), - } - n, err := writer.Write([]byte("Something here")) - if n != 0 || err == nil { - t.Fatalf("Should fail when given an incomplete or uninitialized StdWriter") - } -} - -func TestWriteWithNilBytes(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - n, err := writer.Write(nil) - if err != nil { - t.Fatalf("Shouldn't have failed when given no data") - } - if n > 0 { - t.Fatalf("Write should have written 0 bytes, but has written %d", n) - } -} - -func TestWrite(t *testing.T) { - writer := NewStdWriter(ioutil.Discard, Stdout) - data := []byte("Test StdWrite.Write") - n, err := writer.Write(data) - if err != nil { - t.Fatalf("Error while writing with StdWriter") - } - if n != len(data) { - t.Fatalf("Write should have written %d bytes but wrote %d.", len(data), n) - } -} - -type errWriter struct { - n int - err error -} - -func (f *errWriter) Write(buf []byte) (int, error) { - return f.n, f.err -} - -func TestWriteWithWriterError(t *testing.T) { - expectedError := errors.New("expected") - expectedReturnedBytes := 10 - writer := NewStdWriter(&errWriter{ - n: stdWriterPrefixLen + expectedReturnedBytes, - err: expectedError}, Stdout) - data := []byte("This won't get written, sigh") - n, err := writer.Write(data) - if err != expectedError { - t.Fatalf("Didn't get expected error.") - } - if n != expectedReturnedBytes { - t.Fatalf("Didn't get expected written bytes %d, got %d.", - expectedReturnedBytes, n) - } -} - -func TestWriteDoesNotReturnNegativeWrittenBytes(t *testing.T) { - writer := NewStdWriter(&errWriter{n: -1}, Stdout) - data := []byte("This won't get written, sigh") - actual, _ := writer.Write(data) - if actual != 0 { - t.Fatalf("Expected
returned written bytes equal to 0, got %d", actual) - } -} - -func getSrcBuffer(stdOutBytes, stdErrBytes []byte) (buffer *bytes.Buffer, err error) { - buffer = new(bytes.Buffer) - dstOut := NewStdWriter(buffer, Stdout) - _, err = dstOut.Write(stdOutBytes) - if err != nil { - return - } - dstErr := NewStdWriter(buffer, Stderr) - _, err = dstErr.Write(stdErrBytes) - return -} - -func TestStdCopyWriteAndRead(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - written, err := StdCopy(ioutil.Discard, ioutil.Discard, buffer) - if err != nil { - t.Fatal(err) - } - expectedTotalWritten := len(stdOutBytes) + len(stdErrBytes) - if written != int64(expectedTotalWritten) { - t.Fatalf("Expected to have total of %d bytes written, got %d", expectedTotalWritten, written) - } -} - -type customReader struct { - n int - err error - totalCalls int - correctCalls int - src *bytes.Buffer -} - -func (f *customReader) Read(buf []byte) (int, error) { - f.totalCalls++ - if f.totalCalls <= f.correctCalls { - return f.src.Read(buf) - } - return f.n, f.err -} - -func TestStdCopyReturnsErrorReadingHeader(t *testing.T) { - expectedError := errors.New("error") - reader := &customReader{ - err: expectedError} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != 0 { - t.Fatalf("Expected 0 bytes read, got %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error") - } -} - -func TestStdCopyReturnsErrorReadingFrame(t *testing.T) { - expectedError := errors.New("error") - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - reader := &customReader{ - correctCalls: 1, - n: stdWriterPrefixLen + 1, - err: expectedError, - src: buffer} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != 0 { - t.Fatalf("Expected 0 bytes read, got %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error") - } -} - -func TestStdCopyDetectsCorruptedFrame(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - reader := &customReader{ - correctCalls: 1, - n: stdWriterPrefixLen + 1, - err: io.EOF, - src: buffer} - written, err := StdCopy(ioutil.Discard, ioutil.Discard, reader) - if written != startingBufLen { - t.Fatalf("Expected %d bytes read, got %d", startingBufLen, written) - } - if err != nil { - t.Fatal("Didn't get nil error") - } -} - -func TestStdCopyWithInvalidInputHeader(t *testing.T) { - dstOut := NewStdWriter(ioutil.Discard, Stdout) - dstErr := NewStdWriter(ioutil.Discard, Stderr) - src := strings.NewReader("Invalid input") - _, err := StdCopy(dstOut, dstErr, src) - if err == nil { - t.Fatal("StdCopy with invalid input header should fail.") - } -} - -func TestStdCopyWithCorruptedPrefix(t *testing.T) { - data := []byte{0x01, 0x02, 0x03} - src := bytes.NewReader(data) - written, err := StdCopy(nil, nil, src) - if err != nil { - t.Fatalf("StdCopy should not return an error with corrupted prefix.") - } - if written != 0 { - t.Fatalf("StdCopy should have written 0, but has written %d", written) - } -} - -func 
TestStdCopyReturnsWriteErrors(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - expectedError := errors.New("expected") - - dstOut := &errWriter{err: expectedError} - - written, err := StdCopy(dstOut, ioutil.Discard, buffer) - if written != 0 { - t.Fatalf("StdCopy should have written 0, but has written %d", written) - } - if err != expectedError { - t.Fatalf("Didn't get expected error, got %v", err) - } -} - -func TestStdCopyDetectsNotFullyWrittenFrames(t *testing.T) { - stdOutBytes := []byte(strings.Repeat("o", startingBufLen)) - stdErrBytes := []byte(strings.Repeat("e", startingBufLen)) - buffer, err := getSrcBuffer(stdOutBytes, stdErrBytes) - if err != nil { - t.Fatal(err) - } - dstOut := &errWriter{n: startingBufLen - 10} - - written, err := StdCopy(dstOut, ioutil.Discard, buffer) - if written != 0 { - t.Fatalf("StdCopy should have returned 0 written bytes, but returned %d", written) - } - if err != io.ErrShortWrite { - t.Fatalf("Didn't get expected io.ErrShortWrite error") - } -} - -func BenchmarkWrite(b *testing.B) { - w := NewStdWriter(ioutil.Discard, Stdout) - data := []byte("Test line for testing stdwriter performance\n") - data = bytes.Repeat(data, 100) - b.SetBytes(int64(len(data))) - b.ResetTimer() - for i := 0; i < b.N; i++ { - if _, err := w.Write(data); err != nil { - b.Fatal(err) - } - } -} diff --git a/pkg/streamformatter/streamformatter.go b/pkg/streamformatter/streamformatter.go deleted file mode 100644 index ce6ea79dee..0000000000 --- a/pkg/streamformatter/streamformatter.go +++ /dev/null @@ -1,172 +0,0 @@ -// Package streamformatter provides helper functions to format a stream. -package streamformatter - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/docker/docker/pkg/jsonmessage" - "github.com/docker/docker/pkg/progress" -) - -// StreamFormatter formats a stream, optionally using JSON. -type StreamFormatter struct { - json bool -} - -// NewStreamFormatter returns a simple StreamFormatter -func NewStreamFormatter() *StreamFormatter { - return &StreamFormatter{} -} - -// NewJSONStreamFormatter returns a StreamFormatter configured to stream JSON -func NewJSONStreamFormatter() *StreamFormatter { - return &StreamFormatter{true} -} - -const streamNewline = "\r\n" - -var streamNewlineBytes = []byte(streamNewline) - -// FormatStream formats the specified stream. -func (sf *StreamFormatter) FormatStream(str string) []byte { - if sf.json { - b, err := json.Marshal(&jsonmessage.JSONMessage{Stream: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + "\r") -} - -// FormatStatus formats the specified objects according to the specified format (and id). -func (sf *StreamFormatter) FormatStatus(id, format string, a ...interface{}) []byte { - str := fmt.Sprintf(format, a...) - if sf.json { - b, err := json.Marshal(&jsonmessage.JSONMessage{ID: id, Status: str}) - if err != nil { - return sf.FormatError(err) - } - return append(b, streamNewlineBytes...) - } - return []byte(str + streamNewline) -} - -// FormatError formats the specified error.
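// A small sketch of the two formatter variants above, assuming only the
// NewStreamFormatter/NewJSONStreamFormatter/FormatStatus APIs shown in this
// hunk; the example program and ids are hypothetical. The expected outputs
// in the comments follow the tests later in this patch.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/streamformatter"
)

func main() {
	plain := streamformatter.NewStreamFormatter()
	jsonf := streamformatter.NewJSONStreamFormatter()

	// Plain output: "pulling layer 3\r\n"
	fmt.Printf("%s", plain.FormatStatus("id1", "pulling layer %d", 3))
	// JSON output: {"status":"pulling layer 3","id":"id1"}\r\n
	fmt.Printf("%s", jsonf.FormatStatus("id1", "pulling layer %d", 3))
}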
-func (sf *StreamFormatter) FormatError(err error) []byte { - if sf.json { - jsonError, ok := err.(*jsonmessage.JSONError) - if !ok { - jsonError = &jsonmessage.JSONError{Message: err.Error()} - } - if b, err := json.Marshal(&jsonmessage.JSONMessage{Error: jsonError, ErrorMessage: err.Error()}); err == nil { - return append(b, streamNewlineBytes...) - } - return []byte("{\"error\":\"format error\"}" + streamNewline) - } - return []byte("Error: " + err.Error() + streamNewline) -} - -// FormatProgress formats the progress information for a specified action. -func (sf *StreamFormatter) FormatProgress(id, action string, progress *jsonmessage.JSONProgress, aux interface{}) []byte { - if progress == nil { - progress = &jsonmessage.JSONProgress{} - } - if sf.json { - var auxJSON *json.RawMessage - if aux != nil { - auxJSONBytes, err := json.Marshal(aux) - if err != nil { - return nil - } - auxJSON = new(json.RawMessage) - *auxJSON = auxJSONBytes - } - b, err := json.Marshal(&jsonmessage.JSONMessage{ - Status: action, - ProgressMessage: progress.String(), - Progress: progress, - ID: id, - Aux: auxJSON, - }) - if err != nil { - return nil - } - return append(b, streamNewlineBytes...) - } - endl := "\r" - if progress.String() == "" { - endl += "\n" - } - return []byte(action + " " + progress.String() + endl) -} - -// NewProgressOutput returns a progress.Output object that can be passed to -// progress.NewProgressReader. -func (sf *StreamFormatter) NewProgressOutput(out io.Writer, newLines bool) progress.Output { - return &progressOutput{ - sf: sf, - out: out, - newLines: newLines, - } -} - -type progressOutput struct { - sf *StreamFormatter - out io.Writer - newLines bool -} - -// WriteProgress formats progress information from a ProgressReader. -func (out *progressOutput) WriteProgress(prog progress.Progress) error { - var formatted []byte - if prog.Message != "" { - formatted = out.sf.FormatStatus(prog.ID, prog.Message) - } else { - jsonProgress := jsonmessage.JSONProgress{Current: prog.Current, Total: prog.Total} - formatted = out.sf.FormatProgress(prog.ID, prog.Action, &jsonProgress, prog.Aux) - } - _, err := out.out.Write(formatted) - if err != nil { - return err - } - - if out.newLines && prog.LastUpdate { - _, err = out.out.Write(out.sf.FormatStatus("", "")) - return err - } - - return nil -} - -// StdoutFormatter is a streamFormatter that writes to the standard output. -type StdoutFormatter struct { - io.Writer - *StreamFormatter -} - -func (sf *StdoutFormatter) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream(string(buf)) - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} - -// StderrFormatter is a streamFormatter that writes to the standard error. 
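// A sketch of NewProgressOutput/WriteProgress from the hunk above, assuming
// the progress.Progress fields referenced there (ID, Action, Current, Total,
// LastUpdate); the program and layer ids are hypothetical. With newLines set,
// a final status line is emitted once LastUpdate is seen.
package main

import (
	"os"

	"github.com/docker/docker/pkg/progress"
	"github.com/docker/docker/pkg/streamformatter"
)

func main() {
	sf := streamformatter.NewJSONStreamFormatter()
	out := sf.NewProgressOutput(os.Stdout, true)

	_ = out.WriteProgress(progress.Progress{
		ID: "layer1", Action: "Downloading", Current: 15, Total: 30,
	})
	_ = out.WriteProgress(progress.Progress{
		ID: "layer1", Action: "Download complete", LastUpdate: true,
	})
}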
-type StderrFormatter struct { - io.Writer - *StreamFormatter -} - -func (sf *StderrFormatter) Write(buf []byte) (int, error) { - formattedBuf := sf.StreamFormatter.FormatStream("\033[91m" + string(buf) + "\033[0m") - n, err := sf.Writer.Write(formattedBuf) - if n != len(formattedBuf) { - return n, io.ErrShortWrite - } - return len(buf), err -} diff --git a/pkg/streamformatter/streamformatter_test.go b/pkg/streamformatter/streamformatter_test.go deleted file mode 100644 index 93ec90f5f7..0000000000 --- a/pkg/streamformatter/streamformatter_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package streamformatter - -import ( - "encoding/json" - "errors" - "reflect" - "strings" - "testing" - - "github.com/docker/docker/pkg/jsonmessage" -) - -func TestFormatStream(t *testing.T) { - sf := NewStreamFormatter() - res := sf.FormatStream("stream") - if string(res) != "stream"+"\r" { - t.Fatalf("%q", res) - } -} - -func TestFormatJSONStatus(t *testing.T) { - sf := NewStreamFormatter() - res := sf.FormatStatus("ID", "%s%d", "a", 1) - if string(res) != "a1\r\n" { - t.Fatalf("%q", res) - } -} - -func TestFormatSimpleError(t *testing.T) { - sf := NewStreamFormatter() - res := sf.FormatError(errors.New("Error for formatter")) - if string(res) != "Error: Error for formatter\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatStream(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatStream("stream") - if string(res) != `{"stream":"stream"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatStatus(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatStatus("ID", "%s%d", "a", 1) - if string(res) != `{"status":"a1","id":"ID"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatSimpleError(t *testing.T) { - sf := NewJSONStreamFormatter() - res := sf.FormatError(errors.New("Error for formatter")) - if string(res) != `{"errorDetail":{"message":"Error for formatter"},"error":"Error for formatter"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatJSONError(t *testing.T) { - sf := NewJSONStreamFormatter() - err := &jsonmessage.JSONError{Code: 50, Message: "Json error"} - res := sf.FormatError(err) - if string(res) != `{"errorDetail":{"code":50,"message":"Json error"},"error":"Json error"}`+"\r\n" { - t.Fatalf("%q", res) - } -} - -func TestJSONFormatProgress(t *testing.T) { - sf := NewJSONStreamFormatter() - progress := &jsonmessage.JSONProgress{ - Current: 15, - Total: 30, - Start: 1, - } - res := sf.FormatProgress("id", "action", progress, nil) - msg := &jsonmessage.JSONMessage{} - if err := json.Unmarshal(res, msg); err != nil { - t.Fatal(err) - } - if msg.ID != "id" { - t.Fatalf("ID must be 'id', got: %s", msg.ID) - } - if msg.Status != "action" { - t.Fatalf("Status must be 'action', got: %s", msg.Status) - } - - // The progress will always be in the format of: - // [=========================> ] 15 B/30 B 404933h7m11s - // The last entry '404933h7m11s' is the timeLeftBox. - // However, the timeLeftBox field may change as progress.String() depends on time.Now(). - // Therefore, we have to strip the timeLeftBox from the strings to do the comparison. - - // Compare the progress strings before the timeLeftBox - expectedProgress := "[=========================> ] 15 B/30 B" - // if terminal column is <= 110, expectedProgressShort is expected. 
- expectedProgressShort := " 15 B/30 B" - if !(strings.HasPrefix(msg.ProgressMessage, expectedProgress) || - strings.HasPrefix(msg.ProgressMessage, expectedProgressShort)) { - t.Fatalf("ProgressMessage without the timeLeftBox must be %s or %s, got: %s", - expectedProgress, expectedProgressShort, msg.ProgressMessage) - } - - if !reflect.DeepEqual(msg.Progress, progress) { - t.Fatal("Original progress does not equal progress from FormatProgress") - } -} diff --git a/pkg/stringid/README.md b/pkg/stringid/README.md deleted file mode 100644 index 37a5098fd9..0000000000 --- a/pkg/stringid/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with string identifiers diff --git a/pkg/stringid/stringid.go b/pkg/stringid/stringid.go deleted file mode 100644 index 161184ff8a..0000000000 --- a/pkg/stringid/stringid.go +++ /dev/null @@ -1,71 +0,0 @@ -// Package stringid provides helper functions for dealing with string identifiers -package stringid - -import ( - "crypto/rand" - "encoding/hex" - "io" - "regexp" - "strconv" - "strings" - - "github.com/docker/docker/pkg/random" -) - -const shortLen = 12 - -var validShortID = regexp.MustCompile("^[a-z0-9]{12}$") - -// IsShortID determines if an arbitrary string *looks like* a short ID. -func IsShortID(id string) bool { - return validShortID.MatchString(id) -} - -// TruncateID returns a shorthand version of a string identifier for convenience. -// A collision with other shorthands is very unlikely, but possible. -// In case of a collision a lookup with TruncIndex.Get() will fail, and the caller -// will need to use a longer prefix, or the full-length Id. -func TruncateID(id string) string { - if i := strings.IndexRune(id, ':'); i >= 0 { - id = id[i+1:] - } - trimTo := shortLen - if len(id) < shortLen { - trimTo = len(id) - } - return id[:trimTo] -} - -func generateID(crypto bool) string { - b := make([]byte, 32) - r := random.Reader - if crypto { - r = rand.Reader - } - for { - if _, err := io.ReadFull(r, b); err != nil { - panic(err) // This shouldn't happen - } - id := hex.EncodeToString(b) - // if we try to parse the truncated form as an int and we don't have - // an error then the value is all numeric and causes issues when - // used as a hostname. ref #3869 - if _, err := strconv.ParseInt(TruncateID(id), 10, 64); err == nil { - continue - } - return id - } -} - -// GenerateRandomID returns a unique id. -func GenerateRandomID() string { - return generateID(true) -} - -// GenerateNonCryptoID generates a unique id without using cryptographically -// secure sources of randomness. -// It helps conserve entropy.
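// A quick sketch of the stringid helpers above, assuming only the
// GenerateRandomID/TruncateID/IsShortID APIs shown in this hunk; the example
// short id in the comment is hypothetical.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/stringid"
)

func main() {
	id := stringid.GenerateRandomID() // 64 hex chars, crypto/rand-backed
	short := stringid.TruncateID(id)  // first 12 chars, e.g. "4e38e38c07ac"
	fmt.Println(id, short, stringid.IsShortID(short)) // ..., ..., true
}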
-func GenerateNonCryptoID() string { - return generateID(false) -} diff --git a/pkg/stringid/stringid_test.go b/pkg/stringid/stringid_test.go deleted file mode 100644 index bcb1365495..0000000000 --- a/pkg/stringid/stringid_test.go +++ /dev/null @@ -1,56 +0,0 @@ -package stringid - -import ( - "strings" - "testing" -) - -func TestGenerateRandomID(t *testing.T) { - id := GenerateRandomID() - - if len(id) != 64 { - t.Fatalf("Id returned is incorrect: %s", id) - } -} - -func TestShortenId(t *testing.T) { - id := GenerateRandomID() - truncID := TruncateID(id) - if len(truncID) != 12 { - t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) - } -} - -func TestShortenIdEmpty(t *testing.T) { - id := "" - truncID := TruncateID(id) - if len(truncID) > len(id) { - t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) - } -} - -func TestShortenIdInvalid(t *testing.T) { - id := "1234" - truncID := TruncateID(id) - if len(truncID) != len(id) { - t.Fatalf("Id returned is incorrect: truncate on %s returned %s", id, truncID) - } -} - -func TestIsShortIDNonHex(t *testing.T) { - id := "some non-hex value" - if IsShortID(id) { - t.Fatalf("%s is not a short ID", id) - } -} - -func TestIsShortIDNotCorrectSize(t *testing.T) { - id := strings.Repeat("a", shortLen+1) - if IsShortID(id) { - t.Fatalf("%s is not a short ID", id) - } - id = strings.Repeat("a", shortLen-1) - if IsShortID(id) { - t.Fatalf("%s is not a short ID", id) - } -} diff --git a/pkg/stringutils/README.md b/pkg/stringutils/README.md deleted file mode 100644 index b3e454573c..0000000000 --- a/pkg/stringutils/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions for dealing with strings diff --git a/pkg/stringutils/stringutils.go b/pkg/stringutils/stringutils.go deleted file mode 100644 index 7c00b972dd..0000000000 --- a/pkg/stringutils/stringutils.go +++ /dev/null @@ -1,87 +0,0 @@ -// Package stringutils provides helper functions for dealing with strings. -package stringutils - -import ( - "bytes" - "math/rand" - "strings" - - "github.com/docker/docker/pkg/random" -) - -// GenerateRandomAlphaOnlyString generates an alphabetical random string with length n. -func GenerateRandomAlphaOnlyString(n int) string { - // make a really long string - letters := []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - b := make([]byte, n) - for i := range b { - b[i] = letters[random.Rand.Intn(len(letters))] - } - return string(b) -} - -// GenerateRandomASCIIString generates an ASCII random string with length n. -func GenerateRandomASCIIString(n int) string { - chars := "abcdefghijklmnopqrstuvwxyz" + - "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + - "~!@#$%^&*()-_+={}[]\\|<,>.?/\"';:` " - res := make([]byte, n) - for i := 0; i < n; i++ { - res[i] = chars[rand.Intn(len(chars))] - } - return string(res) -} - -// Truncate truncates a string to maxlen. -func Truncate(s string, maxlen int) string { - if len(s) <= maxlen { - return s - } - return s[:maxlen] -} - -// InSlice tests whether a string is contained in a slice of strings or not. -// Comparison is case insensitive -func InSlice(slice []string, s string) bool { - for _, ss := range slice { - if strings.ToLower(s) == strings.ToLower(ss) { - return true - } - } - return false -} - -func quote(word string, buf *bytes.Buffer) { - // Bail out early for "simple" strings - if word != "" && !strings.ContainsAny(word, "\\'\"`${[|&;<>()~*?! 
\t\n") { - buf.WriteString(word) - return - } - - buf.WriteString("'") - - for i := 0; i < len(word); i++ { - b := word[i] - if b == '\'' { - // Replace literal ' with a close ', a \', and a open ' - buf.WriteString("'\\''") - } else { - buf.WriteByte(b) - } - } - - buf.WriteString("'") -} - -// ShellQuoteArguments takes a list of strings and escapes them so they will be -// handled right when passed as arguments to a program via a shell -func ShellQuoteArguments(args []string) string { - var buf bytes.Buffer - for i, arg := range args { - if i != 0 { - buf.WriteByte(' ') - } - quote(arg, &buf) - } - return buf.String() -} diff --git a/pkg/stringutils/stringutils_test.go b/pkg/stringutils/stringutils_test.go deleted file mode 100644 index fec59450bc..0000000000 --- a/pkg/stringutils/stringutils_test.go +++ /dev/null @@ -1,105 +0,0 @@ -package stringutils - -import "testing" - -func testLengthHelper(generator func(int) string, t *testing.T) { - expectedLength := 20 - s := generator(expectedLength) - if len(s) != expectedLength { - t.Fatalf("Length of %s was %d but expected length %d", s, len(s), expectedLength) - } -} - -func testUniquenessHelper(generator func(int) string, t *testing.T) { - repeats := 25 - set := make(map[string]struct{}, repeats) - for i := 0; i < repeats; i = i + 1 { - str := generator(64) - if len(str) != 64 { - t.Fatalf("Id returned is incorrect: %s", str) - } - if _, ok := set[str]; ok { - t.Fatalf("Random number is repeated") - } - set[str] = struct{}{} - } -} - -func isASCII(s string) bool { - for _, c := range s { - if c > 127 { - return false - } - } - return true -} - -func TestGenerateRandomAlphaOnlyStringLength(t *testing.T) { - testLengthHelper(GenerateRandomAlphaOnlyString, t) -} - -func TestGenerateRandomAlphaOnlyStringUniqueness(t *testing.T) { - testUniquenessHelper(GenerateRandomAlphaOnlyString, t) -} - -func TestGenerateRandomAsciiStringLength(t *testing.T) { - testLengthHelper(GenerateRandomASCIIString, t) -} - -func TestGenerateRandomAsciiStringUniqueness(t *testing.T) { - testUniquenessHelper(GenerateRandomASCIIString, t) -} - -func TestGenerateRandomAsciiStringIsAscii(t *testing.T) { - str := GenerateRandomASCIIString(64) - if !isASCII(str) { - t.Fatalf("%s contained non-ascii characters", str) - } -} - -func TestTruncate(t *testing.T) { - str := "teststring" - newstr := Truncate(str, 4) - if newstr != "test" { - t.Fatalf("Expected test, got %s", newstr) - } - newstr = Truncate(str, 20) - if newstr != "teststring" { - t.Fatalf("Expected teststring, got %s", newstr) - } -} - -func TestInSlice(t *testing.T) { - slice := []string{"test", "in", "slice"} - - test := InSlice(slice, "test") - if !test { - t.Fatalf("Expected string test to be in slice") - } - test = InSlice(slice, "SLICE") - if !test { - t.Fatalf("Expected string SLICE to be in slice") - } - test = InSlice(slice, "notinslice") - if test { - t.Fatalf("Expected string notinslice not to be in slice") - } -} - -func TestShellQuoteArgumentsEmpty(t *testing.T) { - actual := ShellQuoteArguments([]string{}) - expected := "" - if actual != expected { - t.Fatalf("Expected an empty string") - } -} - -func TestShellQuoteArguments(t *testing.T) { - simpleString := "simpleString" - complexString := "This is a 'more' complex $tring with some special char *" - actual := ShellQuoteArguments([]string{simpleString, complexString}) - expected := "simpleString 'This is a '\\''more'\\'' complex $tring with some special char *'" - if actual != expected { - t.Fatalf("Expected \"%v\", got \"%v\"", expected, actual) 
- } -} diff --git a/pkg/symlink/LICENSE.APACHE b/pkg/symlink/LICENSE.APACHE deleted file mode 100644 index 34c4ea7c50..0000000000 --- a/pkg/symlink/LICENSE.APACHE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. 
Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2014-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/pkg/symlink/LICENSE.BSD b/pkg/symlink/LICENSE.BSD deleted file mode 100644 index 9b4f4a294e..0000000000 --- a/pkg/symlink/LICENSE.BSD +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2014-2016 The Docker & Go Authors. 
All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/pkg/symlink/README.md b/pkg/symlink/README.md deleted file mode 100644 index 8dba54fd08..0000000000 --- a/pkg/symlink/README.md +++ /dev/null @@ -1,6 +0,0 @@ -Package symlink implements EvalSymlinksInScope which is an extension of filepath.EvalSymlinks, -as well as a Windows long-path aware version of filepath.EvalSymlinks -from the [Go standard library](https://golang.org/pkg/path/filepath). - -The code from filepath.EvalSymlinks has been adapted in fs.go. -Please read the LICENSE.BSD file that governs fs.go and LICENSE.APACHE for fs_test.go. diff --git a/pkg/symlink/fs.go b/pkg/symlink/fs.go deleted file mode 100644 index dcf707f426..0000000000 --- a/pkg/symlink/fs.go +++ /dev/null @@ -1,143 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE.BSD file. - -// This code is a modified version of path/filepath/symlink.go from the Go standard library. - -package symlink - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - - "github.com/docker/docker/pkg/system" -) - -// FollowSymlinkInScope is a wrapper around evalSymlinksInScope that returns an -// absolute path. This function handles paths in a platform-agnostic manner. -func FollowSymlinkInScope(path, root string) (string, error) { - path, err := filepath.Abs(filepath.FromSlash(path)) - if err != nil { - return "", err - } - root, err = filepath.Abs(filepath.FromSlash(root)) - if err != nil { - return "", err - } - return evalSymlinksInScope(path, root) -} - -// evalSymlinksInScope will evaluate symlinks in `path` within a scope `root` and return -// a result guaranteed to be contained within the scope `root`, at the time of the call. -// Symlinks in `root` are not evaluated and left as-is. -// Errors encountered while attempting to evaluate symlinks in path will be returned. -// Non-existing paths are valid and do not constitute an error. -// `path` has to contain `root` as a prefix, or else an error will be returned. 
-// Trying to break out from `root` does not constitute an error. -// -// Example: -// If /foo/bar -> /outside, -// FollowSymlinkInScope("/foo/bar", "/foo") == "/foo/outside" instead of "/outside" -// -// IMPORTANT: it is the caller's responsibility to call evalSymlinksInScope *after* relevant symlinks -// are created, and not to subsequently create additional symlinks that could potentially make a -// previously-safe path unsafe. Example: if /foo/bar does not exist, evalSymlinksInScope("/foo/bar", "/foo") -// would return "/foo/bar". If one makes /foo/bar a symlink to /baz subsequently, then "/foo/bar" should -// no longer be considered safely contained in "/foo". -func evalSymlinksInScope(path, root string) (string, error) { - root = filepath.Clean(root) - if path == root { - return path, nil - } - if !strings.HasPrefix(path, root) { - return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) - } - const maxIter = 255 - originalPath := path - // given root of "/a" and path of "/a/b/../../c" we want path to be "/b/../../c" - path = path[len(root):] - if root == string(filepath.Separator) { - path = string(filepath.Separator) + path - } - if !strings.HasPrefix(path, string(filepath.Separator)) { - return "", errors.New("evalSymlinksInScope: " + path + " is not in " + root) - } - path = filepath.Clean(path) - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - // b here will always be considered to be the "current absolute path inside - // root" when we append paths to it, we also append a slash and use - // filepath.Clean after the loop to trim the trailing slash - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("evalSymlinksInScope: too many links in " + originalPath) - } - - // find next path component, p - i := strings.IndexRune(path, filepath.Separator) - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - continue - } - - // this takes a b.String() like "b/../" and a p like "c" and turns it - // into "/b/../c" which then gets filepath.Cleaned into "/c" and then - // root gets prepended and we Clean again (to remove any trailing slash - // if the first Clean gave us just "/") - cleanP := filepath.Clean(string(filepath.Separator) + b.String() + p) - if cleanP == string(filepath.Separator) { - // never Lstat "/" itself - b.Reset() - continue - } - fullP := filepath.Clean(root + cleanP) - - fi, err := os.Lstat(fullP) - if os.IsNotExist(err) { - // if p does not exist, accept it - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p + string(filepath.Separator)) - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(fullP) - if err != nil { - return "", err - } - if system.IsAbs(dest) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - - // see note above on "fullP := ..." for why this is double-cleaned and - // what's happening here - return filepath.Clean(root + filepath.Clean(string(filepath.Separator)+b.String())), nil -} - -// EvalSymlinks returns the path name after the evaluation of any symbolic -// links. -// If path is relative the result will be relative to the current directory, -// unless one of the components is an absolute symbolic link. -// This version has been updated to support long paths prepended with `\\?\`.
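// An illustrative sketch of FollowSymlinkInScope from the hunk above; the
// directory layout is hypothetical. If rootfs/etc is a symlink to /outside,
// the result is re-rooted inside the scope instead of escaping to /outside.
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/symlink"
)

func main() {
	p, err := symlink.FollowSymlinkInScope(
		"/var/lib/docker/c1/rootfs/etc/passwd", // path to resolve (hypothetical)
		"/var/lib/docker/c1/rootfs",            // scope root
	)
	// e.g. /var/lib/docker/c1/rootfs/outside/passwd <nil>
	fmt.Println(p, err)
}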
-func EvalSymlinks(path string) (string, error) { - return evalSymlinks(path) -} diff --git a/pkg/symlink/fs_unix.go b/pkg/symlink/fs_unix.go deleted file mode 100644 index 818004f26c..0000000000 --- a/pkg/symlink/fs_unix.go +++ /dev/null @@ -1,11 +0,0 @@ -// +build !windows - -package symlink - -import ( - "path/filepath" -) - -func evalSymlinks(path string) (string, error) { - return filepath.EvalSymlinks(path) -} diff --git a/pkg/symlink/fs_unix_test.go b/pkg/symlink/fs_unix_test.go deleted file mode 100644 index 7085c0b666..0000000000 --- a/pkg/symlink/fs_unix_test.go +++ /dev/null @@ -1,407 +0,0 @@ -// +build !windows - -// Licensed under the Apache License, Version 2.0; See LICENSE.APACHE - -package symlink - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" -) - -// TODO Windows: This needs some serious work to port to Windows. For now, -// turning off testing in this package. - -type dirOrLink struct { - path string - target string -} - -func makeFs(tmpdir string, fs []dirOrLink) error { - for _, s := range fs { - s.path = filepath.Join(tmpdir, s.path) - if s.target == "" { - os.MkdirAll(s.path, 0755) - continue - } - if err := os.MkdirAll(filepath.Dir(s.path), 0755); err != nil { - return err - } - if err := os.Symlink(s.target, s.path); err != nil && !os.IsExist(err) { - return err - } - } - return nil -} - -func testSymlink(tmpdir, path, expected, scope string) error { - rewrite, err := FollowSymlinkInScope(filepath.Join(tmpdir, path), filepath.Join(tmpdir, scope)) - if err != nil { - return err - } - expected, err = filepath.Abs(filepath.Join(tmpdir, expected)) - if err != nil { - return err - } - if expected != rewrite { - return fmt.Errorf("Expected %q got %q", expected, rewrite) - } - return nil -} - -func TestFollowSymlinkAbsolute(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkAbsolute") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/a/d/c/data", "testdata/b/c/data", "testdata"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkRelativePath(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/i", target: "a"}}); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "testdata/fs/i", "testdata/fs/a", "testdata"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkSkipSymlinksOutsideScope(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSkipSymlinksOutsideScope") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{ - {path: "linkdir", target: "realdir"}, - {path: "linkdir/foo/bar"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "linkdir/foo/bar", "linkdir/foo/bar", "linkdir/foo"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkInvalidScopePathPair(t *testing.T) { - if _, err := FollowSymlinkInScope("toto", "testdata"); err == nil { - t.Fatal("expected an error") - } -} - -func TestFollowSymlinkLastLink(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkLastLink") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/d", target: "/b"}}); err != nil { - t.Fatal(err) - } - if err 
:= testSymlink(tmpdir, "testdata/fs/a/d", "testdata/b", "testdata"); err != nil {
-        t.Fatal(err)
-    }
-}
-
-func TestFollowSymlinkRelativeLinkChangeScope(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChangeScope")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-    if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/e", target: "../b"}}); err != nil {
-        t.Fatal(err)
-    }
-    if err := testSymlink(tmpdir, "testdata/fs/a/e/c/data", "testdata/fs/b/c/data", "testdata"); err != nil {
-        t.Fatal(err)
-    }
-    // avoid letting symlink e lead us to ../b
-    // normalize to "testdata/fs/a"
-    if err := testSymlink(tmpdir, "testdata/fs/a/e", "testdata/fs/a/b", "testdata/fs/a"); err != nil {
-        t.Fatal(err)
-    }
-}
-
-func TestFollowSymlinkDeepRelativeLinkChangeScope(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDeepRelativeLinkChangeScope")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-
-    if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/a/f", target: "../../../../test"}}); err != nil {
-        t.Fatal(err)
-    }
-    // avoid letting symlink f lead us out of the "testdata" scope
-    // we don't normalize because symlink f is in scope and there is no
-    // information leak
-    if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/test", "testdata"); err != nil {
-        t.Fatal(err)
-    }
-    // avoid letting symlink f lead us out of the "testdata/fs" scope
-    // we don't normalize because symlink f is in scope and there is no
-    // information leak
-    if err := testSymlink(tmpdir, "testdata/fs/a/f", "testdata/fs/test", "testdata/fs"); err != nil {
-        t.Fatal(err)
-    }
-}
-
-func TestFollowSymlinkRelativeLinkChain(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativeLinkChain")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-
-    // avoid letting symlink g (pointed at by symlink h) take us out of scope
-    // TODO: we should probably normalize to scope here because ../[....]/root
-    // is out of scope and we leak information
-    if err := makeFs(tmpdir, []dirOrLink{
-        {path: "testdata/fs/b/h", target: "../g"},
-        {path: "testdata/fs/g", target: "../../../../../../../../../../../../root"},
-    }); err != nil {
-        t.Fatal(err)
-    }
-    if err := testSymlink(tmpdir, "testdata/fs/b/h", "testdata/root", "testdata"); err != nil {
-        t.Fatal(err)
-    }
-}
-
-func TestFollowSymlinkBreakoutPath(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutPath")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-
-    // avoid letting symlink -> ../directory/file escape from scope
-    // normalize to "testdata/fs/j"
-    if err := makeFs(tmpdir, []dirOrLink{{path: "testdata/fs/j/k", target: "../i/a"}}); err != nil {
-        t.Fatal(err)
-    }
-    if err := testSymlink(tmpdir, "testdata/fs/j/k", "testdata/fs/j/i/a", "testdata/fs/j"); err != nil {
-        t.Fatal(err)
-    }
-}
-
-func TestFollowSymlinkToRoot(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkToRoot")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-
-    // make sure we don't allow escaping to /
-    // normalize to dir
-    if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/"}}); err != nil {
-        t.Fatal(err)
-    }
-    if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
-        t.Fatal(err)
-    }
-}
-
-func TestFollowSymlinkSlashDotdot(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkSlashDotdot")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-    tmpdir =
filepath.Join(tmpdir, "dir", "subdir")
-
-    // make sure we don't allow escaping to /
-    // normalize to dir
-    if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "/../../"}}); err != nil {
-        t.Fatal(err)
-    }
-    if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
-        t.Fatal(err)
-    }
-}
-
-func TestFollowSymlinkDotdot(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkDotdot")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-    tmpdir = filepath.Join(tmpdir, "dir", "subdir")
-
-    // make sure we stay in scope without leaking information
-    // this also checks for escaping to /
-    // normalize to dir
-    if err := makeFs(tmpdir, []dirOrLink{{path: "foo", target: "../../"}}); err != nil {
-        t.Fatal(err)
-    }
-    if err := testSymlink(tmpdir, "foo", "", ""); err != nil {
-        t.Fatal(err)
-    }
-}
-
-func TestFollowSymlinkRelativePath2(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRelativePath2")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-
-    if err := makeFs(tmpdir, []dirOrLink{{path: "bar/foo", target: "baz/target"}}); err != nil {
-        t.Fatal(err)
-    }
-    if err := testSymlink(tmpdir, "bar/foo", "bar/baz/target", ""); err != nil {
-        t.Fatal(err)
-    }
-}
-
-func TestFollowSymlinkScopeLink(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkScopeLink")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-
-    if err := makeFs(tmpdir, []dirOrLink{
-        {path: "root2"},
-        {path: "root", target: "root2"},
-        {path: "root2/foo", target: "../bar"},
-    }); err != nil {
-        t.Fatal(err)
-    }
-    if err := testSymlink(tmpdir, "root/foo", "root/bar", "root"); err != nil {
-        t.Fatal(err)
-    }
-}
-
-func TestFollowSymlinkRootScope(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkRootScope")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-
-    expected, err := filepath.EvalSymlinks(tmpdir)
-    if err != nil {
-        t.Fatal(err)
-    }
-    rewrite, err := FollowSymlinkInScope(tmpdir, "/")
-    if err != nil {
-        t.Fatal(err)
-    }
-    if rewrite != expected {
-        t.Fatalf("expected %q got %q", expected, rewrite)
-    }
-}
-
-func TestFollowSymlinkEmpty(t *testing.T) {
-    res, err := FollowSymlinkInScope("", "")
-    if err != nil {
-        t.Fatal(err)
-    }
-    wd, err := os.Getwd()
-    if err != nil {
-        t.Fatal(err)
-    }
-    if res != wd {
-        t.Fatalf("expected %q got %q", wd, res)
-    }
-}
-
-func TestFollowSymlinkCircular(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkCircular")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-
-    if err := makeFs(tmpdir, []dirOrLink{{path: "root/foo", target: "foo"}}); err != nil {
-        t.Fatal(err)
-    }
-    if err := testSymlink(tmpdir, "root/foo", "", "root"); err == nil {
-        t.Fatal("expected an error for foo -> foo")
-    }
-
-    if err := makeFs(tmpdir, []dirOrLink{
-        {path: "root/bar", target: "baz"},
-        {path: "root/baz", target: "../bak"},
-        {path: "root/bak", target: "/bar"},
-    }); err != nil {
-        t.Fatal(err)
-    }
-    if err := testSymlink(tmpdir, "root/bar", "", "root"); err == nil {
-        t.Fatal("expected an error for bar -> baz -> bak -> bar")
-    }
-}
-
-func TestFollowSymlinkComplexChainWithTargetPathsContainingLinks(t *testing.T) {
-    tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkComplexChainWithTargetPathsContainingLinks")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpdir)
-
-    if err := makeFs(tmpdir, []dirOrLink{
-        {path: "root2"},
-        {path: "root", target: "root2"},
-        {path: "root/a", target: "r/s"},
-        {path: "root/r", target:
"../root/t"}, - {path: "root/root/t/s/b", target: "/../u"}, - {path: "root/u/c", target: "."}, - {path: "root/u/x/y", target: "../v"}, - {path: "root/u/v", target: "/../w"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/a/b/c/x/y/z", "root/w/z", "root"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkBreakoutNonExistent(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkBreakoutNonExistent") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root/slash", target: "/"}, - {path: "root/sym", target: "/idontexist/../slash"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/sym/file", "root/file", "root"); err != nil { - t.Fatal(err) - } -} - -func TestFollowSymlinkNoLexicalCleaning(t *testing.T) { - tmpdir, err := ioutil.TempDir("", "TestFollowSymlinkNoLexicalCleaning") - if err != nil { - t.Fatal(err) - } - defer os.RemoveAll(tmpdir) - - if err := makeFs(tmpdir, []dirOrLink{ - {path: "root/sym", target: "/foo/bar"}, - {path: "root/hello", target: "/sym/../baz"}, - }); err != nil { - t.Fatal(err) - } - if err := testSymlink(tmpdir, "root/hello", "root/foo/baz", "root"); err != nil { - t.Fatal(err) - } -} diff --git a/pkg/symlink/fs_windows.go b/pkg/symlink/fs_windows.go deleted file mode 100644 index 449fe56483..0000000000 --- a/pkg/symlink/fs_windows.go +++ /dev/null @@ -1,155 +0,0 @@ -package symlink - -import ( - "bytes" - "errors" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/docker/docker/pkg/longpath" -) - -func toShort(path string) (string, error) { - p, err := syscall.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetShortPathName says we can reuse buffer - n, err := syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - if _, err = syscall.GetShortPathName(&p[0], &b[0], uint32(len(b))); err != nil { - return "", err - } - } - return syscall.UTF16ToString(b), nil -} - -func toLong(path string) (string, error) { - p, err := syscall.UTF16FromString(path) - if err != nil { - return "", err - } - b := p // GetLongPathName says we can reuse buffer - n, err := syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - if n > uint32(len(b)) { - b = make([]uint16, n) - n, err = syscall.GetLongPathName(&p[0], &b[0], uint32(len(b))) - if err != nil { - return "", err - } - } - b = b[:n] - return syscall.UTF16ToString(b), nil -} - -func evalSymlinks(path string) (string, error) { - path, err := walkSymlinks(path) - if err != nil { - return "", err - } - - p, err := toShort(path) - if err != nil { - return "", err - } - p, err = toLong(p) - if err != nil { - return "", err - } - // syscall.GetLongPathName does not change the case of the drive letter, - // but the result of EvalSymlinks must be unique, so we have - // EvalSymlinks(`c:\a`) == EvalSymlinks(`C:\a`). - // Make drive letter upper case. 
- if len(p) >= 2 && p[1] == ':' && 'a' <= p[0] && p[0] <= 'z' { - p = string(p[0]+'A'-'a') + p[1:] - } else if len(p) >= 6 && p[5] == ':' && 'a' <= p[4] && p[4] <= 'z' { - p = p[:3] + string(p[4]+'A'-'a') + p[5:] - } - return filepath.Clean(p), nil -} - -const utf8RuneSelf = 0x80 - -func walkSymlinks(path string) (string, error) { - const maxIter = 255 - originalPath := path - // consume path by taking each frontmost path element, - // expanding it if it's a symlink, and appending it to b - var b bytes.Buffer - for n := 0; path != ""; n++ { - if n > maxIter { - return "", errors.New("EvalSymlinks: too many links in " + originalPath) - } - - // A path beginning with `\\?\` represents the root, so automatically - // skip that part and begin processing the next segment. - if strings.HasPrefix(path, longpath.Prefix) { - b.WriteString(longpath.Prefix) - path = path[4:] - continue - } - - // find next path component, p - var i = -1 - for j, c := range path { - if c < utf8RuneSelf && os.IsPathSeparator(uint8(c)) { - i = j - break - } - } - var p string - if i == -1 { - p, path = path, "" - } else { - p, path = path[:i], path[i+1:] - } - - if p == "" { - if b.Len() == 0 { - // must be absolute path - b.WriteRune(filepath.Separator) - } - continue - } - - // If this is the first segment after the long path prefix, accept the - // current segment as a volume root or UNC share and move on to the next. - if b.String() == longpath.Prefix { - b.WriteString(p) - b.WriteRune(filepath.Separator) - continue - } - - fi, err := os.Lstat(b.String() + p) - if err != nil { - return "", err - } - if fi.Mode()&os.ModeSymlink == 0 { - b.WriteString(p) - if path != "" || (b.Len() == 2 && len(p) == 2 && p[1] == ':') { - b.WriteRune(filepath.Separator) - } - continue - } - - // it's a symlink, put it at the front of path - dest, err := os.Readlink(b.String() + p) - if err != nil { - return "", err - } - if filepath.IsAbs(dest) || os.IsPathSeparator(dest[0]) { - b.Reset() - } - path = dest + string(filepath.Separator) + path - } - return filepath.Clean(b.String()), nil -} diff --git a/pkg/sysinfo/README.md b/pkg/sysinfo/README.md deleted file mode 100644 index c1530cef0d..0000000000 --- a/pkg/sysinfo/README.md +++ /dev/null @@ -1 +0,0 @@ -SysInfo stores information about which features a kernel supports. diff --git a/pkg/sysinfo/sysinfo.go b/pkg/sysinfo/sysinfo.go deleted file mode 100644 index cbd0099957..0000000000 --- a/pkg/sysinfo/sysinfo.go +++ /dev/null @@ -1,128 +0,0 @@ -package sysinfo - -import "github.com/docker/docker/pkg/parsers" - -// SysInfo stores information about which features a kernel supports. -// TODO Windows: Factor out platform specific capabilities. 
-type SysInfo struct {
-    // Whether the kernel supports AppArmor or not
-    AppArmor bool
-    // Whether the kernel supports Seccomp or not
-    Seccomp bool
-
-    cgroupMemInfo
-    cgroupCPUInfo
-    cgroupBlkioInfo
-    cgroupCpusetInfo
-    cgroupPids
-
-    // Whether IPv4 forwarding is supported or not; if this is disabled,
-    // networking will not work
-    IPv4ForwardingDisabled bool
-
-    // Whether bridge-nf-call-iptables is supported or not
-    BridgeNFCallIPTablesDisabled bool
-
-    // Whether bridge-nf-call-ip6tables is supported or not
-    BridgeNFCallIP6TablesDisabled bool
-
-    // Whether the "devices" cgroup is mounted or not
-    CgroupDevicesEnabled bool
-}
-
-type cgroupMemInfo struct {
-    // Whether memory limit is supported or not
-    MemoryLimit bool
-
-    // Whether swap limit is supported or not
-    SwapLimit bool
-
-    // Whether soft limit is supported or not
-    MemoryReservation bool
-
-    // Whether OOM killer disable is supported or not
-    OomKillDisable bool
-
-    // Whether memory swappiness is supported or not
-    MemorySwappiness bool
-
-    // Whether kernel memory limit is supported or not
-    KernelMemory bool
-}
-
-type cgroupCPUInfo struct {
-    // Whether CPU shares is supported or not
-    CPUShares bool
-
-    // Whether CPU CFS (Completely Fair Scheduler) period is supported or not
-    CPUCfsPeriod bool
-
-    // Whether CPU CFS (Completely Fair Scheduler) quota is supported or not
-    CPUCfsQuota bool
-}
-
-type cgroupBlkioInfo struct {
-    // Whether Block IO weight is supported or not
-    BlkioWeight bool
-
-    // Whether Block IO weight_device is supported or not
-    BlkioWeightDevice bool
-
-    // Whether Block IO read limit in bytes per second is supported or not
-    BlkioReadBpsDevice bool
-
-    // Whether Block IO write limit in bytes per second is supported or not
-    BlkioWriteBpsDevice bool
-
-    // Whether Block IO read limit in IO per second is supported or not
-    BlkioReadIOpsDevice bool
-
-    // Whether Block IO write limit in IO per second is supported or not
-    BlkioWriteIOpsDevice bool
-}
-
-type cgroupCpusetInfo struct {
-    // Whether Cpuset is supported or not
-    Cpuset bool
-
-    // Available Cpuset's cpus
-    Cpus string
-
-    // Available Cpuset's memory nodes
-    Mems string
-}
-
-type cgroupPids struct {
-    // Whether Pids Limit is supported or not
-    PidsLimit bool
-}
-
-// IsCpusetCpusAvailable returns `true` if the provided string set is contained
-// in cgroup's cpuset.cpus set, `false` otherwise.
-// If the error is non-nil, a parsing error occurred.
-func (c cgroupCpusetInfo) IsCpusetCpusAvailable(provided string) (bool, error) {
-    return isCpusetListAvailable(provided, c.Cpus)
-}
-
-// IsCpusetMemsAvailable returns `true` if the provided string set is contained
-// in cgroup's cpuset.mems set, `false` otherwise.
-// If the error is non-nil, a parsing error occurred.
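Callers never see the embedded structs directly; the capability flags and the cpuset helpers are promoted onto SysInfo. A short, hedged sketch of how a consumer might use them (Linux semantics assumed; the cpuset range is illustrative):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/sysinfo"
    )

    func main() {
        info := sysinfo.New(true) // quiet == true suppresses warnings

        if !info.SwapLimit {
            fmt.Println("kernel lacks swap-limit support; swap limits would be ignored")
        }

        ok, err := info.IsCpusetCpusAvailable("0-1")
        if err != nil {
            panic(err)
        }
        fmt.Println("cpus 0-1 available:", ok)
    }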
-func (c cgroupCpusetInfo) IsCpusetMemsAvailable(provided string) (bool, error) { - return isCpusetListAvailable(provided, c.Mems) -} - -func isCpusetListAvailable(provided, available string) (bool, error) { - parsedProvided, err := parsers.ParseUintList(provided) - if err != nil { - return false, err - } - parsedAvailable, err := parsers.ParseUintList(available) - if err != nil { - return false, err - } - for k := range parsedProvided { - if !parsedAvailable[k] { - return false, nil - } - } - return true, nil -} diff --git a/pkg/sysinfo/sysinfo_freebsd.go b/pkg/sysinfo/sysinfo_freebsd.go deleted file mode 100644 index 22ae0d95a9..0000000000 --- a/pkg/sysinfo/sysinfo_freebsd.go +++ /dev/null @@ -1,7 +0,0 @@ -package sysinfo - -// New returns an empty SysInfo for freebsd for now. -func New(quiet bool) *SysInfo { - sysInfo := &SysInfo{} - return sysInfo -} diff --git a/pkg/sysinfo/sysinfo_linux.go b/pkg/sysinfo/sysinfo_linux.go deleted file mode 100644 index 8ba3ce4d39..0000000000 --- a/pkg/sysinfo/sysinfo_linux.go +++ /dev/null @@ -1,246 +0,0 @@ -package sysinfo - -import ( - "fmt" - "io/ioutil" - "os" - "path" - "strings" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/cgroups" -) - -const ( - // SeccompModeFilter refers to the syscall argument SECCOMP_MODE_FILTER. - SeccompModeFilter = uintptr(2) -) - -func findCgroupMountpoints() (map[string]string, error) { - cgMounts, err := cgroups.GetCgroupMounts() - if err != nil { - return nil, fmt.Errorf("Failed to parse cgroup information: %v", err) - } - mps := make(map[string]string) - for _, m := range cgMounts { - for _, ss := range m.Subsystems { - mps[ss] = m.Mountpoint - } - } - return mps, nil -} - -// New returns a new SysInfo, using the filesystem to detect which features -// the kernel supports. If `quiet` is `false` warnings are printed in logs -// whenever an error occurs or misconfigurations are present. -func New(quiet bool) *SysInfo { - sysInfo := &SysInfo{} - cgMounts, err := findCgroupMountpoints() - if err != nil { - logrus.Warnf("Failed to parse cgroup information: %v", err) - } else { - sysInfo.cgroupMemInfo = checkCgroupMem(cgMounts, quiet) - sysInfo.cgroupCPUInfo = checkCgroupCPU(cgMounts, quiet) - sysInfo.cgroupBlkioInfo = checkCgroupBlkioInfo(cgMounts, quiet) - sysInfo.cgroupCpusetInfo = checkCgroupCpusetInfo(cgMounts, quiet) - sysInfo.cgroupPids = checkCgroupPids(quiet) - } - - _, ok := cgMounts["devices"] - sysInfo.CgroupDevicesEnabled = ok - - sysInfo.IPv4ForwardingDisabled = !readProcBool("/proc/sys/net/ipv4/ip_forward") - sysInfo.BridgeNFCallIPTablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-iptables") - sysInfo.BridgeNFCallIP6TablesDisabled = !readProcBool("/proc/sys/net/bridge/bridge-nf-call-ip6tables") - - // Check if AppArmor is supported. - if _, err := os.Stat("/sys/kernel/security/apparmor"); !os.IsNotExist(err) { - sysInfo.AppArmor = true - } - - // Check if Seccomp is supported, via CONFIG_SECCOMP. - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_GET_SECCOMP, 0, 0); err != syscall.EINVAL { - // Make sure the kernel has CONFIG_SECCOMP_FILTER. - if _, _, err := syscall.RawSyscall(syscall.SYS_PRCTL, syscall.PR_SET_SECCOMP, SeccompModeFilter, 0); err != syscall.EINVAL { - sysInfo.Seccomp = true - } - } - - return sysInfo -} - -// checkCgroupMem reads the memory information from the memory cgroup mount point. 
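Before the per-subsystem probes below, note that isCpusetListAvailable above leans on parsers.ParseUintList, which expands a cpuset-style list such as "0-4" into a set keyed by int. A tiny sketch of the comparison it enables (values are illustrative, errors elided for brevity):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/parsers"
    )

    func main() {
        provided, _ := parsers.ParseUintList("1,3")  // map[1:true 3:true]
        available, _ := parsers.ParseUintList("0-4") // map[0:true ... 4:true]

        contained := true
        for k := range provided {
            if !available[k] {
                contained = false
                break
            }
        }
        fmt.Println(contained) // true
    }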
-func checkCgroupMem(cgMounts map[string]string, quiet bool) cgroupMemInfo { - mountPoint, ok := cgMounts["memory"] - if !ok { - if !quiet { - logrus.Warn("Your kernel does not support cgroup memory limit") - } - return cgroupMemInfo{} - } - - swapLimit := cgroupEnabled(mountPoint, "memory.memsw.limit_in_bytes") - if !quiet && !swapLimit { - logrus.Warn("Your kernel does not support swap memory limit.") - } - memoryReservation := cgroupEnabled(mountPoint, "memory.soft_limit_in_bytes") - if !quiet && !memoryReservation { - logrus.Warn("Your kernel does not support memory reservation.") - } - oomKillDisable := cgroupEnabled(mountPoint, "memory.oom_control") - if !quiet && !oomKillDisable { - logrus.Warn("Your kernel does not support oom control.") - } - memorySwappiness := cgroupEnabled(mountPoint, "memory.swappiness") - if !quiet && !memorySwappiness { - logrus.Warn("Your kernel does not support memory swappiness.") - } - kernelMemory := cgroupEnabled(mountPoint, "memory.kmem.limit_in_bytes") - if !quiet && !kernelMemory { - logrus.Warn("Your kernel does not support kernel memory limit.") - } - - return cgroupMemInfo{ - MemoryLimit: true, - SwapLimit: swapLimit, - MemoryReservation: memoryReservation, - OomKillDisable: oomKillDisable, - MemorySwappiness: memorySwappiness, - KernelMemory: kernelMemory, - } -} - -// checkCgroupCPU reads the cpu information from the cpu cgroup mount point. -func checkCgroupCPU(cgMounts map[string]string, quiet bool) cgroupCPUInfo { - mountPoint, ok := cgMounts["cpu"] - if !ok { - if !quiet { - logrus.Warn("Unable to find cpu cgroup in mounts") - } - return cgroupCPUInfo{} - } - - cpuShares := cgroupEnabled(mountPoint, "cpu.shares") - if !quiet && !cpuShares { - logrus.Warn("Your kernel does not support cgroup cpu shares") - } - - cpuCfsPeriod := cgroupEnabled(mountPoint, "cpu.cfs_period_us") - if !quiet && !cpuCfsPeriod { - logrus.Warn("Your kernel does not support cgroup cfs period") - } - - cpuCfsQuota := cgroupEnabled(mountPoint, "cpu.cfs_quota_us") - if !quiet && !cpuCfsQuota { - logrus.Warn("Your kernel does not support cgroup cfs quotas") - } - return cgroupCPUInfo{ - CPUShares: cpuShares, - CPUCfsPeriod: cpuCfsPeriod, - CPUCfsQuota: cpuCfsQuota, - } -} - -// checkCgroupBlkioInfo reads the blkio information from the blkio cgroup mount point. 
-func checkCgroupBlkioInfo(cgMounts map[string]string, quiet bool) cgroupBlkioInfo { - mountPoint, ok := cgMounts["blkio"] - if !ok { - if !quiet { - logrus.Warn("Unable to find blkio cgroup in mounts") - } - return cgroupBlkioInfo{} - } - - weight := cgroupEnabled(mountPoint, "blkio.weight") - if !quiet && !weight { - logrus.Warn("Your kernel does not support cgroup blkio weight") - } - - weightDevice := cgroupEnabled(mountPoint, "blkio.weight_device") - if !quiet && !weightDevice { - logrus.Warn("Your kernel does not support cgroup blkio weight_device") - } - - readBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_bps_device") - if !quiet && !readBpsDevice { - logrus.Warn("Your kernel does not support cgroup blkio throttle.read_bps_device") - } - - writeBpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_bps_device") - if !quiet && !writeBpsDevice { - logrus.Warn("Your kernel does not support cgroup blkio throttle.write_bps_device") - } - readIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.read_iops_device") - if !quiet && !readIOpsDevice { - logrus.Warn("Your kernel does not support cgroup blkio throttle.read_iops_device") - } - - writeIOpsDevice := cgroupEnabled(mountPoint, "blkio.throttle.write_iops_device") - if !quiet && !writeIOpsDevice { - logrus.Warn("Your kernel does not support cgroup blkio throttle.write_iops_device") - } - return cgroupBlkioInfo{ - BlkioWeight: weight, - BlkioWeightDevice: weightDevice, - BlkioReadBpsDevice: readBpsDevice, - BlkioWriteBpsDevice: writeBpsDevice, - BlkioReadIOpsDevice: readIOpsDevice, - BlkioWriteIOpsDevice: writeIOpsDevice, - } -} - -// checkCgroupCpusetInfo reads the cpuset information from the cpuset cgroup mount point. -func checkCgroupCpusetInfo(cgMounts map[string]string, quiet bool) cgroupCpusetInfo { - mountPoint, ok := cgMounts["cpuset"] - if !ok { - if !quiet { - logrus.Warn("Unable to find cpuset cgroup in mounts") - } - return cgroupCpusetInfo{} - } - - cpus, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.cpus")) - if err != nil { - return cgroupCpusetInfo{} - } - - mems, err := ioutil.ReadFile(path.Join(mountPoint, "cpuset.mems")) - if err != nil { - return cgroupCpusetInfo{} - } - - return cgroupCpusetInfo{ - Cpuset: true, - Cpus: strings.TrimSpace(string(cpus)), - Mems: strings.TrimSpace(string(mems)), - } -} - -// checkCgroupPids reads the pids information from the pids cgroup mount point. 
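Each of these probes ultimately reduces to checking whether a control file exists under the subsystem mount point, which is what the cgroupEnabled helper below does. A standalone sketch of the same idea (the mount point here is an assumption, not detected the way findCgroupMountpoints does):

    package main

    import (
        "fmt"
        "os"
        "path"
    )

    // probe reports whether a cgroup control file exists under mountPoint.
    func probe(mountPoint, name string) bool {
        _, err := os.Stat(path.Join(mountPoint, name))
        return err == nil
    }

    func main() {
        // /sys/fs/cgroup/memory is a common location, but not guaranteed.
        fmt.Println(probe("/sys/fs/cgroup/memory", "memory.limit_in_bytes"))
    }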
-func checkCgroupPids(quiet bool) cgroupPids {
-    _, err := cgroups.FindCgroupMountpoint("pids")
-    if err != nil {
-        if !quiet {
-            logrus.Warn(err)
-        }
-        return cgroupPids{}
-    }
-
-    return cgroupPids{
-        PidsLimit: true,
-    }
-}
-
-func cgroupEnabled(mountPoint, name string) bool {
-    _, err := os.Stat(path.Join(mountPoint, name))
-    return err == nil
-}
-
-func readProcBool(path string) bool {
-    val, err := ioutil.ReadFile(path)
-    if err != nil {
-        return false
-    }
-    return strings.TrimSpace(string(val)) == "1"
-}
diff --git a/pkg/sysinfo/sysinfo_linux_test.go b/pkg/sysinfo/sysinfo_linux_test.go
deleted file mode 100644
index fae0fdffbb..0000000000
--- a/pkg/sysinfo/sysinfo_linux_test.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package sysinfo
-
-import (
-    "io/ioutil"
-    "os"
-    "path"
-    "path/filepath"
-    "testing"
-)
-
-func TestReadProcBool(t *testing.T) {
-    tmpDir, err := ioutil.TempDir("", "test-sysinfo-proc")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(tmpDir)
-
-    procFile := filepath.Join(tmpDir, "read-proc-bool")
-    if err := ioutil.WriteFile(procFile, []byte("1"), 0644); err != nil {
-        t.Fatal(err)
-    }
-
-    if !readProcBool(procFile) {
-        t.Fatal("expected proc bool to be true, got false")
-    }
-
-    if err := ioutil.WriteFile(procFile, []byte("0"), 0644); err != nil {
-        t.Fatal(err)
-    }
-    if readProcBool(procFile) {
-        t.Fatal("expected proc bool to be false, got true")
-    }
-
-    if readProcBool(path.Join(tmpDir, "no-exist")) {
-        t.Fatal("should be false for non-existent entry")
-    }
-}
-
-func TestCgroupEnabled(t *testing.T) {
-    cgroupDir, err := ioutil.TempDir("", "cgroup-test")
-    if err != nil {
-        t.Fatal(err)
-    }
-    defer os.RemoveAll(cgroupDir)
-
-    if cgroupEnabled(cgroupDir, "test") {
-        t.Fatal("cgroupEnabled should be false")
-    }
-
-    if err := ioutil.WriteFile(path.Join(cgroupDir, "test"), []byte{}, 0644); err != nil {
-        t.Fatal(err)
-    }
-
-    if !cgroupEnabled(cgroupDir, "test") {
-        t.Fatal("cgroupEnabled should be true")
-    }
-}
diff --git a/pkg/sysinfo/sysinfo_solaris.go b/pkg/sysinfo/sysinfo_solaris.go
deleted file mode 100644
index 75a9c9bb2d..0000000000
--- a/pkg/sysinfo/sysinfo_solaris.go
+++ /dev/null
@@ -1,119 +0,0 @@
-// +build solaris,cgo
-
-package sysinfo
-
-import (
-    "bytes"
-    "os/exec"
-    "strconv"
-    "strings"
-)
-
-/*
-#cgo LDFLAGS: -llgrp
-#include <unistd.h>
-#include <stdlib.h>
-#include <sys/lgrp_user.h>
-int getLgrpCount() {
-    lgrp_cookie_t lgrpcookie = LGRP_COOKIE_NONE;
-    uint_t nlgrps;
-
-    if ((lgrpcookie = lgrp_init(LGRP_VIEW_OS)) == LGRP_COOKIE_NONE) {
-        return -1;
-    }
-    nlgrps = lgrp_nlgrps(lgrpcookie);
-    return nlgrps;
-}
-*/
-import "C"
-
-// IsCPUSharesAvailable returns whether the CPUShares setting is supported.
-// We need FSS to be set as the default scheduling class to support CPU shares.
-func IsCPUSharesAvailable() bool {
-    cmd := exec.Command("/usr/sbin/dispadmin", "-d")
-    outBuf := new(bytes.Buffer)
-    errBuf := new(bytes.Buffer)
-    cmd.Stderr = errBuf
-    cmd.Stdout = outBuf
-
-    if err := cmd.Run(); err != nil {
-        return false
-    }
-    return strings.Contains(outBuf.String(), "FSS")
-}
-
-// New returns a new SysInfo, using the filesystem to detect which features
-// the kernel supports.
-// NOTE Solaris: if we change the capabilities below, be sure
-// to update verifyPlatformContainerSettings() in daemon_solaris.go
-func New(quiet bool) *SysInfo {
-    sysInfo := &SysInfo{}
-    sysInfo.cgroupMemInfo = setCgroupMem(quiet)
-    sysInfo.cgroupCPUInfo = setCgroupCPU(quiet)
-    sysInfo.cgroupBlkioInfo = setCgroupBlkioInfo(quiet)
-    sysInfo.cgroupCpusetInfo = setCgroupCPUsetInfo(quiet)
-
-    sysInfo.IPv4ForwardingDisabled = false
-
-    sysInfo.AppArmor = false
-
-    return sysInfo
-}
-
-// setCgroupMem reads the memory information for Solaris.
-func setCgroupMem(quiet bool) cgroupMemInfo {
-    return cgroupMemInfo{
-        MemoryLimit:       true,
-        SwapLimit:         true,
-        MemoryReservation: false,
-        OomKillDisable:    false,
-        MemorySwappiness:  false,
-        KernelMemory:      false,
-    }
-}
-
-// setCgroupCPU reads the cpu information for Solaris.
-func setCgroupCPU(quiet bool) cgroupCPUInfo {
-    return cgroupCPUInfo{
-        CPUShares:    true,
-        CPUCfsPeriod: false,
-        CPUCfsQuota:  true,
-    }
-}
-
-// setCgroupBlkioInfo reads the blkio information for Solaris; blkio switches
-// are not supported there.
-func setCgroupBlkioInfo(quiet bool) cgroupBlkioInfo {
-    return cgroupBlkioInfo{
-        BlkioWeight:       false,
-        BlkioWeightDevice: false,
-    }
-}
-
-// setCgroupCPUsetInfo reads the cpuset information for Solaris.
-func setCgroupCPUsetInfo(quiet bool) cgroupCpusetInfo {
-    return cgroupCpusetInfo{
-        Cpuset: true,
-        Cpus:   getCPUCount(),
-        Mems:   getLgrpCount(),
-    }
-}
-
-func getCPUCount() string {
-    ncpus := C.sysconf(C._SC_NPROCESSORS_ONLN)
-    if ncpus <= 0 {
-        return ""
-    }
-    return strconv.FormatInt(int64(ncpus), 16)
-}
-
-func getLgrpCount() string {
-    nlgrps := C.getLgrpCount()
-    if nlgrps <= 0 {
-        return ""
-    }
-    return strconv.FormatInt(int64(nlgrps), 16)
-}
diff --git a/pkg/sysinfo/sysinfo_test.go b/pkg/sysinfo/sysinfo_test.go
deleted file mode 100644
index b61fbcf541..0000000000
--- a/pkg/sysinfo/sysinfo_test.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package sysinfo
-
-import "testing"
-
-func TestIsCpusetListAvailable(t *testing.T) {
-    cases := []struct {
-        provided  string
-        available string
-        res       bool
-        err       bool
-    }{
-        {"1", "0-4", true, false},
-        {"01,3", "0-4", true, false},
-        {"", "0-7", true, false},
-        {"1--42", "0-7", false, true},
-        {"1-42", "00-1,8,,9", false, true},
-        {"1,41-42", "43,45", false, false},
-        {"0-3", "", false, false},
-    }
-    for _, c := range cases {
-        r, err := isCpusetListAvailable(c.provided, c.available)
-        if (c.err && err == nil) || r != c.res {
-            t.Fatalf("Expected pair: %v, %v for %s, %s. Got %v, %v instead", c.res, c.err, c.provided, c.available, r, err != nil)
-        }
-    }
-}
diff --git a/pkg/sysinfo/sysinfo_windows.go b/pkg/sysinfo/sysinfo_windows.go
deleted file mode 100644
index 8889318c39..0000000000
--- a/pkg/sysinfo/sysinfo_windows.go
+++ /dev/null
@@ -1,7 +0,0 @@
-package sysinfo
-
-// New returns an empty SysInfo for windows for now.
-func New(quiet bool) *SysInfo {
-    sysInfo := &SysInfo{}
-    return sysInfo
-}
diff --git a/pkg/system/chtimes.go b/pkg/system/chtimes.go
deleted file mode 100644
index 7637f12e1a..0000000000
--- a/pkg/system/chtimes.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package system
-
-import (
-    "os"
-    "syscall"
-    "time"
-    "unsafe"
-)
-
-var (
-    maxTime time.Time
-)
-
-func init() {
-    if unsafe.Sizeof(syscall.Timespec{}.Nsec) == 8 {
-        // This is a 64-bit timespec; os.Chtimes limits time to the following
-        maxTime = time.Unix(0, 1<<63-1)
-    } else {
-        // This is a 32-bit timespec
-        maxTime = time.Unix(1<<31-1, 0)
-    }
-}
-
-// Chtimes changes the access time and modified time of a file at the given path
-func Chtimes(name string, atime time.Time, mtime time.Time) error {
-    unixMinTime := time.Unix(0, 0)
-    unixMaxTime := maxTime
-
-    // If the modified time is prior to the Unix Epoch, or after the
-    // end of Unix Time, os.Chtimes has undefined behavior. Default to
-    // the Unix Epoch in this case, just in case.
-    if atime.Before(unixMinTime) || atime.After(unixMaxTime) {
-        atime = unixMinTime
-    }
-
-    if mtime.Before(unixMinTime) || mtime.After(unixMaxTime) {
-        mtime = unixMinTime
-    }
-
-    if err := os.Chtimes(name, atime, mtime); err != nil {
-        return err
-    }
-
-    // Take platform specific action for setting create time.
-    if err := setCTime(name, mtime); err != nil {
-        return err
-    }
-
-    return nil
-}
diff --git a/pkg/system/chtimes_test.go b/pkg/system/chtimes_test.go
deleted file mode 100644
index 5c87df32a2..0000000000
--- a/pkg/system/chtimes_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package system
-
-import (
-    "io/ioutil"
-    "os"
-    "path/filepath"
-    "testing"
-    "time"
-)
-
-// prepareTempFile creates a temporary file in a temporary directory.
-func prepareTempFile(t *testing.T) (string, string) {
-    dir, err := ioutil.TempDir("", "docker-system-test")
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    file := filepath.Join(dir, "exist")
-    if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil {
-        t.Fatal(err)
-    }
-    return file, dir
-}
-
-// TestChtimes tests Chtimes on a tempfile.
Test only mTime, because aTime is OS dependent -func TestChtimes(t *testing.T) { - file, dir := prepareTempFile(t) - defer os.RemoveAll(dir) - - beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second) - unixEpochTime := time.Unix(0, 0) - afterUnixEpochTime := time.Unix(100, 0) - unixMaxTime := maxTime - - // Test both aTime and mTime set to Unix Epoch - Chtimes(file, unixEpochTime, unixEpochTime) - - f, err := os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test aTime before Unix Epoch and mTime set to Unix Epoch - Chtimes(file, beforeUnixEpochTime, unixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test aTime set to Unix Epoch and mTime before Unix Epoch - Chtimes(file, unixEpochTime, beforeUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != unixEpochTime { - t.Fatalf("Expected: %s, got: %s", unixEpochTime, f.ModTime()) - } - - // Test both aTime and mTime set to after Unix Epoch (valid time) - Chtimes(file, afterUnixEpochTime, afterUnixEpochTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime() != afterUnixEpochTime { - t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, f.ModTime()) - } - - // Test both aTime and mTime set to Unix max time - Chtimes(file, unixMaxTime, unixMaxTime) - - f, err = os.Stat(file) - if err != nil { - t.Fatal(err) - } - - if f.ModTime().Truncate(time.Second) != unixMaxTime.Truncate(time.Second) { - t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), f.ModTime().Truncate(time.Second)) - } -} diff --git a/pkg/system/chtimes_unix.go b/pkg/system/chtimes_unix.go deleted file mode 100644 index 09d58bcbfd..0000000000 --- a/pkg/system/chtimes_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows - -package system - -import ( - "time" -) - -//setCTime will set the create time on a file. On Unix, the create -//time is updated as a side effect of setting the modified time, so -//no action is required. 
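The clamping above is observable from a caller on any platform. A minimal sketch, assuming a writable temp location:

    package main

    import (
        "fmt"
        "io/ioutil"
        "os"
        "time"

        "github.com/docker/docker/pkg/system"
    )

    func main() {
        f, err := ioutil.TempFile("", "chtimes")
        if err != nil {
            panic(err)
        }
        f.Close()
        defer os.Remove(f.Name())

        // A pre-epoch time is clamped to the Unix epoch rather than passed through.
        before := time.Unix(0, 0).Add(-time.Hour)
        if err := system.Chtimes(f.Name(), before, before); err != nil {
            panic(err)
        }

        fi, err := os.Stat(f.Name())
        if err != nil {
            panic(err)
        }
        fmt.Println(fi.ModTime().Unix()) // 0
    }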
-func setCTime(path string, ctime time.Time) error {
-    return nil
-}
diff --git a/pkg/system/chtimes_unix_test.go b/pkg/system/chtimes_unix_test.go
deleted file mode 100644
index 0aafe1d845..0000000000
--- a/pkg/system/chtimes_unix_test.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// +build !windows
-
-package system
-
-import (
-    "os"
-    "syscall"
-    "testing"
-    "time"
-)
-
-// TestChtimesLinux tests Chtimes access time on a tempfile on Linux
-func TestChtimesLinux(t *testing.T) {
-    file, dir := prepareTempFile(t)
-    defer os.RemoveAll(dir)
-
-    beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second)
-    unixEpochTime := time.Unix(0, 0)
-    afterUnixEpochTime := time.Unix(100, 0)
-    unixMaxTime := maxTime
-
-    // Test both aTime and mTime set to Unix Epoch
-    Chtimes(file, unixEpochTime, unixEpochTime)
-
-    f, err := os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    stat := f.Sys().(*syscall.Stat_t)
-    aTime := time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
-    if aTime != unixEpochTime {
-        t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
-    }
-
-    // Test aTime before Unix Epoch and mTime set to Unix Epoch
-    Chtimes(file, beforeUnixEpochTime, unixEpochTime)
-
-    f, err = os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    stat = f.Sys().(*syscall.Stat_t)
-    aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
-    if aTime != unixEpochTime {
-        t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
-    }
-
-    // Test aTime set to Unix Epoch and mTime before Unix Epoch
-    Chtimes(file, unixEpochTime, beforeUnixEpochTime)
-
-    f, err = os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    stat = f.Sys().(*syscall.Stat_t)
-    aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
-    if aTime != unixEpochTime {
-        t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
-    }
-
-    // Test both aTime and mTime set to after Unix Epoch (valid time)
-    Chtimes(file, afterUnixEpochTime, afterUnixEpochTime)
-
-    f, err = os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    stat = f.Sys().(*syscall.Stat_t)
-    aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
-    if aTime != afterUnixEpochTime {
-        t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime)
-    }
-
-    // Test both aTime and mTime set to Unix max time
-    Chtimes(file, unixMaxTime, unixMaxTime)
-
-    f, err = os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    stat = f.Sys().(*syscall.Stat_t)
-    aTime = time.Unix(int64(stat.Atim.Sec), int64(stat.Atim.Nsec))
-    if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) {
-        t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second))
-    }
-}
diff --git a/pkg/system/chtimes_windows.go b/pkg/system/chtimes_windows.go
deleted file mode 100644
index 2945868465..0000000000
--- a/pkg/system/chtimes_windows.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// +build windows
-
-package system
-
-import (
-    "syscall"
-    "time"
-)
-
-// setCTime will set the create time on a file. On Windows, this requires
-// calling SetFileTime and explicitly including the create time.
-func setCTime(path string, ctime time.Time) error {
-    ctimespec := syscall.NsecToTimespec(ctime.UnixNano())
-    pathp, e := syscall.UTF16PtrFromString(path)
-    if e != nil {
-        return e
-    }
-    h, e := syscall.CreateFile(pathp,
-        syscall.FILE_WRITE_ATTRIBUTES, syscall.FILE_SHARE_WRITE, nil,
-        syscall.OPEN_EXISTING, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0)
-    if e != nil {
-        return e
-    }
-    defer syscall.Close(h)
-    c := syscall.NsecToFiletime(syscall.TimespecToNsec(ctimespec))
-    return syscall.SetFileTime(h, &c, nil, nil)
-}
diff --git a/pkg/system/chtimes_windows_test.go b/pkg/system/chtimes_windows_test.go
deleted file mode 100644
index be57558e1b..0000000000
--- a/pkg/system/chtimes_windows_test.go
+++ /dev/null
@@ -1,86 +0,0 @@
-// +build windows
-
-package system
-
-import (
-    "os"
-    "syscall"
-    "testing"
-    "time"
-)
-
-// TestChtimesWindows tests Chtimes access time on a tempfile on Windows
-func TestChtimesWindows(t *testing.T) {
-    file, dir := prepareTempFile(t)
-    defer os.RemoveAll(dir)
-
-    beforeUnixEpochTime := time.Unix(0, 0).Add(-100 * time.Second)
-    unixEpochTime := time.Unix(0, 0)
-    afterUnixEpochTime := time.Unix(100, 0)
-    unixMaxTime := maxTime
-
-    // Test both aTime and mTime set to Unix Epoch
-    Chtimes(file, unixEpochTime, unixEpochTime)
-
-    f, err := os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    aTime := time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
-    if aTime != unixEpochTime {
-        t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
-    }
-
-    // Test aTime before Unix Epoch and mTime set to Unix Epoch
-    Chtimes(file, beforeUnixEpochTime, unixEpochTime)
-
-    f, err = os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
-    if aTime != unixEpochTime {
-        t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
-    }
-
-    // Test aTime set to Unix Epoch and mTime before Unix Epoch
-    Chtimes(file, unixEpochTime, beforeUnixEpochTime)
-
-    f, err = os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
-    if aTime != unixEpochTime {
-        t.Fatalf("Expected: %s, got: %s", unixEpochTime, aTime)
-    }
-
-    // Test both aTime and mTime set to after Unix Epoch (valid time)
-    Chtimes(file, afterUnixEpochTime, afterUnixEpochTime)
-
-    f, err = os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
-    if aTime != afterUnixEpochTime {
-        t.Fatalf("Expected: %s, got: %s", afterUnixEpochTime, aTime)
-    }
-
-    // Test both aTime and mTime set to Unix max time
-    Chtimes(file, unixMaxTime, unixMaxTime)
-
-    f, err = os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    aTime = time.Unix(0, f.Sys().(*syscall.Win32FileAttributeData).LastAccessTime.Nanoseconds())
-    if aTime.Truncate(time.Second) != unixMaxTime.Truncate(time.Second) {
-        t.Fatalf("Expected: %s, got: %s", unixMaxTime.Truncate(time.Second), aTime.Truncate(time.Second))
-    }
-}
diff --git a/pkg/system/errors.go b/pkg/system/errors.go
deleted file mode 100644
index 288318985e..0000000000
--- a/pkg/system/errors.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package system
-
-import (
-    "errors"
-)
-
-var (
-    // ErrNotSupportedPlatform means the platform is not supported.
-    ErrNotSupportedPlatform = errors.New("platform and architecture is not supported")
-)
diff --git a/pkg/system/events_windows.go b/pkg/system/events_windows.go
deleted file mode 100644
index 04e2de7871..0000000000
--- a/pkg/system/events_windows.go
+++ /dev/null
@@ -1,83 +0,0 @@
-package system
-
-// This file implements syscalls for Win32 events which are not implemented
-// in golang.
-
-import (
-    "syscall"
-    "unsafe"
-)
-
-var (
-    procCreateEvent = modkernel32.NewProc("CreateEventW")
-    procOpenEvent   = modkernel32.NewProc("OpenEventW")
-    procSetEvent    = modkernel32.NewProc("SetEvent")
-    procResetEvent  = modkernel32.NewProc("ResetEvent")
-    procPulseEvent  = modkernel32.NewProc("PulseEvent")
-)
-
-// CreateEvent implements the win32 CreateEventW func in golang. It will create an event object.
-func CreateEvent(eventAttributes *syscall.SecurityAttributes, manualReset bool, initialState bool, name string) (handle syscall.Handle, err error) {
-    namep, _ := syscall.UTF16PtrFromString(name)
-    var _p1 uint32
-    if manualReset {
-        _p1 = 1
-    }
-    var _p2 uint32
-    if initialState {
-        _p2 = 1
-    }
-    r0, _, e1 := procCreateEvent.Call(uintptr(unsafe.Pointer(eventAttributes)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(namep)))
-    use(unsafe.Pointer(namep))
-    handle = syscall.Handle(r0)
-    if handle == syscall.InvalidHandle {
-        err = e1
-    }
-    return
-}
-
-// OpenEvent implements the win32 OpenEventW func in golang. It opens an event object.
-func OpenEvent(desiredAccess uint32, inheritHandle bool, name string) (handle syscall.Handle, err error) {
-    namep, _ := syscall.UTF16PtrFromString(name)
-    var _p1 uint32
-    if inheritHandle {
-        _p1 = 1
-    }
-    r0, _, e1 := procOpenEvent.Call(uintptr(desiredAccess), uintptr(_p1), uintptr(unsafe.Pointer(namep)))
-    use(unsafe.Pointer(namep))
-    handle = syscall.Handle(r0)
-    if handle == syscall.InvalidHandle {
-        err = e1
-    }
-    return
-}
-
-// SetEvent implements the win32 SetEvent func in golang.
-func SetEvent(handle syscall.Handle) (err error) {
-    return setResetPulse(handle, procSetEvent)
-}
-
-// ResetEvent implements the win32 ResetEvent func in golang.
-func ResetEvent(handle syscall.Handle) (err error) {
-    return setResetPulse(handle, procResetEvent)
-}
-
-// PulseEvent implements the win32 PulseEvent func in golang.
-func PulseEvent(handle syscall.Handle) (err error) {
-    return setResetPulse(handle, procPulseEvent)
-}
-
-func setResetPulse(handle syscall.Handle, proc *syscall.LazyProc) (err error) {
-    // These win32 APIs return nonzero on success, so a zero return
-    // value indicates failure.
-    r0, _, e1 := proc.Call(uintptr(handle))
-    if r0 == 0 {
-        err = e1
-    }
-    return
-}
-
-var temp unsafe.Pointer
-
-// use ensures a variable is kept alive without the GC freeing while still needed
-func use(p unsafe.Pointer) {
-    temp = p
-}
diff --git a/pkg/system/filesys.go b/pkg/system/filesys.go
deleted file mode 100644
index c14feb8496..0000000000
--- a/pkg/system/filesys.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build !windows
-
-package system
-
-import (
-    "os"
-    "path/filepath"
-)
-
-// MkdirAll creates a directory named path along with any necessary parents,
-// with permission specified by attribute perm for all directories created.
-func MkdirAll(path string, perm os.FileMode) error {
-    return os.MkdirAll(path, perm)
-}
-
-// IsAbs is a platform-specific wrapper for filepath.IsAbs.
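The practical difference only shows up for separator-rooted paths like \windows\system32, which the Windows variant below accepts; on Unix the wrapper is a straight pass-through. A tiny sketch:

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/system"
    )

    func main() {
        fmt.Println(system.IsAbs("/windows/system32")) // true on Unix
        fmt.Println(system.IsAbs("relative/path"))     // false
    }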
-func IsAbs(path string) bool {
-    return filepath.IsAbs(path)
-}
diff --git a/pkg/system/filesys_windows.go b/pkg/system/filesys_windows.go
deleted file mode 100644
index 16823d5517..0000000000
--- a/pkg/system/filesys_windows.go
+++ /dev/null
@@ -1,82 +0,0 @@
-// +build windows
-
-package system
-
-import (
-    "os"
-    "path/filepath"
-    "regexp"
-    "strings"
-    "syscall"
-)
-
-// MkdirAll implementation that is volume path aware for Windows.
-func MkdirAll(path string, perm os.FileMode) error {
-    if re := regexp.MustCompile(`^\\\\\?\\Volume{[a-z0-9-]+}$`); re.MatchString(path) {
-        return nil
-    }
-
-    // The rest of this method is copied from os.MkdirAll and should be kept
-    // as-is to ensure compatibility.
-
-    // Fast path: if we can tell whether path is a directory or file, stop with success or error.
-    dir, err := os.Stat(path)
-    if err == nil {
-        if dir.IsDir() {
-            return nil
-        }
-        return &os.PathError{
-            Op:   "mkdir",
-            Path: path,
-            Err:  syscall.ENOTDIR,
-        }
-    }
-
-    // Slow path: make sure parent exists and then call Mkdir for path.
-    i := len(path)
-    for i > 0 && os.IsPathSeparator(path[i-1]) { // Skip trailing path separator.
-        i--
-    }
-
-    j := i
-    for j > 0 && !os.IsPathSeparator(path[j-1]) { // Scan backward over element.
-        j--
-    }
-
-    if j > 1 {
-        // Create parent
-        err = MkdirAll(path[0:j-1], perm)
-        if err != nil {
-            return err
-        }
-    }
-
-    // Parent now exists; invoke Mkdir and use its result.
-    err = os.Mkdir(path, perm)
-    if err != nil {
-        // Handle arguments like "foo/." by
-        // double-checking that directory doesn't exist.
-        dir, err1 := os.Lstat(path)
-        if err1 == nil && dir.IsDir() {
-            return nil
-        }
-        return err
-    }
-    return nil
-}
-
-// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
-// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
-// as it doesn't start with a drive-letter/colon combination. However, in
-// docker we need to verify things such as WORKDIR /windows/system32 in
-// a Dockerfile (which gets translated to \windows\system32 when being processed
-// by the daemon). This SHOULD be treated as absolute from a docker processing
-// perspective.
-func IsAbs(path string) bool {
-    if !filepath.IsAbs(path) {
-        if !strings.HasPrefix(path, string(os.PathSeparator)) {
-            return false
-        }
-    }
-    return true
-}
diff --git a/pkg/system/lstat.go b/pkg/system/lstat.go
deleted file mode 100644
index bd23c4d50b..0000000000
--- a/pkg/system/lstat.go
+++ /dev/null
@@ -1,19 +0,0 @@
-// +build !windows
-
-package system
-
-import (
-    "syscall"
-)
-
-// Lstat takes a path to a file and returns
-// a system.StatT type pertaining to that file.
-//
-// Returns an error if the file does not exist
-func Lstat(path string) (*StatT, error) {
-    s := &syscall.Stat_t{}
-    if err := syscall.Lstat(path, s); err != nil {
-        return nil, err
-    }
-    return fromStatT(s)
-}
diff --git a/pkg/system/lstat_unix_test.go b/pkg/system/lstat_unix_test.go
deleted file mode 100644
index 062cf53bfe..0000000000
--- a/pkg/system/lstat_unix_test.go
+++ /dev/null
@@ -1,30 +0,0 @@
-// +build linux freebsd
-
-package system
-
-import (
-    "os"
-    "testing"
-)
-
-// TestLstat tests Lstat for existing and non-existing files
-func TestLstat(t *testing.T) {
-    file, invalid, _, dir := prepareFiles(t)
-    defer os.RemoveAll(dir)
-
-    statFile, err := Lstat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-    if statFile == nil {
-        t.Fatal("returned empty stat for existing file")
-    }
-
-    statInvalid, err := Lstat(invalid)
-    if err == nil {
-        t.Fatal("did not return error for non-existing file")
-    }
-    if statInvalid != nil {
-        t.Fatal("returned non-nil stat for non-existing file")
-    }
-}
diff --git a/pkg/system/lstat_windows.go b/pkg/system/lstat_windows.go
deleted file mode 100644
index 49e87eb40b..0000000000
--- a/pkg/system/lstat_windows.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// +build windows
-
-package system
-
-import (
-    "os"
-)
-
-// Lstat calls os.Lstat to get a fileinfo interface back.
-// This is then copied into our own locally defined structure.
-// Note the Linux version uses fromStatT to do the copy back,
-// but that is not strictly necessary when already in an OS specific module.
-func Lstat(path string) (*StatT, error) {
-    fi, err := os.Lstat(path)
-    if err != nil {
-        return nil, err
-    }
-
-    return &StatT{
-        name:    fi.Name(),
-        size:    fi.Size(),
-        mode:    fi.Mode(),
-        modTime: fi.ModTime(),
-        isDir:   fi.IsDir()}, nil
-}
diff --git a/pkg/system/meminfo.go b/pkg/system/meminfo.go
deleted file mode 100644
index 3b6e947e67..0000000000
--- a/pkg/system/meminfo.go
+++ /dev/null
@@ -1,17 +0,0 @@
-package system
-
-// MemInfo contains memory statistics of the host system.
-type MemInfo struct {
-    // Total usable RAM (i.e. physical RAM minus a few reserved bits and the
-    // kernel binary code).
-    MemTotal int64
-
-    // Amount of free memory.
-    MemFree int64
-
-    // Total amount of swap space available.
-    SwapTotal int64
-
-    // Amount of swap space that is currently unused.
-    SwapFree int64
-}
diff --git a/pkg/system/meminfo_linux.go b/pkg/system/meminfo_linux.go
deleted file mode 100644
index 385f1d5e73..0000000000
--- a/pkg/system/meminfo_linux.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package system
-
-import (
-    "bufio"
-    "io"
-    "os"
-    "strconv"
-    "strings"
-
-    "github.com/docker/go-units"
-)
-
-// ReadMemInfo retrieves memory statistics of the host system and returns a
-// MemInfo type.
-func ReadMemInfo() (*MemInfo, error) {
-    file, err := os.Open("/proc/meminfo")
-    if err != nil {
-        return nil, err
-    }
-    defer file.Close()
-    return parseMemInfo(file)
-}
-
-// parseMemInfo parses the /proc/meminfo file into
-// a MemInfo object given an io.Reader to the file.
-// Returns an error if there are problems reading from the file.
-func parseMemInfo(reader io.Reader) (*MemInfo, error) {
-    meminfo := &MemInfo{}
-    scanner := bufio.NewScanner(reader)
-    for scanner.Scan() {
-        // Expected format: ["MemTotal:", "1234", "kB"]
-        parts := strings.Fields(scanner.Text())
-
-        // Sanity checks: Skip malformed entries.
-        if len(parts) < 3 || parts[2] != "kB" {
-            continue
-        }
-
-        // Convert to bytes.
-        size, err := strconv.Atoi(parts[1])
-        if err != nil {
-            continue
-        }
-        bytes := int64(size) * units.KiB
-
-        switch parts[0] {
-        case "MemTotal:":
-            meminfo.MemTotal = bytes
-        case "MemFree:":
-            meminfo.MemFree = bytes
-        case "SwapTotal:":
-            meminfo.SwapTotal = bytes
-        case "SwapFree:":
-            meminfo.SwapFree = bytes
-        }
-    }
-
-    // Handle errors that may have occurred during the reading of the file.
-    if err := scanner.Err(); err != nil {
-        return nil, err
-    }
-
-    return meminfo, nil
-}
diff --git a/pkg/system/meminfo_solaris.go b/pkg/system/meminfo_solaris.go
deleted file mode 100644
index 313c601b12..0000000000
--- a/pkg/system/meminfo_solaris.go
+++ /dev/null
@@ -1,128 +0,0 @@
-// +build solaris,cgo
-
-package system
-
-import (
-    "fmt"
-    "unsafe"
-)
-
-// #cgo LDFLAGS: -lkstat
-// #include <unistd.h>
-// #include <stdlib.h>
-// #include <stdio.h>
-// #include <kstat.h>
-// #include <sys/swap.h>
-// #include <sys/param.h>
-// struct swaptable *allocSwaptable(int num) {
-//     struct swaptable *st;
-//     struct swapent *swapent;
-//     st = (struct swaptable *)malloc(num * sizeof(swapent_t) + sizeof (int));
-//     swapent = st->swt_ent;
-//     for (int i = 0; i < num; i++,swapent++) {
-//         swapent->ste_path = (char *)malloc(MAXPATHLEN * sizeof (char));
-//     }
-//     st->swt_n = num;
-//     return st;
-// }
-// void freeSwaptable (struct swaptable *st) {
-//     struct swapent *swapent = st->swt_ent;
-//     for (int i = 0; i < st->swt_n; i++,swapent++) {
-//         free(swapent->ste_path);
-//     }
-//     free(st);
-// }
-// swapent_t getSwapEnt(swapent_t *ent, int i) {
-//     return ent[i];
-// }
-// int64_t getPpKernel() {
-//     int64_t pp_kernel = 0;
-//     kstat_ctl_t *ksc;
-//     kstat_t *ks;
-//     kstat_named_t *knp;
-//     kid_t kid;
-//
-//     if ((ksc = kstat_open()) == NULL) {
-//         return -1;
-//     }
-//     if ((ks = kstat_lookup(ksc, "unix", 0, "system_pages")) == NULL) {
-//         return -1;
-//     }
-//     if (((kid = kstat_read(ksc, ks, NULL)) == -1) ||
-//         ((knp = kstat_data_lookup(ks, "pp_kernel")) == NULL)) {
-//         return -1;
-//     }
-//     switch (knp->data_type) {
-//     case KSTAT_DATA_UINT64:
-//         pp_kernel = knp->value.ui64;
-//         break;
-//     case KSTAT_DATA_UINT32:
-//         pp_kernel = knp->value.ui32;
-//         break;
-//     }
-//     pp_kernel *= sysconf(_SC_PAGESIZE);
-//     return (pp_kernel > 0 ? pp_kernel : -1);
-// }
-import "C"
-
-// Get the system memory info using sysconf, the same as prtconf.
-func getTotalMem() int64 {
-    pagesize := C.sysconf(C._SC_PAGESIZE)
-    npages := C.sysconf(C._SC_PHYS_PAGES)
-    return int64(pagesize * npages)
-}
-
-func getFreeMem() int64 {
-    pagesize := C.sysconf(C._SC_PAGESIZE)
-    npages := C.sysconf(C._SC_AVPHYS_PAGES)
-    return int64(pagesize * npages)
-}
-
-// ReadMemInfo retrieves memory statistics of the host system and returns a
-// MemInfo type.
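Whichever platform implementation is compiled in, callers only ever see the MemInfo struct. A minimal usage sketch (go-units is already an import of this package):

    package main

    import (
        "fmt"

        "github.com/docker/docker/pkg/system"
        "github.com/docker/go-units"
    )

    func main() {
        mi, err := system.ReadMemInfo()
        if err != nil {
            panic(err)
        }
        fmt.Println("total RAM:", units.BytesSize(float64(mi.MemTotal)))
        fmt.Println("free swap:", units.BytesSize(float64(mi.SwapFree)))
    }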
-func ReadMemInfo() (*MemInfo, error) {
-    ppKernel := C.getPpKernel()
-    MemTotal := getTotalMem()
-    MemFree := getFreeMem()
-    SwapTotal, SwapFree, err := getSysSwap()
-
-    if ppKernel < 0 || MemTotal < 0 || MemFree < 0 || SwapTotal < 0 ||
-        SwapFree < 0 {
-        return nil, fmt.Errorf("Error getting system memory info: %v", err)
-    }
-
-    meminfo := &MemInfo{}
-    // Total memory is total physical memory less the memory locked by the kernel
-    meminfo.MemTotal = MemTotal - int64(ppKernel)
-    meminfo.MemFree = MemFree
-    meminfo.SwapTotal = SwapTotal
-    meminfo.SwapFree = SwapFree
-
-    return meminfo, nil
-}
-
-func getSysSwap() (int64, int64, error) {
-    var tSwap int64
-    var fSwap int64
-    var diskblksPerPage int64
-    num, err := C.swapctl(C.SC_GETNSWP, nil)
-    if err != nil {
-        return -1, -1, err
-    }
-    st := C.allocSwaptable(num)
-    _, err = C.swapctl(C.SC_LIST, unsafe.Pointer(st))
-    if err != nil {
-        C.freeSwaptable(st)
-        return -1, -1, err
-    }
-
-    diskblksPerPage = int64(C.sysconf(C._SC_PAGESIZE) >> C.DEV_BSHIFT)
-    for i := 0; i < int(num); i++ {
-        swapent := C.getSwapEnt(&st.swt_ent[0], C.int(i))
-        tSwap += int64(swapent.ste_pages) * diskblksPerPage
-        fSwap += int64(swapent.ste_free) * diskblksPerPage
-    }
-    C.freeSwaptable(st)
-    return tSwap, fSwap, nil
-}
diff --git a/pkg/system/meminfo_unix_test.go b/pkg/system/meminfo_unix_test.go
deleted file mode 100644
index 44f5562882..0000000000
--- a/pkg/system/meminfo_unix_test.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// +build linux freebsd
-
-package system
-
-import (
-    "strings"
-    "testing"
-
-    "github.com/docker/go-units"
-)
-
-// TestMemInfo tests parseMemInfo with a static meminfo string
-func TestMemInfo(t *testing.T) {
-    const input = `
-    MemTotal: 1 kB
-    MemFree: 2 kB
-    SwapTotal: 3 kB
-    SwapFree: 4 kB
-    Malformed1:
-    Malformed2: 1
-    Malformed3: 2 MB
-    Malformed4: X kB
-    `
-    meminfo, err := parseMemInfo(strings.NewReader(input))
-    if err != nil {
-        t.Fatal(err)
-    }
-    if meminfo.MemTotal != 1*units.KiB {
-        t.Fatalf("Unexpected MemTotal: %d", meminfo.MemTotal)
-    }
-    if meminfo.MemFree != 2*units.KiB {
-        t.Fatalf("Unexpected MemFree: %d", meminfo.MemFree)
-    }
-    if meminfo.SwapTotal != 3*units.KiB {
-        t.Fatalf("Unexpected SwapTotal: %d", meminfo.SwapTotal)
-    }
-    if meminfo.SwapFree != 4*units.KiB {
-        t.Fatalf("Unexpected SwapFree: %d", meminfo.SwapFree)
-    }
-}
diff --git a/pkg/system/meminfo_unsupported.go b/pkg/system/meminfo_unsupported.go
deleted file mode 100644
index 3ce019dffd..0000000000
--- a/pkg/system/meminfo_unsupported.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// +build !linux,!windows,!solaris
-
-package system
-
-// ReadMemInfo is not supported on platforms other than linux, windows, and solaris.
-func ReadMemInfo() (*MemInfo, error) { - return nil, ErrNotSupportedPlatform -} diff --git a/pkg/system/meminfo_windows.go b/pkg/system/meminfo_windows.go deleted file mode 100644 index d46642598c..0000000000 --- a/pkg/system/meminfo_windows.go +++ /dev/null @@ -1,44 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - - procGlobalMemoryStatusEx = modkernel32.NewProc("GlobalMemoryStatusEx") -) - -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366589(v=vs.85).aspx -// https://msdn.microsoft.com/en-us/library/windows/desktop/aa366770(v=vs.85).aspx -type memorystatusex struct { - dwLength uint32 - dwMemoryLoad uint32 - ullTotalPhys uint64 - ullAvailPhys uint64 - ullTotalPageFile uint64 - ullAvailPageFile uint64 - ullTotalVirtual uint64 - ullAvailVirtual uint64 - ullAvailExtendedVirtual uint64 -} - -// ReadMemInfo retrieves memory statistics of the host system and returns a -// MemInfo type. -func ReadMemInfo() (*MemInfo, error) { - msi := &memorystatusex{ - dwLength: 64, - } - r1, _, _ := procGlobalMemoryStatusEx.Call(uintptr(unsafe.Pointer(msi))) - if r1 == 0 { - return &MemInfo{}, nil - } - return &MemInfo{ - MemTotal: int64(msi.ullTotalPhys), - MemFree: int64(msi.ullAvailPhys), - SwapTotal: int64(msi.ullTotalPageFile), - SwapFree: int64(msi.ullAvailPageFile), - }, nil -} diff --git a/pkg/system/mknod.go b/pkg/system/mknod.go deleted file mode 100644 index 73958182b4..0000000000 --- a/pkg/system/mknod.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Mknod creates a filesystem node (file, device special file or named pipe) named path -// with attributes specified by mode and dev. -func Mknod(path string, mode uint32, dev int) error { - return syscall.Mknod(path, mode, dev) -} - -// Mkdev is used to build the value of linux devices (in /dev/) which specifies major -// and minor number of the newly created device special file. -// Linux device nodes are a bit weird due to backwards compat with 16 bit device nodes. -// They are, from low to high: the lower 8 bits of the minor, then 12 bits of the major, -// then the top 12 bits of the minor. -func Mkdev(major int64, minor int64) uint32 { - return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff)) -} diff --git a/pkg/system/mknod_windows.go b/pkg/system/mknod_windows.go deleted file mode 100644 index 2e863c0215..0000000000 --- a/pkg/system/mknod_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package system - -// Mknod is not implemented on Windows. -func Mknod(path string, mode uint32, dev int) error { - return ErrNotSupportedPlatform -} - -// Mkdev is not implemented on Windows. -func Mkdev(major int64, minor int64) uint32 { - panic("Mkdev not implemented on Windows.") -} diff --git a/pkg/system/path_unix.go b/pkg/system/path_unix.go deleted file mode 100644 index c607c4db09..0000000000 --- a/pkg/system/path_unix.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build !windows - -package system - -// DefaultPathEnv is unix style list of directories to search for -// executables. Each directory is separated from the next by a colon -// ':' character . -const DefaultPathEnv = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin" - -// CheckSystemDriveAndRemoveDriveLetter verifies that a path, if it includes a drive letter, -// is the system drive. This is a no-op on Linux. 
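The Mkdev comment above describes the split encoding Linux uses for device numbers. A small sketch reproducing that bit layout, with an illustrative inverse (unmkdev is not part of the package):

```go
package main

import "fmt"

// mkdev mirrors the deleted Mkdev: from low to high, the lower 8 bits of the
// minor, then 12 bits of the major, then the top 12 bits of the minor.
func mkdev(major, minor int64) uint32 {
	return uint32(((minor & 0xfff00) << 12) | ((major & 0xfff) << 8) | (minor & 0xff))
}

// unmkdev is an illustrative inverse, recovering the major and minor numbers.
func unmkdev(dev uint32) (major, minor int64) {
	major = int64((dev >> 8) & 0xfff)
	minor = int64(dev&0xff) | int64((dev>>12)&0xfff00)
	return
}

func main() {
	dev := mkdev(8, 1) // on Linux, /dev/sda1 is major 8, minor 1
	fmt.Printf("dev=%#x\n", dev)
	fmt.Println(unmkdev(dev)) // 8 1
}
```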
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
-    return path, nil
-}
diff --git a/pkg/system/path_windows.go b/pkg/system/path_windows.go
deleted file mode 100644
index cbfe2c1576..0000000000
--- a/pkg/system/path_windows.go
+++ /dev/null
@@ -1,37 +0,0 @@
-// +build windows
-
-package system
-
-import (
-    "fmt"
-    "path/filepath"
-    "strings"
-)
-
-// DefaultPathEnv is deliberately empty on Windows as the default path will be set by
-// the container. Docker has no context of what the default path should be.
-const DefaultPathEnv = ""
-
-// CheckSystemDriveAndRemoveDriveLetter verifies and manipulates a Windows path.
-// This is used, for example, when validating a user provided path in docker cp.
-// If a drive letter is supplied, it must be the system drive. The drive letter
-// is always removed. Also, it translates it to OS semantics (in other words, / to \). We
-// need the path in this syntax so that it can ultimately be concatenated with
-// a Windows long-path which doesn't support drive-letters. Examples:
-// C:    --> Fail
-// C:\   --> \
-// a     --> a
-// /a    --> \a
-// d:\   --> Fail
-func CheckSystemDriveAndRemoveDriveLetter(path string) (string, error) {
-    if len(path) == 2 && string(path[1]) == ":" {
-        return "", fmt.Errorf("No relative path specified in %q", path)
-    }
-    if !filepath.IsAbs(path) || len(path) < 2 {
-        return filepath.FromSlash(path), nil
-    }
-    if string(path[1]) == ":" && !strings.EqualFold(string(path[0]), "c") {
-        return "", fmt.Errorf("The specified path is not on the system drive (C:)")
-    }
-    return filepath.FromSlash(path[2:]), nil
-}
diff --git a/pkg/system/path_windows_test.go b/pkg/system/path_windows_test.go
deleted file mode 100644
index eccb26aaea..0000000000
--- a/pkg/system/path_windows_test.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// +build windows
-
-package system
-
-import "testing"
-
-// TestCheckSystemDriveAndRemoveDriveLetter tests CheckSystemDriveAndRemoveDriveLetter
-func TestCheckSystemDriveAndRemoveDriveLetter(t *testing.T) {
-    // Fails if not C drive.
- path, err := CheckSystemDriveAndRemoveDriveLetter(`d:\`) - if err == nil || (err != nil && err.Error() != "The specified path is not on the system drive (C:)") { - t.Fatalf("Expected error for d:") - } - - // Single character is unchanged - if path, err = CheckSystemDriveAndRemoveDriveLetter("z"); err != nil { - t.Fatalf("Single character should pass") - } - if path != "z" { - t.Fatalf("Single character should be unchanged") - } - - // Two characters without colon is unchanged - if path, err = CheckSystemDriveAndRemoveDriveLetter("AB"); err != nil { - t.Fatalf("2 characters without colon should pass") - } - if path != "AB" { - t.Fatalf("2 characters without colon should be unchanged") - } - - // Abs path without drive letter - if path, err = CheckSystemDriveAndRemoveDriveLetter(`\l`); err != nil { - t.Fatalf("abs path no drive letter should pass") - } - if path != `\l` { - t.Fatalf("abs path without drive letter should be unchanged") - } - - // Abs path without drive letter, linux style - if path, err = CheckSystemDriveAndRemoveDriveLetter(`/l`); err != nil { - t.Fatalf("abs path no drive letter linux style should pass") - } - if path != `\l` { - t.Fatalf("abs path without drive letter linux failed %s", path) - } - - // Drive-colon should be stripped - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:\`); err != nil { - t.Fatalf("An absolute path should pass") - } - if path != `\` { - t.Fatalf(`An absolute path should have been shortened to \ %s`, path) - } - - // Verify with a linux-style path - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:/`); err != nil { - t.Fatalf("An absolute path should pass") - } - if path != `\` { - t.Fatalf(`A linux style absolute path should have been shortened to \ %s`, path) - } - - // Failure on c: - if path, err = CheckSystemDriveAndRemoveDriveLetter(`c:`); err == nil { - t.Fatalf("c: should fail") - } - if err.Error() != `No relative path specified in "c:"` { - t.Fatalf(path, err) - } - - // Failure on d: - if path, err = CheckSystemDriveAndRemoveDriveLetter(`d:`); err == nil { - t.Fatalf("c: should fail") - } - if err.Error() != `No relative path specified in "d:"` { - t.Fatalf(path, err) - } -} diff --git a/pkg/system/stat.go b/pkg/system/stat.go deleted file mode 100644 index 087034c5ec..0000000000 --- a/pkg/system/stat.go +++ /dev/null @@ -1,53 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// StatT type contains status of a file. It contains metadata -// like permission, owner, group, size, etc about a file. -type StatT struct { - mode uint32 - uid uint32 - gid uint32 - rdev uint64 - size int64 - mtim syscall.Timespec -} - -// Mode returns file's permission mode. -func (s StatT) Mode() uint32 { - return s.mode -} - -// UID returns file's user id of owner. -func (s StatT) UID() uint32 { - return s.uid -} - -// GID returns file's group id of owner. -func (s StatT) GID() uint32 { - return s.gid -} - -// Rdev returns file's device ID (if it's special file). -func (s StatT) Rdev() uint64 { - return s.rdev -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mtim returns file's last modification time. -func (s StatT) Mtim() syscall.Timespec { - return s.mtim -} - -// GetLastModification returns file's last modification time. 
-func (s StatT) GetLastModification() syscall.Timespec { - return s.Mtim() -} diff --git a/pkg/system/stat_freebsd.go b/pkg/system/stat_freebsd.go deleted file mode 100644 index d0fb6f1519..0000000000 --- a/pkg/system/stat_freebsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} - -// Stat takes a path to a file and returns -// a system.Stat_t type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/pkg/system/stat_linux.go b/pkg/system/stat_linux.go deleted file mode 100644 index 8b1eded138..0000000000 --- a/pkg/system/stat_linux.go +++ /dev/null @@ -1,33 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT converts a syscall.Stat_t type to a system.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: s.Mode, - uid: s.Uid, - gid: s.Gid, - rdev: s.Rdev, - mtim: s.Mtim}, nil -} - -// FromStatT exists only on linux, and loads a system.StatT from a -// syscal.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. -// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/pkg/system/stat_openbsd.go b/pkg/system/stat_openbsd.go deleted file mode 100644 index 3c3b71fb21..0000000000 --- a/pkg/system/stat_openbsd.go +++ /dev/null @@ -1,15 +0,0 @@ -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} diff --git a/pkg/system/stat_solaris.go b/pkg/system/stat_solaris.go deleted file mode 100644 index 0216985a25..0000000000 --- a/pkg/system/stat_solaris.go +++ /dev/null @@ -1,34 +0,0 @@ -// +build solaris - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtim}, nil -} - -// FromStatT loads a system.StatT from a syscal.Stat_t. -func FromStatT(s *syscall.Stat_t) (*StatT, error) { - return fromStatT(s) -} - -// Stat takes a path to a file and returns -// a system.StatT type pertaining to that file. 
-// -// Throws an error if the file does not exist -func Stat(path string) (*StatT, error) { - s := &syscall.Stat_t{} - if err := syscall.Stat(path, s); err != nil { - return nil, err - } - return fromStatT(s) -} diff --git a/pkg/system/stat_unix_test.go b/pkg/system/stat_unix_test.go deleted file mode 100644 index dee8d30a19..0000000000 --- a/pkg/system/stat_unix_test.go +++ /dev/null @@ -1,39 +0,0 @@ -// +build linux freebsd - -package system - -import ( - "os" - "syscall" - "testing" -) - -// TestFromStatT tests fromStatT for a tempfile -func TestFromStatT(t *testing.T) { - file, _, _, dir := prepareFiles(t) - defer os.RemoveAll(dir) - - stat := &syscall.Stat_t{} - err := syscall.Lstat(file, stat) - - s, err := fromStatT(stat) - if err != nil { - t.Fatal(err) - } - - if stat.Mode != s.Mode() { - t.Fatal("got invalid mode") - } - if stat.Uid != s.UID() { - t.Fatal("got invalid uid") - } - if stat.Gid != s.GID() { - t.Fatal("got invalid gid") - } - if stat.Rdev != s.Rdev() { - t.Fatal("got invalid rdev") - } - if stat.Mtim != s.Mtim() { - t.Fatal("got invalid mtim") - } -} diff --git a/pkg/system/stat_unsupported.go b/pkg/system/stat_unsupported.go deleted file mode 100644 index f53e9de4d1..0000000000 --- a/pkg/system/stat_unsupported.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build !linux,!windows,!freebsd,!solaris,!openbsd - -package system - -import ( - "syscall" -) - -// fromStatT creates a system.StatT type from a syscall.Stat_t type -func fromStatT(s *syscall.Stat_t) (*StatT, error) { - return &StatT{size: s.Size, - mode: uint32(s.Mode), - uid: s.Uid, - gid: s.Gid, - rdev: uint64(s.Rdev), - mtim: s.Mtimespec}, nil -} diff --git a/pkg/system/stat_windows.go b/pkg/system/stat_windows.go deleted file mode 100644 index 39490c625c..0000000000 --- a/pkg/system/stat_windows.go +++ /dev/null @@ -1,43 +0,0 @@ -// +build windows - -package system - -import ( - "os" - "time" -) - -// StatT type contains status of a file. It contains metadata -// like name, permission, size, etc about a file. -type StatT struct { - name string - size int64 - mode os.FileMode - modTime time.Time - isDir bool -} - -// Name returns file's name. -func (s StatT) Name() string { - return s.name -} - -// Size returns file's size. -func (s StatT) Size() int64 { - return s.size -} - -// Mode returns file's permission mode. -func (s StatT) Mode() os.FileMode { - return s.mode -} - -// ModTime returns file's last modification time. -func (s StatT) ModTime() time.Time { - return s.modTime -} - -// IsDir returns whether file is actually a directory. -func (s StatT) IsDir() bool { - return s.isDir -} diff --git a/pkg/system/syscall_unix.go b/pkg/system/syscall_unix.go deleted file mode 100644 index 3ae9128468..0000000000 --- a/pkg/system/syscall_unix.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build linux freebsd - -package system - -import "syscall" - -// Unmount is a platform-specific helper function to call -// the unmount syscall. -func Unmount(dest string) error { - return syscall.Unmount(dest, 0) -} - -// CommandLineToArgv should not be used on Unix. -// It simply returns commandLine in the only element in the returned array. 
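The StatT wrappers above expose raw syscall fields that os.FileInfo hides. A usage sketch for a unix build, again assuming the github.com/docker/docker/pkg/system import path:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/system"
)

func main() {
	st, err := system.Stat("/etc/hosts")
	if err != nil {
		panic(err)
	}
	// UID, GID and Rdev are not reachable through a plain os.FileInfo.
	fmt.Printf("mode=%o uid=%d gid=%d size=%d\n",
		st.Mode(), st.UID(), st.GID(), st.Size())
}
```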
-func CommandLineToArgv(commandLine string) ([]string, error) { - return []string{commandLine}, nil -} diff --git a/pkg/system/syscall_windows.go b/pkg/system/syscall_windows.go deleted file mode 100644 index f5f2d56941..0000000000 --- a/pkg/system/syscall_windows.go +++ /dev/null @@ -1,103 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" - - "github.com/Sirupsen/logrus" -) - -var ( - ntuserApiset = syscall.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0") - procGetVersionExW = modkernel32.NewProc("GetVersionExW") -) - -// OSVersion is a wrapper for Windows version information -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724439(v=vs.85).aspx -type OSVersion struct { - Version uint32 - MajorVersion uint8 - MinorVersion uint8 - Build uint16 -} - -// https://msdn.microsoft.com/en-us/library/windows/desktop/ms724833(v=vs.85).aspx -type osVersionInfoEx struct { - OSVersionInfoSize uint32 - MajorVersion uint32 - MinorVersion uint32 - BuildNumber uint32 - PlatformID uint32 - CSDVersion [128]uint16 - ServicePackMajor uint16 - ServicePackMinor uint16 - SuiteMask uint16 - ProductType byte - Reserve byte -} - -// GetOSVersion gets the operating system version on Windows. Note that -// docker.exe must be manifested to get the correct version information. -func GetOSVersion() OSVersion { - var err error - osv := OSVersion{} - osv.Version, err = syscall.GetVersion() - if err != nil { - // GetVersion never fails. - panic(err) - } - osv.MajorVersion = uint8(osv.Version & 0xFF) - osv.MinorVersion = uint8(osv.Version >> 8 & 0xFF) - osv.Build = uint16(osv.Version >> 16) - return osv -} - -// IsWindowsClient returns true if the SKU is client -func IsWindowsClient() bool { - osviex := &osVersionInfoEx{OSVersionInfoSize: 284} - r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex))) - if r1 == 0 { - logrus.Warnf("GetVersionExW failed - assuming server SKU: %v", err) - return false - } - const verNTWorkstation = 0x00000001 - return osviex.ProductType == verNTWorkstation -} - -// Unmount is a platform-specific helper function to call -// the unmount syscall. Not supported on Windows -func Unmount(dest string) error { - return nil -} - -// CommandLineToArgv wraps the Windows syscall to turn a commandline into an argument array. -func CommandLineToArgv(commandLine string) ([]string, error) { - var argc int32 - - argsPtr, err := syscall.UTF16PtrFromString(commandLine) - if err != nil { - return nil, err - } - - argv, err := syscall.CommandLineToArgv(argsPtr, &argc) - if err != nil { - return nil, err - } - defer syscall.LocalFree(syscall.Handle(uintptr(unsafe.Pointer(argv)))) - - newArgs := make([]string, argc) - for i, v := range (*argv)[:argc] { - newArgs[i] = string(syscall.UTF16ToString((*v)[:])) - } - - return newArgs, nil -} - -// HasWin32KSupport determines whether containers that depend on win32k can -// run on this machine. Win32k is the driver used to implement windowing. -func HasWin32KSupport() bool { - // For now, check for ntuser API support on the host. In the future, a host - // may support win32k in containers even if the host does not support ntuser - // APIs. 
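GetOSVersion above unpacks the DWORD returned by GetVersion: major version in the low byte, minor in the next byte, build number in the high word. A pure-Go sketch of that decoding, using an illustrative sample value rather than a live Windows call:

```go
package main

import "fmt"

// decodeVersion mirrors the unpacking in GetOSVersion: low byte is the major
// version, the next byte the minor, and the high word the build number.
func decodeVersion(v uint32) (major, minor uint8, build uint16) {
	return uint8(v & 0xFF), uint8(v >> 8 & 0xFF), uint16(v >> 16)
}

func main() {
	// 0x23F00206 is an illustrative value: major 6, minor 2, build 9200,
	// i.e. the DWORD GetVersion returns on Windows 8 / Server 2012.
	major, minor, build := decodeVersion(0x23F00206)
	fmt.Println(major, minor, build) // 6 2 9200
}
```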
- return ntuserApiset.Load() == nil -} diff --git a/pkg/system/syscall_windows_test.go b/pkg/system/syscall_windows_test.go deleted file mode 100644 index 4886b2b9b4..0000000000 --- a/pkg/system/syscall_windows_test.go +++ /dev/null @@ -1,9 +0,0 @@ -package system - -import "testing" - -func TestHasWin32KSupport(t *testing.T) { - s := HasWin32KSupport() // make sure this doesn't panic - - t.Logf("win32k: %v", s) // will be different on different platforms -- informative only -} diff --git a/pkg/system/umask.go b/pkg/system/umask.go deleted file mode 100644 index 3d0146b01a..0000000000 --- a/pkg/system/umask.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build !windows - -package system - -import ( - "syscall" -) - -// Umask sets current process's file mode creation mask to newmask -// and returns oldmask. -func Umask(newmask int) (oldmask int, err error) { - return syscall.Umask(newmask), nil -} diff --git a/pkg/system/umask_windows.go b/pkg/system/umask_windows.go deleted file mode 100644 index 13f1de1769..0000000000 --- a/pkg/system/umask_windows.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package system - -// Umask is not supported on the windows platform. -func Umask(newmask int) (oldmask int, err error) { - // should not be called on cli code path - return 0, ErrNotSupportedPlatform -} diff --git a/pkg/system/utimes_darwin.go b/pkg/system/utimes_darwin.go deleted file mode 100644 index 0a16197544..0000000000 --- a/pkg/system/utimes_darwin.go +++ /dev/null @@ -1,8 +0,0 @@ -package system - -import "syscall" - -// LUtimesNano is not supported by darwin platform. -func LUtimesNano(path string, ts []syscall.Timespec) error { - return ErrNotSupportedPlatform -} diff --git a/pkg/system/utimes_freebsd.go b/pkg/system/utimes_freebsd.go deleted file mode 100644 index e2eac3b553..0000000000 --- a/pkg/system/utimes_freebsd.go +++ /dev/null @@ -1,22 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. -func LUtimesNano(path string, ts []syscall.Timespec) error { - var _path *byte - _path, err := syscall.BytePtrFromString(path) - if err != nil { - return err - } - - if _, _, err := syscall.Syscall(syscall.SYS_LUTIMES, uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), 0); err != 0 && err != syscall.ENOSYS { - return err - } - - return nil -} diff --git a/pkg/system/utimes_linux.go b/pkg/system/utimes_linux.go deleted file mode 100644 index fc8a1aba95..0000000000 --- a/pkg/system/utimes_linux.go +++ /dev/null @@ -1,26 +0,0 @@ -package system - -import ( - "syscall" - "unsafe" -) - -// LUtimesNano is used to change access and modification time of the specified path. -// It's used for symbol link file because syscall.UtimesNano doesn't support a NOFOLLOW flag atm. 
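Ahead of the linux implementation below: a usage sketch of LUtimesNano changing a symlink's own timestamps (a linux or freebsd build is assumed, and the /tmp path is illustrative):

```go
package main

import (
	"fmt"
	"os"
	"syscall"
	"time"

	"github.com/docker/docker/pkg/system"
)

func main() {
	link := "/tmp/hosts.link" // illustrative path
	if err := os.Symlink("/etc/hosts", link); err != nil && !os.IsExist(err) {
		panic(err)
	}
	// utimensat takes two timespecs: access time first, then modification time.
	when := syscall.NsecToTimespec(time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC).UnixNano())
	ts := []syscall.Timespec{when, when}
	// Unlike syscall.UtimesNano, this touches the link itself, not its target.
	if err := system.LUtimesNano(link, ts); err != nil {
		panic(err)
	}
	fi, err := os.Lstat(link)
	if err != nil {
		panic(err)
	}
	fmt.Println(fi.ModTime().UTC()) // 2000-01-01 00:00:00 +0000 UTC
}
```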
-func LUtimesNano(path string, ts []syscall.Timespec) error {
-    // These are not currently available in syscall
-    atFdCwd := -100
-    atSymLinkNoFollow := 0x100
-
-    var _path *byte
-    _path, err := syscall.BytePtrFromString(path)
-    if err != nil {
-        return err
-    }
-
-    if _, _, err := syscall.Syscall6(syscall.SYS_UTIMENSAT, uintptr(atFdCwd), uintptr(unsafe.Pointer(_path)), uintptr(unsafe.Pointer(&ts[0])), uintptr(atSymLinkNoFollow), 0, 0); err != 0 && err != syscall.ENOSYS {
-        return err
-    }
-
-    return nil
-}
diff --git a/pkg/system/utimes_unix_test.go b/pkg/system/utimes_unix_test.go
deleted file mode 100644
index 1ee0d099f9..0000000000
--- a/pkg/system/utimes_unix_test.go
+++ /dev/null
@@ -1,68 +0,0 @@
-// +build linux freebsd
-
-package system
-
-import (
-    "io/ioutil"
-    "os"
-    "path/filepath"
-    "syscall"
-    "testing"
-)
-
-// prepareFiles creates files for testing in the temp directory
-func prepareFiles(t *testing.T) (string, string, string, string) {
-    dir, err := ioutil.TempDir("", "docker-system-test")
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    file := filepath.Join(dir, "exist")
-    if err := ioutil.WriteFile(file, []byte("hello"), 0644); err != nil {
-        t.Fatal(err)
-    }
-
-    invalid := filepath.Join(dir, "doesnt-exist")
-
-    symlink := filepath.Join(dir, "symlink")
-    if err := os.Symlink(file, symlink); err != nil {
-        t.Fatal(err)
-    }
-
-    return file, invalid, symlink, dir
-}
-
-func TestLUtimesNano(t *testing.T) {
-    file, invalid, symlink, dir := prepareFiles(t)
-    defer os.RemoveAll(dir)
-
-    before, err := os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    ts := []syscall.Timespec{{0, 0}, {0, 0}}
-    if err := LUtimesNano(symlink, ts); err != nil {
-        t.Fatal(err)
-    }
-
-    symlinkInfo, err := os.Lstat(symlink)
-    if err != nil {
-        t.Fatal(err)
-    }
-    if before.ModTime().Unix() == symlinkInfo.ModTime().Unix() {
-        t.Fatal("The modification time of the symlink should be different")
-    }
-
-    fileInfo, err := os.Stat(file)
-    if err != nil {
-        t.Fatal(err)
-    }
-    if before.ModTime().Unix() != fileInfo.ModTime().Unix() {
-        t.Fatal("The modification time of the file should be the same")
-    }
-
-    if err := LUtimesNano(invalid, ts); err == nil {
-        t.Fatal("Doesn't return an error on a non-existing file")
-    }
-}
diff --git a/pkg/system/utimes_unsupported.go b/pkg/system/utimes_unsupported.go
deleted file mode 100644
index 50c3a04364..0000000000
--- a/pkg/system/utimes_unsupported.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build !linux,!freebsd,!darwin
-
-package system
-
-import "syscall"
-
-// LUtimesNano is not supported on platforms other than linux, freebsd and darwin.
-func LUtimesNano(path string, ts []syscall.Timespec) error {
-    return ErrNotSupportedPlatform
-}
diff --git a/pkg/system/xattrs_linux.go b/pkg/system/xattrs_linux.go
deleted file mode 100644
index d2e2c05799..0000000000
--- a/pkg/system/xattrs_linux.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package system
-
-import (
-    "syscall"
-    "unsafe"
-)
-
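A hedged usage sketch for the two xattr functions defined next; it assumes a linux build and a filesystem that permits user-namespace xattrs (ext4, xfs, btrfs, ...):

```go
package main

import (
	"fmt"
	"io/ioutil"

	"github.com/docker/docker/pkg/system"
)

func main() {
	path := "/tmp/xattr-demo" // illustrative path
	if err := ioutil.WriteFile(path, []byte("x"), 0644); err != nil {
		panic(err)
	}
	if err := system.Lsetxattr(path, "user.comment", []byte("hello"), 0); err != nil {
		panic(err) // e.g. a tmpfs without user xattr support
	}
	v, err := system.Lgetxattr(path, "user.comment")
	if err != nil {
		panic(err)
	}
	// A nil slice with a nil error would mean the attribute is not set.
	fmt.Printf("%s\n", v) // hello
}
```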
-// Lgetxattr retrieves the value of the extended attribute identified by attr
-// and associated with the given path in the file system.
-// It will return a nil slice and nil error if the xattr is not set.
-func Lgetxattr(path string, attr string) ([]byte, error) {
-    pathBytes, err := syscall.BytePtrFromString(path)
-    if err != nil {
-        return nil, err
-    }
-    attrBytes, err := syscall.BytePtrFromString(attr)
-    if err != nil {
-        return nil, err
-    }
-
-    dest := make([]byte, 128)
-    destBytes := unsafe.Pointer(&dest[0])
-    sz, _, errno := syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
-    if errno == syscall.ENODATA {
-        return nil, nil
-    }
-    if errno == syscall.ERANGE {
-        dest = make([]byte, sz)
-        destBytes := unsafe.Pointer(&dest[0])
-        sz, _, errno = syscall.Syscall6(syscall.SYS_LGETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(destBytes), uintptr(len(dest)), 0, 0)
-    }
-    if errno != 0 {
-        return nil, errno
-    }
-
-    return dest[:sz], nil
-}
-
-var _zero uintptr
-
-// Lsetxattr sets the value of the extended attribute identified by attr
-// and associated with the given path in the file system.
-func Lsetxattr(path string, attr string, data []byte, flags int) error {
-    pathBytes, err := syscall.BytePtrFromString(path)
-    if err != nil {
-        return err
-    }
-    attrBytes, err := syscall.BytePtrFromString(attr)
-    if err != nil {
-        return err
-    }
-    var dataBytes unsafe.Pointer
-    if len(data) > 0 {
-        dataBytes = unsafe.Pointer(&data[0])
-    } else {
-        dataBytes = unsafe.Pointer(&_zero)
-    }
-    _, _, errno := syscall.Syscall6(syscall.SYS_LSETXATTR, uintptr(unsafe.Pointer(pathBytes)), uintptr(unsafe.Pointer(attrBytes)), uintptr(dataBytes), uintptr(len(data)), uintptr(flags), 0)
-    if errno != 0 {
-        return errno
-    }
-    return nil
-}
diff --git a/pkg/system/xattrs_unsupported.go b/pkg/system/xattrs_unsupported.go
deleted file mode 100644
index 0114f2227c..0000000000
--- a/pkg/system/xattrs_unsupported.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// +build !linux
-
-package system
-
-// Lgetxattr is not supported on platforms other than linux.
-func Lgetxattr(path string, attr string) ([]byte, error) {
-    return nil, ErrNotSupportedPlatform
-}
-
-// Lsetxattr is not supported on platforms other than linux.
-func Lsetxattr(path string, attr string, data []byte, flags int) error {
-    return ErrNotSupportedPlatform
-}
diff --git a/pkg/tailfile/tailfile.go b/pkg/tailfile/tailfile.go
deleted file mode 100644
index d580584d61..0000000000
--- a/pkg/tailfile/tailfile.go
+++ /dev/null
@@ -1,66 +0,0 @@
-// Package tailfile provides helper functions to read the last n lines of any
-// ReadSeeker.
-package tailfile

-import (
-    "bytes"
-    "errors"
-    "io"
-    "os"
-)
-
-const blockSize = 1024
-
-var eol = []byte("\n")
-
-// ErrNonPositiveLinesNumber is an error returned if the requested number of lines was not positive.
-var ErrNonPositiveLinesNumber = errors.New("The number of lines to extract from the file must be positive")
-
-// TailFile returns the last n lines of reader f (which could be a file).
-func TailFile(f io.ReadSeeker, n int) ([][]byte, error) {
-    if n <= 0 {
-        return nil, ErrNonPositiveLinesNumber
-    }
-    size, err := f.Seek(0, os.SEEK_END)
-    if err != nil {
-        return nil, err
-    }
-    block := -1
-    var data []byte
-    var cnt int
-    for {
-        var b []byte
-        step := int64(block * blockSize)
-        left := size + step // how many bytes to beginning
-        if left < 0 {
-            if _, err := f.Seek(0, os.SEEK_SET); err != nil {
-                return nil, err
-            }
-            b = make([]byte, blockSize+left)
-            if _, err := f.Read(b); err != nil {
-                return nil, err
-            }
-            data = append(b, data...)
- break - } else { - b = make([]byte, blockSize) - if _, err := f.Seek(step, os.SEEK_END); err != nil { - return nil, err - } - if _, err := f.Read(b); err != nil { - return nil, err - } - data = append(b, data...) - } - cnt += bytes.Count(b, eol) - if cnt > n { - break - } - block-- - } - lines := bytes.Split(data, eol) - if n < len(lines) { - return lines[len(lines)-n-1 : len(lines)-1], nil - } - return lines[:len(lines)-1], nil -} diff --git a/pkg/tailfile/tailfile_test.go b/pkg/tailfile/tailfile_test.go deleted file mode 100644 index 31217c036c..0000000000 --- a/pkg/tailfile/tailfile_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package tailfile - -import ( - "io/ioutil" - "os" - "testing" -) - -func TestTailFile(t *testing.T) { - f, err := ioutil.TempFile("", "tail-test") - if err != nil { - t.Fatal(err) - } - defer f.Close() - defer os.RemoveAll(f.Name()) - testFile := []byte(`first line -second line -third line -fourth line -fifth line -next first line -next second line -next third line -next fourth line -next fifth line -last first line -next first line -next second line -next third line -next fourth line -next fifth line -next first line -next second line -next third line -next fourth line -next fifth line -last second line -last third line -last fourth line -last fifth line -truncated line`) - if _, err := f.Write(testFile); err != nil { - t.Fatal(err) - } - if _, err := f.Seek(0, os.SEEK_SET); err != nil { - t.Fatal(err) - } - expected := []string{"last fourth line", "last fifth line"} - res, err := TailFile(f, 2) - if err != nil { - t.Fatal(err) - } - for i, l := range res { - t.Logf("%s", l) - if expected[i] != string(l) { - t.Fatalf("Expected line %s, got %s", expected[i], l) - } - } -} - -func TestTailFileManyLines(t *testing.T) { - f, err := ioutil.TempFile("", "tail-test") - if err != nil { - t.Fatal(err) - } - defer f.Close() - defer os.RemoveAll(f.Name()) - testFile := []byte(`first line -second line -truncated line`) - if _, err := f.Write(testFile); err != nil { - t.Fatal(err) - } - if _, err := f.Seek(0, os.SEEK_SET); err != nil { - t.Fatal(err) - } - expected := []string{"first line", "second line"} - res, err := TailFile(f, 10000) - if err != nil { - t.Fatal(err) - } - for i, l := range res { - t.Logf("%s", l) - if expected[i] != string(l) { - t.Fatalf("Expected line %s, got %s", expected[i], l) - } - } -} - -func TestTailEmptyFile(t *testing.T) { - f, err := ioutil.TempFile("", "tail-test") - if err != nil { - t.Fatal(err) - } - defer f.Close() - defer os.RemoveAll(f.Name()) - res, err := TailFile(f, 10000) - if err != nil { - t.Fatal(err) - } - if len(res) != 0 { - t.Fatal("Must be empty slice from empty file") - } -} - -func TestTailNegativeN(t *testing.T) { - f, err := ioutil.TempFile("", "tail-test") - if err != nil { - t.Fatal(err) - } - defer f.Close() - defer os.RemoveAll(f.Name()) - testFile := []byte(`first line -second line -truncated line`) - if _, err := f.Write(testFile); err != nil { - t.Fatal(err) - } - if _, err := f.Seek(0, os.SEEK_SET); err != nil { - t.Fatal(err) - } - if _, err := TailFile(f, -1); err != ErrNonPositiveLinesNumber { - t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) - } - if _, err := TailFile(f, 0); err != ErrNonPositiveLinesNumber { - t.Fatalf("Expected ErrNonPositiveLinesNumber, got %s", err) - } -} - -func BenchmarkTail(b *testing.B) { - f, err := ioutil.TempFile("", "tail-test") - if err != nil { - b.Fatal(err) - } - defer f.Close() - defer os.RemoveAll(f.Name()) - for i := 0; i < 10000; i++ { - if _, err := 
f.Write([]byte("tailfile pretty interesting line\n")); err != nil {
-            b.Fatal(err)
-        }
-    }
-    b.ResetTimer()
-    for i := 0; i < b.N; i++ {
-        if _, err := TailFile(f, 1000); err != nil {
-            b.Fatal(err)
-        }
-    }
-}
diff --git a/pkg/tarsum/builder_context.go b/pkg/tarsum/builder_context.go
deleted file mode 100644
index b42983e984..0000000000
--- a/pkg/tarsum/builder_context.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package tarsum
-
-// BuilderContext is an interface extending TarSum by adding the Remove method.
-// In general there was concern about adding this method to TarSum itself
-// so instead it is being added just to "BuilderContext" which will then
-// only be used during the .dockerignore file processing
-// - see builder/evaluator.go
-type BuilderContext interface {
-    TarSum
-    Remove(string)
-}
-
-func (bc *tarSum) Remove(filename string) {
-    for i, fis := range bc.sums {
-        if fis.Name() == filename {
-            bc.sums = append(bc.sums[:i], bc.sums[i+1:]...)
-            // Note, we don't just return because there could be
-            // more than one with this name
-        }
-    }
-}
diff --git a/pkg/tarsum/builder_context_test.go b/pkg/tarsum/builder_context_test.go
deleted file mode 100644
index 719f72895d..0000000000
--- a/pkg/tarsum/builder_context_test.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package tarsum
-
-import (
-    "io"
-    "io/ioutil"
-    "os"
-    "testing"
-)
-
-// Try to remove a tarsum (in the BuilderContext) that does not exist; it won't change a thing
-func TestTarSumRemoveNonExistent(t *testing.T) {
-    filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
-    reader, err := os.Open(filename)
-    if err != nil {
-        t.Fatal(err)
-    }
-    ts, err := NewTarSum(reader, false, Version0)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    // Read and discard bytes so that it populates sums
-    _, err = io.Copy(ioutil.Discard, ts)
-    if err != nil {
-        t.Errorf("failed to read from %s: %s", filename, err)
-    }
-
-    expected := len(ts.GetSums())
-
-    ts.(BuilderContext).Remove("")
-    ts.(BuilderContext).Remove("Anything")
-
-    if len(ts.GetSums()) != expected {
-        t.Fatalf("Expected %v sums, got %v.", expected, ts.GetSums())
-    }
-}
-
-// Remove a tarsum (in the BuilderContext)
-func TestTarSumRemove(t *testing.T) {
-    filename := "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar"
-    reader, err := os.Open(filename)
-    if err != nil {
-        t.Fatal(err)
-    }
-    ts, err := NewTarSum(reader, false, Version0)
-    if err != nil {
-        t.Fatal(err)
-    }
-
-    // Read and discard bytes so that it populates sums
-    _, err = io.Copy(ioutil.Discard, ts)
-    if err != nil {
-        t.Errorf("failed to read from %s: %s", filename, err)
-    }
-
-    expected := len(ts.GetSums()) - 1
-
-    ts.(BuilderContext).Remove("etc/sudoers")
-
-    if len(ts.GetSums()) != expected {
-        t.Fatalf("Expected %v sums, got %v.", expected, len(ts.GetSums()))
-    }
-}
diff --git a/pkg/tarsum/fileinfosums.go b/pkg/tarsum/fileinfosums.go
deleted file mode 100644
index 5abf5e7ba3..0000000000
--- a/pkg/tarsum/fileinfosums.go
+++ /dev/null
@@ -1,126 +0,0 @@
-package tarsum
-
-import "sort"
-
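Looking back at the tailfile package deleted above: a minimal usage sketch. TailFile only needs an io.ReadSeeker, so an in-memory reader works as well as a log file (the daemon reads JSON log files this way to serve `docker logs --tail`):

```go
package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/pkg/tailfile"
)

func main() {
	// Any io.ReadSeeker works; strings.Reader stands in for a log file here.
	r := strings.NewReader("a\nb\nc\nd\n")
	lines, err := tailfile.TailFile(r, 2)
	if err != nil {
		panic(err)
	}
	for _, l := range lines {
		fmt.Printf("%s\n", l) // c, then d
	}
}
```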
-// FileInfoSumInterface provides an interface for accessing file checksum
-// information within a tar file. This info is accessed through an interface
-// so the actual name and sum cannot be meddled with.
-type FileInfoSumInterface interface {
-    // File name
-    Name() string
-    // Checksum of this particular file and its headers
-    Sum() string
-    // Position of file in the tar
-    Pos() int64
-}
-
-type fileInfoSum struct {
-    name string
-    sum  string
-    pos  int64
-}
-
-func (fis fileInfoSum) Name() string {
-    return fis.name
-}
-func (fis fileInfoSum) Sum() string {
-    return fis.sum
-}
-func (fis fileInfoSum) Pos() int64 {
-    return fis.pos
-}
-
-// FileInfoSums provides a list of FileInfoSumInterfaces.
-type FileInfoSums []FileInfoSumInterface
-
-// GetFile returns the first FileInfoSumInterface with a matching name.
-func (fis FileInfoSums) GetFile(name string) FileInfoSumInterface {
-    for i := range fis {
-        if fis[i].Name() == name {
-            return fis[i]
-        }
-    }
-    return nil
-}
-
-// GetAllFile returns a FileInfoSums with all matching names.
-func (fis FileInfoSums) GetAllFile(name string) FileInfoSums {
-    f := FileInfoSums{}
-    for i := range fis {
-        if fis[i].Name() == name {
-            f = append(f, fis[i])
-        }
-    }
-    return f
-}
-
-// GetDuplicatePaths returns a FileInfoSums with all duplicated paths.
-func (fis FileInfoSums) GetDuplicatePaths() (dups FileInfoSums) {
-    seen := make(map[string]int, len(fis)) // allocate early; no need to grow this map.
-    for i := range fis {
-        f := fis[i]
-        if _, ok := seen[f.Name()]; ok {
-            dups = append(dups, f)
-        } else {
-            seen[f.Name()] = 0
-        }
-    }
-    return dups
-}
-
-// Len returns the size of the FileInfoSums.
-func (fis FileInfoSums) Len() int { return len(fis) }
-
-// Swap swaps two FileInfoSum values in a FileInfoSums list.
-func (fis FileInfoSums) Swap(i, j int) { fis[i], fis[j] = fis[j], fis[i] }
-
-// SortByPos sorts FileInfoSums content by position.
-func (fis FileInfoSums) SortByPos() {
-    sort.Sort(byPos{fis})
-}
-
-// SortByNames sorts FileInfoSums content by name.
-func (fis FileInfoSums) SortByNames() {
-    sort.Sort(byName{fis})
-}
-
-// SortBySums sorts FileInfoSums content by sums.
-func (fis FileInfoSums) SortBySums() {
-    dups := fis.GetDuplicatePaths()
-    if len(dups) > 0 {
-        sort.Sort(bySum{fis, dups})
-    } else {
-        sort.Sort(bySum{fis, nil})
-    }
-}
-
-// byName is a sort.Sort helper for sorting by file names.
-// If names are the same, order them by their appearance in the tar archive -type byName struct{ FileInfoSums } - -func (bn byName) Less(i, j int) bool { - if bn.FileInfoSums[i].Name() == bn.FileInfoSums[j].Name() { - return bn.FileInfoSums[i].Pos() < bn.FileInfoSums[j].Pos() - } - return bn.FileInfoSums[i].Name() < bn.FileInfoSums[j].Name() -} - -// bySum is a sort.Sort helper for sorting by the sums of all the fileinfos in the tar archive -type bySum struct { - FileInfoSums - dups FileInfoSums -} - -func (bs bySum) Less(i, j int) bool { - if bs.dups != nil && bs.FileInfoSums[i].Name() == bs.FileInfoSums[j].Name() { - return bs.FileInfoSums[i].Pos() < bs.FileInfoSums[j].Pos() - } - return bs.FileInfoSums[i].Sum() < bs.FileInfoSums[j].Sum() -} - -// byPos is a sort.Sort helper for sorting by the sums of all the fileinfos by their original order -type byPos struct{ FileInfoSums } - -func (bp byPos) Less(i, j int) bool { - return bp.FileInfoSums[i].Pos() < bp.FileInfoSums[j].Pos() -} diff --git a/pkg/tarsum/fileinfosums_test.go b/pkg/tarsum/fileinfosums_test.go deleted file mode 100644 index bb700d8bde..0000000000 --- a/pkg/tarsum/fileinfosums_test.go +++ /dev/null @@ -1,62 +0,0 @@ -package tarsum - -import "testing" - -func newFileInfoSums() FileInfoSums { - return FileInfoSums{ - fileInfoSum{name: "file3", sum: "2abcdef1234567890", pos: 2}, - fileInfoSum{name: "dup1", sum: "deadbeef1", pos: 5}, - fileInfoSum{name: "file1", sum: "0abcdef1234567890", pos: 0}, - fileInfoSum{name: "file4", sum: "3abcdef1234567890", pos: 3}, - fileInfoSum{name: "dup1", sum: "deadbeef0", pos: 4}, - fileInfoSum{name: "file2", sum: "1abcdef1234567890", pos: 1}, - } -} - -func TestSortFileInfoSums(t *testing.T) { - dups := newFileInfoSums().GetAllFile("dup1") - if len(dups) != 2 { - t.Errorf("expected length 2, got %d", len(dups)) - } - dups.SortByNames() - if dups[0].Pos() != 4 { - t.Errorf("sorted dups should be ordered by position. Expected 4, got %d", dups[0].Pos()) - } - - fis := newFileInfoSums() - expected := "0abcdef1234567890" - fis.SortBySums() - got := fis[0].Sum() - if got != expected { - t.Errorf("Expected %q, got %q", expected, got) - } - - fis = newFileInfoSums() - expected = "dup1" - fis.SortByNames() - gotFis := fis[0] - if gotFis.Name() != expected { - t.Errorf("Expected %q, got %q", expected, gotFis.Name()) - } - // since a duplicate is first, ensure it is ordered first by position too - if gotFis.Pos() != 4 { - t.Errorf("Expected %d, got %d", 4, gotFis.Pos()) - } - - fis = newFileInfoSums() - fis.SortByPos() - if fis[0].Pos() != 0 { - t.Errorf("sorted fileInfoSums by Pos should order them by position.") - } - - fis = newFileInfoSums() - expected = "deadbeef1" - gotFileInfoSum := fis.GetFile("dup1") - if gotFileInfoSum.Sum() != expected { - t.Errorf("Expected %q, got %q", expected, gotFileInfoSum) - } - if fis.GetFile("noPresent") != nil { - t.Errorf("Should have return nil if name not found.") - } - -} diff --git a/pkg/tarsum/tarsum.go b/pkg/tarsum/tarsum.go deleted file mode 100644 index 154788db82..0000000000 --- a/pkg/tarsum/tarsum.go +++ /dev/null @@ -1,295 +0,0 @@ -// Package tarsum provides algorithms to perform checksum calculation on -// filesystem layers. -// -// The transportation of filesystems, regarding Docker, is done with tar(1) -// archives. There are a variety of tar serialization formats [2], and a key -// concern here is ensuring a repeatable checksum given a set of inputs from a -// generic tar archive. 
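On the tie-breaking rule byName implements above (duplicate paths ordered by their position in the archive): a self-contained sketch with a local analogue of the unexported fileInfoSum type:

```go
package main

import (
	"fmt"
	"sort"
)

// fis is a tiny local analogue of fileInfoSum, which is unexported.
type fis struct {
	name string
	pos  int64
}

// byName mirrors the rule above: sort by name, and break ties (duplicate
// paths in the tar) by original position in the archive.
type byName []fis

func (b byName) Len() int      { return len(b) }
func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byName) Less(i, j int) bool {
	if b[i].name == b[j].name {
		return b[i].pos < b[j].pos
	}
	return b[i].name < b[j].name
}

func main() {
	files := []fis{{"etc/passwd", 3}, {"etc/hosts", 2}, {"etc/hosts", 0}}
	sort.Sort(byName(files))
	fmt.Println(files) // [{etc/hosts 0} {etc/hosts 2} {etc/passwd 3}]
}
```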
Types of transportation include distribution to and from a -// registry endpoint, saving and loading through commands or Docker daemon APIs, -// transferring the build context from client to Docker daemon, and committing the -// filesystem of a container to become an image. -// -// As tar archives are used for transit, but not preserved in many situations, the -// focus of the algorithm is to ensure the integrity of the preserved filesystem, -// while maintaining a deterministic accountability. This includes neither -// constraining the ordering or manipulation of the files during the creation or -// unpacking of the archive, nor include additional metadata state about the file -// system attributes. -package tarsum - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto" - "crypto/sha256" - "encoding/hex" - "errors" - "fmt" - "hash" - "io" - "path" - "strings" -) - -const ( - buf8K = 8 * 1024 - buf16K = 16 * 1024 - buf32K = 32 * 1024 -) - -// NewTarSum creates a new interface for calculating a fixed time checksum of a -// tar archive. -// -// This is used for calculating checksums of layers of an image, in some cases -// including the byte payload of the image's json metadata as well, and for -// calculating the checksums for buildcache. -func NewTarSum(r io.Reader, dc bool, v Version) (TarSum, error) { - return NewTarSumHash(r, dc, v, DefaultTHash) -} - -// NewTarSumHash creates a new TarSum, providing a THash to use rather than -// the DefaultTHash. -func NewTarSumHash(r io.Reader, dc bool, v Version, tHash THash) (TarSum, error) { - headerSelector, err := getTarHeaderSelector(v) - if err != nil { - return nil, err - } - ts := &tarSum{Reader: r, DisableCompression: dc, tarSumVersion: v, headerSelector: headerSelector, tHash: tHash} - err = ts.initTarSum() - return ts, err -} - -// NewTarSumForLabel creates a new TarSum using the provided TarSum version+hash label. -func NewTarSumForLabel(r io.Reader, disableCompression bool, label string) (TarSum, error) { - parts := strings.SplitN(label, "+", 2) - if len(parts) != 2 { - return nil, errors.New("tarsum label string should be of the form: {tarsum_version}+{hash_name}") - } - - versionName, hashName := parts[0], parts[1] - - version, ok := tarSumVersionsByName[versionName] - if !ok { - return nil, fmt.Errorf("unknown TarSum version name: %q", versionName) - } - - hashConfig, ok := standardHashConfigs[hashName] - if !ok { - return nil, fmt.Errorf("unknown TarSum hash name: %q", hashName) - } - - tHash := NewTHash(hashConfig.name, hashConfig.hash.New) - - return NewTarSumHash(r, disableCompression, version, tHash) -} - -// TarSum is the generic interface for calculating fixed time -// checksums of a tar archive. -type TarSum interface { - io.Reader - GetSums() FileInfoSums - Sum([]byte) string - Version() Version - Hash() THash -} - -// tarSum struct is the structure for a Version0 checksum calculation. -type tarSum struct { - io.Reader - tarR *tar.Reader - tarW *tar.Writer - writer writeCloseFlusher - bufTar *bytes.Buffer - bufWriter *bytes.Buffer - bufData []byte - h hash.Hash - tHash THash - sums FileInfoSums - fileCounter int64 - currentFile string - finished bool - first bool - DisableCompression bool // false by default. When false, the output gzip compressed. 
- tarSumVersion Version // this field is not exported so it can not be mutated during use - headerSelector tarHeaderSelector // handles selecting and ordering headers for files in the archive -} - -func (ts tarSum) Hash() THash { - return ts.tHash -} - -func (ts tarSum) Version() Version { - return ts.tarSumVersion -} - -// THash provides a hash.Hash type generator and its name. -type THash interface { - Hash() hash.Hash - Name() string -} - -// NewTHash is a convenience method for creating a THash. -func NewTHash(name string, h func() hash.Hash) THash { - return simpleTHash{n: name, h: h} -} - -type tHashConfig struct { - name string - hash crypto.Hash -} - -var ( - // NOTE: DO NOT include MD5 or SHA1, which are considered insecure. - standardHashConfigs = map[string]tHashConfig{ - "sha256": {name: "sha256", hash: crypto.SHA256}, - "sha512": {name: "sha512", hash: crypto.SHA512}, - } -) - -// DefaultTHash is default TarSum hashing algorithm - "sha256". -var DefaultTHash = NewTHash("sha256", sha256.New) - -type simpleTHash struct { - n string - h func() hash.Hash -} - -func (sth simpleTHash) Name() string { return sth.n } -func (sth simpleTHash) Hash() hash.Hash { return sth.h() } - -func (ts *tarSum) encodeHeader(h *tar.Header) error { - for _, elem := range ts.headerSelector.selectHeaders(h) { - if _, err := ts.h.Write([]byte(elem[0] + elem[1])); err != nil { - return err - } - } - return nil -} - -func (ts *tarSum) initTarSum() error { - ts.bufTar = bytes.NewBuffer([]byte{}) - ts.bufWriter = bytes.NewBuffer([]byte{}) - ts.tarR = tar.NewReader(ts.Reader) - ts.tarW = tar.NewWriter(ts.bufTar) - if !ts.DisableCompression { - ts.writer = gzip.NewWriter(ts.bufWriter) - } else { - ts.writer = &nopCloseFlusher{Writer: ts.bufWriter} - } - if ts.tHash == nil { - ts.tHash = DefaultTHash - } - ts.h = ts.tHash.Hash() - ts.h.Reset() - ts.first = true - ts.sums = FileInfoSums{} - return nil -} - -func (ts *tarSum) Read(buf []byte) (int, error) { - if ts.finished { - return ts.bufWriter.Read(buf) - } - if len(ts.bufData) < len(buf) { - switch { - case len(buf) <= buf8K: - ts.bufData = make([]byte, buf8K) - case len(buf) <= buf16K: - ts.bufData = make([]byte, buf16K) - case len(buf) <= buf32K: - ts.bufData = make([]byte, buf32K) - default: - ts.bufData = make([]byte, len(buf)) - } - } - buf2 := ts.bufData[:len(buf)] - - n, err := ts.tarR.Read(buf2) - if err != nil { - if err == io.EOF { - if _, err := ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - if !ts.first { - ts.sums = append(ts.sums, fileInfoSum{name: ts.currentFile, sum: hex.EncodeToString(ts.h.Sum(nil)), pos: ts.fileCounter}) - ts.fileCounter++ - ts.h.Reset() - } else { - ts.first = false - } - - currentHeader, err := ts.tarR.Next() - if err != nil { - if err == io.EOF { - if err := ts.tarW.Close(); err != nil { - return 0, err - } - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - if err := ts.writer.Close(); err != nil { - return 0, err - } - ts.finished = true - return n, nil - } - return n, err - } - ts.currentFile = path.Clean(currentHeader.Name) - if err := ts.encodeHeader(currentHeader); err != nil { - return 0, err - } - if err := ts.tarW.WriteHeader(currentHeader); err != nil { - return 0, err - } - if _, err := ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - if _, err := io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) - } - return n, err - } - - // Filling the hash buffer - if _, err = 
ts.h.Write(buf2[:n]); err != nil { - return 0, err - } - - // Filling the tar writer - if _, err = ts.tarW.Write(buf2[:n]); err != nil { - return 0, err - } - ts.tarW.Flush() - - // Filling the output writer - if _, err = io.Copy(ts.writer, ts.bufTar); err != nil { - return 0, err - } - ts.writer.Flush() - - return ts.bufWriter.Read(buf) -} - -func (ts *tarSum) Sum(extra []byte) string { - ts.sums.SortBySums() - h := ts.tHash.Hash() - if extra != nil { - h.Write(extra) - } - for _, fis := range ts.sums { - h.Write([]byte(fis.Sum())) - } - checksum := ts.Version().String() + "+" + ts.tHash.Name() + ":" + hex.EncodeToString(h.Sum(nil)) - return checksum -} - -func (ts *tarSum) GetSums() FileInfoSums { - return ts.sums -} diff --git a/pkg/tarsum/tarsum_spec.md b/pkg/tarsum/tarsum_spec.md deleted file mode 100644 index 89b2e49f98..0000000000 --- a/pkg/tarsum/tarsum_spec.md +++ /dev/null @@ -1,230 +0,0 @@ -page_title: TarSum checksum specification -page_description: Documentation for algorithms used in the TarSum checksum calculation -page_keywords: docker, checksum, validation, tarsum - -# TarSum Checksum Specification - -## Abstract - -This document describes the algorithms used in performing the TarSum checksum -calculation on filesystem layers, the need for this method over existing -methods, and the versioning of this calculation. - -## Warning - -This checksum algorithm is for best-effort comparison of file trees with fuzzy logic. - -This is _not_ a cryptographic attestation, and should not be considered secure. - -## Introduction - -The transportation of filesystems, regarding Docker, is done with tar(1) -archives. There are a variety of tar serialization formats [2], and a key -concern here is ensuring a repeatable checksum given a set of inputs from a -generic tar archive. Types of transportation include distribution to and from a -registry endpoint, saving and loading through commands or Docker daemon APIs, -transferring the build context from client to Docker daemon, and committing the -filesystem of a container to become an image. - -As tar archives are used for transit, but not preserved in many situations, the -focus of the algorithm is to ensure the integrity of the preserved filesystem, -while maintaining a deterministic accountability. This includes neither -constraining the ordering or manipulation of the files during the creation or -unpacking of the archive, nor include additional metadata state about the file -system attributes. - -## Intended Audience - -This document is outlining the methods used for consistent checksum calculation -for filesystems transported via tar archives. - -Auditing these methodologies is an open and iterative process. This document -should accommodate the review of source code. Ultimately, this document should -be the starting point of further refinements to the algorithm and its future -versions. - -## Concept - -The checksum mechanism must ensure the integrity and assurance of the -filesystem payload. - -## Checksum Algorithm Profile - -A checksum mechanism must define the following operations and attributes: - -* Associated hashing cipher - used to checksum each file payload and attribute - information. -* Checksum list - each file of the filesystem archive has its checksum - calculated from the payload and attributes of the file. The final checksum is - calculated from this list, with specific ordering. -* Version - as the algorithm adapts to requirements, there are behaviors of the - algorithm to manage by versioning. 
-* Archive being calculated - the tar archive having its checksum calculated
-
-## Elements of TarSum checksum
-
-The calculated sum output is a text string. The elements included in the output
-of the calculated sum comprise the information needed for validation of the sum
-(TarSum version and hashing cipher used) and the expected checksum in hexadecimal
-form.
-
-There are two delimiters used:
-* '+' separates TarSum version from hashing cipher
-* ':' separates calculation mechanics from expected hash
-
-Example:
-
-```
- "tarsum.v1+sha256:220a60ecd4a3c32c282622a625a54db9ba0ff55b5ba9c29c7064a2bc358b6a3e"
- |         |       \                                                               |
- |         |        \                                                              |
- |_version_|_cipher__|__                                                           |
- |                      \                                                          |
- |_calculation_mechanics_|______________________expected_sum______________________|
-```
-
-## Versioning
-
-Versioning was introduced [0] to accommodate differences in calculation needed,
-and the ability to maintain reverse compatibility.
-
-The general algorithm is described further in the 'Calculation' section.
-
-### Version0
-
-This is the initial version of TarSum.
-
-Its element in the TarSum checksum string is `tarsum`.
-
-### Version1
-
-Its element in the TarSum checksum is `tarsum.v1`.
-
-The notable changes in this version:
-* Exclusion of file `mtime` from the file information headers, in each file
-  checksum calculation
-* Inclusion of extended attributes (`xattrs`. Also seen as `SCHILY.xattr.` prefixed Pax
-  tar file info headers) keys and values in each file checksum calculation
-
-### VersionDev
-
-*Do not use unless validating refinements to the checksum algorithm*
-
-Its element in the TarSum checksum is `tarsum.dev`.
-
-This is a floating placeholder for a next version and grounds for testing
-changes. The methods used for calculation are subject to change without notice,
-and this version is for testing and not for production use.
-
-## Ciphers
-
-The official default and standard hashing cipher used in the calculation mechanic
-is `sha256`. This refers to the SHA256 hash algorithm as defined in FIPS 180-4.
-
-Though the TarSum algorithm itself is not exclusively bound to the single
-hashing cipher `sha256`, support for alternate hashing ciphers was later added
-[1]. Use cases for an alternate cipher could include future-proofing the TarSum
-checksum format and using faster cipher hashes for tar filesystem checksums.
-
-## Calculation
-
-### Requirement
-
-As mentioned earlier, the calculation is such that it takes into consideration
-the lifecycle of the tar archive, in that the tar archive is not an immutable,
-permanent artifact. Otherwise, options like relying on a known hashing cipher
-checksum of the archive itself would be reliable enough. The tar archive of the
-filesystem is used as a transportation medium for Docker images, and the
-archive is discarded once its contents are extracted. Therefore, for consistent
-validation, items such as the order of files in the tar archive and timestamps
-are subject to change once an image is received.
-
-### Process
-
-The method is typically iterative due to reading tar info headers from the
-archive stream, though this is not a strict requirement.
-
-#### Files
-
-Each file in the tar archive has its contents (headers and body) checksummed
-individually using the designated associated hashing cipher. The ordered
-headers of the file are written to the checksum calculation first, and then the
-payload of the file body.
-
-The resulting checksum of the file is appended to the list of file sums. The
-sum is encoded as a string of the hexadecimal digest. Additionally, the file
-name and position in the archive are kept as a reference for special ordering.
-
-#### Headers
-
-The following headers are read, in this
-order (with the corresponding representation of each value):
-* 'name' - string
-* 'mode' - string of the base10 integer
-* 'uid' - string of the integer
-* 'gid' - string of the integer
-* 'size' - string of the integer
-* 'mtime' (_Version0 only_) - string of integer of the seconds since 1970-01-01 00:00:00 UTC
-* 'typeflag' - string of the char
-* 'linkname' - string
-* 'uname' - string
-* 'gname' - string
-* 'devmajor' - string of the integer
-* 'devminor' - string of the integer
-
-For >= Version1, the extended attribute headers ("SCHILY.xattr." prefixed pax
-headers) are included after the above list. These xattr key/value pairs are
-first sorted by key.
-
-#### Header Format
-
-The ordered headers are written to the hash in the format of
-
-    "{.key}{.value}"
-
-with no newline.
-
-#### Body
-
-After the ordered headers of the file have been added to the checksum for the
-file, the body of the file is written to the hash.
-
-#### List of file sums
-
-The list of file sums is sorted by the string of the hexadecimal digest.
-
-If there are two files in the tar with matching paths, the order of occurrence
-for that path is reflected for the sums of the corresponding file header and
-body.
-
-#### Final Checksum
-
-Begin with a fresh or initial state of the associated hash cipher. If there is
-additional payload to include in the TarSum calculation for the archive, it is
-written first. Then each checksum from the ordered list of file sums is written
-to the hash.
-
-The resulting digest is formatted per the Elements of TarSum checksum,
-including the TarSum version, the associated hash cipher and the hexadecimal
-encoded checksum digest.
-
-## Security Considerations
-
-The initial version of TarSum has undergone one update that could invalidate
-handcrafted tar archives. The tar archive format supports appending of files
-with same names as prior files in the archive. The latter file will clobber the
-prior file of the same path. Due to this the algorithm now accounts for files
-with matching paths, and orders the list of file sums accordingly [3].
-
-## Footnotes
-
-* [0] Versioning https://github.com/docker/docker/commit/747f89cd327db9d50251b17797c4d825162226d0
-* [1] Alternate ciphers https://github.com/docker/docker/commit/4e9925d780665149b8bc940d5ba242ada1973c4e
-* [2] Tar http://en.wikipedia.org/wiki/Tar_%28computing%29
-* [3] Name collision https://github.com/docker/docker/commit/c5e6362c53cbbc09ddbabd5a7323e04438b57d31
-
-## Acknowledgments
-
-Joffrey F (shin-) and Guillaume J. Charmes (creack) on the initial work of the
-TarSum calculation.
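To make the spec concrete: a hedged end-to-end sketch that builds a one-file tar in memory and drains it through a TarSum (the Version1 constant and the pre-move github.com/docker/docker/pkg/tarsum import path are assumed):

```go
package main

import (
	"archive/tar"
	"bytes"
	"fmt"
	"io"
	"io/ioutil"

	"github.com/docker/docker/pkg/tarsum"
)

func main() {
	// Build a one-file tar archive in memory.
	buf := new(bytes.Buffer)
	tw := tar.NewWriter(buf)
	body := []byte("hello")
	if err := tw.WriteHeader(&tar.Header{Name: "hello.txt", Mode: 0644, Size: int64(len(body))}); err != nil {
		panic(err)
	}
	if _, err := tw.Write(body); err != nil {
		panic(err)
	}
	if err := tw.Close(); err != nil {
		panic(err)
	}

	// Checksums accumulate as the TarSum is read, so drain it first.
	ts, err := tarsum.NewTarSum(buf, true, tarsum.Version1)
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(ioutil.Discard, ts); err != nil {
		panic(err)
	}
	fmt.Println(ts.Sum(nil)) // tarsum.v1+sha256:<64 hex digits>
}
```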
- diff --git a/pkg/tarsum/tarsum_test.go b/pkg/tarsum/tarsum_test.go deleted file mode 100644 index 54bec53fc9..0000000000 --- a/pkg/tarsum/tarsum_test.go +++ /dev/null @@ -1,656 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "crypto/md5" - "crypto/rand" - "crypto/sha1" - "crypto/sha256" - "crypto/sha512" - "encoding/hex" - "fmt" - "io" - "io/ioutil" - "os" - "strings" - "testing" -) - -type testLayer struct { - filename string - options *sizedOptions - jsonfile string - gzip bool - tarsum string - version Version - hash THash -} - -var testLayers = []testLayer{ - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - version: Version0, - tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - version: VersionDev, - tarsum: "tarsum.dev+sha256:db56e35eec6ce65ba1588c20ba6b1ea23743b59e81fb6b7f358ccbde5580345c"}, - { - filename: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar", - jsonfile: "testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json", - gzip: true, - tarsum: "tarsum+sha256:4095cc12fa5fdb1ab2760377e1cd0c4ecdd3e61b4f9b82319d96fcea6c9a41c6"}, - { - // Tests existing version of TarSum when xattrs are present - filename: "testdata/xattr/layer.tar", - jsonfile: "testdata/xattr/json", - version: Version0, - tarsum: "tarsum+sha256:07e304a8dbcb215b37649fde1a699f8aeea47e60815707f1cdf4d55d25ff6ab4"}, - { - // Tests next version of TarSum when xattrs are present - filename: "testdata/xattr/layer.tar", - jsonfile: "testdata/xattr/json", - version: VersionDev, - tarsum: "tarsum.dev+sha256:6c58917892d77b3b357b0f9ad1e28e1f4ae4de3a8006bd3beb8beda214d8fd16"}, - { - filename: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar", - jsonfile: "testdata/511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/json", - tarsum: "tarsum+sha256:c66bd5ec9f87b8f4c6135ca37684618f486a3dd1d113b138d0a177bfa39c2571"}, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha256:8bf12d7e67c51ee2e8306cba569398b1b9f419969521a12ffb9d8875e8836738"}, - { - // this tar has two files with the same path - filename: "testdata/collision/collision-0.tar", - tarsum: "tarsum+sha256:08653904a68d3ab5c59e65ef58c49c1581caa3c34744f8d354b3f575ea04424a"}, - { - // this tar has the same two files (with the same path), but reversed order. 
ensuring it has a different hash than the one above - filename: "testdata/collision/collision-1.tar", - tarsum: "tarsum+sha256:b51c13fbefe158b5ce420d2b930eef54c5cd55c50a2ee4abdddea8fa9f081e0d"}, - { - // this tar has the newer file from collision-0.tar, ensuring it has a different hash - filename: "testdata/collision/collision-2.tar", - tarsum: "tarsum+sha256:381547080919bb82691e995508ae20ed33ce0f6948d41cafbeb70ce20c73ee8e"}, - { - // this tar has the newer file from collision-1.tar, ensuring it has a different hash - filename: "testdata/collision/collision-3.tar", - tarsum: "tarsum+sha256:f886e431c08143164a676805205979cd8fa535dfcef714db5515650eea5a7c0f"}, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+md5:0d7529ec7a8360155b48134b8e599f53", - hash: md5THash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha1:f1fee39c5925807ff75ef1925e7a23be444ba4df", - hash: sha1Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha224:6319390c0b061d639085d8748b14cd55f697cf9313805218b21cf61c", - hash: sha224Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha384:a578ce3ce29a2ae03b8ed7c26f47d0f75b4fc849557c62454be4b5ffd66ba021e713b48ce71e947b43aab57afd5a7636", - hash: sha384Hash, - }, - { - options: &sizedOptions{1, 1024 * 1024, false, false}, // a 1mb file (in memory) - tarsum: "tarsum+sha512:e9bfb90ca5a4dfc93c46ee061a5cf9837de6d2fdf82544d6460d3147290aecfabf7b5e415b9b6e72db9b8941f149d5d69fb17a394cbfaf2eac523bd9eae21855", - hash: sha512Hash, - }, -} - -type sizedOptions struct { - num int64 - size int64 - isRand bool - realFile bool -} - -// make a tar: -// * num is the number of files the tar should have -// * size is the bytes per file -// * isRand is whether the contents of the files should be a random chunk (otherwise it's all zeros) -// * realFile will write to a TempFile, instead of an in-memory buffer -func sizedTar(opts sizedOptions) io.Reader { - var ( - fh io.ReadWriter - err error - ) - if opts.realFile { - fh, err = ioutil.TempFile("", "tarsum") - if err != nil { - return nil - } - } else { - fh = bytes.NewBuffer([]byte{}) - } - tarW := tar.NewWriter(fh) - defer tarW.Close() - for i := int64(0); i < opts.num; i++ { - err := tarW.WriteHeader(&tar.Header{ - Name: fmt.Sprintf("/testdata%d", i), - Mode: 0755, - Uid: 0, - Gid: 0, - Size: opts.size, - }) - if err != nil { - return nil - } - var rBuf []byte - if opts.isRand { - rBuf = make([]byte, 8) - _, err = rand.Read(rBuf) - if err != nil { - return nil - } - } else { - rBuf = []byte{0, 0, 0, 0, 0, 0, 0, 0} - } - - for i := int64(0); i < opts.size/int64(8); i++ { - tarW.Write(rBuf) - } - } - return fh -} - -func emptyTarSum(gzip bool) (TarSum, error) { - reader, writer := io.Pipe() - tarWriter := tar.NewWriter(writer) - - // Immediately close tarWriter and write-end of the - // Pipe in a separate goroutine so we don't block.
- go func() { - tarWriter.Close() - writer.Close() - }() - - return NewTarSum(reader, !gzip, Version0) -} - -// Test errors on NewTarsumForLabel -func TestNewTarSumForLabelInvalid(t *testing.T) { - reader := strings.NewReader("") - - if _, err := NewTarSumForLabel(reader, true, "invalidlabel"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } - - if _, err := NewTarSumForLabel(reader, true, "invalid+sha256"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } - if _, err := NewTarSumForLabel(reader, true, "tarsum.v1+invalid"); err == nil { - t.Fatalf("Expected an error, got nothing.") - } -} - -func TestNewTarSumForLabel(t *testing.T) { - - layer := testLayers[0] - - reader, err := os.Open(layer.filename) - if err != nil { - t.Fatal(err) - } - label := strings.Split(layer.tarsum, ":")[0] - ts, err := NewTarSumForLabel(reader, false, label) - if err != nil { - t.Fatal(err) - } - - // Make sure it actually worked by reading a little bit of it - nbByteToRead := 8 * 1024 - dBuf := make([]byte, nbByteToRead) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) - } -} - -// TestEmptyTar tests that tarsum does not fail to read an empty tar -// and correctly returns the hex digest of an empty hash. -func TestEmptyTar(t *testing.T) { - // Test without gzip. - ts, err := emptyTarSum(false) - if err != nil { - t.Fatal(err) - } - - zeroBlock := make([]byte, 1024) - buf := new(bytes.Buffer) - - n, err := io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } - - if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), zeroBlock) { - t.Fatalf("tarSum did not write the correct number of zeroed bytes: %d", n) - } - - expectedSum := ts.Version().String() + "+sha256:" + hex.EncodeToString(sha256.New().Sum(nil)) - resultSum := ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } - - // Test with gzip. - ts, err = emptyTarSum(true) - if err != nil { - t.Fatal(err) - } - buf.Reset() - - n, err = io.Copy(buf, ts) - if err != nil { - t.Fatal(err) - } - - bufgz := new(bytes.Buffer) - gz := gzip.NewWriter(bufgz) - n, err = io.Copy(gz, bytes.NewBuffer(zeroBlock)) - gz.Close() - gzBytes := bufgz.Bytes() - - if n != int64(len(zeroBlock)) || !bytes.Equal(buf.Bytes(), gzBytes) { - t.Fatalf("tarSum did not write the correct number of gzipped-zeroed bytes: %d", n) - } - - resultSum = ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } - - // Test without ever actually writing anything. 
- if ts, err = NewTarSum(bytes.NewReader([]byte{}), true, Version0); err != nil { - t.Fatal(err) - } - - resultSum = ts.Sum(nil) - - if resultSum != expectedSum { - t.Fatalf("expected [%s] but got [%s]", expectedSum, resultSum) - } -} - -var ( - md5THash = NewTHash("md5", md5.New) - sha1Hash = NewTHash("sha1", sha1.New) - sha224Hash = NewTHash("sha224", sha256.New224) - sha384Hash = NewTHash("sha384", sha512.New384) - sha512Hash = NewTHash("sha512", sha512.New) -) - -// Test all the build-in read size : buf8K, buf16K, buf32K and more -func TestTarSumsReadSize(t *testing.T) { - // Test always on the same layer (that is big enough) - layer := testLayers[0] - - for i := 0; i < 5; i++ { - - reader, err := os.Open(layer.filename) - if err != nil { - t.Fatal(err) - } - ts, err := NewTarSum(reader, false, layer.version) - if err != nil { - t.Fatal(err) - } - - // Read and discard bytes so that it populates sums - nbByteToRead := (i + 1) * 8 * 1024 - dBuf := make([]byte, nbByteToRead) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read %vKB from %s: %s", nbByteToRead, layer.filename, err) - continue - } - } -} - -func TestTarSums(t *testing.T) { - for _, layer := range testLayers { - var ( - fh io.Reader - err error - ) - if len(layer.filename) > 0 { - fh, err = os.Open(layer.filename) - if err != nil { - t.Errorf("failed to open %s: %s", layer.filename, err) - continue - } - } else if layer.options != nil { - fh = sizedTar(*layer.options) - } else { - // What else is there to test? - t.Errorf("what to do with %#v", layer) - continue - } - if file, ok := fh.(*os.File); ok { - defer file.Close() - } - - var ts TarSum - if layer.hash == nil { - // double negatives! - ts, err = NewTarSum(fh, !layer.gzip, layer.version) - } else { - ts, err = NewTarSumHash(fh, !layer.gzip, layer.version, layer.hash) - } - if err != nil { - t.Errorf("%q :: %q", err, layer.filename) - continue - } - - // Read variable number of bytes to test dynamic buffer - dBuf := make([]byte, 1) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read 1B from %s: %s", layer.filename, err) - continue - } - dBuf = make([]byte, 16*1024) - _, err = ts.Read(dBuf) - if err != nil { - t.Errorf("failed to read 16KB from %s: %s", layer.filename, err) - continue - } - - // Read and discard remaining bytes - _, err = io.Copy(ioutil.Discard, ts) - if err != nil { - t.Errorf("failed to copy from %s: %s", layer.filename, err) - continue - } - var gotSum string - if len(layer.jsonfile) > 0 { - jfh, err := os.Open(layer.jsonfile) - if err != nil { - t.Errorf("failed to open %s: %s", layer.jsonfile, err) - continue - } - buf, err := ioutil.ReadAll(jfh) - if err != nil { - t.Errorf("failed to readAll %s: %s", layer.jsonfile, err) - continue - } - gotSum = ts.Sum(buf) - } else { - gotSum = ts.Sum(nil) - } - - if layer.tarsum != gotSum { - t.Errorf("expecting [%s], but got [%s]", layer.tarsum, gotSum) - } - var expectedHashName string - if layer.hash != nil { - expectedHashName = layer.hash.Name() - } else { - expectedHashName = DefaultTHash.Name() - } - if expectedHashName != ts.Hash().Name() { - t.Errorf("expecting hash [%v], but got [%s]", expectedHashName, ts.Hash().Name()) - } - } -} - -func TestIteration(t *testing.T) { - headerTests := []struct { - expectedSum string // TODO(vbatts) it would be nice to get individual sums of each - version Version - hdr *tar.Header - data []byte - }{ - { - "tarsum+sha256:626c4a2e9a467d65c33ae81f7f3dedd4de8ccaee72af73223c4bc4718cbc7bbd", - Version0, - &tar.Header{ - Name: "file.txt", 
- Size: 0, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte(""), - }, - { - "tarsum.dev+sha256:6ffd43a1573a9913325b4918e124ee982a99c0f3cba90fc032a65f5e20bdd465", - VersionDev, - &tar.Header{ - Name: "file.txt", - Size: 0, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte(""), - }, - { - "tarsum.dev+sha256:b38166c059e11fb77bef30bf16fba7584446e80fcc156ff46d47e36c5305d8ef", - VersionDev, - &tar.Header{ - Name: "another.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Devminor: 0, - Devmajor: 0, - }, - []byte("test"), - }, - { - "tarsum.dev+sha256:4cc2e71ac5d31833ab2be9b4f7842a14ce595ec96a37af4ed08f87bc374228cd", - VersionDev, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.key1": "value1", - "user.key2": "value2", - }, - }, - []byte("test"), - }, - { - "tarsum.dev+sha256:65f4284fa32c0d4112dd93c3637697805866415b570587e4fd266af241503760", - VersionDev, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.KEY1": "value1", // adding different case to ensure different sum - "user.key2": "value2", - }, - }, - []byte("test"), - }, - { - "tarsum+sha256:c12bb6f1303a9ddbf4576c52da74973c00d14c109bcfa76b708d5da1154a07fa", - Version0, - &tar.Header{ - Name: "xattrs.txt", - Uid: 1000, - Gid: 1000, - Uname: "slartibartfast", - Gname: "users", - Size: 4, - Typeflag: tar.TypeReg, - Xattrs: map[string]string{ - "user.NOT": "CALCULATED", - }, - }, - []byte("test"), - }, - } - for _, htest := range headerTests { - s, err := renderSumForHeader(htest.version, htest.hdr, htest.data) - if err != nil { - t.Fatal(err) - } - - if s != htest.expectedSum { - t.Errorf("expected sum: %q, got: %q", htest.expectedSum, s) - } - } - -} - -func renderSumForHeader(v Version, h *tar.Header, data []byte) (string, error) { - buf := bytes.NewBuffer(nil) - // first build our test tar - tw := tar.NewWriter(buf) - if err := tw.WriteHeader(h); err != nil { - return "", err - } - if _, err := tw.Write(data); err != nil { - return "", err - } - tw.Close() - - ts, err := NewTarSum(buf, true, v) - if err != nil { - return "", err - } - tr := tar.NewReader(ts) - for { - hdr, err := tr.Next() - if hdr == nil || err == io.EOF { - // Signals the end of the archive. 
- break - } - if err != nil { - return "", err - } - if _, err = io.Copy(ioutil.Discard, tr); err != nil { - return "", err - } - } - return ts.Sum(nil), nil -} - -func Benchmark9kTar(b *testing.B) { - buf := bytes.NewBuffer([]byte{}) - fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") - if err != nil { - b.Error(err) - return - } - n, err := io.Copy(buf, fh) - if err != nil { - b.Error(err) - return - } - fh.Close() - - reader := bytes.NewReader(buf.Bytes()) - - b.SetBytes(n) - b.ResetTimer() - for i := 0; i < b.N; i++ { - reader.Seek(0, 0) - ts, err := NewTarSum(reader, true, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - } -} - -func Benchmark9kTarGzip(b *testing.B) { - buf := bytes.NewBuffer([]byte{}) - fh, err := os.Open("testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar") - if err != nil { - b.Error(err) - return - } - n, err := io.Copy(buf, fh) - if err != nil { - b.Error(err) - return - } - fh.Close() - - reader := bytes.NewReader(buf.Bytes()) - - b.SetBytes(n) - b.ResetTimer() - for i := 0; i < b.N; i++ { - reader.Seek(0, 0) - ts, err := NewTarSum(reader, false, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - } -} - -// this is a single big file in the tar archive -func Benchmark1mbSingleFileTar(b *testing.B) { - benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, false) -} - -// this is a single big file in the tar archive -func Benchmark1mbSingleFileTarGzip(b *testing.B) { - benchmarkTar(b, sizedOptions{1, 1024 * 1024, true, true}, true) -} - -// this is 1024 1k files in the tar archive -func Benchmark1kFilesTar(b *testing.B) { - benchmarkTar(b, sizedOptions{1024, 1024, true, true}, false) -} - -// this is 1024 1k files in the tar archive -func Benchmark1kFilesTarGzip(b *testing.B) { - benchmarkTar(b, sizedOptions{1024, 1024, true, true}, true) -} - -func benchmarkTar(b *testing.B, opts sizedOptions, isGzip bool) { - var fh *os.File - tarReader := sizedTar(opts) - if br, ok := tarReader.(*os.File); ok { - fh = br - } - defer os.Remove(fh.Name()) - defer fh.Close() - - b.SetBytes(opts.size * opts.num) - b.ResetTimer() - for i := 0; i < b.N; i++ { - ts, err := NewTarSum(fh, !isGzip, Version0) - if err != nil { - b.Error(err) - return - } - io.Copy(ioutil.Discard, ts) - ts.Sum(nil) - fh.Seek(0, 0) - } -} diff --git a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json deleted file mode 100644 index 48e2af349c..0000000000 --- a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/json +++ /dev/null @@ -1 +0,0 @@ -{"id":"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457","parent":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","created":"2014-04-07T02:45:52.610504484Z","container":"e0f07f8d72cae171a3dcc35859960e7e956e0628bce6fedc4122bf55b2c287c7","container_config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":["/bin/sh","-c","sed -ri 's/^(%wheel.*)(ALL)$/\\1NOPASSWD: \\2/' 
/etc/sudoers"],"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"docker_version":"0.9.1-dev","config":{"Hostname":"88807319f25e","Domainname":"","User":"","Memory":0,"MemorySwap":0,"CpuShares":0,"AttachStdin":false,"AttachStdout":false,"AttachStderr":false,"ExposedPorts":null,"Tty":false,"OpenStdin":false,"StdinOnce":false,"Env":["HOME=/","PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"],"Cmd":null,"Image":"def3f9165934325dfd027c86530b2ea49bb57a0963eb1336b3a0415ff6fd56de","Volumes":null,"WorkingDir":"","Entrypoint":null,"NetworkDisabled":false,"OnBuild":[]},"architecture":"amd64","os":"linux","Size":3425} \ No newline at end of file diff --git a/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar b/pkg/tarsum/testdata/46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar deleted file mode 100644 index dfd5c204aea77673f13fdd2f81cb4af1c155c00c..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 9216 zcmeHMYfsx)8s=;H6|bl&iYAZ?p-5<1$(y*vYHk~c?XX{vu}|fzRr1|&zyvK1! zQq)nWWVPA}63Myvy*}^F5Qtg*V8=g=M!Ru&adFTnf40B*^q|=~Z#CM@#>M%EgGRH_ zXtfULV#j(J_Jz`34wZgZ*0ym!%kRHL9{_(p&BZRoHJYu)<>loz?$!PU{9Bjp<^i?p zS)Tg!r=9Az$G@(0Ao6^75%A;qpMSV)ukcqQn%1X5y|oh!_xLmZX`y%GUBmQG;D6af z{a@yPg@1D=8t(B&ZtcXgE2ck=f9pf*x&ANlU$J}L#UB59rsJ=#>(otde**vZ1?PXJ z)y|dMh8z!Kfh=;zN!B|J)*y8)L$Hbq5c2K_rK=l{{8R8czxwV#$Odd zDsuJ8oS)h8`+U3IsNVOszdy8F?XCC!X1jHMK)Xr!XT8koFP{Hz-;!IxPhJ$Ib48h# zYv~t}ms6n-7Nk?ki-cxgF4IDhpT@D51d2R$2x=V)%F|Svhif#KI>gHaB|@O7JU(A% zo>KEP56(cuboN&-&LROexgfmf&txD1^0c9NNVQI5N~dNwm64!nnnQFH317=JF`{vu zi^$WUtCWHQq4Y!Yy@W{oRoV29sUd<=@!~sJ;!ok8>_qYfz|Ch12+9P6$8i`#qvqS zhsLT-8QL!zwhRx(aXaYF&PwD5LLOm%T#Ds>) z{YV0A>qPL*aFLnz9*nfyl@!I3_Ss=Y=MKNEA zG8|$lPj#9`#(W1sgCgK@f)P?2A)0uPB8Gf6TLITOAl@|29e$jAvBox=W-QCrr59N% zKg$7Xy=69F7QR_X7D_-i2hs*J)6%&RIBr9LDPPP_-? z-X`DPuwzY(j+Gk=rWL_Msfvvp-prW$3W(MwPPgEZO^EI!{*XIAuLp zlpj9k85vO{{2kR4hD{4c;~{+QmhNVfq;xeepJc>QQ@QJfEkdQVBbPJuiA~nsv9l~O zrN&UpxC9i`6;rQ>v?7%WUrr@(gXOs4JE=IN=}4(?RS=2GEd9-ogTEiuP>Fqyb6;vM ziV-Q;Z|ZT?Vz^rPk?`^}6a`cC_=9V1=*>jc&y0jq{h|=m&BK+Jpv}ea1?sKVi^Gj` zk<9K*;4?gK^?Jl6-g0L4kQcX>OZUHi{>Odi#u~f!gnqSdCpW{f zGr2q31WO6O$i;nz9#NH-D^8Rv6Xcv%XFkhmyBsZ;8k2ftd;fPtN1v+`G zPRv~5E)wm1y}~(Py9GwK;`;9K2C_2#(Rc=qFBTa z>?ZUNHvSmq9G9)M%0u+CW!J=jv1~Clz-avUIImk%<&=a9uI;2EY~~stiCKTsh|Oow<5; z$eY1%WV!B_?iFikc)C2TV46YQucl=WfmM#jY|_4sK>Njf)j#u#Y{x@V_A!c2o<`D? zX*2YQ4A)U054Qh4y3hVk?0?5^Us~rh*TViU9vl!r009ILKmY**5I_I{1Q0*~0R#|0 Y009ILKmY**5I_I{1Q0*~fqxTt0{2EK)Bpeg diff --git a/pkg/tarsum/testdata/collision/collision-2.tar b/pkg/tarsum/testdata/collision/collision-2.tar deleted file mode 100644 index 7b5c04a9644808851fcccab5c3c240bf342abd93..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 10240 zcmeIuF%E+;425COJw=XS2L~?Dp<74P5hRe1I+e8NZ(w35>V(Abzr};)_<@(2e`|Ha`Z>GG~@_KYd${~ON w0tg_000IagfB*srAbVE5xzPBd+@To)G|2840byWhU|?oqf;;~Mb02E{2kHRk de~R-YhD)#rjPU%AB}7JrMnhmU1V%^*0091(G-Ch& diff --git a/pkg/tarsum/versioning.go b/pkg/tarsum/versioning.go deleted file mode 100644 index 2882286854..0000000000 --- a/pkg/tarsum/versioning.go +++ /dev/null @@ -1,150 +0,0 @@ -package tarsum - -import ( - "archive/tar" - "errors" - "sort" - "strconv" - "strings" -) - -// Version is used for versioning of the TarSum algorithm -// based on the prefix of the hash used -// i.e. 
"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b" -type Version int - -// Prefix of "tarsum" -const ( - Version0 Version = iota - Version1 - // VersionDev this constant will be either the latest or an unsettled next-version of the TarSum calculation - VersionDev -) - -// VersionLabelForChecksum returns the label for the given tarsum -// checksum, i.e., everything before the first `+` character in -// the string or an empty string if no label separator is found. -func VersionLabelForChecksum(checksum string) string { - // Checksums are in the form: {versionLabel}+{hashID}:{hex} - sepIndex := strings.Index(checksum, "+") - if sepIndex < 0 { - return "" - } - return checksum[:sepIndex] -} - -// GetVersions gets a list of all known tarsum versions. -func GetVersions() []Version { - v := []Version{} - for k := range tarSumVersions { - v = append(v, k) - } - return v -} - -var ( - tarSumVersions = map[Version]string{ - Version0: "tarsum", - Version1: "tarsum.v1", - VersionDev: "tarsum.dev", - } - tarSumVersionsByName = map[string]Version{ - "tarsum": Version0, - "tarsum.v1": Version1, - "tarsum.dev": VersionDev, - } -) - -func (tsv Version) String() string { - return tarSumVersions[tsv] -} - -// GetVersionFromTarsum returns the Version from the provided string. -func GetVersionFromTarsum(tarsum string) (Version, error) { - tsv := tarsum - if strings.Contains(tarsum, "+") { - tsv = strings.SplitN(tarsum, "+", 2)[0] - } - for v, s := range tarSumVersions { - if s == tsv { - return v, nil - } - } - return -1, ErrNotVersion -} - -// Errors that may be returned by functions in this package -var ( - ErrNotVersion = errors.New("string does not include a TarSum Version") - ErrVersionNotImplemented = errors.New("TarSum Version is not yet implemented") -) - -// tarHeaderSelector is the interface which different versions -// of tarsum should use for selecting and ordering tar headers -// for each item in the archive. -type tarHeaderSelector interface { - selectHeaders(h *tar.Header) (orderedHeaders [][2]string) -} - -type tarHeaderSelectFunc func(h *tar.Header) (orderedHeaders [][2]string) - -func (f tarHeaderSelectFunc) selectHeaders(h *tar.Header) (orderedHeaders [][2]string) { - return f(h) -} - -func v0TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - return [][2]string{ - {"name", h.Name}, - {"mode", strconv.FormatInt(h.Mode, 10)}, - {"uid", strconv.Itoa(h.Uid)}, - {"gid", strconv.Itoa(h.Gid)}, - {"size", strconv.FormatInt(h.Size, 10)}, - {"mtime", strconv.FormatInt(h.ModTime.UTC().Unix(), 10)}, - {"typeflag", string([]byte{h.Typeflag})}, - {"linkname", h.Linkname}, - {"uname", h.Uname}, - {"gname", h.Gname}, - {"devmajor", strconv.FormatInt(h.Devmajor, 10)}, - {"devminor", strconv.FormatInt(h.Devminor, 10)}, - } -} - -func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { - // Get extended attributes. - xAttrKeys := make([]string, len(h.Xattrs)) - for k := range h.Xattrs { - xAttrKeys = append(xAttrKeys, k) - } - sort.Strings(xAttrKeys) - - // Make the slice with enough capacity to hold the 11 basic headers - // we want from the v0 selector plus however many xattrs we have. - orderedHeaders = make([][2]string, 0, 11+len(xAttrKeys)) - - // Copy all headers from v0 excluding the 'mtime' header (the 5th element). - v0headers := v0TarHeaderSelect(h) - orderedHeaders = append(orderedHeaders, v0headers[0:5]...) - orderedHeaders = append(orderedHeaders, v0headers[6:]...) - - // Finally, append the sorted xattrs. 
- for _, k := range xAttrKeys { - orderedHeaders = append(orderedHeaders, [2]string{k, h.Xattrs[k]}) - } - - return -} - -var registeredHeaderSelectors = map[Version]tarHeaderSelectFunc{ - Version0: v0TarHeaderSelect, - Version1: v1TarHeaderSelect, - VersionDev: v1TarHeaderSelect, -} - -func getTarHeaderSelector(v Version) (tarHeaderSelector, error) { - headerSelector, ok := registeredHeaderSelectors[v] - if !ok { - return nil, ErrVersionNotImplemented - } - - return headerSelector, nil -} diff --git a/pkg/tarsum/versioning_test.go b/pkg/tarsum/versioning_test.go deleted file mode 100644 index 88e0a5783c..0000000000 --- a/pkg/tarsum/versioning_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package tarsum - -import ( - "testing" -) - -func TestVersionLabelForChecksum(t *testing.T) { - version := VersionLabelForChecksum("tarsum+sha256:deadbeef") - if version != "tarsum" { - t.Fatalf("Version should have been 'tarsum', was %v", version) - } - version = VersionLabelForChecksum("tarsum.v1+sha256:deadbeef") - if version != "tarsum.v1" { - t.Fatalf("Version should have been 'tarsum.v1', was %v", version) - } - version = VersionLabelForChecksum("something+somethingelse") - if version != "something" { - t.Fatalf("Version should have been 'something', was %v", version) - } - version = VersionLabelForChecksum("invalidChecksum") - if version != "" { - t.Fatalf("Version should have been empty, was %v", version) - } -} - -func TestVersion(t *testing.T) { - expected := "tarsum" - var v Version - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } - - expected = "tarsum.v1" - v = 1 - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } - - expected = "tarsum.dev" - v = 2 - if v.String() != expected { - t.Errorf("expected %q, got %q", expected, v.String()) - } -} - -func TestGetVersion(t *testing.T) { - testSet := []struct { - Str string - Expected Version - }{ - {"tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", Version0}, - {"tarsum+sha256", Version0}, - {"tarsum", Version0}, - {"tarsum.dev", VersionDev}, - {"tarsum.dev+sha256:deadbeef", VersionDev}, - } - - for _, ts := range testSet { - v, err := GetVersionFromTarsum(ts.Str) - if err != nil { - t.Fatalf("%q : %s", err, ts.Str) - } - if v != ts.Expected { - t.Errorf("expected %d (%q), got %d (%q)", ts.Expected, ts.Expected, v, v) - } - } - - // test one that does not exist, to ensure it errors - str := "weak+md5:abcdeabcde" - _, err := GetVersionFromTarsum(str) - if err != ErrNotVersion { - t.Fatalf("%q : %s", err, str) - } -} - -func TestGetVersions(t *testing.T) { - expected := []Version{ - Version0, - Version1, - VersionDev, - } - versions := GetVersions() - if len(versions) != len(expected) { - t.Fatalf("Expected %v versions, got %v", len(expected), len(versions)) - } - if !containsVersion(versions, expected[0]) || !containsVersion(versions, expected[1]) || !containsVersion(versions, expected[2]) { - t.Fatalf("Expected [%v], got [%v]", expected, versions) - } -} - -func containsVersion(versions []Version, version Version) bool { - for _, v := range versions { - if v == version { - return true - } - } - return false -} diff --git a/pkg/tarsum/writercloser.go b/pkg/tarsum/writercloser.go deleted file mode 100644 index 9727ecde3e..0000000000 --- a/pkg/tarsum/writercloser.go +++ /dev/null @@ -1,22 +0,0 @@ -package tarsum - -import ( - "io" -) - -type writeCloseFlusher interface { - io.WriteCloser - Flush() error -} - -type nopCloseFlusher struct { - io.Writer 
-} - -func (n *nopCloseFlusher) Close() error { - return nil -} - -func (n *nopCloseFlusher) Flush() error { - return nil -} diff --git a/pkg/term/ascii.go b/pkg/term/ascii.go deleted file mode 100644 index f5262bccf5..0000000000 --- a/pkg/term/ascii.go +++ /dev/null @@ -1,66 +0,0 @@ -package term - -import ( - "fmt" - "strings" -) - -// ASCII lists the supported ASCII key sequences -var ASCII = []string{ - "ctrl-@", - "ctrl-a", - "ctrl-b", - "ctrl-c", - "ctrl-d", - "ctrl-e", - "ctrl-f", - "ctrl-g", - "ctrl-h", - "ctrl-i", - "ctrl-j", - "ctrl-k", - "ctrl-l", - "ctrl-m", - "ctrl-n", - "ctrl-o", - "ctrl-p", - "ctrl-q", - "ctrl-r", - "ctrl-s", - "ctrl-t", - "ctrl-u", - "ctrl-v", - "ctrl-w", - "ctrl-x", - "ctrl-y", - "ctrl-z", - "ctrl-[", - "ctrl-\\", - "ctrl-]", - "ctrl-^", - "ctrl-_", -} - -// ToBytes converts a string representing a sequence of keys to the corresponding ASCII codes. -func ToBytes(keys string) ([]byte, error) { - codes := []byte{} -next: - for _, key := range strings.Split(keys, ",") { - if len(key) != 1 { - for code, ctrl := range ASCII { - if ctrl == key { - codes = append(codes, byte(code)) - continue next - } - } - if key == "DEL" { - codes = append(codes, 127) - } else { - return nil, fmt.Errorf("Unknown character: '%s'", key) - } - } else { - codes = append(codes, byte(key[0])) - } - } - return codes, nil -} diff --git a/pkg/term/ascii_test.go b/pkg/term/ascii_test.go deleted file mode 100644 index 4a1e7f302c..0000000000 --- a/pkg/term/ascii_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package term - -import "testing" - -func TestToBytes(t *testing.T) { - codes, err := ToBytes("ctrl-a,a") - if err != nil { - t.Fatal(err) - } - if len(codes) != 2 { - t.Fatalf("Expected 2 codes, got %d", len(codes)) - } - if codes[0] != 1 || codes[1] != 97 { - t.Fatalf("Expected '1' '97', got '%d' '%d'", codes[0], codes[1]) - } - - codes, err = ToBytes("shift-z") - if err == nil { - t.Fatalf("Expected error, got none") - } - - codes, err = ToBytes("ctrl-@,ctrl-[,~,ctrl-o") - if err != nil { - t.Fatal(err) - } - if len(codes) != 4 { - t.Fatalf("Expected 4 codes, got %d", len(codes)) - } - if codes[0] != 0 || codes[1] != 27 || codes[2] != 126 || codes[3] != 15 { - t.Fatalf("Expected '0' '27' '126' '15', got '%d' '%d' '%d' '%d'", codes[0], codes[1], codes[2], codes[3]) - } - - codes, err = ToBytes("DEL,+") - if err != nil { - t.Fatal(err) - } - if len(codes) != 2 { - t.Fatalf("Expected 2 codes, got %d", len(codes)) - } - if codes[0] != 127 || codes[1] != 43 { - t.Fatalf("Expected '127' '43', got '%d' '%d'", codes[0], codes[1]) - } -} diff --git a/pkg/term/tc_linux_cgo.go b/pkg/term/tc_linux_cgo.go deleted file mode 100644 index 59dac5ba8e..0000000000 --- a/pkg/term/tc_linux_cgo.go +++ /dev/null @@ -1,50 +0,0 @@ -// +build linux,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include <termios.h> -import "C" - -// Termios is the Unix API for terminal I/O. -// It is a passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored.
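A usage sketch for the ToBytes helper above (assuming the package import path github.com/docker/docker/pkg/term); the expected output follows from ctrl-p and ctrl-q sitting at indexes 16 and 17 of the ASCII table:

```go
package main

import (
	"fmt"

	"github.com/docker/docker/pkg/term"
)

func main() {
	// Parse Docker's default detach key sequence. "ctrl-p" and "ctrl-q"
	// are positions 16 and 17 in the ASCII table above.
	codes, err := term.ToBytes("ctrl-p,ctrl-q")
	if err != nil {
		panic(err)
	}
	fmt.Printf("% x\n", codes) // prints "10 11" (hex for 16 and 17)
}
```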
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - C.cfmakeraw((*C.struct_termios)(unsafe.Pointer(&newState))) - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/pkg/term/tc_other.go b/pkg/term/tc_other.go deleted file mode 100644 index 750d7c3f60..0000000000 --- a/pkg/term/tc_other.go +++ /dev/null @@ -1,20 +0,0 @@ -// +build !windows -// +build !linux !cgo -// +build !solaris !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -func tcget(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(p))) - return err -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(p))) - return err -} diff --git a/pkg/term/tc_solaris_cgo.go b/pkg/term/tc_solaris_cgo.go deleted file mode 100644 index c9139d0ca8..0000000000 --- a/pkg/term/tc_solaris_cgo.go +++ /dev/null @@ -1,63 +0,0 @@ -// +build solaris,cgo - -package term - -import ( - "syscall" - "unsafe" -) - -// #include <termios.h> -import "C" - -// Termios is the Unix API for terminal I/O. -// It is a passthrough for syscall.Termios in order to make it portable with -// other platforms where it is not available or handled differently. -type Termios syscall.Termios - -// MakeRaw puts the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON | syscall.IXANY) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - /* - VMIN is the minimum number of characters that need to be read in non-canonical mode for a read to return. - Since VMIN is overloaded with another element in canonical mode, it defaults to 4 when we switch modes. It - needs to be explicitly set to 1.
- */ - newState.Cc[C.VMIN] = 1 - newState.Cc[C.VTIME] = 0 - - if err := tcset(fd, &newState); err != 0 { - return nil, err - } - return &oldState, nil -} - -func tcget(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcgetattr(C.int(fd), (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} - -func tcset(fd uintptr, p *Termios) syscall.Errno { - ret, err := C.tcsetattr(C.int(fd), C.TCSANOW, (*C.struct_termios)(unsafe.Pointer(p))) - if ret != 0 { - return err.(syscall.Errno) - } - return 0 -} diff --git a/pkg/term/term.go b/pkg/term/term.go deleted file mode 100644 index 1609a900a9..0000000000 --- a/pkg/term/term.go +++ /dev/null @@ -1,117 +0,0 @@ -// +build !windows - -// Package term provides structures and helper functions to work with -// terminals (state, sizes). -package term - -import ( - "errors" - "io" - "os" - "os/signal" - "syscall" -) - -var ( - // ErrInvalidState is returned if the state of the terminal is invalid. - ErrInvalidState = errors.New("Invalid terminal state") -) - -// State represents the state of the terminal. -type State struct { - termios Termios -} - -// Winsize represents the size of the terminal window. -type Winsize struct { - Height uint16 - Width uint16 - x uint16 - y uint16 -} - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - return os.Stdin, os.Stdout, os.Stderr -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - var inFd uintptr - var isTerminalIn bool - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminalIn = IsTerminal(inFd) - } - return inFd, isTerminalIn -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - var termios Termios - return tcget(fd, &termios) == 0 -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - if state == nil { - return ErrInvalidState - } - if err := tcset(fd, &state.termios); err != 0 { - return err - } - return nil -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - var oldState State - if err := tcget(fd, &oldState.termios); err != 0 { - return nil, err - } - - return &oldState, nil -} - -// DisableEcho applies the specified state to the terminal connected to the file -// descriptor, with echo disabled. -func DisableEcho(fd uintptr, state *State) error { - newState := state.termios - newState.Lflag &^= syscall.ECHO - - if err := tcset(fd, &newState); err != 0 { - return err - } - handleInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - oldState, err := MakeRaw(fd) - if err != nil { - return nil, err - } - handleInterrupt(fd, oldState) - return oldState, err -} - -// SetRawTerminalOutput puts the output of the terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation.
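The package API above composes into a common pattern; a minimal usage sketch, assuming stdin is wired to a real terminal. The defer matters: a process that exits while still in raw mode leaves the shell unusable:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/term"
)

func main() {
	fd, isTerm := term.GetFdInfo(os.Stdin)
	if !isTerm {
		fmt.Fprintln(os.Stderr, "stdin is not a terminal")
		return
	}
	state, err := term.SetRawTerminal(fd)
	if err != nil {
		panic(err)
	}
	// Restore the saved state no matter how we leave.
	defer term.RestoreTerminal(fd, state)

	buf := make([]byte, 1)
	os.Stdin.Read(buf) // raw mode: one byte, no echo, no line buffering
	fmt.Printf("\r\nread byte %q\r\n", buf[0])
}
```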
-func SetRawTerminalOutput(fd uintptr) (*State, error) { - return nil, nil -} - -func handleInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - }() -} diff --git a/pkg/term/term_solaris.go b/pkg/term/term_solaris.go deleted file mode 100644 index 112debbec5..0000000000 --- a/pkg/term/term_solaris.go +++ /dev/null @@ -1,41 +0,0 @@ -// +build solaris - -package term - -import ( - "syscall" - "unsafe" -) - -/* -#include <unistd.h> -#include <stropts.h> -#include <termios.h> - -// Small wrapper to get rid of variadic args of ioctl() -int my_ioctl(int fd, int cmd, struct winsize *ws) { - return ioctl(fd, cmd, ws); -} -*/ -import "C" - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCGWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - ret, err := C.my_ioctl(C.int(fd), C.int(syscall.TIOCSWINSZ), (*C.struct_winsize)(unsafe.Pointer(ws))) - // Skip retval = 0 - if ret == 0 { - return nil - } - return err -} diff --git a/pkg/term/term_unix.go b/pkg/term/term_unix.go deleted file mode 100644 index ddf87a0e58..0000000000 --- a/pkg/term/term_unix.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build !solaris,!windows - -package term - -import ( - "syscall" - "unsafe" -) - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - ws := &Winsize{} - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCGWINSZ), uintptr(unsafe.Pointer(ws))) - // Skip errno = 0 - if err == 0 { - return ws, nil - } - return ws, err -} - -// SetWinsize tries to set the specified window size for the specified file descriptor. -func SetWinsize(fd uintptr, ws *Winsize) error { - _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(syscall.TIOCSWINSZ), uintptr(unsafe.Pointer(ws))) - // Skip errno = 0 - if err == 0 { - return nil - } - return err -} diff --git a/pkg/term/term_windows.go b/pkg/term/term_windows.go deleted file mode 100644 index 11a16fdea8..0000000000 --- a/pkg/term/term_windows.go +++ /dev/null @@ -1,233 +0,0 @@ -// +build windows - -package term - -import ( - "io" - "os" - "os/signal" - "syscall" - - "github.com/Azure/go-ansiterm/winterm" - "github.com/docker/docker/pkg/term/windows" -) - -// State holds the console mode for the terminal. -type State struct { - mode uint32 -} - -// Winsize is used for window size. -type Winsize struct { - Height uint16 - Width uint16 -} - -const ( - // https://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx - enableVirtualTerminalInput = 0x0200 - enableVirtualTerminalProcessing = 0x0004 - disableNewlineAutoReturn = 0x0008 -) - -// vtInputSupported is true if enableVirtualTerminalInput is supported by the console -var vtInputSupported bool - -// StdStreams returns the standard streams (stdin, stdout, stderr). -func StdStreams() (stdIn io.ReadCloser, stdOut, stdErr io.Writer) { - // Turn on VT handling on all std handles, if possible. This might - // fail, in which case we will fall back to terminal emulation.
- var emulateStdin, emulateStdout, emulateStderr bool - fd := os.Stdin.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate that enableVirtualTerminalInput is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalInput); err != nil { - emulateStdin = true - } else { - vtInputSupported = true - } - // Unconditionally set the console mode back even on failure because SetConsoleMode - // remembers invalid bits on input handles. - winterm.SetConsoleMode(fd, mode) - } - - fd = os.Stdout.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { - emulateStdout = true - } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) - } - } - - fd = os.Stderr.Fd() - if mode, err := winterm.GetConsoleMode(fd); err == nil { - // Validate disableNewlineAutoReturn is supported, but do not set it. - if err = winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing|disableNewlineAutoReturn); err != nil { - emulateStderr = true - } else { - winterm.SetConsoleMode(fd, mode|enableVirtualTerminalProcessing) - } - } - - if os.Getenv("ConEmuANSI") == "ON" { - // The ConEmu terminal emulates ANSI on output streams well. - emulateStdin = true - emulateStdout = false - emulateStderr = false - } - - if emulateStdin { - stdIn = windows.NewAnsiReader(syscall.STD_INPUT_HANDLE) - } else { - stdIn = os.Stdin - } - - if emulateStdout { - stdOut = windows.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE) - } else { - stdOut = os.Stdout - } - - if emulateStderr { - stdErr = windows.NewAnsiWriter(syscall.STD_ERROR_HANDLE) - } else { - stdErr = os.Stderr - } - - return -} - -// GetFdInfo returns the file descriptor for an os.File and indicates whether the file represents a terminal. -func GetFdInfo(in interface{}) (uintptr, bool) { - return windows.GetHandleInfo(in) -} - -// GetWinsize returns the window size based on the specified file descriptor. -func GetWinsize(fd uintptr) (*Winsize, error) { - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil, err - } - - winsize := &Winsize{ - Width: uint16(info.Window.Right - info.Window.Left + 1), - Height: uint16(info.Window.Bottom - info.Window.Top + 1), - } - - return winsize, nil -} - -// IsTerminal returns true if the given file descriptor is a terminal. -func IsTerminal(fd uintptr) bool { - return windows.IsConsole(fd) -} - -// RestoreTerminal restores the terminal connected to the given file descriptor -// to a previous state. -func RestoreTerminal(fd uintptr, state *State) error { - return winterm.SetConsoleMode(fd, state.mode) -} - -// SaveState saves the state of the terminal connected to the given file descriptor. -func SaveState(fd uintptr) (*State, error) { - mode, e := winterm.GetConsoleMode(fd) - if e != nil { - return nil, e - } - - return &State{mode: mode}, nil -} - -// DisableEcho disables echo for the terminal connected to the given file descriptor. 
-// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx -func DisableEcho(fd uintptr, state *State) error { - mode := state.mode - mode &^= winterm.ENABLE_ECHO_INPUT - mode |= winterm.ENABLE_PROCESSED_INPUT | winterm.ENABLE_LINE_INPUT - err := winterm.SetConsoleMode(fd, mode) - if err != nil { - return err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return nil -} - -// SetRawTerminal puts the terminal connected to the given file descriptor into -// raw mode and returns the previous state. On UNIX, this puts both the input -// and output into raw mode. On Windows, it only puts the input into raw mode. -func SetRawTerminal(fd uintptr) (*State, error) { - state, err := MakeRaw(fd) - if err != nil { - return nil, err - } - - // Register an interrupt handler to catch and restore prior state - restoreAtInterrupt(fd, state) - return state, err -} - -// SetRawTerminalOutput puts the output of terminal connected to the given file -// descriptor into raw mode. On UNIX, this does nothing and returns nil for the -// state. On Windows, it disables LF -> CRLF translation. -func SetRawTerminalOutput(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - // Ignore failures, since disableNewlineAutoReturn might not be supported on this - // version of Windows. - winterm.SetConsoleMode(fd, state.mode|disableNewlineAutoReturn) - return state, err -} - -// MakeRaw puts the terminal (Windows Console) connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be restored. -func MakeRaw(fd uintptr) (*State, error) { - state, err := SaveState(fd) - if err != nil { - return nil, err - } - - mode := state.mode - - // See - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx - // -- https://msdn.microsoft.com/en-us/library/windows/desktop/ms683462(v=vs.85).aspx - - // Disable these modes - mode &^= winterm.ENABLE_ECHO_INPUT - mode &^= winterm.ENABLE_LINE_INPUT - mode &^= winterm.ENABLE_MOUSE_INPUT - mode &^= winterm.ENABLE_WINDOW_INPUT - mode &^= winterm.ENABLE_PROCESSED_INPUT - - // Enable these modes - mode |= winterm.ENABLE_EXTENDED_FLAGS - mode |= winterm.ENABLE_INSERT_MODE - mode |= winterm.ENABLE_QUICK_EDIT_MODE - if vtInputSupported { - mode |= enableVirtualTerminalInput - } - - err = winterm.SetConsoleMode(fd, mode) - if err != nil { - return nil, err - } - return state, nil -} - -func restoreAtInterrupt(fd uintptr, state *State) { - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, os.Interrupt) - - go func() { - _ = <-sigchan - RestoreTerminal(fd, state) - os.Exit(0) - }() -} diff --git a/pkg/term/termios_darwin.go b/pkg/term/termios_darwin.go deleted file mode 100644 index 480db900ac..0000000000 --- a/pkg/term/termios_darwin.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. 
-const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint64 - Oflag uint64 - Cflag uint64 - Lflag uint64 - Cc [20]byte - Ispeed uint64 - Ospeed uint64 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/pkg/term/termios_freebsd.go b/pkg/term/termios_freebsd.go deleted file mode 100644 index ed843ad69c..0000000000 --- a/pkg/term/termios_freebsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/pkg/term/termios_linux.go b/pkg/term/termios_linux.go deleted file mode 100644 index 22921b6aef..0000000000 --- a/pkg/term/termios_linux.go +++ /dev/null @@ -1,47 +0,0 @@ -// +build !cgo - -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TCGETS - setTermios = syscall.TCSETS -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. -func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, getTermios, uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - - newState.Iflag &^= (syscall.IGNBRK | syscall.BRKINT | syscall.PARMRK | syscall.ISTRIP | syscall.INLCR | syscall.IGNCR | syscall.ICRNL | syscall.IXON) - newState.Oflag &^= syscall.OPOST - newState.Lflag &^= (syscall.ECHO | syscall.ECHONL | syscall.ICANON | syscall.ISIG | syscall.IEXTEN) - newState.Cflag &^= (syscall.CSIZE | syscall.PARENB) - newState.Cflag |= syscall.CS8 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, setTermios, uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - return &oldState, nil -} diff --git a/pkg/term/termios_openbsd.go b/pkg/term/termios_openbsd.go deleted file mode 100644 index ed843ad69c..0000000000 --- a/pkg/term/termios_openbsd.go +++ /dev/null @@ -1,69 +0,0 @@ -package term - -import ( - "syscall" - "unsafe" -) - -const ( - getTermios = syscall.TIOCGETA - setTermios = syscall.TIOCSETA -) - -// Termios magic numbers, passthrough to the ones defined in syscall. -const ( - IGNBRK = syscall.IGNBRK - PARMRK = syscall.PARMRK - INLCR = syscall.INLCR - IGNCR = syscall.IGNCR - ECHONL = syscall.ECHONL - CSIZE = syscall.CSIZE - ICRNL = syscall.ICRNL - ISTRIP = syscall.ISTRIP - PARENB = syscall.PARENB - ECHO = syscall.ECHO - ICANON = syscall.ICANON - ISIG = syscall.ISIG - IXON = syscall.IXON - BRKINT = syscall.BRKINT - INPCK = syscall.INPCK - OPOST = syscall.OPOST - CS8 = syscall.CS8 - IEXTEN = syscall.IEXTEN -) - -// Termios is the Unix API for terminal I/O. -type Termios struct { - Iflag uint32 - Oflag uint32 - Cflag uint32 - Lflag uint32 - Cc [20]byte - Ispeed uint32 - Ospeed uint32 -} - -// MakeRaw put the terminal connected to the given file descriptor into raw -// mode and returns the previous state of the terminal so that it can be -// restored. 
-func MakeRaw(fd uintptr) (*State, error) { - var oldState State - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(getTermios), uintptr(unsafe.Pointer(&oldState.termios))); err != 0 { - return nil, err - } - - newState := oldState.termios - newState.Iflag &^= (IGNBRK | BRKINT | PARMRK | ISTRIP | INLCR | IGNCR | ICRNL | IXON) - newState.Oflag &^= OPOST - newState.Lflag &^= (ECHO | ECHONL | ICANON | ISIG | IEXTEN) - newState.Cflag &^= (CSIZE | PARENB) - newState.Cflag |= CS8 - newState.Cc[syscall.VMIN] = 1 - newState.Cc[syscall.VTIME] = 0 - - if _, _, err := syscall.Syscall(syscall.SYS_IOCTL, fd, uintptr(setTermios), uintptr(unsafe.Pointer(&newState))); err != 0 { - return nil, err - } - - return &oldState, nil -} diff --git a/pkg/term/windows/ansi_reader.go b/pkg/term/windows/ansi_reader.go deleted file mode 100644 index 58452ad786..0000000000 --- a/pkg/term/windows/ansi_reader.go +++ /dev/null @@ -1,261 +0,0 @@ -// +build windows - -package windows - -import ( - "bytes" - "errors" - "fmt" - "io" - "os" - "strings" - "unsafe" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -const ( - escapeSequence = ansiterm.KEY_ESC_CSI -) - -// ansiReader wraps a standard input file (e.g., os.Stdin) providing ANSI sequence translation. -type ansiReader struct { - file *os.File - fd uintptr - buffer []byte - cbBuffer int - command []byte -} - -// NewAnsiReader returns an io.ReadCloser that provides VT100 terminal emulation on top of a -// Windows console input handle. -func NewAnsiReader(nFile int) io.ReadCloser { - initLogger() - file, fd := winterm.GetStdFile(nFile) - return &ansiReader{ - file: file, - fd: fd, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - buffer: make([]byte, 0), - } -} - -// Close closes the wrapped file. -func (ar *ansiReader) Close() (err error) { - return ar.file.Close() -} - -// Fd returns the file descriptor of the wrapped file. -func (ar *ansiReader) Fd() uintptr { - return ar.fd -} - -// Read reads up to len(p) bytes of translated input events into p. 
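One caveat before the implementation: Read below can legitimately return (0, nil) when no console events are pending, which naive io.Reader consumers may misread as EOF or spin on. A hypothetical retry helper illustrating the contract (a sketch, not part of the patch):

```go
package example

import "io"

// readAtLeastOne keeps polling a reader that may return (0, nil), such as
// the ansiReader below, until at least one byte arrives or an error occurs.
func readAtLeastOne(r io.Reader, p []byte) (int, error) {
	for {
		n, err := r.Read(p)
		if n > 0 || err != nil {
			return n, err
		}
		// n == 0 && err == nil: no input events yet; poll again.
	}
}
```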
-func (ar *ansiReader) Read(p []byte) (int, error) { - if len(p) == 0 { - return 0, nil - } - - // Previously read bytes exist, read as much as we can and return - if len(ar.buffer) > 0 { - logger.Debugf("Reading previously cached bytes") - - originalLength := len(ar.buffer) - copiedLength := copy(p, ar.buffer) - - if copiedLength == originalLength { - ar.buffer = make([]byte, 0, len(p)) - } else { - ar.buffer = ar.buffer[copiedLength:] - } - - logger.Debugf("Read from cache p[%d]: % x", copiedLength, p) - return copiedLength, nil - } - - // Read and translate key events - events, err := readInputEvents(ar.fd, len(p)) - if err != nil { - return 0, err - } else if len(events) == 0 { - logger.Debug("No input events detected") - return 0, nil - } - - keyBytes := translateKeyEvents(events, []byte(escapeSequence)) - - // Save excess bytes and right-size keyBytes - if len(keyBytes) > len(p) { - logger.Debugf("Received %d keyBytes, only room for %d bytes", len(keyBytes), len(p)) - ar.buffer = keyBytes[len(p):] - keyBytes = keyBytes[:len(p)] - } else if len(keyBytes) == 0 { - logger.Debug("No key bytes returned from the translator") - return 0, nil - } - - copiedLength := copy(p, keyBytes) - if copiedLength != len(keyBytes) { - return 0, errors.New("Unexpected copy length encountered.") - } - - logger.Debugf("Read p[%d]: % x", copiedLength, p) - logger.Debugf("Read keyBytes[%d]: % x", copiedLength, keyBytes) - return copiedLength, nil -} - -// readInputEvents polls until at least one event is available. -func readInputEvents(fd uintptr, maxBytes int) ([]winterm.INPUT_RECORD, error) { - // Determine the maximum number of records to retrieve - // -- Cast around the type system to obtain the size of a single INPUT_RECORD. - // unsafe.Sizeof requires an expression vs. a type-reference; the casting - // tricks the type system into believing it has such an expression. 
- recordSize := int(unsafe.Sizeof(*((*winterm.INPUT_RECORD)(unsafe.Pointer(&maxBytes))))) - countRecords := maxBytes / recordSize - if countRecords > ansiterm.MAX_INPUT_EVENTS { - countRecords = ansiterm.MAX_INPUT_EVENTS - } - logger.Debugf("[windows] readInputEvents: Reading %v records (buffer size %v, record size %v)", countRecords, maxBytes, recordSize) - - // Wait for and read input events - events := make([]winterm.INPUT_RECORD, countRecords) - nEvents := uint32(0) - eventsExist, err := winterm.WaitForSingleObject(fd, winterm.WAIT_INFINITE) - if err != nil { - return nil, err - } - - if eventsExist { - err = winterm.ReadConsoleInput(fd, events, &nEvents) - if err != nil { - return nil, err - } - } - - // Return a slice restricted to the number of returned records - logger.Debugf("[windows] readInputEvents: Read %v events", nEvents) - return events[:nEvents], nil -} - -// KeyEvent Translation Helpers - -var arrowKeyMapPrefix = map[uint16]string{ - winterm.VK_UP: "%s%sA", - winterm.VK_DOWN: "%s%sB", - winterm.VK_RIGHT: "%s%sC", - winterm.VK_LEFT: "%s%sD", -} - -var keyMapPrefix = map[uint16]string{ - winterm.VK_UP: "\x1B[%sA", - winterm.VK_DOWN: "\x1B[%sB", - winterm.VK_RIGHT: "\x1B[%sC", - winterm.VK_LEFT: "\x1B[%sD", - winterm.VK_HOME: "\x1B[1%s~", // showkey shows ^[[1 - winterm.VK_END: "\x1B[4%s~", // showkey shows ^[[4 - winterm.VK_INSERT: "\x1B[2%s~", - winterm.VK_DELETE: "\x1B[3%s~", - winterm.VK_PRIOR: "\x1B[5%s~", - winterm.VK_NEXT: "\x1B[6%s~", - winterm.VK_F1: "", - winterm.VK_F2: "", - winterm.VK_F3: "\x1B[13%s~", - winterm.VK_F4: "\x1B[14%s~", - winterm.VK_F5: "\x1B[15%s~", - winterm.VK_F6: "\x1B[17%s~", - winterm.VK_F7: "\x1B[18%s~", - winterm.VK_F8: "\x1B[19%s~", - winterm.VK_F9: "\x1B[20%s~", - winterm.VK_F10: "\x1B[21%s~", - winterm.VK_F11: "\x1B[23%s~", - winterm.VK_F12: "\x1B[24%s~", -} - -// translateKeyEvents converts the input events into the appropriate ANSI string. -func translateKeyEvents(events []winterm.INPUT_RECORD, escapeSequence []byte) []byte { - var buffer bytes.Buffer - for _, event := range events { - if event.EventType == winterm.KEY_EVENT && event.KeyEvent.KeyDown != 0 { - buffer.WriteString(keyToString(&event.KeyEvent, escapeSequence)) - } - } - - return buffer.Bytes() -} - -// keyToString maps the given input event record to the corresponding string. -func keyToString(keyEvent *winterm.KEY_EVENT_RECORD, escapeSequence []byte) string { - if keyEvent.UnicodeChar == 0 { - return formatVirtualKey(keyEvent.VirtualKeyCode, keyEvent.ControlKeyState, escapeSequence) - } - - _, alt, control := getControlKeys(keyEvent.ControlKeyState) - if control { - // TODO(azlinux): Implement the following control sequences - // <Ctrl>-D Signals the end of input from the keyboard; also exits current shell. - // <Ctrl>-H Deletes the first character to the left of the cursor. Also called the ERASE key. - // <Ctrl>-Q Restarts printing after it has been stopped with <Ctrl>-S. - // <Ctrl>-S Suspends printing on the screen (does not stop the program). - // <Ctrl>-U Deletes all characters on the current line. Also called the KILL key. - // <Ctrl>-E Quits current command and creates a core - - } - - // <Alt>+Key generates ESC N Key - if !control && alt { - return ansiterm.KEY_ESC_N + strings.ToLower(string(keyEvent.UnicodeChar)) - } - - return string(keyEvent.UnicodeChar) -} - -// formatVirtualKey converts a virtual key (e.g., up arrow) into the appropriate ANSI string.
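To make the two maps above concrete, assuming ansiterm.KEY_ESC_CSI is the CSI introducer "\x1b[" (an assumption about that constant's value), a plain arrow key and a plain Home key format as follows (sketch):

```go
package main

import "fmt"

func main() {
	escapeSequence := "\x1b[" // assumed value of ansiterm.KEY_ESC_CSI
	upArrow := "%s%sA"        // arrowKeyMapPrefix[winterm.VK_UP]
	home := "\x1B[1%s~"       // keyMapPrefix[winterm.VK_HOME]

	// With no modifier pressed, the modifier argument is the empty string.
	fmt.Printf("%q\n", fmt.Sprintf(upArrow, escapeSequence, "")) // "\x1b[A"
	fmt.Printf("%q\n", fmt.Sprintf(home, ""))                    // "\x1b[1~"
}
```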
-func formatVirtualKey(key uint16, controlState uint32, escapeSequence []byte) string { - shift, alt, control := getControlKeys(controlState) - modifier := getControlKeysModifier(shift, alt, control) - - if format, ok := arrowKeyMapPrefix[key]; ok { - return fmt.Sprintf(format, escapeSequence, modifier) - } - - if format, ok := keyMapPrefix[key]; ok { - return fmt.Sprintf(format, modifier) - } - - return "" -} - -// getControlKeys extracts the shift, alt, and ctrl key states. -func getControlKeys(controlState uint32) (shift, alt, control bool) { - shift = 0 != (controlState & winterm.SHIFT_PRESSED) - alt = 0 != (controlState & (winterm.LEFT_ALT_PRESSED | winterm.RIGHT_ALT_PRESSED)) - control = 0 != (controlState & (winterm.LEFT_CTRL_PRESSED | winterm.RIGHT_CTRL_PRESSED)) - return shift, alt, control -} - -// getControlKeysModifier returns the ANSI modifier for the given combination of control keys. -func getControlKeysModifier(shift, alt, control bool) string { - if shift && alt && control { - return ansiterm.KEY_CONTROL_PARAM_8 - } - if alt && control { - return ansiterm.KEY_CONTROL_PARAM_7 - } - if shift && control { - return ansiterm.KEY_CONTROL_PARAM_6 - } - if control { - return ansiterm.KEY_CONTROL_PARAM_5 - } - if shift && alt { - return ansiterm.KEY_CONTROL_PARAM_4 - } - if alt { - return ansiterm.KEY_CONTROL_PARAM_3 - } - if shift { - return ansiterm.KEY_CONTROL_PARAM_2 - } - return "" -} diff --git a/pkg/term/windows/ansi_writer.go b/pkg/term/windows/ansi_writer.go deleted file mode 100644 index a3ce5697d9..0000000000 --- a/pkg/term/windows/ansi_writer.go +++ /dev/null @@ -1,64 +0,0 @@ -// +build windows - -package windows - -import ( - "io" - "os" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Azure/go-ansiterm/winterm" -) - -// ansiWriter wraps a standard output file (e.g., os.Stdout) providing ANSI sequence translation. -type ansiWriter struct { - file *os.File - fd uintptr - infoReset *winterm.CONSOLE_SCREEN_BUFFER_INFO - command []byte - escapeSequence []byte - inAnsiSequence bool - parser *ansiterm.AnsiParser -} - -// NewAnsiWriter returns an io.Writer that provides VT100 terminal emulation on top of a -// Windows console output handle. -func NewAnsiWriter(nFile int) io.Writer { - initLogger() - file, fd := winterm.GetStdFile(nFile) - info, err := winterm.GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - parser := ansiterm.CreateParser("Ground", winterm.CreateWinEventHandler(fd, file)) - logger.Infof("newAnsiWriter: parser %p", parser) - - aw := &ansiWriter{ - file: file, - fd: fd, - infoReset: info, - command: make([]byte, 0, ansiterm.ANSI_MAX_CMD_LENGTH), - escapeSequence: []byte(ansiterm.KEY_ESC_CSI), - parser: parser, - } - - logger.Infof("newAnsiWriter: aw.parser %p", aw.parser) - logger.Infof("newAnsiWriter: %v", aw) - return aw -} - -func (aw *ansiWriter) Fd() uintptr { - return aw.fd -} - -// Write writes len(p) bytes from p to the underlying data stream. 
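The maps and helpers above reduce key translation to a Sprintf lookup. A minimal self-contained sketch of the same pattern follows; the VK_UP value (0x26) and the ";5" Ctrl modifier parameter are assumptions about the underlying winterm/ansiterm constants, not values taken from this patch:

package main

import "fmt"

const vkUp = 0x26 // assumed Windows virtual-key code for the up arrow

// Same shape as arrowKeyMapPrefix: escape prefix, then modifier, then final byte.
var arrowFormats = map[uint16]string{
	vkUp: "%s%sA",
}

func main() {
	escapeSequence := []byte("\x1b[") // CSI prefix
	modifier := ";5"                  // assumed Ctrl modifier parameter
	if format, ok := arrowFormats[vkUp]; ok {
		fmt.Printf("%q\n", fmt.Sprintf(format, escapeSequence, modifier)) // "\x1b[;5A"
	}
}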
-func (aw *ansiWriter) Write(p []byte) (total int, err error) { - if len(p) == 0 { - return 0, nil - } - - logger.Infof("Write: % x", p) - logger.Infof("Write: %s", string(p)) - return aw.parser.Parse(p) -} diff --git a/pkg/term/windows/console.go b/pkg/term/windows/console.go deleted file mode 100644 index ca5c3b2e53..0000000000 --- a/pkg/term/windows/console.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build windows - -package windows - -import ( - "os" - - "github.com/Azure/go-ansiterm/winterm" -) - -// GetHandleInfo returns the file descriptor and a bool indicating whether the file is a console. -func GetHandleInfo(in interface{}) (uintptr, bool) { - switch t := in.(type) { - case *ansiReader: - return t.Fd(), true - case *ansiWriter: - return t.Fd(), true - } - - var inFd uintptr - var isTerminal bool - - if file, ok := in.(*os.File); ok { - inFd = file.Fd() - isTerminal = IsConsole(inFd) - } - return inFd, isTerminal -} - -// IsConsole returns true if the given file descriptor is a Windows Console. -// The code assumes that GetConsoleMode will return an error for file descriptors that are not a console. -func IsConsole(fd uintptr) bool { - _, e := winterm.GetConsoleMode(fd) - return e == nil -} diff --git a/pkg/term/windows/windows.go b/pkg/term/windows/windows.go deleted file mode 100644 index ce4cb5990e..0000000000 --- a/pkg/term/windows/windows.go +++ /dev/null @@ -1,33 +0,0 @@ -// These files implement ANSI-aware input and output streams for use by the Docker Windows client. -// When asked for the set of standard streams (e.g., stdin, stdout, stderr), the code will create -// and return pseudo-streams that convert ANSI sequences to / from Windows Console API calls. - -package windows - -import ( - "io/ioutil" - "os" - "sync" - - ansiterm "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger -var initOnce sync.Once - -func initLogger() { - initOnce.Do(func() { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiReaderWriter.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - }) -} diff --git a/pkg/term/windows/windows_test.go b/pkg/term/windows/windows_test.go deleted file mode 100644 index 52aeab54ec..0000000000 --- a/pkg/term/windows/windows_test.go +++ /dev/null @@ -1,3 +0,0 @@ -// This file is necessary to pass the Docker tests. - -package windows diff --git a/pkg/testutil/assert/assert.go b/pkg/testutil/assert/assert.go deleted file mode 100644 index 5b0dcce67a..0000000000 --- a/pkg/testutil/assert/assert.go +++ /dev/null @@ -1,70 +0,0 @@ -// Package assert contains functions for making assertions in unit tests -package assert - -import ( - "fmt" - "path/filepath" - "runtime" - "strings" -) - -// TestingT is an interface which defines the methods of testing.T that are -// required by this package -type TestingT interface { - Fatalf(string, ...interface{}) -} - -// Equal compares the actual value to the expected value and fails the test if -// they are not equal. -func Equal(t TestingT, actual, expected interface{}) { - if expected != actual { - fatal(t, fmt.Sprintf("Expected '%v' (%T) got '%v' (%T)", expected, expected, actual, actual)) - } -} - -// EqualStringSlice compares two slices and fails the test if they do not contain -// the same items.
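The console helpers above (GetHandleInfo and IsConsole) are what lets a caller decide between ANSI emulation and plain pass-through. A hedged sketch of that decision, assuming winterm.GetStdFile accepts the standard syscall handle constants:

package main

import (
	"io"
	"os"
	"syscall"

	windowsconsole "github.com/docker/docker/pkg/term/windows"
)

func stdoutWriter() io.Writer {
	// A real console gets the emulating writer, which turns ANSI
	// sequences into Console API calls; a pipe or file does not need it.
	if windowsconsole.IsConsole(os.Stdout.Fd()) {
		return windowsconsole.NewAnsiWriter(syscall.STD_OUTPUT_HANDLE)
	}
	return os.Stdout
}

func main() {
	w := stdoutWriter()
	io.WriteString(w, "hello\x1b[0m\n")
}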
-func EqualStringSlice(t TestingT, actual, expected []string) { - if len(actual) != len(expected) { - t.Fatalf("Expected (length %d): %q\nActual (length %d): %q", - len(expected), expected, len(actual), actual) - } - for i, item := range actual { - if item != expected[i] { - t.Fatalf("Slices differ at element %d, expected %q got %q", - i, expected[i], item) - } - } -} - -// NilError asserts that the error is nil, otherwise it fails the test. -func NilError(t TestingT, err error) { - if err != nil { - fatal(t, fmt.Sprintf("Expected no error, got: %s", err.Error())) - } -} - -// Error asserts that error is not nil, and contains the expected text, -// otherwise it fails the test. -func Error(t TestingT, err error, contains string) { - if err == nil { - fatal(t, "Expected an error, but error was nil") - } - - if !strings.Contains(err.Error(), contains) { - fatal(t, fmt.Sprintf("Expected error to contain '%s', got '%s'", contains, err.Error())) - } -} - -// Contains asserts that the string contains a substring, otherwise it fails the -// test. -func Contains(t TestingT, actual, contains string) { - if !strings.Contains(actual, contains) { - fatal(t, fmt.Sprintf("Expected '%s' to contain '%s'", actual, contains)) - } -} - -func fatal(t TestingT, msg string) { - _, file, line, _ := runtime.Caller(2) - t.Fatalf("%s:%d: %s", filepath.Base(file), line, msg) -} diff --git a/pkg/testutil/pkg.go b/pkg/testutil/pkg.go deleted file mode 100644 index 110b2e6a79..0000000000 --- a/pkg/testutil/pkg.go +++ /dev/null @@ -1 +0,0 @@ -package testutil diff --git a/pkg/tlsconfig/config.go b/pkg/tlsconfig/config.go deleted file mode 100644 index e3dfad1f0e..0000000000 --- a/pkg/tlsconfig/config.go +++ /dev/null @@ -1,133 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -// As a reminder from https://golang.org/pkg/crypto/tls/#Config: -// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. -// A Config may be reused; the tls package will also not modify it. -package tlsconfig - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -// Options represents the information needed to create client and server TLS configurations. -type Options struct { - CAFile string - - // If either CertFile or KeyFile is empty, Client() will not load them - // preventing the client from authenticating to the server. - // However, Server() requires them and will error out if they are empty. - CertFile string - KeyFile string - - // client-only option - InsecureSkipVerify bool - // server-only option - ClientAuth tls.ClientAuthType -} - -// Extra (server-side) accepted CBC cipher suites - will be phased out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, -} - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} - -// DefaultServerAcceptedCiphers should be used by code which already has a crypto/tls -// options struct but wants to use a commonly accepted set of TLS cipher suites, with -// known weak algorithms removed.
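The assert helpers completed above keep tests terse; a typical use (divide is a hypothetical function under test, shown only to exercise the API) looks like:

package sample

import (
	"errors"
	"testing"

	"github.com/docker/docker/pkg/testutil/assert"
)

// divide is a hypothetical function under test.
func divide(a, b int) (int, error) {
	if b == 0 {
		return 0, errors.New("division by zero")
	}
	return a / b, nil
}

func TestDivide(t *testing.T) {
	res, err := divide(10, 2)
	assert.NilError(t, err)
	assert.Equal(t, res, 5)

	_, err = divide(1, 0)
	assert.Error(t, err, "division by zero")
}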
-var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) - -// ServerDefault is a secure-enough TLS configuration for the server TLS configuration. -var ServerDefault = tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, -} - -// ClientDefault is a secure-enough TLS configuration for the client TLS configuration. -var ClientDefault = tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, -} - -// certPool returns an X.509 certificate pool from `caFile`, the certificate file. -func certPool(caFile string) (*x509.CertPool, error) { - // If we should verify the server, we need to load a trusted ca - certPool := x509.NewCertPool() - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) - } - if !certPool.AppendCertsFromPEM(pem) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) - } - s := certPool.Subjects() - subjects := make([]string, len(s)) - for i, subject := range s { - subjects[i] = string(subject) - } - logrus.Debugf("Trusting certs with subjects: %v", subjects) - return certPool, nil -} - -// Client returns a TLS configuration meant to be used by a client. -func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault - tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.RootCAs = CAs - } - - if options.CertFile != "" && options.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - } - - return &tlsConfig, nil -} - -// Server returns a TLS configuration meant to be used by a server. -func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault - tlsConfig.ClientAuth = options.ClientAuth - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) - } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.ClientCAs = CAs - } - return &tlsConfig, nil -} diff --git a/pkg/truncindex/truncindex.go b/pkg/truncindex/truncindex.go deleted file mode 100644 index 02610b8b7e..0000000000 --- a/pkg/truncindex/truncindex.go +++ /dev/null @@ -1,137 +0,0 @@ -// Package truncindex provides a general 'index tree', used by Docker -// in order to be able to reference containers by only a few unambiguous -// characters of their id. -package truncindex - -import ( - "errors" - "fmt" - "strings" - "sync" - - "github.com/tchap/go-patricia/patricia" -) - -var ( - // ErrEmptyPrefix is an error returned if the prefix was empty. 
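On the client side, the tlsconfig package above is typically used to build an Options value and hand the resulting *tls.Config to an HTTP transport. A sketch with placeholder certificate paths:

package main

import (
	"log"
	"net/http"

	"github.com/docker/docker/pkg/tlsconfig"
)

func main() {
	// Placeholder paths; the daemon and client normally take these
	// from flags such as --tlscacert, --tlscert and --tlskey.
	opts := tlsconfig.Options{
		CAFile:   "/etc/docker/ca.pem",
		CertFile: "/etc/docker/cert.pem",
		KeyFile:  "/etc/docker/key.pem",
	}
	cfg, err := tlsconfig.Client(opts)
	if err != nil {
		log.Fatal(err)
	}
	// The returned config is ready for use by any TLS client.
	client := &http.Client{Transport: &http.Transport{TLSClientConfig: cfg}}
	_ = client
}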
- ErrEmptyPrefix = errors.New("Prefix can't be empty") - - // ErrIllegalChar is returned when a space is in the ID - ErrIllegalChar = errors.New("illegal character: ' '") - - // ErrNotExist is returned when an ID or its prefix is not found in the index. - ErrNotExist = errors.New("ID does not exist") -) - -// ErrAmbiguousPrefix is returned if the prefix was ambiguous -// (multiple ids for the prefix). -type ErrAmbiguousPrefix struct { - prefix string -} - -func (e ErrAmbiguousPrefix) Error() string { - return fmt.Sprintf("Multiple IDs found with provided prefix: %s", e.prefix) -} - -// TruncIndex allows the retrieval of string identifiers by any of their unique prefixes. -// This is used to retrieve image and container IDs by more convenient shorthand prefixes. -type TruncIndex struct { - sync.RWMutex - trie *patricia.Trie - ids map[string]struct{} -} - -// NewTruncIndex creates a new TruncIndex and initializes it with a list of IDs. -func NewTruncIndex(ids []string) (idx *TruncIndex) { - idx = &TruncIndex{ - ids: make(map[string]struct{}), - - // Change patricia max prefix per node length, - // because our len(ID) is always 64 - trie: patricia.NewTrie(patricia.MaxPrefixPerNode(64)), - } - for _, id := range ids { - idx.addID(id) - } - return -} - -func (idx *TruncIndex) addID(id string) error { - if strings.Contains(id, " ") { - return ErrIllegalChar - } - if id == "" { - return ErrEmptyPrefix - } - if _, exists := idx.ids[id]; exists { - return fmt.Errorf("id already exists: '%s'", id) - } - idx.ids[id] = struct{}{} - if inserted := idx.trie.Insert(patricia.Prefix(id), struct{}{}); !inserted { - return fmt.Errorf("failed to insert id: %s", id) - } - return nil -} - -// Add adds a new ID to the TruncIndex. -func (idx *TruncIndex) Add(id string) error { - idx.Lock() - defer idx.Unlock() - if err := idx.addID(id); err != nil { - return err - } - return nil -} - -// Delete removes an ID from the TruncIndex. If there are multiple IDs -// with the given prefix, an error is returned. -func (idx *TruncIndex) Delete(id string) error { - idx.Lock() - defer idx.Unlock() - if _, exists := idx.ids[id]; !exists || id == "" { - return fmt.Errorf("no such id: '%s'", id) - } - delete(idx.ids, id) - if deleted := idx.trie.Delete(patricia.Prefix(id)); !deleted { - return fmt.Errorf("no such id: '%s'", id) - } - return nil -} - -// Get retrieves an ID from the TruncIndex. If there are multiple IDs -// with the given prefix, an error is returned. -func (idx *TruncIndex) Get(s string) (string, error) { - if s == "" { - return "", ErrEmptyPrefix - } - var ( - id string - ) - subTreeVisitFunc := func(prefix patricia.Prefix, item patricia.Item) error { - if id != "" { - // we haven't found the ID if there are two or more IDs - id = "" - return ErrAmbiguousPrefix{prefix: string(prefix)} - } - id = string(prefix) - return nil - } - - idx.RLock() - defer idx.RUnlock() - if err := idx.trie.VisitSubtree(patricia.Prefix(s), subTreeVisitFunc); err != nil { - return "", err - } - if id != "" { - return id, nil - } - return "", ErrNotExist -} - -// Iterate iterates over all stored IDs, and passes each of them to the given handler.
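Taken together, the methods above give prefix semantics like the following sketch (IDs shortened for readability; real IDs are 64 hex characters):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/truncindex"
)

func main() {
	index := truncindex.NewTruncIndex([]string{
		"aabbccdd11223344",
		"aaff001122334455",
	})

	// A prefix unique to one ID resolves to the full ID.
	id, err := index.Get("aab")
	fmt.Println(id, err) // aabbccdd11223344 <nil>

	// A prefix shared by both IDs is ambiguous and returns an error.
	if _, err := index.Get("aa"); err != nil {
		fmt.Println("ambiguous:", err)
	}
}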
-func (idx *TruncIndex) Iterate(handler func(id string)) { - idx.trie.Visit(func(prefix patricia.Prefix, item patricia.Item) error { - handler(string(prefix)) - return nil - }) -} diff --git a/pkg/truncindex/truncindex_test.go b/pkg/truncindex/truncindex_test.go deleted file mode 100644 index 8197baf7d4..0000000000 --- a/pkg/truncindex/truncindex_test.go +++ /dev/null @@ -1,429 +0,0 @@ -package truncindex - -import ( - "math/rand" - "testing" - - "github.com/docker/docker/pkg/stringid" -) - -// Test the behavior of TruncIndex, an index for querying IDs from a non-conflicting prefix. -func TestTruncIndex(t *testing.T) { - ids := []string{} - index := NewTruncIndex(ids) - // Get on an empty index - if _, err := index.Get("foobar"); err == nil { - t.Fatal("Get on an empty index should return an error") - } - - // Spaces should be illegal in an id - if err := index.Add("I have a space"); err == nil { - t.Fatalf("Adding an id with ' ' should return an error") - } - - id := "99b36c2c326ccc11e726eee6ee78a0baf166ef96" - // Add an id - if err := index.Add(id); err != nil { - t.Fatal(err) - } - - // Add an empty id (should fail) - if err := index.Add(""); err == nil { - t.Fatalf("Adding an empty id should return an error") - } - - // Get a non-existing id - assertIndexGet(t, index, "abracadabra", "", true) - // Get an empty id - assertIndexGet(t, index, "", "", true) - // Get the exact id - assertIndexGet(t, index, id, id, false) - // The first letter should match - assertIndexGet(t, index, id[:1], id, false) - // The first half should match - assertIndexGet(t, index, id[:len(id)/2], id, false) - // The second half should NOT match - assertIndexGet(t, index, id[len(id)/2:], "", true) - - id2 := id[:6] + "blabla" - // Add an id - if err := index.Add(id2); err != nil { - t.Fatal(err) - } - // Both exact IDs should work - assertIndexGet(t, index, id, id, false) - assertIndexGet(t, index, id2, id2, false) - - // 6 characters or less should conflict - assertIndexGet(t, index, id[:6], "", true) - assertIndexGet(t, index, id[:4], "", true) - assertIndexGet(t, index, id[:1], "", true) - - // An ambiguous id prefix should return an error - if _, err := index.Get(id[:4]); err == nil { - t.Fatal("An ambiguous id prefix should return an error") - } - - // 7 characters should NOT conflict - assertIndexGet(t, index, id[:7], id, false) - assertIndexGet(t, index, id2[:7], id2, false) - - // Deleting a non-existing id should return an error - if err := index.Delete("non-existing"); err == nil { - t.Fatalf("Deleting a non-existing id should return an error") - } - - // Deleting an empty id should return an error - if err := index.Delete(""); err == nil { - t.Fatal("Deleting an empty id should return an error") - } - - // Deleting id2 should remove conflicts - if err := index.Delete(id2); err != nil { - t.Fatal(err) - } - // id2 should no longer work - assertIndexGet(t, index, id2, "", true) - assertIndexGet(t, index, id2[:7], "", true) - assertIndexGet(t, index, id2[:11], "", true) - - // conflicts between id and id2 should be gone - assertIndexGet(t, index, id[:6], id, false) - assertIndexGet(t, index, id[:4], id, false) - assertIndexGet(t, index, id[:1], id, false) - - // non-conflicting substrings should still not conflict - assertIndexGet(t, index, id[:7], id, false) - assertIndexGet(t, index, id[:15], id, false) - assertIndexGet(t, index, id, id, false) - - assertIndexIterate(t) -} - -func assertIndexIterate(t *testing.T) { - ids := []string{ - "19b36c2c326ccc11e726eee6ee78a0baf166ef96", - 
"28b36c2c326ccc11e726eee6ee78a0baf166ef96", - "37b36c2c326ccc11e726eee6ee78a0baf166ef96", - "46b36c2c326ccc11e726eee6ee78a0baf166ef96", - } - - index := NewTruncIndex(ids) - - index.Iterate(func(targetId string) { - for _, id := range ids { - if targetId == id { - return - } - } - - t.Fatalf("An unknown ID '%s'", targetId) - }) -} - -func assertIndexGet(t *testing.T, index *TruncIndex, input, expectedResult string, expectError bool) { - if result, err := index.Get(input); err != nil && !expectError { - t.Fatalf("Unexpected error getting '%s': %s", input, err) - } else if err == nil && expectError { - t.Fatalf("Getting '%s' should return an error, not '%s'", input, result) - } else if result != expectedResult { - t.Fatalf("Getting '%s' returned '%s' instead of '%s'", input, result, expectedResult) - } -} - -func BenchmarkTruncIndexAdd100(b *testing.B) { - var testSet []string - for i := 0; i < 100; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexAdd250(b *testing.B) { - var testSet []string - for i := 0; i < 250; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexAdd500(b *testing.B) { - var testSet []string - for i := 0; i < 500; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexGet100(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 100; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} - -func BenchmarkTruncIndexGet250(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 250; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} - -func BenchmarkTruncIndexGet500(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 500; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} - -func BenchmarkTruncIndexDelete100(b *testing.B) { - var 
testSet []string - for i := 0; i < 100; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - b.StartTimer() - for _, id := range testSet { - if err := index.Delete(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexDelete250(b *testing.B) { - var testSet []string - for i := 0; i < 250; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - b.StartTimer() - for _, id := range testSet { - if err := index.Delete(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexDelete500(b *testing.B) { - var testSet []string - for i := 0; i < 500; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - b.StopTimer() - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - b.StartTimer() - for _, id := range testSet { - if err := index.Delete(id); err != nil { - b.Fatal(err) - } - } - } -} - -func BenchmarkTruncIndexNew100(b *testing.B) { - var testSet []string - for i := 0; i < 100; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - NewTruncIndex(testSet) - } -} - -func BenchmarkTruncIndexNew250(b *testing.B) { - var testSet []string - for i := 0; i < 250; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - NewTruncIndex(testSet) - } -} - -func BenchmarkTruncIndexNew500(b *testing.B) { - var testSet []string - for i := 0; i < 500; i++ { - testSet = append(testSet, stringid.GenerateNonCryptoID()) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - NewTruncIndex(testSet) - } -} - -func BenchmarkTruncIndexAddGet100(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 100; i++ { - id := stringid.GenerateNonCryptoID() - testSet = append(testSet, id) - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} - -func BenchmarkTruncIndexAddGet250(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 250; i++ { - id := stringid.GenerateNonCryptoID() - testSet = append(testSet, id) - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} - -func BenchmarkTruncIndexAddGet500(b *testing.B) { - var testSet []string - var testKeys []string - for i := 0; i < 500; i++ { - id := stringid.GenerateNonCryptoID() - testSet = append(testSet, id) - l := rand.Intn(12) + 12 - testKeys = append(testKeys, id[:l]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { -
index := NewTruncIndex([]string{}) - for _, id := range testSet { - if err := index.Add(id); err != nil { - b.Fatal(err) - } - } - for _, id := range testKeys { - if res, err := index.Get(id); err != nil { - b.Fatal(res, err) - } - } - } -} diff --git a/pkg/urlutil/urlutil.go b/pkg/urlutil/urlutil.go deleted file mode 100644 index 44152873b1..0000000000 --- a/pkg/urlutil/urlutil.go +++ /dev/null @@ -1,50 +0,0 @@ -// Package urlutil provides helper functions to check the kind of a URL. -// It supports HTTP URLs, git URLs and transport URLs (tcp://, …) -package urlutil - -import ( - "regexp" - "strings" -) - -var ( - validPrefixes = map[string][]string{ - "url": {"http://", "https://"}, - "git": {"git://", "github.com/", "git@"}, - "transport": {"tcp://", "tcp+tls://", "udp://", "unix://", "unixgram://"}, - } - urlPathWithFragmentSuffix = regexp.MustCompile(".git(?:#.+)?$") -) - -// IsURL returns true if the provided str is an HTTP(S) URL. -func IsURL(str string) bool { - return checkURL(str, "url") -} - -// IsGitURL returns true if the provided str is a git repository URL. -func IsGitURL(str string) bool { - if IsURL(str) && urlPathWithFragmentSuffix.MatchString(str) { - return true - } - return checkURL(str, "git") -} - -// IsGitTransport returns true if the provided str is a git transport by inspecting -// the prefix of the string for known protocols used in git. -func IsGitTransport(str string) bool { - return IsURL(str) || strings.HasPrefix(str, "git://") || strings.HasPrefix(str, "git@") -} - -// IsTransportURL returns true if the provided str is a transport (tcp, tcp+tls, udp, unix) URL. -func IsTransportURL(str string) bool { - return checkURL(str, "transport") -} - -func checkURL(str, kind string) bool { - for _, prefix := range validPrefixes[kind] { - if strings.HasPrefix(str, prefix) { - return true - } - } - return false -} diff --git a/pkg/urlutil/urlutil_test.go b/pkg/urlutil/urlutil_test.go deleted file mode 100644 index 75eb464fe5..0000000000 --- a/pkg/urlutil/urlutil_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package urlutil - -import "testing" - -var ( - gitUrls = []string{ - "git://github.com/docker/docker", - "git@github.com:docker/docker.git", - "git@bitbucket.org:atlassianlabs/atlassian-docker.git", - "https://github.com/docker/docker.git", - "http://github.com/docker/docker.git", - "http://github.com/docker/docker.git#branch", - "http://github.com/docker/docker.git#:dir", - } - incompleteGitUrls = []string{ - "github.com/docker/docker", - } - invalidGitUrls = []string{ - "http://github.com/docker/docker.git:#branch", - } - transportUrls = []string{ - "tcp://example.com", - "tcp+tls://example.com", - "udp://example.com", - "unix:///example", - "unixgram:///example", - } -) - -func TestValidGitTransport(t *testing.T) { - for _, url := range gitUrls { - if IsGitTransport(url) == false { - t.Fatalf("%q should be detected as valid Git prefix", url) - } - } - - for _, url := range incompleteGitUrls { - if IsGitTransport(url) == true { - t.Fatalf("%q should not be detected as valid Git prefix", url) - } - } -} - -func TestIsGIT(t *testing.T) { - for _, url := range gitUrls { - if IsGitURL(url) == false { - t.Fatalf("%q should be detected as valid Git url", url) - } - } - - for _, url := range incompleteGitUrls { - if IsGitURL(url) == false { - t.Fatalf("%q should be detected as valid Git url", url) - } - } - - for _, url := range invalidGitUrls { - if IsGitURL(url) == true { - t.Fatalf("%q should not be detected as valid Git url", url) - } - } -} - -func TestIsTransport(t *testing.T) {
- for _, url := range transportUrls { - if IsTransportURL(url) == false { - t.Fatalf("%q should be detected as valid Transport url", url) - } - } -} diff --git a/pkg/useragent/README.md b/pkg/useragent/README.md deleted file mode 100644 index d9cb367d10..0000000000 --- a/pkg/useragent/README.md +++ /dev/null @@ -1 +0,0 @@ -This package provides helper functions to pack version information into a single User-Agent header. diff --git a/pkg/useragent/useragent.go b/pkg/useragent/useragent.go deleted file mode 100644 index 1137db51b8..0000000000 --- a/pkg/useragent/useragent.go +++ /dev/null @@ -1,55 +0,0 @@ -// Package useragent provides helper functions to pack -// version information into a single User-Agent header. -package useragent - -import ( - "strings" -) - -// VersionInfo is used to model UserAgent versions. -type VersionInfo struct { - Name string - Version string -} - -func (vi *VersionInfo) isValid() bool { - const stopChars = " \t\r\n/" - name := vi.Name - vers := vi.Version - if len(name) == 0 || strings.ContainsAny(name, stopChars) { - return false - } - if len(vers) == 0 || strings.ContainsAny(vers, stopChars) { - return false - } - return true -} - -// AppendVersions converts versions to a string and appends the string to the string base. -// -// Each VersionInfo will be converted to a string in the format of -// "product/version", where the "product" is taken from the name field and the -// "version" from the version field. Several pieces of version information -// are concatenated and separated by spaces. -// -// Example: -// AppendVersions("base", VersionInfo{"foo", "1.0"}, VersionInfo{"bar", "2.0"}) -// results in "base foo/1.0 bar/2.0". -func AppendVersions(base string, versions ...VersionInfo) string { - if len(versions) == 0 { - return base - } - - verstrs := make([]string, 0, 1+len(versions)) - if len(base) > 0 { - verstrs = append(verstrs, base) - } - - for _, v := range versions { - if !v.isValid() { - continue - } - verstrs = append(verstrs, v.Name+"/"+v.Version) - } - return strings.Join(verstrs, " ") -} diff --git a/pkg/useragent/useragent_test.go b/pkg/useragent/useragent_test.go deleted file mode 100644 index 0ad7243a6d..0000000000 --- a/pkg/useragent/useragent_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package useragent - -import "testing" - -func TestVersionInfo(t *testing.T) { - vi := VersionInfo{"foo", "bar"} - if !vi.isValid() { - t.Fatalf("VersionInfo should be valid") - } - vi = VersionInfo{"", "bar"} - if vi.isValid() { - t.Fatalf("Expected VersionInfo to be invalid") - } - vi = VersionInfo{"foo", ""} - if vi.isValid() { - t.Fatalf("Expected VersionInfo to be invalid") - } -} - -func TestAppendVersions(t *testing.T) { - vis := []VersionInfo{ - {"foo", "1.0"}, - {"bar", "0.1"}, - {"pi", "3.1.4"}, - } - v := AppendVersions("base", vis...) - expect := "base foo/1.0 bar/0.1 pi/3.1.4" - if v != expect { - t.Fatalf("expected %q, got %q", expect, v) - } -} diff --git a/plugin/backend.go b/plugin/backend.go deleted file mode 100644 index 975c51632b..0000000000 --- a/plugin/backend.go +++ /dev/null @@ -1,153 +0,0 @@ -// +build experimental - -package plugin - -import ( - "fmt" - "net/http" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stringid" - "github.com/docker/docker/plugin/distribution" - "github.com/docker/docker/reference" - "github.com/docker/engine-api/types" -) - -// Disable deactivates a plugin, which implies that it cannot be used by containers.
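The urlutil predicates above classify build-context style arguments; a small sketch of what they answer for representative inputs:

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/urlutil"
)

func main() {
	fmt.Println(urlutil.IsURL("https://example.com/ctx.tar.gz"))                 // true
	fmt.Println(urlutil.IsGitURL("git://github.com/docker/docker"))              // true
	fmt.Println(urlutil.IsGitURL("https://github.com/docker/docker.git#branch")) // true
	fmt.Println(urlutil.IsTransportURL("unix:///var/run/docker.sock"))           // true
	fmt.Println(urlutil.IsURL("./local/dir"))                                    // false
}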
-func (pm *Manager) Disable(name string) error { - p, err := pm.get(name) - if err != nil { - return err - } - if err := pm.disable(p); err != nil { - return err - } - pm.pluginEventLogger(p.PluginObj.ID, name, "disable") - return nil -} - -// Enable activates a plugin, which implies that it is ready to be used by containers. -func (pm *Manager) Enable(name string) error { - p, err := pm.get(name) - if err != nil { - return err - } - if err := pm.enable(p, false); err != nil { - return err - } - pm.pluginEventLogger(p.PluginObj.ID, name, "enable") - return nil -} - -// Inspect examines a plugin manifest -func (pm *Manager) Inspect(name string) (tp types.Plugin, err error) { - p, err := pm.get(name) - if err != nil { - return tp, err - } - return p.PluginObj, nil -} - -// Pull pulls a plugin and computes the privileges required to install it. -func (pm *Manager) Pull(name string, metaHeader http.Header, authConfig *types.AuthConfig) (types.PluginPrivileges, error) { - ref, err := reference.ParseNamed(name) - if err != nil { - logrus.Debugf("error in reference.ParseNamed: %v", err) - return nil, err - } - name = ref.String() - - if p, _ := pm.get(name); p != nil { - logrus.Debugf("plugin already exists") - return nil, fmt.Errorf("%s exists", name) - } - - pluginID := stringid.GenerateNonCryptoID() - - if err := os.MkdirAll(filepath.Join(pm.libRoot, pluginID), 0755); err != nil { - logrus.Debugf("error in MkdirAll: %v", err) - return nil, err - } - - pd, err := distribution.Pull(name, pm.registryService, metaHeader, authConfig) - if err != nil { - logrus.Debugf("error in distribution.Pull(): %v", err) - return nil, err - } - - if err := distribution.WritePullData(pd, filepath.Join(pm.libRoot, pluginID), true); err != nil { - logrus.Debugf("error in distribution.WritePullData(): %v", err) - return nil, err - } - - p := pm.newPlugin(ref, pluginID) - if err := pm.initPlugin(p); err != nil { - return nil, err - } - - pm.Lock() - pm.plugins[pluginID] = p - pm.nameToID[name] = pluginID - pm.save() - pm.Unlock() - - pm.pluginEventLogger(pluginID, name, "pull") - return computePrivileges(&p.PluginObj.Manifest), nil -} - -// List returns the list of plugins and associated metadata. -func (pm *Manager) List() ([]types.Plugin, error) { - out := make([]types.Plugin, 0, len(pm.plugins)) - for _, p := range pm.plugins { - out = append(out, p.PluginObj) - } - return out, nil -} - -// Push pushes a plugin to the store. -func (pm *Manager) Push(name string, metaHeader http.Header, authConfig *types.AuthConfig) error { - p, err := pm.get(name) - if err != nil { - return err - } - dest := filepath.Join(pm.libRoot, p.PluginObj.ID) - config, err := os.Open(filepath.Join(dest, "manifest.json")) - if err != nil { - return err - } - defer config.Close() - - rootfs, err := archive.Tar(filepath.Join(dest, "rootfs"), archive.Gzip) - if err != nil { - return err - } - _, err = distribution.Push(name, pm.registryService, metaHeader, authConfig, config, rootfs) - // XXX: Ignore the returned digest for now; - // it needs to be written to the ProgressWriter. - return err -} - -// Remove deletes the plugin's root directory.
-func (pm *Manager) Remove(name string) error { - p, err := pm.get(name) - if err != nil { - return err - } - if err := pm.remove(p); err != nil { - return err - } - pm.pluginEventLogger(p.PluginObj.ID, name, "remove") - return nil -} - -// Set sets plugin args -func (pm *Manager) Set(name string, args []string) error { - p, err := pm.get(name) - if err != nil { - return err - } - return pm.set(p, args) -} diff --git a/plugin/distribution/pull.go b/plugin/distribution/pull.go deleted file mode 100644 index 1bae8d4bb7..0000000000 --- a/plugin/distribution/pull.go +++ /dev/null @@ -1,212 +0,0 @@ -// +build experimental - -package distribution - -import ( - "encoding/json" - "fmt" - "io" - "io/ioutil" - "net/http" - "os" - "path/filepath" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/manifest/schema2" - dockerdist "github.com/docker/docker/distribution" - archive "github.com/docker/docker/pkg/chrootarchive" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// PullData is the plugin manifest and the rootfs -type PullData interface { - Config() ([]byte, error) - Layer() (io.ReadCloser, error) -} - -type pullData struct { - repository distribution.Repository - manifest schema2.Manifest - index int -} - -func (pd *pullData) Config() ([]byte, error) { - blobs := pd.repository.Blobs(context.Background()) - config, err := blobs.Get(context.Background(), pd.manifest.Config.Digest) - if err != nil { - return nil, err - } - // validate - var p types.Plugin - if err := json.Unmarshal(config, &p); err != nil { - return nil, err - } - return config, nil -} - -func (pd *pullData) Layer() (io.ReadCloser, error) { - if pd.index >= len(pd.manifest.Layers) { - return nil, io.EOF - } - - blobs := pd.repository.Blobs(context.Background()) - rsc, err := blobs.Open(context.Background(), pd.manifest.Layers[pd.index].Digest) - if err != nil { - return nil, err - } - pd.index++ - return rsc, nil -} - -// Pull downloads the plugin from Store -func Pull(name string, rs registry.Service, metaheader http.Header, authConfig *types.AuthConfig) (PullData, error) { - ref, err := reference.ParseNamed(name) - if err != nil { - logrus.Debugf("pull.go: error in ParseNamed: %v", err) - return nil, err - } - - repoInfo, err := rs.ResolveRepository(ref) - if err != nil { - logrus.Debugf("pull.go: error in ResolveRepository: %v", err) - return nil, err - } - - if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil { - logrus.Debugf("pull.go: error in ValidateRepoName: %v", err) - return nil, err - } - - endpoints, err := rs.LookupPullEndpoints(repoInfo.Hostname()) - if err != nil { - logrus.Debugf("pull.go: error in LookupPullEndpoints: %v", err) - return nil, err - } - - var confirmedV2 bool - var repository distribution.Repository - - for _, endpoint := range endpoints { - if confirmedV2 && endpoint.Version == registry.APIVersion1 { - logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) - continue - } - - // TODO: reuse contexts - repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaheader, authConfig, "pull") - if err != nil { - logrus.Debugf("pull.go: error in NewV2Repository: %v", err) - return nil, err - } - if !confirmedV2 { - logrus.Debugf("pull.go: !confirmedV2") - return nil, ErrUnsupportedRegistry - } - logrus.Debugf("Trying to pull %s from %s %s", repoInfo.Name(), 
endpoint.URL, endpoint.Version) - break - } - - tag := DefaultTag - if ref, ok := ref.(reference.NamedTagged); ok { - tag = ref.Tag() - } - - // tags := repository.Tags(context.Background()) - // desc, err := tags.Get(context.Background(), tag) - // if err != nil { - // return nil, err - // } - // - msv, err := repository.Manifests(context.Background()) - if err != nil { - logrus.Debugf("pull.go: error in repository.Manifests: %v", err) - return nil, err - } - manifest, err := msv.Get(context.Background(), "", distribution.WithTag(tag)) - if err != nil { - // TODO: change 401 to 404 - logrus.Debugf("pull.go: error in msv.Get(): %v", err) - return nil, err - } - - _, pl, err := manifest.Payload() - if err != nil { - logrus.Debugf("pull.go: error in manifest.Payload(): %v", err) - return nil, err - } - var m schema2.Manifest - if err := json.Unmarshal(pl, &m); err != nil { - logrus.Debugf("pull.go: error in json.Unmarshal(): %v", err) - return nil, err - } - if m.Config.MediaType != MediaTypeConfig { - return nil, ErrUnsupportedMediaType - } - - pd := &pullData{ - repository: repository, - manifest: m, - } - - logrus.Debugf("manifest: %s", pl) - return pd, nil -} - -// WritePullData extracts manifest and rootfs to the disk. -func WritePullData(pd PullData, dest string, extract bool) error { - config, err := pd.Config() - if err != nil { - return err - } - var p types.Plugin - if err := json.Unmarshal(config, &p); err != nil { - return err - } - logrus.Debugf("%#v", p) - - if err := os.MkdirAll(dest, 0700); err != nil { - return err - } - - if extract { - if err := ioutil.WriteFile(filepath.Join(dest, "manifest.json"), config, 0600); err != nil { - return err - } - - if err := os.MkdirAll(filepath.Join(dest, "rootfs"), 0700); err != nil { - return err - } - } - - for i := 0; ; i++ { - l, err := pd.Layer() - if err == io.EOF { - break - } - if err != nil { - return err - } - - if !extract { - f, err := os.Create(filepath.Join(dest, fmt.Sprintf("layer%d.tar", i))) - if err != nil { - l.Close() - return err - } - io.Copy(f, l) - l.Close() - f.Close() - continue - } - - if _, err := archive.ApplyLayer(filepath.Join(dest, "rootfs"), l); err != nil { - return err - } - - } - return nil -} diff --git a/plugin/distribution/push.go b/plugin/distribution/push.go deleted file mode 100644 index 27e717af4c..0000000000 --- a/plugin/distribution/push.go +++ /dev/null @@ -1,135 +0,0 @@ -// +build experimental - -package distribution - -import ( - "crypto/sha256" - "io" - "net/http" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest/schema2" - dockerdist "github.com/docker/docker/distribution" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// Push pushes a plugin to a registry. 
-func Push(name string, rs registry.Service, metaHeader http.Header, authConfig *types.AuthConfig, config io.ReadCloser, layers io.ReadCloser) (digest.Digest, error) { - ref, err := reference.ParseNamed(name) - if err != nil { - return "", err - } - - repoInfo, err := rs.ResolveRepository(ref) - if err != nil { - return "", err - } - - if err := dockerdist.ValidateRepoName(repoInfo.Name()); err != nil { - return "", err - } - - endpoints, err := rs.LookupPushEndpoints(repoInfo.Hostname()) - if err != nil { - return "", err - } - - var confirmedV2 bool - var repository distribution.Repository - for _, endpoint := range endpoints { - if confirmedV2 && endpoint.Version == registry.APIVersion1 { - logrus.Debugf("Skipping v1 endpoint %s because v2 registry was detected", endpoint.URL) - continue - } - repository, confirmedV2, err = dockerdist.NewV2Repository(context.Background(), repoInfo, endpoint, metaHeader, authConfig, "push", "pull") - if err != nil { - return "", err - } - if !confirmedV2 { - return "", ErrUnsupportedRegistry - } - logrus.Debugf("Trying to push %s to %s %s", repoInfo.Name(), endpoint.URL, endpoint.Version) - // This means that we found an endpoint and we are ready to push. - break - } - - // Returns a reference to the repository's blob service. - blobs := repository.Blobs(context.Background()) - - // Descriptor = {mediaType, size, digest} - var descs []distribution.Descriptor - - for i, f := range []io.ReadCloser{config, layers} { - bw, err := blobs.Create(context.Background()) - if err != nil { - logrus.Debugf("Error in blobs.Create: %v", err) - return "", err - } - h := sha256.New() - r := io.TeeReader(f, h) - _, err = io.Copy(bw, r) - if err != nil { - f.Close() - logrus.Debugf("Error in io.Copy: %v", err) - return "", err - } - f.Close() - mt := MediaTypeLayer - if i == 0 { - mt = MediaTypeConfig - } - // Commit completes the write process to the BlobService. - // The descriptor arg to Commit is called the "provisional" descriptor and - // is used for validation. - // The returned descriptor should be the one used. It's called the "Canonical" - // descriptor. - desc, err := bw.Commit(context.Background(), distribution.Descriptor{ - MediaType: mt, - // XXX: What about the Size? - Digest: digest.NewDigest("sha256", h), - }) - if err != nil { - logrus.Debugf("Error in bw.Commit: %v", err) - return "", err - } - // The canonical descriptor has the media type set again, just in case. - // Don't touch the digest or the size here. - desc.MediaType = mt - logrus.Debugf("pushed blob: %s %s", desc.MediaType, desc.Digest) - descs = append(descs, desc) - } - - // XXX: schema2.Versioned needs a MediaType as well.
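The upload loop above digests each blob in the same pass as the copy; the core of it is io.TeeReader feeding a hash while the bytes flow to the blob writer. A standalone sketch of that pattern:

package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"io/ioutil"
	"strings"
)

func main() {
	src := strings.NewReader("layer bytes")
	h := sha256.New()
	// Every byte read through tee is also written to h, so the digest
	// is ready as soon as the copy (the "upload") completes.
	tee := io.TeeReader(src, h)
	n, err := io.Copy(ioutil.Discard, tee) // stand-in for the blob writer
	if err != nil {
		panic(err)
	}
	fmt.Printf("uploaded %d bytes, sha256:%x\n", n, h.Sum(nil))
}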
- // "application/vnd.docker.distribution.manifest.v2+json" - m, err := schema2.FromStruct(schema2.Manifest{Versioned: schema2.SchemaVersion, Config: descs[0], Layers: descs[1:]}) - if err != nil { - logrus.Debugf("error in schema2.FromStruct: %v", err) - return "", err - } - - msv, err := repository.Manifests(context.Background()) - if err != nil { - logrus.Debugf("error in repository.Manifests: %v", err) - return "", err - } - - _, pl, err := m.Payload() - if err != nil { - logrus.Debugf("error in m.Payload: %v", err) - return "", err - } - - logrus.Debugf("Pushed manifest: %s", pl) - - tag := DefaultTag - if tagged, ok := ref.(reference.NamedTagged); ok { - tag = tagged.Tag() - } - - return msv.Put(context.Background(), m, distribution.WithTag(tag)) -} diff --git a/plugin/distribution/types.go b/plugin/distribution/types.go deleted file mode 100644 index 0b1fd7aea1..0000000000 --- a/plugin/distribution/types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build experimental - -package distribution - -import "errors" - -// ErrUnsupportedRegistry indicates that the registry does not support v2 protocol -var ErrUnsupportedRegistry = errors.New("only V2 repositories are supported for plugin distribution") - -// ErrUnsupportedMediaType indicates we are pulling content that's not a plugin -var ErrUnsupportedMediaType = errors.New("content is not a plugin") - -// Plugin related media types -const ( - MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" - MediaTypeConfig = "application/vnd.docker.plugin.v0+json" - MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" - DefaultTag = "latest" -) diff --git a/plugin/interface.go b/plugin/interface.go deleted file mode 100644 index 80e6b5b8df..0000000000 --- a/plugin/interface.go +++ /dev/null @@ -1,10 +0,0 @@ -package plugin - -import "github.com/docker/docker/pkg/plugins" - -// Plugin represents a plugin. It is used to abstract from an older plugin architecture (in pkg/plugins). -type Plugin interface { - Client() *plugins.Client - Name() string - IsLegacy() bool -} diff --git a/plugin/legacy.go b/plugin/legacy.go deleted file mode 100644 index 8ea4c0da96..0000000000 --- a/plugin/legacy.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build !experimental - -package plugin - -import "github.com/docker/docker/pkg/plugins" - -// FindWithCapability returns a list of plugins matching the given capability. -func FindWithCapability(capability string) ([]Plugin, error) { - pl, err := plugins.GetAll(capability) - if err != nil { - return nil, err - } - result := make([]Plugin, len(pl)) - for i, p := range pl { - result[i] = p - } - return result, nil -} - -// LookupWithCapability returns a plugin matching the given name and capability. 
-func LookupWithCapability(name, capability string) (Plugin, error) { - return plugins.Get(name, capability) -} diff --git a/plugin/manager.go b/plugin/manager.go deleted file mode 100644 index 254db32f40..0000000000 --- a/plugin/manager.go +++ /dev/null @@ -1,449 +0,0 @@ -// +build experimental - -package plugin - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "sync" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/pkg/ioutils" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/reference" - "github.com/docker/docker/registry" - "github.com/docker/docker/restartmanager" - "github.com/docker/engine-api/types" -) - -const defaultPluginRuntimeDestination = "/run/docker/plugins" - -var manager *Manager - -// ErrNotFound indicates that a plugin was not found locally. -type ErrNotFound string - -func (name ErrNotFound) Error() string { return fmt.Sprintf("plugin %q not found", string(name)) } - -// ErrInadequateCapability indicates that a plugin was found but did not have the requested capability. -type ErrInadequateCapability struct { - name string - capability string -} - -func (e ErrInadequateCapability) Error() string { - return fmt.Sprintf("plugin %q found, but not with %q capability", e.name, e.capability) -} - -type plugin struct { - //sync.RWMutex TODO - PluginObj types.Plugin `json:"plugin"` - client *plugins.Client - restartManager restartmanager.RestartManager - runtimeSourcePath string - exitChan chan bool -} - -func (p *plugin) Client() *plugins.Client { - return p.client -} - -// IsLegacy returns true for legacy plugins and false otherwise. -func (p *plugin) IsLegacy() bool { - return false -} - -func (p *plugin) Name() string { - name := p.PluginObj.Name - if len(p.PluginObj.Tag) > 0 { - // TODO: this feels hacky, maybe we should be storing the distribution reference rather than splitting these - name += ":" + p.PluginObj.Tag - } - return name -} - -func (pm *Manager) newPlugin(ref reference.Named, id string) *plugin { - p := &plugin{ - PluginObj: types.Plugin{ - Name: ref.Name(), - ID: id, - }, - runtimeSourcePath: filepath.Join(pm.runRoot, id), - } - if ref, ok := ref.(reference.NamedTagged); ok { - p.PluginObj.Tag = ref.Tag() - } - return p -} - -func (pm *Manager) restorePlugin(p *plugin) error { - p.runtimeSourcePath = filepath.Join(pm.runRoot, p.PluginObj.ID) - if p.PluginObj.Active { - return pm.restore(p) - } - return nil -} - -type pluginMap map[string]*plugin -type eventLogger func(id, name, action string) - -// Manager controls the plugin subsystem. -type Manager struct { - sync.RWMutex - libRoot string - runRoot string - plugins pluginMap // TODO: figure out why save() doesn't json encode *plugin object - nameToID map[string]string - handlers map[string]func(string, *plugins.Client) - containerdClient libcontainerd.Client - registryService registry.Service - handleLegacy bool - liveRestore bool - shutdown bool - pluginEventLogger eventLogger -} - -// GetManager returns the singleton plugin Manager -func GetManager() *Manager { - return manager -} - -// Init (was NewManager) instantiates the singleton Manager. -// TODO: revert this to NewManager once we get rid of all the singletons. 
-func Init(root string, remote libcontainerd.Remote, rs registry.Service, liveRestore bool, evL eventLogger) (err error) { - if manager != nil { - return nil - } - - root = filepath.Join(root, "plugins") - manager = &Manager{ - libRoot: root, - runRoot: "/run/docker", - plugins: make(map[string]*plugin), - nameToID: make(map[string]string), - handlers: make(map[string]func(string, *plugins.Client)), - registryService: rs, - handleLegacy: true, - liveRestore: liveRestore, - pluginEventLogger: evL, - } - if err := os.MkdirAll(manager.runRoot, 0700); err != nil { - return err - } - manager.containerdClient, err = remote.Client(manager) - if err != nil { - return err - } - if err := manager.init(); err != nil { - return err - } - return nil -} - -// Handle sets a callback for a given capability. The callback will be called for every plugin with that capability. -// TODO: append instead of set? -func Handle(capability string, callback func(string, *plugins.Client)) { - pluginType := fmt.Sprintf("docker.%s/1", strings.ToLower(capability)) - manager.handlers[pluginType] = callback - if manager.handleLegacy { - plugins.Handle(capability, callback) - } -} - -func (pm *Manager) get(name string) (*plugin, error) { - pm.RLock() - defer pm.RUnlock() - - id, nameOk := pm.nameToID[name] - if !nameOk { - return nil, ErrNotFound(name) - } - - p, idOk := pm.plugins[id] - if !idOk { - return nil, ErrNotFound(name) - } - - return p, nil -} - -// FindWithCapability returns a list of plugins matching the given capability. -func FindWithCapability(capability string) ([]Plugin, error) { - handleLegacy := true - result := make([]Plugin, 0, 1) - if manager != nil { - handleLegacy = manager.handleLegacy - manager.RLock() - defer manager.RUnlock() - pluginLoop: - for _, p := range manager.plugins { - for _, typ := range p.PluginObj.Manifest.Interface.Types { - if typ.Capability != capability || typ.Prefix != "docker" { - continue pluginLoop - } - } - result = append(result, p) - } - } - if handleLegacy { - pl, err := plugins.GetAll(capability) - if err != nil { - return nil, fmt.Errorf("legacy plugin: %v", err) - } - for _, p := range pl { - if _, ok := manager.nameToID[p.Name()]; !ok { - result = append(result, p) - } - } - } - return result, nil -} - -// LookupWithCapability returns a plugin matching the given name and capability. -func LookupWithCapability(name, capability string) (Plugin, error) { - var ( - p *plugin - err error - ) - handleLegacy := true - if manager != nil { - fullName := name - if named, err := reference.ParseNamed(fullName); err == nil { // FIXME: validate - if reference.IsNameOnly(named) { - named = reference.WithDefaultTag(named) - } - ref, ok := named.(reference.NamedTagged) - if !ok { - return nil, fmt.Errorf("invalid name: %s", named.String()) - } - fullName = ref.String() - } - p, err = manager.get(fullName) - if err != nil { - if _, ok := err.(ErrNotFound); !ok { - return nil, err - } - handleLegacy = manager.handleLegacy - } else { - handleLegacy = false - } - } - if handleLegacy { - p, err := plugins.Get(name, capability) - if err != nil { - return nil, fmt.Errorf("legacy plugin: %v", err) - } - return p, nil - } else if err != nil { - return nil, err - } - - capability = strings.ToLower(capability) - for _, typ := range p.PluginObj.Manifest.Interface.Types { - if typ.Capability == capability && typ.Prefix == "docker" { - return p, nil - } - } - return nil, ErrInadequateCapability{name, capability} -} - -// StateChanged updates plugin internals using events from libcontainerd.
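Handle above fans a capability out to interested subsystems (and, with handleLegacy set, mirrors the registration into pkg/plugins). A hypothetical registration sketch, assuming the manager was already initialized via Init, the experimental build tag is set, and using an illustrative "volumedriver" capability string not taken from this patch:

package main

import (
	"log"

	"github.com/docker/docker/pkg/plugins"
	"github.com/docker/docker/plugin"
)

func main() {
	// The callback fires once for every plugin advertising the capability.
	plugin.Handle("volumedriver", func(name string, c *plugins.Client) {
		log.Printf("plugin %s is available", name)
	})
}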
-func (pm *Manager) StateChanged(id string, e libcontainerd.StateInfo) error { - logrus.Debugf("plugin state changed %s %#v", id, e) - - switch e.State { - case libcontainerd.StateExit: - pm.RLock() - p, idOk := pm.plugins[id] - pm.RUnlock() - if !idOk { - return ErrNotFound(id) - } - if pm.shutdown == true { - p.exitChan <- true - } - } - - return nil -} - -// AttachStreams attaches io streams to the plugin -func (pm *Manager) AttachStreams(id string, iop libcontainerd.IOPipe) error { - iop.Stdin.Close() - - logger := logrus.New() - logger.Hooks.Add(logHook{id}) - // TODO: cache writer per id - w := logger.Writer() - go func() { - io.Copy(w, iop.Stdout) - }() - go func() { - // TODO: update logrus and use logger.WriterLevel - io.Copy(w, iop.Stderr) - }() - return nil -} - -func (pm *Manager) init() error { - dt, err := os.Open(filepath.Join(pm.libRoot, "plugins.json")) - if err != nil { - if os.IsNotExist(err) { - return nil - } - return err - } - - if err := json.NewDecoder(dt).Decode(&pm.plugins); err != nil { - return err - } - - var group sync.WaitGroup - group.Add(len(pm.plugins)) - for _, p := range pm.plugins { - go func(p *plugin) { - defer group.Done() - if err := pm.restorePlugin(p); err != nil { - logrus.Errorf("Error restoring plugin '%s': %s", p.Name(), err) - return - } - - pm.Lock() - pm.nameToID[p.Name()] = p.PluginObj.ID - requiresManualRestore := !pm.liveRestore && p.PluginObj.Active - pm.Unlock() - - if requiresManualRestore { - // if liveRestore is not enabled, the plugin will be stopped now so we should enable it - if err := pm.enable(p, true); err != nil { - logrus.Errorf("Error enabling plugin '%s': %s", p.Name(), err) - } - } - }(p) - } - group.Wait() - return pm.save() -} - -func (pm *Manager) initPlugin(p *plugin) error { - dt, err := os.Open(filepath.Join(pm.libRoot, p.PluginObj.ID, "manifest.json")) - if err != nil { - return err - } - err = json.NewDecoder(dt).Decode(&p.PluginObj.Manifest) - dt.Close() - if err != nil { - return err - } - - p.PluginObj.Config.Mounts = make([]types.PluginMount, len(p.PluginObj.Manifest.Mounts)) - for i, mount := range p.PluginObj.Manifest.Mounts { - p.PluginObj.Config.Mounts[i] = mount - } - p.PluginObj.Config.Env = make([]string, 0, len(p.PluginObj.Manifest.Env)) - for _, env := range p.PluginObj.Manifest.Env { - if env.Value != nil { - p.PluginObj.Config.Env = append(p.PluginObj.Config.Env, fmt.Sprintf("%s=%s", env.Name, *env.Value)) - } - } - copy(p.PluginObj.Config.Args, p.PluginObj.Manifest.Args.Value) - - f, err := os.Create(filepath.Join(pm.libRoot, p.PluginObj.ID, "plugin-config.json")) - if err != nil { - return err - } - err = json.NewEncoder(f).Encode(&p.PluginObj.Config) - f.Close() - return err -} - -func (pm *Manager) remove(p *plugin) error { - if p.PluginObj.Active { - return fmt.Errorf("plugin %s is active", p.Name()) - } - pm.Lock() // fixme: lock single record - defer pm.Unlock() - delete(pm.plugins, p.PluginObj.ID) - delete(pm.nameToID, p.Name()) - pm.save() - return os.RemoveAll(filepath.Join(pm.libRoot, p.PluginObj.ID)) -} - -func (pm *Manager) set(p *plugin, args []string) error { - m := make(map[string]string, len(args)) - for _, arg := range args { - i := strings.Index(arg, "=") - if i < 0 { - return fmt.Errorf("No equal sign '=' found in %s", arg) - } - m[arg[:i]] = arg[i+1:] - } - return errors.New("not implemented") -} - -// fixme: not safe -func (pm *Manager) save() error { - filePath := filepath.Join(pm.libRoot, "plugins.json") - - jsonData, err := json.Marshal(pm.plugins) - if err != nil { - 
logrus.Debugf("Error in json.Marshal: %v", err) - return err - } - ioutils.AtomicWriteFile(filePath, jsonData, 0600) - return nil -} - -type logHook struct{ id string } - -func (logHook) Levels() []logrus.Level { - return logrus.AllLevels -} - -func (l logHook) Fire(entry *logrus.Entry) error { - entry.Data = logrus.Fields{"plugin": l.id} - return nil -} - -func computePrivileges(m *types.PluginManifest) types.PluginPrivileges { - var privileges types.PluginPrivileges - if m.Network.Type != "null" && m.Network.Type != "bridge" { - privileges = append(privileges, types.PluginPrivilege{ - Name: "network", - Description: "", - Value: []string{m.Network.Type}, - }) - } - for _, mount := range m.Mounts { - if mount.Source != nil { - privileges = append(privileges, types.PluginPrivilege{ - Name: "mount", - Description: "", - Value: []string{*mount.Source}, - }) - } - } - for _, device := range m.Devices { - if device.Path != nil { - privileges = append(privileges, types.PluginPrivilege{ - Name: "device", - Description: "", - Value: []string{*device.Path}, - }) - } - } - if len(m.Capabilities) > 0 { - privileges = append(privileges, types.PluginPrivilege{ - Name: "capabilities", - Description: "", - Value: m.Capabilities, - }) - } - return privileges -} diff --git a/plugin/manager_linux.go b/plugin/manager_linux.go deleted file mode 100644 index d18874d603..0000000000 --- a/plugin/manager_linux.go +++ /dev/null @@ -1,188 +0,0 @@ -// +build linux,experimental - -package plugin - -import ( - "fmt" - "os" - "path/filepath" - "syscall" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/docker/libcontainerd" - "github.com/docker/docker/oci" - "github.com/docker/docker/pkg/plugins" - "github.com/docker/docker/pkg/system" - "github.com/docker/docker/restartmanager" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/opencontainers/specs/specs-go" -) - -func (pm *Manager) enable(p *plugin, force bool) error { - if p.PluginObj.Active && !force { - return fmt.Errorf("plugin %s is already enabled", p.Name()) - } - spec, err := pm.initSpec(p) - if err != nil { - return err - } - - p.restartManager = restartmanager.New(container.RestartPolicy{Name: "always"}, 0) - if err := pm.containerdClient.Create(p.PluginObj.ID, libcontainerd.Spec(*spec), libcontainerd.WithRestartManager(p.restartManager)); err != nil { // POC-only - if err := p.restartManager.Cancel(); err != nil { - logrus.Errorf("enable: restartManager.Cancel failed due to %v", err) - } - return err - } - - socket := p.PluginObj.Manifest.Interface.Socket - p.client, err = plugins.NewClient("unix://"+filepath.Join(p.runtimeSourcePath, socket), nil) - if err != nil { - if err := p.restartManager.Cancel(); err != nil { - logrus.Errorf("enable: restartManager.Cancel failed due to %v", err) - } - return err - } - - pm.Lock() // fixme: lock single record - p.PluginObj.Active = true - pm.save() - pm.Unlock() - - for _, typ := range p.PluginObj.Manifest.Interface.Types { - if handler := pm.handlers[typ.String()]; handler != nil { - handler(p.Name(), p.Client()) - } - } - - return nil -} - -func (pm *Manager) restore(p *plugin) error { - p.restartManager = restartmanager.New(container.RestartPolicy{Name: "always"}, 0) - return pm.containerdClient.Restore(p.PluginObj.ID, libcontainerd.WithRestartManager(p.restartManager)) -} - -func (pm *Manager) initSpec(p *plugin) (*specs.Spec, error) { - s := oci.DefaultSpec() - - rootfs := filepath.Join(pm.libRoot, p.PluginObj.ID, "rootfs") - s.Root = specs.Root{ 
- Path: rootfs, - Readonly: false, // TODO: all plugins should be readonly? settable in manifest? - } - - mounts := append(p.PluginObj.Config.Mounts, types.PluginMount{ - Source: &p.runtimeSourcePath, - Destination: defaultPluginRuntimeDestination, - Type: "bind", - Options: []string{"rbind", "rshared"}, - }) - for _, mount := range mounts { - m := specs.Mount{ - Destination: mount.Destination, - Type: mount.Type, - Options: mount.Options, - } - // TODO: if nil, then it's required and user didn't set it - if mount.Source != nil { - m.Source = *mount.Source - } - - if m.Source != "" && m.Type == "bind" { - /* Debugging issue #25511: Volumes and other content created under the - bind mount should be recursively propagated. rshared, not shared. - This could be the reason for EBUSY during removal. Override options - with rbind, rshared and see if CI errors are fixed. */ - m.Options = []string{"rbind", "rshared"} - fi, err := os.Lstat(filepath.Join(rootfs, string(os.PathSeparator), m.Destination)) // TODO: followsymlinks - if err != nil { - return nil, err - } - if fi.IsDir() { - if err := os.MkdirAll(m.Source, 0700); err != nil { - return nil, err - } - } - } - s.Mounts = append(s.Mounts, m) - } - - envs := make([]string, 1, len(p.PluginObj.Config.Env)+1) - envs[0] = "PATH=" + system.DefaultPathEnv - envs = append(envs, p.PluginObj.Config.Env...) - - args := append(p.PluginObj.Manifest.Entrypoint, p.PluginObj.Config.Args...) - cwd := p.PluginObj.Manifest.Workdir - if len(cwd) == 0 { - cwd = "/" - } - s.Process = specs.Process{ - Terminal: false, - Args: args, - Cwd: cwd, - Env: envs, - } - - return &s, nil -} - -func (pm *Manager) disable(p *plugin) error { - if !p.PluginObj.Active { - return fmt.Errorf("plugin %s is already disabled", p.Name()) - } - if err := p.restartManager.Cancel(); err != nil { - logrus.Error(err) - } - if err := pm.containerdClient.Signal(p.PluginObj.ID, int(syscall.SIGKILL)); err != nil { - logrus.Error(err) - } - os.RemoveAll(p.runtimeSourcePath) - pm.Lock() // fixme: lock single record - defer pm.Unlock() - p.PluginObj.Active = false - pm.save() - return nil -} - -// Shutdown stops all plugins and called during daemon shutdown. 
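The bind-mount handling in initSpec above force-overrides the mount options with rbind and rshared so that content created underneath the mount propagates recursively (see the issue #25511 note in the original). A minimal sketch of just that normalization step, assuming a local stand-in for the vendored specs.Mount type and a hypothetical source path:

	package main

	import "fmt"

	// Mount is a local stand-in for specs.Mount from the vendored
	// specs-go package.
	type Mount struct {
		Destination, Type, Source string
		Options                   []string
	}

	// normalize mirrors the bind-mount branch of initSpec: bind mounts
	// with a concrete source are forced to rbind+rshared.
	func normalize(m Mount) Mount {
		if m.Source != "" && m.Type == "bind" {
			m.Options = []string{"rbind", "rshared"}
		}
		return m
	}

	func main() {
		m := normalize(Mount{
			Destination: "/data",
			Type:        "bind",
			Source:      "/var/lib/docker/plugins/abc123/rootfs/data", // hypothetical path
		})
		fmt.Printf("%+v\n", m)
	}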
-func (pm *Manager) Shutdown() { - pm.RLock() - defer pm.RUnlock() - - pm.shutdown = true - for _, p := range pm.plugins { - if pm.liveRestore && p.PluginObj.Active { - logrus.Debug("Plugin active when liveRestore is set, skipping shutdown") - continue - } - if p.restartManager != nil { - if err := p.restartManager.Cancel(); err != nil { - logrus.Error(err) - } - } - if pm.containerdClient != nil && p.PluginObj.Active { - p.exitChan = make(chan bool) - err := pm.containerdClient.Signal(p.PluginObj.ID, int(syscall.SIGTERM)) - if err != nil { - logrus.Errorf("Sending SIGTERM to plugin failed with error: %v", err) - } else { - select { - case <-p.exitChan: - logrus.Debug("Clean shutdown of plugin") - case <-time.After(time.Second * 10): - logrus.Debug("Force shutdown plugin") - if err := pm.containerdClient.Signal(p.PluginObj.ID, int(syscall.SIGKILL)); err != nil { - logrus.Errorf("Sending SIGKILL to plugin failed with error: %v", err) - } - } - } - close(p.exitChan) - } - if err := os.RemoveAll(p.runtimeSourcePath); err != nil { - logrus.Errorf("Remove plugin runtime failed with error: %v", err) - } - } -} diff --git a/plugin/manager_windows.go b/plugin/manager_windows.go deleted file mode 100644 index d423254e74..0000000000 --- a/plugin/manager_windows.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build windows,experimental - -package plugin - -import ( - "fmt" - - "github.com/opencontainers/specs/specs-go" -) - -func (pm *Manager) enable(p *plugin, force bool) error { - return fmt.Errorf("Not implemented") -} - -func (pm *Manager) initSpec(p *plugin) (*specs.Spec, error) { - return nil, fmt.Errorf("Not implemented") -} - -func (pm *Manager) disable(p *plugin) error { - return fmt.Errorf("Not implemented") -} - -func (pm *Manager) restore(p *plugin) error { - return fmt.Errorf("Not implemented") -} - -// Shutdown plugins -func (pm *Manager) Shutdown() { -} diff --git a/profiles/apparmor/apparmor.go b/profiles/apparmor/apparmor.go deleted file mode 100644 index 51dfa5cf9c..0000000000 --- a/profiles/apparmor/apparmor.go +++ /dev/null @@ -1,115 +0,0 @@ -// +build linux - -package apparmor - -import ( - "bufio" - "io" - "os" - "path" - "strings" - - "github.com/docker/docker/pkg/aaparser" - "github.com/docker/docker/utils/templates" -) - -var ( - // profileDirectory is the file store for apparmor profiles and macros. - profileDirectory = "/etc/apparmor.d" - // defaultProfilePath is the default path for the apparmor profile to be saved. - defaultProfilePath = path.Join(profileDirectory, "docker") -) - -// profileData holds information about the given profile for generation. -type profileData struct { - // Name is profile name. - Name string - // Imports defines the apparmor functions to import, before defining the profile. - Imports []string - // InnerImports defines the apparmor functions to import in the profile. - InnerImports []string - // Version is the {major, minor, patch} version of apparmor_parser as a single number. - Version int -} - -// generateDefault creates an apparmor profile from ProfileData. 
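The Shutdown method above follows a common graceful-stop shape: send SIGTERM, wait on the plugin's exit channel, and fall back to SIGKILL if no exit notification arrives within ten seconds. A minimal sketch of that select-with-timeout pattern, with the signalling itself stubbed out as comments since it depends on the containerd client:

	package main

	import (
		"fmt"
		"time"
	)

	// stopPlugin sketches the SIGTERM-then-SIGKILL pattern: ask nicely,
	// then force-kill if the exit notification does not arrive in time.
	func stopPlugin(exitChan <-chan bool, timeout time.Duration) {
		// (a real implementation would send SIGTERM here)
		select {
		case <-exitChan:
			fmt.Println("clean shutdown of plugin")
		case <-time.After(timeout):
			// (a real implementation would send SIGKILL here)
			fmt.Println("force shutdown of plugin")
		}
	}

	func main() {
		exit := make(chan bool)
		go func() {
			time.Sleep(50 * time.Millisecond)
			exit <- true // stand-in for the StateExit event seen in StateChanged
		}()
		stopPlugin(exit, 10*time.Second)
	}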
-func (p *profileData) generateDefault(out io.Writer) error { - compiled, err := templates.NewParse("apparmor_profile", baseTemplate) - if err != nil { - return err - } - - if macroExists("tunables/global") { - p.Imports = append(p.Imports, "#include ") - } else { - p.Imports = append(p.Imports, "@{PROC}=/proc/") - } - - if macroExists("abstractions/base") { - p.InnerImports = append(p.InnerImports, "#include ") - } - - ver, err := aaparser.GetVersion() - if err != nil { - return err - } - p.Version = ver - - if err := compiled.Execute(out, p); err != nil { - return err - } - return nil -} - -// macrosExists checks if the passed macro exists. -func macroExists(m string) bool { - _, err := os.Stat(path.Join(profileDirectory, m)) - return err == nil -} - -// InstallDefault generates a default profile and installs it in the -// ProfileDirectory with `apparmor_parser`. -func InstallDefault(name string) error { - // Make sure the path where they want to save the profile exists - if err := os.MkdirAll(profileDirectory, 0755); err != nil { - return err - } - - p := profileData{ - Name: name, - } - - f, err := os.OpenFile(defaultProfilePath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644) - if err != nil { - return err - } - if err := p.generateDefault(f); err != nil { - f.Close() - return err - } - f.Close() - - if err := aaparser.LoadProfile(defaultProfilePath); err != nil { - return err - } - - return nil -} - -// IsLoaded checks if a passed profile has been loaded into the kernel. -func IsLoaded(name string) error { - file, err := os.Open("/sys/kernel/security/apparmor/profiles") - if err != nil { - return err - } - r := bufio.NewReader(file) - for { - p, err := r.ReadString('\n') - if err != nil { - return err - } - if strings.HasPrefix(p, name+" ") { - return nil - } - } -} diff --git a/profiles/apparmor/template.go b/profiles/apparmor/template.go deleted file mode 100644 index ada33bf0f1..0000000000 --- a/profiles/apparmor/template.go +++ /dev/null @@ -1,46 +0,0 @@ -// +build linux - -package apparmor - -// baseTemplate defines the default apparmor profile for containers. 
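InstallDefault above renders the profile template into a file and then hands that file to apparmor_parser via aaparser.LoadProfile. A minimal sketch of just the rendering step, using the standard library text/template directly instead of the templates helper; the one-rule profile body here is invented for brevity and is not the real baseTemplate (which follows):

	package main

	import (
		"os"
		"text/template"
	)

	// tmpl is a deliberately tiny profile skeleton; the real template is
	// the baseTemplate constant defined below in template.go.
	const tmpl = `profile {{.Name}} flags=(attach_disconnected,mediate_deleted) {
	}
	`

	func main() {
		t := template.Must(template.New("apparmor_profile").Parse(tmpl))
		data := struct{ Name string }{Name: "docker-default"}
		// Render to stdout; InstallDefault renders to the profile file instead.
		if err := t.Execute(os.Stdout, data); err != nil {
			panic(err)
		}
	}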
-const baseTemplate = ` -{{range $value := .Imports}} -{{$value}} -{{end}} - -profile {{.Name}} flags=(attach_disconnected,mediate_deleted) { -{{range $value := .InnerImports}} - {{$value}} -{{end}} - - network, - capability, - file, - umount, - - deny @{PROC}/* w, # deny write for all files directly in /proc (not in a subdir) - # deny write to files not in /proc//** or /proc/sys/** - deny @{PROC}/{[^1-9],[^1-9][^0-9],[^1-9s][^0-9y][^0-9s],[^1-9][^0-9][^0-9][^0-9]*}/** w, - deny @{PROC}/sys/[^k]** w, # deny /proc/sys except /proc/sys/k* (effectively /proc/sys/kernel) - deny @{PROC}/sys/kernel/{?,??,[^s][^h][^m]**} w, # deny everything except shm* in /proc/sys/kernel/ - deny @{PROC}/sysrq-trigger rwklx, - deny @{PROC}/mem rwklx, - deny @{PROC}/kmem rwklx, - deny @{PROC}/kcore rwklx, - - deny mount, - - deny /sys/[^f]*/** wklx, - deny /sys/f[^s]*/** wklx, - deny /sys/fs/[^c]*/** wklx, - deny /sys/fs/c[^g]*/** wklx, - deny /sys/fs/cg[^r]*/** wklx, - deny /sys/firmware/efi/efivars/** rwklx, - deny /sys/kernel/security/** rwklx, - -{{if ge .Version 208095}} - # suppress ptrace denials when using 'docker ps' or using 'ps' inside a container - ptrace (trace,read) peer=docker-default, -{{end}} -} -` diff --git a/profiles/seccomp/default.json b/profiles/seccomp/default.json deleted file mode 100755 index 40af6ad3b5..0000000000 --- a/profiles/seccomp/default.json +++ /dev/null @@ -1,1593 +0,0 @@ -{ - "defaultAction": "SCMP_ACT_ERRNO", - "architectures": [ - "SCMP_ARCH_X86_64", - "SCMP_ARCH_X86", - "SCMP_ARCH_X32" - ], - "syscalls": [ - { - "name": "accept", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "accept4", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "access", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "alarm", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "bind", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "brk", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "capget", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "capset", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "chdir", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "chmod", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "chown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "chown32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "clock_getres", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "clock_gettime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "clock_nanosleep", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "close", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "connect", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "copy_file_range", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "creat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "dup", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "dup2", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "dup3", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_create", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_create1", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_ctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_ctl_old", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_pwait", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": 
"epoll_wait", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "epoll_wait_old", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "eventfd", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "eventfd2", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "execve", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "execveat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "exit", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "exit_group", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "faccessat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fadvise64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fadvise64_64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fallocate", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fanotify_mark", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchdir", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchmod", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchmodat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchown32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fchownat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fcntl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fcntl64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fdatasync", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fgetxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "flistxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "flock", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fork", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fremovexattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fsetxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fstat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fstat64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fstatat64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fstatfs", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fstatfs64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "fsync", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ftruncate", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ftruncate64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "futex", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "futimesat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getcpu", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getcwd", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getdents", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getdents64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getegid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getegid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "geteuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "geteuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getgid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": 
"getgroups", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getgroups32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getitimer", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getpeername", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getpgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getpgrp", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getpid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getppid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getpriority", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getrandom", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getresgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getresgid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getresuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getresuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getrlimit", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "get_robust_list", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getrusage", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getsid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getsockname", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getsockopt", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "get_thread_area", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "gettid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "gettimeofday", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "getxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "inotify_add_watch", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "inotify_init", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "inotify_init1", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "inotify_rm_watch", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "io_cancel", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ioctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "io_destroy", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "io_getevents", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ioprio_get", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ioprio_set", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "io_setup", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "io_submit", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ipc", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "kill", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lchown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lchown32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lgetxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "link", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "linkat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "listen", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "listxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "llistxattr", - 
"action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "_llseek", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lremovexattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lseek", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lsetxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lstat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "lstat64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "madvise", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "memfd_create", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mincore", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mkdir", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mkdirat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mknod", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mknodat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mlock", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mlock2", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mlockall", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mmap", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mmap2", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mprotect", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_getsetattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_notify", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_open", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_timedreceive", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_timedsend", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mq_unlink", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "mremap", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "msgctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "msgget", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "msgrcv", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "msgsnd", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "msync", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "munlock", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "munlockall", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "munmap", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "nanosleep", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "newfstatat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "_newselect", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "open", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "openat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pause", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "personality", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 0, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ] - }, - { - "name": "personality", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 8, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ] - }, - { - "name": "personality", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 4294967295, - "valueTwo": 0, - "op": "SCMP_CMP_EQ" - } - ] - }, - { - "name": "pipe", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pipe2", - 
"action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "poll", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ppoll", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "prctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pread64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "preadv", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "prlimit64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pselect6", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pwrite64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "pwritev", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "read", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "readahead", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "readlink", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "readlinkat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "readv", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "recv", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "recvfrom", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "recvmmsg", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "recvmsg", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "remap_file_pages", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "removexattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rename", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "renameat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "renameat2", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "restart_syscall", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rmdir", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigaction", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigpending", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigprocmask", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigqueueinfo", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigreturn", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigsuspend", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_sigtimedwait", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "rt_tgsigqueueinfo", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_getaffinity", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_getattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_getparam", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_get_priority_max", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_get_priority_min", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_getscheduler", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_rr_get_interval", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_setaffinity", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_setattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_setparam", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_setscheduler", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sched_yield", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "seccomp", - 
"action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "select", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "semctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "semget", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "semop", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "semtimedop", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "send", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sendfile", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sendfile64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sendmmsg", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sendmsg", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sendto", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setfsgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setfsgid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setfsuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setfsuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setgid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setgroups", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setgroups32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setitimer", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setpgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setpriority", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setregid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setregid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setresgid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setresgid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setresuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setresuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setreuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setreuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setrlimit", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "set_robust_list", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setsid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setsockopt", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "set_thread_area", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "set_tid_address", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setuid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setuid32", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "setxattr", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "shmat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "shmctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "shmdt", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "shmget", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "shutdown", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sigaltstack", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "signalfd", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "signalfd4", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - 
"name": "sigreturn", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "socket", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "socketcall", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "socketpair", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "splice", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "stat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "stat64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "statfs", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "statfs64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "symlink", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "symlinkat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sync", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sync_file_range", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "syncfs", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "sysinfo", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "syslog", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "tee", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "tgkill", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "time", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timer_create", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timer_delete", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timerfd_create", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timerfd_gettime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timerfd_settime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timer_getoverrun", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timer_gettime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "timer_settime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "times", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "tkill", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "truncate", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "truncate64", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "ugetrlimit", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "umask", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "uname", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "unlink", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "unlinkat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "utime", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "utimensat", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "utimes", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "vfork", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "vmsplice", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "wait4", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "waitid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "waitpid", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "write", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "writev", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "arch_prctl", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "modify_ldt", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - 
"name": "chroot", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "clone", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 2080505856, - "valueTwo": 0, - "op": "SCMP_CMP_MASKED_EQ" - } - ] - } - ] -} \ No newline at end of file diff --git a/profiles/seccomp/fixtures/example.json b/profiles/seccomp/fixtures/example.json deleted file mode 100755 index 674ca50fd9..0000000000 --- a/profiles/seccomp/fixtures/example.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "defaultAction": "SCMP_ACT_ERRNO", - "syscalls": [ - { - "name": "clone", - "action": "SCMP_ACT_ALLOW", - "args": [ - { - "index": 0, - "value": 2080505856, - "valueTwo": 0, - "op": "SCMP_CMP_MASKED_EQ" - } - ] - }, - { - "name": "open", - "action": "SCMP_ACT_ALLOW", - "args": [] - }, - { - "name": "close", - "action": "SCMP_ACT_ALLOW", - "args": [] - } - ] -} diff --git a/profiles/seccomp/generate.go b/profiles/seccomp/generate.go deleted file mode 100644 index 059370bffe..0000000000 --- a/profiles/seccomp/generate.go +++ /dev/null @@ -1,35 +0,0 @@ -// +build ignore - -package main - -import ( - "encoding/json" - "io/ioutil" - "os" - "path/filepath" - - "github.com/docker/docker/oci" - "github.com/docker/docker/profiles/seccomp" -) - -// saves the default seccomp profile as a json file so people can use it as a -// base for their own custom profiles -func main() { - wd, err := os.Getwd() - if err != nil { - panic(err) - } - f := filepath.Join(wd, "default.json") - - rs := oci.DefaultSpec() - - // write the default profile to the file - b, err := json.MarshalIndent(seccomp.DefaultProfile(&rs), "", "\t") - if err != nil { - panic(err) - } - - if err := ioutil.WriteFile(f, b, 0644); err != nil { - panic(err) - } -} diff --git a/profiles/seccomp/seccomp.go b/profiles/seccomp/seccomp.go deleted file mode 100644 index 7a58e2c521..0000000000 --- a/profiles/seccomp/seccomp.go +++ /dev/null @@ -1,74 +0,0 @@ -// +build linux - -package seccomp - -import ( - "encoding/json" - "fmt" - - "github.com/docker/engine-api/types" - "github.com/opencontainers/specs/specs-go" -) - -//go:generate go run -tags 'seccomp' generate.go - -// GetDefaultProfile returns the default seccomp profile. -func GetDefaultProfile(rs *specs.Spec) (*specs.Seccomp, error) { - return setupSeccomp(DefaultProfile(rs)) -} - -// LoadProfile takes a file path and decodes the seccomp profile. 
-func LoadProfile(body string) (*specs.Seccomp, error) { - var config types.Seccomp - if err := json.Unmarshal([]byte(body), &config); err != nil { - return nil, fmt.Errorf("Decoding seccomp profile failed: %v", err) - } - - return setupSeccomp(&config) -} - -func setupSeccomp(config *types.Seccomp) (newConfig *specs.Seccomp, err error) { - if config == nil { - return nil, nil - } - - // No default action specified, no syscalls listed, assume seccomp disabled - if config.DefaultAction == "" && len(config.Syscalls) == 0 { - return nil, nil - } - - newConfig = &specs.Seccomp{} - - // if config.Architectures == 0 then libseccomp will figure out the architecture to use - if len(config.Architectures) > 0 { - for _, arch := range config.Architectures { - newConfig.Architectures = append(newConfig.Architectures, specs.Arch(arch)) - } - } - - newConfig.DefaultAction = specs.Action(config.DefaultAction) - - // Loop through all syscall blocks and convert them to libcontainer format - for _, call := range config.Syscalls { - newCall := specs.Syscall{ - Name: call.Name, - Action: specs.Action(call.Action), - } - - // Loop through all the arguments of the syscall and convert them - for _, arg := range call.Args { - newArg := specs.Arg{ - Index: arg.Index, - Value: arg.Value, - ValueTwo: arg.ValueTwo, - Op: specs.Operator(arg.Op), - } - - newCall.Args = append(newCall.Args, newArg) - } - - newConfig.Syscalls = append(newConfig.Syscalls, newCall) - } - - return newConfig, nil -} diff --git a/profiles/seccomp/seccomp_default.go b/profiles/seccomp/seccomp_default.go deleted file mode 100644 index d23c7732e3..0000000000 --- a/profiles/seccomp/seccomp_default.go +++ /dev/null @@ -1,1879 +0,0 @@ -// +build linux,seccomp - -package seccomp - -import ( - "syscall" - - "github.com/docker/engine-api/types" - "github.com/opencontainers/specs/specs-go" - libseccomp "github.com/seccomp/libseccomp-golang" -) - -func arches() []types.Arch { - var native, err = libseccomp.GetNativeArch() - if err != nil { - return []types.Arch{} - } - var a = native.String() - switch a { - case "amd64": - return []types.Arch{types.ArchX86_64, types.ArchX86, types.ArchX32} - case "arm64": - return []types.Arch{types.ArchARM, types.ArchAARCH64} - case "mips64": - return []types.Arch{types.ArchMIPS, types.ArchMIPS64, types.ArchMIPS64N32} - case "mips64n32": - return []types.Arch{types.ArchMIPS, types.ArchMIPS64, types.ArchMIPS64N32} - case "mipsel64": - return []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64, types.ArchMIPSEL64N32} - case "mipsel64n32": - return []types.Arch{types.ArchMIPSEL, types.ArchMIPSEL64, types.ArchMIPSEL64N32} - case "s390x": - return []types.Arch{types.ArchS390, types.ArchS390X} - default: - return []types.Arch{} - } -} - -// DefaultProfile defines the whitelist for the default seccomp profile. 
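DefaultProfile, defined next, pairs the syscall whitelist with the architecture compatibility list computed by arches() above: the native architecture determines which additional SCMP_ARCH_* entries must be allowed (for example, 32-bit x86 binaries on an amd64 kernel). A plain-Go sketch of that mapping; the real helper asks libseccomp-golang for the native architecture, and the string values here simply mirror the SCMP_ARCH_* names from default.json:

	package main

	import "fmt"

	// compatArches maps a native architecture name to the set of seccomp
	// architectures a container on that host may legitimately use.
	func compatArches(native string) []string {
		switch native {
		case "amd64":
			return []string{"SCMP_ARCH_X86_64", "SCMP_ARCH_X86", "SCMP_ARCH_X32"}
		case "arm64":
			return []string{"SCMP_ARCH_ARM", "SCMP_ARCH_AARCH64"}
		case "s390x":
			return []string{"SCMP_ARCH_S390", "SCMP_ARCH_S390X"}
		default:
			// Unknown native arch: let libseccomp pick, as arches() does
			// by returning an empty list.
			return nil
		}
	}

	func main() {
		fmt.Println(compatArches("amd64"))
	}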
-func DefaultProfile(rs *specs.Spec) *types.Seccomp { - - syscalls := []*types.Syscall{ - { - Name: "accept", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "accept4", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "access", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "alarm", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "bind", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "brk", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "capget", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "capset", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "chdir", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "chmod", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "chown", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "chown32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - - { - Name: "clock_getres", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "clock_gettime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "clock_nanosleep", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "close", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "connect", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "copy_file_range", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "creat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "dup", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "dup2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "dup3", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_create", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_create1", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_ctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_ctl_old", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_pwait", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_wait", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "epoll_wait_old", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "eventfd", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "eventfd2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "execve", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "execveat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "exit", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "exit_group", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "faccessat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fadvise64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fadvise64_64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fallocate", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fanotify_mark", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchdir", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchmod", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchmodat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchown", - Action: types.ActAllow, - Args: 
[]*types.Arg{}, - }, - { - Name: "fchown32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fchownat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fcntl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fcntl64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fdatasync", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fgetxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "flistxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "flock", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fork", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fremovexattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fsetxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fstat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fstat64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fstatat64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fstatfs", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fstatfs64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fsync", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ftruncate", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ftruncate64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "futex", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "futimesat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getcpu", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getcwd", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getdents", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getdents64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getegid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getegid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "geteuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "geteuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getgid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getgroups", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getgroups32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getitimer", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getpeername", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getpgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getpgrp", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getpid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getppid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getpriority", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getrandom", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getresgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getresgid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getresuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getresuid32", - Action: types.ActAllow, - 
Args: []*types.Arg{}, - }, - { - Name: "getrlimit", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "get_robust_list", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getrusage", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getsid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getsockname", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getsockopt", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "get_thread_area", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "gettid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "gettimeofday", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "getxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "inotify_add_watch", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "inotify_init", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "inotify_init1", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "inotify_rm_watch", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "io_cancel", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ioctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "io_destroy", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "io_getevents", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ioprio_get", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ioprio_set", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "io_setup", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "io_submit", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ipc", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "kill", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lchown", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lchown32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lgetxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "link", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "linkat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "listen", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "listxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "llistxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "_llseek", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lremovexattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lseek", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lsetxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lstat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lstat64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "madvise", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "memfd_create", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mincore", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mkdir", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: 
"mkdirat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mknod", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mknodat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mlock", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mlock2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mlockall", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mmap", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mmap2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mprotect", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_getsetattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_notify", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_open", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_timedreceive", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_timedsend", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mq_unlink", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mremap", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "msgctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "msgget", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "msgrcv", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "msgsnd", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "msync", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "munlock", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "munlockall", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "munmap", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "nanosleep", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "newfstatat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "_newselect", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "open", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "openat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "pause", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "personality", - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x0, - Op: types.OpEqualTo, - }, - }, - }, - { - Name: "personality", - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0x0008, - Op: types.OpEqualTo, - }, - }, - }, - { - Name: "personality", - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: 0, - Value: 0xffffffff, - Op: types.OpEqualTo, - }, - }, - }, - { - Name: "pipe", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "pipe2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "poll", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ppoll", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "prctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "pread64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "preadv", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "prlimit64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "pselect6", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "pwrite64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { 
- Name: "pwritev", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "read", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "readahead", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "readlink", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "readlinkat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "readv", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "recv", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "recvfrom", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "recvmmsg", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "recvmsg", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "remap_file_pages", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "removexattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rename", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "renameat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "renameat2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "restart_syscall", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rmdir", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigaction", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigpending", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigprocmask", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigqueueinfo", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigreturn", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigsuspend", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_sigtimedwait", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "rt_tgsigqueueinfo", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_getaffinity", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_getattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_getparam", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_get_priority_max", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_get_priority_min", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_getscheduler", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_rr_get_interval", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_setaffinity", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_setattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_setparam", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_setscheduler", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sched_yield", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "seccomp", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "select", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "semctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "semget", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "semop", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "semtimedop", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: 
"send", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sendfile", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sendfile64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sendmmsg", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sendmsg", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sendto", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setfsgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setfsgid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setfsuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setfsuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setgid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setgroups", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setgroups32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setitimer", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setpgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setpriority", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setregid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setregid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setresgid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setresgid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setresuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setresuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setreuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setreuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setrlimit", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "set_robust_list", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setsid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setsockopt", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "set_thread_area", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "set_tid_address", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setuid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setuid32", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setxattr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "shmat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "shmctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "shmdt", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "shmget", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "shutdown", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sigaltstack", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "signalfd", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "signalfd4", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sigreturn", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "socket", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "socketcall", - Action: types.ActAllow, - Args: 
[]*types.Arg{}, - }, - { - Name: "socketpair", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "splice", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "stat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "stat64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "statfs", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "statfs64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "symlink", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "symlinkat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sync", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sync_file_range", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "syncfs", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sysinfo", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "syslog", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "tee", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "tgkill", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "time", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timer_create", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timer_delete", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timerfd_create", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timerfd_gettime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timerfd_settime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timer_getoverrun", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timer_gettime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "timer_settime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "times", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "tkill", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "truncate", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "truncate64", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ugetrlimit", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "umask", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "uname", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "unlink", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "unlinkat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "utime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "utimensat", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "utimes", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "vfork", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "vmsplice", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "wait4", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "waitid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "waitpid", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "write", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "writev", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - } - - var sysCloneFlagsIndex uint - var arch string - var native, err = libseccomp.GetNativeArch() - if err == nil { - arch = 
native.String() - } - switch arch { - case "arm", "arm64": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "breakpoint", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "cacheflush", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "set_tls", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "amd64", "x32": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "arch_prctl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - fallthrough - case "x86": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "modify_ldt", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "s390", "s390x": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "s390_pci_mmio_read", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "s390_pci_mmio_write", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "s390_runtime_instr", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - /* Flags parameter of the clone syscall is the 2nd on s390 */ - sysCloneFlagsIndex = 1 - } - - capSysAdmin := false - - var cap string - for _, cap = range rs.Process.Capabilities { - switch cap { - case "CAP_DAC_READ_SEARCH": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "open_by_handle_at", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_ADMIN": - capSysAdmin = true - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "bpf", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "clone", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "fanotify_init", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "lookup_dcookie", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "mount", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "name_to_handle_at", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "perf_event_open", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setdomainname", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "sethostname", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "setns", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "umount", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "umount2", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "unshare", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_BOOT": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "reboot", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_CHROOT": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "chroot", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_MODULE": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "delete_module", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "init_module", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "finit_module", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "query_module", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_PACCT": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "acct", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) 
- case "CAP_SYS_PTRACE": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "kcmp", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "process_vm_readv", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "process_vm_writev", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ptrace", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_RAWIO": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "iopl", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "ioperm", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_TIME": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "settimeofday", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "stime", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - { - Name: "adjtimex", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - case "CAP_SYS_TTY_CONFIG": - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "vhangup", - Action: types.ActAllow, - Args: []*types.Arg{}, - }, - }...) - } - } - - if !capSysAdmin { - syscalls = append(syscalls, []*types.Syscall{ - { - Name: "clone", - Action: types.ActAllow, - Args: []*types.Arg{ - { - Index: sysCloneFlagsIndex, - Value: syscall.CLONE_NEWNS | syscall.CLONE_NEWUTS | syscall.CLONE_NEWIPC | syscall.CLONE_NEWUSER | syscall.CLONE_NEWPID | syscall.CLONE_NEWNET, - ValueTwo: 0, - Op: types.OpMaskedEqual, - }, - }, - }, - }...) - } - - return &types.Seccomp{ - DefaultAction: types.ActErrno, - Architectures: arches(), - Syscalls: syscalls, - } -} diff --git a/profiles/seccomp/seccomp_test.go b/profiles/seccomp/seccomp_test.go deleted file mode 100644 index 2c9929e925..0000000000 --- a/profiles/seccomp/seccomp_test.go +++ /dev/null @@ -1,28 +0,0 @@ -// +build linux - -package seccomp - -import ( - "io/ioutil" - "testing" -) - -func TestLoadProfile(t *testing.T) { - f, err := ioutil.ReadFile("fixtures/example.json") - if err != nil { - t.Fatal(err) - } - if _, err := LoadProfile(string(f)); err != nil { - t.Fatal(err) - } -} - -func TestLoadDefaultProfile(t *testing.T) { - f, err := ioutil.ReadFile("default.json") - if err != nil { - t.Fatal(err) - } - if _, err := LoadProfile(string(f)); err != nil { - t.Fatal(err) - } -} diff --git a/profiles/seccomp/seccomp_unsupported.go b/profiles/seccomp/seccomp_unsupported.go deleted file mode 100644 index ec7399cd01..0000000000 --- a/profiles/seccomp/seccomp_unsupported.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build linux,!seccomp - -package seccomp - -import ( - "github.com/docker/engine-api/types" - "github.com/opencontainers/specs/specs-go" -) - -// DefaultProfile returns a nil pointer on unsupported systems. -func DefaultProfile(rs *specs.Spec) *types.Seccomp { - return nil -} diff --git a/project/ARM.md b/project/ARM.md deleted file mode 100644 index c4d21bf27a..0000000000 --- a/project/ARM.md +++ /dev/null @@ -1,45 +0,0 @@ -# ARM support - -The ARM support should be considered experimental. It will be extended step by step in the coming weeks. - -Building a Docker Development Image works in the same fashion as for Intel platform (x86-64). -Currently we have initial support for 32bit ARMv7 devices. - -To work with the Docker Development Image you have to clone the Docker/Docker repo on a supported device. -It needs to have a Docker Engine installed to build the Docker Development Image. 
- -From the root of the Docker/Docker repo one can use make to execute the following make targets: -- make validate -- make binary -- make build -- make deb -- make bundles -- make default -- make shell -- make test-unit -- make test-integration-cli -- make - -The Makefile does include logic to determine on which OS and architecture the Docker Development Image is built. -Based on OS and architecture it chooses the correct Dockerfile. -For the ARM 32bit architecture it uses `Dockerfile.armhf`. - -So, for example, in order to build a Docker binary, one has to: -1. clone the Docker/Docker repository on an ARM device `git clone git@github.com:docker/docker.git` -2. change into the checked out repository with `cd docker` -3. execute `make binary` to create a Docker Engine binary for ARM - -## Kernel modules -A few libnetwork integration tests require that the kernel be -configured with the "dummy" network interface and that the module be -loaded. However, the dummy module may not be loaded automatically. - -To load the kernel module permanently, run these commands as `root`. - - modprobe dummy - echo "dummy" >> /etc/modules - -On some systems you also have to sync your kernel modules. - - oc-sync-kernel-modules - depmod diff --git a/project/BRANCHES-AND-TAGS.md b/project/BRANCHES-AND-TAGS.md deleted file mode 100644 index 1c6f232524..0000000000 --- a/project/BRANCHES-AND-TAGS.md +++ /dev/null @@ -1,35 +0,0 @@ -Branches and tags -================= - -Note: details of the release process for the Engine are documented in the -[RELEASE-CHECKLIST](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md). - -# Branches - -The docker/docker repository should normally have only three living branches at all times, including -the regular `master` branch: - -## `docs` branch - -The `docs` branch supports documentation updates between product releases. This branch allows us to -decouple documentation releases from product releases. - -## `release` branch - -The `release` branch contains the last _released_ version of the code for the project. - -The `release` branch is only updated at each public release of the project. The mechanism for this -is that the release is materialized by a pull request against the `release` branch which lives for -the duration of the code freeze period. When this pull request is merged, the `release` branch gets -updated, and its new state is tagged accordingly. - -# Tags - -Any public release of a compiled binary, with the logical exception of nightly builds, should have -a corresponding tag in the repository. - -The general format of a tag is `vX.Y.Z[-suffix[N]]`: - -- All of `X`, `Y`, `Z` must be specified (example: `v1.0.0`) -- First release candidate for version `1.8.0` should be tagged `v1.8.0-rc1` -- Second alpha release of a product should be tagged `v1.0.0-alpha1` diff --git a/project/CONTRIBUTORS.md b/project/CONTRIBUTORS.md deleted file mode 120000 index 44fcc63439..0000000000 --- a/project/CONTRIBUTORS.md +++ /dev/null @@ -1 +0,0 @@ -../CONTRIBUTING.md \ No newline at end of file diff --git a/project/GOVERNANCE.md b/project/GOVERNANCE.md deleted file mode 100644 index 6ae7baf743..0000000000 --- a/project/GOVERNANCE.md +++ /dev/null @@ -1,17 +0,0 @@ -# Docker Governance Advisory Board Meetings - -In the spirit of openness, Docker created a Governance Advisory Board, and committed to making all materials and notes from the meetings of this group public.
-All output from the meetings should be considered proposals only, and is subject to the review and approval of the community and the project leadership. - -The materials from the first Docker Governance Advisory Board meeting, held on October 28, 2014, are available at -[Google Docs Folder](https://goo.gl/Alfj8r) - -These include: - -* First Meeting Notes -* DGAB Charter -* Presentation 1: Introductory Presentation, including State of The Project -* Presentation 2: Overall Contribution Structure/Docker Project Core Proposal -* Presentation 3: Long Term Roadmap/Statement of Direction - - diff --git a/project/IRC-ADMINISTRATION.md b/project/IRC-ADMINISTRATION.md deleted file mode 100644 index 824a14bd51..0000000000 --- a/project/IRC-ADMINISTRATION.md +++ /dev/null @@ -1,37 +0,0 @@ -# Freenode IRC Administration Guidelines and Tips - -This is not meant to be a general "Here's how to IRC" document, so if you're -looking for that, check Google instead. ♥ - -If you've been charged with helping maintain one of Docker's now many IRC -channels, this might turn out to be useful. If there's information that you -wish you'd known about how a particular channel is organized, you should add -deets here! :) - -## `ChanServ` - -Most channel maintenance happens by talking to Freenode's `ChanServ` bot. For -example, `/msg ChanServ ACCESS LIST` will show you a list of everyone -with "access" privileges for a particular channel. - -A similar command is used to give someone a particular access level. For -example, to add a new maintainer to the `#docker-maintainers` access list so -that they can contribute to the discussions (after they've been merged -appropriately in a `MAINTAINERS` file, of course), one would use `/msg ChanServ -ACCESS #docker-maintainers ADD maintainer`. - -To set up a new channel with a similar `maintainer` access template, use a -command like `/msg ChanServ TEMPLATE maintainer +AV` (`+A` for letting -them view the `ACCESS LIST`, `+V` for auto-voice; see `/msg ChanServ HELP FLAGS` -for more details). - -## Troubleshooting - -The most common cause of not-getting-auto-`+v` woes is people not being -`IDENTIFY`ed with `NickServ` (or their current nickname not being `GROUP`ed with -their main nickname) -- often manifested by `ChanServ` responding to an `ACCESS -ADD` request with something like `xyz is not registered.`. - -This is easily fixed by doing `/msg NickServ IDENTIFY OldNick SecretPassword` -followed by `/msg NickServ GROUP` to group the two nicknames together. See -`/msg NickServ HELP GROUP` for more information. diff --git a/project/ISSUE-TRIAGE.md b/project/ISSUE-TRIAGE.md deleted file mode 100644 index 80b2232b13..0000000000 --- a/project/ISSUE-TRIAGE.md +++ /dev/null @@ -1,91 +0,0 @@ -Triaging of issues ------------------- - -Triage provides an important way to contribute to an open source project. Triage helps ensure issues are resolved quickly by: - -- Ensuring the issue's intent and purpose are conveyed precisely. This is necessary because it can be difficult for an issue to explain how an end user experiences a problem and what actions they took. -- Giving a contributor the information they need before they commit to resolving an issue. -- Lowering the issue count by preventing duplicate issues. -- Streamlining the development process by preventing duplicate discussions. - -If you don't have time to code, consider helping with triage. The community will thank you for saving them time by spending some of yours. - -### 1. 
Ensure the issue contains basic information - -Before triaging an issue very far, make sure that the issue's author provided the standard issue information. This will help you make an educated recommendation on how to categorize the issue. Standard information that *must* be included in most issues includes things such as: - -- the output of `docker version` -- the output of `docker info` -- the output of `uname -a` -- a reproducible case if this is a bug, Dockerfiles FTW -- host distribution and version (Ubuntu 14.04, RHEL, Fedora 23) -- page URL if this is a docs issue or the name of a man page - -Depending on the issue, you might not feel all this information is needed. Use your best judgement. If you cannot triage an issue using what its author provided, explain kindly to the author that they must provide the above information to clarify the problem. - -If the author provides the standard information but you are still unable to triage the issue, request additional information. Do this kindly and politely because you are asking for more of the author's time. - -If the author does not provide the requested information within a week, close the issue with a kind note stating that the author can request that the issue be -reopened when the necessary information is provided. - -### 2. Classify the Issue - -An issue can have more than one of the following labels. - -#### Issue kind - -| Kind             | Description                                                                                                                       | -|------------------|---------------------------------------------------------------------------------------------------------------------------------| -| kind/bug         | Bugs are bugs. The cause may or may not be known at triage time so debugging should be taken into account in the time estimate.  | -| kind/docs        | Writing documentation, man pages, articles, blogs, or other significant word-driven tasks.                                       | -| kind/enhancement | Enhancements are not bugs or new features but can drastically improve usability or performance of a project component.           | -| kind/feature     | Functionality or other elements that the project does not currently support. Features are new and shiny.                         | -| kind/question    | Contains a user or contributor question requiring a response.                                                                    | - -#### Functional area - -| Area                      | -|---------------------------| -| area/api                  | -| area/builder              | -| area/cli                  | -| area/kernel               | -| area/runtime              | -| area/storage              | -| area/storage/aufs         | -| area/storage/btrfs        | -| area/storage/devicemapper | -| area/storage/overlay      | -| area/storage/zfs          | - -#### Experience level - -Experience level is a way for a contributor to find an issue based on their -skill set. Experience types are applied to the issue or pull request using -labels. - -| Level            | Experience level guideline                                                                                                                                                     | -|------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| exp/beginner     | New to Docker, and possibly Golang, and is looking to help while learning the basics.                                                                                        | -| exp/intermediate | Comfortable with Golang, understands the core concepts of Docker, and is looking to dive deeper into the project.                                                            | -| exp/expert       | Proficient with Docker and Golang and has been following, and active in, the community to understand the rationale behind design decisions and where the project is headed.  | - -As the table states, these labels are meant as guidelines. You might have -written a whole plugin for Docker in a personal project and never contributed to -Docker. 
With that kind of experience, you could take on an exp/expert level task. - -### 3. Prioritizing issues - -When attached to a specific milestone, an issue can be attributed one of the -following labels to indicate its degree of priority (from most urgent to least -urgent). - -| Priority    | Description                                                                                                                         | -|-------------|-------------------------------------------------------------------------------------------------------------------------------------| -| priority/P0 | Urgent: Security, critical bugs, blocking issues. P0 basically means drop everything you are doing until this issue is addressed.  | -| priority/P1 | Important: P1 issues are a top priority and a must-have for the next release.                                                       | -| priority/P2 | Normal priority: default priority applied.                                                                                          | -| priority/P3 | Best effort: those are nice to have / minor issues.                                                                                 | - -And that's it. That should be all the information required for a new or existing contributor to come in and resolve an issue. diff --git a/project/PACKAGE-REPO-MAINTENANCE.md b/project/PACKAGE-REPO-MAINTENANCE.md deleted file mode 100644 index 3763f8798b..0000000000 --- a/project/PACKAGE-REPO-MAINTENANCE.md +++ /dev/null @@ -1,74 +0,0 @@ -# Apt & Yum Repository Maintenance -## A maintainer's guide to managing Docker's package repos - -### How to clean up old experimental debs and rpms - -We release debs and rpms for experimental nightly builds, so these can build up. -To remove old experimental debs and rpms, and _ONLY_ keep the latest, follow the -steps below. - -1. Check out docker master - -2. Run clean scripts - -```bash -docker build --rm --force-rm -t docker-dev:master . -docker run --rm -it --privileged \ - -v /path/to/your/repos/dir:/volumes/repos \ - -v $HOME/.gnupg:/root/.gnupg \ - -e GPG_PASSPHRASE \ - -e DOCKER_RELEASE_DIR=/volumes/repos \ - docker-dev:master hack/make.sh clean-apt-repo clean-yum-repo generate-index-listing sign-repos -``` - -3. Upload the changed repos to `s3` (if you host on s3) - -4. Purge the cache, PURGE the cache, PURGE THE CACHE! - -### How to get out of a sticky situation - -Sh\*t happens. We know. Below are steps to get out of any "hash-sum mismatch" or -"gpg sig error" or similar error that might happen to the apt repo. - -**NOTE:** These are apt-repo specific; we have had no experience with anything similar -happening to the yum repo in the past, so you can rest easy. - -For each step listed below, move on to the next if the previous didn't work. -Otherwise CELEBRATE! - -1. Purge the cache. - -2. Did you remember to sign the debs after releasing? - -Re-sign the repo with your gpg key: - -```bash -docker build --rm --force-rm -t docker-dev:master . -docker run --rm -it --privileged \ - -v /path/to/your/repos/dir:/volumes/repos \ - -v $HOME/.gnupg:/root/.gnupg \ - -e GPG_PASSPHRASE \ - -e DOCKER_RELEASE_DIR=/volumes/repos \ - docker-dev:master hack/make.sh sign-repos -``` - -Upload the changed repo to `s3` (if that is where you host) - -PURGE THE CACHE. - -3. Run Jess' magical, save all, only in case of extreme emergencies, "you are -going to have to break this glass to get it" script. - -```bash -docker build --rm --force-rm -t docker-dev:master . -docker run --rm -it --privileged \ - -v /path/to/your/repos/dir:/volumes/repos \ - -v $HOME/.gnupg:/root/.gnupg \ - -e GPG_PASSPHRASE \ - -e DOCKER_RELEASE_DIR=/volumes/repos \ - docker-dev:master hack/make.sh update-apt-repo generate-index-listing sign-repos -``` - -4. Upload the changed repo to `s3` (if that is where you host) - -PURGE THE CACHE. 
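The steps above repeatedly insist on purging the CDN cache but leave the mechanics to the maintainer. A minimal sketch, assuming the repos are fronted by AWS CloudFront and a configured `aws` CLI; the distribution ID below is a placeholder:

```bash
# Invalidate every cached path so clients see the re-signed repo
# metadata immediately. E1234EXAMPLE is a placeholder distribution ID.
aws cloudfront create-invalidation \
    --distribution-id E1234EXAMPLE \
    --paths "/*"
```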
diff --git a/project/PACKAGERS.md b/project/PACKAGERS.md deleted file mode 100644 index 2ca5b6fe7c..0000000000 --- a/project/PACKAGERS.md +++ /dev/null @@ -1,307 +0,0 @@ -# Dear Packager, - -If you are looking to make Docker available on your favorite software -distribution, this document is for you. It summarizes the requirements for -building and running the Docker client and the Docker daemon. - -## Getting Started - -We want to help you package Docker successfully. Before doing any packaging, a -good first step is to introduce yourself on the [docker-dev mailing -list](https://groups.google.com/d/forum/docker-dev), explain what you're trying -to achieve, and tell us how we can help. Don't worry, we don't bite! There might -even be someone already working on packaging for the same distro! - -You can also join the IRC channels - #docker and #docker-dev on Freenode are both -active and friendly. - -We like to refer to Tianon ("@tianon" on GitHub and "tianon" on IRC) as our -"Packagers Relations", since he's always working to make sure our packagers have -a good, healthy upstream to work with (both in our communication and in our -build scripts). If you're having any kind of trouble, feel free to ping him -directly. He also likes to keep track of what distributions we have packagers -for, so feel free to reach out to him even just to say "Hi!" - -## Package Name - -If possible, your package should be called "docker". If that name is already -taken, a second choice is "docker-engine". Another possible choice is "docker.io". - -## Official Build vs Distro Build - -The Docker project maintains its own build and release toolchain. It is pretty -neat and entirely based on Docker (surprise!). This toolchain is the canonical -way to build Docker. We encourage you to give it a try, and if the circumstances -allow you to use it, we recommend that you do. - -You might not be able to use the official build toolchain - usually because your -distribution has a toolchain and packaging policy of its own. We get it! Your -house, your rules. The rest of this document should give you the information you -need to package Docker your way, without denaturing it in the process. - -## Build Dependencies - -To build Docker, you will need the following: - -* Recent versions of Git and Mercurial -* Go version 1.6 or later -* A clean checkout of the source added to a valid [Go - workspace](https://golang.org/doc/code.html#Workspaces) under the path - *src/github.com/docker/docker* (unless you plan to use `AUTO_GOPATH`, - explained in more detail below) - -To build the Docker daemon, you will additionally need: - -* An amd64/x86_64 machine running Linux -* SQLite version 3.7.9 or later -* libdevmapper version 1.02.68-cvs (2012-01-26) or later from lvm2 version - 2.02.89 or later -* btrfs-progs version 3.16.1 or later (unless using an older version is - absolutely necessary, in which case 3.8 is the minimum) -* libseccomp version 2.2.1 or later (for build tag seccomp) - -Be sure to also check out Docker's Dockerfile for the most up-to-date list of -these build-time dependencies. - -### Go Dependencies - -All Go dependencies are vendored under "./vendor". They are used by the official -build, so the source of truth for the current version of each dependency is -whatever is in "./vendor". - -To use the vendored dependencies, simply make sure the path to "./vendor" is -included in `GOPATH` (or use `AUTO_GOPATH`, as explained below). 
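For packagers new to Go, a minimal sketch of the "valid Go workspace" checkout described above (paths are illustrative; `AUTO_GOPATH`, covered below, automates all of this):

```bash
# Place the source at the import path the Go toolchain expects.
export GOPATH="$HOME/go"
mkdir -p "$GOPATH/src/github.com/docker"
git clone https://github.com/docker/docker.git \
    "$GOPATH/src/github.com/docker/docker"
cd "$GOPATH/src/github.com/docker/docker"
```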
- -If you would rather (or must, due to distro policy) package these dependencies -yourself, take a look at "./hack/vendor.sh" for an easy-to-parse list of the -exact version for each. - -NOTE: if you're not able to package the exact version (to the exact commit) of a -given dependency, please get in touch so we can remediate! Who knows what -discrepancies can be caused by even the slightest deviation. We promise to do -our best to make everybody happy. - -## Stripping Binaries - -Please, please, please do not strip any compiled binaries. This is really -important. - -In our own testing, stripping the resulting binaries sometimes results in a -binary that appears to work, but more often causes random panics, segfaults, and -other issues. Even if the binary appears to work, please don't strip. - -See the following quotes from Dave Cheney, which explain this position better -from the upstream Golang perspective. - -### [go issue #5855, comment #3](https://code.google.com/p/go/issues/detail?id=5855#c3) - -> Super super important: Do not strip go binaries or archives. It isn't tested, -> often breaks, and doesn't work. - -### [launchpad golang issue #1200255, comment #8](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/8) - -> To quote myself: "Please do not strip Go binaries, it is not supported, not -> tested, is often broken, and doesn't do what you want" -> -> To unpack that a bit -> -> * not supported, as in, we don't support it, and recommend against it when -> asked -> * not tested, we don't test stripped binaries as part of the build CI process -> * is often broken, stripping a go binary will produce anywhere from no, to -> subtle, to outright execution failure, see above - -### [launchpad golang issue #1200255, comment #13](https://bugs.launchpad.net/ubuntu/+source/golang/+bug/1200255/comments/13) - -> To clarify my previous statements. -> -> * I do not disagree with the debian policy, it is there for a good reason -> * Having said that, it stripping Go binaries doesn't work, and nobody is -> looking at making it work, so there is that. -> -> Thanks for patching the build formula. - -## Building Docker - -Please use our build script ("./hack/make.sh") for all your compilation of -Docker. If there's something you need that it isn't doing, or something it could -be doing to make your life as a packager easier, please get in touch with Tianon -and help us rectify the situation. Chances are good that other packagers have -run into the same problems and a fix might already be in the works, but -none of us will know for sure unless you harass Tianon about it. :) - -All the commands listed within this section should be run with the Docker source -checkout as the current working directory. - -### `AUTO_GOPATH` - -If you'd rather not be bothered with the hassle of setting up `GOPATH` -appropriately, and prefer to just get a "build that works", you should -add something similar to this to whatever script or process you're using to -build Docker: - -```bash -export AUTO_GOPATH=1 -``` - -This will cause the build scripts to set up a reasonable `GOPATH` that -automatically and properly includes both docker/docker from the local -directory, and the local "./vendor" directory as necessary. 
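For example, a distro build recipe that just wants the official build without any `GOPATH` bookkeeping might reduce to something like this (a sketch; the `binary` target is described under "Static Daemon" below):

```bash
# Let the build scripts assemble a suitable GOPATH, then build the daemon.
export AUTO_GOPATH=1
./hack/make.sh binary
```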
- -### `DOCKER_BUILDTAGS` - -If you're building a binary that may need to be used on platforms that include -AppArmor, you will need to set `DOCKER_BUILDTAGS` as follows: -```bash -export DOCKER_BUILDTAGS='apparmor' -``` - -If you're building a binary that may need to be used on platforms that include -SELinux, you will need to use the `selinux` build tag: -```bash -export DOCKER_BUILDTAGS='selinux' -``` - -If you're building a binary that may need to be used on platforms that include -seccomp, you will need to use the `seccomp` build tag: -```bash -export DOCKER_BUILDTAGS='seccomp' -``` - -There are build tags for disabling graphdrivers as well. By default, support -for all graphdrivers is built in. - -To disable btrfs: -```bash -export DOCKER_BUILDTAGS='exclude_graphdriver_btrfs' -``` - -To disable devicemapper: -```bash -export DOCKER_BUILDTAGS='exclude_graphdriver_devicemapper' -``` - -To disable aufs: -```bash -export DOCKER_BUILDTAGS='exclude_graphdriver_aufs' -``` - -NOTE: if you need to set more than one build tag, space separate them: -```bash -export DOCKER_BUILDTAGS='apparmor selinux exclude_graphdriver_aufs' -``` - -### Static Daemon - -If it is feasible within the constraints of your distribution, you should -seriously consider packaging Docker as a single static binary. A good comparison -is Busybox, which is often packaged statically as a feature to enable mass -portability. Because of the unique way Docker operates, being similarly static -is a "feature". - -To build a static Docker daemon binary, run the following command (first -ensuring that all the necessary libraries are available in static form for -linking - see the "Build Dependencies" section above, and the relevant lines -within Docker's own Dockerfile that set up our official build environment): - -```bash -./hack/make.sh binary -``` - -This will create a static binary under -"./bundles/$VERSION/binary/docker-$VERSION", where "$VERSION" is the contents of -the file "./VERSION". This binary is usually installed somewhere like -"/usr/bin/docker". - -### Dynamic Daemon / Client-only Binary - -If you are only interested in a Docker client binary, you can build using: - -```bash -./hack/make.sh binary-client -``` - -If you need to (due to distro policy, distro library availability, or for other -reasons) create a dynamically compiled daemon binary, or if you are only -interested in creating a client binary for Docker, use something similar to the -following: - -```bash -./hack/make.sh dynbinary-client -``` - -This will create "./bundles/$VERSION/dynbinary-client/docker-$VERSION", which for -client-only builds is the important file to grab and install as appropriate. 
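A sketch of the corresponding install step, assuming a conventional `DESTDIR`-style staging root (the variable and destination are illustrative; the source path is the one produced above):

```bash
# Stage the static binary where the text above suggests it usually lands.
VERSION="$(cat VERSION)"
install -D -m 0755 "bundles/$VERSION/binary/docker-$VERSION" \
    "$DESTDIR/usr/bin/docker"
```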
- -## System Dependencies - -### Runtime Dependencies - -To function properly, the Docker daemon needs the following software to be -installed and available at runtime: - -* iptables version 1.4 or later -* procps (or similar provider of a "ps" executable) -* e2fsprogs version 1.4.12 or later (in use: mkfs.ext4, tune2fs) -* xfsprogs (in use: mkfs.xfs) -* XZ Utils version 4.9 or later -* a [properly - mounted](https://github.com/tianon/cgroupfs-mount/blob/master/cgroupfs-mount) - cgroupfs hierarchy (having a single, all-encompassing "cgroup" mount point - [is](https://github.com/docker/docker/issues/2683) - [not](https://github.com/docker/docker/issues/3485) - [sufficient](https://github.com/docker/docker/issues/4568)) - -Additionally, the Docker client needs the following software to be installed and -available at runtime: - -* Git version 1.7 or later - -### Kernel Requirements - -The Docker daemon has very specific kernel requirements. Most pre-packaged -kernels already include the necessary options enabled. If you are building your -own kernel, you will either need to discover the options necessary via trial and -error, or check out the [Gentoo -ebuild](https://github.com/tianon/docker-overlay/blob/master/app-emulation/docker/docker-9999.ebuild), -in which a list is maintained (and if there are any issues or discrepancies in -that list, please contact Tianon so they can be rectified). - -Note that in client mode, there are no specific kernel requirements, and that -the client will even run on alternative platforms such as Mac OS X / Darwin. - -### Optional Dependencies - -Some of Docker's features are activated by using optional command-line flags or -by having support for them in the kernel or userspace. A few examples include: - -* AUFS graph driver (requires AUFS patches/support enabled in the kernel, and at - least the "auplink" utility from aufs-tools) -* BTRFS graph driver (requires BTRFS support enabled in the kernel) -* ZFS graph driver (requires userspace zfs-utils and a corresponding kernel module) -* Libseccomp to allow running seccomp profiles with containers - -## Daemon Init Script - -Docker expects to run as a daemon at machine startup. Your package will need to -include a script for your distro's process supervisor of choice. Be sure to -check out the "contrib/init" folder in case a suitable init script already -exists (and if one does not, contact Tianon about whether it might be -appropriate for your distro's init script to live there too!). - -In general, Docker should be run as root, similar to the following: - -```bash -docker daemon -``` - -Generally, a `DOCKER_OPTS` variable of some kind is available for adding more -flags (such as changing the graph driver to use BTRFS, switching the location of -"/var/lib/docker", etc). - -## Communicate - -As a final note, please do feel free to reach out to Tianon at any time for -pretty much anything. He really does love hearing from our packagers and wants -to make sure we're not being a "hostile upstream". As should be a given, we -appreciate the work our packagers do to make sure we have broad distribution! diff --git a/project/PATCH-RELEASES.md b/project/PATCH-RELEASES.md deleted file mode 100644 index 548db9ab4d..0000000000 --- a/project/PATCH-RELEASES.md +++ /dev/null @@ -1,68 +0,0 @@ -# Docker patch (bugfix) release process - -Patch releases (the 'Z' in vX.Y.Z) are intended to fix major issues in a -release. 
Docker open source projects follow these procedures when creating a -patch release. - -After each release (both "major" (vX.Y.0) and "patch" releases (vX.Y.Z)), a -patch release milestone (vX.Y.Z + 1) is created. - -The creation of a patch release milestone carries no obligation to actually -*create* a patch release. The purpose of these milestones is to collect -issues and pull requests that can *justify* a patch release: - -- Any maintainer is allowed to add issues and PR's to the milestone; when - doing so, preferably leave a comment on the issue or PR explaining *why* - you think it should be considered for inclusion in a patch release. -- Issues introduced in version vX.Y.0 get added to milestone X.Y.Z+1 -- Only *regressions* should be added. Issues *discovered* in version vX.Y.0, - but already present in version vX.Y-1.Z should not be added, unless - critical. -- Patch releases can *only* contain bug-fixes. New features should - *never* be added to a patch release. - -The release captain of the "major" (X.Y.0) release is also responsible for -patch releases. The release captain, together with another maintainer, will -review issues and PRs on the milestone, and assign `priority/` labels. These -review sessions take place on a weekly basis, more frequently if needed: - -- A P0 priority is assigned to critical issues. A maintainer *must* be - assigned to these issues. Maintainers should strive to fix a P0 within a week. -- A P1 priority is assigned to major, but not critical, issues. A maintainer - *must* be assigned to these issues. -- P2 and P3 priorities are assigned to other issues. A maintainer can be - assigned. -- Non-critical issues and PR's can be removed from the milestone. Minor - changes, such as typo fixes or omissions in the documentation, can be - considered for inclusion in a patch release. - -## Deciding if a patch release should be done - -- Only a P0 can justify proceeding with a patch release. -- P1, P2, and P3 issues/PR's should not influence the decision, and - should be moved to the X.Y.Z+1 milestone, or removed from the - milestone. - -> **Note**: If the next "major" release is imminent, the release captain -> can decide to cancel a patch release, and include the patches in the -> upcoming major release. - -> **Note**: Security releases are also "patch releases", but follow -> a different procedure. Security releases are developed in a private -> repository, released and tested under embargo before they become -> publicly available. - -## Deciding on the content of a patch release - -When the criteria for moving forward with a patch release are met, the release -manager will decide on the exact content of the release. - -- Fixes to all P0 issues *must* be included in the release. -- Fixes to *some* P1, P2, and P3 issues *may* be included as part of the patch - release depending on the severity of the issue and the risk associated with - the patch. - -Any code delivered as part of a patch release should make life easier for a -significant number of users with zero chance of degrading anybody's experience. -A good rule of thumb for that is to limit cherry-picking to small patches, which -fix well-understood issues, and which come with verifiable tests. 
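In practice, preparing such a patch release is mostly disciplined cherry-picking. A hedged sketch (the branch name and commit id are placeholders; the authoritative procedure is in RELEASE-CHECKLIST.md):

```bash
# Backport a reviewed, well-understood fix onto the patch-release branch.
git checkout release/v1.12
git cherry-pick -x <commit-id-of-fix>   # -x records the original commit id
git push origin release/v1.12
```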
diff --git a/project/PRINCIPLES.md b/project/PRINCIPLES.md deleted file mode 100644 index 53f03018ec..0000000000 --- a/project/PRINCIPLES.md +++ /dev/null @@ -1,19 +0,0 @@ -# Docker principles - -In the design and development of Docker we try to follow these principles: - -(Work in progress) - -* Don't try to replace every tool. Instead, be an ingredient to improve them. -* Less code is better. -* Fewer components are better. Do you really need to add one more class? -* 50 lines of straightforward, readable code is better than 10 lines of magic that nobody can understand. -* Don't do later what you can do now. "//FIXME: refactor" is not acceptable in new code. -* When hesitating between two options, choose the one that is easier to reverse. -* No is temporary, Yes is forever. If you're not sure about a new feature, say no. You can change your mind later. -* Containers must be portable to the greatest possible number of machines. Be suspicious of any change which makes machines less interchangeable. -* The fewer moving parts in a container, the better. -* Don't merge it unless you document it. -* Don't document it unless you can keep it up-to-date. -* Don't merge it unless you test it! -* Everyone's problem is slightly different. Focus on the part that is the same for everyone, and solve that. diff --git a/project/README.md b/project/README.md deleted file mode 100644 index 3ed68cf297..0000000000 --- a/project/README.md +++ /dev/null @@ -1,24 +0,0 @@ -# Hacking on Docker - -The `project/` directory holds information and tools for everyone involved in the process of creating and -distributing Docker, specifically: - -## Guides - -If you're a *contributor* or aspiring contributor, you should read [CONTRIBUTORS.md](../CONTRIBUTING.md). - -If you're a *maintainer* or aspiring maintainer, you should read [MAINTAINERS](../MAINTAINERS). - -If you're a *packager* or aspiring packager, you should read [PACKAGERS.md](./PACKAGERS.md). - -If you're a maintainer in charge of a *release*, you should read [RELEASE-CHECKLIST.md](./RELEASE-CHECKLIST.md). - -## Roadmap - -A high-level roadmap is available at [ROADMAP.md](../ROADMAP.md). - - -## Build tools - -[hack/make.sh](../hack/make.sh) is the primary build tool for Docker. It is used for compiling the official binary, -running the test suite, and pushing releases. diff --git a/project/RELEASE-CHECKLIST.md b/project/RELEASE-CHECKLIST.md deleted file mode 100644 index b9dcf7f4ea..0000000000 --- a/project/RELEASE-CHECKLIST.md +++ /dev/null @@ -1,512 +0,0 @@ -# Release Checklist -## A maintainer's guide to releasing Docker - -So you're in charge of a Docker release? Cool. Here's what to do. - -If your experience deviates from this document, please document the changes -to keep it up-to-date. - -It is important to note that this document assumes that the git remote in your -repository that corresponds to "https://github.com/docker/docker" is named -"origin". If yours is not (for example, if you've chosen to name it "upstream" -or something similar instead), be sure to adjust the listed snippets for your -local environment accordingly. If you are not sure what your upstream remote is -named, use a command like `git remote -v` to find out. - -If you don't have an upstream remote, you can add one easily using something -like: - -```bash -export GITHUBUSER="YOUR_GITHUB_USER" -git remote add origin https://github.com/docker/docker.git -git remote add $GITHUBUSER git@github.com:$GITHUBUSER/docker.git -``` - -### 1. 
Pull from master and create a release branch - -All release version numbers will be of the form vX.Y.Z, where X is the major -version number, Y is the minor version number, and Z is the patch release version number. - -#### Major releases - -The release branch name is just vX.Y because it's going to be the basis for all .Z releases. - -```bash -export BASE=vX.Y -export VERSION=vX.Y.Z -git fetch origin -git checkout --track origin/master -git checkout -b release/$BASE -``` - -This new branch is going to be the base for the release. We need to push it to origin so we -can track the cherry-picked changes and the version bump: - -```bash -git push origin release/$BASE -``` - -When you have the major release branch in origin, we need to create the forked bump branch -that we'll push to our fork: - -```bash -git checkout -b bump_$VERSION -``` - -#### Patch releases - -If we have the release branch in origin, we can create the forked bump branch from it directly: - -```bash -export BASE=vX.Y -export VERSION=vX.Y.Z -export PATCH=vX.Y.Z+1 -git fetch origin -git checkout --track origin/release/$BASE -git checkout -b bump_$PATCH -``` - -We cherry-pick only the commits we want into the bump branch: - -```bash -# get the commits ids we want to cherry-pick -git log -# cherry-pick the commits starting from the oldest one, without including merge commits -git cherry-pick <commit-id> -git cherry-pick <commit-id> -... -``` - -### 2. Update the VERSION files and API version on master - -We don't want to stop contributions to master just because we are releasing. -So, after the release branch is up, we bump the VERSION and API version to mark -the start of the "next" release. - -#### 2.1 Update the VERSION files - -Update the content of the `VERSION` file to be the next minor (incrementing Y) -and add the `-dev` suffix. For example, after the release branch for 1.5.0 is -created, the `VERSION` file gets updated to `1.6.0-dev` (as in "1.6.0 in the -making"). - -#### 2.2 Update API version on master - -We don't want API changes to go to the now frozen API version. Create a new -entry in `docs/reference/api/` by copying the latest and bumping the version -number (in both the file's name and content), and submit this in a PR against -master. - -### 3. Update CHANGELOG.md - -You can run this command for reference with git 2.0: - -```bash -git fetch --tags -LAST_VERSION=$(git tag -l --sort=-version:refname "v*" | grep -E 'v[0-9\.]+$' | head -1) -git log --stat $LAST_VERSION..bump_$VERSION -``` - -If you don't have git 2.0 but have a sort command that supports `-V`: -```bash -git fetch --tags -LAST_VERSION=$(git tag -l | grep -E 'v[0-9\.]+$' | sort -rV | head -1) -git log --stat $LAST_VERSION..bump_$VERSION -``` - -If releasing a major version (X or Y increased in vX.Y.Z), simply listing notable user-facing features is sufficient. -```markdown -#### Notable features since <last version> -* New docker command to do something useful -* Remote API change (deprecating old version) -* Performance improvements in some use cases -* ... -``` - -For minor releases (only Z increases in vX.Y.Z), provide a list of user-facing changes. -Each change should be listed under a category heading formatted as `#### CATEGORY`. - -`CATEGORY` should describe which part of the project is affected. 
- Valid categories are: - * Builder - * Documentation - * Hack - * Packaging - * Remote API - * Runtime - * Other (please use this category sparingly) - -Each change should be formatted as `BULLET DESCRIPTION`, given: - -* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or - upgrade, respectively. - -* DESCRIPTION: a concise description of the change that is relevant to the - end-user, using the present tense. Changes should be described in terms - of how they affect the user, for example "Add new feature X which allows Y", - "Fix bug which caused X", "Increase performance of Y". - -EXAMPLES: - -```markdown -## 0.3.6 (1995-12-25) - -#### Builder - -+ 'docker build -t FOO .' applies the tag FOO to the newly built image - -#### Remote API - -- Fix a bug in the optional unix socket transport - -#### Runtime - -* Improve detection of kernel version -``` - -If you need a list of contributors between the last major release and the -current bump branch, use something like: -```bash -git log --format='%aN <%aE>' v0.7.0...bump_v0.8.0 | sort -uf -``` -Obviously, you'll need to adjust version numbers as necessary. If you just need -a count, add a simple `| wc -l`. - -### 4. Change the contents of the VERSION file - -Before the big thing, you'll want to make successive release candidates and get -people to test. The release candidate number `N` should be part of the version: - -```bash -export RC_VERSION=${VERSION}-rcN -echo ${RC_VERSION#v} > VERSION -``` - -### 5. Test the docs - -Make sure that your tree includes documentation for any modified or -new features, syntax or semantic changes. - -To test locally: - -```bash -make docs -``` - -To make a shared test at https://beta-docs.docker.io: - -(You will need the `awsconfig` file added to the `docs/` dir) - -```bash -make AWS_S3_BUCKET=beta-docs.docker.io BUILD_ROOT=yes docs-release -``` - -### 6. Commit and create a pull request to the "release" branch - -```bash -git add VERSION CHANGELOG.md -git commit -m "Bump version to $VERSION" -git push $GITHUBUSER bump_$VERSION -echo "https://github.com/$GITHUBUSER/docker/compare/docker:release/$BASE...$GITHUBUSER:bump_$VERSION?expand=1" -``` - -That last command will give you the proper link to visit to ensure that you -open the PR against the "release" branch instead of accidentally against -"master" (like so many brave souls before you already have). - -### 7. Build release candidate rpms and debs - -**NOTE**: It will be a lot faster if you pass a graphdriver other than `vfs` -with `DOCKER_GRAPHDRIVER`. - -```bash -docker build -t docker . -docker run \ - --rm -t --privileged \ - -e DOCKER_GRAPHDRIVER=aufs \ - -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ - docker \ - hack/make.sh binary build-deb build-rpm -``` - -### 8. Publish release candidate rpms and debs - -With the rpms and debs you built from the last step, you can release them on the -same server or, ideally, move them to a dedicated release box via scp into -another docker/docker directory in bundles. This next step assumes you have -a checkout of the docker source code at the same commit you used to build, with -the artifacts from the last step in `bundles`. - -**NOTE:** If you put a space before the command, your `.bash_history` will not -save it (useful for the `GPG_PASSPHRASE`). - -```bash -docker build -t docker . 
-docker run --rm -it --privileged \ - -v /volumes/repos:/volumes/repos \ - -v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \ - -v $HOME/.gnupg:/root/.gnupg \ - -e DOCKER_RELEASE_DIR=/volumes/repos \ - -e GPG_PASSPHRASE \ - -e KEEPBUNDLE=1 \ - docker \ - hack/make.sh release-deb release-rpm sign-repos generate-index-listing -``` - -### 9. Upload the changed repos to wherever you host - -For example, above we bind mounted `/volumes/repos` as the storage for -`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with -a specific s3 bucket for the apt repo and `/volumes/repos/yum` can be synced with -an s3 bucket for the yum repo. - -### 10. Publish release candidate binaries - -To run this you will need access to the release credentials. Get them from the -Core maintainers. - -```bash -docker build -t docker . - -# static binaries are still pushed to s3 -docker run \ - -e AWS_S3_BUCKET=test.docker.com \ - -e AWS_ACCESS_KEY_ID \ - -e AWS_SECRET_ACCESS_KEY \ - -e AWS_DEFAULT_REGION \ - -i -t --privileged \ - docker \ - hack/release.sh -``` - -It will run the test suite, build the binaries and upload to the specified bucket, -so this is a good time to verify that you're running against **test**.docker.com. - -### 11. Purge the cache! - -After the binaries are uploaded to test.docker.com and the packages are on -apt.dockerproject.org and yum.dockerproject.org, make sure -they get tested on both Ubuntu and Debian for any obvious installation -issues or runtime issues. - -If everything looks good, it's time to create a git tag for this candidate: - -```bash -git tag -a $RC_VERSION -m $RC_VERSION bump_$VERSION -git push origin $RC_VERSION -``` - -Announcing on multiple channels is the best way to get some help testing! An easy -way to get some useful links for sharing: - -```bash -echo "Ubuntu/Debian: curl -sSL https://test.docker.com/ | sh" -echo "Linux 64bit binary: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}" -echo "Darwin/OSX 64bit client binary: https://test.docker.com/builds/Darwin/x86_64/docker-${VERSION#v}" -echo "Linux 64bit tgz: https://test.docker.com/builds/Linux/x86_64/docker-${VERSION#v}.tgz" -echo "Windows 64bit client binary: https://test.docker.com/builds/Windows/x86_64/docker-${VERSION#v}.exe" -echo "Windows 32bit client binary: https://test.docker.com/builds/Windows/i386/docker-${VERSION#v}.exe" -``` - -We recommend announcing the release candidate on: - -- IRC on #docker, #docker-dev, #docker-maintainers -- In a comment on the pull request to notify subscribed people on GitHub -- The [docker-dev](https://groups.google.com/forum/#!forum/docker-dev) group -- The [docker-maintainers](https://groups.google.com/a/dockerproject.org/forum/#!forum/maintainers) group -- Any social media that can bring some attention to the release candidate - -### 12. Iterate on successive release candidates - -Spend several days, along with the community, explicitly investing time and -resources to try and break Docker in every possible way, documenting any -findings pertinent to the release. This time should be spent testing and -finding ways in which the release might have caused various features or upgrade -environments to have issues, not coding. During this time, the release is in -code freeze, and any additional code changes will be pushed out to the next -release. - -It should include various levels of breaking Docker, beyond just using Docker -by the book. 
-
-Any issues found may still remain issues for this release, but they should be
-documented and given appropriate warnings.
-
-During this phase, the `bump_$VERSION` branch will keep evolving as you
-produce new release candidates. The frequency of new candidates is up to the
-release manager: use your best judgement, taking into account the severity of
-reported issues, tester availability, and time until the scheduled release date.
-
-Each time you want to produce a new release candidate, start by adding
-commits to the branch, usually by cherry-picking from master:
-
-```bash
-git cherry-pick -x -m0 <commit>
-```
-
-You want your "bump commit" (the one that updates the CHANGELOG and VERSION
-files) to remain on top, so you'll have to `git rebase -i` to bring it back
-to the top.
-
-Now that your bump commit is back on top, you will need to update the CHANGELOG
-file (if appropriate for this particular release candidate), and update the
-VERSION file to increment the RC number:
-
-```bash
-export RC_VERSION=$VERSION-rcN
-echo ${RC_VERSION#v} > VERSION
-```
-
-You can now amend your last commit and update the bump branch:
-
-```bash
-git commit --amend
-git push -f $GITHUBUSER bump_$VERSION
-```
-
-Repeat step 6 to tag the code, publish new binaries, announce availability, and
-get help testing.
-
-### 13. Finalize the bump branch
-
-When you're happy with the quality of a release candidate, you can move on and
-create the real thing.
-
-You will first have to amend the "bump commit" to drop the release candidate
-suffix in the VERSION file:
-
-```bash
-echo ${VERSION#v} > VERSION
-git add VERSION
-git commit --amend
-```
-
-You will then repeat step 6 to publish the binaries to test.
-
-### 14. Get 2 other maintainers to validate the pull request
-
-### 15. Build final rpms and debs
-
-```bash
-docker build -t docker .
-docker run \
-	--rm -t --privileged \
-	-v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \
-	docker \
-	hack/make.sh binary build-deb build-rpm
-```
-
-### 16. Publish final rpms and debs
-
-With the rpms and debs you built in the last step, you can release them on the
-same server or, ideally, move them via scp to a dedicated release box, into
-another docker/docker directory in `bundles`. This next step assumes you have
-a checkout of the docker source code at the same commit you used to build, with
-the artifacts from the last step in `bundles`.
-
-**NOTE:** If you put a space before a command, your shell will not save it to
-`.bash_history` (useful when exporting the `GPG_PASSPHRASE`).
-
-```bash
-docker build -t docker .
-docker run --rm -it --privileged \
-	-v /volumes/repos:/volumes/repos \
-	-v $(pwd)/bundles:/go/src/github.com/docker/docker/bundles \
-	-v $HOME/.gnupg:/root/.gnupg \
-	-e DOCKER_RELEASE_DIR=/volumes/repos \
-	-e GPG_PASSPHRASE \
-	-e KEEPBUNDLE=1 \
-	docker \
-	hack/make.sh release-deb release-rpm sign-repos generate-index-listing
-```
-
-### 17. Upload the changed repos to wherever you host
-
-For example, above we bind-mounted `/volumes/repos` as the storage for
-`DOCKER_RELEASE_DIR`. In this case `/volumes/repos/apt` can be synced with
-a specific S3 bucket for the apt repo, and `/volumes/repos/yum` can be synced
-with an S3 bucket for the yum repo.
-
-### 18. Publish final binaries
-
-Once the binaries are tested and reasonably believed to be working, run
-against get.docker.com:
-
-```bash
-docker build -t docker .
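-# Reminder (cf. step 10): AWS_S3_BUCKET below must now point at the
-# production bucket, get.docker.com, not test.docker.com.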
-# static binaries are still pushed to s3
-docker run \
-	-e AWS_S3_BUCKET=get.docker.com \
-	-e AWS_ACCESS_KEY_ID \
-	-e AWS_SECRET_ACCESS_KEY \
-	-e AWS_DEFAULT_REGION \
-	-i -t --privileged \
-	docker \
-	hack/release.sh
-```
-
-### 19. Purge the cache!
-
-### 20. Apply tag and create release
-
-It's very important that we don't make the tag until after the official
-release is uploaded to get.docker.com!
-
-```bash
-git tag -a $VERSION -m $VERSION bump_$VERSION
-git push origin $VERSION
-```
-
-Once the tag is pushed, go to GitHub and create a [new release](https://github.com/docker/docker/releases/new).
-If the tag is for an RC, make sure you check `This is a pre-release` at the bottom of the form.
-
-Select the tag that you just pushed as the version and paste the changelog in
-the description of the release. You can see examples in these two links:
-
-https://github.com/docker/docker/releases/tag/v1.8.0
-https://github.com/docker/docker/releases/tag/v1.8.0-rc3
-
-### 21. Go to GitHub to merge the `bump_$VERSION` branch into release
-
-Don't forget to push that pretty blue button to delete the leftover
-branch afterwards!
-
-### 22. Update the docs branch
-
-You will need to point the docs branch to the newly created release tag:
-
-```bash
-git checkout docs
-git reset --hard origin/$VERSION
-git push -f origin docs
-```
-
-The docs will appear on https://docs.docker.com/ (though there may be cached
-versions, so it's worth checking http://docs.docker.com.s3-website-us-east-1.amazonaws.com/).
-For more information about documentation releases, see `docs/README.md`.
-
-Note that the new docs will not appear live on the site until the cache (a complex,
-distributed CDN system) is flushed. The `make docs-release` command will do this
-_if_ the `DISTRIBUTION_ID` is set correctly; this will take at least 15 minutes to
-run, and you can check its progress with the CDN CloudFront Chrome addon.
-
-### 23. Create a new pull request to merge your bump commit back into master
-
-```bash
-git checkout master
-git fetch
-git reset --hard origin/master
-git cherry-pick $VERSION
-git push $GITHUBUSER master:merge_release_$VERSION
-echo "https://github.com/$GITHUBUSER/docker/compare/docker:master...$GITHUBUSER:merge_release_$VERSION?expand=1"
-```
-
-Again, get two maintainers to validate, then merge, then push that pretty
-blue button to delete your branch.
-
-### 24. Rejoice and Evangelize!
-
-Congratulations! You're done.
-
-Go forth and announce the glad tidings of the new release in `#docker`,
-`#docker-dev`, on the [dev mailing list](https://groups.google.com/forum/#!forum/docker-dev),
-the [announce mailing list](https://groups.google.com/forum/#!forum/docker-announce),
-and on Twitter!
diff --git a/project/RELEASE-PROCESS.md b/project/RELEASE-PROCESS.md
deleted file mode 100644
index d764e9d007..0000000000
--- a/project/RELEASE-PROCESS.md
+++ /dev/null
@@ -1,78 +0,0 @@
-# Docker Release Process
-
-This document describes how the Docker project is released. The Docker project
-release process targets the Engine, Compose, Kitematic, Machine, Swarm,
-Distribution, Notary, and their underlying dependencies (libnetwork, libkv,
-etc.).
-
-Step-by-step technical details of the process are described in
-[RELEASE-CHECKLIST.md](https://github.com/docker/docker/blob/master/project/RELEASE-CHECKLIST.md).
-
-## Release cycle
-
-The Docker project follows a **time-based release cycle** and ships every nine
-weeks. A release cycle starts the same day the previous release cycle ends.
-
-The first six weeks of the cycle are dedicated to development and review. During
-this phase, new features and bugfixes submitted to any of the projects are
-**eligible** to be shipped as part of the next release. However, no changeset
-submitted during this period is guaranteed to be merged for the current release
-cycle.
-
-## The freeze period
-
-Six weeks after the beginning of the cycle, the codebase is officially frozen
-and reaches a state close to the final release. A Release Candidate
-(RC) gets created at the same time. The freeze period is used to find bugs and
-get feedback on the state of the RC before the release.
-
-During this freeze period, while the `master` branch will continue its normal
-development cycle, no new features are accepted into the RC. As bugs are fixed
-in `master`, the release owner selectively cherry-picks critical fixes to
-be included into the RC. As the RC changes, new RCs are made available for the
-community to test and review.
-
-This period lasts for three weeks.
-
-## How to maximize chances of being merged before the freeze date?
-
-First of all, there is never a guarantee that a specific changeset is going to
-be merged. However, there are several things you can do to maximize the chances
-for a changeset to be merged:
-
-- The team gives priority to reviewing the PRs aligned with the Roadmap (usually
-defined by a ROADMAP.md file at the root of the repository).
-- The earlier a PR is opened, the more time the maintainers have to review it. For
-example, if a PR is opened the day before the freeze date, it's very unlikely
-that it will be merged for the release.
-- Constant communication with the maintainers (mailing-list, IRC, GitHub issues,
-etc.) allows you to get early feedback on the design before getting into the
-implementation, which usually reduces the time needed to discuss a changeset.
-- If the code is commented, fully tested, and follows every single rule defined
-by the [CONTRIBUTING guide](
-https://github.com/docker/docker/blob/master/CONTRIBUTING.md), this will help
-the maintainers by speeding up the review.
-
-## The release
-
-At the end of the freeze (nine weeks after the start of the cycle), all the
-projects are released together.
-
-```
-                              Codebase            Release
-Start of                      is frozen           (end of the
-the Cycle                     (7th week)          9th week)
-+---------------------------------------+---------------------+
-|                                       |                     |
-|          Development phase            |    Freeze phase     |
-|                                       |                     |
-+---------------------------------------+---------------------+
-          6 weeks                               3 weeks
-<---------------------------------------><-------------------->
-```
-
-## Exceptions
-
-If a critical issue is found at the end of the freeze period and more time is
-needed to address it, the release will be pushed back. When a release gets
-pushed back, the next release cycle gets delayed as well.
diff --git a/project/REVIEWING.md b/project/REVIEWING.md
deleted file mode 100644
index f8d9c1dab6..0000000000
--- a/project/REVIEWING.md
+++ /dev/null
@@ -1,209 +0,0 @@
-# Pull request reviewing process
-
-## Labels
-
-Labels are carefully picked to optimize for:
-
- - Readability: maintainers must immediately know the state of a PR
- - Filtering simplicity: different labels represent many different aspects of
-   the reviewing work, and can even be targeted at different maintainer groups.
-
-A pull request should only be given the labels documented in this section; other
-labels that may exist on the repository should apply to issues.
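-
-For example, to take advantage of that filtering simplicity, you can list the
-open pull requests waiting for triage with a GitHub search query such as the
-following (an illustrative sketch; any label documented below works the same
-way):
-
-```
-is:pr is:open label:status/0-triage
-```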
-
-### DCO labels
-
- * `dco/no`: automatically set by a bot when one of the commits lacks a proper signature
-
-### Status labels
-
- * `status/0-triage`
- * `status/1-design-review`
- * `status/2-code-review`
- * `status/3-docs-review`
- * `status/4-ready-to-merge`
-
-Special status labels:
-
- * `status/failing-ci`: indicates that the PR in its current state fails the test suite
- * `status/needs-attention`: calls for a collective discussion during a review session
-
-### Specialty group labels
-
-These labels are used to raise awareness of a particular specialty group, either
-because we need help in reviewing the PR, or because of the potential impact of
-the PR on their work:
-
- * `group/distribution`
- * `group/networking`
- * `group/security`
- * `group/windows`
-
-### Impact labels (apply to merged pull requests)
-
- * `impact/api`
- * `impact/changelog`
- * `impact/cli`
- * `impact/deprecation`
- * `impact/distribution`
- * `impact/dockerfile`
-
-### Process labels (apply to merged pull requests)
-
-Process labels assist in preparing (patch) releases. These labels should only
-be used for pull requests.
-
-Label                           | Use for
-------------------------------- | -------------------------------------------------------------------------
-`process/cherry-pick`           | PRs that should be cherry-picked in the bump/release branch. These pull requests must also be assigned to a milestone.
-`process/cherry-picked`         | PRs that have been cherry-picked. This label is helpful for finding PRs that have been added to release candidates, and for updating the changelog.
-`process/docs-cherry-pick`      | PRs that should be cherry-picked in the docs branch. Only apply this label for changes that apply to the *current* release, and generic documentation fixes, such as Markdown and spelling fixes.
-`process/docs-cherry-picked`    | PRs that have been cherry-picked in the docs branch.
-`process/merge-to-master`       | PRs that are opened directly on the bump/release branch, but also need to be merged back to "master".
-`process/merged-to-master`      | PRs that have been merged back to "master".
-
-
-## Workflow
-
-An open pull request can be in one of five distinct states, for each of which
-there is a corresponding label that needs to be applied.
-
-### Triage - `status/0-triage`
-
-Maintainers are expected to triage new incoming pull requests by removing the
-`status/0-triage` label and adding the correct labels (e.g. `status/1-design-review`)
-before any other interaction with the PR. The starting label may skip some steps
-depending on the kind of pull request: use your best judgement.
-
-Maintainers should perform an initial, high-level overview of the pull request
-before moving it to the next appropriate stage:
-
- - Has DCO
- - Contains sufficient justification (e.g., use cases) for the proposed change
- - References the GitHub issue it fixes (if any) in the commit or the first GitHub comment
-
-Possible transitions from this state:
-
- * Close: e.g., unresponsive contributor without DCO
- * `status/1-design-review`: general case
- * `status/2-code-review`: e.g. trivial bugfix
- * `status/3-docs-review`: non-proposal documentation-only change
-
-### Design review - `status/1-design-review`
-
-Maintainers are expected to comment on the design of the pull request. Review of
-documentation is expected only in the context of design validation, not for
-stylistic changes.
-
-Ideally, documentation should reflect the expected behavior of the code. No code
-review should take place in this step.
-
-There are no strict rules on the way a design is validated: we usually aim for a
-consensus, although a single maintainer approval is often sufficient for obviously
-reasonable changes. In general, strong disagreement expressed by any of the
-maintainers should not be taken lightly.
-
-Once the design is approved, a maintainer should make sure to remove this label
-and add the next one.
-
-Possible transitions from this state:
-
- * Close: design rejected
- * `status/2-code-review`: general case
- * `status/3-docs-review`: proposals with only documentation changes
-
-### Code review - `status/2-code-review`
-
-Maintainers are expected to review the code and ensure that it is of good quality
-and in accordance with the documentation in the PR.
-
-New test cases are expected to be added. Ideally, those test cases should fail
-when the new code is absent, and pass when it is present. The test cases should
-strive to exercise as many variants and code paths as possible, to ensure maximum
-coverage.
-
-Changes to code must be reviewed and approved (LGTM'd) by a minimum of two code
-maintainers. When the author of a PR is a maintainer, they still need the approval
-of two other maintainers.
-
-Once the code is approved according to the rules of the subsystem, a maintainer
-should make sure to remove this label and add the next one. If documentation is
-absent but expected, maintainers should ask for documentation and move the PR to
-`status/3-docs-review` for the docs maintainers to follow up.
-
-Possible transitions from this state:
-
- * Close
- * `status/1-design-review`: new design concerns are raised
- * `status/3-docs-review`: general case
- * `status/4-ready-to-merge`: change not impacting documentation
-
-### Docs review - `status/3-docs-review`
-
-Maintainers are expected to review the documentation in its broader context,
-ensuring consistency, completeness, validity, and breadth of coverage across all
-existing and new documentation.
-
-They should ask for any editorial change that makes the documentation more
-consistent and easier to understand.
-
-Changes and additions to docs must be reviewed and approved (LGTM'd) by a minimum
-of two docs sub-project maintainers. If the docs change originates with a docs
-maintainer, only one additional LGTM is required (since we assume a docs
-maintainer approves of their own PR).
-
-Once the documentation is approved, a maintainer should make sure to remove this
-label and add the next one.
-
-Possible transitions from this state:
-
- * Close
- * `status/1-design-review`: new design concerns are raised
- * `status/2-code-review`: requires more code changes
- * `status/4-ready-to-merge`: general case
-
-### Merge - `status/4-ready-to-merge`
-
-Maintainers are expected to merge this pull request as soon as possible. They
-can ask for a rebase, or carry the pull request themselves.
-
-Possible transitions from this state:
-
- * Merge: general case
- * Close: carry PR
-
-After merging a pull request, the maintainer should consider applying one or
-multiple impact labels to ease future classification:
-
- * `impact/api` signifies the patch impacted the remote API
 * `impact/changelog` signifies the change is significant enough to make it in the changelog
- * `impact/cli` signifies the patch impacted a CLI command
- * `impact/dockerfile` signifies the patch impacted the Dockerfile syntax
- * `impact/deprecation` signifies the patch participates in deprecating an existing feature
-
-### Close
-
-If a pull request is closed, it is expected that sufficient justification will be
-provided. In particular, if there are alternative ways of achieving the same net
-result, those need to be spelled out. If the pull request is trying to solve a
-use case that is not one that we (as a community) want to support, then a
-justification for why should be provided.
-
-The number of maintainers it takes to decide and close a PR is deliberately left
-unspecified. We assume that the group of maintainers is bound by mutual trust and
-respect, and that opposition from any single maintainer should be taken into
-consideration. Similarly, we expect maintainers to justify their reasoning and to
-accept debate.
-
-## Escalation process
-
-Despite the previously described reviewing process, some PRs might not show any
-progress for various reasons:
-
- - No strong opinion for or against the proposed patch
- - Debates about the proper way to solve the problem at hand
- - Lack of consensus
- - ...
-
-All of these can eventually lead to a stalled PR, where no apparent progress is
-made across several weeks, or even months.
-
-Maintainers should use their best judgement and apply the `status/needs-attention`
-label. It must be used sparingly, as each PR with this label will be discussed by
-a group of maintainers during a review session. The goal of that session is to
-agree on one of the following outcomes for the PR:
-
- * Close, explaining the rationale for not pursuing further
- * Continue, either by pushing the PR further in the workflow, or by deciding to
-   carry the patch (ideally, a maintainer should be immediately assigned to make
-   sure that the PR keeps receiving continued attention)
- * Escalate to Solomon by formulating a few specific questions whose answers will
-   allow maintainers to decide
-
diff --git a/project/TOOLS.md b/project/TOOLS.md
deleted file mode 100644
index 26303c3021..0000000000
--- a/project/TOOLS.md
+++ /dev/null
@@ -1,63 +0,0 @@
-# Tools
-
-This page describes the tools we use and the infrastructure that is in place
-for the Docker project.
-
-### CI
-
-The Docker project uses [Jenkins](https://jenkins.dockerproject.org/) as our
-continuous integration server. Each Pull Request to Docker is tested by running the
-equivalent of `make all`. We chose Jenkins because we can host it ourselves and
-we run Docker in Docker to test.
-
-#### Leeroy
-
-Leeroy is a Go application which integrates Jenkins with
-GitHub pull requests. Leeroy uses
-[GitHub hooks](https://developer.github.com/v3/repos/hooks/)
-to listen for pull request notifications and starts jobs on your Jenkins
-server. Using the Jenkins
-[notification plugin](https://wiki.jenkins-ci.org/display/JENKINS/Notification+Plugin),
-Leeroy updates the pull request using GitHub's
-[status API](https://developer.github.com/v3/repos/statuses/)
-with pending, success, failure, or error statuses.
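-
-As a rough sketch of what such a status update amounts to (hypothetical values
-shown; Leeroy's actual payload may differ), a single authenticated call to the
-status API is enough to mark a commit as pending:
-
-```bash
-curl -X POST \
-    -H "Authorization: token $GITHUB_TOKEN" \
-    -d '{"state": "pending", "context": "jenkins-ci", "description": "Build started"}' \
-    "https://api.github.com/repos/docker/docker/statuses/$COMMIT_SHA"
-```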
-
-The leeroy repository is maintained at
-[github.com/docker/leeroy](https://github.com/docker/leeroy).
-
-#### GordonTheTurtle IRC Bot
-
-The GordonTheTurtle IRC Bot lives in the
-[#docker-maintainers](https://botbot.me/freenode/docker-maintainers/) channel
-on Freenode. He is built in Go and is based on the project at
-[github.com/fabioxgn/go-bot](https://github.com/fabioxgn/go-bot).
-
-His main command is `!rebuild`, which rebuilds a given Pull Request for a repository.
-This command works by integrating with Leeroy. He has a few other commands too, such
-as `!gif` or `!godoc`, but we are always looking for more fun commands to add.
-
-The gordon-bot repository is maintained at
-[github.com/docker/gordon-bot](https://github.com/docker/gordon-bot).
-
-### NSQ
-
-We use [NSQ](https://github.com/bitly/nsq) for various aspects of the project
-infrastructure.
-
-#### Hooks
-
-The hooks project,
-[github.com/crosbymichael/hooks](https://github.com/crosbymichael/hooks),
-is a small Go application that manages webhooks from GitHub, hub.docker.com, and
-other third-party services.
-
-It can be used for listening for GitHub webhooks and pushing them to a queue,
-archiving hooks to RethinkDB for processing, and broadcasting hooks to various
-jobs.
-
-#### Docker Master Binaries
-
-One of the things queued from the Hooks is the building of the Master
-Binaries. This happens on every push to the master branch of Docker. The
-repository for this is maintained at
-[github.com/docker/docker-bb](https://github.com/docker/docker-bb).
diff --git a/reference/reference.go b/reference/reference.go
deleted file mode 100644
index 136ef36b76..0000000000
--- a/reference/reference.go
+++ /dev/null
@@ -1,211 +0,0 @@
-package reference
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-
-	"github.com/docker/distribution/digest"
-	distreference "github.com/docker/distribution/reference"
-	"github.com/docker/docker/image/v1"
-)
-
-const (
-	// DefaultTag defines the default tag used when performing images related actions and no tag or digest is specified
-	DefaultTag = "latest"
-	// DefaultHostname is the default built-in hostname
-	DefaultHostname = "docker.io"
-	// LegacyDefaultHostname is automatically converted to DefaultHostname
-	LegacyDefaultHostname = "index.docker.io"
-	// DefaultRepoPrefix is the prefix used for default repositories in default host
-	DefaultRepoPrefix = "library/"
-)
-
-// Named is an object with a full name
-type Named interface {
-	// Name returns normalized repository name, like "ubuntu".
-	Name() string
-	// String returns full reference, like "ubuntu@sha256:abcdef..."
-	String() string
-	// FullName returns full repository name with hostname, like "docker.io/library/ubuntu"
-	FullName() string
-	// Hostname returns hostname for the reference, like "docker.io"
-	Hostname() string
-	// RemoteName returns the repository component of the full name, like "library/ubuntu"
-	RemoteName() string
-}
-
-// NamedTagged is an object including a name and tag.
-type NamedTagged interface {
-	Named
-	Tag() string
-}
-
-// Canonical reference is an object with a fully unique
-// name including a name with hostname and digest
-type Canonical interface {
-	Named
-	Digest() digest.Digest
-}
-
-// ParseNamed parses s and returns a syntactically valid reference implementing
-// the Named interface. The reference must have a name, otherwise an error is
-// returned.
-// If an error was encountered it is returned, along with a nil Reference.
-func ParseNamed(s string) (Named, error) { - named, err := distreference.ParseNamed(s) - if err != nil { - return nil, fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", s) - } - r, err := WithName(named.Name()) - if err != nil { - return nil, err - } - if canonical, isCanonical := named.(distreference.Canonical); isCanonical { - return WithDigest(r, canonical.Digest()) - } - if tagged, isTagged := named.(distreference.NamedTagged); isTagged { - return WithTag(r, tagged.Tag()) - } - return r, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - name, err := normalize(name) - if err != nil { - return nil, err - } - if err := validateName(name); err != nil { - return nil, err - } - r, err := distreference.WithName(name) - if err != nil { - return nil, err - } - return &namedRef{r}, nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - r, err := distreference.WithTag(name, tag) - if err != nil { - return nil, err - } - return &taggedRef{namedRef{r}}, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. -func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - r, err := distreference.WithDigest(name, digest) - if err != nil { - return nil, err - } - return &canonicalRef{namedRef{r}}, nil -} - -type namedRef struct { - distreference.Named -} -type taggedRef struct { - namedRef -} -type canonicalRef struct { - namedRef -} - -func (r *namedRef) FullName() string { - hostname, remoteName := splitHostname(r.Name()) - return hostname + "/" + remoteName -} -func (r *namedRef) Hostname() string { - hostname, _ := splitHostname(r.Name()) - return hostname -} -func (r *namedRef) RemoteName() string { - _, remoteName := splitHostname(r.Name()) - return remoteName -} -func (r *taggedRef) Tag() string { - return r.namedRef.Named.(distreference.NamedTagged).Tag() -} -func (r *canonicalRef) Digest() digest.Digest { - return r.namedRef.Named.(distreference.Canonical).Digest() -} - -// WithDefaultTag adds a default tag to a reference if it only has a repo name. -func WithDefaultTag(ref Named) Named { - if IsNameOnly(ref) { - ref, _ = WithTag(ref, DefaultTag) - } - return ref -} - -// IsNameOnly returns true if reference only contains a repo name. -func IsNameOnly(ref Named) bool { - if _, ok := ref.(NamedTagged); ok { - return false - } - if _, ok := ref.(Canonical); ok { - return false - } - return true -} - -// ParseIDOrReference parses string for an image ID or a reference. ID can be -// without a default prefix. -func ParseIDOrReference(idOrRef string) (digest.Digest, Named, error) { - if err := v1.ValidateID(idOrRef); err == nil { - idOrRef = "sha256:" + idOrRef - } - if dgst, err := digest.ParseDigest(idOrRef); err == nil { - return dgst, nil, nil - } - ref, err := ParseNamed(idOrRef) - return "", ref, err -} - -// splitHostname splits a repository name to hostname and remotename string. -// If no valid hostname is found, the default hostname is used. Repository name -// needs to be already validated before. 
-func splitHostname(name string) (hostname, remoteName string) { - i := strings.IndexRune(name, '/') - if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") { - hostname, remoteName = DefaultHostname, name - } else { - hostname, remoteName = name[:i], name[i+1:] - } - if hostname == LegacyDefaultHostname { - hostname = DefaultHostname - } - if hostname == DefaultHostname && !strings.ContainsRune(remoteName, '/') { - remoteName = DefaultRepoPrefix + remoteName - } - return -} - -// normalize returns a repository name in its normalized form, meaning it -// will not contain default hostname nor library/ prefix for official images. -func normalize(name string) (string, error) { - host, remoteName := splitHostname(name) - if strings.ToLower(remoteName) != remoteName { - return "", errors.New("invalid reference format: repository name must be lowercase") - } - if host == DefaultHostname { - if strings.HasPrefix(remoteName, DefaultRepoPrefix) { - return strings.TrimPrefix(remoteName, DefaultRepoPrefix), nil - } - return remoteName, nil - } - return name, nil -} - -func validateName(name string) error { - if err := v1.ValidateID(name); err == nil { - return fmt.Errorf("Invalid repository name (%s), cannot specify 64-byte hexadecimal strings", name) - } - return nil -} diff --git a/reference/reference_test.go b/reference/reference_test.go deleted file mode 100644 index ff35ba3da2..0000000000 --- a/reference/reference_test.go +++ /dev/null @@ -1,275 +0,0 @@ -package reference - -import ( - "testing" - - "github.com/docker/distribution/digest" -) - -func TestValidateReferenceName(t *testing.T) { - validRepoNames := []string{ - "docker/docker", - "library/debian", - "debian", - "docker.io/docker/docker", - "docker.io/library/debian", - "docker.io/debian", - "index.docker.io/docker/docker", - "index.docker.io/library/debian", - "index.docker.io/debian", - "127.0.0.1:5000/docker/docker", - "127.0.0.1:5000/library/debian", - "127.0.0.1:5000/debian", - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - } - invalidRepoNames := []string{ - "https://github.com/docker/docker", - "docker/Docker", - "-docker", - "-docker/docker", - "-docker.io/docker/docker", - "docker///docker", - "docker.io/docker/Docker", - "docker.io/docker///docker", - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - "docker.io/1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - } - - for _, name := range invalidRepoNames { - _, err := ParseNamed(name) - if err == nil { - t.Fatalf("Expected invalid repo name for %q", name) - } - } - - for _, name := range validRepoNames { - _, err := ParseNamed(name) - if err != nil { - t.Fatalf("Error parsing repo name %s, got: %q", name, err) - } - } -} - -func TestValidateRemoteName(t *testing.T) { - validRepositoryNames := []string{ - // Sanity check. - "docker/docker", - - // Allow 64-character non-hexadecimal names (hexadecimal names are forbidden). - "thisisthesongthatneverendsitgoesonandonandonthisisthesongthatnev", - - // Allow embedded hyphens. - "docker-rules/docker", - - // Allow multiple hyphens as well. - "docker---rules/docker", - - //Username doc and image name docker being tested. - "doc/docker", - - // single character names are now allowed. - "d/docker", - "jess/t", - - // Consecutive underscores. - "dock__er/docker", - } - for _, repositoryName := range validRepositoryNames { - _, err := ParseNamed(repositoryName) - if err != nil { - t.Errorf("Repository name should be valid: %v. 
Error: %v", repositoryName, err) - } - } - - invalidRepositoryNames := []string{ - // Disallow capital letters. - "docker/Docker", - - // Only allow one slash. - "docker///docker", - - // Disallow 64-character hexadecimal. - "1a3f5e7d9c1b3a5f7e9d1c3b5a7f9e1d3c5b7a9f1e3d5d7c9b1a3f5e7d9c1b3a", - - // Disallow leading and trailing hyphens in namespace. - "-docker/docker", - "docker-/docker", - "-docker-/docker", - - // Don't allow underscores everywhere (as opposed to hyphens). - "____/____", - - "_docker/_docker", - - // Disallow consecutive periods. - "dock..er/docker", - "dock_.er/docker", - "dock-.er/docker", - - // No repository. - "docker/", - - //namespace too long - "this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255_this_is_not_a_valid_namespace_because_its_lenth_is_greater_than_255/docker", - } - for _, repositoryName := range invalidRepositoryNames { - if _, err := ParseNamed(repositoryName); err == nil { - t.Errorf("Repository name should be invalid: %v", repositoryName) - } - } -} - -func TestParseRepositoryInfo(t *testing.T) { - type tcase struct { - RemoteName, NormalizedName, FullName, AmbiguousName, Hostname string - } - - tcases := []tcase{ - { - RemoteName: "fooo/bar", - NormalizedName: "fooo/bar", - FullName: "docker.io/fooo/bar", - AmbiguousName: "index.docker.io/fooo/bar", - Hostname: "docker.io", - }, - { - RemoteName: "library/ubuntu", - NormalizedName: "ubuntu", - FullName: "docker.io/library/ubuntu", - AmbiguousName: "library/ubuntu", - Hostname: "docker.io", - }, - { - RemoteName: "nonlibrary/ubuntu", - NormalizedName: "nonlibrary/ubuntu", - FullName: "docker.io/nonlibrary/ubuntu", - AmbiguousName: "", - Hostname: "docker.io", - }, - { - RemoteName: "other/library", - NormalizedName: "other/library", - FullName: "docker.io/other/library", - AmbiguousName: "", - Hostname: "docker.io", - }, - { - RemoteName: "private/moonbase", - NormalizedName: "127.0.0.1:8000/private/moonbase", - FullName: "127.0.0.1:8000/private/moonbase", - AmbiguousName: "", - Hostname: "127.0.0.1:8000", - }, - { - RemoteName: "privatebase", - NormalizedName: "127.0.0.1:8000/privatebase", - FullName: "127.0.0.1:8000/privatebase", - AmbiguousName: "", - Hostname: "127.0.0.1:8000", - }, - { - RemoteName: "private/moonbase", - NormalizedName: "example.com/private/moonbase", - FullName: "example.com/private/moonbase", - AmbiguousName: "", - Hostname: "example.com", - }, - { - RemoteName: "privatebase", - NormalizedName: "example.com/privatebase", - FullName: "example.com/privatebase", - AmbiguousName: "", - Hostname: "example.com", - }, - { - RemoteName: "private/moonbase", - NormalizedName: "example.com:8000/private/moonbase", - FullName: "example.com:8000/private/moonbase", - AmbiguousName: "", - Hostname: "example.com:8000", - }, - { - RemoteName: "privatebasee", - NormalizedName: "example.com:8000/privatebasee", - FullName: "example.com:8000/privatebasee", - AmbiguousName: "", - Hostname: "example.com:8000", - }, - { - RemoteName: "library/ubuntu-12.04-base", - NormalizedName: "ubuntu-12.04-base", - FullName: "docker.io/library/ubuntu-12.04-base", - AmbiguousName: "index.docker.io/library/ubuntu-12.04-base", - Hostname: "docker.io", - }, - } - - for _, tcase := range tcases { - refStrings := []string{tcase.NormalizedName, tcase.FullName} - if tcase.AmbiguousName != "" { - refStrings = append(refStrings, tcase.AmbiguousName) - } - - var refs 
[]Named
-		for _, r := range refStrings {
-			named, err := ParseNamed(r)
-			if err != nil {
-				t.Fatal(err)
-			}
-			refs = append(refs, named)
-			named, err = WithName(r)
-			if err != nil {
-				t.Fatal(err)
-			}
-			refs = append(refs, named)
-		}
-
-		for _, r := range refs {
-			if expected, actual := tcase.NormalizedName, r.Name(); expected != actual {
-				t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual)
-			}
-			if expected, actual := tcase.FullName, r.FullName(); expected != actual {
-				t.Fatalf("Invalid normalized reference for %q. Expected %q, got %q", r, expected, actual)
-			}
-			if expected, actual := tcase.Hostname, r.Hostname(); expected != actual {
-				t.Fatalf("Invalid hostname for %q. Expected %q, got %q", r, expected, actual)
-			}
-			if expected, actual := tcase.RemoteName, r.RemoteName(); expected != actual {
-				t.Fatalf("Invalid remoteName for %q. Expected %q, got %q", r, expected, actual)
-			}
-
-		}
-	}
-}
-
-func TestParseReferenceWithTagAndDigest(t *testing.T) {
-	ref, err := ParseNamed("busybox:latest@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, isTagged := ref.(NamedTagged); isTagged {
-		t.Fatalf("Reference from %q should not support tag", ref)
-	}
-	if _, isCanonical := ref.(Canonical); !isCanonical {
-		t.Fatalf("Reference from %q should support digest", ref)
-	}
-	if expected, actual := "busybox@sha256:86e0e091d0da6bde2456dbb48306f3956bbeb2eae1b5b9a43045843f69fe4aaa", ref.String(); actual != expected {
-		t.Fatalf("Invalid parsed reference for %q: expected %q, got %q", ref, expected, actual)
-	}
-}
-
-func TestInvalidReferenceComponents(t *testing.T) {
-	if _, err := WithName("-foo"); err == nil {
-		t.Fatal("Expected WithName to detect invalid name")
-	}
-	ref, err := WithName("busybox")
-	if err != nil {
-		t.Fatal(err)
-	}
-	if _, err := WithTag(ref, "-foo"); err == nil {
-		t.Fatal("Expected WithTag to detect invalid tag")
-	}
-	if _, err := WithDigest(ref, digest.Digest("foo")); err == nil {
-		t.Fatal("Expected WithDigest to detect invalid digest")
-	}
-}
diff --git a/reference/store.go b/reference/store.go
deleted file mode 100644
index fb72fff90a..0000000000
--- a/reference/store.go
+++ /dev/null
@@ -1,287 +0,0 @@
-package reference
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-	"os"
-	"path/filepath"
-	"sort"
-	"sync"
-
-	"github.com/docker/distribution/digest"
-	"github.com/docker/docker/image"
-	"github.com/docker/docker/pkg/ioutils"
-)
-
-var (
-	// ErrDoesNotExist is returned if a reference is not found in the
-	// store.
-	ErrDoesNotExist = errors.New("reference does not exist")
-)
-
-// An Association is a tuple associating a reference with an image ID.
-type Association struct {
-	Ref     Named
-	ImageID image.ID
-}
-
-// Store provides the set of methods which can operate on a tag store.
-type Store interface {
-	References(id image.ID) []Named
-	ReferencesByName(ref Named) []Association
-	AddTag(ref Named, id image.ID, force bool) error
-	AddDigest(ref Canonical, id image.ID, force bool) error
-	Delete(ref Named) (bool, error)
-	Get(ref Named) (image.ID, error)
-}
-
-type store struct {
-	mu sync.RWMutex
-	// jsonPath is the path to the file where the serialized tag data is
-	// stored.
-	jsonPath string
-	// Repositories is a map of repositories, indexed by name.
-	Repositories map[string]repository
-	// referencesByIDCache is a cache of references indexed by ID, to speed
-	// up References.
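-	// The cache is kept in sync by addReference and Delete, and rebuilt
-	// from Repositories by reload.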
- referencesByIDCache map[image.ID]map[string]Named -} - -// Repository maps tags to image IDs. The key is a stringified Reference, -// including the repository name. -type repository map[string]image.ID - -type lexicalRefs []Named - -func (a lexicalRefs) Len() int { return len(a) } -func (a lexicalRefs) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a lexicalRefs) Less(i, j int) bool { return a[i].String() < a[j].String() } - -type lexicalAssociations []Association - -func (a lexicalAssociations) Len() int { return len(a) } -func (a lexicalAssociations) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a lexicalAssociations) Less(i, j int) bool { return a[i].Ref.String() < a[j].Ref.String() } - -// NewReferenceStore creates a new reference store, tied to a file path where -// the set of references are serialized in JSON format. -func NewReferenceStore(jsonPath string) (Store, error) { - abspath, err := filepath.Abs(jsonPath) - if err != nil { - return nil, err - } - - store := &store{ - jsonPath: abspath, - Repositories: make(map[string]repository), - referencesByIDCache: make(map[image.ID]map[string]Named), - } - // Load the json file if it exists, otherwise create it. - if err := store.reload(); os.IsNotExist(err) { - if err := store.save(); err != nil { - return nil, err - } - } else if err != nil { - return nil, err - } - return store, nil -} - -// AddTag adds a tag reference to the store. If force is set to true, existing -// references can be overwritten. This only works for tags, not digests. -func (store *store) AddTag(ref Named, id image.ID, force bool) error { - if _, isCanonical := ref.(Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - return store.addReference(WithDefaultTag(ref), id, force) -} - -// AddDigest adds a digest reference to the store. -func (store *store) AddDigest(ref Canonical, id image.ID, force bool) error { - return store.addReference(ref, id, force) -} - -func (store *store) addReference(ref Named, id image.ID, force bool) error { - if ref.Name() == string(digest.Canonical) { - return errors.New("refusing to create an ambiguous tag using digest algorithm as name") - } - - store.mu.Lock() - defer store.mu.Unlock() - - repository, exists := store.Repositories[ref.Name()] - if !exists || repository == nil { - repository = make(map[string]image.ID) - store.Repositories[ref.Name()] = repository - } - - refStr := ref.String() - oldID, exists := repository[refStr] - - if exists { - // force only works for tags - if digested, isDigest := ref.(Canonical); isDigest { - return fmt.Errorf("Cannot overwrite digest %s", digested.Digest().String()) - } - - if !force { - return fmt.Errorf("Conflict: Tag %s is already set to image %s, if you want to replace it, please use -f option", ref.String(), oldID.String()) - } - - if store.referencesByIDCache[oldID] != nil { - delete(store.referencesByIDCache[oldID], refStr) - if len(store.referencesByIDCache[oldID]) == 0 { - delete(store.referencesByIDCache, oldID) - } - } - } - - repository[refStr] = id - if store.referencesByIDCache[id] == nil { - store.referencesByIDCache[id] = make(map[string]Named) - } - store.referencesByIDCache[id][refStr] = ref - - return store.save() -} - -// Delete deletes a reference from the store. It returns true if a deletion -// happened, or false otherwise. 
-func (store *store) Delete(ref Named) (bool, error) {
-	ref = WithDefaultTag(ref)
-
-	store.mu.Lock()
-	defer store.mu.Unlock()
-
-	repoName := ref.Name()
-
-	repository, exists := store.Repositories[repoName]
-	if !exists {
-		return false, ErrDoesNotExist
-	}
-
-	refStr := ref.String()
-	if id, exists := repository[refStr]; exists {
-		delete(repository, refStr)
-		if len(repository) == 0 {
-			delete(store.Repositories, repoName)
-		}
-		if store.referencesByIDCache[id] != nil {
-			delete(store.referencesByIDCache[id], refStr)
-			if len(store.referencesByIDCache[id]) == 0 {
-				delete(store.referencesByIDCache, id)
-			}
-		}
-		return true, store.save()
-	}
-
-	return false, ErrDoesNotExist
-}
-
-// Get retrieves an item from the store by reference.
-func (store *store) Get(ref Named) (image.ID, error) {
-	ref = WithDefaultTag(ref)
-
-	store.mu.RLock()
-	defer store.mu.RUnlock()
-
-	repository, exists := store.Repositories[ref.Name()]
-	if !exists || repository == nil {
-		return "", ErrDoesNotExist
-	}
-
-	id, exists := repository[ref.String()]
-	if !exists {
-		return "", ErrDoesNotExist
-	}
-
-	return id, nil
-}
-
-// References returns a slice of references to the given image ID. The slice
-// will be nil if there are no references to this image ID.
-func (store *store) References(id image.ID) []Named {
-	store.mu.RLock()
-	defer store.mu.RUnlock()
-
-	// Convert the internal map to an array for two reasons:
-	// 1) We must not return the internal mutable map to callers
-	// 2) It would be ugly to expose the extraneous map keys to callers.
-
-	var references []Named
-	for _, ref := range store.referencesByIDCache[id] {
-		references = append(references, ref)
-	}
-
-	sort.Sort(lexicalRefs(references))
-
-	return references
-}
-
-// ReferencesByName returns the references for a given repository name.
-// If there are no references known for this repository name,
-// ReferencesByName returns nil.
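-// The returned associations are sorted lexically by reference string.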
-func (store *store) ReferencesByName(ref Named) []Association { - store.mu.RLock() - defer store.mu.RUnlock() - - repository, exists := store.Repositories[ref.Name()] - if !exists { - return nil - } - - var associations []Association - for refStr, refID := range repository { - ref, err := ParseNamed(refStr) - if err != nil { - // Should never happen - return nil - } - associations = append(associations, - Association{ - Ref: ref, - ImageID: refID, - }) - } - - sort.Sort(lexicalAssociations(associations)) - - return associations -} - -func (store *store) save() error { - // Store the json - jsonData, err := json.Marshal(store) - if err != nil { - return err - } - return ioutils.AtomicWriteFile(store.jsonPath, jsonData, 0600) -} - -func (store *store) reload() error { - f, err := os.Open(store.jsonPath) - if err != nil { - return err - } - defer f.Close() - if err := json.NewDecoder(f).Decode(&store); err != nil { - return err - } - - for _, repository := range store.Repositories { - for refStr, refID := range repository { - ref, err := ParseNamed(refStr) - if err != nil { - // Should never happen - continue - } - if store.referencesByIDCache[refID] == nil { - store.referencesByIDCache[refID] = make(map[string]Named) - } - store.referencesByIDCache[refID][refStr] = ref - } - } - - return nil -} diff --git a/reference/store_test.go b/reference/store_test.go deleted file mode 100644 index a877c55f58..0000000000 --- a/reference/store_test.go +++ /dev/null @@ -1,356 +0,0 @@ -package reference - -import ( - "bytes" - "io/ioutil" - "os" - "path/filepath" - "strings" - "testing" - - "github.com/docker/docker/image" -) - -var ( - saveLoadTestCases = map[string]image.ID{ - "registry:5000/foobar:HEAD": "sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6", - "registry:5000/foobar:alternate": "sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793", - "registry:5000/foobar:latest": "sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b", - "registry:5000/foobar:master": "sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc", - "jess/hollywood:latest": "sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe", - "registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6": "sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c", - "busybox:latest": "sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c", - } - - marshalledSaveLoadTestCases = []byte(`{"Repositories":{"busybox":{"busybox:latest":"sha256:91e54dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c"},"jess/hollywood":{"jess/hollywood:latest":"sha256:ae7a5519a0a55a2d4ef20ddcbd5d0ca0888a1f7ab806acc8e2a27baf46f529fe"},"registry":{"registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6":"sha256:24126a56805beb9711be5f4590cc2eb55ab8d4a85ebd618eed72bb19fc50631c"},"registry:5000/foobar":{"registry:5000/foobar:HEAD":"sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6","registry:5000/foobar:alternate":"sha256:ae300ebc4a4f00693702cfb0a5e0b7bc527b353828dc86ad09fb95c8a681b793","registry:5000/foobar:latest":"sha256:6153498b9ac00968d71b66cca4eac37e990b5f9eb50c26877eb8799c8847451b","registry:5000/foobar:master":"sha256:6c9917af4c4e05001b346421959d7ea81b6dc9d25718466a37a6add865dfd7fc"}}}`) -) - -func TestLoad(t *testing.T) { - jsonFile, err := ioutil.TempFile("", "tag-store-test") - if err != nil { - t.Fatalf("error creating temp file: %v", err) - } - defer 
os.RemoveAll(jsonFile.Name()) - - // Write canned json to the temp file - _, err = jsonFile.Write(marshalledSaveLoadTestCases) - if err != nil { - t.Fatalf("error writing to temp file: %v", err) - } - jsonFile.Close() - - store, err := NewReferenceStore(jsonFile.Name()) - if err != nil { - t.Fatalf("error creating tag store: %v", err) - } - - for refStr, expectedID := range saveLoadTestCases { - ref, err := ParseNamed(refStr) - if err != nil { - t.Fatalf("failed to parse reference: %v", err) - } - id, err := store.Get(ref) - if err != nil { - t.Fatalf("could not find reference %s: %v", refStr, err) - } - if id != expectedID { - t.Fatalf("expected %s - got %s", expectedID, id) - } - } -} - -func TestSave(t *testing.T) { - jsonFile, err := ioutil.TempFile("", "tag-store-test") - if err != nil { - t.Fatalf("error creating temp file: %v", err) - } - _, err = jsonFile.Write([]byte(`{}`)) - jsonFile.Close() - defer os.RemoveAll(jsonFile.Name()) - - store, err := NewReferenceStore(jsonFile.Name()) - if err != nil { - t.Fatalf("error creating tag store: %v", err) - } - - for refStr, id := range saveLoadTestCases { - ref, err := ParseNamed(refStr) - if err != nil { - t.Fatalf("failed to parse reference: %v", err) - } - if canonical, ok := ref.(Canonical); ok { - err = store.AddDigest(canonical, id, false) - if err != nil { - t.Fatalf("could not add digest reference %s: %v", refStr, err) - } - } else { - err = store.AddTag(ref, id, false) - if err != nil { - t.Fatalf("could not add reference %s: %v", refStr, err) - } - } - } - - jsonBytes, err := ioutil.ReadFile(jsonFile.Name()) - if err != nil { - t.Fatalf("could not read json file: %v", err) - } - - if !bytes.Equal(jsonBytes, marshalledSaveLoadTestCases) { - t.Fatalf("save output did not match expectations\nexpected:\n%s\ngot:\n%s", marshalledSaveLoadTestCases, jsonBytes) - } -} - -func TestAddDeleteGet(t *testing.T) { - jsonFile, err := ioutil.TempFile("", "tag-store-test") - if err != nil { - t.Fatalf("error creating temp file: %v", err) - } - _, err = jsonFile.Write([]byte(`{}`)) - jsonFile.Close() - defer os.RemoveAll(jsonFile.Name()) - - store, err := NewReferenceStore(jsonFile.Name()) - if err != nil { - t.Fatalf("error creating tag store: %v", err) - } - - testImageID1 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9c") - testImageID2 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9d") - testImageID3 := image.ID("sha256:9655aef5fd742a1b4e1b7b163aa9f1c76c186304bf39102283d80927c916ca9e") - - // Try adding a reference with no tag or digest - nameOnly, err := WithName("username/repo") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if err = store.AddTag(nameOnly, testImageID1, false); err != nil { - t.Fatalf("error adding to store: %v", err) - } - - // Add a few references - ref1, err := ParseNamed("username/repo1:latest") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if err = store.AddTag(ref1, testImageID1, false); err != nil { - t.Fatalf("error adding to store: %v", err) - } - - ref2, err := ParseNamed("username/repo1:old") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if err = store.AddTag(ref2, testImageID2, false); err != nil { - t.Fatalf("error adding to store: %v", err) - } - - ref3, err := ParseNamed("username/repo1:alias") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if err = store.AddTag(ref3, testImageID1, false); err != nil { - t.Fatalf("error adding to 
store: %v", err) - } - - ref4, err := ParseNamed("username/repo2:latest") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if err = store.AddTag(ref4, testImageID2, false); err != nil { - t.Fatalf("error adding to store: %v", err) - } - - ref5, err := ParseNamed("username/repo3@sha256:58153dfb11794fad694460162bf0cb0a4fa710cfa3f60979c177d920813e267c") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if err = store.AddDigest(ref5.(Canonical), testImageID2, false); err != nil { - t.Fatalf("error adding to store: %v", err) - } - - // Attempt to overwrite with force == false - if err = store.AddTag(ref4, testImageID3, false); err == nil || !strings.HasPrefix(err.Error(), "Conflict:") { - t.Fatalf("did not get expected error on overwrite attempt - got %v", err) - } - // Repeat to overwrite with force == true - if err = store.AddTag(ref4, testImageID3, true); err != nil { - t.Fatalf("failed to force tag overwrite: %v", err) - } - - // Check references so far - id, err := store.Get(nameOnly) - if err != nil { - t.Fatalf("Get returned error: %v", err) - } - if id != testImageID1 { - t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) - } - - id, err = store.Get(ref1) - if err != nil { - t.Fatalf("Get returned error: %v", err) - } - if id != testImageID1 { - t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) - } - - id, err = store.Get(ref2) - if err != nil { - t.Fatalf("Get returned error: %v", err) - } - if id != testImageID2 { - t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID2.String()) - } - - id, err = store.Get(ref3) - if err != nil { - t.Fatalf("Get returned error: %v", err) - } - if id != testImageID1 { - t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID1.String()) - } - - id, err = store.Get(ref4) - if err != nil { - t.Fatalf("Get returned error: %v", err) - } - if id != testImageID3 { - t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) - } - - id, err = store.Get(ref5) - if err != nil { - t.Fatalf("Get returned error: %v", err) - } - if id != testImageID2 { - t.Fatalf("id mismatch: got %s instead of %s", id.String(), testImageID3.String()) - } - - // Get should return ErrDoesNotExist for a nonexistent repo - nonExistRepo, err := ParseNamed("username/nonexistrepo:latest") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if _, err = store.Get(nonExistRepo); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Get") - } - - // Get should return ErrDoesNotExist for a nonexistent tag - nonExistTag, err := ParseNamed("username/repo1:nonexist") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - if _, err = store.Get(nonExistTag); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Get") - } - - // Check References - refs := store.References(testImageID1) - if len(refs) != 3 { - t.Fatal("unexpected number of references") - } - // Looking for the references in this order verifies that they are - // returned lexically sorted. 
- if refs[0].String() != ref3.String() { - t.Fatalf("unexpected reference: %v", refs[0].String()) - } - if refs[1].String() != ref1.String() { - t.Fatalf("unexpected reference: %v", refs[1].String()) - } - if refs[2].String() != nameOnly.String()+":latest" { - t.Fatalf("unexpected reference: %v", refs[2].String()) - } - - // Check ReferencesByName - repoName, err := WithName("username/repo1") - if err != nil { - t.Fatalf("could not parse reference: %v", err) - } - associations := store.ReferencesByName(repoName) - if len(associations) != 3 { - t.Fatal("unexpected number of associations") - } - // Looking for the associations in this order verifies that they are - // returned lexically sorted. - if associations[0].Ref.String() != ref3.String() { - t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) - } - if associations[0].ImageID != testImageID1 { - t.Fatalf("unexpected reference: %v", associations[0].Ref.String()) - } - if associations[1].Ref.String() != ref1.String() { - t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) - } - if associations[1].ImageID != testImageID1 { - t.Fatalf("unexpected reference: %v", associations[1].Ref.String()) - } - if associations[2].Ref.String() != ref2.String() { - t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) - } - if associations[2].ImageID != testImageID2 { - t.Fatalf("unexpected reference: %v", associations[2].Ref.String()) - } - - // Delete should return ErrDoesNotExist for a nonexistent repo - if _, err = store.Delete(nonExistRepo); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Delete") - } - - // Delete should return ErrDoesNotExist for a nonexistent tag - if _, err = store.Delete(nonExistTag); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Delete") - } - - // Delete a few references - if deleted, err := store.Delete(ref1); err != nil || deleted != true { - t.Fatal("Delete failed") - } - if _, err := store.Get(ref1); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Get") - } - if deleted, err := store.Delete(ref5); err != nil || deleted != true { - t.Fatal("Delete failed") - } - if _, err := store.Get(ref5); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Get") - } - if deleted, err := store.Delete(nameOnly); err != nil || deleted != true { - t.Fatal("Delete failed") - } - if _, err := store.Get(nameOnly); err != ErrDoesNotExist { - t.Fatal("Expected ErrDoesNotExist from Get") - } -} - -func TestInvalidTags(t *testing.T) { - tmpDir, err := ioutil.TempDir("", "tag-store-test") - defer os.RemoveAll(tmpDir) - - store, err := NewReferenceStore(filepath.Join(tmpDir, "repositories.json")) - if err != nil { - t.Fatalf("error creating tag store: %v", err) - } - id := image.ID("sha256:470022b8af682154f57a2163d030eb369549549cba00edc69e1b99b46bb924d6") - - // sha256 as repo name - ref, err := ParseNamed("sha256:abc") - if err != nil { - t.Fatal(err) - } - err = store.AddTag(ref, id, true) - if err == nil { - t.Fatalf("expected setting tag %q to fail", ref) - } - - // setting digest as a tag - ref, err = ParseNamed("registry@sha256:367eb40fd0330a7e464777121e39d2f5b3e8e23a1e159342e53ab05c9e4d94e6") - if err != nil { - t.Fatal(err) - } - err = store.AddTag(ref, id, true) - if err == nil { - t.Fatalf("expected setting digest %q to fail", ref) - } - -} diff --git a/restartmanager/restartmanager.go b/restartmanager/restartmanager.go deleted file mode 100644 index 9893183a2a..0000000000 --- a/restartmanager/restartmanager.go +++ /dev/null @@ 
-1,128 +0,0 @@
-package restartmanager
-
-import (
-	"errors"
-	"fmt"
-	"sync"
-	"time"
-
-	"github.com/docker/engine-api/types/container"
-)
-
-const (
-	backoffMultiplier = 2
-	defaultTimeout    = 100 * time.Millisecond
-)
-
-// ErrRestartCanceled is returned when the restart manager has been
-// canceled and will no longer restart the container.
-var ErrRestartCanceled = errors.New("restart canceled")
-
-// RestartManager defines an object that controls container restarting rules.
-type RestartManager interface {
-	Cancel() error
-	ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error)
-}
-
-type restartManager struct {
-	sync.Mutex
-	sync.Once
-	policy       container.RestartPolicy
-	restartCount int
-	timeout      time.Duration
-	active       bool
-	cancel       chan struct{}
-	canceled     bool
-}
-
-// New returns a new restartmanager based on a policy.
-func New(policy container.RestartPolicy, restartCount int) RestartManager {
-	return &restartManager{policy: policy, restartCount: restartCount, cancel: make(chan struct{})}
-}
-
-func (rm *restartManager) SetPolicy(policy container.RestartPolicy) {
-	rm.Lock()
-	rm.policy = policy
-	rm.Unlock()
-}
-
-func (rm *restartManager) ShouldRestart(exitCode uint32, hasBeenManuallyStopped bool, executionDuration time.Duration) (bool, chan error, error) {
-	if rm.policy.IsNone() {
-		return false, nil, nil
-	}
-	rm.Lock()
-	unlockOnExit := true
-	defer func() {
-		if unlockOnExit {
-			rm.Unlock()
-		}
-	}()
-
-	if rm.canceled {
-		return false, nil, ErrRestartCanceled
-	}
-
-	if rm.active {
-		return false, nil, fmt.Errorf("invalid call on active restartmanager")
-	}
-	// If the container ran for more than 10s, regardless of status and policy,
-	// reset the timeout back to the default.
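-	// Concretely: with defaultTimeout = 100ms and backoffMultiplier = 2, the
-	// delay before each restart doubles (100ms, 200ms, 400ms, ...) until the
-	// container stays up for at least 10 seconds, which resets it.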
- if executionDuration.Seconds() >= 10 { - rm.timeout = 0 - } - if rm.timeout == 0 { - rm.timeout = defaultTimeout - } else { - rm.timeout *= backoffMultiplier - } - - var restart bool - switch { - case rm.policy.IsAlways(): - restart = true - case rm.policy.IsUnlessStopped() && !hasBeenManuallyStopped: - restart = true - case rm.policy.IsOnFailure(): - // the default value of 0 for MaximumRetryCount means that we will not enforce a maximum count - if max := rm.policy.MaximumRetryCount; max == 0 || rm.restartCount < max { - restart = exitCode != 0 - } - } - - if !restart { - rm.active = false - return false, nil, nil - } - - rm.restartCount++ - - unlockOnExit = false - rm.active = true - rm.Unlock() - - ch := make(chan error) - go func() { - select { - case <-rm.cancel: - ch <- ErrRestartCanceled - close(ch) - case <-time.After(rm.timeout): - rm.Lock() - close(ch) - rm.active = false - rm.Unlock() - } - }() - - return true, ch, nil -} - -func (rm *restartManager) Cancel() error { - rm.Do(func() { - rm.Lock() - rm.canceled = true - close(rm.cancel) - rm.Unlock() - }) - return nil -}
diff --git a/restartmanager/restartmanager_test.go b/restartmanager/restartmanager_test.go deleted file mode 100644 index 95a36b426b..0000000000 --- a/restartmanager/restartmanager_test.go +++ /dev/null @@ -1,34 +0,0 @@ -package restartmanager - -import ( - "testing" - "time" - - "github.com/docker/engine-api/types/container" -) - -func TestRestartManagerTimeout(t *testing.T) { - rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) - should, _, err := rm.ShouldRestart(0, false, 1*time.Second) - if err != nil { - t.Fatal(err) - } - if !should { - t.Fatal("container should be restarted") - } - if rm.timeout != 100*time.Millisecond { - t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) - } -} - -func TestRestartManagerTimeoutReset(t *testing.T) { - rm := New(container.RestartPolicy{Name: "always"}, 0).(*restartManager) - rm.timeout = 5 * time.Second - _, _, err := rm.ShouldRestart(0, false, 10*time.Second) - if err != nil { - t.Fatal(err) - } - if rm.timeout != 100*time.Millisecond { - t.Fatalf("restart manager should have a timeout of 100ms but has %s", rm.timeout) - } -}
diff --git a/runconfig/compare.go b/runconfig/compare.go deleted file mode 100644 index 61346aabf4..0000000000 --- a/runconfig/compare.go +++ /dev/null @@ -1,61 +0,0 @@ -package runconfig - -import "github.com/docker/engine-api/types/container" - -// Compare compares two Config structs.
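- // Compare returns true only when every field checked below matches; for ExposedPorts and Volumes only the key sets are compared, not the values.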
Do not compare the "Image" or "Hostname" fields. -// If OpenStdin is set on either config, the two are considered different. -func Compare(a, b *container.Config) bool { - if a == nil || b == nil || - a.OpenStdin || b.OpenStdin { - return false - } - if a.AttachStdout != b.AttachStdout || - a.AttachStderr != b.AttachStderr || - a.User != b.User || - a.OpenStdin != b.OpenStdin || - a.Tty != b.Tty { - return false - } - - if len(a.Cmd) != len(b.Cmd) || - len(a.Env) != len(b.Env) || - len(a.Labels) != len(b.Labels) || - len(a.ExposedPorts) != len(b.ExposedPorts) || - len(a.Entrypoint) != len(b.Entrypoint) || - len(a.Volumes) != len(b.Volumes) { - return false - } - - for i := 0; i < len(a.Cmd); i++ { - if a.Cmd[i] != b.Cmd[i] { - return false - } - } - for i := 0; i < len(a.Env); i++ { - if a.Env[i] != b.Env[i] { - return false - } - } - for k, v := range a.Labels { - if v != b.Labels[k] { - return false - } - } - for k := range a.ExposedPorts { - if _, exists := b.ExposedPorts[k]; !exists { - return false - } - } - - for i := 0; i < len(a.Entrypoint); i++ { - if a.Entrypoint[i] != b.Entrypoint[i] { - return false - } - } - for key := range a.Volumes { - if _, exists := b.Volumes[key]; !exists { - return false - } - } - return true -}
diff --git a/runconfig/compare_test.go b/runconfig/compare_test.go deleted file mode 100644 index 9c17c553f3..0000000000 --- a/runconfig/compare_test.go +++ /dev/null @@ -1,126 +0,0 @@ -package runconfig - -import ( - "testing" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// Just to make life easier -func newPortNoError(proto, port string) nat.Port { - p, _ := nat.NewPort(proto, port) - return p -} - -func TestCompare(t *testing.T) { - ports1 := make(nat.PortSet) - ports1[newPortNoError("tcp", "1111")] = struct{}{} - ports1[newPortNoError("tcp", "2222")] = struct{}{} - ports2 := make(nat.PortSet) - ports2[newPortNoError("tcp", "3333")] = struct{}{} - ports2[newPortNoError("tcp", "4444")] = struct{}{} - ports3 := make(nat.PortSet) - ports3[newPortNoError("tcp", "1111")] = struct{}{} - ports3[newPortNoError("tcp", "2222")] = struct{}{} - ports3[newPortNoError("tcp", "5555")] = struct{}{} - volumes1 := make(map[string]struct{}) - volumes1["/test1"] = struct{}{} - volumes2 := make(map[string]struct{}) - volumes2["/test2"] = struct{}{} - volumes3 := make(map[string]struct{}) - volumes3["/test1"] = struct{}{} - volumes3["/test3"] = struct{}{} - envs1 := []string{"ENV1=value1", "ENV2=value2"} - envs2 := []string{"ENV1=value1", "ENV3=value3"} - entrypoint1 := strslice.StrSlice{"/bin/sh", "-c"} - entrypoint2 := strslice.StrSlice{"/bin/sh", "-d"} - entrypoint3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} - cmd1 := strslice.StrSlice{"/bin/sh", "-c"} - cmd2 := strslice.StrSlice{"/bin/sh", "-d"} - cmd3 := strslice.StrSlice{"/bin/sh", "-c", "echo"} - labels1 := map[string]string{"LABEL1": "value1", "LABEL2": "value2"} - labels2 := map[string]string{"LABEL1": "value1", "LABEL2": "value3"} - labels3 := map[string]string{"LABEL1": "value1", "LABEL2": "value2", "LABEL3": "value3"} - - sameConfigs := map[*container.Config]*container.Config{ - // Empty config - &container.Config{}: {}, - // Does not compare hostname, domainname & image - &container.Config{ - Hostname: "host1", - Domainname: "domain1", - Image: "image1", - User: "user", - }: { - Hostname: "host2", - Domainname: "domain2", - Image: "image2", - User: "user", - }, - // only OpenStdin - &container.Config{OpenStdin: false}: {OpenStdin: false}, - // only
env - &container.Config{Env: envs1}: {Env: envs1}, - // only cmd - &container.Config{Cmd: cmd1}: {Cmd: cmd1}, - // only labels - &container.Config{Labels: labels1}: {Labels: labels1}, - // only exposedPorts - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports1}, - // only entrypoints - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint1}, - // only volumes - &container.Config{Volumes: volumes1}: {Volumes: volumes1}, - } - differentConfigs := map[*container.Config]*container.Config{ - nil: nil, - &container.Config{ - Hostname: "host1", - Domainname: "domain1", - Image: "image1", - User: "user1", - }: { - Hostname: "host1", - Domainname: "domain1", - Image: "image1", - User: "user2", - }, - // only OpenStdin - &container.Config{OpenStdin: false}: {OpenStdin: true}, - &container.Config{OpenStdin: true}: {OpenStdin: false}, - // only env - &container.Config{Env: envs1}: {Env: envs2}, - // only cmd - &container.Config{Cmd: cmd1}: {Cmd: cmd2}, - // not the same number of parts - &container.Config{Cmd: cmd1}: {Cmd: cmd3}, - // only labels - &container.Config{Labels: labels1}: {Labels: labels2}, - // not the same number of labels - &container.Config{Labels: labels1}: {Labels: labels3}, - // only exposedPorts - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports2}, - // not the same number of ports - &container.Config{ExposedPorts: ports1}: {ExposedPorts: ports3}, - // only entrypoints - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint2}, - // not the same number of parts - &container.Config{Entrypoint: entrypoint1}: {Entrypoint: entrypoint3}, - // only volumes - &container.Config{Volumes: volumes1}: {Volumes: volumes2}, - // not the same number of volumes - &container.Config{Volumes: volumes1}: {Volumes: volumes3}, - } - for config1, config2 := range sameConfigs { - if !Compare(config1, config2) { - t.Fatalf("Compare should be true for [%v] and [%v]", config1, config2) - } - } - for config1, config2 := range differentConfigs { - if Compare(config1, config2) { - t.Fatalf("Compare should be false for [%v] and [%v]", config1, config2) - } - } -}
diff --git a/runconfig/config.go b/runconfig/config.go deleted file mode 100644 index 8145e4b1da..0000000000 --- a/runconfig/config.go +++ /dev/null @@ -1,90 +0,0 @@ -package runconfig - -import ( - "encoding/json" - "fmt" - "io" - - "github.com/docker/docker/volume" - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" -) - -// ContainerDecoder implements httputils.ContainerDecoder, -// calling DecodeContainerConfig.
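- // A minimal usage sketch (illustrative only; the surrounding handler wiring is assumed):
- //
- //	var d ContainerDecoder
- //	cfg, hostCfg, netCfg, err := d.DecodeConfig(req.Body)
- //	if err != nil {
- //		return err
- //	}
- //
- // where req.Body carries a JSON document shaped like the fixtures under runconfig/fixtures.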
-type ContainerDecoder struct{} - -// DecodeConfig makes ContainerDecoder implement httputils.ContainerDecoder -func (r ContainerDecoder) DecodeConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - return DecodeContainerConfig(src) -} - -// DecodeHostConfig makes ContainerDecoder implement httputils.ContainerDecoder -func (r ContainerDecoder) DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { - return DecodeHostConfig(src) -} - -// DecodeContainerConfig decodes a JSON encoded config into a ContainerConfigWrapper -// struct and returns both a Config and a HostConfig struct. -// Be aware that this function does not check whether the resulting structs are nil; -// that is the caller's responsibility. -func DecodeContainerConfig(src io.Reader) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - var w ContainerConfigWrapper - - decoder := json.NewDecoder(src) - if err := decoder.Decode(&w); err != nil { - return nil, nil, nil, err - } - - hc := w.getHostConfig() - - // Perform platform-specific processing of Volumes and Binds. - if w.Config != nil && hc != nil { - - // Initialize the volumes map if currently nil - if w.Config.Volumes == nil { - w.Config.Volumes = make(map[string]struct{}) - } - - // Now validate all the volumes and binds - if err := validateVolumesAndBindSettings(w.Config, hc); err != nil { - return nil, nil, nil, err - } - } - - // Certain parameters need daemon-side validation that cannot be done - // on the client, as only the daemon knows what is valid for the platform. - if err := ValidateNetMode(w.Config, hc); err != nil { - return nil, nil, nil, err - } - - // Validate isolation - if err := ValidateIsolation(hc); err != nil { - return nil, nil, nil, err - } - - // Validate QoS - if err := ValidateQoS(hc); err != nil { - return nil, nil, nil, err - } - return w.Config, hc, w.NetworkingConfig, nil -} - -// validateVolumesAndBindSettings validates each of the volumes and bind settings -// passed by the caller to ensure they are valid. -func validateVolumesAndBindSettings(c *container.Config, hc *container.HostConfig) error { - - // Ensure all volumes and binds are valid.
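- // For example (illustrative specs, not taken from this file): on Linux a Volumes key such as "/data" or a Binds entry such as "/host:/container:ro" should parse, while a malformed spec should be rejected; the exact rules live in volume.ParseMountSpec and are platform specific.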
- for spec := range c.Volumes { - if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { - return fmt.Errorf("Invalid volume spec %q: %v", spec, err) - } - } - for _, spec := range hc.Binds { - if _, err := volume.ParseMountSpec(spec, hc.VolumeDriver); err != nil { - return fmt.Errorf("Invalid bind mount spec %q: %v", spec, err) - } - } - - return nil -}
diff --git a/runconfig/config_test.go b/runconfig/config_test.go deleted file mode 100644 index 5804b12d0e..0000000000 --- a/runconfig/config_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package runconfig - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "runtime" - "strings" - "testing" - - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/strslice" -) - -type f struct { - file string - entrypoint strslice.StrSlice -} - -func TestDecodeContainerConfig(t *testing.T) { - - var ( - fixtures []f - image string - ) - - if runtime.GOOS != "windows" { - image = "ubuntu" - fixtures = []f{ - {"fixtures/unix/container_config_1_14.json", strslice.StrSlice{}}, - {"fixtures/unix/container_config_1_17.json", strslice.StrSlice{"bash"}}, - {"fixtures/unix/container_config_1_19.json", strslice.StrSlice{"bash"}}, - } - } else { - image = "windows" - fixtures = []f{ - {"fixtures/windows/container_config_1_19.json", strslice.StrSlice{"cmd"}}, - } - } - - for _, f := range fixtures { - b, err := ioutil.ReadFile(f.file) - if err != nil { - t.Fatal(err) - } - - c, h, _, err := DecodeContainerConfig(bytes.NewReader(b)) - if err != nil { - t.Fatal(fmt.Errorf("Error parsing %s: %v", f.file, err)) - } - - if c.Image != image { - t.Fatalf("Expected %s image, found %s\n", image, c.Image) - } - - if len(c.Entrypoint) != len(f.entrypoint) { - t.Fatalf("Expected %v, found %v\n", f.entrypoint, c.Entrypoint) - } - - if h != nil && h.Memory != 1000 { - t.Fatalf("Expected memory to be 1000, found %d\n", h.Memory) - } - } -} - -// TestDecodeContainerConfigIsolation validates isolation passed -// to the daemon in the hostConfig structure. Note this is platform specific -// as to what level of container isolation is supported.
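- // (On Linux only "default" isolation is accepted; Windows additionally accepts "process" and "hyperv", which is what the per-platform branches below exercise.)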
-func TestDecodeContainerConfigIsolation(t *testing.T) { - - // An invalid isolation level - if _, _, _, err := callDecodeContainerConfigIsolation("invalid"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "invalid"`) { - t.Fatal(err) - } - } - - // Blank isolation (== default) - if _, _, _, err := callDecodeContainerConfigIsolation(""); err != nil { - t.Fatal("Blank isolation should have succeeded") - } - - // Default isolation - if _, _, _, err := callDecodeContainerConfigIsolation("default"); err != nil { - t.Fatal("default isolation should have succeeded") - } - - // Process isolation (Valid on Windows only) - if runtime.GOOS == "windows" { - if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { - t.Fatal("process isolation should have succeeded") - } - } else { - if _, _, _, err := callDecodeContainerConfigIsolation("process"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "process"`) { - t.Fatal(err) - } - } - } - - // Hyper-V Containers isolation (Valid on Windows only) - if runtime.GOOS == "windows" { - if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { - t.Fatal("hyperv isolation should have succeeded") - } - } else { - if _, _, _, err := callDecodeContainerConfigIsolation("hyperv"); err != nil { - if !strings.Contains(err.Error(), `invalid --isolation: "hyperv"`) { - t.Fatal(err) - } - } - } -} - -// callDecodeContainerConfigIsolation is a utility function to call -// DecodeContainerConfig for validating isolation -func callDecodeContainerConfigIsolation(isolation string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - var ( - b []byte - err error - ) - w := ContainerConfigWrapper{ - Config: &container.Config{}, - HostConfig: &container.HostConfig{ - NetworkMode: "none", - Isolation: container.Isolation(isolation)}, - } - if b, err = json.Marshal(w); err != nil { - return nil, nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) - } - return DecodeContainerConfig(bytes.NewReader(b)) -} diff --git a/runconfig/config_unix.go b/runconfig/config_unix.go deleted file mode 100644 index e5902fb024..0000000000 --- a/runconfig/config_unix.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build !windows - -package runconfig - -import ( - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" -) - -// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) -// and the corresponding HostConfig (non-portable). -type ContainerConfigWrapper struct { - *container.Config - InnerHostConfig *container.HostConfig `json:"HostConfig,omitempty"` - Cpuset string `json:",omitempty"` // Deprecated. Exported for backwards compatibility. - NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` - *container.HostConfig // Deprecated. Exported to read attributes from json that are not in the inner host config structure. -} - -// getHostConfig gets the HostConfig of the Config. 
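- // For example, a pre-1.19 client may send resource fields such as "Memory" or "CpusetCpus" at the top level of the request body rather than inside "HostConfig"; the merging below copies such values into the inner host config when the inner value is unset (payload shape inferred from the fixtures in this package).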
-// It's mostly there to handle Deprecated fields of the ContainerConfigWrapper -func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { - hc := w.HostConfig - - if hc == nil && w.InnerHostConfig != nil { - hc = w.InnerHostConfig - } else if w.InnerHostConfig != nil { - if hc.Memory != 0 && w.InnerHostConfig.Memory == 0 { - w.InnerHostConfig.Memory = hc.Memory - } - if hc.MemorySwap != 0 && w.InnerHostConfig.MemorySwap == 0 { - w.InnerHostConfig.MemorySwap = hc.MemorySwap - } - if hc.CPUShares != 0 && w.InnerHostConfig.CPUShares == 0 { - w.InnerHostConfig.CPUShares = hc.CPUShares - } - if hc.CpusetCpus != "" && w.InnerHostConfig.CpusetCpus == "" { - w.InnerHostConfig.CpusetCpus = hc.CpusetCpus - } - - if hc.VolumeDriver != "" && w.InnerHostConfig.VolumeDriver == "" { - w.InnerHostConfig.VolumeDriver = hc.VolumeDriver - } - - hc = w.InnerHostConfig - } - - if hc != nil { - if w.Cpuset != "" && hc.CpusetCpus == "" { - hc.CpusetCpus = w.Cpuset - } - } - - // Make sure NetworkMode has an acceptable value. We do this to ensure - // backwards compatible API behavior. - hc = SetDefaultNetModeIfBlank(hc) - - return hc -} diff --git a/runconfig/config_windows.go b/runconfig/config_windows.go deleted file mode 100644 index 50a5238000..0000000000 --- a/runconfig/config_windows.go +++ /dev/null @@ -1,19 +0,0 @@ -package runconfig - -import ( - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" -) - -// ContainerConfigWrapper is a Config wrapper that holds the container Config (portable) -// and the corresponding HostConfig (non-portable). -type ContainerConfigWrapper struct { - *container.Config - HostConfig *container.HostConfig `json:"HostConfig,omitempty"` - NetworkingConfig *networktypes.NetworkingConfig `json:"NetworkingConfig,omitempty"` -} - -// getHostConfig gets the HostConfig of the Config. -func (w *ContainerConfigWrapper) getHostConfig() *container.HostConfig { - return w.HostConfig -} diff --git a/runconfig/errors.go b/runconfig/errors.go deleted file mode 100644 index d3608576e2..0000000000 --- a/runconfig/errors.go +++ /dev/null @@ -1,40 +0,0 @@ -package runconfig - -import ( - "fmt" -) - -var ( - // ErrConflictContainerNetworkAndLinks conflict between --net=container and links - ErrConflictContainerNetworkAndLinks = fmt.Errorf("Conflicting options: container type network can't be used with links. This would result in undefined behavior") - // ErrConflictUserDefinedNetworkAndLinks conflict between --net= and links - ErrConflictUserDefinedNetworkAndLinks = fmt.Errorf("Conflicting options: networking can't be used with links. This would result in undefined behavior") - // ErrConflictSharedNetwork conflict between private and other networks - ErrConflictSharedNetwork = fmt.Errorf("Container sharing network namespace with another container or host cannot be connected to any other network") - // ErrConflictHostNetwork conflict from being disconnected from host network or connected to host network. 
- ErrConflictHostNetwork = fmt.Errorf("Container cannot be disconnected from host network or connected to host network") - // ErrConflictNoNetwork conflict between private and other networks - ErrConflictNoNetwork = fmt.Errorf("Container cannot be connected to multiple networks with one of the networks in private (none) mode") - // ErrConflictNetworkAndDNS conflict between --dns and the network mode - ErrConflictNetworkAndDNS = fmt.Errorf("Conflicting options: dns and the network mode") - // ErrConflictNetworkHostname conflict between the hostname and the network mode - ErrConflictNetworkHostname = fmt.Errorf("Conflicting options: hostname and the network mode") - // ErrConflictHostNetworkAndLinks conflict between --net=host and links - ErrConflictHostNetworkAndLinks = fmt.Errorf("Conflicting options: host type networking can't be used with links. This would result in undefined behavior") - // ErrConflictContainerNetworkAndMac conflict between the mac address and the network mode - ErrConflictContainerNetworkAndMac = fmt.Errorf("Conflicting options: mac-address and the network mode") - // ErrConflictNetworkHosts conflict between add-host and the network mode - ErrConflictNetworkHosts = fmt.Errorf("Conflicting options: custom host-to-IP mapping and the network mode") - // ErrConflictNetworkPublishPorts conflict between the publish options and the network mode - ErrConflictNetworkPublishPorts = fmt.Errorf("Conflicting options: port publishing and the container type network mode") - // ErrConflictNetworkExposePorts conflict between the expose option and the network mode - ErrConflictNetworkExposePorts = fmt.Errorf("Conflicting options: port exposing and the container type network mode") - // ErrUnsupportedNetworkAndIP conflict between network mode and requested ip address - ErrUnsupportedNetworkAndIP = fmt.Errorf("User specified IP address is supported on user defined networks only") - // ErrUnsupportedNetworkNoSubnetAndIP conflict between network with no configured subnet and requested ip address - ErrUnsupportedNetworkNoSubnetAndIP = fmt.Errorf("User specified IP address is supported only when connecting to networks with user configured subnets") - // ErrUnsupportedNetworkAndAlias conflict between network mode and alias - ErrUnsupportedNetworkAndAlias = fmt.Errorf("Network-scoped alias is supported only for containers in user defined networks") - // ErrConflictUTSHostname conflict between the hostname and the UTS mode - ErrConflictUTSHostname = fmt.Errorf("Conflicting options: hostname and the UTS mode") -) diff --git a/runconfig/fixtures/unix/container_config_1_14.json b/runconfig/fixtures/unix/container_config_1_14.json deleted file mode 100644 index b08334c095..0000000000 --- a/runconfig/fixtures/unix/container_config_1_14.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "Hostname":"", - "Domainname": "", - "User":"", - "Memory": 1000, - "MemorySwap":0, - "CpuShares": 512, - "Cpuset": "0,1", - "AttachStdin":false, - "AttachStdout":true, - "AttachStderr":true, - "PortSpecs":null, - "Tty":false, - "OpenStdin":false, - "StdinOnce":false, - "Env":null, - "Cmd":[ - "bash" - ], - "Image":"ubuntu", - "Volumes":{ - "/tmp": {} - }, - "WorkingDir":"", - "NetworkDisabled": false, - "ExposedPorts":{ - "22/tcp": {} - }, - "RestartPolicy": { "Name": "always" } -} diff --git a/runconfig/fixtures/unix/container_config_1_17.json b/runconfig/fixtures/unix/container_config_1_17.json deleted file mode 100644 index 0d780877b4..0000000000 --- a/runconfig/fixtures/unix/container_config_1_17.json +++ /dev/null @@ -1,50 
+0,0 @@ -{ - "Hostname": "", - "Domainname": "", - "User": "", - "Memory": 1000, - "MemorySwap": 0, - "CpuShares": 512, - "Cpuset": "0,1", - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Entrypoint": "bash", - "Image": "ubuntu", - "Volumes": { - "/tmp": {} - }, - "WorkingDir": "", - "NetworkDisabled": false, - "MacAddress": "12:34:56:78:9a:bc", - "ExposedPorts": { - "22/tcp": {} - }, - "SecurityOpt": [""], - "HostConfig": { - "Binds": ["/tmp:/tmp"], - "Links": ["redis3:redis"], - "LxcConf": {"lxc.utsname":"docker"}, - "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts": false, - "Privileged": false, - "ReadonlyRootfs": false, - "Dns": ["8.8.8.8"], - "DnsSearch": [""], - "DnsOptions": [""], - "ExtraHosts": null, - "VolumesFrom": ["parent", "other:ro"], - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"], - "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, - "NetworkMode": "bridge", - "Devices": [] - } -} diff --git a/runconfig/fixtures/unix/container_config_1_19.json b/runconfig/fixtures/unix/container_config_1_19.json deleted file mode 100644 index de49cf3242..0000000000 --- a/runconfig/fixtures/unix/container_config_1_19.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "Hostname": "", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Entrypoint": "bash", - "Image": "ubuntu", - "Labels": { - "com.example.vendor": "Acme", - "com.example.license": "GPL", - "com.example.version": "1.0" - }, - "Volumes": { - "/tmp": {} - }, - "WorkingDir": "", - "NetworkDisabled": false, - "MacAddress": "12:34:56:78:9a:bc", - "ExposedPorts": { - "22/tcp": {} - }, - "HostConfig": { - "Binds": ["/tmp:/tmp"], - "Links": ["redis3:redis"], - "LxcConf": {"lxc.utsname":"docker"}, - "Memory": 1000, - "MemorySwap": 0, - "CpuShares": 512, - "CpusetCpus": "0,1", - "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts": false, - "Privileged": false, - "ReadonlyRootfs": false, - "Dns": ["8.8.8.8"], - "DnsSearch": [""], - "DnsOptions": [""], - "ExtraHosts": null, - "VolumesFrom": ["parent", "other:ro"], - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"], - "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, - "NetworkMode": "bridge", - "Devices": [], - "Ulimits": [{}], - "LogConfig": { "Type": "json-file", "Config": {} }, - "SecurityOpt": [""], - "CgroupParent": "" - } -} diff --git a/runconfig/fixtures/unix/container_hostconfig_1_14.json b/runconfig/fixtures/unix/container_hostconfig_1_14.json deleted file mode 100644 index c72ac91cab..0000000000 --- a/runconfig/fixtures/unix/container_hostconfig_1_14.json +++ /dev/null @@ -1,18 +0,0 @@ -{ - "Binds": ["/tmp:/tmp"], - "ContainerIDFile": "", - "LxcConf": [], - "Privileged": false, - "PortBindings": { - "80/tcp": [ - { - "HostIp": "0.0.0.0", - "HostPort": "49153" - } - ] - }, - "Links": ["/name:alias"], - "PublishAllPorts": false, - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"] -} diff --git a/runconfig/fixtures/unix/container_hostconfig_1_19.json b/runconfig/fixtures/unix/container_hostconfig_1_19.json deleted file mode 100644 index 5ca8aa7e19..0000000000 --- a/runconfig/fixtures/unix/container_hostconfig_1_19.json +++ /dev/null @@ -1,30 +0,0 @@ -{ - "Binds": ["/tmp:/tmp"], - "Links": ["redis3:redis"], - "LxcConf": {"lxc.utsname":"docker"}, - 
"Memory": 0, - "MemorySwap": 0, - "CpuShares": 512, - "CpuPeriod": 100000, - "CpusetCpus": "0,1", - "CpusetMems": "0,1", - "BlkioWeight": 300, - "OomKillDisable": false, - "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts": false, - "Privileged": false, - "ReadonlyRootfs": false, - "Dns": ["8.8.8.8"], - "DnsSearch": [""], - "ExtraHosts": null, - "VolumesFrom": ["parent", "other:ro"], - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"], - "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, - "NetworkMode": "bridge", - "Devices": [], - "Ulimits": [{}], - "LogConfig": { "Type": "json-file", "Config": {} }, - "SecurityOpt": [""], - "CgroupParent": "" -} diff --git a/runconfig/fixtures/windows/container_config_1_19.json b/runconfig/fixtures/windows/container_config_1_19.json deleted file mode 100644 index 724320c760..0000000000 --- a/runconfig/fixtures/windows/container_config_1_19.json +++ /dev/null @@ -1,58 +0,0 @@ -{ - "Hostname": "", - "Domainname": "", - "User": "", - "AttachStdin": false, - "AttachStdout": true, - "AttachStderr": true, - "Tty": false, - "OpenStdin": false, - "StdinOnce": false, - "Env": null, - "Cmd": [ - "date" - ], - "Entrypoint": "cmd", - "Image": "windows", - "Labels": { - "com.example.vendor": "Acme", - "com.example.license": "GPL", - "com.example.version": "1.0" - }, - "Volumes": { - "c:/windows": {} - }, - "WorkingDir": "", - "NetworkDisabled": false, - "MacAddress": "12:34:56:78:9a:bc", - "ExposedPorts": { - "22/tcp": {} - }, - "HostConfig": { - "Binds": ["c:/windows:d:/tmp"], - "Links": ["redis3:redis"], - "LxcConf": {"lxc.utsname":"docker"}, - "Memory": 1000, - "MemorySwap": 0, - "CpuShares": 512, - "CpusetCpus": "0,1", - "PortBindings": { "22/tcp": [{ "HostPort": "11022" }] }, - "PublishAllPorts": false, - "Privileged": false, - "ReadonlyRootfs": false, - "Dns": ["8.8.8.8"], - "DnsSearch": [""], - "DnsOptions": [""], - "ExtraHosts": null, - "VolumesFrom": ["parent", "other:ro"], - "CapAdd": ["NET_ADMIN"], - "CapDrop": ["MKNOD"], - "RestartPolicy": { "Name": "", "MaximumRetryCount": 0 }, - "NetworkMode": "default", - "Devices": [], - "Ulimits": [{}], - "LogConfig": { "Type": "json-file", "Config": {} }, - "SecurityOpt": [""], - "CgroupParent": "" - } -} diff --git a/runconfig/hostconfig.go b/runconfig/hostconfig.go deleted file mode 100644 index 769cc9f5da..0000000000 --- a/runconfig/hostconfig.go +++ /dev/null @@ -1,35 +0,0 @@ -package runconfig - -import ( - "encoding/json" - "io" - - "github.com/docker/engine-api/types/container" -) - -// DecodeHostConfig creates a HostConfig based on the specified Reader. -// It assumes the content of the reader will be JSON, and decodes it. -func DecodeHostConfig(src io.Reader) (*container.HostConfig, error) { - decoder := json.NewDecoder(src) - - var w ContainerConfigWrapper - if err := decoder.Decode(&w); err != nil { - return nil, err - } - - hc := w.getHostConfig() - return hc, nil -} - -// SetDefaultNetModeIfBlank changes the NetworkMode in a HostConfig structure -// to default if it is not populated. This ensures backwards compatibility after -// the validation of the network mode was moved from the docker CLI to the -// docker daemon. 
-func SetDefaultNetModeIfBlank(hc *container.HostConfig) *container.HostConfig { - if hc != nil { - if hc.NetworkMode == container.NetworkMode("") { - hc.NetworkMode = container.NetworkMode("default") - } - } - return hc -} diff --git a/runconfig/hostconfig_solaris.go b/runconfig/hostconfig_solaris.go deleted file mode 100644 index 5c2e861202..0000000000 --- a/runconfig/hostconfig_solaris.go +++ /dev/null @@ -1,47 +0,0 @@ -package runconfig - -import ( - "fmt" - "strings" - - "github.com/docker/engine-api/types/container" -) - -// DefaultDaemonNetworkMode returns the default network stack the daemon should -// use. -func DefaultDaemonNetworkMode() container.NetworkMode { - return container.NetworkMode("default") -} - -// IsPreDefinedNetwork indicates if a network is predefined by the daemon -func IsPreDefinedNetwork(network string) bool { - return false -} - -// ValidateNetMode ensures that the various combinations of requested -// network settings are valid. -func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - parts := strings.Split(string(hc.NetworkMode), ":") - switch mode := parts[0]; mode { - case "default", "none": - default: - return fmt.Errorf("invalid --net: %s", hc.NetworkMode) - } - return nil -} - -// ValidateIsolation performs platform specific validation of the -// isolation level in the hostconfig structure. -// This setting is currently discarded for Solaris so this is a no-op. -func ValidateIsolation(hc *container.HostConfig) error { - return nil -} - -// ValidateQoS performs platform specific validation of the QoS settings -func ValidateQoS(hc *container.HostConfig) error { - return nil -} diff --git a/runconfig/hostconfig_test.go b/runconfig/hostconfig_test.go deleted file mode 100644 index 1fbc90a4cf..0000000000 --- a/runconfig/hostconfig_test.go +++ /dev/null @@ -1,222 +0,0 @@ -// +build !windows - -package runconfig - -import ( - "bytes" - "fmt" - "io/ioutil" - "testing" - - "github.com/docker/engine-api/types/container" -) - -// TODO Windows: This will need addressing for a Windows daemon. 
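- // Each entry below pairs a NetworkMode with the expected results of IsPrivate, IsBridge, IsHost, IsContainer, IsNone and IsDefault, in that order; the inline legend uses the same order.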
-func TestNetworkModeTest(t *testing.T) { - networkModes := map[container.NetworkMode][]bool{ - // private, bridge, host, container, none, default - "": {true, false, false, false, false, false}, - "something:weird": {true, false, false, false, false, false}, - "bridge": {true, true, false, false, false, false}, - DefaultDaemonNetworkMode(): {true, true, false, false, false, false}, - "host": {false, false, true, false, false, false}, - "container:name": {false, false, false, true, false, false}, - "none": {true, false, false, false, true, false}, - "default": {true, false, false, false, false, true}, - } - networkModeNames := map[container.NetworkMode]string{ - "": "", - "something:weird": "something:weird", - "bridge": "bridge", - DefaultDaemonNetworkMode(): "bridge", - "host": "host", - "container:name": "container", - "none": "none", - "default": "default", - } - for networkMode, state := range networkModes { - if networkMode.IsPrivate() != state[0] { - t.Fatalf("NetworkMode.IsPrivate for %v should have been %v but was %v", networkMode, state[0], networkMode.IsPrivate()) - } - if networkMode.IsBridge() != state[1] { - t.Fatalf("NetworkMode.IsBridge for %v should have been %v but was %v", networkMode, state[1], networkMode.IsBridge()) - } - if networkMode.IsHost() != state[2] { - t.Fatalf("NetworkMode.IsHost for %v should have been %v but was %v", networkMode, state[2], networkMode.IsHost()) - } - if networkMode.IsContainer() != state[3] { - t.Fatalf("NetworkMode.IsContainer for %v should have been %v but was %v", networkMode, state[3], networkMode.IsContainer()) - } - if networkMode.IsNone() != state[4] { - t.Fatalf("NetworkMode.IsNone for %v should have been %v but was %v", networkMode, state[4], networkMode.IsNone()) - } - if networkMode.IsDefault() != state[5] { - t.Fatalf("NetworkMode.IsDefault for %v should have been %v but was %v", networkMode, state[5], networkMode.IsDefault()) - } - if networkMode.NetworkName() != networkModeNames[networkMode] { - t.Fatalf("Expected name %v, got %v", networkModeNames[networkMode], networkMode.NetworkName()) - } - } -} - -func TestIpcModeTest(t *testing.T) { - ipcModes := map[container.IpcMode][]bool{ - // private, host, container, valid - "": {true, false, false, true}, - "something:weird": {true, false, false, false}, - ":weird": {true, false, false, true}, - "host": {false, true, false, true}, - "container:name": {false, false, true, true}, - "container:name:something": {false, false, true, false}, - "container:": {false, false, true, false}, - } - for ipcMode, state := range ipcModes { - if ipcMode.IsPrivate() != state[0] { - t.Fatalf("IpcMode.IsPrivate for %v should have been %v but was %v", ipcMode, state[0], ipcMode.IsPrivate()) - } - if ipcMode.IsHost() != state[1] { - t.Fatalf("IpcMode.IsHost for %v should have been %v but was %v", ipcMode, state[1], ipcMode.IsHost()) - } - if ipcMode.IsContainer() != state[2] { - t.Fatalf("IpcMode.IsContainer for %v should have been %v but was %v", ipcMode, state[2], ipcMode.IsContainer()) - } - if ipcMode.Valid() != state[3] { - t.Fatalf("IpcMode.Valid for %v should have been %v but was %v", ipcMode, state[3], ipcMode.Valid()) - } - } - containerIpcModes := map[container.IpcMode]string{ - "": "", - "something": "", - "something:weird": "weird", - "container": "", - "container:": "", - "container:name": "name", - "container:name1:name2": "name1:name2", - } - for ipcMode, container := range containerIpcModes { - if ipcMode.Container() != container { - t.Fatalf("Expected %v for %v but was %v", container, 
ipcMode, ipcMode.Container()) - } - } -} - -func TestUTSModeTest(t *testing.T) { - utsModes := map[container.UTSMode][]bool{ - // private, host, valid - "": {true, false, true}, - "something:weird": {true, false, false}, - "host": {false, true, true}, - "host:name": {true, false, true}, - } - for utsMode, state := range utsModes { - if utsMode.IsPrivate() != state[0] { - t.Fatalf("UtsMode.IsPrivate for %v should have been %v but was %v", utsMode, state[0], utsMode.IsPrivate()) - } - if utsMode.IsHost() != state[1] { - t.Fatalf("UtsMode.IsHost for %v should have been %v but was %v", utsMode, state[1], utsMode.IsHost()) - } - if utsMode.Valid() != state[2] { - t.Fatalf("UtsMode.Valid for %v should have been %v but was %v", utsMode, state[2], utsMode.Valid()) - } - } -} - -func TestUsernsModeTest(t *testing.T) { - usernsModes := map[container.UsernsMode][]bool{ - // private, host, valid - "": {true, false, true}, - "something:weird": {true, false, false}, - "host": {false, true, true}, - "host:name": {true, false, true}, - } - for usernsMode, state := range usernsModes { - if usernsMode.IsPrivate() != state[0] { - t.Fatalf("UsernsMode.IsPrivate for %v should have been %v but was %v", usernsMode, state[0], usernsMode.IsPrivate()) - } - if usernsMode.IsHost() != state[1] { - t.Fatalf("UsernsMode.IsHost for %v should have been %v but was %v", usernsMode, state[1], usernsMode.IsHost()) - } - if usernsMode.Valid() != state[2] { - t.Fatalf("UsernsMode.Valid for %v should have been %v but was %v", usernsMode, state[2], usernsMode.Valid()) - } - } -} - -func TestPidModeTest(t *testing.T) { - pidModes := map[container.PidMode][]bool{ - // private, host, valid - "": {true, false, true}, - "something:weird": {true, false, false}, - "host": {false, true, true}, - "host:name": {true, false, true}, - } - for pidMode, state := range pidModes { - if pidMode.IsPrivate() != state[0] { - t t.Fatalf := func() {}; _ = t
- t.Fatalf("PidMode.IsPrivate for %v should have been %v but was %v", pidMode, state[0], pidMode.IsPrivate()) - } - if pidMode.IsHost() != state[1] { - t.Fatalf("PidMode.IsHost for %v should have been %v but was %v", pidMode, state[1], pidMode.IsHost()) - } - if pidMode.Valid() != state[2] { - t.Fatalf("PidMode.Valid for %v should have been %v but was %v", pidMode, state[2], pidMode.Valid()) - } - } -} - -func TestRestartPolicy(t *testing.T) { - restartPolicies := map[container.RestartPolicy][]bool{ - // none, always, failure - container.RestartPolicy{}: {true, false, false}, - container.RestartPolicy{"something", 0}: {false, false, false}, - container.RestartPolicy{"no", 0}: {true, false, false}, - container.RestartPolicy{"always", 0}: {false, true, false}, - container.RestartPolicy{"on-failure", 0}: {false, false, true}, - } - for restartPolicy, state := range restartPolicies { - if restartPolicy.IsNone() != state[0] { - t.Fatalf("RestartPolicy.IsNone for %v should have been %v but was %v", restartPolicy, state[0], restartPolicy.IsNone()) - } - if restartPolicy.IsAlways() != state[1] { - t.Fatalf("RestartPolicy.IsAlways for %v should have been %v but was %v", restartPolicy, state[1], restartPolicy.IsAlways()) - } - if restartPolicy.IsOnFailure() != state[2] { - t.Fatalf("RestartPolicy.IsOnFailure for %v should have been %v but was %v", restartPolicy, state[2], restartPolicy.IsOnFailure()) - } - } -} - -func TestDecodeHostConfig(t *testing.T) { - fixtures := []struct { - file string - }{ - {"fixtures/unix/container_hostconfig_1_14.json"}, - {"fixtures/unix/container_hostconfig_1_19.json"}, - } - - for _, f := range fixtures { - b,
err := ioutil.ReadFile(f.file) - if err != nil { - t.Fatal(err) - } - - c, err := DecodeHostConfig(bytes.NewReader(b)) - if err != nil { - t.Fatal(fmt.Errorf("Error parsing %s: %v", f.file, err)) - } - - if c.Privileged != false { - t.Fatalf("Expected privileged false, found %v\n", c.Privileged) - } - - if l := len(c.Binds); l != 1 { - t.Fatalf("Expected 1 bind, found %d\n", l) - } - - if len(c.CapAdd) != 1 || c.CapAdd[0] != "NET_ADMIN" { - t.Fatalf("Expected CapAdd NET_ADMIN, got %v", c.CapAdd) - } - - if len(c.CapDrop) != 1 || c.CapDrop[0] != "MKNOD" { - t.Fatalf("Expected CapDrop MKNOD, got %v", c.CapDrop) - } - } -}
diff --git a/runconfig/hostconfig_unix.go b/runconfig/hostconfig_unix.go deleted file mode 100644 index c06b6ebfa1..0000000000 --- a/runconfig/hostconfig_unix.go +++ /dev/null @@ -1,106 +0,0 @@ -// +build !windows,!solaris - -package runconfig - -import ( - "fmt" - "runtime" - "strings" - - "github.com/docker/engine-api/types/container" -) - -// DefaultDaemonNetworkMode returns the default network stack the daemon should -// use. -func DefaultDaemonNetworkMode() container.NetworkMode { - return container.NetworkMode("bridge") -} - -// IsPreDefinedNetwork indicates if a network is predefined by the daemon -func IsPreDefinedNetwork(network string) bool { - n := container.NetworkMode(network) - return n.IsBridge() || n.IsHost() || n.IsNone() || n.IsDefault() || network == "ingress" -} - -// ValidateNetMode ensures that the various combinations of requested -// network settings are valid. -func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - parts := strings.Split(string(hc.NetworkMode), ":") - if parts[0] == "container" { - if len(parts) < 2 || parts[1] == "" { - return fmt.Errorf("--net: invalid net mode: invalid container format container:") - } - } - - if hc.NetworkMode.IsContainer() && c.Hostname != "" { - return ErrConflictNetworkHostname - } - - if hc.UTSMode.IsHost() && c.Hostname != "" { - return ErrConflictUTSHostname - } - - if hc.NetworkMode.IsHost() && len(hc.Links) > 0 { - return ErrConflictHostNetworkAndLinks - } - - if hc.NetworkMode.IsContainer() && len(hc.Links) > 0 { - return ErrConflictContainerNetworkAndLinks - } - - if hc.NetworkMode.IsContainer() && len(hc.DNS) > 0 { - return ErrConflictNetworkAndDNS - } - - if hc.NetworkMode.IsContainer() && len(hc.ExtraHosts) > 0 { - return ErrConflictNetworkHosts - } - - if (hc.NetworkMode.IsContainer() || hc.NetworkMode.IsHost()) && c.MacAddress != "" { - return ErrConflictContainerNetworkAndMac - } - - if hc.NetworkMode.IsContainer() && (len(hc.PortBindings) > 0 || hc.PublishAllPorts == true) { - return ErrConflictNetworkPublishPorts - } - - if hc.NetworkMode.IsContainer() && len(c.ExposedPorts) > 0 { - return ErrConflictNetworkExposePorts - } - return nil -} - -// ValidateIsolation performs platform specific validation of -// isolation in the hostconfig structure.
Linux only supports "default" -// which is LXC container isolation -func ValidateIsolation(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - if !hc.Isolation.IsValid() { - return fmt.Errorf("invalid --isolation: %q - %s only supports 'default'", hc.Isolation, runtime.GOOS) - } - return nil -} - -// ValidateQoS performs platform specific validation of the QoS settings -func ValidateQoS(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - - if hc.IOMaximumBandwidth != 0 { - return fmt.Errorf("invalid QoS settings: %s does not support --io-maxbandwidth", runtime.GOOS) - } - - if hc.IOMaximumIOps != 0 { - return fmt.Errorf("invalid QoS settings: %s does not support --io-maxiops", runtime.GOOS) - } - return nil -} diff --git a/runconfig/hostconfig_windows.go b/runconfig/hostconfig_windows.go deleted file mode 100644 index d06452db18..0000000000 --- a/runconfig/hostconfig_windows.go +++ /dev/null @@ -1,51 +0,0 @@ -package runconfig - -import ( - "fmt" - "strings" - - "github.com/docker/engine-api/types/container" -) - -// DefaultDaemonNetworkMode returns the default network stack the daemon should -// use. -func DefaultDaemonNetworkMode() container.NetworkMode { - return container.NetworkMode("nat") -} - -// IsPreDefinedNetwork indicates if a network is predefined by the daemon -func IsPreDefinedNetwork(network string) bool { - return !container.NetworkMode(network).IsUserDefined() -} - -// ValidateNetMode ensures that the various combinations of requested -// network settings are valid. -func ValidateNetMode(c *container.Config, hc *container.HostConfig) error { - if hc == nil { - return nil - } - parts := strings.Split(string(hc.NetworkMode), ":") - if len(parts) > 1 { - return fmt.Errorf("invalid --net: %s", hc.NetworkMode) - } - return nil -} - -// ValidateIsolation performs platform specific validation of the -// isolation in the hostconfig structure. Windows supports 'default' (or -// blank), 'process', or 'hyperv'. -func ValidateIsolation(hc *container.HostConfig) error { - // We may not be passed a host config, such as in the case of docker commit - if hc == nil { - return nil - } - if !hc.Isolation.IsValid() { - return fmt.Errorf("invalid --isolation: %q. Windows supports 'default', 'process', or 'hyperv'", hc.Isolation) - } - return nil -} - -// ValidateQoS performs platform specific validation of the Qos settings -func ValidateQoS(hc *container.HostConfig) error { - return nil -} diff --git a/runconfig/opts/envfile.go b/runconfig/opts/envfile.go deleted file mode 100644 index ba8b4f2016..0000000000 --- a/runconfig/opts/envfile.go +++ /dev/null @@ -1,67 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "os" - "strings" -) - -// ParseEnvFile reads a file with environment variables enumerated by lines -// -// ``Environment variable names used by the utilities in the Shell and -// Utilities volume of IEEE Std 1003.1-2001 consist solely of uppercase -// letters, digits, and the '_' (underscore) from the characters defined in -// Portable Character Set and do not begin with a digit. 
*But*, other -// characters may be permitted by an implementation; applications shall -// tolerate the presence of such names.'' -// -- http://pubs.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap08.html -// -// As of #16585, it's up to the application inside docker to decide whether to -// validate environment variables; that's why we just strip leading whitespace and -// nothing more. -func ParseEnvFile(filename string) ([]string, error) { - fh, err := os.Open(filename) - if err != nil { - return []string{}, err - } - defer fh.Close() - - lines := []string{} - scanner := bufio.NewScanner(fh) - for scanner.Scan() { - // trim the line from all leading whitespace first - line := strings.TrimLeft(scanner.Text(), whiteSpaces) - // line is not empty, and not starting with '#' - if len(line) > 0 && !strings.HasPrefix(line, "#") { - data := strings.SplitN(line, "=", 2) - - // trim the front of a variable, but nothing else - variable := strings.TrimLeft(data[0], whiteSpaces) - if strings.ContainsAny(variable, whiteSpaces) { - return []string{}, ErrBadEnvVariable{fmt.Sprintf("variable '%s' has white spaces", variable)} - } - - if len(data) > 1 { - - // pass the value through, no trimming - lines = append(lines, fmt.Sprintf("%s=%s", variable, data[1])) - } else { - // if only a pass-through variable is given, clean it up. - lines = append(lines, fmt.Sprintf("%s=%s", strings.TrimSpace(line), os.Getenv(line))) - } - } - } - return lines, scanner.Err() -} - -var whiteSpaces = " \t" - -// ErrBadEnvVariable typed error for bad environment variable -type ErrBadEnvVariable struct { - msg string -} - -func (e ErrBadEnvVariable) Error() string { - return fmt.Sprintf("poorly formatted environment: %s", e.msg) -}
diff --git a/runconfig/opts/envfile_test.go b/runconfig/opts/envfile_test.go deleted file mode 100644 index 5dd7078bc0..0000000000 --- a/runconfig/opts/envfile_test.go +++ /dev/null @@ -1,142 +0,0 @@ -package opts - -import ( - "bufio" - "fmt" - "io/ioutil" - "os" - "reflect" - "strings" - "testing" -) - -func tmpFileWithContent(content string, t *testing.T) string { - tmpFile, err := ioutil.TempFile("", "envfile-test") - if err != nil { - t.Fatal(err) - } - defer tmpFile.Close() - - if _, err := tmpFile.WriteString(content); err != nil { - t.Fatal(err) - } - return tmpFile.Name() -} - -// Test ParseEnvFile for a file with a few well formatted lines -func TestParseEnvFileGoodFile(t *testing.T) { - content := `foo=bar - baz=quux -# comment - -_foobar=foobaz -with.dots=working -and_underscore=working too -` - // Adding a newline + a line with pure whitespace. - // This is being done like this instead of the block above - // because it's common for editors to trim trailing whitespace - // from lines, which becomes annoying since that's the - // exact thing we need to test.
- content += "\n \t " - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - expectedLines := []string{ - "foo=bar", - "baz=quux", - "_foobar=foobaz", - "with.dots=working", - "and_underscore=working too", - } - - if !reflect.DeepEqual(lines, expectedLines) { - t.Fatal("lines not equal to expected_lines") - } -} - -// Test ParseEnvFile for an empty file -func TestParseEnvFileEmptyFile(t *testing.T) { - tmpFile := tmpFileWithContent("", t) - defer os.Remove(tmpFile) - - lines, err := ParseEnvFile(tmpFile) - if err != nil { - t.Fatal(err) - } - - if len(lines) != 0 { - t.Fatal("lines not empty; expected empty") - } -} - -// Test ParseEnvFile for a non existent file -func TestParseEnvFileNonExistentFile(t *testing.T) { - _, err := ParseEnvFile("foo_bar_baz") - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } - if _, ok := err.(*os.PathError); !ok { - t.Fatalf("Expected a PathError, got [%v]", err) - } -} - -// Test ParseEnvFile for a badly formatted file -func TestParseEnvFileBadlyFormattedFile(t *testing.T) { - content := `foo=bar - f =quux -` - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatalf("Expected an ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected an ErrBadEnvVariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'f ' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} - -// Test ParseEnvFile for a file with a line exceeding bufio.MaxScanTokenSize -func TestParseEnvFileLineTooLongFile(t *testing.T) { - content := strings.Repeat("a", bufio.MaxScanTokenSize+42) - content = fmt.Sprint("foo=", content) - - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - if err == nil { - t.Fatal("ParseEnvFile succeeded; expected failure") - } -} - -// ParseEnvFile with a random file, pass through -func TestParseEnvFileRandomFile(t *testing.T) { - content := `first line -another invalid line` - tmpFile := tmpFileWithContent(content, t) - defer os.Remove(tmpFile) - - _, err := ParseEnvFile(tmpFile) - - if err == nil { - t.Fatalf("Expected an ErrBadEnvVariable, got nothing") - } - if _, ok := err.(ErrBadEnvVariable); !ok { - t.Fatalf("Expected an ErrBadEnvvariable, got [%v]", err) - } - expectedMessage := "poorly formatted environment: variable 'first line' has white spaces" - if err.Error() != expectedMessage { - t.Fatalf("Expected [%v], got [%v]", expectedMessage, err.Error()) - } -} diff --git a/runconfig/opts/fixtures/valid.env b/runconfig/opts/fixtures/valid.env deleted file mode 100644 index 3afbdc81c2..0000000000 --- a/runconfig/opts/fixtures/valid.env +++ /dev/null @@ -1 +0,0 @@ -ENV1=value1 diff --git a/runconfig/opts/fixtures/valid.label b/runconfig/opts/fixtures/valid.label deleted file mode 100644 index b4208bdf8f..0000000000 --- a/runconfig/opts/fixtures/valid.label +++ /dev/null @@ -1 +0,0 @@ -LABEL1=value1 diff --git a/runconfig/opts/opts.go b/runconfig/opts/opts.go deleted file mode 100644 index 37c070e6c1..0000000000 --- a/runconfig/opts/opts.go +++ /dev/null @@ -1,70 +0,0 @@ -package opts - -import ( - "fmt" - fopts "github.com/docker/docker/opts" - "net" - "os" - "strings" -) - -// ValidateAttach validates that the specified string is a valid attach option. 
-func ValidateAttach(val string) (string, error) { - s := strings.ToLower(val) - for _, str := range []string{"stdin", "stdout", "stderr"} { - if s == str { - return s, nil - } - } - return val, fmt.Errorf("valid streams are STDIN, STDOUT and STDERR") -} - -// ValidateEnv validates an environment variable and returns it. -// If no value is specified, it returns the current value using os.Getenv. -// -// As with ParseEnvFile and related to #16585, environment variable names -// are not validated whatsoever; it's up to the application inside docker -// to validate them or not. -func ValidateEnv(val string) (string, error) { - arr := strings.Split(val, "=") - if len(arr) > 1 { - return val, nil - } - if !doesEnvExist(val) { - return val, nil - } - return fmt.Sprintf("%s=%s", val, os.Getenv(val)), nil -} - -func doesEnvExist(name string) bool { - for _, entry := range os.Environ() { - parts := strings.SplitN(entry, "=", 2) - if parts[0] == name { - return true - } - } - return false -} - -// ValidateExtraHost validates that the specified string is a valid extrahost and returns it. -// ExtraHost is in the form of name:ip where the ip has to be a valid ip (IPv4 or IPv6). -func ValidateExtraHost(val string) (string, error) { - // allow for IPv6 addresses in extra hosts by only splitting on first ":" - arr := strings.SplitN(val, ":", 2) - if len(arr) != 2 || len(arr[0]) == 0 { - return "", fmt.Errorf("bad format for add-host: %q", val) - } - if _, err := fopts.ValidateIPAddress(arr[1]); err != nil { - return "", fmt.Errorf("invalid IP address in add-host: %q", arr[1]) - } - return val, nil -} - -// ValidateMACAddress validates a MAC address. -func ValidateMACAddress(val string) (string, error) { - _, err := net.ParseMAC(strings.TrimSpace(val)) - if err != nil { - return "", err - } - return val, nil -}
diff --git a/runconfig/opts/opts_test.go b/runconfig/opts/opts_test.go deleted file mode 100644 index 69eac88adc..0000000000 --- a/runconfig/opts/opts_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package opts - -import ( - "fmt" - "os" - "strings" - "testing" -) - -func TestValidateAttach(t *testing.T) { - valid := []string{ - "stdin", - "stdout", - "stderr", - "STDIN", - "STDOUT", - "STDERR", - } - if _, err := ValidateAttach("invalid"); err == nil { - t.Fatalf("Expected error with [valid streams are STDIN, STDOUT and STDERR], got nothing") - } - - for _, attach := range valid { - value, err := ValidateAttach(attach) - if err != nil { - t.Fatal(err) - } - if value != strings.ToLower(attach) { - t.Fatalf("Expected [%v], got [%v]", attach, value) - } - } -} - -func TestValidateEnv(t *testing.T) { - valids := map[string]string{ - "a": "a", - "something": "something", - "_=a": "_=a", - "env1=value1": "env1=value1", - "_env1=value1": "_env1=value1", - "env2=value2=value3": "env2=value2=value3", - "env3=abc!qwe": "env3=abc!qwe", - "env_4=value 4": "env_4=value 4", - "PATH": fmt.Sprintf("PATH=%v", os.Getenv("PATH")), - "PATH=something": "PATH=something", - "asd!qwe": "asd!qwe", - "1asd": "1asd", - "123": "123", - "some space": "some space", - " some space before": " some space before", - "some space after ": "some space after ", - } - for value, expected := range valids { - actual, err := ValidateEnv(value) - if err != nil { - t.Fatal(err) - } - if actual != expected { - t.Fatalf("Expected [%v], got [%v]", expected, actual) - } - } -} - -func TestValidateExtraHosts(t *testing.T) { - valid := []string{ - `myhost:192.168.0.1`, - `thathost:10.0.2.1`, - `anipv6host:2003:ab34:e::1`, - `ipv6local:::1`, - } - - invalid
:= map[string]string{ - `myhost:192.notanipaddress.1`: `invalid IP`, - `thathost-nosemicolon10.0.0.1`: `bad format`, - `anipv6host:::::1`: `invalid IP`, - `ipv6local:::0::`: `invalid IP`, - } - - for _, extrahost := range valid { - if _, err := ValidateExtraHost(extrahost); err != nil { - t.Fatalf("ValidateExtraHost(`"+extrahost+"`) should succeed: error %v", err) - } - } - - for extraHost, expectedError := range invalid { - if _, err := ValidateExtraHost(extraHost); err == nil { - t.Fatalf("ValidateExtraHost(`%q`) should have failed validation", extraHost) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateExtraHost(`%q`) error should contain %q", extraHost, expectedError) - } - } - } -} - -func TestValidateMACAddress(t *testing.T) { - if _, err := ValidateMACAddress(`92:d0:c6:0a:29:33`); err != nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:29:33`) got %s", err) - } - - if _, err := ValidateMACAddress(`92:d0:c6:0a:33`); err == nil { - t.Fatalf("ValidateMACAddress(`92:d0:c6:0a:33`) succeeded; expected failure on invalid MAC") - } - - if _, err := ValidateMACAddress(`random invalid string`); err == nil { - t.Fatalf("ValidateMACAddress(`random invalid string`) succeeded; expected failure on invalid MAC") - } -} diff --git a/runconfig/opts/parse.go b/runconfig/opts/parse.go deleted file mode 100644 index 9d2ba67ae6..0000000000 --- a/runconfig/opts/parse.go +++ /dev/null @@ -1,949 +0,0 @@ -package opts - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "path" - "strconv" - "strings" - "time" - - "github.com/docker/docker/opts" - "github.com/docker/docker/pkg/mount" - "github.com/docker/docker/pkg/signal" - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" - units "github.com/docker/go-units" - "github.com/spf13/pflag" -) - -// ContainerOptions is a data object with all the options for creating a container -// TODO: remove fl prefix -type ContainerOptions struct { - flAttach opts.ListOpts - flVolumes opts.ListOpts - flTmpfs opts.ListOpts - flBlkioWeightDevice WeightdeviceOpt - flDeviceReadBps ThrottledeviceOpt - flDeviceWriteBps ThrottledeviceOpt - flLinks opts.ListOpts - flAliases opts.ListOpts - flLinkLocalIPs opts.ListOpts - flDeviceReadIOps ThrottledeviceOpt - flDeviceWriteIOps ThrottledeviceOpt - flEnv opts.ListOpts - flLabels opts.ListOpts - flDevices opts.ListOpts - flUlimits *UlimitOpt - flSysctls *opts.MapOpts - flPublish opts.ListOpts - flExpose opts.ListOpts - flDNS opts.ListOpts - flDNSSearch opts.ListOpts - flDNSOptions opts.ListOpts - flExtraHosts opts.ListOpts - flVolumesFrom opts.ListOpts - flEnvFile opts.ListOpts - flCapAdd opts.ListOpts - flCapDrop opts.ListOpts - flGroupAdd opts.ListOpts - flSecurityOpt opts.ListOpts - flStorageOpt opts.ListOpts - flLabelsFile opts.ListOpts - flLoggingOpts opts.ListOpts - flPrivileged bool - flPidMode string - flUTSMode string - flUsernsMode string - flPublishAll bool - flStdin bool - flTty bool - flOomKillDisable bool - flOomScoreAdj int - flContainerIDFile string - flEntrypoint string - flHostname string - flMemoryString string - flMemoryReservation string - flMemorySwap string - flKernelMemory string - flUser string - flWorkingDir string - flCPUShares int64 - flCPUPercent int64 - flCPUPeriod int64 - flCPUQuota int64 - flCpusetCpus string - flCpusetMems string - flBlkioWeight uint16 - flIOMaxBandwidth string - flIOMaxIOps uint64 - flSwappiness 
int64 - flNetMode string - flMacAddress string - flIPv4Address string - flIPv6Address string - flIpcMode string - flPidsLimit int64 - flRestartPolicy string - flReadonlyRootfs bool - flLoggingDriver string - flCgroupParent string - flVolumeDriver string - flStopSignal string - flIsolation string - flShmSize string - flNoHealthcheck bool - flHealthCmd string - flHealthInterval time.Duration - flHealthTimeout time.Duration - flHealthRetries int - flRuntime string - - Image string - Args []string -} - -// AddFlags adds all command line flags that will be used by Parse to the FlagSet -func AddFlags(flags *pflag.FlagSet) *ContainerOptions { - copts := &ContainerOptions{ - flAliases: opts.NewListOpts(nil), - flAttach: opts.NewListOpts(ValidateAttach), - flBlkioWeightDevice: NewWeightdeviceOpt(ValidateWeightDevice), - flCapAdd: opts.NewListOpts(nil), - flCapDrop: opts.NewListOpts(nil), - flDNS: opts.NewListOpts(opts.ValidateIPAddress), - flDNSOptions: opts.NewListOpts(nil), - flDNSSearch: opts.NewListOpts(opts.ValidateDNSSearch), - flDeviceReadBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), - flDeviceReadIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), - flDeviceWriteBps: NewThrottledeviceOpt(ValidateThrottleBpsDevice), - flDeviceWriteIOps: NewThrottledeviceOpt(ValidateThrottleIOpsDevice), - flDevices: opts.NewListOpts(ValidateDevice), - flEnv: opts.NewListOpts(ValidateEnv), - flEnvFile: opts.NewListOpts(nil), - flExpose: opts.NewListOpts(nil), - flExtraHosts: opts.NewListOpts(ValidateExtraHost), - flGroupAdd: opts.NewListOpts(nil), - flLabels: opts.NewListOpts(ValidateEnv), - flLabelsFile: opts.NewListOpts(nil), - flLinkLocalIPs: opts.NewListOpts(nil), - flLinks: opts.NewListOpts(ValidateLink), - flLoggingOpts: opts.NewListOpts(nil), - flPublish: opts.NewListOpts(nil), - flSecurityOpt: opts.NewListOpts(nil), - flStorageOpt: opts.NewListOpts(nil), - flSysctls: opts.NewMapOpts(nil, opts.ValidateSysctl), - flTmpfs: opts.NewListOpts(nil), - flUlimits: NewUlimitOpt(nil), - flVolumes: opts.NewListOpts(nil), - flVolumesFrom: opts.NewListOpts(nil), - } - - // General purpose flags - flags.VarP(&copts.flAttach, "attach", "a", "Attach to STDIN, STDOUT or STDERR") - flags.Var(&copts.flDevices, "device", "Add a host device to the container") - flags.VarP(&copts.flEnv, "env", "e", "Set environment variables") - flags.Var(&copts.flEnvFile, "env-file", "Read in a file of environment variables") - flags.StringVar(&copts.flEntrypoint, "entrypoint", "", "Overwrite the default ENTRYPOINT of the image") - flags.Var(&copts.flGroupAdd, "group-add", "Add additional groups to join") - flags.StringVarP(&copts.flHostname, "hostname", "h", "", "Container host name") - flags.BoolVarP(&copts.flStdin, "interactive", "i", false, "Keep STDIN open even if not attached") - flags.VarP(&copts.flLabels, "label", "l", "Set meta data on a container") - flags.Var(&copts.flLabelsFile, "label-file", "Read in a line delimited file of labels") - flags.BoolVar(&copts.flReadonlyRootfs, "read-only", false, "Mount the container's root filesystem as read only") - flags.StringVar(&copts.flRestartPolicy, "restart", "no", "Restart policy to apply when a container exits") - flags.StringVar(&copts.flStopSignal, "stop-signal", signal.DefaultStopSignal, fmt.Sprintf("Signal to stop a container, %v by default", signal.DefaultStopSignal)) - flags.Var(copts.flSysctls, "sysctl", "Sysctl options") - flags.BoolVarP(&copts.flTty, "tty", "t", false, "Allocate a pseudo-TTY") - flags.Var(copts.flUlimits, "ulimit", "Ulimit options") - 
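// A hedged illustration (values hypothetical): a flag such as
// `--ulimit nofile=1024:2048` is parsed by the UlimitOpt validator into a
// go-units value along the lines of
//
//	units.Ulimit{Name: "nofile", Soft: 1024, Hard: 2048}
//
// before being attached to the container's Resources in Parse below.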
flags.StringVarP(&copts.flUser, "user", "u", "", "Username or UID (format: <name|uid>[:<group|gid>])") - flags.StringVarP(&copts.flWorkingDir, "workdir", "w", "", "Working directory inside the container") - - // Security - flags.Var(&copts.flCapAdd, "cap-add", "Add Linux capabilities") - flags.Var(&copts.flCapDrop, "cap-drop", "Drop Linux capabilities") - flags.BoolVar(&copts.flPrivileged, "privileged", false, "Give extended privileges to this container") - flags.Var(&copts.flSecurityOpt, "security-opt", "Security Options") - flags.StringVar(&copts.flUsernsMode, "userns", "", "User namespace to use") - - // Network and port publishing flags - flags.Var(&copts.flExtraHosts, "add-host", "Add a custom host-to-IP mapping (host:ip)") - flags.Var(&copts.flDNS, "dns", "Set custom DNS servers") - flags.Var(&copts.flDNSOptions, "dns-opt", "Set DNS options") - flags.Var(&copts.flDNSSearch, "dns-search", "Set custom DNS search domains") - flags.Var(&copts.flExpose, "expose", "Expose a port or a range of ports") - flags.StringVar(&copts.flIPv4Address, "ip", "", "Container IPv4 address (e.g. 172.30.100.104)") - flags.StringVar(&copts.flIPv6Address, "ip6", "", "Container IPv6 address (e.g. 2001:db8::33)") - flags.Var(&copts.flLinks, "link", "Add link to another container") - flags.Var(&copts.flLinkLocalIPs, "link-local-ip", "Container IPv4/IPv6 link-local addresses") - flags.StringVar(&copts.flMacAddress, "mac-address", "", "Container MAC address (e.g. 92:d0:c6:0a:29:33)") - flags.VarP(&copts.flPublish, "publish", "p", "Publish a container's port(s) to the host") - flags.BoolVarP(&copts.flPublishAll, "publish-all", "P", false, "Publish all exposed ports to random ports") - // We allow for both "--net" and "--network", although the latter is the recommended way. - flags.StringVar(&copts.flNetMode, "net", "default", "Connect a container to a network") - flags.StringVar(&copts.flNetMode, "network", "default", "Connect a container to a network") - flags.MarkHidden("net") - // We allow for both "--net-alias" and "--network-alias", although the latter is the recommended way.
- flags.Var(&copts.flAliases, "net-alias", "Add network-scoped alias for the container") - flags.Var(&copts.flAliases, "network-alias", "Add network-scoped alias for the container") - flags.MarkHidden("net-alias") - - // Logging and storage - flags.StringVar(&copts.flLoggingDriver, "log-driver", "", "Logging driver for the container") - flags.StringVar(&copts.flVolumeDriver, "volume-driver", "", "Optional volume driver for the container") - flags.Var(&copts.flLoggingOpts, "log-opt", "Log driver options") - flags.Var(&copts.flStorageOpt, "storage-opt", "Storage driver options for the container") - flags.Var(&copts.flTmpfs, "tmpfs", "Mount a tmpfs directory") - flags.Var(&copts.flVolumesFrom, "volumes-from", "Mount volumes from the specified container(s)") - flags.VarP(&copts.flVolumes, "volume", "v", "Bind mount a volume") - - // Health-checking - flags.StringVar(&copts.flHealthCmd, "health-cmd", "", "Command to run to check health") - flags.DurationVar(&copts.flHealthInterval, "health-interval", 0, "Time between running the check") - flags.IntVar(&copts.flHealthRetries, "health-retries", 0, "Consecutive failures needed to report unhealthy") - flags.DurationVar(&copts.flHealthTimeout, "health-timeout", 0, "Maximum time to allow one check to run") - flags.BoolVar(&copts.flNoHealthcheck, "no-healthcheck", false, "Disable any container-specified HEALTHCHECK") - - // Resource management - flags.Uint16Var(&copts.flBlkioWeight, "blkio-weight", 0, "Block IO (relative weight), between 10 and 1000") - flags.Var(&copts.flBlkioWeightDevice, "blkio-weight-device", "Block IO weight (relative device weight)") - flags.StringVar(&copts.flContainerIDFile, "cidfile", "", "Write the container ID to the file") - flags.StringVar(&copts.flCpusetCpus, "cpuset-cpus", "", "CPUs in which to allow execution (0-3, 0,1)") - flags.StringVar(&copts.flCpusetMems, "cpuset-mems", "", "MEMs in which to allow execution (0-3, 0,1)") - flags.Int64Var(&copts.flCPUPercent, "cpu-percent", 0, "CPU percent (Windows only)") - flags.Int64Var(&copts.flCPUPeriod, "cpu-period", 0, "Limit CPU CFS (Completely Fair Scheduler) period") - flags.Int64Var(&copts.flCPUQuota, "cpu-quota", 0, "Limit CPU CFS (Completely Fair Scheduler) quota") - flags.Int64VarP(&copts.flCPUShares, "cpu-shares", "c", 0, "CPU shares (relative weight)") - flags.Var(&copts.flDeviceReadBps, "device-read-bps", "Limit read rate (bytes per second) from a device") - flags.Var(&copts.flDeviceReadIOps, "device-read-iops", "Limit read rate (IO per second) from a device") - flags.Var(&copts.flDeviceWriteBps, "device-write-bps", "Limit write rate (bytes per second) to a device") - flags.Var(&copts.flDeviceWriteIOps, "device-write-iops", "Limit write rate (IO per second) to a device") - flags.StringVar(&copts.flIOMaxBandwidth, "io-maxbandwidth", "", "Maximum IO bandwidth limit for the system drive (Windows only)") - flags.Uint64Var(&copts.flIOMaxIOps, "io-maxiops", 0, "Maximum IOps limit for the system drive (Windows only)") - flags.StringVar(&copts.flKernelMemory, "kernel-memory", "", "Kernel memory limit") - flags.StringVarP(&copts.flMemoryString, "memory", "m", "", "Memory limit") - flags.StringVar(&copts.flMemoryReservation, "memory-reservation", "", "Memory soft limit") - flags.StringVar(&copts.flMemorySwap, "memory-swap", "", "Swap limit equal to memory plus swap: '-1' to enable unlimited swap") - flags.Int64Var(&copts.flSwappiness, "memory-swappiness", -1, "Tune container memory swappiness (0 to 100)") - flags.BoolVar(&copts.flOomKillDisable, "oom-kill-disable", false, 
"Disable OOM Killer") - flags.IntVar(&copts.flOomScoreAdj, "oom-score-adj", 0, "Tune host's OOM preferences (-1000 to 1000)") - flags.Int64Var(&copts.flPidsLimit, "pids-limit", 0, "Tune container pids limit (set -1 for unlimited)") - - // Low-level execution (cgroups, namespaces, ...) - flags.StringVar(&copts.flCgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container") - flags.StringVar(&copts.flIpcMode, "ipc", "", "IPC namespace to use") - flags.StringVar(&copts.flIsolation, "isolation", "", "Container isolation technology") - flags.StringVar(&copts.flPidMode, "pid", "", "PID namespace to use") - flags.StringVar(&copts.flShmSize, "shm-size", "", "Size of /dev/shm, default value is 64MB") - flags.StringVar(&copts.flUTSMode, "uts", "", "UTS namespace to use") - flags.StringVar(&copts.flRuntime, "runtime", "", "Runtime to use for this container") - return copts -} - -// Parse parses the args for the specified command and generates a Config, -// a HostConfig and returns them with the specified command. -// If the specified args are not valid, it will return an error. -func Parse(flags *pflag.FlagSet, copts *ContainerOptions) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - var ( - attachStdin = copts.flAttach.Get("stdin") - attachStdout = copts.flAttach.Get("stdout") - attachStderr = copts.flAttach.Get("stderr") - ) - - // Validate the input mac address - if copts.flMacAddress != "" { - if _, err := ValidateMACAddress(copts.flMacAddress); err != nil { - return nil, nil, nil, fmt.Errorf("%s is not a valid mac address", copts.flMacAddress) - } - } - if copts.flStdin { - attachStdin = true - } - // If -a is not set, attach to stdout and stderr - if copts.flAttach.Len() == 0 { - attachStdout = true - attachStderr = true - } - - var err error - - var flMemory int64 - if copts.flMemoryString != "" { - flMemory, err = units.RAMInBytes(copts.flMemoryString) - if err != nil { - return nil, nil, nil, err - } - } - - var MemoryReservation int64 - if copts.flMemoryReservation != "" { - MemoryReservation, err = units.RAMInBytes(copts.flMemoryReservation) - if err != nil { - return nil, nil, nil, err - } - } - - var memorySwap int64 - if copts.flMemorySwap != "" { - if copts.flMemorySwap == "-1" { - memorySwap = -1 - } else { - memorySwap, err = units.RAMInBytes(copts.flMemorySwap) - if err != nil { - return nil, nil, nil, err - } - } - } - - var KernelMemory int64 - if copts.flKernelMemory != "" { - KernelMemory, err = units.RAMInBytes(copts.flKernelMemory) - if err != nil { - return nil, nil, nil, err - } - } - - swappiness := copts.flSwappiness - if swappiness != -1 && (swappiness < 0 || swappiness > 100) { - return nil, nil, nil, fmt.Errorf("invalid value: %d. Valid memory swappiness range is 0-100", swappiness) - } - - var shmSize int64 - if copts.flShmSize != "" { - shmSize, err = units.RAMInBytes(copts.flShmSize) - if err != nil { - return nil, nil, nil, err - } - } - - // TODO FIXME units.RAMInBytes should have a uint64 version - var maxIOBandwidth int64 - if copts.flIOMaxBandwidth != "" { - maxIOBandwidth, err = units.RAMInBytes(copts.flIOMaxBandwidth) - if err != nil { - return nil, nil, nil, err - } - if maxIOBandwidth < 0 { - return nil, nil, nil, fmt.Errorf("invalid value: %s. 
Maximum IO Bandwidth must be positive", copts.flIOMaxBandwidth) - } - } - - var binds []string - // add any bind targets to the list of container volumes - for bind := range copts.flVolumes.GetMap() { - if arr := volumeSplitN(bind, 2); len(arr) > 1 { - // after creating the bind mount we want to delete it from the copts.flVolumes values because - // we do not want bind mounts being committed to image configs - binds = append(binds, bind) - copts.flVolumes.Delete(bind) - } - } - - // Can't evaluate options passed into --tmpfs until we actually mount - tmpfs := make(map[string]string) - for _, t := range copts.flTmpfs.GetAll() { - if arr := strings.SplitN(t, ":", 2); len(arr) > 1 { - if _, _, err := mount.ParseTmpfsOptions(arr[1]); err != nil { - return nil, nil, nil, err - } - tmpfs[arr[0]] = arr[1] - } else { - tmpfs[arr[0]] = "" - } - } - - var ( - runCmd strslice.StrSlice - entrypoint strslice.StrSlice - ) - if len(copts.Args) > 0 { - runCmd = strslice.StrSlice(copts.Args) - } - if copts.flEntrypoint != "" { - entrypoint = strslice.StrSlice{copts.flEntrypoint} - } - - ports, portBindings, err := nat.ParsePortSpecs(copts.flPublish.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // Merge in exposed ports to the map of published ports - for _, e := range copts.flExpose.GetAll() { - if strings.Contains(e, ":") { - return nil, nil, nil, fmt.Errorf("invalid port format for --expose: %s", e) - } - //support two formats for expose, original format <portnum>/[<proto>] or <startport-endport>/[<proto>] - proto, port := nat.SplitProtoPort(e) - //parse the start and end port and create a sequence of ports to expose - //if expose a port, the start and end port are the same - start, end, err := nat.ParsePortRange(port) - if err != nil { - return nil, nil, nil, fmt.Errorf("invalid range format for --expose: %s, error: %s", e, err) - } - for i := start; i <= end; i++ { - p, err := nat.NewPort(proto, strconv.FormatUint(i, 10)) - if err != nil { - return nil, nil, nil, err - } - if _, exists := ports[p]; !exists { - ports[p] = struct{}{} - } - } - } - - // parse device mappings - deviceMappings := []container.DeviceMapping{} - for _, device := range copts.flDevices.GetAll() { - deviceMapping, err := ParseDevice(device) - if err != nil { - return nil, nil, nil, err - } - deviceMappings = append(deviceMappings, deviceMapping) - } - - // collect all the environment variables for the container - envVariables, err := readKVStrings(copts.flEnvFile.GetAll(), copts.flEnv.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // collect all the labels for the container - labels, err := readKVStrings(copts.flLabelsFile.GetAll(), copts.flLabels.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - ipcMode := container.IpcMode(copts.flIpcMode) - if !ipcMode.Valid() { - return nil, nil, nil, fmt.Errorf("--ipc: invalid IPC mode") - } - - pidMode := container.PidMode(copts.flPidMode) - if !pidMode.Valid() { - return nil, nil, nil, fmt.Errorf("--pid: invalid PID mode") - } - - utsMode := container.UTSMode(copts.flUTSMode) - if !utsMode.Valid() { - return nil, nil, nil, fmt.Errorf("--uts: invalid UTS mode") - } - - usernsMode := container.UsernsMode(copts.flUsernsMode) - if !usernsMode.Valid() { - return nil, nil, nil, fmt.Errorf("--userns: invalid USER mode") - } - - restartPolicy, err := ParseRestartPolicy(copts.flRestartPolicy) - if err != nil { - return nil, nil, nil, err - } - - loggingOpts, err := parseLoggingOpts(copts.flLoggingDriver, copts.flLoggingOpts.GetAll()) - if err != nil { - return nil, nil, nil, err - } - -
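// As a sketch (flag values hypothetical): an invocation like
// `--log-driver=syslog --log-opt syslog-address=udp://192.168.0.42:514`
// arrives here as driver "syslog" and an options map along the lines of
//
//	map[string]string{"syslog-address": "udp://192.168.0.42:514"}
//
// which ends up in the HostConfig's LogConfig further down.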
securityOpts, err := parseSecurityOpts(copts.flSecurityOpt.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - storageOpts, err := parseStorageOpts(copts.flStorageOpt.GetAll()) - if err != nil { - return nil, nil, nil, err - } - - // Healthcheck - var healthConfig *container.HealthConfig - haveHealthSettings := copts.flHealthCmd != "" || - copts.flHealthInterval != 0 || - copts.flHealthTimeout != 0 || - copts.flHealthRetries != 0 - if copts.flNoHealthcheck { - if haveHealthSettings { - return nil, nil, nil, fmt.Errorf("--no-healthcheck conflicts with --health-* options") - } - test := strslice.StrSlice{"NONE"} - healthConfig = &container.HealthConfig{Test: test} - } else if haveHealthSettings { - var probe strslice.StrSlice - if copts.flHealthCmd != "" { - args := []string{"CMD-SHELL", copts.flHealthCmd} - probe = strslice.StrSlice(args) - } - if copts.flHealthInterval < 0 { - return nil, nil, nil, fmt.Errorf("--health-interval cannot be negative") - } - if copts.flHealthTimeout < 0 { - return nil, nil, nil, fmt.Errorf("--health-timeout cannot be negative") - } - - healthConfig = &container.HealthConfig{ - Test: probe, - Interval: copts.flHealthInterval, - Timeout: copts.flHealthTimeout, - Retries: copts.flHealthRetries, - } - } - - resources := container.Resources{ - CgroupParent: copts.flCgroupParent, - Memory: flMemory, - MemoryReservation: MemoryReservation, - MemorySwap: memorySwap, - MemorySwappiness: &copts.flSwappiness, - KernelMemory: KernelMemory, - OomKillDisable: &copts.flOomKillDisable, - CPUPercent: copts.flCPUPercent, - CPUShares: copts.flCPUShares, - CPUPeriod: copts.flCPUPeriod, - CpusetCpus: copts.flCpusetCpus, - CpusetMems: copts.flCpusetMems, - CPUQuota: copts.flCPUQuota, - PidsLimit: copts.flPidsLimit, - BlkioWeight: copts.flBlkioWeight, - BlkioWeightDevice: copts.flBlkioWeightDevice.GetList(), - BlkioDeviceReadBps: copts.flDeviceReadBps.GetList(), - BlkioDeviceWriteBps: copts.flDeviceWriteBps.GetList(), - BlkioDeviceReadIOps: copts.flDeviceReadIOps.GetList(), - BlkioDeviceWriteIOps: copts.flDeviceWriteIOps.GetList(), - IOMaximumIOps: copts.flIOMaxIOps, - IOMaximumBandwidth: uint64(maxIOBandwidth), - Ulimits: copts.flUlimits.GetList(), - Devices: deviceMappings, - } - - config := &container.Config{ - Hostname: copts.flHostname, - ExposedPorts: ports, - User: copts.flUser, - Tty: copts.flTty, - // TODO: deprecated, it comes from -n, --networking - // it's still needed internally to set the network to disabled - // if e.g. bridge is none in daemon opts, and in inspect - NetworkDisabled: false, - OpenStdin: copts.flStdin, - AttachStdin: attachStdin, - AttachStdout: attachStdout, - AttachStderr: attachStderr, - Env: envVariables, - Cmd: runCmd, - Image: copts.Image, - Volumes: copts.flVolumes.GetMap(), - MacAddress: copts.flMacAddress, - Entrypoint: entrypoint, - WorkingDir: copts.flWorkingDir, - Labels: ConvertKVStringsToMap(labels), - Healthcheck: healthConfig, - } - if flags.Changed("stop-signal") { - config.StopSignal = copts.flStopSignal - } - - hostConfig := &container.HostConfig{ - Binds: binds, - ContainerIDFile: copts.flContainerIDFile, - OomScoreAdj: copts.flOomScoreAdj, - Privileged: copts.flPrivileged, - PortBindings: portBindings, - Links: copts.flLinks.GetAll(), - PublishAllPorts: copts.flPublishAll, - // Make sure the dns fields are never nil. - // New containers don't ever have those fields nil, - // but pre created containers can still have those nil values. 
- // See https://github.com/docker/docker/pull/17779 - // for a more detailed explanation on why we don't want that. - DNS: copts.flDNS.GetAllOrEmpty(), - DNSSearch: copts.flDNSSearch.GetAllOrEmpty(), - DNSOptions: copts.flDNSOptions.GetAllOrEmpty(), - ExtraHosts: copts.flExtraHosts.GetAll(), - VolumesFrom: copts.flVolumesFrom.GetAll(), - NetworkMode: container.NetworkMode(copts.flNetMode), - IpcMode: ipcMode, - PidMode: pidMode, - UTSMode: utsMode, - UsernsMode: usernsMode, - CapAdd: strslice.StrSlice(copts.flCapAdd.GetAll()), - CapDrop: strslice.StrSlice(copts.flCapDrop.GetAll()), - GroupAdd: copts.flGroupAdd.GetAll(), - RestartPolicy: restartPolicy, - SecurityOpt: securityOpts, - StorageOpt: storageOpts, - ReadonlyRootfs: copts.flReadonlyRootfs, - LogConfig: container.LogConfig{Type: copts.flLoggingDriver, Config: loggingOpts}, - VolumeDriver: copts.flVolumeDriver, - Isolation: container.Isolation(copts.flIsolation), - ShmSize: shmSize, - Resources: resources, - Tmpfs: tmpfs, - Sysctls: copts.flSysctls.GetAll(), - Runtime: copts.flRuntime, - } - - // When allocating stdin in attached mode, close stdin at client disconnect - if config.OpenStdin && config.AttachStdin { - config.StdinOnce = true - } - - networkingConfig := &networktypes.NetworkingConfig{ - EndpointsConfig: make(map[string]*networktypes.EndpointSettings), - } - - if copts.flIPv4Address != "" || copts.flIPv6Address != "" || copts.flLinkLocalIPs.Len() > 0 { - epConfig := &networktypes.EndpointSettings{} - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - - epConfig.IPAMConfig = &networktypes.EndpointIPAMConfig{ - IPv4Address: copts.flIPv4Address, - IPv6Address: copts.flIPv6Address, - } - - if copts.flLinkLocalIPs.Len() > 0 { - epConfig.IPAMConfig.LinkLocalIPs = make([]string, copts.flLinkLocalIPs.Len()) - copy(epConfig.IPAMConfig.LinkLocalIPs, copts.flLinkLocalIPs.GetAll()) - } - } - - if hostConfig.NetworkMode.IsUserDefined() && len(hostConfig.Links) > 0 { - epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] - if epConfig == nil { - epConfig = &networktypes.EndpointSettings{} - } - epConfig.Links = make([]string, len(hostConfig.Links)) - copy(epConfig.Links, hostConfig.Links) - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - } - - if copts.flAliases.Len() > 0 { - epConfig := networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] - if epConfig == nil { - epConfig = &networktypes.EndpointSettings{} - } - epConfig.Aliases = make([]string, copts.flAliases.Len()) - copy(epConfig.Aliases, copts.flAliases.GetAll()) - networkingConfig.EndpointsConfig[string(hostConfig.NetworkMode)] = epConfig - } - - return config, hostConfig, networkingConfig, nil -} - -// reads a file of line terminated key=value pairs, and overrides any keys -// present in the file with additional pairs specified in the override parameter -func readKVStrings(files []string, override []string) ([]string, error) { - envVariables := []string{} - for _, ef := range files { - parsedVars, err := ParseEnvFile(ef) - if err != nil { - return nil, err - } - envVariables = append(envVariables, parsedVars...) - } - // parse the '-e' and '--env' after, to allow override - envVariables = append(envVariables, override...) 
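// To sketch the resulting order (values hypothetical): an env-file line
// FOO=from-file combined with `--env FOO=from-cli` yields
//
//	[]string{"FOO=from-file", "FOO=from-cli"}
//
// keeping the override last so the CLI value wins when the slice is later
// reduced to a map (see ConvertKVStringsToMap below).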
- - return envVariables, nil -} - -// ConvertKVStringsToMap converts ["key=value"] to {"key":"value"} -func ConvertKVStringsToMap(values []string) map[string]string { - result := make(map[string]string, len(values)) - for _, value := range values { - kv := strings.SplitN(value, "=", 2) - if len(kv) == 1 { - result[kv[0]] = "" - } else { - result[kv[0]] = kv[1] - } - } - - return result -} - -func parseLoggingOpts(loggingDriver string, loggingOpts []string) (map[string]string, error) { - loggingOptsMap := ConvertKVStringsToMap(loggingOpts) - if loggingDriver == "none" && len(loggingOpts) > 0 { - return map[string]string{}, fmt.Errorf("invalid logging opts for driver %s", loggingDriver) - } - return loggingOptsMap, nil -} - -// parseSecurityOpts reads a local seccomp profile, when one is specified, and inlines its contents for sending to the daemon -func parseSecurityOpts(securityOpts []string) ([]string, error) { - for key, opt := range securityOpts { - con := strings.SplitN(opt, "=", 2) - if len(con) == 1 && con[0] != "no-new-privileges" { - if strings.Index(opt, ":") != -1 { - con = strings.SplitN(opt, ":", 2) - } else { - return securityOpts, fmt.Errorf("Invalid --security-opt: %q", opt) - } - } - if con[0] == "seccomp" && con[1] != "unconfined" { - f, err := ioutil.ReadFile(con[1]) - if err != nil { - return securityOpts, fmt.Errorf("opening seccomp profile (%s) failed: %v", con[1], err) - } - b := bytes.NewBuffer(nil) - if err := json.Compact(b, f); err != nil { - return securityOpts, fmt.Errorf("compacting json for seccomp profile (%s) failed: %v", con[1], err) - } - securityOpts[key] = fmt.Sprintf("seccomp=%s", b.Bytes()) - } - } - - return securityOpts, nil -} - -// parseStorageOpts parses storage options per container into a map -func parseStorageOpts(storageOpts []string) (map[string]string, error) { - m := make(map[string]string) - for _, option := range storageOpts { - if strings.Contains(option, "=") { - opt := strings.SplitN(option, "=", 2) - m[opt[0]] = opt[1] - } else { - return nil, fmt.Errorf("Invalid storage option.") - } - } - return m, nil -} - -// ParseRestartPolicy returns the parsed policy or an error indicating what is incorrect -func ParseRestartPolicy(policy string) (container.RestartPolicy, error) { - p := container.RestartPolicy{} - - if policy == "" { - return p, nil - } - - var ( - parts = strings.Split(policy, ":") - name = parts[0] - ) - - p.Name = name - switch name { - case "always", "unless-stopped": - if len(parts) > 1 { - return p, fmt.Errorf("maximum restart count not valid with restart policy of \"%s\"", name) - } - case "no": - // do nothing - case "on-failure": - if len(parts) > 2 { - return p, fmt.Errorf("restart count format is not valid, usage: 'on-failure:N' or 'on-failure'") - } - if len(parts) == 2 { - count, err := strconv.Atoi(parts[1]) - if err != nil { - return p, err - } - - p.MaximumRetryCount = count - } - default: - return p, fmt.Errorf("invalid restart policy %s", name) - } - - return p, nil -} - -// ParseDevice parses a device mapping string to a container.DeviceMapping struct -func ParseDevice(device string) (container.DeviceMapping, error) { - src := "" - dst := "" - permissions := "rwm" - arr := strings.Split(device, ":") - switch len(arr) { - case 3: - permissions = arr[2] - fallthrough - case 2: - if ValidDeviceMode(arr[1]) { - permissions = arr[1] - } else { - dst = arr[1] - } - fallthrough - case 1: - src = arr[0] - default: - return container.DeviceMapping{}, fmt.Errorf("invalid device specification: %s", device) - } - - if dst == "" { - dst = src - } - - deviceMapping :=
container.DeviceMapping{ - PathOnHost: src, - PathInContainer: dst, - CgroupPermissions: permissions, - } - return deviceMapping, nil -} - -// ParseLink parses and validates the specified string as a link format (name:alias) -func ParseLink(val string) (string, string, error) { - if val == "" { - return "", "", fmt.Errorf("empty string specified for links") - } - arr := strings.Split(val, ":") - if len(arr) > 2 { - return "", "", fmt.Errorf("bad format for links: %s", val) - } - if len(arr) == 1 { - return val, val, nil - } - // This is kept because we can actually get a HostConfig with links - // from an already created container and the format is not `foo:bar` - // but `/foo:/c1/bar` - if strings.HasPrefix(arr[0], "/") { - _, alias := path.Split(arr[1]) - return arr[0][1:], alias, nil - } - return arr[0], arr[1], nil -} - -// ValidateLink validates that the specified string has a valid link format (containerName:alias). -func ValidateLink(val string) (string, error) { - if _, _, err := ParseLink(val); err != nil { - return val, err - } - return val, nil -} - -// ValidDeviceMode checks if the mode for a device is valid or not. -// Valid mode is a composition of r (read), w (write), and m (mknod). -func ValidDeviceMode(mode string) bool { - var legalDeviceMode = map[rune]bool{ - 'r': true, - 'w': true, - 'm': true, - } - if mode == "" { - return false - } - for _, c := range mode { - if !legalDeviceMode[c] { - return false - } - legalDeviceMode[c] = false - } - return true -} - -// ValidateDevice validates a path for devices -// It will make sure 'val' is in the form: -// [host-dir:]container-path[:mode] -// It also validates the device mode. -func ValidateDevice(val string) (string, error) { - return validatePath(val, ValidDeviceMode) -} - -func validatePath(val string, validator func(string) bool) (string, error) { - var containerPath string - var mode string - - if strings.Count(val, ":") > 2 { - return val, fmt.Errorf("bad format for path: %s", val) - } - - split := strings.SplitN(val, ":", 3) - if split[0] == "" { - return val, fmt.Errorf("bad format for path: %s", val) - } - switch len(split) { - case 1: - containerPath = split[0] - val = path.Clean(containerPath) - case 2: - if isValid := validator(split[1]); isValid { - containerPath = split[0] - mode = split[1] - val = fmt.Sprintf("%s:%s", path.Clean(containerPath), mode) - } else { - containerPath = split[1] - val = fmt.Sprintf("%s:%s", split[0], path.Clean(containerPath)) - } - case 3: - containerPath = split[1] - mode = split[2] - if isValid := validator(split[2]); !isValid { - return val, fmt.Errorf("bad mode specified: %s", mode) - } - val = fmt.Sprintf("%s:%s:%s", split[0], containerPath, mode) - } - - if !path.IsAbs(containerPath) { - return val, fmt.Errorf("%s is not an absolute path", containerPath) - } - return val, nil -} - -// volumeSplitN splits raw into a maximum of n parts, separated by a separator colon. -// A separator colon is the last `:` character in the regex `[:\\]?[a-zA-Z]:` (note `\\` is `\` escaped). -// On Windows, a drive letter appears in two situations: -// a. `^[a-zA-Z]:` (A colon followed by `^[a-zA-Z]:` is OK as colon is the separator in volume option) -// b. A string in a format like `\\?\C:\Windows\...` (UNC). -// Therefore, a drive letter can only follow either a `:` or `\\` -// This allows strings such as `C:\foo:D:\:rw` or `/tmp/q:/foo` to be split correctly.
-func volumeSplitN(raw string, n int) []string { - var array []string - if len(raw) == 0 || raw[0] == ':' { - // invalid - return nil - } - // numberOfParts counts the number of parts separated by a separator colon - numberOfParts := 0 - // left represents the left-most cursor in raw, updated at every `:` character considered as a separator. - left := 0 - // right represents the right-most cursor in raw incremented with the loop. Note this - // starts at index 1 as index 0 is already handled above as a special case. - for right := 1; right < len(raw); right++ { - // stop parsing if the maximum number of parts is reached - if n >= 0 && numberOfParts >= n { - break - } - if raw[right] != ':' { - continue - } - potentialDriveLetter := raw[right-1] - if (potentialDriveLetter >= 'A' && potentialDriveLetter <= 'Z') || (potentialDriveLetter >= 'a' && potentialDriveLetter <= 'z') { - if right > 1 { - beforePotentialDriveLetter := raw[right-2] - // Only `:` or `\\` are checked (`/` could fall into the case of `/tmp/q:/foo`) - if beforePotentialDriveLetter != ':' && beforePotentialDriveLetter != '\\' { - // e.g. `C:` is not preceded by any delimiter, therefore it was not a drive letter but a path ending with `C:`. - array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - // else, `C:` is considered as a drive letter and not as a delimiter, so we continue parsing. - } - // if right == 1, then `C:` is the beginning of the raw string, therefore `:` is again not considered a delimiter and we continue parsing. - } else { - // if `:` is not preceded by a potential drive letter, then consider it as a delimiter. - array = append(array, raw[left:right]) - left = right + 1 - numberOfParts++ - } - } - // need to take care of the last part - if left < len(raw) { - if n >= 0 && numberOfParts >= n { - // if the maximum number of parts is reached, just append the rest to the last part - // left-1 is at the last `:` that needs to be included since not considered a separator.
- array[n-1] += raw[left-1:] - } else { - array = append(array, raw[left:]) - } - } - return array -} diff --git a/runconfig/opts/parse_test.go b/runconfig/opts/parse_test.go deleted file mode 100644 index d0acd8be87..0000000000 --- a/runconfig/opts/parse_test.go +++ /dev/null @@ -1,870 +0,0 @@ -package opts - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - "testing" - "time" - - "github.com/docker/docker/runconfig" - "github.com/docker/engine-api/types/container" - networktypes "github.com/docker/engine-api/types/network" - "github.com/docker/go-connections/nat" - "github.com/spf13/pflag" -) - -func parseRun(args []string) (*container.Config, *container.HostConfig, *networktypes.NetworkingConfig, error) { - flags := pflag.NewFlagSet("run", pflag.ContinueOnError) - flags.SetOutput(ioutil.Discard) - flags.Usage = nil - copts := AddFlags(flags) - if err := flags.Parse(args); err != nil { - return nil, nil, nil, err - } - return Parse(flags, copts) -} - -func parse(t *testing.T, args string) (*container.Config, *container.HostConfig, error) { - config, hostConfig, _, err := parseRun(strings.Split(args+" ubuntu bash", " ")) - return config, hostConfig, err -} - -func mustParse(t *testing.T, args string) (*container.Config, *container.HostConfig) { - config, hostConfig, err := parse(t, args) - if err != nil { - t.Fatal(err) - } - return config, hostConfig -} - -func TestParseRunLinks(t *testing.T) { - if _, hostConfig := mustParse(t, "--link a:b"); len(hostConfig.Links) == 0 || hostConfig.Links[0] != "a:b" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, "--link a:b --link c:d"); len(hostConfig.Links) < 2 || hostConfig.Links[0] != "a:b" || hostConfig.Links[1] != "c:d" { - t.Fatalf("Error parsing links. Expected []string{\"a:b\", \"c:d\"}, received: %v", hostConfig.Links) - } - if _, hostConfig := mustParse(t, ""); len(hostConfig.Links) != 0 { - t.Fatalf("Error parsing links. No link expected, received: %v", hostConfig.Links) - } -} - -func TestParseRunAttach(t *testing.T) { - if config, _ := mustParse(t, "-a stdin"); !config.AttachStdin || config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout"); !config.AttachStdin || !config.AttachStdout || config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect only Stdin and Stdout enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-a stdin -a stdout -a stderr"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect all attach enabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, ""); config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin disabled. Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - if config, _ := mustParse(t, "-i"); !config.AttachStdin || !config.AttachStdout || !config.AttachStderr { - t.Fatalf("Error parsing attach flags. Expect Stdin enabled. 
Received: in: %v, out: %v, err: %v", config.AttachStdin, config.AttachStdout, config.AttachStderr) - } - - if _, _, err := parse(t, "-a"); err == nil { - t.Fatalf("Error parsing attach flags, `-a` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid"); err == nil { - t.Fatalf("Error parsing attach flags, `-a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a invalid -a stdout"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a invalid` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdin -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdin -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stdout -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stdout -d` should be an error but is not") - } - if _, _, err := parse(t, "-a stderr -d"); err == nil { - t.Fatalf("Error parsing attach flags, `-a stderr -d` should be an error but is not") - } - if _, _, err := parse(t, "-d --rm"); err == nil { - t.Fatalf("Error parsing attach flags, `-d --rm` should be an error but is not") - } -} - -func TestParseRunVolumes(t *testing.T) { - - // A single volume - arr, tryit := setupPlatformVolume([]string{`/tmp`}, []string{`c:\tmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) - } else if _, exists := config.Volumes[arr[0]]; !exists { - t.Fatalf("Error parsing volume flags, %q is missing from volumes. Received %v", tryit, config.Volumes) - } - - // Two volumes - arr, tryit = setupPlatformVolume([]string{`/tmp`, `/var`}, []string{`c:\tmp`, `c:\var`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, %q should not mount-bind anything. Received %v", tryit, hostConfig.Binds) - } else if _, exists := config.Volumes[arr[0]]; !exists { - t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[0], config.Volumes) - } else if _, exists := config.Volumes[arr[1]]; !exists { - t.Fatalf("Error parsing volume flags, %s is missing from volumes. Received %v", arr[1], config.Volumes) - } - - // A single bind-mount - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || hostConfig.Binds[0] != arr[0] { - t.Fatalf("Error parsing volume flags, %q should mount-bind the path before the colon into the path after the colon. Received %v %v", arr[0], hostConfig.Binds, config.Volumes) - } - - // Two bind-mounts. - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/hostVar:/containerVar`}, []string{os.Getenv("ProgramData") + `:c:\ContainerPD`, os.Getenv("TEMP") + `:c:\containerTmp`}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - // Two bind-mounts, first read-only, second read-write. - // TODO Windows: The Windows version uses read-write as that's the only mode it supports. 
Can change this post TP4 - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro`, `/hostVar:/containerVar:rw`}, []string{os.Getenv("TEMP") + `:c:\containerTmp:rw`, os.Getenv("ProgramData") + `:c:\ContainerPD:rw`}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - // Similar to previous test but with alternate modes which are only supported by Linux - if runtime.GOOS != "windows" { - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:ro,Z`, `/hostVar:/containerVar:rw,Z`}, []string{}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp:Z`, `/hostVar:/containerVar:z`}, []string{}) - if _, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || compareRandomizedStrings(hostConfig.Binds[0], hostConfig.Binds[1], arr[0], arr[1]) != nil { - t.Fatalf("Error parsing volume flags, `%s and %s` did not mount-bind correctly. Received %v", arr[0], arr[1], hostConfig.Binds) - } - } - - // One bind mount and one volume - arr, tryit = setupPlatformVolume([]string{`/hostTmp:/containerTmp`, `/containerVar`}, []string{os.Getenv("TEMP") + `:c:\containerTmp`, `c:\containerTmp`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] { - t.Fatalf("Error parsing volume flags, %s and %s should have one and only one bind mount %s. Received %s", arr[0], arr[1], arr[0], hostConfig.Binds) - } else if _, exists := config.Volumes[arr[1]]; !exists { - t.Fatalf("Error parsing volume flags %s and %s. %s is missing from volumes. Received %v", arr[0], arr[1], arr[1], config.Volumes) - } - - // Root to non-c: drive letter (Windows specific) - if runtime.GOOS == "windows" { - arr, tryit = setupPlatformVolume([]string{}, []string{os.Getenv("SystemDrive") + `\:d:`}) - if config, hostConfig := mustParse(t, tryit); hostConfig.Binds == nil || len(hostConfig.Binds) > 1 || hostConfig.Binds[0] != arr[0] || len(config.Volumes) != 0 { - t.Fatalf("Error parsing %s.
Should have a single bind mount and no volumes", arr[0]) - } - } - -} - -// This tests the cases for binds which are generated through -// DecodeContainerConfig rather than Parse() -func TestDecodeContainerConfigVolumes(t *testing.T) { - - // Root to root - bindsOrVols, _ := setupPlatformVolume([]string{`/:/`}, []string{os.Getenv("SystemDrive") + `\:c:\`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("volume %v should have failed", bindsOrVols) - } - - // No destination path - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:`}, []string{os.Getenv("TEMP") + `\:`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // // No destination path or mode - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp::`}, []string{os.Getenv("TEMP") + `\::`}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // A whole lot of nothing - bindsOrVols = []string{`:`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // A whole lot of nothing with no mode - bindsOrVols = []string{`::`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Too much including an invalid mode - wTmp := os.Getenv("TEMP") - bindsOrVols, _ = setupPlatformVolume([]string{`/tmp:/tmp:/tmp:/tmp`}, []string{wTmp + ":" + wTmp + ":" + wTmp + ":" + wTmp}) - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Windows specific error tests - if runtime.GOOS == "windows" { - // Volume which does not include a drive letter - bindsOrVols = []string{`\tmp`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Root to C-Drive - bindsOrVols = []string{os.Getenv("SystemDrive") + `\:c:`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // Container path that does not include a drive letter - bindsOrVols = []string{`c:\windows:\somewhere`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should 
have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - } - - // Linux-specific error tests - if runtime.GOOS != "windows" { - // Just root - bindsOrVols = []string{`/`} - if _, _, err := callDecodeContainerConfig(nil, bindsOrVols); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - if _, _, err := callDecodeContainerConfig(bindsOrVols, nil); err == nil { - t.Fatalf("binds %v should have failed", bindsOrVols) - } - - // A single volume that looks like a bind mount passed in Volumes. - // This should be handled as a bind mount, not a volume. - vols := []string{`/foo:/bar`} - if config, hostConfig, err := callDecodeContainerConfig(vols, nil); err != nil { - t.Fatal("Volume /foo:/bar should have succeeded as a volume name") - } else if hostConfig.Binds != nil { - t.Fatalf("Error parsing volume flags, /foo:/bar should not mount-bind anything. Received %v", hostConfig.Binds) - } else if _, exists := config.Volumes[vols[0]]; !exists { - t.Fatalf("Error parsing volume flags, /foo:/bar is missing from volumes. Received %v", config.Volumes) - } - - } -} - -// callDecodeContainerConfig is a utility function used by TestDecodeContainerConfigVolumes -// to call DecodeContainerConfig. It effectively does what a client would -// do when calling the daemon by constructing a JSON stream of a -// ContainerConfigWrapper which is populated by the set of volume specs -// passed into it. It returns a config and a hostconfig which can be -// validated to ensure DecodeContainerConfig has manipulated the structures -// correctly. -func callDecodeContainerConfig(volumes []string, binds []string) (*container.Config, *container.HostConfig, error) { - var ( - b []byte - err error - c *container.Config - h *container.HostConfig - ) - w := runconfig.ContainerConfigWrapper{ - Config: &container.Config{ - Volumes: map[string]struct{}{}, - }, - HostConfig: &container.HostConfig{ - NetworkMode: "none", - Binds: binds, - }, - } - for _, v := range volumes { - w.Config.Volumes[v] = struct{}{} - } - if b, err = json.Marshal(w); err != nil { - return nil, nil, fmt.Errorf("Error on marshal %s", err.Error()) - } - c, h, _, err = runconfig.DecodeContainerConfig(bytes.NewReader(b)) - if err != nil { - return nil, nil, fmt.Errorf("Error parsing %s: %v", string(b), err) - } - if c == nil || h == nil { - return nil, nil, fmt.Errorf("Empty config or hostconfig") - } - - return c, h, err -} - -// check if (a == c && b == d) || (a == d && b == c) -// because maps are randomized -func compareRandomizedStrings(a, b, c, d string) error { - if a == c && b == d { - return nil - } - if a == d && b == c { - return nil - } - return fmt.Errorf("strings don't match") -} - -// setupPlatformVolume takes two arrays of volume specs - a Unix style -// spec and a Windows style spec. Depending on the platform being unit tested, -// it returns one of them, along with a volume string that would be passed -// on the docker CLI (eg -v /bar -v /foo). 
-func setupPlatformVolume(u []string, w []string) ([]string, string) { - var a []string - if runtime.GOOS == "windows" { - a = w - } else { - a = u - } - s := "" - for _, v := range a { - s = s + "-v " + v + " " - } - return a, s -} - -// Simple parse with MacAddress validation -func TestParseWithMacAddress(t *testing.T) { - invalidMacAddress := "--mac-address=invalidMacAddress" - validMacAddress := "--mac-address=92:d0:c6:0a:29:33" - if _, _, _, err := parseRun([]string{invalidMacAddress, "img", "cmd"}); err != nil && err.Error() != "invalidMacAddress is not a valid mac address" { - t.Fatalf("Expected an error with %v mac-address, got %v", invalidMacAddress, err) - } - if config, _ := mustParse(t, validMacAddress); config.MacAddress != "92:d0:c6:0a:29:33" { - t.Fatalf("Expected the config to have '92:d0:c6:0a:29:33' as MacAddress, got '%v'", config.MacAddress) - } -} - -func TestParseWithMemory(t *testing.T) { - invalidMemory := "--memory=invalid" - validMemory := "--memory=1G" - if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err != nil && err.Error() != "invalid size: 'invalid'" { - t.Fatalf("Expected an error with '%v' Memory, got '%v'", invalidMemory, err) - } - if _, hostconfig := mustParse(t, validMemory); hostconfig.Memory != 1073741824 { - t.Fatalf("Expected the config to have '1G' as Memory, got '%v'", hostconfig.Memory) - } -} - -func TestParseWithMemorySwap(t *testing.T) { - invalidMemory := "--memory-swap=invalid" - validMemory := "--memory-swap=1G" - anotherValidMemory := "--memory-swap=-1" - if _, _, _, err := parseRun([]string{invalidMemory, "img", "cmd"}); err == nil || err.Error() != "invalid size: 'invalid'" { - t.Fatalf("Expected an error with '%v' MemorySwap, got '%v'", invalidMemory, err) - } - if _, hostconfig := mustParse(t, validMemory); hostconfig.MemorySwap != 1073741824 { - t.Fatalf("Expected the config to have '1073741824' as MemorySwap, got '%v'", hostconfig.MemorySwap) - } - if _, hostconfig := mustParse(t, anotherValidMemory); hostconfig.MemorySwap != -1 { - t.Fatalf("Expected the config to have '-1' as MemorySwap, got '%v'", hostconfig.MemorySwap) - } -} - -func TestParseHostname(t *testing.T) { - validHostnames := map[string]string{ - "hostname": "hostname", - "host-name": "host-name", - "hostname123": "hostname123", - "123hostname": "123hostname", - "hostname-of-63-bytes-long-should-be-valid-and-without-any-error": "hostname-of-63-bytes-long-should-be-valid-and-without-any-error", - } - hostnameWithDomain := "--hostname=hostname.domainname" - hostnameWithDomainTld := "--hostname=hostname.domainname.tld" - for hostname, expectedHostname := range validHostnames { - if config, _ := mustParse(t, fmt.Sprintf("--hostname=%s", hostname)); config.Hostname != expectedHostname { - t.Fatalf("Expected the config to have 'hostname' as hostname, got '%v'", config.Hostname) - } - } - if config, _ := mustParse(t, hostnameWithDomain); config.Hostname != "hostname.domainname" && config.Domainname != "" { - t.Fatalf("Expected the config to have 'hostname' as hostname.domainname, got '%v'", config.Hostname) - } - if config, _ := mustParse(t, hostnameWithDomainTld); config.Hostname != "hostname.domainname.tld" && config.Domainname != "" { - t.Fatalf("Expected the config to have 'hostname' as hostname.domainname.tld, got '%v'", config.Hostname) - } -} - -func TestParseWithExpose(t *testing.T) { - invalids := map[string]string{ - ":": "invalid port format for --expose: :", - "8080:9090": "invalid port format for --expose: 8080:9090", - "/tcp": "invalid range 
format for --expose: /tcp, error: Empty string specified for ports.", - "/udp": "invalid range format for --expose: /udp, error: Empty string specified for ports.", - "NaN/tcp": `invalid range format for --expose: NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "NaN-NaN/tcp": `invalid range format for --expose: NaN-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "8080-NaN/tcp": `invalid range format for --expose: 8080-NaN/tcp, error: strconv.ParseUint: parsing "NaN": invalid syntax`, - "1234567890-8080/tcp": `invalid range format for --expose: 1234567890-8080/tcp, error: strconv.ParseUint: parsing "1234567890": value out of range`, - } - valids := map[string][]nat.Port{ - "8080/tcp": {"8080/tcp"}, - "8080/udp": {"8080/udp"}, - "8080/ncp": {"8080/ncp"}, - "8080-8080/udp": {"8080/udp"}, - "8080-8082/tcp": {"8080/tcp", "8081/tcp", "8082/tcp"}, - } - for expose, expectedError := range invalids { - if _, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}); err == nil || err.Error() != expectedError { - t.Fatalf("Expected error '%v' with '--expose=%v', got '%v'", expectedError, expose, err) - } - } - for expose, exposedPorts := range valids { - config, _, _, err := parseRun([]string{fmt.Sprintf("--expose=%v", expose), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.ExposedPorts) != len(exposedPorts) { - t.Fatalf("Expected %v exposed port, got %v", len(exposedPorts), len(config.ExposedPorts)) - } - for _, port := range exposedPorts { - if _, ok := config.ExposedPorts[port]; !ok { - t.Fatalf("Expected %v, got %v", exposedPorts, config.ExposedPorts) - } - } - } - // Merge with actual published port - config, _, _, err := parseRun([]string{"--publish=80", "--expose=80-81/tcp", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.ExposedPorts) != 2 { - t.Fatalf("Expected 2 exposed ports, got %v", config.ExposedPorts) - } - ports := []nat.Port{"80/tcp", "81/tcp"} - for _, port := range ports { - if _, ok := config.ExposedPorts[port]; !ok { - t.Fatalf("Expected %v, got %v", ports, config.ExposedPorts) - } - } -} - -func TestParseDevice(t *testing.T) { - valids := map[string]container.DeviceMapping{ - "/dev/snd": { - PathOnHost: "/dev/snd", - PathInContainer: "/dev/snd", - CgroupPermissions: "rwm", - }, - "/dev/snd:rw": { - PathOnHost: "/dev/snd", - PathInContainer: "/dev/snd", - CgroupPermissions: "rw", - }, - "/dev/snd:/something": { - PathOnHost: "/dev/snd", - PathInContainer: "/something", - CgroupPermissions: "rwm", - }, - "/dev/snd:/something:rw": { - PathOnHost: "/dev/snd", - PathInContainer: "/something", - CgroupPermissions: "rw", - }, - } - for device, deviceMapping := range valids { - _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--device=%v", device), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(hostconfig.Devices) != 1 { - t.Fatalf("Expected 1 devices, got %v", hostconfig.Devices) - } - if hostconfig.Devices[0] != deviceMapping { - t.Fatalf("Expected %v, got %v", deviceMapping, hostconfig.Devices) - } - } - -} - -func TestParseModes(t *testing.T) { - // ipc ko - if _, _, _, err := parseRun([]string{"--ipc=container:", "img", "cmd"}); err == nil || err.Error() != "--ipc: invalid IPC mode" { - t.Fatalf("Expected an error with message '--ipc: invalid IPC mode', got %v", err) - } - // ipc ok - _, hostconfig, _, err := parseRun([]string{"--ipc=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.IpcMode.Valid() { - t.Fatalf("Expected a valid 
IpcMode, got %v", hostconfig.IpcMode) - } - // pid ko - if _, _, _, err := parseRun([]string{"--pid=container:", "img", "cmd"}); err == nil || err.Error() != "--pid: invalid PID mode" { - t.Fatalf("Expected an error with message '--pid: invalid PID mode', got %v", err) - } - // pid ok - _, hostconfig, _, err = parseRun([]string{"--pid=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.PidMode.Valid() { - t.Fatalf("Expected a valid PidMode, got %v", hostconfig.PidMode) - } - // uts ko - if _, _, _, err := parseRun([]string{"--uts=container:", "img", "cmd"}); err == nil || err.Error() != "--uts: invalid UTS mode" { - t.Fatalf("Expected an error with message '--uts: invalid UTS mode', got %v", err) - } - // uts ok - _, hostconfig, _, err = parseRun([]string{"--uts=host", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if !hostconfig.UTSMode.Valid() { - t.Fatalf("Expected a valid UTSMode, got %v", hostconfig.UTSMode) - } - // shm-size ko - if _, _, _, err = parseRun([]string{"--shm-size=a128m", "img", "cmd"}); err == nil || err.Error() != "invalid size: 'a128m'" { - t.Fatalf("Expected an error with message 'invalid size: a128m', got %v", err) - } - // shm-size ok - _, hostconfig, _, err = parseRun([]string{"--shm-size=128m", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.ShmSize != 134217728 { - t.Fatalf("Expected a valid ShmSize, got %d", hostconfig.ShmSize) - } -} - -func TestParseRestartPolicy(t *testing.T) { - invalids := map[string]string{ - "something": "invalid restart policy something", - "always:2": "maximum restart count not valid with restart policy of \"always\"", - "always:2:3": "maximum restart count not valid with restart policy of \"always\"", - "on-failure:invalid": `strconv.ParseInt: parsing "invalid": invalid syntax`, - "on-failure:2:5": "restart count format is not valid, usage: 'on-failure:N' or 'on-failure'", - } - valids := map[string]container.RestartPolicy{ - "": {}, - "always": { - Name: "always", - MaximumRetryCount: 0, - }, - "on-failure:1": { - Name: "on-failure", - MaximumRetryCount: 1, - }, - } - for restart, expectedError := range invalids { - if _, _, _, err := parseRun([]string{fmt.Sprintf("--restart=%s", restart), "img", "cmd"}); err == nil || err.Error() != expectedError { - t.Fatalf("Expected an error with message '%v' for %v, got %v", expectedError, restart, err) - } - } - for restart, expected := range valids { - _, hostconfig, _, err := parseRun([]string{fmt.Sprintf("--restart=%v", restart), "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.RestartPolicy != expected { - t.Fatalf("Expected %v, got %v", expected, hostconfig.RestartPolicy) - } - } -} - -func TestParseHealth(t *testing.T) { - checkOk := func(args ...string) *container.HealthConfig { - config, _, _, err := parseRun(args) - if err != nil { - t.Fatalf("%#v: %v", args, err) - } - return config.Healthcheck - } - checkError := func(expected string, args ...string) { - config, _, _, err := parseRun(args) - if err == nil { - t.Fatalf("Expected error, but got %#v", config) - } - if err.Error() != expected { - t.Fatalf("Expected %#v, got %#v", expected, err) - } - } - health := checkOk("--no-healthcheck", "img", "cmd") - if health == nil || len(health.Test) != 1 || health.Test[0] != "NONE" { - t.Fatalf("--no-healthcheck failed: %#v", health) - } - - health = checkOk("--health-cmd=/check.sh -q", "img", "cmd") - if len(health.Test) != 2 || health.Test[0] != "CMD-SHELL" || health.Test[1] != "/check.sh -q" { - t.Fatalf("--health-cmd: 
got %#v", health.Test) - } - if health.Timeout != 0 { - t.Fatalf("--health-cmd: timeout = %f", health.Timeout) - } - - checkError("--no-healthcheck conflicts with --health-* options", - "--no-healthcheck", "--health-cmd=/check.sh -q", "img", "cmd") - - health = checkOk("--health-timeout=2s", "--health-retries=3", "--health-interval=4.5s", "img", "cmd") - if health.Timeout != 2*time.Second || health.Retries != 3 || health.Interval != 4500*time.Millisecond { - t.Fatalf("--health-*: got %#v", health) - } -} - -func TestParseLoggingOpts(t *testing.T) { - // logging opts ko - if _, _, _, err := parseRun([]string{"--log-driver=none", "--log-opt=anything", "img", "cmd"}); err == nil || err.Error() != "invalid logging opts for driver none" { - t.Fatalf("Expected an error with message 'invalid logging opts for driver none', got %v", err) - } - // logging opts ok - _, hostconfig, _, err := parseRun([]string{"--log-driver=syslog", "--log-opt=something", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if hostconfig.LogConfig.Type != "syslog" || len(hostconfig.LogConfig.Config) != 1 { - t.Fatalf("Expected a 'syslog' LogConfig with one config, got %v", hostconfig.RestartPolicy) - } -} - -func TestParseEnvfileVariables(t *testing.T) { - e := "open nonexistent: no such file or directory" - if runtime.GOOS == "windows" { - e = "open nonexistent: The system cannot find the file specified." - } - // env ko - if _, _, _, err := parseRun([]string{"--env-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // env ok - config, _, _, err := parseRun([]string{"--env-file=fixtures/valid.env", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Env) != 1 || config.Env[0] != "ENV1=value1" { - t.Fatalf("Expected a config with [ENV1=value1], got %v", config.Env) - } - config, _, _, err = parseRun([]string{"--env-file=fixtures/valid.env", "--env=ENV2=value2", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Env) != 2 || config.Env[0] != "ENV1=value1" || config.Env[1] != "ENV2=value2" { - t.Fatalf("Expected a config with [ENV1=value1 ENV2=value2], got %v", config.Env) - } -} - -func TestParseLabelfileVariables(t *testing.T) { - e := "open nonexistent: no such file or directory" - if runtime.GOOS == "windows" { - e = "open nonexistent: The system cannot find the file specified." 
- } - // label ko - if _, _, _, err := parseRun([]string{"--label-file=nonexistent", "img", "cmd"}); err == nil || err.Error() != e { - t.Fatalf("Expected an error with message '%s', got %v", e, err) - } - // label ok - config, _, _, err := parseRun([]string{"--label-file=fixtures/valid.label", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Labels) != 1 || config.Labels["LABEL1"] != "value1" { - t.Fatalf("Expected a config with [LABEL1:value1], got %v", config.Labels) - } - config, _, _, err = parseRun([]string{"--label-file=fixtures/valid.label", "--label=LABEL2=value2", "img", "cmd"}) - if err != nil { - t.Fatal(err) - } - if len(config.Labels) != 2 || config.Labels["LABEL1"] != "value1" || config.Labels["LABEL2"] != "value2" { - t.Fatalf("Expected a config with [LABEL1:value1 LABEL2:value2], got %v", config.Labels) - } -} - -func TestParseEntryPoint(t *testing.T) { - config, _, _, err := parseRun([]string{"--entrypoint=anything", "cmd", "img"}) - if err != nil { - t.Fatal(err) - } - if len(config.Entrypoint) != 1 && config.Entrypoint[0] != "anything" { - t.Fatalf("Expected entrypoint 'anything', got %v", config.Entrypoint) - } -} - -func TestValidateLink(t *testing.T) { - valid := []string{ - "name", - "dcdfbe62ecd0:alias", - "7a67485460b7642516a4ad82ecefe7f57d0c4916f530561b71a50a3f9c4e33da", - "angry_torvalds:linus", - } - invalid := map[string]string{ - "": "empty string specified for links", - "too:much:of:it": "bad format for links: too:much:of:it", - } - - for _, link := range valid { - if _, err := ValidateLink(link); err != nil { - t.Fatalf("ValidateLink(`%q`) should succeed: error %q", link, err) - } - } - - for link, expectedError := range invalid { - if _, err := ValidateLink(link); err == nil { - t.Fatalf("ValidateLink(`%q`) should have failed validation", link) - } else { - if !strings.Contains(err.Error(), expectedError) { - t.Fatalf("ValidateLink(`%q`) error should contain %q", link, expectedError) - } - } - } -} - -func TestParseLink(t *testing.T) { - name, alias, err := ParseLink("name:alias") - if err != nil { - t.Fatalf("Expected not to error out on a valid name:alias format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "alias" { - t.Fatalf("Link alias should have been alias, got %s instead", alias) - } - // short format definition - name, alias, err = ParseLink("name") - if err != nil { - t.Fatalf("Expected not to error out on a valid name only format but got: %v", err) - } - if name != "name" { - t.Fatalf("Link name should have been name, got %s instead", name) - } - if alias != "name" { - t.Fatalf("Link alias should have been name, got %s instead", alias) - } - // empty string link definition is not allowed - if _, _, err := ParseLink(""); err == nil || !strings.Contains(err.Error(), "empty string specified for links") { - t.Fatalf("Expected error 'empty string specified for links' but got: %v", err) - } - // more than two colons are not allowed - if _, _, err := ParseLink("link:alias:wrong"); err == nil || !strings.Contains(err.Error(), "bad format for links: link:alias:wrong") { - t.Fatalf("Expected error 'bad format for links: link:alias:wrong' but got: %v", err) - } -} - -func TestValidateDevice(t *testing.T) { - valid := []string{ - "/home", - "/home:/home", - "/home:/something/else", - "/with space", - "/home:/with space", - "relative:/absolute-path", - "hostPath:/containerPath:r", - "/hostPath:/containerPath:rw", - "/hostPath:/containerPath:mrw", - } - 
invalid := map[string]string{ - "": "bad format for path: ", - "./": "./ is not an absolute path", - "../": "../ is not an absolute path", - "/:../": "../ is not an absolute path", - "/:path": "path is not an absolute path", - ":": "bad format for path: :", - "/tmp:": " is not an absolute path", - ":test": "bad format for path: :test", - ":/test": "bad format for path: :/test", - "tmp:": " is not an absolute path", - ":test:": "bad format for path: :test:", - "::": "bad format for path: ::", - ":::": "bad format for path: :::", - "/tmp:::": "bad format for path: /tmp:::", - ":/tmp::": "bad format for path: :/tmp::", - "path:ro": "ro is not an absolute path", - "path:rr": "rr is not an absolute path", - "a:/b:ro": "bad mode specified: ro", - "a:/b:rr": "bad mode specified: rr", - } - - for _, path := range valid { - if _, err := ValidateDevice(path); err != nil { - t.Fatalf("ValidateDevice(`%q`) should succeed: error %q", path, err) - } - } - - for path, expectedError := range invalid { - if _, err := ValidateDevice(path); err == nil { - t.Fatalf("ValidateDevice(`%q`) should have failed validation", path) - } else { - if err.Error() != expectedError { - t.Fatalf("ValidateDevice(`%q`) error should contain %q, got %q", path, expectedError, err.Error()) - } - } - } -} - -func TestVolumeSplitN(t *testing.T) { - for _, x := range []struct { - input string - n int - expected []string - }{ - {`C:\foo:d:`, -1, []string{`C:\foo`, `d:`}}, - {`:C:\foo:d:`, -1, nil}, - {`/foo:/bar:ro`, 3, []string{`/foo`, `/bar`, `ro`}}, - {`/foo:/bar:ro`, 2, []string{`/foo`, `/bar:ro`}}, - {`C:\foo\:/foo`, -1, []string{`C:\foo\`, `/foo`}}, - - {`d:\`, -1, []string{`d:\`}}, - {`d:`, -1, []string{`d:`}}, - {`d:\path`, -1, []string{`d:\path`}}, - {`d:\path with space`, -1, []string{`d:\path with space`}}, - {`d:\pathandmode:rw`, -1, []string{`d:\pathandmode`, `rw`}}, - {`c:\:d:\`, -1, []string{`c:\`, `d:\`}}, - {`c:\windows\:d:`, -1, []string{`c:\windows\`, `d:`}}, - {`c:\windows:d:\s p a c e`, -1, []string{`c:\windows`, `d:\s p a c e`}}, - {`c:\windows:d:\s p a c e:RW`, -1, []string{`c:\windows`, `d:\s p a c e`, `RW`}}, - {`c:\program files:d:\s p a c e i n h o s t d i r`, -1, []string{`c:\program files`, `d:\s p a c e i n h o s t d i r`}}, - {`0123456789name:d:`, -1, []string{`0123456789name`, `d:`}}, - {`MiXeDcAsEnAmE:d:`, -1, []string{`MiXeDcAsEnAmE`, `d:`}}, - {`name:D:`, -1, []string{`name`, `D:`}}, - {`name:D::rW`, -1, []string{`name`, `D:`, `rW`}}, - {`name:D::RW`, -1, []string{`name`, `D:`, `RW`}}, - {`c:/:d:/forward/slashes/are/good/too`, -1, []string{`c:/`, `d:/forward/slashes/are/good/too`}}, - {`c:\Windows`, -1, []string{`c:\Windows`}}, - {`c:\Program Files (x86)`, -1, []string{`c:\Program Files (x86)`}}, - - {``, -1, nil}, - {`.`, -1, []string{`.`}}, - {`..\`, -1, []string{`..\`}}, - {`c:\:..\`, -1, []string{`c:\`, `..\`}}, - {`c:\:d:\:xyzzy`, -1, []string{`c:\`, `d:\`, `xyzzy`}}, - - // Cover directories with one-character name - {`/tmp/x/y:/foo/x/y`, -1, []string{`/tmp/x/y`, `/foo/x/y`}}, - } { - res := volumeSplitN(x.input, x.n) - if len(res) < len(x.expected) { - t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) - } - for i, e := range res { - if e != x.expected[i] { - t.Fatalf("input: %v, expected: %v, got: %v", x.input, x.expected, res) - } - } - } -} diff --git a/runconfig/opts/runtime.go b/runconfig/opts/runtime.go deleted file mode 100644 index 1fc099834e..0000000000 --- a/runconfig/opts/runtime.go +++ /dev/null @@ -1,74 +0,0 @@ -package opts - -import ( - "fmt" - "strings" - - 
"github.com/docker/engine-api/types" -) - -// RuntimeOpt defines a map of Runtimes -type RuntimeOpt struct { - name string - stockRuntimeName string - values *map[string]types.Runtime -} - -// NewNamedRuntimeOpt creates a new RuntimeOpt -func NewNamedRuntimeOpt(name string, ref *map[string]types.Runtime, stockRuntime string) *RuntimeOpt { - if ref == nil { - ref = &map[string]types.Runtime{} - } - return &RuntimeOpt{name: name, values: ref, stockRuntimeName: stockRuntime} -} - -// Name returns the name of the NamedListOpts in the configuration. -func (o *RuntimeOpt) Name() string { - return o.name -} - -// Set validates and updates the list of Runtimes -func (o *RuntimeOpt) Set(val string) error { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.TrimSpace(parts[0]) - parts[1] = strings.TrimSpace(parts[1]) - if parts[0] == "" || parts[1] == "" { - return fmt.Errorf("invalid runtime argument: %s", val) - } - - parts[0] = strings.ToLower(parts[0]) - if parts[0] == o.stockRuntimeName { - return fmt.Errorf("runtime name '%s' is reserved", o.stockRuntimeName) - } - - if _, ok := (*o.values)[parts[0]]; ok { - return fmt.Errorf("runtime '%s' was already defined", parts[0]) - } - - (*o.values)[parts[0]] = types.Runtime{Path: parts[1]} - - return nil -} - -// String returns Runtime values as a string. -func (o *RuntimeOpt) String() string { - var out []string - for k := range *o.values { - out = append(out, k) - } - - return fmt.Sprintf("%v", out) -} - -// GetMap returns a map of Runtimes (name: path) -func (o *RuntimeOpt) GetMap() map[string]types.Runtime { - if o.values != nil { - return *o.values - } - - return map[string]types.Runtime{} -} diff --git a/runconfig/opts/throttledevice.go b/runconfig/opts/throttledevice.go deleted file mode 100644 index f69e74ecc7..0000000000 --- a/runconfig/opts/throttledevice.go +++ /dev/null @@ -1,113 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/engine-api/types/blkiodev" - "github.com/docker/go-units" -) - -// ValidatorThrottleFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorThrottleFctType func(val string) (*blkiodev.ThrottleDevice, error) - -// ValidateThrottleBpsDevice validates that the specified string has a valid device-rate format. -func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := units.RAMInBytes(split[1]) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :[]. Number must be a positive integer. Unit is optional and can be kb, mb, or gb", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ValidateThrottleIOpsDevice validates that the specified string has a valid device-rate format. 
-func ValidateThrottleIOpsDevice(val string) (*blkiodev.ThrottleDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - rate, err := strconv.ParseUint(split[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - if rate < 0 { - return nil, fmt.Errorf("invalid rate for device: %s. The correct format is :. Number must be a positive integer", val) - } - - return &blkiodev.ThrottleDevice{ - Path: split[0], - Rate: uint64(rate), - }, nil -} - -// ThrottledeviceOpt defines a map of ThrottleDevices -type ThrottledeviceOpt struct { - values []*blkiodev.ThrottleDevice - validator ValidatorThrottleFctType -} - -// NewThrottledeviceOpt creates a new ThrottledeviceOpt -func NewThrottledeviceOpt(validator ValidatorThrottleFctType) ThrottledeviceOpt { - values := []*blkiodev.ThrottleDevice{} - return ThrottledeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a ThrottleDevice and sets its name as a key in ThrottledeviceOpt -func (opt *ThrottledeviceOpt) Set(val string) error { - var value *blkiodev.ThrottleDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns ThrottledeviceOpt values as a string. -func (opt *ThrottledeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to ThrottleDevices. -func (opt *ThrottledeviceOpt) GetList() []*blkiodev.ThrottleDevice { - var throttledevice []*blkiodev.ThrottleDevice - for _, v := range opt.values { - throttledevice = append(throttledevice, v) - } - - return throttledevice -} - -// Type returns the option type -func (opt *ThrottledeviceOpt) Type() string { - return "throttled-device" -} diff --git a/runconfig/opts/ulimit.go b/runconfig/opts/ulimit.go deleted file mode 100644 index 5adfe30851..0000000000 --- a/runconfig/opts/ulimit.go +++ /dev/null @@ -1,57 +0,0 @@ -package opts - -import ( - "fmt" - - "github.com/docker/go-units" -) - -// UlimitOpt defines a map of Ulimits -type UlimitOpt struct { - values *map[string]*units.Ulimit -} - -// NewUlimitOpt creates a new UlimitOpt -func NewUlimitOpt(ref *map[string]*units.Ulimit) *UlimitOpt { - if ref == nil { - ref = &map[string]*units.Ulimit{} - } - return &UlimitOpt{ref} -} - -// Set validates a Ulimit and sets its name as a key in UlimitOpt -func (o *UlimitOpt) Set(val string) error { - l, err := units.ParseUlimit(val) - if err != nil { - return err - } - - (*o.values)[l.Name] = l - - return nil -} - -// String returns Ulimit values as a string. -func (o *UlimitOpt) String() string { - var out []string - for _, v := range *o.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to Ulimits. 
-func (o *UlimitOpt) GetList() []*units.Ulimit { - var ulimits []*units.Ulimit - for _, v := range *o.values { - ulimits = append(ulimits, v) - } - - return ulimits -} - -// Type returns the option type -func (o *UlimitOpt) Type() string { - return "ulimit" -} diff --git a/runconfig/opts/ulimit_test.go b/runconfig/opts/ulimit_test.go deleted file mode 100644 index 0aa3facdfb..0000000000 --- a/runconfig/opts/ulimit_test.go +++ /dev/null @@ -1,42 +0,0 @@ -package opts - -import ( - "testing" - - "github.com/docker/go-units" -) - -func TestUlimitOpt(t *testing.T) { - ulimitMap := map[string]*units.Ulimit{ - "nofile": {"nofile", 1024, 512}, - } - - ulimitOpt := NewUlimitOpt(&ulimitMap) - - expected := "[nofile=512:1024]" - if ulimitOpt.String() != expected { - t.Fatalf("Expected %v, got %v", expected, ulimitOpt) - } - - // Valid ulimit append to opts - if err := ulimitOpt.Set("core=1024:1024"); err != nil { - t.Fatal(err) - } - - // Invalid ulimit type returns an error and do not append to opts - if err := ulimitOpt.Set("notavalidtype=1024:1024"); err == nil { - t.Fatalf("Expected error on invalid ulimit type") - } - expected = "[nofile=512:1024 core=1024:1024]" - expected2 := "[core=1024:1024 nofile=512:1024]" - result := ulimitOpt.String() - if result != expected && result != expected2 { - t.Fatalf("Expected %v or %v, got %v", expected, expected2, ulimitOpt) - } - - // And test GetList - ulimits := ulimitOpt.GetList() - if len(ulimits) != 2 { - t.Fatalf("Expected a ulimit list of 2, got %v", ulimits) - } -} diff --git a/runconfig/opts/weightdevice.go b/runconfig/opts/weightdevice.go deleted file mode 100644 index b3afd2213c..0000000000 --- a/runconfig/opts/weightdevice.go +++ /dev/null @@ -1,89 +0,0 @@ -package opts - -import ( - "fmt" - "strconv" - "strings" - - "github.com/docker/engine-api/types/blkiodev" -) - -// ValidatorWeightFctType defines a validator function that returns a validated struct and/or an error. -type ValidatorWeightFctType func(val string) (*blkiodev.WeightDevice, error) - -// ValidateWeightDevice validates that the specified string has a valid device-weight format. 
-func ValidateWeightDevice(val string) (*blkiodev.WeightDevice, error) { - split := strings.SplitN(val, ":", 2) - if len(split) != 2 { - return nil, fmt.Errorf("bad format: %s", val) - } - if !strings.HasPrefix(split[0], "/dev/") { - return nil, fmt.Errorf("bad format for device path: %s", val) - } - weight, err := strconv.ParseUint(split[1], 10, 0) - if err != nil { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - if weight > 0 && (weight < 10 || weight > 1000) { - return nil, fmt.Errorf("invalid weight for device: %s", val) - } - - return &blkiodev.WeightDevice{ - Path: split[0], - Weight: uint16(weight), - }, nil -} - -// WeightdeviceOpt defines a map of WeightDevices -type WeightdeviceOpt struct { - values []*blkiodev.WeightDevice - validator ValidatorWeightFctType -} - -// NewWeightdeviceOpt creates a new WeightdeviceOpt -func NewWeightdeviceOpt(validator ValidatorWeightFctType) WeightdeviceOpt { - values := []*blkiodev.WeightDevice{} - return WeightdeviceOpt{ - values: values, - validator: validator, - } -} - -// Set validates a WeightDevice and sets its name as a key in WeightdeviceOpt -func (opt *WeightdeviceOpt) Set(val string) error { - var value *blkiodev.WeightDevice - if opt.validator != nil { - v, err := opt.validator(val) - if err != nil { - return err - } - value = v - } - (opt.values) = append((opt.values), value) - return nil -} - -// String returns WeightdeviceOpt values as a string. -func (opt *WeightdeviceOpt) String() string { - var out []string - for _, v := range opt.values { - out = append(out, v.String()) - } - - return fmt.Sprintf("%v", out) -} - -// GetList returns a slice of pointers to WeightDevices. -func (opt *WeightdeviceOpt) GetList() []*blkiodev.WeightDevice { - var weightdevice []*blkiodev.WeightDevice - for _, v := range opt.values { - weightdevice = append(weightdevice, v) - } - - return weightdevice -} - -// Type returns the option type -func (opt *WeightdeviceOpt) Type() string { - return "weighted-device" -} diff --git a/runconfig/streams.go b/runconfig/streams.go deleted file mode 100644 index 117fd89aee..0000000000 --- a/runconfig/streams.go +++ /dev/null @@ -1,109 +0,0 @@ -package runconfig - -import ( - "fmt" - "io" - "io/ioutil" - "strings" - "sync" - - "github.com/docker/docker/pkg/broadcaster" - "github.com/docker/docker/pkg/ioutils" -) - -// StreamConfig holds information about I/O streams managed together. -// -// streamConfig.StdinPipe returns a WriteCloser which can be used to feed data -// to the standard input of the streamConfig's active process. -// streamConfig.StdoutPipe and streamConfig.StderrPipe each return a ReadCloser -// which can be used to retrieve the standard output (and error) generated -// by the container's active process. The output (and error) are actually -// copied and delivered to all StdoutPipe and StderrPipe consumers, using -// a kind of "broadcaster". -type StreamConfig struct { - sync.WaitGroup - stdout *broadcaster.Unbuffered - stderr *broadcaster.Unbuffered - stdin io.ReadCloser - stdinPipe io.WriteCloser -} - -// NewStreamConfig creates a stream config and initializes -// the standard err and standard out to new unbuffered broadcasters. -func NewStreamConfig() *StreamConfig { - return &StreamConfig{ - stderr: new(broadcaster.Unbuffered), - stdout: new(broadcaster.Unbuffered), - } -} - -// Stdout returns the standard output in the configuration. 
-func (streamConfig *StreamConfig) Stdout() *broadcaster.Unbuffered { - return streamConfig.stdout -} - -// Stderr returns the standard error in the configuration. -func (streamConfig *StreamConfig) Stderr() *broadcaster.Unbuffered { - return streamConfig.stderr -} - -// Stdin returns the standard input in the configuration. -func (streamConfig *StreamConfig) Stdin() io.ReadCloser { - return streamConfig.stdin -} - -// StdinPipe returns an input writer pipe as an io.WriteCloser. -func (streamConfig *StreamConfig) StdinPipe() io.WriteCloser { - return streamConfig.stdinPipe -} - -// StdoutPipe creates a new io.ReadCloser with an empty bytes pipe. -// It adds this new out pipe to the Stdout broadcaster. -func (streamConfig *StreamConfig) StdoutPipe() io.ReadCloser { - bytesPipe := ioutils.NewBytesPipe() - streamConfig.stdout.Add(bytesPipe) - return bytesPipe -} - -// StderrPipe creates a new io.ReadCloser with an empty bytes pipe. -// It adds this new err pipe to the Stderr broadcaster. -func (streamConfig *StreamConfig) StderrPipe() io.ReadCloser { - bytesPipe := ioutils.NewBytesPipe() - streamConfig.stderr.Add(bytesPipe) - return bytesPipe -} - -// NewInputPipes creates new pipes for both standard inputs, Stdin and StdinPipe. -func (streamConfig *StreamConfig) NewInputPipes() { - streamConfig.stdin, streamConfig.stdinPipe = io.Pipe() -} - -// NewNopInputPipe creates a new input pipe that will silently drop all messages in the input. -func (streamConfig *StreamConfig) NewNopInputPipe() { - streamConfig.stdinPipe = ioutils.NopWriteCloser(ioutil.Discard) -} - -// CloseStreams ensures that the configured streams are properly closed. -func (streamConfig *StreamConfig) CloseStreams() error { - var errors []string - - if streamConfig.stdin != nil { - if err := streamConfig.stdin.Close(); err != nil { - errors = append(errors, fmt.Sprintf("error close stdin: %s", err)) - } - } - - if err := streamConfig.stdout.Clean(); err != nil { - errors = append(errors, fmt.Sprintf("error close stdout: %s", err)) - } - - if err := streamConfig.stderr.Clean(); err != nil { - errors = append(errors, fmt.Sprintf("error close stderr: %s", err)) - } - - if len(errors) > 0 { - return fmt.Errorf(strings.Join(errors, "\n")) - } - - return nil -} diff --git a/utils/debug.go b/utils/debug.go deleted file mode 100644 index d203891129..0000000000 --- a/utils/debug.go +++ /dev/null @@ -1,26 +0,0 @@ -package utils - -import ( - "os" - - "github.com/Sirupsen/logrus" -) - -// EnableDebug sets the DEBUG env var to true -// and makes the logger to log at debug level. -func EnableDebug() { - os.Setenv("DEBUG", "1") - logrus.SetLevel(logrus.DebugLevel) -} - -// DisableDebug sets the DEBUG env var to false -// and makes the logger to log at info level. -func DisableDebug() { - os.Setenv("DEBUG", "") - logrus.SetLevel(logrus.InfoLevel) -} - -// IsDebugEnabled checks whether the debug flag is set or not. 
-func IsDebugEnabled() bool { - return os.Getenv("DEBUG") != "" -} diff --git a/utils/debug_test.go b/utils/debug_test.go deleted file mode 100644 index 6f9c4dfbb0..0000000000 --- a/utils/debug_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package utils - -import ( - "os" - "testing" - - "github.com/Sirupsen/logrus" -) - -func TestEnableDebug(t *testing.T) { - defer func() { - os.Setenv("DEBUG", "") - logrus.SetLevel(logrus.InfoLevel) - }() - EnableDebug() - if os.Getenv("DEBUG") != "1" { - t.Fatalf("expected DEBUG=1, got %s\n", os.Getenv("DEBUG")) - } - if logrus.GetLevel() != logrus.DebugLevel { - t.Fatalf("expected log level %v, got %v\n", logrus.DebugLevel, logrus.GetLevel()) - } -} - -func TestDisableDebug(t *testing.T) { - DisableDebug() - if os.Getenv("DEBUG") != "" { - t.Fatalf("expected DEBUG=\"\", got %s\n", os.Getenv("DEBUG")) - } - if logrus.GetLevel() != logrus.InfoLevel { - t.Fatalf("expected log level %v, got %v\n", logrus.InfoLevel, logrus.GetLevel()) - } -} - -func TestDebugEnabled(t *testing.T) { - EnableDebug() - if !IsDebugEnabled() { - t.Fatal("expected debug enabled, got false") - } - DisableDebug() - if IsDebugEnabled() { - t.Fatal("expected debug disabled, got true") - } -} diff --git a/utils/experimental.go b/utils/experimental.go deleted file mode 100644 index ceed0cb3ff..0000000000 --- a/utils/experimental.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build experimental - -package utils - -// ExperimentalBuild is a stub which always returns true for -// builds that include the "experimental" build tag -func ExperimentalBuild() bool { - return true -} diff --git a/utils/names.go b/utils/names.go deleted file mode 100644 index 8239c0de29..0000000000 --- a/utils/names.go +++ /dev/null @@ -1,12 +0,0 @@ -package utils - -import "regexp" - -// RestrictedNameChars collects the characters allowed to represent a name, normally used to validate container and volume names. -const RestrictedNameChars = `[a-zA-Z0-9][a-zA-Z0-9_.-]` - -// RestrictedNamePattern is a regular expression to validate names against the collection of restricted characters. -var RestrictedNamePattern = regexp.MustCompile(`^/?` + RestrictedNameChars + `+$`) - -// RestrictedVolumeNamePattern is a regular expression to validate volume names against the collection of restricted characters. -var RestrictedVolumeNamePattern = regexp.MustCompile(`^` + RestrictedNameChars + `+$`) diff --git a/utils/process_unix.go b/utils/process_unix.go deleted file mode 100644 index bdb1b46b3d..0000000000 --- a/utils/process_unix.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build linux freebsd - -package utils - -import ( - "syscall" -) - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - err := syscall.Kill(pid, syscall.Signal(0)) - if err == nil || err == syscall.EPERM { - return true - } - - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - syscall.Kill(pid, syscall.SIGKILL) -} diff --git a/utils/process_windows.go b/utils/process_windows.go deleted file mode 100644 index 03cb855197..0000000000 --- a/utils/process_windows.go +++ /dev/null @@ -1,20 +0,0 @@ -package utils - -// IsProcessAlive returns true if process with a given pid is running. -func IsProcessAlive(pid int) bool { - // TODO Windows containerd. Not sure this is needed - // p, err := os.FindProcess(pid) - // if err == nil { - // return true - // } - return false -} - -// KillProcess force-stops a process. -func KillProcess(pid int) { - // TODO Windows containerd. 
Not sure this is needed - // p, err := os.FindProcess(pid) - // if err == nil { - // p.Kill() - // } -} diff --git a/utils/stubs.go b/utils/stubs.go deleted file mode 100644 index 8a496d392f..0000000000 --- a/utils/stubs.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build !experimental - -package utils - -// ExperimentalBuild is a stub which always returns false for -// builds that do not include the "experimental" build tag -func ExperimentalBuild() bool { - return false -} diff --git a/utils/templates/templates.go b/utils/templates/templates.go deleted file mode 100644 index 749da3d5af..0000000000 --- a/utils/templates/templates.go +++ /dev/null @@ -1,33 +0,0 @@ -package templates - -import ( - "encoding/json" - "strings" - "text/template" -) - -// basicFunctions are the set of initial -// functions provided to every template. -var basicFunctions = template.FuncMap{ - "json": func(v interface{}) string { - a, _ := json.Marshal(v) - return string(a) - }, - "split": strings.Split, - "join": strings.Join, - "title": strings.Title, - "lower": strings.ToLower, - "upper": strings.ToUpper, -} - -// Parse creates a new annonymous template with the basic functions -// and parses the given format. -func Parse(format string) (*template.Template, error) { - return NewParse("", format) -} - -// NewParse creates a new tagged template with the basic functions -// and parses the given format. -func NewParse(tag, format string) (*template.Template, error) { - return template.New(tag).Funcs(basicFunctions).Parse(format) -} diff --git a/utils/templates/templates_test.go b/utils/templates/templates_test.go deleted file mode 100644 index dd42901aed..0000000000 --- a/utils/templates/templates_test.go +++ /dev/null @@ -1,38 +0,0 @@ -package templates - -import ( - "bytes" - "testing" -) - -func TestParseStringFunctions(t *testing.T) { - tm, err := Parse(`{{join (split . ":") "/"}}`) - if err != nil { - t.Fatal(err) - } - - var b bytes.Buffer - if err := tm.Execute(&b, "text:with:colon"); err != nil { - t.Fatal(err) - } - want := "text/with/colon" - if b.String() != want { - t.Fatalf("expected %s, got %s", want, b.String()) - } -} - -func TestNewParse(t *testing.T) { - tm, err := NewParse("foo", "this is a {{ . }}") - if err != nil { - t.Fatal(err) - } - - var b bytes.Buffer - if err := tm.Execute(&b, "string"); err != nil { - t.Fatal(err) - } - want := "this is a string" - if b.String() != want { - t.Fatalf("expected %s, got %s", want, b.String()) - } -} diff --git a/utils/utils.go b/utils/utils.go deleted file mode 100644 index d3dd00abf4..0000000000 --- a/utils/utils.go +++ /dev/null @@ -1,87 +0,0 @@ -package utils - -import ( - "fmt" - "io/ioutil" - "os" - "runtime" - "strings" - - "github.com/docker/docker/pkg/archive" - "github.com/docker/docker/pkg/stringid" -) - -var globalTestID string - -// TestDirectory creates a new temporary directory and returns its path. -// The contents of directory at path `templateDir` is copied into the -// new directory. 
-func TestDirectory(templateDir string) (dir string, err error) { - if globalTestID == "" { - globalTestID = stringid.GenerateNonCryptoID()[:4] - } - prefix := fmt.Sprintf("docker-test%s-%s-", globalTestID, GetCallerName(2)) - if prefix == "" { - prefix = "docker-test-" - } - dir, err = ioutil.TempDir("", prefix) - if err = os.Remove(dir); err != nil { - return - } - if templateDir != "" { - if err = archive.CopyWithTar(templateDir, dir); err != nil { - return - } - } - return -} - -// GetCallerName introspects the call stack and returns the name of the -// function `depth` levels down in the stack. -func GetCallerName(depth int) string { - // Use the caller function name as a prefix. - // This helps trace temp directories back to their test. - pc, _, _, _ := runtime.Caller(depth + 1) - callerLongName := runtime.FuncForPC(pc).Name() - parts := strings.Split(callerLongName, ".") - callerShortName := parts[len(parts)-1] - return callerShortName -} - -// ReplaceOrAppendEnvValues returns the defaults with the overrides either -// replaced by env key or appended to the list -func ReplaceOrAppendEnvValues(defaults, overrides []string) []string { - cache := make(map[string]int, len(defaults)) - for i, e := range defaults { - parts := strings.SplitN(e, "=", 2) - cache[parts[0]] = i - } - - for _, value := range overrides { - // Values w/o = means they want this env to be removed/unset. - if !strings.Contains(value, "=") { - if i, exists := cache[value]; exists { - defaults[i] = "" // Used to indicate it should be removed - } - continue - } - - // Just do a normal set/update - parts := strings.SplitN(value, "=", 2) - if i, exists := cache[parts[0]]; exists { - defaults[i] = value - } else { - defaults = append(defaults, value) - } - } - - // Now remove all entries that we want to "unset" - for i := 0; i < len(defaults); i++ { - if defaults[i] == "" { - defaults = append(defaults[:i], defaults[i+1:]...) - i-- - } - } - - return defaults -} diff --git a/utils/utils_test.go b/utils/utils_test.go deleted file mode 100644 index ab3911e8b3..0000000000 --- a/utils/utils_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package utils - -import "testing" - -func TestReplaceAndAppendEnvVars(t *testing.T) { - var ( - d = []string{"HOME=/"} - o = []string{"HOME=/root", "TERM=xterm"} - ) - - env := ReplaceOrAppendEnvValues(d, o) - if len(env) != 2 { - t.Fatalf("expected len of 2 got %d", len(env)) - } - if env[0] != "HOME=/root" { - t.Fatalf("expected HOME=/root got '%s'", env[0]) - } - if env[1] != "TERM=xterm" { - t.Fatalf("expected TERM=xterm got '%s'", env[1]) - } -} diff --git a/vendor/src/bitbucket.org/ww/goautoneg/Makefile b/vendor/src/bitbucket.org/ww/goautoneg/Makefile deleted file mode 100644 index e33ee17303..0000000000 --- a/vendor/src/bitbucket.org/ww/goautoneg/Makefile +++ /dev/null @@ -1,13 +0,0 @@ -include $(GOROOT)/src/Make.inc - -TARG=bitbucket.org/ww/goautoneg -GOFILES=autoneg.go - -include $(GOROOT)/src/Make.pkg - -format: - gofmt -w *.go - -docs: - gomake clean - godoc ${TARG} > README.txt diff --git a/vendor/src/bitbucket.org/ww/goautoneg/README.txt b/vendor/src/bitbucket.org/ww/goautoneg/README.txt deleted file mode 100644 index 7723656d58..0000000000 --- a/vendor/src/bitbucket.org/ww/goautoneg/README.txt +++ /dev/null @@ -1,67 +0,0 @@ -PACKAGE - -package goautoneg -import "bitbucket.org/ww/goautoneg" - -HTTP Content-Type Autonegotiation. 
- -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -FUNCTIONS - -func Negotiate(header string, alternatives []string) (content_type string) -Negotiate the most appropriate content_type given the accept header -and a list of alternatives. - -func ParseAccept(header string) (accept []Accept) -Parse an Accept Header string returning a sorted list -of clauses - - -TYPES - -type Accept struct { - Type, SubType string - Q float32 - Params map[string]string -} -Structure to represent a clause in an HTTP Accept Header - - -SUBDIRECTORIES - - .hg diff --git a/vendor/src/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/src/bitbucket.org/ww/goautoneg/autoneg.go deleted file mode 100644 index 648b38cb65..0000000000 --- a/vendor/src/bitbucket.org/ww/goautoneg/autoneg.go +++ /dev/null @@ -1,162 +0,0 @@ -/* -HTTP Content-Type Autonegotiation. - -The functions in this package implement the behaviour specified in -http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html - -Copyright (c) 2011, Open Knowledge Foundation Ltd. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in - the documentation and/or other materials provided with the - distribution. - - Neither the name of the Open Knowledge Foundation Ltd. nor the - names of its contributors may be used to endorse or promote - products derived from this software without specific prior written - permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -*/ -package goautoneg - -import ( - "sort" - "strconv" - "strings" -) - -// Structure to represent a clause in an HTTP Accept Header -type Accept struct { - Type, SubType string - Q float64 - Params map[string]string -} - -// For internal use, so that we can use the sort interface -type accept_slice []Accept - -func (accept accept_slice) Len() int { - slice := []Accept(accept) - return len(slice) -} - -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) - ai, aj := slice[i], slice[j] - if ai.Q > aj.Q { - return true - } - if ai.Type != "*" && aj.Type == "*" { - return true - } - if ai.SubType != "*" && aj.SubType == "*" { - return true - } - return false -} - -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) - slice[i], slice[j] = slice[j], slice[i] -} - -// Parse an Accept Header string returning a sorted list -// of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") - - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 - - mrp := strings.Split(part, ";") - - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") - - switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], " ") - default: - continue - } - - if len(mrp) == 1 { - accept = append(accept, a) - continue - } - - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { - continue - } - token := strings.Trim(sp[0], " ") - if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) - } else { - a.Params[token] = strings.Trim(sp[1], " ") - } - } - - accept = append(accept, a) - } - - slice := accept_slice(accept) - sort.Sort(slice) - - return -} - -// Negotiate the most appropriate content_type given the accept header -// and a list of alternatives. 
-func Negotiate(header string, alternatives []string) (content_type string) { - asp := make([][]string, 0, len(alternatives)) - for _, ctype := range alternatives { - asp = append(asp, strings.SplitN(ctype, "/", 2)) - } - for _, clause := range ParseAccept(header) { - for i, ctsp := range asp { - if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { - content_type = alternatives[i] - return - } - if clause.Type == ctsp[0] && clause.SubType == "*" { - content_type = alternatives[i] - return - } - if clause.Type == "*" && clause.SubType == "*" { - content_type = alternatives[i] - return - } - } - } - return -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/LICENSE b/vendor/src/github.com/Azure/go-ansiterm/LICENSE deleted file mode 100644 index e3d9a64d1d..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft Corporation - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. diff --git a/vendor/src/github.com/Azure/go-ansiterm/README.md b/vendor/src/github.com/Azure/go-ansiterm/README.md deleted file mode 100644 index e25e382101..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/README.md +++ /dev/null @@ -1,9 +0,0 @@ -# go-ansiterm - -This is a cross platform Ansi Terminal Emulation library. It reads a stream of Ansi characters and produces the appropriate function calls. The results of the function calls are platform dependent. - -For example the parser might receive "ESC, [, A" as a stream of three characters. This is the code for Cursor Up (http://www.vt100.net/docs/vt510-rm/CUU). The parser then calls the cursor up function (CUU()) on an event handler. The event handler determines what platform specific work must be done to cause the cursor to move up one position. - -The parser (parser.go) is a partial implementation of this state machine (http://vt100.net/emu/vt500_parser.png). There are also two event handler implementations, one for tests (test_event_handler.go) to validate that the expected events are being produced and called, the other is a Windows implementation (winterm/win_event_handler.go). - -See parser_test.go for examples exercising the state machine and generating appropriate function calls. 
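The flow that README describes — the parser receives a byte stream such as ESC, [, A, walks the vt500 state machine (Ground → Escape → CsiEntry), and dispatches an event like CUU() on a handler — can be sketched in a few lines. The toy sketch below is illustrative only and is not the library's code: the one-method `cursorHandler` and the three-state `parseCUU` recognizer are simplified assumptions, whereas the library's real types (`AnsiParser`, `AnsiEventHandler`, `CreateParser`) are defined in parser.go and event_handler.go elsewhere in this patch.

```go
package main

import "fmt"

// cursorHandler is a hypothetical one-method handler for illustration;
// the real library defines a much larger AnsiEventHandler interface
// (see event_handler.go in this patch).
type cursorHandler struct{}

func (cursorHandler) CUU(n int) { fmt.Printf("cursor up %d\n", n) }

// parseCUU is a toy recognizer for the three-byte sequence ESC [ A
// (Cursor Up). It walks a miniature version of the vt500 state machine:
// Ground -> Escape -> CsiEntry -> dispatch.
func parseCUU(stream []byte, h cursorHandler) {
	const (
		ground = iota
		escape
		csiEntry
	)
	state := ground
	for _, b := range stream {
		switch state {
		case ground:
			if b == 0x1B { // ESC enters the escape state
				state = escape
			}
		case escape:
			if b == '[' { // CSI introducer enters CsiEntry
				state = csiEntry
			} else {
				state = ground
			}
		case csiEntry:
			if b == 'A' { // final byte for CUU: dispatch the event
				h.CUU(1) // no parameter collected: default count is 1
			}
			state = ground
		}
	}
}

func main() {
	parseCUU([]byte("\x1b[A"), cursorHandler{})
}
```

Run against the stream `\x1b[A`, the sketch prints `cursor up 1`, mirroring the README's Cursor Up example; the real parser additionally collects numeric parameters (e.g. ESC [ 5 A) in a CsiParam state before dispatching.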
diff --git a/vendor/src/github.com/Azure/go-ansiterm/constants.go b/vendor/src/github.com/Azure/go-ansiterm/constants.go deleted file mode 100644 index 96504a33bc..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/constants.go +++ /dev/null @@ -1,188 +0,0 @@ -package ansiterm - -const LogEnv = "DEBUG_TERMINAL" - -// ANSI constants -// References: -// -- http://www.ecma-international.org/publications/standards/Ecma-048.htm -// -- http://man7.org/linux/man-pages/man4/console_codes.4.html -// -- http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html -// -- http://en.wikipedia.org/wiki/ANSI_escape_code -// -- http://vt100.net/emu/dec_ansi_parser -// -- http://vt100.net/emu/vt500_parser.svg -// -- http://invisible-island.net/xterm/ctlseqs/ctlseqs.html -// -- http://www.inwap.com/pdp10/ansicode.txt -const ( - // ECMA-48 Set Graphics Rendition - // Note: - // -- Constants leading with an underscore (e.g., _ANSI_xxx) are unsupported or reserved - // -- Fonts could possibly be supported via SetCurrentConsoleFontEx - // -- Windows does not expose the per-window cursor (i.e., caret) blink times - ANSI_SGR_RESET = 0 - ANSI_SGR_BOLD = 1 - ANSI_SGR_DIM = 2 - _ANSI_SGR_ITALIC = 3 - ANSI_SGR_UNDERLINE = 4 - _ANSI_SGR_BLINKSLOW = 5 - _ANSI_SGR_BLINKFAST = 6 - ANSI_SGR_REVERSE = 7 - _ANSI_SGR_INVISIBLE = 8 - _ANSI_SGR_LINETHROUGH = 9 - _ANSI_SGR_FONT_00 = 10 - _ANSI_SGR_FONT_01 = 11 - _ANSI_SGR_FONT_02 = 12 - _ANSI_SGR_FONT_03 = 13 - _ANSI_SGR_FONT_04 = 14 - _ANSI_SGR_FONT_05 = 15 - _ANSI_SGR_FONT_06 = 16 - _ANSI_SGR_FONT_07 = 17 - _ANSI_SGR_FONT_08 = 18 - _ANSI_SGR_FONT_09 = 19 - _ANSI_SGR_FONT_10 = 20 - _ANSI_SGR_DOUBLEUNDERLINE = 21 - ANSI_SGR_BOLD_DIM_OFF = 22 - _ANSI_SGR_ITALIC_OFF = 23 - ANSI_SGR_UNDERLINE_OFF = 24 - _ANSI_SGR_BLINK_OFF = 25 - _ANSI_SGR_RESERVED_00 = 26 - ANSI_SGR_REVERSE_OFF = 27 - _ANSI_SGR_INVISIBLE_OFF = 28 - _ANSI_SGR_LINETHROUGH_OFF = 29 - ANSI_SGR_FOREGROUND_BLACK = 30 - ANSI_SGR_FOREGROUND_RED = 31 - ANSI_SGR_FOREGROUND_GREEN = 32 - ANSI_SGR_FOREGROUND_YELLOW = 33 - ANSI_SGR_FOREGROUND_BLUE = 34 - ANSI_SGR_FOREGROUND_MAGENTA = 35 - ANSI_SGR_FOREGROUND_CYAN = 36 - ANSI_SGR_FOREGROUND_WHITE = 37 - _ANSI_SGR_RESERVED_01 = 38 - ANSI_SGR_FOREGROUND_DEFAULT = 39 - ANSI_SGR_BACKGROUND_BLACK = 40 - ANSI_SGR_BACKGROUND_RED = 41 - ANSI_SGR_BACKGROUND_GREEN = 42 - ANSI_SGR_BACKGROUND_YELLOW = 43 - ANSI_SGR_BACKGROUND_BLUE = 44 - ANSI_SGR_BACKGROUND_MAGENTA = 45 - ANSI_SGR_BACKGROUND_CYAN = 46 - ANSI_SGR_BACKGROUND_WHITE = 47 - _ANSI_SGR_RESERVED_02 = 48 - ANSI_SGR_BACKGROUND_DEFAULT = 49 - // 50 - 65: Unsupported - - ANSI_MAX_CMD_LENGTH = 4096 - - MAX_INPUT_EVENTS = 128 - DEFAULT_WIDTH = 80 - DEFAULT_HEIGHT = 24 - - ANSI_BEL = 0x07 - ANSI_BACKSPACE = 0x08 - ANSI_TAB = 0x09 - ANSI_LINE_FEED = 0x0A - ANSI_VERTICAL_TAB = 0x0B - ANSI_FORM_FEED = 0x0C - ANSI_CARRIAGE_RETURN = 0x0D - ANSI_ESCAPE_PRIMARY = 0x1B - ANSI_ESCAPE_SECONDARY = 0x5B - ANSI_OSC_STRING_ENTRY = 0x5D - ANSI_COMMAND_FIRST = 0x40 - ANSI_COMMAND_LAST = 0x7E - DCS_ENTRY = 0x90 - CSI_ENTRY = 0x9B - OSC_STRING = 0x9D - ANSI_PARAMETER_SEP = ";" - ANSI_CMD_G0 = '(' - ANSI_CMD_G1 = ')' - ANSI_CMD_G2 = '*' - ANSI_CMD_G3 = '+' - ANSI_CMD_DECPNM = '>' - ANSI_CMD_DECPAM = '=' - ANSI_CMD_OSC = ']' - ANSI_CMD_STR_TERM = '\\' - - KEY_CONTROL_PARAM_2 = ";2" - KEY_CONTROL_PARAM_3 = ";3" - KEY_CONTROL_PARAM_4 = ";4" - KEY_CONTROL_PARAM_5 = ";5" - KEY_CONTROL_PARAM_6 = ";6" - KEY_CONTROL_PARAM_7 = ";7" - KEY_CONTROL_PARAM_8 = ";8" - KEY_ESC_CSI = "\x1B[" - KEY_ESC_N = "\x1BN" - KEY_ESC_O = "\x1BO" - - FILL_CHARACTER = ' 
' -) - -func getByteRange(start byte, end byte) []byte { - bytes := make([]byte, 0, 32) - for i := start; i <= end; i++ { - bytes = append(bytes, byte(i)) - } - - return bytes -} - -var toGroundBytes = getToGroundBytes() -var executors = getExecuteBytes() - -// SPACE 20+A0 hex Always and everywhere a blank space -// Intermediate 20-2F hex !"#$%&'()*+,-./ -var intermeds = getByteRange(0x20, 0x2F) - -// Parameters 30-3F hex 0123456789:;<=>? -// CSI Parameters 30-39, 3B hex 0123456789; -var csiParams = getByteRange(0x30, 0x3F) - -var csiCollectables = append(getByteRange(0x30, 0x39), getByteRange(0x3B, 0x3F)...) - -// Uppercase 40-5F hex @ABCDEFGHIJKLMNOPQRSTUVWXYZ[\]^_ -var upperCase = getByteRange(0x40, 0x5F) - -// Lowercase 60-7E hex `abcdefghijlkmnopqrstuvwxyz{|}~ -var lowerCase = getByteRange(0x60, 0x7E) - -// Alphabetics 40-7E hex (all of upper and lower case) -var alphabetics = append(upperCase, lowerCase...) - -var printables = getByteRange(0x20, 0x7F) - -var escapeIntermediateToGroundBytes = getByteRange(0x30, 0x7E) -var escapeToGroundBytes = getEscapeToGroundBytes() - -// See http://www.vt100.net/emu/vt500_parser.png for description of the complex -// byte ranges below - -func getEscapeToGroundBytes() []byte { - escapeToGroundBytes := getByteRange(0x30, 0x4F) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x51, 0x57)...) - escapeToGroundBytes = append(escapeToGroundBytes, 0x59) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5A) - escapeToGroundBytes = append(escapeToGroundBytes, 0x5C) - escapeToGroundBytes = append(escapeToGroundBytes, getByteRange(0x60, 0x7E)...) - return escapeToGroundBytes -} - -func getExecuteBytes() []byte { - executeBytes := getByteRange(0x00, 0x17) - executeBytes = append(executeBytes, 0x19) - executeBytes = append(executeBytes, getByteRange(0x1C, 0x1F)...) - return executeBytes -} - -func getToGroundBytes() []byte { - groundBytes := []byte{0x18} - groundBytes = append(groundBytes, 0x1A) - groundBytes = append(groundBytes, getByteRange(0x80, 0x8F)...) - groundBytes = append(groundBytes, getByteRange(0x91, 0x97)...) 
- groundBytes = append(groundBytes, 0x99) - groundBytes = append(groundBytes, 0x9A) - groundBytes = append(groundBytes, 0x9C) - return groundBytes -} - -// Delete 7F hex Always and everywhere ignored -// C1 Control 80-9F hex 32 additional control characters -// G1 Displayable A1-FE hex 94 additional displayable characters -// Special A0+FF hex Same as SPACE and DELETE diff --git a/vendor/src/github.com/Azure/go-ansiterm/context.go b/vendor/src/github.com/Azure/go-ansiterm/context.go deleted file mode 100644 index 8d66e777c0..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/context.go +++ /dev/null @@ -1,7 +0,0 @@ -package ansiterm - -type ansiContext struct { - currentChar byte - paramBuffer []byte - interBuffer []byte -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/csi_entry_state.go b/vendor/src/github.com/Azure/go-ansiterm/csi_entry_state.go deleted file mode 100644 index 1bd6057da8..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/csi_entry_state.go +++ /dev/null @@ -1,49 +0,0 @@ -package ansiterm - -type csiEntryState struct { - baseState -} - -func (csiState csiEntryState) Handle(b byte) (s state, e error) { - logger.Infof("CsiEntry::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - return csiState.parser.csiParam, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiEntryState) Transition(s state) error { - logger.Infof("CsiEntry::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - case csiState.parser.csiParam: - switch { - case sliceContains(csiParams, csiState.parser.context.currentChar): - csiState.parser.collectParam() - case sliceContains(intermeds, csiState.parser.context.currentChar): - csiState.parser.collectInter() - } - } - - return nil -} - -func (csiState csiEntryState) Enter() error { - csiState.parser.clear() - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/csi_param_state.go b/vendor/src/github.com/Azure/go-ansiterm/csi_param_state.go deleted file mode 100644 index 4be35c5fd2..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/csi_param_state.go +++ /dev/null @@ -1,38 +0,0 @@ -package ansiterm - -type csiParamState struct { - baseState -} - -func (csiState csiParamState) Handle(b byte) (s state, e error) { - logger.Infof("CsiParam::Handle %#x", b) - - nextState, err := csiState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(alphabetics, b): - return csiState.parser.ground, nil - case sliceContains(csiCollectables, b): - csiState.parser.collectParam() - return csiState, nil - case sliceContains(executors, b): - return csiState, csiState.parser.execute() - } - - return csiState, nil -} - -func (csiState csiParamState) Transition(s state) error { - logger.Infof("CsiParam::Transition %s --> %s", csiState.Name(), s.Name()) - csiState.baseState.Transition(s) - - switch s { - case csiState.parser.ground: - return csiState.parser.csiDispatch() - } - - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/escape_intermediate_state.go b/vendor/src/github.com/Azure/go-ansiterm/escape_intermediate_state.go deleted file mode 100644 
index 2189eb6b6b..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/escape_intermediate_state.go +++ /dev/null @@ -1,36 +0,0 @@ -package ansiterm - -type escapeIntermediateState struct { - baseState -} - -func (escState escapeIntermediateState) Handle(b byte) (s state, e error) { - logger.Infof("escapeIntermediateState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(intermeds, b): - return escState, escState.parser.collectInter() - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeIntermediateToGroundBytes, b): - return escState.parser.ground, nil - } - - return escState, nil -} - -func (escState escapeIntermediateState) Transition(s state) error { - logger.Infof("escapeIntermediateState::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - } - - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/escape_state.go b/vendor/src/github.com/Azure/go-ansiterm/escape_state.go deleted file mode 100644 index 7b1b9ad3f1..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/escape_state.go +++ /dev/null @@ -1,47 +0,0 @@ -package ansiterm - -type escapeState struct { - baseState -} - -func (escState escapeState) Handle(b byte) (s state, e error) { - logger.Infof("escapeState::Handle %#x", b) - nextState, err := escState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case b == ANSI_ESCAPE_SECONDARY: - return escState.parser.csiEntry, nil - case b == ANSI_OSC_STRING_ENTRY: - return escState.parser.oscString, nil - case sliceContains(executors, b): - return escState, escState.parser.execute() - case sliceContains(escapeToGroundBytes, b): - return escState.parser.ground, nil - case sliceContains(intermeds, b): - return escState.parser.escapeIntermediate, nil - } - - return escState, nil -} - -func (escState escapeState) Transition(s state) error { - logger.Infof("Escape::Transition %s --> %s", escState.Name(), s.Name()) - escState.baseState.Transition(s) - - switch s { - case escState.parser.ground: - return escState.parser.escDispatch() - case escState.parser.escapeIntermediate: - return escState.parser.collectInter() - } - - return nil -} - -func (escState escapeState) Enter() error { - escState.parser.clear() - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/event_handler.go b/vendor/src/github.com/Azure/go-ansiterm/event_handler.go deleted file mode 100644 index 98087b38c2..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/event_handler.go +++ /dev/null @@ -1,90 +0,0 @@ -package ansiterm - -type AnsiEventHandler interface { - // Print - Print(b byte) error - - // Execute C0 commands - Execute(b byte) error - - // CUrsor Up - CUU(int) error - - // CUrsor Down - CUD(int) error - - // CUrsor Forward - CUF(int) error - - // CUrsor Backward - CUB(int) error - - // Cursor to Next Line - CNL(int) error - - // Cursor to Previous Line - CPL(int) error - - // Cursor Horizontal position Absolute - CHA(int) error - - // Vertical line Position Absolute - VPA(int) error - - // CUrsor Position - CUP(int, int) error - - // Horizontal and Vertical Position (depends on PUM) - HVP(int, int) error - - // Text Cursor Enable Mode - DECTCEM(bool) error - - // Origin Mode - DECOM(bool) error - - // 132 Column Mode - DECCOLM(bool) 
error - - // Erase in Display - ED(int) error - - // Erase in Line - EL(int) error - - // Insert Line - IL(int) error - - // Delete Line - DL(int) error - - // Insert Character - ICH(int) error - - // Delete Character - DCH(int) error - - // Set Graphics Rendition - SGR([]int) error - - // Pan Down - SU(int) error - - // Pan Up - SD(int) error - - // Device Attributes - DA([]string) error - - // Set Top and Bottom Margins - DECSTBM(int, int) error - - // Index - IND() error - - // Reverse Index - RI() error - - // Flush updates from previous commands - Flush() error -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/ground_state.go b/vendor/src/github.com/Azure/go-ansiterm/ground_state.go deleted file mode 100644 index 52451e9469..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/ground_state.go +++ /dev/null @@ -1,24 +0,0 @@ -package ansiterm - -type groundState struct { - baseState -} - -func (gs groundState) Handle(b byte) (s state, e error) { - gs.parser.context.currentChar = b - - nextState, err := gs.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case sliceContains(printables, b): - return gs, gs.parser.print() - - case sliceContains(executors, b): - return gs, gs.parser.execute() - } - - return gs, nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/osc_string_state.go b/vendor/src/github.com/Azure/go-ansiterm/osc_string_state.go deleted file mode 100644 index 24062d420e..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/osc_string_state.go +++ /dev/null @@ -1,31 +0,0 @@ -package ansiterm - -type oscStringState struct { - baseState -} - -func (oscState oscStringState) Handle(b byte) (s state, e error) { - logger.Infof("OscString::Handle %#x", b) - nextState, err := oscState.baseState.Handle(b) - if nextState != nil || err != nil { - return nextState, err - } - - switch { - case isOscStringTerminator(b): - return oscState.parser.ground, nil - } - - return oscState, nil -} - -// See below for OSC string terminators for linux -// http://man7.org/linux/man-pages/man4/console_codes.4.html -func isOscStringTerminator(b byte) bool { - - if b == ANSI_BEL || b == 0x5C { - return true - } - - return false -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/parser.go b/vendor/src/github.com/Azure/go-ansiterm/parser.go deleted file mode 100644 index 169f68dbef..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/parser.go +++ /dev/null @@ -1,136 +0,0 @@ -package ansiterm - -import ( - "errors" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -type AnsiParser struct { - currState state - eventHandler AnsiEventHandler - context *ansiContext - csiEntry state - csiParam state - dcsEntry state - escape state - escapeIntermediate state - error state - ground state - oscString state - stateMap []state -} - -func CreateParser(initialState string, evtHandler AnsiEventHandler) *AnsiParser { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("ansiParser.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.InfoLevel, - } - - parser := &AnsiParser{ - eventHandler: evtHandler, - context: &ansiContext{}, - } - - parser.csiEntry = csiEntryState{baseState{name: "CsiEntry", parser: parser}} - parser.csiParam = csiParamState{baseState{name: "CsiParam", parser: parser}} - parser.dcsEntry = dcsEntryState{baseState{name: "DcsEntry", parser: parser}} - parser.escape = 
escapeState{baseState{name: "Escape", parser: parser}} - parser.escapeIntermediate = escapeIntermediateState{baseState{name: "EscapeIntermediate", parser: parser}} - parser.error = errorState{baseState{name: "Error", parser: parser}} - parser.ground = groundState{baseState{name: "Ground", parser: parser}} - parser.oscString = oscStringState{baseState{name: "OscString", parser: parser}} - - parser.stateMap = []state{ - parser.csiEntry, - parser.csiParam, - parser.dcsEntry, - parser.escape, - parser.escapeIntermediate, - parser.error, - parser.ground, - parser.oscString, - } - - parser.currState = getState(initialState, parser.stateMap) - - logger.Infof("CreateParser: parser %p", parser) - return parser -} - -func getState(name string, states []state) state { - for _, el := range states { - if el.Name() == name { - return el - } - } - - return nil -} - -func (ap *AnsiParser) Parse(bytes []byte) (int, error) { - for i, b := range bytes { - if err := ap.handle(b); err != nil { - return i, err - } - } - - return len(bytes), ap.eventHandler.Flush() -} - -func (ap *AnsiParser) handle(b byte) error { - ap.context.currentChar = b - newState, err := ap.currState.Handle(b) - if err != nil { - return err - } - - if newState == nil { - logger.Warning("newState is nil") - return errors.New("New state of 'nil' is invalid.") - } - - if newState != ap.currState { - if err := ap.changeState(newState); err != nil { - return err - } - } - - return nil -} - -func (ap *AnsiParser) changeState(newState state) error { - logger.Infof("ChangeState %s --> %s", ap.currState.Name(), newState.Name()) - - // Exit old state - if err := ap.currState.Exit(); err != nil { - logger.Infof("Exit state '%s' failed with : '%v'", ap.currState.Name(), err) - return err - } - - // Perform transition action - if err := ap.currState.Transition(newState); err != nil { - logger.Infof("Transition from '%s' to '%s' failed with: '%v'", ap.currState.Name(), newState.Name, err) - return err - } - - // Enter new state - if err := newState.Enter(); err != nil { - logger.Infof("Enter state '%s' failed with: '%v'", newState.Name(), err) - return err - } - - ap.currState = newState - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/parser_action_helpers.go b/vendor/src/github.com/Azure/go-ansiterm/parser_action_helpers.go deleted file mode 100644 index 8b69a67a5a..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/parser_action_helpers.go +++ /dev/null @@ -1,103 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func parseParams(bytes []byte) ([]string, error) { - paramBuff := make([]byte, 0, 0) - params := []string{} - - for _, v := range bytes { - if v == ';' { - if len(paramBuff) > 0 { - // Completed parameter, append it to the list - s := string(paramBuff) - params = append(params, s) - paramBuff = make([]byte, 0, 0) - } - } else { - paramBuff = append(paramBuff, v) - } - } - - // Last parameter may not be terminated with ';' - if len(paramBuff) > 0 { - s := string(paramBuff) - params = append(params, s) - } - - logger.Infof("Parsed params: %v with length: %d", params, len(params)) - return params, nil -} - -func parseCmd(context ansiContext) (string, error) { - return string(context.currentChar), nil -} - -func getInt(params []string, dflt int) int { - i := getInts(params, 1, dflt)[0] - logger.Infof("getInt: %v", i) - return i -} - -func getInts(params []string, minCount int, dflt int) []int { - ints := []int{} - - for _, v := range params { - i, _ := strconv.Atoi(v) - // Zero is mapped to the default value in 
VT100. - if i == 0 { - i = dflt - } - ints = append(ints, i) - } - - if len(ints) < minCount { - remaining := minCount - len(ints) - for i := 0; i < remaining; i++ { - ints = append(ints, dflt) - } - } - - logger.Infof("getInts: %v", ints) - - return ints -} - -func (ap *AnsiParser) modeDispatch(param string, set bool) error { - switch param { - case "?3": - return ap.eventHandler.DECCOLM(set) - case "?6": - return ap.eventHandler.DECOM(set) - case "?25": - return ap.eventHandler.DECTCEM(set) - } - return nil -} - -func (ap *AnsiParser) hDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], true) - } - - return nil -} - -func (ap *AnsiParser) lDispatch(params []string) error { - if len(params) == 1 { - return ap.modeDispatch(params[0], false) - } - - return nil -} - -func getEraseParam(params []string) int { - param := getInt(params, 0) - if param < 0 || 3 < param { - param = 0 - } - - return param -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/parser_actions.go b/vendor/src/github.com/Azure/go-ansiterm/parser_actions.go deleted file mode 100644 index 58750a2d2b..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/parser_actions.go +++ /dev/null @@ -1,122 +0,0 @@ -package ansiterm - -import ( - "fmt" -) - -func (ap *AnsiParser) collectParam() error { - currChar := ap.context.currentChar - logger.Infof("collectParam %#x", currChar) - ap.context.paramBuffer = append(ap.context.paramBuffer, currChar) - return nil -} - -func (ap *AnsiParser) collectInter() error { - currChar := ap.context.currentChar - logger.Infof("collectInter %#x", currChar) - ap.context.interBuffer = append(ap.context.interBuffer, currChar) - return nil -} - -func (ap *AnsiParser) escDispatch() error { - cmd, _ := parseCmd(*ap.context) - intermeds := ap.context.interBuffer - logger.Infof("escDispatch currentChar: %#x", ap.context.currentChar) - logger.Infof("escDispatch: %v(%v)", cmd, intermeds) - - switch cmd { - case "D": // IND - return ap.eventHandler.IND() - case "E": // NEL, equivalent to CRLF - err := ap.eventHandler.Execute(ANSI_CARRIAGE_RETURN) - if err == nil { - err = ap.eventHandler.Execute(ANSI_LINE_FEED) - } - return err - case "M": // RI - return ap.eventHandler.RI() - } - - return nil -} - -func (ap *AnsiParser) csiDispatch() error { - cmd, _ := parseCmd(*ap.context) - params, _ := parseParams(ap.context.paramBuffer) - - logger.Infof("csiDispatch: %v(%v)", cmd, params) - - switch cmd { - case "@": - return ap.eventHandler.ICH(getInt(params, 1)) - case "A": - return ap.eventHandler.CUU(getInt(params, 1)) - case "B": - return ap.eventHandler.CUD(getInt(params, 1)) - case "C": - return ap.eventHandler.CUF(getInt(params, 1)) - case "D": - return ap.eventHandler.CUB(getInt(params, 1)) - case "E": - return ap.eventHandler.CNL(getInt(params, 1)) - case "F": - return ap.eventHandler.CPL(getInt(params, 1)) - case "G": - return ap.eventHandler.CHA(getInt(params, 1)) - case "H": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.CUP(x, y) - case "J": - param := getEraseParam(params) - return ap.eventHandler.ED(param) - case "K": - param := getEraseParam(params) - return ap.eventHandler.EL(param) - case "L": - return ap.eventHandler.IL(getInt(params, 1)) - case "M": - return ap.eventHandler.DL(getInt(params, 1)) - case "P": - return ap.eventHandler.DCH(getInt(params, 1)) - case "S": - return ap.eventHandler.SU(getInt(params, 1)) - case "T": - return ap.eventHandler.SD(getInt(params, 1)) - case "c": - return ap.eventHandler.DA(params)
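// A worked example of the parameter plumbing used by this switch (a sketch,
// not part of the original source): for the input ESC [ 3 ; 7 m, the CSI
// states collect "3;7" into paramBuffer, parseParams splits that into
// ["3", "7"], and the "m" case below passes getInts(params, 1, 0) = [3, 7]
// to SGR. A bare ESC [ H carries no parameters, so getInts(params, 2, 1)
// pads the result to [1, 1] and CUP homes the cursor.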
- case "d": - return ap.eventHandler.VPA(getInt(params, 1)) - case "f": - ints := getInts(params, 2, 1) - x, y := ints[0], ints[1] - return ap.eventHandler.HVP(x, y) - case "h": - return ap.hDispatch(params) - case "l": - return ap.lDispatch(params) - case "m": - return ap.eventHandler.SGR(getInts(params, 1, 0)) - case "r": - ints := getInts(params, 2, 1) - top, bottom := ints[0], ints[1] - return ap.eventHandler.DECSTBM(top, bottom) - default: - logger.Errorf(fmt.Sprintf("Unsupported CSI command: '%s', with full context: %v", cmd, ap.context)) - return nil - } - -} - -func (ap *AnsiParser) print() error { - return ap.eventHandler.Print(ap.context.currentChar) -} - -func (ap *AnsiParser) clear() error { - ap.context = &ansiContext{} - return nil -} - -func (ap *AnsiParser) execute() error { - return ap.eventHandler.Execute(ap.context.currentChar) -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/states.go b/vendor/src/github.com/Azure/go-ansiterm/states.go deleted file mode 100644 index f2ea1fcd12..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/states.go +++ /dev/null @@ -1,71 +0,0 @@ -package ansiterm - -type stateID int - -type state interface { - Enter() error - Exit() error - Handle(byte) (state, error) - Name() string - Transition(state) error -} - -type baseState struct { - name string - parser *AnsiParser -} - -func (base baseState) Enter() error { - return nil -} - -func (base baseState) Exit() error { - return nil -} - -func (base baseState) Handle(b byte) (s state, e error) { - - switch { - case b == CSI_ENTRY: - return base.parser.csiEntry, nil - case b == DCS_ENTRY: - return base.parser.dcsEntry, nil - case b == ANSI_ESCAPE_PRIMARY: - return base.parser.escape, nil - case b == OSC_STRING: - return base.parser.oscString, nil - case sliceContains(toGroundBytes, b): - return base.parser.ground, nil - } - - return nil, nil -} - -func (base baseState) Name() string { - return base.name -} - -func (base baseState) Transition(s state) error { - if s == base.parser.ground { - execBytes := []byte{0x18} - execBytes = append(execBytes, 0x1A) - execBytes = append(execBytes, getByteRange(0x80, 0x8F)...) - execBytes = append(execBytes, getByteRange(0x91, 0x97)...) 
- execBytes = append(execBytes, 0x99) - execBytes = append(execBytes, 0x9A) - - if sliceContains(execBytes, base.parser.context.currentChar) { - return base.parser.execute() - } - } - - return nil -} - -type dcsEntryState struct { - baseState -} - -type errorState struct { - baseState -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/utilities.go b/vendor/src/github.com/Azure/go-ansiterm/utilities.go deleted file mode 100644 index 392114493a..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/utilities.go +++ /dev/null @@ -1,21 +0,0 @@ -package ansiterm - -import ( - "strconv" -) - -func sliceContains(bytes []byte, b byte) bool { - for _, v := range bytes { - if v == b { - return true - } - } - - return false -} - -func convertBytesToInteger(bytes []byte) int { - s := string(bytes) - i, _ := strconv.Atoi(s) - return i -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/ansi.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/ansi.go deleted file mode 100644 index daf2f06961..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/ansi.go +++ /dev/null @@ -1,182 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "os" - "strconv" - "strings" - "syscall" - - "github.com/Azure/go-ansiterm" -) - -// Windows keyboard constants -// See https://msdn.microsoft.com/en-us/library/windows/desktop/dd375731(v=vs.85).aspx. -const ( - VK_PRIOR = 0x21 // PAGE UP key - VK_NEXT = 0x22 // PAGE DOWN key - VK_END = 0x23 // END key - VK_HOME = 0x24 // HOME key - VK_LEFT = 0x25 // LEFT ARROW key - VK_UP = 0x26 // UP ARROW key - VK_RIGHT = 0x27 // RIGHT ARROW key - VK_DOWN = 0x28 // DOWN ARROW key - VK_SELECT = 0x29 // SELECT key - VK_PRINT = 0x2A // PRINT key - VK_EXECUTE = 0x2B // EXECUTE key - VK_SNAPSHOT = 0x2C // PRINT SCREEN key - VK_INSERT = 0x2D // INS key - VK_DELETE = 0x2E // DEL key - VK_HELP = 0x2F // HELP key - VK_F1 = 0x70 // F1 key - VK_F2 = 0x71 // F2 key - VK_F3 = 0x72 // F3 key - VK_F4 = 0x73 // F4 key - VK_F5 = 0x74 // F5 key - VK_F6 = 0x75 // F6 key - VK_F7 = 0x76 // F7 key - VK_F8 = 0x77 // F8 key - VK_F9 = 0x78 // F9 key - VK_F10 = 0x79 // F10 key - VK_F11 = 0x7A // F11 key - VK_F12 = 0x7B // F12 key - - RIGHT_ALT_PRESSED = 0x0001 - LEFT_ALT_PRESSED = 0x0002 - RIGHT_CTRL_PRESSED = 0x0004 - LEFT_CTRL_PRESSED = 0x0008 - SHIFT_PRESSED = 0x0010 - NUMLOCK_ON = 0x0020 - SCROLLLOCK_ON = 0x0040 - CAPSLOCK_ON = 0x0080 - ENHANCED_KEY = 0x0100 -) - -type ansiCommand struct { - CommandBytes []byte - Command string - Parameters []string - IsSpecial bool -} - -func newAnsiCommand(command []byte) *ansiCommand { - - if isCharacterSelectionCmdChar(command[1]) { - // Is Character Set Selection commands - return &ansiCommand{ - CommandBytes: command, - Command: string(command), - IsSpecial: true, - } - } - - // last char is command character - lastCharIndex := len(command) - 1 - - ac := &ansiCommand{ - CommandBytes: command, - Command: string(command[lastCharIndex]), - IsSpecial: false, - } - - // more than a single escape - if lastCharIndex != 0 { - start := 1 - // skip if double char escape sequence - if command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_ESCAPE_SECONDARY { - start++ - } - // convert this to GetNextParam method - ac.Parameters = strings.Split(string(command[start:lastCharIndex]), ansiterm.ANSI_PARAMETER_SEP) - } - - return ac -} - -func (ac *ansiCommand) paramAsSHORT(index int, defaultValue int16) int16 { - if index < 0 || index >= len(ac.Parameters) { - return defaultValue - } - - param, err := 
strconv.ParseInt(ac.Parameters[index], 10, 16) - if err != nil { - return defaultValue - } - - return int16(param) -} - -func (ac *ansiCommand) String() string { - return fmt.Sprintf("0x%v \"%v\" (\"%v\")", - bytesToHex(ac.CommandBytes), - ac.Command, - strings.Join(ac.Parameters, "\",\"")) -} - -// isAnsiCommandChar returns true if the passed byte falls within the range of ANSI commands. -// See http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html. -func isAnsiCommandChar(b byte) bool { - switch { - case ansiterm.ANSI_COMMAND_FIRST <= b && b <= ansiterm.ANSI_COMMAND_LAST && b != ansiterm.ANSI_ESCAPE_SECONDARY: - return true - case b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_OSC || b == ansiterm.ANSI_CMD_DECPAM || b == ansiterm.ANSI_CMD_DECPNM: - // non-CSI escape sequence terminator - return true - case b == ansiterm.ANSI_CMD_STR_TERM || b == ansiterm.ANSI_BEL: - // String escape sequence terminator - return true - } - return false -} - -func isXtermOscSequence(command []byte, current byte) bool { - return (len(command) >= 2 && command[0] == ansiterm.ANSI_ESCAPE_PRIMARY && command[1] == ansiterm.ANSI_CMD_OSC && current != ansiterm.ANSI_BEL) -} - -func isCharacterSelectionCmdChar(b byte) bool { - return (b == ansiterm.ANSI_CMD_G0 || b == ansiterm.ANSI_CMD_G1 || b == ansiterm.ANSI_CMD_G2 || b == ansiterm.ANSI_CMD_G3) -} - -// bytesToHex converts a slice of bytes to a human-readable string. -func bytesToHex(b []byte) string { - hex := make([]string, len(b)) - for i, ch := range b { - hex[i] = fmt.Sprintf("%X", ch) - } - return strings.Join(hex, "") -} - -// ensureInRange adjusts the passed value, if necessary, to ensure it is within -// the passed min / max range. -func ensureInRange(n int16, min int16, max int16) int16 { - if n < min { - return min - } else if n > max { - return max - } else { - return n - } -} - -func GetStdFile(nFile int) (*os.File, uintptr) { - var file *os.File - switch nFile { - case syscall.STD_INPUT_HANDLE: - file = os.Stdin - case syscall.STD_OUTPUT_HANDLE: - file = os.Stdout - case syscall.STD_ERROR_HANDLE: - file = os.Stderr - default: - panic(fmt.Errorf("Invalid standard handle identifier: %v", nFile)) - } - - fd, err := syscall.GetStdHandle(nFile) - if err != nil { - panic(fmt.Errorf("Invalid standard handle indentifier: %v -- %v", nFile, err)) - } - - return file, uintptr(fd) -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/api.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/api.go deleted file mode 100644 index 462d92f8ef..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/api.go +++ /dev/null @@ -1,322 +0,0 @@ -// +build windows - -package winterm - -import ( - "fmt" - "syscall" - "unsafe" -) - -//=========================================================================================================== -// IMPORTANT NOTE: -// -// The methods below make extensive use of the "unsafe" package to obtain the required pointers. -// Beginning in Go 1.3, the garbage collector may release local variables (e.g., incoming arguments, stack -// variables) the pointers reference *before* the API completes. -// -// As a result, in those cases, the code must hint that the variables remain in active by invoking the -// dummy method "use" (see below). Newer versions of Go are planned to change the mechanism to no longer -// require unsafe pointers. 
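// A sketch of the call pattern this note prescribes (illustrative only;
// someProc stands in for any of the lazily-loaded procs declared below):
//
//	r1, r2, err := someProc.Call(handle, coordToPointer(coord))
//	use(coord) // hint that coord must remain live until the call returns
//	return checkError(r1, r2, err)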
-// -// If you add or modify methods, ENSURE protection of local variables through the "use" builtin to inform -// the garbage collector the variables remain in use if: -// -// -- The value is not a pointer (e.g., int32, struct) -// -- The value is not referenced by the method after passing the pointer to Windows -// -// See http://golang.org/doc/go1.3. -//=========================================================================================================== - -var ( - kernel32DLL = syscall.NewLazyDLL("kernel32.dll") - - getConsoleCursorInfoProc = kernel32DLL.NewProc("GetConsoleCursorInfo") - setConsoleCursorInfoProc = kernel32DLL.NewProc("SetConsoleCursorInfo") - setConsoleCursorPositionProc = kernel32DLL.NewProc("SetConsoleCursorPosition") - setConsoleModeProc = kernel32DLL.NewProc("SetConsoleMode") - getConsoleScreenBufferInfoProc = kernel32DLL.NewProc("GetConsoleScreenBufferInfo") - setConsoleScreenBufferSizeProc = kernel32DLL.NewProc("SetConsoleScreenBufferSize") - scrollConsoleScreenBufferProc = kernel32DLL.NewProc("ScrollConsoleScreenBufferA") - setConsoleTextAttributeProc = kernel32DLL.NewProc("SetConsoleTextAttribute") - setConsoleWindowInfoProc = kernel32DLL.NewProc("SetConsoleWindowInfo") - writeConsoleOutputProc = kernel32DLL.NewProc("WriteConsoleOutputW") - readConsoleInputProc = kernel32DLL.NewProc("ReadConsoleInputW") - waitForSingleObjectProc = kernel32DLL.NewProc("WaitForSingleObject") -) - -// Windows Console constants -const ( - // Console modes - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. - ENABLE_PROCESSED_INPUT = 0x0001 - ENABLE_LINE_INPUT = 0x0002 - ENABLE_ECHO_INPUT = 0x0004 - ENABLE_WINDOW_INPUT = 0x0008 - ENABLE_MOUSE_INPUT = 0x0010 - ENABLE_INSERT_MODE = 0x0020 - ENABLE_QUICK_EDIT_MODE = 0x0040 - ENABLE_EXTENDED_FLAGS = 0x0080 - - ENABLE_PROCESSED_OUTPUT = 0x0001 - ENABLE_WRAP_AT_EOL_OUTPUT = 0x0002 - - // Character attributes - // Note: - // -- The attributes are combined to produce various colors (e.g., Blue + Green will create Cyan). - // Clearing all foreground or background colors results in black; setting all creates white. - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682088(v=vs.85).aspx#_win32_character_attributes. - FOREGROUND_BLUE uint16 = 0x0001 - FOREGROUND_GREEN uint16 = 0x0002 - FOREGROUND_RED uint16 = 0x0004 - FOREGROUND_INTENSITY uint16 = 0x0008 - FOREGROUND_MASK uint16 = 0x000F - - BACKGROUND_BLUE uint16 = 0x0010 - BACKGROUND_GREEN uint16 = 0x0020 - BACKGROUND_RED uint16 = 0x0040 - BACKGROUND_INTENSITY uint16 = 0x0080 - BACKGROUND_MASK uint16 = 0x00F0 - - COMMON_LVB_MASK uint16 = 0xFF00 - COMMON_LVB_REVERSE_VIDEO uint16 = 0x4000 - COMMON_LVB_UNDERSCORE uint16 = 0x8000 - - // Input event types - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. 
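	// These event types populate INPUT_RECORD.EventType (see the type
	// definitions below); a key press, for example, arrives as an
	// INPUT_RECORD with EventType == KEY_EVENT and its KeyEvent field
	// filled in.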
- KEY_EVENT = 0x0001 - MOUSE_EVENT = 0x0002 - WINDOW_BUFFER_SIZE_EVENT = 0x0004 - MENU_EVENT = 0x0008 - FOCUS_EVENT = 0x0010 - - // WaitForSingleObject return codes - WAIT_ABANDONED = 0x00000080 - WAIT_FAILED = 0xFFFFFFFF - WAIT_SIGNALED = 0x0000000 - WAIT_TIMEOUT = 0x00000102 - - // WaitForSingleObject wait duration - WAIT_INFINITE = 0xFFFFFFFF - WAIT_ONE_SECOND = 1000 - WAIT_HALF_SECOND = 500 - WAIT_QUARTER_SECOND = 250 -) - -// Windows API Console types -// -- See https://msdn.microsoft.com/en-us/library/windows/desktop/ms682101(v=vs.85).aspx for Console specific types (e.g., COORD) -// -- See https://msdn.microsoft.com/en-us/library/aa296569(v=vs.60).aspx for comments on alignment -type ( - CHAR_INFO struct { - UnicodeChar uint16 - Attributes uint16 - } - - CONSOLE_CURSOR_INFO struct { - Size uint32 - Visible int32 - } - - CONSOLE_SCREEN_BUFFER_INFO struct { - Size COORD - CursorPosition COORD - Attributes uint16 - Window SMALL_RECT - MaximumWindowSize COORD - } - - COORD struct { - X int16 - Y int16 - } - - SMALL_RECT struct { - Left int16 - Top int16 - Right int16 - Bottom int16 - } - - // INPUT_RECORD is a C/C++ union of which KEY_EVENT_RECORD is one case, it is also the largest - // See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683499(v=vs.85).aspx. - INPUT_RECORD struct { - EventType uint16 - KeyEvent KEY_EVENT_RECORD - } - - KEY_EVENT_RECORD struct { - KeyDown int32 - RepeatCount uint16 - VirtualKeyCode uint16 - VirtualScanCode uint16 - UnicodeChar uint16 - ControlKeyState uint32 - } - - WINDOW_BUFFER_SIZE struct { - Size COORD - } -) - -// boolToBOOL converts a Go bool into a Windows int32. -func boolToBOOL(f bool) int32 { - if f { - return int32(1) - } else { - return int32(0) - } -} - -// GetConsoleCursorInfo retrieves information about the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms683163(v=vs.85).aspx. -func GetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := getConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorInfo sets the size and visiblity of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686019(v=vs.85).aspx. -func SetConsoleCursorInfo(handle uintptr, cursorInfo *CONSOLE_CURSOR_INFO) error { - r1, r2, err := setConsoleCursorInfoProc.Call(handle, uintptr(unsafe.Pointer(cursorInfo)), 0) - return checkError(r1, r2, err) -} - -// SetConsoleCursorPosition location of the console cursor. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx. -func SetConsoleCursorPosition(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleCursorPositionProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// GetConsoleMode gets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683167(v=vs.85).aspx. -func GetConsoleMode(handle uintptr) (mode uint32, err error) { - err = syscall.GetConsoleMode(syscall.Handle(handle), &mode) - return mode, err -} - -// SetConsoleMode sets the console mode for given file descriptor -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx. 
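// Usage sketch (not from the original source): disabling echo on a console
// input handle obtained via GetStdFile:
//
//	_, fd := GetStdFile(syscall.STD_INPUT_HANDLE)
//	if mode, err := GetConsoleMode(fd); err == nil {
//		_ = SetConsoleMode(fd, mode&^ENABLE_ECHO_INPUT)
//	}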
-func SetConsoleMode(handle uintptr, mode uint32) error { - r1, r2, err := setConsoleModeProc.Call(handle, uintptr(mode), 0) - use(mode) - return checkError(r1, r2, err) -} - -// GetConsoleScreenBufferInfo retrieves information about the specified console screen buffer. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms683171(v=vs.85).aspx. -func GetConsoleScreenBufferInfo(handle uintptr) (*CONSOLE_SCREEN_BUFFER_INFO, error) { - info := CONSOLE_SCREEN_BUFFER_INFO{} - err := checkError(getConsoleScreenBufferInfoProc.Call(handle, uintptr(unsafe.Pointer(&info)), 0)) - if err != nil { - return nil, err - } - return &info, nil -} - -func ScrollConsoleScreenBuffer(handle uintptr, scrollRect SMALL_RECT, clipRect SMALL_RECT, destOrigin COORD, char CHAR_INFO) error { - r1, r2, err := scrollConsoleScreenBufferProc.Call(handle, uintptr(unsafe.Pointer(&scrollRect)), uintptr(unsafe.Pointer(&clipRect)), coordToPointer(destOrigin), uintptr(unsafe.Pointer(&char))) - use(scrollRect) - use(clipRect) - use(destOrigin) - use(char) - return checkError(r1, r2, err) -} - -// SetConsoleScreenBufferSize sets the size of the console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686044(v=vs.85).aspx. -func SetConsoleScreenBufferSize(handle uintptr, coord COORD) error { - r1, r2, err := setConsoleScreenBufferSizeProc.Call(handle, coordToPointer(coord)) - use(coord) - return checkError(r1, r2, err) -} - -// SetConsoleTextAttribute sets the attributes of characters written to the -// console screen buffer by the WriteFile or WriteConsole function. -// See http://msdn.microsoft.com/en-us/library/windows/desktop/ms686047(v=vs.85).aspx. -func SetConsoleTextAttribute(handle uintptr, attribute uint16) error { - r1, r2, err := setConsoleTextAttributeProc.Call(handle, uintptr(attribute), 0) - use(attribute) - return checkError(r1, r2, err) -} - -// SetConsoleWindowInfo sets the size and position of the console screen buffer's window. -// Note that the size and location must be within and no larger than the backing console screen buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms686125(v=vs.85).aspx. -func SetConsoleWindowInfo(handle uintptr, isAbsolute bool, rect SMALL_RECT) error { - r1, r2, err := setConsoleWindowInfoProc.Call(handle, uintptr(boolToBOOL(isAbsolute)), uintptr(unsafe.Pointer(&rect))) - use(isAbsolute) - use(rect) - return checkError(r1, r2, err) -} - -// WriteConsoleOutput writes the CHAR_INFOs from the provided buffer to the active console buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687404(v=vs.85).aspx. -func WriteConsoleOutput(handle uintptr, buffer []CHAR_INFO, bufferSize COORD, bufferCoord COORD, writeRegion *SMALL_RECT) error { - r1, r2, err := writeConsoleOutputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), coordToPointer(bufferSize), coordToPointer(bufferCoord), uintptr(unsafe.Pointer(writeRegion))) - use(buffer) - use(bufferSize) - use(bufferCoord) - return checkError(r1, r2, err) -} - -// ReadConsoleInput reads (and removes) data from the console input buffer. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx. 
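// Usage sketch (not from the original source): draining pending key events
// from a console input handle fd:
//
//	records := make([]INPUT_RECORD, 16)
//	var read uint32
//	if err := ReadConsoleInput(fd, records, &read); err == nil {
//		for _, rec := range records[:read] {
//			if rec.EventType == KEY_EVENT && rec.KeyEvent.KeyDown != 0 {
//				// handle rec.KeyEvent.UnicodeChar
//			}
//		}
//	}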
-func ReadConsoleInput(handle uintptr, buffer []INPUT_RECORD, count *uint32) error { - r1, r2, err := readConsoleInputProc.Call(handle, uintptr(unsafe.Pointer(&buffer[0])), uintptr(len(buffer)), uintptr(unsafe.Pointer(count))) - use(buffer) - return checkError(r1, r2, err) -} - -// WaitForSingleObject waits for the passed handle to be signaled. -// It returns true if the handle was signaled; false otherwise. -// See https://msdn.microsoft.com/en-us/library/windows/desktop/ms687032(v=vs.85).aspx. -func WaitForSingleObject(handle uintptr, msWait uint32) (bool, error) { - r1, _, err := waitForSingleObjectProc.Call(handle, uintptr(uint32(msWait))) - switch r1 { - case WAIT_ABANDONED, WAIT_TIMEOUT: - return false, nil - case WAIT_SIGNALED: - return true, nil - } - use(msWait) - return false, err -} - -// String helpers -func (info CONSOLE_SCREEN_BUFFER_INFO) String() string { - return fmt.Sprintf("Size(%v) Cursor(%v) Window(%v) Max(%v)", info.Size, info.CursorPosition, info.Window, info.MaximumWindowSize) -} - -func (coord COORD) String() string { - return fmt.Sprintf("%v,%v", coord.X, coord.Y) -} - -func (rect SMALL_RECT) String() string { - return fmt.Sprintf("(%v,%v),(%v,%v)", rect.Left, rect.Top, rect.Right, rect.Bottom) -} - -// checkError evaluates the results of a Windows API call and returns the error if it failed. -func checkError(r1, r2 uintptr, err error) error { - // Windows APIs return non-zero to indicate success - if r1 != 0 { - return nil - } - - // Return the error if provided, otherwise default to EINVAL - if err != nil { - return err - } - return syscall.EINVAL -} - -// coordToPointer converts a COORD into a uintptr (by fooling the type system). -func coordToPointer(c COORD) uintptr { - // Note: This code assumes the two SHORTs are correctly laid out; the "cast" to uint32 is just to get a pointer to pass. - return uintptr(*((*uint32)(unsafe.Pointer(&c)))) -} - -// use is a no-op, but the compiler cannot see that it is. -// Calling use(p) ensures that p is kept live until that point. -func use(p interface{}) {} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/attr_translation.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/attr_translation.go deleted file mode 100644 index cbec8f728f..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/attr_translation.go +++ /dev/null @@ -1,100 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -const ( - FOREGROUND_COLOR_MASK = FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - BACKGROUND_COLOR_MASK = BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE -) - -// collectAnsiIntoWindowsAttributes modifies the passed Windows text mode flags to reflect the -// request represented by the passed ANSI mode. 
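// For example (not spelled out in the original source): the SGR code for a
// red foreground clears the bits in FOREGROUND_COLOR_MASK and sets
// FOREGROUND_RED, leaving the intensity and background bits untouched, while
// ANSI_SGR_REVERSE leaves the attribute bits alone and only flips the
// returned inverted flag, which callers later resolve via invertAttributes.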
-func collectAnsiIntoWindowsAttributes(windowsMode uint16, inverted bool, baseMode uint16, ansiMode int16) (uint16, bool) { - switch ansiMode { - - // Mode styles - case ansiterm.ANSI_SGR_BOLD: - windowsMode = windowsMode | FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_DIM, ansiterm.ANSI_SGR_BOLD_DIM_OFF: - windowsMode &^= FOREGROUND_INTENSITY - - case ansiterm.ANSI_SGR_UNDERLINE: - windowsMode = windowsMode | COMMON_LVB_UNDERSCORE - - case ansiterm.ANSI_SGR_REVERSE: - inverted = true - - case ansiterm.ANSI_SGR_REVERSE_OFF: - inverted = false - - case ansiterm.ANSI_SGR_UNDERLINE_OFF: - windowsMode &^= COMMON_LVB_UNDERSCORE - - // Foreground colors - case ansiterm.ANSI_SGR_FOREGROUND_DEFAULT: - windowsMode = (windowsMode &^ FOREGROUND_MASK) | (baseMode & FOREGROUND_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_BLACK: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_FOREGROUND_RED: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED - - case ansiterm.ANSI_SGR_FOREGROUND_GREEN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_YELLOW: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN - - case ansiterm.ANSI_SGR_FOREGROUND_BLUE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_MAGENTA: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_CYAN: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_GREEN | FOREGROUND_BLUE - - case ansiterm.ANSI_SGR_FOREGROUND_WHITE: - windowsMode = (windowsMode &^ FOREGROUND_COLOR_MASK) | FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE - - // Background colors - case ansiterm.ANSI_SGR_BACKGROUND_DEFAULT: - // Black with no intensity - windowsMode = (windowsMode &^ BACKGROUND_MASK) | (baseMode & BACKGROUND_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_BLACK: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) - - case ansiterm.ANSI_SGR_BACKGROUND_RED: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED - - case ansiterm.ANSI_SGR_BACKGROUND_GREEN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_YELLOW: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN - - case ansiterm.ANSI_SGR_BACKGROUND_BLUE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_MAGENTA: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_CYAN: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_GREEN | BACKGROUND_BLUE - - case ansiterm.ANSI_SGR_BACKGROUND_WHITE: - windowsMode = (windowsMode &^ BACKGROUND_COLOR_MASK) | BACKGROUND_RED | BACKGROUND_GREEN | BACKGROUND_BLUE - } - - return windowsMode, inverted -} - -// invertAttributes inverts the foreground and background colors of a Windows attributes value -func invertAttributes(windowsMode uint16) uint16 { - return (COMMON_LVB_MASK & windowsMode) | ((FOREGROUND_MASK & windowsMode) << 4) | ((BACKGROUND_MASK & windowsMode) >> 4) -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go deleted file mode 100644 index f015723ade..0000000000 --- 
a/vendor/src/github.com/Azure/go-ansiterm/winterm/cursor_helpers.go +++ /dev/null @@ -1,101 +0,0 @@ -// +build windows - -package winterm - -const ( - horizontal = iota - vertical -) - -func (h *windowsAnsiEventHandler) getCursorWindow(info *CONSOLE_SCREEN_BUFFER_INFO) SMALL_RECT { - if h.originMode { - sr := h.effectiveSr(info.Window) - return SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - } else { - return SMALL_RECT{ - Top: info.Window.Top, - Bottom: info.Window.Bottom, - Left: 0, - Right: info.Size.X - 1, - } - } -} - -// setCursorPosition sets the cursor to the specified position, bounded to the screen size -func (h *windowsAnsiEventHandler) setCursorPosition(position COORD, window SMALL_RECT) error { - position.X = ensureInRange(position.X, window.Left, window.Right) - position.Y = ensureInRange(position.Y, window.Top, window.Bottom) - err := SetConsoleCursorPosition(h.fd, position) - if err != nil { - return err - } - logger.Infof("Cursor position set: (%d, %d)", position.X, position.Y) - return err -} - -func (h *windowsAnsiEventHandler) moveCursorVertical(param int) error { - return h.moveCursor(vertical, param) -} - -func (h *windowsAnsiEventHandler) moveCursorHorizontal(param int) error { - return h.moveCursor(horizontal, param) -} - -func (h *windowsAnsiEventHandler) moveCursor(moveMode int, param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - switch moveMode { - case horizontal: - position.X += int16(param) - case vertical: - position.Y += int16(param) - } - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorLine(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = 0 - position.Y += int16(param) - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) moveCursorColumn(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - position := info.CursorPosition - position.X = int16(param) - 1 - - if err = h.setCursorPosition(position, h.getCursorWindow(info)); err != nil { - return err - } - - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/erase_helpers.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/erase_helpers.go deleted file mode 100644 index 244b5fa25e..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/erase_helpers.go +++ /dev/null @@ -1,84 +0,0 @@ -// +build windows - -package winterm - -import "github.com/Azure/go-ansiterm" - -func (h *windowsAnsiEventHandler) clearRange(attributes uint16, fromCoord COORD, toCoord COORD) error { - // Ignore an invalid (negative area) request - if toCoord.Y < fromCoord.Y { - return nil - } - - var err error - - var coordStart = COORD{} - var coordEnd = COORD{} - - xCurrent, yCurrent := fromCoord.X, fromCoord.Y - xEnd, yEnd := toCoord.X, toCoord.Y - - // Clear any partial initial line - if xCurrent > 0 { - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yCurrent - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent += 1 - } - - // Clear intervening rectangular section - if yCurrent < yEnd { - coordStart.X, coordStart.Y = 
xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd-1 - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - xCurrent = 0 - yCurrent = yEnd - } - - // Clear remaining partial ending line - coordStart.X, coordStart.Y = xCurrent, yCurrent - coordEnd.X, coordEnd.Y = xEnd, yEnd - - err = h.clearRect(attributes, coordStart, coordEnd) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) clearRect(attributes uint16, fromCoord COORD, toCoord COORD) error { - region := SMALL_RECT{Top: fromCoord.Y, Left: fromCoord.X, Bottom: toCoord.Y, Right: toCoord.X} - width := toCoord.X - fromCoord.X + 1 - height := toCoord.Y - fromCoord.Y + 1 - size := uint32(width) * uint32(height) - - if size <= 0 { - return nil - } - - buffer := make([]CHAR_INFO, size) - - char := CHAR_INFO{ansiterm.FILL_CHARACTER, attributes} - for i := 0; i < int(size); i++ { - buffer[i] = char - } - - err := WriteConsoleOutput(h.fd, buffer, COORD{X: width, Y: height}, COORD{X: 0, Y: 0}, ®ion) - if err != nil { - return err - } - - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/scroll_helper.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/scroll_helper.go deleted file mode 100644 index 706d270577..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/scroll_helper.go +++ /dev/null @@ -1,118 +0,0 @@ -// +build windows - -package winterm - -// effectiveSr gets the current effective scroll region in buffer coordinates -func (h *windowsAnsiEventHandler) effectiveSr(window SMALL_RECT) scrollRegion { - top := addInRange(window.Top, h.sr.top, window.Top, window.Bottom) - bottom := addInRange(window.Top, h.sr.bottom, window.Top, window.Bottom) - if top >= bottom { - top = window.Top - bottom = window.Bottom - } - return scrollRegion{top: top, bottom: bottom} -} - -func (h *windowsAnsiEventHandler) scrollUp(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - return h.scroll(param, sr, info) -} - -func (h *windowsAnsiEventHandler) scrollDown(param int) error { - return h.scrollUp(-param) -} - -func (h *windowsAnsiEventHandler) deleteLines(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - start := info.CursorPosition.Y - sr := h.effectiveSr(info.Window) - // Lines cannot be inserted or deleted outside the scrolling region. - if start >= sr.top && start <= sr.bottom { - sr.top = start - return h.scroll(param, sr, info) - } else { - return nil - } -} - -func (h *windowsAnsiEventHandler) insertLines(param int) error { - return h.deleteLines(-param) -} - -// scroll scrolls the provided scroll region by param lines. The scroll region is in buffer coordinates. 
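// A positive param scrolls the region's contents up and a negative one
// scrolls them down: the copy below targets destOrigin.Y = sr.top - param,
// which is why scrollDown and insertLines above simply negate their
// argument. Rows uncovered by the copy are filled with blanks carrying the
// handler's current attributes.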
-func (h *windowsAnsiEventHandler) scroll(param int, sr scrollRegion, info *CONSOLE_SCREEN_BUFFER_INFO) error { - logger.Infof("scroll: scrollTop: %d, scrollBottom: %d", sr.top, sr.bottom) - logger.Infof("scroll: windowTop: %d, windowBottom: %d", info.Window.Top, info.Window.Bottom) - - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: sr.top, - Bottom: sr.bottom, - Left: 0, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: 0, - Y: sr.top - int16(param), - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} - -func (h *windowsAnsiEventHandler) deleteCharacters(param int) error { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - return h.scrollLine(param, info.CursorPosition, info) -} - -func (h *windowsAnsiEventHandler) insertCharacters(param int) error { - return h.deleteCharacters(-param) -} - -// scrollLine scrolls a line horizontally starting at the provided position by a number of columns. -func (h *windowsAnsiEventHandler) scrollLine(columns int, position COORD, info *CONSOLE_SCREEN_BUFFER_INFO) error { - // Copy from and clip to the scroll region (full buffer width) - scrollRect := SMALL_RECT{ - Top: position.Y, - Bottom: position.Y, - Left: position.X, - Right: info.Size.X - 1, - } - - // Origin to which area should be copied - destOrigin := COORD{ - X: position.X - int16(columns), - Y: position.Y, - } - - char := CHAR_INFO{ - UnicodeChar: ' ', - Attributes: h.attributes, - } - - if err := ScrollConsoleScreenBuffer(h.fd, scrollRect, scrollRect, destOrigin, char); err != nil { - return err - } - return nil -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/utilities.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/utilities.go deleted file mode 100644 index afa7635d77..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/utilities.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build windows - -package winterm - -// AddInRange increments a value by the passed quantity while ensuring the values -// always remain within the supplied min / max range. 
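// For example, addInRange(10, 5, 0, 12) yields 12 rather than 15, since the
// sum is clamped to the supplied maximum.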
-func addInRange(n int16, increment int16, min int16, max int16) int16 { - return ensureInRange(n+increment, min, max) -} diff --git a/vendor/src/github.com/Azure/go-ansiterm/winterm/win_event_handler.go b/vendor/src/github.com/Azure/go-ansiterm/winterm/win_event_handler.go deleted file mode 100644 index 4d858ed611..0000000000 --- a/vendor/src/github.com/Azure/go-ansiterm/winterm/win_event_handler.go +++ /dev/null @@ -1,726 +0,0 @@ -// +build windows - -package winterm - -import ( - "bytes" - "io/ioutil" - "os" - "strconv" - - "github.com/Azure/go-ansiterm" - "github.com/Sirupsen/logrus" -) - -var logger *logrus.Logger - -type windowsAnsiEventHandler struct { - fd uintptr - file *os.File - infoReset *CONSOLE_SCREEN_BUFFER_INFO - sr scrollRegion - buffer bytes.Buffer - attributes uint16 - inverted bool - wrapNext bool - drewMarginByte bool - originMode bool - marginByte byte - curInfo *CONSOLE_SCREEN_BUFFER_INFO - curPos COORD -} - -func CreateWinEventHandler(fd uintptr, file *os.File) ansiterm.AnsiEventHandler { - logFile := ioutil.Discard - - if isDebugEnv := os.Getenv(ansiterm.LogEnv); isDebugEnv == "1" { - logFile, _ = os.Create("winEventHandler.log") - } - - logger = &logrus.Logger{ - Out: logFile, - Formatter: new(logrus.TextFormatter), - Level: logrus.DebugLevel, - } - - infoReset, err := GetConsoleScreenBufferInfo(fd) - if err != nil { - return nil - } - - return &windowsAnsiEventHandler{ - fd: fd, - file: file, - infoReset: infoReset, - attributes: infoReset.Attributes, - } -} - -type scrollRegion struct { - top int16 - bottom int16 -} - -// simulateLF simulates a LF or CR+LF by scrolling if necessary to handle the -// current cursor position and scroll region settings, in which case it returns -// true. If no special handling is necessary, then it does nothing and returns -// false. -// -// In the false case, the caller should ensure that a carriage return -// and line feed are inserted or that the text is otherwise wrapped. -func (h *windowsAnsiEventHandler) simulateLF(includeCR bool) (bool, error) { - if h.wrapNext { - if err := h.Flush(); err != nil { - return false, err - } - h.clearWrap() - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return false, err - } - sr := h.effectiveSr(info.Window) - if pos.Y == sr.bottom { - // Scrolling is necessary. Let Windows automatically scroll if the scrolling region - // is the full window. - if sr.top == info.Window.Top && sr.bottom == info.Window.Bottom { - if includeCR { - pos.X = 0 - h.updatePos(pos) - } - return false, nil - } - - // A custom scroll region is active. Scroll the window manually to simulate - // the LF. - if err := h.Flush(); err != nil { - return false, err - } - logger.Info("Simulating LF inside scroll region") - if err := h.scrollUp(1); err != nil { - return false, err - } - if includeCR { - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - - } else if pos.Y < info.Window.Bottom { - // Let Windows handle the LF. - pos.Y++ - if includeCR { - pos.X = 0 - } - h.updatePos(pos) - return false, nil - } else { - // The cursor is at the bottom of the screen but outside the scroll - // region. Skip the LF. - logger.Info("Simulating LF outside scroll region") - if includeCR { - if err := h.Flush(); err != nil { - return false, err - } - pos.X = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return false, err - } - } - return true, nil - } -} - -// executeLF executes a LF without a CR. 
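// (The Windows console implicitly returns the cursor to column 0 on a line
// feed, so after emitting the LF this function restores the column that a
// bare LF should have preserved.)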
-func (h *windowsAnsiEventHandler) executeLF() error { - handled, err := h.simulateLF(false) - if err != nil { - return err - } - if !handled { - // Windows LF will reset the cursor column position. Write the LF - // and restore the cursor position. - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - if pos.X != 0 { - if err := h.Flush(); err != nil { - return err - } - logger.Info("Resetting cursor position for LF without CR") - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - } - return nil -} - -func (h *windowsAnsiEventHandler) Print(b byte) error { - if h.wrapNext { - h.buffer.WriteByte(h.marginByte) - h.clearWrap() - if _, err := h.simulateLF(true); err != nil { - return err - } - } - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X == info.Size.X-1 { - h.wrapNext = true - h.marginByte = b - } else { - pos.X++ - h.updatePos(pos) - h.buffer.WriteByte(b) - } - return nil -} - -func (h *windowsAnsiEventHandler) Execute(b byte) error { - switch b { - case ansiterm.ANSI_TAB: - logger.Info("Execute(TAB)") - // Move to the next tab stop, but preserve auto-wrap if already set. - if !h.wrapNext { - pos, info, err := h.getCurrentInfo() - if err != nil { - return err - } - pos.X = (pos.X + 8) - pos.X%8 - if pos.X >= info.Size.X { - pos.X = info.Size.X - 1 - } - if err := h.Flush(); err != nil { - return err - } - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - } - return nil - - case ansiterm.ANSI_BEL: - h.buffer.WriteByte(ansiterm.ANSI_BEL) - return nil - - case ansiterm.ANSI_BACKSPACE: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X > 0 { - pos.X-- - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_BACKSPACE) - } - return nil - - case ansiterm.ANSI_VERTICAL_TAB, ansiterm.ANSI_FORM_FEED: - // Treat as true LF. - return h.executeLF() - - case ansiterm.ANSI_LINE_FEED: - // Simulate a CR and LF for now since there is no way in go-ansiterm - // to tell if the LF should include CR (and more things break when it's - // missing than when it's incorrectly added). 
- handled, err := h.simulateLF(true) - if handled || err != nil { - return err - } - return h.buffer.WriteByte(ansiterm.ANSI_LINE_FEED) - - case ansiterm.ANSI_CARRIAGE_RETURN: - if h.wrapNext { - if err := h.Flush(); err != nil { - return err - } - h.clearWrap() - } - pos, _, err := h.getCurrentInfo() - if err != nil { - return err - } - if pos.X != 0 { - pos.X = 0 - h.updatePos(pos) - h.buffer.WriteByte(ansiterm.ANSI_CARRIAGE_RETURN) - } - return nil - - default: - return nil - } -} - -func (h *windowsAnsiEventHandler) CUU(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(-param) -} - -func (h *windowsAnsiEventHandler) CUD(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorVertical(param) -} - -func (h *windowsAnsiEventHandler) CUF(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUF: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(param) -} - -func (h *windowsAnsiEventHandler) CUB(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUB: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorHorizontal(-param) -} - -func (h *windowsAnsiEventHandler) CNL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CNL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(param) -} - -func (h *windowsAnsiEventHandler) CPL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CPL: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorLine(-param) -} - -func (h *windowsAnsiEventHandler) CHA(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CHA: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.moveCursorColumn(param) -} - -func (h *windowsAnsiEventHandler) VPA(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("VPA: [[%d]]", param) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - window := h.getCursorWindow(info) - position := info.CursorPosition - position.Y = window.Top + int16(param) - 1 - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) CUP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("CUP: [[%d %d]]", row, col) - h.clearWrap() - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - window := h.getCursorWindow(info) - position := COORD{window.Left + int16(col) - 1, window.Top + int16(row) - 1} - return h.setCursorPosition(position, window) -} - -func (h *windowsAnsiEventHandler) HVP(row int, col int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("HVP: [[%d %d]]", row, col) - h.clearWrap() - return h.CUP(row, col) -} - -func (h *windowsAnsiEventHandler) DECTCEM(visible bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECTCEM: [%v]", []string{strconv.FormatBool(visible)}) - h.clearWrap() - return nil -} - -func (h *windowsAnsiEventHandler) DECOM(enable bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECOM: [%v]", []string{strconv.FormatBool(enable)}) - 
h.clearWrap() - h.originMode = enable - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) DECCOLM(use132 bool) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECCOLM: [%v]", []string{strconv.FormatBool(use132)}) - h.clearWrap() - if err := h.ED(2); err != nil { - return err - } - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - targetWidth := int16(80) - if use132 { - targetWidth = 132 - } - if info.Size.X < targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) - return err - } - } - window := info.Window - window.Left = 0 - window.Right = targetWidth - 1 - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - logger.Info("set window failed:", err) - return err - } - if info.Size.X > targetWidth { - if err := SetConsoleScreenBufferSize(h.fd, COORD{targetWidth, info.Size.Y}); err != nil { - logger.Info("set buffer failed:", err) - return err - } - } - return SetConsoleCursorPosition(h.fd, COORD{0, 0}) -} - -func (h *windowsAnsiEventHandler) ED(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("ED: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - - // [J -- Erases from the cursor to the end of the screen, including the cursor position. - // [1J -- Erases from the beginning of the screen to the cursor, including the cursor position. - // [2J -- Erases the complete display. The cursor does not move. - // Notes: - // -- Clearing the entire buffer, versus just the Window, works best for Windows Consoles - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X - 1, info.Size.Y - 1} - - case 1: - start = COORD{0, 0} - end = info.CursorPosition - - case 2: - start = COORD{0, 0} - end = COORD{info.Size.X - 1, info.Size.Y - 1} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - // If the whole buffer was cleared, move the window to the top while preserving - // the window-relative cursor position. - if param == 2 { - pos := info.CursorPosition - window := info.Window - pos.Y -= window.Top - window.Bottom -= window.Top - window.Top = 0 - if err := SetConsoleCursorPosition(h.fd, pos); err != nil { - return err - } - if err := SetConsoleWindowInfo(h.fd, true, window); err != nil { - return err - } - } - - return nil -} - -func (h *windowsAnsiEventHandler) EL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("EL: [%v]", strconv.Itoa(param)) - h.clearWrap() - - // [K -- Erases from the cursor to the end of the line, including the cursor position. - // [1K -- Erases from the beginning of the line to the cursor, including the cursor position. - // [2K -- Erases the complete line. 
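	// Unlike ED above, EL never moves the cursor or the window; the three
	// cases only differ in the start and end coordinates handed to
	// clearRange on the cursor's row.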
- - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - var start COORD - var end COORD - - switch param { - case 0: - start = info.CursorPosition - end = COORD{info.Size.X, info.CursorPosition.Y} - - case 1: - start = COORD{0, info.CursorPosition.Y} - end = info.CursorPosition - - case 2: - start = COORD{0, info.CursorPosition.Y} - end = COORD{info.Size.X, info.CursorPosition.Y} - } - - err = h.clearRange(h.attributes, start, end) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) IL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("IL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertLines(param) -} - -func (h *windowsAnsiEventHandler) DL(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DL: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteLines(param) -} - -func (h *windowsAnsiEventHandler) ICH(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("ICH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.insertCharacters(param) -} - -func (h *windowsAnsiEventHandler) DCH(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DCH: [%v]", strconv.Itoa(param)) - h.clearWrap() - return h.deleteCharacters(param) -} - -func (h *windowsAnsiEventHandler) SGR(params []int) error { - if err := h.Flush(); err != nil { - return err - } - strings := []string{} - for _, v := range params { - strings = append(strings, strconv.Itoa(v)) - } - - logger.Infof("SGR: [%v]", strings) - - if len(params) <= 0 { - h.attributes = h.infoReset.Attributes - h.inverted = false - } else { - for _, attr := range params { - - if attr == ansiterm.ANSI_SGR_RESET { - h.attributes = h.infoReset.Attributes - h.inverted = false - continue - } - - h.attributes, h.inverted = collectAnsiIntoWindowsAttributes(h.attributes, h.inverted, h.infoReset.Attributes, int16(attr)) - } - } - - attributes := h.attributes - if h.inverted { - attributes = invertAttributes(attributes) - } - err := SetConsoleTextAttribute(h.fd, attributes) - if err != nil { - return err - } - - return nil -} - -func (h *windowsAnsiEventHandler) SU(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("SU: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollUp(param) -} - -func (h *windowsAnsiEventHandler) SD(param int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("SD: [%v]", []string{strconv.Itoa(param)}) - h.clearWrap() - return h.scrollDown(param) -} - -func (h *windowsAnsiEventHandler) DA(params []string) error { - logger.Infof("DA: [%v]", params) - // DA cannot be implemented because it must send data on the VT100 input stream, - // which is not available to go-ansiterm. - return nil -} - -func (h *windowsAnsiEventHandler) DECSTBM(top int, bottom int) error { - if err := h.Flush(); err != nil { - return err - } - logger.Infof("DECSTBM: [%d, %d]", top, bottom) - - // Windows is 0 indexed, Linux is 1 indexed - h.sr.top = int16(top - 1) - h.sr.bottom = int16(bottom - 1) - - // This command also moves the cursor to the origin. 
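	// For example (not in the original source), ESC[5;20r arrives here as
	// top=5, bottom=20 and is stored as the zero-indexed region {4, 19}
	// before the cursor is homed via CUP(1, 1).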
- h.clearWrap() - return h.CUP(1, 1) -} - -func (h *windowsAnsiEventHandler) RI() error { - if err := h.Flush(); err != nil { - return err - } - logger.Info("RI: []") - h.clearWrap() - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - sr := h.effectiveSr(info.Window) - if info.CursorPosition.Y == sr.top { - return h.scrollDown(1) - } - - return h.moveCursorVertical(-1) -} - -func (h *windowsAnsiEventHandler) IND() error { - logger.Info("IND: []") - return h.executeLF() -} - -func (h *windowsAnsiEventHandler) Flush() error { - h.curInfo = nil - if h.buffer.Len() > 0 { - logger.Infof("Flush: [%s]", h.buffer.Bytes()) - if _, err := h.buffer.WriteTo(h.file); err != nil { - return err - } - } - - if h.wrapNext && !h.drewMarginByte { - logger.Infof("Flush: drawing margin byte '%c'", h.marginByte) - - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return err - } - - charInfo := []CHAR_INFO{{UnicodeChar: uint16(h.marginByte), Attributes: info.Attributes}} - size := COORD{1, 1} - position := COORD{0, 0} - region := SMALL_RECT{Left: info.CursorPosition.X, Top: info.CursorPosition.Y, Right: info.CursorPosition.X, Bottom: info.CursorPosition.Y} - if err := WriteConsoleOutput(h.fd, charInfo, size, position, ®ion); err != nil { - return err - } - h.drewMarginByte = true - } - return nil -} - -// cacheConsoleInfo ensures that the current console screen information has been queried -// since the last call to Flush(). It must be called before accessing h.curInfo or h.curPos. -func (h *windowsAnsiEventHandler) getCurrentInfo() (COORD, *CONSOLE_SCREEN_BUFFER_INFO, error) { - if h.curInfo == nil { - info, err := GetConsoleScreenBufferInfo(h.fd) - if err != nil { - return COORD{}, nil, err - } - h.curInfo = info - h.curPos = info.CursorPosition - } - return h.curPos, h.curInfo, nil -} - -func (h *windowsAnsiEventHandler) updatePos(pos COORD) { - if h.curInfo == nil { - panic("failed to call getCurrentInfo before calling updatePos") - } - h.curPos = pos -} - -// clearWrap clears the state where the cursor is in the margin -// waiting for the next character before wrapping the line. This must -// be done before most operations that act on the cursor. -func (h *windowsAnsiEventHandler) clearWrap() { - h.wrapNext = false - h.drewMarginByte = false -} diff --git a/vendor/src/github.com/BurntSushi/toml/.gitignore b/vendor/src/github.com/BurntSushi/toml/.gitignore deleted file mode 100644 index 0cd3800377..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/.gitignore +++ /dev/null @@ -1,5 +0,0 @@ -TAGS -tags -.*.swp -tomlcheck/tomlcheck -toml.test diff --git a/vendor/src/github.com/BurntSushi/toml/.travis.yml b/vendor/src/github.com/BurntSushi/toml/.travis.yml deleted file mode 100644 index 43caf6d021..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/.travis.yml +++ /dev/null @@ -1,12 +0,0 @@ -language: go -go: - - 1.1 - - 1.2 - - tip -install: - - go install ./... 
- - go get github.com/BurntSushi/toml-test -script: - - export PATH="$PATH:$HOME/gopath/bin" - - make test - diff --git a/vendor/src/github.com/BurntSushi/toml/COMPATIBLE b/vendor/src/github.com/BurntSushi/toml/COMPATIBLE deleted file mode 100644 index 21e0938cae..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/COMPATIBLE +++ /dev/null @@ -1,3 +0,0 @@ -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - diff --git a/vendor/src/github.com/BurntSushi/toml/COPYING b/vendor/src/github.com/BurntSushi/toml/COPYING deleted file mode 100644 index 5a8e332545..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/src/github.com/BurntSushi/toml/Makefile b/vendor/src/github.com/BurntSushi/toml/Makefile deleted file mode 100644 index 3600848d33..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/Makefile +++ /dev/null @@ -1,19 +0,0 @@ -install: - go install ./... - -test: install - go test -v - toml-test toml-test-decoder - toml-test -encoder toml-test-encoder - -fmt: - gofmt -w *.go */*.go - colcheck *.go */*.go - -tags: - find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS - -push: - git push origin master - git push github master - diff --git a/vendor/src/github.com/BurntSushi/toml/README.md b/vendor/src/github.com/BurntSushi/toml/README.md deleted file mode 100644 index e861c0ca7b..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/README.md +++ /dev/null @@ -1,220 +0,0 @@ -## TOML parser and encoder for Go with reflection - -TOML stands for Tom's Obvious, Minimal Language. This Go package provides a -reflection interface similar to Go's standard library `json` and `xml` -packages. This package also supports the `encoding.TextUnmarshaler` and -`encoding.TextMarshaler` interfaces so that you can define custom data -representations. (There is an example of this below.) - -Spec: https://github.com/mojombo/toml - -Compatible with TOML version -[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md) - -Documentation: http://godoc.org/github.com/BurntSushi/toml - -Installation: - -```bash -go get github.com/BurntSushi/toml -``` - -Try the toml validator: - -```bash -go get github.com/BurntSushi/toml/cmd/tomlv -tomlv some-toml-file.toml -``` - -[![Build status](https://api.travis-ci.org/BurntSushi/toml.png)](https://travis-ci.org/BurntSushi/toml) - - -### Testing - -This package passes all tests in -[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder -and the encoder. - -### Examples - -This package works similarly to how the Go standard library handles `XML` -and `JSON`. Namely, data is loaded into Go values via reflection. 
- -For the simplest example, consider some TOML file as just a list of keys -and values: - -```toml -Age = 25 -Cats = [ "Cauchy", "Plato" ] -Pi = 3.14 -Perfection = [ 6, 28, 496, 8128 ] -DOB = 1987-07-05T05:45:00Z -``` - -Which could be defined in Go as: - -```go -type Config struct { - Age int - Cats []string - Pi float64 - Perfection []int - DOB time.Time // requires `import time` -} -``` - -And then decoded with: - -```go -var conf Config -if _, err := toml.Decode(tomlData, &conf); err != nil { - // handle error -} -``` - -You can also use struct tags if your struct field name doesn't map to a TOML -key value directly: - -```toml -some_key_NAME = "wat" -``` - -```go -type TOML struct { - ObscureKey string `toml:"some_key_NAME"` -} -``` - -### Using the `encoding.TextUnmarshaler` interface - -Here's an example that automatically parses duration strings into -`time.Duration` values: - -```toml -[[song]] -name = "Thunder Road" -duration = "4m49s" - -[[song]] -name = "Stairway to Heaven" -duration = "8m03s" -``` - -Which can be decoded with: - -```go -type song struct { - Name string - Duration duration -} -type songs struct { - Song []song -} -var favorites songs -if _, err := toml.Decode(blob, &favorites); err != nil { - log.Fatal(err) -} - -for _, s := range favorites.Song { - fmt.Printf("%s (%s)\n", s.Name, s.Duration) -} -``` - -And you'll also need a `duration` type that satisfies the -`encoding.TextUnmarshaler` interface: - -```go -type duration struct { - time.Duration -} - -func (d *duration) UnmarshalText(text []byte) error { - var err error - d.Duration, err = time.ParseDuration(string(text)) - return err -} -``` - -### More complex usage - -Here's an example of how to load the example from the official spec page: - -```toml -# This is a TOML document. Boom. - -title = "TOML Example" - -[owner] -name = "Tom Preston-Werner" -organization = "GitHub" -bio = "GitHub Cofounder & CEO\nLikes tater tots and beer." -dob = 1979-05-27T07:32:00Z # First class dates? Why not? - -[database] -server = "192.168.1.1" -ports = [ 8001, 8001, 8002 ] -connection_max = 5000 -enabled = true - -[servers] - - # You can indent as you please. Tabs or spaces. TOML don't care. - [servers.alpha] - ip = "10.0.0.1" - dc = "eqdc10" - - [servers.beta] - ip = "10.0.0.2" - dc = "eqdc10" - -[clients] -data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it - -# Line breaks are OK when inside arrays -hosts = [ - "alpha", - "omega" -] -``` - -And the corresponding Go types are: - -```go -type tomlConfig struct { - Title string - Owner ownerInfo - DB database `toml:"database"` - Servers map[string]server - Clients clients -} - -type ownerInfo struct { - Name string - Org string `toml:"organization"` - Bio string - DOB time.Time -} - -type database struct { - Server string - Ports []int - ConnMax int `toml:"connection_max"` - Enabled bool -} - -type server struct { - IP string - DC string -} - -type clients struct { - Data [][]interface{} - Hosts []string -} -``` - -Note that a case insensitive match will be tried if an exact match can't be -found. - -A working example of the above can be found in `_examples/example.{go,toml}`. 
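The README's snippets above are fragments; for orientation, a complete, runnable version of the simplest example might look like this (a minimal sketch, assuming only the `github.com/BurntSushi/toml` import path from the installation step):

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Config mirrors the simple key/value document from the README.
type Config struct {
	Age        int
	Cats       []string
	Pi         float64
	Perfection []int
}

func main() {
	const blob = `
Age = 25
Cats = [ "Cauchy", "Plato" ]
Pi = 3.14
Perfection = [ 6, 28, 496, 8128 ]
`
	var conf Config
	// Decode also returns MetaData (ignored here), described further below.
	if _, err := toml.Decode(blob, &conf); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", conf)
}
```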
- diff --git a/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING b/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING deleted file mode 100644 index 5a8e332545..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-decoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING b/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING deleted file mode 100644 index 5a8e332545..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/cmd/toml-test-encoder/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING b/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING deleted file mode 100644 index 5a8e332545..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/cmd/tomlv/COPYING +++ /dev/null @@ -1,14 +0,0 @@ - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - Version 2, December 2004 - - Copyright (C) 2004 Sam Hocevar - - Everyone is permitted to copy and distribute verbatim or modified - copies of this license document, and changing it is allowed as long - as the name is changed. - - DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. You just DO WHAT THE FUCK YOU WANT TO. - diff --git a/vendor/src/github.com/BurntSushi/toml/decode.go b/vendor/src/github.com/BurntSushi/toml/decode.go deleted file mode 100644 index 6c7d398b89..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/decode.go +++ /dev/null @@ -1,492 +0,0 @@ -package toml - -import ( - "fmt" - "io" - "io/ioutil" - "math" - "reflect" - "strings" - "time" -) - -var e = fmt.Errorf - -// Unmarshaler is the interface implemented by objects that can unmarshal a -// TOML description of themselves. -type Unmarshaler interface { - UnmarshalTOML(interface{}) error -} - -// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`. -func Unmarshal(p []byte, v interface{}) error { - _, err := Decode(string(p), v) - return err -} - -// Primitive is a TOML value that hasn't been decoded into a Go value. -// When using the various `Decode*` functions, the type `Primitive` may -// be given to any value, and its decoding will be delayed. -// -// A `Primitive` value can be decoded using the `PrimitiveDecode` function. -// -// The underlying representation of a `Primitive` value is subject to change. -// Do not rely on it. -// -// N.B. Primitive values are still parsed, so using them will only avoid -// the overhead of reflection. They can be useful when you don't know the -// exact type of TOML data until run time. 
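To make the delayed-decoding contract in the comment above concrete, here is a short sketch; the config shape and key names are hypothetical:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type partial struct {
	Kind   string
	Server toml.Primitive // decoding of this subtree is delayed
}

func main() {
	const blob = `
Kind = "tcp"

[Server]
IP = "10.0.0.1"
Port = 8080
`
	var p partial
	md, err := toml.Decode(blob, &p)
	if err != nil {
		log.Fatal(err)
	}
	// A concrete type is chosen at run time, then decoding is finished.
	var srv struct {
		IP   string
		Port int
	}
	if err := md.PrimitiveDecode(p.Server, &srv); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", srv)
}
```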
-type Primitive struct { - undecoded interface{} - context Key -} - -// DEPRECATED! -// -// Use MetaData.PrimitiveDecode instead. -func PrimitiveDecode(primValue Primitive, v interface{}) error { - md := MetaData{decoded: make(map[string]bool)} - return md.unify(primValue.undecoded, rvalue(v)) -} - -// PrimitiveDecode is just like the other `Decode*` functions, except it -// decodes a TOML value that has already been parsed. Valid primitive values -// can *only* be obtained from values filled by the decoder functions, -// including this method. (i.e., `v` may contain more `Primitive` -// values.) -// -// Meta data for primitive values is included in the meta data returned by -// the `Decode*` functions with one exception: keys returned by the Undecoded -// method will only reflect keys that were decoded. Namely, any keys hidden -// behind a Primitive will be considered undecoded. Executing this method will -// update the undecoded keys in the meta data. (See the example.) -func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error { - md.context = primValue.context - defer func() { md.context = nil }() - return md.unify(primValue.undecoded, rvalue(v)) -} - -// Decode will decode the contents of `data` in TOML format into a pointer -// `v`. -// -// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be -// used interchangeably.) -// -// TOML arrays of tables correspond to either a slice of structs or a slice -// of maps. -// -// TOML datetimes correspond to Go `time.Time` values. -// -// All other TOML types (float, string, int, bool and array) correspond -// to the obvious Go types. -// -// An exception to the above rules is if a type implements the -// encoding.TextUnmarshaler interface. In this case, any primitive TOML value -// (floats, strings, integers, booleans and datetimes) will be converted to -// a byte string and given to the value's UnmarshalText method. See the -// Unmarshaler example for a demonstration with time duration strings. -// -// Key mapping -// -// TOML keys can map to either keys in a Go map or field names in a Go -// struct. The special `toml` struct tag may be used to map TOML keys to -// struct fields that don't match the key name exactly. (See the example.) -// A case insensitive match to struct names will be tried if an exact match -// can't be found. -// -// The mapping between TOML values and Go values is loose. That is, there -// may exist TOML values that cannot be placed into your representation, and -// there may be parts of your representation that do not correspond to -// TOML values. This loose mapping can be made stricter by using the IsDefined -// and/or Undecoded methods on the MetaData returned. -// -// This decoder will not handle cyclic types. If a cyclic type is passed, -// `Decode` will not terminate. -func Decode(data string, v interface{}) (MetaData, error) { - p, err := parse(data) - if err != nil { - return MetaData{}, err - } - md := MetaData{ - p.mapping, p.types, p.ordered, - make(map[string]bool, len(p.ordered)), nil, - } - return md, md.unify(p.mapping, rvalue(v)) -} - -// DecodeFile is just like Decode, except it will automatically read the -// contents of the file at `fpath` and decode it for you. -func DecodeFile(fpath string, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadFile(fpath) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// DecodeReader is just like Decode, except it will consume all bytes -// from the reader and decode it for you. 
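Both DecodeFile and DecodeReader are thin conveniences over Decode; a usage sketch for the reader variant (input is illustrative):

```go
package main

import (
	"fmt"
	"log"
	"strings"

	"github.com/BurntSushi/toml"
)

func main() {
	r := strings.NewReader(`answer = 42`)
	var v struct{ Answer int } // matched case-insensitively, per the docs
	// DecodeReader slurps the reader and defers to Decode.
	if _, err := toml.DecodeReader(r, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Answer) // 42
}
```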
-func DecodeReader(r io.Reader, v interface{}) (MetaData, error) { - bs, err := ioutil.ReadAll(r) - if err != nil { - return MetaData{}, err - } - return Decode(string(bs), v) -} - -// unify performs a sort of type unification based on the structure of `rv`, -// which is the client representation. -// -// Any type mismatch produces an error. Finding a type that we don't know -// how to handle produces an unsupported type error. -func (md *MetaData) unify(data interface{}, rv reflect.Value) error { - - // Special case. Look for a `Primitive` value. - if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() { - // Save the undecoded data and the key context into the primitive - // value. - context := make(Key, len(md.context)) - copy(context, md.context) - rv.Set(reflect.ValueOf(Primitive{ - undecoded: data, - context: context, - })) - return nil - } - - // Special case. Unmarshaler Interface support. - if rv.CanAddr() { - if v, ok := rv.Addr().Interface().(Unmarshaler); ok { - return v.UnmarshalTOML(data) - } - } - - // Special case. Handle time.Time values specifically. - // TODO: Remove this code when we decide to drop support for Go 1.1. - // This isn't necessary in Go 1.2 because time.Time satisfies the encoding - // interfaces. - if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) { - return md.unifyDatetime(data, rv) - } - - // Special case. Look for a value satisfying the TextUnmarshaler interface. - if v, ok := rv.Interface().(TextUnmarshaler); ok { - return md.unifyText(data, v) - } - // BUG(burntsushi) - // The behavior here is incorrect whenever a Go type satisfies the - // encoding.TextUnmarshaler interface but also corresponds to a TOML - // hash or array. In particular, the unmarshaler should only be applied - // to primitive TOML values. But at this point, it will be applied to - // all kinds of values and produce an incorrect error whenever those values - // are hashes or arrays (including arrays of tables). - - k := rv.Kind() - - // laziness - if k >= reflect.Int && k <= reflect.Uint64 { - return md.unifyInt(data, rv) - } - switch k { - case reflect.Ptr: - elem := reflect.New(rv.Type().Elem()) - err := md.unify(data, reflect.Indirect(elem)) - if err != nil { - return err - } - rv.Set(elem) - return nil - case reflect.Struct: - return md.unifyStruct(data, rv) - case reflect.Map: - return md.unifyMap(data, rv) - case reflect.Array: - return md.unifyArray(data, rv) - case reflect.Slice: - return md.unifySlice(data, rv) - case reflect.String: - return md.unifyString(data, rv) - case reflect.Bool: - return md.unifyBool(data, rv) - case reflect.Interface: - // we only support empty interfaces. 
- if rv.NumMethod() > 0 { - return e("Unsupported type '%s'.", rv.Kind()) - } - return md.unifyAnything(data, rv) - case reflect.Float32: - fallthrough - case reflect.Float64: - return md.unifyFloat64(data, rv) - } - return e("Unsupported type '%s'.", rv.Kind()) -} - -func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - return mismatch(rv, "map", mapping) - } - - for key, datum := range tmap { - var f *field - fields := cachedTypeFields(rv.Type()) - for i := range fields { - ff := &fields[i] - if ff.name == key { - f = ff - break - } - if f == nil && strings.EqualFold(ff.name, key) { - f = ff - } - } - if f != nil { - subv := rv - for _, i := range f.index { - subv = indirect(subv.Field(i)) - } - if isUnifiable(subv) { - md.decoded[md.context.add(key).String()] = true - md.context = append(md.context, key) - if err := md.unify(datum, subv); err != nil { - return e("Type mismatch for '%s.%s': %s", - rv.Type().String(), f.name, err) - } - md.context = md.context[0 : len(md.context)-1] - } else if f.name != "" { - // Bad user! No soup for you! - return e("Field '%s.%s' is unexported, and therefore cannot "+ - "be loaded with reflection.", rv.Type().String(), f.name) - } - } - } - return nil -} - -func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error { - tmap, ok := mapping.(map[string]interface{}) - if !ok { - return badtype("map", mapping) - } - if rv.IsNil() { - rv.Set(reflect.MakeMap(rv.Type())) - } - for k, v := range tmap { - md.decoded[md.context.add(k).String()] = true - md.context = append(md.context, k) - - rvkey := indirect(reflect.New(rv.Type().Key())) - rvval := reflect.Indirect(reflect.New(rv.Type().Elem())) - if err := md.unify(v, rvval); err != nil { - return err - } - md.context = md.context[0 : len(md.context)-1] - - rvkey.SetString(k) - rv.SetMapIndex(rvkey, rvval) - } - return nil -} - -func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - return badtype("slice", data) - } - sliceLen := datav.Len() - if sliceLen != rv.Len() { - return e("expected array length %d; got TOML array of length %d", - rv.Len(), sliceLen) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error { - datav := reflect.ValueOf(data) - if datav.Kind() != reflect.Slice { - return badtype("slice", data) - } - sliceLen := datav.Len() - if rv.IsNil() { - rv.Set(reflect.MakeSlice(rv.Type(), sliceLen, sliceLen)) - } - return md.unifySliceArray(datav, rv) -} - -func (md *MetaData) unifySliceArray(data, rv reflect.Value) error { - sliceLen := data.Len() - for i := 0; i < sliceLen; i++ { - v := data.Index(i).Interface() - sliceval := indirect(rv.Index(i)) - if err := md.unify(v, sliceval); err != nil { - return err - } - } - return nil -} - -func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error { - if _, ok := data.(time.Time); ok { - rv.Set(reflect.ValueOf(data)) - return nil - } - return badtype("time.Time", data) -} - -func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error { - if s, ok := data.(string); ok { - rv.SetString(s) - return nil - } - return badtype("string", data) -} - -func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error { - if num, ok := data.(float64); ok { - switch rv.Kind() { - case reflect.Float32: - fallthrough - case reflect.Float64: - rv.SetFloat(num) - default: - 
panic("bug") - } - return nil - } - return badtype("float", data) -} - -func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error { - if num, ok := data.(int64); ok { - if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 { - switch rv.Kind() { - case reflect.Int, reflect.Int64: - // No bounds checking necessary. - case reflect.Int8: - if num < math.MinInt8 || num > math.MaxInt8 { - return e("Value '%d' is out of range for int8.", num) - } - case reflect.Int16: - if num < math.MinInt16 || num > math.MaxInt16 { - return e("Value '%d' is out of range for int16.", num) - } - case reflect.Int32: - if num < math.MinInt32 || num > math.MaxInt32 { - return e("Value '%d' is out of range for int32.", num) - } - } - rv.SetInt(num) - } else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 { - unum := uint64(num) - switch rv.Kind() { - case reflect.Uint, reflect.Uint64: - // No bounds checking necessary. - case reflect.Uint8: - if num < 0 || unum > math.MaxUint8 { - return e("Value '%d' is out of range for uint8.", num) - } - case reflect.Uint16: - if num < 0 || unum > math.MaxUint16 { - return e("Value '%d' is out of range for uint16.", num) - } - case reflect.Uint32: - if num < 0 || unum > math.MaxUint32 { - return e("Value '%d' is out of range for uint32.", num) - } - } - rv.SetUint(unum) - } else { - panic("unreachable") - } - return nil - } - return badtype("integer", data) -} - -func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error { - if b, ok := data.(bool); ok { - rv.SetBool(b) - return nil - } - return badtype("boolean", data) -} - -func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error { - rv.Set(reflect.ValueOf(data)) - return nil -} - -func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error { - var s string - switch sdata := data.(type) { - case TextMarshaler: - text, err := sdata.MarshalText() - if err != nil { - return err - } - s = string(text) - case fmt.Stringer: - s = sdata.String() - case string: - s = sdata - case bool: - s = fmt.Sprintf("%v", sdata) - case int64: - s = fmt.Sprintf("%d", sdata) - case float64: - s = fmt.Sprintf("%f", sdata) - default: - return badtype("primitive (string-like)", data) - } - if err := v.UnmarshalText([]byte(s)); err != nil { - return err - } - return nil -} - -// rvalue returns a reflect.Value of `v`. All pointers are resolved. -func rvalue(v interface{}) reflect.Value { - return indirect(reflect.ValueOf(v)) -} - -// indirect returns the value pointed to by a pointer. -// Pointers are followed until the value is not a pointer. -// New values are allocated for each nil pointer. -// -// An exception to this rule is if the value satisfies an interface of -// interest to us (like encoding.TextUnmarshaler). -func indirect(v reflect.Value) reflect.Value { - if v.Kind() != reflect.Ptr { - if v.CanAddr() { - pv := v.Addr() - if _, ok := pv.Interface().(TextUnmarshaler); ok { - return pv - } - } - return v - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - return indirect(reflect.Indirect(v)) -} - -func isUnifiable(rv reflect.Value) bool { - if rv.CanSet() { - return true - } - if _, ok := rv.Interface().(TextUnmarshaler); ok { - return true - } - return false -} - -func badtype(expected string, data interface{}) error { - return e("Expected %s but found '%T'.", expected, data) -} - -func mismatch(user reflect.Value, expected string, data interface{}) error { - return e("Type mismatch for %s. 
Expected %s but found '%T'.", - user.Type().String(), expected, data) -} diff --git a/vendor/src/github.com/BurntSushi/toml/decode_meta.go b/vendor/src/github.com/BurntSushi/toml/decode_meta.go deleted file mode 100644 index ef6f545fa1..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/decode_meta.go +++ /dev/null @@ -1,122 +0,0 @@ -package toml - -import "strings" - -// MetaData allows access to meta information about TOML data that may not -// be inferrable via reflection. In particular, whether a key has been defined -// and the TOML type of a key. -type MetaData struct { - mapping map[string]interface{} - types map[string]tomlType - keys []Key - decoded map[string]bool - context Key // Used only during decoding. -} - -// IsDefined returns true if the key given exists in the TOML data. The key -// should be specified hierarchially. e.g., -// -// // access the TOML key 'a.b.c' -// IsDefined("a", "b", "c") -// -// IsDefined will return false if an empty key given. Keys are case sensitive. -func (md *MetaData) IsDefined(key ...string) bool { - if len(key) == 0 { - return false - } - - var hash map[string]interface{} - var ok bool - var hashOrVal interface{} = md.mapping - for _, k := range key { - if hash, ok = hashOrVal.(map[string]interface{}); !ok { - return false - } - if hashOrVal, ok = hash[k]; !ok { - return false - } - } - return true -} - -// Type returns a string representation of the type of the key specified. -// -// Type will return the empty string if given an empty key or a key that -// does not exist. Keys are case sensitive. -func (md *MetaData) Type(key ...string) string { - fullkey := strings.Join(key, ".") - if typ, ok := md.types[fullkey]; ok { - return typ.typeString() - } - return "" -} - -// Key is the type of any TOML key, including key groups. Use (MetaData).Keys -// to get values of this type. -type Key []string - -func (k Key) String() string { - return strings.Join(k, ".") -} - -func (k Key) maybeQuotedAll() string { - var ss []string - for i := range k { - ss = append(ss, k.maybeQuoted(i)) - } - return strings.Join(ss, ".") -} - -func (k Key) maybeQuoted(i int) string { - quote := false - for _, c := range k[i] { - if !isBareKeyChar(c) { - quote = true - break - } - } - if quote { - return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\"" - } else { - return k[i] - } -} - -func (k Key) add(piece string) Key { - newKey := make(Key, len(k)+1) - copy(newKey, k) - newKey[len(k)] = piece - return newKey -} - -// Keys returns a slice of every key in the TOML data, including key groups. -// Each key is itself a slice, where the first element is the top of the -// hierarchy and the last is the most specific. -// -// The list will have the same order as the keys appeared in the TOML data. -// -// All keys returned are non-empty. -func (md *MetaData) Keys() []Key { - return md.keys -} - -// Undecoded returns all keys that have not been decoded in the order in which -// they appear in the original TOML document. -// -// This includes keys that haven't been decoded because of a Primitive value. -// Once the Primitive value is decoded, the keys will be considered decoded. -// -// Also note that decoding into an empty interface will result in no decoding, -// and so no keys will be considered decoded. -// -// In this sense, the Undecoded keys correspond to keys in the TOML document -// that do not have a concrete type in your representation. 
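Taken together, IsDefined and Undecoded support a common strict-config pattern: decode, then reject anything the decoder didn't consume. A sketch with hypothetical keys:

```go
package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	const blob = `
[server]
ip = "10.0.0.1"
porT = 9999 # typo: no corresponding struct field
`
	var conf struct {
		Server struct{ IP string }
	}
	md, err := toml.Decode(blob, &conf)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md.IsDefined("server", "ip")) // true
	// Anything the decode above didn't consume is reported undecoded.
	for _, k := range md.Undecoded() {
		fmt.Println("unknown key:", k) // unknown key: server.porT
	}
}
```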
-func (md *MetaData) Undecoded() []Key { - undecoded := make([]Key, 0, len(md.keys)) - for _, key := range md.keys { - if !md.decoded[key.String()] { - undecoded = append(undecoded, key) - } - } - return undecoded -} diff --git a/vendor/src/github.com/BurntSushi/toml/doc.go b/vendor/src/github.com/BurntSushi/toml/doc.go deleted file mode 100644 index fe26800041..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/doc.go +++ /dev/null @@ -1,27 +0,0 @@ -/* -Package toml provides facilities for decoding and encoding TOML configuration -files via reflection. There is also support for delaying decoding with -the Primitive type, and querying the set of keys in a TOML document with the -MetaData type. - -The specification implemented: https://github.com/mojombo/toml - -The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify -whether a file is a valid TOML document. It can also be used to print the -type of each key in a TOML document. - -Testing - -There are two important types of tests used for this package. The first is -contained inside '*_test.go' files and uses the standard Go unit testing -framework. These tests are primarily devoted to holistically testing the -decoder and encoder. - -The second type of testing is used to verify the implementation's adherence -to the TOML specification. These tests have been factored into their own -project: https://github.com/BurntSushi/toml-test - -The reason the tests are in a separate project is so that they can be used by -any implementation of TOML. Namely, it is language agnostic. -*/ -package toml diff --git a/vendor/src/github.com/BurntSushi/toml/encode.go b/vendor/src/github.com/BurntSushi/toml/encode.go deleted file mode 100644 index 64e8c47e13..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/encode.go +++ /dev/null @@ -1,496 +0,0 @@ -package toml - -import ( - "bufio" - "errors" - "fmt" - "io" - "reflect" - "sort" - "strconv" - "strings" - "time" -) - -type tomlEncodeError struct{ error } - -var ( - errArrayMixedElementTypes = errors.New( - "can't encode array with mixed element types") - errArrayNilElement = errors.New( - "can't encode array with nil element") - errNonString = errors.New( - "can't encode a map with non-string key type") - errAnonNonStruct = errors.New( - "can't encode an anonymous field that is not a struct") - errArrayNoTable = errors.New( - "TOML array element can't contain a table") - errNoKey = errors.New( - "top-level values must be a Go map or struct") - errAnything = errors.New("") // used in testing -) - -var quotedReplacer = strings.NewReplacer( - "\t", "\\t", - "\n", "\\n", - "\r", "\\r", - "\"", "\\\"", - "\\", "\\\\", -) - -// Encoder controls the encoding of Go values to a TOML document to some -// io.Writer. -// -// The indentation level can be controlled with the Indent field. -type Encoder struct { - // A single indentation level. By default it is two spaces. - Indent string - - // hasWritten is whether we have written any output to w yet. - hasWritten bool - w *bufio.Writer -} - -// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer -// given. By default, a single indentation level is 2 spaces. -func NewEncoder(w io.Writer) *Encoder { - return &Encoder{ - w: bufio.NewWriter(w), - Indent: " ", - } -} - -// Encode writes a TOML representation of the Go value to the underlying -// io.Writer. If the value given cannot be encoded to a valid TOML document, -// then an error is returned. 
-// -// The mapping between Go values and TOML values should be precisely the same -// as for the Decode* functions. Similarly, the TextMarshaler interface is -// supported by encoding the resulting bytes as strings. (If you want to write -// arbitrary binary data then you will need to use something like base64 since -// TOML does not have any binary types.) -// -// When encoding TOML hashes (i.e., Go maps or structs), keys without any -// sub-hashes are encoded first. -// -// If a Go map is encoded, then its keys are sorted alphabetically for -// deterministic output. More control over this behavior may be provided if -// there is demand for it. -// -// Encoding Go values without a corresponding TOML representation---like map -// types with non-string keys---will cause an error to be returned. Similarly -// for mixed arrays/slices, arrays/slices with nil elements, embedded -// non-struct types and nested slices containing maps or structs. -// (e.g., [][]map[string]string is not allowed but []map[string]string is OK -// and so is []map[string][]string.) -func (enc *Encoder) Encode(v interface{}) error { - rv := eindirect(reflect.ValueOf(v)) - if err := enc.safeEncode(Key([]string{}), rv); err != nil { - return err - } - return enc.w.Flush() -} - -func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) { - defer func() { - if r := recover(); r != nil { - if terr, ok := r.(tomlEncodeError); ok { - err = terr.error - return - } - panic(r) - } - }() - enc.encode(key, rv) - return nil -} - -func (enc *Encoder) encode(key Key, rv reflect.Value) { - // Special case. Time needs to be in ISO8601 format. - // Special case. If we can marshal the type to text, then we used that. - // Basically, this prevents the encoder for handling these types as - // generic structs (or whatever the underlying type of a TextMarshaler is). - switch rv.Interface().(type) { - case time.Time, TextMarshaler: - enc.keyEqElement(key, rv) - return - } - - k := rv.Kind() - switch k { - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64, - reflect.Float32, reflect.Float64, reflect.String, reflect.Bool: - enc.keyEqElement(key, rv) - case reflect.Array, reflect.Slice: - if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) { - enc.eArrayOfTables(key, rv) - } else { - enc.keyEqElement(key, rv) - } - case reflect.Interface: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Map: - if rv.IsNil() { - return - } - enc.eTable(key, rv) - case reflect.Ptr: - if rv.IsNil() { - return - } - enc.encode(key, rv.Elem()) - case reflect.Struct: - enc.eTable(key, rv) - default: - panic(e("Unsupported type for key '%s': %s", key, k)) - } -} - -// eElement encodes any value that can be an array element (primitives and -// arrays). -func (enc *Encoder) eElement(rv reflect.Value) { - switch v := rv.Interface().(type) { - case time.Time: - // Special case time.Time as a primitive. Has to come before - // TextMarshaler below because time.Time implements - // encoding.TextMarshaler, but we need to always use UTC. - enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z")) - return - case TextMarshaler: - // Special case. Use text marshaler if it's available for this value. 
- if s, err := v.MarshalText(); err != nil { - encPanic(err) - } else { - enc.writeQuoted(string(s)) - } - return - } - switch rv.Kind() { - case reflect.Bool: - enc.wf(strconv.FormatBool(rv.Bool())) - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64: - enc.wf(strconv.FormatInt(rv.Int(), 10)) - case reflect.Uint, reflect.Uint8, reflect.Uint16, - reflect.Uint32, reflect.Uint64: - enc.wf(strconv.FormatUint(rv.Uint(), 10)) - case reflect.Float32: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32))) - case reflect.Float64: - enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64))) - case reflect.Array, reflect.Slice: - enc.eArrayOrSliceElement(rv) - case reflect.Interface: - enc.eElement(rv.Elem()) - case reflect.String: - enc.writeQuoted(rv.String()) - default: - panic(e("Unexpected primitive type: %s", rv.Kind())) - } -} - -// By the TOML spec, all floats must have a decimal with at least one -// number on either side. -func floatAddDecimal(fstr string) string { - if !strings.Contains(fstr, ".") { - return fstr + ".0" - } - return fstr -} - -func (enc *Encoder) writeQuoted(s string) { - enc.wf("\"%s\"", quotedReplacer.Replace(s)) -} - -func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) { - length := rv.Len() - enc.wf("[") - for i := 0; i < length; i++ { - elem := rv.Index(i) - enc.eElement(elem) - if i != length-1 { - enc.wf(", ") - } - } - enc.wf("]") -} - -func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - for i := 0; i < rv.Len(); i++ { - trv := rv.Index(i) - if isNil(trv) { - continue - } - panicIfInvalidKey(key) - enc.newline() - enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - enc.eMapOrStruct(key, trv) - } -} - -func (enc *Encoder) eTable(key Key, rv reflect.Value) { - panicIfInvalidKey(key) - if len(key) == 1 { - // Output an extra new line between top-level tables. - // (The newline isn't written if nothing else has been written though.) - enc.newline() - } - if len(key) > 0 { - enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll()) - enc.newline() - } - enc.eMapOrStruct(key, rv) -} - -func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) { - switch rv := eindirect(rv); rv.Kind() { - case reflect.Map: - enc.eMap(key, rv) - case reflect.Struct: - enc.eStruct(key, rv) - default: - panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String()) - } -} - -func (enc *Encoder) eMap(key Key, rv reflect.Value) { - rt := rv.Type() - if rt.Key().Kind() != reflect.String { - encPanic(errNonString) - } - - // Sort keys so that we have deterministic output. And write keys directly - // underneath this key first, before writing sub-structs or sub-maps. - var mapKeysDirect, mapKeysSub []string - for _, mapKey := range rv.MapKeys() { - k := mapKey.String() - if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) { - mapKeysSub = append(mapKeysSub, k) - } else { - mapKeysDirect = append(mapKeysDirect, k) - } - } - - var writeMapKeys = func(mapKeys []string) { - sort.Strings(mapKeys) - for _, mapKey := range mapKeys { - mrv := rv.MapIndex(reflect.ValueOf(mapKey)) - if isNil(mrv) { - // Don't write anything for nil fields. 
- continue - } - enc.encode(key.add(mapKey), mrv) - } - } - writeMapKeys(mapKeysDirect) - writeMapKeys(mapKeysSub) -} - -func (enc *Encoder) eStruct(key Key, rv reflect.Value) { - // Write keys for fields directly under this key first, because if we write - // a field that creates a new table, then all keys under it will be in that - // table (not the one we're writing here). - rt := rv.Type() - var fieldsDirect, fieldsSub [][]int - var addFields func(rt reflect.Type, rv reflect.Value, start []int) - addFields = func(rt reflect.Type, rv reflect.Value, start []int) { - for i := 0; i < rt.NumField(); i++ { - f := rt.Field(i) - // skip unexporded fields - if f.PkgPath != "" { - continue - } - frv := rv.Field(i) - if f.Anonymous { - frv := eindirect(frv) - t := frv.Type() - if t.Kind() != reflect.Struct { - encPanic(errAnonNonStruct) - } - addFields(t, frv, f.Index) - } else if typeIsHash(tomlTypeOfGo(frv)) { - fieldsSub = append(fieldsSub, append(start, f.Index...)) - } else { - fieldsDirect = append(fieldsDirect, append(start, f.Index...)) - } - } - } - addFields(rt, rv, nil) - - var writeFields = func(fields [][]int) { - for _, fieldIndex := range fields { - sft := rt.FieldByIndex(fieldIndex) - sf := rv.FieldByIndex(fieldIndex) - if isNil(sf) { - // Don't write anything for nil fields. - continue - } - - keyName := sft.Tag.Get("toml") - if keyName == "-" { - continue - } - if keyName == "" { - keyName = sft.Name - } - enc.encode(key.add(keyName), sf) - } - } - writeFields(fieldsDirect) - writeFields(fieldsSub) -} - -// tomlTypeName returns the TOML type name of the Go value's type. It is -// used to determine whether the types of array elements are mixed (which is -// forbidden). If the Go value is nil, then it is illegal for it to be an array -// element, and valueIsNil is returned as true. - -// Returns the TOML type of a Go value. The type may be `nil`, which means -// no concrete TOML type could be found. -func tomlTypeOfGo(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() { - return nil - } - switch rv.Kind() { - case reflect.Bool: - return tomlBool - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, - reflect.Int64, - reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, - reflect.Uint64: - return tomlInteger - case reflect.Float32, reflect.Float64: - return tomlFloat - case reflect.Array, reflect.Slice: - if typeEqual(tomlHash, tomlArrayType(rv)) { - return tomlArrayHash - } else { - return tomlArray - } - case reflect.Ptr, reflect.Interface: - return tomlTypeOfGo(rv.Elem()) - case reflect.String: - return tomlString - case reflect.Map: - return tomlHash - case reflect.Struct: - switch rv.Interface().(type) { - case time.Time: - return tomlDatetime - case TextMarshaler: - return tomlString - default: - return tomlHash - } - default: - panic("unexpected reflect.Kind: " + rv.Kind().String()) - } -} - -// tomlArrayType returns the element type of a TOML array. The type returned -// may be nil if it cannot be determined (e.g., a nil slice or a zero length -// slize). This function may also panic if it finds a type that cannot be -// expressed in TOML (such as nil elements, heterogeneous arrays or directly -// nested arrays of tables). 
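For the encoding half of the package, usage mirrors the decoder; a minimal sketch (the buffer destination and field names are illustrative):

```go
package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	cfg := struct {
		Title string
		Ports []int
	}{Title: "example", Ports: []int{8001, 8002}}

	var buf bytes.Buffer
	enc := toml.NewEncoder(&buf)
	enc.Indent = "    " // default is two spaces
	if err := enc.Encode(cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Title = "example"
	// Ports = [8001, 8002]
}
```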
-func tomlArrayType(rv reflect.Value) tomlType { - if isNil(rv) || !rv.IsValid() || rv.Len() == 0 { - return nil - } - firstType := tomlTypeOfGo(rv.Index(0)) - if firstType == nil { - encPanic(errArrayNilElement) - } - - rvlen := rv.Len() - for i := 1; i < rvlen; i++ { - elem := rv.Index(i) - switch elemType := tomlTypeOfGo(elem); { - case elemType == nil: - encPanic(errArrayNilElement) - case !typeEqual(firstType, elemType): - encPanic(errArrayMixedElementTypes) - } - } - // If we have a nested array, then we must make sure that the nested - // array contains ONLY primitives. - // This checks arbitrarily nested arrays. - if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) { - nest := tomlArrayType(eindirect(rv.Index(0))) - if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) { - encPanic(errArrayNoTable) - } - } - return firstType -} - -func (enc *Encoder) newline() { - if enc.hasWritten { - enc.wf("\n") - } -} - -func (enc *Encoder) keyEqElement(key Key, val reflect.Value) { - if len(key) == 0 { - encPanic(errNoKey) - } - panicIfInvalidKey(key) - enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1)) - enc.eElement(val) - enc.newline() -} - -func (enc *Encoder) wf(format string, v ...interface{}) { - if _, err := fmt.Fprintf(enc.w, format, v...); err != nil { - encPanic(err) - } - enc.hasWritten = true -} - -func (enc *Encoder) indentStr(key Key) string { - return strings.Repeat(enc.Indent, len(key)-1) -} - -func encPanic(err error) { - panic(tomlEncodeError{err}) -} - -func eindirect(v reflect.Value) reflect.Value { - switch v.Kind() { - case reflect.Ptr, reflect.Interface: - return eindirect(v.Elem()) - default: - return v - } -} - -func isNil(rv reflect.Value) bool { - switch rv.Kind() { - case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: - return rv.IsNil() - default: - return false - } -} - -func panicIfInvalidKey(key Key) { - for _, k := range key { - if len(k) == 0 { - encPanic(e("Key '%s' is not a valid table name. Key names "+ - "cannot be empty.", key.maybeQuotedAll())) - } - } -} - -func isValidKeyName(s string) bool { - return len(s) != 0 -} diff --git a/vendor/src/github.com/BurntSushi/toml/encoding_types.go b/vendor/src/github.com/BurntSushi/toml/encoding_types.go deleted file mode 100644 index d36e1dd600..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/encoding_types.go +++ /dev/null @@ -1,19 +0,0 @@ -// +build go1.2 - -package toml - -// In order to support Go 1.1, we define our own TextMarshaler and -// TextUnmarshaler types. For Go 1.2+, we just alias them with the -// standard library interfaces. - -import ( - "encoding" -) - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. -type TextMarshaler encoding.TextMarshaler - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler encoding.TextUnmarshaler diff --git a/vendor/src/github.com/BurntSushi/toml/encoding_types_1.1.go b/vendor/src/github.com/BurntSushi/toml/encoding_types_1.1.go deleted file mode 100644 index e8d503d046..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/encoding_types_1.1.go +++ /dev/null @@ -1,18 +0,0 @@ -// +build !go1.2 - -package toml - -// These interfaces were introduced in Go 1.2, so we add them manually when -// compiling for Go 1.1. - -// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here -// so that Go 1.1 can be supported. 
-type TextMarshaler interface { - MarshalText() (text []byte, err error) -} - -// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined -// here so that Go 1.1 can be supported. -type TextUnmarshaler interface { - UnmarshalText(text []byte) error -} diff --git a/vendor/src/github.com/BurntSushi/toml/lex.go b/vendor/src/github.com/BurntSushi/toml/lex.go deleted file mode 100644 index 219122857e..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/lex.go +++ /dev/null @@ -1,874 +0,0 @@ -package toml - -import ( - "fmt" - "strings" - "unicode/utf8" -) - -type itemType int - -const ( - itemError itemType = iota - itemNIL // used in the parser to indicate no type - itemEOF - itemText - itemString - itemRawString - itemMultilineString - itemRawMultilineString - itemBool - itemInteger - itemFloat - itemDatetime - itemArray // the start of an array - itemArrayEnd - itemTableStart - itemTableEnd - itemArrayTableStart - itemArrayTableEnd - itemKeyStart - itemCommentStart -) - -const ( - eof = 0 - tableStart = '[' - tableEnd = ']' - arrayTableStart = '[' - arrayTableEnd = ']' - tableSep = '.' - keySep = '=' - arrayStart = '[' - arrayEnd = ']' - arrayValTerm = ',' - commentStart = '#' - stringStart = '"' - stringEnd = '"' - rawStringStart = '\'' - rawStringEnd = '\'' -) - -type stateFn func(lx *lexer) stateFn - -type lexer struct { - input string - start int - pos int - width int - line int - state stateFn - items chan item - - // A stack of state functions used to maintain context. - // The idea is to reuse parts of the state machine in various places. - // For example, values can appear at the top level or within arbitrarily - // nested arrays. The last state on the stack is used after a value has - // been lexed. Similarly for comments. - stack []stateFn -} - -type item struct { - typ itemType - val string - line int -} - -func (lx *lexer) nextItem() item { - for { - select { - case item := <-lx.items: - return item - default: - lx.state = lx.state(lx) - } - } -} - -func lex(input string) *lexer { - lx := &lexer{ - input: input + "\n", - state: lexTop, - line: 1, - items: make(chan item, 10), - stack: make([]stateFn, 0, 10), - } - return lx -} - -func (lx *lexer) push(state stateFn) { - lx.stack = append(lx.stack, state) -} - -func (lx *lexer) pop() stateFn { - if len(lx.stack) == 0 { - return lx.errorf("BUG in lexer: no states to pop.") - } - last := lx.stack[len(lx.stack)-1] - lx.stack = lx.stack[0 : len(lx.stack)-1] - return last -} - -func (lx *lexer) current() string { - return lx.input[lx.start:lx.pos] -} - -func (lx *lexer) emit(typ itemType) { - lx.items <- item{typ, lx.current(), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) emitTrim(typ itemType) { - lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line} - lx.start = lx.pos -} - -func (lx *lexer) next() (r rune) { - if lx.pos >= len(lx.input) { - lx.width = 0 - return eof - } - - if lx.input[lx.pos] == '\n' { - lx.line++ - } - r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:]) - lx.pos += lx.width - return r -} - -// ignore skips over the pending input before this point. -func (lx *lexer) ignore() { - lx.start = lx.pos -} - -// backup steps back one rune. Can be called only once per call of next. -func (lx *lexer) backup() { - lx.pos -= lx.width - if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' { - lx.line-- - } -} - -// accept consumes the next rune if it's equal to `valid`. 
-func (lx *lexer) accept(valid rune) bool { - if lx.next() == valid { - return true - } - lx.backup() - return false -} - -// peek returns but does not consume the next rune in the input. -func (lx *lexer) peek() rune { - r := lx.next() - lx.backup() - return r -} - -// errorf stops all lexing by emitting an error and returning `nil`. -// Note that any value that is a character is escaped if it's a special -// character (new lines, tabs, etc.). -func (lx *lexer) errorf(format string, values ...interface{}) stateFn { - lx.items <- item{ - itemError, - fmt.Sprintf(format, values...), - lx.line, - } - return nil -} - -// lexTop consumes elements at the top level of TOML data. -func lexTop(lx *lexer) stateFn { - r := lx.next() - if isWhitespace(r) || isNL(r) { - return lexSkip(lx, lexTop) - } - - switch r { - case commentStart: - lx.push(lexTop) - return lexCommentStart - case tableStart: - return lexTableStart - case eof: - if lx.pos > lx.start { - return lx.errorf("Unexpected EOF.") - } - lx.emit(itemEOF) - return nil - } - - // At this point, the only valid item can be a key, so we back up - // and let the key lexer do the rest. - lx.backup() - lx.push(lexTopEnd) - return lexKeyStart -} - -// lexTopEnd is entered whenever a top-level item has been consumed. (A value -// or a table.) It must see only whitespace, and will turn back to lexTop -// upon a new line. If it sees EOF, it will quit the lexer successfully. -func lexTopEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case r == commentStart: - // a comment will read to a new line for us. - lx.push(lexTop) - return lexCommentStart - case isWhitespace(r): - return lexTopEnd - case isNL(r): - lx.ignore() - return lexTop - case r == eof: - lx.ignore() - return lexTop - } - return lx.errorf("Expected a top-level item to end with a new line, "+ - "comment or EOF, but got %q instead.", r) -} - -// lexTable lexes the beginning of a table. Namely, it makes sure that -// it starts with a character other than '.' and ']'. -// It assumes that '[' has already been consumed. -// It also handles the case that this is an item in an array of tables. -// e.g., '[[name]]'. -func lexTableStart(lx *lexer) stateFn { - if lx.peek() == arrayTableStart { - lx.next() - lx.emit(itemArrayTableStart) - lx.push(lexArrayTableEnd) - } else { - lx.emit(itemTableStart) - lx.push(lexTableEnd) - } - return lexTableNameStart -} - -func lexTableEnd(lx *lexer) stateFn { - lx.emit(itemTableEnd) - return lexTopEnd -} - -func lexArrayTableEnd(lx *lexer) stateFn { - if r := lx.next(); r != arrayTableEnd { - return lx.errorf("Expected end of table array name delimiter %q, "+ - "but got %q instead.", arrayTableEnd, r) - } - lx.emit(itemArrayTableEnd) - return lexTopEnd -} - -func lexTableNameStart(lx *lexer) stateFn { - switch r := lx.peek(); { - case r == tableEnd || r == eof: - return lx.errorf("Unexpected end of table name. (Table names cannot " + - "be empty.)") - case r == tableSep: - return lx.errorf("Unexpected table separator. (Table names cannot " + - "be empty.)") - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.push(lexTableNameEnd) - return lexValue // reuse string lexing - case isWhitespace(r): - return lexTableNameStart - default: - return lexBareTableName - } -} - -// lexTableName lexes the name of a table. It assumes that at least one -// valid character for the table has already been read. 
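The lexer above follows the classic state-function design: each stateFn consumes some input and returns the next state, with a stack for nested contexts like arrays. A stripped-down, self-contained illustration of that pattern (only the stateFn idea is taken from this package; the mini-lexer itself is hypothetical):

```go
package main

import "fmt"

// stateFn: a state consumes input and returns the next state,
// or nil when lexing is done.
type stateFn func(*lexer) stateFn

type lexer struct {
	input string
	pos   int
	items []string
}

// lexWords emits whitespace-separated words.
func lexWords(lx *lexer) stateFn {
	start := lx.pos
	for lx.pos < len(lx.input) && lx.input[lx.pos] != ' ' {
		lx.pos++
	}
	if lx.pos > start {
		lx.items = append(lx.items, lx.input[start:lx.pos])
	}
	if lx.pos == len(lx.input) {
		return nil // EOF: stop the state machine
	}
	lx.pos++ // skip the separator
	return lexWords
}

func main() {
	lx := &lexer{input: "key = value"}
	// Run the machine: each state hands control to the next.
	for state := stateFn(lexWords); state != nil; {
		state = state(lx)
	}
	fmt.Println(lx.items) // [key = value]
}
```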
-func lexBareTableName(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareTableName - case r == tableSep || r == tableEnd: - lx.backup() - lx.emitTrim(itemText) - return lexTableNameEnd - default: - return lx.errorf("Bare keys cannot contain %q.", r) - } -} - -// lexTableNameEnd reads the end of a piece of a table name, optionally -// consuming whitespace. -func lexTableNameEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case isWhitespace(r): - return lexTableNameEnd - case r == tableSep: - lx.ignore() - return lexTableNameStart - case r == tableEnd: - return lx.pop() - default: - return lx.errorf("Expected '.' or ']' to end table name, but got %q "+ - "instead.", r) - } -} - -// lexKeyStart consumes a key name up until the first non-whitespace character. -// lexKeyStart will ignore whitespace. -func lexKeyStart(lx *lexer) stateFn { - r := lx.peek() - switch { - case r == keySep: - return lx.errorf("Unexpected key separator %q.", keySep) - case isWhitespace(r) || isNL(r): - lx.next() - return lexSkip(lx, lexKeyStart) - case r == stringStart || r == rawStringStart: - lx.ignore() - lx.emit(itemKeyStart) - lx.push(lexKeyEnd) - return lexValue // reuse string lexing - default: - lx.ignore() - lx.emit(itemKeyStart) - return lexBareKey - } -} - -// lexBareKey consumes the text of a bare key. Assumes that the first character -// (which is not whitespace) has not yet been consumed. -func lexBareKey(lx *lexer) stateFn { - switch r := lx.next(); { - case isBareKeyChar(r): - return lexBareKey - case isWhitespace(r): - lx.emitTrim(itemText) - return lexKeyEnd - case r == keySep: - lx.backup() - lx.emitTrim(itemText) - return lexKeyEnd - default: - return lx.errorf("Bare keys cannot contain %q.", r) - } -} - -// lexKeyEnd consumes the end of a key and trims whitespace (up to the key -// separator). -func lexKeyEnd(lx *lexer) stateFn { - switch r := lx.next(); { - case r == keySep: - return lexSkip(lx, lexValue) - case isWhitespace(r): - return lexSkip(lx, lexKeyEnd) - default: - return lx.errorf("Expected key separator %q, but got %q instead.", - keySep, r) - } -} - -// lexValue starts the consumption of a value anywhere a value is expected. -// lexValue will ignore whitespace. -// After a value is lexed, the last state on the next is popped and returned. -func lexValue(lx *lexer) stateFn { - // We allow whitespace to precede a value, but NOT new lines. - // In array syntax, the array states are responsible for ignoring new - // lines. 
- r := lx.next() - if isWhitespace(r) { - return lexSkip(lx, lexValue) - } - - switch { - case r == arrayStart: - lx.ignore() - lx.emit(itemArray) - return lexArrayValue - case r == stringStart: - if lx.accept(stringStart) { - if lx.accept(stringStart) { - lx.ignore() // Ignore """ - return lexMultilineString - } - lx.backup() - } - lx.ignore() // ignore the '"' - return lexString - case r == rawStringStart: - if lx.accept(rawStringStart) { - if lx.accept(rawStringStart) { - lx.ignore() // Ignore """ - return lexMultilineRawString - } - lx.backup() - } - lx.ignore() // ignore the "'" - return lexRawString - case r == 't': - return lexTrue - case r == 'f': - return lexFalse - case r == '-': - return lexNumberStart - case isDigit(r): - lx.backup() // avoid an extra state and use the same as above - return lexNumberOrDateStart - case r == '.': // special error case, be kind to users - return lx.errorf("Floats must start with a digit, not '.'.") - } - return lx.errorf("Expected value but found %q instead.", r) -} - -// lexArrayValue consumes one value in an array. It assumes that '[' or ',' -// have already been consumed. All whitespace and new lines are ignored. -func lexArrayValue(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValue) - case r == commentStart: - lx.push(lexArrayValue) - return lexCommentStart - case r == arrayValTerm: - return lx.errorf("Unexpected array value terminator %q.", - arrayValTerm) - case r == arrayEnd: - return lexArrayEnd - } - - lx.backup() - lx.push(lexArrayValueEnd) - return lexValue -} - -// lexArrayValueEnd consumes the cruft between values of an array. Namely, -// it ignores whitespace and expects either a ',' or a ']'. -func lexArrayValueEnd(lx *lexer) stateFn { - r := lx.next() - switch { - case isWhitespace(r) || isNL(r): - return lexSkip(lx, lexArrayValueEnd) - case r == commentStart: - lx.push(lexArrayValueEnd) - return lexCommentStart - case r == arrayValTerm: - lx.ignore() - return lexArrayValue // move on to the next value - case r == arrayEnd: - return lexArrayEnd - } - return lx.errorf("Expected an array value terminator %q or an array "+ - "terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r) -} - -// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has -// just been consumed. -func lexArrayEnd(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemArrayEnd) - return lx.pop() -} - -// lexString consumes the inner contents of a string. It assumes that the -// beginning '"' has already been consumed and ignored. -func lexString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == '\\': - lx.push(lexString) - return lexStringEscape - case r == stringEnd: - lx.backup() - lx.emit(itemString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexString -} - -// lexMultilineString consumes the inner contents of a string. It assumes that -// the beginning '"""' has already been consumed and ignored. -func lexMultilineString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '\\': - return lexMultilineStringEscape - case r == stringEnd: - if lx.accept(stringEnd) { - if lx.accept(stringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineString -} - -// lexRawString consumes a raw string. Nothing can be escaped in such a string. 
-// It assumes that the beginning "'" has already been consumed and ignored. -func lexRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case isNL(r): - return lx.errorf("Strings cannot contain new lines.") - case r == rawStringEnd: - lx.backup() - lx.emit(itemRawString) - lx.next() - lx.ignore() - return lx.pop() - } - return lexRawString -} - -// lexMultilineRawString consumes a raw string. Nothing can be escaped in such -// a string. It assumes that the beginning "'" has already been consumed and -// ignored. -func lexMultilineRawString(lx *lexer) stateFn { - r := lx.next() - switch { - case r == rawStringEnd: - if lx.accept(rawStringEnd) { - if lx.accept(rawStringEnd) { - lx.backup() - lx.backup() - lx.backup() - lx.emit(itemRawMultilineString) - lx.next() - lx.next() - lx.next() - lx.ignore() - return lx.pop() - } - lx.backup() - } - } - return lexMultilineRawString -} - -// lexMultilineStringEscape consumes an escaped character. It assumes that the -// preceding '\\' has already been consumed. -func lexMultilineStringEscape(lx *lexer) stateFn { - // Handle the special case first: - if isNL(lx.next()) { - lx.next() - return lexMultilineString - } else { - lx.backup() - lx.push(lexMultilineString) - return lexStringEscape(lx) - } -} - -func lexStringEscape(lx *lexer) stateFn { - r := lx.next() - switch r { - case 'b': - fallthrough - case 't': - fallthrough - case 'n': - fallthrough - case 'f': - fallthrough - case 'r': - fallthrough - case '"': - fallthrough - case '\\': - return lx.pop() - case 'u': - return lexShortUnicodeEscape - case 'U': - return lexLongUnicodeEscape - } - return lx.errorf("Invalid escape character %q. Only the following "+ - "escape characters are allowed: "+ - "\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+ - "\\uXXXX and \\UXXXXXXXX.", r) -} - -func lexShortUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 4; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected four hexadecimal digits after '\\u', "+ - "but got '%s' instead.", lx.current()) - } - } - return lx.pop() -} - -func lexLongUnicodeEscape(lx *lexer) stateFn { - var r rune - for i := 0; i < 8; i++ { - r = lx.next() - if !isHexadecimal(r) { - return lx.errorf("Expected eight hexadecimal digits after '\\U', "+ - "but got '%s' instead.", lx.current()) - } - } - return lx.pop() -} - -// lexNumberOrDateStart consumes either a (positive) integer, float or -// datetime. It assumes that NO negative sign has been consumed. -func lexNumberOrDateStart(lx *lexer) stateFn { - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) - } - } - return lexNumberOrDate -} - -// lexNumberOrDate consumes either a (positive) integer, float or datetime. -func lexNumberOrDate(lx *lexer) stateFn { - r := lx.next() - switch { - case r == '-': - if lx.pos-lx.start != 5 { - return lx.errorf("All ISO8601 dates must be in full Zulu form.") - } - return lexDateAfterYear - case isDigit(r): - return lexNumberOrDate - case r == '.': - return lexFloatStart - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format. -// It assumes that "YYYY-" has already been consumed. -func lexDateAfterYear(lx *lexer) stateFn { - formats := []rune{ - // digits are '0'. - // everything else is direct equality. 
- '0', '0', '-', '0', '0', - 'T', - '0', '0', ':', '0', '0', ':', '0', '0', - 'Z', - } - for _, f := range formats { - r := lx.next() - if f == '0' { - if !isDigit(r) { - return lx.errorf("Expected digit in ISO8601 datetime, "+ - "but found %q instead.", r) - } - } else if f != r { - return lx.errorf("Expected %q in ISO8601 datetime, "+ - "but found %q instead.", f, r) - } - } - lx.emit(itemDatetime) - return lx.pop() -} - -// lexNumberStart consumes either an integer or a float. It assumes that -// a negative sign has already been read, but that *no* digits have been -// consumed. lexNumberStart will move to the appropriate integer or float -// states. -func lexNumberStart(lx *lexer) stateFn { - // we MUST see a digit. Even floats have to start with a digit. - r := lx.next() - if !isDigit(r) { - if r == '.' { - return lx.errorf("Floats must start with a digit, not '.'.") - } else { - return lx.errorf("Expected a digit but got %q.", r) - } - } - return lexNumber -} - -// lexNumber consumes an integer or a float after seeing the first digit. -func lexNumber(lx *lexer) stateFn { - r := lx.next() - switch { - case isDigit(r): - return lexNumber - case r == '.': - return lexFloatStart - } - - lx.backup() - lx.emit(itemInteger) - return lx.pop() -} - -// lexFloatStart starts the consumption of digits of a float after a '.'. -// Namely, at least one digit is required. -func lexFloatStart(lx *lexer) stateFn { - r := lx.next() - if !isDigit(r) { - return lx.errorf("Floats must have a digit after the '.', but got "+ - "%q instead.", r) - } - return lexFloat -} - -// lexFloat consumes the digits of a float after a '.'. -// Assumes that one digit has been consumed after a '.' already. -func lexFloat(lx *lexer) stateFn { - r := lx.next() - if isDigit(r) { - return lexFloat - } - - lx.backup() - lx.emit(itemFloat) - return lx.pop() -} - -// lexConst consumes the s[1:] in s. It assumes that s[0] has already been -// consumed. -func lexConst(lx *lexer, s string) stateFn { - for i := range s[1:] { - if r := lx.next(); r != rune(s[i+1]) { - return lx.errorf("Expected %q, but found %q instead.", s[:i+1], - s[:i]+string(r)) - } - } - return nil -} - -// lexTrue consumes the "rue" in "true". It assumes that 't' has already -// been consumed. -func lexTrue(lx *lexer) stateFn { - if fn := lexConst(lx, "true"); fn != nil { - return fn - } - lx.emit(itemBool) - return lx.pop() -} - -// lexFalse consumes the "alse" in "false". It assumes that 'f' has already -// been consumed. -func lexFalse(lx *lexer) stateFn { - if fn := lexConst(lx, "false"); fn != nil { - return fn - } - lx.emit(itemBool) - return lx.pop() -} - -// lexCommentStart begins the lexing of a comment. It will emit -// itemCommentStart and consume no characters, passing control to lexComment. -func lexCommentStart(lx *lexer) stateFn { - lx.ignore() - lx.emit(itemCommentStart) - return lexComment -} - -// lexComment lexes an entire comment. It assumes that '#' has been consumed. -// It will consume *up to* the first new line character, and pass control -// back to the last state on the stack. -func lexComment(lx *lexer) stateFn { - r := lx.peek() - if isNL(r) || r == eof { - lx.emit(itemText) - return lx.pop() - } - lx.next() - return lexComment -} - -// lexSkip ignores all slurped input and moves on to the next state. -func lexSkip(lx *lexer, nextState stateFn) stateFn { - return func(lx *lexer) stateFn { - lx.ignore() - return nextState - } -} - -// isWhitespace returns true if `r` is a whitespace character according -// to the spec. 
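lexDateAfterYear above validates the remainder of a Zulu timestamp against a literal template in which '0' stands for any digit and every other rune must match exactly. The same table-driven check works as a plain function; a small sketch (hypothetical helper, not part of this package):

    // matchTemplate reports whether s matches template, where each '0'
    // byte in template means "any ASCII digit" and every other byte
    // must match literally.
    func matchTemplate(s, template string) bool {
    	if len(s) != len(template) {
    		return false
    	}
    	for i := 0; i < len(s); i++ {
    		switch {
    		case template[i] == '0':
    			if s[i] < '0' || s[i] > '9' {
    				return false
    			}
    		case s[i] != template[i]:
    			return false
    		}
    	}
    	return true
    }

    // matchTemplate("08-29T14:03:59Z", "00-00T00:00:00Z") == true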
-func isWhitespace(r rune) bool { - return r == '\t' || r == ' ' -} - -func isNL(r rune) bool { - return r == '\n' || r == '\r' -} - -func isDigit(r rune) bool { - return r >= '0' && r <= '9' -} - -func isHexadecimal(r rune) bool { - return (r >= '0' && r <= '9') || - (r >= 'a' && r <= 'f') || - (r >= 'A' && r <= 'F') -} - -func isBareKeyChar(r rune) bool { - return (r >= 'A' && r <= 'Z') || - (r >= 'a' && r <= 'z') || - (r >= '0' && r <= '9') || - r == '_' || - r == '-' -} - -func (itype itemType) String() string { - switch itype { - case itemError: - return "Error" - case itemNIL: - return "NIL" - case itemEOF: - return "EOF" - case itemText: - return "Text" - case itemString: - return "String" - case itemRawString: - return "String" - case itemMultilineString: - return "String" - case itemRawMultilineString: - return "String" - case itemBool: - return "Bool" - case itemInteger: - return "Integer" - case itemFloat: - return "Float" - case itemDatetime: - return "DateTime" - case itemTableStart: - return "TableStart" - case itemTableEnd: - return "TableEnd" - case itemKeyStart: - return "KeyStart" - case itemArray: - return "Array" - case itemArrayEnd: - return "ArrayEnd" - case itemCommentStart: - return "CommentStart" - } - panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype))) -} - -func (item item) String() string { - return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val) -} diff --git a/vendor/src/github.com/BurntSushi/toml/parse.go b/vendor/src/github.com/BurntSushi/toml/parse.go deleted file mode 100644 index c6069be1f1..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/parse.go +++ /dev/null @@ -1,498 +0,0 @@ -package toml - -import ( - "fmt" - "log" - "strconv" - "strings" - "time" - "unicode" - "unicode/utf8" -) - -type parser struct { - mapping map[string]interface{} - types map[string]tomlType - lx *lexer - - // A list of keys in the order that they appear in the TOML data. - ordered []Key - - // the full key for the current hash in scope - context Key - - // the base key name for everything except hashes - currentKey string - - // rough approximation of line number - approxLine int - - // A map of 'key.group.names' to whether they were created implicitly. 
- implicits map[string]bool -} - -type parseError string - -func (pe parseError) Error() string { - return string(pe) -} - -func parse(data string) (p *parser, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - if err, ok = r.(parseError); ok { - return - } - panic(r) - } - }() - - p = &parser{ - mapping: make(map[string]interface{}), - types: make(map[string]tomlType), - lx: lex(data), - ordered: make([]Key, 0), - implicits: make(map[string]bool), - } - for { - item := p.next() - if item.typ == itemEOF { - break - } - p.topLevel(item) - } - - return p, nil -} - -func (p *parser) panicf(format string, v ...interface{}) { - msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s", - p.approxLine, p.current(), fmt.Sprintf(format, v...)) - panic(parseError(msg)) -} - -func (p *parser) next() item { - it := p.lx.nextItem() - if it.typ == itemError { - p.panicf("%s", it.val) - } - return it -} - -func (p *parser) bug(format string, v ...interface{}) { - log.Fatalf("BUG: %s\n\n", fmt.Sprintf(format, v...)) -} - -func (p *parser) expect(typ itemType) item { - it := p.next() - p.assertEqual(typ, it.typ) - return it -} - -func (p *parser) assertEqual(expected, got itemType) { - if expected != got { - p.bug("Expected '%s' but got '%s'.", expected, got) - } -} - -func (p *parser) topLevel(item item) { - switch item.typ { - case itemCommentStart: - p.approxLine = item.line - p.expect(itemText) - case itemTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemTableEnd, kg.typ) - - p.establishContext(key, false) - p.setType("", tomlHash) - p.ordered = append(p.ordered, key) - case itemArrayTableStart: - kg := p.next() - p.approxLine = kg.line - - var key Key - for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() { - key = append(key, p.keyString(kg)) - } - p.assertEqual(itemArrayTableEnd, kg.typ) - - p.establishContext(key, true) - p.setType("", tomlArrayHash) - p.ordered = append(p.ordered, key) - case itemKeyStart: - kname := p.next() - p.approxLine = kname.line - p.currentKey = p.keyString(kname) - - val, typ := p.value(p.next()) - p.setValue(p.currentKey, val) - p.setType(p.currentKey, typ) - p.ordered = append(p.ordered, p.context.add(p.currentKey)) - p.currentKey = "" - default: - p.bug("Unexpected type at top level: %s", item.typ) - } -} - -// Gets a string for a key (or part of a key in a table name). -func (p *parser) keyString(it item) string { - switch it.typ { - case itemText: - return it.val - case itemString, itemMultilineString, - itemRawString, itemRawMultilineString: - s, _ := p.value(it) - return s.(string) - default: - p.bug("Unexpected key type: %s", it.typ) - panic("unreachable") - } -} - -// value translates an expected value from the lexer into a Go value wrapped -// as an empty interface. 
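parse() above shows the usual Go trick for deeply recursive parsers: helpers panic with a private error type instead of threading error returns through every call, and a single deferred recover at the API boundary turns that panic back into an ordinary error while re-raising anything unexpected as a genuine bug. A minimal sketch of the pattern under hypothetical names:

    type bailout string

    func (b bailout) Error() string { return string(b) }

    // mustPositive is a stand-in for a deep parsing helper: it bails
    // out via panic rather than returning an error.
    func mustPositive(n int) int {
    	if n < 0 {
    		panic(bailout("negative input"))
    	}
    	return n
    }

    // Parse is the public boundary: recover converts an expected
    // bailout back into an error and re-raises anything else.
    func Parse(n int) (result int, err error) {
    	defer func() {
    		if r := recover(); r != nil {
    			b, ok := r.(bailout)
    			if !ok {
    				panic(r)
    			}
    			err = b
    		}
    	}()
    	return mustPositive(n) * 2, nil
    }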
-func (p *parser) value(it item) (interface{}, tomlType) { - switch it.typ { - case itemString: - return p.replaceEscapes(it.val), p.typeOfPrimitive(it) - case itemMultilineString: - trimmed := stripFirstNewline(stripEscapedWhitespace(it.val)) - return p.replaceEscapes(trimmed), p.typeOfPrimitive(it) - case itemRawString: - return it.val, p.typeOfPrimitive(it) - case itemRawMultilineString: - return stripFirstNewline(it.val), p.typeOfPrimitive(it) - case itemBool: - switch it.val { - case "true": - return true, p.typeOfPrimitive(it) - case "false": - return false, p.typeOfPrimitive(it) - } - p.bug("Expected boolean value, but got '%s'.", it.val) - case itemInteger: - num, err := strconv.ParseInt(it.val, 10, 64) - if err != nil { - // See comment below for floats describing why we make a - // distinction between a bug and a user error. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Integer '%s' is out of the range of 64-bit "+ - "signed integers.", it.val) - } else { - p.bug("Expected integer value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemFloat: - num, err := strconv.ParseFloat(it.val, 64) - if err != nil { - // Distinguish float values. Normally, it'd be a bug if the lexer - // provides an invalid float, but it's possible that the float is - // out of range of valid values (which the lexer cannot determine). - // So mark the former as a bug but the latter as a legitimate user - // error. - // - // This is also true for integers. - if e, ok := err.(*strconv.NumError); ok && - e.Err == strconv.ErrRange { - - p.panicf("Float '%s' is out of the range of 64-bit "+ - "IEEE-754 floating-point numbers.", it.val) - } else { - p.bug("Expected float value, but got '%s'.", it.val) - } - } - return num, p.typeOfPrimitive(it) - case itemDatetime: - t, err := time.Parse("2006-01-02T15:04:05Z", it.val) - if err != nil { - p.bug("Expected Zulu formatted DateTime, but got '%s'.", it.val) - } - return t, p.typeOfPrimitive(it) - case itemArray: - array := make([]interface{}, 0) - types := make([]tomlType, 0) - - for it = p.next(); it.typ != itemArrayEnd; it = p.next() { - if it.typ == itemCommentStart { - p.expect(itemText) - continue - } - - val, typ := p.value(it) - array = append(array, val) - types = append(types, typ) - } - return array, p.typeOfArray(types) - } - p.bug("Unexpected value type: %s", it.typ) - panic("unreachable") -} - -// establishContext sets the current context of the parser, -// where the context is either a hash or an array of hashes. Which one is -// set depends on the value of the `array` parameter. -// -// Establishing the context also makes sure that the key isn't a duplicate, and -// will create implicit hashes automatically. -func (p *parser) establishContext(key Key, array bool) { - var ok bool - - // Always start at the top level and drill down for our context. - hashContext := p.mapping - keyContext := make(Key, 0) - - // We only need implicit hashes for key[0:-1] - for _, k := range key[0 : len(key)-1] { - _, ok = hashContext[k] - keyContext = append(keyContext, k) - - // No key? Make an implicit hash and move on. - if !ok { - p.addImplicit(keyContext) - hashContext[k] = make(map[string]interface{}) - } - - // If the hash context is actually an array of tables, then set - // the hash context to the last element in that array. - // - // Otherwise, it better be a table, since this MUST be a key group (by - // virtue of it not being the last element in a key). 
- switch t := hashContext[k].(type) { - case []map[string]interface{}: - hashContext = t[len(t)-1] - case map[string]interface{}: - hashContext = t - default: - p.panicf("Key '%s' was already created as a hash.", keyContext) - } - } - - p.context = keyContext - if array { - // If this is the first element for this array, then allocate a new - // list of tables for it. - k := key[len(key)-1] - if _, ok := hashContext[k]; !ok { - hashContext[k] = make([]map[string]interface{}, 0, 5) - } - - // Add a new table. But make sure the key hasn't already been used - // for something else. - if hash, ok := hashContext[k].([]map[string]interface{}); ok { - hashContext[k] = append(hash, make(map[string]interface{})) - } else { - p.panicf("Key '%s' was already created and cannot be used as "+ - "an array.", keyContext) - } - } else { - p.setValue(key[len(key)-1], make(map[string]interface{})) - } - p.context = append(p.context, key[len(key)-1]) -} - -// setValue sets the given key to the given value in the current context. -// It will make sure that the key hasn't already been defined, account for -// implicit key groups. -func (p *parser) setValue(key string, value interface{}) { - var tmpHash interface{} - var ok bool - - hash := p.mapping - keyContext := make(Key, 0) - for _, k := range p.context { - keyContext = append(keyContext, k) - if tmpHash, ok = hash[k]; !ok { - p.bug("Context for key '%s' has not been established.", keyContext) - } - switch t := tmpHash.(type) { - case []map[string]interface{}: - // The context is a table of hashes. Pick the most recent table - // defined as the current hash. - hash = t[len(t)-1] - case map[string]interface{}: - hash = t - default: - p.bug("Expected hash to have type 'map[string]interface{}', but "+ - "it has '%T' instead.", tmpHash) - } - } - keyContext = append(keyContext, key) - - if _, ok := hash[key]; ok { - // Typically, if the given key has already been set, then we have - // to raise an error since duplicate keys are disallowed. However, - // it's possible that a key was previously defined implicitly. In this - // case, it is allowed to be redefined concretely. (See the - // `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.) - // - // But we have to make sure to stop marking it as an implicit. (So that - // another redefinition provokes an error.) - // - // Note that since it has already been defined (as a hash), we don't - // want to overwrite it. So our business is done. - if p.isImplicit(keyContext) { - p.removeImplicit(keyContext) - return - } - - // Otherwise, we have a concrete key trying to override a previous - // key, which is *always* wrong. - p.panicf("Key '%s' has already been defined.", keyContext) - } - hash[key] = value -} - -// setType sets the type of a particular value at a given key. -// It should be called immediately AFTER setValue. -// -// Note that if `key` is empty, then the type given will be applied to the -// current context (which is either a table or an array of tables). -func (p *parser) setType(key string, typ tomlType) { - keyContext := make(Key, 0, len(p.context)+1) - for _, k := range p.context { - keyContext = append(keyContext, k) - } - if len(key) > 0 { // allow type setting for hashes - keyContext = append(keyContext, key) - } - p.types[keyContext.String()] = typ -} - -// addImplicit sets the given Key as having been created implicitly. 
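establishContext and setValue above both perform the same walk: descend through nested map[string]interface{} tables, creating implicit ones as needed, and fail when a key already holds a non-table value. Factored out as a standalone sketch (hypothetical helper, not from this package):

    import "errors"

    // descend walks root along key, creating empty tables for missing
    // segments, and fails when a segment holds a non-table value.
    func descend(root map[string]interface{}, key []string) (map[string]interface{}, error) {
    	cur := root
    	for _, k := range key {
    		next, ok := cur[k]
    		if !ok {
    			m := make(map[string]interface{}) // implicit table
    			cur[k] = m
    			cur = m
    			continue
    		}
    		m, ok := next.(map[string]interface{})
    		if !ok {
    			return nil, errors.New("key " + k + " already holds a non-table value")
    		}
    		cur = m
    	}
    	return cur, nil
    }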
-func (p *parser) addImplicit(key Key) { - p.implicits[key.String()] = true -} - -// removeImplicit stops tagging the given key as having been implicitly -// created. -func (p *parser) removeImplicit(key Key) { - p.implicits[key.String()] = false -} - -// isImplicit returns true if the key group pointed to by the key was created -// implicitly. -func (p *parser) isImplicit(key Key) bool { - return p.implicits[key.String()] -} - -// current returns the full key name of the current context. -func (p *parser) current() string { - if len(p.currentKey) == 0 { - return p.context.String() - } - if len(p.context) == 0 { - return p.currentKey - } - return fmt.Sprintf("%s.%s", p.context, p.currentKey) -} - -func stripFirstNewline(s string) string { - if len(s) == 0 || s[0] != '\n' { - return s - } - return s[1:len(s)] -} - -func stripEscapedWhitespace(s string) string { - esc := strings.Split(s, "\\\n") - if len(esc) > 1 { - for i := 1; i < len(esc); i++ { - esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace) - } - } - return strings.Join(esc, "") -} - -func (p *parser) replaceEscapes(str string) string { - var replaced []rune - s := []byte(str) - r := 0 - for r < len(s) { - if s[r] != '\\' { - c, size := utf8.DecodeRune(s[r:]) - r += size - replaced = append(replaced, c) - continue - } - r += 1 - if r >= len(s) { - p.bug("Escape sequence at end of string.") - return "" - } - switch s[r] { - default: - p.bug("Expected valid escape code after \\, but got %q.", s[r]) - return "" - case 'b': - replaced = append(replaced, rune(0x0008)) - r += 1 - case 't': - replaced = append(replaced, rune(0x0009)) - r += 1 - case 'n': - replaced = append(replaced, rune(0x000A)) - r += 1 - case 'f': - replaced = append(replaced, rune(0x000C)) - r += 1 - case 'r': - replaced = append(replaced, rune(0x000D)) - r += 1 - case '"': - replaced = append(replaced, rune(0x0022)) - r += 1 - case '\\': - replaced = append(replaced, rune(0x005C)) - r += 1 - case 'u': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+5). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+5]) - replaced = append(replaced, escaped) - r += 5 - case 'U': - // At this point, we know we have a Unicode escape of the form - // `uXXXX` at [r, r+9). (Because the lexer guarantees this - // for us.) - escaped := p.asciiEscapeToUnicode(s[r+1 : r+9]) - replaced = append(replaced, escaped) - r += 9 - } - } - return string(replaced) -} - -func (p *parser) asciiEscapeToUnicode(bs []byte) rune { - s := string(bs) - hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32) - if err != nil { - p.bug("Could not parse '%s' as a hexadecimal number, but the "+ - "lexer claims it's OK: %s", s, err) - } - - // BUG(burntsushi) - // I honestly don't understand how this works. I can't seem - // to find a way to make this fail. I figured this would fail on invalid - // UTF-8 characters like U+DCFF, but it doesn't. 
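The BUG note just above has a mundane explanation: converting an invalid code point such as a UTF-16 surrogate to a string, as string(rune(hex)) does, yields the replacement character U+FFFD, which is itself valid UTF-8, so utf8.ValidString can never fail here. A check that does reject such escapes could look like this (hypothetical helper):

    import "unicode/utf8"

    // validScalar reports whether v is a Unicode scalar value: in
    // range and not a surrogate, i.e. legal inside a UTF-8 string.
    // The explicit range check guards against a huge uint64 aliasing
    // into the valid range when truncated to rune.
    func validScalar(v uint64) bool {
    	return v <= utf8.MaxRune && utf8.ValidRune(rune(v))
    }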
- if !utf8.ValidString(string(rune(hex))) { - p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s) - } - return rune(hex) -} - -func isStringType(ty itemType) bool { - return ty == itemString || ty == itemMultilineString || - ty == itemRawString || ty == itemRawMultilineString -} diff --git a/vendor/src/github.com/BurntSushi/toml/session.vim b/vendor/src/github.com/BurntSushi/toml/session.vim deleted file mode 100644 index 562164be06..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/session.vim +++ /dev/null @@ -1 +0,0 @@ -au BufWritePost *.go silent!make tags > /dev/null 2>&1 diff --git a/vendor/src/github.com/BurntSushi/toml/type_check.go b/vendor/src/github.com/BurntSushi/toml/type_check.go deleted file mode 100644 index c73f8afc1a..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/type_check.go +++ /dev/null @@ -1,91 +0,0 @@ -package toml - -// tomlType represents any Go type that corresponds to a TOML type. -// While the first draft of the TOML spec has a simplistic type system that -// probably doesn't need this level of sophistication, we seem to be militating -// toward adding real composite types. -type tomlType interface { - typeString() string -} - -// typeEqual accepts any two types and returns true if they are equal. -func typeEqual(t1, t2 tomlType) bool { - if t1 == nil || t2 == nil { - return false - } - return t1.typeString() == t2.typeString() -} - -func typeIsHash(t tomlType) bool { - return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash) -} - -type tomlBaseType string - -func (btype tomlBaseType) typeString() string { - return string(btype) -} - -func (btype tomlBaseType) String() string { - return btype.typeString() -} - -var ( - tomlInteger tomlBaseType = "Integer" - tomlFloat tomlBaseType = "Float" - tomlDatetime tomlBaseType = "Datetime" - tomlString tomlBaseType = "String" - tomlBool tomlBaseType = "Bool" - tomlArray tomlBaseType = "Array" - tomlHash tomlBaseType = "Hash" - tomlArrayHash tomlBaseType = "ArrayHash" -) - -// typeOfPrimitive returns a tomlType of any primitive value in TOML. -// Primitive values are: Integer, Float, Datetime, String and Bool. -// -// Passing a lexer item other than the following will cause a BUG message -// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime. -func (p *parser) typeOfPrimitive(lexItem item) tomlType { - switch lexItem.typ { - case itemInteger: - return tomlInteger - case itemFloat: - return tomlFloat - case itemDatetime: - return tomlDatetime - case itemString: - return tomlString - case itemMultilineString: - return tomlString - case itemRawString: - return tomlString - case itemRawMultilineString: - return tomlString - case itemBool: - return tomlBool - } - p.bug("Cannot infer primitive type of lex item '%s'.", lexItem) - panic("unreachable") -} - -// typeOfArray returns a tomlType for an array given a list of types of its -// values. -// -// In the current spec, if an array is homogeneous, then its type is always -// "Array". If the array is not homogeneous, an error is generated. -func (p *parser) typeOfArray(types []tomlType) tomlType { - // Empty arrays are cool. 
- if len(types) == 0 { - return tomlArray - } - - theType := types[0] - for _, t := range types[1:] { - if !typeEqual(theType, t) { - p.panicf("Array contains values of type '%s' and '%s', but "+ - "arrays must be homogeneous.", theType, t) - } - } - return tomlArray -} diff --git a/vendor/src/github.com/BurntSushi/toml/type_fields.go b/vendor/src/github.com/BurntSushi/toml/type_fields.go deleted file mode 100644 index 7592f87a45..0000000000 --- a/vendor/src/github.com/BurntSushi/toml/type_fields.go +++ /dev/null @@ -1,241 +0,0 @@ -package toml - -// Struct field handling is adapted from code in encoding/json: -// -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the Go distribution. - -import ( - "reflect" - "sort" - "sync" -) - -// A field represents a single field found in a struct. -type field struct { - name string // the name of the field (`toml` tag included) - tag bool // whether field has a `toml` tag - index []int // represents the depth of an anonymous field - typ reflect.Type // the type of the field -} - -// byName sorts field by name, breaking ties with depth, -// then breaking ties with "name came from toml tag", then -// breaking ties with index sequence. -type byName []field - -func (x byName) Len() int { return len(x) } - -func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byName) Less(i, j int) bool { - if x[i].name != x[j].name { - return x[i].name < x[j].name - } - if len(x[i].index) != len(x[j].index) { - return len(x[i].index) < len(x[j].index) - } - if x[i].tag != x[j].tag { - return x[i].tag - } - return byIndex(x).Less(i, j) -} - -// byIndex sorts field by index sequence. -type byIndex []field - -func (x byIndex) Len() int { return len(x) } - -func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] } - -func (x byIndex) Less(i, j int) bool { - for k, xik := range x[i].index { - if k >= len(x[j].index) { - return false - } - if xik != x[j].index[k] { - return xik < x[j].index[k] - } - } - return len(x[i].index) < len(x[j].index) -} - -// typeFields returns a list of fields that TOML should recognize for the given -// type. The algorithm is breadth-first search over the set of structs to -// include - the top struct and then any reachable anonymous structs. -func typeFields(t reflect.Type) []field { - // Anonymous fields to explore at the current level and the next. - current := []field{} - next := []field{{typ: t}} - - // Count of queued names for current level and the next. - count := map[reflect.Type]int{} - nextCount := map[reflect.Type]int{} - - // Types already visited at an earlier level. - visited := map[reflect.Type]bool{} - - // Fields found. - var fields []field - - for len(next) > 0 { - current, next = next, current[:0] - count, nextCount = nextCount, map[reflect.Type]int{} - - for _, f := range current { - if visited[f.typ] { - continue - } - visited[f.typ] = true - - // Scan f.typ for fields to include. - for i := 0; i < f.typ.NumField(); i++ { - sf := f.typ.Field(i) - if sf.PkgPath != "" { // unexported - continue - } - name := sf.Tag.Get("toml") - if name == "-" { - continue - } - index := make([]int, len(f.index)+1) - copy(index, f.index) - index[len(f.index)] = i - - ft := sf.Type - if ft.Name() == "" && ft.Kind() == reflect.Ptr { - // Follow pointer. - ft = ft.Elem() - } - - // Record found field and index sequence. 
- if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct { - tagged := name != "" - if name == "" { - name = sf.Name - } - fields = append(fields, field{name, tagged, index, ft}) - if count[f.typ] > 1 { - // If there were multiple instances, add a second, - // so that the annihilation code will see a duplicate. - // It only cares about the distinction between 1 or 2, - // so don't bother generating any more copies. - fields = append(fields, fields[len(fields)-1]) - } - continue - } - - // Record new anonymous struct to explore in next round. - nextCount[ft]++ - if nextCount[ft] == 1 { - f := field{name: ft.Name(), index: index, typ: ft} - next = append(next, f) - } - } - } - } - - sort.Sort(byName(fields)) - - // Delete all fields that are hidden by the Go rules for embedded fields, - // except that fields with TOML tags are promoted. - - // The fields are sorted in primary order of name, secondary order - // of field index length. Loop over names; for each name, delete - // hidden fields by choosing the one dominant field that survives. - out := fields[:0] - for advance, i := 0, 0; i < len(fields); i += advance { - // One iteration per name. - // Find the sequence of fields with the name of this first field. - fi := fields[i] - name := fi.name - for advance = 1; i+advance < len(fields); advance++ { - fj := fields[i+advance] - if fj.name != name { - break - } - } - if advance == 1 { // Only one field with this name - out = append(out, fi) - continue - } - dominant, ok := dominantField(fields[i : i+advance]) - if ok { - out = append(out, dominant) - } - } - - fields = out - sort.Sort(byIndex(fields)) - - return fields -} - -// dominantField looks through the fields, all of which are known to -// have the same name, to find the single field that dominates the -// others using Go's embedding rules, modified by the presence of -// TOML tags. If there are multiple top-level fields, the boolean -// will be false: This condition is an error in Go and we skip all -// the fields. -func dominantField(fields []field) (field, bool) { - // The fields are sorted in increasing index-length order. The winner - // must therefore be one with the shortest index length. Drop all - // longer entries, which is easy: just truncate the slice. - length := len(fields[0].index) - tagged := -1 // Index of first tagged field. - for i, f := range fields { - if len(f.index) > length { - fields = fields[:i] - break - } - if f.tag { - if tagged >= 0 { - // Multiple tagged fields at the same level: conflict. - // Return no field. - return field{}, false - } - tagged = i - } - } - if tagged >= 0 { - return fields[tagged], true - } - // All remaining fields have the same length. If there's more than one, - // we have a conflict (two fields named "X" at the same level) and we - // return no field. - if len(fields) > 1 { - return field{}, false - } - return fields[0], true -} - -var fieldCache struct { - sync.RWMutex - m map[reflect.Type][]field -} - -// cachedTypeFields is like typeFields but uses a cache to avoid repeated work. -func cachedTypeFields(t reflect.Type) []field { - fieldCache.RLock() - f := fieldCache.m[t] - fieldCache.RUnlock() - if f != nil { - return f - } - - // Compute fields without lock. - // Might duplicate effort but won't hold other computations back. 
- f = typeFields(t) - if f == nil { - f = []field{} - } - - fieldCache.Lock() - if fieldCache.m == nil { - fieldCache.m = map[reflect.Type][]field{} - } - fieldCache.m[t] = f - fieldCache.Unlock() - return f -} diff --git a/vendor/src/github.com/Graylog2/go-gelf/LICENSE b/vendor/src/github.com/Graylog2/go-gelf/LICENSE deleted file mode 100644 index bc756ae365..0000000000 --- a/vendor/src/github.com/Graylog2/go-gelf/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -Copyright 2012 SocialCode - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE -LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION -OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION -WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - diff --git a/vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go b/vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go deleted file mode 100644 index ff719fc714..0000000000 --- a/vendor/src/github.com/Graylog2/go-gelf/gelf/reader.go +++ /dev/null @@ -1,140 +0,0 @@ -// Copyright 2012 SocialCode. All rights reserved. -// Use of this source code is governed by the MIT -// license that can be found in the LICENSE file. - -package gelf - -import ( - "bytes" - "compress/gzip" - "compress/zlib" - "encoding/json" - "fmt" - "io" - "net" - "strings" - "sync" -) - -type Reader struct { - mu sync.Mutex - conn net.Conn -} - -func NewReader(addr string) (*Reader, error) { - var err error - udpAddr, err := net.ResolveUDPAddr("udp", addr) - if err != nil { - return nil, fmt.Errorf("ResolveUDPAddr('%s'): %s", addr, err) - } - - conn, err := net.ListenUDP("udp", udpAddr) - if err != nil { - return nil, fmt.Errorf("ListenUDP: %s", err) - } - - r := new(Reader) - r.conn = conn - return r, nil -} - -func (r *Reader) Addr() string { - return r.conn.LocalAddr().String() -} - -// FIXME: this will discard data if p isn't big enough to hold the -// full message. 
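cachedTypeFields, at the end of the toml hunk above, is the canonical read-through cache: fast path under the read lock, compute with no lock held (duplicated work is harmless because the computation is pure), then publish under the write lock. The shape, reduced to a toy with hypothetical names:

    import "sync"

    var cache struct {
    	sync.RWMutex
    	m map[string]int
    }

    // cachedLen: read-lock fast path, unlocked compute, write-lock publish.
    func cachedLen(s string) int {
    	cache.RLock()
    	n, ok := cache.m[s]
    	cache.RUnlock()
    	if ok {
    		return n
    	}
    	n = len(s) // stand-in for an expensive, pure computation
    	cache.Lock()
    	if cache.m == nil {
    		cache.m = map[string]int{}
    	}
    	cache.m[s] = n
    	cache.Unlock()
    	return n
    }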
-func (r *Reader) Read(p []byte) (int, error) { - msg, err := r.ReadMessage() - if err != nil { - return -1, err - } - - var data string - - if msg.Full == "" { - data = msg.Short - } else { - data = msg.Full - } - - return strings.NewReader(data).Read(p) -} - -func (r *Reader) ReadMessage() (*Message, error) { - cBuf := make([]byte, ChunkSize) - var ( - err error - n, length int - cid, ocid []byte - seq, total uint8 - cHead []byte - cReader io.Reader - chunks [][]byte - ) - - for got := 0; got < 128 && (total == 0 || got < int(total)); got++ { - if n, err = r.conn.Read(cBuf); err != nil { - return nil, fmt.Errorf("Read: %s", err) - } - cHead, cBuf = cBuf[:2], cBuf[:n] - - if bytes.Equal(cHead, magicChunked) { - //fmt.Printf("chunked %v\n", cBuf[:14]) - cid, seq, total = cBuf[2:2+8], cBuf[2+8], cBuf[2+8+1] - if ocid != nil && !bytes.Equal(cid, ocid) { - return nil, fmt.Errorf("out-of-band message %v (awaited %v)", cid, ocid) - } else if ocid == nil { - ocid = cid - chunks = make([][]byte, total) - } - n = len(cBuf) - chunkedHeaderLen - //fmt.Printf("setting chunks[%d]: %d\n", seq, n) - chunks[seq] = append(make([]byte, 0, n), cBuf[chunkedHeaderLen:]...) - length += n - } else { //not chunked - if total > 0 { - return nil, fmt.Errorf("out-of-band message (not chunked)") - } - break - } - } - //fmt.Printf("\nchunks: %v\n", chunks) - - if length > 0 { - if cap(cBuf) < length { - cBuf = append(cBuf, make([]byte, 0, length-cap(cBuf))...) - } - cBuf = cBuf[:0] - for i := range chunks { - //fmt.Printf("appending %d %v\n", i, chunks[i]) - cBuf = append(cBuf, chunks[i]...) - } - cHead = cBuf[:2] - } - - // the data we get from the wire is compressed - if bytes.Equal(cHead, magicGzip) { - cReader, err = gzip.NewReader(bytes.NewReader(cBuf)) - } else if cHead[0] == magicZlib[0] && - (int(cHead[0])*256+int(cHead[1]))%31 == 0 { - // zlib is slightly more complicated, but correct - cReader, err = zlib.NewReader(bytes.NewReader(cBuf)) - } else { - // compliance with https://github.com/Graylog2/graylog2-server - // treating all messages as uncompressed if they are not gzip, zlib or - // chunked - cReader = bytes.NewReader(cBuf) - } - - if err != nil { - return nil, fmt.Errorf("NewReader: %s", err) - } - - msg := new(Message) - if err := json.NewDecoder(cReader).Decode(&msg); err != nil { - return nil, fmt.Errorf("json.Unmarshal: %s", err) - } - - return msg, nil -} diff --git a/vendor/src/github.com/Graylog2/go-gelf/gelf/writer.go b/vendor/src/github.com/Graylog2/go-gelf/gelf/writer.go deleted file mode 100644 index 90cdb99216..0000000000 --- a/vendor/src/github.com/Graylog2/go-gelf/gelf/writer.go +++ /dev/null @@ -1,418 +0,0 @@ -// Copyright 2012 SocialCode. All rights reserved. -// Use of this source code is governed by the MIT -// license that can be found in the LICENSE file. - -package gelf - -import ( - "bytes" - "compress/flate" - "compress/gzip" - "compress/zlib" - "crypto/rand" - "encoding/json" - "fmt" - "io" - "net" - "os" - "path" - "runtime" - "strings" - "sync" - "time" -) - -// Writer implements io.Writer and is used to send both discrete -// messages to a graylog2 server, or data from a stream-oriented -// interface (like the functions in log). 
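ReadMessage above decides how to decompress by sniffing the first bytes of the reassembled payload: gzip streams begin 0x1f 0x8b, a zlib stream starts with 0x78 and its two-byte header read big-endian is a multiple of 31, and anything else is passed through as plain JSON. The rule as a standalone sketch (hypothetical helper):

    // sniffCompression applies the same detection rules ReadMessage
    // uses on the first two bytes of a GELF payload.
    func sniffCompression(head []byte) string {
    	if len(head) >= 2 {
    		if head[0] == 0x1f && head[1] == 0x8b {
    			return "gzip"
    		}
    		if head[0] == 0x78 && (int(head[0])*256+int(head[1]))%31 == 0 {
    			return "zlib"
    		}
    	}
    	return "none"
    }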
-type Writer struct { - mu sync.Mutex - conn net.Conn - hostname string - Facility string // defaults to current process name - CompressionLevel int // one of the consts from compress/flate - CompressionType CompressType -} - -// What compression type the writer should use when sending messages -// to the graylog2 server -type CompressType int - -const ( - CompressGzip CompressType = iota - CompressZlib - CompressNone -) - -// Message represents the contents of the GELF message. It is gzipped -// before sending. -type Message struct { - Version string `json:"version"` - Host string `json:"host"` - Short string `json:"short_message"` - Full string `json:"full_message,omitempty"` - TimeUnix float64 `json:"timestamp"` - Level int32 `json:"level,omitempty"` - Facility string `json:"facility,omitempty"` - Extra map[string]interface{} `json:"-"` - RawExtra json.RawMessage `json:"-"` -} - -// Used to control GELF chunking. Should be less than (MTU - len(UDP -// header)). -// -// TODO: generate dynamically using Path MTU Discovery? -const ( - ChunkSize = 1420 - chunkedHeaderLen = 12 - chunkedDataLen = ChunkSize - chunkedHeaderLen -) - -var ( - magicChunked = []byte{0x1e, 0x0f} - magicZlib = []byte{0x78} - magicGzip = []byte{0x1f, 0x8b} -) - -// Syslog severity levels -const ( - LOG_EMERG = int32(0) - LOG_ALERT = int32(1) - LOG_CRIT = int32(2) - LOG_ERR = int32(3) - LOG_WARNING = int32(4) - LOG_NOTICE = int32(5) - LOG_INFO = int32(6) - LOG_DEBUG = int32(7) -) - -// numChunks returns the number of GELF chunks necessary to transmit -// the given compressed buffer. -func numChunks(b []byte) int { - lenB := len(b) - if lenB <= ChunkSize { - return 1 - } - return len(b)/chunkedDataLen + 1 -} - -// New returns a new GELF Writer. This writer can be used to send the -// output of the standard Go log functions to a central GELF server by -// passing it to log.SetOutput() -func NewWriter(addr string) (*Writer, error) { - var err error - w := new(Writer) - w.CompressionLevel = flate.BestSpeed - - if w.conn, err = net.Dial("udp", addr); err != nil { - return nil, err - } - if w.hostname, err = os.Hostname(); err != nil { - return nil, err - } - - w.Facility = path.Base(os.Args[0]) - - return w, nil -} - -// writes the gzip compressed byte array to the connection as a series -// of GELF chunked messages. The header format is documented at -// https://github.com/Graylog2/graylog2-docs/wiki/GELF as: -// -// 2-byte magic (0x1e 0x0f), 8 byte id, 1 byte sequence id, 1 byte -// total, chunk-data -func (w *Writer) writeChunked(zBytes []byte) (err error) { - b := make([]byte, 0, ChunkSize) - buf := bytes.NewBuffer(b) - nChunksI := numChunks(zBytes) - if nChunksI > 255 { - return fmt.Errorf("msg too large, would need %d chunks", nChunksI) - } - nChunks := uint8(nChunksI) - // use urandom to get a unique message id - msgId := make([]byte, 8) - n, err := io.ReadFull(rand.Reader, msgId) - if err != nil || n != 8 { - return fmt.Errorf("rand.Reader: %d/%s", n, err) - } - - bytesLeft := len(zBytes) - for i := uint8(0); i < nChunks; i++ { - buf.Reset() - // manually write header. Don't care about - // host/network byte order, because the spec only - // deals in individual bytes. 
- buf.Write(magicChunked) //magic - buf.Write(msgId) - buf.WriteByte(i) - buf.WriteByte(nChunks) - // slice out our chunk from zBytes - chunkLen := chunkedDataLen - if chunkLen > bytesLeft { - chunkLen = bytesLeft - } - off := int(i) * chunkedDataLen - chunk := zBytes[off : off+chunkLen] - buf.Write(chunk) - - // write this chunk, and make sure the write was good - n, err := w.conn.Write(buf.Bytes()) - if err != nil { - return fmt.Errorf("Write (chunk %d/%d): %s", i, - nChunks, err) - } - if n != len(buf.Bytes()) { - return fmt.Errorf("Write len: (chunk %d/%d) (%d/%d)", - i, nChunks, n, len(buf.Bytes())) - } - - bytesLeft -= chunkLen - } - - if bytesLeft != 0 { - return fmt.Errorf("error: %d bytes left after sending", bytesLeft) - } - return nil -} - -// 1k bytes buffer by default -var bufPool = sync.Pool{ - New: func() interface{} { - return bytes.NewBuffer(make([]byte, 0, 1024)) - }, -} - -func newBuffer() *bytes.Buffer { - b := bufPool.Get().(*bytes.Buffer) - if b != nil { - b.Reset() - return b - } - return bytes.NewBuffer(nil) -} - -// WriteMessage sends the specified message to the GELF server -// specified in the call to New(). It assumes all the fields are -// filled out appropriately. In general, clients will want to use -// Write, rather than WriteMessage. -func (w *Writer) WriteMessage(m *Message) (err error) { - mBuf := newBuffer() - defer bufPool.Put(mBuf) - if err = m.MarshalJSONBuf(mBuf); err != nil { - return err - } - mBytes := mBuf.Bytes() - - var ( - zBuf *bytes.Buffer - zBytes []byte - ) - - var zw io.WriteCloser - switch w.CompressionType { - case CompressGzip: - zBuf = newBuffer() - defer bufPool.Put(zBuf) - zw, err = gzip.NewWriterLevel(zBuf, w.CompressionLevel) - case CompressZlib: - zBuf = newBuffer() - defer bufPool.Put(zBuf) - zw, err = zlib.NewWriterLevel(zBuf, w.CompressionLevel) - case CompressNone: - zBytes = mBytes - default: - panic(fmt.Sprintf("unknown compression type %d", - w.CompressionType)) - } - if zw != nil { - if err != nil { - return - } - if _, err = zw.Write(mBytes); err != nil { - zw.Close() - return - } - zw.Close() - zBytes = zBuf.Bytes() - } - - if numChunks(zBytes) > 1 { - return w.writeChunked(zBytes) - } - n, err := w.conn.Write(zBytes) - if err != nil { - return - } - if n != len(zBytes) { - return fmt.Errorf("bad write (%d/%d)", n, len(zBytes)) - } - - return nil -} - -// Close connection and interrupt blocked Read or Write operations -func (w *Writer) Close() error { - return w.conn.Close() -} - -/* -func (w *Writer) Alert(m string) (err error) -func (w *Writer) Close() error -func (w *Writer) Crit(m string) (err error) -func (w *Writer) Debug(m string) (err error) -func (w *Writer) Emerg(m string) (err error) -func (w *Writer) Err(m string) (err error) -func (w *Writer) Info(m string) (err error) -func (w *Writer) Notice(m string) (err error) -func (w *Writer) Warning(m string) (err error) -*/ - -// getCaller returns the filename and the line info of a function -// further down in the call stack. Passing 0 in as callDepth would -// return info on the function calling getCallerIgnoringLog, 1 the -// parent function, and so on. Any suffixes passed to getCaller are -// path fragments like "/pkg/log/log.go", and functions in the call -// stack from that file are ignored. -func getCaller(callDepth int, suffixesToIgnore ...string) (file string, line int) { - // bump by 1 to ignore the getCaller (this) stackframe - callDepth++ -outer: - for { - var ok bool - _, file, line, ok = runtime.Caller(callDepth) - if !ok { - file = "???" 
- line = 0 - break - } - - for _, s := range suffixesToIgnore { - if strings.HasSuffix(file, s) { - callDepth++ - continue outer - } - } - break - } - return -} - -func getCallerIgnoringLogMulti(callDepth int) (string, int) { - // the +1 is to ignore this (getCallerIgnoringLogMulti) frame - return getCaller(callDepth+1, "/pkg/log/log.go", "/pkg/io/multi.go") -} - -// Write encodes the given string in a GELF message and sends it to -// the server specified in New(). -func (w *Writer) Write(p []byte) (n int, err error) { - - // 1 for the function that called us. - file, line := getCallerIgnoringLogMulti(1) - - // remove trailing and leading whitespace - p = bytes.TrimSpace(p) - - // If there are newlines in the message, use the first line - // for the short message and set the full message to the - // original input. If the input has no newlines, stick the - // whole thing in Short. - short := p - full := []byte("") - if i := bytes.IndexRune(p, '\n'); i > 0 { - short = p[:i] - full = p - } - - m := Message{ - Version: "1.1", - Host: w.hostname, - Short: string(short), - Full: string(full), - TimeUnix: float64(time.Now().Unix()), - Level: 6, // info - Facility: w.Facility, - Extra: map[string]interface{}{ - "_file": file, - "_line": line, - }, - } - - if err = w.WriteMessage(&m); err != nil { - return 0, err - } - - return len(p), nil -} - -func (m *Message) MarshalJSONBuf(buf *bytes.Buffer) error { - b, err := json.Marshal(m) - if err != nil { - return err - } - // write up until the final } - if _, err = buf.Write(b[:len(b)-1]); err != nil { - return err - } - if len(m.Extra) > 0 { - eb, err := json.Marshal(m.Extra) - if err != nil { - return err - } - // merge serialized message + serialized extra map - if err = buf.WriteByte(','); err != nil { - return err - } - // write serialized extra bytes, without enclosing quotes - if _, err = buf.Write(eb[1 : len(eb)-1]); err != nil { - return err - } - } - - if len(m.RawExtra) > 0 { - if err := buf.WriteByte(','); err != nil { - return err - } - - // write serialized extra bytes, without enclosing quotes - if _, err = buf.Write(m.RawExtra[1 : len(m.RawExtra)-1]); err != nil { - return err - } - } - - // write final closing quotes - return buf.WriteByte('}') -} - -func (m *Message) UnmarshalJSON(data []byte) error { - i := make(map[string]interface{}, 16) - if err := json.Unmarshal(data, &i); err != nil { - return err - } - for k, v := range i { - if k[0] == '_' { - if m.Extra == nil { - m.Extra = make(map[string]interface{}, 1) - } - m.Extra[k] = v - continue - } - switch k { - case "version": - m.Version = v.(string) - case "host": - m.Host = v.(string) - case "short_message": - m.Short = v.(string) - case "full_message": - m.Full = v.(string) - case "timestamp": - m.TimeUnix = v.(float64) - case "level": - m.Level = int32(v.(float64)) - case "facility": - m.Facility = v.(string) - } - } - return nil -} diff --git a/vendor/src/github.com/Microsoft/go-winio/.gitignore b/vendor/src/github.com/Microsoft/go-winio/.gitignore deleted file mode 100644 index b883f1fdc6..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.exe diff --git a/vendor/src/github.com/Microsoft/go-winio/LICENSE b/vendor/src/github.com/Microsoft/go-winio/LICENSE deleted file mode 100644 index b8b569d774..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2015 Microsoft - -Permission is hereby granted, free of charge, to any person obtaining a copy 
-of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - diff --git a/vendor/src/github.com/Microsoft/go-winio/README.md b/vendor/src/github.com/Microsoft/go-winio/README.md deleted file mode 100644 index 478862a8b9..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/README.md +++ /dev/null @@ -1,15 +0,0 @@ -# go-winio - -This repository contains utilities for efficiently performing Win32 IO operations in -Go. Currently, this is focused on accessing named pipes and other file handles, and -for using named pipes as a net transport. - -This code relies on IO completion ports to avoid blocking IO on system threads, allowing Go -to reuse the thread to schedule another goroutine. This limits support to Windows Vista and -newer operating systems. This is similar to the implementation of network sockets in Go's net -package. - -Please see the LICENSE file for licensing information. - -Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe -for another named pipe implementation. diff --git a/vendor/src/github.com/Microsoft/go-winio/archive/tar/LICENSE b/vendor/src/github.com/Microsoft/go-winio/archive/tar/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/archive/tar/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/Microsoft/go-winio/archive/tar/common.go b/vendor/src/github.com/Microsoft/go-winio/archive/tar/common.go deleted file mode 100644 index 0378401c0d..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/archive/tar/common.go +++ /dev/null @@ -1,344 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package tar implements access to tar archives. -// It aims to cover most of the variations, including those produced -// by GNU and BSD tars. -// -// References: -// http://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5 -// http://www.gnu.org/software/tar/manual/html_node/Standard.html -// http://pubs.opengroup.org/onlinepubs/9699919799/utilities/pax.html -package tar - -import ( - "bytes" - "errors" - "fmt" - "os" - "path" - "time" -) - -const ( - blockSize = 512 - - // Types - TypeReg = '0' // regular file - TypeRegA = '\x00' // regular file - TypeLink = '1' // hard link - TypeSymlink = '2' // symbolic link - TypeChar = '3' // character device node - TypeBlock = '4' // block device node - TypeDir = '5' // directory - TypeFifo = '6' // fifo node - TypeCont = '7' // reserved - TypeXHeader = 'x' // extended header - TypeXGlobalHeader = 'g' // global extended header - TypeGNULongName = 'L' // Next file has a long name - TypeGNULongLink = 'K' // Next file symlinks to a file w/ a long name - TypeGNUSparse = 'S' // sparse file -) - -// A Header represents a single header in a tar archive. -// Some fields may not be populated. -type Header struct { - Name string // name of header file entry - Mode int64 // permission and mode bits - Uid int // user id of owner - Gid int // group id of owner - Size int64 // length in bytes - ModTime time.Time // modified time - Typeflag byte // type of header entry - Linkname string // target name of link - Uname string // user name of owner - Gname string // group name of owner - Devmajor int64 // major number of character or block device - Devminor int64 // minor number of character or block device - AccessTime time.Time // access time - ChangeTime time.Time // status change time - CreationTime time.Time // creation time - Xattrs map[string]string - Winheaders map[string]string -} - -// File name constants from the tar spec. -const ( - fileNameSize = 100 // Maximum number of bytes in a standard tar name. - fileNamePrefixSize = 155 // Maximum number of ustar extension bytes. -) - -// FileInfo returns an os.FileInfo for the Header. -func (h *Header) FileInfo() os.FileInfo { - return headerFileInfo{h} -} - -// headerFileInfo implements os.FileInfo. -type headerFileInfo struct { - h *Header -} - -func (fi headerFileInfo) Size() int64 { return fi.h.Size } -func (fi headerFileInfo) IsDir() bool { return fi.Mode().IsDir() } -func (fi headerFileInfo) ModTime() time.Time { return fi.h.ModTime } -func (fi headerFileInfo) Sys() interface{} { return fi.h } - -// Name returns the base name of the file. 
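Headers like the one defined above are normally consumed through this package's Reader, whose API mirrors the standard library's archive/tar: iterate with Next until io.EOF, then read the entry body from the Reader itself. A sketch (the input file name is hypothetical):

    package main

    import (
    	"fmt"
    	"io"
    	"log"
    	"os"

    	tar "github.com/Microsoft/go-winio/archive/tar"
    )

    func main() {
    	f, err := os.Open("example.tar") // hypothetical input
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	tr := tar.NewReader(f)
    	for {
    		hdr, err := tr.Next()
    		if err == io.EOF {
    			break // end of archive
    		}
    		if err != nil {
    			log.Fatal(err)
    		}
    		fmt.Printf("%s (%d bytes, mode %v)\n", hdr.Name, hdr.Size, hdr.FileInfo().Mode())
    	}
    }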
-func (fi headerFileInfo) Name() string { - if fi.IsDir() { - return path.Base(path.Clean(fi.h.Name)) - } - return path.Base(fi.h.Name) -} - -// Mode returns the permission and mode bits for the headerFileInfo. -func (fi headerFileInfo) Mode() (mode os.FileMode) { - // Set file permission bits. - mode = os.FileMode(fi.h.Mode).Perm() - - // Set setuid, setgid and sticky bits. - if fi.h.Mode&c_ISUID != 0 { - // setuid - mode |= os.ModeSetuid - } - if fi.h.Mode&c_ISGID != 0 { - // setgid - mode |= os.ModeSetgid - } - if fi.h.Mode&c_ISVTX != 0 { - // sticky - mode |= os.ModeSticky - } - - // Set file mode bits. - // clear perm, setuid, setgid and sticky bits. - m := os.FileMode(fi.h.Mode) &^ 07777 - if m == c_ISDIR { - // directory - mode |= os.ModeDir - } - if m == c_ISFIFO { - // named pipe (FIFO) - mode |= os.ModeNamedPipe - } - if m == c_ISLNK { - // symbolic link - mode |= os.ModeSymlink - } - if m == c_ISBLK { - // device file - mode |= os.ModeDevice - } - if m == c_ISCHR { - // Unix character device - mode |= os.ModeDevice - mode |= os.ModeCharDevice - } - if m == c_ISSOCK { - // Unix domain socket - mode |= os.ModeSocket - } - - switch fi.h.Typeflag { - case TypeSymlink: - // symbolic link - mode |= os.ModeSymlink - case TypeChar: - // character device node - mode |= os.ModeDevice - mode |= os.ModeCharDevice - case TypeBlock: - // block device node - mode |= os.ModeDevice - case TypeDir: - // directory - mode |= os.ModeDir - case TypeFifo: - // fifo node - mode |= os.ModeNamedPipe - } - - return mode -} - -// sysStat, if non-nil, populates h from system-dependent fields of fi. -var sysStat func(fi os.FileInfo, h *Header) error - -// Mode constants from the tar spec. -const ( - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket -) - -// Keywords for the PAX Extended Header -const ( - paxAtime = "atime" - paxCharset = "charset" - paxComment = "comment" - paxCtime = "ctime" // please note that ctime is not a valid pax header. - paxCreationTime = "LIBARCHIVE.creationtime" - paxGid = "gid" - paxGname = "gname" - paxLinkpath = "linkpath" - paxMtime = "mtime" - paxPath = "path" - paxSize = "size" - paxUid = "uid" - paxUname = "uname" - paxXattr = "SCHILY.xattr." - paxWindows = "MSWINDOWS." - paxNone = "" -) - -// FileInfoHeader creates a partially-populated Header from fi. -// If fi describes a symlink, FileInfoHeader records link as the link target. -// If fi describes a directory, a slash is appended to the name. -// Because os.FileInfo's Name method returns only the base name of -// the file it describes, it may be necessary to modify the Name field -// of the returned header to provide the full path name of the file. 
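Mode() above translates the traditional octal c_IS* bits into portable os.FileMode flags: the low permission bits copy straight across, while the file-type bits live in the part masked off by &^ 07777. A condensed sketch covering two of the cases (hypothetical helper):

    import "os"

    func tarModeToFileMode(m int64) os.FileMode {
    	const (
    		cISUID = 04000   // setuid
    		cISGID = 02000   // setgid
    		cISDIR = 040000  // directory
    		cISLNK = 0120000 // symbolic link
    	)
    	mode := os.FileMode(m).Perm() // permission bits copy across
    	if m&cISUID != 0 {
    		mode |= os.ModeSetuid
    	}
    	if m&cISGID != 0 {
    		mode |= os.ModeSetgid
    	}
    	switch m &^ 07777 { // mask off perm, setuid, setgid, sticky
    	case cISDIR:
    		mode |= os.ModeDir
    	case cISLNK:
    		mode |= os.ModeSymlink
    	}
    	return mode
    }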
-func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) { - if fi == nil { - return nil, errors.New("tar: FileInfo is nil") - } - fm := fi.Mode() - h := &Header{ - Name: fi.Name(), - ModTime: fi.ModTime(), - Mode: int64(fm.Perm()), // or'd with c_IS* constants later - } - switch { - case fm.IsRegular(): - h.Mode |= c_ISREG - h.Typeflag = TypeReg - h.Size = fi.Size() - case fi.IsDir(): - h.Typeflag = TypeDir - h.Mode |= c_ISDIR - h.Name += "/" - case fm&os.ModeSymlink != 0: - h.Typeflag = TypeSymlink - h.Mode |= c_ISLNK - h.Linkname = link - case fm&os.ModeDevice != 0: - if fm&os.ModeCharDevice != 0 { - h.Mode |= c_ISCHR - h.Typeflag = TypeChar - } else { - h.Mode |= c_ISBLK - h.Typeflag = TypeBlock - } - case fm&os.ModeNamedPipe != 0: - h.Typeflag = TypeFifo - h.Mode |= c_ISFIFO - case fm&os.ModeSocket != 0: - h.Mode |= c_ISSOCK - default: - return nil, fmt.Errorf("archive/tar: unknown file mode %v", fm) - } - if fm&os.ModeSetuid != 0 { - h.Mode |= c_ISUID - } - if fm&os.ModeSetgid != 0 { - h.Mode |= c_ISGID - } - if fm&os.ModeSticky != 0 { - h.Mode |= c_ISVTX - } - // If possible, populate additional fields from OS-specific - // FileInfo fields. - if sys, ok := fi.Sys().(*Header); ok { - // This FileInfo came from a Header (not the OS). Use the - // original Header to populate all remaining fields. - h.Uid = sys.Uid - h.Gid = sys.Gid - h.Uname = sys.Uname - h.Gname = sys.Gname - h.AccessTime = sys.AccessTime - h.ChangeTime = sys.ChangeTime - if sys.Xattrs != nil { - h.Xattrs = make(map[string]string) - for k, v := range sys.Xattrs { - h.Xattrs[k] = v - } - } - if sys.Typeflag == TypeLink { - // hard link - h.Typeflag = TypeLink - h.Size = 0 - h.Linkname = sys.Linkname - } - } - if sysStat != nil { - return h, sysStat(fi, h) - } - return h, nil -} - -var zeroBlock = make([]byte, blockSize) - -// POSIX specifies a sum of the unsigned byte values, but the Sun tar uses signed byte values. -// We compute and return both. -func checksum(header []byte) (unsigned int64, signed int64) { - for i := 0; i < len(header); i++ { - if i == 148 { - // The chksum field (header[148:156]) is special: it should be treated as space bytes. - unsigned += ' ' * 8 - signed += ' ' * 8 - i += 7 - continue - } - unsigned += int64(header[i]) - signed += int64(int8(header[i])) - } - return -} - -type slicer []byte - -func (sp *slicer) next(n int) (b []byte) { - s := *sp - b, *sp = s[0:n], s[n:] - return -} - -func isASCII(s string) bool { - for _, c := range s { - if c >= 0x80 { - return false - } - } - return true -} - -func toASCII(s string) string { - if isASCII(s) { - return s - } - var buf bytes.Buffer - for _, c := range s { - if c < 0x80 { - buf.WriteByte(byte(c)) - } - } - return buf.String() -} - -// isHeaderOnlyType checks if the given type flag is of the type that has no -// data section even if a size is specified. -func isHeaderOnlyType(flag byte) bool { - switch flag { - case TypeLink, TypeSymlink, TypeChar, TypeBlock, TypeDir, TypeFifo: - return true - default: - return false - } -} diff --git a/vendor/src/github.com/Microsoft/go-winio/archive/tar/reader.go b/vendor/src/github.com/Microsoft/go-winio/archive/tar/reader.go deleted file mode 100644 index e210c618a1..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/archive/tar/reader.go +++ /dev/null @@ -1,1002 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
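checksum() above implements the POSIX rule that the chksum field itself (bytes 148..155 of the 512-byte block) is summed as if it held eight ASCII spaces; writer and verifier must agree on that convention. A verifier for one header block written against it (hypothetical helper):

    import (
    	"bytes"
    	"strconv"
    )

    // verifyChecksum recomputes the unsigned POSIX checksum of a
    // 512-byte header block and compares it with the octal value
    // stored in the chksum field, which is counted as spaces.
    func verifyChecksum(block []byte) bool {
    	if len(block) != 512 {
    		return false
    	}
    	var sum int64
    	for i, b := range block {
    		if i >= 148 && i < 156 {
    			sum += ' ' // chksum field counts as spaces
    		} else {
    			sum += int64(b)
    		}
    	}
    	stored := bytes.Trim(block[148:156], " \x00")
    	want, err := strconv.ParseInt(string(stored), 8, 64)
    	return err == nil && want == sum
    }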
-
-package tar
-
-// TODO(dsymonds):
-//   - pax extensions
-
-import (
-	"bytes"
-	"errors"
-	"io"
-	"io/ioutil"
-	"math"
-	"os"
-	"strconv"
-	"strings"
-	"time"
-)
-
-var (
-	ErrHeader = errors.New("archive/tar: invalid tar header")
-)
-
-const maxNanoSecondIntSize = 9
-
-// A Reader provides sequential access to the contents of a tar archive.
-// A tar archive consists of a sequence of files.
-// The Next method advances to the next file in the archive (including the first),
-// and then it can be treated as an io.Reader to access the file's data.
-type Reader struct {
-	r       io.Reader
-	err     error
-	pad     int64           // amount of padding (ignored) after current file entry
-	curr    numBytesReader  // reader for current file entry
-	hdrBuff [blockSize]byte // buffer to use in readHeader
-}
-
-type parser struct {
-	err error // Last error seen
-}
-
-// A numBytesReader is an io.Reader with a numBytes method, returning the number
-// of bytes remaining in the underlying encoded data.
-type numBytesReader interface {
-	io.Reader
-	numBytes() int64
-}
-
-// A regFileReader is a numBytesReader for reading file data from a tar archive.
-type regFileReader struct {
-	r  io.Reader // underlying reader
-	nb int64     // number of unread bytes for current file entry
-}
-
-// A sparseFileReader is a numBytesReader for reading sparse file data from a
-// tar archive.
-type sparseFileReader struct {
-	rfr   numBytesReader // Reads the sparse-encoded file data
-	sp    []sparseEntry  // The sparse map for the file
-	pos   int64          // Keeps track of file position
-	total int64          // Total size of the file
-}
-
-// A sparseEntry holds a single entry in a sparse file's sparse map.
-//
-// Sparse files are represented using a series of sparseEntrys.
-// Despite the name, a sparseEntry represents an actual data fragment that
-// references data found in the underlying archive stream. All regions not
-// covered by a sparseEntry are logically filled with zeros.
-//
-// For example, if the underlying raw file contains the 8-byte data:
-//	var compactData = "abcdefgh"
-//
-// And the sparse map has the following entries:
-//	var sp = []sparseEntry{
-//		{offset: 2,  numBytes: 5}  // Data fragment for [2..7]
-//		{offset: 18, numBytes: 3}  // Data fragment for [18..21]
-//	}
-//
-// Then the content of the resulting sparse file with a "real" size of 25 is:
-//	var sparseData = "\x00"*2 + "abcde" + "\x00"*11 + "fgh" + "\x00"*4
-type sparseEntry struct {
-	offset   int64 // Starting position of the fragment
-	numBytes int64 // Length of the fragment
-}
-
-// Keywords for GNU sparse files in a PAX extended header
-const (
-	paxGNUSparseNumBlocks = "GNU.sparse.numblocks"
-	paxGNUSparseOffset    = "GNU.sparse.offset"
-	paxGNUSparseNumBytes  = "GNU.sparse.numbytes"
-	paxGNUSparseMap       = "GNU.sparse.map"
-	paxGNUSparseName      = "GNU.sparse.name"
-	paxGNUSparseMajor     = "GNU.sparse.major"
-	paxGNUSparseMinor     = "GNU.sparse.minor"
-	paxGNUSparseSize      = "GNU.sparse.size"
-	paxGNUSparseRealSize  = "GNU.sparse.realsize"
-)
-
-// Keywords for old GNU sparse headers
-const (
-	oldGNUSparseMainHeaderOffset               = 386
-	oldGNUSparseMainHeaderIsExtendedOffset     = 482
-	oldGNUSparseMainHeaderNumEntries           = 4
-	oldGNUSparseExtendedHeaderIsExtendedOffset = 504
-	oldGNUSparseExtendedHeaderNumEntries       = 21
-	oldGNUSparseOffsetSize                     = 12
-	oldGNUSparseNumBytesSize                   = 12
-)
-
-// NewReader creates a new Reader reading from r.
-func NewReader(r io.Reader) *Reader { return &Reader{r: r} }
-
-// Next advances to the next entry in the tar archive.
-//
-// io.EOF is returned at the end of the input.
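Before the Next implementation below, a sketch of the typical read loop this API supports. The archive path is a placeholder, and the fork is consumed exactly like the standard library's archive/tar:

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"os"

	tar "github.com/Microsoft/go-winio/archive/tar"
)

func main() {
	f, err := os.Open("example.tar") // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			break // end of archive
		}
		if err != nil {
			log.Fatal(err)
		}
		n, err := io.Copy(ioutil.Discard, tr) // drain this entry's data
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, n)
	}
}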
-func (tr *Reader) Next() (*Header, error) { - if tr.err != nil { - return nil, tr.err - } - - var hdr *Header - var extHdrs map[string]string - - // Externally, Next iterates through the tar archive as if it is a series of - // files. Internally, the tar format often uses fake "files" to add meta - // data that describes the next file. These meta data "files" should not - // normally be visible to the outside. As such, this loop iterates through - // one or more "header files" until it finds a "normal file". -loop: - for { - tr.err = tr.skipUnread() - if tr.err != nil { - return nil, tr.err - } - - hdr = tr.readHeader() - if tr.err != nil { - return nil, tr.err - } - - // Check for PAX/GNU special headers and files. - switch hdr.Typeflag { - case TypeXHeader: - extHdrs, tr.err = parsePAX(tr) - if tr.err != nil { - return nil, tr.err - } - continue loop // This is a meta header affecting the next header - case TypeGNULongName, TypeGNULongLink: - var realname []byte - realname, tr.err = ioutil.ReadAll(tr) - if tr.err != nil { - return nil, tr.err - } - - // Convert GNU extensions to use PAX headers. - if extHdrs == nil { - extHdrs = make(map[string]string) - } - var p parser - switch hdr.Typeflag { - case TypeGNULongName: - extHdrs[paxPath] = p.parseString(realname) - case TypeGNULongLink: - extHdrs[paxLinkpath] = p.parseString(realname) - } - if p.err != nil { - tr.err = p.err - return nil, tr.err - } - continue loop // This is a meta header affecting the next header - default: - mergePAX(hdr, extHdrs) - - // Check for a PAX format sparse file - sp, err := tr.checkForGNUSparsePAXHeaders(hdr, extHdrs) - if err != nil { - tr.err = err - return nil, err - } - if sp != nil { - // Current file is a PAX format GNU sparse file. - // Set the current file reader to a sparse file reader. - tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) - if tr.err != nil { - return nil, tr.err - } - } - break loop // This is a file, so stop - } - } - return hdr, nil -} - -// checkForGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers. If they are found, then -// this function reads the sparse map and returns it. Unknown sparse formats are ignored, causing the file to -// be treated as a regular file. -func (tr *Reader) checkForGNUSparsePAXHeaders(hdr *Header, headers map[string]string) ([]sparseEntry, error) { - var sparseFormat string - - // Check for sparse format indicators - major, majorOk := headers[paxGNUSparseMajor] - minor, minorOk := headers[paxGNUSparseMinor] - sparseName, sparseNameOk := headers[paxGNUSparseName] - _, sparseMapOk := headers[paxGNUSparseMap] - sparseSize, sparseSizeOk := headers[paxGNUSparseSize] - sparseRealSize, sparseRealSizeOk := headers[paxGNUSparseRealSize] - - // Identify which, if any, sparse format applies from which PAX headers are set - if majorOk && minorOk { - sparseFormat = major + "." + minor - } else if sparseNameOk && sparseMapOk { - sparseFormat = "0.1" - } else if sparseSizeOk { - sparseFormat = "0.0" - } else { - // Not a PAX format GNU sparse file. 
- return nil, nil - } - - // Check for unknown sparse format - if sparseFormat != "0.0" && sparseFormat != "0.1" && sparseFormat != "1.0" { - return nil, nil - } - - // Update hdr from GNU sparse PAX headers - if sparseNameOk { - hdr.Name = sparseName - } - if sparseSizeOk { - realSize, err := strconv.ParseInt(sparseSize, 10, 0) - if err != nil { - return nil, ErrHeader - } - hdr.Size = realSize - } else if sparseRealSizeOk { - realSize, err := strconv.ParseInt(sparseRealSize, 10, 0) - if err != nil { - return nil, ErrHeader - } - hdr.Size = realSize - } - - // Set up the sparse map, according to the particular sparse format in use - var sp []sparseEntry - var err error - switch sparseFormat { - case "0.0", "0.1": - sp, err = readGNUSparseMap0x1(headers) - case "1.0": - sp, err = readGNUSparseMap1x0(tr.curr) - } - return sp, err -} - -// mergePAX merges well known headers according to PAX standard. -// In general headers with the same name as those found -// in the header struct overwrite those found in the header -// struct with higher precision or longer values. Esp. useful -// for name and linkname fields. -func mergePAX(hdr *Header, headers map[string]string) error { - for k, v := range headers { - switch k { - case paxPath: - hdr.Name = v - case paxLinkpath: - hdr.Linkname = v - case paxGname: - hdr.Gname = v - case paxUname: - hdr.Uname = v - case paxUid: - uid, err := strconv.ParseInt(v, 10, 0) - if err != nil { - return err - } - hdr.Uid = int(uid) - case paxGid: - gid, err := strconv.ParseInt(v, 10, 0) - if err != nil { - return err - } - hdr.Gid = int(gid) - case paxAtime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.AccessTime = t - case paxMtime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.ModTime = t - case paxCtime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.ChangeTime = t - case paxCreationTime: - t, err := parsePAXTime(v) - if err != nil { - return err - } - hdr.CreationTime = t - case paxSize: - size, err := strconv.ParseInt(v, 10, 0) - if err != nil { - return err - } - hdr.Size = int64(size) - default: - if strings.HasPrefix(k, paxXattr) { - if hdr.Xattrs == nil { - hdr.Xattrs = make(map[string]string) - } - hdr.Xattrs[k[len(paxXattr):]] = v - } else if strings.HasPrefix(k, paxWindows) { - if hdr.Winheaders == nil { - hdr.Winheaders = make(map[string]string) - } - hdr.Winheaders[k[len(paxWindows):]] = v - } - } - } - return nil -} - -// parsePAXTime takes a string of the form %d.%d as described in -// the PAX specification. -func parsePAXTime(t string) (time.Time, error) { - buf := []byte(t) - pos := bytes.IndexByte(buf, '.') - var seconds, nanoseconds int64 - var err error - if pos == -1 { - seconds, err = strconv.ParseInt(t, 10, 0) - if err != nil { - return time.Time{}, err - } - } else { - seconds, err = strconv.ParseInt(string(buf[:pos]), 10, 0) - if err != nil { - return time.Time{}, err - } - nano_buf := string(buf[pos+1:]) - // Pad as needed before converting to a decimal. - // For example .030 -> .030000000 -> 30000000 nanoseconds - if len(nano_buf) < maxNanoSecondIntSize { - // Right pad - nano_buf += strings.Repeat("0", maxNanoSecondIntSize-len(nano_buf)) - } else if len(nano_buf) > maxNanoSecondIntSize { - // Right truncate - nano_buf = nano_buf[:maxNanoSecondIntSize] - } - nanoseconds, err = strconv.ParseInt(string(nano_buf), 10, 0) - if err != nil { - return time.Time{}, err - } - } - ts := time.Unix(seconds, nanoseconds) - return ts, nil -} - -// parsePAX parses PAX headers. 
-// If an extended header (type 'x') is invalid, ErrHeader is returned -func parsePAX(r io.Reader) (map[string]string, error) { - buf, err := ioutil.ReadAll(r) - if err != nil { - return nil, err - } - sbuf := string(buf) - - // For GNU PAX sparse format 0.0 support. - // This function transforms the sparse format 0.0 headers into sparse format 0.1 headers. - var sparseMap bytes.Buffer - - headers := make(map[string]string) - // Each record is constructed as - // "%d %s=%s\n", length, keyword, value - for len(sbuf) > 0 { - key, value, residual, err := parsePAXRecord(sbuf) - if err != nil { - return nil, ErrHeader - } - sbuf = residual - - keyStr := string(key) - if keyStr == paxGNUSparseOffset || keyStr == paxGNUSparseNumBytes { - // GNU sparse format 0.0 special key. Write to sparseMap instead of using the headers map. - sparseMap.WriteString(value) - sparseMap.Write([]byte{','}) - } else { - // Normal key. Set the value in the headers map. - headers[keyStr] = string(value) - } - } - if sparseMap.Len() != 0 { - // Add sparse info to headers, chopping off the extra comma - sparseMap.Truncate(sparseMap.Len() - 1) - headers[paxGNUSparseMap] = sparseMap.String() - } - return headers, nil -} - -// parsePAXRecord parses the input PAX record string into a key-value pair. -// If parsing is successful, it will slice off the currently read record and -// return the remainder as r. -// -// A PAX record is of the following form: -// "%d %s=%s\n" % (size, key, value) -func parsePAXRecord(s string) (k, v, r string, err error) { - // The size field ends at the first space. - sp := strings.IndexByte(s, ' ') - if sp == -1 { - return "", "", s, ErrHeader - } - - // Parse the first token as a decimal integer. - n, perr := strconv.ParseInt(s[:sp], 10, 0) // Intentionally parse as native int - if perr != nil || n < 5 || int64(len(s)) < n { - return "", "", s, ErrHeader - } - - // Extract everything between the space and the final newline. - rec, nl, rem := s[sp+1:n-1], s[n-1:n], s[n:] - if nl != "\n" { - return "", "", s, ErrHeader - } - - // The first equals separates the key from the value. - eq := strings.IndexByte(rec, '=') - if eq == -1 { - return "", "", s, ErrHeader - } - return rec[:eq], rec[eq+1:], rem, nil -} - -// parseString parses bytes as a NUL-terminated C-style string. -// If a NUL byte is not found then the whole slice is returned as a string. -func (*parser) parseString(b []byte) string { - n := 0 - for n < len(b) && b[n] != 0 { - n++ - } - return string(b[0:n]) -} - -// parseNumeric parses the input as being encoded in either base-256 or octal. -// This function may return negative numbers. -// If parsing fails or an integer overflow occurs, err will be set. -func (p *parser) parseNumeric(b []byte) int64 { - // Check for base-256 (binary) format first. - // If the first bit is set, then all following bits constitute a two's - // complement encoded number in big-endian byte order. - if len(b) > 0 && b[0]&0x80 != 0 { - // Handling negative numbers relies on the following identity: - // -a-1 == ^a - // - // If the number is negative, we use an inversion mask to invert the - // data bytes and treat the value as an unsigned number. 
- var inv byte // 0x00 if positive or zero, 0xff if negative - if b[0]&0x40 != 0 { - inv = 0xff - } - - var x uint64 - for i, c := range b { - c ^= inv // Inverts c only if inv is 0xff, otherwise does nothing - if i == 0 { - c &= 0x7f // Ignore signal bit in first byte - } - if (x >> 56) > 0 { - p.err = ErrHeader // Integer overflow - return 0 - } - x = x<<8 | uint64(c) - } - if (x >> 63) > 0 { - p.err = ErrHeader // Integer overflow - return 0 - } - if inv == 0xff { - return ^int64(x) - } - return int64(x) - } - - // Normal case is base-8 (octal) format. - return p.parseOctal(b) -} - -func (p *parser) parseOctal(b []byte) int64 { - // Because unused fields are filled with NULs, we need - // to skip leading NULs. Fields may also be padded with - // spaces or NULs. - // So we remove leading and trailing NULs and spaces to - // be sure. - b = bytes.Trim(b, " \x00") - - if len(b) == 0 { - return 0 - } - x, perr := strconv.ParseUint(p.parseString(b), 8, 64) - if perr != nil { - p.err = ErrHeader - } - return int64(x) -} - -// skipUnread skips any unread bytes in the existing file entry, as well as any -// alignment padding. It returns io.ErrUnexpectedEOF if any io.EOF is -// encountered in the data portion; it is okay to hit io.EOF in the padding. -// -// Note that this function still works properly even when sparse files are being -// used since numBytes returns the bytes remaining in the underlying io.Reader. -func (tr *Reader) skipUnread() error { - dataSkip := tr.numBytes() // Number of data bytes to skip - totalSkip := dataSkip + tr.pad // Total number of bytes to skip - tr.curr, tr.pad = nil, 0 - - // If possible, Seek to the last byte before the end of the data section. - // Do this because Seek is often lazy about reporting errors; this will mask - // the fact that the tar stream may be truncated. We can rely on the - // io.CopyN done shortly afterwards to trigger any IO errors. - var seekSkipped int64 // Number of bytes skipped via Seek - if sr, ok := tr.r.(io.Seeker); ok && dataSkip > 1 { - // Not all io.Seeker can actually Seek. For example, os.Stdin implements - // io.Seeker, but calling Seek always returns an error and performs - // no action. Thus, we try an innocent seek to the current position - // to see if Seek is really supported. - pos1, err := sr.Seek(0, os.SEEK_CUR) - if err == nil { - // Seek seems supported, so perform the real Seek. - pos2, err := sr.Seek(dataSkip-1, os.SEEK_CUR) - if err != nil { - tr.err = err - return tr.err - } - seekSkipped = pos2 - pos1 - } - } - - var copySkipped int64 // Number of bytes skipped via CopyN - copySkipped, tr.err = io.CopyN(ioutil.Discard, tr.r, totalSkip-seekSkipped) - if tr.err == io.EOF && seekSkipped+copySkipped < dataSkip { - tr.err = io.ErrUnexpectedEOF - } - return tr.err -} - -func (tr *Reader) verifyChecksum(header []byte) bool { - if tr.err != nil { - return false - } - - var p parser - given := p.parseOctal(header[148:156]) - unsigned, signed := checksum(header) - return p.err == nil && (given == unsigned || given == signed) -} - -// readHeader reads the next block header and assumes that the underlying reader -// is already aligned to a block boundary. -// -// The err will be set to io.EOF only when one of the following occurs: -// * Exactly 0 bytes are read and EOF is hit. -// * Exactly 1 block of zeros is read and EOF is hit. -// * At least 2 blocks of zeros are read. 
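parseNumeric above handles the GNU base-256 extension with an inversion mask plus overflow checks; the underlying encoding is simply big-endian two's complement with the top bit of the first byte reserved as a format flag. A simplified decoder, with no overflow checks and an illustrative name:

package main

import "fmt"

// decodeBase256 reads a base-256 numeric field: bit 0x80 of the first
// byte flags the format, bit 0x40 is the sign, and the remaining bits
// form a big-endian two's-complement value.
func decodeBase256(b []byte) int64 {
	x := int64(b[0] & 0x3f) // low six payload bits of the first byte
	if b[0]&0x40 != 0 {
		x -= 64 // sign-extend when the sign bit is set
	}
	for _, c := range b[1:] {
		x = x<<8 | int64(c)
	}
	return x
}

func main() {
	fmt.Println(decodeBase256([]byte{0x80, 0x04, 0x00})) // 1024
	fmt.Println(decodeBase256([]byte{0xff, 0xff, 0xff})) // -1
}

readHeader, whose EOF contract is documented above, follows.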
-func (tr *Reader) readHeader() *Header { - header := tr.hdrBuff[:] - copy(header, zeroBlock) - - if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { - return nil // io.EOF is okay here - } - - // Two blocks of zero bytes marks the end of the archive. - if bytes.Equal(header, zeroBlock[0:blockSize]) { - if _, tr.err = io.ReadFull(tr.r, header); tr.err != nil { - return nil // io.EOF is okay here - } - if bytes.Equal(header, zeroBlock[0:blockSize]) { - tr.err = io.EOF - } else { - tr.err = ErrHeader // zero block and then non-zero block - } - return nil - } - - if !tr.verifyChecksum(header) { - tr.err = ErrHeader - return nil - } - - // Unpack - var p parser - hdr := new(Header) - s := slicer(header) - - hdr.Name = p.parseString(s.next(100)) - hdr.Mode = p.parseNumeric(s.next(8)) - hdr.Uid = int(p.parseNumeric(s.next(8))) - hdr.Gid = int(p.parseNumeric(s.next(8))) - hdr.Size = p.parseNumeric(s.next(12)) - hdr.ModTime = time.Unix(p.parseNumeric(s.next(12)), 0) - s.next(8) // chksum - hdr.Typeflag = s.next(1)[0] - hdr.Linkname = p.parseString(s.next(100)) - - // The remainder of the header depends on the value of magic. - // The original (v7) version of tar had no explicit magic field, - // so its magic bytes, like the rest of the block, are NULs. - magic := string(s.next(8)) // contains version field as well. - var format string - switch { - case magic[:6] == "ustar\x00": // POSIX tar (1003.1-1988) - if string(header[508:512]) == "tar\x00" { - format = "star" - } else { - format = "posix" - } - case magic == "ustar \x00": // old GNU tar - format = "gnu" - } - - switch format { - case "posix", "gnu", "star": - hdr.Uname = p.parseString(s.next(32)) - hdr.Gname = p.parseString(s.next(32)) - devmajor := s.next(8) - devminor := s.next(8) - if hdr.Typeflag == TypeChar || hdr.Typeflag == TypeBlock { - hdr.Devmajor = p.parseNumeric(devmajor) - hdr.Devminor = p.parseNumeric(devminor) - } - var prefix string - switch format { - case "posix", "gnu": - prefix = p.parseString(s.next(155)) - case "star": - prefix = p.parseString(s.next(131)) - hdr.AccessTime = time.Unix(p.parseNumeric(s.next(12)), 0) - hdr.ChangeTime = time.Unix(p.parseNumeric(s.next(12)), 0) - } - if len(prefix) > 0 { - hdr.Name = prefix + "/" + hdr.Name - } - } - - if p.err != nil { - tr.err = p.err - return nil - } - - nb := hdr.Size - if isHeaderOnlyType(hdr.Typeflag) { - nb = 0 - } - if nb < 0 { - tr.err = ErrHeader - return nil - } - - // Set the current file reader. - tr.pad = -nb & (blockSize - 1) // blockSize is a power of two - tr.curr = ®FileReader{r: tr.r, nb: nb} - - // Check for old GNU sparse format entry. - if hdr.Typeflag == TypeGNUSparse { - // Get the real size of the file. - hdr.Size = p.parseNumeric(header[483:495]) - if p.err != nil { - tr.err = p.err - return nil - } - - // Read the sparse map. - sp := tr.readOldGNUSparseMap(header) - if tr.err != nil { - return nil - } - - // Current file is a GNU sparse file. Update the current file reader. - tr.curr, tr.err = newSparseFileReader(tr.curr, sp, hdr.Size) - if tr.err != nil { - return nil - } - } - - return hdr -} - -// readOldGNUSparseMap reads the sparse map as stored in the old GNU sparse format. -// The sparse map is stored in the tar header if it's small enough. If it's larger than four entries, -// then one or more extension headers are used to store the rest of the sparse map. 
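The unpacking sequence in readHeader above fixes the classic field layout: name at 0:100, mode 100:108, uid 108:116, gid 116:124, size 124:136, mtime 136:148, chksum 148:156, typeflag at 156, linkname 157:257, then the magic. A sketch that pulls two fields straight out of a raw block at those offsets (the helper name is illustrative):

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// nameAndSize extracts the NUL-terminated name and the octal size
// field from a raw 512-byte header block.
func nameAndSize(block []byte) (string, int64, error) {
	name := string(bytes.TrimRight(block[0:100], "\x00"))
	sizeField := string(bytes.Trim(block[124:136], " \x00"))
	size, err := strconv.ParseInt(sizeField, 8, 64)
	return name, size, err
}

func main() {
	block := make([]byte, 512)
	copy(block[0:], "hello.txt")
	copy(block[124:], "00000000644 ") // octal for 420, space-terminated
	name, size, err := nameAndSize(block)
	fmt.Println(name, size, err) // hello.txt 420 <nil>
}

The old-GNU sparse map parser documented above continues below.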
-func (tr *Reader) readOldGNUSparseMap(header []byte) []sparseEntry { - var p parser - isExtended := header[oldGNUSparseMainHeaderIsExtendedOffset] != 0 - spCap := oldGNUSparseMainHeaderNumEntries - if isExtended { - spCap += oldGNUSparseExtendedHeaderNumEntries - } - sp := make([]sparseEntry, 0, spCap) - s := slicer(header[oldGNUSparseMainHeaderOffset:]) - - // Read the four entries from the main tar header - for i := 0; i < oldGNUSparseMainHeaderNumEntries; i++ { - offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize)) - numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize)) - if p.err != nil { - tr.err = p.err - return nil - } - if offset == 0 && numBytes == 0 { - break - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - - for isExtended { - // There are more entries. Read an extension header and parse its entries. - sparseHeader := make([]byte, blockSize) - if _, tr.err = io.ReadFull(tr.r, sparseHeader); tr.err != nil { - return nil - } - isExtended = sparseHeader[oldGNUSparseExtendedHeaderIsExtendedOffset] != 0 - s = slicer(sparseHeader) - for i := 0; i < oldGNUSparseExtendedHeaderNumEntries; i++ { - offset := p.parseNumeric(s.next(oldGNUSparseOffsetSize)) - numBytes := p.parseNumeric(s.next(oldGNUSparseNumBytesSize)) - if p.err != nil { - tr.err = p.err - return nil - } - if offset == 0 && numBytes == 0 { - break - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - } - return sp -} - -// readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format -// version 1.0. The format of the sparse map consists of a series of -// newline-terminated numeric fields. The first field is the number of entries -// and is always present. Following this are the entries, consisting of two -// fields (offset, numBytes). This function must stop reading at the end -// boundary of the block containing the last newline. -// -// Note that the GNU manual says that numeric values should be encoded in octal -// format. However, the GNU tar utility itself outputs these values in decimal. -// As such, this library treats values as being encoded in decimal. -func readGNUSparseMap1x0(r io.Reader) ([]sparseEntry, error) { - var cntNewline int64 - var buf bytes.Buffer - var blk = make([]byte, blockSize) - - // feedTokens copies data in numBlock chunks from r into buf until there are - // at least cnt newlines in buf. It will not read more blocks than needed. - var feedTokens = func(cnt int64) error { - for cntNewline < cnt { - if _, err := io.ReadFull(r, blk); err != nil { - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - buf.Write(blk) - for _, c := range blk { - if c == '\n' { - cntNewline++ - } - } - } - return nil - } - - // nextToken gets the next token delimited by a newline. This assumes that - // at least one newline exists in the buffer. - var nextToken = func() string { - cntNewline-- - tok, _ := buf.ReadString('\n') - return tok[:len(tok)-1] // Cut off newline - } - - // Parse for the number of entries. - // Use integer overflow resistant math to check this. - if err := feedTokens(1); err != nil { - return nil, err - } - numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int - if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { - return nil, ErrHeader - } - - // Parse for all member entries. - // numEntries is trusted after this since a potential attacker must have - // committed resources proportional to what this library used. 
- if err := feedTokens(2 * numEntries); err != nil { - return nil, err - } - sp := make([]sparseEntry, 0, numEntries) - for i := int64(0); i < numEntries; i++ { - offset, err := strconv.ParseInt(nextToken(), 10, 64) - if err != nil { - return nil, ErrHeader - } - numBytes, err := strconv.ParseInt(nextToken(), 10, 64) - if err != nil { - return nil, ErrHeader - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - return sp, nil -} - -// readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format -// version 0.1. The sparse map is stored in the PAX headers. -func readGNUSparseMap0x1(extHdrs map[string]string) ([]sparseEntry, error) { - // Get number of entries. - // Use integer overflow resistant math to check this. - numEntriesStr := extHdrs[paxGNUSparseNumBlocks] - numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int - if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) { - return nil, ErrHeader - } - - // There should be two numbers in sparseMap for each entry. - sparseMap := strings.Split(extHdrs[paxGNUSparseMap], ",") - if int64(len(sparseMap)) != 2*numEntries { - return nil, ErrHeader - } - - // Loop through the entries in the sparse map. - // numEntries is trusted now. - sp := make([]sparseEntry, 0, numEntries) - for i := int64(0); i < numEntries; i++ { - offset, err := strconv.ParseInt(sparseMap[2*i], 10, 64) - if err != nil { - return nil, ErrHeader - } - numBytes, err := strconv.ParseInt(sparseMap[2*i+1], 10, 64) - if err != nil { - return nil, ErrHeader - } - sp = append(sp, sparseEntry{offset: offset, numBytes: numBytes}) - } - return sp, nil -} - -// numBytes returns the number of bytes left to read in the current file's entry -// in the tar archive, or 0 if there is no current file. -func (tr *Reader) numBytes() int64 { - if tr.curr == nil { - // No current file, so no bytes - return 0 - } - return tr.curr.numBytes() -} - -// Read reads from the current entry in the tar archive. -// It returns 0, io.EOF when it reaches the end of that entry, -// until Next is called to advance to the next entry. -// -// Calling Read on special types like TypeLink, TypeSymLink, TypeChar, -// TypeBlock, TypeDir, and TypeFifo returns 0, io.EOF regardless of what -// the Header.Size claims. -func (tr *Reader) Read(b []byte) (n int, err error) { - if tr.err != nil { - return 0, tr.err - } - if tr.curr == nil { - return 0, io.EOF - } - - n, err = tr.curr.Read(b) - if err != nil && err != io.EOF { - tr.err = err - } - return -} - -func (rfr *regFileReader) Read(b []byte) (n int, err error) { - if rfr.nb == 0 { - // file consumed - return 0, io.EOF - } - if int64(len(b)) > rfr.nb { - b = b[0:rfr.nb] - } - n, err = rfr.r.Read(b) - rfr.nb -= int64(n) - - if err == io.EOF && rfr.nb > 0 { - err = io.ErrUnexpectedEOF - } - return -} - -// numBytes returns the number of bytes left to read in the file's data in the tar archive. -func (rfr *regFileReader) numBytes() int64 { - return rfr.nb -} - -// newSparseFileReader creates a new sparseFileReader, but validates all of the -// sparse entries before doing so. -func newSparseFileReader(rfr numBytesReader, sp []sparseEntry, total int64) (*sparseFileReader, error) { - if total < 0 { - return nil, ErrHeader // Total size cannot be negative - } - - // Validate all sparse entries. These are the same checks as performed by - // the BSD tar utility. 
- for i, s := range sp { - switch { - case s.offset < 0 || s.numBytes < 0: - return nil, ErrHeader // Negative values are never okay - case s.offset > math.MaxInt64-s.numBytes: - return nil, ErrHeader // Integer overflow with large length - case s.offset+s.numBytes > total: - return nil, ErrHeader // Region extends beyond the "real" size - case i > 0 && sp[i-1].offset+sp[i-1].numBytes > s.offset: - return nil, ErrHeader // Regions can't overlap and must be in order - } - } - return &sparseFileReader{rfr: rfr, sp: sp, total: total}, nil -} - -// readHole reads a sparse hole ending at endOffset. -func (sfr *sparseFileReader) readHole(b []byte, endOffset int64) int { - n64 := endOffset - sfr.pos - if n64 > int64(len(b)) { - n64 = int64(len(b)) - } - n := int(n64) - for i := 0; i < n; i++ { - b[i] = 0 - } - sfr.pos += n64 - return n -} - -// Read reads the sparse file data in expanded form. -func (sfr *sparseFileReader) Read(b []byte) (n int, err error) { - // Skip past all empty fragments. - for len(sfr.sp) > 0 && sfr.sp[0].numBytes == 0 { - sfr.sp = sfr.sp[1:] - } - - // If there are no more fragments, then it is possible that there - // is one last sparse hole. - if len(sfr.sp) == 0 { - // This behavior matches the BSD tar utility. - // However, GNU tar stops returning data even if sfr.total is unmet. - if sfr.pos < sfr.total { - return sfr.readHole(b, sfr.total), nil - } - return 0, io.EOF - } - - // In front of a data fragment, so read a hole. - if sfr.pos < sfr.sp[0].offset { - return sfr.readHole(b, sfr.sp[0].offset), nil - } - - // In a data fragment, so read from it. - // This math is overflow free since we verify that offset and numBytes can - // be safely added when creating the sparseFileReader. - endPos := sfr.sp[0].offset + sfr.sp[0].numBytes // End offset of fragment - bytesLeft := endPos - sfr.pos // Bytes left in fragment - if int64(len(b)) > bytesLeft { - b = b[:bytesLeft] - } - - n, err = sfr.rfr.Read(b) - sfr.pos += int64(n) - if err == io.EOF { - if sfr.pos < endPos { - err = io.ErrUnexpectedEOF // There was supposed to be more data - } else if sfr.pos < sfr.total { - err = nil // There is still an implicit sparse hole at the end - } - } - - if sfr.pos == endPos { - sfr.sp = sfr.sp[1:] // We are done with this fragment, so pop it - } - return n, err -} - -// numBytes returns the number of bytes left to read in the sparse file's -// sparse-encoded data in the tar archive. -func (sfr *sparseFileReader) numBytes() int64 { - return sfr.rfr.numBytes() -} diff --git a/vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_atim.go b/vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_atim.go deleted file mode 100644 index cf9cc79c59..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_atim.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
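sparseFileReader.Read above streams this expansion; reduced to a pure function over byte slices, the semantics are the following sketch, reusing the example map from the sparseEntry comment earlier (the type and function names are illustrative):

package main

import "fmt"

type entry struct{ offset, numBytes int64 }

// expand places each data fragment at its offset in a zero-filled
// buffer of the real size, matching sparseFileReader's read semantics.
func expand(compact []byte, sp []entry, realSize int64) []byte {
	out := make([]byte, realSize) // holes read as zeros
	for _, e := range sp {
		copy(out[e.offset:e.offset+e.numBytes], compact[:e.numBytes])
		compact = compact[e.numBytes:]
	}
	return out
}

func main() {
	sp := []entry{{offset: 2, numBytes: 5}, {offset: 18, numBytes: 3}}
	fmt.Printf("%q\n", expand([]byte("abcdefgh"), sp, 25))
	// "\x00\x00abcde" + 11 zeros + "fgh" + 4 zeros
}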
- -// +build linux dragonfly openbsd solaris - -package tar - -import ( - "syscall" - "time" -) - -func statAtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atim.Unix()) -} - -func statCtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Ctim.Unix()) -} diff --git a/vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go b/vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go deleted file mode 100644 index 6f17dbe307..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_atimespec.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build darwin freebsd netbsd - -package tar - -import ( - "syscall" - "time" -) - -func statAtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Atimespec.Unix()) -} - -func statCtime(st *syscall.Stat_t) time.Time { - return time.Unix(st.Ctimespec.Unix()) -} diff --git a/vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_unix.go b/vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_unix.go deleted file mode 100644 index cb843db4cf..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/archive/tar/stat_unix.go +++ /dev/null @@ -1,32 +0,0 @@ -// Copyright 2012 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin dragonfly freebsd openbsd netbsd solaris - -package tar - -import ( - "os" - "syscall" -) - -func init() { - sysStat = statUnix -} - -func statUnix(fi os.FileInfo, h *Header) error { - sys, ok := fi.Sys().(*syscall.Stat_t) - if !ok { - return nil - } - h.Uid = int(sys.Uid) - h.Gid = int(sys.Gid) - // TODO(bradfitz): populate username & group. os/user - // doesn't cache LookupId lookups, and lacks group - // lookup functions. - h.AccessTime = statAtime(sys) - h.ChangeTime = statCtime(sys) - // TODO(bradfitz): major/minor device numbers? - return nil -} diff --git a/vendor/src/github.com/Microsoft/go-winio/archive/tar/writer.go b/vendor/src/github.com/Microsoft/go-winio/archive/tar/writer.go deleted file mode 100644 index 30d7e606d6..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/archive/tar/writer.go +++ /dev/null @@ -1,444 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package tar - -// TODO(dsymonds): -// - catch more errors (no first header, etc.) - -import ( - "bytes" - "errors" - "fmt" - "io" - "path" - "sort" - "strconv" - "strings" - "time" -) - -var ( - ErrWriteTooLong = errors.New("archive/tar: write too long") - ErrFieldTooLong = errors.New("archive/tar: header field too long") - ErrWriteAfterClose = errors.New("archive/tar: write after close") - errInvalidHeader = errors.New("archive/tar: header field too long or contains invalid values") -) - -// A Writer provides sequential writing of a tar archive in POSIX.1 format. -// A tar archive consists of a sequence of files. -// Call WriteHeader to begin a new file, and then call Write to supply that file's data, -// writing at most hdr.Size bytes in total. 
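Before the Writer internals below, the usual write sequence for this API. Note one fork-specific wrinkle: NewWriter here enables preferPax, and as the formatTime closure below is written, any in-range mtime gets a PAX record, so a PAX extended-header entry precedes each file:

package main

import (
	"bytes"
	"fmt"
	"log"
	"time"

	tar "github.com/Microsoft/go-winio/archive/tar"
)

func main() {
	var buf bytes.Buffer
	tw := tar.NewWriter(&buf)

	body := []byte("hello world\n")
	hdr := &tar.Header{
		Name:     "hello.txt",
		Mode:     0644,
		Size:     int64(len(body)),
		ModTime:  time.Unix(1244428340, 0),
		Typeflag: tar.TypeReg,
	}
	if err := tw.WriteHeader(hdr); err != nil { // begins the entry
		log.Fatal(err)
	}
	if _, err := tw.Write(body); err != nil { // at most hdr.Size bytes
		log.Fatal(err)
	}
	if err := tw.Close(); err != nil { // writes padding plus the two-block trailer
		log.Fatal(err)
	}
	fmt.Println(buf.Len()) // expect 3072 here (PAX entry + file + trailer); stdlib archive/tar would emit 2048
}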
-type Writer struct {
-	w          io.Writer
-	err        error
-	nb         int64 // number of unwritten bytes for current file entry
-	pad        int64 // amount of padding to write after current file entry
-	closed     bool
-	usedBinary bool            // whether the binary numeric field extension was used
-	preferPax  bool            // use pax header instead of binary numeric header
-	hdrBuff    [blockSize]byte // buffer to use in writeHeader when writing a regular header
-	paxHdrBuff [blockSize]byte // buffer to use in writeHeader when writing a pax header
-}
-
-type formatter struct {
-	err error // Last error seen
-}
-
-// NewWriter creates a new Writer writing to w.
-func NewWriter(w io.Writer) *Writer { return &Writer{w: w, preferPax: true} }
-
-// Flush finishes writing the current file (optional).
-func (tw *Writer) Flush() error {
-	if tw.nb > 0 {
-		tw.err = fmt.Errorf("archive/tar: missed writing %d bytes", tw.nb)
-		return tw.err
-	}
-
-	n := tw.nb + tw.pad
-	for n > 0 && tw.err == nil {
-		nr := n
-		if nr > blockSize {
-			nr = blockSize
-		}
-		var nw int
-		nw, tw.err = tw.w.Write(zeroBlock[0:nr])
-		n -= int64(nw)
-	}
-	tw.nb = 0
-	tw.pad = 0
-	return tw.err
-}
-
-// Write s into b, terminating it with a NUL if there is room.
-func (f *formatter) formatString(b []byte, s string) {
-	if len(s) > len(b) {
-		f.err = ErrFieldTooLong
-		return
-	}
-	ascii := toASCII(s)
-	copy(b, ascii)
-	if len(ascii) < len(b) {
-		b[len(ascii)] = 0
-	}
-}
-
-// Encode x as an octal ASCII string and write it into b with leading zeros.
-func (f *formatter) formatOctal(b []byte, x int64) {
-	s := strconv.FormatInt(x, 8)
-	// leading zeros, but leave room for a NUL.
-	for len(s)+1 < len(b) {
-		s = "0" + s
-	}
-	f.formatString(b, s)
-}
-
-// fitsInBase256 reports whether x can be encoded into n bytes using base-256
-// encoding. Unlike octal encoding, base-256 encoding does not require that the
-// string ends with a NUL character. Thus, all n bytes are available for output.
-//
-// If operating in binary mode, this assumes strict GNU binary mode; which means
-// that the first byte can only be either 0x80 or 0xff. Thus, the first byte is
-// equivalent to the sign bit in two's complement form.
-func fitsInBase256(n int, x int64) bool {
-	var binBits = uint(n-1) * 8
-	return n >= 9 || (x >= -1<<binBits && x < 1<<binBits)
-}
-
-// Write x into b, as binary (GNUtar/star extension).
-func (f *formatter) formatNumeric(b []byte, x int64) {
-	if fitsInBase256(len(b), x) {
-		for i := len(b) - 1; i >= 0; i-- {
-			b[i] = byte(x)
-			x >>= 8
-		}
-		b[0] |= 0x80 // Highest bit indicates binary format
-		return
-	}
-
-	f.formatOctal(b, 0) // Last resort, just write zero
-	f.err = ErrFieldTooLong
-}
-
-var (
-	minTime = time.Unix(0, 0)
-	// There is room for 11 octal digits (33 bits) of mtime.
-	maxTime = minTime.Add((1<<33 - 1) * time.Second)
-)
-
-// WriteHeader writes hdr and prepares to accept the file's contents.
-// WriteHeader calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-func (tw *Writer) WriteHeader(hdr *Header) error {
-	return tw.writeHeader(hdr, true)
-}
-
-// writeHeader writes hdr and prepares to accept the file's contents.
-// It calls Flush if it is not the first header.
-// Calling after a Close will return ErrWriteAfterClose.
-// This method is called internally by writePAXHeader with allowPax set to
-// false, which suppresses the writing of a pax header for the extended
-// header entry itself.
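A consequence of formatOctal and fitsInBase256 above: an 8-byte octal field keeps one byte for the trailing NUL, so it holds at most 7 octal digits, and anything past 07777777 (2097151) must fall back to base-256 or a PAX record. That mirrors the decision the formatNumeric closure inside writeHeader makes below. A small illustration (the function name is illustrative):

package main

import (
	"fmt"
	"strconv"
)

// fitsInOctal reports whether x fits in an n-byte octal field that
// keeps room for a trailing NUL.
func fitsInOctal(n int, x int64) bool {
	s := strconv.FormatInt(x, 8)
	return x >= 0 && len(s) < n
}

func main() {
	fmt.Println(fitsInOctal(8, 2097151)) // true: 7777777 is 7 digits
	fmt.Println(fitsInOctal(8, 2097152)) // false: needs 8 digits, so PAX or base-256
}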
-func (tw *Writer) writeHeader(hdr *Header, allowPax bool) error { - if tw.closed { - return ErrWriteAfterClose - } - if tw.err == nil { - tw.Flush() - } - if tw.err != nil { - return tw.err - } - - // a map to hold pax header records, if any are needed - paxHeaders := make(map[string]string) - - // TODO(shanemhansen): we might want to use PAX headers for - // subsecond time resolution, but for now let's just capture - // too long fields or non ascii characters - - var f formatter - var header []byte - - // We need to select which scratch buffer to use carefully, - // since this method is called recursively to write PAX headers. - // If allowPax is true, this is the non-recursive call, and we will use hdrBuff. - // If allowPax is false, we are being called by writePAXHeader, and hdrBuff is - // already being used by the non-recursive call, so we must use paxHdrBuff. - header = tw.hdrBuff[:] - if !allowPax { - header = tw.paxHdrBuff[:] - } - copy(header, zeroBlock) - s := slicer(header) - - // Wrappers around formatter that automatically sets paxHeaders if the - // argument extends beyond the capacity of the input byte slice. - var formatString = func(b []byte, s string, paxKeyword string) { - needsPaxHeader := paxKeyword != paxNone && len(s) > len(b) || !isASCII(s) - if needsPaxHeader { - paxHeaders[paxKeyword] = s - return - } - f.formatString(b, s) - } - var formatNumeric = func(b []byte, x int64, paxKeyword string) { - // Try octal first. - s := strconv.FormatInt(x, 8) - if len(s) < len(b) { - f.formatOctal(b, x) - return - } - - // If it is too long for octal, and PAX is preferred, use a PAX header. - if paxKeyword != paxNone && tw.preferPax { - f.formatOctal(b, 0) - s := strconv.FormatInt(x, 10) - paxHeaders[paxKeyword] = s - return - } - - tw.usedBinary = true - f.formatNumeric(b, x) - } - var formatTime = func(b []byte, t time.Time, paxKeyword string) { - var unixTime int64 - if !t.Before(minTime) && !t.After(maxTime) { - unixTime = t.Unix() - } - formatNumeric(b, unixTime, paxNone) - - // Write a PAX header if the time didn't fit precisely. - if paxKeyword != "" && tw.preferPax && allowPax && (t.Nanosecond() != 0 || !t.Before(minTime) || !t.After(maxTime)) { - paxHeaders[paxKeyword] = formatPAXTime(t) - } - } - - // keep a reference to the filename to allow to overwrite it later if we detect that we can use ustar longnames instead of pax - pathHeaderBytes := s.next(fileNameSize) - - formatString(pathHeaderBytes, hdr.Name, paxPath) - - f.formatOctal(s.next(8), hdr.Mode) // 100:108 - formatNumeric(s.next(8), int64(hdr.Uid), paxUid) // 108:116 - formatNumeric(s.next(8), int64(hdr.Gid), paxGid) // 116:124 - formatNumeric(s.next(12), hdr.Size, paxSize) // 124:136 - formatTime(s.next(12), hdr.ModTime, paxMtime) // 136:148 - s.next(8) // chksum (148:156) - s.next(1)[0] = hdr.Typeflag // 156:157 - - formatString(s.next(100), hdr.Linkname, paxLinkpath) - - copy(s.next(8), []byte("ustar\x0000")) // 257:265 - formatString(s.next(32), hdr.Uname, paxUname) // 265:297 - formatString(s.next(32), hdr.Gname, paxGname) // 297:329 - formatNumeric(s.next(8), hdr.Devmajor, paxNone) // 329:337 - formatNumeric(s.next(8), hdr.Devminor, paxNone) // 337:345 - - // keep a reference to the prefix to allow to overwrite it later if we detect that we can use ustar longnames instead of pax - prefixHeaderBytes := s.next(155) - formatString(prefixHeaderBytes, "", paxNone) // 345:500 prefix - - // Use the GNU magic instead of POSIX magic if we used any GNU extensions. 
- if tw.usedBinary { - copy(header[257:265], []byte("ustar \x00")) - } - - _, paxPathUsed := paxHeaders[paxPath] - // try to use a ustar header when only the name is too long - if !tw.preferPax && len(paxHeaders) == 1 && paxPathUsed { - prefix, suffix, ok := splitUSTARPath(hdr.Name) - if ok { - // Since we can encode in USTAR format, disable PAX header. - delete(paxHeaders, paxPath) - - // Update the path fields - formatString(pathHeaderBytes, suffix, paxNone) - formatString(prefixHeaderBytes, prefix, paxNone) - } - } - - // The chksum field is terminated by a NUL and a space. - // This is different from the other octal fields. - chksum, _ := checksum(header) - f.formatOctal(header[148:155], chksum) // Never fails - header[155] = ' ' - - // Check if there were any formatting errors. - if f.err != nil { - tw.err = f.err - return tw.err - } - - if allowPax { - if !hdr.AccessTime.IsZero() { - paxHeaders[paxAtime] = formatPAXTime(hdr.AccessTime) - } - if !hdr.ChangeTime.IsZero() { - paxHeaders[paxCtime] = formatPAXTime(hdr.ChangeTime) - } - if !hdr.CreationTime.IsZero() { - paxHeaders[paxCreationTime] = formatPAXTime(hdr.CreationTime) - } - for k, v := range hdr.Xattrs { - paxHeaders[paxXattr+k] = v - } - for k, v := range hdr.Winheaders { - paxHeaders[paxWindows+k] = v - } - } - - if len(paxHeaders) > 0 { - if !allowPax { - return errInvalidHeader - } - if err := tw.writePAXHeader(hdr, paxHeaders); err != nil { - return err - } - } - tw.nb = int64(hdr.Size) - tw.pad = (blockSize - (tw.nb % blockSize)) % blockSize - - _, tw.err = tw.w.Write(header) - return tw.err -} - -func formatPAXTime(t time.Time) string { - sec := t.Unix() - usec := t.Nanosecond() - s := strconv.FormatInt(sec, 10) - if usec != 0 { - s = fmt.Sprintf("%s.%09d", s, usec) - } - return s -} - -// splitUSTARPath splits a path according to USTAR prefix and suffix rules. -// If the path is not splittable, then it will return ("", "", false). -func splitUSTARPath(name string) (prefix, suffix string, ok bool) { - length := len(name) - if length <= fileNameSize || !isASCII(name) { - return "", "", false - } else if length > fileNamePrefixSize+1 { - length = fileNamePrefixSize + 1 - } else if name[length-1] == '/' { - length-- - } - - i := strings.LastIndex(name[:length], "/") - nlen := len(name) - i - 1 // nlen is length of suffix - plen := i // plen is length of prefix - if i <= 0 || nlen > fileNameSize || nlen == 0 || plen > fileNamePrefixSize { - return "", "", false - } - return name[:i], name[i+1:], true -} - -// writePaxHeader writes an extended pax header to the -// archive. -func (tw *Writer) writePAXHeader(hdr *Header, paxHeaders map[string]string) error { - // Prepare extended header - ext := new(Header) - ext.Typeflag = TypeXHeader - // Setting ModTime is required for reader parsing to - // succeed, and seems harmless enough. - ext.ModTime = hdr.ModTime - // The spec asks that we namespace our pseudo files - // with the current pid. However, this results in differing outputs - // for identical inputs. As such, the constant 0 is now used instead. - // golang.org/issue/12358 - dir, file := path.Split(hdr.Name) - fullName := path.Join(dir, "PaxHeaders.0", file) - - ascii := toASCII(fullName) - if len(ascii) > 100 { - ascii = ascii[:100] - } - ext.Name = ascii - // Construct the body - var buf bytes.Buffer - - // Keys are sorted before writing to body to allow deterministic output. 
- var keys []string - for k := range paxHeaders { - keys = append(keys, k) - } - sort.Strings(keys) - - for _, k := range keys { - fmt.Fprint(&buf, formatPAXRecord(k, paxHeaders[k])) - } - - ext.Size = int64(len(buf.Bytes())) - if err := tw.writeHeader(ext, false); err != nil { - return err - } - if _, err := tw.Write(buf.Bytes()); err != nil { - return err - } - if err := tw.Flush(); err != nil { - return err - } - return nil -} - -// formatPAXRecord formats a single PAX record, prefixing it with the -// appropriate length. -func formatPAXRecord(k, v string) string { - const padding = 3 // Extra padding for ' ', '=', and '\n' - size := len(k) + len(v) + padding - size += len(strconv.Itoa(size)) - record := fmt.Sprintf("%d %s=%s\n", size, k, v) - - // Final adjustment if adding size field increased the record size. - if len(record) != size { - size = len(record) - record = fmt.Sprintf("%d %s=%s\n", size, k, v) - } - return record -} - -// Write writes to the current entry in the tar archive. -// Write returns the error ErrWriteTooLong if more than -// hdr.Size bytes are written after WriteHeader. -func (tw *Writer) Write(b []byte) (n int, err error) { - if tw.closed { - err = ErrWriteAfterClose - return - } - overwrite := false - if int64(len(b)) > tw.nb { - b = b[0:tw.nb] - overwrite = true - } - n, err = tw.w.Write(b) - tw.nb -= int64(n) - if err == nil && overwrite { - err = ErrWriteTooLong - return - } - tw.err = err - return -} - -// Close closes the tar archive, flushing any unwritten -// data to the underlying writer. -func (tw *Writer) Close() error { - if tw.err != nil || tw.closed { - return tw.err - } - tw.Flush() - tw.closed = true - if tw.err != nil { - return tw.err - } - - // trailer: two zero blocks - for i := 0; i < 2; i++ { - _, tw.err = tw.w.Write(zeroBlock) - if tw.err != nil { - break - } - } - return tw.err -} diff --git a/vendor/src/github.com/Microsoft/go-winio/backup.go b/vendor/src/github.com/Microsoft/go-winio/backup.go deleted file mode 100644 index 864935175f..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/backup.go +++ /dev/null @@ -1,266 +0,0 @@ -package winio - -import ( - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "runtime" - "syscall" - "unicode/utf16" -) - -//sys backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupRead -//sys backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) = BackupWrite - -const ( - BackupData = uint32(iota + 1) - BackupEaData - BackupSecurity - BackupAlternateData - BackupLink - BackupPropertyData - BackupObjectId - BackupReparseData - BackupSparseBlock - BackupTxfsData -) - -const ( - StreamSparseAttributes = uint32(8) -) - -const ( - WRITE_DAC = 0x40000 - WRITE_OWNER = 0x80000 - ACCESS_SYSTEM_SECURITY = 0x1000000 -) - -// BackupHeader represents a backup stream of a file. -type BackupHeader struct { - Id uint32 // The backup stream ID - Attributes uint32 // Stream attributes - Size int64 // The size of the stream in bytes - Name string // The name of the stream (for BackupAlternateData only). - Offset int64 // The offset of the stream in the file (for BackupSparseBlock only). -} - -type win32StreamId struct { - StreamId uint32 - Attributes uint32 - Size uint64 - NameSize uint32 -} - -// BackupStreamReader reads from a stream produced by the BackupRead Win32 API and produces a series -// of BackupHeader values. 
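The length prefix written by formatPAXRecord above counts its own digits, hence the second formatting pass when appending the size changes the digit count. A standalone sketch of the same fix-up:

package main

import (
	"fmt"
	"strconv"
)

// paxRecord builds "%d %s=%s\n" where the leading number is the length
// of the whole record, including the digits of the number itself.
func paxRecord(k, v string) string {
	size := len(k) + len(v) + len(" =\n")
	size += len(strconv.Itoa(size))
	record := fmt.Sprintf("%d %s=%s\n", size, k, v)
	if len(record) != size { // the size field itself grew a digit
		size = len(record)
		record = fmt.Sprintf("%d %s=%s\n", size, k, v)
	}
	return record
}

func main() {
	fmt.Printf("%q\n", paxRecord("path", "a/very/long/name"))
	// "25 path=a/very/long/name\n"
}

parsePAXRecord, earlier in this patch, validates exactly this shape on the reading side. The BackupStreamReader type described in the comment above follows.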
-type BackupStreamReader struct {
-	r         io.Reader
-	bytesLeft int64
-}
-
-// NewBackupStreamReader produces a BackupStreamReader from any io.Reader.
-func NewBackupStreamReader(r io.Reader) *BackupStreamReader {
-	return &BackupStreamReader{r, 0}
-}
-
-// Next returns the next backup stream and prepares for calls to Read(). It skips the remainder of the current stream if
-// it was not completely read.
-func (r *BackupStreamReader) Next() (*BackupHeader, error) {
-	if r.bytesLeft > 0 {
-		if _, err := io.Copy(ioutil.Discard, r); err != nil {
-			return nil, err
-		}
-	}
-	var wsi win32StreamId
-	if err := binary.Read(r.r, binary.LittleEndian, &wsi); err != nil {
-		return nil, err
-	}
-	hdr := &BackupHeader{
-		Id:         wsi.StreamId,
-		Attributes: wsi.Attributes,
-		Size:       int64(wsi.Size),
-	}
-	if wsi.NameSize != 0 {
-		name := make([]uint16, int(wsi.NameSize/2))
-		if err := binary.Read(r.r, binary.LittleEndian, name); err != nil {
-			return nil, err
-		}
-		hdr.Name = syscall.UTF16ToString(name)
-	}
-	if wsi.StreamId == BackupSparseBlock {
-		if err := binary.Read(r.r, binary.LittleEndian, &hdr.Offset); err != nil {
-			return nil, err
-		}
-		hdr.Size -= 8
-	}
-	r.bytesLeft = hdr.Size
-	return hdr, nil
-}
-
-// Read reads from the current backup stream.
-func (r *BackupStreamReader) Read(b []byte) (int, error) {
-	if r.bytesLeft == 0 {
-		return 0, io.EOF
-	}
-	if int64(len(b)) > r.bytesLeft {
-		b = b[:r.bytesLeft]
-	}
-	n, err := r.r.Read(b)
-	r.bytesLeft -= int64(n)
-	if err == io.EOF {
-		err = io.ErrUnexpectedEOF
-	} else if r.bytesLeft == 0 && err == nil {
-		err = io.EOF
-	}
-	return n, err
-}
-
-// BackupStreamWriter writes a stream compatible with the BackupWrite Win32 API.
-type BackupStreamWriter struct {
-	w         io.Writer
-	bytesLeft int64
-}
-
-// NewBackupStreamWriter produces a BackupStreamWriter on top of an io.Writer.
-func NewBackupStreamWriter(w io.Writer) *BackupStreamWriter {
-	return &BackupStreamWriter{w, 0}
-}
-
-// WriteHeader writes the next backup stream header and prepares for calls to Write().
-func (w *BackupStreamWriter) WriteHeader(hdr *BackupHeader) error {
-	if w.bytesLeft != 0 {
-		return fmt.Errorf("missing %d bytes", w.bytesLeft)
-	}
-	name := utf16.Encode([]rune(hdr.Name))
-	wsi := win32StreamId{
-		StreamId:   hdr.Id,
-		Attributes: hdr.Attributes,
-		Size:       uint64(hdr.Size),
-		NameSize:   uint32(len(name) * 2),
-	}
-	if hdr.Id == BackupSparseBlock {
-		// Include space for the int64 block offset
-		wsi.Size += 8
-	}
-	if err := binary.Write(w.w, binary.LittleEndian, &wsi); err != nil {
-		return err
-	}
-	if len(name) != 0 {
-		if err := binary.Write(w.w, binary.LittleEndian, name); err != nil {
-			return err
-		}
-	}
-	if hdr.Id == BackupSparseBlock {
-		if err := binary.Write(w.w, binary.LittleEndian, hdr.Offset); err != nil {
-			return err
-		}
-	}
-	w.bytesLeft = hdr.Size
-	return nil
-}
-
-// Write writes to the current backup stream.
-func (w *BackupStreamWriter) Write(b []byte) (int, error) {
-	if w.bytesLeft < int64(len(b)) {
-		return 0, fmt.Errorf("too many bytes by %d", int64(len(b))-w.bytesLeft)
-	}
-	n, err := w.w.Write(b)
-	w.bytesLeft -= int64(n)
-	return n, err
-}
-
-// BackupFileReader provides an io.ReadCloser interface on top of the BackupRead Win32 API.
-type BackupFileReader struct {
-	f               *os.File
-	includeSecurity bool
-	ctx             uintptr
-}
-
-// NewBackupFileReader returns a new BackupFileReader from a file handle. If includeSecurity is true,
-// Read will attempt to read the security descriptor of the file.
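NewBackupFileReader, whose documentation ends above, combines with BackupStreamReader into the following walk over a file's backup streams. A minimal sketch, Windows-only, with a placeholder path:

package main

import (
	"fmt"
	"io"
	"log"
	"os"

	"github.com/Microsoft/go-winio"
)

func main() {
	f, err := os.Open(`C:\some\file.txt`) // placeholder path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	bfr := winio.NewBackupFileReader(f, true) // include the security descriptor
	defer bfr.Close()

	bsr := winio.NewBackupStreamReader(bfr)
	for {
		hdr, err := bsr.Next()
		if err == io.EOF {
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Printf("stream id=%d name=%q size=%d\n", hdr.Id, hdr.Name, hdr.Size)
	}
}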
-func NewBackupFileReader(f *os.File, includeSecurity bool) *BackupFileReader {
-	r := &BackupFileReader{f, includeSecurity, 0}
-	runtime.SetFinalizer(r, func(r *BackupFileReader) { r.Close() })
-	return r
-}
-
-// Read reads a backup stream from the file by calling the Win32 API BackupRead().
-func (r *BackupFileReader) Read(b []byte) (int, error) {
-	var bytesRead uint32
-	err := backupRead(syscall.Handle(r.f.Fd()), b, &bytesRead, false, r.includeSecurity, &r.ctx)
-	if err != nil {
-		return 0, &os.PathError{"BackupRead", r.f.Name(), err}
-	}
-	if bytesRead == 0 {
-		return 0, io.EOF
-	}
-	return int(bytesRead), nil
-}
-
-// Close frees Win32 resources associated with the BackupFileReader. It does not close
-// the underlying file.
-func (r *BackupFileReader) Close() error {
-	if r.ctx != 0 {
-		backupRead(syscall.Handle(r.f.Fd()), nil, nil, true, false, &r.ctx)
-		r.ctx = 0
-	}
-	return nil
-}
-
-// BackupFileWriter provides an io.WriteCloser interface on top of the BackupWrite Win32 API.
-type BackupFileWriter struct {
-	f               *os.File
-	includeSecurity bool
-	ctx             uintptr
-}
-
-// NewBackupFileWriter returns a new BackupFileWriter from a file handle. If includeSecurity is true,
-// Write() will attempt to restore the security descriptor from the stream.
-func NewBackupFileWriter(f *os.File, includeSecurity bool) *BackupFileWriter {
-	w := &BackupFileWriter{f, includeSecurity, 0}
-	runtime.SetFinalizer(w, func(w *BackupFileWriter) { w.Close() })
-	return w
-}
-
-// Write restores a portion of the file using the provided backup stream.
-func (w *BackupFileWriter) Write(b []byte) (int, error) {
-	var bytesWritten uint32
-	err := backupWrite(syscall.Handle(w.f.Fd()), b, &bytesWritten, false, w.includeSecurity, &w.ctx)
-	if err != nil {
-		return 0, &os.PathError{"BackupWrite", w.f.Name(), err}
-	}
-	if int(bytesWritten) != len(b) {
-		return int(bytesWritten), errors.New("not all bytes could be written")
-	}
-	return len(b), nil
-}
-
-// Close frees Win32 resources associated with the BackupFileWriter. It does not
-// close the underlying file.
-func (w *BackupFileWriter) Close() error {
-	if w.ctx != 0 {
-		backupWrite(syscall.Handle(w.f.Fd()), nil, nil, true, false, &w.ctx)
-		w.ctx = 0
-	}
-	return nil
-}
-
-// OpenForBackup opens a file or directory, potentially skipping access checks if the backup
-// or restore privileges have been acquired.
-//
-// If the file opened was a directory, it cannot be used with Readdir().
-func OpenForBackup(path string, access uint32, share uint32, createmode uint32) (*os.File, error) { - winPath, err := syscall.UTF16FromString(path) - if err != nil { - return nil, err - } - h, err := syscall.CreateFile(&winPath[0], access, share, nil, createmode, syscall.FILE_FLAG_BACKUP_SEMANTICS, 0) - if err != nil { - err = &os.PathError{Op: "open", Path: path, Err: err} - return nil, err - } - return os.NewFile(uintptr(h), path), nil -} diff --git a/vendor/src/github.com/Microsoft/go-winio/backuptar/tar.go b/vendor/src/github.com/Microsoft/go-winio/backuptar/tar.go deleted file mode 100644 index c454c4c054..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/backuptar/tar.go +++ /dev/null @@ -1,351 +0,0 @@ -package backuptar - -import ( - "encoding/base64" - "errors" - "fmt" - "io" - "io/ioutil" - "path/filepath" - "strconv" - "strings" - "syscall" - "time" - - "github.com/Microsoft/go-winio" - "github.com/Microsoft/go-winio/archive/tar" // until archive/tar supports pax extensions in its interface -) - -const ( - c_ISUID = 04000 // Set uid - c_ISGID = 02000 // Set gid - c_ISVTX = 01000 // Save text (sticky bit) - c_ISDIR = 040000 // Directory - c_ISFIFO = 010000 // FIFO - c_ISREG = 0100000 // Regular file - c_ISLNK = 0120000 // Symbolic link - c_ISBLK = 060000 // Block special file - c_ISCHR = 020000 // Character special file - c_ISSOCK = 0140000 // Socket -) - -const ( - hdrFileAttributes = "fileattr" - hdrSecurityDescriptor = "sd" - hdrRawSecurityDescriptor = "rawsd" - hdrMountPoint = "mountpoint" -) - -func writeZeroes(w io.Writer, count int64) error { - buf := make([]byte, 8192) - c := len(buf) - for i := int64(0); i < count; i += int64(c) { - if int64(c) > count-i { - c = int(count - i) - } - _, err := w.Write(buf[:c]) - if err != nil { - return err - } - } - return nil -} - -func copySparse(t *tar.Writer, br *winio.BackupStreamReader) error { - curOffset := int64(0) - for { - bhdr, err := br.Next() - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - if err != nil { - return err - } - if bhdr.Id != winio.BackupSparseBlock { - return fmt.Errorf("unexpected stream %d", bhdr.Id) - } - - // archive/tar does not support writing sparse files - // so just write zeroes to catch up to the current offset. - err = writeZeroes(t, bhdr.Offset-curOffset) - if bhdr.Size == 0 { - break - } - n, err := io.Copy(t, br) - if err != nil { - return err - } - curOffset = bhdr.Offset + n - } - return nil -} - -// BasicInfoHeader creates a tar header from basic file information. -func BasicInfoHeader(name string, size int64, fileInfo *winio.FileBasicInfo) *tar.Header { - hdr := &tar.Header{ - Name: filepath.ToSlash(name), - Size: size, - Typeflag: tar.TypeReg, - ModTime: time.Unix(0, fileInfo.LastWriteTime.Nanoseconds()), - ChangeTime: time.Unix(0, fileInfo.ChangeTime.Nanoseconds()), - AccessTime: time.Unix(0, fileInfo.LastAccessTime.Nanoseconds()), - CreationTime: time.Unix(0, fileInfo.CreationTime.Nanoseconds()), - Winheaders: make(map[string]string), - } - hdr.Winheaders[hdrFileAttributes] = fmt.Sprintf("%d", fileInfo.FileAttributes) - - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - hdr.Mode |= c_ISDIR - hdr.Size = 0 - hdr.Typeflag = tar.TypeDir - } - return hdr -} - -// WriteTarFileFromBackupStream writes a file to a tar writer using data from a Win32 backup stream. -// -// This encodes Win32 metadata as tar pax vendor extensions starting with MSWINDOWS. 
-// -// The additional Win32 metadata is: -// -// MSWINDOWS.fileattr: The Win32 file attributes, as a decimal value -// -// MSWINDOWS.rawsd: The Win32 security descriptor, in raw binary format -// -// MSWINDOWS.mountpoint: If present, this is a mount point and not a symlink, even though the type is '2' (symlink) -func WriteTarFileFromBackupStream(t *tar.Writer, r io.Reader, name string, size int64, fileInfo *winio.FileBasicInfo) error { - name = filepath.ToSlash(name) - hdr := BasicInfoHeader(name, size, fileInfo) - br := winio.NewBackupStreamReader(r) - var dataHdr *winio.BackupHeader - for dataHdr == nil { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupData: - hdr.Mode |= c_ISREG - dataHdr = bhdr - case winio.BackupSecurity: - sd, err := ioutil.ReadAll(br) - if err != nil { - return err - } - hdr.Winheaders[hdrRawSecurityDescriptor] = base64.StdEncoding.EncodeToString(sd) - - case winio.BackupReparseData: - hdr.Mode |= c_ISLNK - hdr.Typeflag = tar.TypeSymlink - reparseBuffer, err := ioutil.ReadAll(br) - rp, err := winio.DecodeReparsePoint(reparseBuffer) - if err != nil { - return err - } - if rp.IsMountPoint { - hdr.Winheaders[hdrMountPoint] = "1" - } - hdr.Linkname = rp.Target - case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: - // ignore these streams - default: - return fmt.Errorf("%s: unknown stream ID %d", name, bhdr.Id) - } - } - - err := t.WriteHeader(hdr) - if err != nil { - return err - } - - if dataHdr != nil { - // A data stream was found. Copy the data. - if (dataHdr.Attributes & winio.StreamSparseAttributes) == 0 { - if size != dataHdr.Size { - return fmt.Errorf("%s: mismatch between file size %d and header size %d", name, size, dataHdr.Size) - } - _, err = io.Copy(t, br) - if err != nil { - return err - } - } else { - err = copySparse(t, br) - if err != nil { - return err - } - } - } - - // Look for streams after the data stream. The only ones we handle are alternate data streams. - // Other streams may have metadata that could be serialized, but the tar header has already - // been written. In practice, this means that we don't get EA or TXF metadata. - for { - bhdr, err := br.Next() - if err == io.EOF { - break - } - if err != nil { - return err - } - switch bhdr.Id { - case winio.BackupAlternateData: - altName := bhdr.Name - if strings.HasSuffix(altName, ":$DATA") { - altName = altName[:len(altName)-len(":$DATA")] - } - if (bhdr.Attributes & winio.StreamSparseAttributes) == 0 { - hdr = &tar.Header{ - Name: name + altName, - Mode: hdr.Mode, - Typeflag: tar.TypeReg, - Size: bhdr.Size, - ModTime: hdr.ModTime, - AccessTime: hdr.AccessTime, - ChangeTime: hdr.ChangeTime, - } - err = t.WriteHeader(hdr) - if err != nil { - return err - } - _, err = io.Copy(t, br) - if err != nil { - return err - } - - } else { - // Unsupported for now, since the size of the alternate stream is not present - // in the backup stream until after the data has been read. - return errors.New("tar of sparse alternate data streams is unsupported") - } - case winio.BackupEaData, winio.BackupLink, winio.BackupPropertyData, winio.BackupObjectId, winio.BackupTxfsData: - // ignore these streams - default: - return fmt.Errorf("%s: unknown stream ID %d after data", name, bhdr.Id) - } - } - return nil -} - -// FileInfoFromHeader retrieves basic Win32 file information from a tar header, using the additional metadata written by -// WriteTarFileFromBackupStream. 
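The MSWINDOWS.* records listed above surface in Go as the fork-specific Winheaders map on tar.Header, which the writer serializes into PAX records and which mergePAX, earlier in this patch, restores on read. A small sketch; the attribute value is illustrative:

package main

import (
	"fmt"

	tar "github.com/Microsoft/go-winio/archive/tar"
)

func main() {
	hdr := &tar.Header{
		Name:     "example.txt",
		Typeflag: tar.TypeReg,
		// Emitted as the PAX record "MSWINDOWS.fileattr=32".
		Winheaders: map[string]string{"fileattr": "32"}, // FILE_ATTRIBUTE_ARCHIVE
	}
	fmt.Println(hdr.Winheaders["fileattr"])
}

FileInfoFromHeader, documented above, reads these keys back into a winio.FileBasicInfo; its implementation follows.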
-func FileInfoFromHeader(hdr *tar.Header) (name string, size int64, fileInfo *winio.FileBasicInfo, err error) {
- name = hdr.Name
- if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
- size = hdr.Size
- }
- fileInfo = &winio.FileBasicInfo{
- LastAccessTime: syscall.NsecToFiletime(hdr.AccessTime.UnixNano()),
- LastWriteTime: syscall.NsecToFiletime(hdr.ModTime.UnixNano()),
- ChangeTime: syscall.NsecToFiletime(hdr.ChangeTime.UnixNano()),
- CreationTime: syscall.NsecToFiletime(hdr.CreationTime.UnixNano()),
- }
- if attrStr, ok := hdr.Winheaders[hdrFileAttributes]; ok {
- attr, err := strconv.ParseUint(attrStr, 10, 32)
- if err != nil {
- return "", 0, nil, err
- }
- fileInfo.FileAttributes = uintptr(attr)
- } else {
- if hdr.Typeflag == tar.TypeDir {
- fileInfo.FileAttributes |= syscall.FILE_ATTRIBUTE_DIRECTORY
- }
- }
- return
-}
-
-// WriteBackupStreamFromTarFile writes a Win32 backup stream from the current tar file. Since this function may process multiple
-// tar file entries in order to collect all the alternate data streams for the file, it returns the next
-// tar file that was not processed, or io.EOF if there are no more.
-func WriteBackupStreamFromTarFile(w io.Writer, t *tar.Reader, hdr *tar.Header) (*tar.Header, error) {
- bw := winio.NewBackupStreamWriter(w)
- var sd []byte
- var err error
- // Maintaining old SDDL-based behavior for backward compatibility. All new tar headers written
- // by this library will have raw binary for the security descriptor.
- if sddl, ok := hdr.Winheaders[hdrSecurityDescriptor]; ok {
- sd, err = winio.SddlToSecurityDescriptor(sddl)
- if err != nil {
- return nil, err
- }
- }
- if sdraw, ok := hdr.Winheaders[hdrRawSecurityDescriptor]; ok {
- sd, err = base64.StdEncoding.DecodeString(sdraw)
- if err != nil {
- return nil, err
- }
- }
- if len(sd) != 0 {
- bhdr := winio.BackupHeader{
- Id: winio.BackupSecurity,
- Size: int64(len(sd)),
- }
- err := bw.WriteHeader(&bhdr)
- if err != nil {
- return nil, err
- }
- _, err = bw.Write(sd)
- if err != nil {
- return nil, err
- }
- }
- if hdr.Typeflag == tar.TypeSymlink {
- _, isMountPoint := hdr.Winheaders[hdrMountPoint]
- rp := winio.ReparsePoint{
- Target: filepath.FromSlash(hdr.Linkname),
- IsMountPoint: isMountPoint,
- }
- reparse := winio.EncodeReparsePoint(&rp)
- bhdr := winio.BackupHeader{
- Id: winio.BackupReparseData,
- Size: int64(len(reparse)),
- }
- err := bw.WriteHeader(&bhdr)
- if err != nil {
- return nil, err
- }
- _, err = bw.Write(reparse)
- if err != nil {
- return nil, err
- }
- }
- if hdr.Typeflag == tar.TypeReg || hdr.Typeflag == tar.TypeRegA {
- bhdr := winio.BackupHeader{
- Id: winio.BackupData,
- Size: hdr.Size,
- }
- err := bw.WriteHeader(&bhdr)
- if err != nil {
- return nil, err
- }
- _, err = io.Copy(bw, t)
- if err != nil {
- return nil, err
- }
- }
- // Copy all the alternate data streams and return the next non-ADS header.
- for { - ahdr, err := t.Next() - if err != nil { - return nil, err - } - if ahdr.Typeflag != tar.TypeReg || !strings.HasPrefix(ahdr.Name, hdr.Name+":") { - return ahdr, nil - } - bhdr := winio.BackupHeader{ - Id: winio.BackupAlternateData, - Size: ahdr.Size, - Name: ahdr.Name[len(hdr.Name)+1:] + ":$DATA", - } - err = bw.WriteHeader(&bhdr) - if err != nil { - return nil, err - } - _, err = io.Copy(bw, t) - if err != nil { - return nil, err - } - } -} diff --git a/vendor/src/github.com/Microsoft/go-winio/file.go b/vendor/src/github.com/Microsoft/go-winio/file.go deleted file mode 100644 index fd16f00755..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/file.go +++ /dev/null @@ -1,219 +0,0 @@ -package winio - -import ( - "errors" - "io" - "runtime" - "sync" - "syscall" - "time" -) - -//sys cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) = CancelIoEx -//sys createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) = CreateIoCompletionPort -//sys getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) = GetQueuedCompletionStatus -//sys setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) = SetFileCompletionNotificationModes -//sys timeBeginPeriod(period uint32) (n int32) = winmm.timeBeginPeriod - -const ( - cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS = 1 - cFILE_SKIP_SET_EVENT_ON_HANDLE = 2 -) - -var ( - ErrFileClosed = errors.New("file has already been closed") - ErrTimeout = &timeoutError{} -) - -type timeoutError struct{} - -func (e *timeoutError) Error() string { return "i/o timeout" } -func (e *timeoutError) Timeout() bool { return true } -func (e *timeoutError) Temporary() bool { return true } - -var ioInitOnce sync.Once -var ioCompletionPort syscall.Handle - -// ioResult contains the result of an asynchronous IO operation -type ioResult struct { - bytes uint32 - err error -} - -// ioOperation represents an outstanding asynchronous Win32 IO -type ioOperation struct { - o syscall.Overlapped - ch chan ioResult -} - -func initIo() { - h, err := createIoCompletionPort(syscall.InvalidHandle, 0, 0, 0xffffffff) - if err != nil { - panic(err) - } - ioCompletionPort = h - go ioCompletionProcessor(h) -} - -// win32File implements Reader, Writer, and Closer on a Win32 handle without blocking in a syscall. -// It takes ownership of this handle and will close it if it is garbage collected. 
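A minimal sketch of how a caller obtains this wrapper, assuming a handle opened with FILE_FLAG_OVERLAPPED (the file path below is illustrative); MakeOpenFile is the exported constructor:

    // Hedged sketch: wrap an overlapped Win32 handle so it behaves like an
    // io.ReadWriteCloser. The wrapper takes ownership of the handle.
    package main

    import (
    	"fmt"
    	"syscall"

    	"github.com/Microsoft/go-winio"
    )

    func main() {
    	p, err := syscall.UTF16PtrFromString(`C:\temp\demo.txt`) // illustrative path
    	if err != nil {
    		panic(err)
    	}
    	// FILE_FLAG_OVERLAPPED is required for the asynchronous I/O machinery.
    	h, err := syscall.CreateFile(p, syscall.GENERIC_READ, syscall.FILE_SHARE_READ,
    		nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED, 0)
    	if err != nil {
    		panic(err)
    	}
    	f, err := winio.MakeOpenFile(h)
    	if err != nil {
    		syscall.Close(h)
    		panic(err)
    	}
    	defer f.Close()
    	buf := make([]byte, 512)
    	n, err := f.Read(buf) // completes via the I/O completion port, not a blocking syscall
    	fmt.Println(n, err)
    }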
-type win32File struct { - handle syscall.Handle - wg sync.WaitGroup - closing bool - readDeadline time.Time - writeDeadline time.Time -} - -// makeWin32File makes a new win32File from an existing file handle -func makeWin32File(h syscall.Handle) (*win32File, error) { - f := &win32File{handle: h} - ioInitOnce.Do(initIo) - _, err := createIoCompletionPort(h, ioCompletionPort, 0, 0xffffffff) - if err != nil { - return nil, err - } - err = setFileCompletionNotificationModes(h, cFILE_SKIP_COMPLETION_PORT_ON_SUCCESS|cFILE_SKIP_SET_EVENT_ON_HANDLE) - if err != nil { - return nil, err - } - runtime.SetFinalizer(f, (*win32File).closeHandle) - return f, nil -} - -func MakeOpenFile(h syscall.Handle) (io.ReadWriteCloser, error) { - return makeWin32File(h) -} - -// closeHandle closes the resources associated with a Win32 handle -func (f *win32File) closeHandle() { - if !f.closing { - // cancel all IO and wait for it to complete - f.closing = true - cancelIoEx(f.handle, nil) - f.wg.Wait() - // at this point, no new IO can start - syscall.Close(f.handle) - f.handle = 0 - } -} - -// Close closes a win32File. -func (f *win32File) Close() error { - f.closeHandle() - runtime.SetFinalizer(f, nil) - return nil -} - -// prepareIo prepares for a new IO operation -func (f *win32File) prepareIo() (*ioOperation, error) { - f.wg.Add(1) - if f.closing { - return nil, ErrFileClosed - } - c := &ioOperation{} - c.ch = make(chan ioResult) - return c, nil -} - -// ioCompletionProcessor processes completed async IOs forever -func ioCompletionProcessor(h syscall.Handle) { - // Set the timer resolution to 1. This fixes a performance regression in golang 1.6. - timeBeginPeriod(1) - for { - var bytes uint32 - var key uintptr - var op *ioOperation - err := getQueuedCompletionStatus(h, &bytes, &key, &op, syscall.INFINITE) - if op == nil { - panic(err) - } - op.ch <- ioResult{bytes, err} - } -} - -// asyncIo processes the return value from ReadFile or WriteFile, blocking until -// the operation has actually completed. -func (f *win32File) asyncIo(c *ioOperation, deadline time.Time, bytes uint32, err error) (int, error) { - if err != syscall.ERROR_IO_PENDING { - f.wg.Done() - return int(bytes), err - } else { - var r ioResult - wait := true - timedout := false - if f.closing { - cancelIoEx(f.handle, &c.o) - } else if !deadline.IsZero() { - now := time.Now() - if !deadline.After(now) { - timedout = true - } else { - timeout := time.After(deadline.Sub(now)) - select { - case r = <-c.ch: - wait = false - case <-timeout: - timedout = true - } - } - } - if timedout { - cancelIoEx(f.handle, &c.o) - } - if wait { - r = <-c.ch - } - err = r.err - if err == syscall.ERROR_OPERATION_ABORTED { - if f.closing { - err = ErrFileClosed - } else if timedout { - err = ErrTimeout - } - } - f.wg.Done() - return int(r.bytes), err - } -} - -// Read reads from a file handle. -func (f *win32File) Read(b []byte) (int, error) { - c, err := f.prepareIo() - if err != nil { - return 0, err - } - var bytes uint32 - err = syscall.ReadFile(f.handle, b, &bytes, &c.o) - n, err := f.asyncIo(c, f.readDeadline, bytes, err) - - // Handle EOF conditions. - if err == nil && n == 0 && len(b) != 0 { - return 0, io.EOF - } else if err == syscall.ERROR_BROKEN_PIPE { - return 0, io.EOF - } else { - return n, err - } -} - -// Write writes to a file handle. 
-func (f *win32File) Write(b []byte) (int, error) { - c, err := f.prepareIo() - if err != nil { - return 0, err - } - var bytes uint32 - err = syscall.WriteFile(f.handle, b, &bytes, &c.o) - return f.asyncIo(c, f.writeDeadline, bytes, err) -} - -func (f *win32File) SetReadDeadline(t time.Time) error { - f.readDeadline = t - return nil -} - -func (f *win32File) SetWriteDeadline(t time.Time) error { - f.writeDeadline = t - return nil -} diff --git a/vendor/src/github.com/Microsoft/go-winio/fileinfo.go b/vendor/src/github.com/Microsoft/go-winio/fileinfo.go deleted file mode 100644 index d5acb72d5b..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/fileinfo.go +++ /dev/null @@ -1,54 +0,0 @@ -package winio - -import ( - "os" - "syscall" - "unsafe" -) - -//sys getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = GetFileInformationByHandleEx -//sys setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) = SetFileInformationByHandle - -const ( - fileBasicInfo = 0 - fileIDInfo = 0x12 -) - -// FileBasicInfo contains file access time and file attributes information. -type FileBasicInfo struct { - CreationTime, LastAccessTime, LastWriteTime, ChangeTime syscall.Filetime - FileAttributes uintptr // includes padding -} - -// GetFileBasicInfo retrieves times and attributes for a file. -func GetFileBasicInfo(f *os.File) (*FileBasicInfo, error) { - bi := &FileBasicInfo{} - if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - return bi, nil -} - -// SetFileBasicInfo sets times and attributes for a file. -func SetFileBasicInfo(f *os.File, bi *FileBasicInfo) error { - if err := setFileInformationByHandle(syscall.Handle(f.Fd()), fileBasicInfo, (*byte)(unsafe.Pointer(bi)), uint32(unsafe.Sizeof(*bi))); err != nil { - return &os.PathError{Op: "SetFileInformationByHandle", Path: f.Name(), Err: err} - } - return nil -} - -// FileIDInfo contains the volume serial number and file ID for a file. This pair should be -// unique on a system. -type FileIDInfo struct { - VolumeSerialNumber uint64 - FileID [16]byte -} - -// GetFileID retrieves the unique (volume, file ID) pair for a file. 
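A minimal sketch of one use of this pair, assuming two hypothetical open files; since FileIDInfo contains only comparable fields, == can detect hard links to the same underlying file:

    // Hedged sketch: compare (volume serial, file ID) pairs of two open files.
    // File names are illustrative.
    package main

    import (
    	"fmt"
    	"os"

    	"github.com/Microsoft/go-winio"
    )

    func sameFile(a, b *os.File) (bool, error) {
    	ia, err := winio.GetFileID(a)
    	if err != nil {
    		return false, err
    	}
    	ib, err := winio.GetFileID(b)
    	if err != nil {
    		return false, err
    	}
    	// FileIDInfo is a comparable struct, so == is a deep comparison.
    	return *ia == *ib, nil
    }

    func main() {
    	a, err := os.Open(`a.txt`) // hypothetical
    	if err != nil {
    		panic(err)
    	}
    	defer a.Close()
    	b, err := os.Open(`b.txt`) // hypothetical
    	if err != nil {
    		panic(err)
    	}
    	defer b.Close()
    	same, err := sameFile(a, b)
    	fmt.Println(same, err)
    }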
-func GetFileID(f *os.File) (*FileIDInfo, error) { - fileID := &FileIDInfo{} - if err := getFileInformationByHandleEx(syscall.Handle(f.Fd()), fileIDInfo, (*byte)(unsafe.Pointer(fileID)), uint32(unsafe.Sizeof(*fileID))); err != nil { - return nil, &os.PathError{Op: "GetFileInformationByHandleEx", Path: f.Name(), Err: err} - } - return fileID, nil -} diff --git a/vendor/src/github.com/Microsoft/go-winio/pipe.go b/vendor/src/github.com/Microsoft/go-winio/pipe.go deleted file mode 100644 index 82db283061..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/pipe.go +++ /dev/null @@ -1,398 +0,0 @@ -package winio - -import ( - "errors" - "io" - "net" - "os" - "syscall" - "time" - "unsafe" -) - -//sys connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) = ConnectNamedPipe -//sys createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateNamedPipeW -//sys createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) [failretval==syscall.InvalidHandle] = CreateFileW -//sys waitNamedPipe(name string, timeout uint32) (err error) = WaitNamedPipeW -//sys getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) = GetNamedPipeInfo -//sys getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) = GetNamedPipeHandleStateW - -type securityAttributes struct { - Length uint32 - SecurityDescriptor *byte - InheritHandle uint32 -} - -const ( - cERROR_PIPE_BUSY = syscall.Errno(231) - cERROR_PIPE_CONNECTED = syscall.Errno(535) - cERROR_SEM_TIMEOUT = syscall.Errno(121) - - cPIPE_ACCESS_DUPLEX = 0x3 - cFILE_FLAG_FIRST_PIPE_INSTANCE = 0x80000 - cSECURITY_SQOS_PRESENT = 0x100000 - cSECURITY_ANONYMOUS = 0 - - cPIPE_REJECT_REMOTE_CLIENTS = 0x8 - - cPIPE_UNLIMITED_INSTANCES = 255 - - cNMPWAIT_USE_DEFAULT_WAIT = 0 - cNMPWAIT_NOWAIT = 1 - - cPIPE_TYPE_MESSAGE = 4 - - cPIPE_READMODE_MESSAGE = 2 -) - -var ( - // ErrPipeListenerClosed is returned for pipe operations on listeners that have been closed. - // This error should match net.errClosing since docker takes a dependency on its text. - ErrPipeListenerClosed = errors.New("use of closed network connection") - - errPipeWriteClosed = errors.New("pipe has been closed for write") -) - -type win32Pipe struct { - *win32File - path string -} - -type win32MessageBytePipe struct { - win32Pipe - writeClosed bool - readEOF bool -} - -type pipeAddress string - -func (f *win32Pipe) LocalAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) RemoteAddr() net.Addr { - return pipeAddress(f.path) -} - -func (f *win32Pipe) SetDeadline(t time.Time) error { - f.SetReadDeadline(t) - f.SetWriteDeadline(t) - return nil -} - -// CloseWrite closes the write side of a message pipe in byte mode. -func (f *win32MessageBytePipe) CloseWrite() error { - if f.writeClosed { - return errPipeWriteClosed - } - _, err := f.win32File.Write(nil) - if err != nil { - return err - } - f.writeClosed = true - return nil -} - -// Write writes bytes to a message pipe in byte mode. Zero-byte writes are ignored, since -// they are used to implement CloseWrite(). 
-func (f *win32MessageBytePipe) Write(b []byte) (int, error) { - if f.writeClosed { - return 0, errPipeWriteClosed - } - if len(b) == 0 { - return 0, nil - } - return f.win32File.Write(b) -} - -// Read reads bytes from a message pipe in byte mode. A read of a zero-byte message on a message -// mode pipe will return io.EOF, as will all subsequent reads. -func (f *win32MessageBytePipe) Read(b []byte) (int, error) { - if f.readEOF { - return 0, io.EOF - } - n, err := f.win32File.Read(b) - if err == io.EOF { - // If this was the result of a zero-byte read, then - // it is possible that the read was due to a zero-size - // message. Since we are simulating CloseWrite with a - // zero-byte message, ensure that all future Read() calls - // also return EOF. - f.readEOF = true - } - return n, err -} - -func (s pipeAddress) Network() string { - return "pipe" -} - -func (s pipeAddress) String() string { - return string(s) -} - -// DialPipe connects to a named pipe by path, timing out if the connection -// takes longer than the specified duration. If timeout is nil, then the timeout -// is the default timeout established by the pipe server. -func DialPipe(path string, timeout *time.Duration) (net.Conn, error) { - var absTimeout time.Time - if timeout != nil { - absTimeout = time.Now().Add(*timeout) - } - var err error - var h syscall.Handle - for { - h, err = createFile(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, 0, nil, syscall.OPEN_EXISTING, syscall.FILE_FLAG_OVERLAPPED|cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err != cERROR_PIPE_BUSY { - break - } - now := time.Now() - var ms uint32 - if absTimeout.IsZero() { - ms = cNMPWAIT_USE_DEFAULT_WAIT - } else if now.After(absTimeout) { - ms = cNMPWAIT_NOWAIT - } else { - ms = uint32(absTimeout.Sub(now).Nanoseconds() / 1000 / 1000) - } - err = waitNamedPipe(path, ms) - if err != nil { - if err == cERROR_SEM_TIMEOUT { - return nil, ErrTimeout - } - break - } - } - if err != nil { - return nil, &os.PathError{Op: "open", Path: path, Err: err} - } - - var flags uint32 - err = getNamedPipeInfo(h, &flags, nil, nil, nil) - if err != nil { - return nil, err - } - - var state uint32 - err = getNamedPipeHandleState(h, &state, nil, nil, nil, nil, 0) - if err != nil { - return nil, err - } - - if state&cPIPE_READMODE_MESSAGE != 0 { - return nil, &os.PathError{Op: "open", Path: path, Err: errors.New("message readmode pipes not supported")} - } - - f, err := makeWin32File(h) - if err != nil { - syscall.Close(h) - return nil, err - } - - // If the pipe is in message mode, return a message byte pipe, which - // supports CloseWrite(). 
- if flags&cPIPE_TYPE_MESSAGE != 0 {
- return &win32MessageBytePipe{
- win32Pipe: win32Pipe{win32File: f, path: path},
- }, nil
- }
- return &win32Pipe{win32File: f, path: path}, nil
-}
-
-type acceptResponse struct {
- f *win32File
- err error
-}
-
-type win32PipeListener struct {
- firstHandle syscall.Handle
- path string
- securityDescriptor []byte
- config PipeConfig
- acceptCh chan (chan acceptResponse)
- closeCh chan int
- doneCh chan int
-}
-
-func makeServerPipeHandle(path string, securityDescriptor []byte, c *PipeConfig, first bool) (syscall.Handle, error) {
- var flags uint32 = cPIPE_ACCESS_DUPLEX | syscall.FILE_FLAG_OVERLAPPED
- if first {
- flags |= cFILE_FLAG_FIRST_PIPE_INSTANCE
- }
-
- var mode uint32 = cPIPE_REJECT_REMOTE_CLIENTS
- if c.MessageMode {
- mode |= cPIPE_TYPE_MESSAGE
- }
-
- var sa securityAttributes
- sa.Length = uint32(unsafe.Sizeof(sa))
- if securityDescriptor != nil {
- sa.SecurityDescriptor = &securityDescriptor[0]
- }
- h, err := createNamedPipe(path, flags, mode, cPIPE_UNLIMITED_INSTANCES, uint32(c.OutputBufferSize), uint32(c.InputBufferSize), 0, &sa)
- if err != nil {
- return 0, &os.PathError{Op: "open", Path: path, Err: err}
- }
- return h, nil
-}
-
-func (l *win32PipeListener) makeServerPipe() (*win32File, error) {
- h, err := makeServerPipeHandle(l.path, l.securityDescriptor, &l.config, false)
- if err != nil {
- return nil, err
- }
- f, err := makeWin32File(h)
- if err != nil {
- syscall.Close(h)
- return nil, err
- }
- return f, nil
-}
-
-func (l *win32PipeListener) listenerRoutine() {
- closed := false
- for !closed {
- select {
- case <-l.closeCh:
- closed = true
- case responseCh := <-l.acceptCh:
- p, err := l.makeServerPipe()
- if err == nil {
- // Wait for the client to connect.
- ch := make(chan error)
- go func() {
- ch <- connectPipe(p)
- }()
- select {
- case err = <-ch:
- if err != nil {
- p.Close()
- p = nil
- }
- case <-l.closeCh:
- // Abort the connect request by closing the handle.
- p.Close()
- p = nil
- err = <-ch
- if err == nil || err == ErrFileClosed {
- err = ErrPipeListenerClosed
- }
- closed = true
- }
- }
- responseCh <- acceptResponse{p, err}
- }
- }
- syscall.Close(l.firstHandle)
- l.firstHandle = 0
- // Notify Close() and Accept() callers that the handle has been closed.
- close(l.doneCh)
-}
-
-// PipeConfig contains configuration for the pipe listener.
-type PipeConfig struct {
- // SecurityDescriptor contains a Windows security descriptor in SDDL format.
- SecurityDescriptor string
-
- // MessageMode determines whether the pipe is in byte or message mode. In either
- // case the pipe is read in byte mode by default. The only practical difference in
- // this implementation is that CloseWrite() is only supported for message mode pipes;
- // CloseWrite() is implemented as a zero-byte write, but zero-byte writes are only
- // transferred to the reader (and returned as io.EOF in this implementation)
- // when the pipe is in message mode.
- MessageMode bool
-
- // InputBufferSize specifies the size of the input buffer, in bytes.
- InputBufferSize int32
-
- // OutputBufferSize specifies the size of the output buffer, in bytes.
- OutputBufferSize int32
-}
-
-// ListenPipe creates a listener on a Windows named pipe path, e.g. \\.\pipe\mypipe.
-// The pipe must not already exist.
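A minimal round-trip sketch for ListenPipe and DialPipe, assuming the illustrative pipe name below is free; MessageMode additionally enables CloseWrite on the returned connections:

    // Hedged sketch: one accept/dial round trip over a named pipe.
    package main

    import (
    	"fmt"
    	"io/ioutil"
    	"time"

    	"github.com/Microsoft/go-winio"
    )

    func main() {
    	const name = `\\.\pipe\winio-example` // illustrative pipe name
    	l, err := winio.ListenPipe(name, &winio.PipeConfig{MessageMode: true})
    	if err != nil {
    		panic(err)
    	}
    	defer l.Close()

    	go func() {
    		c, err := l.Accept()
    		if err != nil {
    			return
    		}
    		c.Write([]byte("hello"))
    		c.Close()
    	}()

    	timeout := 2 * time.Second
    	c, err := winio.DialPipe(name, &timeout)
    	if err != nil {
    		panic(err)
    	}
    	defer c.Close()
    	msg, err := ioutil.ReadAll(c) // reads until the server side closes
    	fmt.Printf("%q %v\n", msg, err)
    }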
-func ListenPipe(path string, c *PipeConfig) (net.Listener, error) { - var ( - sd []byte - err error - ) - if c == nil { - c = &PipeConfig{} - } - if c.SecurityDescriptor != "" { - sd, err = SddlToSecurityDescriptor(c.SecurityDescriptor) - if err != nil { - return nil, err - } - } - h, err := makeServerPipeHandle(path, sd, c, true) - if err != nil { - return nil, err - } - // Immediately open and then close a client handle so that the named pipe is - // created but not currently accepting connections. - h2, err := createFile(path, 0, 0, nil, syscall.OPEN_EXISTING, cSECURITY_SQOS_PRESENT|cSECURITY_ANONYMOUS, 0) - if err != nil { - syscall.Close(h) - return nil, err - } - syscall.Close(h2) - l := &win32PipeListener{ - firstHandle: h, - path: path, - securityDescriptor: sd, - config: *c, - acceptCh: make(chan (chan acceptResponse)), - closeCh: make(chan int), - doneCh: make(chan int), - } - go l.listenerRoutine() - return l, nil -} - -func connectPipe(p *win32File) error { - c, err := p.prepareIo() - if err != nil { - return err - } - err = connectNamedPipe(p.handle, &c.o) - _, err = p.asyncIo(c, time.Time{}, 0, err) - if err != nil && err != cERROR_PIPE_CONNECTED { - return err - } - return nil -} - -func (l *win32PipeListener) Accept() (net.Conn, error) { - ch := make(chan acceptResponse) - select { - case l.acceptCh <- ch: - response := <-ch - err := response.err - if err != nil { - return nil, err - } - if l.config.MessageMode { - return &win32MessageBytePipe{ - win32Pipe: win32Pipe{win32File: response.f, path: l.path}, - }, nil - } - return &win32Pipe{win32File: response.f, path: l.path}, nil - case <-l.doneCh: - return nil, ErrPipeListenerClosed - } -} - -func (l *win32PipeListener) Close() error { - select { - case l.closeCh <- 1: - <-l.doneCh - case <-l.doneCh: - } - return nil -} - -func (l *win32PipeListener) Addr() net.Addr { - return pipeAddress(l.path) -} diff --git a/vendor/src/github.com/Microsoft/go-winio/privilege.go b/vendor/src/github.com/Microsoft/go-winio/privilege.go deleted file mode 100644 index 3d59412c76..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/privilege.go +++ /dev/null @@ -1,191 +0,0 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "runtime" - "sync" - "syscall" - "unicode/utf16" - - "golang.org/x/sys/windows" -) - -//sys adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) [true] = advapi32.AdjustTokenPrivileges -//sys impersonateSelf(level uint32) (err error) = advapi32.ImpersonateSelf -//sys revertToSelf() (err error) = advapi32.RevertToSelf -//sys openThreadToken(thread syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) = advapi32.OpenThreadToken -//sys getCurrentThread() (h syscall.Handle) = GetCurrentThread -//sys lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) = advapi32.LookupPrivilegeValueW -//sys lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) = advapi32.LookupPrivilegeNameW -//sys lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) = advapi32.LookupPrivilegeDisplayNameW - -const ( - SE_PRIVILEGE_ENABLED = 2 - - ERROR_NOT_ALL_ASSIGNED syscall.Errno = 1300 - - SeBackupPrivilege = "SeBackupPrivilege" - SeRestorePrivilege = "SeRestorePrivilege" -) - -const ( - securityAnonymous = iota - securityIdentification - securityImpersonation - 
securityDelegation
-)
-
-var (
- privNames = make(map[string]uint64)
- privNameMutex sync.Mutex
-)
-
-// PrivilegeError represents an error enabling privileges.
-type PrivilegeError struct {
- privileges []uint64
-}
-
-func (e *PrivilegeError) Error() string {
- s := ""
- if len(e.privileges) > 1 {
- s = "Could not enable privileges "
- } else {
- s = "Could not enable privilege "
- }
- for i, p := range e.privileges {
- if i != 0 {
- s += ", "
- }
- s += `"`
- s += getPrivilegeName(p)
- s += `"`
- }
- return s
-}
-
-// RunWithPrivilege enables a single privilege for a function call.
-func RunWithPrivilege(name string, fn func() error) error {
- return RunWithPrivileges([]string{name}, fn)
-}
-
-// RunWithPrivileges enables privileges for a function call.
-func RunWithPrivileges(names []string, fn func() error) error {
- privileges, err := mapPrivileges(names)
- if err != nil {
- return err
- }
- runtime.LockOSThread()
- defer runtime.UnlockOSThread()
- token, err := newThreadToken()
- if err != nil {
- return err
- }
- defer releaseThreadToken(token)
- err = adjustPrivileges(token, privileges)
- if err != nil {
- return err
- }
- return fn()
-}
-
-func mapPrivileges(names []string) ([]uint64, error) {
- var privileges []uint64
- privNameMutex.Lock()
- defer privNameMutex.Unlock()
- for _, name := range names {
- p, ok := privNames[name]
- if !ok {
- err := lookupPrivilegeValue("", name, &p)
- if err != nil {
- return nil, err
- }
- privNames[name] = p
- }
- privileges = append(privileges, p)
- }
- return privileges, nil
-}
-
-// EnableProcessPrivileges enables privileges globally for the process.
-func EnableProcessPrivileges(names []string) error {
- privileges, err := mapPrivileges(names)
- if err != nil {
- return err
- }
-
- p, _ := windows.GetCurrentProcess()
- var token windows.Token
- err = windows.OpenProcessToken(p, windows.TOKEN_ADJUST_PRIVILEGES|windows.TOKEN_QUERY, &token)
- if err != nil {
- return err
- }
-
- defer token.Close()
- return adjustPrivileges(token, privileges)
-}
-
-func adjustPrivileges(token windows.Token, privileges []uint64) error {
- var b bytes.Buffer
- binary.Write(&b, binary.LittleEndian, uint32(len(privileges)))
- for _, p := range privileges {
- binary.Write(&b, binary.LittleEndian, p)
- binary.Write(&b, binary.LittleEndian, uint32(SE_PRIVILEGE_ENABLED))
- }
- prevState := make([]byte, b.Len())
- reqSize := uint32(0)
- success, err := adjustTokenPrivileges(token, false, &b.Bytes()[0], uint32(len(prevState)), &prevState[0], &reqSize)
- if !success {
- return err
- }
- if err == ERROR_NOT_ALL_ASSIGNED {
- return &PrivilegeError{privileges}
- }
- return nil
-}
-
-func getPrivilegeName(luid uint64) string {
- var nameBuffer [256]uint16
- bufSize := uint32(len(nameBuffer))
- err := lookupPrivilegeName("", &luid, &nameBuffer[0], &bufSize)
- if err != nil {
- return fmt.Sprintf("<unknown privilege %d>", luid)
- }
-
- var displayNameBuffer [256]uint16
- displayBufSize := uint32(len(displayNameBuffer))
- var langID uint32
- err = lookupPrivilegeDisplayName("", &nameBuffer[0], &displayNameBuffer[0], &displayBufSize, &langID)
- if err != nil {
- return fmt.Sprintf("<unknown privilege %s>", string(utf16.Decode(nameBuffer[:bufSize])))
- }
-
- return string(utf16.Decode(displayNameBuffer[:displayBufSize]))
-}
-
-func newThreadToken() (windows.Token, error) {
- err := impersonateSelf(securityImpersonation)
- if err != nil {
- return 0, err
- }
-
- var token windows.Token
- err = openThreadToken(getCurrentThread(), syscall.TOKEN_ADJUST_PRIVILEGES|syscall.TOKEN_QUERY, false, &token)
- if err != nil {
- rerr := 
revertToSelf() - if rerr != nil { - panic(rerr) - } - return 0, err - } - return token, nil -} - -func releaseThreadToken(h windows.Token) { - err := revertToSelf() - if err != nil { - panic(err) - } - h.Close() -} diff --git a/vendor/src/github.com/Microsoft/go-winio/reparse.go b/vendor/src/github.com/Microsoft/go-winio/reparse.go deleted file mode 100644 index fc1ee4d3a3..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/reparse.go +++ /dev/null @@ -1,128 +0,0 @@ -package winio - -import ( - "bytes" - "encoding/binary" - "fmt" - "strings" - "unicode/utf16" - "unsafe" -) - -const ( - reparseTagMountPoint = 0xA0000003 - reparseTagSymlink = 0xA000000C -) - -type reparseDataBuffer struct { - ReparseTag uint32 - ReparseDataLength uint16 - Reserved uint16 - SubstituteNameOffset uint16 - SubstituteNameLength uint16 - PrintNameOffset uint16 - PrintNameLength uint16 -} - -// ReparsePoint describes a Win32 symlink or mount point. -type ReparsePoint struct { - Target string - IsMountPoint bool -} - -// UnsupportedReparsePointError is returned when trying to decode a non-symlink or -// mount point reparse point. -type UnsupportedReparsePointError struct { - Tag uint32 -} - -func (e *UnsupportedReparsePointError) Error() string { - return fmt.Sprintf("unsupported reparse point %x", e.Tag) -} - -// DecodeReparsePoint decodes a Win32 REPARSE_DATA_BUFFER structure containing either a symlink -// or a mount point. -func DecodeReparsePoint(b []byte) (*ReparsePoint, error) { - tag := binary.LittleEndian.Uint32(b[0:4]) - return DecodeReparsePointData(tag, b[8:]) -} - -func DecodeReparsePointData(tag uint32, b []byte) (*ReparsePoint, error) { - isMountPoint := false - switch tag { - case reparseTagMountPoint: - isMountPoint = true - case reparseTagSymlink: - default: - return nil, &UnsupportedReparsePointError{tag} - } - nameOffset := 8 + binary.LittleEndian.Uint16(b[4:6]) - if !isMountPoint { - nameOffset += 4 - } - nameLength := binary.LittleEndian.Uint16(b[6:8]) - name := make([]uint16, nameLength/2) - err := binary.Read(bytes.NewReader(b[nameOffset:nameOffset+nameLength]), binary.LittleEndian, &name) - if err != nil { - return nil, err - } - return &ReparsePoint{string(utf16.Decode(name)), isMountPoint}, nil -} - -func isDriveLetter(c byte) bool { - return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') -} - -// EncodeReparsePoint encodes a Win32 REPARSE_DATA_BUFFER structure describing a symlink or -// mount point. -func EncodeReparsePoint(rp *ReparsePoint) []byte { - // Generate an NT path and determine if this is a relative path. - var ntTarget string - relative := false - if strings.HasPrefix(rp.Target, `\\?\`) { - ntTarget = `\??\` + rp.Target[4:] - } else if strings.HasPrefix(rp.Target, `\\`) { - ntTarget = `\??\UNC\` + rp.Target[2:] - } else if len(rp.Target) >= 2 && isDriveLetter(rp.Target[0]) && rp.Target[1] == ':' { - ntTarget = `\??\` + rp.Target - } else { - ntTarget = rp.Target - relative = true - } - - // The paths must be NUL-terminated even though they are counted strings. 
- target16 := utf16.Encode([]rune(rp.Target + "\x00")) - ntTarget16 := utf16.Encode([]rune(ntTarget + "\x00")) - - size := int(unsafe.Sizeof(reparseDataBuffer{})) - 8 - size += len(ntTarget16)*2 + len(target16)*2 - - tag := uint32(reparseTagMountPoint) - if !rp.IsMountPoint { - tag = reparseTagSymlink - size += 4 // Add room for symlink flags - } - - data := reparseDataBuffer{ - ReparseTag: tag, - ReparseDataLength: uint16(size), - SubstituteNameOffset: 0, - SubstituteNameLength: uint16((len(ntTarget16) - 1) * 2), - PrintNameOffset: uint16(len(ntTarget16) * 2), - PrintNameLength: uint16((len(target16) - 1) * 2), - } - - var b bytes.Buffer - binary.Write(&b, binary.LittleEndian, &data) - if !rp.IsMountPoint { - flags := uint32(0) - if relative { - flags |= 1 - } - binary.Write(&b, binary.LittleEndian, flags) - } - - binary.Write(&b, binary.LittleEndian, ntTarget16) - binary.Write(&b, binary.LittleEndian, target16) - return b.Bytes() -} diff --git a/vendor/src/github.com/Microsoft/go-winio/sd.go b/vendor/src/github.com/Microsoft/go-winio/sd.go deleted file mode 100644 index 60ab56ce7a..0000000000 --- a/vendor/src/github.com/Microsoft/go-winio/sd.go +++ /dev/null @@ -1,96 +0,0 @@ -package winio - -import ( - "syscall" - "unsafe" -) - -//sys lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) = advapi32.LookupAccountNameW -//sys convertSidToStringSid(sid *byte, str **uint16) (err error) = advapi32.ConvertSidToStringSidW -//sys convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) = advapi32.ConvertStringSecurityDescriptorToSecurityDescriptorW -//sys convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) = advapi32.ConvertSecurityDescriptorToStringSecurityDescriptorW -//sys localFree(mem uintptr) = LocalFree -//sys getSecurityDescriptorLength(sd uintptr) (len uint32) = advapi32.GetSecurityDescriptorLength - -const ( - cERROR_NONE_MAPPED = syscall.Errno(1332) -) - -type AccountLookupError struct { - Name string - Err error -} - -func (e *AccountLookupError) Error() string { - if e.Name == "" { - return "lookup account: empty account name specified" - } - var s string - switch e.Err { - case cERROR_NONE_MAPPED: - s = "not found" - default: - s = e.Err.Error() - } - return "lookup account " + e.Name + ": " + s -} - -type SddlConversionError struct { - Sddl string - Err error -} - -func (e *SddlConversionError) Error() string { - return "convert " + e.Sddl + ": " + e.Err.Error() -} - -// LookupSidByName looks up the SID of an account by name -func LookupSidByName(name string) (sid string, err error) { - if name == "" { - return "", &AccountLookupError{name, cERROR_NONE_MAPPED} - } - - var sidSize, sidNameUse, refDomainSize uint32 - err = lookupAccountName(nil, name, nil, &sidSize, nil, &refDomainSize, &sidNameUse) - if err != nil && err != syscall.ERROR_INSUFFICIENT_BUFFER { - return "", &AccountLookupError{name, err} - } - sidBuffer := make([]byte, sidSize) - refDomainBuffer := make([]uint16, refDomainSize) - err = lookupAccountName(nil, name, &sidBuffer[0], &sidSize, &refDomainBuffer[0], &refDomainSize, &sidNameUse) - if err != nil { - return "", &AccountLookupError{name, err} - } - var strBuffer *uint16 - err = convertSidToStringSid(&sidBuffer[0], &strBuffer) - if err != nil { - return "", &AccountLookupError{name, err} - } - sid = 
syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(strBuffer))[:])
- localFree(uintptr(unsafe.Pointer(strBuffer)))
- return sid, nil
-}
-
-func SddlToSecurityDescriptor(sddl string) ([]byte, error) {
- var sdBuffer uintptr
- err := convertStringSecurityDescriptorToSecurityDescriptor(sddl, 1, &sdBuffer, nil)
- if err != nil {
- return nil, &SddlConversionError{sddl, err}
- }
- defer localFree(sdBuffer)
- sd := make([]byte, getSecurityDescriptorLength(sdBuffer))
- copy(sd, (*[0xffff]byte)(unsafe.Pointer(sdBuffer))[:len(sd)])
- return sd, nil
-}
-
-func SecurityDescriptorToSddl(sd []byte) (string, error) {
- var sddl *uint16
- // The returned string length seems to include an arbitrary number of terminating NULs.
- // Don't use it.
- err := convertSecurityDescriptorToStringSecurityDescriptor(&sd[0], 1, 0xff, &sddl, nil)
- if err != nil {
- return "", err
- }
- defer localFree(uintptr(unsafe.Pointer(sddl)))
- return syscall.UTF16ToString((*[0xffff]uint16)(unsafe.Pointer(sddl))[:]), nil
-}
diff --git a/vendor/src/github.com/Microsoft/go-winio/syscall.go b/vendor/src/github.com/Microsoft/go-winio/syscall.go
deleted file mode 100644
index 96fdff7b49..0000000000
--- a/vendor/src/github.com/Microsoft/go-winio/syscall.go
+++ /dev/null
@@ -1,3 +0,0 @@
-package winio
-
-//go:generate go run $GOROOT/src/syscall/mksyscall_windows.go -output zsyscall.go file.go pipe.go sd.go fileinfo.go privilege.go backup.go
diff --git a/vendor/src/github.com/Microsoft/go-winio/zsyscall.go b/vendor/src/github.com/Microsoft/go-winio/zsyscall.go
deleted file mode 100644
index 6d047d3690..0000000000
--- a/vendor/src/github.com/Microsoft/go-winio/zsyscall.go
+++ /dev/null
@@ -1,496 +0,0 @@
-// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT
-
-package winio
-
-import (
- "syscall"
- "unsafe"
-
- "golang.org/x/sys/windows"
-)
-
-var _ unsafe.Pointer
-
-var (
- modkernel32 = syscall.NewLazyDLL("kernel32.dll")
- modwinmm = syscall.NewLazyDLL("winmm.dll")
- modadvapi32 = syscall.NewLazyDLL("advapi32.dll")
-
- procCancelIoEx = modkernel32.NewProc("CancelIoEx")
- procCreateIoCompletionPort = modkernel32.NewProc("CreateIoCompletionPort")
- procGetQueuedCompletionStatus = modkernel32.NewProc("GetQueuedCompletionStatus")
- procSetFileCompletionNotificationModes = modkernel32.NewProc("SetFileCompletionNotificationModes")
- proctimeBeginPeriod = modwinmm.NewProc("timeBeginPeriod")
- procConnectNamedPipe = modkernel32.NewProc("ConnectNamedPipe")
- procCreateNamedPipeW = modkernel32.NewProc("CreateNamedPipeW")
- procCreateFileW = modkernel32.NewProc("CreateFileW")
- procWaitNamedPipeW = modkernel32.NewProc("WaitNamedPipeW")
- procGetNamedPipeInfo = modkernel32.NewProc("GetNamedPipeInfo")
- procGetNamedPipeHandleStateW = modkernel32.NewProc("GetNamedPipeHandleStateW")
- procLookupAccountNameW = modadvapi32.NewProc("LookupAccountNameW")
- procConvertSidToStringSidW = modadvapi32.NewProc("ConvertSidToStringSidW")
- procConvertStringSecurityDescriptorToSecurityDescriptorW = modadvapi32.NewProc("ConvertStringSecurityDescriptorToSecurityDescriptorW")
- procConvertSecurityDescriptorToStringSecurityDescriptorW = modadvapi32.NewProc("ConvertSecurityDescriptorToStringSecurityDescriptorW")
- procLocalFree = modkernel32.NewProc("LocalFree")
- procGetSecurityDescriptorLength = modadvapi32.NewProc("GetSecurityDescriptorLength")
- procGetFileInformationByHandleEx = modkernel32.NewProc("GetFileInformationByHandleEx")
- procSetFileInformationByHandle = modkernel32.NewProc("SetFileInformationByHandle")
- procAdjustTokenPrivileges = 
modadvapi32.NewProc("AdjustTokenPrivileges") - procImpersonateSelf = modadvapi32.NewProc("ImpersonateSelf") - procRevertToSelf = modadvapi32.NewProc("RevertToSelf") - procOpenThreadToken = modadvapi32.NewProc("OpenThreadToken") - procGetCurrentThread = modkernel32.NewProc("GetCurrentThread") - procLookupPrivilegeValueW = modadvapi32.NewProc("LookupPrivilegeValueW") - procLookupPrivilegeNameW = modadvapi32.NewProc("LookupPrivilegeNameW") - procLookupPrivilegeDisplayNameW = modadvapi32.NewProc("LookupPrivilegeDisplayNameW") - procBackupRead = modkernel32.NewProc("BackupRead") - procBackupWrite = modkernel32.NewProc("BackupWrite") -) - -func cancelIoEx(file syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procCancelIoEx.Addr(), 2, uintptr(file), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func createIoCompletionPort(file syscall.Handle, port syscall.Handle, key uintptr, threadCount uint32) (newport syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall6(procCreateIoCompletionPort.Addr(), 4, uintptr(file), uintptr(port), uintptr(key), uintptr(threadCount), 0, 0) - newport = syscall.Handle(r0) - if newport == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getQueuedCompletionStatus(port syscall.Handle, bytes *uint32, key *uintptr, o **ioOperation, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetQueuedCompletionStatus.Addr(), 5, uintptr(port), uintptr(unsafe.Pointer(bytes)), uintptr(unsafe.Pointer(key)), uintptr(unsafe.Pointer(o)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func setFileCompletionNotificationModes(h syscall.Handle, flags uint8) (err error) { - r1, _, e1 := syscall.Syscall(procSetFileCompletionNotificationModes.Addr(), 2, uintptr(h), uintptr(flags), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func timeBeginPeriod(period uint32) (n int32) { - r0, _, _ := syscall.Syscall(proctimeBeginPeriod.Addr(), 1, uintptr(period), 0, 0) - n = int32(r0) - return -} - -func connectNamedPipe(pipe syscall.Handle, o *syscall.Overlapped) (err error) { - r1, _, e1 := syscall.Syscall(procConnectNamedPipe.Addr(), 2, uintptr(pipe), uintptr(unsafe.Pointer(o)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func createNamedPipe(name string, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createNamedPipe(_p0, flags, pipeMode, maxInstances, outSize, inSize, defaultTimeout, sa) -} - -func _createNamedPipe(name *uint16, flags uint32, pipeMode uint32, maxInstances uint32, outSize uint32, inSize uint32, defaultTimeout uint32, sa *securityAttributes) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateNamedPipeW.Addr(), 8, uintptr(unsafe.Pointer(name)), uintptr(flags), uintptr(pipeMode), uintptr(maxInstances), uintptr(outSize), uintptr(inSize), uintptr(defaultTimeout), uintptr(unsafe.Pointer(sa)), 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func 
createFile(name string, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _createFile(_p0, access, mode, sa, createmode, attrs, templatefile) -} - -func _createFile(name *uint16, access uint32, mode uint32, sa *securityAttributes, createmode uint32, attrs uint32, templatefile syscall.Handle) (handle syscall.Handle, err error) { - r0, _, e1 := syscall.Syscall9(procCreateFileW.Addr(), 7, uintptr(unsafe.Pointer(name)), uintptr(access), uintptr(mode), uintptr(unsafe.Pointer(sa)), uintptr(createmode), uintptr(attrs), uintptr(templatefile), 0, 0) - handle = syscall.Handle(r0) - if handle == syscall.InvalidHandle { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func waitNamedPipe(name string, timeout uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _waitNamedPipe(_p0, timeout) -} - -func _waitNamedPipe(name *uint16, timeout uint32) (err error) { - r1, _, e1 := syscall.Syscall(procWaitNamedPipeW.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(timeout), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getNamedPipeInfo(pipe syscall.Handle, flags *uint32, outSize *uint32, inSize *uint32, maxInstances *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetNamedPipeInfo.Addr(), 5, uintptr(pipe), uintptr(unsafe.Pointer(flags)), uintptr(unsafe.Pointer(outSize)), uintptr(unsafe.Pointer(inSize)), uintptr(unsafe.Pointer(maxInstances)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getNamedPipeHandleState(pipe syscall.Handle, state *uint32, curInstances *uint32, maxCollectionCount *uint32, collectDataTimeout *uint32, userName *uint16, maxUserNameSize uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procGetNamedPipeHandleStateW.Addr(), 7, uintptr(pipe), uintptr(unsafe.Pointer(state)), uintptr(unsafe.Pointer(curInstances)), uintptr(unsafe.Pointer(maxCollectionCount)), uintptr(unsafe.Pointer(collectDataTimeout)), uintptr(unsafe.Pointer(userName)), uintptr(maxUserNameSize), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func lookupAccountName(systemName *uint16, accountName string, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(accountName) - if err != nil { - return - } - return _lookupAccountName(systemName, _p0, sid, sidSize, refDomain, refDomainSize, sidNameUse) -} - -func _lookupAccountName(systemName *uint16, accountName *uint16, sid *byte, sidSize *uint32, refDomain *uint16, refDomainSize *uint32, sidNameUse *uint32) (err error) { - r1, _, e1 := syscall.Syscall9(procLookupAccountNameW.Addr(), 7, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(accountName)), uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(sidSize)), uintptr(unsafe.Pointer(refDomain)), uintptr(unsafe.Pointer(refDomainSize)), uintptr(unsafe.Pointer(sidNameUse)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func convertSidToStringSid(sid *byte, str **uint16) (err error) { - r1, _, e1 := 
syscall.Syscall(procConvertSidToStringSidW.Addr(), 2, uintptr(unsafe.Pointer(sid)), uintptr(unsafe.Pointer(str)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func convertStringSecurityDescriptorToSecurityDescriptor(str string, revision uint32, sd *uintptr, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(str) - if err != nil { - return - } - return _convertStringSecurityDescriptorToSecurityDescriptor(_p0, revision, sd, size) -} - -func _convertStringSecurityDescriptorToSecurityDescriptor(str *uint16, revision uint32, sd *uintptr, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertStringSecurityDescriptorToSecurityDescriptorW.Addr(), 4, uintptr(unsafe.Pointer(str)), uintptr(revision), uintptr(unsafe.Pointer(sd)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func convertSecurityDescriptorToStringSecurityDescriptor(sd *byte, revision uint32, secInfo uint32, sddl **uint16, sddlSize *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procConvertSecurityDescriptorToStringSecurityDescriptorW.Addr(), 5, uintptr(unsafe.Pointer(sd)), uintptr(revision), uintptr(secInfo), uintptr(unsafe.Pointer(sddl)), uintptr(unsafe.Pointer(sddlSize)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func localFree(mem uintptr) { - syscall.Syscall(procLocalFree.Addr(), 1, uintptr(mem), 0, 0) - return -} - -func getSecurityDescriptorLength(sd uintptr) (len uint32) { - r0, _, _ := syscall.Syscall(procGetSecurityDescriptorLength.Addr(), 1, uintptr(sd), 0, 0) - len = uint32(r0) - return -} - -func getFileInformationByHandleEx(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procGetFileInformationByHandleEx.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func setFileInformationByHandle(h syscall.Handle, class uint32, buffer *byte, size uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procSetFileInformationByHandle.Addr(), 4, uintptr(h), uintptr(class), uintptr(unsafe.Pointer(buffer)), uintptr(size), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func adjustTokenPrivileges(token windows.Token, releaseAll bool, input *byte, outputSize uint32, output *byte, requiredSize *uint32) (success bool, err error) { - var _p0 uint32 - if releaseAll { - _p0 = 1 - } else { - _p0 = 0 - } - r0, _, e1 := syscall.Syscall6(procAdjustTokenPrivileges.Addr(), 6, uintptr(token), uintptr(_p0), uintptr(unsafe.Pointer(input)), uintptr(outputSize), uintptr(unsafe.Pointer(output)), uintptr(unsafe.Pointer(requiredSize))) - success = r0 != 0 - if true { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func impersonateSelf(level uint32) (err error) { - r1, _, e1 := syscall.Syscall(procImpersonateSelf.Addr(), 1, uintptr(level), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func revertToSelf() (err error) { - r1, _, e1 := syscall.Syscall(procRevertToSelf.Addr(), 0, 0, 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func openThreadToken(thread 
syscall.Handle, accessMask uint32, openAsSelf bool, token *windows.Token) (err error) { - var _p0 uint32 - if openAsSelf { - _p0 = 1 - } else { - _p0 = 0 - } - r1, _, e1 := syscall.Syscall6(procOpenThreadToken.Addr(), 4, uintptr(thread), uintptr(accessMask), uintptr(_p0), uintptr(unsafe.Pointer(token)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func getCurrentThread() (h syscall.Handle) { - r0, _, _ := syscall.Syscall(procGetCurrentThread.Addr(), 0, 0, 0, 0) - h = syscall.Handle(r0) - return -} - -func lookupPrivilegeValue(systemName string, name string, luid *uint64) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - var _p1 *uint16 - _p1, err = syscall.UTF16PtrFromString(name) - if err != nil { - return - } - return _lookupPrivilegeValue(_p0, _p1, luid) -} - -func _lookupPrivilegeValue(systemName *uint16, name *uint16, luid *uint64) (err error) { - r1, _, e1 := syscall.Syscall(procLookupPrivilegeValueW.Addr(), 3, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(luid))) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func lookupPrivilegeName(systemName string, luid *uint64, buffer *uint16, size *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeName(_p0, luid, buffer, size) -} - -func _lookupPrivilegeName(systemName *uint16, luid *uint64, buffer *uint16, size *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeNameW.Addr(), 4, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(luid)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func lookupPrivilegeDisplayName(systemName string, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - var _p0 *uint16 - _p0, err = syscall.UTF16PtrFromString(systemName) - if err != nil { - return - } - return _lookupPrivilegeDisplayName(_p0, name, buffer, size, languageId) -} - -func _lookupPrivilegeDisplayName(systemName *uint16, name *uint16, buffer *uint16, size *uint32, languageId *uint32) (err error) { - r1, _, e1 := syscall.Syscall6(procLookupPrivilegeDisplayNameW.Addr(), 5, uintptr(unsafe.Pointer(systemName)), uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(buffer)), uintptr(unsafe.Pointer(size)), uintptr(unsafe.Pointer(languageId)), 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func backupRead(h syscall.Handle, b []byte, bytesRead *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { - _p0 = &b[0] - } - var _p1 uint32 - if abort { - _p1 = 1 - } else { - _p1 = 0 - } - var _p2 uint32 - if processSecurity { - _p2 = 1 - } else { - _p2 = 0 - } - r1, _, e1 := syscall.Syscall9(procBackupRead.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesRead)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0) - if r1 == 0 { - if e1 != 0 { - err = error(e1) - } else { - err = syscall.EINVAL - } - } - return -} - -func backupWrite(h syscall.Handle, b []byte, bytesWritten *uint32, abort bool, processSecurity bool, context *uintptr) (err error) { - var _p0 *byte - if len(b) > 0 { 
- _p0 = &b[0]
- }
- var _p1 uint32
- if abort {
- _p1 = 1
- } else {
- _p1 = 0
- }
- var _p2 uint32
- if processSecurity {
- _p2 = 1
- } else {
- _p2 = 0
- }
- r1, _, e1 := syscall.Syscall9(procBackupWrite.Addr(), 7, uintptr(h), uintptr(unsafe.Pointer(_p0)), uintptr(len(b)), uintptr(unsafe.Pointer(bytesWritten)), uintptr(_p1), uintptr(_p2), uintptr(unsafe.Pointer(context)), 0, 0)
- if r1 == 0 {
- if e1 != 0 {
- err = error(e1)
- } else {
- err = syscall.EINVAL
- }
- }
- return
-}
diff --git a/vendor/src/github.com/Microsoft/hcsshim/LICENSE b/vendor/src/github.com/Microsoft/hcsshim/LICENSE
deleted file mode 100644
index b8b569d774..0000000000
--- a/vendor/src/github.com/Microsoft/hcsshim/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2015 Microsoft
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/src/github.com/Microsoft/hcsshim/activatelayer.go b/vendor/src/github.com/Microsoft/hcsshim/activatelayer.go
deleted file mode 100644
index efc4d8029c..0000000000
--- a/vendor/src/github.com/Microsoft/hcsshim/activatelayer.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package hcsshim
-
-import "github.com/Sirupsen/logrus"
-
-// ActivateLayer will find the layer with the given id and mount its filesystem.
-// For a read/write layer, the mounted filesystem will appear as a volume on the
-// host, while a read-only layer is generally expected to be a no-op.
-// An activated layer must later be deactivated via DeactivateLayer.
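A minimal sketch of the activate/deactivate pairing this comment calls for, assuming illustrative DriverInfo values and layer ID; DeactivateLayer is the counterpart defined elsewhere in this package:

    // Hedged sketch: pair ActivateLayer with DeactivateLayer.
    package main

    import "github.com/Microsoft/hcsshim"

    func withLayer(info hcsshim.DriverInfo, id string, fn func() error) error {
    	if err := hcsshim.ActivateLayer(info, id); err != nil {
    		return err
    	}
    	// Best-effort cleanup; a real caller would check this error.
    	defer hcsshim.DeactivateLayer(info, id)
    	return fn()
    }

    func main() {
    	info := hcsshim.DriverInfo{HomeDir: `C:\layers`, Flavour: 1} // illustrative values
    	_ = withLayer(info, "exampleLayerID", func() error { return nil })
    }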
-func ActivateLayer(info DriverInfo, id string) error { - title := "hcsshim::ActivateLayer " - logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id) - - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return err - } - - err = activateLayer(&infop, id) - if err != nil { - err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour) - logrus.Error(err) - return err - } - - logrus.Debugf(title+" - succeeded id=%s flavour=%d", id, info.Flavour) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/baselayer.go b/vendor/src/github.com/Microsoft/hcsshim/baselayer.go deleted file mode 100644 index 63bde87a4b..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/baselayer.go +++ /dev/null @@ -1,147 +0,0 @@ -package hcsshim - -import ( - "errors" - "os" - "path/filepath" - "syscall" - - "github.com/Microsoft/go-winio" -) - -type baseLayerWriter struct { - root string - f *os.File - bw *winio.BackupFileWriter - err error - hasUtilityVM bool -} - -func (w *baseLayerWriter) closeCurrentFile() error { - if w.f != nil { - err := w.bw.Close() - err2 := w.f.Close() - w.f = nil - w.bw = nil - if err != nil { - return err - } - if err2 != nil { - return err2 - } - } - return nil -} - -func (w *baseLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) (err error) { - defer func() { - if err != nil { - w.err = err - } - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - - if filepath.ToSlash(name) == `UtilityVM/Files` { - w.hasUtilityVM = true - } - - path := filepath.Join(w.root, name) - path, err = makeLongAbsPath(path) - if err != nil { - return err - } - - var f *os.File - defer func() { - if f != nil { - f.Close() - } - }() - - createmode := uint32(syscall.CREATE_NEW) - if fileInfo.FileAttributes&syscall.FILE_ATTRIBUTE_DIRECTORY != 0 { - err := os.Mkdir(path, 0) - if err != nil && !os.IsExist(err) { - return err - } - createmode = syscall.OPEN_EXISTING - } - - mode := uint32(syscall.GENERIC_READ | syscall.GENERIC_WRITE | winio.WRITE_DAC | winio.WRITE_OWNER | winio.ACCESS_SYSTEM_SECURITY) - f, err = winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createmode) - if err != nil { - return err - } - - err = winio.SetFileBasicInfo(f, fileInfo) - if err != nil { - return err - } - - w.f = f - w.bw = winio.NewBackupFileWriter(f, true) - f = nil - return nil -} - -func (w *baseLayerWriter) AddLink(name string, target string) (err error) { - defer func() { - if err != nil { - w.err = err - } - }() - - err = w.closeCurrentFile() - if err != nil { - return err - } - - linkpath, err := makeLongAbsPath(filepath.Join(w.root, name)) - if err != nil { - return err - } - - linktarget, err := makeLongAbsPath(filepath.Join(w.root, target)) - if err != nil { - return err - } - - return os.Link(linktarget, linkpath) -} - -func (w *baseLayerWriter) Remove(name string) error { - return errors.New("base layer cannot have tombstones") -} - -func (w *baseLayerWriter) Write(b []byte) (int, error) { - n, err := w.bw.Write(b) - if err != nil { - w.err = err - } - return n, err -} - -func (w *baseLayerWriter) Close() error { - err := w.closeCurrentFile() - if err != nil { - return err - } - if w.err == nil { - err = ProcessBaseLayer(w.root) - if err != nil { - return err - } - - if w.hasUtilityVM { - err = ProcessUtilityVMImage(filepath.Join(w.root, "UtilityVM")) - if err != nil { - return err - } - } - } - return w.err -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/callback.go b/vendor/src/github.com/Microsoft/hcsshim/callback.go 
deleted file mode 100644 index e8c2b00c8a..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/callback.go +++ /dev/null @@ -1,79 +0,0 @@ -package hcsshim - -import ( - "sync" - "syscall" -) - -var ( - nextCallback uintptr - callbackMap = map[uintptr]*notifcationWatcherContext{} - callbackMapLock = sync.RWMutex{} - - notificationWatcherCallback = syscall.NewCallback(notificationWatcher) - - // Notifications for HCS_SYSTEM handles - hcsNotificationSystemExited hcsNotification = 0x00000001 - hcsNotificationSystemCreateCompleted hcsNotification = 0x00000002 - hcsNotificationSystemStartCompleted hcsNotification = 0x00000003 - hcsNotificationSystemPauseCompleted hcsNotification = 0x00000004 - hcsNotificationSystemResumeCompleted hcsNotification = 0x00000005 - - // Notifications for HCS_PROCESS handles - hcsNotificationProcessExited hcsNotification = 0x00010000 - - // Common notifications - hcsNotificationInvalid hcsNotification = 0x00000000 - hcsNotificationServiceDisconnect hcsNotification = 0x01000000 -) - -type hcsNotification uint32 -type notificationChannel chan error - -type notifcationWatcherContext struct { - channels notificationChannels - handle hcsCallback -} - -type notificationChannels map[hcsNotification]notificationChannel - -func newChannels() notificationChannels { - channels := make(notificationChannels) - - channels[hcsNotificationSystemExited] = make(notificationChannel, 1) - channels[hcsNotificationSystemCreateCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemStartCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemPauseCompleted] = make(notificationChannel, 1) - channels[hcsNotificationSystemResumeCompleted] = make(notificationChannel, 1) - channels[hcsNotificationProcessExited] = make(notificationChannel, 1) - channels[hcsNotificationServiceDisconnect] = make(notificationChannel, 1) - return channels -} -func closeChannels(channels notificationChannels) { - close(channels[hcsNotificationSystemExited]) - close(channels[hcsNotificationSystemCreateCompleted]) - close(channels[hcsNotificationSystemStartCompleted]) - close(channels[hcsNotificationSystemPauseCompleted]) - close(channels[hcsNotificationSystemResumeCompleted]) - close(channels[hcsNotificationProcessExited]) - close(channels[hcsNotificationServiceDisconnect]) -} - -func notificationWatcher(notificationType hcsNotification, callbackNumber uintptr, notificationStatus uintptr, notificationData *uint16) uintptr { - var result error - if int32(notificationStatus) < 0 { - result = syscall.Errno(win32FromHresult(notificationStatus)) - } - - callbackMapLock.RLock() - context := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if context == nil { - return 0 - } - - context.channels[notificationType] <- result - - return 0 -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/container.go b/vendor/src/github.com/Microsoft/hcsshim/container.go deleted file mode 100644 index 6360347b00..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/container.go +++ /dev/null @@ -1,531 +0,0 @@ -package hcsshim - -import ( - "encoding/json" - "errors" - "fmt" - "runtime" - "syscall" - "time" - - "github.com/Sirupsen/logrus" -) - -var ( - defaultTimeout = time.Minute * 4 -) - -const pendingUpdatesQuery = `{ "PropertyTypes" : ["PendingUpdates"]}` - -// ContainerError is an error encountered in HCS -type ContainerError struct { - Container *container - Operation string - ExtraInfo string - Err error -} - -type container struct { - handle hcsSystem - id string - 
callbackNumber uintptr -} - -type containerProperties struct { - ID string `json:"Id"` - Name string - SystemType string - Owner string - SiloGUID string `json:"SiloGuid,omitempty"` - IsDummy bool `json:",omitempty"` - RuntimeID string `json:"RuntimeId,omitempty"` - Stopped bool `json:",omitempty"` - ExitType string `json:",omitempty"` - AreUpdatesPending bool `json:",omitempty"` -} - -// CreateContainer creates a new container with the given configuration but does not start it. -func CreateContainer(id string, c *ContainerConfig) (Container, error) { - operation := "CreateContainer" - title := "HCSShim::" + operation - - container := &container{ - id: id, - } - - configurationb, err := json.Marshal(c) - if err != nil { - return nil, err - } - - configuration := string(configurationb) - logrus.Debugf(title+" id=%s config=%s", id, configuration) - - var ( - resultp *uint16 - createError error - ) - if hcsCallbacksSupported { - var identity syscall.Handle - createError = hcsCreateComputeSystem(id, configuration, identity, &container.handle, &resultp) - - if createError == nil || createError == ErrVmcomputeOperationPending { - if err := container.registerCallback(); err != nil { - return nil, makeContainerError(container, operation, "", err) - } - } - } else { - createError = hcsCreateComputeSystemTP5(id, configuration, &container.handle, &resultp) - } - - err = processAsyncHcsResult(createError, resultp, container.callbackNumber, hcsNotificationSystemCreateCompleted, &defaultTimeout) - if err != nil { - return nil, makeContainerError(container, operation, configuration, err) - } - - logrus.Debugf(title+" succeeded id=%s handle=%d", id, container.handle) - runtime.SetFinalizer(container, closeContainer) - return container, nil -} - -// OpenContainer opens an existing container by ID. -func OpenContainer(id string) (Container, error) { - operation := "OpenContainer" - title := "HCSShim::" + operation - logrus.Debugf(title+" id=%s", id) - - container := &container{ - id: id, - } - - var ( - handle hcsSystem - resultp *uint16 - ) - err := hcsOpenComputeSystem(id, &handle, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - return nil, makeContainerError(container, operation, "", err) - } - - container.handle = handle - - logrus.Debugf(title+" succeeded id=%s handle=%d", id, handle) - runtime.SetFinalizer(container, closeContainer) - return container, nil -} - -// Start synchronously starts the container. -func (container *container) Start() error { - operation := "Start" - title := "HCSShim::Container::" + operation - logrus.Debugf(title+" id=%s", container.id) - - var resultp *uint16 - err := hcsStartComputeSystemTP5(container.handle, nil, &resultp) - err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemStartCompleted, &defaultTimeout) - if err != nil { - return makeContainerError(container, operation, "", err) - } - - logrus.Debugf(title+" succeeded id=%s", container.id) - return nil -} - -// Shutdown requests a container shutdown, but it may not actually be shut down until Wait() succeeds. -// It returns ErrVmcomputeOperationPending if the shutdown is in progress, nil if the shutdown is complete. 
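A sketch (not from the original file) of the create/start/wait lifecycle these functions implement; the ContainerConfig fields are defined in interface.go later in this patch, and the values here are illustrative:

package hcsshimexample

import "github.com/Microsoft/hcsshim"

// runContainer creates a container, starts it, and blocks until it exits.
func runContainer(id string) error {
	cfg := &hcsshim.ContainerConfig{
		SystemType: "Container", // HCS requires this hard-coded value
		Name:       id,
		Owner:      "docker", // illustrative owner string
	}
	c, err := hcsshim.CreateContainer(id, cfg)
	if err != nil {
		return err
	}
	defer c.Close() // releases the handle; does not terminate the container
	if err := c.Start(); err != nil {
		return err
	}
	return c.Wait()
}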
-func (container *container) Shutdown() error { - operation := "Shutdown" - title := "HCSShim::Container::" + operation - logrus.Debugf(title+" id=%s", container.id) - - var resultp *uint16 - err := hcsShutdownComputeSystemTP5(container.handle, nil, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - if err == ErrVmcomputeOperationPending { - return ErrVmcomputeOperationPending - } - return makeContainerError(container, operation, "", err) - } - - logrus.Debugf(title+" succeeded id=%s", container.id) - return nil -} - -// Terminate requests a container terminate, but it may not actually be terminated until Wait() succeeds. -// It returns ErrVmcomputeOperationPending if the termination is in progress, nil if the termination is complete. -func (container *container) Terminate() error { - operation := "Terminate" - title := "HCSShim::Container::" + operation - logrus.Debugf(title+" id=%s", container.id) - - var resultp *uint16 - err := hcsTerminateComputeSystemTP5(container.handle, nil, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - if err == ErrVmcomputeOperationPending { - return ErrVmcomputeOperationPending - } - return makeContainerError(container, operation, "", err) - } - - logrus.Debugf(title+" succeeded id=%s", container.id) - return nil -} - -// Wait synchronously waits for the container to shut down or terminate. -func (container *container) Wait() error { - operation := "Wait" - title := "HCSShim::Container::" + operation - logrus.Debugf(title+" id=%s", container.id) - - if hcsCallbacksSupported { - err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, nil) - if err != nil { - return makeContainerError(container, operation, "", err) - } - } else { - _, err := container.waitTimeoutInternal(syscall.INFINITE) - if err != nil { - return makeContainerError(container, operation, "", err) - } - } - - logrus.Debugf(title+" succeeded id=%s", container.id) - return nil -} - -func (container *container) waitTimeoutInternal(timeout uint32) (bool, error) { - return waitTimeoutInternalHelper(container, timeout) -} - -// WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It returns -// ErrTimeout if the timeout duration expires before the container is shut down.
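The Shutdown/Terminate/Wait methods above combine into a stop-with-grace-period pattern; a sketch, assuming a forceful Terminate is acceptable once the grace period lapses:

package hcsshimexample

import (
	"time"

	"github.com/Microsoft/hcsshim"
)

// stopContainer requests a clean shutdown, waits up to grace, then terminates.
func stopContainer(c hcsshim.Container, grace time.Duration) error {
	if err := c.Shutdown(); err != nil && err != hcsshim.ErrVmcomputeOperationPending {
		return err
	}
	switch err := c.WaitTimeout(grace); err {
	case nil:
		return nil
	case hcsshim.ErrTimeout:
		return c.Terminate() // forceful fallback after the grace period
	default:
		return err
	}
}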
-func (container *container) WaitTimeout(timeout time.Duration) error { - operation := "WaitTimeout" - title := "HCSShim::Container::" + operation - logrus.Debugf(title+" id=%s", container.id) - - if hcsCallbacksSupported { - err := waitForNotification(container.callbackNumber, hcsNotificationSystemExited, &timeout) - if err != nil { - return makeContainerError(container, operation, "", err) - } - } else { - finished, err := waitTimeoutHelper(container, timeout) - if !finished { - return ErrTimeout - } else if err != nil { - return makeContainerError(container, operation, "", err) - } - } - - logrus.Debugf(title+" succeeded id=%s", container.id) - return nil -} - -func (container *container) hcsWait(timeout uint32) (bool, error) { - var ( - resultp *uint16 - exitEvent syscall.Handle - ) - - err := hcsCreateComputeSystemWait(container.handle, &exitEvent, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - return false, err - } - defer syscall.CloseHandle(exitEvent) - - return waitForSingleObject(exitEvent, timeout) -} - -func (container *container) properties(query string) (*containerProperties, error) { - var ( - resultp *uint16 - propertiesp *uint16 - ) - err := hcsGetComputeSystemProperties(container.handle, query, &propertiesp, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - return nil, err - } - - if propertiesp == nil { - return nil, errors.New("Unexpected result from hcsGetComputeSystemProperties, properties should never be nil") - } - propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp) - - properties := &containerProperties{} - if err := json.Unmarshal(propertiesRaw, properties); err != nil { - return nil, err - } - - return properties, nil -} - -// HasPendingUpdates returns true if the container has updates pending to install -func (container *container) HasPendingUpdates() (bool, error) { - operation := "HasPendingUpdates" - title := "HCSShim::Container::" + operation - logrus.Debugf(title+" id=%s", container.id) - properties, err := container.properties(pendingUpdatesQuery) - if err != nil { - return false, makeContainerError(container, operation, "", err) - } - - logrus.Debugf(title+" succeeded id=%s", container.id) - return properties.AreUpdatesPending, nil -} - -// Pause pauses the execution of the container. This feature is not enabled in TP5. -func (container *container) Pause() error { - operation := "Pause" - title := "HCSShim::Container::" + operation - logrus.Debugf(title+" id=%s", container.id) - - var resultp *uint16 - err := hcsPauseComputeSystemTP5(container.handle, nil, &resultp) - err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemPauseCompleted, &defaultTimeout) - if err != nil { - return makeContainerError(container, operation, "", err) - } - - logrus.Debugf(title+" succeeded id=%s", container.id) - return nil -} - -// Resume resumes the execution of the container. This feature is not enabled in TP5. 
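A short sketch of pairing Pause with Resume (both unavailable on TP5 builds, per the comments above), assuming the caller supplies the work to run while paused:

package hcsshimexample

import "github.com/Microsoft/hcsshim"

// withPaused runs fn while the container is paused and resumes it afterwards.
func withPaused(c hcsshim.Container, fn func() error) error {
	if err := c.Pause(); err != nil {
		return err
	}
	defer c.Resume() // best-effort resume; the error is dropped in this sketch
	return fn()
}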
-func (container *container) Resume() error { - operation := "Resume" - title := "HCSShim::Container::" + operation - logrus.Debugf(title+" id=%s", container.id) - var ( - resultp *uint16 - ) - - err := hcsResumeComputeSystemTP5(container.handle, nil, &resultp) - err = processAsyncHcsResult(err, resultp, container.callbackNumber, hcsNotificationSystemResumeCompleted, &defaultTimeout) - if err != nil { - return makeContainerError(container, operation, "", err) - } - - logrus.Debugf(title+" succeeded id=%s", container.id) - return nil -} - -// CreateProcess launches a new process within the container. -func (container *container) CreateProcess(c *ProcessConfig) (Process, error) { - operation := "CreateProcess" - title := "HCSShim::Container::" + operation - var ( - processInfo hcsProcessInformation - processHandle hcsProcess - resultp *uint16 - ) - - // If we are not emulating a console, ignore any console size passed to us - if !c.EmulateConsole { - c.ConsoleSize[0] = 0 - c.ConsoleSize[1] = 0 - } - - configurationb, err := json.Marshal(c) - if err != nil { - return nil, err - } - - configuration := string(configurationb) - logrus.Debugf(title+" id=%s config=%s", container.id, configuration) - - err = hcsCreateProcess(container.handle, configuration, &processInfo, &processHandle, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - return nil, makeContainerError(container, operation, configuration, err) - } - - process := &process{ - handle: processHandle, - processID: int(processInfo.ProcessId), - container: container, - cachedPipes: &cachedPipes{ - stdIn: processInfo.StdInput, - stdOut: processInfo.StdOutput, - stdErr: processInfo.StdError, - }, - } - - if hcsCallbacksSupported { - if err := process.registerCallback(); err != nil { - return nil, makeContainerError(container, operation, "", err) - } - } - - logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID) - runtime.SetFinalizer(process, closeProcess) - return process, nil -} - -// OpenProcess gets an interface to an existing process within the container. -func (container *container) OpenProcess(pid int) (Process, error) { - operation := "OpenProcess" - title := "HCSShim::Container::" + operation - logrus.Debugf(title+" id=%s, processid=%d", container.id, pid) - var ( - processHandle hcsProcess - resultp *uint16 - ) - - err := hcsOpenProcess(container.handle, uint32(pid), &processHandle, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - return nil, makeContainerError(container, operation, "", err) - } - - process := &process{ - handle: processHandle, - processID: pid, - container: container, - } - - if err := process.registerCallback(); err != nil { - return nil, makeContainerError(container, operation, "", err) - } - - logrus.Debugf(title+" succeeded id=%s processid=%d", container.id, process.processID) - runtime.SetFinalizer(process, closeProcess) - return process, nil -} - -// Close cleans up any state associated with the container but does not terminate or wait for it.
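A sketch of driving CreateProcess from the caller's side, assuming the Process interface exposes Wait and Close (its definition is truncated at the end of this hunk); the config values are illustrative:

package hcsshimexample

import "github.com/Microsoft/hcsshim"

// execInContainer starts a command in the container and waits for it to exit.
func execInContainer(c hcsshim.Container, commandLine string) error {
	p, err := c.CreateProcess(&hcsshim.ProcessConfig{
		CommandLine:      commandLine,
		WorkingDirectory: `C:\`, // illustrative
		CreateStdOutPipe: true,
		CreateStdErrPipe: true,
	})
	if err != nil {
		return err
	}
	defer p.Close()
	return p.Wait()
}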
-func (container *container) Close() error { - operation := "Close" - title := "HCSShim::Container::" + operation - logrus.Debugf(title+" id=%s", container.id) - - // Don't double free this - if container.handle == 0 { - return nil - } - - if hcsCallbacksSupported { - if err := container.unregisterCallback(); err != nil { - return makeContainerError(container, operation, "", err) - } - } - - if err := hcsCloseComputeSystem(container.handle); err != nil { - return makeContainerError(container, operation, "", err) - } - - container.handle = 0 - - logrus.Debugf(title+" succeeded id=%s", container.id) - return nil -} - -// closeContainer wraps container.Close for use by a finalizer -func closeContainer(container *container) { - container.Close() -} - -func (container *container) registerCallback() error { - context := &notifcationWatcherContext{ - channels: newChannels(), - } - - callbackMapLock.Lock() - callbackNumber := nextCallback - nextCallback++ - callbackMap[callbackNumber] = context - callbackMapLock.Unlock() - - var callbackHandle hcsCallback - err := hcsRegisterComputeSystemCallback(container.handle, notificationWatcherCallback, callbackNumber, &callbackHandle) - if err != nil { - return err - } - context.handle = callbackHandle - container.callbackNumber = callbackNumber - - return nil -} - -func (container *container) unregisterCallback() error { - callbackNumber := container.callbackNumber - - callbackMapLock.RLock() - context := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if context == nil { - return nil - } - - handle := context.handle - - if handle == 0 { - return nil - } - - // hcsUnregisterComputeSystemCallback has its own synchronization - // to wait for all callbacks to complete. We must NOT hold the callbackMapLock. - err := hcsUnregisterComputeSystemCallback(handle) - if err != nil { - return err - } - - closeChannels(context.channels) - - callbackMapLock.Lock() - callbackMap[callbackNumber] = nil - callbackMapLock.Unlock() - - handle = 0 - - return nil -} - -func (e *ContainerError) Error() string { - if e == nil { - return "" - } - - if e.Container == nil { - return "unexpected nil container for error: " + e.Err.Error() - } - - s := "container " + e.Container.id - - if e.Operation != "" { - s += " encountered an error during " + e.Operation - } - - if e.Err != nil { - s += fmt.Sprintf(" failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err)) - } - - if e.ExtraInfo != "" { - s += " extra info: " + e.ExtraInfo - } - - return s -} - -func makeContainerError(container *container, operation string, extraInfo string, err error) error { - // Don't wrap errors created in hcsshim - if err == ErrTimeout || - err == ErrUnexpectedProcessAbort || - err == ErrUnexpectedContainerExit || - err == ErrHandleClose || - err == ErrInvalidProcessState || - err == ErrInvalidNotificationType || - err == ErrVmcomputeOperationPending { - return err - } - - containerError := &ContainerError{Container: container, Operation: operation, ExtraInfo: extraInfo, Err: err} - logrus.Error(containerError) - return containerError -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/createcomputesystem.go b/vendor/src/github.com/Microsoft/hcsshim/createcomputesystem.go deleted file mode 100644 index 3cc12a38ef..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/createcomputesystem.go +++ /dev/null @@ -1,22 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// CreateComputeSystem creates a container, initializing its configuration in -// the Host Compute Service such
that it can be started by a call to the -// StartComputeSystem method. -func CreateComputeSystem(id string, configuration string) error { - - title := "HCSShim::CreateComputeSystem" - logrus.Debugf(title+" id=%s, configuration=%s", id, configuration) - - err := createComputeSystem(id, configuration) - if err != nil { - err = makeErrorf(err, title, "id=%s configuration=%s", id, configuration) - logrus.Error(err) - return err - } - - logrus.Debugf(title+"- succeeded %s", id) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/createlayer.go b/vendor/src/github.com/Microsoft/hcsshim/createlayer.go deleted file mode 100644 index 9ecffb1cb0..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/createlayer.go +++ /dev/null @@ -1,27 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// CreateLayer creates a new, empty, read-only layer on the filesystem based on -// the parent layer provided. -func CreateLayer(info DriverInfo, id, parent string) error { - title := "hcsshim::CreateLayer " - logrus.Debugf(title+"Flavour %d ID %s parent %s", info.Flavour, id, parent) - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return err - } - - err = createLayer(&infop, id, parent) - if err != nil { - err = makeErrorf(err, title, "id=%s parent=%s flavour=%d", id, parent, info.Flavour) - logrus.Error(err) - return err - } - - logrus.Debugf(title+" - succeeded id=%s parent=%s flavour=%d", id, parent, info.Flavour) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/createprocess.go b/vendor/src/github.com/Microsoft/hcsshim/createprocess.go deleted file mode 100644 index a2b6298546..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/createprocess.go +++ /dev/null @@ -1,101 +0,0 @@ -package hcsshim - -import ( - "encoding/json" - "io" - "syscall" - - "github.com/Microsoft/go-winio" - "github.com/Sirupsen/logrus" -) - -// CreateProcessParams is used as both the input of CreateProcessInComputeSystem -// and to convert the parameters to JSON for passing onto the HCS -type CreateProcessParams struct { - ApplicationName string - CommandLine string - WorkingDirectory string - Environment map[string]string - EmulateConsole bool - ConsoleSize [2]int -} - -// makeOpenFiles calls winio.MakeOpenFile for each handle in a slice but closes all the handles -// if there is an error. -func makeOpenFiles(hs []syscall.Handle) (_ []io.ReadWriteCloser, err error) { - fs := make([]io.ReadWriteCloser, len(hs)) - for i, h := range hs { - if h != syscall.Handle(0) { - if err == nil { - fs[i], err = winio.MakeOpenFile(h) - } - if err != nil { - syscall.Close(h) - } - } - } - if err != nil { - for _, f := range fs { - if f != nil { - f.Close() - } - } - return nil, err - } - return fs, nil -} - -// CreateProcessInComputeSystem starts a process in a container. This is invoked, for example, -// as a result of docker run, docker exec, or RUN in Dockerfile. If successful, -// it returns the PID of the process.
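A sketch of calling this legacy entry point with an illustrative command line; when stdout/stderr are requested they come back as io.ReadClosers, and the unused stdin slot is nil:

package hcsshimexample

import (
	"io"
	"os"

	"github.com/Microsoft/hcsshim"
)

// legacyExec runs a command in the compute system id and streams its output.
func legacyExec(id string) (uint32, error) {
	params := hcsshim.CreateProcessParams{CommandLine: `cmd /S /C dir`} // illustrative
	pid, _, stdout, stderr, err := hcsshim.CreateProcessInComputeSystem(id, false, true, true, params)
	if err != nil {
		return 0, err
	}
	go io.Copy(os.Stdout, stdout)
	go io.Copy(os.Stderr, stderr)
	return pid, nil
}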
-func CreateProcessInComputeSystem(id string, useStdin bool, useStdout bool, useStderr bool, params CreateProcessParams) (_ uint32, _ io.WriteCloser, _ io.ReadCloser, _ io.ReadCloser, err error) { - title := "HCSShim::CreateProcessInComputeSystem" - logrus.Debugf(title+" id=%s", id) - - // If we are not emulating a console, ignore any console size passed to us - if !params.EmulateConsole { - params.ConsoleSize[0] = 0 - params.ConsoleSize[1] = 0 - } - - paramsJson, err := json.Marshal(params) - if err != nil { - return - } - - logrus.Debugf(title+" - Calling Win32 %s %s", id, paramsJson) - - var pid uint32 - - handles := make([]syscall.Handle, 3) - var stdinParam, stdoutParam, stderrParam *syscall.Handle - if useStdin { - stdinParam = &handles[0] - } - if useStdout { - stdoutParam = &handles[1] - } - if useStderr { - stderrParam = &handles[2] - } - - err = createProcessWithStdHandlesInComputeSystem(id, string(paramsJson), &pid, stdinParam, stdoutParam, stderrParam) - if err != nil { - herr := makeErrorf(err, title, "id=%s params=%v", id, params) - // Windows TP4: Hyper-V Containers may return this error with more than one - // concurrent exec. Do not log it as an error - if err != WSAEINVAL { - logrus.Error(herr) - } - err = herr - return - } - - pipes, err := makeOpenFiles(handles) - if err != nil { - return - } - - logrus.Debugf(title+" - succeeded id=%s params=%s pid=%d", id, paramsJson, pid) - return pid, pipes[0], pipes[1], pipes[2], nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/createsandboxlayer.go b/vendor/src/github.com/Microsoft/hcsshim/createsandboxlayer.go deleted file mode 100644 index b69c3da368..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/createsandboxlayer.go +++ /dev/null @@ -1,35 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// CreateSandboxLayer creates and populates a new read-write layer for use by a container. -// This requires both the id of the direct parent layer and the full list -// of paths to all parent layers up to the base (and including the direct parent -// whose id was provided). -func CreateSandboxLayer(info DriverInfo, layerId, parentId string, parentLayerPaths []string) error { - title := "hcsshim::CreateSandboxLayer " - logrus.Debugf(title+"layerId %s parentId %s", layerId, parentId) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) - if err != nil { - return err - } - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return err - } - - err = createSandboxLayer(&infop, layerId, parentId, layers) - if err != nil { - err = makeErrorf(err, title, "layerId=%s parentId=%s", layerId, parentId) - logrus.Error(err) - return err - } - - logrus.Debugf(title+"- succeeded layerId=%s parentId=%s", layerId, parentId) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/deactivatelayer.go b/vendor/src/github.com/Microsoft/hcsshim/deactivatelayer.go deleted file mode 100644 index c02bcb3a0b..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/deactivatelayer.go +++ /dev/null @@ -1,26 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// DeactivateLayer will dismount a layer that was mounted via ActivateLayer.
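CreateSandboxLayer above is typically followed by ExpandSandboxSize (a few files below) when a container needs a larger scratch space; a sketch with a caller-supplied size:

package hcsshimexample

import "github.com/Microsoft/hcsshim"

// newScratch creates a read-write layer on parentId and grows it to size bytes.
func newScratch(info hcsshim.DriverInfo, layerId, parentId string, parentPaths []string, size uint64) error {
	if err := hcsshim.CreateSandboxLayer(info, layerId, parentId, parentPaths); err != nil {
		return err
	}
	return hcsshim.ExpandSandboxSize(info, layerId, size)
}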
-func DeactivateLayer(info DriverInfo, id string) error { - title := "hcsshim::DeactivateLayer " - logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id) - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return err - } - - err = deactivateLayer(&infop, id) - if err != nil { - err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour) - logrus.Error(err) - return err - } - - logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/destroylayer.go b/vendor/src/github.com/Microsoft/hcsshim/destroylayer.go deleted file mode 100644 index 91ed269eef..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/destroylayer.go +++ /dev/null @@ -1,27 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// DestroyLayer will remove the on-disk files representing the layer with the given -// id, including that layer's containing folder, if any. -func DestroyLayer(info DriverInfo, id string) error { - title := "hcsshim::DestroyLayer " - logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id) - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return err - } - - err = destroyLayer(&infop, id) - if err != nil { - err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour) - logrus.Error(err) - return err - } - - logrus.Debugf(title+"succeeded flavour=%d id=%s", info.Flavour, id) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/expandsandboxsize.go b/vendor/src/github.com/Microsoft/hcsshim/expandsandboxsize.go deleted file mode 100644 index e168921841..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/expandsandboxsize.go +++ /dev/null @@ -1,26 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// ExpandSandboxSize expands the size of a layer to at least size bytes. -func ExpandSandboxSize(info DriverInfo, layerId string, size uint64) error { - title := "hcsshim::ExpandSandboxSize " - logrus.Debugf(title+"layerId=%s size=%d", layerId, size) - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return err - } - - err = expandSandboxSize(&infop, layerId, size) - if err != nil { - err = makeErrorf(err, title, "layerId=%s size=%d", layerId, size) - logrus.Error(err) - return err - } - - logrus.Debugf(title+"- succeeded layerId=%s size=%d", layerId, size) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/exportlayer.go b/vendor/src/github.com/Microsoft/hcsshim/exportlayer.go deleted file mode 100644 index 903e08519d..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/exportlayer.go +++ /dev/null @@ -1,158 +0,0 @@ -package hcsshim - -import ( - "io" - "io/ioutil" - "os" - "runtime" - "syscall" - - "github.com/Microsoft/go-winio" - "github.com/Sirupsen/logrus" -) - -// ExportLayer will create a folder at exportFolderPath and fill that folder with -// the transport format version of the layer identified by layerId. This transport -// format includes any metadata required for later importing the layer (using -// ImportLayer), and requires the full list of parent layer paths in order to -// perform the export. 
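The transport folder that ExportLayer produces is exactly what ImportLayer (later in this patch) consumes, so the two round-trip; a sketch with caller-supplied paths:

package hcsshimexample

import "github.com/Microsoft/hcsshim"

// copyLayer exports a layer to folder and re-imports it under a new id.
func copyLayer(info hcsshim.DriverInfo, srcID, dstID, folder string, parentPaths []string) error {
	if err := hcsshim.ExportLayer(info, srcID, folder, parentPaths); err != nil {
		return err
	}
	return hcsshim.ImportLayer(info, dstID, folder, parentPaths)
}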
-func ExportLayer(info DriverInfo, layerId string, exportFolderPath string, parentLayerPaths []string) error { - title := "hcsshim::ExportLayer " - logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerId, exportFolderPath) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) - if err != nil { - return err - } - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return err - } - - err = exportLayer(&infop, layerId, exportFolderPath, layers) - if err != nil { - err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerId, info.Flavour, exportFolderPath) - logrus.Error(err) - return err - } - - logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerId, exportFolderPath) - return nil -} - -type LayerReader interface { - Next() (string, int64, *winio.FileBasicInfo, error) - Read(b []byte) (int, error) - Close() error -} - -// FilterLayerReader provides an interface for extracting the contents of an on-disk layer. -type FilterLayerReader struct { - context uintptr -} - -// Next reads the next available file from a layer, ensuring that parent directories are always read -// before child files and directories. -// -// Next returns the file's relative path, size, and basic file metadata. Read() should be used to -// extract a Win32 backup stream with the remainder of the metadata and the data. -func (r *FilterLayerReader) Next() (string, int64, *winio.FileBasicInfo, error) { - var fileNamep *uint16 - fileInfo := &winio.FileBasicInfo{} - var deleted uint32 - var fileSize int64 - err := exportLayerNext(r.context, &fileNamep, fileInfo, &fileSize, &deleted) - if err != nil { - if err == syscall.ERROR_NO_MORE_FILES { - err = io.EOF - } else { - err = makeError(err, "ExportLayerNext", "") - } - return "", 0, nil, err - } - fileName := convertAndFreeCoTaskMemString(fileNamep) - if deleted != 0 { - fileInfo = nil - } - if fileName[0] == '\\' { - fileName = fileName[1:] - } - return fileName, fileSize, fileInfo, nil -} - -// Read reads from the current file's Win32 backup stream. -func (r *FilterLayerReader) Read(b []byte) (int, error) { - var bytesRead uint32 - err := exportLayerRead(r.context, b, &bytesRead) - if err != nil { - return 0, makeError(err, "ExportLayerRead", "") - } - if bytesRead == 0 { - return 0, io.EOF - } - return int(bytesRead), nil -} - -// Close frees resources associated with the layer reader. It will return an -// error if there was an error while reading the layer or if the layer was not -// completely read. -func (r *FilterLayerReader) Close() (err error) { - if r.context != 0 { - err = exportLayerEnd(r.context) - if err != nil { - err = makeError(err, "ExportLayerEnd", "") - } - r.context = 0 - } - return -} - -// NewLayerReader returns a new layer reader for reading the contents of an on-disk layer. -// The caller must have taken the SeBackupPrivilege privilege -// to call this and any methods on the resulting LayerReader. -func NewLayerReader(info DriverInfo, layerID string, parentLayerPaths []string) (LayerReader, error) { - if procExportLayerBegin.Find() != nil { - // The new layer reader is not available on this Windows build. Fall back to the - // legacy export code path.
- path, err := ioutil.TempDir("", "hcs") - if err != nil { - return nil, err - } - err = ExportLayer(info, layerID, path, parentLayerPaths) - if err != nil { - os.RemoveAll(path) - return nil, err - } - return &legacyLayerReaderWrapper{newLegacyLayerReader(path)}, nil - } - - layers, err := layerPathsToDescriptors(parentLayerPaths) - if err != nil { - return nil, err - } - infop, err := convertDriverInfo(info) - if err != nil { - return nil, err - } - r := &FilterLayerReader{} - err = exportLayerBegin(&infop, layerID, layers, &r.context) - if err != nil { - return nil, makeError(err, "ExportLayerBegin", "") - } - runtime.SetFinalizer(r, func(r *FilterLayerReader) { r.Close() }) - return r, err -} - -type legacyLayerReaderWrapper struct { - *legacyLayerReader -} - -func (r *legacyLayerReaderWrapper) Close() error { - err := r.legacyLayerReader.Close() - os.RemoveAll(r.root) - return err -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/getcomputesystemproperties.go b/vendor/src/github.com/Microsoft/hcsshim/getcomputesystemproperties.go deleted file mode 100644 index 3c544492da..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/getcomputesystemproperties.go +++ /dev/null @@ -1,43 +0,0 @@ -package hcsshim - -import ( - "encoding/json" - - "github.com/Sirupsen/logrus" -) - -// ComputeSystemProperties is a struct describing the returned properties. -type ComputeSystemProperties struct { - ID string - Name string - Stopped bool - AreUpdatesPending bool -} - -// GetComputeSystemProperties gets the properties for the compute system with the given ID. -func GetComputeSystemProperties(id string, flags uint32) (ComputeSystemProperties, error) { - title := "hcsshim::GetComputeSystemProperties " - - csProps := ComputeSystemProperties{ - Stopped: false, - AreUpdatesPending: false, - } - - logrus.Debugf("Calling proc") - var buffer *uint16 - err := getComputeSystemProperties(id, flags, &buffer) - if err != nil { - err = makeError(err, title, "") - logrus.Error(err) - return csProps, err - } - propData := convertAndFreeCoTaskMemString(buffer) - logrus.Debugf(title+" - succeeded output=%s", propData) - - if err = json.Unmarshal([]byte(propData), &csProps); err != nil { - logrus.Error(err) - return csProps, err - } - - return csProps, nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/getlayermountpath.go b/vendor/src/github.com/Microsoft/hcsshim/getlayermountpath.go deleted file mode 100644 index 41b5758926..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/getlayermountpath.go +++ /dev/null @@ -1,55 +0,0 @@ -package hcsshim - -import ( - "syscall" - - "github.com/Sirupsen/logrus" -) - -// GetLayerMountPath will look for a mounted layer with the given id and return -// the path at which that layer can be accessed. This path may be a volume path -// if the layer is a mounted read-write layer, otherwise it is expected to be the -// folder path at which the layer is stored. -func GetLayerMountPath(info DriverInfo, id string) (string, error) { - title := "hcsshim::GetLayerMountPath " - logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id) - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return "", err - } - - var mountPathLength uintptr - mountPathLength = 0 - - // Call the procedure itself. 
- logrus.Debugf("Calling proc (1)") - err = getLayerMountPath(&infop, id, &mountPathLength, nil) - if err != nil { - err = makeErrorf(err, title, "(first call) id=%s flavour=%d", id, info.Flavour) - logrus.Error(err) - return "", err - } - - // Allocate a mount path of the returned length. - if mountPathLength == 0 { - return "", nil - } - mountPathp := make([]uint16, mountPathLength) - mountPathp[0] = 0 - - // Call the procedure again - logrus.Debugf("Calling proc (2)") - err = getLayerMountPath(&infop, id, &mountPathLength, &mountPathp[0]) - if err != nil { - err = makeErrorf(err, title, "(second call) id=%s flavour=%d", id, info.Flavour) - logrus.Error(err) - return "", err - } - - path := syscall.UTF16ToString(mountPathp[0:]) - logrus.Debugf(title+"succeeded flavour=%d id=%s path=%s", info.Flavour, id, path) - return path, nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/getsharedbaseimages.go b/vendor/src/github.com/Microsoft/hcsshim/getsharedbaseimages.go deleted file mode 100644 index 01ab4da3dd..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/getsharedbaseimages.go +++ /dev/null @@ -1,22 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// GetSharedBaseImages will enumerate the images stored in the common central -// image store and return descriptive info about those images for the purpose -// of registering them with the graphdriver, graph, and tagstore. -func GetSharedBaseImages() (imageData string, err error) { - title := "hcsshim::GetSharedBaseImages " - - logrus.Debugf("Calling proc") - var buffer *uint16 - err = getBaseImages(&buffer) - if err != nil { - err = makeError(err, title, "") - logrus.Error(err) - return - } - imageData = convertAndFreeCoTaskMemString(buffer) - logrus.Debugf(title+" - succeeded output=%s", imageData) - return -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/guid.go b/vendor/src/github.com/Microsoft/hcsshim/guid.go deleted file mode 100644 index 620aba123c..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/guid.go +++ /dev/null @@ -1,19 +0,0 @@ -package hcsshim - -import ( - "crypto/sha1" - "fmt" -) - -type GUID [16]byte - -func NewGUID(source string) *GUID { - h := sha1.Sum([]byte(source)) - var g GUID - copy(g[0:], h[0:16]) - return &g -} - -func (g *GUID) ToString() string { - return fmt.Sprintf("%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x-%02x", g[3], g[2], g[1], g[0], g[5], g[4], g[7], g[6], g[8:10], g[10:]) -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/hcsshim.go b/vendor/src/github.com/Microsoft/hcsshim/hcsshim.go deleted file mode 100644 index 62214ef2f9..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/hcsshim.go +++ /dev/null @@ -1,184 +0,0 @@ -// Shim for the Host Compute Service (HSC) to manage Windows Server -// containers and Hyper-V containers. - -package hcsshim - -import ( - "fmt" - "syscall" - "unsafe" - - "github.com/Sirupsen/logrus" -) - -//go:generate go run mksyscall_windows.go -output zhcsshim.go hcsshim.go - -//sys coTaskMemFree(buffer unsafe.Pointer) = ole32.CoTaskMemFree - -//sys activateLayer(info *driverInfo, id string) (hr error) = vmcompute.ActivateLayer? -//sys copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CopyLayer? -//sys createLayer(info *driverInfo, id string, parent string) (hr error) = vmcompute.CreateLayer? -//sys createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.CreateSandboxLayer? 
-//sys expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) = vmcompute.ExpandSandboxSize? -//sys deactivateLayer(info *driverInfo, id string) (hr error) = vmcompute.DeactivateLayer? -//sys destroyLayer(info *driverInfo, id string) (hr error) = vmcompute.DestroyLayer? -//sys exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ExportLayer? -//sys getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) = vmcompute.GetLayerMountPath? -//sys getBaseImages(buffer **uint16) (hr error) = vmcompute.GetBaseImages? -//sys importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.ImportLayer? -//sys layerExists(info *driverInfo, id string, exists *uint32) (hr error) = vmcompute.LayerExists? -//sys nameToGuid(name string, guid *GUID) (hr error) = vmcompute.NameToGuid? -//sys prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) = vmcompute.PrepareLayer? -//sys unprepareLayer(info *driverInfo, id string) (hr error) = vmcompute.UnprepareLayer? -//sys processBaseImage(path string) (hr error) = vmcompute.ProcessBaseImage? -//sys processUtilityImage(path string) (hr error) = vmcompute.ProcessUtilityImage? - -//sys importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ImportLayerBegin? -//sys importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) = vmcompute.ImportLayerNext? -//sys importLayerWrite(context uintptr, buffer []byte) (hr error) = vmcompute.ImportLayerWrite? -//sys importLayerEnd(context uintptr) (hr error) = vmcompute.ImportLayerEnd? - -//sys exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) = vmcompute.ExportLayerBegin? -//sys exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) = vmcompute.ExportLayerNext? -//sys exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) = vmcompute.ExportLayerRead? -//sys exportLayerEnd(context uintptr) (hr error) = vmcompute.ExportLayerEnd? - -//sys createComputeSystem(id string, configuration string) (hr error) = vmcompute.CreateComputeSystem? -//sys createProcessWithStdHandlesInComputeSystem(id string, paramsJson string, pid *uint32, stdin *syscall.Handle, stdout *syscall.Handle, stderr *syscall.Handle) (hr error) = vmcompute.CreateProcessWithStdHandlesInComputeSystem? -//sys resizeConsoleInComputeSystem(id string, pid uint32, height uint16, width uint16, flags uint32) (hr error) = vmcompute.ResizeConsoleInComputeSystem? -//sys shutdownComputeSystem(id string, timeout uint32) (hr error) = vmcompute.ShutdownComputeSystem? -//sys startComputeSystem(id string) (hr error) = vmcompute.StartComputeSystem? -//sys terminateComputeSystem(id string) (hr error) = vmcompute.TerminateComputeSystem? -//sys terminateProcessInComputeSystem(id string, pid uint32) (hr error) = vmcompute.TerminateProcessInComputeSystem? -//sys waitForProcessInComputeSystem(id string, pid uint32, timeout uint32, exitCode *uint32) (hr error) = vmcompute.WaitForProcessInComputeSystem? -//sys getComputeSystemProperties(id string, flags uint32, properties **uint16) (hr error) = vmcompute.GetComputeSystemProperties? 
- -//sys hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) = vmcompute.HcsEnumerateComputeSystems? -//sys hcsCreateComputeSystem(id string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? -//sys hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsOpenComputeSystem? -//sys hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) = vmcompute.HcsCloseComputeSystem? -//sys hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? -//sys hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? -//sys hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? -//sys hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? -//sys hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? -//sys hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetComputeSystemProperties? -//sys hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) = vmcompute.HcsModifyComputeSystem? -//sys hcsCreateComputeSystemWait(computeSystem hcsSystem, exitEvent *syscall.Handle, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystemWait? -//sys hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsCreateProcess? -//sys hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) = vmcompute.HcsOpenProcess? -//sys hcsCloseProcess(process hcsProcess) (hr error) = vmcompute.HcsCloseProcess? -//sys hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) = vmcompute.HcsTerminateProcess? -//sys hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) = vmcompute.HcsGetProcessInfo? -//sys hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) = vmcompute.HcsGetProcessProperties? -//sys hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) = vmcompute.HcsModifyProcess? -//sys hcsCreateProcessWait(process hcsProcess, settings *syscall.Handle, result **uint16) (hr error) = vmcompute.HcsCreateProcessWait? -//sys hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) = vmcompute.HcsGetServiceProperties? -//sys hcsModifyServiceSettings(settings string, result **uint16) (hr error) = vmcompute.HcsModifyServiceSettings? - -//sys hcsCreateComputeSystemTP5(id string, configuration string, computeSystem *hcsSystem, result **uint16) (hr error) = vmcompute.HcsCreateComputeSystem? -//sys hcsStartComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsStartComputeSystem? -//sys hcsShutdownComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsShutdownComputeSystem? 
-//sys hcsTerminateComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsTerminateComputeSystem? -//sys hcsPauseComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsPauseComputeSystem? -//sys hcsResumeComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) = vmcompute.HcsResumeComputeSystem? -//sys hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterComputeSystemCallback? -//sys hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterComputeSystemCallback? -//sys hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) = vmcompute.HcsRegisterProcessCallback? -//sys hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) = vmcompute.HcsUnregisterProcessCallback? - -//sys _hnsCall(method string, path string, object string, response **uint16) (hr error) = vmcompute.HNSCall? - -const ( - // Specific user-visible exit codes - WaitErrExecFailed = 32767 - - ERROR_GEN_FAILURE = syscall.Errno(31) - ERROR_SHUTDOWN_IN_PROGRESS = syscall.Errno(1115) - WSAEINVAL = syscall.Errno(10022) - - ErrVmcomputeOperationPending = syscall.Errno(0xC0370103) - - // Timeout on wait calls - TimeoutInfinite = 0xFFFFFFFF -) - -type HcsError struct { - title string - rest string - Err error -} - -type hcsSystem syscall.Handle -type hcsProcess syscall.Handle -type hcsCallback syscall.Handle - -type hcsProcessInformation struct { - ProcessId uint32 - Reserved uint32 - StdInput syscall.Handle - StdOutput syscall.Handle - StdError syscall.Handle -} - -func makeError(err error, title, rest string) error { - // Pass through DLL errors directly since they do not originate from HCS. 
- if _, ok := err.(*syscall.DLLError); ok { - return err - } - return &HcsError{title, rest, err} -} - -func makeErrorf(err error, title, format string, a ...interface{}) error { - return makeError(err, title, fmt.Sprintf(format, a...)) -} - -func win32FromError(err error) uint32 { - if herr, ok := err.(*HcsError); ok { - return win32FromError(herr.Err) - } - if code, ok := err.(syscall.Errno); ok { - return uint32(code) - } - return uint32(ERROR_GEN_FAILURE) -} - -func win32FromHresult(hr uintptr) uintptr { - if hr&0x1fff0000 == 0x00070000 { - return hr & 0xffff - } - return hr -} - -func (e *HcsError) Error() string { - s := e.title - if len(s) > 0 && s[len(s)-1] != ' ' { - s += " " - } - s += fmt.Sprintf("failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err)) - if e.rest != "" { - if e.rest[0] != ' ' { - s += " " - } - s += e.rest - } - return s -} - -func convertAndFreeCoTaskMemString(buffer *uint16) string { - str := syscall.UTF16ToString((*[1 << 30]uint16)(unsafe.Pointer(buffer))[:]) - coTaskMemFree(unsafe.Pointer(buffer)) - return str -} - -func convertAndFreeCoTaskMemBytes(buffer *uint16) []byte { - return []byte(convertAndFreeCoTaskMemString(buffer)) -} - -func processHcsResult(err error, resultp *uint16) error { - if resultp != nil { - result := convertAndFreeCoTaskMemString(resultp) - logrus.Debugf("Result: %s", result) - } - return err -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/hnsfuncs.go b/vendor/src/github.com/Microsoft/hcsshim/hnsfuncs.go deleted file mode 100644 index 642b3167b2..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/hnsfuncs.go +++ /dev/null @@ -1,149 +0,0 @@ -package hcsshim - -import ( - "encoding/json" - "fmt" - "net" - - "github.com/Sirupsen/logrus" -) - -type NatPolicy struct { - Type string - Protocol string - InternalPort uint16 - ExternalPort uint16 -} - -type QosPolicy struct { - Type string - MaximumOutgoingBandwidthInBytes uint64 -} - -type VlanPolicy struct { - Type string - VLAN uint -} - -type VsidPolicy struct { - Type string - VSID uint -} - -// Subnet is associated with a network and represents a list -// of subnets available to the network -type Subnet struct { - AddressPrefix string `json:",omitempty"` - GatewayAddress string `json:",omitempty"` -} - -// MacPool is associated with a network and represents a list -// of MAC addresses available to the network -type MacPool struct { - StartMacAddress string `json:",omitempty"` - EndMacAddress string `json:",omitempty"` -} - -// HNSNetwork represents a network in HNS -type HNSNetwork struct { - Id string `json:",omitempty"` - Name string `json:",omitempty"` - Type string `json:",omitempty"` - NetworkAdapterName string `json:",omitempty"` - SourceMac string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - MacPools []MacPool `json:",omitempty"` - Subnets []Subnet `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` -} - -// HNSEndpoint represents a network endpoint in HNS -type HNSEndpoint struct { - Id string `json:",omitempty"` - Name string `json:",omitempty"` - VirtualNetwork string `json:",omitempty"` - VirtualNetworkName string `json:",omitempty"` - Policies []json.RawMessage `json:",omitempty"` - MacAddress string `json:",omitempty"` - IPAddress net.IP `json:",omitempty"` - DNSSuffix string `json:",omitempty"` - DNSServerList string `json:",omitempty"` - GatewayAddress string `json:",omitempty"` - PrefixLength uint8 `json:",omitempty"` -} - -type hnsNetworkResponse struct { - Success bool - Error
string - Output HNSNetwork -} - -type hnsResponse struct { - Success bool - Error string - Output json.RawMessage -} - -func hnsCall(method, path, request string, returnResponse interface{}) error { - var responseBuffer *uint16 - err := _hnsCall(method, path, request, &responseBuffer) - if err != nil { - return makeError(err, "hnsCall ", "") - } - response := convertAndFreeCoTaskMemString(responseBuffer) - - hnsresponse := &hnsResponse{} - if err = json.Unmarshal([]byte(response), &hnsresponse); err != nil { - return err - } - - if !hnsresponse.Success { - return fmt.Errorf("HNS failed with error : %s", hnsresponse.Error) - } - - if len(hnsresponse.Output) == 0 { - return nil - } - - logrus.Debugf("Network Response : %s", hnsresponse.Output) - err = json.Unmarshal(hnsresponse.Output, returnResponse) - if err != nil { - return err - } - - return nil -} - -// HNSNetworkRequest makes a call into HNS to update/query a single network -func HNSNetworkRequest(method, path, request string) (*HNSNetwork, error) { - var network HNSNetwork - err := hnsCall(method, "/networks/"+path, request, &network) - if err != nil { - return nil, err - } - - return &network, nil -} - -// HNSListNetworkRequest makes an HNS call to query the list of available networks -func HNSListNetworkRequest(method, path, request string) ([]HNSNetwork, error) { - var network []HNSNetwork - err := hnsCall(method, "/networks/"+path, request, &network) - if err != nil { - return nil, err - } - - return network, nil -} - -// HNSEndpointRequest makes an HNS call to modify/query a network endpoint -func HNSEndpointRequest(method, path, request string) (*HNSEndpoint, error) { - endpoint := &HNSEndpoint{} - err := hnsCall(method, "/endpoints/"+path, request, &endpoint) - if err != nil { - return nil, err - } - - return endpoint, nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/importlayer.go b/vendor/src/github.com/Microsoft/hcsshim/importlayer.go deleted file mode 100644 index 42d7270448..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/importlayer.go +++ /dev/null @@ -1,193 +0,0 @@ -package hcsshim - -import ( - "errors" - "io/ioutil" - "os" - "path/filepath" - "runtime" - - "github.com/Microsoft/go-winio" - "github.com/Sirupsen/logrus" -) - -// ImportLayer will take the contents of the folder at importFolderPath and import -// that into a layer with the id layerId. Note that in order to correctly populate -// the layer and interpret the transport format, all parent layers must already -// be present on the system at the paths provided in parentLayerPaths. -func ImportLayer(info DriverInfo, layerID string, importFolderPath string, parentLayerPaths []string) error { - title := "hcsshim::ImportLayer " - logrus.Debugf(title+"flavour %d layerId %s folder %s", info.Flavour, layerID, importFolderPath) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) - if err != nil { - return err - } - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return err - } - - err = importLayer(&infop, layerID, importFolderPath, layers) - if err != nil { - err = makeErrorf(err, title, "layerId=%s flavour=%d folder=%s", layerID, info.Flavour, importFolderPath) - logrus.Error(err) - return err - } - - logrus.Debugf(title+"succeeded flavour=%d layerId=%s folder=%s", info.Flavour, layerID, importFolderPath) - return nil -} - -// LayerWriter is an interface that supports writing a new container image layer.
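A sketch of pushing one file through the LayerWriter interface defined just below, using winio.BackupStreamWriter as the FilterLayerWriter documentation suggests; the file metadata and contents are caller-supplied:

package hcsshimexample

import (
	"github.com/Microsoft/go-winio"
	"github.com/Microsoft/hcsshim"
)

// addFile writes a single file entry plus its data stream into a layer.
func addFile(w hcsshim.LayerWriter, name string, info *winio.FileBasicInfo, data []byte) error {
	if err := w.Add(name, info); err != nil {
		return err
	}
	bw := winio.NewBackupStreamWriter(w)
	hdr := winio.BackupHeader{Id: winio.BackupData, Size: int64(len(data))}
	if err := bw.WriteHeader(&hdr); err != nil {
		return err
	}
	_, err := bw.Write(data)
	return err
}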
-type LayerWriter interface { - // Add adds a file to the layer with given metadata. - Add(name string, fileInfo *winio.FileBasicInfo) error - // AddLink adds a hard link to the layer. The target must already have been added. - AddLink(name string, target string) error - // Remove removes a file that was present in a parent layer from the layer. - Remove(name string) error - // Write writes data to the current file. The data must be in the format of a Win32 - // backup stream. - Write(b []byte) (int, error) - // Close finishes the layer writing process and releases any resources. - Close() error -} - -// FilterLayerWriter provides an interface to write the contents of a layer to the file system. -type FilterLayerWriter struct { - context uintptr -} - -// Add adds a file or directory to the layer. The file's parent directory must have already been added. -// -// name contains the file's relative path. fileInfo contains file times and file attributes; the rest -// of the file metadata and the file data must be written as a Win32 backup stream to the Write() method. -// winio.BackupStreamWriter can be used to facilitate this. -func (w *FilterLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { - if name[0] != '\\' { - name = `\` + name - } - err := importLayerNext(w.context, name, fileInfo) - if err != nil { - return makeError(err, "ImportLayerNext", "") - } - return nil -} - -// AddLink adds a hard link to the layer. The target of the link must have already been added. -func (w *FilterLayerWriter) AddLink(name string, target string) error { - return errors.New("hard links not yet supported") -} - -// Remove removes a file from the layer. The file must have been present in the parent layer. -// -// name contains the file's relative path. -func (w *FilterLayerWriter) Remove(name string) error { - if name[0] != '\\' { - name = `\` + name - } - err := importLayerNext(w.context, name, nil) - if err != nil { - return makeError(err, "ImportLayerNext", "") - } - return nil -} - -// Write writes more backup stream data to the current file. -func (w *FilterLayerWriter) Write(b []byte) (int, error) { - err := importLayerWrite(w.context, b) - if err != nil { - err = makeError(err, "ImportLayerWrite", "") - return 0, err - } - return len(b), err -} - -// Close completes the layer write operation. The error must be checked to ensure that the -// operation was successful. -func (w *FilterLayerWriter) Close() (err error) { - if w.context != 0 { - err = importLayerEnd(w.context) - if err != nil { - err = makeError(err, "ImportLayerEnd", "") - } - w.context = 0 - } - return -} - -type legacyLayerWriterWrapper struct { - *legacyLayerWriter - info DriverInfo - layerID string - path string - parentLayerPaths []string -} - -func (r *legacyLayerWriterWrapper) Close() error { - err := r.legacyLayerWriter.Close() - if err == nil { - var fullPath string - // Use the original path here because ImportLayer does not support long paths for the source in TP5. - // But do use a long path for the destination to work around another bug with directories - // with MAX_PATH - 12 < length < MAX_PATH. - info := r.info - fullPath, err = makeLongAbsPath(filepath.Join(info.HomeDir, r.layerID)) - if err == nil { - info.HomeDir = "" - err = ImportLayer(info, fullPath, r.path, r.parentLayerPaths) - } - } - os.RemoveAll(r.root) - return err -} - -// NewLayerWriter returns a new layer writer for creating a layer on disk. 
-// The caller must have taken the SeBackupPrivilege and SeRestorePrivilege privileges -// to call this and any methods on the resulting LayerWriter. -func NewLayerWriter(info DriverInfo, layerID string, parentLayerPaths []string) (LayerWriter, error) { - if len(parentLayerPaths) == 0 { - // This is a base layer. It gets imported differently. - return &baseLayerWriter{ - root: filepath.Join(info.HomeDir, layerID), - }, nil - } - - if procImportLayerBegin.Find() != nil { - // The new layer writer is not available on this Windows build. Fall back to the - // legacy import code path. - path, err := ioutil.TempDir("", "hcs") - if err != nil { - return nil, err - } - return &legacyLayerWriterWrapper{ - legacyLayerWriter: newLegacyLayerWriter(path), - info: info, - layerID: layerID, - path: path, - parentLayerPaths: parentLayerPaths, - }, nil - } - layers, err := layerPathsToDescriptors(parentLayerPaths) - if err != nil { - return nil, err - } - - infop, err := convertDriverInfo(info) - if err != nil { - return nil, err - } - - w := &FilterLayerWriter{} - err = importLayerBegin(&infop, layerID, layers, &w.context) - if err != nil { - return nil, makeError(err, "ImportLayerStart", "") - } - runtime.SetFinalizer(w, func(w *FilterLayerWriter) { w.Close() }) - return w, nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/interface.go b/vendor/src/github.com/Microsoft/hcsshim/interface.go deleted file mode 100644 index c7f7cc8202..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/interface.go +++ /dev/null @@ -1,171 +0,0 @@ -package hcsshim - -import ( - "errors" - "io" - "time" -) - -var ( - // ErrInvalidNotificationType is an error encountered when an invalid notification type is used - ErrInvalidNotificationType = errors.New("hcsshim: invalid notification type") - - // ErrTimeout is an error encountered when waiting on a notification times out - ErrTimeout = errors.New("hcsshim: timeout waiting for notification") - - // ErrHandleClose is an error returned when the handle generating the notification being waited on has been closed - ErrHandleClose = errors.New("hcsshim: the handle generating this notification has been closed") - - // ErrInvalidProcessState is an error encountered when the process is not in a valid state for the requested operation - ErrInvalidProcessState = errors.New("the process is in an invalid state for the attempted operation") - - // ErrUnexpectedContainerExit is the error returned when a container exits while waiting for - // a different expected notification - ErrUnexpectedContainerExit = errors.New("unexpected container exit") - - // ErrUnexpectedProcessAbort is the error returned when communication with the compute service - // is lost while waiting for a notification - ErrUnexpectedProcessAbort = errors.New("lost communication with compute service") -) - -// ProcessConfig is used as both the input of Container.CreateProcess -// and to convert the parameters to JSON for passing onto the HCS -type ProcessConfig struct { - ApplicationName string - CommandLine string - WorkingDirectory string - Environment map[string]string - EmulateConsole bool - CreateStdInPipe bool - CreateStdOutPipe bool - CreateStdErrPipe bool - ConsoleSize [2]int -} - -type Layer struct { - ID string - Path string -} - -type MappedDir struct { - HostPath string - ContainerPath string - ReadOnly bool -} - -type HvRuntime struct { - ImagePath string `json:",omitempty"` -} - -// ContainerConfig is used as both the input of CreateContainer -// and to convert the parameters to JSON for
passing onto the HCS -// TODO Windows: @darrenstahlmsft Add ProcessorCount -type ContainerConfig struct { - SystemType string // HCS requires this to be hard-coded to "Container" - Name string // Name of the container. We use the docker ID. - Owner string // The management platform that created this container - IsDummy bool // Used for development purposes. - VolumePath string // Windows volume path for scratch space - IgnoreFlushesDuringBoot bool // Optimization hint for container startup in Windows - LayerFolderPath string // Where the layer folders are located - Layers []Layer // List of storage layers - Credentials string `json:",omitempty"` // Credentials information - ProcessorWeight uint64 `json:",omitempty"` // CPU Shares 0..10000 on Windows; where 0 will be omitted and HCS will default. - ProcessorMaximum int64 `json:",omitempty"` // CPU maximum usage percent 1..100 - StorageIOPSMaximum uint64 `json:",omitempty"` // Maximum Storage IOPS - StorageBandwidthMaximum uint64 `json:",omitempty"` // Maximum Storage Bandwidth in bytes per second - StorageSandboxSize uint64 `json:",omitempty"` // Size in bytes that the container system drive should be expanded to if smaller - MemoryMaximumInMB int64 `json:",omitempty"` // Maximum memory available to the container in Megabytes - HostName string // Hostname - MappedDirectories []MappedDir // List of mapped directories (volumes/mounts) - SandboxPath string // Location of unmounted sandbox (used for Hyper-V containers) - HvPartition bool // True if it is a Hyper-V Container - EndpointList []string // List of networking endpoints to be attached to container - HvRuntime *HvRuntime // Hyper-V container settings - Servicing bool // True if this container is for servicing -} - -const ( - notificationTypeNone string = "None" - notificationTypeGracefulExit string = "GracefulExit" - notificationTypeForcedExit string = "ForcedExit" - notificationTypeUnexpectedExit string = "UnexpectedExit" - notificationTypeReboot string = "Reboot" - notificationTypeConstructed string = "Constructed" - notificationTypeStarted string = "Started" - notificationTypePaused string = "Paused" - notificationTypeUnknown string = "Unknown" -) - -// Container represents a created (but not necessarily running) container. -type Container interface { - // Start synchronously starts the container. - Start() error - - // Shutdown requests a container shutdown, but it may not actually be shut down until Wait() succeeds. - Shutdown() error - - // Terminate requests that a container terminate, but it may not actually be terminated until Wait() succeeds. - Terminate() error - - // Wait synchronously waits for the container to shut down or terminate. - Wait() error - - // WaitTimeout synchronously waits for the container to terminate or the duration to elapse. It - // returns an error if the timeout occurs. - WaitTimeout(time.Duration) error - - // Pause pauses the execution of a container. - Pause() error - - // Resume resumes the execution of a container. - Resume() error - - // HasPendingUpdates returns true if the container has updates pending to install. - HasPendingUpdates() (bool, error) - - // CreateProcess launches a new process within the container. - CreateProcess(c *ProcessConfig) (Process, error) - - // OpenProcess gets an interface to an existing process within the container. - OpenProcess(pid int) (Process, error) - - // Close cleans up any state associated with the container but does not terminate or wait for it. - Close() error -} - -// Process represents a running or exited process.
-type Process interface { - // Pid returns the process ID of the process within the container. - Pid() int - - // Kill signals the process to terminate but does not wait for it to finish terminating. - Kill() error - - // Wait waits for the process to exit. - Wait() error - - // WaitTimeout waits for the process to exit or the duration to elapse. It returns - // false if timeout occurs. - WaitTimeout(time.Duration) error - - // ExitCode returns the exit code of the process. The process must have - // already terminated. - ExitCode() (int, error) - - // ResizeConsole resizes the console of the process. - ResizeConsole(width, height uint16) error - - // Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing - // these pipes does not close the underlying pipes; it should be possible to - // call this multiple times to get multiple interfaces. - Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) - - // CloseStdin closes the write side of the stdin pipe so that the process is - // notified on the read side that there is no more data in stdin. - CloseStdin() error - - // Close cleans up any state associated with the process but does not kill - // or wait on it. - Close() error -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/layerexists.go b/vendor/src/github.com/Microsoft/hcsshim/layerexists.go deleted file mode 100644 index 522d95cce4..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/layerexists.go +++ /dev/null @@ -1,30 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// LayerExists will return true if a layer with the given id exists and is known -// to the system. -func LayerExists(info DriverInfo, id string) (bool, error) { - title := "hcsshim::LayerExists " - logrus.Debugf(title+"Flavour %d ID %s", info.Flavour, id) - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return false, err - } - - // Call the procedure itself. - var exists uint32 - - err = layerExists(&infop, id, &exists) - if err != nil { - err = makeErrorf(err, title, "id=%s flavour=%d", id, info.Flavour) - logrus.Error(err) - return false, err - } - - logrus.Debugf(title+"succeeded flavour=%d id=%s exists=%d", info.Flavour, id, exists) - return exists != 0, nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/layerutils.go b/vendor/src/github.com/Microsoft/hcsshim/layerutils.go deleted file mode 100644 index 47229d22e5..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/layerutils.go +++ /dev/null @@ -1,111 +0,0 @@ -package hcsshim - -// This file contains utility functions to support storage (graph) related -// functionality. 
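Taken together, the Container and Process interfaces above describe the full lifecycle. A minimal sketch of running a single command to completion, assuming some constructor elsewhere in the package has already produced a Container:

    func runCommand(c hcsshim.Container) (int, error) {
        if err := c.Start(); err != nil {
            return 0, err
        }
        p, err := c.CreateProcess(&hcsshim.ProcessConfig{
            CommandLine:      `cmd /c echo hello`,
            WorkingDirectory: `C:\`,
            CreateStdOutPipe: true,
        })
        if err != nil {
            return 0, err
        }
        defer p.Close()
        if err := p.Wait(); err != nil {
            return 0, err
        }
        // ExitCode is only valid once the process has terminated, per its contract.
        return p.ExitCode()
    }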
- -import ( - "path/filepath" - "syscall" - - "github.com/Sirupsen/logrus" -) - -/* To pass into syscall, we need a struct matching the following: -enum GraphDriverType -{ - DiffDriver, - FilterDriver -}; - -struct DriverInfo { - GraphDriverType Flavour; - LPCWSTR HomeDir; -}; -*/ -type DriverInfo struct { - Flavour int - HomeDir string -} - -type driverInfo struct { - Flavour int - HomeDirp *uint16 -} - -func convertDriverInfo(info DriverInfo) (driverInfo, error) { - homedirp, err := syscall.UTF16PtrFromString(info.HomeDir) - if err != nil { - logrus.Debugf("Failed conversion of home to pointer for driver info: %s", err.Error()) - return driverInfo{}, err - } - - return driverInfo{ - Flavour: info.Flavour, - HomeDirp: homedirp, - }, nil -} - -/* To pass into syscall, we need a struct matching the following: -typedef struct _WC_LAYER_DESCRIPTOR { - - // - // The ID of the layer - // - - GUID LayerId; - - // - // Additional flags - // - - union { - struct { - ULONG Reserved : 31; - ULONG Dirty : 1; // Created from sandbox as a result of snapshot - }; - ULONG Value; - } Flags; - - // - // Path to the layer root directory, null-terminated - // - - PCWSTR Path; - -} WC_LAYER_DESCRIPTOR, *PWC_LAYER_DESCRIPTOR; -*/ -type WC_LAYER_DESCRIPTOR struct { - LayerId GUID - Flags uint32 - Pathp *uint16 -} - -func layerPathsToDescriptors(parentLayerPaths []string) ([]WC_LAYER_DESCRIPTOR, error) { - // Array of descriptors that gets constructed. - var layers []WC_LAYER_DESCRIPTOR - - for i := 0; i < len(parentLayerPaths); i++ { - // Create a layer descriptor, using the folder name - // as the source for a GUID LayerId - _, folderName := filepath.Split(parentLayerPaths[i]) - g, err := NameToGuid(folderName) - if err != nil { - logrus.Debugf("Failed to convert name to guid %s", err) - return nil, err - } - - p, err := syscall.UTF16PtrFromString(parentLayerPaths[i]) - if err != nil { - logrus.Debugf("Failed conversion of parentLayerPath to pointer %s", err) - return nil, err - } - - layers = append(layers, WC_LAYER_DESCRIPTOR{ - LayerId: g, - Flags: 0, - Pathp: p, - }) - } - - return layers, nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/legacy.go b/vendor/src/github.com/Microsoft/hcsshim/legacy.go deleted file mode 100644 index e19ac8a902..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/legacy.go +++ /dev/null @@ -1,441 +0,0 @@ -package hcsshim - -import ( - "bufio" - "encoding/binary" - "errors" - "fmt" - "io" - "os" - "path/filepath" - "strings" - "syscall" - - "github.com/Microsoft/go-winio" -) - -var errorIterationCanceled = errors.New("") - -func openFileOrDir(path string, mode uint32, createDisposition uint32) (file *os.File, err error) { - return winio.OpenForBackup(path, mode, syscall.FILE_SHARE_READ, createDisposition) -} - -func makeLongAbsPath(path string) (string, error) { - if strings.HasPrefix(path, `\\?\`) || strings.HasPrefix(path, `\\.\`) { - return path, nil - } - if !filepath.IsAbs(path) { - absPath, err := filepath.Abs(path) - if err != nil { - return "", err - } - path = absPath - } - if strings.HasPrefix(path, `\\`) { - return `\\?\UNC\` + path[2:], nil - } - return `\\?\` + path, nil -} - -type fileEntry struct { - path string - fi os.FileInfo - err error -} - -type legacyLayerReader struct { - root string - result chan *fileEntry - proceed chan bool - currentFile *os.File - backupReader *winio.BackupFileReader - isTP4Format bool -} - -// newLegacyLayerReader returns a new LayerReader that can read the Windows -// TP4 transport format from disk. 
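DriverInfo mirrors the C struct shown in the comment above; by the enum's declaration order, DiffDriver and FilterDriver map to 0 and 1 (an inference, not stated explicitly). A sketch pairing it with LayerExists from above; the home directory path is illustrative:

    func layerKnown(layerID string) (bool, error) {
        info := hcsshim.DriverInfo{
            Flavour: 1,                                       // FilterDriver, inferred from enum order
            HomeDir: `C:\ProgramData\docker\windowsfilter`,   // illustrative path
        }
        return hcsshim.LayerExists(info, layerID)
    }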
-func newLegacyLayerReader(root string) *legacyLayerReader { - r := &legacyLayerReader{ - root: root, - result: make(chan *fileEntry), - proceed: make(chan bool), - isTP4Format: IsTP4(), - } - go r.walk() - return r -} - -func readTombstones(path string) (map[string]([]string), error) { - tf, err := os.Open(filepath.Join(path, "tombstones.txt")) - if err != nil { - return nil, err - } - defer tf.Close() - s := bufio.NewScanner(tf) - if !s.Scan() || s.Text() != "\xef\xbb\xbfVersion 1.0" { - return nil, errors.New("Invalid tombstones file") - } - - ts := make(map[string]([]string)) - for s.Scan() { - t := filepath.Join("Files", s.Text()[1:]) // skip leading `\` - dir := filepath.Dir(t) - ts[dir] = append(ts[dir], t) - } - if err = s.Err(); err != nil { - return nil, err - } - - return ts, nil -} - -func (r *legacyLayerReader) walkUntilCancelled() error { - root, err := makeLongAbsPath(r.root) - if err != nil { - return err - } - - r.root = root - ts, err := readTombstones(r.root) - if err != nil { - return err - } - - err = filepath.Walk(r.root, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if path == r.root || path == filepath.Join(r.root, "tombstones.txt") || strings.HasSuffix(path, ".$wcidirs$") { - return nil - } - - r.result <- &fileEntry{path, info, nil} - if !<-r.proceed { - return errorIterationCanceled - } - - // List all the tombstones. - if info.IsDir() { - relPath, err := filepath.Rel(r.root, path) - if err != nil { - return err - } - if dts, ok := ts[relPath]; ok { - for _, t := range dts { - r.result <- &fileEntry{filepath.Join(r.root, t), nil, nil} - if !<-r.proceed { - return errorIterationCanceled - } - } - } - } - return nil - }) - if err == errorIterationCanceled { - return nil - } - if err == nil { - return io.EOF - } - return err -} - -func (r *legacyLayerReader) walk() { - defer close(r.result) - if !<-r.proceed { - return - } - - err := r.walkUntilCancelled() - if err != nil { - for { - r.result <- &fileEntry{err: err} - if !<-r.proceed { - return - } - } - } -} - -func (r *legacyLayerReader) reset() { - if r.backupReader != nil { - r.backupReader.Close() - r.backupReader = nil - } - if r.currentFile != nil { - r.currentFile.Close() - r.currentFile = nil - } -} - -func findBackupStreamSize(r io.Reader) (int64, error) { - br := winio.NewBackupStreamReader(r) - for { - hdr, err := br.Next() - if err != nil { - if err == io.EOF { - err = nil - } - return 0, err - } - if hdr.Id == winio.BackupData { - return hdr.Size, nil - } - } -} - -func (r *legacyLayerReader) Next() (path string, size int64, fileInfo *winio.FileBasicInfo, err error) { - r.reset() - r.proceed <- true - fe := <-r.result - if fe == nil { - err = errors.New("LegacyLayerReader closed") - return - } - if fe.err != nil { - err = fe.err - return - } - - path, err = filepath.Rel(r.root, fe.path) - if err != nil { - return - } - - if fe.fi == nil { - // This is a tombstone. Return a nil fileInfo. 
- return - } - - if fe.fi.IsDir() && strings.HasPrefix(path, `Files\`) { - fe.path += ".$wcidirs$" - } - - f, err := openFileOrDir(fe.path, syscall.GENERIC_READ, syscall.OPEN_EXISTING) - if err != nil { - return - } - defer func() { - if f != nil { - f.Close() - } - }() - - fileInfo, err = winio.GetFileBasicInfo(f) - if err != nil { - return - } - - if !strings.HasPrefix(path, `Files\`) { - size = fe.fi.Size() - r.backupReader = winio.NewBackupFileReader(f, false) - if path == "Hives" || path == "Files" { - // The Hives directory has a non-deterministic file time because of the - // nature of the import process. Use the times from System_Delta. - var g *os.File - g, err = os.Open(filepath.Join(r.root, `Hives\System_Delta`)) - if err != nil { - return - } - attr := fileInfo.FileAttributes - fileInfo, err = winio.GetFileBasicInfo(g) - g.Close() - if err != nil { - return - } - fileInfo.FileAttributes = attr - } - - // The creation time and access time get reset for files outside of the Files path. - fileInfo.CreationTime = fileInfo.LastWriteTime - fileInfo.LastAccessTime = fileInfo.LastWriteTime - - } else { - beginning := int64(0) - if !r.isTP4Format { - // In TP5, the file attributes were added before the backup stream - var attr uint32 - err = binary.Read(f, binary.LittleEndian, &attr) - if err != nil { - return - } - fileInfo.FileAttributes = uintptr(attr) - beginning = 4 - } - - // Find the accurate file size. - if !fe.fi.IsDir() { - size, err = findBackupStreamSize(f) - if err != nil { - err = &os.PathError{Op: "findBackupStreamSize", Path: fe.path, Err: err} - return - } - } - - // Return back to the beginning of the backup stream. - _, err = f.Seek(beginning, 0) - if err != nil { - return - } - } - - r.currentFile = f - f = nil - return -} - -func (r *legacyLayerReader) Read(b []byte) (int, error) { - if r.backupReader == nil { - if r.currentFile == nil { - return 0, io.EOF - } - return r.currentFile.Read(b) - } - return r.backupReader.Read(b) -} - -func (r *legacyLayerReader) Close() error { - r.proceed <- false - <-r.result - r.reset() - return nil -} - -type legacyLayerWriter struct { - root string - currentFile *os.File - backupWriter *winio.BackupFileWriter - tombstones []string - isTP4Format bool - pathFixed bool -} - -// newLegacyLayerWriter returns a LayerWriter that can write the TP4 transport format -// to disk. 
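The tombstone mechanism above round-trips deletions through tombstones.txt: the writer records paths under Files\ (written back with a leading backslash), and the reader later surfaces each one as an entry with a nil FileInfo. A package-internal sketch with an illustrative path:

    w := newLegacyLayerWriter(`C:\layers\scratch`) // illustrative scratch location
    // Only paths under `Files\` are valid tombstones; this one is recorded
    // as `\Windows\old.dll` in tombstones.txt when Close() runs.
    if err := w.Remove(`Files\Windows\old.dll`); err != nil {
        // invalid tombstone
    }
    if err := w.Close(); err != nil {
        // Close writes the BOM, the "Version 1.0" header, then one line per tombstone.
    }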
-func newLegacyLayerWriter(root string) *legacyLayerWriter { - return &legacyLayerWriter{ - root: root, - isTP4Format: IsTP4(), - } -} - -func (w *legacyLayerWriter) init() error { - if !w.pathFixed { - path, err := makeLongAbsPath(w.root) - if err != nil { - return err - } - w.root = path - w.pathFixed = true - } - return nil -} - -func (w *legacyLayerWriter) reset() { - if w.backupWriter != nil { - w.backupWriter.Close() - w.backupWriter = nil - } - if w.currentFile != nil { - w.currentFile.Close() - w.currentFile = nil - } -} - -func (w *legacyLayerWriter) Add(name string, fileInfo *winio.FileBasicInfo) error { - w.reset() - err := w.init() - if err != nil { - return err - } - path := filepath.Join(w.root, name) - - createDisposition := uint32(syscall.CREATE_NEW) - if (fileInfo.FileAttributes & syscall.FILE_ATTRIBUTE_DIRECTORY) != 0 { - err := os.Mkdir(path, 0) - if err != nil { - return err - } - path += ".$wcidirs$" - } - - f, err := openFileOrDir(path, syscall.GENERIC_READ|syscall.GENERIC_WRITE, createDisposition) - if err != nil { - return err - } - defer func() { - if f != nil { - f.Close() - os.Remove(path) - } - }() - - strippedFi := *fileInfo - strippedFi.FileAttributes = 0 - err = winio.SetFileBasicInfo(f, &strippedFi) - if err != nil { - return err - } - - if strings.HasPrefix(name, `Hives\`) { - w.backupWriter = winio.NewBackupFileWriter(f, false) - } else { - if !w.isTP4Format { - // In TP5, the file attributes were added to the header - err = binary.Write(f, binary.LittleEndian, uint32(fileInfo.FileAttributes)) - if err != nil { - return err - } - } - } - - w.currentFile = f - f = nil - return nil -} - -func (w *legacyLayerWriter) AddLink(name string, target string) error { - return errors.New("hard links not supported with legacy writer") -} - -func (w *legacyLayerWriter) Remove(name string) error { - if !strings.HasPrefix(name, `Files\`) { - return fmt.Errorf("invalid tombstone %s", name) - } - w.tombstones = append(w.tombstones, name[len(`Files\`):]) - return nil -} - -func (w *legacyLayerWriter) Write(b []byte) (int, error) { - if w.backupWriter == nil { - if w.currentFile == nil { - return 0, errors.New("closed") - } - return w.currentFile.Write(b) - } - return w.backupWriter.Write(b) -} - -func (w *legacyLayerWriter) Close() error { - w.reset() - err := w.init() - if err != nil { - return err - } - tf, err := os.Create(filepath.Join(w.root, "tombstones.txt")) - if err != nil { - return err - } - defer tf.Close() - _, err = tf.Write([]byte("\xef\xbb\xbfVersion 1.0\n")) - if err != nil { - return err - } - for _, t := range w.tombstones { - _, err = tf.Write([]byte(filepath.Join(`\`, t) + "\n")) - if err != nil { - return err - } - } - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/mksyscall_windows.go b/vendor/src/github.com/Microsoft/hcsshim/mksyscall_windows.go deleted file mode 100644 index a76bb4414c..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/mksyscall_windows.go +++ /dev/null @@ -1,818 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build ignore - -/* -mksyscall_windows generates windows system call bodies - -It parses all files specified on command line containing function -prototypes (like syscall_windows.go) and prints system call bodies -to standard output. 
- -The prototypes are marked by lines beginning with "//sys" and read -like func declarations if //sys is replaced by func, but: - -* The parameter lists must give a name for each argument. This - includes return parameters. - -* The parameter lists must give a type for each argument: - the (x, y, z int) shorthand is not allowed. - -* If the return parameter is an error number, it must be named err. - -* If the go func name needs to be different from its winapi dll name, - the winapi name can be specified at the end, after the "=" sign, like - //sys LoadLibrary(libname string) (handle uint32, err error) = LoadLibraryA - -* Each function that returns err needs to supply a condition that the - winapi return value will be tested against to detect failure. - On failure, this sets err to the windows "last-error"; otherwise err will be nil. - The value can be provided at the end of the //sys declaration, like - //sys LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA - and is [failretval==0] by default. - -Usage: - mksyscall_windows [flags] [path ...] - -The flags are: - -output - Specify output file name (outputs to console if blank). - -trace - Generate print statement after every syscall. -*/ -package main - -import ( - "bufio" - "bytes" - "errors" - "flag" - "fmt" - "go/format" - "go/parser" - "go/token" - "io" - "io/ioutil" - "log" - "os" - "strconv" - "strings" - "text/template" -) - -var ( - filename = flag.String("output", "", "output file name (standard output if omitted)") - printTraceFlag = flag.Bool("trace", false, "generate print statement after every syscall") -) - -func trim(s string) string { - return strings.Trim(s, " \t") -} - -var packageName string - -func packagename() string { - return packageName } - -func syscalldot() string { - if packageName == "syscall" { - return "" - } - return "syscall." -} - -// Param is a function parameter -type Param struct { - Name string - Type string - fn *Fn - tmpVarIdx int -} - -// tmpVar returns temp variable name that will be used to represent p during syscall. -func (p *Param) tmpVar() string { - if p.tmpVarIdx < 0 { - p.tmpVarIdx = p.fn.curTmpVarIdx - p.fn.curTmpVarIdx++ - } - return fmt.Sprintf("_p%d", p.tmpVarIdx) -} - -// BoolTmpVarCode returns source code for bool temp variable. -func (p *Param) BoolTmpVarCode() string { - const code = `var %s uint32 - if %s { - %s = 1 - } else { - %s = 0 - }` - tmp := p.tmpVar() - return fmt.Sprintf(code, tmp, p.Name, tmp, tmp) -} - -// SliceTmpVarCode returns source code for slice temp variable. -func (p *Param) SliceTmpVarCode() string { - const code = `var %s *%s - if len(%s) > 0 { - %s = &%s[0] - }` - tmp := p.tmpVar() - return fmt.Sprintf(code, tmp, p.Type[2:], p.Name, tmp, p.Name) -} - -// StringTmpVarCode returns source code for string temp variable. -func (p *Param) StringTmpVarCode() string { - errvar := p.fn.Rets.ErrorVarName() - if errvar == "" { - errvar = "_" - } - tmp := p.tmpVar() - const code = `var %s %s - %s, %s = %s(%s)` - s := fmt.Sprintf(code, tmp, p.fn.StrconvType(), tmp, errvar, p.fn.StrconvFunc(), p.Name) - if errvar == "-" { - return s - } - const morecode = ` - if %s != nil { - return - }` - return s + fmt.Sprintf(morecode, errvar) -} - -// TmpVarCode returns source code for temp variable. -func (p *Param) TmpVarCode() string { - switch { - case p.Type == "bool": - return p.BoolTmpVarCode() - case strings.HasPrefix(p.Type, "[]"): - return p.SliceTmpVarCode() - default: - return "" - } -} - -// TmpVarHelperCode returns source code for helper's temp variable.
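To make the codegen concrete: for a bool parameter named enable, BoolTmpVarCode above expands its template into the following generated Go (the _p0 name comes from tmpVar's first allocation):

    var _p0 uint32
    if enable {
        _p0 = 1
    } else {
        _p0 = 0
    }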
-func (p *Param) TmpVarHelperCode() string { - if p.Type != "string" { - return "" - } - return p.StringTmpVarCode() -} - -// SyscallArgList returns source code fragments representing p parameter -// in syscall. Slices are translated into 2 syscall parameters: pointer to -// the first element and length. -func (p *Param) SyscallArgList() []string { - t := p.HelperType() - var s string - switch { - case t[0] == '*': - s = fmt.Sprintf("unsafe.Pointer(%s)", p.Name) - case t == "bool": - s = p.tmpVar() - case strings.HasPrefix(t, "[]"): - return []string{ - fmt.Sprintf("uintptr(unsafe.Pointer(%s))", p.tmpVar()), - fmt.Sprintf("uintptr(len(%s))", p.Name), - } - default: - s = p.Name - } - return []string{fmt.Sprintf("uintptr(%s)", s)} -} - -// IsError determines if p parameter is used to return error. -func (p *Param) IsError() bool { - return p.Name == "err" && p.Type == "error" -} - -// HelperType returns type of parameter p used in helper function. -func (p *Param) HelperType() string { - if p.Type == "string" { - return p.fn.StrconvType() - } - return p.Type -} - -// join concatenates parameters ps into a string with sep separator. -// Each parameter is converted into string by applying fn to it -// before conversion. -func join(ps []*Param, fn func(*Param) string, sep string) string { - if len(ps) == 0 { - return "" - } - a := make([]string, 0) - for _, p := range ps { - a = append(a, fn(p)) - } - return strings.Join(a, sep) -} - -// Rets describes function return parameters. -type Rets struct { - Name string - Type string - ReturnsError bool - FailCond string -} - -// ErrorVarName returns error variable name for r. -func (r *Rets) ErrorVarName() string { - if r.ReturnsError { - return "err" - } - if r.Type == "error" { - return r.Name - } - return "" -} - -// ToParams converts r into slice of *Param. -func (r *Rets) ToParams() []*Param { - ps := make([]*Param, 0) - if len(r.Name) > 0 { - ps = append(ps, &Param{Name: r.Name, Type: r.Type}) - } - if r.ReturnsError { - ps = append(ps, &Param{Name: "err", Type: "error"}) - } - return ps -} - -// List returns source code of syscall return parameters. -func (r *Rets) List() string { - s := join(r.ToParams(), func(p *Param) string { return p.Name + " " + p.Type }, ", ") - if len(s) > 0 { - s = "(" + s + ")" - } - return s -} - -// PrintList returns source code of trace printing part correspondent -// to syscall return values. -func (r *Rets) PrintList() string { - return join(r.ToParams(), func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) -} - -// SetReturnValuesCode returns source code that accepts syscall return values. -func (r *Rets) SetReturnValuesCode() string { - if r.Name == "" && !r.ReturnsError { - return "" - } - retvar := "r0" - if r.Name == "" { - retvar = "r1" - } - errvar := "_" - if r.ReturnsError { - errvar = "e1" - } - return fmt.Sprintf("%s, _, %s := ", retvar, errvar) -} - -func (r *Rets) useLongHandleErrorCode(retvar string) string { - const code = `if %s { - if e1 != 0 { - err = error(e1) - } else { - err = %sEINVAL - } - }` - cond := retvar + " == 0" - if r.FailCond != "" { - cond = strings.Replace(r.FailCond, "failretval", retvar, 1) - } - return fmt.Sprintf(code, cond, syscalldot()) -} - -// SetErrorCode returns source code that sets return parameters. 
-func (r *Rets) SetErrorCode() string { - const code = `if r0 != 0 { - %s = %sErrno(r0) - }` - const hrCode = `if int32(r0) < 0 { - %s = %sErrno(win32FromHresult(r0)) - }` - if r.Name == "" && !r.ReturnsError { - return "" - } - if r.Name == "" { - return r.useLongHandleErrorCode("r1") - } - if r.Type == "error" { - if r.Name == "hr" { - return fmt.Sprintf(hrCode, r.Name, syscalldot()) - } else { - return fmt.Sprintf(code, r.Name, syscalldot()) - } - } - s := "" - switch { - case r.Type[0] == '*': - s = fmt.Sprintf("%s = (%s)(unsafe.Pointer(r0))", r.Name, r.Type) - case r.Type == "bool": - s = fmt.Sprintf("%s = r0 != 0", r.Name) - default: - s = fmt.Sprintf("%s = %s(r0)", r.Name, r.Type) - } - if !r.ReturnsError { - return s - } - return s + "\n\t" + r.useLongHandleErrorCode(r.Name) -} - -// Fn describes syscall function. -type Fn struct { - Name string - Params []*Param - Rets *Rets - PrintTrace bool - confirmproc bool - dllname string - dllfuncname string - src string - // TODO: get rid of this field and just use parameter index instead - curTmpVarIdx int // ensure tmp variables have unique names -} - -// extractParams parses s to extract function parameters. -func extractParams(s string, f *Fn) ([]*Param, error) { - s = trim(s) - if s == "" { - return nil, nil - } - a := strings.Split(s, ",") - ps := make([]*Param, len(a)) - for i := range ps { - s2 := trim(a[i]) - b := strings.Split(s2, " ") - if len(b) != 2 { - b = strings.Split(s2, "\t") - if len(b) != 2 { - return nil, errors.New("Could not extract function parameter from \"" + s2 + "\"") - } - } - ps[i] = &Param{ - Name: trim(b[0]), - Type: trim(b[1]), - fn: f, - tmpVarIdx: -1, - } - } - return ps, nil -} - -// extractSection extracts text out of string s starting after start -// and ending just before end. found return value will indicate success, -// and prefix, body and suffix will contain correspondent parts of string s. -func extractSection(s string, start, end rune) (prefix, body, suffix string, found bool) { - s = trim(s) - if strings.HasPrefix(s, string(start)) { - // no prefix - body = s[1:] - } else { - a := strings.SplitN(s, string(start), 2) - if len(a) != 2 { - return "", "", s, false - } - prefix = a[0] - body = a[1] - } - a := strings.SplitN(body, string(end), 2) - if len(a) != 2 { - return "", "", "", false - } - return prefix, a[0], a[1], true -} - -// newFn parses string s and returns the created function Fn.
-func newFn(s string) (*Fn, error) { - s = trim(s) - f := &Fn{ - Rets: &Rets{}, - src: s, - PrintTrace: *printTraceFlag, - } - // function name and args - prefix, body, s, found := extractSection(s, '(', ')') - if !found || prefix == "" { - return nil, errors.New("Could not extract function name and parameters from \"" + f.src + "\"") - } - f.Name = prefix - var err error - f.Params, err = extractParams(body, f) - if err != nil { - return nil, err - } - // return values - _, body, s, found = extractSection(s, '(', ')') - if found { - r, err := extractParams(body, f) - if err != nil { - return nil, err - } - switch len(r) { - case 0: - case 1: - if r[0].IsError() { - f.Rets.ReturnsError = true - } else { - f.Rets.Name = r[0].Name - f.Rets.Type = r[0].Type - } - case 2: - if !r[1].IsError() { - return nil, errors.New("Only last windows error is allowed as second return value in \"" + f.src + "\"") - } - f.Rets.ReturnsError = true - f.Rets.Name = r[0].Name - f.Rets.Type = r[0].Type - default: - return nil, errors.New("Too many return values in \"" + f.src + "\"") - } - } - // fail condition - _, body, s, found = extractSection(s, '[', ']') - if found { - f.Rets.FailCond = body - } - // dll and dll function names - s = trim(s) - if s == "" { - return f, nil - } - if !strings.HasPrefix(s, "=") { - return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") - } - s = trim(s[1:]) - a := strings.Split(s, ".") - switch len(a) { - case 1: - f.dllfuncname = a[0] - case 2: - f.dllname = a[0] - f.dllfuncname = a[1] - default: - return nil, errors.New("Could not extract dll name from \"" + f.src + "\"") - } - if f.dllfuncname[len(f.dllfuncname)-1] == '?' { - f.confirmproc = true - f.dllfuncname = f.dllfuncname[0 : len(f.dllfuncname)-1] - } - return f, nil -} - -// DLLName returns DLL name for function f. -func (f *Fn) DLLName() string { - if f.dllname == "" { - return "kernel32" - } - return f.dllname -} - -// DLLFuncName returns DLL function name for function f. -func (f *Fn) DLLFuncName() string { - if f.dllfuncname == "" { - return f.Name - } - return f.dllfuncname -} - -func (f *Fn) ConfirmProc() bool { - return f.confirmproc -} - -// ParamList returns source code for function f parameters. -func (f *Fn) ParamList() string { - return join(f.Params, func(p *Param) string { return p.Name + " " + p.Type }, ", ") -} - -// HelperParamList returns source code for helper function f parameters. -func (f *Fn) HelperParamList() string { - return join(f.Params, func(p *Param) string { return p.Name + " " + p.HelperType() }, ", ") -} - -// ParamPrintList returns source code of trace printing part correspondent -// to syscall input parameters. -func (f *Fn) ParamPrintList() string { - return join(f.Params, func(p *Param) string { return fmt.Sprintf(`"%s=", %s, `, p.Name, p.Name) }, `", ", `) -} - -// ParamCount returns the number of syscall parameters for function f. -func (f *Fn) ParamCount() int { - n := 0 - for _, p := range f.Params { - n += len(p.SyscallArgList()) - } - return n -} - -// SyscallParamCount determines which version of Syscall/Syscall6/Syscall9/... -// to use. It returns parameter count for correspondent SyscallX function. -func (f *Fn) SyscallParamCount() int { - n := f.ParamCount() - switch { - case n <= 3: - return 3 - case n <= 6: - return 6 - case n <= 9: - return 9 - case n <= 12: - return 12 - case n <= 15: - return 15 - default: - panic("too many arguments to system call") - } -} - -// Syscall determines which SyscallX function to use for function f.
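Applying newFn above to the doc comment's own LoadLibrary example shows where each piece lands; the field values below follow directly from the parsing code (a package-internal sketch):

    fn, err := newFn(`LoadLibrary(libname string) (handle uint32, err error) [failretval==-1] = LoadLibraryA`)
    if err != nil {
        // not reached for this well-formed prototype
    }
    // fn.Name == "LoadLibrary"
    // fn.Rets.Name == "handle", fn.Rets.Type == "uint32", fn.Rets.ReturnsError == true
    // fn.Rets.FailCond == "failretval==-1"
    // fn.DLLName() == "kernel32" (the default), fn.DLLFuncName() == "LoadLibraryA"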
-func (f *Fn) Syscall() string { - c := f.SyscallParamCount() - if c == 3 { - return syscalldot() + "Syscall" - } - return syscalldot() + "Syscall" + strconv.Itoa(c) -} - -// SyscallParamList returns source code for SyscallX parameters for function f. -func (f *Fn) SyscallParamList() string { - a := make([]string, 0) - for _, p := range f.Params { - a = append(a, p.SyscallArgList()...) - } - for len(a) < f.SyscallParamCount() { - a = append(a, "0") - } - return strings.Join(a, ", ") -} - -// HelperCallParamList returns source code of call into function f helper. -func (f *Fn) HelperCallParamList() string { - a := make([]string, 0, len(f.Params)) - for _, p := range f.Params { - s := p.Name - if p.Type == "string" { - s = p.tmpVar() - } - a = append(a, s) - } - return strings.Join(a, ", ") -} - -// IsUTF16 is true if f is a W (utf16) function; it is false -// for all A (ascii) functions. -func (_ *Fn) IsUTF16() bool { - return true -} - -// StrconvFunc returns name of Go string to OS string function for f. -func (f *Fn) StrconvFunc() string { - if f.IsUTF16() { - return syscalldot() + "UTF16PtrFromString" - } - return syscalldot() + "BytePtrFromString" -} - -// StrconvType returns Go type name used for OS string for f. -func (f *Fn) StrconvType() string { - if f.IsUTF16() { - return "*uint16" - } - return "*byte" -} - -// HasStringParam is true if f has at least one string parameter; -// otherwise it is false. -func (f *Fn) HasStringParam() bool { - for _, p := range f.Params { - if p.Type == "string" { - return true - } - } - return false -} - -var uniqDllFuncName = make(map[string]bool) - -// IsNotDuplicate is true if f is not a duplicated function -func (f *Fn) IsNotDuplicate() bool { - funcName := f.DLLFuncName() - if uniqDllFuncName[funcName] == false { - uniqDllFuncName[funcName] = true - return true - } - - return false -} - -// HelperName returns name of function f helper. -func (f *Fn) HelperName() string { - if !f.HasStringParam() { - return f.Name - } - return "_" + f.Name -} - -// Source files and functions. -type Source struct { - Funcs []*Fn - Files []string -} - -// ParseFiles parses files listed in fs and extracts all syscall -// functions listed in sys comments. It returns source files -// and functions collection *Source if successful. -func ParseFiles(fs []string) (*Source, error) { - src := &Source{ - Funcs: make([]*Fn, 0), - Files: make([]string, 0), - } - for _, file := range fs { - if err := src.ParseFile(file); err != nil { - return nil, err - } - } - return src, nil -} - -// DLLs returns dll names for a source set src. -func (src *Source) DLLs() []string { - uniq := make(map[string]bool) - r := make([]string, 0) - for _, f := range src.Funcs { - name := f.DLLName() - if _, found := uniq[name]; !found { - uniq[name] = true - r = append(r, name) - } - } - return r -} - -// ParseFile adds an additional file path to a source set src.
-func (src *Source) ParseFile(path string) error { - file, err := os.Open(path) - if err != nil { - return err - } - defer file.Close() - - s := bufio.NewScanner(file) - for s.Scan() { - t := trim(s.Text()) - if len(t) < 7 { - continue - } - if !strings.HasPrefix(t, "//sys") { - continue - } - t = t[5:] - if !(t[0] == ' ' || t[0] == '\t') { - continue - } - f, err := newFn(t[1:]) - if err != nil { - return err - } - src.Funcs = append(src.Funcs, f) - } - if err := s.Err(); err != nil { - return err - } - src.Files = append(src.Files, path) - - // get package name - fset := token.NewFileSet() - _, err = file.Seek(0, 0) - if err != nil { - return err - } - pkg, err := parser.ParseFile(fset, "", file, parser.PackageClauseOnly) - if err != nil { - return err - } - packageName = pkg.Name.Name - - return nil -} - -// Generate output source file from a source set src. -func (src *Source) Generate(w io.Writer) error { - funcMap := template.FuncMap{ - "packagename": packagename, - "syscalldot": syscalldot, - } - t := template.Must(template.New("main").Funcs(funcMap).Parse(srcTemplate)) - err := t.Execute(w, src) - if err != nil { - return errors.New("Failed to execute template: " + err.Error()) - } - return nil -} - -func usage() { - fmt.Fprintf(os.Stderr, "usage: mksyscall_windows [flags] [path ...]\n") - flag.PrintDefaults() - os.Exit(1) -} - -func main() { - flag.Usage = usage - flag.Parse() - if len(flag.Args()) <= 0 { - fmt.Fprintf(os.Stderr, "no files to parse provided\n") - usage() - } - - src, err := ParseFiles(flag.Args()) - if err != nil { - log.Fatal(err) - } - - var buf bytes.Buffer - if err := src.Generate(&buf); err != nil { - log.Fatal(err) - } - - data, err := format.Source(buf.Bytes()) - if err != nil { - log.Fatal(err) - } - if *filename == "" { - _, err = os.Stdout.Write(data) - } else { - err = ioutil.WriteFile(*filename, data, 0644) - } - if err != nil { - log.Fatal(err) - } -} - -// TODO: use println instead to print in the following template -const srcTemplate = ` - -{{define "main"}}// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT - -package {{packagename}} - -import "github.com/Microsoft/go-winio" -import "unsafe"{{if syscalldot}} -import "syscall"{{end}} - -var _ unsafe.Pointer - -var ( -{{template "dlls" .}} -{{template "funcnames" .}}) -{{range .Funcs}}{{if .HasStringParam}}{{template "helperbody" .}}{{end}}{{template "funcbody" .}}{{end}} -{{end}} - -{{/* help functions */}} - -{{define "dlls"}}{{range .DLLs}} mod{{.}} = {{syscalldot}}NewLazyDLL("{{.}}.dll") -{{end}}{{end}} - -{{define "funcnames"}}{{range .Funcs}}{{if .IsNotDuplicate}} proc{{.DLLFuncName}} = mod{{.DLLName}}.NewProc("{{.DLLFuncName}}"){{end}} -{{end}}{{end}} - -{{define "helperbody"}} -func {{.Name}}({{.ParamList}}) {{template "results" .}}{ -{{template "helpertmpvars" .}} return {{.HelperName}}({{.HelperCallParamList}}) -} -{{end}} - -{{define "funcbody"}} -func {{.HelperName}}({{.HelperParamList}}) {{template "results" .}}{ -{{template "tmpvars" .}} {{template "syscallcheck" .}}{{template "syscall" .}} -{{template "seterror" .}}{{template "printtrace" .}} return -} -{{end}} - -{{define "helpertmpvars"}}{{range .Params}}{{if .TmpVarHelperCode}} {{.TmpVarHelperCode}} -{{end}}{{end}}{{end}} - -{{define "tmpvars"}}{{range .Params}}{{if .TmpVarCode}} {{.TmpVarCode}} -{{end}}{{end}}{{end}} - -{{define "results"}}{{if .Rets.List}}{{.Rets.List}} {{end}}{{end}} - -{{define "syscallcheck"}}{{if .ConfirmProc}}if {{.Rets.ErrorVarName}} = proc{{.DLLFuncName}}.Find(); {{.Rets.ErrorVarName}} != nil { - 
return -} -{{end}}{{end}} - -{{define "syscall"}}{{.Rets.SetReturnValuesCode}}{{.Syscall}}(proc{{.DLLFuncName}}.Addr(), {{.ParamCount}}, {{.SyscallParamList}}){{end}} - -{{define "seterror"}}{{if .Rets.SetErrorCode}} {{.Rets.SetErrorCode}} -{{end}}{{end}} - -{{define "printtrace"}}{{if .PrintTrace}} print("SYSCALL: {{.Name}}(", {{.ParamPrintList}}") (", {{.Rets.PrintList}}")\n") -{{end}}{{end}} - -` diff --git a/vendor/src/github.com/Microsoft/hcsshim/nametoguid.go b/vendor/src/github.com/Microsoft/hcsshim/nametoguid.go deleted file mode 100644 index 1a522f95e0..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/nametoguid.go +++ /dev/null @@ -1,20 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// NameToGuid converts the given string into a GUID using the algorithm in the -// Host Compute Service, ensuring GUIDs generated with the same string are common -// across all clients. -func NameToGuid(name string) (id GUID, err error) { - title := "hcsshim::NameToGuid " - logrus.Debugf(title+"Name %s", name) - - err = nameToGuid(name, &id) - if err != nil { - err = makeErrorf(err, title, "name=%s", name) - logrus.Error(err) - return - } - - return -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/preparelayer.go b/vendor/src/github.com/Microsoft/hcsshim/preparelayer.go deleted file mode 100644 index 69b5fe045e..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/preparelayer.go +++ /dev/null @@ -1,36 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// PrepareLayer finds a mounted read-write layer matching layerId and enables -// the filesystem filter for use on that layer. This requires the paths to all -// parent layers, and is necessary in order to view or interact with the layer -// as an actual filesystem (reading and writing files, creating directories, etc). -// Disabling the filter must be done via UnprepareLayer.
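A usage sketch for the PrepareLayer/UnprepareLayer pair (UnprepareLayer appears later in this patch); the flavour value and paths are illustrative, not prescribed by the API:

    info := hcsshim.DriverInfo{Flavour: 1, HomeDir: `C:\ProgramData\docker\windowsfilter`}
    parents := []string{`C:\ProgramData\docker\windowsfilter\base`} // illustrative parent layer
    if err := hcsshim.PrepareLayer(info, "rwLayerID", parents); err != nil {
        // handle error
    }
    // The layer is now usable as a filesystem until the filter is disabled again.
    defer hcsshim.UnprepareLayer(info, "rwLayerID")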
-func PrepareLayer(info DriverInfo, layerId string, parentLayerPaths []string) error { - title := "hcsshim::PrepareLayer " - logrus.Debugf(title+"flavour %d layerId %s", info.Flavour, layerId) - - // Generate layer descriptors - layers, err := layerPathsToDescriptors(parentLayerPaths) - if err != nil { - return err - } - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return err - } - - err = prepareLayer(&infop, layerId, layers) - if err != nil { - err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour) - logrus.Error(err) - return err - } - - logrus.Debugf(title+"succeeded flavour=%d layerId=%s", info.Flavour, layerId) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/process.go b/vendor/src/github.com/Microsoft/hcsshim/process.go deleted file mode 100644 index 07b9762dce..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/process.go +++ /dev/null @@ -1,441 +0,0 @@ -package hcsshim - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "syscall" - "time" - - "github.com/Sirupsen/logrus" -) - -type ProcessError struct { - Process *process - Operation string - ExtraInfo string - Err error -} - -type process struct { - handle hcsProcess - processID int - container *container - cachedPipes *cachedPipes - callbackNumber uintptr -} - -type cachedPipes struct { - stdIn syscall.Handle - stdOut syscall.Handle - stdErr syscall.Handle -} - -type processModifyRequest struct { - Operation string - ConsoleSize *consoleSize `json:",omitempty"` - CloseHandle *closeHandle `json:",omitempty"` -} - -type consoleSize struct { - Height uint16 - Width uint16 -} - -type closeHandle struct { - Handle string -} - -type processStatus struct { - ProcessId uint32 - Exited bool - ExitCode uint32 - LastWaitResult int32 -} - -const ( - stdIn string = "StdIn" - stdOut string = "StdOut" - stdErr string = "StdErr" -) - -const ( - modifyConsoleSize string = "ConsoleSize" - modifyCloseHandle string = "CloseHandle" -) - -// Pid returns the process ID of the process within the container. -func (process *process) Pid() int { - return process.processID -} - -// Kill signals the process to terminate but does not wait for it to finish terminating. -func (process *process) Kill() error { - operation := "Kill" - title := "HCSShim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) - - var resultp *uint16 - err := hcsTerminateProcess(process.handle, &resultp) - err = processHcsResult(err, resultp) - if err == ErrVmcomputeOperationPending { - return ErrVmcomputeOperationPending - } else if err != nil { - return makeProcessError(process, operation, "", err) - } - - logrus.Debugf(title+" succeeded processid=%d", process.processID) - return nil -} - -// Wait waits for the process to exit. -func (process *process) Wait() error { - operation := "Wait" - title := "HCSShim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) - - if hcsCallbacksSupported { - err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, nil) - if err != nil { - return makeProcessError(process, operation, "", err) - } - } else { - _, err := process.waitTimeoutInternal(syscall.INFINITE) - if err != nil { - return makeProcessError(process, operation, "", err) - } - } - - logrus.Debugf(title+" succeeded processid=%d", process.processID) - return nil -} - -// WaitTimeout waits for the process to exit or the duration to elapse. 
It returns -// false if timeout occurs. -func (process *process) WaitTimeout(timeout time.Duration) error { - operation := "WaitTimeout" - title := "HCSShim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) - - if hcsCallbacksSupported { - err := waitForNotification(process.callbackNumber, hcsNotificationProcessExited, &timeout) - if err != nil { - return makeProcessError(process, operation, "", err) - } - } else { - finished, err := waitTimeoutHelper(process, timeout) - if !finished { - return ErrTimeout - } else if err != nil { - return makeProcessError(process, operation, "", err) - } - } - - logrus.Debugf(title+" succeeded processid=%d", process.processID) - return nil -} - -func (process *process) hcsWait(timeout uint32) (bool, error) { - var ( - resultp *uint16 - exitEvent syscall.Handle - ) - err := hcsCreateProcessWait(process.handle, &exitEvent, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - return false, err - } - defer syscall.CloseHandle(exitEvent) - - return waitForSingleObject(exitEvent, timeout) -} - -func (process *process) waitTimeoutInternal(timeout uint32) (bool, error) { - return waitTimeoutInternalHelper(process, timeout) -} - -// ExitCode returns the exit code of the process. The process must have -// already terminated. -func (process *process) ExitCode() (int, error) { - operation := "ExitCode" - title := "HCSShim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) - - properties, err := process.properties() - if err != nil { - return 0, makeProcessError(process, operation, "", err) - } - - if properties.Exited == false { - return 0, ErrInvalidProcessState - } - - logrus.Debugf(title+" succeeded processid=%d exitCode=%d", process.processID, properties.ExitCode) - return int(properties.ExitCode), nil -} - -// ResizeConsole resizes the console of the process. 
-func (process *process) ResizeConsole(width, height uint16) error { - operation := "ResizeConsole" - title := "HCSShim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) - - modifyRequest := processModifyRequest{ - Operation: modifyConsoleSize, - ConsoleSize: &consoleSize{ - Height: height, - Width: width, - }, - } - - modifyRequestb, err := json.Marshal(modifyRequest) - if err != nil { - return err - } - - modifyRequestStr := string(modifyRequestb) - - var resultp *uint16 - err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - return makeProcessError(process, operation, "", err) - } - - logrus.Debugf(title+" succeeded processid=%d", process.processID) - return nil -} - -func (process *process) properties() (*processStatus, error) { - operation := "properties" - title := "HCSShim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) - - var ( - resultp *uint16 - propertiesp *uint16 - ) - err := hcsGetProcessProperties(process.handle, &propertiesp, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - return nil, makeProcessError(process, operation, "", err) - } - - if propertiesp == nil { - return nil, errors.New("Unexpected result from hcsGetProcessProperties, properties should never be nil") - } - propertiesRaw := convertAndFreeCoTaskMemBytes(propertiesp) - - properties := &processStatus{} - if err := json.Unmarshal(propertiesRaw, properties); err != nil { - return nil, err - } - - logrus.Debugf(title+" succeeded processid=%d, properties=%s", process.processID, propertiesRaw) - return properties, nil -} - -// Stdio returns the stdin, stdout, and stderr pipes, respectively. Closing -// these pipes does not close the underlying pipes; it should be possible to -// call this multiple times to get multiple interfaces. -func (process *process) Stdio() (io.WriteCloser, io.ReadCloser, io.ReadCloser, error) { - operation := "Stdio" - title := "HCSShim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) - - var stdIn, stdOut, stdErr syscall.Handle - - if process.cachedPipes == nil { - var ( - processInfo hcsProcessInformation - resultp *uint16 - ) - err := hcsGetProcessInfo(process.handle, &processInfo, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - return nil, nil, nil, makeProcessError(process, operation, "", err) - } - - stdIn, stdOut, stdErr = processInfo.StdInput, processInfo.StdOutput, processInfo.StdError - } else { - // Use cached pipes - stdIn, stdOut, stdErr = process.cachedPipes.stdIn, process.cachedPipes.stdOut, process.cachedPipes.stdErr - - // Invalidate the cache - process.cachedPipes = nil - } - - pipes, err := makeOpenFiles([]syscall.Handle{stdIn, stdOut, stdErr}) - if err != nil { - return nil, nil, nil, err - } - - logrus.Debugf(title+" succeeded processid=%d", process.processID) - return pipes[0], pipes[1], pipes[2], nil -} - -// CloseStdin closes the write side of the stdin pipe so that the process is -// notified on the read side that there is no more data in stdin. 
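A sketch of consuming a process's output through Stdio above; stdin and stderr are ignored for brevity, the returned wrappers can be closed without closing the underlying pipes per the doc comment, and "io"/"os" imports are assumed:

    func streamOutput(p hcsshim.Process) error {
        _, stdout, _, err := p.Stdio() // stdin/stderr ignored for brevity
        if err != nil {
            return err
        }
        go io.Copy(os.Stdout, stdout)
        // Signal EOF on the read side of the process's stdin.
        return p.CloseStdin()
    }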
-func (process *process) CloseStdin() error { - operation := "CloseStdin" - title := "HCSShim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) - - modifyRequest := processModifyRequest{ - Operation: modifyCloseHandle, - CloseHandle: &closeHandle{ - Handle: stdIn, - }, - } - - modifyRequestb, err := json.Marshal(modifyRequest) - if err != nil { - return err - } - - modifyRequestStr := string(modifyRequestb) - - var resultp *uint16 - err = hcsModifyProcess(process.handle, modifyRequestStr, &resultp) - err = processHcsResult(err, resultp) - if err != nil { - return makeProcessError(process, operation, "", err) - } - - logrus.Debugf(title+" succeeded processid=%d", process.processID) - return nil -} - -// Close cleans up any state associated with the process but does not kill -// or wait on it. -func (process *process) Close() error { - operation := "Close" - title := "HCSShim::Process::" + operation - logrus.Debugf(title+" processid=%d", process.processID) - - // Don't double free this - if process.handle == 0 { - return nil - } - - if hcsCallbacksSupported { - if err := process.unregisterCallback(); err != nil { - return makeProcessError(process, operation, "", err) - } - } - - if err := hcsCloseProcess(process.handle); err != nil { - return makeProcessError(process, operation, "", err) - } - - process.handle = 0 - - logrus.Debugf(title+" succeeded processid=%d", process.processID) - return nil -} - -// closeProcess wraps process.Close for use by a finalizer -func closeProcess(process *process) { - process.Close() -} - -func (process *process) registerCallback() error { - context := &notifcationWatcherContext{ - channels: newChannels(), - } - - callbackMapLock.Lock() - callbackNumber := nextCallback - nextCallback++ - callbackMap[callbackNumber] = context - callbackMapLock.Unlock() - - var callbackHandle hcsCallback - err := hcsRegisterProcessCallback(process.handle, notificationWatcherCallback, callbackNumber, &callbackHandle) - if err != nil { - return err - } - context.handle = callbackHandle - process.callbackNumber = callbackNumber - - return nil -} - -func (process *process) unregisterCallback() error { - callbackNumber := process.callbackNumber - - callbackMapLock.RLock() - context := callbackMap[callbackNumber] - callbackMapLock.RUnlock() - - if context == nil { - return nil - } - - handle := context.handle - - if handle == 0 { - return nil - } - - // hcsUnregisterProcessCallback has its own synchronization - // to wait for all callbacks to complete. We must NOT hold the callbackMapLock.
- err := hcsUnregisterProcessCallback(handle) - if err != nil { - return err - } - - closeChannels(context.channels) - - callbackMapLock.Lock() - callbackMap[callbackNumber] = nil - callbackMapLock.Unlock() - - handle = 0 - - return nil -} - -func (e *ProcessError) Error() string { - if e == nil { - return "" - } - - if e.Process == nil { - return "Unexpected nil process for error: " + e.Err.Error() - } - - s := fmt.Sprintf("process %d", e.Process.processID) - - if e.Process.container != nil { - s += " in container " + e.Process.container.id - } - - if e.Operation != "" { - s += " " + e.Operation - } - - if e.Err != nil { - s += fmt.Sprintf(" failed in Win32: %s (0x%x)", e.Err, win32FromError(e.Err)) - } - - return s -} - -func makeProcessError(process *process, operation string, extraInfo string, err error) error { - // Don't wrap errors created in hcsshim - if err == ErrTimeout || - err == ErrUnexpectedProcessAbort || - err == ErrUnexpectedContainerExit || - err == ErrHandleClose || - err == ErrInvalidProcessState || - err == ErrInvalidNotificationType || - err == ErrVmcomputeOperationPending { - return err - } - - processError := &ProcessError{Process: process, Operation: operation, ExtraInfo: extraInfo, Err: err} - logrus.Error(processError) - return processError -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/processimage.go b/vendor/src/github.com/Microsoft/hcsshim/processimage.go deleted file mode 100644 index fadb1b92c5..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/processimage.go +++ /dev/null @@ -1,23 +0,0 @@ -package hcsshim - -import "os" - -// ProcessBaseLayer post-processes a base layer that has had its files extracted. -// The files should have been extracted to \Files. -func ProcessBaseLayer(path string) error { - err := processBaseImage(path) - if err != nil { - return &os.PathError{Op: "ProcessBaseLayer", Path: path, Err: err} - } - return nil -} - -// ProcessUtilityVMImage post-processes a utility VM image that has had its files extracted. -// The files should have been extracted to \Files. -func ProcessUtilityVMImage(path string) error { - err := processUtilityImage(path) - if err != nil { - return &os.PathError{Op: "ProcessUtilityVMImage", Path: path, Err: err} - } - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/resizeconsole.go b/vendor/src/github.com/Microsoft/hcsshim/resizeconsole.go deleted file mode 100644 index d04ce70d85..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/resizeconsole.go +++ /dev/null @@ -1,22 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// ResizeConsoleInComputeSystem updates the height and width of the console -// session for the process with the given id in the container with the given id. 
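ProcessBaseLayer and ProcessUtilityVMImage above are the post-extraction fixup steps for base images. A sketch; the paths are illustrative (the UtilityVM subdirectory is an assumption) and the files must already sit under \Files:

    if err := hcsshim.ProcessBaseLayer(`C:\layers\base`); err != nil {
        // err is an *os.PathError wrapping the HCS failure
    }
    if err := hcsshim.ProcessUtilityVMImage(`C:\layers\base\UtilityVM`); err != nil {
        // likewise
    }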
-func ResizeConsoleInComputeSystem(id string, processid uint32, h, w int) error { - - title := "HCSShim::ResizeConsoleInComputeSystem" - logrus.Debugf(title+" id=%s processid=%d (%d,%d)", id, processid, h, w) - - err := resizeConsoleInComputeSystem(id, processid, uint16(h), uint16(w), 0) - if err != nil { - err = makeErrorf(err, title, "id=%s pid=%d", id, processid) - logrus.Error(err) - return err - } - - logrus.Debugf(title+" succeeded id=%s processid=%d (%d,%d)", id, processid, h, w) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/shutdownterminatecomputesystem.go b/vendor/src/github.com/Microsoft/hcsshim/shutdownterminatecomputesystem.go deleted file mode 100644 index 27ac734bd8..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/shutdownterminatecomputesystem.go +++ /dev/null @@ -1,43 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// TerminateComputeSystem force-terminates a container. -func TerminateComputeSystem(id string, timeout uint32, context string) error { - return shutdownTerminate(false, id, timeout, context) -} - -// ShutdownComputeSystem shuts down a container by requesting a shutdown within -// the container operating system. -func ShutdownComputeSystem(id string, timeout uint32, context string) error { - return shutdownTerminate(true, id, timeout, context) -} - -// shutdownTerminate is a wrapper for ShutdownComputeSystem and TerminateComputeSystem, -// which have very similar calling semantics. -func shutdownTerminate(shutdown bool, id string, timeout uint32, context string) error { - - title := "HCSShim::" - if shutdown { - title = title + "ShutdownComputeSystem" - } else { - title = title + "TerminateComputeSystem" - } - logrus.Debugf(title+" id=%s context=%s", id, context) - - var err error - if shutdown { - err = shutdownComputeSystem(id, timeout) - } else { - err = terminateComputeSystem(id) - } - - if err != nil { - return makeErrorf(err, title, "id=%s context=%s", id, context) - } - - logrus.Debugf(title+" succeeded id=%s context=%s", id, context) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/startcomputesystem.go b/vendor/src/github.com/Microsoft/hcsshim/startcomputesystem.go deleted file mode 100644 index 41a7e676f7..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/startcomputesystem.go +++ /dev/null @@ -1,21 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// StartComputeSystem starts a container that has previously been created via -// CreateComputeSystem. -func StartComputeSystem(id string) error { - - title := "HCSShim::StartComputeSystem" - logrus.Debugf(title+" id=%s", id) - - err := startComputeSystem(id) - if err != nil { - err = makeErrorf(err, title, "id=%s", id) - logrus.Error(err) - return err - } - - logrus.Debugf(title+" succeeded id=%s", id) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/terminateprocess.go b/vendor/src/github.com/Microsoft/hcsshim/terminateprocess.go deleted file mode 100644 index 47880afce1..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/terminateprocess.go +++ /dev/null @@ -1,20 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// TerminateProcessInComputeSystem kills a process in a running container.
-func TerminateProcessInComputeSystem(id string, processid uint32) (err error) { - - title := "HCSShim::TerminateProcessInComputeSystem" - logrus.Debugf(title+" id=%s processid=%d", id, processid) - - err = terminateProcessInComputeSystem(id, processid) - if err != nil { - err = makeErrorf(err, title, "id=%s processid=%d", id, processid) - logrus.Error(err) - return err - } - - logrus.Debugf(title+" succeeded id=%s processid=%d", id, processid) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/unpreparelayer.go b/vendor/src/github.com/Microsoft/hcsshim/unpreparelayer.go deleted file mode 100644 index d0ead0bdda..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/unpreparelayer.go +++ /dev/null @@ -1,27 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// UnprepareLayer disables the filesystem filter for the read-write layer with -// the given id. -func UnprepareLayer(info DriverInfo, layerId string) error { - title := "hcsshim::UnprepareLayer " - logrus.Debugf(title+"flavour %d layerId=%s", info.Flavour, layerId) - - // Convert info to API calling convention - infop, err := convertDriverInfo(info) - if err != nil { - logrus.Error(err) - return err - } - - err = unprepareLayer(&infop, layerId) - if err != nil { - err = makeErrorf(err, title, "layerId=%s flavour=%d", layerId, info.Flavour) - logrus.Error(err) - return err - } - - logrus.Debugf(title+"succeeded flavour %d layerId=%s", info.Flavour, layerId) - return nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/utils.go b/vendor/src/github.com/Microsoft/hcsshim/utils.go deleted file mode 100644 index 694b001977..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/utils.go +++ /dev/null @@ -1,11 +0,0 @@ -package hcsshim - -import ( - "syscall" -) - -var ( - vmcomputedll = syscall.NewLazyDLL("vmcompute.dll") - hcsCallbackAPI = vmcomputedll.NewProc("HcsRegisterComputeSystemCallback") - hcsCallbacksSupported = hcsCallbackAPI.Find() == nil -) diff --git a/vendor/src/github.com/Microsoft/hcsshim/version.go b/vendor/src/github.com/Microsoft/hcsshim/version.go deleted file mode 100644 index ae10c23d42..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/version.go +++ /dev/null @@ -1,7 +0,0 @@ -package hcsshim - -// IsTP4 returns whether the currently running Windows build is at most TP4.
-func IsTP4() bool { - // HNSCall was not present in TP4 - return procHNSCall.Find() != nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/waithelper.go b/vendor/src/github.com/Microsoft/hcsshim/waithelper.go deleted file mode 100644 index 3c3599a372..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/waithelper.go +++ /dev/null @@ -1,126 +0,0 @@ -package hcsshim - -import ( - "github.com/Sirupsen/logrus" - "syscall" - "time" -) - -type waitable interface { - waitTimeoutInternal(timeout uint32) (bool, error) - hcsWait(timeout uint32) (bool, error) -} - -func waitTimeoutHelper(object waitable, timeout time.Duration) (bool, error) { - var millis uint32 - - for totalMillis := uint64(timeout / time.Millisecond); totalMillis > 0; totalMillis = totalMillis - uint64(millis) { - if totalMillis >= syscall.INFINITE { - millis = syscall.INFINITE - 1 - } else { - millis = uint32(totalMillis) - } - - result, err := object.waitTimeoutInternal(millis) - - if err != nil { - return result, err - } - } - return true, nil -} - -func waitTimeoutInternalHelper(object waitable, timeout uint32) (bool, error) { - return object.hcsWait(timeout) -} - -func waitForSingleObject(handle syscall.Handle, timeout uint32) (bool, error) { - s, e := syscall.WaitForSingleObject(handle, timeout) - switch s { - case syscall.WAIT_OBJECT_0: - return true, nil - case syscall.WAIT_TIMEOUT: - return false, nil - default: - return false, e - } -} - -func processAsyncHcsResult(err error, resultp *uint16, callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { - err = processHcsResult(err, resultp) - if err == ErrVmcomputeOperationPending { - return waitForNotification(callbackNumber, expectedNotification, timeout) - } - - return err -} - -func waitForNotification(callbackNumber uintptr, expectedNotification hcsNotification, timeout *time.Duration) error { - callbackMapLock.RLock() - channels := callbackMap[callbackNumber].channels - callbackMapLock.RUnlock() - - expectedChannel := channels[expectedNotification] - if expectedChannel == nil { - logrus.Errorf("unknown notification type in waitForNotification %x", expectedNotification) - return ErrInvalidNotificationType - } - - if timeout != nil { - timer := time.NewTimer(*timeout) - defer timer.Stop() - - select { - case err, ok := <-expectedChannel: - if !ok { - return ErrHandleClose - } - return err - case err, ok := <-channels[hcsNotificationSystemExited]: - if !ok { - return ErrHandleClose - } - // If the expected notification is hcsNotificationSystemExited, either of these - // two select cases may fire. Return the raw error if hcsNotificationSystemExited is expected. - if channels[hcsNotificationSystemExited] == expectedChannel { - return err - } - return ErrUnexpectedContainerExit - case _, ok := <-channels[hcsNotificationServiceDisconnect]: - if !ok { - return ErrHandleClose - } - // hcsNotificationServiceDisconnect should never be an expected notification; - // it does not need the same handling as hcsNotificationSystemExited. - return ErrUnexpectedProcessAbort - case <-timer.C: - return ErrTimeout - } - } - select { - case err, ok := <-expectedChannel: - if !ok { - return ErrHandleClose - } - return err - case err, ok := <-channels[hcsNotificationSystemExited]: - if !ok { - return ErrHandleClose - } - // If the expected notification is hcsNotificationSystemExited, either of these - // two select cases may fire.
Return the raw error if hcsNotificationSystemExited is expected. - if channels[hcsNotificationSystemExited] == expectedChannel { - return err - } - return ErrUnexpectedContainerExit - case _, ok := <-channels[hcsNotificationServiceDisconnect]: - if !ok { - return ErrHandleClose - } - // hcsNotificationServiceDisconnect should never be an expected notification; - // it does not need the same handling as hcsNotificationSystemExited. - return ErrUnexpectedProcessAbort - } -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/waitprocess.go b/vendor/src/github.com/Microsoft/hcsshim/waitprocess.go deleted file mode 100644 index e916140399..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/waitprocess.go +++ /dev/null @@ -1,20 +0,0 @@ -package hcsshim - -import "github.com/Sirupsen/logrus" - -// WaitForProcessInComputeSystem waits for a process ID to terminate and returns -// the exit code, along with any error. -func WaitForProcessInComputeSystem(id string, processid uint32, timeout uint32) (int32, error) { - - title := "HCSShim::WaitForProcessInComputeSystem" - logrus.Debugf(title+" id=%s processid=%d", id, processid) - - var exitCode uint32 - err := waitForProcessInComputeSystem(id, processid, timeout, &exitCode) - if err != nil { - return 0, makeErrorf(err, title, "id=%s", id) - } - - logrus.Debugf(title+" succeeded id=%s processid=%d exitcode=%d", id, processid, exitCode) - return int32(exitCode), nil -} diff --git a/vendor/src/github.com/Microsoft/hcsshim/zhcsshim.go b/vendor/src/github.com/Microsoft/hcsshim/zhcsshim.go deleted file mode 100644 index a1faeaa083..0000000000 --- a/vendor/src/github.com/Microsoft/hcsshim/zhcsshim.go +++ /dev/null @@ -1,1307 +0,0 @@ -// MACHINE GENERATED BY 'go generate' COMMAND; DO NOT EDIT - -package hcsshim - -import "github.com/Microsoft/go-winio" -import "unsafe" -import "syscall" - -var _ unsafe.Pointer - -var ( - modole32 = syscall.NewLazyDLL("ole32.dll") - modvmcompute = syscall.NewLazyDLL("vmcompute.dll") - - procCoTaskMemFree = modole32.NewProc("CoTaskMemFree") - procActivateLayer = modvmcompute.NewProc("ActivateLayer") - procCopyLayer = modvmcompute.NewProc("CopyLayer") - procCreateLayer = modvmcompute.NewProc("CreateLayer") - procCreateSandboxLayer = modvmcompute.NewProc("CreateSandboxLayer") - procExpandSandboxSize = modvmcompute.NewProc("ExpandSandboxSize") - procDeactivateLayer = modvmcompute.NewProc("DeactivateLayer") - procDestroyLayer = modvmcompute.NewProc("DestroyLayer") - procExportLayer = modvmcompute.NewProc("ExportLayer") - procGetLayerMountPath = modvmcompute.NewProc("GetLayerMountPath") - procGetBaseImages = modvmcompute.NewProc("GetBaseImages") - procImportLayer = modvmcompute.NewProc("ImportLayer") - procLayerExists = modvmcompute.NewProc("LayerExists") - procNameToGuid = modvmcompute.NewProc("NameToGuid") - procPrepareLayer = modvmcompute.NewProc("PrepareLayer") - procUnprepareLayer = modvmcompute.NewProc("UnprepareLayer") - procProcessBaseImage = modvmcompute.NewProc("ProcessBaseImage") - procProcessUtilityImage = modvmcompute.NewProc("ProcessUtilityImage") - procImportLayerBegin = modvmcompute.NewProc("ImportLayerBegin") - procImportLayerNext = modvmcompute.NewProc("ImportLayerNext") - procImportLayerWrite = modvmcompute.NewProc("ImportLayerWrite") - procImportLayerEnd = modvmcompute.NewProc("ImportLayerEnd") - procExportLayerBegin = modvmcompute.NewProc("ExportLayerBegin") - procExportLayerNext = modvmcompute.NewProc("ExportLayerNext") - procExportLayerRead = modvmcompute.NewProc("ExportLayerRead") - procExportLayerEnd =
modvmcompute.NewProc("ExportLayerEnd") - procCreateComputeSystem = modvmcompute.NewProc("CreateComputeSystem") - procCreateProcessWithStdHandlesInComputeSystem = modvmcompute.NewProc("CreateProcessWithStdHandlesInComputeSystem") - procResizeConsoleInComputeSystem = modvmcompute.NewProc("ResizeConsoleInComputeSystem") - procShutdownComputeSystem = modvmcompute.NewProc("ShutdownComputeSystem") - procStartComputeSystem = modvmcompute.NewProc("StartComputeSystem") - procTerminateComputeSystem = modvmcompute.NewProc("TerminateComputeSystem") - procTerminateProcessInComputeSystem = modvmcompute.NewProc("TerminateProcessInComputeSystem") - procWaitForProcessInComputeSystem = modvmcompute.NewProc("WaitForProcessInComputeSystem") - procGetComputeSystemProperties = modvmcompute.NewProc("GetComputeSystemProperties") - procHcsEnumerateComputeSystems = modvmcompute.NewProc("HcsEnumerateComputeSystems") - procHcsCreateComputeSystem = modvmcompute.NewProc("HcsCreateComputeSystem") - procHcsOpenComputeSystem = modvmcompute.NewProc("HcsOpenComputeSystem") - procHcsCloseComputeSystem = modvmcompute.NewProc("HcsCloseComputeSystem") - procHcsStartComputeSystem = modvmcompute.NewProc("HcsStartComputeSystem") - procHcsShutdownComputeSystem = modvmcompute.NewProc("HcsShutdownComputeSystem") - procHcsTerminateComputeSystem = modvmcompute.NewProc("HcsTerminateComputeSystem") - procHcsPauseComputeSystem = modvmcompute.NewProc("HcsPauseComputeSystem") - procHcsResumeComputeSystem = modvmcompute.NewProc("HcsResumeComputeSystem") - procHcsGetComputeSystemProperties = modvmcompute.NewProc("HcsGetComputeSystemProperties") - procHcsModifyComputeSystem = modvmcompute.NewProc("HcsModifyComputeSystem") - procHcsCreateComputeSystemWait = modvmcompute.NewProc("HcsCreateComputeSystemWait") - procHcsCreateProcess = modvmcompute.NewProc("HcsCreateProcess") - procHcsOpenProcess = modvmcompute.NewProc("HcsOpenProcess") - procHcsCloseProcess = modvmcompute.NewProc("HcsCloseProcess") - procHcsTerminateProcess = modvmcompute.NewProc("HcsTerminateProcess") - procHcsGetProcessInfo = modvmcompute.NewProc("HcsGetProcessInfo") - procHcsGetProcessProperties = modvmcompute.NewProc("HcsGetProcessProperties") - procHcsModifyProcess = modvmcompute.NewProc("HcsModifyProcess") - procHcsCreateProcessWait = modvmcompute.NewProc("HcsCreateProcessWait") - procHcsGetServiceProperties = modvmcompute.NewProc("HcsGetServiceProperties") - procHcsModifyServiceSettings = modvmcompute.NewProc("HcsModifyServiceSettings") - - procHcsRegisterComputeSystemCallback = modvmcompute.NewProc("HcsRegisterComputeSystemCallback") - procHcsUnregisterComputeSystemCallback = modvmcompute.NewProc("HcsUnregisterComputeSystemCallback") - procHcsRegisterProcessCallback = modvmcompute.NewProc("HcsRegisterProcessCallback") - procHcsUnregisterProcessCallback = modvmcompute.NewProc("HcsUnregisterProcessCallback") - procHNSCall = modvmcompute.NewProc("HNSCall") -) - -func coTaskMemFree(buffer unsafe.Pointer) { - syscall.Syscall(procCoTaskMemFree.Addr(), 1, uintptr(buffer), 0, 0) - return -} - -func activateLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _activateLayer(info, _p0) -} - -func _activateLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procActivateLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procActivateLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - hr = 
syscall.Errno(win32FromHresult(r0)) - } - return -} - -func copyLayer(info *driverInfo, srcId string, dstId string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(srcId) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(dstId) - if hr != nil { - return - } - return _copyLayer(info, _p0, _p1, descriptors) -} - -func _copyLayer(info *driverInfo, srcId *uint16, dstId *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p2 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p2 = &descriptors[0] - } - if hr = procCopyLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procCopyLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(srcId)), uintptr(unsafe.Pointer(dstId)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func createLayer(info *driverInfo, id string, parent string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(parent) - if hr != nil { - return - } - return _createLayer(info, _p0, _p1) -} - -func _createLayer(info *driverInfo, id *uint16, parent *uint16) (hr error) { - if hr = procCreateLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procCreateLayer.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func createSandboxLayer(info *driverInfo, id string, parent string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(parent) - if hr != nil { - return - } - return _createSandboxLayer(info, _p0, _p1, descriptors) -} - -func _createSandboxLayer(info *driverInfo, id *uint16, parent *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p2 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p2 = &descriptors[0] - } - if hr = procCreateSandboxLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procCreateSandboxLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(parent)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func expandSandboxSize(info *driverInfo, id string, size uint64) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _expandSandboxSize(info, _p0, size) -} - -func _expandSandboxSize(info *driverInfo, id *uint16, size uint64) (hr error) { - if hr = procExpandSandboxSize.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procExpandSandboxSize.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(size)) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func deactivateLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _deactivateLayer(info, _p0) -} - -func _deactivateLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procDeactivateLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procDeactivateLayer.Addr(), 2, 
uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func destroyLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _destroyLayer(info, _p0) -} - -func _destroyLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procDestroyLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procDestroyLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func exportLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _exportLayer(info, _p0, _p1, descriptors) -} - -func _exportLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p2 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p2 = &descriptors[0] - } - if hr = procExportLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procExportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func getLayerMountPath(info *driverInfo, id string, length *uintptr, buffer *uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _getLayerMountPath(info, _p0, length, buffer) -} - -func _getLayerMountPath(info *driverInfo, id *uint16, length *uintptr, buffer *uint16) (hr error) { - if hr = procGetLayerMountPath.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procGetLayerMountPath.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(length)), uintptr(unsafe.Pointer(buffer)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func getBaseImages(buffer **uint16) (hr error) { - if hr = procGetBaseImages.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procGetBaseImages.Addr(), 1, uintptr(unsafe.Pointer(buffer)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func importLayer(info *driverInfo, id string, path string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _importLayer(info, _p0, _p1, descriptors) -} - -func _importLayer(info *driverInfo, id *uint16, path *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p2 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p2 = &descriptors[0] - } - if hr = procImportLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procImportLayer.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(path)), uintptr(unsafe.Pointer(_p2)), uintptr(len(descriptors)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func layerExists(info *driverInfo, id string, exists *uint32) (hr error) { - var _p0 *uint16 - _p0, hr = 
syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _layerExists(info, _p0, exists) -} - -func _layerExists(info *driverInfo, id *uint16, exists *uint32) (hr error) { - if hr = procLayerExists.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procLayerExists.Addr(), 3, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(exists))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func nameToGuid(name string, guid *GUID) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(name) - if hr != nil { - return - } - return _nameToGuid(_p0, guid) -} - -func _nameToGuid(name *uint16, guid *GUID) (hr error) { - if hr = procNameToGuid.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procNameToGuid.Addr(), 2, uintptr(unsafe.Pointer(name)), uintptr(unsafe.Pointer(guid)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func prepareLayer(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _prepareLayer(info, _p0, descriptors) -} - -func _prepareLayer(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR) (hr error) { - var _p1 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p1 = &descriptors[0] - } - if hr = procPrepareLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procPrepareLayer.Addr(), 4, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func unprepareLayer(info *driverInfo, id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _unprepareLayer(info, _p0) -} - -func _unprepareLayer(info *driverInfo, id *uint16) (hr error) { - if hr = procUnprepareLayer.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procUnprepareLayer.Addr(), 2, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func processBaseImage(path string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _processBaseImage(_p0) -} - -func _processBaseImage(path *uint16) (hr error) { - if hr = procProcessBaseImage.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procProcessBaseImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func processUtilityImage(path string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - return _processUtilityImage(_p0) -} - -func _processUtilityImage(path *uint16) (hr error) { - if hr = procProcessUtilityImage.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procProcessUtilityImage.Addr(), 1, uintptr(unsafe.Pointer(path)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func importLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _importLayerBegin(info, _p0, descriptors, context) -} - -func _importLayerBegin(info *driverInfo, id *uint16, 
descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { - var _p1 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p1 = &descriptors[0] - } - if hr = procImportLayerBegin.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procImportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func importLayerNext(context uintptr, fileName string, fileInfo *winio.FileBasicInfo) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(fileName) - if hr != nil { - return - } - return _importLayerNext(context, _p0, fileInfo) -} - -func _importLayerNext(context uintptr, fileName *uint16, fileInfo *winio.FileBasicInfo) (hr error) { - if hr = procImportLayerNext.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procImportLayerNext.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func importLayerWrite(context uintptr, buffer []byte) (hr error) { - var _p0 *byte - if len(buffer) > 0 { - _p0 = &buffer[0] - } - if hr = procImportLayerWrite.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procImportLayerWrite.Addr(), 3, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func importLayerEnd(context uintptr) (hr error) { - if hr = procImportLayerEnd.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procImportLayerEnd.Addr(), 1, uintptr(context), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func exportLayerBegin(info *driverInfo, id string, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _exportLayerBegin(info, _p0, descriptors, context) -} - -func _exportLayerBegin(info *driverInfo, id *uint16, descriptors []WC_LAYER_DESCRIPTOR, context *uintptr) (hr error) { - var _p1 *WC_LAYER_DESCRIPTOR - if len(descriptors) > 0 { - _p1 = &descriptors[0] - } - if hr = procExportLayerBegin.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procExportLayerBegin.Addr(), 5, uintptr(unsafe.Pointer(info)), uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(_p1)), uintptr(len(descriptors)), uintptr(unsafe.Pointer(context)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func exportLayerNext(context uintptr, fileName **uint16, fileInfo *winio.FileBasicInfo, fileSize *int64, deleted *uint32) (hr error) { - if hr = procExportLayerNext.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procExportLayerNext.Addr(), 5, uintptr(context), uintptr(unsafe.Pointer(fileName)), uintptr(unsafe.Pointer(fileInfo)), uintptr(unsafe.Pointer(fileSize)), uintptr(unsafe.Pointer(deleted)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func exportLayerRead(context uintptr, buffer []byte, bytesRead *uint32) (hr error) { - var _p0 *byte - if len(buffer) > 0 { - _p0 = &buffer[0] - } - if hr = procExportLayerRead.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procExportLayerRead.Addr(), 4, uintptr(context), uintptr(unsafe.Pointer(_p0)), uintptr(len(buffer)), 
uintptr(unsafe.Pointer(bytesRead)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func exportLayerEnd(context uintptr) (hr error) { - if hr = procExportLayerEnd.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procExportLayerEnd.Addr(), 1, uintptr(context), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func createComputeSystem(id string, configuration string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(configuration) - if hr != nil { - return - } - return _createComputeSystem(_p0, _p1) -} - -func _createComputeSystem(id *uint16, configuration *uint16) (hr error) { - if hr = procCreateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procCreateComputeSystem.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func createProcessWithStdHandlesInComputeSystem(id string, paramsJson string, pid *uint32, stdin *syscall.Handle, stdout *syscall.Handle, stderr *syscall.Handle) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(paramsJson) - if hr != nil { - return - } - return _createProcessWithStdHandlesInComputeSystem(_p0, _p1, pid, stdin, stdout, stderr) -} - -func _createProcessWithStdHandlesInComputeSystem(id *uint16, paramsJson *uint16, pid *uint32, stdin *syscall.Handle, stdout *syscall.Handle, stderr *syscall.Handle) (hr error) { - if hr = procCreateProcessWithStdHandlesInComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procCreateProcessWithStdHandlesInComputeSystem.Addr(), 6, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(paramsJson)), uintptr(unsafe.Pointer(pid)), uintptr(unsafe.Pointer(stdin)), uintptr(unsafe.Pointer(stdout)), uintptr(unsafe.Pointer(stderr))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func resizeConsoleInComputeSystem(id string, pid uint32, height uint16, width uint16, flags uint32) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _resizeConsoleInComputeSystem(_p0, pid, height, width, flags) -} - -func _resizeConsoleInComputeSystem(id *uint16, pid uint32, height uint16, width uint16, flags uint32) (hr error) { - if hr = procResizeConsoleInComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procResizeConsoleInComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(pid), uintptr(height), uintptr(width), uintptr(flags), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func shutdownComputeSystem(id string, timeout uint32) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _shutdownComputeSystem(_p0, timeout) -} - -func _shutdownComputeSystem(id *uint16, timeout uint32) (hr error) { - if hr = procShutdownComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procShutdownComputeSystem.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(timeout), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func startComputeSystem(id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - 
if hr != nil { - return - } - return _startComputeSystem(_p0) -} - -func _startComputeSystem(id *uint16) (hr error) { - if hr = procStartComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procStartComputeSystem.Addr(), 1, uintptr(unsafe.Pointer(id)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func terminateComputeSystem(id string) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _terminateComputeSystem(_p0) -} - -func _terminateComputeSystem(id *uint16) (hr error) { - if hr = procTerminateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procTerminateComputeSystem.Addr(), 1, uintptr(unsafe.Pointer(id)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func terminateProcessInComputeSystem(id string, pid uint32) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _terminateProcessInComputeSystem(_p0, pid) -} - -func _terminateProcessInComputeSystem(id *uint16, pid uint32) (hr error) { - if hr = procTerminateProcessInComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procTerminateProcessInComputeSystem.Addr(), 2, uintptr(unsafe.Pointer(id)), uintptr(pid), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func waitForProcessInComputeSystem(id string, pid uint32, timeout uint32, exitCode *uint32) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _waitForProcessInComputeSystem(_p0, pid, timeout, exitCode) -} - -func _waitForProcessInComputeSystem(id *uint16, pid uint32, timeout uint32, exitCode *uint32) (hr error) { - if hr = procWaitForProcessInComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procWaitForProcessInComputeSystem.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(pid), uintptr(timeout), uintptr(unsafe.Pointer(exitCode)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func getComputeSystemProperties(id string, flags uint32, properties **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _getComputeSystemProperties(_p0, flags, properties) -} - -func _getComputeSystemProperties(id *uint16, flags uint32, properties **uint16) (hr error) { - if hr = procGetComputeSystemProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procGetComputeSystemProperties.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(flags), uintptr(unsafe.Pointer(properties))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsEnumerateComputeSystems(query string, computeSystems **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(query) - if hr != nil { - return - } - return _hcsEnumerateComputeSystems(_p0, computeSystems, result) -} - -func _hcsEnumerateComputeSystems(query *uint16, computeSystems **uint16, result **uint16) (hr error) { - if hr = procHcsEnumerateComputeSystems.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsEnumerateComputeSystems.Addr(), 3, uintptr(unsafe.Pointer(query)), uintptr(unsafe.Pointer(computeSystems)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsCreateComputeSystem(id 
string, configuration string, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(configuration) - if hr != nil { - return - } - return _hcsCreateComputeSystem(_p0, _p1, identity, computeSystem, result) -} - -func _hcsCreateComputeSystem(id *uint16, configuration *uint16, identity syscall.Handle, computeSystem *hcsSystem, result **uint16) (hr error) { - if hr = procHcsCreateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 5, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(identity), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsOpenComputeSystem(id string, computeSystem *hcsSystem, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - return _hcsOpenComputeSystem(_p0, computeSystem, result) -} - -func _hcsOpenComputeSystem(id *uint16, computeSystem *hcsSystem, result **uint16) (hr error) { - if hr = procHcsOpenComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsOpenComputeSystem.Addr(), 3, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsCloseComputeSystem(computeSystem hcsSystem) (hr error) { - if hr = procHcsCloseComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsCloseComputeSystem.Addr(), 1, uintptr(computeSystem), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsStartComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsStartComputeSystem(computeSystem, _p0, result) -} - -func _hcsStartComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsStartComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsShutdownComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsShutdownComputeSystem(computeSystem, _p0, result) -} - -func _hcsShutdownComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsShutdownComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsTerminateComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsTerminateComputeSystem(computeSystem, _p0, result) -} - -func 
_hcsTerminateComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsTerminateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsPauseComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsPauseComputeSystem(computeSystem, _p0, result) -} - -func _hcsPauseComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsPauseComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsResumeComputeSystem(computeSystem hcsSystem, options string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(options) - if hr != nil { - return - } - return _hcsResumeComputeSystem(computeSystem, _p0, result) -} - -func _hcsResumeComputeSystem(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsResumeComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(propertyQuery) - if hr != nil { - return - } - return _hcsGetComputeSystemProperties(computeSystem, _p0, properties, result) -} - -func _hcsGetComputeSystemProperties(computeSystem hcsSystem, propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcsGetComputeSystemProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsGetComputeSystemProperties.Addr(), 4, uintptr(computeSystem), uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsModifyComputeSystem(computeSystem hcsSystem, configuration string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(configuration) - if hr != nil { - return - } - return _hcsModifyComputeSystem(computeSystem, _p0, result) -} - -func _hcsModifyComputeSystem(computeSystem hcsSystem, configuration *uint16, result **uint16) (hr error) { - if hr = procHcsModifyComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsModifyComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsCreateComputeSystemWait(computeSystem hcsSystem, exitEvent *syscall.Handle, result **uint16) (hr error) { - if hr = procHcsCreateComputeSystemWait.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsCreateComputeSystemWait.Addr(), 3, 
uintptr(computeSystem), uintptr(unsafe.Pointer(exitEvent)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsCreateProcess(computeSystem hcsSystem, processParameters string, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(processParameters) - if hr != nil { - return - } - return _hcsCreateProcess(computeSystem, _p0, processInformation, process, result) -} - -func _hcsCreateProcess(computeSystem hcsSystem, processParameters *uint16, processInformation *hcsProcessInformation, process *hcsProcess, result **uint16) (hr error) { - if hr = procHcsCreateProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsCreateProcess.Addr(), 5, uintptr(computeSystem), uintptr(unsafe.Pointer(processParameters)), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsOpenProcess(computeSystem hcsSystem, pid uint32, process *hcsProcess, result **uint16) (hr error) { - if hr = procHcsOpenProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsOpenProcess.Addr(), 4, uintptr(computeSystem), uintptr(pid), uintptr(unsafe.Pointer(process)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsCloseProcess(process hcsProcess) (hr error) { - if hr = procHcsCloseProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsCloseProcess.Addr(), 1, uintptr(process), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsTerminateProcess(process hcsProcess, result **uint16) (hr error) { - if hr = procHcsTerminateProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsTerminateProcess.Addr(), 2, uintptr(process), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsGetProcessInfo(process hcsProcess, processInformation *hcsProcessInformation, result **uint16) (hr error) { - if hr = procHcsGetProcessInfo.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetProcessInfo.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processInformation)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsGetProcessProperties(process hcsProcess, processProperties **uint16, result **uint16) (hr error) { - if hr = procHcsGetProcessProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetProcessProperties.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(processProperties)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsModifyProcess(process hcsProcess, settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcsModifyProcess(process, _p0, result) -} - -func _hcsModifyProcess(process hcsProcess, settings *uint16, result **uint16) (hr error) { - if hr = procHcsModifyProcess.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsModifyProcess.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if 
int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsCreateProcessWait(process hcsProcess, settings *syscall.Handle, result **uint16) (hr error) { - if hr = procHcsCreateProcessWait.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsCreateProcessWait.Addr(), 3, uintptr(process), uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsGetServiceProperties(propertyQuery string, properties **uint16, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(propertyQuery) - if hr != nil { - return - } - return _hcsGetServiceProperties(_p0, properties, result) -} - -func _hcsGetServiceProperties(propertyQuery *uint16, properties **uint16, result **uint16) (hr error) { - if hr = procHcsGetServiceProperties.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsGetServiceProperties.Addr(), 3, uintptr(unsafe.Pointer(propertyQuery)), uintptr(unsafe.Pointer(properties)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsModifyServiceSettings(settings string, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(settings) - if hr != nil { - return - } - return _hcsModifyServiceSettings(_p0, result) -} - -func _hcsModifyServiceSettings(settings *uint16, result **uint16) (hr error) { - if hr = procHcsModifyServiceSettings.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsModifyServiceSettings.Addr(), 2, uintptr(unsafe.Pointer(settings)), uintptr(unsafe.Pointer(result)), 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsCreateComputeSystemTP5(id string, configuration string, computeSystem *hcsSystem, result **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(id) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(configuration) - if hr != nil { - return - } - return _hcsCreateComputeSystemTP5(_p0, _p1, computeSystem, result) -} - -func _hcsCreateComputeSystemTP5(id *uint16, configuration *uint16, computeSystem *hcsSystem, result **uint16) (hr error) { - if hr = procHcsCreateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsCreateComputeSystem.Addr(), 4, uintptr(unsafe.Pointer(id)), uintptr(unsafe.Pointer(configuration)), uintptr(unsafe.Pointer(computeSystem)), uintptr(unsafe.Pointer(result)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsStartComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsStartComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsStartComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsShutdownComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsShutdownComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsShutdownComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsTerminateComputeSystemTP5(computeSystem 
hcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsTerminateComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsTerminateComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsPauseComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsPauseComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsPauseComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsResumeComputeSystemTP5(computeSystem hcsSystem, options *uint16, result **uint16) (hr error) { - if hr = procHcsResumeComputeSystem.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsResumeComputeSystem.Addr(), 3, uintptr(computeSystem), uintptr(unsafe.Pointer(options)), uintptr(unsafe.Pointer(result))) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsRegisterComputeSystemCallback(computeSystem hcsSystem, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) { - if hr = procHcsRegisterComputeSystemCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsRegisterComputeSystemCallback.Addr(), 4, uintptr(computeSystem), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsUnregisterComputeSystemCallback(callbackHandle hcsCallback) (hr error) { - if hr = procHcsUnregisterComputeSystemCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsUnregisterComputeSystemCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsRegisterProcessCallback(process hcsProcess, callback uintptr, context uintptr, callbackHandle *hcsCallback) (hr error) { - if hr = procHcsRegisterProcessCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHcsRegisterProcessCallback.Addr(), 4, uintptr(process), uintptr(callback), uintptr(context), uintptr(unsafe.Pointer(callbackHandle)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func hcsUnregisterProcessCallback(callbackHandle hcsCallback) (hr error) { - if hr = procHcsUnregisterProcessCallback.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall(procHcsUnregisterProcessCallback.Addr(), 1, uintptr(callbackHandle), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} - -func _hnsCall(method string, path string, object string, response **uint16) (hr error) { - var _p0 *uint16 - _p0, hr = syscall.UTF16PtrFromString(method) - if hr != nil { - return - } - var _p1 *uint16 - _p1, hr = syscall.UTF16PtrFromString(path) - if hr != nil { - return - } - var _p2 *uint16 - _p2, hr = syscall.UTF16PtrFromString(object) - if hr != nil { - return - } - return __hnsCall(_p0, _p1, _p2, response) -} - -func __hnsCall(method *uint16, path *uint16, object *uint16, response **uint16) (hr error) { - if hr = procHNSCall.Find(); hr != nil { - return - } - r0, _, _ := syscall.Syscall6(procHNSCall.Addr(), 4, uintptr(unsafe.Pointer(method)), uintptr(unsafe.Pointer(path)), 
uintptr(unsafe.Pointer(object)), uintptr(unsafe.Pointer(response)), 0, 0) - if int32(r0) < 0 { - hr = syscall.Errno(win32FromHresult(r0)) - } - return -} diff --git a/vendor/src/github.com/RackSec/srslog/.gitignore b/vendor/src/github.com/RackSec/srslog/.gitignore deleted file mode 100644 index ebf0f2e4e3..0000000000 --- a/vendor/src/github.com/RackSec/srslog/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.cover diff --git a/vendor/src/github.com/RackSec/srslog/.travis.yml b/vendor/src/github.com/RackSec/srslog/.travis.yml deleted file mode 100644 index 4e5c4f0753..0000000000 --- a/vendor/src/github.com/RackSec/srslog/.travis.yml +++ /dev/null @@ -1,18 +0,0 @@ -sudo: required -dist: trusty -group: edge -language: go -go: -- 1.5 -before_install: - - pip install --user codecov -script: -- | - go get ./... - go test -v -coverprofile=coverage.txt -covermode=atomic - go vet -after_success: - - codecov -notifications: - slack: - secure: dtDue9gP6CRR1jYjEf6raXXFak3QKGcCFvCf5mfvv5XScdpmc3udwgqc5TdyjC0goaC9OK/4jTcCD30dYZm/u6ux3E9mo3xwMl2xRLHx76p5r9rSQtloH19BDwA2+A+bpDfFQVz05k2YXuTiGSvNMMdwzx+Dr294Sl/z43RFB4+b9/R/6LlFpRW89IwftvpLAFnBy4K/ZcspQzKM+rQfQTL5Kk+iZ/KBsuR/VziDq6MoJ8t43i4ee8vwS06vFBKDbUiZ4FIZpLgc2RAL5qso5aWRKYXL6waXfoKHZWKPe0w4+9IY1rDJxG1jEb7YGgcbLaF9xzPRRs2b2yO/c87FKpkh6PDgYHfLjpgXotCoojZrL4p1x6MI1ldJr3NhARGPxS9r4liB9n6Y5nD+ErXi1IMf55fuUHcPY27Jc0ySeLFeM6cIWJ8OhFejCgGw6a5DnnmJo0PqopsaBDHhadpLejT1+K6bL2iGkT4SLcVNuRGLs+VyuNf1+5XpkWZvy32vquO7SZOngLLBv+GIem+t3fWm0Z9s/0i1uRCQei1iUutlYjoV/LBd35H2rhob4B5phIuJin9kb0zbHf6HnaoN0CtN8r0d8G5CZiInVlG5Xcid5Byb4dddf5U2EJTDuCMVyyiM7tcnfjqw9UbVYNxtYM9SzcqIq+uVqM8pYL9xSec= diff --git a/vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md b/vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md deleted file mode 100644 index 18ac49fc75..0000000000 --- a/vendor/src/github.com/RackSec/srslog/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,50 +0,0 @@ -# Contributor Code of Conduct - -As contributors and maintainers of this project, and in the interest of -fostering an open and welcoming community, we pledge to respect all people who -contribute through reporting issues, posting feature requests, updating -documentation, submitting pull requests or patches, and other activities. - -We are committed to making participation in this project a harassment-free -experience for everyone, regardless of level of experience, gender, gender -identity and expression, sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. - -Examples of unacceptable behavior by participants include: - -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, such as physical or electronic - addresses, without explicit permission -* Other unethical or unprofessional conduct - -Project maintainers have the right and responsibility to remove, edit, or -reject comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct, or to ban temporarily or -permanently any contributor for other behaviors that they deem inappropriate, -threatening, offensive, or harmful. - -By adopting this Code of Conduct, project maintainers commit themselves to -fairly and consistently applying these principles to every aspect of managing -this project. Project maintainers who do not follow or enforce the Code of -Conduct may be permanently removed from the project team. 
- -This Code of Conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior may be -reported by contacting a project maintainer at [sirsean@gmail.com]. All -complaints will be reviewed and investigated and will result in a response that -is deemed necessary and appropriate to the circumstances. Maintainers are -obligated to maintain confidentiality with regard to the reporter of an -incident. - - -This Code of Conduct is adapted from the [Contributor Covenant][homepage], -version 1.3.0, available at -[http://contributor-covenant.org/version/1/3/0/][version] - -[homepage]: http://contributor-covenant.org -[version]: http://contributor-covenant.org/version/1/3/0/ diff --git a/vendor/src/github.com/RackSec/srslog/LICENSE b/vendor/src/github.com/RackSec/srslog/LICENSE deleted file mode 100644 index 9269338fbb..0000000000 --- a/vendor/src/github.com/RackSec/srslog/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2015 Rackspace. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/RackSec/srslog/README.md b/vendor/src/github.com/RackSec/srslog/README.md deleted file mode 100644 index 1ae1fd4ef8..0000000000 --- a/vendor/src/github.com/RackSec/srslog/README.md +++ /dev/null @@ -1,131 +0,0 @@ -[![Build Status](https://travis-ci.org/RackSec/srslog.svg?branch=master)](https://travis-ci.org/RackSec/srslog) - -# srslog - -Go has a `syslog` package in the standard library, but it has the following -shortcomings: - -1. It doesn't have TLS support -2. [According to bradfitz on the Go team, it is no longer being maintained.](https://github.com/golang/go/issues/13449#issuecomment-161204716) - -I agree that it doesn't need to be in the standard library. So, I've -followed Brad's suggestion and have made a separate project to handle syslog. - -This code was taken directly from the Go project as a base to start from. - -However, this _does_ have TLS support. - -# Usage - -Basic usage retains the same interface as the original `syslog` package. 
We -only added to the interface where required to support new functionality. - -Switch from the standard library: - -``` -import ( - //"log/syslog" - syslog "github.com/RackSec/srslog" -) -``` - -You can still use it for local syslog: - -``` -w, err := syslog.Dial("", "", syslog.LOG_ERR, "testtag") -``` - -Or to unencrypted UDP: - -``` -w, err := syslog.Dial("udp", "192.168.0.50:514", syslog.LOG_ERR, "testtag") -``` - -Or to unencrypted TCP: - -``` -w, err := syslog.Dial("tcp", "192.168.0.51:514", syslog.LOG_ERR, "testtag") -``` - -But now you can also send messages via TLS-encrypted TCP: - -``` -w, err := syslog.DialWithTLSCertPath("tcp+tls", "192.168.0.52:514", syslog.LOG_ERR, "testtag", "/path/to/servercert.pem") -``` - -And if you need more control over your TLS configuration: - -``` -pool := x509.NewCertPool() -serverCert, err := ioutil.ReadFile("/path/to/servercert.pem") -if err != nil { - return nil, err -} -pool.AppendCertsFromPEM(serverCert) -config := tls.Config{ - RootCAs: pool, -} - -w, err := DialWithTLSConfig(network, raddr, priority, tag, &config) -``` - -(Note that in both TLS cases, this uses a self-signed certificate, where the -remote syslog server has the keypair and the client has only the public key.) - -And then to write log messages, continue like so: - -``` -if err != nil { - log.Fatal("failed to connect to syslog:", err) -} -defer w.Close() - -w.Alert("this is an alert") -w.Crit("this is critical") -w.Err("this is an error") -w.Warning("this is a warning") -w.Notice("this is a notice") -w.Info("this is info") -w.Debug("this is debug") -w.Write([]byte("these are some bytes")) -``` - -# Generating TLS Certificates - -We've provided a script that you can use to generate a self-signed keypair: - -``` -pip install cryptography -python script/gen-certs.py -``` - -That outputs the public key and private key to standard out. Put those into -`.pem` files. (And don't put them into any source control. The certificate in -the `test` directory is used by the unit tests, and please do not actually use -it anywhere else.) - -# Running Tests - -Run the tests as usual: - -``` -go test -``` - -But we've also provided a test coverage script that will show you which -lines of code are not covered: - -``` -script/coverage --html -``` - -That will open a new browser tab showing coverage information. - -# License - -This project uses the New BSD License, the same as the Go project itself. - -# Code of Conduct - -Please note that this project is released with a Contributor Code of Conduct. -By participating in this project you agree to abide by its terms. diff --git a/vendor/src/github.com/RackSec/srslog/constants.go b/vendor/src/github.com/RackSec/srslog/constants.go deleted file mode 100644 index 600801ee84..0000000000 --- a/vendor/src/github.com/RackSec/srslog/constants.go +++ /dev/null @@ -1,68 +0,0 @@ -package srslog - -import ( - "errors" -) - -// Priority is a combination of the syslog facility and -// severity. For example, LOG_ALERT | LOG_FTP sends an alert severity -// message from the FTP facility. The default severity is LOG_EMERG; -// the default facility is LOG_KERN. -type Priority int - -const severityMask = 0x07 -const facilityMask = 0xf8 - -const ( - // Severity. - - // From /usr/include/sys/syslog.h. - // These are the same on Linux, BSD, and OS X. - LOG_EMERG Priority = iota - LOG_ALERT - LOG_CRIT - LOG_ERR - LOG_WARNING - LOG_NOTICE - LOG_INFO - LOG_DEBUG -) - -const ( - // Facility. - - // From /usr/include/sys/syslog.h.
- // These are the same up to LOG_FTP on Linux, BSD, and OS X. - LOG_KERN Priority = iota << 3 - LOG_USER - LOG_MAIL - LOG_DAEMON - LOG_AUTH - LOG_SYSLOG - LOG_LPR - LOG_NEWS - LOG_UUCP - LOG_CRON - LOG_AUTHPRIV - LOG_FTP - _ // unused - _ // unused - _ // unused - _ // unused - LOG_LOCAL0 - LOG_LOCAL1 - LOG_LOCAL2 - LOG_LOCAL3 - LOG_LOCAL4 - LOG_LOCAL5 - LOG_LOCAL6 - LOG_LOCAL7 -) - -func validatePriority(p Priority) error { - if p < 0 || p > LOG_LOCAL7|LOG_DEBUG { - return errors.New("log/syslog: invalid priority") - } else { - return nil - } -} diff --git a/vendor/src/github.com/RackSec/srslog/dialer.go b/vendor/src/github.com/RackSec/srslog/dialer.go deleted file mode 100644 index 47a7b2beaf..0000000000 --- a/vendor/src/github.com/RackSec/srslog/dialer.go +++ /dev/null @@ -1,87 +0,0 @@ -package srslog - -import ( - "crypto/tls" - "net" -) - -// dialerFunctionWrapper is a simple object that consists of a dialer function -// and its name. This is primarily for testing, so we can make sure that the -// getDialer method returns the correct dialer function. However, if you ever -// find that you need to check which dialer function you have, this would also -// be useful for you without having to use reflection. -type dialerFunctionWrapper struct { - Name string - Dialer func() (serverConn, string, error) -} - -// Call the wrapped dialer function and return its return values. -func (df dialerFunctionWrapper) Call() (serverConn, string, error) { - return df.Dialer() -} - -// getDialer returns a "dialer" function that can be called to connect to a -// syslog server. -// -// Each dialer function is responsible for dialing the remote host and returns -// a serverConn, the hostname (or a default if the Writer has not specified a -// hostname), and an error in case dialing fails. -// -// The reason for separate dialers is that different network types may need -// to dial their connection differently, yet still provide a net.Conn interface -// that you can use once they have dialed. Rather than an increasingly long -// conditional, we have a map of network -> dialer function (with a sane default -// value), and adding a new network type is as easy as writing the dialer -// function and adding it to the map. -func (w *Writer) getDialer() dialerFunctionWrapper { - dialers := map[string]dialerFunctionWrapper{ - "": dialerFunctionWrapper{"unixDialer", w.unixDialer}, - "tcp+tls": dialerFunctionWrapper{"tlsDialer", w.tlsDialer}, - } - dialer, ok := dialers[w.network] - if !ok { - dialer = dialerFunctionWrapper{"basicDialer", w.basicDialer} - } - return dialer -} - -// unixDialer uses the unixSyslog method to open a connection to the syslog -// daemon running on the local machine. -func (w *Writer) unixDialer() (serverConn, string, error) { - sc, err := unixSyslog() - hostname := w.hostname - if hostname == "" { - hostname = "localhost" - } - return sc, hostname, err -} - -// tlsDialer connects to TLS over TCP, and is used for the "tcp+tls" network -// type. -func (w *Writer) tlsDialer() (serverConn, string, error) { - c, err := tls.Dial("tcp", w.raddr, w.tlsConfig) - var sc serverConn - hostname := w.hostname - if err == nil { - sc = &netConn{conn: c} - if hostname == "" { - hostname = c.LocalAddr().String() - } - } - return sc, hostname, err -} - -// basicDialer is the most common dialer for syslog, and supports both TCP and -// UDP connections. 
-func (w *Writer) basicDialer() (serverConn, string, error) { - c, err := net.Dial(w.network, w.raddr) - var sc serverConn - hostname := w.hostname - if err == nil { - sc = &netConn{conn: c} - if hostname == "" { - hostname = c.LocalAddr().String() - } - } - return sc, hostname, err -} diff --git a/vendor/src/github.com/RackSec/srslog/formatter.go b/vendor/src/github.com/RackSec/srslog/formatter.go deleted file mode 100644 index 7852ad37e4..0000000000 --- a/vendor/src/github.com/RackSec/srslog/formatter.go +++ /dev/null @@ -1,48 +0,0 @@ -package srslog - -import ( - "fmt" - "os" - "time" -) - -// Formatter is a type of function that takes the constituent parts of a -// syslog message and returns a formatted string. A different Formatter is -// defined for each different syslog protocol we support. -type Formatter func(p Priority, hostname, tag, content string) string - -// DefaultFormatter is the original format supported by the Go syslog package, -// and is a non-compliant amalgamation of 3164 and 5424 that is intended to -// maximize compatibility. -func DefaultFormatter(p Priority, hostname, tag, content string) string { - timestamp := time.Now().Format(time.RFC3339) - msg := fmt.Sprintf("<%d> %s %s %s[%d]: %s", - p, timestamp, hostname, tag, os.Getpid(), content) - return msg -} - -// UnixFormatter omits the hostname, because it is only used locally. -func UnixFormatter(p Priority, hostname, tag, content string) string { - timestamp := time.Now().Format(time.Stamp) - msg := fmt.Sprintf("<%d>%s %s[%d]: %s", - p, timestamp, tag, os.Getpid(), content) - return msg -} - -// RFC3164Formatter provides an RFC 3164 compliant message. -func RFC3164Formatter(p Priority, hostname, tag, content string) string { - timestamp := time.Now().Format(time.Stamp) - msg := fmt.Sprintf("<%d>%s %s %s[%d]: %s", - p, timestamp, hostname, tag, os.Getpid(), content) - return msg -} - -// RFC5424Formatter provides an RFC 5424 compliant message. -func RFC5424Formatter(p Priority, hostname, tag, content string) string { - timestamp := time.Now().Format(time.RFC3339) - pid := os.Getpid() - appName := os.Args[0] - msg := fmt.Sprintf("<%d>%d %s %s %s %d %s %s", - p, 1, timestamp, hostname, appName, pid, tag, content) - return msg -} diff --git a/vendor/src/github.com/RackSec/srslog/framer.go b/vendor/src/github.com/RackSec/srslog/framer.go deleted file mode 100644 index ab46f0de74..0000000000 --- a/vendor/src/github.com/RackSec/srslog/framer.go +++ /dev/null @@ -1,24 +0,0 @@ -package srslog - -import ( - "fmt" -) - -// Framer is a type of function that takes an input string (typically an -// already-formatted syslog message) and applies "message framing" to it. We -// have different framers because different versions of the syslog protocol -// and its transport requirements define different framing behavior. -type Framer func(in string) string - -// DefaultFramer does nothing, since there is no framing to apply. This is -// the original behavior of the Go syslog package, and is also typically used -// for UDP syslog. -func DefaultFramer(in string) string { - return in -} - -// RFC5425MessageLengthFramer prepends the message length to the front of the -// provided message, as defined in RFC 5425.
-func RFC5425MessageLengthFramer(in string) string { - return fmt.Sprintf("%d %s", len(in), in) -} diff --git a/vendor/src/github.com/RackSec/srslog/net_conn.go b/vendor/src/github.com/RackSec/srslog/net_conn.go deleted file mode 100644 index 75e4c3ca1c..0000000000 --- a/vendor/src/github.com/RackSec/srslog/net_conn.go +++ /dev/null @@ -1,30 +0,0 @@ -package srslog - -import ( - "net" -) - -// netConn has an internal net.Conn and adheres to the serverConn interface, -// allowing us to send syslog messages over the network. -type netConn struct { - conn net.Conn -} - -// writeString formats syslog messages using time.RFC3339 and includes the -// hostname, and sends the message to the connection. -func (n *netConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { - if framer == nil { - framer = DefaultFramer - } - if formatter == nil { - formatter = DefaultFormatter - } - formattedMessage := framer(formatter(p, hostname, tag, msg)) - _, err := n.conn.Write([]byte(formattedMessage)) - return err -} - -// close the network connection -func (n *netConn) close() error { - return n.conn.Close() -} diff --git a/vendor/src/github.com/RackSec/srslog/srslog.go b/vendor/src/github.com/RackSec/srslog/srslog.go deleted file mode 100644 index 4469d720c3..0000000000 --- a/vendor/src/github.com/RackSec/srslog/srslog.go +++ /dev/null @@ -1,100 +0,0 @@ -package srslog - -import ( - "crypto/tls" - "crypto/x509" - "io/ioutil" - "log" - "os" -) - -// This interface allows us to work with both local and network connections, -// and enables Solaris support (see syslog_unix.go). -type serverConn interface { - writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, s string) error - close() error -} - -// New establishes a new connection to the system log daemon. Each -// write to the returned Writer sends a log message with the given -// priority and prefix. -func New(priority Priority, tag string) (w *Writer, err error) { - return Dial("", "", priority, tag) -} - -// Dial establishes a connection to a log daemon by connecting to -// address raddr on the specified network. Each write to the returned -// Writer sends a log message with the given facility, severity and -// tag. -// If network is empty, Dial will connect to the local syslog server. -func Dial(network, raddr string, priority Priority, tag string) (*Writer, error) { - return DialWithTLSConfig(network, raddr, priority, tag, nil) -} - -// DialWithTLSCertPath establishes a secure connection to a log daemon by connecting to -// address raddr on the specified network. It uses certPath to load TLS certificates and configure -// the secure connection. -func DialWithTLSCertPath(network, raddr string, priority Priority, tag, certPath string) (*Writer, error) { - serverCert, err := ioutil.ReadFile(certPath) - if err != nil { - return nil, err - } - - return DialWithTLSCert(network, raddr, priority, tag, serverCert) -} - -// DialWithTLSCert establishes a secure connection to a log daemon by connecting to -// address raddr on the specified network. It uses serverCert to load a TLS certificate -// and configure the secure connection.
-func DialWithTLSCert(network, raddr string, priority Priority, tag string, serverCert []byte) (*Writer, error) { - pool := x509.NewCertPool() - pool.AppendCertsFromPEM(serverCert) - config := tls.Config{ - RootCAs: pool, - } - - return DialWithTLSConfig(network, raddr, priority, tag, &config) -} - -// DialWithTLSConfig establishes a secure connection to a log daemon by connecting to -// address raddr on the specified network. It uses tlsConfig to configure the secure connection. -func DialWithTLSConfig(network, raddr string, priority Priority, tag string, tlsConfig *tls.Config) (*Writer, error) { - if err := validatePriority(priority); err != nil { - return nil, err - } - - if tag == "" { - tag = os.Args[0] - } - hostname, _ := os.Hostname() - - w := &Writer{ - priority: priority, - tag: tag, - hostname: hostname, - network: network, - raddr: raddr, - tlsConfig: tlsConfig, - } - - w.Lock() - defer w.Unlock() - - err := w.connect() - if err != nil { - return nil, err - } - return w, err -} - -// NewLogger creates a log.Logger whose output is written to -// the system log service with the specified priority. The logFlag -// argument is the flag set passed through to log.New to create -// the Logger. -func NewLogger(p Priority, logFlag int) (*log.Logger, error) { - s, err := New(p, "") - if err != nil { - return nil, err - } - return log.New(s, "", logFlag), nil -} diff --git a/vendor/src/github.com/RackSec/srslog/srslog_unix.go b/vendor/src/github.com/RackSec/srslog/srslog_unix.go deleted file mode 100644 index a04d9396f6..0000000000 --- a/vendor/src/github.com/RackSec/srslog/srslog_unix.go +++ /dev/null @@ -1,54 +0,0 @@ -package srslog - -import ( - "errors" - "io" - "net" -) - -// unixSyslog opens a connection to the syslog daemon running on the -// local machine using a Unix domain socket. This function exists because of -// Solaris support as implemented by gccgo. On Solaris you can not -// simply open a TCP connection to the syslog daemon. The gccgo -// sources have a syslog_solaris.go file that implements unixSyslog to -// return a type that satisfies the serverConn interface and simply calls the C -// library syslog function. -func unixSyslog() (conn serverConn, err error) { - logTypes := []string{"unixgram", "unix"} - logPaths := []string{"/dev/log", "/var/run/syslog", "/var/run/log"} - for _, network := range logTypes { - for _, path := range logPaths { - conn, err := net.Dial(network, path) - if err != nil { - continue - } else { - return &localConn{conn: conn}, nil - } - } - } - return nil, errors.New("Unix syslog delivery error") -} - -// localConn adheres to the serverConn interface, allowing us to send syslog -// messages to the local syslog daemon over a Unix domain socket. -type localConn struct { - conn io.WriteCloser -} - -// writeString formats syslog messages using time.Stamp instead of time.RFC3339, -// and omits the hostname (because it is expected to be used locally). 
-func (n *localConn) writeString(framer Framer, formatter Formatter, p Priority, hostname, tag, msg string) error { - if framer == nil { - framer = DefaultFramer - } - if formatter == nil { - formatter = UnixFormatter - } - _, err := n.conn.Write([]byte(framer(formatter(p, hostname, tag, msg)))) - return err -} - -// close the (local) network connection -func (n *localConn) close() error { - return n.conn.Close() -} diff --git a/vendor/src/github.com/RackSec/srslog/writer.go b/vendor/src/github.com/RackSec/srslog/writer.go deleted file mode 100644 index fdecaf61f6..0000000000 --- a/vendor/src/github.com/RackSec/srslog/writer.go +++ /dev/null @@ -1,164 +0,0 @@ -package srslog - -import ( - "crypto/tls" - "strings" - "sync" -) - -// A Writer is a connection to a syslog server. -type Writer struct { - sync.Mutex // guards conn - - priority Priority - tag string - hostname string - network string - raddr string - tlsConfig *tls.Config - framer Framer - formatter Formatter - - conn serverConn -} - -// connect makes a connection to the syslog server. -// It must be called with w.mu held. -func (w *Writer) connect() (err error) { - if w.conn != nil { - // ignore err from close, it makes sense to continue anyway - w.conn.close() - w.conn = nil - } - - var conn serverConn - var hostname string - dialer := w.getDialer() - conn, hostname, err = dialer.Call() - if err == nil { - w.conn = conn - w.hostname = hostname - } - - return -} - -// SetFormatter changes the formatter function for subsequent messages. -func (w *Writer) SetFormatter(f Formatter) { - w.formatter = f -} - -// SetFramer changes the framer function for subsequent messages. -func (w *Writer) SetFramer(f Framer) { - w.framer = f -} - -// Write sends a log message to the syslog daemon using the default priority -// passed into `srslog.New` or the `srslog.Dial*` functions. -func (w *Writer) Write(b []byte) (int, error) { - return w.writeAndRetry(w.priority, string(b)) -} - -// Close closes a connection to the syslog daemon. -func (w *Writer) Close() error { - w.Lock() - defer w.Unlock() - - if w.conn != nil { - err := w.conn.close() - w.conn = nil - return err - } - return nil -} - -// Emerg logs a message with severity LOG_EMERG; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Emerg(m string) (err error) { - _, err = w.writeAndRetry(LOG_EMERG, m) - return err -} - -// Alert logs a message with severity LOG_ALERT; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Alert(m string) (err error) { - _, err = w.writeAndRetry(LOG_ALERT, m) - return err -} - -// Crit logs a message with severity LOG_CRIT; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Crit(m string) (err error) { - _, err = w.writeAndRetry(LOG_CRIT, m) - return err -} - -// Err logs a message with severity LOG_ERR; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Err(m string) (err error) { - _, err = w.writeAndRetry(LOG_ERR, m) - return err -} - -// Warning logs a message with severity LOG_WARNING; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. 
-func (w *Writer) Warning(m string) (err error) { - _, err = w.writeAndRetry(LOG_WARNING, m) - return err -} - -// Notice logs a message with severity LOG_NOTICE; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Notice(m string) (err error) { - _, err = w.writeAndRetry(LOG_NOTICE, m) - return err -} - -// Info logs a message with severity LOG_INFO; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Info(m string) (err error) { - _, err = w.writeAndRetry(LOG_INFO, m) - return err -} - -// Debug logs a message with severity LOG_DEBUG; this overrides the default -// priority passed to `srslog.New` and the `srslog.Dial*` functions. -func (w *Writer) Debug(m string) (err error) { - _, err = w.writeAndRetry(LOG_DEBUG, m) - return err -} - -func (w *Writer) writeAndRetry(p Priority, s string) (int, error) { - pr := (w.priority & facilityMask) | (p & severityMask) - - w.Lock() - defer w.Unlock() - - if w.conn != nil { - if n, err := w.write(pr, s); err == nil { - return n, err - } - } - if err := w.connect(); err != nil { - return 0, err - } - return w.write(pr, s) -} - -// write generates and writes a syslog formatted string. It formats the -// message based on the current Formatter and Framer. -func (w *Writer) write(p Priority, msg string) (int, error) { - // ensure it ends in a \n - if !strings.HasSuffix(msg, "\n") { - msg += "\n" - } - - err := w.conn.writeString(w.framer, w.formatter, p, w.hostname, w.tag, msg) - if err != nil { - return 0, err - } - // Note: return the length of the input, not the number of - // bytes printed by Fprintf, because this must behave like - // an io.Writer. - return len(msg), nil -} diff --git a/vendor/src/github.com/Sirupsen/logrus/.gitignore b/vendor/src/github.com/Sirupsen/logrus/.gitignore deleted file mode 100644 index 66be63a005..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/.gitignore +++ /dev/null @@ -1 +0,0 @@ -logrus diff --git a/vendor/src/github.com/Sirupsen/logrus/.travis.yml b/vendor/src/github.com/Sirupsen/logrus/.travis.yml deleted file mode 100644 index ff23150dc3..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go -go: - - 1.3 - - 1.4 - - 1.5 - - tip -install: - - go get -t ./... -script: GOMAXPROCS=4 GORACE="halt_on_error=1" go test -race -v ./... 
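Taken together, the srslog files removed above define a small API surface: `Dial` (srslog.go), the `Priority` constants (constants.go), the `Formatter`/`Framer` hooks (formatter.go, framer.go), and the reconnecting `Writer` (writer.go). Here is a minimal usage sketch assuming only that vendored API; the address and tag are placeholders, not values from this patch:

```go
package main

import (
	"log"

	syslog "github.com/RackSec/srslog"
)

func main() {
	// Dial over plain TCP. The priority combines a facility and a severity,
	// exactly as the constants.go comment describes.
	w, err := syslog.Dial("tcp", "127.0.0.1:514", syslog.LOG_LOCAL0|syslog.LOG_INFO, "mytag")
	if err != nil {
		log.Fatal("failed to connect to syslog:", err)
	}
	defer w.Close()

	// Emit RFC 5424 messages with RFC 5425 octet-counting framing, using the
	// SetFormatter/SetFramer hooks defined on the Writer above.
	w.SetFormatter(syslog.RFC5424Formatter)
	w.SetFramer(syslog.RFC5425MessageLengthFramer)

	w.Info("structured hello from srslog")
}
```

If the connection drops, the `writeAndRetry` path in writer.go transparently redials once before reporting an error, so callers generally don't need their own reconnect loop.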
diff --git a/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md deleted file mode 100644 index f2c2bc2111..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/CHANGELOG.md +++ /dev/null @@ -1,66 +0,0 @@ -# 0.10.0 - -* feature: Add a test hook (#180) -* feature: `ParseLevel` is now case-insensitive (#326) -* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308) -* performance: avoid re-allocations on `WithFields` (#335) - -# 0.9.0 - -* logrus/text_formatter: don't emit empty msg -* logrus/hooks/airbrake: move out of main repository -* logrus/hooks/sentry: move out of main repository -* logrus/hooks/papertrail: move out of main repository -* logrus/hooks/bugsnag: move out of main repository -* logrus/core: run tests with `-race` -* logrus/core: detect TTY based on `stderr` -* logrus/core: support `WithError` on logger -* logrus/core: Solaris support - -# 0.8.7 - -* logrus/core: fix possible race (#216) -* logrus/doc: small typo fixes and doc improvements - - -# 0.8.6 - -* hooks/raven: allow passing an initialized client - -# 0.8.5 - -* logrus/core: revert #208 - -# 0.8.4 - -* formatter/text: fix data race (#218) - -# 0.8.3 - -* logrus/core: fix entry log level (#208) -* logrus/core: improve performance of text formatter by 40% -* logrus/core: expose `LevelHooks` type -* logrus/core: add support for DragonflyBSD and NetBSD -* formatter/text: print structs more verbosely - -# 0.8.2 - -* logrus: fix more Fatal family functions - -# 0.8.1 - -* logrus: fix not exiting on `Fatalf` and `Fatalln` - -# 0.8.0 - -* logrus: defaults to stderr instead of stdout -* hooks/sentry: add special field for `*http.Request` -* formatter/text: ignore Windows for colors - -# 0.7.3 - -* formatter/\*: allow configuration of timestamp layout - -# 0.7.2 - -* formatter/text: Add configuration option for time format (#158) diff --git a/vendor/src/github.com/Sirupsen/logrus/LICENSE b/vendor/src/github.com/Sirupsen/logrus/LICENSE deleted file mode 100644 index f090cb42f3..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Simon Eskildsen - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
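The logrus README deleted below discusses hooks at length; as a compact companion to the two-method `Hook` interface removed in hooks.go further down (`Levels() []Level`, `Fire(*Entry) error`), here is a minimal sketch of a custom hook. The `countingHook` type is hypothetical and exists only for illustration:

```go
package main

import (
	log "github.com/Sirupsen/logrus"
)

// countingHook counts error-level (and above) entries. Note that hooks run
// synchronously on the logging goroutine, and this sketch does no locking,
// so a real hook would need its own synchronization.
type countingHook struct {
	errors int
}

// Levels restricts the hook to error severity and above.
func (h *countingHook) Levels() []log.Level {
	return []log.Level{log.ErrorLevel, log.FatalLevel, log.PanicLevel}
}

// Fire is called once per matching entry.
func (h *countingHook) Fire(entry *log.Entry) error {
	h.errors++
	return nil
}

func main() {
	log.AddHook(&countingHook{})
	log.Error("something failed")
}
```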
diff --git a/vendor/src/github.com/Sirupsen/logrus/README.md b/vendor/src/github.com/Sirupsen/logrus/README.md deleted file mode 100644 index 6e1721a743..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/README.md +++ /dev/null @@ -1,388 +0,0 @@ -# Logrus :walrus: [![Build Status](https://travis-ci.org/Sirupsen/logrus.svg?branch=master)](https://travis-ci.org/Sirupsen/logrus) [![GoDoc](https://godoc.org/github.com/Sirupsen/logrus?status.svg)](https://godoc.org/github.com/Sirupsen/logrus) - -Logrus is a structured logger for Go (golang), completely API compatible with -the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not -yet stable (pre 1.0). Logrus itself is completely stable and has been used in -many large deployments. The core API is unlikely to change much but please -version control your Logrus to make sure you aren't fetching latest `master` on -every build.** - -Nicely color-coded in development (when a TTY is attached, otherwise just -plain text): - -![Colored](http://i.imgur.com/PY7qMwd.png) - -With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash -or Splunk: - -```json -{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the -ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"} - -{"level":"warning","msg":"The group's number increased tremendously!", -"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"A giant walrus appears!", -"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"} - -{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.", -"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"} - -{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true, -"time":"2014-03-10 19:57:38.562543128 -0400 EDT"} -``` - -With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not -attached, the output is compatible with the -[logfmt](http://godoc.org/github.com/kr/logfmt) format: - -```text -time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8 -time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10 -time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true -time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 -time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 -time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 -``` - -#### Example - -The simplest way to use Logrus is simply the package-level exported logger: - -```go -package main - -import ( - log "github.com/Sirupsen/logrus" -) - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - }).Info("A walrus appears") -} -``` - -Note that it's completely api-compatible with the stdlib logger, so you can -replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"` -and you'll now have the flexibility of Logrus. You can customize it all you -want: - -```go -package main - -import ( - "os" - log "github.com/Sirupsen/logrus" -) - -func init() { - // Log as JSON instead of the default ASCII formatter. - log.SetFormatter(&log.JSONFormatter{}) - - // Output to stderr instead of stdout, could also be a file. 
- log.SetOutput(os.Stderr) - - // Only log the warning severity or above. - log.SetLevel(log.WarnLevel) -} - -func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") - - log.WithFields(log.Fields{ - "omg": true, - "number": 122, - }).Warn("The group's number increased tremendously!") - - log.WithFields(log.Fields{ - "omg": true, - "number": 100, - }).Fatal("The ice breaks!") - - // A common pattern is to re-use fields between logging statements by re-using - // the logrus.Entry returned from WithFields() - contextLogger := log.WithFields(log.Fields{ - "common": "this is a common field", - "other": "I also should be logged always", - }) - - contextLogger.Info("I'll be logged with common and other field") - contextLogger.Info("Me too") -} -``` - -For more advanced usage such as logging to multiple locations from the same -application, you can also create an instance of the `logrus` Logger: - -```go -package main - -import ( - "os" - - "github.com/Sirupsen/logrus" -) - -// Create a new instance of the logger. You can have any number of instances. -var log = logrus.New() - -func main() { - // The API for setting attributes is a little different than the package level - // exported logger. See Godoc. - log.Out = os.Stderr - - log.WithFields(logrus.Fields{ - "animal": "walrus", - "size": 10, - }).Info("A group of walrus emerges from the ocean") -} -``` - -#### Fields - -Logrus encourages careful, structured logging through logging fields instead of -long, unparseable error messages. For example, instead of: `log.Fatalf("Failed -to send event %s to topic %s with key %d")`, you should log the much more -discoverable: - -```go -log.WithFields(log.Fields{ - "event": event, - "topic": topic, - "key": key, -}).Fatal("Failed to send event") -``` - -We've found this API forces you to think about logging in a way that produces -much more useful logging messages. We've been in countless situations where just -a single added field to a log statement that was already there would've saved us -hours. The `WithFields` call is optional. - -In general, with Logrus, using any of the `printf`-family functions should be -seen as a hint that you should add a field; however, you can still use the -`printf`-family functions with Logrus. - -#### Hooks - -You can add hooks for logging levels. For example, to send errors to an exception -tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to -multiple places simultaneously, e.g. syslog. - -Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in -`init`: - -```go -import ( - log "github.com/Sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" - logrus_syslog "github.com/Sirupsen/logrus/hooks/syslog" - "log/syslog" -) - -func init() { - - // Use the Airbrake hook to report errors that have Error severity or above to - // an exception tracker. You can create custom hooks, see the Hooks section. - log.AddHook(airbrake.NewHook(123, "xyz", "production")) - - hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "") - if err != nil { - log.Error("Unable to connect to local syslog daemon") - } else { - log.AddHook(hook) - } -} -``` -Note: the syslog hook also supports connecting to a local syslog socket (e.g. "/dev/log", "/var/run/syslog", or "/var/run/log"). For details, see the [syslog hook README](hooks/syslog/README.md).
- -| Hook | Description | -| ----- | ----------- | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | -| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | -| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| - - -#### Level logging - -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. 
- -```go -log.Debug("Useful debugging information.") -log.Info("Something noteworthy happened!") -log.Warn("You should probably take a look at this.") -log.Error("Something failed but I'm not quitting.") -// Calls os.Exit(1) after logging -log.Fatal("Bye.") -// Calls panic() after logging -log.Panic("I'm bailing.") -``` - -You can set the logging level on a `Logger`, then it will only log entries with -that severity or anything above it: - -```go -// Will log anything that is info or above (warn, error, fatal, panic). Default. -log.SetLevel(log.InfoLevel) -``` - -It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose -environment if your application has that. - -#### Entries - -Besides the fields added with `WithField` or `WithFields`, some fields are -automatically added to all logging events: - -1. `time`. The timestamp when the entry was created. -2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after - the `WithFields` call. E.g. `Failed to send event.` -3. `level`. The logging level. E.g. `info`. - -#### Environments - -Logrus has no notion of environment. - -If you wish for hooks and formatters to only be used in specific environments, -you should handle that yourself. For example, if your application has a global -variable `Environment`, which is a string representation of the environment, you -could do: - -```go -import ( - log "github.com/Sirupsen/logrus" -) - -func init() { - // do something here to set environment depending on an environment variable - // or command-line flag - if Environment == "production" { - log.SetFormatter(&log.JSONFormatter{}) - } else { - // The TextFormatter is default, you don't actually have to do this. - log.SetFormatter(&log.TextFormatter{}) - } -} -``` - -This configuration is how `logrus` was intended to be used, but JSON in -production is mostly only useful if you do log aggregation with tools like -Splunk or Logstash. - -#### Formatters - -The built-in logging formatters are: - -* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise - without colors. - * *Note:* to force colored output when there is no TTY, set the `ForceColors` - field to `true`. To force no colored output even if there is a TTY, set the - `DisableColors` field to `true`. -* `logrus.JSONFormatter`. Logs fields as JSON. -* `logrus/formatters/logstash.LogstashFormatter`. Logs fields as [Logstash](http://logstash.net) Events. - - ```go - logrus.SetFormatter(&logstash.LogstashFormatter{Type: "application_name"}) - ``` - -Third party logging formatters: - -* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. -* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. - -You can define your formatter by implementing the `Formatter` interface, -requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a -`Fields` type (`map[string]interface{}`) with all your fields as well as the -default ones (see Entries section above): - -```go -type MyJSONFormatter struct { -} - -log.SetFormatter(new(MyJSONFormatter)) - -func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) { - // Note this doesn't include Time, Level and Message which are available on - // the Entry. Consult `godoc` for information about those fields or read the - // source of the official loggers.
- serialized, err := json.Marshal(entry.Data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) - } - return append(serialized, '\n'), nil -} -``` - -#### Logger as an `io.Writer` - -Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it. - -```go -w := logger.Writer() -defer w.Close() - -srv := http.Server{ - // create a stdlib log.Logger that writes to - // logrus.Logger. - ErrorLog: log.New(w, "", 0), -} -``` - -Each line written to that writer will be printed the usual way, using formatters -and hooks. The level for those entries is `info`. - -#### Rotation - -Log rotation is not provided with Logrus. Log rotation should be done by an -external program (like `logrotate(8)`) that can compress and delete old log -entries. It should not be a feature of the application-level logger. - -#### Tools - -| Tool | Description | -| ---- | ----------- | -|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.| - -#### Testing - -Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides: - -* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook -* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any): - -```go -logger, hook := NewNullLogger() -logger.Error("Hello error") - -assert.Equal(1, len(hook.Entries)) -assert.Equal(logrus.ErrorLevel, hook.LastEntry().Level) -assert.Equal("Hello error", hook.LastEntry().Message) - -hook.Reset() -assert.Nil(hook.LastEntry()) -``` diff --git a/vendor/src/github.com/Sirupsen/logrus/doc.go b/vendor/src/github.com/Sirupsen/logrus/doc.go deleted file mode 100644 index dddd5f877b..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/doc.go +++ /dev/null @@ -1,26 +0,0 @@ -/* -Package logrus is a structured logger for Go, completely API compatible with the standard library logger. - - -The simplest way to use Logrus is simply the package-level exported logger: - - package main - - import ( - log "github.com/Sirupsen/logrus" - ) - - func main() { - log.WithFields(log.Fields{ - "animal": "walrus", - "number": 1, - "size": 10, - }).Info("A walrus appears") - } - -Output: - time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10 - -For a full guide visit https://github.com/Sirupsen/logrus -*/ -package logrus diff --git a/vendor/src/github.com/Sirupsen/logrus/entry.go b/vendor/src/github.com/Sirupsen/logrus/entry.go deleted file mode 100644 index 89e966e7bf..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/entry.go +++ /dev/null @@ -1,264 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "io" - "os" - "time" -) - -// Defines the key when adding errors using WithError. -var ErrorKey = "error" - -// An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. -type Entry struct { - Logger *Logger - - // Contains all the fields set by the user. 
- Data Fields - - // Time at which the log entry was created - Time time.Time - - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic - Level Level - - // Message passed to Debug, Info, Warn, Error, Fatal or Panic - Message string -} - -func NewEntry(logger *Logger) *Entry { - return &Entry{ - Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), - } -} - -// Returns a reader for the entry, which is a proxy to the formatter. -func (entry *Entry) Reader() (*bytes.Buffer, error) { - serialized, err := entry.Logger.Formatter.Format(entry) - return bytes.NewBuffer(serialized), err -} - -// Returns the string representation from the reader and ultimately the -// formatter. -func (entry *Entry) String() (string, error) { - reader, err := entry.Reader() - if err != nil { - return "", err - } - - return reader.String(), err -} - -// Add an error as single field (using the key defined in ErrorKey) to the Entry. -func (entry *Entry) WithError(err error) *Entry { - return entry.WithField(ErrorKey, err) -} - -// Add a single field to the Entry. -func (entry *Entry) WithField(key string, value interface{}) *Entry { - return entry.WithFields(Fields{key: value}) -} - -// Add a map of fields to the Entry. -func (entry *Entry) WithFields(fields Fields) *Entry { - data := make(Fields, len(entry.Data)+len(fields)) - for k, v := range entry.Data { - data[k] = v - } - for k, v := range fields { - data[k] = v - } - return &Entry{Logger: entry.Logger, Data: data} -} - -// This function is not declared with a pointer value because otherwise -// race conditions will occur when using multiple goroutines -func (entry Entry) log(level Level, msg string) { - entry.Time = time.Now() - entry.Level = level - entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() - } - - reader, err := entry.Reader() - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } - - entry.Logger.mu.Lock() - defer entry.Logger.mu.Unlock() - - _, err = io.Copy(entry.Logger.Out, reader) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - - // To avoid Entry#log() returning a value that only would make sense for - // panic() to use in Entry#Panic(), we avoid the allocation by checking - // directly here. - if level <= PanicLevel { - panic(&entry) - } -} - -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Print(args ...interface{}) { - entry.Info(args...) -} - -func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Warning(args ...interface{}) { - entry.Warn(args...) 
-} - -func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } -} - -func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } - panic(fmt.Sprint(args...)) -} - -// Entry Printf family functions - -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Printf(format string, args ...interface{}) { - entry.Infof(format, args...) -} - -func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Warningf(format string, args ...interface{}) { - entry.Warnf(format, args...) -} - -func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } -} - -func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } -} - -// Entry Println family functions - -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.Level >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.Level >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Println(args ...interface{}) { - entry.Infoln(args...) -} - -func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.Level >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Warningln(args ...interface{}) { - entry.Warnln(args...) -} - -func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.Level >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } -} - -func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.Level >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - os.Exit(1) -} - -func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.Level >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } -} - -// Sprintlnn => Sprint no newline. This is to get the behavior of how -// fmt.Sprintln where spaces are always added between operands, regardless of -// their type. Instead of vendoring the Sprintln implementation to spare a -// string allocation, we do the simplest thing. -func (entry *Entry) sprintlnn(args ...interface{}) string { - msg := fmt.Sprintln(args...) 
- return msg[:len(msg)-1] -} diff --git a/vendor/src/github.com/Sirupsen/logrus/exported.go b/vendor/src/github.com/Sirupsen/logrus/exported.go deleted file mode 100644 index 9a0120ac1d..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/exported.go +++ /dev/null @@ -1,193 +0,0 @@ -package logrus - -import ( - "io" -) - -var ( - // std is the name of the standard logger in stdlib `log` - std = New() -) - -func StandardLogger() *Logger { - return std -} - -// SetOutput sets the standard logger output. -func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out -} - -// SetFormatter sets the standard logger formatter. -func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter -} - -// SetLevel sets the standard logger level. -func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() - std.Level = level -} - -// GetLevel returns the standard logger level. -func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.Level -} - -// AddHook adds a hook to the standard logger hooks. -func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) -} - -// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. -func WithError(err error) *Entry { - return std.WithField(ErrorKey, err) -} - -// WithField creates an entry from the standard logger and adds a field to -// it. If you want multiple fields, use `WithFields`. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithField(key string, value interface{}) *Entry { - return std.WithField(key, value) -} - -// WithFields creates an entry from the standard logger and adds multiple -// fields to it. This is simply a helper for `WithField`, invoking it -// once for each field. -// -// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal -// or Panic on the Entry it returns. -func WithFields(fields Fields) *Entry { - return std.WithFields(fields) -} - -// Debug logs a message at level Debug on the standard logger. -func Debug(args ...interface{}) { - std.Debug(args...) -} - -// Print logs a message at level Info on the standard logger. -func Print(args ...interface{}) { - std.Print(args...) -} - -// Info logs a message at level Info on the standard logger. -func Info(args ...interface{}) { - std.Info(args...) -} - -// Warn logs a message at level Warn on the standard logger. -func Warn(args ...interface{}) { - std.Warn(args...) -} - -// Warning logs a message at level Warn on the standard logger. -func Warning(args ...interface{}) { - std.Warning(args...) -} - -// Error logs a message at level Error on the standard logger. -func Error(args ...interface{}) { - std.Error(args...) -} - -// Panic logs a message at level Panic on the standard logger. -func Panic(args ...interface{}) { - std.Panic(args...) -} - -// Fatal logs a message at level Fatal on the standard logger. -func Fatal(args ...interface{}) { - std.Fatal(args...) -} - -// Debugf logs a message at level Debug on the standard logger. -func Debugf(format string, args ...interface{}) { - std.Debugf(format, args...) -} - -// Printf logs a message at level Info on the standard logger. -func Printf(format string, args ...interface{}) { - std.Printf(format, args...) -} - -// Infof logs a message at level Info on the standard logger. -func Infof(format string, args ...interface{}) { - std.Infof(format, args...) 
-} - -// Warnf logs a message at level Warn on the standard logger. -func Warnf(format string, args ...interface{}) { - std.Warnf(format, args...) -} - -// Warningf logs a message at level Warn on the standard logger. -func Warningf(format string, args ...interface{}) { - std.Warningf(format, args...) -} - -// Errorf logs a message at level Error on the standard logger. -func Errorf(format string, args ...interface{}) { - std.Errorf(format, args...) -} - -// Panicf logs a message at level Panic on the standard logger. -func Panicf(format string, args ...interface{}) { - std.Panicf(format, args...) -} - -// Fatalf logs a message at level Fatal on the standard logger. -func Fatalf(format string, args ...interface{}) { - std.Fatalf(format, args...) -} - -// Debugln logs a message at level Debug on the standard logger. -func Debugln(args ...interface{}) { - std.Debugln(args...) -} - -// Println logs a message at level Info on the standard logger. -func Println(args ...interface{}) { - std.Println(args...) -} - -// Infoln logs a message at level Info on the standard logger. -func Infoln(args ...interface{}) { - std.Infoln(args...) -} - -// Warnln logs a message at level Warn on the standard logger. -func Warnln(args ...interface{}) { - std.Warnln(args...) -} - -// Warningln logs a message at level Warn on the standard logger. -func Warningln(args ...interface{}) { - std.Warningln(args...) -} - -// Errorln logs a message at level Error on the standard logger. -func Errorln(args ...interface{}) { - std.Errorln(args...) -} - -// Panicln logs a message at level Panic on the standard logger. -func Panicln(args ...interface{}) { - std.Panicln(args...) -} - -// Fatalln logs a message at level Fatal on the standard logger. -func Fatalln(args ...interface{}) { - std.Fatalln(args...) -} diff --git a/vendor/src/github.com/Sirupsen/logrus/formatter.go b/vendor/src/github.com/Sirupsen/logrus/formatter.go deleted file mode 100644 index 104d689f18..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/formatter.go +++ /dev/null @@ -1,48 +0,0 @@ -package logrus - -import "time" - -const DefaultTimestampFormat = time.RFC3339 - -// The Formatter interface is used to implement a custom Formatter. It takes an -// `Entry`. It exposes all the fields, including the default ones: - -// -// * `entry.Data["msg"]`. The message passed from Info, Warn, Error, etc. -// * `entry.Data["time"]`. The timestamp. -// * `entry.Data["level"]`. The level the entry was logged at. -// -// Any additional fields added with `WithField` or `WithFields` are also in -// `entry.Data`. Format is expected to return an array of bytes which are then -// logged to `logger.Out`. -type Formatter interface { - Format(*Entry) ([]byte, error) -} - -// This is to avoid silently overwriting the `time`, `msg` and `level` fields when -// dumping them. Without this code, doing: -// -// logrus.WithField("level", 1).Info("hello") -// -// would just silently drop the user-provided level. Instead, with this code, -// it'll be logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters.
-// This is to avoid silently overwriting the `time`, `msg` and `level` fields when -// dumping an entry. If this code wasn't there, -// -// logrus.WithField("level", 1).Info("hello") -// -// would just silently drop the user-provided level. Instead, with this code it'll -// be logged as: -// -// {"level": "info", "fields.level": 1, "msg": "hello", "time": "..."} -// -// It's not exported because it's still using Data in an opinionated way. It's to -// avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields) { - _, ok := data["time"] - if ok { - data["fields.time"] = data["time"] - } - - _, ok = data["msg"] - if ok { - data["fields.msg"] = data["msg"] - } - - _, ok = data["level"] - if ok { - data["fields.level"] = data["level"] - } -} diff --git a/vendor/src/github.com/Sirupsen/logrus/hooks.go b/vendor/src/github.com/Sirupsen/logrus/hooks.go deleted file mode 100644 index 3f151cdc39..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/hooks.go +++ /dev/null @@ -1,34 +0,0 @@ -package logrus - -// A hook to be fired when logging on the logging levels returned from -// `Levels()` on your implementation of the interface. Note that hooks are not -// fired in a goroutine or a channel with workers; you should handle such -// functionality yourself if your call is non-blocking and you don't wish for -// the logging calls for levels returned from `Levels()` to block. -type Hook interface { - Levels() []Level - Fire(*Entry) error -} - -// Internal type for storing the hooks on a logger instance. -type LevelHooks map[Level][]Hook - -// Add a hook to an instance of logger. This is called with -// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface. -func (hooks LevelHooks) Add(hook Hook) { - for _, level := range hook.Levels() { - hooks[level] = append(hooks[level], hook) - } -} - -// Fire all the hooks for the passed level. Used by `entry.log` to fire -// appropriate hooks for a log entry. -func (hooks LevelHooks) Fire(level Level, entry *Entry) error { - for _, hook := range hooks[level] { - if err := hook.Fire(entry); err != nil { - return err - } - } - - return nil -} diff --git a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go b/vendor/src/github.com/Sirupsen/logrus/json_formatter.go deleted file mode 100644 index 2ad6dc5cf4..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/json_formatter.go +++ /dev/null @@ -1,41 +0,0 @@ -package logrus - -import ( - "encoding/json" - "fmt" -) - -type JSONFormatter struct { - // TimestampFormat sets the format used for marshaling timestamps. - TimestampFormat string -} - -func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) - for k, v := range entry.Data { - switch v := v.(type) { - case error: - // Otherwise errors are ignored by `encoding/json` - // https://github.com/Sirupsen/logrus/issues/137 - data[k] = v.Error() - default: - data[k] = v - } - } - prefixFieldClashes(data) - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - - data["time"] = entry.Time.Format(timestampFormat) - data["msg"] = entry.Message - data["level"] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("failed to marshal fields to JSON: %v", err) - } - return append(serialized, '\n'), nil -} diff --git a/vendor/src/github.com/Sirupsen/logrus/logger.go b/vendor/src/github.com/Sirupsen/logrus/logger.go deleted file mode 100644 index 2fdb231761..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/logger.go +++ /dev/null @@ -1,212 +0,0 @@ -package logrus - -import ( - "io" - "os" - "sync" -) - -type Logger struct { - // The logs are `io.Copy`'d to this in a mutex. It's common to set this to a - // file, or leave it at the default, which is `os.Stderr`. You can also set this to - // something more adventurous, such as logging to Kafka. - Out io.Writer - // Hooks for the logger instance.
These allow firing events based on logging - // levels and log entries. For example, to send errors to an error tracking - // service, log to StatsD or dump the core on fatal errors. - Hooks LevelHooks - // All log entries pass through the formatter before being logged to Out. The - // included formatters are `TextFormatter` and `JSONFormatter`, of which - // TextFormatter is the default. In development (when a TTY is attached) it - // logs with colors, but not when writing to a file. You can easily implement your - // own by implementing the `Formatter` interface; see the `README` or the included - // formatters for examples. - Formatter Formatter - // The logging level the logger should log at. This is typically (and defaults - // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be - // logged. `logrus.Debug` is useful in development. - Level Level - // Used to sync writing to the log. - mu sync.Mutex -} - -// New creates a new logger. Configuration should be set by changing `Formatter`, -// `Out` and `Hooks` directly on the default logger instance. You can also just -// instantiate your own: -// -// var log = &Logger{ -// Out: os.Stderr, -// Formatter: new(JSONFormatter), -// Hooks: make(LevelHooks), -// Level: logrus.DebugLevel, -// } -// -// It's recommended to make this a global instance called `log`. -func New() *Logger { - return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, - } -} - -// WithField adds a field to the log entry. Note that it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. -// If you want multiple fields, use `WithFields`. -func (logger *Logger) WithField(key string, value interface{}) *Entry { - return NewEntry(logger).WithField(key, value) -} - -// WithFields adds a map of fields to the log entry. All it does is call `WithField` -// once for each field. -func (logger *Logger) WithFields(fields Fields) *Entry { - return NewEntry(logger).WithFields(fields) -} - -// WithError adds an error as a single field to the log entry. All it does is call -// `WithError` for the given `error`. -func (logger *Logger) WithError(err error) *Entry { - return NewEntry(logger).WithError(err) -} - -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugf(format, args...) - } -} - -func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infof(format, args...) - } -} - -func (logger *Logger) Printf(format string, args ...interface{}) { - NewEntry(logger).Printf(format, args...) -} - -func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnf(format, args...) - } -} - -func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorf(format, args...) - } -} - -func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalf(format, args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicf(format, args...) - } -} - -func (logger *Logger) Debug(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debug(args...)
- } -} - -func (logger *Logger) Info(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Info(args...) - } -} - -func (logger *Logger) Print(args ...interface{}) { - NewEntry(logger).Info(args...) -} - -func (logger *Logger) Warn(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Warning(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warn(args...) - } -} - -func (logger *Logger) Error(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Error(args...) - } -} - -func (logger *Logger) Fatal(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatal(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panic(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panic(args...) - } -} - -func (logger *Logger) Debugln(args ...interface{}) { - if logger.Level >= DebugLevel { - NewEntry(logger).Debugln(args...) - } -} - -func (logger *Logger) Infoln(args ...interface{}) { - if logger.Level >= InfoLevel { - NewEntry(logger).Infoln(args...) - } -} - -func (logger *Logger) Println(args ...interface{}) { - NewEntry(logger).Println(args...) -} - -func (logger *Logger) Warnln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Warningln(args ...interface{}) { - if logger.Level >= WarnLevel { - NewEntry(logger).Warnln(args...) - } -} - -func (logger *Logger) Errorln(args ...interface{}) { - if logger.Level >= ErrorLevel { - NewEntry(logger).Errorln(args...) - } -} - -func (logger *Logger) Fatalln(args ...interface{}) { - if logger.Level >= FatalLevel { - NewEntry(logger).Fatalln(args...) - } - os.Exit(1) -} - -func (logger *Logger) Panicln(args ...interface{}) { - if logger.Level >= PanicLevel { - NewEntry(logger).Panicln(args...) - } -} diff --git a/vendor/src/github.com/Sirupsen/logrus/logrus.go b/vendor/src/github.com/Sirupsen/logrus/logrus.go deleted file mode 100644 index e596691116..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/logrus.go +++ /dev/null @@ -1,143 +0,0 @@ -package logrus - -import ( - "fmt" - "log" - "strings" -) - -// Fields type, used to pass to `WithFields`. -type Fields map[string]interface{} - -// Level type -type Level uint8 - -// Convert the Level to a string. E.g. PanicLevel becomes "panic". -func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" - } - - return "unknown" -} - -// ParseLevel takes a string level and returns the Logrus log level constant. -func ParseLevel(lvl string) (Level, error) { - switch strings.ToLower(lvl) { - case "panic": - return PanicLevel, nil - case "fatal": - return FatalLevel, nil - case "error": - return ErrorLevel, nil - case "warn", "warning": - return WarnLevel, nil - case "info": - return InfoLevel, nil - case "debug": - return DebugLevel, nil - } - - var l Level - return l, fmt.Errorf("not a valid logrus Level: %q", lvl) -} - -// A constant exposing all logging levels -var AllLevels = []Level{ - PanicLevel, - FatalLevel, - ErrorLevel, - WarnLevel, - InfoLevel, - DebugLevel, -} - -// These are the different logging levels. You can set the logging level to log -// on your instance of logger, obtained with `logrus.New()`. 
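// Editorial aside, not part of the vendored file: the constants below are
// declared from most to least severe, so PanicLevel is 0 and DebugLevel is 5.
// A larger Level value therefore means a more verbose logger, which is why the
// leveled methods above guard with comparisons such as
// `logger.Level >= InfoLevel`. Wiring a level string (say, from a CLI flag)
// into the standard logger takes only ParseLevel and SetLevel, both defined in
// this package:
//
//	if lvl, err := ParseLevel("debug"); err == nil {
//		SetLevel(lvl)
//	}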
-const ( - // PanicLevel level, highest level of severity. Logs and then calls panic with the - // message passed to Debug, Info, ... - PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the - // logging level is set to Panic. - FatalLevel - // ErrorLevel level. Logs. Used for errors that should definitely be noted. - // Commonly used for hooks to send errors to an error tracking service. - ErrorLevel - // WarnLevel level. Non-critical entries that deserve eyes. - WarnLevel - // InfoLevel level. General operational entries about what's going on inside the - // application. - InfoLevel - // DebugLevel level. Usually only enabled when debugging. Very verbose logging. - DebugLevel -) - -// Won't compile if StdLogger can't be realized by a log.Logger -var ( - _ StdLogger = &log.Logger{} - _ StdLogger = &Entry{} - _ StdLogger = &Logger{} -) - -// StdLogger is what your logrus-enabled library should take, that way -// it'll accept a stdlib logger and a logrus logger. There's no standard -// interface, this is the closest we get, unfortunately. -type StdLogger interface { - Print(...interface{}) - Printf(string, ...interface{}) - Println(...interface{}) - - Fatal(...interface{}) - Fatalf(string, ...interface{}) - Fatalln(...interface{}) - - Panic(...interface{}) - Panicf(string, ...interface{}) - Panicln(...interface{}) -} - -// The FieldLogger interface generalizes the Entry and Logger types -type FieldLogger interface { - WithField(key string, value interface{}) *Entry - WithFields(fields Fields) *Entry - WithError(err error) *Entry - - Debugf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Printf(format string, args ...interface{}) - Warnf(format string, args ...interface{}) - Warningf(format string, args ...interface{}) - Errorf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) - Panicf(format string, args ...interface{}) - - Debug(args ...interface{}) - Info(args ...interface{}) - Print(args ...interface{}) - Warn(args ...interface{}) - Warning(args ...interface{}) - Error(args ...interface{}) - Fatal(args ...interface{}) - Panic(args ...interface{}) - - Debugln(args ...interface{}) - Infoln(args ...interface{}) - Println(args ...interface{}) - Warnln(args ...interface{}) - Warningln(args ...interface{}) - Errorln(args ...interface{}) - Fatalln(args ...interface{}) - Panicln(args ...interface{}) -} diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/src/github.com/Sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index 71f8d67a55..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly - -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TIOCGETA - -type Termios syscall.Termios diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go deleted file mode 100644 index a2c0b40db6..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,12 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -package logrus - -import "syscall" - -const ioctlReadTermios = syscall.TCGETS - -type Termios syscall.Termios diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go deleted file mode 100644 index b343b3a375..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/terminal_notwindows.go +++ /dev/null @@ -1,21 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build linux darwin freebsd openbsd netbsd dragonfly - -package logrus - -import ( - "syscall" - "unsafe" -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stderr - var termios Termios - _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0) - return err == 0 -} diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/src/github.com/Sirupsen/logrus/terminal_solaris.go deleted file mode 100644 index 3e70bf7bf0..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/terminal_solaris.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build solaris - -package logrus - -import ( - "os" - - "golang.org/x/sys/unix" -) - -// IsTerminal returns true if stdout's file descriptor is a terminal. -func IsTerminal() bool { - _, err := unix.IoctlGetTermios(int(os.Stdout.Fd()), unix.TCGETA) - return err == nil -} diff --git a/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go deleted file mode 100644 index 0146845d16..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/terminal_windows.go +++ /dev/null @@ -1,27 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2011 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build windows - -package logrus - -import ( - "syscall" - "unsafe" -) - -var kernel32 = syscall.NewLazyDLL("kernel32.dll") - -var ( - procGetConsoleMode = kernel32.NewProc("GetConsoleMode") -) - -// IsTerminal returns true if stderr's file descriptor is a terminal. -func IsTerminal() bool { - fd := syscall.Stderr - var st uint32 - r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0) - return r != 0 && e == 0 -} diff --git a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go b/vendor/src/github.com/Sirupsen/logrus/text_formatter.go deleted file mode 100644 index 06ef202337..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/text_formatter.go +++ /dev/null @@ -1,161 +0,0 @@ -package logrus - -import ( - "bytes" - "fmt" - "runtime" - "sort" - "strings" - "time" -) - -const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 34 - gray = 37 -) - -var ( - baseTimestamp time.Time - isTerminal bool -) - -func init() { - baseTimestamp = time.Now() - isTerminal = IsTerminal() -} - -func miniTS() int { - return int(time.Since(baseTimestamp) / time.Second) -} - -type TextFormatter struct { - // Set to true to bypass checking for a TTY before outputting colors. - ForceColors bool - - // Force disabling colors. - DisableColors bool - - // Disable timestamp logging. Useful when output is redirected to a logging - // system that already adds timestamps.
- DisableTimestamp bool - - // Enable logging the full timestamp when a TTY is attached instead of just - // the time passed since the beginning of execution. - FullTimestamp bool - - // TimestampFormat to use for display when a full timestamp is printed. - TimestampFormat string - - // The fields are sorted by default for a consistent output. For applications - // that log extremely frequently and don't use the JSON formatter this may not - // be desired. - DisableSorting bool -} - -func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - keys := make([]string, 0, len(entry.Data)) - for k := range entry.Data { - keys = append(keys, k) - } - - if !f.DisableSorting { - sort.Strings(keys) - } - - b := &bytes.Buffer{} - - prefixFieldClashes(entry.Data) - - isColorTerminal := isTerminal && (runtime.GOOS != "windows") - isColored := (f.ForceColors || isColorTerminal) && !f.DisableColors - - timestampFormat := f.TimestampFormat - if timestampFormat == "" { - timestampFormat = DefaultTimestampFormat - } - if isColored { - f.printColored(b, entry, keys, timestampFormat) - } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) - } - } - - b.WriteByte('\n') - return b.Bytes(), nil -} - -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { - var levelColor int - switch entry.Level { - case DebugLevel: - levelColor = gray - case WarnLevel: - levelColor = yellow - case ErrorLevel, FatalLevel, PanicLevel: - levelColor = red - default: - levelColor = blue - } - - levelText := strings.ToUpper(entry.Level.String())[0:4] - - if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message) - } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) - } - for _, k := range keys { - v := entry.Data[k] - fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%+v", levelColor, k, v) - } -} - -// needsQuoting reports whether text contains any character outside the safe -// set [a-zA-Z0-9.-] and therefore has to be quoted. -func needsQuoting(text string) bool { - for _, ch := range text { - if !((ch >= 'a' && ch <= 'z') || - (ch >= 'A' && ch <= 'Z') || - (ch >= '0' && ch <= '9') || - ch == '-' || ch == '.') { - return true - } - } - return false -} - -func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) { - b.WriteString(key) - b.WriteByte('=') - - switch value := value.(type) { - case string: - if needsQuoting(value) { - fmt.Fprintf(b, "%q", value) - } else { - b.WriteString(value) - } - case error: - errmsg := value.Error() - if needsQuoting(errmsg) { - fmt.Fprintf(b, "%q", errmsg) - } else { - b.WriteString(errmsg) - } - default: - fmt.Fprint(b, value) - } - - b.WriteByte(' ') -} diff --git a/vendor/src/github.com/Sirupsen/logrus/writer.go b/vendor/src/github.com/Sirupsen/logrus/writer.go deleted file mode 100644 index 1e30b1c753..0000000000 --- a/vendor/src/github.com/Sirupsen/logrus/writer.go +++ /dev/null @@ -1,31 +0,0 @@ -package logrus - -import ( - "bufio" - "io" - "runtime" -) - -func (logger *Logger) Writer() *io.PipeWriter { - reader, writer := io.Pipe() - - go logger.writerScanner(reader) - runtime.SetFinalizer(writer, writerFinalizer) - - return writer -} - -func (logger *Logger) writerScanner(reader *io.PipeReader) { - scanner := bufio.NewScanner(reader) - for
scanner.Scan() { - logger.Print(scanner.Text()) - } - if err := scanner.Err(); err != nil { - logger.Errorf("Error while reading from Writer: %s", err) - } - reader.Close() -} - -func writerFinalizer(writer *io.PipeWriter) { - writer.Close() -} diff --git a/vendor/src/github.com/agl/ed25519/LICENSE b/vendor/src/github.com/agl/ed25519/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/vendor/src/github.com/agl/ed25519/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/agl/ed25519/ed25519.go b/vendor/src/github.com/agl/ed25519/ed25519.go deleted file mode 100644 index 700938ddda..0000000000 --- a/vendor/src/github.com/agl/ed25519/ed25519.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package ed25519 implements the Ed25519 signature algorithm. See -// http://ed25519.cr.yp.to/. -package ed25519 - -// This code is a port of the public domain, "ref10" implementation of ed25519 -// from SUPERCOP. - -import ( - "crypto/sha512" - "crypto/subtle" - "io" - - "github.com/agl/ed25519/edwards25519" -) - -const ( - PublicKeySize = 32 - PrivateKeySize = 64 - SignatureSize = 64 -) - -// GenerateKey generates a public/private key pair using randomness from rand. -func GenerateKey(rand io.Reader) (publicKey *[PublicKeySize]byte, privateKey *[PrivateKeySize]byte, err error) { - privateKey = new([64]byte) - publicKey = new([32]byte) - _, err = io.ReadFull(rand, privateKey[:32]) - if err != nil { - return nil, nil, err - } - - h := sha512.New() - h.Write(privateKey[:32]) - digest := h.Sum(nil) - - digest[0] &= 248 - digest[31] &= 127 - digest[31] |= 64 - - var A edwards25519.ExtendedGroupElement - var hBytes [32]byte - copy(hBytes[:], digest) - edwards25519.GeScalarMultBase(&A, &hBytes) - A.ToBytes(publicKey) - - copy(privateKey[32:], publicKey[:]) - return -} - -// Sign signs the message with privateKey and returns a signature. 
-func Sign(privateKey *[PrivateKeySize]byte, message []byte) *[SignatureSize]byte { - h := sha512.New() - h.Write(privateKey[:32]) - - var digest1, messageDigest, hramDigest [64]byte - var expandedSecretKey [32]byte - h.Sum(digest1[:0]) - copy(expandedSecretKey[:], digest1[:]) - expandedSecretKey[0] &= 248 - expandedSecretKey[31] &= 63 - expandedSecretKey[31] |= 64 - - h.Reset() - h.Write(digest1[32:]) - h.Write(message) - h.Sum(messageDigest[:0]) - - var messageDigestReduced [32]byte - edwards25519.ScReduce(&messageDigestReduced, &messageDigest) - var R edwards25519.ExtendedGroupElement - edwards25519.GeScalarMultBase(&R, &messageDigestReduced) - - var encodedR [32]byte - R.ToBytes(&encodedR) - - h.Reset() - h.Write(encodedR[:]) - h.Write(privateKey[32:]) - h.Write(message) - h.Sum(hramDigest[:0]) - var hramDigestReduced [32]byte - edwards25519.ScReduce(&hramDigestReduced, &hramDigest) - - var s [32]byte - edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced) - - signature := new([64]byte) - copy(signature[:], encodedR[:]) - copy(signature[32:], s[:]) - return signature -} - -// Verify returns true iff sig is a valid signature of message by publicKey. -func Verify(publicKey *[PublicKeySize]byte, message []byte, sig *[SignatureSize]byte) bool { - if sig[63]&224 != 0 { - return false - } - - var A edwards25519.ExtendedGroupElement - if !A.FromBytes(publicKey) { - return false - } - - h := sha512.New() - h.Write(sig[:32]) - h.Write(publicKey[:]) - h.Write(message) - var digest [64]byte - h.Sum(digest[:0]) - - var hReduced [32]byte - edwards25519.ScReduce(&hReduced, &digest) - - var R edwards25519.ProjectiveGroupElement - var b [32]byte - copy(b[:], sig[32:]) - edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &b) - - var checkR [32]byte - R.ToBytes(&checkR) - return subtle.ConstantTimeCompare(sig[:32], checkR[:]) == 1 -} diff --git a/vendor/src/github.com/agl/ed25519/edwards25519/const.go b/vendor/src/github.com/agl/ed25519/edwards25519/const.go deleted file mode 100644 index ea5b77a710..0000000000 --- a/vendor/src/github.com/agl/ed25519/edwards25519/const.go +++ /dev/null @@ -1,1411 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
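// Editorial aside, not part of the patch: end-to-end usage of the ed25519
// package deleted above, relying only on the three exported functions it
// defines (GenerateKey, Sign, Verify). The import path matches this vendor
// tree; everything else in this sketch is illustrative.

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/agl/ed25519"
)

func main() {
	// GenerateKey reads 32 bytes of entropy from rand.Reader and derives
	// the key pair; the public key is also cached in privateKey[32:].
	publicKey, privateKey, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		panic(err)
	}

	message := []byte("attack at dawn")
	sig := ed25519.Sign(privateKey, message) // *[ed25519.SignatureSize]byte

	// Verify recomputes R from the public key and the challenge hash, then
	// compares it against the signature in constant time.
	fmt.Println(ed25519.Verify(publicKey, message, sig))            // true
	fmt.Println(ed25519.Verify(publicKey, []byte("tampered"), sig)) // false
}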
- -package edwards25519 - -var d = FieldElement{ - -10913610, 13857413, -15372611, 6949391, 114729, -8787816, -6275908, -3247719, -18696448, -12055116, -} - -var d2 = FieldElement{ - -21827239, -5839606, -30745221, 13898782, 229458, 15978800, -12551817, -6495438, 29715968, 9444199, -} - -var SqrtM1 = FieldElement{ - -32595792, -7943725, 9377950, 3500415, 12389472, -272473, -25146209, -2005654, 326686, 11406482, -} - -var A = FieldElement{ - 486662, 0, 0, 0, 0, 0, 0, 0, 0, 0, -} - -var bi = [8]PreComputedGroupElement{ - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{-22518993, -6692182, 14201702, -8745502, -23510406, 8844726, 18474211, -1361450, -13062696, 13821877}, - FieldElement{-6455177, -7839871, 3374702, -4740862, -27098617, -10571707, 31655028, -7212327, 18853322, -14220951}, - FieldElement{4566830, -12963868, -28974889, -12240689, -7602672, -2830569, -8514358, -10431137, 2207753, -3209784}, - }, - { - FieldElement{-25154831, -4185821, 29681144, 7868801, -6854661, -9423865, -12437364, -663000, -31111463, -16132436}, - FieldElement{25576264, -2703214, 7349804, -11814844, 16472782, 9300885, 3844789, 15725684, 171356, 6466918}, - FieldElement{23103977, 13316479, 9739013, -16149481, 817875, -15038942, 8965339, -14088058, -30714912, 16193877}, - }, - { - FieldElement{-33521811, 3180713, -2394130, 14003687, -16903474, -16270840, 17238398, 4729455, -18074513, 9256800}, - FieldElement{-25182317, -4174131, 32336398, 5036987, -21236817, 11360617, 22616405, 9761698, -19827198, 630305}, - FieldElement{-13720693, 2639453, -24237460, -7406481, 9494427, -5774029, -6554551, -15960994, -2449256, -14291300}, - }, - { - FieldElement{-3151181, -5046075, 9282714, 6866145, -31907062, -863023, -18940575, 15033784, 25105118, -7894876}, - FieldElement{-24326370, 15950226, -31801215, -14592823, -11662737, -5090925, 1573892, -2625887, 2198790, -15804619}, - FieldElement{-3099351, 10324967, -2241613, 7453183, -5446979, -2735503, -13812022, -16236442, -32461234, -12290683}, - }, -} - -var base = [32][8]PreComputedGroupElement{ - { - { - FieldElement{25967493, -14356035, 29566456, 3660896, -12694345, 4014787, 27544626, -11754271, -6079156, 
2047605}, - FieldElement{-12545711, 934262, -2722910, 3049990, -727428, 9406986, 12720692, 5043384, 19500929, -15469378}, - FieldElement{-8738181, 4489570, 9688441, -14785194, 10184609, -12363380, 29287919, 11864899, -24514362, -4438546}, - }, - { - FieldElement{-12815894, -12976347, -21581243, 11784320, -25355658, -2750717, -11717903, -3814571, -358445, -10211303}, - FieldElement{-21703237, 6903825, 27185491, 6451973, -29577724, -9554005, -15616551, 11189268, -26829678, -5319081}, - FieldElement{26966642, 11152617, 32442495, 15396054, 14353839, -12752335, -3128826, -9541118, -15472047, -4166697}, - }, - { - FieldElement{15636291, -9688557, 24204773, -7912398, 616977, -16685262, 27787600, -14772189, 28944400, -1550024}, - FieldElement{16568933, 4717097, -11556148, -1102322, 15682896, -11807043, 16354577, -11775962, 7689662, 11199574}, - FieldElement{30464156, -5976125, -11779434, -15670865, 23220365, 15915852, 7512774, 10017326, -17749093, -9920357}, - }, - { - FieldElement{-17036878, 13921892, 10945806, -6033431, 27105052, -16084379, -28926210, 15006023, 3284568, -6276540}, - FieldElement{23599295, -8306047, -11193664, -7687416, 13236774, 10506355, 7464579, 9656445, 13059162, 10374397}, - FieldElement{7798556, 16710257, 3033922, 2874086, 28997861, 2835604, 32406664, -3839045, -641708, -101325}, - }, - { - FieldElement{10861363, 11473154, 27284546, 1981175, -30064349, 12577861, 32867885, 14515107, -15438304, 10819380}, - FieldElement{4708026, 6336745, 20377586, 9066809, -11272109, 6594696, -25653668, 12483688, -12668491, 5581306}, - FieldElement{19563160, 16186464, -29386857, 4097519, 10237984, -4348115, 28542350, 13850243, -23678021, -15815942}, - }, - { - FieldElement{-15371964, -12862754, 32573250, 4720197, -26436522, 5875511, -19188627, -15224819, -9818940, -12085777}, - FieldElement{-8549212, 109983, 15149363, 2178705, 22900618, 4543417, 3044240, -15689887, 1762328, 14866737}, - FieldElement{-18199695, -15951423, -10473290, 1707278, -17185920, 3916101, -28236412, 3959421, 27914454, 4383652}, - }, - { - FieldElement{5153746, 9909285, 1723747, -2777874, 30523605, 5516873, 19480852, 5230134, -23952439, -15175766}, - FieldElement{-30269007, -3463509, 7665486, 10083793, 28475525, 1649722, 20654025, 16520125, 30598449, 7715701}, - FieldElement{28881845, 14381568, 9657904, 3680757, -20181635, 7843316, -31400660, 1370708, 29794553, -1409300}, - }, - { - FieldElement{14499471, -2729599, -33191113, -4254652, 28494862, 14271267, 30290735, 10876454, -33154098, 2381726}, - FieldElement{-7195431, -2655363, -14730155, 462251, -27724326, 3941372, -6236617, 3696005, -32300832, 15351955}, - FieldElement{27431194, 8222322, 16448760, -3907995, -18707002, 11938355, -32961401, -2970515, 29551813, 10109425}, - }, - }, - { - { - FieldElement{-13657040, -13155431, -31283750, 11777098, 21447386, 6519384, -2378284, -1627556, 10092783, -4764171}, - FieldElement{27939166, 14210322, 4677035, 16277044, -22964462, -12398139, -32508754, 12005538, -17810127, 12803510}, - FieldElement{17228999, -15661624, -1233527, 300140, -1224870, -11714777, 30364213, -9038194, 18016357, 4397660}, - }, - { - FieldElement{-10958843, -7690207, 4776341, -14954238, 27850028, -15602212, -26619106, 14544525, -17477504, 982639}, - FieldElement{29253598, 15796703, -2863982, -9908884, 10057023, 3163536, 7332899, -4120128, -21047696, 9934963}, - FieldElement{5793303, 16271923, -24131614, -10116404, 29188560, 1206517, -14747930, 4559895, -30123922, -10897950}, - }, - { - FieldElement{-27643952, -11493006, 16282657, -11036493, 28414021, 
-15012264, 24191034, 4541697, -13338309, 5500568}, - FieldElement{12650548, -1497113, 9052871, 11355358, -17680037, -8400164, -17430592, 12264343, 10874051, 13524335}, - FieldElement{25556948, -3045990, 714651, 2510400, 23394682, -10415330, 33119038, 5080568, -22528059, 5376628}, - }, - { - FieldElement{-26088264, -4011052, -17013699, -3537628, -6726793, 1920897, -22321305, -9447443, 4535768, 1569007}, - FieldElement{-2255422, 14606630, -21692440, -8039818, 28430649, 8775819, -30494562, 3044290, 31848280, 12543772}, - FieldElement{-22028579, 2943893, -31857513, 6777306, 13784462, -4292203, -27377195, -2062731, 7718482, 14474653}, - }, - { - FieldElement{2385315, 2454213, -22631320, 46603, -4437935, -15680415, 656965, -7236665, 24316168, -5253567}, - FieldElement{13741529, 10911568, -33233417, -8603737, -20177830, -1033297, 33040651, -13424532, -20729456, 8321686}, - FieldElement{21060490, -2212744, 15712757, -4336099, 1639040, 10656336, 23845965, -11874838, -9984458, 608372}, - }, - { - FieldElement{-13672732, -15087586, -10889693, -7557059, -6036909, 11305547, 1123968, -6780577, 27229399, 23887}, - FieldElement{-23244140, -294205, -11744728, 14712571, -29465699, -2029617, 12797024, -6440308, -1633405, 16678954}, - FieldElement{-29500620, 4770662, -16054387, 14001338, 7830047, 9564805, -1508144, -4795045, -17169265, 4904953}, - }, - { - FieldElement{24059557, 14617003, 19037157, -15039908, 19766093, -14906429, 5169211, 16191880, 2128236, -4326833}, - FieldElement{-16981152, 4124966, -8540610, -10653797, 30336522, -14105247, -29806336, 916033, -6882542, -2986532}, - FieldElement{-22630907, 12419372, -7134229, -7473371, -16478904, 16739175, 285431, 2763829, 15736322, 4143876}, - }, - { - FieldElement{2379352, 11839345, -4110402, -5988665, 11274298, 794957, 212801, -14594663, 23527084, -16458268}, - FieldElement{33431127, -11130478, -17838966, -15626900, 8909499, 8376530, -32625340, 4087881, -15188911, -14416214}, - FieldElement{1767683, 7197987, -13205226, -2022635, -13091350, 448826, 5799055, 4357868, -4774191, -16323038}, - }, - }, - { - { - FieldElement{6721966, 13833823, -23523388, -1551314, 26354293, -11863321, 23365147, -3949732, 7390890, 2759800}, - FieldElement{4409041, 2052381, 23373853, 10530217, 7676779, -12885954, 21302353, -4264057, 1244380, -12919645}, - FieldElement{-4421239, 7169619, 4982368, -2957590, 30256825, -2777540, 14086413, 9208236, 15886429, 16489664}, - }, - { - FieldElement{1996075, 10375649, 14346367, 13311202, -6874135, -16438411, -13693198, 398369, -30606455, -712933}, - FieldElement{-25307465, 9795880, -2777414, 14878809, -33531835, 14780363, 13348553, 12076947, -30836462, 5113182}, - FieldElement{-17770784, 11797796, 31950843, 13929123, -25888302, 12288344, -30341101, -7336386, 13847711, 5387222}, - }, - { - FieldElement{-18582163, -3416217, 17824843, -2340966, 22744343, -10442611, 8763061, 3617786, -19600662, 10370991}, - FieldElement{20246567, -14369378, 22358229, -543712, 18507283, -10413996, 14554437, -8746092, 32232924, 16763880}, - FieldElement{9648505, 10094563, 26416693, 14745928, -30374318, -6472621, 11094161, 15689506, 3140038, -16510092}, - }, - { - FieldElement{-16160072, 5472695, 31895588, 4744994, 8823515, 10365685, -27224800, 9448613, -28774454, 366295}, - FieldElement{19153450, 11523972, -11096490, -6503142, -24647631, 5420647, 28344573, 8041113, 719605, 11671788}, - FieldElement{8678025, 2694440, -6808014, 2517372, 4964326, 11152271, -15432916, -15266516, 27000813, -10195553}, - }, - { - FieldElement{-15157904, 7134312, 8639287, -2814877, 
-7235688, 10421742, 564065, 5336097, 6750977, -14521026}, - FieldElement{11836410, -3979488, 26297894, 16080799, 23455045, 15735944, 1695823, -8819122, 8169720, 16220347}, - FieldElement{-18115838, 8653647, 17578566, -6092619, -8025777, -16012763, -11144307, -2627664, -5990708, -14166033}, - }, - { - FieldElement{-23308498, -10968312, 15213228, -10081214, -30853605, -11050004, 27884329, 2847284, 2655861, 1738395}, - FieldElement{-27537433, -14253021, -25336301, -8002780, -9370762, 8129821, 21651608, -3239336, -19087449, -11005278}, - FieldElement{1533110, 3437855, 23735889, 459276, 29970501, 11335377, 26030092, 5821408, 10478196, 8544890}, - }, - { - FieldElement{32173121, -16129311, 24896207, 3921497, 22579056, -3410854, 19270449, 12217473, 17789017, -3395995}, - FieldElement{-30552961, -2228401, -15578829, -10147201, 13243889, 517024, 15479401, -3853233, 30460520, 1052596}, - FieldElement{-11614875, 13323618, 32618793, 8175907, -15230173, 12596687, 27491595, -4612359, 3179268, -9478891}, - }, - { - FieldElement{31947069, -14366651, -4640583, -15339921, -15125977, -6039709, -14756777, -16411740, 19072640, -9511060}, - FieldElement{11685058, 11822410, 3158003, -13952594, 33402194, -4165066, 5977896, -5215017, 473099, 5040608}, - FieldElement{-20290863, 8198642, -27410132, 11602123, 1290375, -2799760, 28326862, 1721092, -19558642, -3131606}, - }, - }, - { - { - FieldElement{7881532, 10687937, 7578723, 7738378, -18951012, -2553952, 21820786, 8076149, -27868496, 11538389}, - FieldElement{-19935666, 3899861, 18283497, -6801568, -15728660, -11249211, 8754525, 7446702, -5676054, 5797016}, - FieldElement{-11295600, -3793569, -15782110, -7964573, 12708869, -8456199, 2014099, -9050574, -2369172, -5877341}, - }, - { - FieldElement{-22472376, -11568741, -27682020, 1146375, 18956691, 16640559, 1192730, -3714199, 15123619, 10811505}, - FieldElement{14352098, -3419715, -18942044, 10822655, 32750596, 4699007, -70363, 15776356, -28886779, -11974553}, - FieldElement{-28241164, -8072475, -4978962, -5315317, 29416931, 1847569, -20654173, -16484855, 4714547, -9600655}, - }, - { - FieldElement{15200332, 8368572, 19679101, 15970074, -31872674, 1959451, 24611599, -4543832, -11745876, 12340220}, - FieldElement{12876937, -10480056, 33134381, 6590940, -6307776, 14872440, 9613953, 8241152, 15370987, 9608631}, - FieldElement{-4143277, -12014408, 8446281, -391603, 4407738, 13629032, -7724868, 15866074, -28210621, -8814099}, - }, - { - FieldElement{26660628, -15677655, 8393734, 358047, -7401291, 992988, -23904233, 858697, 20571223, 8420556}, - FieldElement{14620715, 13067227, -15447274, 8264467, 14106269, 15080814, 33531827, 12516406, -21574435, -12476749}, - FieldElement{236881, 10476226, 57258, -14677024, 6472998, 2466984, 17258519, 7256740, 8791136, 15069930}, - }, - { - FieldElement{1276410, -9371918, 22949635, -16322807, -23493039, -5702186, 14711875, 4874229, -30663140, -2331391}, - FieldElement{5855666, 4990204, -13711848, 7294284, -7804282, 1924647, -1423175, -7912378, -33069337, 9234253}, - FieldElement{20590503, -9018988, 31529744, -7352666, -2706834, 10650548, 31559055, -11609587, 18979186, 13396066}, - }, - { - FieldElement{24474287, 4968103, 22267082, 4407354, 24063882, -8325180, -18816887, 13594782, 33514650, 7021958}, - FieldElement{-11566906, -6565505, -21365085, 15928892, -26158305, 4315421, -25948728, -3916677, -21480480, 12868082}, - FieldElement{-28635013, 13504661, 19988037, -2132761, 21078225, 6443208, -21446107, 2244500, -12455797, -8089383}, - }, - { - FieldElement{-30595528, 13793479, -5852820, 
319136, -25723172, -6263899, 33086546, 8957937, -15233648, 5540521}, - FieldElement{-11630176, -11503902, -8119500, -7643073, 2620056, 1022908, -23710744, -1568984, -16128528, -14962807}, - FieldElement{23152971, 775386, 27395463, 14006635, -9701118, 4649512, 1689819, 892185, -11513277, -15205948}, - }, - { - FieldElement{9770129, 9586738, 26496094, 4324120, 1556511, -3550024, 27453819, 4763127, -19179614, 5867134}, - FieldElement{-32765025, 1927590, 31726409, -4753295, 23962434, -16019500, 27846559, 5931263, -29749703, -16108455}, - FieldElement{27461885, -2977536, 22380810, 1815854, -23033753, -3031938, 7283490, -15148073, -19526700, 7734629}, - }, - }, - { - { - FieldElement{-8010264, -9590817, -11120403, 6196038, 29344158, -13430885, 7585295, -3176626, 18549497, 15302069}, - FieldElement{-32658337, -6171222, -7672793, -11051681, 6258878, 13504381, 10458790, -6418461, -8872242, 8424746}, - FieldElement{24687205, 8613276, -30667046, -3233545, 1863892, -1830544, 19206234, 7134917, -11284482, -828919}, - }, - { - FieldElement{11334899, -9218022, 8025293, 12707519, 17523892, -10476071, 10243738, -14685461, -5066034, 16498837}, - FieldElement{8911542, 6887158, -9584260, -6958590, 11145641, -9543680, 17303925, -14124238, 6536641, 10543906}, - FieldElement{-28946384, 15479763, -17466835, 568876, -1497683, 11223454, -2669190, -16625574, -27235709, 8876771}, - }, - { - FieldElement{-25742899, -12566864, -15649966, -846607, -33026686, -796288, -33481822, 15824474, -604426, -9039817}, - FieldElement{10330056, 70051, 7957388, -9002667, 9764902, 15609756, 27698697, -4890037, 1657394, 3084098}, - FieldElement{10477963, -7470260, 12119566, -13250805, 29016247, -5365589, 31280319, 14396151, -30233575, 15272409}, - }, - { - FieldElement{-12288309, 3169463, 28813183, 16658753, 25116432, -5630466, -25173957, -12636138, -25014757, 1950504}, - FieldElement{-26180358, 9489187, 11053416, -14746161, -31053720, 5825630, -8384306, -8767532, 15341279, 8373727}, - FieldElement{28685821, 7759505, -14378516, -12002860, -31971820, 4079242, 298136, -10232602, -2878207, 15190420}, - }, - { - FieldElement{-32932876, 13806336, -14337485, -15794431, -24004620, 10940928, 8669718, 2742393, -26033313, -6875003}, - FieldElement{-1580388, -11729417, -25979658, -11445023, -17411874, -10912854, 9291594, -16247779, -12154742, 6048605}, - FieldElement{-30305315, 14843444, 1539301, 11864366, 20201677, 1900163, 13934231, 5128323, 11213262, 9168384}, - }, - { - FieldElement{-26280513, 11007847, 19408960, -940758, -18592965, -4328580, -5088060, -11105150, 20470157, -16398701}, - FieldElement{-23136053, 9282192, 14855179, -15390078, -7362815, -14408560, -22783952, 14461608, 14042978, 5230683}, - FieldElement{29969567, -2741594, -16711867, -8552442, 9175486, -2468974, 21556951, 3506042, -5933891, -12449708}, - }, - { - FieldElement{-3144746, 8744661, 19704003, 4581278, -20430686, 6830683, -21284170, 8971513, -28539189, 15326563}, - FieldElement{-19464629, 10110288, -17262528, -3503892, -23500387, 1355669, -15523050, 15300988, -20514118, 9168260}, - FieldElement{-5353335, 4488613, -23803248, 16314347, 7780487, -15638939, -28948358, 9601605, 33087103, -9011387}, - }, - { - FieldElement{-19443170, -15512900, -20797467, -12445323, -29824447, 10229461, -27444329, -15000531, -5996870, 15664672}, - FieldElement{23294591, -16632613, -22650781, -8470978, 27844204, 11461195, 13099750, -2460356, 18151676, 13417686}, - FieldElement{-24722913, -4176517, -31150679, 5988919, -26858785, 6685065, 1661597, -12551441, 15271676, -15452665}, - }, - }, - { - 
{ - FieldElement{11433042, -13228665, 8239631, -5279517, -1985436, -725718, -18698764, 2167544, -6921301, -13440182}, - FieldElement{-31436171, 15575146, 30436815, 12192228, -22463353, 9395379, -9917708, -8638997, 12215110, 12028277}, - FieldElement{14098400, 6555944, 23007258, 5757252, -15427832, -12950502, 30123440, 4617780, -16900089, -655628}, - }, - { - FieldElement{-4026201, -15240835, 11893168, 13718664, -14809462, 1847385, -15819999, 10154009, 23973261, -12684474}, - FieldElement{-26531820, -3695990, -1908898, 2534301, -31870557, -16550355, 18341390, -11419951, 32013174, -10103539}, - FieldElement{-25479301, 10876443, -11771086, -14625140, -12369567, 1838104, 21911214, 6354752, 4425632, -837822}, - }, - { - FieldElement{-10433389, -14612966, 22229858, -3091047, -13191166, 776729, -17415375, -12020462, 4725005, 14044970}, - FieldElement{19268650, -7304421, 1555349, 8692754, -21474059, -9910664, 6347390, -1411784, -19522291, -16109756}, - FieldElement{-24864089, 12986008, -10898878, -5558584, -11312371, -148526, 19541418, 8180106, 9282262, 10282508}, - }, - { - FieldElement{-26205082, 4428547, -8661196, -13194263, 4098402, -14165257, 15522535, 8372215, 5542595, -10702683}, - FieldElement{-10562541, 14895633, 26814552, -16673850, -17480754, -2489360, -2781891, 6993761, -18093885, 10114655}, - FieldElement{-20107055, -929418, 31422704, 10427861, -7110749, 6150669, -29091755, -11529146, 25953725, -106158}, - }, - { - FieldElement{-4234397, -8039292, -9119125, 3046000, 2101609, -12607294, 19390020, 6094296, -3315279, 12831125}, - FieldElement{-15998678, 7578152, 5310217, 14408357, -33548620, -224739, 31575954, 6326196, 7381791, -2421839}, - FieldElement{-20902779, 3296811, 24736065, -16328389, 18374254, 7318640, 6295303, 8082724, -15362489, 12339664}, - }, - { - FieldElement{27724736, 2291157, 6088201, -14184798, 1792727, 5857634, 13848414, 15768922, 25091167, 14856294}, - FieldElement{-18866652, 8331043, 24373479, 8541013, -701998, -9269457, 12927300, -12695493, -22182473, -9012899}, - FieldElement{-11423429, -5421590, 11632845, 3405020, 30536730, -11674039, -27260765, 13866390, 30146206, 9142070}, - }, - { - FieldElement{3924129, -15307516, -13817122, -10054960, 12291820, -668366, -27702774, 9326384, -8237858, 4171294}, - FieldElement{-15921940, 16037937, 6713787, 16606682, -21612135, 2790944, 26396185, 3731949, 345228, -5462949}, - FieldElement{-21327538, 13448259, 25284571, 1143661, 20614966, -8849387, 2031539, -12391231, -16253183, -13582083}, - }, - { - FieldElement{31016211, -16722429, 26371392, -14451233, -5027349, 14854137, 17477601, 3842657, 28012650, -16405420}, - FieldElement{-5075835, 9368966, -8562079, -4600902, -15249953, 6970560, -9189873, 16292057, -8867157, 3507940}, - FieldElement{29439664, 3537914, 23333589, 6997794, -17555561, -11018068, -15209202, -15051267, -9164929, 6580396}, - }, - }, - { - { - FieldElement{-12185861, -7679788, 16438269, 10826160, -8696817, -6235611, 17860444, -9273846, -2095802, 9304567}, - FieldElement{20714564, -4336911, 29088195, 7406487, 11426967, -5095705, 14792667, -14608617, 5289421, -477127}, - FieldElement{-16665533, -10650790, -6160345, -13305760, 9192020, -1802462, 17271490, 12349094, 26939669, -3752294}, - }, - { - FieldElement{-12889898, 9373458, 31595848, 16374215, 21471720, 13221525, -27283495, -12348559, -3698806, 117887}, - FieldElement{22263325, -6560050, 3984570, -11174646, -15114008, -566785, 28311253, 5358056, -23319780, 541964}, - FieldElement{16259219, 3261970, 2309254, -15534474, -16885711, -4581916, 24134070, -16705829, 
-13337066, -13552195}, - }, - { - FieldElement{9378160, -13140186, -22845982, -12745264, 28198281, -7244098, -2399684, -717351, 690426, 14876244}, - FieldElement{24977353, -314384, -8223969, -13465086, 28432343, -1176353, -13068804, -12297348, -22380984, 6618999}, - FieldElement{-1538174, 11685646, 12944378, 13682314, -24389511, -14413193, 8044829, -13817328, 32239829, -5652762}, - }, - { - FieldElement{-18603066, 4762990, -926250, 8885304, -28412480, -3187315, 9781647, -10350059, 32779359, 5095274}, - FieldElement{-33008130, -5214506, -32264887, -3685216, 9460461, -9327423, -24601656, 14506724, 21639561, -2630236}, - FieldElement{-16400943, -13112215, 25239338, 15531969, 3987758, -4499318, -1289502, -6863535, 17874574, 558605}, - }, - { - FieldElement{-13600129, 10240081, 9171883, 16131053, -20869254, 9599700, 33499487, 5080151, 2085892, 5119761}, - FieldElement{-22205145, -2519528, -16381601, 414691, -25019550, 2170430, 30634760, -8363614, -31999993, -5759884}, - FieldElement{-6845704, 15791202, 8550074, -1312654, 29928809, -12092256, 27534430, -7192145, -22351378, 12961482}, - }, - { - FieldElement{-24492060, -9570771, 10368194, 11582341, -23397293, -2245287, 16533930, 8206996, -30194652, -5159638}, - FieldElement{-11121496, -3382234, 2307366, 6362031, -135455, 8868177, -16835630, 7031275, 7589640, 8945490}, - FieldElement{-32152748, 8917967, 6661220, -11677616, -1192060, -15793393, 7251489, -11182180, 24099109, -14456170}, - }, - { - FieldElement{5019558, -7907470, 4244127, -14714356, -26933272, 6453165, -19118182, -13289025, -6231896, -10280736}, - FieldElement{10853594, 10721687, 26480089, 5861829, -22995819, 1972175, -1866647, -10557898, -3363451, -6441124}, - FieldElement{-17002408, 5906790, 221599, -6563147, 7828208, -13248918, 24362661, -2008168, -13866408, 7421392}, - }, - { - FieldElement{8139927, -6546497, 32257646, -5890546, 30375719, 1886181, -21175108, 15441252, 28826358, -4123029}, - FieldElement{6267086, 9695052, 7709135, -16603597, -32869068, -1886135, 14795160, -7840124, 13746021, -1742048}, - FieldElement{28584902, 7787108, -6732942, -15050729, 22846041, -7571236, -3181936, -363524, 4771362, -8419958}, - }, - }, - { - { - FieldElement{24949256, 6376279, -27466481, -8174608, -18646154, -9930606, 33543569, -12141695, 3569627, 11342593}, - FieldElement{26514989, 4740088, 27912651, 3697550, 19331575, -11472339, 6809886, 4608608, 7325975, -14801071}, - FieldElement{-11618399, -14554430, -24321212, 7655128, -1369274, 5214312, -27400540, 10258390, -17646694, -8186692}, - }, - { - FieldElement{11431204, 15823007, 26570245, 14329124, 18029990, 4796082, -31446179, 15580664, 9280358, -3973687}, - FieldElement{-160783, -10326257, -22855316, -4304997, -20861367, -13621002, -32810901, -11181622, -15545091, 4387441}, - FieldElement{-20799378, 12194512, 3937617, -5805892, -27154820, 9340370, -24513992, 8548137, 20617071, -7482001}, - }, - { - FieldElement{-938825, -3930586, -8714311, 16124718, 24603125, -6225393, -13775352, -11875822, 24345683, 10325460}, - FieldElement{-19855277, -1568885, -22202708, 8714034, 14007766, 6928528, 16318175, -1010689, 4766743, 3552007}, - FieldElement{-21751364, -16730916, 1351763, -803421, -4009670, 3950935, 3217514, 14481909, 10988822, -3994762}, - }, - { - FieldElement{15564307, -14311570, 3101243, 5684148, 30446780, -8051356, 12677127, -6505343, -8295852, 13296005}, - FieldElement{-9442290, 6624296, -30298964, -11913677, -4670981, -2057379, 31521204, 9614054, -30000824, 12074674}, - FieldElement{4771191, -135239, 14290749, -13089852, 27992298, 
14998318, -1413936, -1556716, 29832613, -16391035}, - }, - { - FieldElement{7064884, -7541174, -19161962, -5067537, -18891269, -2912736, 25825242, 5293297, -27122660, 13101590}, - FieldElement{-2298563, 2439670, -7466610, 1719965, -27267541, -16328445, 32512469, -5317593, -30356070, -4190957}, - FieldElement{-30006540, 10162316, -33180176, 3981723, -16482138, -13070044, 14413974, 9515896, 19568978, 9628812}, - }, - { - FieldElement{33053803, 199357, 15894591, 1583059, 27380243, -4580435, -17838894, -6106839, -6291786, 3437740}, - FieldElement{-18978877, 3884493, 19469877, 12726490, 15913552, 13614290, -22961733, 70104, 7463304, 4176122}, - FieldElement{-27124001, 10659917, 11482427, -16070381, 12771467, -6635117, -32719404, -5322751, 24216882, 5944158}, - }, - { - FieldElement{8894125, 7450974, -2664149, -9765752, -28080517, -12389115, 19345746, 14680796, 11632993, 5847885}, - FieldElement{26942781, -2315317, 9129564, -4906607, 26024105, 11769399, -11518837, 6367194, -9727230, 4782140}, - FieldElement{19916461, -4828410, -22910704, -11414391, 25606324, -5972441, 33253853, 8220911, 6358847, -1873857}, - }, - { - FieldElement{801428, -2081702, 16569428, 11065167, 29875704, 96627, 7908388, -4480480, -13538503, 1387155}, - FieldElement{19646058, 5720633, -11416706, 12814209, 11607948, 12749789, 14147075, 15156355, -21866831, 11835260}, - FieldElement{19299512, 1155910, 28703737, 14890794, 2925026, 7269399, 26121523, 15467869, -26560550, 5052483}, - }, - }, - { - { - FieldElement{-3017432, 10058206, 1980837, 3964243, 22160966, 12322533, -6431123, -12618185, 12228557, -7003677}, - FieldElement{32944382, 14922211, -22844894, 5188528, 21913450, -8719943, 4001465, 13238564, -6114803, 8653815}, - FieldElement{22865569, -4652735, 27603668, -12545395, 14348958, 8234005, 24808405, 5719875, 28483275, 2841751}, - }, - { - FieldElement{-16420968, -1113305, -327719, -12107856, 21886282, -15552774, -1887966, -315658, 19932058, -12739203}, - FieldElement{-11656086, 10087521, -8864888, -5536143, -19278573, -3055912, 3999228, 13239134, -4777469, -13910208}, - FieldElement{1382174, -11694719, 17266790, 9194690, -13324356, 9720081, 20403944, 11284705, -14013818, 3093230}, - }, - { - FieldElement{16650921, -11037932, -1064178, 1570629, -8329746, 7352753, -302424, 16271225, -24049421, -6691850}, - FieldElement{-21911077, -5927941, -4611316, -5560156, -31744103, -10785293, 24123614, 15193618, -21652117, -16739389}, - FieldElement{-9935934, -4289447, -25279823, 4372842, 2087473, 10399484, 31870908, 14690798, 17361620, 11864968}, - }, - { - FieldElement{-11307610, 6210372, 13206574, 5806320, -29017692, -13967200, -12331205, -7486601, -25578460, -16240689}, - FieldElement{14668462, -12270235, 26039039, 15305210, 25515617, 4542480, 10453892, 6577524, 9145645, -6443880}, - FieldElement{5974874, 3053895, -9433049, -10385191, -31865124, 3225009, -7972642, 3936128, -5652273, -3050304}, - }, - { - FieldElement{30625386, -4729400, -25555961, -12792866, -20484575, 7695099, 17097188, -16303496, -27999779, 1803632}, - FieldElement{-3553091, 9865099, -5228566, 4272701, -5673832, -16689700, 14911344, 12196514, -21405489, 7047412}, - FieldElement{20093277, 9920966, -11138194, -5343857, 13161587, 12044805, -32856851, 4124601, -32343828, -10257566}, - }, - { - FieldElement{-20788824, 14084654, -13531713, 7842147, 19119038, -13822605, 4752377, -8714640, -21679658, 2288038}, - FieldElement{-26819236, -3283715, 29965059, 3039786, -14473765, 2540457, 29457502, 14625692, -24819617, 12570232}, - FieldElement{-1063558, -11551823, 16920318, 
12494842, 1278292, -5869109, -21159943, -3498680, -11974704, 4724943}, - }, - { - FieldElement{17960970, -11775534, -4140968, -9702530, -8876562, -1410617, -12907383, -8659932, -29576300, 1903856}, - FieldElement{23134274, -14279132, -10681997, -1611936, 20684485, 15770816, -12989750, 3190296, 26955097, 14109738}, - FieldElement{15308788, 5320727, -30113809, -14318877, 22902008, 7767164, 29425325, -11277562, 31960942, 11934971}, - }, - { - FieldElement{-27395711, 8435796, 4109644, 12222639, -24627868, 14818669, 20638173, 4875028, 10491392, 1379718}, - FieldElement{-13159415, 9197841, 3875503, -8936108, -1383712, -5879801, 33518459, 16176658, 21432314, 12180697}, - FieldElement{-11787308, 11500838, 13787581, -13832590, -22430679, 10140205, 1465425, 12689540, -10301319, -13872883}, - }, - }, - { - { - FieldElement{5414091, -15386041, -21007664, 9643570, 12834970, 1186149, -2622916, -1342231, 26128231, 6032912}, - FieldElement{-26337395, -13766162, 32496025, -13653919, 17847801, -12669156, 3604025, 8316894, -25875034, -10437358}, - FieldElement{3296484, 6223048, 24680646, -12246460, -23052020, 5903205, -8862297, -4639164, 12376617, 3188849}, - }, - { - FieldElement{29190488, -14659046, 27549113, -1183516, 3520066, -10697301, 32049515, -7309113, -16109234, -9852307}, - FieldElement{-14744486, -9309156, 735818, -598978, -20407687, -5057904, 25246078, -15795669, 18640741, -960977}, - FieldElement{-6928835, -16430795, 10361374, 5642961, 4910474, 12345252, -31638386, -494430, 10530747, 1053335}, - }, - { - FieldElement{-29265967, -14186805, -13538216, -12117373, -19457059, -10655384, -31462369, -2948985, 24018831, 15026644}, - FieldElement{-22592535, -3145277, -2289276, 5953843, -13440189, 9425631, 25310643, 13003497, -2314791, -15145616}, - FieldElement{-27419985, -603321, -8043984, -1669117, -26092265, 13987819, -27297622, 187899, -23166419, -2531735}, - }, - { - FieldElement{-21744398, -13810475, 1844840, 5021428, -10434399, -15911473, 9716667, 16266922, -5070217, 726099}, - FieldElement{29370922, -6053998, 7334071, -15342259, 9385287, 2247707, -13661962, -4839461, 30007388, -15823341}, - FieldElement{-936379, 16086691, 23751945, -543318, -1167538, -5189036, 9137109, 730663, 9835848, 4555336}, - }, - { - FieldElement{-23376435, 1410446, -22253753, -12899614, 30867635, 15826977, 17693930, 544696, -11985298, 12422646}, - FieldElement{31117226, -12215734, -13502838, 6561947, -9876867, -12757670, -5118685, -4096706, 29120153, 13924425}, - FieldElement{-17400879, -14233209, 19675799, -2734756, -11006962, -5858820, -9383939, -11317700, 7240931, -237388}, - }, - { - FieldElement{-31361739, -11346780, -15007447, -5856218, -22453340, -12152771, 1222336, 4389483, 3293637, -15551743}, - FieldElement{-16684801, -14444245, 11038544, 11054958, -13801175, -3338533, -24319580, 7733547, 12796905, -6335822}, - FieldElement{-8759414, -10817836, -25418864, 10783769, -30615557, -9746811, -28253339, 3647836, 3222231, -11160462}, - }, - { - FieldElement{18606113, 1693100, -25448386, -15170272, 4112353, 10045021, 23603893, -2048234, -7550776, 2484985}, - FieldElement{9255317, -3131197, -12156162, -1004256, 13098013, -9214866, 16377220, -2102812, -19802075, -3034702}, - FieldElement{-22729289, 7496160, -5742199, 11329249, 19991973, -3347502, -31718148, 9936966, -30097688, -10618797}, - }, - { - FieldElement{21878590, -5001297, 4338336, 13643897, -3036865, 13160960, 19708896, 5415497, -7360503, -4109293}, - FieldElement{27736861, 10103576, 12500508, 8502413, -3413016, -9633558, 10436918, -1550276, -23659143, -8132100}, 
- FieldElement{19492550, -12104365, -29681976, -852630, -3208171, 12403437, 30066266, 8367329, 13243957, 8709688}, - }, - }, - { - { - FieldElement{12015105, 2801261, 28198131, 10151021, 24818120, -4743133, -11194191, -5645734, 5150968, 7274186}, - FieldElement{2831366, -12492146, 1478975, 6122054, 23825128, -12733586, 31097299, 6083058, 31021603, -9793610}, - FieldElement{-2529932, -2229646, 445613, 10720828, -13849527, -11505937, -23507731, 16354465, 15067285, -14147707}, - }, - { - FieldElement{7840942, 14037873, -33364863, 15934016, -728213, -3642706, 21403988, 1057586, -19379462, -12403220}, - FieldElement{915865, -16469274, 15608285, -8789130, -24357026, 6060030, -17371319, 8410997, -7220461, 16527025}, - FieldElement{32922597, -556987, 20336074, -16184568, 10903705, -5384487, 16957574, 52992, 23834301, 6588044}, - }, - { - FieldElement{32752030, 11232950, 3381995, -8714866, 22652988, -10744103, 17159699, 16689107, -20314580, -1305992}, - FieldElement{-4689649, 9166776, -25710296, -10847306, 11576752, 12733943, 7924251, -2752281, 1976123, -7249027}, - FieldElement{21251222, 16309901, -2983015, -6783122, 30810597, 12967303, 156041, -3371252, 12331345, -8237197}, - }, - { - FieldElement{8651614, -4477032, -16085636, -4996994, 13002507, 2950805, 29054427, -5106970, 10008136, -4667901}, - FieldElement{31486080, 15114593, -14261250, 12951354, 14369431, -7387845, 16347321, -13662089, 8684155, -10532952}, - FieldElement{19443825, 11385320, 24468943, -9659068, -23919258, 2187569, -26263207, -6086921, 31316348, 14219878}, - }, - { - FieldElement{-28594490, 1193785, 32245219, 11392485, 31092169, 15722801, 27146014, 6992409, 29126555, 9207390}, - FieldElement{32382935, 1110093, 18477781, 11028262, -27411763, -7548111, -4980517, 10843782, -7957600, -14435730}, - FieldElement{2814918, 7836403, 27519878, -7868156, -20894015, -11553689, -21494559, 8550130, 28346258, 1994730}, - }, - { - FieldElement{-19578299, 8085545, -14000519, -3948622, 2785838, -16231307, -19516951, 7174894, 22628102, 8115180}, - FieldElement{-30405132, 955511, -11133838, -15078069, -32447087, -13278079, -25651578, 3317160, -9943017, 930272}, - FieldElement{-15303681, -6833769, 28856490, 1357446, 23421993, 1057177, 24091212, -1388970, -22765376, -10650715}, - }, - { - FieldElement{-22751231, -5303997, -12907607, -12768866, -15811511, -7797053, -14839018, -16554220, -1867018, 8398970}, - FieldElement{-31969310, 2106403, -4736360, 1362501, 12813763, 16200670, 22981545, -6291273, 18009408, -15772772}, - FieldElement{-17220923, -9545221, -27784654, 14166835, 29815394, 7444469, 29551787, -3727419, 19288549, 1325865}, - }, - { - FieldElement{15100157, -15835752, -23923978, -1005098, -26450192, 15509408, 12376730, -3479146, 33166107, -8042750}, - FieldElement{20909231, 13023121, -9209752, 16251778, -5778415, -8094914, 12412151, 10018715, 2213263, -13878373}, - FieldElement{32529814, -11074689, 30361439, -16689753, -9135940, 1513226, 22922121, 6382134, -5766928, 8371348}, - }, - }, - { - { - FieldElement{9923462, 11271500, 12616794, 3544722, -29998368, -1721626, 12891687, -8193132, -26442943, 10486144}, - FieldElement{-22597207, -7012665, 8587003, -8257861, 4084309, -12970062, 361726, 2610596, -23921530, -11455195}, - FieldElement{5408411, -1136691, -4969122, 10561668, 24145918, 14240566, 31319731, -4235541, 19985175, -3436086}, - }, - { - FieldElement{-13994457, 16616821, 14549246, 3341099, 32155958, 13648976, -17577068, 8849297, 65030, 8370684}, - FieldElement{-8320926, -12049626, 31204563, 5839400, -20627288, -1057277, -19442942, 
6922164, 12743482, -9800518}, - FieldElement{-2361371, 12678785, 28815050, 4759974, -23893047, 4884717, 23783145, 11038569, 18800704, 255233}, - }, - { - FieldElement{-5269658, -1773886, 13957886, 7990715, 23132995, 728773, 13393847, 9066957, 19258688, -14753793}, - FieldElement{-2936654, -10827535, -10432089, 14516793, -3640786, 4372541, -31934921, 2209390, -1524053, 2055794}, - FieldElement{580882, 16705327, 5468415, -2683018, -30926419, -14696000, -7203346, -8994389, -30021019, 7394435}, - }, - { - FieldElement{23838809, 1822728, -15738443, 15242727, 8318092, -3733104, -21672180, -3492205, -4821741, 14799921}, - FieldElement{13345610, 9759151, 3371034, -16137791, 16353039, 8577942, 31129804, 13496856, -9056018, 7402518}, - FieldElement{2286874, -4435931, -20042458, -2008336, -13696227, 5038122, 11006906, -15760352, 8205061, 1607563}, - }, - { - FieldElement{14414086, -8002132, 3331830, -3208217, 22249151, -5594188, 18364661, -2906958, 30019587, -9029278}, - FieldElement{-27688051, 1585953, -10775053, 931069, -29120221, -11002319, -14410829, 12029093, 9944378, 8024}, - FieldElement{4368715, -3709630, 29874200, -15022983, -20230386, -11410704, -16114594, -999085, -8142388, 5640030}, - }, - { - FieldElement{10299610, 13746483, 11661824, 16234854, 7630238, 5998374, 9809887, -16694564, 15219798, -14327783}, - FieldElement{27425505, -5719081, 3055006, 10660664, 23458024, 595578, -15398605, -1173195, -18342183, 9742717}, - FieldElement{6744077, 2427284, 26042789, 2720740, -847906, 1118974, 32324614, 7406442, 12420155, 1994844}, - }, - { - FieldElement{14012521, -5024720, -18384453, -9578469, -26485342, -3936439, -13033478, -10909803, 24319929, -6446333}, - FieldElement{16412690, -4507367, 10772641, 15929391, -17068788, -4658621, 10555945, -10484049, -30102368, -4739048}, - FieldElement{22397382, -7767684, -9293161, -12792868, 17166287, -9755136, -27333065, 6199366, 21880021, -12250760}, - }, - { - FieldElement{-4283307, 5368523, -31117018, 8163389, -30323063, 3209128, 16557151, 8890729, 8840445, 4957760}, - FieldElement{-15447727, 709327, -6919446, -10870178, -29777922, 6522332, -21720181, 12130072, -14796503, 5005757}, - FieldElement{-2114751, -14308128, 23019042, 15765735, -25269683, 6002752, 10183197, -13239326, -16395286, -2176112}, - }, - }, - { - { - FieldElement{-19025756, 1632005, 13466291, -7995100, -23640451, 16573537, -32013908, -3057104, 22208662, 2000468}, - FieldElement{3065073, -1412761, -25598674, -361432, -17683065, -5703415, -8164212, 11248527, -3691214, -7414184}, - FieldElement{10379208, -6045554, 8877319, 1473647, -29291284, -12507580, 16690915, 2553332, -3132688, 16400289}, - }, - { - FieldElement{15716668, 1254266, -18472690, 7446274, -8448918, 6344164, -22097271, -7285580, 26894937, 9132066}, - FieldElement{24158887, 12938817, 11085297, -8177598, -28063478, -4457083, -30576463, 64452, -6817084, -2692882}, - FieldElement{13488534, 7794716, 22236231, 5989356, 25426474, -12578208, 2350710, -3418511, -4688006, 2364226}, - }, - { - FieldElement{16335052, 9132434, 25640582, 6678888, 1725628, 8517937, -11807024, -11697457, 15445875, -7798101}, - FieldElement{29004207, -7867081, 28661402, -640412, -12794003, -7943086, 31863255, -4135540, -278050, -15759279}, - FieldElement{-6122061, -14866665, -28614905, 14569919, -10857999, -3591829, 10343412, -6976290, -29828287, -10815811}, - }, - { - FieldElement{27081650, 3463984, 14099042, -4517604, 1616303, -6205604, 29542636, 15372179, 17293797, 960709}, - FieldElement{20263915, 11434237, -5765435, 11236810, 13505955, -10857102, 
-16111345, 6493122, -19384511, 7639714}, - FieldElement{-2830798, -14839232, 25403038, -8215196, -8317012, -16173699, 18006287, -16043750, 29994677, -15808121}, - }, - { - FieldElement{9769828, 5202651, -24157398, -13631392, -28051003, -11561624, -24613141, -13860782, -31184575, 709464}, - FieldElement{12286395, 13076066, -21775189, -1176622, -25003198, 4057652, -32018128, -8890874, 16102007, 13205847}, - FieldElement{13733362, 5599946, 10557076, 3195751, -5557991, 8536970, -25540170, 8525972, 10151379, 10394400}, - }, - { - FieldElement{4024660, -16137551, 22436262, 12276534, -9099015, -2686099, 19698229, 11743039, -33302334, 8934414}, - FieldElement{-15879800, -4525240, -8580747, -2934061, 14634845, -698278, -9449077, 3137094, -11536886, 11721158}, - FieldElement{17555939, -5013938, 8268606, 2331751, -22738815, 9761013, 9319229, 8835153, -9205489, -1280045}, - }, - { - FieldElement{-461409, -7830014, 20614118, 16688288, -7514766, -4807119, 22300304, 505429, 6108462, -6183415}, - FieldElement{-5070281, 12367917, -30663534, 3234473, 32617080, -8422642, 29880583, -13483331, -26898490, -7867459}, - FieldElement{-31975283, 5726539, 26934134, 10237677, -3173717, -605053, 24199304, 3795095, 7592688, -14992079}, - }, - { - FieldElement{21594432, -14964228, 17466408, -4077222, 32537084, 2739898, 6407723, 12018833, -28256052, 4298412}, - FieldElement{-20650503, -11961496, -27236275, 570498, 3767144, -1717540, 13891942, -1569194, 13717174, 10805743}, - FieldElement{-14676630, -15644296, 15287174, 11927123, 24177847, -8175568, -796431, 14860609, -26938930, -5863836}, - }, - }, - { - { - FieldElement{12962541, 5311799, -10060768, 11658280, 18855286, -7954201, 13286263, -12808704, -4381056, 9882022}, - FieldElement{18512079, 11319350, -20123124, 15090309, 18818594, 5271736, -22727904, 3666879, -23967430, -3299429}, - FieldElement{-6789020, -3146043, 16192429, 13241070, 15898607, -14206114, -10084880, -6661110, -2403099, 5276065}, - }, - { - FieldElement{30169808, -5317648, 26306206, -11750859, 27814964, 7069267, 7152851, 3684982, 1449224, 13082861}, - FieldElement{10342826, 3098505, 2119311, 193222, 25702612, 12233820, 23697382, 15056736, -21016438, -8202000}, - FieldElement{-33150110, 3261608, 22745853, 7948688, 19370557, -15177665, -26171976, 6482814, -10300080, -11060101}, - }, - { - FieldElement{32869458, -5408545, 25609743, 15678670, -10687769, -15471071, 26112421, 2521008, -22664288, 6904815}, - FieldElement{29506923, 4457497, 3377935, -9796444, -30510046, 12935080, 1561737, 3841096, -29003639, -6657642}, - FieldElement{10340844, -6630377, -18656632, -2278430, 12621151, -13339055, 30878497, -11824370, -25584551, 5181966}, - }, - { - FieldElement{25940115, -12658025, 17324188, -10307374, -8671468, 15029094, 24396252, -16450922, -2322852, -12388574}, - FieldElement{-21765684, 9916823, -1300409, 4079498, -1028346, 11909559, 1782390, 12641087, 20603771, -6561742}, - FieldElement{-18882287, -11673380, 24849422, 11501709, 13161720, -4768874, 1925523, 11914390, 4662781, 7820689}, - }, - { - FieldElement{12241050, -425982, 8132691, 9393934, 32846760, -1599620, 29749456, 12172924, 16136752, 15264020}, - FieldElement{-10349955, -14680563, -8211979, 2330220, -17662549, -14545780, 10658213, 6671822, 19012087, 3772772}, - FieldElement{3753511, -3421066, 10617074, 2028709, 14841030, -6721664, 28718732, -15762884, 20527771, 12988982}, - }, - { - FieldElement{-14822485, -5797269, -3707987, 12689773, -898983, -10914866, -24183046, -10564943, 3299665, -12424953}, - FieldElement{-16777703, -15253301, -9642417, 
4978983, 3308785, 8755439, 6943197, 6461331, -25583147, 8991218}, - FieldElement{-17226263, 1816362, -1673288, -6086439, 31783888, -8175991, -32948145, 7417950, -30242287, 1507265}, - }, - { - FieldElement{29692663, 6829891, -10498800, 4334896, 20945975, -11906496, -28887608, 8209391, 14606362, -10647073}, - FieldElement{-3481570, 8707081, 32188102, 5672294, 22096700, 1711240, -33020695, 9761487, 4170404, -2085325}, - FieldElement{-11587470, 14855945, -4127778, -1531857, -26649089, 15084046, 22186522, 16002000, -14276837, -8400798}, - }, - { - FieldElement{-4811456, 13761029, -31703877, -2483919, -3312471, 7869047, -7113572, -9620092, 13240845, 10965870}, - FieldElement{-7742563, -8256762, -14768334, -13656260, -23232383, 12387166, 4498947, 14147411, 29514390, 4302863}, - FieldElement{-13413405, -12407859, 20757302, -13801832, 14785143, 8976368, -5061276, -2144373, 17846988, -13971927}, - }, - }, - { - { - FieldElement{-2244452, -754728, -4597030, -1066309, -6247172, 1455299, -21647728, -9214789, -5222701, 12650267}, - FieldElement{-9906797, -16070310, 21134160, 12198166, -27064575, 708126, 387813, 13770293, -19134326, 10958663}, - FieldElement{22470984, 12369526, 23446014, -5441109, -21520802, -9698723, -11772496, -11574455, -25083830, 4271862}, - }, - { - FieldElement{-25169565, -10053642, -19909332, 15361595, -5984358, 2159192, 75375, -4278529, -32526221, 8469673}, - FieldElement{15854970, 4148314, -8893890, 7259002, 11666551, 13824734, -30531198, 2697372, 24154791, -9460943}, - FieldElement{15446137, -15806644, 29759747, 14019369, 30811221, -9610191, -31582008, 12840104, 24913809, 9815020}, - }, - { - FieldElement{-4709286, -5614269, -31841498, -12288893, -14443537, 10799414, -9103676, 13438769, 18735128, 9466238}, - FieldElement{11933045, 9281483, 5081055, -5183824, -2628162, -4905629, -7727821, -10896103, -22728655, 16199064}, - FieldElement{14576810, 379472, -26786533, -8317236, -29426508, -10812974, -102766, 1876699, 30801119, 2164795}, - }, - { - FieldElement{15995086, 3199873, 13672555, 13712240, -19378835, -4647646, -13081610, -15496269, -13492807, 1268052}, - FieldElement{-10290614, -3659039, -3286592, 10948818, 23037027, 3794475, -3470338, -12600221, -17055369, 3565904}, - FieldElement{29210088, -9419337, -5919792, -4952785, 10834811, -13327726, -16512102, -10820713, -27162222, -14030531}, - }, - { - FieldElement{-13161890, 15508588, 16663704, -8156150, -28349942, 9019123, -29183421, -3769423, 2244111, -14001979}, - FieldElement{-5152875, -3800936, -9306475, -6071583, 16243069, 14684434, -25673088, -16180800, 13491506, 4641841}, - FieldElement{10813417, 643330, -19188515, -728916, 30292062, -16600078, 27548447, -7721242, 14476989, -12767431}, - }, - { - FieldElement{10292079, 9984945, 6481436, 8279905, -7251514, 7032743, 27282937, -1644259, -27912810, 12651324}, - FieldElement{-31185513, -813383, 22271204, 11835308, 10201545, 15351028, 17099662, 3988035, 21721536, -3148940}, - FieldElement{10202177, -6545839, -31373232, -9574638, -32150642, -8119683, -12906320, 3852694, 13216206, 14842320}, - }, - { - FieldElement{-15815640, -10601066, -6538952, -7258995, -6984659, -6581778, -31500847, 13765824, -27434397, 9900184}, - FieldElement{14465505, -13833331, -32133984, -14738873, -27443187, 12990492, 33046193, 15796406, -7051866, -8040114}, - FieldElement{30924417, -8279620, 6359016, -12816335, 16508377, 9071735, -25488601, 15413635, 9524356, -7018878}, - }, - { - FieldElement{12274201, -13175547, 32627641, -1785326, 6736625, 13267305, 5237659, -5109483, 15663516, 4035784}, - 
FieldElement{-2951309, 8903985, 17349946, 601635, -16432815, -4612556, -13732739, -15889334, -22258478, 4659091}, - FieldElement{-16916263, -4952973, -30393711, -15158821, 20774812, 15897498, 5736189, 15026997, -2178256, -13455585}, - }, - }, - { - { - FieldElement{-8858980, -2219056, 28571666, -10155518, -474467, -10105698, -3801496, 278095, 23440562, -290208}, - FieldElement{10226241, -5928702, 15139956, 120818, -14867693, 5218603, 32937275, 11551483, -16571960, -7442864}, - FieldElement{17932739, -12437276, -24039557, 10749060, 11316803, 7535897, 22503767, 5561594, -3646624, 3898661}, - }, - { - FieldElement{7749907, -969567, -16339731, -16464, -25018111, 15122143, -1573531, 7152530, 21831162, 1245233}, - FieldElement{26958459, -14658026, 4314586, 8346991, -5677764, 11960072, -32589295, -620035, -30402091, -16716212}, - FieldElement{-12165896, 9166947, 33491384, 13673479, 29787085, 13096535, 6280834, 14587357, -22338025, 13987525}, - }, - { - FieldElement{-24349909, 7778775, 21116000, 15572597, -4833266, -5357778, -4300898, -5124639, -7469781, -2858068}, - FieldElement{9681908, -6737123, -31951644, 13591838, -6883821, 386950, 31622781, 6439245, -14581012, 4091397}, - FieldElement{-8426427, 1470727, -28109679, -1596990, 3978627, -5123623, -19622683, 12092163, 29077877, -14741988}, - }, - { - FieldElement{5269168, -6859726, -13230211, -8020715, 25932563, 1763552, -5606110, -5505881, -20017847, 2357889}, - FieldElement{32264008, -15407652, -5387735, -1160093, -2091322, -3946900, 23104804, -12869908, 5727338, 189038}, - FieldElement{14609123, -8954470, -6000566, -16622781, -14577387, -7743898, -26745169, 10942115, -25888931, -14884697}, - }, - { - FieldElement{20513500, 5557931, -15604613, 7829531, 26413943, -2019404, -21378968, 7471781, 13913677, -5137875}, - FieldElement{-25574376, 11967826, 29233242, 12948236, -6754465, 4713227, -8940970, 14059180, 12878652, 8511905}, - FieldElement{-25656801, 3393631, -2955415, -7075526, -2250709, 9366908, -30223418, 6812974, 5568676, -3127656}, - }, - { - FieldElement{11630004, 12144454, 2116339, 13606037, 27378885, 15676917, -17408753, -13504373, -14395196, 8070818}, - FieldElement{27117696, -10007378, -31282771, -5570088, 1127282, 12772488, -29845906, 10483306, -11552749, -1028714}, - FieldElement{10637467, -5688064, 5674781, 1072708, -26343588, -6982302, -1683975, 9177853, -27493162, 15431203}, - }, - { - FieldElement{20525145, 10892566, -12742472, 12779443, -29493034, 16150075, -28240519, 14943142, -15056790, -7935931}, - FieldElement{-30024462, 5626926, -551567, -9981087, 753598, 11981191, 25244767, -3239766, -3356550, 9594024}, - FieldElement{-23752644, 2636870, -5163910, -10103818, 585134, 7877383, 11345683, -6492290, 13352335, -10977084}, - }, - { - FieldElement{-1931799, -5407458, 3304649, -12884869, 17015806, -4877091, -29783850, -7752482, -13215537, -319204}, - FieldElement{20239939, 6607058, 6203985, 3483793, -18386976, -779229, -20723742, 15077870, -22750759, 14523817}, - FieldElement{27406042, -6041657, 27423596, -4497394, 4996214, 10002360, -28842031, -4545494, -30172742, -4805667}, - }, - }, - { - { - FieldElement{11374242, 12660715, 17861383, -12540833, 10935568, 1099227, -13886076, -9091740, -27727044, 11358504}, - FieldElement{-12730809, 10311867, 1510375, 10778093, -2119455, -9145702, 32676003, 11149336, -26123651, 4985768}, - FieldElement{-19096303, 341147, -6197485, -239033, 15756973, -8796662, -983043, 13794114, -19414307, -15621255}, - }, - { - FieldElement{6490081, 11940286, 25495923, -7726360, 8668373, -8751316, 3367603, 
6970005, -1691065, -9004790}, - FieldElement{1656497, 13457317, 15370807, 6364910, 13605745, 8362338, -19174622, -5475723, -16796596, -5031438}, - FieldElement{-22273315, -13524424, -64685, -4334223, -18605636, -10921968, -20571065, -7007978, -99853, -10237333}, - }, - { - FieldElement{17747465, 10039260, 19368299, -4050591, -20630635, -16041286, 31992683, -15857976, -29260363, -5511971}, - FieldElement{31932027, -4986141, -19612382, 16366580, 22023614, 88450, 11371999, -3744247, 4882242, -10626905}, - FieldElement{29796507, 37186, 19818052, 10115756, -11829032, 3352736, 18551198, 3272828, -5190932, -4162409}, - }, - { - FieldElement{12501286, 4044383, -8612957, -13392385, -32430052, 5136599, -19230378, -3529697, 330070, -3659409}, - FieldElement{6384877, 2899513, 17807477, 7663917, -2358888, 12363165, 25366522, -8573892, -271295, 12071499}, - FieldElement{-8365515, -4042521, 25133448, -4517355, -6211027, 2265927, -32769618, 1936675, -5159697, 3829363}, - }, - { - FieldElement{28425966, -5835433, -577090, -4697198, -14217555, 6870930, 7921550, -6567787, 26333140, 14267664}, - FieldElement{-11067219, 11871231, 27385719, -10559544, -4585914, -11189312, 10004786, -8709488, -21761224, 8930324}, - FieldElement{-21197785, -16396035, 25654216, -1725397, 12282012, 11008919, 1541940, 4757911, -26491501, -16408940}, - }, - { - FieldElement{13537262, -7759490, -20604840, 10961927, -5922820, -13218065, -13156584, 6217254, -15943699, 13814990}, - FieldElement{-17422573, 15157790, 18705543, 29619, 24409717, -260476, 27361681, 9257833, -1956526, -1776914}, - FieldElement{-25045300, -10191966, 15366585, 15166509, -13105086, 8423556, -29171540, 12361135, -18685978, 4578290}, - }, - { - FieldElement{24579768, 3711570, 1342322, -11180126, -27005135, 14124956, -22544529, 14074919, 21964432, 8235257}, - FieldElement{-6528613, -2411497, 9442966, -5925588, 12025640, -1487420, -2981514, -1669206, 13006806, 2355433}, - FieldElement{-16304899, -13605259, -6632427, -5142349, 16974359, -10911083, 27202044, 1719366, 1141648, -12796236}, - }, - { - FieldElement{-12863944, -13219986, -8318266, -11018091, -6810145, -4843894, 13475066, -3133972, 32674895, 13715045}, - FieldElement{11423335, -5468059, 32344216, 8962751, 24989809, 9241752, -13265253, 16086212, -28740881, -15642093}, - FieldElement{-1409668, 12530728, -6368726, 10847387, 19531186, -14132160, -11709148, 7791794, -27245943, 4383347}, - }, - }, - { - { - FieldElement{-28970898, 5271447, -1266009, -9736989, -12455236, 16732599, -4862407, -4906449, 27193557, 6245191}, - FieldElement{-15193956, 5362278, -1783893, 2695834, 4960227, 12840725, 23061898, 3260492, 22510453, 8577507}, - FieldElement{-12632451, 11257346, -32692994, 13548177, -721004, 10879011, 31168030, 13952092, -29571492, -3635906}, - }, - { - FieldElement{3877321, -9572739, 32416692, 5405324, -11004407, -13656635, 3759769, 11935320, 5611860, 8164018}, - FieldElement{-16275802, 14667797, 15906460, 12155291, -22111149, -9039718, 32003002, -8832289, 5773085, -8422109}, - FieldElement{-23788118, -8254300, 1950875, 8937633, 18686727, 16459170, -905725, 12376320, 31632953, 190926}, - }, - { - FieldElement{-24593607, -16138885, -8423991, 13378746, 14162407, 6901328, -8288749, 4508564, -25341555, -3627528}, - FieldElement{8884438, -5884009, 6023974, 10104341, -6881569, -4941533, 18722941, -14786005, -1672488, 827625}, - FieldElement{-32720583, -16289296, -32503547, 7101210, 13354605, 2659080, -1800575, -14108036, -24878478, 1541286}, - }, - { - FieldElement{2901347, -1117687, 3880376, -10059388, -17620940, 
-3612781, -21802117, -3567481, 20456845, -1885033}, - FieldElement{27019610, 12299467, -13658288, -1603234, -12861660, -4861471, -19540150, -5016058, 29439641, 15138866}, - FieldElement{21536104, -6626420, -32447818, -10690208, -22408077, 5175814, -5420040, -16361163, 7779328, 109896}, - }, - { - FieldElement{30279744, 14648750, -8044871, 6425558, 13639621, -743509, 28698390, 12180118, 23177719, -554075}, - FieldElement{26572847, 3405927, -31701700, 12890905, -19265668, 5335866, -6493768, 2378492, 4439158, -13279347}, - FieldElement{-22716706, 3489070, -9225266, -332753, 18875722, -1140095, 14819434, -12731527, -17717757, -5461437}, - }, - { - FieldElement{-5056483, 16566551, 15953661, 3767752, -10436499, 15627060, -820954, 2177225, 8550082, -15114165}, - FieldElement{-18473302, 16596775, -381660, 15663611, 22860960, 15585581, -27844109, -3582739, -23260460, -8428588}, - FieldElement{-32480551, 15707275, -8205912, -5652081, 29464558, 2713815, -22725137, 15860482, -21902570, 1494193}, - }, - { - FieldElement{-19562091, -14087393, -25583872, -9299552, 13127842, 759709, 21923482, 16529112, 8742704, 12967017}, - FieldElement{-28464899, 1553205, 32536856, -10473729, -24691605, -406174, -8914625, -2933896, -29903758, 15553883}, - FieldElement{21877909, 3230008, 9881174, 10539357, -4797115, 2841332, 11543572, 14513274, 19375923, -12647961}, - }, - { - FieldElement{8832269, -14495485, 13253511, 5137575, 5037871, 4078777, 24880818, -6222716, 2862653, 9455043}, - FieldElement{29306751, 5123106, 20245049, -14149889, 9592566, 8447059, -2077124, -2990080, 15511449, 4789663}, - FieldElement{-20679756, 7004547, 8824831, -9434977, -4045704, -3750736, -5754762, 108893, 23513200, 16652362}, - }, - }, - { - { - FieldElement{-33256173, 4144782, -4476029, -6579123, 10770039, -7155542, -6650416, -12936300, -18319198, 10212860}, - FieldElement{2756081, 8598110, 7383731, -6859892, 22312759, -1105012, 21179801, 2600940, -9988298, -12506466}, - FieldElement{-24645692, 13317462, -30449259, -15653928, 21365574, -10869657, 11344424, 864440, -2499677, -16710063}, - }, - { - FieldElement{-26432803, 6148329, -17184412, -14474154, 18782929, -275997, -22561534, 211300, 2719757, 4940997}, - FieldElement{-1323882, 3911313, -6948744, 14759765, -30027150, 7851207, 21690126, 8518463, 26699843, 5276295}, - FieldElement{-13149873, -6429067, 9396249, 365013, 24703301, -10488939, 1321586, 149635, -15452774, 7159369}, - }, - { - FieldElement{9987780, -3404759, 17507962, 9505530, 9731535, -2165514, 22356009, 8312176, 22477218, -8403385}, - FieldElement{18155857, -16504990, 19744716, 9006923, 15154154, -10538976, 24256460, -4864995, -22548173, 9334109}, - FieldElement{2986088, -4911893, 10776628, -3473844, 10620590, -7083203, -21413845, 14253545, -22587149, 536906}, - }, - { - FieldElement{4377756, 8115836, 24567078, 15495314, 11625074, 13064599, 7390551, 10589625, 10838060, -15420424}, - FieldElement{-19342404, 867880, 9277171, -3218459, -14431572, -1986443, 19295826, -15796950, 6378260, 699185}, - FieldElement{7895026, 4057113, -7081772, -13077756, -17886831, -323126, -716039, 15693155, -5045064, -13373962}, - }, - { - FieldElement{-7737563, -5869402, -14566319, -7406919, 11385654, 13201616, 31730678, -10962840, -3918636, -9669325}, - FieldElement{10188286, -15770834, -7336361, 13427543, 22223443, 14896287, 30743455, 7116568, -21786507, 5427593}, - FieldElement{696102, 13206899, 27047647, -10632082, 15285305, -9853179, 10798490, -4578720, 19236243, 12477404}, - }, - { - FieldElement{-11229439, 11243796, -17054270, -8040865, -788228, 
-8167967, -3897669, 11180504, -23169516, 7733644}, - FieldElement{17800790, -14036179, -27000429, -11766671, 23887827, 3149671, 23466177, -10538171, 10322027, 15313801}, - FieldElement{26246234, 11968874, 32263343, -5468728, 6830755, -13323031, -15794704, -101982, -24449242, 10890804}, - }, - { - FieldElement{-31365647, 10271363, -12660625, -6267268, 16690207, -13062544, -14982212, 16484931, 25180797, -5334884}, - FieldElement{-586574, 10376444, -32586414, -11286356, 19801893, 10997610, 2276632, 9482883, 316878, 13820577}, - FieldElement{-9882808, -4510367, -2115506, 16457136, -11100081, 11674996, 30756178, -7515054, 30696930, -3712849}, - }, - { - FieldElement{32988917, -9603412, 12499366, 7910787, -10617257, -11931514, -7342816, -9985397, -32349517, 7392473}, - FieldElement{-8855661, 15927861, 9866406, -3649411, -2396914, -16655781, -30409476, -9134995, 25112947, -2926644}, - FieldElement{-2504044, -436966, 25621774, -5678772, 15085042, -5479877, -24884878, -13526194, 5537438, -13914319}, - }, - }, - { - { - FieldElement{-11225584, 2320285, -9584280, 10149187, -33444663, 5808648, -14876251, -1729667, 31234590, 6090599}, - FieldElement{-9633316, 116426, 26083934, 2897444, -6364437, -2688086, 609721, 15878753, -6970405, -9034768}, - FieldElement{-27757857, 247744, -15194774, -9002551, 23288161, -10011936, -23869595, 6503646, 20650474, 1804084}, - }, - { - FieldElement{-27589786, 15456424, 8972517, 8469608, 15640622, 4439847, 3121995, -10329713, 27842616, -202328}, - FieldElement{-15306973, 2839644, 22530074, 10026331, 4602058, 5048462, 28248656, 5031932, -11375082, 12714369}, - FieldElement{20807691, -7270825, 29286141, 11421711, -27876523, -13868230, -21227475, 1035546, -19733229, 12796920}, - }, - { - FieldElement{12076899, -14301286, -8785001, -11848922, -25012791, 16400684, -17591495, -12899438, 3480665, -15182815}, - FieldElement{-32361549, 5457597, 28548107, 7833186, 7303070, -11953545, -24363064, -15921875, -33374054, 2771025}, - FieldElement{-21389266, 421932, 26597266, 6860826, 22486084, -6737172, -17137485, -4210226, -24552282, 15673397}, - }, - { - FieldElement{-20184622, 2338216, 19788685, -9620956, -4001265, -8740893, -20271184, 4733254, 3727144, -12934448}, - FieldElement{6120119, 814863, -11794402, -622716, 6812205, -15747771, 2019594, 7975683, 31123697, -10958981}, - FieldElement{30069250, -11435332, 30434654, 2958439, 18399564, -976289, 12296869, 9204260, -16432438, 9648165}, - }, - { - FieldElement{32705432, -1550977, 30705658, 7451065, -11805606, 9631813, 3305266, 5248604, -26008332, -11377501}, - FieldElement{17219865, 2375039, -31570947, -5575615, -19459679, 9219903, 294711, 15298639, 2662509, -16297073}, - FieldElement{-1172927, -7558695, -4366770, -4287744, -21346413, -8434326, 32087529, -1222777, 32247248, -14389861}, - }, - { - FieldElement{14312628, 1221556, 17395390, -8700143, -4945741, -8684635, -28197744, -9637817, -16027623, -13378845}, - FieldElement{-1428825, -9678990, -9235681, 6549687, -7383069, -468664, 23046502, 9803137, 17597934, 2346211}, - FieldElement{18510800, 15337574, 26171504, 981392, -22241552, 7827556, -23491134, -11323352, 3059833, -11782870}, - }, - { - FieldElement{10141598, 6082907, 17829293, -1947643, 9830092, 13613136, -25556636, -5544586, -33502212, 3592096}, - FieldElement{33114168, -15889352, -26525686, -13343397, 33076705, 8716171, 1151462, 1521897, -982665, -6837803}, - FieldElement{-32939165, -4255815, 23947181, -324178, -33072974, -12305637, -16637686, 3891704, 26353178, 693168}, - }, - { - FieldElement{30374239, 1595580, 
-16884039, 13186931, 4600344, 406904, 9585294, -400668, 31375464, 14369965}, - FieldElement{-14370654, -7772529, 1510301, 6434173, -18784789, -6262728, 32732230, -13108839, 17901441, 16011505}, - FieldElement{18171223, -11934626, -12500402, 15197122, -11038147, -15230035, -19172240, -16046376, 8764035, 12309598}, - }, - }, - { - { - FieldElement{5975908, -5243188, -19459362, -9681747, -11541277, 14015782, -23665757, 1228319, 17544096, -10593782}, - FieldElement{5811932, -1715293, 3442887, -2269310, -18367348, -8359541, -18044043, -15410127, -5565381, 12348900}, - FieldElement{-31399660, 11407555, 25755363, 6891399, -3256938, 14872274, -24849353, 8141295, -10632534, -585479}, - }, - { - FieldElement{-12675304, 694026, -5076145, 13300344, 14015258, -14451394, -9698672, -11329050, 30944593, 1130208}, - FieldElement{8247766, -6710942, -26562381, -7709309, -14401939, -14648910, 4652152, 2488540, 23550156, -271232}, - FieldElement{17294316, -3788438, 7026748, 15626851, 22990044, 113481, 2267737, -5908146, -408818, -137719}, - }, - { - FieldElement{16091085, -16253926, 18599252, 7340678, 2137637, -1221657, -3364161, 14550936, 3260525, -7166271}, - FieldElement{-4910104, -13332887, 18550887, 10864893, -16459325, -7291596, -23028869, -13204905, -12748722, 2701326}, - FieldElement{-8574695, 16099415, 4629974, -16340524, -20786213, -6005432, -10018363, 9276971, 11329923, 1862132}, - }, - { - FieldElement{14763076, -15903608, -30918270, 3689867, 3511892, 10313526, -21951088, 12219231, -9037963, -940300}, - FieldElement{8894987, -3446094, 6150753, 3013931, 301220, 15693451, -31981216, -2909717, -15438168, 11595570}, - FieldElement{15214962, 3537601, -26238722, -14058872, 4418657, -15230761, 13947276, 10730794, -13489462, -4363670}, - }, - { - FieldElement{-2538306, 7682793, 32759013, 263109, -29984731, -7955452, -22332124, -10188635, 977108, 699994}, - FieldElement{-12466472, 4195084, -9211532, 550904, -15565337, 12917920, 19118110, -439841, -30534533, -14337913}, - FieldElement{31788461, -14507657, 4799989, 7372237, 8808585, -14747943, 9408237, -10051775, 12493932, -5409317}, - }, - { - FieldElement{-25680606, 5260744, -19235809, -6284470, -3695942, 16566087, 27218280, 2607121, 29375955, 6024730}, - FieldElement{842132, -2794693, -4763381, -8722815, 26332018, -12405641, 11831880, 6985184, -9940361, 2854096}, - FieldElement{-4847262, -7969331, 2516242, -5847713, 9695691, -7221186, 16512645, 960770, 12121869, 16648078}, - }, - { - FieldElement{-15218652, 14667096, -13336229, 2013717, 30598287, -464137, -31504922, -7882064, 20237806, 2838411}, - FieldElement{-19288047, 4453152, 15298546, -16178388, 22115043, -15972604, 12544294, -13470457, 1068881, -12499905}, - FieldElement{-9558883, -16518835, 33238498, 13506958, 30505848, -1114596, -8486907, -2630053, 12521378, 4845654}, - }, - { - FieldElement{-28198521, 10744108, -2958380, 10199664, 7759311, -13088600, 3409348, -873400, -6482306, -12885870}, - FieldElement{-23561822, 6230156, -20382013, 10655314, -24040585, -11621172, 10477734, -1240216, -3113227, 13974498}, - FieldElement{12966261, 15550616, -32038948, -1615346, 21025980, -629444, 5642325, 7188737, 18895762, 12629579}, - }, - }, - { - { - FieldElement{14741879, -14946887, 22177208, -11721237, 1279741, 8058600, 11758140, 789443, 32195181, 3895677}, - FieldElement{10758205, 15755439, -4509950, 9243698, -4879422, 6879879, -2204575, -3566119, -8982069, 4429647}, - FieldElement{-2453894, 15725973, -20436342, -10410672, -5803908, -11040220, -7135870, -11642895, 18047436, -15281743}, - }, - { - 
FieldElement{-25173001, -11307165, 29759956, 11776784, -22262383, -15820455, 10993114, -12850837, -17620701, -9408468}, - FieldElement{21987233, 700364, -24505048, 14972008, -7774265, -5718395, 32155026, 2581431, -29958985, 8773375}, - FieldElement{-25568350, 454463, -13211935, 16126715, 25240068, 8594567, 20656846, 12017935, -7874389, -13920155}, - }, - { - FieldElement{6028182, 6263078, -31011806, -11301710, -818919, 2461772, -31841174, -5468042, -1721788, -2776725}, - FieldElement{-12278994, 16624277, 987579, -5922598, 32908203, 1248608, 7719845, -4166698, 28408820, 6816612}, - FieldElement{-10358094, -8237829, 19549651, -12169222, 22082623, 16147817, 20613181, 13982702, -10339570, 5067943}, - }, - { - FieldElement{-30505967, -3821767, 12074681, 13582412, -19877972, 2443951, -19719286, 12746132, 5331210, -10105944}, - FieldElement{30528811, 3601899, -1957090, 4619785, -27361822, -15436388, 24180793, -12570394, 27679908, -1648928}, - FieldElement{9402404, -13957065, 32834043, 10838634, -26580150, -13237195, 26653274, -8685565, 22611444, -12715406}, - }, - { - FieldElement{22190590, 1118029, 22736441, 15130463, -30460692, -5991321, 19189625, -4648942, 4854859, 6622139}, - FieldElement{-8310738, -2953450, -8262579, -3388049, -10401731, -271929, 13424426, -3567227, 26404409, 13001963}, - FieldElement{-31241838, -15415700, -2994250, 8939346, 11562230, -12840670, -26064365, -11621720, -15405155, 11020693}, - }, - { - FieldElement{1866042, -7949489, -7898649, -10301010, 12483315, 13477547, 3175636, -12424163, 28761762, 1406734}, - FieldElement{-448555, -1777666, 13018551, 3194501, -9580420, -11161737, 24760585, -4347088, 25577411, -13378680}, - FieldElement{-24290378, 4759345, -690653, -1852816, 2066747, 10693769, -29595790, 9884936, -9368926, 4745410}, - }, - { - FieldElement{-9141284, 6049714, -19531061, -4341411, -31260798, 9944276, -15462008, -11311852, 10931924, -11931931}, - FieldElement{-16561513, 14112680, -8012645, 4817318, -8040464, -11414606, -22853429, 10856641, -20470770, 13434654}, - FieldElement{22759489, -10073434, -16766264, -1871422, 13637442, -10168091, 1765144, -12654326, 28445307, -5364710}, - }, - { - FieldElement{29875063, 12493613, 2795536, -3786330, 1710620, 15181182, -10195717, -8788675, 9074234, 1167180}, - FieldElement{-26205683, 11014233, -9842651, -2635485, -26908120, 7532294, -18716888, -9535498, 3843903, 9367684}, - FieldElement{-10969595, -6403711, 9591134, 9582310, 11349256, 108879, 16235123, 8601684, -139197, 4242895}, - }, - }, - { - { - FieldElement{22092954, -13191123, -2042793, -11968512, 32186753, -11517388, -6574341, 2470660, -27417366, 16625501}, - FieldElement{-11057722, 3042016, 13770083, -9257922, 584236, -544855, -7770857, 2602725, -27351616, 14247413}, - FieldElement{6314175, -10264892, -32772502, 15957557, -10157730, 168750, -8618807, 14290061, 27108877, -1180880}, - }, - { - FieldElement{-8586597, -7170966, 13241782, 10960156, -32991015, -13794596, 33547976, -11058889, -27148451, 981874}, - FieldElement{22833440, 9293594, -32649448, -13618667, -9136966, 14756819, -22928859, -13970780, -10479804, -16197962}, - FieldElement{-7768587, 3326786, -28111797, 10783824, 19178761, 14905060, 22680049, 13906969, -15933690, 3797899}, - }, - { - FieldElement{21721356, -4212746, -12206123, 9310182, -3882239, -13653110, 23740224, -2709232, 20491983, -8042152}, - FieldElement{9209270, -15135055, -13256557, -6167798, -731016, 15289673, 25947805, 15286587, 30997318, -6703063}, - FieldElement{7392032, 16618386, 23946583, -8039892, -13265164, -1533858, -14197445, 
-2321576, 17649998, -250080}, - }, - { - FieldElement{-9301088, -14193827, 30609526, -3049543, -25175069, -1283752, -15241566, -9525724, -2233253, 7662146}, - FieldElement{-17558673, 1763594, -33114336, 15908610, -30040870, -12174295, 7335080, -8472199, -3174674, 3440183}, - FieldElement{-19889700, -5977008, -24111293, -9688870, 10799743, -16571957, 40450, -4431835, 4862400, 1133}, - }, - { - FieldElement{-32856209, -7873957, -5422389, 14860950, -16319031, 7956142, 7258061, 311861, -30594991, -7379421}, - FieldElement{-3773428, -1565936, 28985340, 7499440, 24445838, 9325937, 29727763, 16527196, 18278453, 15405622}, - FieldElement{-4381906, 8508652, -19898366, -3674424, -5984453, 15149970, -13313598, 843523, -21875062, 13626197}, - }, - { - FieldElement{2281448, -13487055, -10915418, -2609910, 1879358, 16164207, -10783882, 3953792, 13340839, 15928663}, - FieldElement{31727126, -7179855, -18437503, -8283652, 2875793, -16390330, -25269894, -7014826, -23452306, 5964753}, - FieldElement{4100420, -5959452, -17179337, 6017714, -18705837, 12227141, -26684835, 11344144, 2538215, -7570755}, - }, - { - FieldElement{-9433605, 6123113, 11159803, -2156608, 30016280, 14966241, -20474983, 1485421, -629256, -15958862}, - FieldElement{-26804558, 4260919, 11851389, 9658551, -32017107, 16367492, -20205425, -13191288, 11659922, -11115118}, - FieldElement{26180396, 10015009, -30844224, -8581293, 5418197, 9480663, 2231568, -10170080, 33100372, -1306171}, - }, - { - FieldElement{15121113, -5201871, -10389905, 15427821, -27509937, -15992507, 21670947, 4486675, -5931810, -14466380}, - FieldElement{16166486, -9483733, -11104130, 6023908, -31926798, -1364923, 2340060, -16254968, -10735770, -10039824}, - FieldElement{28042865, -3557089, -12126526, 12259706, -3717498, -6945899, 6766453, -8689599, 18036436, 5803270}, - }, - }, - { - { - FieldElement{-817581, 6763912, 11803561, 1585585, 10958447, -2671165, 23855391, 4598332, -6159431, -14117438}, - FieldElement{-31031306, -14256194, 17332029, -2383520, 31312682, -5967183, 696309, 50292, -20095739, 11763584}, - FieldElement{-594563, -2514283, -32234153, 12643980, 12650761, 14811489, 665117, -12613632, -19773211, -10713562}, - }, - { - FieldElement{30464590, -11262872, -4127476, -12734478, 19835327, -7105613, -24396175, 2075773, -17020157, 992471}, - FieldElement{18357185, -6994433, 7766382, 16342475, -29324918, 411174, 14578841, 8080033, -11574335, -10601610}, - FieldElement{19598397, 10334610, 12555054, 2555664, 18821899, -10339780, 21873263, 16014234, 26224780, 16452269}, - }, - { - FieldElement{-30223925, 5145196, 5944548, 16385966, 3976735, 2009897, -11377804, -7618186, -20533829, 3698650}, - FieldElement{14187449, 3448569, -10636236, -10810935, -22663880, -3433596, 7268410, -10890444, 27394301, 12015369}, - FieldElement{19695761, 16087646, 28032085, 12999827, 6817792, 11427614, 20244189, -1312777, -13259127, -3402461}, - }, - { - FieldElement{30860103, 12735208, -1888245, -4699734, -16974906, 2256940, -8166013, 12298312, -8550524, -10393462}, - FieldElement{-5719826, -11245325, -1910649, 15569035, 26642876, -7587760, -5789354, -15118654, -4976164, 12651793}, - FieldElement{-2848395, 9953421, 11531313, -5282879, 26895123, -12697089, -13118820, -16517902, 9768698, -2533218}, - }, - { - FieldElement{-24719459, 1894651, -287698, -4704085, 15348719, -8156530, 32767513, 12765450, 4940095, 10678226}, - FieldElement{18860224, 15980149, -18987240, -1562570, -26233012, -11071856, -7843882, 13944024, -24372348, 16582019}, - FieldElement{-15504260, 4970268, -29893044, 4175593, 
-20993212, -2199756, -11704054, 15444560, -11003761, 7989037}, - }, - { - FieldElement{31490452, 5568061, -2412803, 2182383, -32336847, 4531686, -32078269, 6200206, -19686113, -14800171}, - FieldElement{-17308668, -15879940, -31522777, -2831, -32887382, 16375549, 8680158, -16371713, 28550068, -6857132}, - FieldElement{-28126887, -5688091, 16837845, -1820458, -6850681, 12700016, -30039981, 4364038, 1155602, 5988841}, - }, - { - FieldElement{21890435, -13272907, -12624011, 12154349, -7831873, 15300496, 23148983, -4470481, 24618407, 8283181}, - FieldElement{-33136107, -10512751, 9975416, 6841041, -31559793, 16356536, 3070187, -7025928, 1466169, 10740210}, - FieldElement{-1509399, -15488185, -13503385, -10655916, 32799044, 909394, -13938903, -5779719, -32164649, -15327040}, - }, - { - FieldElement{3960823, -14267803, -28026090, -15918051, -19404858, 13146868, 15567327, 951507, -3260321, -573935}, - FieldElement{24740841, 5052253, -30094131, 8961361, 25877428, 6165135, -24368180, 14397372, -7380369, -6144105}, - FieldElement{-28888365, 3510803, -28103278, -1158478, -11238128, -10631454, -15441463, -14453128, -1625486, -6494814}, - }, - }, - { - { - FieldElement{793299, -9230478, 8836302, -6235707, -27360908, -2369593, 33152843, -4885251, -9906200, -621852}, - FieldElement{5666233, 525582, 20782575, -8038419, -24538499, 14657740, 16099374, 1468826, -6171428, -15186581}, - FieldElement{-4859255, -3779343, -2917758, -6748019, 7778750, 11688288, -30404353, -9871238, -1558923, -9863646}, - }, - { - FieldElement{10896332, -7719704, 824275, 472601, -19460308, 3009587, 25248958, 14783338, -30581476, -15757844}, - FieldElement{10566929, 12612572, -31944212, 11118703, -12633376, 12362879, 21752402, 8822496, 24003793, 14264025}, - FieldElement{27713862, -7355973, -11008240, 9227530, 27050101, 2504721, 23886875, -13117525, 13958495, -5732453}, - }, - { - FieldElement{-23481610, 4867226, -27247128, 3900521, 29838369, -8212291, -31889399, -10041781, 7340521, -15410068}, - FieldElement{4646514, -8011124, -22766023, -11532654, 23184553, 8566613, 31366726, -1381061, -15066784, -10375192}, - FieldElement{-17270517, 12723032, -16993061, 14878794, 21619651, -6197576, 27584817, 3093888, -8843694, 3849921}, - }, - { - FieldElement{-9064912, 2103172, 25561640, -15125738, -5239824, 9582958, 32477045, -9017955, 5002294, -15550259}, - FieldElement{-12057553, -11177906, 21115585, -13365155, 8808712, -12030708, 16489530, 13378448, -25845716, 12741426}, - FieldElement{-5946367, 10645103, -30911586, 15390284, -3286982, -7118677, 24306472, 15852464, 28834118, -7646072}, - }, - { - FieldElement{-17335748, -9107057, -24531279, 9434953, -8472084, -583362, -13090771, 455841, 20461858, 5491305}, - FieldElement{13669248, -16095482, -12481974, -10203039, -14569770, -11893198, -24995986, 11293807, -28588204, -9421832}, - FieldElement{28497928, 6272777, -33022994, 14470570, 8906179, -1225630, 18504674, -14165166, 29867745, -8795943}, - }, - { - FieldElement{-16207023, 13517196, -27799630, -13697798, 24009064, -6373891, -6367600, -13175392, 22853429, -4012011}, - FieldElement{24191378, 16712145, -13931797, 15217831, 14542237, 1646131, 18603514, -11037887, 12876623, -2112447}, - FieldElement{17902668, 4518229, -411702, -2829247, 26878217, 5258055, -12860753, 608397, 16031844, 3723494}, - }, - { - FieldElement{-28632773, 12763728, -20446446, 7577504, 33001348, -13017745, 17558842, -7872890, 23896954, -4314245}, - FieldElement{-20005381, -12011952, 31520464, 605201, 2543521, 5991821, -2945064, 7229064, -9919646, -8826859}, - 
FieldElement{28816045, 298879, -28165016, -15920938, 19000928, -1665890, -12680833, -2949325, -18051778, -2082915}, - }, - { - FieldElement{16000882, -344896, 3493092, -11447198, -29504595, -13159789, 12577740, 16041268, -19715240, 7847707}, - FieldElement{10151868, 10572098, 27312476, 7922682, 14825339, 4723128, -32855931, -6519018, -10020567, 3852848}, - FieldElement{-11430470, 15697596, -21121557, -4420647, 5386314, 15063598, 16514493, -15932110, 29330899, -15076224}, - }, - }, - { - { - FieldElement{-25499735, -4378794, -15222908, -6901211, 16615731, 2051784, 3303702, 15490, -27548796, 12314391}, - FieldElement{15683520, -6003043, 18109120, -9980648, 15337968, -5997823, -16717435, 15921866, 16103996, -3731215}, - FieldElement{-23169824, -10781249, 13588192, -1628807, -3798557, -1074929, -19273607, 5402699, -29815713, -9841101}, - }, - { - FieldElement{23190676, 2384583, -32714340, 3462154, -29903655, -1529132, -11266856, 8911517, -25205859, 2739713}, - FieldElement{21374101, -3554250, -33524649, 9874411, 15377179, 11831242, -33529904, 6134907, 4931255, 11987849}, - FieldElement{-7732, -2978858, -16223486, 7277597, 105524, -322051, -31480539, 13861388, -30076310, 10117930}, - }, - { - FieldElement{-29501170, -10744872, -26163768, 13051539, -25625564, 5089643, -6325503, 6704079, 12890019, 15728940}, - FieldElement{-21972360, -11771379, -951059, -4418840, 14704840, 2695116, 903376, -10428139, 12885167, 8311031}, - FieldElement{-17516482, 5352194, 10384213, -13811658, 7506451, 13453191, 26423267, 4384730, 1888765, -5435404}, - }, - { - FieldElement{-25817338, -3107312, -13494599, -3182506, 30896459, -13921729, -32251644, -12707869, -19464434, -3340243}, - FieldElement{-23607977, -2665774, -526091, 4651136, 5765089, 4618330, 6092245, 14845197, 17151279, -9854116}, - FieldElement{-24830458, -12733720, -15165978, 10367250, -29530908, -265356, 22825805, -7087279, -16866484, 16176525}, - }, - { - FieldElement{-23583256, 6564961, 20063689, 3798228, -4740178, 7359225, 2006182, -10363426, -28746253, -10197509}, - FieldElement{-10626600, -4486402, -13320562, -5125317, 3432136, -6393229, 23632037, -1940610, 32808310, 1099883}, - FieldElement{15030977, 5768825, -27451236, -2887299, -6427378, -15361371, -15277896, -6809350, 2051441, -15225865}, - }, - { - FieldElement{-3362323, -7239372, 7517890, 9824992, 23555850, 295369, 5148398, -14154188, -22686354, 16633660}, - FieldElement{4577086, -16752288, 13249841, -15304328, 19958763, -14537274, 18559670, -10759549, 8402478, -9864273}, - FieldElement{-28406330, -1051581, -26790155, -907698, -17212414, -11030789, 9453451, -14980072, 17983010, 9967138}, - }, - { - FieldElement{-25762494, 6524722, 26585488, 9969270, 24709298, 1220360, -1677990, 7806337, 17507396, 3651560}, - FieldElement{-10420457, -4118111, 14584639, 15971087, -15768321, 8861010, 26556809, -5574557, -18553322, -11357135}, - FieldElement{2839101, 14284142, 4029895, 3472686, 14402957, 12689363, -26642121, 8459447, -5605463, -7621941}, - }, - { - FieldElement{-4839289, -3535444, 9744961, 2871048, 25113978, 3187018, -25110813, -849066, 17258084, -7977739}, - FieldElement{18164541, -10595176, -17154882, -1542417, 19237078, -9745295, 23357533, -15217008, 26908270, 12150756}, - FieldElement{-30264870, -7647865, 5112249, -7036672, -1499807, -6974257, 43168, -5537701, -32302074, 16215819}, - }, - }, - { - { - FieldElement{-6898905, 9824394, -12304779, -4401089, -31397141, -6276835, 32574489, 12532905, -7503072, -8675347}, - FieldElement{-27343522, -16515468, -27151524, -10722951, 946346, 16291093, 
254968, 7168080, 21676107, -1943028}, - FieldElement{21260961, -8424752, -16831886, -11920822, -23677961, 3968121, -3651949, -6215466, -3556191, -7913075}, - }, - { - FieldElement{16544754, 13250366, -16804428, 15546242, -4583003, 12757258, -2462308, -8680336, -18907032, -9662799}, - FieldElement{-2415239, -15577728, 18312303, 4964443, -15272530, -12653564, 26820651, 16690659, 25459437, -4564609}, - FieldElement{-25144690, 11425020, 28423002, -11020557, -6144921, -15826224, 9142795, -2391602, -6432418, -1644817}, - }, - { - FieldElement{-23104652, 6253476, 16964147, -3768872, -25113972, -12296437, -27457225, -16344658, 6335692, 7249989}, - FieldElement{-30333227, 13979675, 7503222, -12368314, -11956721, -4621693, -30272269, 2682242, 25993170, -12478523}, - FieldElement{4364628, 5930691, 32304656, -10044554, -8054781, 15091131, 22857016, -10598955, 31820368, 15075278}, - }, - { - FieldElement{31879134, -8918693, 17258761, 90626, -8041836, -4917709, 24162788, -9650886, -17970238, 12833045}, - FieldElement{19073683, 14851414, -24403169, -11860168, 7625278, 11091125, -19619190, 2074449, -9413939, 14905377}, - FieldElement{24483667, -11935567, -2518866, -11547418, -1553130, 15355506, -25282080, 9253129, 27628530, -7555480}, - }, - { - FieldElement{17597607, 8340603, 19355617, 552187, 26198470, -3176583, 4593324, -9157582, -14110875, 15297016}, - FieldElement{510886, 14337390, -31785257, 16638632, 6328095, 2713355, -20217417, -11864220, 8683221, 2921426}, - FieldElement{18606791, 11874196, 27155355, -5281482, -24031742, 6265446, -25178240, -1278924, 4674690, 13890525}, - }, - { - FieldElement{13609624, 13069022, -27372361, -13055908, 24360586, 9592974, 14977157, 9835105, 4389687, 288396}, - FieldElement{9922506, -519394, 13613107, 5883594, -18758345, -434263, -12304062, 8317628, 23388070, 16052080}, - FieldElement{12720016, 11937594, -31970060, -5028689, 26900120, 8561328, -20155687, -11632979, -14754271, -10812892}, - }, - { - FieldElement{15961858, 14150409, 26716931, -665832, -22794328, 13603569, 11829573, 7467844, -28822128, 929275}, - FieldElement{11038231, -11582396, -27310482, -7316562, -10498527, -16307831, -23479533, -9371869, -21393143, 2465074}, - FieldElement{20017163, -4323226, 27915242, 1529148, 12396362, 15675764, 13817261, -9658066, 2463391, -4622140}, - }, - { - FieldElement{-16358878, -12663911, -12065183, 4996454, -1256422, 1073572, 9583558, 12851107, 4003896, 12673717}, - FieldElement{-1731589, -15155870, -3262930, 16143082, 19294135, 13385325, 14741514, -9103726, 7903886, 2348101}, - FieldElement{24536016, -16515207, 12715592, -3862155, 1511293, 10047386, -3842346, -7129159, -28377538, 10048127}, - }, - }, - { - { - FieldElement{-12622226, -6204820, 30718825, 2591312, -10617028, 12192840, 18873298, -7297090, -32297756, 15221632}, - FieldElement{-26478122, -11103864, 11546244, -1852483, 9180880, 7656409, -21343950, 2095755, 29769758, 6593415}, - FieldElement{-31994208, -2907461, 4176912, 3264766, 12538965, -868111, 26312345, -6118678, 30958054, 8292160}, - }, - { - FieldElement{31429822, -13959116, 29173532, 15632448, 12174511, -2760094, 32808831, 3977186, 26143136, -3148876}, - FieldElement{22648901, 1402143, -22799984, 13746059, 7936347, 365344, -8668633, -1674433, -3758243, -2304625}, - FieldElement{-15491917, 8012313, -2514730, -12702462, -23965846, -10254029, -1612713, -1535569, -16664475, 8194478}, - }, - { - FieldElement{27338066, -7507420, -7414224, 10140405, -19026427, -6589889, 27277191, 8855376, 28572286, 3005164}, - FieldElement{26287124, 4821776, 25476601, 
-4145903, -3764513, -15788984, -18008582, 1182479, -26094821, -13079595}, - FieldElement{-7171154, 3178080, 23970071, 6201893, -17195577, -4489192, -21876275, -13982627, 32208683, -1198248}, - }, - { - FieldElement{-16657702, 2817643, -10286362, 14811298, 6024667, 13349505, -27315504, -10497842, -27672585, -11539858}, - FieldElement{15941029, -9405932, -21367050, 8062055, 31876073, -238629, -15278393, -1444429, 15397331, -4130193}, - FieldElement{8934485, -13485467, -23286397, -13423241, -32446090, 14047986, 31170398, -1441021, -27505566, 15087184}, - }, - { - FieldElement{-18357243, -2156491, 24524913, -16677868, 15520427, -6360776, -15502406, 11461896, 16788528, -5868942}, - FieldElement{-1947386, 16013773, 21750665, 3714552, -17401782, -16055433, -3770287, -10323320, 31322514, -11615635}, - FieldElement{21426655, -5650218, -13648287, -5347537, -28812189, -4920970, -18275391, -14621414, 13040862, -12112948}, - }, - { - FieldElement{11293895, 12478086, -27136401, 15083750, -29307421, 14748872, 14555558, -13417103, 1613711, 4896935}, - FieldElement{-25894883, 15323294, -8489791, -8057900, 25967126, -13425460, 2825960, -4897045, -23971776, -11267415}, - FieldElement{-15924766, -5229880, -17443532, 6410664, 3622847, 10243618, 20615400, 12405433, -23753030, -8436416}, - }, - { - FieldElement{-7091295, 12556208, -20191352, 9025187, -17072479, 4333801, 4378436, 2432030, 23097949, -566018}, - FieldElement{4565804, -16025654, 20084412, -7842817, 1724999, 189254, 24767264, 10103221, -18512313, 2424778}, - FieldElement{366633, -11976806, 8173090, -6890119, 30788634, 5745705, -7168678, 1344109, -3642553, 12412659}, - }, - { - FieldElement{-24001791, 7690286, 14929416, -168257, -32210835, -13412986, 24162697, -15326504, -3141501, 11179385}, - FieldElement{18289522, -14724954, 8056945, 16430056, -21729724, 7842514, -6001441, -1486897, -18684645, -11443503}, - FieldElement{476239, 6601091, -6152790, -9723375, 17503545, -4863900, 27672959, 13403813, 11052904, 5219329}, - }, - }, - { - { - FieldElement{20678546, -8375738, -32671898, 8849123, -5009758, 14574752, 31186971, -3973730, 9014762, -8579056}, - FieldElement{-13644050, -10350239, -15962508, 5075808, -1514661, -11534600, -33102500, 9160280, 8473550, -3256838}, - FieldElement{24900749, 14435722, 17209120, -15292541, -22592275, 9878983, -7689309, -16335821, -24568481, 11788948}, - }, - { - FieldElement{-3118155, -11395194, -13802089, 14797441, 9652448, -6845904, -20037437, 10410733, -24568470, -1458691}, - FieldElement{-15659161, 16736706, -22467150, 10215878, -9097177, 7563911, 11871841, -12505194, -18513325, 8464118}, - FieldElement{-23400612, 8348507, -14585951, -861714, -3950205, -6373419, 14325289, 8628612, 33313881, -8370517}, - }, - { - FieldElement{-20186973, -4967935, 22367356, 5271547, -1097117, -4788838, -24805667, -10236854, -8940735, -5818269}, - FieldElement{-6948785, -1795212, -32625683, -16021179, 32635414, -7374245, 15989197, -12838188, 28358192, -4253904}, - FieldElement{-23561781, -2799059, -32351682, -1661963, -9147719, 10429267, -16637684, 4072016, -5351664, 5596589}, - }, - { - FieldElement{-28236598, -3390048, 12312896, 6213178, 3117142, 16078565, 29266239, 2557221, 1768301, 15373193}, - FieldElement{-7243358, -3246960, -4593467, -7553353, -127927, -912245, -1090902, -4504991, -24660491, 3442910}, - FieldElement{-30210571, 5124043, 14181784, 8197961, 18964734, -11939093, 22597931, 7176455, -18585478, 13365930}, - }, - { - FieldElement{-7877390, -1499958, 8324673, 4690079, 6261860, 890446, 24538107, -8570186, -9689599, 
-3031667}, - FieldElement{25008904, -10771599, -4305031, -9638010, 16265036, 15721635, 683793, -11823784, 15723479, -15163481}, - FieldElement{-9660625, 12374379, -27006999, -7026148, -7724114, -12314514, 11879682, 5400171, 519526, -1235876}, - }, - { - FieldElement{22258397, -16332233, -7869817, 14613016, -22520255, -2950923, -20353881, 7315967, 16648397, 7605640}, - FieldElement{-8081308, -8464597, -8223311, 9719710, 19259459, -15348212, 23994942, -5281555, -9468848, 4763278}, - FieldElement{-21699244, 9220969, -15730624, 1084137, -25476107, -2852390, 31088447, -7764523, -11356529, 728112}, - }, - { - FieldElement{26047220, -11751471, -6900323, -16521798, 24092068, 9158119, -4273545, -12555558, -29365436, -5498272}, - FieldElement{17510331, -322857, 5854289, 8403524, 17133918, -3112612, -28111007, 12327945, 10750447, 10014012}, - FieldElement{-10312768, 3936952, 9156313, -8897683, 16498692, -994647, -27481051, -666732, 3424691, 7540221}, - }, - { - FieldElement{30322361, -6964110, 11361005, -4143317, 7433304, 4989748, -7071422, -16317219, -9244265, 15258046}, - FieldElement{13054562, -2779497, 19155474, 469045, -12482797, 4566042, 5631406, 2711395, 1062915, -5136345}, - FieldElement{-19240248, -11254599, -29509029, -7499965, -5835763, 13005411, -6066489, 12194497, 32960380, 1459310}, - }, - }, - { - { - FieldElement{19852034, 7027924, 23669353, 10020366, 8586503, -6657907, 394197, -6101885, 18638003, -11174937}, - FieldElement{31395534, 15098109, 26581030, 8030562, -16527914, -5007134, 9012486, -7584354, -6643087, -5442636}, - FieldElement{-9192165, -2347377, -1997099, 4529534, 25766844, 607986, -13222, 9677543, -32294889, -6456008}, - }, - { - FieldElement{-2444496, -149937, 29348902, 8186665, 1873760, 12489863, -30934579, -7839692, -7852844, -8138429}, - FieldElement{-15236356, -15433509, 7766470, 746860, 26346930, -10221762, -27333451, 10754588, -9431476, 5203576}, - FieldElement{31834314, 14135496, -770007, 5159118, 20917671, -16768096, -7467973, -7337524, 31809243, 7347066}, - }, - { - FieldElement{-9606723, -11874240, 20414459, 13033986, 13716524, -11691881, 19797970, -12211255, 15192876, -2087490}, - FieldElement{-12663563, -2181719, 1168162, -3804809, 26747877, -14138091, 10609330, 12694420, 33473243, -13382104}, - FieldElement{33184999, 11180355, 15832085, -11385430, -1633671, 225884, 15089336, -11023903, -6135662, 14480053}, - }, - { - FieldElement{31308717, -5619998, 31030840, -1897099, 15674547, -6582883, 5496208, 13685227, 27595050, 8737275}, - FieldElement{-20318852, -15150239, 10933843, -16178022, 8335352, -7546022, -31008351, -12610604, 26498114, 66511}, - FieldElement{22644454, -8761729, -16671776, 4884562, -3105614, -13559366, 30540766, -4286747, -13327787, -7515095}, - }, - { - FieldElement{-28017847, 9834845, 18617207, -2681312, -3401956, -13307506, 8205540, 13585437, -17127465, 15115439}, - FieldElement{23711543, -672915, 31206561, -8362711, 6164647, -9709987, -33535882, -1426096, 8236921, 16492939}, - FieldElement{-23910559, -13515526, -26299483, -4503841, 25005590, -7687270, 19574902, 10071562, 6708380, -6222424}, - }, - { - FieldElement{2101391, -4930054, 19702731, 2367575, -15427167, 1047675, 5301017, 9328700, 29955601, -11678310}, - FieldElement{3096359, 9271816, -21620864, -15521844, -14847996, -7592937, -25892142, -12635595, -9917575, 6216608}, - FieldElement{-32615849, 338663, -25195611, 2510422, -29213566, -13820213, 24822830, -6146567, -26767480, 7525079}, - }, - { - FieldElement{-23066649, -13985623, 16133487, -7896178, -3389565, 778788, -910336, -2782495, 
-19386633, 11994101}, - FieldElement{21691500, -13624626, -641331, -14367021, 3285881, -3483596, -25064666, 9718258, -7477437, 13381418}, - FieldElement{18445390, -4202236, 14979846, 11622458, -1727110, -3582980, 23111648, -6375247, 28535282, 15779576}, - }, - { - FieldElement{30098053, 3089662, -9234387, 16662135, -21306940, 11308411, -14068454, 12021730, 9955285, -16303356}, - FieldElement{9734894, -14576830, -7473633, -9138735, 2060392, 11313496, -18426029, 9924399, 20194861, 13380996}, - FieldElement{-26378102, -7965207, -22167821, 15789297, -18055342, -6168792, -1984914, 15707771, 26342023, 10146099}, - }, - }, - { - { - FieldElement{-26016874, -219943, 21339191, -41388, 19745256, -2878700, -29637280, 2227040, 21612326, -545728}, - FieldElement{-13077387, 1184228, 23562814, -5970442, -20351244, -6348714, 25764461, 12243797, -20856566, 11649658}, - FieldElement{-10031494, 11262626, 27384172, 2271902, 26947504, -15997771, 39944, 6114064, 33514190, 2333242}, - }, - { - FieldElement{-21433588, -12421821, 8119782, 7219913, -21830522, -9016134, -6679750, -12670638, 24350578, -13450001}, - FieldElement{-4116307, -11271533, -23886186, 4843615, -30088339, 690623, -31536088, -10406836, 8317860, 12352766}, - FieldElement{18200138, -14475911, -33087759, -2696619, -23702521, -9102511, -23552096, -2287550, 20712163, 6719373}, - }, - { - FieldElement{26656208, 6075253, -7858556, 1886072, -28344043, 4262326, 11117530, -3763210, 26224235, -3297458}, - FieldElement{-17168938, -14854097, -3395676, -16369877, -19954045, 14050420, 21728352, 9493610, 18620611, -16428628}, - FieldElement{-13323321, 13325349, 11432106, 5964811, 18609221, 6062965, -5269471, -9725556, -30701573, -16479657}, - }, - { - FieldElement{-23860538, -11233159, 26961357, 1640861, -32413112, -16737940, 12248509, -5240639, 13735342, 1934062}, - FieldElement{25089769, 6742589, 17081145, -13406266, 21909293, -16067981, -15136294, -3765346, -21277997, 5473616}, - FieldElement{31883677, -7961101, 1083432, -11572403, 22828471, 13290673, -7125085, 12469656, 29111212, -5451014}, - }, - { - FieldElement{24244947, -15050407, -26262976, 2791540, -14997599, 16666678, 24367466, 6388839, -10295587, 452383}, - FieldElement{-25640782, -3417841, 5217916, 16224624, 19987036, -4082269, -24236251, -5915248, 15766062, 8407814}, - FieldElement{-20406999, 13990231, 15495425, 16395525, 5377168, 15166495, -8917023, -4388953, -8067909, 2276718}, - }, - { - FieldElement{30157918, 12924066, -17712050, 9245753, 19895028, 3368142, -23827587, 5096219, 22740376, -7303417}, - FieldElement{2041139, -14256350, 7783687, 13876377, -25946985, -13352459, 24051124, 13742383, -15637599, 13295222}, - FieldElement{33338237, -8505733, 12532113, 7977527, 9106186, -1715251, -17720195, -4612972, -4451357, -14669444}, - }, - { - FieldElement{-20045281, 5454097, -14346548, 6447146, 28862071, 1883651, -2469266, -4141880, 7770569, 9620597}, - FieldElement{23208068, 7979712, 33071466, 8149229, 1758231, -10834995, 30945528, -1694323, -33502340, -14767970}, - FieldElement{1439958, -16270480, -1079989, -793782, 4625402, 10647766, -5043801, 1220118, 30494170, -11440799}, - }, - { - FieldElement{-5037580, -13028295, -2970559, -3061767, 15640974, -6701666, -26739026, 926050, -1684339, -13333647}, - FieldElement{13908495, -3549272, 30919928, -6273825, -21521863, 7989039, 9021034, 9078865, 3353509, 4033511}, - FieldElement{-29663431, -15113610, 32259991, -344482, 24295849, -12912123, 23161163, 8839127, 27485041, 7356032}, - }, - }, - { - { - FieldElement{9661027, 705443, 11980065, -5370154, 
-1628543, 14661173, -6346142, 2625015, 28431036, -16771834}, - FieldElement{-23839233, -8311415, -25945511, 7480958, -17681669, -8354183, -22545972, 14150565, 15970762, 4099461}, - FieldElement{29262576, 16756590, 26350592, -8793563, 8529671, -11208050, 13617293, -9937143, 11465739, 8317062}, - }, - { - FieldElement{-25493081, -6962928, 32500200, -9419051, -23038724, -2302222, 14898637, 3848455, 20969334, -5157516}, - FieldElement{-20384450, -14347713, -18336405, 13884722, -33039454, 2842114, -21610826, -3649888, 11177095, 14989547}, - FieldElement{-24496721, -11716016, 16959896, 2278463, 12066309, 10137771, 13515641, 2581286, -28487508, 9930240}, - }, - { - FieldElement{-17751622, -2097826, 16544300, -13009300, -15914807, -14949081, 18345767, -13403753, 16291481, -5314038}, - FieldElement{-33229194, 2553288, 32678213, 9875984, 8534129, 6889387, -9676774, 6957617, 4368891, 9788741}, - FieldElement{16660756, 7281060, -10830758, 12911820, 20108584, -8101676, -21722536, -8613148, 16250552, -11111103}, - }, - { - FieldElement{-19765507, 2390526, -16551031, 14161980, 1905286, 6414907, 4689584, 10604807, -30190403, 4782747}, - FieldElement{-1354539, 14736941, -7367442, -13292886, 7710542, -14155590, -9981571, 4383045, 22546403, 437323}, - FieldElement{31665577, -12180464, -16186830, 1491339, -18368625, 3294682, 27343084, 2786261, -30633590, -14097016}, - }, - { - FieldElement{-14467279, -683715, -33374107, 7448552, 19294360, 14334329, -19690631, 2355319, -19284671, -6114373}, - FieldElement{15121312, -15796162, 6377020, -6031361, -10798111, -12957845, 18952177, 15496498, -29380133, 11754228}, - FieldElement{-2637277, -13483075, 8488727, -14303896, 12728761, -1622493, 7141596, 11724556, 22761615, -10134141}, - }, - { - FieldElement{16918416, 11729663, -18083579, 3022987, -31015732, -13339659, -28741185, -12227393, 32851222, 11717399}, - FieldElement{11166634, 7338049, -6722523, 4531520, -29468672, -7302055, 31474879, 3483633, -1193175, -4030831}, - FieldElement{-185635, 9921305, 31456609, -13536438, -12013818, 13348923, 33142652, 6546660, -19985279, -3948376}, - }, - { - FieldElement{-32460596, 11266712, -11197107, -7899103, 31703694, 3855903, -8537131, -12833048, -30772034, -15486313}, - FieldElement{-18006477, 12709068, 3991746, -6479188, -21491523, -10550425, -31135347, -16049879, 10928917, 3011958}, - FieldElement{-6957757, -15594337, 31696059, 334240, 29576716, 14796075, -30831056, -12805180, 18008031, 10258577}, - }, - { - FieldElement{-22448644, 15655569, 7018479, -4410003, -30314266, -1201591, -1853465, 1367120, 25127874, 6671743}, - FieldElement{29701166, -14373934, -10878120, 9279288, -17568, 13127210, 21382910, 11042292, 25838796, 4642684}, - FieldElement{-20430234, 14955537, -24126347, 8124619, -5369288, -5990470, 30468147, -13900640, 18423289, 4177476}, - }, - }, -} diff --git a/vendor/src/github.com/agl/ed25519/edwards25519/edwards25519.go b/vendor/src/github.com/agl/ed25519/edwards25519/edwards25519.go deleted file mode 100644 index 184b4a8596..0000000000 --- a/vendor/src/github.com/agl/ed25519/edwards25519/edwards25519.go +++ /dev/null @@ -1,2127 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package edwards25519 implements operations in GF(2**255-19) and on an -// Edwards curve that is isomorphic to curve25519. See -// http://ed25519.cr.yp.to/. 
-package edwards25519 - -// This code is a port of the public domain, "ref10" implementation of ed25519 -// from SUPERCOP. - -// FieldElement represents an element of the field GF(2^255 - 19). An element -// t, entries t[0]...t[9], represents the integer t[0]+2^26 t[1]+2^51 t[2]+2^77 -// t[3]+2^102 t[4]+...+2^230 t[9]. Bounds on each t[i] vary depending on -// context. -type FieldElement [10]int32 - -func FeZero(fe *FieldElement) { - for i := range fe { - fe[i] = 0 - } -} - -func FeOne(fe *FieldElement) { - FeZero(fe) - fe[0] = 1 -} - -func FeAdd(dst, a, b *FieldElement) { - for i := range dst { - dst[i] = a[i] + b[i] - } -} - -func FeSub(dst, a, b *FieldElement) { - for i := range dst { - dst[i] = a[i] - b[i] - } -} - -func FeCopy(dst, src *FieldElement) { - for i := range dst { - dst[i] = src[i] - } -} - -// Replace (f,g) with (g,g) if b == 1; -// replace (f,g) with (f,g) if b == 0. -// -// Preconditions: b in {0,1}. -func FeCMove(f, g *FieldElement, b int32) { - var x FieldElement - b = -b - for i := range x { - x[i] = b & (f[i] ^ g[i]) - } - - for i := range f { - f[i] ^= x[i] - } -} - -func load3(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - return r -} - -func load4(in []byte) int64 { - var r int64 - r = int64(in[0]) - r |= int64(in[1]) << 8 - r |= int64(in[2]) << 16 - r |= int64(in[3]) << 24 - return r -} - -func FeFromBytes(dst *FieldElement, src *[32]byte) { - h0 := load4(src[:]) - h1 := load3(src[4:]) << 6 - h2 := load3(src[7:]) << 5 - h3 := load3(src[10:]) << 3 - h4 := load3(src[13:]) << 2 - h5 := load4(src[16:]) - h6 := load3(src[20:]) << 7 - h7 := load3(src[23:]) << 5 - h8 := load3(src[26:]) << 4 - h9 := (load3(src[29:]) & 8388607) << 2 - - var carry [10]int64 - carry[9] = (h9 + 1<<24) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - carry[1] = (h1 + 1<<24) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[3] = (h3 + 1<<24) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[5] = (h5 + 1<<24) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - carry[7] = (h7 + 1<<24) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[0] = (h0 + 1<<25) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[2] = (h2 + 1<<25) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[4] = (h4 + 1<<25) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[6] = (h6 + 1<<25) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - carry[8] = (h8 + 1<<25) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - dst[0] = int32(h0) - dst[1] = int32(h1) - dst[2] = int32(h2) - dst[3] = int32(h3) - dst[4] = int32(h4) - dst[5] = int32(h5) - dst[6] = int32(h6) - dst[7] = int32(h7) - dst[8] = int32(h8) - dst[9] = int32(h9) -} - -// FeToBytes marshals h to s. -// Preconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Write p=2^255-19; q=floor(h/p). -// Basic claim: q = floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))). -// -// Proof: -// Have |h|<=p so |q|<=1 so |19^2 2^(-255) q|<1/4. -// Also have |h-2^230 h9|<2^230 so |19 2^(-255)(h-2^230 h9)|<1/4. -// -// Write y=2^(-1)-19^2 2^(-255)q-19 2^(-255)(h-2^230 h9). -// Then 0<y<1. -// -// Write r=h-pq. -// Have 0<=r<=p-1=2^255-20. -// Thus 0<=r+19(2^-255)r<r+19(2^-255)2^255<=2^255-1. -// -// Write x=r+19(2^-255)r+y. -// Then 0<x<2^255 so floor(2^(-255)x) = 0 so floor(q+2^(-255)x) = q. -// -// Have q+2^(-255)x = 2^(-255)(h + 19 2^(-25)h9 + 2^(-1)) -// so floor(2^(-255)(h + 19 2^(-25)h9 + 2^(-1))) = q. -func FeToBytes(s *[32]byte, h *FieldElement) { - var carry [10]int32 - - q := (19*h[9] + (1 << 24)) >> 25 - q = (h[0] + q) >> 26 - q = (h[1] + q) >> 25 - q = (h[2] + q) >> 26 - q = (h[3] + q) >> 25 - q = (h[4] + q) >> 26 - q = (h[5] + q) >> 25 - q = (h[6] + q) >> 26 - q = (h[7] + q) >> 25 - q = (h[8] + q) >> 26 - q = (h[9] + q) >> 25 - - // Goal: Output h-(2^255-19)q, which is between 0 and 2^255-20. - h[0] += 19 * q - // Goal: Output h-2^255 q, which is between 0 and 2^255-20.
- - carry[0] = h[0] >> 26 - h[1] += carry[0] - h[0] -= carry[0] << 26 - carry[1] = h[1] >> 25 - h[2] += carry[1] - h[1] -= carry[1] << 25 - carry[2] = h[2] >> 26 - h[3] += carry[2] - h[2] -= carry[2] << 26 - carry[3] = h[3] >> 25 - h[4] += carry[3] - h[3] -= carry[3] << 25 - carry[4] = h[4] >> 26 - h[5] += carry[4] - h[4] -= carry[4] << 26 - carry[5] = h[5] >> 25 - h[6] += carry[5] - h[5] -= carry[5] << 25 - carry[6] = h[6] >> 26 - h[7] += carry[6] - h[6] -= carry[6] << 26 - carry[7] = h[7] >> 25 - h[8] += carry[7] - h[7] -= carry[7] << 25 - carry[8] = h[8] >> 26 - h[9] += carry[8] - h[8] -= carry[8] << 26 - carry[9] = h[9] >> 25 - h[9] -= carry[9] << 25 - // h10 = carry9 - - // Goal: Output h[0]+...+2^255 h10-2^255 q, which is between 0 and 2^255-20. - // Have h[0]+...+2^230 h[9] between 0 and 2^255-1; - // evidently 2^255 h10-2^255 q = 0. - // Goal: Output h[0]+...+2^230 h[9]. - - s[0] = byte(h[0] >> 0) - s[1] = byte(h[0] >> 8) - s[2] = byte(h[0] >> 16) - s[3] = byte((h[0] >> 24) | (h[1] << 2)) - s[4] = byte(h[1] >> 6) - s[5] = byte(h[1] >> 14) - s[6] = byte((h[1] >> 22) | (h[2] << 3)) - s[7] = byte(h[2] >> 5) - s[8] = byte(h[2] >> 13) - s[9] = byte((h[2] >> 21) | (h[3] << 5)) - s[10] = byte(h[3] >> 3) - s[11] = byte(h[3] >> 11) - s[12] = byte((h[3] >> 19) | (h[4] << 6)) - s[13] = byte(h[4] >> 2) - s[14] = byte(h[4] >> 10) - s[15] = byte(h[4] >> 18) - s[16] = byte(h[5] >> 0) - s[17] = byte(h[5] >> 8) - s[18] = byte(h[5] >> 16) - s[19] = byte((h[5] >> 24) | (h[6] << 1)) - s[20] = byte(h[6] >> 7) - s[21] = byte(h[6] >> 15) - s[22] = byte((h[6] >> 23) | (h[7] << 3)) - s[23] = byte(h[7] >> 5) - s[24] = byte(h[7] >> 13) - s[25] = byte((h[7] >> 21) | (h[8] << 4)) - s[26] = byte(h[8] >> 4) - s[27] = byte(h[8] >> 12) - s[28] = byte((h[8] >> 20) | (h[9] << 6)) - s[29] = byte(h[9] >> 2) - s[30] = byte(h[9] >> 10) - s[31] = byte(h[9] >> 18) -} - -func FeIsNegative(f *FieldElement) byte { - var s [32]byte - FeToBytes(&s, f) - return s[0] & 1 -} - -func FeIsNonZero(f *FieldElement) int32 { - var s [32]byte - FeToBytes(&s, f) - var x uint8 - for _, b := range s { - x |= b - } - x |= x >> 4 - x |= x >> 2 - x |= x >> 1 - return int32(x & 1) -} - -// FeNeg sets h = -f -// -// Preconditions: -// |f| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeNeg(h, f *FieldElement) { - for i := range h { - h[i] = -f[i] - } -} - -// FeMul calculates h = f * g -// Can overlap h with f or g. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// |g| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -// -// Notes on implementation strategy: -// -// Using schoolbook multiplication. -// Karatsuba would save a little in some cost models. -// -// Most multiplications by 2 and 19 are 32-bit precomputations; -// cheaper than 64-bit postcomputations. -// -// There is one remaining multiplication by 19 in the carry chain; -// one *19 precomputation can be merged into this, -// but the resulting data flow is considerably less clean. -// -// There are 12 carries below. -// 10 of them are 2-way parallelizable and vectorizable. -// Can get away with 11 carries, but then data flow is much deeper. -// -// With tighter constraints on inputs can squeeze carries into int32. 
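Before the FeMul body below, a quick sanity check of the limb layout that its bound comments rely on may help. The following standalone sketch (not part of the vendored file) evaluates the documented radix-2^25.5 representation, t[0] + 2^26*t[1] + 2^51*t[2] + ... + 2^230*t[9], with math/big; the exponents are taken directly from the FieldElement comment above.

    package main

    import (
        "fmt"
        "math/big"
    )

    // feValue evaluates t[0] + 2^26*t[1] + 2^51*t[2] + ... + 2^230*t[9],
    // the representation documented for FieldElement above.
    func feValue(t [10]int32) *big.Int {
        shifts := []uint{0, 26, 51, 77, 102, 128, 153, 179, 204, 230}
        v := new(big.Int)
        for i, s := range shifts {
            v.Add(v, new(big.Int).Lsh(big.NewInt(int64(t[i])), s))
        }
        return v
    }

    func main() {
        // FeOne sets t = {1, 0, ..., 0}, which should evaluate to 1.
        fmt.Println(feValue([10]int32{1})) // 1
        // One unit in the top limb is worth 2^230.
        fmt.Println(feValue([10]int32{9: 1}).BitLen()) // 231
    }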
-func FeMul(h, f, g *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - g0 := g[0] - g1 := g[1] - g2 := g[2] - g3 := g[3] - g4 := g[4] - g5 := g[5] - g6 := g[6] - g7 := g[7] - g8 := g[8] - g9 := g[9] - g1_19 := 19 * g1 /* 1.4*2^29 */ - g2_19 := 19 * g2 /* 1.4*2^30; still ok */ - g3_19 := 19 * g3 - g4_19 := 19 * g4 - g5_19 := 19 * g5 - g6_19 := 19 * g6 - g7_19 := 19 * g7 - g8_19 := 19 * g8 - g9_19 := 19 * g9 - f1_2 := 2 * f1 - f3_2 := 2 * f3 - f5_2 := 2 * f5 - f7_2 := 2 * f7 - f9_2 := 2 * f9 - f0g0 := int64(f0) * int64(g0) - f0g1 := int64(f0) * int64(g1) - f0g2 := int64(f0) * int64(g2) - f0g3 := int64(f0) * int64(g3) - f0g4 := int64(f0) * int64(g4) - f0g5 := int64(f0) * int64(g5) - f0g6 := int64(f0) * int64(g6) - f0g7 := int64(f0) * int64(g7) - f0g8 := int64(f0) * int64(g8) - f0g9 := int64(f0) * int64(g9) - f1g0 := int64(f1) * int64(g0) - f1g1_2 := int64(f1_2) * int64(g1) - f1g2 := int64(f1) * int64(g2) - f1g3_2 := int64(f1_2) * int64(g3) - f1g4 := int64(f1) * int64(g4) - f1g5_2 := int64(f1_2) * int64(g5) - f1g6 := int64(f1) * int64(g6) - f1g7_2 := int64(f1_2) * int64(g7) - f1g8 := int64(f1) * int64(g8) - f1g9_38 := int64(f1_2) * int64(g9_19) - f2g0 := int64(f2) * int64(g0) - f2g1 := int64(f2) * int64(g1) - f2g2 := int64(f2) * int64(g2) - f2g3 := int64(f2) * int64(g3) - f2g4 := int64(f2) * int64(g4) - f2g5 := int64(f2) * int64(g5) - f2g6 := int64(f2) * int64(g6) - f2g7 := int64(f2) * int64(g7) - f2g8_19 := int64(f2) * int64(g8_19) - f2g9_19 := int64(f2) * int64(g9_19) - f3g0 := int64(f3) * int64(g0) - f3g1_2 := int64(f3_2) * int64(g1) - f3g2 := int64(f3) * int64(g2) - f3g3_2 := int64(f3_2) * int64(g3) - f3g4 := int64(f3) * int64(g4) - f3g5_2 := int64(f3_2) * int64(g5) - f3g6 := int64(f3) * int64(g6) - f3g7_38 := int64(f3_2) * int64(g7_19) - f3g8_19 := int64(f3) * int64(g8_19) - f3g9_38 := int64(f3_2) * int64(g9_19) - f4g0 := int64(f4) * int64(g0) - f4g1 := int64(f4) * int64(g1) - f4g2 := int64(f4) * int64(g2) - f4g3 := int64(f4) * int64(g3) - f4g4 := int64(f4) * int64(g4) - f4g5 := int64(f4) * int64(g5) - f4g6_19 := int64(f4) * int64(g6_19) - f4g7_19 := int64(f4) * int64(g7_19) - f4g8_19 := int64(f4) * int64(g8_19) - f4g9_19 := int64(f4) * int64(g9_19) - f5g0 := int64(f5) * int64(g0) - f5g1_2 := int64(f5_2) * int64(g1) - f5g2 := int64(f5) * int64(g2) - f5g3_2 := int64(f5_2) * int64(g3) - f5g4 := int64(f5) * int64(g4) - f5g5_38 := int64(f5_2) * int64(g5_19) - f5g6_19 := int64(f5) * int64(g6_19) - f5g7_38 := int64(f5_2) * int64(g7_19) - f5g8_19 := int64(f5) * int64(g8_19) - f5g9_38 := int64(f5_2) * int64(g9_19) - f6g0 := int64(f6) * int64(g0) - f6g1 := int64(f6) * int64(g1) - f6g2 := int64(f6) * int64(g2) - f6g3 := int64(f6) * int64(g3) - f6g4_19 := int64(f6) * int64(g4_19) - f6g5_19 := int64(f6) * int64(g5_19) - f6g6_19 := int64(f6) * int64(g6_19) - f6g7_19 := int64(f6) * int64(g7_19) - f6g8_19 := int64(f6) * int64(g8_19) - f6g9_19 := int64(f6) * int64(g9_19) - f7g0 := int64(f7) * int64(g0) - f7g1_2 := int64(f7_2) * int64(g1) - f7g2 := int64(f7) * int64(g2) - f7g3_38 := int64(f7_2) * int64(g3_19) - f7g4_19 := int64(f7) * int64(g4_19) - f7g5_38 := int64(f7_2) * int64(g5_19) - f7g6_19 := int64(f7) * int64(g6_19) - f7g7_38 := int64(f7_2) * int64(g7_19) - f7g8_19 := int64(f7) * int64(g8_19) - f7g9_38 := int64(f7_2) * int64(g9_19) - f8g0 := int64(f8) * int64(g0) - f8g1 := int64(f8) * int64(g1) - f8g2_19 := int64(f8) * int64(g2_19) - f8g3_19 := int64(f8) * int64(g3_19) - f8g4_19 := int64(f8) * 
int64(g4_19) - f8g5_19 := int64(f8) * int64(g5_19) - f8g6_19 := int64(f8) * int64(g6_19) - f8g7_19 := int64(f8) * int64(g7_19) - f8g8_19 := int64(f8) * int64(g8_19) - f8g9_19 := int64(f8) * int64(g9_19) - f9g0 := int64(f9) * int64(g0) - f9g1_38 := int64(f9_2) * int64(g1_19) - f9g2_19 := int64(f9) * int64(g2_19) - f9g3_38 := int64(f9_2) * int64(g3_19) - f9g4_19 := int64(f9) * int64(g4_19) - f9g5_38 := int64(f9_2) * int64(g5_19) - f9g6_19 := int64(f9) * int64(g6_19) - f9g7_38 := int64(f9_2) * int64(g7_19) - f9g8_19 := int64(f9) * int64(g8_19) - f9g9_38 := int64(f9_2) * int64(g9_19) - h0 := f0g0 + f1g9_38 + f2g8_19 + f3g7_38 + f4g6_19 + f5g5_38 + f6g4_19 + f7g3_38 + f8g2_19 + f9g1_38 - h1 := f0g1 + f1g0 + f2g9_19 + f3g8_19 + f4g7_19 + f5g6_19 + f6g5_19 + f7g4_19 + f8g3_19 + f9g2_19 - h2 := f0g2 + f1g1_2 + f2g0 + f3g9_38 + f4g8_19 + f5g7_38 + f6g6_19 + f7g5_38 + f8g4_19 + f9g3_38 - h3 := f0g3 + f1g2 + f2g1 + f3g0 + f4g9_19 + f5g8_19 + f6g7_19 + f7g6_19 + f8g5_19 + f9g4_19 - h4 := f0g4 + f1g3_2 + f2g2 + f3g1_2 + f4g0 + f5g9_38 + f6g8_19 + f7g7_38 + f8g6_19 + f9g5_38 - h5 := f0g5 + f1g4 + f2g3 + f3g2 + f4g1 + f5g0 + f6g9_19 + f7g8_19 + f8g7_19 + f9g6_19 - h6 := f0g6 + f1g5_2 + f2g4 + f3g3_2 + f4g2 + f5g1_2 + f6g0 + f7g9_38 + f8g8_19 + f9g7_38 - h7 := f0g7 + f1g6 + f2g5 + f3g4 + f4g3 + f5g2 + f6g1 + f7g0 + f8g9_19 + f9g8_19 - h8 := f0g8 + f1g7_2 + f2g6 + f3g5_2 + f4g4 + f5g3_2 + f6g2 + f7g1_2 + f8g0 + f9g9_38 - h9 := f0g9 + f1g8 + f2g7 + f3g6 + f4g5 + f5g4 + f6g3 + f7g2 + f8g1 + f9g0 - var carry [10]int64 - - /* - |h0| <= (1.1*1.1*2^52*(1+19+19+19+19)+1.1*1.1*2^50*(38+38+38+38+38)) - i.e. |h0| <= 1.2*2^59; narrower ranges for h2, h4, h6, h8 - |h1| <= (1.1*1.1*2^51*(1+1+19+19+19+19+19+19+19+19)) - i.e. |h1| <= 1.5*2^58; narrower ranges for h3, h5, h7, h9 - */ - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - /* |h0| <= 2^25 */ - /* |h4| <= 2^25 */ - /* |h1| <= 1.51*2^58 */ - /* |h5| <= 1.51*2^58 */ - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - /* |h1| <= 2^24; from now on fits into int32 */ - /* |h5| <= 2^24; from now on fits into int32 */ - /* |h2| <= 1.21*2^59 */ - /* |h6| <= 1.21*2^59 */ - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - /* |h2| <= 2^25; from now on fits into int32 unchanged */ - /* |h6| <= 2^25; from now on fits into int32 unchanged */ - /* |h3| <= 1.51*2^58 */ - /* |h7| <= 1.51*2^58 */ - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - /* |h3| <= 2^24; from now on fits into int32 unchanged */ - /* |h7| <= 2^24; from now on fits into int32 unchanged */ - /* |h4| <= 1.52*2^33 */ - /* |h8| <= 1.52*2^33 */ - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - /* |h4| <= 2^25; from now on fits into int32 unchanged */ - /* |h8| <= 2^25; from now on fits into int32 unchanged */ - /* |h5| <= 1.01*2^24 */ - /* |h9| <= 1.51*2^58 */ - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - /* |h9| <= 2^24; from now on fits into int32 unchanged */ - /* |h0| <= 1.8*2^37 */ - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] 
<< 26 - /* |h0| <= 2^25; from now on fits into int32 unchanged */ - /* |h1| <= 1.01*2^24 */ - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeSquare calculates h = f*f. Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.1*2^26,1.1*2^25,1.1*2^26,1.1*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.1*2^25,1.1*2^24,1.1*2^25,1.1*2^24,etc. -func FeSquare(h, f *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.31*2^30 - f6_19 := 19 * f6 // 1.31*2^30 - f7_38 := 38 * f7 // 1.31*2^30 - f8_19 := 19 * f8 // 1.31*2^30 - f9_38 := 38 * f9 // 1.31*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) * int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - 
h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -// FeSquare2 sets h = 2 * f * f -// -// Can overlap h with f. -// -// Preconditions: -// |f| bounded by 1.65*2^26,1.65*2^25,1.65*2^26,1.65*2^25,etc. -// -// Postconditions: -// |h| bounded by 1.01*2^25,1.01*2^24,1.01*2^25,1.01*2^24,etc. -// See fe_mul.c for discussion of implementation strategy. -func FeSquare2(h, f *FieldElement) { - f0 := f[0] - f1 := f[1] - f2 := f[2] - f3 := f[3] - f4 := f[4] - f5 := f[5] - f6 := f[6] - f7 := f[7] - f8 := f[8] - f9 := f[9] - f0_2 := 2 * f0 - f1_2 := 2 * f1 - f2_2 := 2 * f2 - f3_2 := 2 * f3 - f4_2 := 2 * f4 - f5_2 := 2 * f5 - f6_2 := 2 * f6 - f7_2 := 2 * f7 - f5_38 := 38 * f5 // 1.959375*2^30 - f6_19 := 19 * f6 // 1.959375*2^30 - f7_38 := 38 * f7 // 1.959375*2^30 - f8_19 := 19 * f8 // 1.959375*2^30 - f9_38 := 38 * f9 // 1.959375*2^30 - f0f0 := int64(f0) * int64(f0) - f0f1_2 := int64(f0_2) * int64(f1) - f0f2_2 := int64(f0_2) * int64(f2) - f0f3_2 := int64(f0_2) * int64(f3) - f0f4_2 := int64(f0_2) * int64(f4) - f0f5_2 := int64(f0_2) * int64(f5) - f0f6_2 := int64(f0_2) * int64(f6) - f0f7_2 := int64(f0_2) * int64(f7) - f0f8_2 := int64(f0_2) * int64(f8) - f0f9_2 := int64(f0_2) * int64(f9) - f1f1_2 := int64(f1_2) * int64(f1) - f1f2_2 := int64(f1_2) * int64(f2) - f1f3_4 := int64(f1_2) * int64(f3_2) - f1f4_2 := int64(f1_2) * int64(f4) - f1f5_4 := int64(f1_2) * int64(f5_2) - f1f6_2 := int64(f1_2) * int64(f6) - f1f7_4 := int64(f1_2) * int64(f7_2) - f1f8_2 := int64(f1_2) * int64(f8) - f1f9_76 := int64(f1_2) * int64(f9_38) - f2f2 := int64(f2) * int64(f2) - f2f3_2 := int64(f2_2) * int64(f3) - f2f4_2 := int64(f2_2) * int64(f4) - f2f5_2 := int64(f2_2) * int64(f5) - f2f6_2 := int64(f2_2) * int64(f6) - f2f7_2 := int64(f2_2) * int64(f7) - f2f8_38 := int64(f2_2) * int64(f8_19) - f2f9_38 := int64(f2) * int64(f9_38) - f3f3_2 := int64(f3_2) * int64(f3) - f3f4_2 := int64(f3_2) * int64(f4) - f3f5_4 := int64(f3_2) * int64(f5_2) - f3f6_2 := int64(f3_2) * int64(f6) - f3f7_76 := int64(f3_2) * int64(f7_38) - f3f8_38 := int64(f3_2) * int64(f8_19) - f3f9_76 := int64(f3_2) * int64(f9_38) - f4f4 := int64(f4) * int64(f4) - f4f5_2 := int64(f4_2) * int64(f5) - f4f6_38 := int64(f4_2) * int64(f6_19) - f4f7_38 := int64(f4) * int64(f7_38) - f4f8_38 := int64(f4_2) * int64(f8_19) - f4f9_38 := int64(f4) * int64(f9_38) - f5f5_38 := int64(f5) * int64(f5_38) - f5f6_38 := int64(f5_2) * int64(f6_19) - f5f7_76 := int64(f5_2) 
* int64(f7_38) - f5f8_38 := int64(f5_2) * int64(f8_19) - f5f9_76 := int64(f5_2) * int64(f9_38) - f6f6_19 := int64(f6) * int64(f6_19) - f6f7_38 := int64(f6) * int64(f7_38) - f6f8_38 := int64(f6_2) * int64(f8_19) - f6f9_38 := int64(f6) * int64(f9_38) - f7f7_38 := int64(f7) * int64(f7_38) - f7f8_38 := int64(f7_2) * int64(f8_19) - f7f9_76 := int64(f7_2) * int64(f9_38) - f8f8_19 := int64(f8) * int64(f8_19) - f8f9_38 := int64(f8) * int64(f9_38) - f9f9_38 := int64(f9) * int64(f9_38) - h0 := f0f0 + f1f9_76 + f2f8_38 + f3f7_76 + f4f6_38 + f5f5_38 - h1 := f0f1_2 + f2f9_38 + f3f8_38 + f4f7_38 + f5f6_38 - h2 := f0f2_2 + f1f1_2 + f3f9_76 + f4f8_38 + f5f7_76 + f6f6_19 - h3 := f0f3_2 + f1f2_2 + f4f9_38 + f5f8_38 + f6f7_38 - h4 := f0f4_2 + f1f3_4 + f2f2 + f5f9_76 + f6f8_38 + f7f7_38 - h5 := f0f5_2 + f1f4_2 + f2f3_2 + f6f9_38 + f7f8_38 - h6 := f0f6_2 + f1f5_4 + f2f4_2 + f3f3_2 + f7f9_76 + f8f8_19 - h7 := f0f7_2 + f1f6_2 + f2f5_2 + f3f4_2 + f8f9_38 - h8 := f0f8_2 + f1f7_4 + f2f6_2 + f3f5_4 + f4f4 + f9f9_38 - h9 := f0f9_2 + f1f8_2 + f2f7_2 + f3f6_2 + f4f5_2 - var carry [10]int64 - - h0 += h0 - h1 += h1 - h2 += h2 - h3 += h3 - h4 += h4 - h5 += h5 - h6 += h6 - h7 += h7 - h8 += h8 - h9 += h9 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - - carry[1] = (h1 + (1 << 24)) >> 25 - h2 += carry[1] - h1 -= carry[1] << 25 - carry[5] = (h5 + (1 << 24)) >> 25 - h6 += carry[5] - h5 -= carry[5] << 25 - - carry[2] = (h2 + (1 << 25)) >> 26 - h3 += carry[2] - h2 -= carry[2] << 26 - carry[6] = (h6 + (1 << 25)) >> 26 - h7 += carry[6] - h6 -= carry[6] << 26 - - carry[3] = (h3 + (1 << 24)) >> 25 - h4 += carry[3] - h3 -= carry[3] << 25 - carry[7] = (h7 + (1 << 24)) >> 25 - h8 += carry[7] - h7 -= carry[7] << 25 - - carry[4] = (h4 + (1 << 25)) >> 26 - h5 += carry[4] - h4 -= carry[4] << 26 - carry[8] = (h8 + (1 << 25)) >> 26 - h9 += carry[8] - h8 -= carry[8] << 26 - - carry[9] = (h9 + (1 << 24)) >> 25 - h0 += carry[9] * 19 - h9 -= carry[9] << 25 - - carry[0] = (h0 + (1 << 25)) >> 26 - h1 += carry[0] - h0 -= carry[0] << 26 - - h[0] = int32(h0) - h[1] = int32(h1) - h[2] = int32(h2) - h[3] = int32(h3) - h[4] = int32(h4) - h[5] = int32(h5) - h[6] = int32(h6) - h[7] = int32(h7) - h[8] = int32(h8) - h[9] = int32(h9) -} - -func FeInvert(out, z *FieldElement) { - var t0, t1, t2, t3 FieldElement - var i int - - FeSquare(&t0, z) // 2^1 - FeSquare(&t1, &t0) // 2^2 - for i = 1; i < 2; i++ { // 2^3 - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) // 2^3 + 2^0 - FeMul(&t0, &t0, &t1) // 2^3 + 2^1 + 2^0 - FeSquare(&t2, &t0) // 2^4 + 2^2 + 2^1 - FeMul(&t1, &t1, &t2) // 2^4 + 2^3 + 2^2 + 2^1 + 2^0 - FeSquare(&t2, &t1) // 5,4,3,2,1 - for i = 1; i < 5; i++ { // 9,8,7,6,5 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 9,8,7,6,5,4,3,2,1,0 - FeSquare(&t2, &t1) // 10..1 - for i = 1; i < 10; i++ { // 19..10 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 19..0 - FeSquare(&t3, &t2) // 20..1 - for i = 1; i < 20; i++ { // 39..20 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 39..0 - FeSquare(&t2, &t2) // 40..1 - for i = 1; i < 10; i++ { // 49..10 - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) // 49..0 - FeSquare(&t2, &t1) // 50..1 - for i = 1; i < 50; i++ { // 99..50 - FeSquare(&t2, &t2) - } - FeMul(&t2, &t2, &t1) // 99..0 - FeSquare(&t3, &t2) // 100..1 - for i = 1; i < 100; i++ { // 199..100 - FeSquare(&t3, &t3) - } - FeMul(&t2, &t3, &t2) // 199..0 - FeSquare(&t2, &t2) // 200..1 - for i = 1; i < 50; i++ { // 249..50 - FeSquare(&t2, &t2) - } - 
FeMul(&t1, &t2, &t1) // 249..0 - FeSquare(&t1, &t1) // 250..1 - for i = 1; i < 5; i++ { // 254..5 - FeSquare(&t1, &t1) - } - FeMul(out, &t1, &t0) // 254..5,3,1,0 -} - -func fePow22523(out, z *FieldElement) { - var t0, t1, t2 FieldElement - var i int - - FeSquare(&t0, z) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeSquare(&t1, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, z, &t1) - FeMul(&t0, &t0, &t1) - FeSquare(&t0, &t0) - for i = 1; i < 1; i++ { - FeSquare(&t0, &t0) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 5; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 20; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 10; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t1, &t0) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t1, &t1, &t0) - FeSquare(&t2, &t1) - for i = 1; i < 100; i++ { - FeSquare(&t2, &t2) - } - FeMul(&t1, &t2, &t1) - FeSquare(&t1, &t1) - for i = 1; i < 50; i++ { - FeSquare(&t1, &t1) - } - FeMul(&t0, &t1, &t0) - FeSquare(&t0, &t0) - for i = 1; i < 2; i++ { - FeSquare(&t0, &t0) - } - FeMul(out, &t0, z) -} - -// Group elements are members of the elliptic curve -x^2 + y^2 = 1 + d * x^2 * -// y^2 where d = -121665/121666. -// -// Several representations are used: -// ProjectiveGroupElement: (X:Y:Z) satisfying x=X/Z, y=Y/Z -// ExtendedGroupElement: (X:Y:Z:T) satisfying x=X/Z, y=Y/Z, XY=ZT -// CompletedGroupElement: ((X:Z),(Y:T)) satisfying x=X/Z, y=Y/T -// PreComputedGroupElement: (y+x,y-x,2dxy) - -type ProjectiveGroupElement struct { - X, Y, Z FieldElement -} - -type ExtendedGroupElement struct { - X, Y, Z, T FieldElement -} - -type CompletedGroupElement struct { - X, Y, Z, T FieldElement -} - -type PreComputedGroupElement struct { - yPlusX, yMinusX, xy2d FieldElement -} - -type CachedGroupElement struct { - yPlusX, yMinusX, Z, T2d FieldElement -} - -func (p *ProjectiveGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) -} - -func (p *ProjectiveGroupElement) Double(r *CompletedGroupElement) { - var t0 FieldElement - - FeSquare(&r.X, &p.X) - FeSquare(&r.Z, &p.Y) - FeSquare2(&r.T, &p.Z) - FeAdd(&r.Y, &p.X, &p.Y) - FeSquare(&t0, &r.Y) - FeAdd(&r.Y, &r.Z, &r.X) - FeSub(&r.Z, &r.Z, &r.X) - FeSub(&r.X, &t0, &r.Y) - FeSub(&r.T, &r.T, &r.Z) -} - -func (p *ProjectiveGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) Zero() { - FeZero(&p.X) - FeOne(&p.Y) - FeOne(&p.Z) - FeZero(&p.T) -} - -func (p *ExtendedGroupElement) Double(r *CompletedGroupElement) { - var q ProjectiveGroupElement - p.ToProjective(&q) - q.Double(r) -} - -func (p *ExtendedGroupElement) ToCached(r *CachedGroupElement) { - FeAdd(&r.yPlusX, &p.Y, &p.X) - FeSub(&r.yMinusX, &p.Y, &p.X) - FeCopy(&r.Z, &p.Z) - FeMul(&r.T2d, &p.T, &d2) -} - -func (p *ExtendedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeCopy(&r.X, &p.X) - FeCopy(&r.Y, &p.Y) - FeCopy(&r.Z, &p.Z) -} - -func (p *ExtendedGroupElement) ToBytes(s *[32]byte) { - var recip, x, y FieldElement - - FeInvert(&recip, &p.Z) - FeMul(&x, &p.X, &recip) - FeMul(&y, &p.Y, &recip) - FeToBytes(s, &y) - s[31] ^= FeIsNegative(&x) << 7 -} - -func (p *ExtendedGroupElement) FromBytes(s *[32]byte) bool { - var u, v, 
v3, vxx, check FieldElement - - FeFromBytes(&p.Y, s) - FeOne(&p.Z) - FeSquare(&u, &p.Y) - FeMul(&v, &u, &d) - FeSub(&u, &u, &p.Z) // y = y^2-1 - FeAdd(&v, &v, &p.Z) // v = dy^2+1 - - FeSquare(&v3, &v) - FeMul(&v3, &v3, &v) // v3 = v^3 - FeSquare(&p.X, &v3) - FeMul(&p.X, &p.X, &v) - FeMul(&p.X, &p.X, &u) // x = uv^7 - - fePow22523(&p.X, &p.X) // x = (uv^7)^((q-5)/8) - FeMul(&p.X, &p.X, &v3) - FeMul(&p.X, &p.X, &u) // x = uv^3(uv^7)^((q-5)/8) - - var tmpX, tmp2 [32]byte - - FeSquare(&vxx, &p.X) - FeMul(&vxx, &vxx, &v) - FeSub(&check, &vxx, &u) // vx^2-u - if FeIsNonZero(&check) == 1 { - FeAdd(&check, &vxx, &u) // vx^2+u - if FeIsNonZero(&check) == 1 { - return false - } - FeMul(&p.X, &p.X, &SqrtM1) - - FeToBytes(&tmpX, &p.X) - for i, v := range tmpX { - tmp2[31-i] = v - } - } - - if FeIsNegative(&p.X) == (s[31] >> 7) { - FeNeg(&p.X, &p.X) - } - - FeMul(&p.T, &p.X, &p.Y) - return true -} - -func (p *CompletedGroupElement) ToProjective(r *ProjectiveGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) -} - -func (p *CompletedGroupElement) ToExtended(r *ExtendedGroupElement) { - FeMul(&r.X, &p.X, &p.T) - FeMul(&r.Y, &p.Y, &p.Z) - FeMul(&r.Z, &p.Z, &p.T) - FeMul(&r.T, &p.X, &p.Y) -} - -func (p *PreComputedGroupElement) Zero() { - FeOne(&p.yPlusX) - FeOne(&p.yMinusX) - FeZero(&p.xy2d) -} - -func geAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *CachedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.T2d, &p.T) - FeMul(&r.X, &p.Z, &q.Z) - FeAdd(&t0, &r.X, &r.X) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func geMixedAdd(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yPlusX) - FeMul(&r.Y, &r.Y, &q.yMinusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeAdd(&r.Z, &t0, &r.T) - FeSub(&r.T, &t0, &r.T) -} - -func geMixedSub(r *CompletedGroupElement, p *ExtendedGroupElement, q *PreComputedGroupElement) { - var t0 FieldElement - - FeAdd(&r.X, &p.Y, &p.X) - FeSub(&r.Y, &p.Y, &p.X) - FeMul(&r.Z, &r.X, &q.yMinusX) - FeMul(&r.Y, &r.Y, &q.yPlusX) - FeMul(&r.T, &q.xy2d, &p.T) - FeAdd(&t0, &p.Z, &p.Z) - FeSub(&r.X, &r.Z, &r.Y) - FeAdd(&r.Y, &r.Z, &r.Y) - FeSub(&r.Z, &t0, &r.T) - FeAdd(&r.T, &t0, &r.T) -} - -func slide(r *[256]int8, a *[32]byte) { - for i := range r { - r[i] = int8(1 & (a[i>>3] >> uint(i&7))) - } - - for i := range r { - if r[i] != 0 { - for b := 1; b <= 6 && i+b < 256; b++ { - if r[i+b] != 0 { - if r[i]+(r[i+b]<<uint(b)) <= 15 { - r[i] += r[i+b] << uint(b) - r[i+b] = 0 - } else if r[i]-(r[i+b]<<uint(b)) >= -15 { - r[i] -= r[i+b] << uint(b) - for k := i + b; k < 256; k++ { - if r[k] == 0 { - r[k] = 1 - break - } - r[k] = 0 - } - } else { - break - } - } - } - } - } -} - -// GeDoubleScalarMultVartime sets r = a*A + b*B -// where a = a[0]+256*a[1]+...+256^31 a[31]. -// and b = b[0]+256*b[1]+...+256^31 b[31].
-// B is the Ed25519 base point (x,4/5) with x positive. -func GeDoubleScalarMultVartime(r *ProjectiveGroupElement, a *[32]byte, A *ExtendedGroupElement, b *[32]byte) { - var aSlide, bSlide [256]int8 - var Ai [8]CachedGroupElement // A,3A,5A,7A,9A,11A,13A,15A - var t CompletedGroupElement - var u, A2 ExtendedGroupElement - var i int - - slide(&aSlide, a) - slide(&bSlide, b) - - A.ToCached(&Ai[0]) - A.Double(&t) - t.ToExtended(&A2) - - for i := 0; i < 7; i++ { - geAdd(&t, &A2, &Ai[i]) - t.ToExtended(&u) - u.ToCached(&Ai[i+1]) - } - - r.Zero() - - for i = 255; i >= 0; i-- { - if aSlide[i] != 0 || bSlide[i] != 0 { - break - } - } - - for ; i >= 0; i-- { - r.Double(&t) - - if aSlide[i] > 0 { - t.ToExtended(&u) - geAdd(&t, &u, &Ai[aSlide[i]/2]) - } else if aSlide[i] < 0 { - t.ToExtended(&u) - geSub(&t, &u, &Ai[(-aSlide[i])/2]) - } - - if bSlide[i] > 0 { - t.ToExtended(&u) - geMixedAdd(&t, &u, &bi[bSlide[i]/2]) - } else if bSlide[i] < 0 { - t.ToExtended(&u) - geMixedSub(&t, &u, &bi[(-bSlide[i])/2]) - } - - t.ToProjective(r) - } -} - -// equal returns 1 if b == c and 0 otherwise. -func equal(b, c int32) int32 { - x := uint32(b ^ c) - x-- - return int32(x >> 31) -} - -// negative returns 1 if b < 0 and 0 otherwise. -func negative(b int32) int32 { - return (b >> 31) & 1 -} - -func PreComputedGroupElementCMove(t, u *PreComputedGroupElement, b int32) { - FeCMove(&t.yPlusX, &u.yPlusX, b) - FeCMove(&t.yMinusX, &u.yMinusX, b) - FeCMove(&t.xy2d, &u.xy2d, b) -} - -func selectPoint(t *PreComputedGroupElement, pos int32, b int32) { - var minusT PreComputedGroupElement - bNegative := negative(b) - bAbs := b - (((-bNegative) & b) << 1) - - t.Zero() - for i := int32(0); i < 8; i++ { - PreComputedGroupElementCMove(t, &base[pos][i], equal(bAbs, i+1)) - } - FeCopy(&minusT.yPlusX, &t.yMinusX) - FeCopy(&minusT.yMinusX, &t.yPlusX) - FeNeg(&minusT.xy2d, &t.xy2d) - PreComputedGroupElementCMove(t, &minusT, bNegative) -} - -// GeScalarMultBase computes h = a*B, where -// a = a[0]+256*a[1]+...+256^31 a[31] -// B is the Ed25519 base point (x,4/5) with x positive. -// -// Preconditions: -// a[31] <= 127 -func GeScalarMultBase(h *ExtendedGroupElement, a *[32]byte) { - var e [64]int8 - - for i, v := range a { - e[2*i] = int8(v & 15) - e[2*i+1] = int8((v >> 4) & 15) - } - - // each e[i] is between 0 and 15 and e[63] is between 0 and 7. - - carry := int8(0) - for i := 0; i < 63; i++ { - e[i] += carry - carry = (e[i] + 8) >> 4 - e[i] -= carry << 4 - } - e[63] += carry - // each e[i] is between -8 and 8. - - h.Zero() - var t PreComputedGroupElement - var r CompletedGroupElement - for i := int32(1); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } - - var s ProjectiveGroupElement - - h.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToProjective(&s) - s.Double(&r) - r.ToExtended(h) - - for i := int32(0); i < 64; i += 2 { - selectPoint(&t, i/2, int32(e[i])) - geMixedAdd(&r, h, &t) - r.ToExtended(h) - } -} - -// The scalars are GF(2^252 + 27742317777372353535851937790883648493). - -// Input: -// a[0]+256*a[1]+...+256^31*a[31] = a -// b[0]+256*b[1]+...+256^31*b[31] = b -// c[0]+256*c[1]+...+256^31*c[31] = c -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = (ab+c) mod l -// where l = 2^252 + 27742317777372353535851937790883648493. 
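The contract just stated, s = (a*b + c) mod l with all scalars encoded little-endian in 32 bytes, can be cross-checked independently of the limb arithmetic that follows. A small standalone sketch (not part of the vendored file) using math/big:

    package main

    import (
        "fmt"
        "math/big"
    )

    func main() {
        // l = 2^252 + 27742317777372353535851937790883648493, as quoted above.
        l, _ := new(big.Int).SetString("27742317777372353535851937790883648493", 10)
        l.Add(l, new(big.Int).Lsh(big.NewInt(1), 252))

        // With a=7, b=11, c=13 the reduced result is 90; ScMulAdd would
        // write this same value little-endian into its 32-byte output s.
        a, b, c := big.NewInt(7), big.NewInt(11), big.NewInt(13)
        s := new(big.Int).Mul(a, b)
        s.Add(s, c)
        s.Mod(s, l)
        fmt.Println(s) // 90
    }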
-func ScMulAdd(s, a, b, c *[32]byte) { - a0 := 2097151 & load3(a[:]) - a1 := 2097151 & (load4(a[2:]) >> 5) - a2 := 2097151 & (load3(a[5:]) >> 2) - a3 := 2097151 & (load4(a[7:]) >> 7) - a4 := 2097151 & (load4(a[10:]) >> 4) - a5 := 2097151 & (load3(a[13:]) >> 1) - a6 := 2097151 & (load4(a[15:]) >> 6) - a7 := 2097151 & (load3(a[18:]) >> 3) - a8 := 2097151 & load3(a[21:]) - a9 := 2097151 & (load4(a[23:]) >> 5) - a10 := 2097151 & (load3(a[26:]) >> 2) - a11 := (load4(a[28:]) >> 7) - b0 := 2097151 & load3(b[:]) - b1 := 2097151 & (load4(b[2:]) >> 5) - b2 := 2097151 & (load3(b[5:]) >> 2) - b3 := 2097151 & (load4(b[7:]) >> 7) - b4 := 2097151 & (load4(b[10:]) >> 4) - b5 := 2097151 & (load3(b[13:]) >> 1) - b6 := 2097151 & (load4(b[15:]) >> 6) - b7 := 2097151 & (load3(b[18:]) >> 3) - b8 := 2097151 & load3(b[21:]) - b9 := 2097151 & (load4(b[23:]) >> 5) - b10 := 2097151 & (load3(b[26:]) >> 2) - b11 := (load4(b[28:]) >> 7) - c0 := 2097151 & load3(c[:]) - c1 := 2097151 & (load4(c[2:]) >> 5) - c2 := 2097151 & (load3(c[5:]) >> 2) - c3 := 2097151 & (load4(c[7:]) >> 7) - c4 := 2097151 & (load4(c[10:]) >> 4) - c5 := 2097151 & (load3(c[13:]) >> 1) - c6 := 2097151 & (load4(c[15:]) >> 6) - c7 := 2097151 & (load3(c[18:]) >> 3) - c8 := 2097151 & load3(c[21:]) - c9 := 2097151 & (load4(c[23:]) >> 5) - c10 := 2097151 & (load3(c[26:]) >> 2) - c11 := (load4(c[28:]) >> 7) - var carry [23]int64 - - s0 := c0 + a0*b0 - s1 := c1 + a0*b1 + a1*b0 - s2 := c2 + a0*b2 + a1*b1 + a2*b0 - s3 := c3 + a0*b3 + a1*b2 + a2*b1 + a3*b0 - s4 := c4 + a0*b4 + a1*b3 + a2*b2 + a3*b1 + a4*b0 - s5 := c5 + a0*b5 + a1*b4 + a2*b3 + a3*b2 + a4*b1 + a5*b0 - s6 := c6 + a0*b6 + a1*b5 + a2*b4 + a3*b3 + a4*b2 + a5*b1 + a6*b0 - s7 := c7 + a0*b7 + a1*b6 + a2*b5 + a3*b4 + a4*b3 + a5*b2 + a6*b1 + a7*b0 - s8 := c8 + a0*b8 + a1*b7 + a2*b6 + a3*b5 + a4*b4 + a5*b3 + a6*b2 + a7*b1 + a8*b0 - s9 := c9 + a0*b9 + a1*b8 + a2*b7 + a3*b6 + a4*b5 + a5*b4 + a6*b3 + a7*b2 + a8*b1 + a9*b0 - s10 := c10 + a0*b10 + a1*b9 + a2*b8 + a3*b7 + a4*b6 + a5*b5 + a6*b4 + a7*b3 + a8*b2 + a9*b1 + a10*b0 - s11 := c11 + a0*b11 + a1*b10 + a2*b9 + a3*b8 + a4*b7 + a5*b6 + a6*b5 + a7*b4 + a8*b3 + a9*b2 + a10*b1 + a11*b0 - s12 := a1*b11 + a2*b10 + a3*b9 + a4*b8 + a5*b7 + a6*b6 + a7*b5 + a8*b4 + a9*b3 + a10*b2 + a11*b1 - s13 := a2*b11 + a3*b10 + a4*b9 + a5*b8 + a6*b7 + a7*b6 + a8*b5 + a9*b4 + a10*b3 + a11*b2 - s14 := a3*b11 + a4*b10 + a5*b9 + a6*b8 + a7*b7 + a8*b6 + a9*b5 + a10*b4 + a11*b3 - s15 := a4*b11 + a5*b10 + a6*b9 + a7*b8 + a8*b7 + a9*b6 + a10*b5 + a11*b4 - s16 := a5*b11 + a6*b10 + a7*b9 + a8*b8 + a9*b7 + a10*b6 + a11*b5 - s17 := a6*b11 + a7*b10 + a8*b9 + a9*b8 + a10*b7 + a11*b6 - s18 := a7*b11 + a8*b10 + a9*b9 + a10*b8 + a11*b7 - s19 := a8*b11 + a9*b10 + a10*b9 + a11*b8 - s20 := a9*b11 + a10*b10 + a11*b9 - s21 := a10*b11 + a11*b10 - s22 := a11 * b11 - s23 := int64(0) - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - carry[18] = (s18 + (1 
<< 20)) >> 21 - s19 += carry[18] - s18 -= carry[18] << 21 - carry[20] = (s20 + (1 << 20)) >> 21 - s21 += carry[20] - s20 -= carry[20] << 21 - carry[22] = (s22 + (1 << 20)) >> 21 - s23 += carry[22] - s22 -= carry[22] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - carry[17] = (s17 + (1 << 20)) >> 21 - s18 += carry[17] - s17 -= carry[17] << 21 - carry[19] = (s19 + (1 << 20)) >> 21 - s20 += carry[19] - s19 -= carry[19] << 21 - carry[21] = (s21 + (1 << 20)) >> 21 - s22 += carry[21] - s21 -= carry[21] << 21 - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 - s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 
+= s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - s[0] = byte(s0 >> 0) - s[1] = byte(s0 >> 8) - s[2] = byte((s0 >> 16) | (s1 << 5)) - s[3] = byte(s1 >> 3) - s[4] = byte(s1 >> 11) - s[5] = byte((s1 >> 19) | (s2 << 2)) - s[6] = byte(s2 >> 6) - s[7] = byte((s2 >> 14) | (s3 << 7)) - s[8] = byte(s3 >> 1) - s[9] = byte(s3 >> 9) - s[10] = byte((s3 >> 17) | (s4 << 4)) - s[11] = byte(s4 >> 4) - s[12] = byte(s4 >> 12) - s[13] = byte((s4 >> 20) | (s5 << 1)) - s[14] = byte(s5 >> 7) - s[15] = byte((s5 >> 15) | (s6 << 6)) - s[16] = byte(s6 >> 2) - s[17] = byte(s6 >> 10) - s[18] = byte((s6 >> 18) | (s7 << 3)) - s[19] = byte(s7 >> 5) - s[20] = byte(s7 >> 13) - s[21] = byte(s8 >> 0) - s[22] = byte(s8 >> 8) - s[23] = byte((s8 >> 16) | (s9 << 5)) - s[24] = byte(s9 >> 3) - s[25] = byte(s9 >> 11) - s[26] = byte((s9 >> 19) | (s10 << 2)) - s[27] 
= byte(s10 >> 6) - s[28] = byte((s10 >> 14) | (s11 << 7)) - s[29] = byte(s11 >> 1) - s[30] = byte(s11 >> 9) - s[31] = byte(s11 >> 17) -} - -// Input: -// s[0]+256*s[1]+...+256^63*s[63] = s -// -// Output: -// s[0]+256*s[1]+...+256^31*s[31] = s mod l -// where l = 2^252 + 27742317777372353535851937790883648493. -func ScReduce(out *[32]byte, s *[64]byte) { - s0 := 2097151 & load3(s[:]) - s1 := 2097151 & (load4(s[2:]) >> 5) - s2 := 2097151 & (load3(s[5:]) >> 2) - s3 := 2097151 & (load4(s[7:]) >> 7) - s4 := 2097151 & (load4(s[10:]) >> 4) - s5 := 2097151 & (load3(s[13:]) >> 1) - s6 := 2097151 & (load4(s[15:]) >> 6) - s7 := 2097151 & (load3(s[18:]) >> 3) - s8 := 2097151 & load3(s[21:]) - s9 := 2097151 & (load4(s[23:]) >> 5) - s10 := 2097151 & (load3(s[26:]) >> 2) - s11 := 2097151 & (load4(s[28:]) >> 7) - s12 := 2097151 & (load4(s[31:]) >> 4) - s13 := 2097151 & (load3(s[34:]) >> 1) - s14 := 2097151 & (load4(s[36:]) >> 6) - s15 := 2097151 & (load3(s[39:]) >> 3) - s16 := 2097151 & load3(s[42:]) - s17 := 2097151 & (load4(s[44:]) >> 5) - s18 := 2097151 & (load3(s[47:]) >> 2) - s19 := 2097151 & (load4(s[49:]) >> 7) - s20 := 2097151 & (load4(s[52:]) >> 4) - s21 := 2097151 & (load3(s[55:]) >> 1) - s22 := 2097151 & (load4(s[57:]) >> 6) - s23 := (load4(s[60:]) >> 3) - - s11 += s23 * 666643 - s12 += s23 * 470296 - s13 += s23 * 654183 - s14 -= s23 * 997805 - s15 += s23 * 136657 - s16 -= s23 * 683901 - s23 = 0 - - s10 += s22 * 666643 - s11 += s22 * 470296 - s12 += s22 * 654183 - s13 -= s22 * 997805 - s14 += s22 * 136657 - s15 -= s22 * 683901 - s22 = 0 - - s9 += s21 * 666643 - s10 += s21 * 470296 - s11 += s21 * 654183 - s12 -= s21 * 997805 - s13 += s21 * 136657 - s14 -= s21 * 683901 - s21 = 0 - - s8 += s20 * 666643 - s9 += s20 * 470296 - s10 += s20 * 654183 - s11 -= s20 * 997805 - s12 += s20 * 136657 - s13 -= s20 * 683901 - s20 = 0 - - s7 += s19 * 666643 - s8 += s19 * 470296 - s9 += s19 * 654183 - s10 -= s19 * 997805 - s11 += s19 * 136657 - s12 -= s19 * 683901 - s19 = 0 - - s6 += s18 * 666643 - s7 += s18 * 470296 - s8 += s18 * 654183 - s9 -= s18 * 997805 - s10 += s18 * 136657 - s11 -= s18 * 683901 - s18 = 0 - - var carry [17]int64 - - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[12] = (s12 + (1 << 20)) >> 21 - s13 += carry[12] - s12 -= carry[12] << 21 - carry[14] = (s14 + (1 << 20)) >> 21 - s15 += carry[14] - s14 -= carry[14] << 21 - carry[16] = (s16 + (1 << 20)) >> 21 - s17 += carry[16] - s16 -= carry[16] << 21 - - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - carry[13] = (s13 + (1 << 20)) >> 21 - s14 += carry[13] - s13 -= carry[13] << 21 - carry[15] = (s15 + (1 << 20)) >> 21 - s16 += carry[15] - s15 -= carry[15] << 21 - - s5 += s17 * 666643 - s6 += s17 * 470296 - s7 += s17 * 654183 - s8 -= s17 * 997805 - s9 += s17 * 136657 - s10 -= s17 * 683901 - s17 = 0 - - s4 += s16 * 666643 - s5 += s16 * 470296 - s6 += s16 * 654183 - s7 -= s16 * 997805 - s8 += s16 * 136657 - s9 -= s16 * 683901 - s16 = 0 - - s3 += s15 * 666643 - s4 += s15 * 470296 - s5 += s15 * 654183 - s6 -= s15 * 997805 - s7 += s15 * 136657 - s8 -= s15 * 683901 - s15 = 0 - - s2 += s14 * 666643 - s3 += s14 * 470296 - s4 += s14 * 654183 - s5 -= s14 * 997805 - s6 += s14 * 136657 
- s7 -= s14 * 683901 - s14 = 0 - - s1 += s13 * 666643 - s2 += s13 * 470296 - s3 += s13 * 654183 - s4 -= s13 * 997805 - s5 += s13 * 136657 - s6 -= s13 * 683901 - s13 = 0 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = (s0 + (1 << 20)) >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[2] = (s2 + (1 << 20)) >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[4] = (s4 + (1 << 20)) >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[6] = (s6 + (1 << 20)) >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[8] = (s8 + (1 << 20)) >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[10] = (s10 + (1 << 20)) >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - carry[1] = (s1 + (1 << 20)) >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[3] = (s3 + (1 << 20)) >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[5] = (s5 + (1 << 20)) >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[7] = (s7 + (1 << 20)) >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[9] = (s9 + (1 << 20)) >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[11] = (s11 + (1 << 20)) >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - carry[11] = s11 >> 21 - s12 += carry[11] - s11 -= carry[11] << 21 - - s0 += s12 * 666643 - s1 += s12 * 470296 - s2 += s12 * 654183 - s3 -= s12 * 997805 - s4 += s12 * 136657 - s5 -= s12 * 683901 - s12 = 0 - - carry[0] = s0 >> 21 - s1 += carry[0] - s0 -= carry[0] << 21 - carry[1] = s1 >> 21 - s2 += carry[1] - s1 -= carry[1] << 21 - carry[2] = s2 >> 21 - s3 += carry[2] - s2 -= carry[2] << 21 - carry[3] = s3 >> 21 - s4 += carry[3] - s3 -= carry[3] << 21 - carry[4] = s4 >> 21 - s5 += carry[4] - s4 -= carry[4] << 21 - carry[5] = s5 >> 21 - s6 += carry[5] - s5 -= carry[5] << 21 - carry[6] = s6 >> 21 - s7 += carry[6] - s6 -= carry[6] << 21 - carry[7] = s7 >> 21 - s8 += carry[7] - s7 -= carry[7] << 21 - carry[8] = s8 >> 21 - s9 += carry[8] - s8 -= carry[8] << 21 - carry[9] = s9 >> 21 - s10 += carry[9] - s9 -= carry[9] << 21 - carry[10] = s10 >> 21 - s11 += carry[10] - s10 -= carry[10] << 21 - - out[0] = byte(s0 >> 0) - out[1] = byte(s0 >> 8) - out[2] = byte((s0 >> 16) | (s1 << 5)) - out[3] = byte(s1 >> 3) - out[4] = byte(s1 >> 11) - out[5] = byte((s1 >> 19) | (s2 << 2)) - out[6] = byte(s2 >> 6) - out[7] = byte((s2 >> 14) | (s3 << 7)) - out[8] = byte(s3 >> 1) - out[9] = byte(s3 >> 9) - out[10] = byte((s3 >> 17) | (s4 << 4)) - out[11] = byte(s4 >> 4) - out[12] = byte(s4 >> 12) - out[13] = byte((s4 >> 20) | (s5 << 1)) - out[14] = byte(s5 >> 7) - out[15] = byte((s5 >> 15) | (s6 << 6)) - out[16] = byte(s6 >> 2) - out[17] = byte(s6 >> 10) - out[18] = byte((s6 >> 18) | (s7 << 
3)) - out[19] = byte(s7 >> 5) - out[20] = byte(s7 >> 13) - out[21] = byte(s8 >> 0) - out[22] = byte(s8 >> 8) - out[23] = byte((s8 >> 16) | (s9 << 5)) - out[24] = byte(s9 >> 3) - out[25] = byte(s9 >> 11) - out[26] = byte((s9 >> 19) | (s10 << 2)) - out[27] = byte(s10 >> 6) - out[28] = byte((s10 >> 14) | (s11 << 7)) - out[29] = byte(s11 >> 1) - out[30] = byte(s11 >> 9) - out[31] = byte(s11 >> 17) -} diff --git a/vendor/src/github.com/armon/go-metrics/.gitignore b/vendor/src/github.com/armon/go-metrics/.gitignore deleted file mode 100755 index 00268614f0..0000000000 --- a/vendor/src/github.com/armon/go-metrics/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/src/github.com/armon/go-metrics/LICENSE b/vendor/src/github.com/armon/go-metrics/LICENSE deleted file mode 100644 index 106569e542..0000000000 --- a/vendor/src/github.com/armon/go-metrics/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/src/github.com/armon/go-metrics/README.md b/vendor/src/github.com/armon/go-metrics/README.md deleted file mode 100644 index d9f46e85ba..0000000000 --- a/vendor/src/github.com/armon/go-metrics/README.md +++ /dev/null @@ -1,68 +0,0 @@ -go-metrics -========== - -This library provides a `metrics` package which can be used to instrument code, -expose application metrics, and profile runtime performance in a flexible manner. - -Sinks -===== - -The `metrics` package makes use of a `MetricSink` interface to support delivery -to any type of backend. Currently the following sinks are provided: - -* StatsiteSink : Sinks to a statsite instance (TCP) -* StatsdSink: Sinks to a statsd / statsite instance (UDP) -* InmemSink : Provides in-memory aggregation, can be used to export stats -* FanoutSink : Sinks to multiple sinks. Enables writing to multiple statsite instances for example. -* BlackholeSink : Sinks to nowhere - -In addition to the sinks, the `InmemSignal` can be used to catch a signal, -and dump a formatted output of recent metrics. For example, when a process gets -a SIGUSR1, it can dump to stderr recent performance metrics for debugging. 
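For orientation while reading this deleted README: a sink is anything satisfying the `MetricSink` interface named above. The interface itself is not reproduced in this hunk, so the sketch below infers its shape (four methods taking a `[]string` key and a `float32` value) from the example calls in the Examples section that follows; treat the signatures as an assumption rather than the package's definition.

    package main

    import (
        "fmt"
        "strings"
    )

    // logSink is a toy sink that just prints each metric. The four method
    // signatures are inferred from the example calls below and may not match
    // the package's MetricSink exactly.
    type logSink struct{}

    func (logSink) SetGauge(key []string, val float32)    { fmt.Println("gauge", strings.Join(key, "."), val) }
    func (logSink) EmitKey(key []string, val float32)     { fmt.Println("kv", strings.Join(key, "."), val) }
    func (logSink) IncrCounter(key []string, val float32) { fmt.Println("counter", strings.Join(key, "."), val) }
    func (logSink) AddSample(key []string, val float32)   { fmt.Println("sample", strings.Join(key, "."), val) }

    func main() {
        var s logSink
        s.IncrCounter([]string{"baz"}, 42)
    }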
- -Examples -======== - -Here is an example of using the package: - - func SlowMethod() { - // Profiling the runtime of a method - defer metrics.MeasureSince([]string{"SlowMethod"}, time.Now()) - } - - // Configure a statsite sink as the global metrics sink - sink, _ := metrics.NewStatsiteSink("statsite:8125") - metrics.NewGlobal(metrics.DefaultConfig("service-name"), sink) - - // Emit a Key/Value pair - metrics.EmitKey([]string{"questions", "meaning of life"}, 42) - - -Here is an example of setting up a signal handler: - - // Set up the inmem sink and signal handler - inm := NewInmemSink(10*time.Second, time.Minute) - sig := DefaultInmemSignal(inm) - metrics.NewGlobal(metrics.DefaultConfig("service-name"), inm) - - // Run some code - inm.SetGauge([]string{"foo"}, 42) - inm.EmitKey([]string{"bar"}, 30) - - inm.IncrCounter([]string{"baz"}, 42) - inm.IncrCounter([]string{"baz"}, 1) - inm.IncrCounter([]string{"baz"}, 80) - - inm.AddSample([]string{"method", "wow"}, 42) - inm.AddSample([]string{"method", "wow"}, 100) - inm.AddSample([]string{"method", "wow"}, 22) - - .... - -When a signal comes in, output like the following will be dumped to stderr: - - [2014-01-28 14:57:33.04 -0800 PST][G] 'foo': 42.000 - [2014-01-28 14:57:33.04 -0800 PST][P] 'bar': 30.000 - [2014-01-28 14:57:33.04 -0800 PST][C] 'baz': Count: 3 Min: 1.000 Mean: 41.000 Max: 80.000 Stddev: 39.509 - [2014-01-28 14:57:33.04 -0800 PST][S] 'method.wow': Count: 3 Min: 22.000 Mean: 54.667 Max: 100.000 Stddev: 40.513 - diff --git a/vendor/src/github.com/armon/go-metrics/const_unix.go b/vendor/src/github.com/armon/go-metrics/const_unix.go deleted file mode 100644 index 31098dd57e..0000000000 --- a/vendor/src/github.com/armon/go-metrics/const_unix.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !windows - -package metrics - -import ( - "syscall" -) - -const ( - // DefaultSignal is used with DefaultInmemSignal - DefaultSignal = syscall.SIGUSR1 -) diff --git a/vendor/src/github.com/armon/go-metrics/const_windows.go b/vendor/src/github.com/armon/go-metrics/const_windows.go deleted file mode 100644 index 38136af3e4..0000000000 --- a/vendor/src/github.com/armon/go-metrics/const_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build windows - -package metrics - -import ( - "syscall" -) - -const ( - // DefaultSignal is used with DefaultInmemSignal - // Windows has no SIGUSR1, use SIGBREAK - DefaultSignal = syscall.Signal(21) -) diff --git a/vendor/src/github.com/armon/go-metrics/inmem.go b/vendor/src/github.com/armon/go-metrics/inmem.go deleted file mode 100644 index 0749229bfd..0000000000 --- a/vendor/src/github.com/armon/go-metrics/inmem.go +++ /dev/null @@ -1,239 +0,0 @@ -package metrics - -import ( - "fmt" - "math" - "strings" - "sync" - "time" -) - -// InmemSink provides a MetricSink that does in-memory aggregation -// without sending metrics over a network. It can be embedded within -// an application to provide profiling information. -type InmemSink struct { - // How long is each aggregation interval - interval time.Duration - - // Retain controls how many metric intervals we keep - retain time.Duration - - // maxIntervals is the maximum length of intervals. - // It is retain / interval.
- maxIntervals int - - // intervals is a slice of the retained intervals - intervals []*IntervalMetrics - intervalLock sync.RWMutex -} - -// IntervalMetrics stores the aggregated metrics -// for a specific interval -type IntervalMetrics struct { - sync.RWMutex - - // The start time of the interval - Interval time.Time - - // Gauges maps the key to the last set value - Gauges map[string]float32 - - // Points maps the string to the list of emitted values - // from EmitKey - Points map[string][]float32 - - // Counters maps the string key to a sum of the counter - // values - Counters map[string]*AggregateSample - - // Samples maps the key to an AggregateSample, - // which has the rolled up view of a sample - Samples map[string]*AggregateSample -} - -// NewIntervalMetrics creates a new IntervalMetrics for a given interval -func NewIntervalMetrics(intv time.Time) *IntervalMetrics { - return &IntervalMetrics{ - Interval: intv, - Gauges: make(map[string]float32), - Points: make(map[string][]float32), - Counters: make(map[string]*AggregateSample), - Samples: make(map[string]*AggregateSample), - } -} - -// AggregateSample is used to hold aggregate metrics -// about a sample -type AggregateSample struct { - Count int // The count of emitted pairs - Sum float64 // The sum of values - SumSq float64 // The sum of squared values - Min float64 // Minimum value - Max float64 // Maximum value -} - -// Computes a Stddev of the values -func (a *AggregateSample) Stddev() float64 { - num := (float64(a.Count) * a.SumSq) - math.Pow(a.Sum, 2) - div := float64(a.Count * (a.Count - 1)) - if div == 0 { - return 0 - } - return math.Sqrt(num / div) -} - -// Computes a mean of the values -func (a *AggregateSample) Mean() float64 { - if a.Count == 0 { - return 0 - } - return a.Sum / float64(a.Count) -} - -// Ingest is used to update a sample -func (a *AggregateSample) Ingest(v float64) { - a.Count++ - a.Sum += v - a.SumSq += (v * v) - if v < a.Min || a.Count == 1 { - a.Min = v - } - if v > a.Max || a.Count == 1 { - a.Max = v - } -} - -func (a *AggregateSample) String() string { - if a.Count == 0 { - return "Count: 0" - } else if a.Stddev() == 0 { - return fmt.Sprintf("Count: %d Sum: %0.3f", a.Count, a.Sum) - } else { - return fmt.Sprintf("Count: %d Min: %0.3f Mean: %0.3f Max: %0.3f Stddev: %0.3f Sum: %0.3f", - a.Count, a.Min, a.Mean(), a.Max, a.Stddev(), a.Sum) - } -} - -// NewInmemSink is used to construct a new in-memory sink. -// Uses an aggregation interval and maximum retention period. 
-func NewInmemSink(interval, retain time.Duration) *InmemSink { - i := &InmemSink{ - interval: interval, - retain: retain, - maxIntervals: int(retain / interval), - } - i.intervals = make([]*IntervalMetrics, 0, i.maxIntervals) - return i -} - -func (i *InmemSink) SetGauge(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - intv.Gauges[k] = val -} - -func (i *InmemSink) EmitKey(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - vals := intv.Points[k] - intv.Points[k] = append(vals, val) -} - -func (i *InmemSink) IncrCounter(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - - agg := intv.Counters[k] - if agg == nil { - agg = &AggregateSample{} - intv.Counters[k] = agg - } - agg.Ingest(float64(val)) -} - -func (i *InmemSink) AddSample(key []string, val float32) { - k := i.flattenKey(key) - intv := i.getInterval() - - intv.Lock() - defer intv.Unlock() - - agg := intv.Samples[k] - if agg == nil { - agg = &AggregateSample{} - intv.Samples[k] = agg - } - agg.Ingest(float64(val)) -} - -// Data is used to retrieve all the aggregated metrics -// Intervals may be in use, and a read lock should be acquired -func (i *InmemSink) Data() []*IntervalMetrics { - // Get the current interval, forces creation - i.getInterval() - - i.intervalLock.RLock() - defer i.intervalLock.RUnlock() - - intervals := make([]*IntervalMetrics, len(i.intervals)) - copy(intervals, i.intervals) - return intervals -} - -func (i *InmemSink) getExistingInterval(intv time.Time) *IntervalMetrics { - i.intervalLock.RLock() - defer i.intervalLock.RUnlock() - - n := len(i.intervals) - if n > 0 && i.intervals[n-1].Interval == intv { - return i.intervals[n-1] - } - return nil -} - -func (i *InmemSink) createInterval(intv time.Time) *IntervalMetrics { - i.intervalLock.Lock() - defer i.intervalLock.Unlock() - - // Check for an existing interval - n := len(i.intervals) - if n > 0 && i.intervals[n-1].Interval == intv { - return i.intervals[n-1] - } - - // Add the current interval - current := NewIntervalMetrics(intv) - i.intervals = append(i.intervals, current) - n++ - - // Truncate the intervals if they are too long - if n >= i.maxIntervals { - copy(i.intervals[0:], i.intervals[n-i.maxIntervals:]) - i.intervals = i.intervals[:i.maxIntervals] - } - return current -} - -// getInterval returns the current interval to write to -func (i *InmemSink) getInterval() *IntervalMetrics { - intv := time.Now().Truncate(i.interval) - if m := i.getExistingInterval(intv); m != nil { - return m - } - return i.createInterval(intv) -} - -// Flattens the key for formatting, removes spaces -func (i *InmemSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Replace(joined, " ", "_", -1) -} diff --git a/vendor/src/github.com/armon/go-metrics/inmem_signal.go b/vendor/src/github.com/armon/go-metrics/inmem_signal.go deleted file mode 100644 index 95d08ee10f..0000000000 --- a/vendor/src/github.com/armon/go-metrics/inmem_signal.go +++ /dev/null @@ -1,100 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "io" - "os" - "os/signal" - "sync" - "syscall" -) - -// InmemSignal is used to listen for a given signal, and when received, -// to dump the current metrics from the InmemSink to an io.Writer -type InmemSignal struct { - signal syscall.Signal - inm *InmemSink - w io.Writer - sigCh chan os.Signal - - stop bool - stopCh 
chan struct{} - stopLock sync.Mutex -} - -// NewInmemSignal creates a new InmemSignal which listens for a given signal, -// and dumps the current metrics out to a writer -func NewInmemSignal(inmem *InmemSink, sig syscall.Signal, w io.Writer) *InmemSignal { - i := &InmemSignal{ - signal: sig, - inm: inmem, - w: w, - sigCh: make(chan os.Signal, 1), - stopCh: make(chan struct{}), - } - signal.Notify(i.sigCh, sig) - go i.run() - return i -} - -// DefaultInmemSignal returns a new InmemSignal that responds to SIGUSR1 -// and writes output to stderr. Windows uses SIGBREAK -func DefaultInmemSignal(inmem *InmemSink) *InmemSignal { - return NewInmemSignal(inmem, DefaultSignal, os.Stderr) -} - -// Stop is used to stop the InmemSignal from listening -func (i *InmemSignal) Stop() { - i.stopLock.Lock() - defer i.stopLock.Unlock() - - if i.stop { - return - } - i.stop = true - close(i.stopCh) - signal.Stop(i.sigCh) -} - -// run is a long running routine that handles signals -func (i *InmemSignal) run() { - for { - select { - case <-i.sigCh: - i.dumpStats() - case <-i.stopCh: - return - } - } -} - -// dumpStats is used to dump the data to output writer -func (i *InmemSignal) dumpStats() { - buf := bytes.NewBuffer(nil) - - data := i.inm.Data() - // Skip the last period which is still being aggregated - for i := 0; i < len(data)-1; i++ { - intv := data[i] - intv.RLock() - for name, val := range intv.Gauges { - fmt.Fprintf(buf, "[%v][G] '%s': %0.3f\n", intv.Interval, name, val) - } - for name, vals := range intv.Points { - for _, val := range vals { - fmt.Fprintf(buf, "[%v][P] '%s': %0.3f\n", intv.Interval, name, val) - } - } - for name, agg := range intv.Counters { - fmt.Fprintf(buf, "[%v][C] '%s': %s\n", intv.Interval, name, agg) - } - for name, agg := range intv.Samples { - fmt.Fprintf(buf, "[%v][S] '%s': %s\n", intv.Interval, name, agg) - } - intv.RUnlock() - } - - // Write out the bytes - i.w.Write(buf.Bytes()) -} diff --git a/vendor/src/github.com/armon/go-metrics/metrics.go b/vendor/src/github.com/armon/go-metrics/metrics.go deleted file mode 100755 index b818e4182c..0000000000 --- a/vendor/src/github.com/armon/go-metrics/metrics.go +++ /dev/null @@ -1,115 +0,0 @@ -package metrics - -import ( - "runtime" - "time" -) - -func (m *Metrics) SetGauge(key []string, val float32) { - if m.HostName != "" && m.EnableHostname { - key = insert(0, m.HostName, key) - } - if m.EnableTypePrefix { - key = insert(0, "gauge", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.SetGauge(key, val) -} - -func (m *Metrics) EmitKey(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "kv", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.EmitKey(key, val) -} - -func (m *Metrics) IncrCounter(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "counter", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.IncrCounter(key, val) -} - -func (m *Metrics) AddSample(key []string, val float32) { - if m.EnableTypePrefix { - key = insert(0, "sample", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - m.sink.AddSample(key, val) -} - -func (m *Metrics) MeasureSince(key []string, start time.Time) { - if m.EnableTypePrefix { - key = insert(0, "timer", key) - } - if m.ServiceName != "" { - key = insert(0, m.ServiceName, key) - } - now := time.Now() - elapsed := now.Sub(start) - msec := float32(elapsed.Nanoseconds()) / float32(m.TimerGranularity) - 
m.sink.AddSample(key, msec) -} - -// Periodically collects runtime stats to publish -func (m *Metrics) collectStats() { - for { - time.Sleep(m.ProfileInterval) - m.emitRuntimeStats() - } -} - -// Emits various runtime statistics -func (m *Metrics) emitRuntimeStats() { - // Export number of Goroutines - numRoutines := runtime.NumGoroutine() - m.SetGauge([]string{"runtime", "num_goroutines"}, float32(numRoutines)) - - // Export memory stats - var stats runtime.MemStats - runtime.ReadMemStats(&stats) - m.SetGauge([]string{"runtime", "alloc_bytes"}, float32(stats.Alloc)) - m.SetGauge([]string{"runtime", "sys_bytes"}, float32(stats.Sys)) - m.SetGauge([]string{"runtime", "malloc_count"}, float32(stats.Mallocs)) - m.SetGauge([]string{"runtime", "free_count"}, float32(stats.Frees)) - m.SetGauge([]string{"runtime", "heap_objects"}, float32(stats.HeapObjects)) - m.SetGauge([]string{"runtime", "total_gc_pause_ns"}, float32(stats.PauseTotalNs)) - m.SetGauge([]string{"runtime", "total_gc_runs"}, float32(stats.NumGC)) - - // Export info about the last few GC runs - num := stats.NumGC - - // Handle wrap around - if num < m.lastNumGC { - m.lastNumGC = 0 - } - - // Ensure we don't scan more than 256 - if num-m.lastNumGC >= 256 { - m.lastNumGC = num - 255 - } - - for i := m.lastNumGC; i < num; i++ { - pause := stats.PauseNs[i%256] - m.AddSample([]string{"runtime", "gc_pause_ns"}, float32(pause)) - } - m.lastNumGC = num -} - -// Inserts a string value at an index into the slice -func insert(i int, v string, s []string) []string { - s = append(s, "") - copy(s[i+1:], s[i:]) - s[i] = v - return s -} diff --git a/vendor/src/github.com/armon/go-metrics/sink.go b/vendor/src/github.com/armon/go-metrics/sink.go deleted file mode 100755 index 0c240c2c47..0000000000 --- a/vendor/src/github.com/armon/go-metrics/sink.go +++ /dev/null @@ -1,52 +0,0 @@ -package metrics - -// The MetricSink interface is used to transmit metrics information -// to an external system -type MetricSink interface { - // A Gauge should retain the last value it is set to - SetGauge(key []string, val float32) - - // Should emit a Key/Value pair for each call - EmitKey(key []string, val float32) - - // Counters should accumulate values - IncrCounter(key []string, val float32) - - // Samples are for timing information, where quantiles are used - AddSample(key []string, val float32) -} - -// BlackholeSink is used to just blackhole messages -type BlackholeSink struct{} - -func (*BlackholeSink) SetGauge(key []string, val float32) {} -func (*BlackholeSink) EmitKey(key []string, val float32) {} -func (*BlackholeSink) IncrCounter(key []string, val float32) {} -func (*BlackholeSink) AddSample(key []string, val float32) {} - -// FanoutSink is used to fan out values to multiple sinks -type FanoutSink []MetricSink - -func (fh FanoutSink) SetGauge(key []string, val float32) { - for _, s := range fh { - s.SetGauge(key, val) - } -} - -func (fh FanoutSink) EmitKey(key []string, val float32) { - for _, s := range fh { - s.EmitKey(key, val) - } -} - -func (fh FanoutSink) IncrCounter(key []string, val float32) { - for _, s := range fh { - s.IncrCounter(key, val) - } -} - -func (fh FanoutSink) AddSample(key []string, val float32) { - for _, s := range fh { - s.AddSample(key, val) - } -} diff --git a/vendor/src/github.com/armon/go-metrics/start.go b/vendor/src/github.com/armon/go-metrics/start.go deleted file mode 100755 index 44113f1004..0000000000 --- a/vendor/src/github.com/armon/go-metrics/start.go +++ /dev/null @@ -1,95 +0,0 @@ -package metrics - -import (
- "os" - "time" -) - -// Config is used to configure metrics settings -type Config struct { - ServiceName string // Prefixed with keys to seperate services - HostName string // Hostname to use. If not provided and EnableHostname, it will be os.Hostname - EnableHostname bool // Enable prefixing gauge values with hostname - EnableRuntimeMetrics bool // Enables profiling of runtime metrics (GC, Goroutines, Memory) - EnableTypePrefix bool // Prefixes key with a type ("counter", "gauge", "timer") - TimerGranularity time.Duration // Granularity of timers. - ProfileInterval time.Duration // Interval to profile runtime metrics -} - -// Metrics represents an instance of a metrics sink that can -// be used to emit -type Metrics struct { - Config - lastNumGC uint32 - sink MetricSink -} - -// Shared global metrics instance -var globalMetrics *Metrics - -func init() { - // Initialize to a blackhole sink to avoid errors - globalMetrics = &Metrics{sink: &BlackholeSink{}} -} - -// DefaultConfig provides a sane default configuration -func DefaultConfig(serviceName string) *Config { - c := &Config{ - ServiceName: serviceName, // Use client provided service - HostName: "", - EnableHostname: true, // Enable hostname prefix - EnableRuntimeMetrics: true, // Enable runtime profiling - EnableTypePrefix: false, // Disable type prefix - TimerGranularity: time.Millisecond, // Timers are in milliseconds - ProfileInterval: time.Second, // Poll runtime every second - } - - // Try to get the hostname - name, _ := os.Hostname() - c.HostName = name - return c -} - -// New is used to create a new instance of Metrics -func New(conf *Config, sink MetricSink) (*Metrics, error) { - met := &Metrics{} - met.Config = *conf - met.sink = sink - - // Start the runtime collector - if conf.EnableRuntimeMetrics { - go met.collectStats() - } - return met, nil -} - -// NewGlobal is the same as New, but it assigns the metrics object to be -// used globally as well as returning it. -func NewGlobal(conf *Config, sink MetricSink) (*Metrics, error) { - metrics, err := New(conf, sink) - if err == nil { - globalMetrics = metrics - } - return metrics, err -} - -// Proxy all the methods to the globalMetrics instance -func SetGauge(key []string, val float32) { - globalMetrics.SetGauge(key, val) -} - -func EmitKey(key []string, val float32) { - globalMetrics.EmitKey(key, val) -} - -func IncrCounter(key []string, val float32) { - globalMetrics.IncrCounter(key, val) -} - -func AddSample(key []string, val float32) { - globalMetrics.AddSample(key, val) -} - -func MeasureSince(key []string, start time.Time) { - globalMetrics.MeasureSince(key, start) -} diff --git a/vendor/src/github.com/armon/go-metrics/statsd.go b/vendor/src/github.com/armon/go-metrics/statsd.go deleted file mode 100644 index 65a5021a05..0000000000 --- a/vendor/src/github.com/armon/go-metrics/statsd.go +++ /dev/null @@ -1,154 +0,0 @@ -package metrics - -import ( - "bytes" - "fmt" - "log" - "net" - "strings" - "time" -) - -const ( - // statsdMaxLen is the maximum size of a packet - // to send to statsd - statsdMaxLen = 1400 -) - -// StatsdSink provides a MetricSink that can be used -// with a statsite or statsd metrics server. It uses -// only UDP packets, while StatsiteSink uses TCP. 
-type StatsdSink struct { - addr string - metricQueue chan string -} - -// NewStatsdSink is used to create a new StatsdSink -func NewStatsdSink(addr string) (*StatsdSink, error) { - s := &StatsdSink{ - addr: addr, - metricQueue: make(chan string, 4096), - } - go s.flushMetrics() - return s, nil -} - -// Shutdown is used to stop flushing to statsd -func (s *StatsdSink) Shutdown() { - close(s.metricQueue) -} - -func (s *StatsdSink) SetGauge(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsdSink) EmitKey(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) -} - -func (s *StatsdSink) IncrCounter(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsdSink) AddSample(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -// Flattens the key for formatting, removes spaces -func (s *StatsdSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Map(func(r rune) rune { - switch r { - case ':': - fallthrough - case ' ': - return '_' - default: - return r - } - }, joined) -} - -// Does a non-blocking push to the metrics queue -func (s *StatsdSink) pushMetric(m string) { - select { - case s.metricQueue <- m: - default: - } -} - -// Flushes metrics -func (s *StatsdSink) flushMetrics() { - var sock net.Conn - var err error - var wait <-chan time.Time - ticker := time.NewTicker(flushInterval) - defer ticker.Stop() - -CONNECT: - // Create a buffer - buf := bytes.NewBuffer(nil) - - // Attempt to connect - sock, err = net.Dial("udp", s.addr) - if err != nil { - log.Printf("[ERR] Error connecting to statsd! Err: %s", err) - goto WAIT - } - - for { - select { - case metric, ok := <-s.metricQueue: - // Get a metric from the queue - if !ok { - goto QUIT - } - - // Check if this would overflow the packet size - if len(metric)+buf.Len() > statsdMaxLen { - _, err := sock.Write(buf.Bytes()) - buf.Reset() - if err != nil { - log.Printf("[ERR] Error writing to statsd! Err: %s", err) - goto WAIT - } - } - - // Append to the buffer - buf.WriteString(metric) - - case <-ticker.C: - if buf.Len() == 0 { - continue - } - - _, err := sock.Write(buf.Bytes()) - buf.Reset() - if err != nil { - log.Printf("[ERR] Error flushing to statsd! Err: %s", err) - goto WAIT - } - } - } - -WAIT: - // Wait for a while - wait = time.After(time.Duration(5) * time.Second) - for { - select { - // Dequeue the messages to avoid backlog - case _, ok := <-s.metricQueue: - if !ok { - goto QUIT - } - case <-wait: - goto CONNECT - } - } -QUIT: - s.metricQueue = nil -} diff --git a/vendor/src/github.com/armon/go-metrics/statsite.go b/vendor/src/github.com/armon/go-metrics/statsite.go deleted file mode 100755 index 68730139a7..0000000000 --- a/vendor/src/github.com/armon/go-metrics/statsite.go +++ /dev/null @@ -1,142 +0,0 @@ -package metrics - -import ( - "bufio" - "fmt" - "log" - "net" - "strings" - "time" -) - -const ( - // We force flush the statsite metrics after this period of - // inactivity. Prevents stats from getting stuck in a buffer - // forever.
- flushInterval = 100 * time.Millisecond -) - -// StatsiteSink provides a MetricSink that can be used with a -// statsite metrics server -type StatsiteSink struct { - addr string - metricQueue chan string -} - -// NewStatsiteSink is used to create a new StatsiteSink -func NewStatsiteSink(addr string) (*StatsiteSink, error) { - s := &StatsiteSink{ - addr: addr, - metricQueue: make(chan string, 4096), - } - go s.flushMetrics() - return s, nil -} - -// Shutdown is used to stop flushing to statsite -func (s *StatsiteSink) Shutdown() { - close(s.metricQueue) -} - -func (s *StatsiteSink) SetGauge(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|g\n", flatKey, val)) -} - -func (s *StatsiteSink) EmitKey(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|kv\n", flatKey, val)) -} - -func (s *StatsiteSink) IncrCounter(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|c\n", flatKey, val)) -} - -func (s *StatsiteSink) AddSample(key []string, val float32) { - flatKey := s.flattenKey(key) - s.pushMetric(fmt.Sprintf("%s:%f|ms\n", flatKey, val)) -} - -// Flattens the key for formatting, removes spaces -func (s *StatsiteSink) flattenKey(parts []string) string { - joined := strings.Join(parts, ".") - return strings.Map(func(r rune) rune { - switch r { - case ':': - fallthrough - case ' ': - return '_' - default: - return r - } - }, joined) -} - -// Does a non-blocking push to the metrics queue -func (s *StatsiteSink) pushMetric(m string) { - select { - case s.metricQueue <- m: - default: - } -} - -// Flushes metrics -func (s *StatsiteSink) flushMetrics() { - var sock net.Conn - var err error - var wait <-chan time.Time - var buffered *bufio.Writer - ticker := time.NewTicker(flushInterval) - defer ticker.Stop() - -CONNECT: - // Attempt to connect - sock, err = net.Dial("tcp", s.addr) - if err != nil { - log.Printf("[ERR] Error connecting to statsite! Err: %s", err) - goto WAIT - } - - // Create a buffered writer - buffered = bufio.NewWriter(sock) - - for { - select { - case metric, ok := <-s.metricQueue: - // Get a metric from the queue - if !ok { - goto QUIT - } - - // Try to send to statsite - _, err := buffered.Write([]byte(metric)) - if err != nil { - log.Printf("[ERR] Error writing to statsite! Err: %s", err) - goto WAIT - } - case <-ticker.C: - if err := buffered.Flush(); err != nil { - log.Printf("[ERR] Error flushing to statsite! 
Err: %s", err) - goto WAIT - } - } - } - -WAIT: - // Wait for a while - wait = time.After(time.Duration(5) * time.Second) - for { - select { - // Dequeue the messages to avoid backlog - case _, ok := <-s.metricQueue: - if !ok { - goto QUIT - } - case <-wait: - goto CONNECT - } - } -QUIT: - s.metricQueue = nil -} diff --git a/vendor/src/github.com/armon/go-radix/.gitignore b/vendor/src/github.com/armon/go-radix/.gitignore deleted file mode 100644 index 00268614f0..0000000000 --- a/vendor/src/github.com/armon/go-radix/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/src/github.com/armon/go-radix/.travis.yml b/vendor/src/github.com/armon/go-radix/.travis.yml deleted file mode 100644 index 1a0bbea6c7..0000000000 --- a/vendor/src/github.com/armon/go-radix/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: - - tip diff --git a/vendor/src/github.com/armon/go-radix/LICENSE b/vendor/src/github.com/armon/go-radix/LICENSE deleted file mode 100644 index a5df10e675..0000000000 --- a/vendor/src/github.com/armon/go-radix/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2014 Armon Dadgar - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/src/github.com/armon/go-radix/README.md b/vendor/src/github.com/armon/go-radix/README.md deleted file mode 100644 index c054fe86c0..0000000000 --- a/vendor/src/github.com/armon/go-radix/README.md +++ /dev/null @@ -1,36 +0,0 @@ -go-radix [![Build Status](https://travis-ci.org/armon/go-radix.png)](https://travis-ci.org/armon/go-radix) -========= - -Provides the `radix` package that implements a [radix tree](http://en.wikipedia.org/wiki/Radix_tree). -The package only provides a single `Tree` implementation, optimized for sparse nodes. - -As a radix tree, it provides the following: - * O(k) operations. In many cases, this can be faster than a hash table since - the hash function is an O(k) operation, and hash tables have very poor cache locality. - * Minimum / Maximum value lookups - * Ordered iteration - -Documentation -============= - -The full documentation is available on [Godoc](http://godoc.org/github.com/armon/go-radix). 
- -Example -======= - -Below is a simple example of usage - -```go -// Create a tree -r := radix.New() -r.Insert("foo", 1) -r.Insert("bar", 2) -r.Insert("foobar", 2) - -// Find the longest prefix match -m, _, _ := r.LongestPrefix("foozip") -if m != "foo" { - panic("should be foo") -} -``` - diff --git a/vendor/src/github.com/armon/go-radix/radix.go b/vendor/src/github.com/armon/go-radix/radix.go deleted file mode 100644 index 8c963c914a..0000000000 --- a/vendor/src/github.com/armon/go-radix/radix.go +++ /dev/null @@ -1,467 +0,0 @@ -package radix - -import ( - "sort" - "strings" -) - -// WalkFn is used when walking the tree. Takes a -// key and value, returning if iteration should -// be terminated. -type WalkFn func(s string, v interface{}) bool - -// leafNode is used to represent a value -type leafNode struct { - key string - val interface{} -} - -// edge is used to represent an edge node -type edge struct { - label byte - node *node -} - -type node struct { - // leaf is used to store possible leaf - leaf *leafNode - - // prefix is the common prefix we ignore - prefix string - - // Edges should be stored in-order for iteration. - // We avoid a fully materialized slice to save memory, - // since in most cases we expect to be sparse - edges edges -} - -func (n *node) isLeaf() bool { - return n.leaf != nil -} - -func (n *node) addEdge(e edge) { - n.edges = append(n.edges, e) - n.edges.Sort() -} - -func (n *node) replaceEdge(e edge) { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= e.label - }) - if idx < num && n.edges[idx].label == e.label { - n.edges[idx].node = e.node - return - } - panic("replacing missing edge") -} - -func (n *node) getEdge(label byte) *node { - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= label - }) - if idx < num && n.edges[idx].label == label { - return n.edges[idx].node - } - return nil -} - -type edges []edge - -func (e edges) Len() int { - return len(e) -} - -func (e edges) Less(i, j int) bool { - return e[i].label < e[j].label -} - -func (e edges) Swap(i, j int) { - e[i], e[j] = e[j], e[i] -} - -func (e edges) Sort() { - sort.Sort(e) -} - -// Tree implements a radix tree. This can be treated as a -// Dictionary abstract data type. The main advantage over -// a standard hash map is prefix-based lookups and -// ordered iteration. -type Tree struct { - root *node - size int -} - -// New returns an empty Tree -func New() *Tree { - return NewFromMap(nil) -} - -// NewFromMap returns a new tree containing the keys -// from an existing map -func NewFromMap(m map[string]interface{}) *Tree { - t := &Tree{root: &node{}} - for k, v := range m { - t.Insert(k, v) - } - return t -} - -// Len is used to return the number of elements in the tree -func (t *Tree) Len() int { - return t.size -} - -// longestPrefix finds the length of the shared prefix -// of two strings -func longestPrefix(k1, k2 string) int { - max := len(k1) - if l := len(k2); l < max { - max = l - } - var i int - for i = 0; i < max; i++ { - if k1[i] != k2[i] { - break - } - } - return i -} - -// Insert is used to add a new entry or update -// an existing entry. Returns if updated.
-func (t *Tree) Insert(s string, v interface{}) (interface{}, bool) { - var parent *node - n := t.root - search := s - for { - // Handle key exhaustion - if len(search) == 0 { - if n.isLeaf() { - old := n.leaf.val - n.leaf.val = v - return old, true - } else { - n.leaf = &leafNode{ - key: s, - val: v, - } - t.size++ - return nil, false - } - } - - // Look for the edge - parent = n - n = n.getEdge(search[0]) - - // No edge, create one - if n == nil { - e := edge{ - label: search[0], - node: &node{ - leaf: &leafNode{ - key: s, - val: v, - }, - prefix: search, - }, - } - parent.addEdge(e) - t.size++ - return nil, false - } - - // Determine longest prefix of the search key on match - commonPrefix := longestPrefix(search, n.prefix) - if commonPrefix == len(n.prefix) { - search = search[commonPrefix:] - continue - } - - // Split the node - t.size++ - child := &node{ - prefix: search[:commonPrefix], - } - parent.replaceEdge(edge{ - label: search[0], - node: child, - }) - - // Restore the existing node - child.addEdge(edge{ - label: n.prefix[commonPrefix], - node: n, - }) - n.prefix = n.prefix[commonPrefix:] - - // Create a new leaf node - leaf := &leafNode{ - key: s, - val: v, - } - - // If the new key is a subset, add it to this node - search = search[commonPrefix:] - if len(search) == 0 { - child.leaf = leaf - return nil, false - } - - // Create a new edge for the node - child.addEdge(edge{ - label: search[0], - node: &node{ - leaf: leaf, - prefix: search, - }, - }) - return nil, false - } - return nil, false -} - -// Delete is used to delete a key, returning the previous -// value and if it was deleted -func (t *Tree) Delete(s string) (interface{}, bool) { - n := t.root - search := s - for { - // Check for key exhaustion - if len(search) == 0 { - if !n.isLeaf() { - break - } - goto DELETE - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false - -DELETE: - // Delete the leaf - leaf := n.leaf - n.leaf = nil - t.size-- - - // Check if we should merge this node - if len(n.edges) == 1 { - e := n.edges[0] - child := e.node - n.prefix = n.prefix + child.prefix - n.leaf = child.leaf - n.edges = child.edges - } - return leaf.val, true -} - -// Get is used to look up a specific key, returning -// the value and if it was found -func (t *Tree) Get(s string) (interface{}, bool) { - n := t.root - search := s - for { - // Check for key exhaustion - if len(search) == 0 { - if n.isLeaf() { - return n.leaf.val, true - } - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - return nil, false -} - -// LongestPrefix is like Get, but instead of an -// exact match, it will return the longest prefix match.
-func (t *Tree) LongestPrefix(s string) (string, interface{}, bool) { - var last *leafNode - n := t.root - search := s - for { - // Look for a leaf node - if n.isLeaf() { - last = n.leaf - } - - // Check for key exhaustion - if len(search) == 0 { - break - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } - if last != nil { - return last.key, last.val, true - } - return "", nil, false -} - -// Minimum is used to return the minimum value in the tree -func (t *Tree) Minimum() (string, interface{}, bool) { - n := t.root - for { - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } - if len(n.edges) > 0 { - n = n.edges[0].node - } else { - break - } - } - return "", nil, false -} - -// Maximum is used to return the maximum value in the tree -func (t *Tree) Maximum() (string, interface{}, bool) { - n := t.root - for { - if num := len(n.edges); num > 0 { - n = n.edges[num-1].node - continue - } - if n.isLeaf() { - return n.leaf.key, n.leaf.val, true - } else { - break - } - } - return "", nil, false -} - -// Walk is used to walk the tree -func (t *Tree) Walk(fn WalkFn) { - recursiveWalk(t.root, fn) -} - -// WalkPrefix is used to walk the tree under a prefix -func (t *Tree) WalkPrefix(prefix string, fn WalkFn) { - n := t.root - search := prefix - for { - // Check for key exhaustion - if len(search) == 0 { - recursiveWalk(n, fn) - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - break - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - - } else if strings.HasPrefix(n.prefix, search) { - // Child may be under our search prefix - recursiveWalk(n, fn) - return - } else { - break - } - } - -} - -// WalkPath is used to walk the tree, but only visiting nodes -// from the root down to a given leaf. Where WalkPrefix walks -// all the entries *under* the given prefix, this walks the -// entries *above* the given prefix. -func (t *Tree) WalkPath(path string, fn WalkFn) { - n := t.root - search := path - for { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return - } - - // Check for key exhaustion - if len(search) == 0 { - return - } - - // Look for an edge - n = n.getEdge(search[0]) - if n == nil { - return - } - - // Consume the search prefix - if strings.HasPrefix(search, n.prefix) { - search = search[len(n.prefix):] - } else { - break - } - } -} - -// recursiveWalk is used to do a pre-order walk of a node -// recursively.
Returns true if the walk should be aborted -func recursiveWalk(n *node, fn WalkFn) bool { - // Visit the leaf values if any - if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { - return true - } - - // Recurse on the children - for _, e := range n.edges { - if recursiveWalk(e.node, fn) { - return true - } - } - return false -} - -// ToMap is used to walk the tree and convert it into a map -func (t *Tree) ToMap() map[string]interface{} { - out := make(map[string]interface{}, t.size) - t.Walk(func(k string, v interface{}) bool { - out[k] = v - return false - }) - return out -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt b/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt deleted file mode 100644 index d645695673..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go deleted file mode 100644 index e50771f803..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/error.go +++ /dev/null @@ -1,145 +0,0 @@ -// Package awserr represents API error interface accessors for the SDK. -package awserr - -// An Error wraps lower level errors with code, message and an original error. -// The underlying concrete error type may also satisfy other interfaces which -// can be used to obtain more specific information about the error. -// -// Calling Error() or String() will always include the full information about -// an error based on its underlying type. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Get error details -// log.Println("Error:", awsErr.Code(), awsErr.Message()) -// -// // Prints out full error message, including original error if there was one. -// log.Println("Error:", awsErr.Error()) -// -// // Get original error -// if origErr := awsErr.OrigErr(); origErr != nil { -// // operate on original error. -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type Error interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErr() error -} - -// BatchError is a batch of errors which also wraps lower level errors with -// code, message, and original errors. Calling Error() will include all errors -// that occurred in the batch. -// -// Deprecated: Replaced with BatchedErrors. Only defined for backwards -// compatibility. -type BatchError interface { - // Satisfy the generic error interface. - error - - // Returns the short phrase depicting the classification of the error. - Code() string - - // Returns the error details message. - Message() string - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// BatchedErrors is a batch of errors which also wraps lower level errors with -// code, message, and original errors.
Calling Error() will include all errors -// that occurred in the batch. -// -// Replaces BatchError -type BatchedErrors interface { - // Satisfy the base Error interface. - Error - - // Returns the original error if one was set. Nil is returned if not set. - OrigErrs() []error -} - -// New returns an Error object described by the code, message, and origErr. -// -// If origErr satisfies the Error interface it will not be wrapped within a new -// Error object and will instead be returned. -func New(code, message string, origErr error) Error { - var errs []error - if origErr != nil { - errs = append(errs, origErr) - } - return newBaseError(code, message, errs) -} - -// NewBatchError returns a BatchedErrors with a collection of errors as an -// array of errors. -func NewBatchError(code, message string, errs []error) BatchedErrors { - return newBaseError(code, message, errs) -} - -// A RequestFailure is an interface to extract request failure information from -// an Error such as the request ID of the failed request returned by a service. -// RequestFailures may not always have a requestID value if the request failed -// prior to reaching the service such as a connection error. -// -// Example: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if reqerr, ok := err.(RequestFailure); ok { -// log.Println("Request failed", reqerr.Code(), reqerr.Message(), reqerr.RequestID()) -// } else { -// log.Println("Error:", err.Error()) -// } -// } -// -// Combined with awserr.Error: -// -// output, err := s3manage.Upload(svc, input, opts) -// if err != nil { -// if awsErr, ok := err.(awserr.Error); ok { -// // Generic AWS Error with Code, Message, and original error (if any) -// fmt.Println(awsErr.Code(), awsErr.Message(), awsErr.OrigErr()) -// -// if reqErr, ok := err.(awserr.RequestFailure); ok { -// // A service error occurred -// fmt.Println(reqErr.StatusCode(), reqErr.RequestID()) -// } -// } else { -// fmt.Println(err.Error()) -// } -// } -// -type RequestFailure interface { - Error - - // The status code of the HTTP response. - StatusCode() int - - // The request ID returned by the service for a request failure. This will - // be empty if no request ID is available, such as when the request failed due - // to a connection error. - RequestID() string -} - -// NewRequestFailure returns a new request error wrapper for the given Error -// provided. -func NewRequestFailure(err Error, statusCode int, reqID string) RequestFailure { - return newRequestError(err, statusCode, reqID) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go deleted file mode 100644 index e2d333b84b..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/awserr/types.go +++ /dev/null @@ -1,194 +0,0 @@ -package awserr - -import "fmt" - -// SprintError returns a string of the formatted error code. -// -// Both extra and origErr are optional. If they are included their lines -// will be added, but if they are not included their lines will be ignored. -func SprintError(code, message, extra string, origErr error) string { - msg := fmt.Sprintf("%s: %s", code, message) - if extra != "" { - msg = fmt.Sprintf("%s\n\t%s", msg, extra) - } - if origErr != nil { - msg = fmt.Sprintf("%s\ncaused by: %s", msg, origErr.Error()) - } - return msg -} - -// A baseError wraps the code and message which defines an error. It also -// can be used to wrap an original error object. -// -// Should be used as the root for errors satisfying the awserr.Error.
Also -// for any error which does not fit into a specific error wrapper type. -type baseError struct { - // Classification of error - code string - - // Detailed information about error - message string - - // Optional original error this error is based off of. Allows building - // chained errors. - errs []error -} - -// newBaseError returns an error object for the code, message, and errors. -// -// code is a short phrase with no whitespace depicting the classification of -// the error that is being created. -// -// message is the free flow string containing detailed information about the -// error. -// -// origErrs is the slice of error objects which will be nested under the new -// error to be returned. -func newBaseError(code, message string, origErrs []error) *baseError { - b := &baseError{ - code: code, - message: message, - errs: origErrs, - } - - return b -} - -// Error returns the string representation of the error. -// -// See ErrorWithExtra for formatting. -// -// Satisfies the error interface. -func (b baseError) Error() string { - size := len(b.errs) - if size > 0 { - return SprintError(b.code, b.message, "", errorList(b.errs)) - } - - return SprintError(b.code, b.message, "", nil) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (b baseError) String() string { - return b.Error() -} - -// Code returns the short phrase depicting the classification of the error. -func (b baseError) Code() string { - return b.code -} - -// Message returns the error details message. -func (b baseError) Message() string { - return b.message -} - -// OrigErr returns the original error if one was set. Nil is returned if no -// error was set. This only returns the first element in the list. If the full -// list is needed, use BatchedErrors. -func (b baseError) OrigErr() error { - switch len(b.errs) { - case 0: - return nil - case 1: - return b.errs[0] - default: - if err, ok := b.errs[0].(Error); ok { - return NewBatchError(err.Code(), err.Message(), b.errs[1:]) - } - return NewBatchError("BatchedErrors", - "multiple errors occurred", b.errs) - } -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (b baseError) OrigErrs() []error { - return b.errs -} - -// So that the Error interface type can be included as an anonymous field -// in the requestError struct and not conflict with the error.Error() method. -type awsError Error - -// A requestError wraps a request or service error. -// -// Composed of baseError for code, message, and original error. -type requestError struct { - awsError - statusCode int - requestID string -} - -// newRequestError returns a wrapped error with additional information for -// the request status code and service requestID. -// -// Should be used to wrap all requests which involve service requests. Even if -// the request failed without a service response, but had an HTTP status code -// that may be meaningful. -// -// Also wraps original errors via the baseError. -func newRequestError(err Error, statusCode int, requestID string) *requestError { - return &requestError{ - awsError: err, - statusCode: statusCode, - requestID: requestID, - } -} - -// Error returns the string representation of the error. -// Satisfies the error interface.
-func (r requestError) Error() string { - extra := fmt.Sprintf("status code: %d, request id: %s", - r.statusCode, r.requestID) - return SprintError(r.Code(), r.Message(), extra, r.OrigErr()) -} - -// String returns the string representation of the error. -// Alias for Error to satisfy the stringer interface. -func (r requestError) String() string { - return r.Error() -} - -// StatusCode returns the wrapped status code for the error -func (r requestError) StatusCode() int { - return r.statusCode -} - -// RequestID returns the wrapped requestID -func (r requestError) RequestID() string { - return r.requestID -} - -// OrigErrs returns the original errors if one was set. An empty slice is -// returned if no error was set. -func (r requestError) OrigErrs() []error { - if b, ok := r.awsError.(BatchedErrors); ok { - return b.OrigErrs() - } - return []error{r.OrigErr()} -} - -// An error list that satisfies the golang interface -type errorList []error - -// Error returns the string representation of the error. -// -// Satisfies the error interface. -func (e errorList) Error() string { - msg := "" - // How do we want to handle the array size being zero - if size := len(e); size > 0 { - for i := 0; i < size; i++ { - msg += fmt.Sprintf("%s", e[i].Error()) - // We check the next index to see if it is within the slice. - // If it is, then we append a newline. We do this, because unit tests - // could be broken with the additional '\n' - if i+1 < size { - msg += "\n" - } - } - } - return msg -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go deleted file mode 100644 index 8429470b9d..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/copy.go +++ /dev/null @@ -1,100 +0,0 @@ -package awsutil - -import ( - "io" - "reflect" -) - -// Copy deeply copies a src structure to dst. Useful for copying request and -// response structures. -// -// Can copy between structs of different type, but will only copy fields which -// are assignable, and exist in both structs. Fields which are not assignable, -// or do not exist in both structs are ignored. -func Copy(dst, src interface{}) { - dstval := reflect.ValueOf(dst) - if !dstval.IsValid() { - panic("Copy dst cannot be nil") - } - - rcopy(dstval, reflect.ValueOf(src), true) -} - -// CopyOf returns a copy of src while also allocating the memory for dst. -// src must be a pointer type or this operation will fail. -func CopyOf(src interface{}) (dst interface{}) { - dsti := reflect.New(reflect.TypeOf(src).Elem()) - dst = dsti.Interface() - rcopy(dsti, reflect.ValueOf(src), true) - return -} - -// rcopy performs a recursive copy of values from the source to destination. -// -// root is used to skip certain aspects of the copy which are not valid -// for the root node of a object. 
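-//
-// For reference, a minimal sketch of the exported Copy entry point above;
-// the widget type and the aws.String helper are illustrative, not part of
-// this file:
-//
-//     type widget struct{ Name *string }
-//     src := widget{Name: aws.String("gizmo")}
-//     var dst widget
-//     awsutil.Copy(&dst, &src) // dst.Name now points at its own "gizmo"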
-func rcopy(dst, src reflect.Value, root bool) { - if !src.IsValid() { - return - } - - switch src.Kind() { - case reflect.Ptr: - if _, ok := src.Interface().(io.Reader); ok { - if dst.Kind() == reflect.Ptr && dst.Elem().CanSet() { - dst.Elem().Set(src) - } else if dst.CanSet() { - dst.Set(src) - } - } else { - e := src.Type().Elem() - if dst.CanSet() && !src.IsNil() { - dst.Set(reflect.New(e)) - } - if src.Elem().IsValid() { - // Keep the current root state since the depth hasn't changed - rcopy(dst.Elem(), src.Elem(), root) - } - } - case reflect.Struct: - t := dst.Type() - for i := 0; i < t.NumField(); i++ { - name := t.Field(i).Name - srcVal := src.FieldByName(name) - dstVal := dst.FieldByName(name) - if srcVal.IsValid() && dstVal.CanSet() { - rcopy(dstVal, srcVal, false) - } - } - case reflect.Slice: - if src.IsNil() { - break - } - - s := reflect.MakeSlice(src.Type(), src.Len(), src.Cap()) - dst.Set(s) - for i := 0; i < src.Len(); i++ { - rcopy(dst.Index(i), src.Index(i), false) - } - case reflect.Map: - if src.IsNil() { - break - } - - s := reflect.MakeMap(src.Type()) - dst.Set(s) - for _, k := range src.MapKeys() { - v := src.MapIndex(k) - v2 := reflect.New(v.Type()).Elem() - rcopy(v2, v, false) - dst.SetMapIndex(k, v2) - } - default: - // Assign the value if possible. If its not assignable, the value would - // need to be converted and the impact of that may be unexpected, or is - // not compatible with the dst type. - if src.Type().AssignableTo(dst.Type()) { - dst.Set(src) - } - } -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go deleted file mode 100644 index 59fa4a558a..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/equal.go +++ /dev/null @@ -1,27 +0,0 @@ -package awsutil - -import ( - "reflect" -) - -// DeepEqual returns if the two values are deeply equal like reflect.DeepEqual. -// In addition to this, this method will also dereference the input values if -// possible so the DeepEqual performed will not fail if one parameter is a -// pointer and the other is not. -// -// DeepEqual will not perform indirection of nested values of the input parameters. -func DeepEqual(a, b interface{}) bool { - ra := reflect.Indirect(reflect.ValueOf(a)) - rb := reflect.Indirect(reflect.ValueOf(b)) - - if raValid, rbValid := ra.IsValid(), rb.IsValid(); !raValid && !rbValid { - // If the elements are both nil, and of the same type the are equal - // If they are of different types they are not equal - return reflect.TypeOf(a) == reflect.TypeOf(b) - } else if raValid != rbValid { - // Both values must be valid to be equal - return false - } - - return reflect.DeepEqual(ra.Interface(), rb.Interface()) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go deleted file mode 100644 index 4d2a01e8c4..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/path_value.go +++ /dev/null @@ -1,222 +0,0 @@ -package awsutil - -import ( - "reflect" - "regexp" - "strconv" - "strings" - - "github.com/jmespath/go-jmespath" -) - -var indexRe = regexp.MustCompile(`(.+)\[(-?\d+)?\]$`) - -// rValuesAtPath returns a slice of values found in value v. The values -// in v are explored recursively so all nested values are collected. 
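-//
-// Path syntax, as inferred from the parsing below (field names hypothetical):
-//
-//     "Config.Region"    // nested fields, split on "."
-//     "Buckets[0].Name"  // explicit index into a slice
-//     "Buckets[].Name"   // every element of a slice
-//     "A.B || A.C"       // fallback: the first path yielding values wins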
-func rValuesAtPath(v interface{}, path string, createPath, caseSensitive, nilTerm bool) []reflect.Value { - pathparts := strings.Split(path, "||") - if len(pathparts) > 1 { - for _, pathpart := range pathparts { - vals := rValuesAtPath(v, pathpart, createPath, caseSensitive, nilTerm) - if len(vals) > 0 { - return vals - } - } - return nil - } - - values := []reflect.Value{reflect.Indirect(reflect.ValueOf(v))} - components := strings.Split(path, ".") - for len(values) > 0 && len(components) > 0 { - var index *int64 - var indexStar bool - c := strings.TrimSpace(components[0]) - if c == "" { // no actual component, illegal syntax - return nil - } else if caseSensitive && c != "*" && strings.ToLower(c[0:1]) == c[0:1] { - // TODO normalize case for user - return nil // don't support unexported fields - } - - // parse this component - if m := indexRe.FindStringSubmatch(c); m != nil { - c = m[1] - if m[2] == "" { - index = nil - indexStar = true - } else { - i, _ := strconv.ParseInt(m[2], 10, 32) - index = &i - indexStar = false - } - } - - nextvals := []reflect.Value{} - for _, value := range values { - // pull component name out of struct member - if value.Kind() != reflect.Struct { - continue - } - - if c == "*" { // pull all members - for i := 0; i < value.NumField(); i++ { - if f := reflect.Indirect(value.Field(i)); f.IsValid() { - nextvals = append(nextvals, f) - } - } - continue - } - - value = value.FieldByNameFunc(func(name string) bool { - if c == name { - return true - } else if !caseSensitive && strings.ToLower(name) == strings.ToLower(c) { - return true - } - return false - }) - - if nilTerm && value.Kind() == reflect.Ptr && len(components[1:]) == 0 { - if !value.IsNil() { - value.Set(reflect.Zero(value.Type())) - } - return []reflect.Value{value} - } - - if createPath && value.Kind() == reflect.Ptr && value.IsNil() { - // TODO if the value is the terminus it should not be created - // if the value to be set to its position is nil. - value.Set(reflect.New(value.Type().Elem())) - value = value.Elem() - } else { - value = reflect.Indirect(value) - } - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - - if indexStar || index != nil { - nextvals = []reflect.Value{} - for _, value := range values { - value := reflect.Indirect(value) - if value.Kind() != reflect.Slice { - continue - } - - if indexStar { // grab all indices - for i := 0; i < value.Len(); i++ { - idx := reflect.Indirect(value.Index(i)) - if idx.IsValid() { - nextvals = append(nextvals, idx) - } - } - continue - } - - // pull out index - i := int(*index) - if i >= value.Len() { // check out of bounds - if createPath { - // TODO resize slice - } else { - continue - } - } else if i < 0 { // support negative indexing - i = value.Len() + i - } - value = reflect.Indirect(value.Index(i)) - - if value.Kind() == reflect.Slice || value.Kind() == reflect.Map { - if !createPath && value.IsNil() { - value = reflect.ValueOf(nil) - } - } - - if value.IsValid() { - nextvals = append(nextvals, value) - } - } - values = nextvals - } - - components = components[1:] - } - return values -} - -// ValuesAtPath returns a list of values at the case insensitive lexical -// path inside of a structure. 
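-//
-// A usage sketch; the listOutput value and the JMESPath expression are
-// hypothetical:
-//
-//     names, err := awsutil.ValuesAtPath(listOutput, "Buckets[].Name")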
-func ValuesAtPath(i interface{}, path string) ([]interface{}, error) { - result, err := jmespath.Search(path, i) - if err != nil { - return nil, err - } - - v := reflect.ValueOf(result) - if !v.IsValid() || (v.Kind() == reflect.Ptr && v.IsNil()) { - return nil, nil - } - if s, ok := result.([]interface{}); ok { - return s, err - } - if v.Kind() == reflect.Map && v.Len() == 0 { - return nil, nil - } - if v.Kind() == reflect.Slice { - out := make([]interface{}, v.Len()) - for i := 0; i < v.Len(); i++ { - out[i] = v.Index(i).Interface() - } - return out, nil - } - - return []interface{}{result}, nil -} - -// SetValueAtPath sets a value at the case insensitive lexical path inside -// of a structure. -func SetValueAtPath(i interface{}, path string, v interface{}) { - if rvals := rValuesAtPath(i, path, true, false, v == nil); rvals != nil { - for _, rval := range rvals { - if rval.Kind() == reflect.Ptr && rval.IsNil() { - continue - } - setValue(rval, v) - } - } -} - -func setValue(dstVal reflect.Value, src interface{}) { - if dstVal.Kind() == reflect.Ptr { - dstVal = reflect.Indirect(dstVal) - } - srcVal := reflect.ValueOf(src) - - if !srcVal.IsValid() { // src is literal nil - if dstVal.CanAddr() { - // Convert to pointer so that pointer's value can be nil'ed - // dstVal = dstVal.Addr() - } - dstVal.Set(reflect.Zero(dstVal.Type())) - - } else if srcVal.Kind() == reflect.Ptr { - if srcVal.IsNil() { - srcVal = reflect.Zero(dstVal.Type()) - } else { - srcVal = reflect.ValueOf(src).Elem() - } - dstVal.Set(srcVal) - } else { - dstVal.Set(srcVal) - } - -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go deleted file mode 100644 index fc38172fec..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/prettify.go +++ /dev/null @@ -1,107 +0,0 @@ -package awsutil - -import ( - "bytes" - "fmt" - "io" - "reflect" - "strings" -) - -// Prettify returns the string representation of a value. -func Prettify(i interface{}) string { - var buf bytes.Buffer - prettify(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -// prettify will recursively walk value v to build a textual -// representation of the value. 
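-//
-// For illustration, a struct with a string field and an int field renders
-// roughly as:
-//
-//     {
-//       Name: "demo",
-//       Count: 2
-//     }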
-func prettify(v reflect.Value, indent int, buf *bytes.Buffer) {
-    for v.Kind() == reflect.Ptr {
-        v = v.Elem()
-    }
-
-    switch v.Kind() {
-    case reflect.Struct:
-        strtype := v.Type().String()
-        if strtype == "time.Time" {
-            fmt.Fprintf(buf, "%s", v.Interface())
-            break
-        } else if strings.HasPrefix(strtype, "io.") {
-            buf.WriteString("<buffer>")
-            break
-        }
-
-        buf.WriteString("{\n")
-
-        names := []string{}
-        for i := 0; i < v.Type().NumField(); i++ {
-            name := v.Type().Field(i).Name
-            f := v.Field(i)
-            if name[0:1] == strings.ToLower(name[0:1]) {
-                continue // ignore unexported fields
-            }
-            if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice || f.Kind() == reflect.Map) && f.IsNil() {
-                continue // ignore unset fields
-            }
-            names = append(names, name)
-        }
-
-        for i, n := range names {
-            val := v.FieldByName(n)
-            buf.WriteString(strings.Repeat(" ", indent+2))
-            buf.WriteString(n + ": ")
-            prettify(val, indent+2, buf)
-
-            if i < len(names)-1 {
-                buf.WriteString(",\n")
-            }
-        }
-
-        buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
-    case reflect.Slice:
-        nl, id, id2 := "", "", ""
-        if v.Len() > 3 {
-            nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2)
-        }
-        buf.WriteString("[" + nl)
-        for i := 0; i < v.Len(); i++ {
-            buf.WriteString(id2)
-            prettify(v.Index(i), indent+2, buf)
-
-            if i < v.Len()-1 {
-                buf.WriteString("," + nl)
-            }
-        }
-
-        buf.WriteString(nl + id + "]")
-    case reflect.Map:
-        buf.WriteString("{\n")
-
-        for i, k := range v.MapKeys() {
-            buf.WriteString(strings.Repeat(" ", indent+2))
-            buf.WriteString(k.String() + ": ")
-            prettify(v.MapIndex(k), indent+2, buf)
-
-            if i < v.Len()-1 {
-                buf.WriteString(",\n")
-            }
-        }
-
-        buf.WriteString("\n" + strings.Repeat(" ", indent) + "}")
-    default:
-        if !v.IsValid() {
-            fmt.Fprint(buf, "<invalid value>")
-            return
-        }
-        format := "%v"
-        switch v.Interface().(type) {
-        case string:
-            format = "%q"
-        case io.ReadSeeker, io.Reader:
-            format = "buffer(%p)"
-        }
-        fmt.Fprintf(buf, format, v.Interface())
-    }
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go b/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
deleted file mode 100644
index b6432f1a11..0000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/awsutil/string_value.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package awsutil
-
-import (
-    "bytes"
-    "fmt"
-    "reflect"
-    "strings"
-)
-
-// StringValue returns the string representation of a value.
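-//
-// Unlike Prettify in this package, this walker does not special-case
-// time.Time or io types. A usage sketch (the params value is hypothetical):
-//
-//     s := awsutil.StringValue(req.Params)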
-func StringValue(i interface{}) string { - var buf bytes.Buffer - stringValue(reflect.ValueOf(i), 0, &buf) - return buf.String() -} - -func stringValue(v reflect.Value, indent int, buf *bytes.Buffer) { - for v.Kind() == reflect.Ptr { - v = v.Elem() - } - - switch v.Kind() { - case reflect.Struct: - buf.WriteString("{\n") - - names := []string{} - for i := 0; i < v.Type().NumField(); i++ { - name := v.Type().Field(i).Name - f := v.Field(i) - if name[0:1] == strings.ToLower(name[0:1]) { - continue // ignore unexported fields - } - if (f.Kind() == reflect.Ptr || f.Kind() == reflect.Slice) && f.IsNil() { - continue // ignore unset fields - } - names = append(names, name) - } - - for i, n := range names { - val := v.FieldByName(n) - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(n + ": ") - stringValue(val, indent+2, buf) - - if i < len(names)-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - case reflect.Slice: - nl, id, id2 := "", "", "" - if v.Len() > 3 { - nl, id, id2 = "\n", strings.Repeat(" ", indent), strings.Repeat(" ", indent+2) - } - buf.WriteString("[" + nl) - for i := 0; i < v.Len(); i++ { - buf.WriteString(id2) - stringValue(v.Index(i), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString("," + nl) - } - } - - buf.WriteString(nl + id + "]") - case reflect.Map: - buf.WriteString("{\n") - - for i, k := range v.MapKeys() { - buf.WriteString(strings.Repeat(" ", indent+2)) - buf.WriteString(k.String() + ": ") - stringValue(v.MapIndex(k), indent+2, buf) - - if i < v.Len()-1 { - buf.WriteString(",\n") - } - } - - buf.WriteString("\n" + strings.Repeat(" ", indent) + "}") - default: - format := "%v" - switch v.Interface().(type) { - case string: - format = "%q" - } - fmt.Fprintf(buf, format, v.Interface()) - } -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go b/vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go deleted file mode 100644 index c8d0564d8a..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/client/client.go +++ /dev/null @@ -1,120 +0,0 @@ -package client - -import ( - "fmt" - "io/ioutil" - "net/http/httputil" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// A Config provides configuration to a service client instance. -type Config struct { - Config *aws.Config - Handlers request.Handlers - Endpoint, SigningRegion string -} - -// ConfigProvider provides a generic way for a service client to receive -// the ClientConfig without circular dependencies. -type ConfigProvider interface { - ClientConfig(serviceName string, cfgs ...*aws.Config) Config -} - -// A Client implements the base client request and response handling -// used by all service clients. -type Client struct { - request.Retryer - metadata.ClientInfo - - Config aws.Config - Handlers request.Handlers -} - -// New will return a pointer to a new initialized service client. 
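-//
-// Service packages normally call this on your behalf; a hand-rolled sketch,
-// where cfg is a client.Config and the metadata values are illustrative:
-//
-//     c := client.New(*cfg.Config,
-//         metadata.ClientInfo{ServiceName: "svc", SigningRegion: "us-west-2"},
-//         cfg.Handlers)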
-func New(cfg aws.Config, info metadata.ClientInfo, handlers request.Handlers, options ...func(*Client)) *Client { - svc := &Client{ - Config: cfg, - ClientInfo: info, - Handlers: handlers, - } - - switch retryer, ok := cfg.Retryer.(request.Retryer); { - case ok: - svc.Retryer = retryer - case cfg.Retryer != nil && cfg.Logger != nil: - s := fmt.Sprintf("WARNING: %T does not implement request.Retryer; using DefaultRetryer instead", cfg.Retryer) - cfg.Logger.Log(s) - fallthrough - default: - maxRetries := aws.IntValue(cfg.MaxRetries) - if cfg.MaxRetries == nil || maxRetries == aws.UseServiceDefaultRetries { - maxRetries = 3 - } - svc.Retryer = DefaultRetryer{NumMaxRetries: maxRetries} - } - - svc.AddDebugHandlers() - - for _, option := range options { - option(svc) - } - - return svc -} - -// NewRequest returns a new Request pointer for the service API -// operation and parameters. -func (c *Client) NewRequest(operation *request.Operation, params interface{}, data interface{}) *request.Request { - return request.New(c.Config, c.ClientInfo, c.Handlers, c.Retryer, operation, params, data) -} - -// AddDebugHandlers injects debug logging handlers into the service to log request -// debug information. -func (c *Client) AddDebugHandlers() { - if !c.Config.LogLevel.AtLeast(aws.LogDebug) { - return - } - - c.Handlers.Send.PushFront(logRequest) - c.Handlers.Send.PushBack(logResponse) -} - -const logReqMsg = `DEBUG: Request %s/%s Details: ----[ REQUEST POST-SIGN ]----------------------------- -%s ------------------------------------------------------` - -func logRequest(r *request.Request) { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, _ := httputil.DumpRequestOut(r.HTTPRequest, logBody) - - if logBody { - // Reset the request body because dumpRequest will re-wrap the r.HTTPRequest's - // Body as a NoOpCloser and will not be reset after read by the HTTP - // client reader. - r.Body.Seek(r.BodyStart, 0) - r.HTTPRequest.Body = ioutil.NopCloser(r.Body) - } - - r.Config.Logger.Log(fmt.Sprintf(logReqMsg, r.ClientInfo.ServiceName, r.Operation.Name, string(dumpedBody))) -} - -const logRespMsg = `DEBUG: Response %s/%s Details: ----[ RESPONSE ]-------------------------------------- -%s ------------------------------------------------------` - -func logResponse(r *request.Request) { - var msg = "no response data" - if r.HTTPResponse != nil { - logBody := r.Config.LogLevel.Matches(aws.LogDebugWithHTTPBody) - dumpedBody, _ := httputil.DumpResponse(r.HTTPResponse, logBody) - msg = string(dumpedBody) - } else if r.Error != nil { - msg = r.Error.Error() - } - r.Config.Logger.Log(fmt.Sprintf(logRespMsg, r.ClientInfo.ServiceName, r.Operation.Name, msg)) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go b/vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go deleted file mode 100644 index 43a3676b79..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/client/default_retryer.go +++ /dev/null @@ -1,90 +0,0 @@ -package client - -import ( - "math/rand" - "sync" - "time" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// DefaultRetryer implements basic retry logic using exponential backoff for -// most services. If you want to implement custom retry logic, implement the -// request.Retryer interface or create a structure type that composes this -// struct and override the specific methods. 
For example, to override only -// the MaxRetries method: -// -// type retryer struct { -// service.DefaultRetryer -// } -// -// // This implementation always has 100 max retries -// func (d retryer) MaxRetries() uint { return 100 } -type DefaultRetryer struct { - NumMaxRetries int -} - -// MaxRetries returns the number of maximum returns the service will use to make -// an individual API request. -func (d DefaultRetryer) MaxRetries() int { - return d.NumMaxRetries -} - -var seededRand = rand.New(&lockedSource{src: rand.NewSource(time.Now().UnixNano())}) - -// RetryRules returns the delay duration before retrying this request again -func (d DefaultRetryer) RetryRules(r *request.Request) time.Duration { - // Set the upper limit of delay in retrying at ~five minutes - minTime := 30 - throttle := d.shouldThrottle(r) - if throttle { - minTime = 500 - } - - retryCount := r.RetryCount - if retryCount > 13 { - retryCount = 13 - } else if throttle && retryCount > 8 { - retryCount = 8 - } - - delay := (1 << uint(retryCount)) * (seededRand.Intn(minTime) + minTime) - return time.Duration(delay) * time.Millisecond -} - -// ShouldRetry returns true if the request should be retried. -func (d DefaultRetryer) ShouldRetry(r *request.Request) bool { - if r.HTTPResponse.StatusCode >= 500 { - return true - } - return r.IsErrorRetryable() || d.shouldThrottle(r) -} - -// ShouldThrottle returns true if the request should be throttled. -func (d DefaultRetryer) shouldThrottle(r *request.Request) bool { - if r.HTTPResponse.StatusCode == 502 || - r.HTTPResponse.StatusCode == 503 || - r.HTTPResponse.StatusCode == 504 { - return true - } - return r.IsErrorThrottle() -} - -// lockedSource is a thread-safe implementation of rand.Source -type lockedSource struct { - lk sync.Mutex - src rand.Source -} - -func (r *lockedSource) Int63() (n int64) { - r.lk.Lock() - n = r.src.Int63() - r.lk.Unlock() - return -} - -func (r *lockedSource) Seed(seed int64) { - r.lk.Lock() - r.src.Seed(seed) - r.lk.Unlock() -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go b/vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go deleted file mode 100644 index 4778056ddf..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/client/metadata/client_info.go +++ /dev/null @@ -1,12 +0,0 @@ -package metadata - -// ClientInfo wraps immutable data from the client.Client structure. -type ClientInfo struct { - ServiceName string - APIVersion string - Endpoint string - SigningName string - SigningRegion string - JSONVersion string - TargetPrefix string -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/config.go b/vendor/src/github.com/aws/aws-sdk-go/aws/config.go deleted file mode 100644 index bfaa152033..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/config.go +++ /dev/null @@ -1,358 +0,0 @@ -package aws - -import ( - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws/credentials" -) - -// UseServiceDefaultRetries instructs the config to use the service's own default -// number of retries. This will be the default action if Config.MaxRetries -// is nil also. -const UseServiceDefaultRetries = -1 - -// RequestRetryer is an alias for a type that implements the request.Retryer interface. -type RequestRetryer interface{} - -// A Config provides service configuration for service clients. By default, -// all clients will use the {defaults.DefaultConfig} structure. -type Config struct { - // Enables verbose error printing of all credential chain errors. 
- // Should be used when wanting to see all errors while attempting to retreive - // credentials. - CredentialsChainVerboseErrors *bool - - // The credentials object to use when signing requests. Defaults to - // a chain of credential providers to search for credentials in environment - // variables, shared credential file, and EC2 Instance Roles. - Credentials *credentials.Credentials - - // An optional endpoint URL (hostname only or fully qualified URI) - // that overrides the default generated endpoint for a client. Set this - // to `""` to use the default generated endpoint. - // - // @note You must still provide a `Region` value when specifying an - // endpoint for a client. - Endpoint *string - - // The region to send requests to. This parameter is required and must - // be configured globally or on a per-client basis unless otherwise - // noted. A full list of regions is found in the "Regions and Endpoints" - // document. - // - // @see http://docs.aws.amazon.com/general/latest/gr/rande.html - // AWS Regions and Endpoints - Region *string - - // Set this to `true` to disable SSL when sending requests. Defaults - // to `false`. - DisableSSL *bool - - // The HTTP client to use when sending requests. Defaults to - // `http.DefaultClient`. - HTTPClient *http.Client - - // An integer value representing the logging level. The default log level - // is zero (LogOff), which represents no logging. To enable logging set - // to a LogLevel Value. - LogLevel *LogLevelType - - // The logger writer interface to write logging messages to. Defaults to - // standard out. - Logger Logger - - // The maximum number of times that a request will be retried for failures. - // Defaults to -1, which defers the max retry setting to the service specific - // configuration. - MaxRetries *int - - // Retryer guides how HTTP requests should be retried in case of recoverable failures. - // - // When nil or the value does not implement the request.Retryer interface, - // the request.DefaultRetryer will be used. - // - // When both Retryer and MaxRetries are non-nil, the former is used and - // the latter ignored. - // - // To set the Retryer field in a type-safe manner and with chaining, use - // the request.WithRetryer helper function: - // - // cfg := request.WithRetryer(aws.NewConfig(), myRetryer) - // - Retryer RequestRetryer - - // Disables semantic parameter validation, which validates input for missing - // required fields and/or other semantic request input errors. - DisableParamValidation *bool - - // Disables the computation of request and response checksums, e.g., - // CRC32 checksums in Amazon DynamoDB. - DisableComputeChecksums *bool - - // Set this to `true` to force the request to use path-style addressing, - // i.e., `http://s3.amazonaws.com/BUCKET/KEY`. By default, the S3 client will - // use virtual hosted bucket addressing when possible - // (`http://BUCKET.s3.amazonaws.com/KEY`). - // - // @note This configuration option is specific to the Amazon S3 service. - // @see http://docs.aws.amazon.com/AmazonS3/latest/dev/VirtualHosting.html - // Amazon S3: Virtual Hosting of Buckets - S3ForcePathStyle *bool - - // Set this to `true` to disable the SDK adding the `Expect: 100-Continue` - // header to PUT requests over 2MB of content. 100-Continue instructs the - // HTTP client not to send the body until the service responds with a - // `continue` status. This is useful to prevent sending the request body - // until after the request is authenticated, and validated. 
-    //
-    // http://docs.aws.amazon.com/AmazonS3/latest/API/RESTObjectPUT.html
-    //
-    // 100-Continue is only enabled for Go 1.6 and above. See `http.Transport`'s
-    // `ExpectContinueTimeout` for information on adjusting the continue wait timeout.
-    // https://golang.org/pkg/net/http/#Transport
-    //
-    // You should use this flag to disable 100-Continue if you experience issues
-    // with proxies or third-party S3 compatible services.
-    S3Disable100Continue *bool
-
-    // Set this to `true` to enable the S3 Accelerate feature. All operations
-    // compatible with S3 Accelerate will use the accelerate endpoint for
-    // requests. Requests not compatible will fall back to normal S3 requests.
-    //
-    // The bucket must be enabled for accelerate before it can be used with an
-    // S3 client that has accelerate enabled. If the bucket is not enabled for
-    // accelerate an error will be returned. The bucket name must also be DNS
-    // compatible to work with accelerate.
-    S3UseAccelerate *bool
-
-    // Set this to `true` to disable the EC2Metadata client from overriding the
-    // default http.Client's Timeout. This is helpful if you do not want the EC2Metadata
-    // client to create a new http.Client. This option is only meaningful if you're not
-    // already using a custom HTTP client with the SDK. Enabled by default.
-    //
-    // Must be set and provided to the session.New() in order to disable the EC2Metadata
-    // overriding the timeout for default credentials chain.
-    //
-    // Example:
-    //    sess := session.New(aws.NewConfig().WithEC2MetadataDisableTimeoutOverride(true))
-    //    svc := s3.New(sess)
-    //
-    EC2MetadataDisableTimeoutOverride *bool
-
-    SleepDelay func(time.Duration)
-}
-
-// NewConfig returns a new Config pointer that can be chained with builder methods to
-// set multiple configuration values inline without using pointers.
-//
-//     svc := s3.New(aws.NewConfig().WithRegion("us-west-2").WithMaxRetries(10))
-//
-func NewConfig() *Config {
-    return &Config{}
-}
-
-// WithCredentialsChainVerboseErrors sets a config verbose errors boolean,
-// returning a Config pointer for chaining.
-func (c *Config) WithCredentialsChainVerboseErrors(verboseErrs bool) *Config {
-    c.CredentialsChainVerboseErrors = &verboseErrs
-    return c
-}
-
-// WithCredentials sets a config Credentials value returning a Config pointer
-// for chaining.
-func (c *Config) WithCredentials(creds *credentials.Credentials) *Config {
-    c.Credentials = creds
-    return c
-}
-
-// WithEndpoint sets a config Endpoint value returning a Config pointer for
-// chaining.
-func (c *Config) WithEndpoint(endpoint string) *Config {
-    c.Endpoint = &endpoint
-    return c
-}
-
-// WithRegion sets a config Region value returning a Config pointer for
-// chaining.
-func (c *Config) WithRegion(region string) *Config {
-    c.Region = &region
-    return c
-}
-
-// WithDisableSSL sets a config DisableSSL value returning a Config pointer
-// for chaining.
-func (c *Config) WithDisableSSL(disable bool) *Config {
-    c.DisableSSL = &disable
-    return c
-}
-
-// WithHTTPClient sets a config HTTPClient value returning a Config pointer
-// for chaining.
-func (c *Config) WithHTTPClient(client *http.Client) *Config {
-    c.HTTPClient = client
-    return c
-}
-
-// WithMaxRetries sets a config MaxRetries value returning a Config pointer
-// for chaining.
-func (c *Config) WithMaxRetries(max int) *Config {
-    c.MaxRetries = &max
-    return c
-}
-
-// WithDisableParamValidation sets a config DisableParamValidation value
-// returning a Config pointer for chaining.
-func (c *Config) WithDisableParamValidation(disable bool) *Config { - c.DisableParamValidation = &disable - return c -} - -// WithDisableComputeChecksums sets a config DisableComputeChecksums value -// returning a Config pointer for chaining. -func (c *Config) WithDisableComputeChecksums(disable bool) *Config { - c.DisableComputeChecksums = &disable - return c -} - -// WithLogLevel sets a config LogLevel value returning a Config pointer for -// chaining. -func (c *Config) WithLogLevel(level LogLevelType) *Config { - c.LogLevel = &level - return c -} - -// WithLogger sets a config Logger value returning a Config pointer for -// chaining. -func (c *Config) WithLogger(logger Logger) *Config { - c.Logger = logger - return c -} - -// WithS3ForcePathStyle sets a config S3ForcePathStyle value returning a Config -// pointer for chaining. -func (c *Config) WithS3ForcePathStyle(force bool) *Config { - c.S3ForcePathStyle = &force - return c -} - -// WithS3Disable100Continue sets a config S3Disable100Continue value returning -// a Config pointer for chaining. -func (c *Config) WithS3Disable100Continue(disable bool) *Config { - c.S3Disable100Continue = &disable - return c -} - -// WithS3UseAccelerate sets a config S3UseAccelerate value returning a Config -// pointer for chaining. -func (c *Config) WithS3UseAccelerate(enable bool) *Config { - c.S3UseAccelerate = &enable - return c -} - -// WithEC2MetadataDisableTimeoutOverride sets a config EC2MetadataDisableTimeoutOverride value -// returning a Config pointer for chaining. -func (c *Config) WithEC2MetadataDisableTimeoutOverride(enable bool) *Config { - c.EC2MetadataDisableTimeoutOverride = &enable - return c -} - -// WithSleepDelay overrides the function used to sleep while waiting for the -// next retry. Defaults to time.Sleep. -func (c *Config) WithSleepDelay(fn func(time.Duration)) *Config { - c.SleepDelay = fn - return c -} - -// MergeIn merges the passed in configs into the existing config object. 
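-//
-// A short sketch: fields set on later configs win.
-//
-//     base := aws.NewConfig().WithRegion("us-west-2")
-//     base.MergeIn(aws.NewConfig().WithMaxRetries(3))
-//     // base now carries both the region and the retry count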
-func (c *Config) MergeIn(cfgs ...*Config) { - for _, other := range cfgs { - mergeInConfig(c, other) - } -} - -func mergeInConfig(dst *Config, other *Config) { - if other == nil { - return - } - - if other.CredentialsChainVerboseErrors != nil { - dst.CredentialsChainVerboseErrors = other.CredentialsChainVerboseErrors - } - - if other.Credentials != nil { - dst.Credentials = other.Credentials - } - - if other.Endpoint != nil { - dst.Endpoint = other.Endpoint - } - - if other.Region != nil { - dst.Region = other.Region - } - - if other.DisableSSL != nil { - dst.DisableSSL = other.DisableSSL - } - - if other.HTTPClient != nil { - dst.HTTPClient = other.HTTPClient - } - - if other.LogLevel != nil { - dst.LogLevel = other.LogLevel - } - - if other.Logger != nil { - dst.Logger = other.Logger - } - - if other.MaxRetries != nil { - dst.MaxRetries = other.MaxRetries - } - - if other.Retryer != nil { - dst.Retryer = other.Retryer - } - - if other.DisableParamValidation != nil { - dst.DisableParamValidation = other.DisableParamValidation - } - - if other.DisableComputeChecksums != nil { - dst.DisableComputeChecksums = other.DisableComputeChecksums - } - - if other.S3ForcePathStyle != nil { - dst.S3ForcePathStyle = other.S3ForcePathStyle - } - - if other.S3Disable100Continue != nil { - dst.S3Disable100Continue = other.S3Disable100Continue - } - - if other.S3UseAccelerate != nil { - dst.S3UseAccelerate = other.S3UseAccelerate - } - - if other.EC2MetadataDisableTimeoutOverride != nil { - dst.EC2MetadataDisableTimeoutOverride = other.EC2MetadataDisableTimeoutOverride - } - - if other.SleepDelay != nil { - dst.SleepDelay = other.SleepDelay - } -} - -// Copy will return a shallow copy of the Config object. If any additional -// configurations are provided they will be merged into the new config returned. -func (c *Config) Copy(cfgs ...*Config) *Config { - dst := &Config{} - dst.MergeIn(c) - - for _, cfg := range cfgs { - dst.MergeIn(cfg) - } - - return dst -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go b/vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go deleted file mode 100644 index cff5c5c8a7..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/convert_types.go +++ /dev/null @@ -1,369 +0,0 @@ -package aws - -import "time" - -// String returns a pointer to of the string value passed in. -func String(v string) *string { - return &v -} - -// StringValue returns the value of the string pointer passed in or -// "" if the pointer is nil. 
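-//
-// String and StringValue pair up across the SDK's pointer-based API fields:
-//
-//     p := aws.String("us-west-2")
-//     aws.StringValue(p)   // "us-west-2"
-//     aws.StringValue(nil) // ""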
-func StringValue(v *string) string { - if v != nil { - return *v - } - return "" -} - -// StringSlice converts a slice of string values into a slice of -// string pointers -func StringSlice(src []string) []*string { - dst := make([]*string, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// StringValueSlice converts a slice of string pointers into a slice of -// string values -func StringValueSlice(src []*string) []string { - dst := make([]string, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// StringMap converts a string map of string values into a string -// map of string pointers -func StringMap(src map[string]string) map[string]*string { - dst := make(map[string]*string) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// StringValueMap converts a string map of string pointers into a string -// map of string values -func StringValueMap(src map[string]*string) map[string]string { - dst := make(map[string]string) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Bool returns a pointer to of the bool value passed in. -func Bool(v bool) *bool { - return &v -} - -// BoolValue returns the value of the bool pointer passed in or -// false if the pointer is nil. -func BoolValue(v *bool) bool { - if v != nil { - return *v - } - return false -} - -// BoolSlice converts a slice of bool values into a slice of -// bool pointers -func BoolSlice(src []bool) []*bool { - dst := make([]*bool, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// BoolValueSlice converts a slice of bool pointers into a slice of -// bool values -func BoolValueSlice(src []*bool) []bool { - dst := make([]bool, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// BoolMap converts a string map of bool values into a string -// map of bool pointers -func BoolMap(src map[string]bool) map[string]*bool { - dst := make(map[string]*bool) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// BoolValueMap converts a string map of bool pointers into a string -// map of bool values -func BoolValueMap(src map[string]*bool) map[string]bool { - dst := make(map[string]bool) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int returns a pointer to of the int value passed in. -func Int(v int) *int { - return &v -} - -// IntValue returns the value of the int pointer passed in or -// 0 if the pointer is nil. 
-func IntValue(v *int) int { - if v != nil { - return *v - } - return 0 -} - -// IntSlice converts a slice of int values into a slice of -// int pointers -func IntSlice(src []int) []*int { - dst := make([]*int, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// IntValueSlice converts a slice of int pointers into a slice of -// int values -func IntValueSlice(src []*int) []int { - dst := make([]int, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// IntMap converts a string map of int values into a string -// map of int pointers -func IntMap(src map[string]int) map[string]*int { - dst := make(map[string]*int) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// IntValueMap converts a string map of int pointers into a string -// map of int values -func IntValueMap(src map[string]*int) map[string]int { - dst := make(map[string]int) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Int64 returns a pointer to of the int64 value passed in. -func Int64(v int64) *int64 { - return &v -} - -// Int64Value returns the value of the int64 pointer passed in or -// 0 if the pointer is nil. -func Int64Value(v *int64) int64 { - if v != nil { - return *v - } - return 0 -} - -// Int64Slice converts a slice of int64 values into a slice of -// int64 pointers -func Int64Slice(src []int64) []*int64 { - dst := make([]*int64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Int64ValueSlice converts a slice of int64 pointers into a slice of -// int64 values -func Int64ValueSlice(src []*int64) []int64 { - dst := make([]int64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Int64Map converts a string map of int64 values into a string -// map of int64 pointers -func Int64Map(src map[string]int64) map[string]*int64 { - dst := make(map[string]*int64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Int64ValueMap converts a string map of int64 pointers into a string -// map of int64 values -func Int64ValueMap(src map[string]*int64) map[string]int64 { - dst := make(map[string]int64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Float64 returns a pointer to of the float64 value passed in. -func Float64(v float64) *float64 { - return &v -} - -// Float64Value returns the value of the float64 pointer passed in or -// 0 if the pointer is nil. 
-func Float64Value(v *float64) float64 { - if v != nil { - return *v - } - return 0 -} - -// Float64Slice converts a slice of float64 values into a slice of -// float64 pointers -func Float64Slice(src []float64) []*float64 { - dst := make([]*float64, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// Float64ValueSlice converts a slice of float64 pointers into a slice of -// float64 values -func Float64ValueSlice(src []*float64) []float64 { - dst := make([]float64, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// Float64Map converts a string map of float64 values into a string -// map of float64 pointers -func Float64Map(src map[string]float64) map[string]*float64 { - dst := make(map[string]*float64) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// Float64ValueMap converts a string map of float64 pointers into a string -// map of float64 values -func Float64ValueMap(src map[string]*float64) map[string]float64 { - dst := make(map[string]float64) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} - -// Time returns a pointer to of the time.Time value passed in. -func Time(v time.Time) *time.Time { - return &v -} - -// TimeValue returns the value of the time.Time pointer passed in or -// time.Time{} if the pointer is nil. -func TimeValue(v *time.Time) time.Time { - if v != nil { - return *v - } - return time.Time{} -} - -// TimeUnixMilli returns a Unix timestamp in milliseconds from "January 1, 1970 UTC". -// The result is undefined if the Unix time cannot be represented by an int64. -// Which includes calling TimeUnixMilli on a zero Time is undefined. -// -// This utility is useful for service API's such as CloudWatch Logs which require -// their unix time values to be in milliseconds. -// -// See Go stdlib https://golang.org/pkg/time/#Time.UnixNano for more information. 
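-//
-// For example, using Go's reference time:
-//
-//     t := time.Date(2006, 1, 2, 15, 4, 5, 0, time.UTC)
-//     aws.TimeUnixMilli(t) // 1136214245000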
-func TimeUnixMilli(t time.Time) int64 { - return t.UnixNano() / int64(time.Millisecond/time.Nanosecond) -} - -// TimeSlice converts a slice of time.Time values into a slice of -// time.Time pointers -func TimeSlice(src []time.Time) []*time.Time { - dst := make([]*time.Time, len(src)) - for i := 0; i < len(src); i++ { - dst[i] = &(src[i]) - } - return dst -} - -// TimeValueSlice converts a slice of time.Time pointers into a slice of -// time.Time values -func TimeValueSlice(src []*time.Time) []time.Time { - dst := make([]time.Time, len(src)) - for i := 0; i < len(src); i++ { - if src[i] != nil { - dst[i] = *(src[i]) - } - } - return dst -} - -// TimeMap converts a string map of time.Time values into a string -// map of time.Time pointers -func TimeMap(src map[string]time.Time) map[string]*time.Time { - dst := make(map[string]*time.Time) - for k, val := range src { - v := val - dst[k] = &v - } - return dst -} - -// TimeValueMap converts a string map of time.Time pointers into a string -// map of time.Time values -func TimeValueMap(src map[string]*time.Time) map[string]time.Time { - dst := make(map[string]time.Time) - for k, val := range src { - if val != nil { - dst[k] = *val - } - } - return dst -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go b/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go deleted file mode 100644 index 8456e29b56..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/handlers.go +++ /dev/null @@ -1,152 +0,0 @@ -package corehandlers - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "regexp" - "runtime" - "strconv" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// Interface for matching types which also have a Len method. -type lener interface { - Len() int -} - -// BuildContentLengthHandler builds the content length of a request based on the body, -// or will use the HTTPRequest.Header's "Content-Length" if defined. If unable -// to determine request body length and no "Content-Length" was specified it will panic. -// -// The Content-Length will only be aded to the request if the length of the body -// is greater than 0. If the body is empty or the current `Content-Length` -// header is <= 0, the header will also be stripped. -var BuildContentLengthHandler = request.NamedHandler{Name: "core.BuildContentLengthHandler", Fn: func(r *request.Request) { - var length int64 - - if slength := r.HTTPRequest.Header.Get("Content-Length"); slength != "" { - length, _ = strconv.ParseInt(slength, 10, 64) - } else { - switch body := r.Body.(type) { - case nil: - length = 0 - case lener: - length = int64(body.Len()) - case io.Seeker: - r.BodyStart, _ = body.Seek(0, 1) - end, _ := body.Seek(0, 2) - body.Seek(r.BodyStart, 0) // make sure to seek back to original location - length = end - r.BodyStart - default: - panic("Cannot get length of body, must provide `ContentLength`") - } - } - - if length > 0 { - r.HTTPRequest.ContentLength = length - r.HTTPRequest.Header.Set("Content-Length", fmt.Sprintf("%d", length)) - } else { - r.HTTPRequest.ContentLength = 0 - r.HTTPRequest.Header.Del("Content-Length") - } -}} - -// SDKVersionUserAgentHandler is a request handler for adding the SDK Version to the user agent. 
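-//
-// The resulting User-Agent value looks roughly like the following; the
-// versions vary by build:
-//
-//     aws-sdk-go/1.1.32 (go1.6.2; linux; amd64)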
-var SDKVersionUserAgentHandler = request.NamedHandler{ - Name: "core.SDKVersionUserAgentHandler", - Fn: request.MakeAddToUserAgentHandler(aws.SDKName, aws.SDKVersion, - runtime.Version(), runtime.GOOS, runtime.GOARCH), -} - -var reStatusCode = regexp.MustCompile(`^(\d{3})`) - -// SendHandler is a request handler to send service request using HTTP client. -var SendHandler = request.NamedHandler{Name: "core.SendHandler", Fn: func(r *request.Request) { - var err error - r.HTTPResponse, err = r.Config.HTTPClient.Do(r.HTTPRequest) - if err != nil { - // Prevent leaking if an HTTPResponse was returned. Clean up - // the body. - if r.HTTPResponse != nil { - r.HTTPResponse.Body.Close() - } - // Capture the case where url.Error is returned for error processing - // response. e.g. 301 without location header comes back as string - // error and r.HTTPResponse is nil. Other url redirect errors will - // comeback in a similar method. - if e, ok := err.(*url.Error); ok && e.Err != nil { - if s := reStatusCode.FindStringSubmatch(e.Err.Error()); s != nil { - code, _ := strconv.ParseInt(s[1], 10, 64) - r.HTTPResponse = &http.Response{ - StatusCode: int(code), - Status: http.StatusText(int(code)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - return - } - } - if r.HTTPResponse == nil { - // Add a dummy request response object to ensure the HTTPResponse - // value is consistent. - r.HTTPResponse = &http.Response{ - StatusCode: int(0), - Status: http.StatusText(int(0)), - Body: ioutil.NopCloser(bytes.NewReader([]byte{})), - } - } - // Catch all other request errors. - r.Error = awserr.New("RequestError", "send request failed", err) - r.Retryable = aws.Bool(true) // network errors are retryable - } -}} - -// ValidateResponseHandler is a request handler to validate service response. -var ValidateResponseHandler = request.NamedHandler{Name: "core.ValidateResponseHandler", Fn: func(r *request.Request) { - if r.HTTPResponse.StatusCode == 0 || r.HTTPResponse.StatusCode >= 300 { - // this may be replaced by an UnmarshalError handler - r.Error = awserr.New("UnknownError", "unknown error", nil) - } -}} - -// AfterRetryHandler performs final checks to determine if the request should -// be retried and how long to delay. -var AfterRetryHandler = request.NamedHandler{Name: "core.AfterRetryHandler", Fn: func(r *request.Request) { - // If one of the other handlers already set the retry state - // we don't want to override it based on the service's state - if r.Retryable == nil { - r.Retryable = aws.Bool(r.ShouldRetry(r)) - } - - if r.WillRetry() { - r.RetryDelay = r.RetryRules(r) - r.Config.SleepDelay(r.RetryDelay) - - // when the expired token exception occurs the credentials - // need to be expired locally so that the next request to - // get credentials will trigger a credentials refresh. - if r.IsErrorExpired() { - r.Config.Credentials.Expire() - } - - r.RetryCount++ - r.Error = nil - } -}} - -// ValidateEndpointHandler is a request handler to validate a request had the -// appropriate Region and Endpoint set. Will set r.Error if the endpoint or -// region is not valid. 
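-//
-// For example, a client whose merged Config carries no Region and whose
-// ClientInfo has no SigningRegion fails fast with aws.ErrMissingRegion
-// before any request is signed or sent.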
-var ValidateEndpointHandler = request.NamedHandler{Name: "core.ValidateEndpointHandler", Fn: func(r *request.Request) { - if r.ClientInfo.SigningRegion == "" && aws.StringValue(r.Config.Region) == "" { - r.Error = aws.ErrMissingRegion - } else if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -}} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go b/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go deleted file mode 100644 index 7d50b1557c..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/corehandlers/param_validator.go +++ /dev/null @@ -1,17 +0,0 @@ -package corehandlers - -import "github.com/aws/aws-sdk-go/aws/request" - -// ValidateParametersHandler is a request handler to validate the input parameters. -// Validating parameters only has meaning if done prior to the request being sent. -var ValidateParametersHandler = request.NamedHandler{Name: "core.ValidateParametersHandler", Fn: func(r *request.Request) { - if !r.ParamsFilled() { - return - } - - if v, ok := r.Params.(request.Validator); ok { - if err := v.Validate(); err != nil { - r.Error = err - } - } -}} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go deleted file mode 100644 index 857311f64c..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/chain_provider.go +++ /dev/null @@ -1,100 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -var ( - // ErrNoValidProvidersFoundInChain Is returned when there are no valid - // providers in the ChainProvider. - // - // This has been deprecated. For verbose error messaging set - // aws.Config.CredentialsChainVerboseErrors to true - // - // @readonly - ErrNoValidProvidersFoundInChain = awserr.New("NoCredentialProviders", - `no valid providers in chain. Deprecated. - For verbose messaging see aws.Config.CredentialsChainVerboseErrors`, - nil) -) - -// A ChainProvider will search for a provider which returns credentials -// and cache that provider until Retrieve is called again. -// -// The ChainProvider provides a way of chaining multiple providers together -// which will pick the first available using priority order of the Providers -// in the list. -// -// If none of the Providers retrieve valid credentials Value, ChainProvider's -// Retrieve() will return the error ErrNoValidProvidersFoundInChain. -// -// If a Provider is found which returns valid credentials Value ChainProvider -// will cache that Provider for all calls to IsExpired(), until Retrieve is -// called again. -// -// Example of ChainProvider to be used with an EnvProvider and EC2RoleProvider. -// In this example EnvProvider will first check if any credentials are available -// vai the environment variables. If there are none ChainProvider will check -// the next Provider in the list, EC2RoleProvider in this case. 
If EC2RoleProvider -// does not return any credentials ChainProvider will return the error -// ErrNoValidProvidersFoundInChain -// -// creds := NewChainCredentials( -// []Provider{ -// &EnvProvider{}, -// &EC2RoleProvider{ -// Client: ec2metadata.New(sess), -// }, -// }) -// -// // Usage of ChainCredentials with aws.Config -// svc := ec2.New(&aws.Config{Credentials: creds}) -// -type ChainProvider struct { - Providers []Provider - curr Provider - VerboseErrors bool -} - -// NewChainCredentials returns a pointer to a new Credentials object -// wrapping a chain of providers. -func NewChainCredentials(providers []Provider) *Credentials { - return NewCredentials(&ChainProvider{ - Providers: append([]Provider{}, providers...), - }) -} - -// Retrieve returns the credentials value or error if no provider returned -// without error. -// -// If a provider is found it will be cached and any calls to IsExpired() -// will return the expired state of the cached provider. -func (c *ChainProvider) Retrieve() (Value, error) { - var errs []error - for _, p := range c.Providers { - creds, err := p.Retrieve() - if err == nil { - c.curr = p - return creds, nil - } - errs = append(errs, err) - } - c.curr = nil - - var err error - err = ErrNoValidProvidersFoundInChain - if c.VerboseErrors { - err = awserr.NewBatchError("NoCredentialProviders", "no valid providers in chain", errs) - } - return Value{}, err -} - -// IsExpired will returned the expired state of the currently cached provider -// if there is one. If there is no current provider, true will be returned. -func (c *ChainProvider) IsExpired() bool { - if c.curr != nil { - return c.curr.IsExpired() - } - - return true -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go deleted file mode 100644 index 7b8ebf5f9d..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/credentials.go +++ /dev/null @@ -1,223 +0,0 @@ -// Package credentials provides credential retrieval and management -// -// The Credentials is the primary method of getting access to and managing -// credentials Values. Using dependency injection retrieval of the credential -// values is handled by a object which satisfies the Provider interface. -// -// By default the Credentials.Get() will cache the successful result of a -// Provider's Retrieve() until Provider.IsExpired() returns true. At which -// point Credentials will call Provider's Retrieve() to get new credential Value. -// -// The Provider is responsible for determining when credentials Value have expired. -// It is also important to note that Credentials will always call Retrieve the -// first time Credentials.Get() is called. -// -// Example of using the environment variable credentials. -// -// creds := NewEnvCredentials() -// -// // Retrieve the credentials value -// credValue, err := creds.Get() -// if err != nil { -// // handle error -// } -// -// Example of forcing credentials to expire and be refreshed on the next Get(). -// This may be helpful to proactively expire credentials and refresh them sooner -// than they would naturally expire on their own. -// -// creds := NewCredentials(&EC2RoleProvider{}) -// creds.Expire() -// credsValue, err := creds.Get() -// // New credentials will be retrieved instead of from cache. -// -// -// Custom Provider -// -// Each Provider built into this package also provides a helper method to generate -// a Credentials pointer setup with the provider. 
To use a custom Provider just -// create a type which satisfies the Provider interface and pass it to the -// NewCredentials method. -// -// type MyProvider struct{} -// func (m *MyProvider) Retrieve() (Value, error) {...} -// func (m *MyProvider) IsExpired() bool {...} -// -// creds := NewCredentials(&MyProvider{}) -// credValue, err := creds.Get() -// -package credentials - -import ( - "sync" - "time" -) - -// AnonymousCredentials is an empty Credential object that can be used as -// dummy placeholder credentials for requests that do not need signed. -// -// This Credentials can be used to configure a service to not sign requests -// when making service API calls. For example, when accessing public -// s3 buckets. -// -// svc := s3.New(&aws.Config{Credentials: AnonymousCredentials}) -// // Access public S3 buckets. -// -// @readonly -var AnonymousCredentials = NewStaticCredentials("", "", "") - -// A Value is the AWS credentials value for individual credential fields. -type Value struct { - // AWS Access key ID - AccessKeyID string - - // AWS Secret Access Key - SecretAccessKey string - - // AWS Session Token - SessionToken string - - // Provider used to get credentials - ProviderName string -} - -// A Provider is the interface for any component which will provide credentials -// Value. A provider is required to manage its own Expired state, and what to -// be expired means. -// -// The Provider should not need to implement its own mutexes, because -// that will be managed by Credentials. -type Provider interface { - // Refresh returns nil if it successfully retrieved the value. - // Error is returned if the value were not obtainable, or empty. - Retrieve() (Value, error) - - // IsExpired returns if the credentials are no longer valid, and need - // to be retrieved. - IsExpired() bool -} - -// A Expiry provides shared expiration logic to be used by credentials -// providers to implement expiry functionality. -// -// The best method to use this struct is as an anonymous field within the -// provider's struct. -// -// Example: -// type EC2RoleProvider struct { -// Expiry -// ... -// } -type Expiry struct { - // The date/time when to expire on - expiration time.Time - - // If set will be used by IsExpired to determine the current time. - // Defaults to time.Now if CurrentTime is not set. Available for testing - // to be able to mock out the current time. - CurrentTime func() time.Time -} - -// SetExpiration sets the expiration IsExpired will check when called. -// -// If window is greater than 0 the expiration time will be reduced by the -// window value. -// -// Using a window is helpful to trigger credentials to expire sooner than -// the expiration time given to ensure no requests are made with expired -// tokens. -func (e *Expiry) SetExpiration(expiration time.Time, window time.Duration) { - e.expiration = expiration - if window > 0 { - e.expiration = e.expiration.Add(-window) - } -} - -// IsExpired returns if the credentials are expired. -func (e *Expiry) IsExpired() bool { - if e.CurrentTime == nil { - e.CurrentTime = time.Now - } - return e.expiration.Before(e.CurrentTime()) -} - -// A Credentials provides synchronous safe retrieval of AWS credentials Value. -// Credentials will cache the credentials value until they expire. Once the value -// expires the next Get will attempt to retrieve valid credentials. -// -// Credentials is safe to use across multiple goroutines and will manage the -// synchronous state so the Providers do not need to implement their own -// synchronization. 
-// -// The first Credentials.Get() will always call Provider.Retrieve() to get the -// first instance of the credentials Value. All calls to Get() after that -// will return the cached credentials Value until IsExpired() returns true. -type Credentials struct { - creds Value - forceRefresh bool - m sync.Mutex - - provider Provider -} - -// NewCredentials returns a pointer to a new Credentials with the provider set. -func NewCredentials(provider Provider) *Credentials { - return &Credentials{ - provider: provider, - forceRefresh: true, - } -} - -// Get returns the credentials value, or error if the credentials Value failed -// to be retrieved. -// -// Will return the cached credentials Value if it has not expired. If the -// credentials Value has expired the Provider's Retrieve() will be called -// to refresh the credentials. -// -// If Credentials.Expire() was called the credentials Value will be force -// expired, and the next call to Get() will cause them to be refreshed. -func (c *Credentials) Get() (Value, error) { - c.m.Lock() - defer c.m.Unlock() - - if c.isExpired() { - creds, err := c.provider.Retrieve() - if err != nil { - return Value{}, err - } - c.creds = creds - c.forceRefresh = false - } - - return c.creds, nil -} - -// Expire expires the credentials and forces them to be retrieved on the -// next call to Get(). -// -// This will override the Provider's expired state, and force Credentials -// to call the Provider's Retrieve(). -func (c *Credentials) Expire() { - c.m.Lock() - defer c.m.Unlock() - - c.forceRefresh = true -} - -// IsExpired returns if the credentials are no longer valid, and need -// to be retrieved. -// -// If the Credentials were forced to be expired with Expire() this will -// reflect that override. -func (c *Credentials) IsExpired() bool { - c.m.Lock() - defer c.m.Unlock() - - return c.isExpired() -} - -// isExpired helper method wrapping the definition of expired credentials. -func (c *Credentials) isExpired() bool { - return c.forceRefresh || c.provider.IsExpired() -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go deleted file mode 100644 index aa9d689a01..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds/ec2_role_provider.go +++ /dev/null @@ -1,178 +0,0 @@ -package ec2rolecreds - -import ( - "bufio" - "encoding/json" - "fmt" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/ec2metadata" -) - -// ProviderName provides a name of EC2Role provider -const ProviderName = "EC2RoleProvider" - -// A EC2RoleProvider retrieves credentials from the EC2 service, and keeps track if -// those credentials are expired. -// -// Example how to configure the EC2RoleProvider with custom http Client, Endpoint -// or ExpiryWindow -// -// p := &ec2rolecreds.EC2RoleProvider{ -// // Pass in a custom timeout to be used when requesting -// // IAM EC2 Role credentials. -// Client: ec2metadata.New(sess, aws.Config{ -// HTTPClient: &http.Client{Timeout: 10 * time.Second}, -// }), -// -// // Do not use early expiry of credentials. If a non zero value is -// // specified the credentials will be expired early -// ExpiryWindow: 0, -// } -type EC2RoleProvider struct { - credentials.Expiry - - // Required EC2Metadata client to use when connecting to EC2 metadata service. 
- Client *ec2metadata.EC2Metadata - - // ExpiryWindow will allow the credentials to trigger refreshing prior to - // the credentials actually expiring. This is beneficial so race conditions - // with expiring credentials do not cause request to fail unexpectedly - // due to ExpiredTokenException exceptions. - // - // So a ExpiryWindow of 10s would cause calls to IsExpired() to return true - // 10 seconds before the credentials are actually expired. - // - // If ExpiryWindow is 0 or less it will be ignored. - ExpiryWindow time.Duration -} - -// NewCredentials returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a ConfigProvider to create a EC2Metadata client. -// The ConfigProvider is satisfied by the session.Session type. -func NewCredentials(c client.ConfigProvider, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: ec2metadata.New(c), - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// NewCredentialsWithClient returns a pointer to a new Credentials object wrapping -// the EC2RoleProvider. Takes a EC2Metadata client to use when connecting to EC2 -// metadata service. -func NewCredentialsWithClient(client *ec2metadata.EC2Metadata, options ...func(*EC2RoleProvider)) *credentials.Credentials { - p := &EC2RoleProvider{ - Client: client, - } - - for _, option := range options { - option(p) - } - - return credentials.NewCredentials(p) -} - -// Retrieve retrieves credentials from the EC2 service. -// Error will be returned if the request fails, or unable to extract -// the desired credentials. -func (m *EC2RoleProvider) Retrieve() (credentials.Value, error) { - credsList, err := requestCredList(m.Client) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - if len(credsList) == 0 { - return credentials.Value{ProviderName: ProviderName}, awserr.New("EmptyEC2RoleList", "empty EC2 Role list", nil) - } - credsName := credsList[0] - - roleCreds, err := requestCred(m.Client, credsName) - if err != nil { - return credentials.Value{ProviderName: ProviderName}, err - } - - m.SetExpiration(roleCreds.Expiration, m.ExpiryWindow) - - return credentials.Value{ - AccessKeyID: roleCreds.AccessKeyID, - SecretAccessKey: roleCreds.SecretAccessKey, - SessionToken: roleCreds.Token, - ProviderName: ProviderName, - }, nil -} - -// A ec2RoleCredRespBody provides the shape for unmarshalling credential -// request responses. -type ec2RoleCredRespBody struct { - // Success State - Expiration time.Time - AccessKeyID string - SecretAccessKey string - Token string - - // Error state - Code string - Message string -} - -const iamSecurityCredsPath = "/iam/security-credentials" - -// requestCredList requests a list of credentials from the EC2 service. 
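A short sketch of the functional-options constructor shown above, wiring in a custom metadata client and a ten-minute expiry window; session.New and the timeout values are assumptions for illustration, not requirements.

package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds"
	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	sess := session.New()

	// Each option mutates the provider before it is wrapped; here a
	// shorter HTTP timeout and a ten-minute early-expiry window.
	creds := ec2rolecreds.NewCredentials(sess, func(p *ec2rolecreds.EC2RoleProvider) {
		p.ExpiryWindow = 10 * time.Minute
		p.Client = ec2metadata.New(sess, &aws.Config{
			HTTPClient: &http.Client{Timeout: 10 * time.Second},
		})
	})

	if _, err := creds.Get(); err != nil {
		fmt.Println("no instance role available:", err)
	}
}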
-// If there are no credentials, or there is an error making or receiving the request -func requestCredList(client *ec2metadata.EC2Metadata) ([]string, error) { - resp, err := client.GetMetadata(iamSecurityCredsPath) - if err != nil { - return nil, awserr.New("EC2RoleRequestError", "no EC2 instance role found", err) - } - - credsList := []string{} - s := bufio.NewScanner(strings.NewReader(resp)) - for s.Scan() { - credsList = append(credsList, s.Text()) - } - - if err := s.Err(); err != nil { - return nil, awserr.New("SerializationError", "failed to read EC2 instance role from metadata service", err) - } - - return credsList, nil -} - -// requestCred requests the credentials for a specific credentials from the EC2 service. -// -// If the credentials cannot be found, or there is an error reading the response -// and error will be returned. -func requestCred(client *ec2metadata.EC2Metadata, credsName string) (ec2RoleCredRespBody, error) { - resp, err := client.GetMetadata(path.Join(iamSecurityCredsPath, credsName)) - if err != nil { - return ec2RoleCredRespBody{}, - awserr.New("EC2RoleRequestError", - fmt.Sprintf("failed to get %s EC2 instance role credentials", credsName), - err) - } - - respCreds := ec2RoleCredRespBody{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&respCreds); err != nil { - return ec2RoleCredRespBody{}, - awserr.New("SerializationError", - fmt.Sprintf("failed to decode %s EC2 instance role credentials", credsName), - err) - } - - if respCreds.Code != "Success" { - // If an error code was returned something failed requesting the role. - return ec2RoleCredRespBody{}, awserr.New(respCreds.Code, respCreds.Message, nil) - } - - return respCreds, nil -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go deleted file mode 100644 index 96655bc46a..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/env_provider.go +++ /dev/null @@ -1,77 +0,0 @@ -package credentials - -import ( - "os" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// EnvProviderName provides a name of Env provider -const EnvProviderName = "EnvProvider" - -var ( - // ErrAccessKeyIDNotFound is returned when the AWS Access Key ID can't be - // found in the process's environment. - // - // @readonly - ErrAccessKeyIDNotFound = awserr.New("EnvAccessKeyNotFound", "AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY not found in environment", nil) - - // ErrSecretAccessKeyNotFound is returned when the AWS Secret Access Key - // can't be found in the process's environment. - // - // @readonly - ErrSecretAccessKeyNotFound = awserr.New("EnvSecretNotFound", "AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY not found in environment", nil) -) - -// A EnvProvider retrieves credentials from the environment variables of the -// running process. Environment credentials never expire. -// -// Environment variables used: -// -// * Access Key ID: AWS_ACCESS_KEY_ID or AWS_ACCESS_KEY -// * Secret Access Key: AWS_SECRET_ACCESS_KEY or AWS_SECRET_KEY -type EnvProvider struct { - retrieved bool -} - -// NewEnvCredentials returns a pointer to a new Credentials object -// wrapping the environment variable provider. -func NewEnvCredentials() *Credentials { - return NewCredentials(&EnvProvider{}) -} - -// Retrieve retrieves the keys from the environment. 
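A minimal, self-contained sketch of the environment provider described above; the exported key values are placeholders set only so the program runs anywhere.

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder values, set only so the sketch is self-contained; real
	// programs rely on the variables already being exported.
	os.Setenv("AWS_ACCESS_KEY_ID", "AKID")
	os.Setenv("AWS_SECRET_ACCESS_KEY", "SECRET")

	creds := credentials.NewEnvCredentials()
	v, err := creds.Get()
	if err != nil {
		fmt.Println(err)
		return
	}
	// Environment credentials never expire once retrieved.
	fmt.Println(v.ProviderName, "expired:", creds.IsExpired())
}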
-func (e *EnvProvider) Retrieve() (Value, error) { - e.retrieved = false - - id := os.Getenv("AWS_ACCESS_KEY_ID") - if id == "" { - id = os.Getenv("AWS_ACCESS_KEY") - } - - secret := os.Getenv("AWS_SECRET_ACCESS_KEY") - if secret == "" { - secret = os.Getenv("AWS_SECRET_KEY") - } - - if id == "" { - return Value{ProviderName: EnvProviderName}, ErrAccessKeyIDNotFound - } - - if secret == "" { - return Value{ProviderName: EnvProviderName}, ErrSecretAccessKeyNotFound - } - - e.retrieved = true - return Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: os.Getenv("AWS_SESSION_TOKEN"), - ProviderName: EnvProviderName, - }, nil -} - -// IsExpired returns if the credentials have been retrieved. -func (e *EnvProvider) IsExpired() bool { - return !e.retrieved -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini deleted file mode 100644 index 7fc91d9d20..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/example.ini +++ /dev/null @@ -1,12 +0,0 @@ -[default] -aws_access_key_id = accessKey -aws_secret_access_key = secret -aws_session_token = token - -[no_token] -aws_access_key_id = accessKey -aws_secret_access_key = secret - -[with_colon] -aws_access_key_id: accessKey -aws_secret_access_key: secret diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go deleted file mode 100644 index 7fb7cbf0db..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/shared_credentials_provider.go +++ /dev/null @@ -1,151 +0,0 @@ -package credentials - -import ( - "fmt" - "os" - "path/filepath" - - "github.com/go-ini/ini" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// SharedCredsProviderName provides a name of SharedCreds provider -const SharedCredsProviderName = "SharedCredentialsProvider" - -var ( - // ErrSharedCredentialsHomeNotFound is emitted when the user directory cannot be found. - // - // @readonly - ErrSharedCredentialsHomeNotFound = awserr.New("UserHomeNotFound", "user home directory not found.", nil) -) - -// A SharedCredentialsProvider retrieves credentials from the current user's home -// directory, and keeps track if those credentials are expired. -// -// Profile ini file example: $HOME/.aws/credentials -type SharedCredentialsProvider struct { - // Path to the shared credentials file. - // - // If empty will look for "AWS_SHARED_CREDENTIALS_FILE" env variable. If the - // env value is empty will default to current user's home directory. - // Linux/OSX: "$HOME/.aws/credentials" - // Windows: "%USERPROFILE%\.aws\credentials" - Filename string - - // AWS Profile to extract credentials from the shared credentials file. If empty - // will default to environment variable "AWS_PROFILE" or "default" if - // environment variable is also not set. - Profile string - - // retrieved states if the credentials have been successfully retrieved. - retrieved bool -} - -// NewSharedCredentials returns a pointer to a new Credentials object -// wrapping the Profile file provider. -func NewSharedCredentials(filename, profile string) *Credentials { - return NewCredentials(&SharedCredentialsProvider{ - Filename: filename, - Profile: profile, - }) -} - -// Retrieve reads and extracts the shared credentials from the current -// users home directory. 
-func (p *SharedCredentialsProvider) Retrieve() (Value, error) { - p.retrieved = false - - filename, err := p.filename() - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - creds, err := loadProfile(filename, p.profile()) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, err - } - - p.retrieved = true - return creds, nil -} - -// IsExpired returns if the shared credentials have expired. -func (p *SharedCredentialsProvider) IsExpired() bool { - return !p.retrieved -} - -// loadProfiles loads from the file pointed to by shared credentials filename for profile. -// The credentials retrieved from the profile will be returned or error. Error will be -// returned if it fails to read from the file, or the data is invalid. -func loadProfile(filename, profile string) (Value, error) { - config, err := ini.Load(filename) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to load shared credentials file", err) - } - iniProfile, err := config.GetSection(profile) - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsLoad", "failed to get profile", err) - } - - id, err := iniProfile.GetKey("aws_access_key_id") - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsAccessKey", - fmt.Sprintf("shared credentials %s in %s did not contain aws_access_key_id", profile, filename), - err) - } - - secret, err := iniProfile.GetKey("aws_secret_access_key") - if err != nil { - return Value{ProviderName: SharedCredsProviderName}, awserr.New("SharedCredsSecret", - fmt.Sprintf("shared credentials %s in %s did not contain aws_secret_access_key", profile, filename), - nil) - } - - // Default to empty string if not found - token := iniProfile.Key("aws_session_token") - - return Value{ - AccessKeyID: id.String(), - SecretAccessKey: secret.String(), - SessionToken: token.String(), - ProviderName: SharedCredsProviderName, - }, nil -} - -// filename returns the filename to use to read AWS shared credentials. -// -// Will return an error if the user's home directory path cannot be found. -func (p *SharedCredentialsProvider) filename() (string, error) { - if p.Filename == "" { - if p.Filename = os.Getenv("AWS_SHARED_CREDENTIALS_FILE"); p.Filename != "" { - return p.Filename, nil - } - - homeDir := os.Getenv("HOME") // *nix - if homeDir == "" { // Windows - homeDir = os.Getenv("USERPROFILE") - } - if homeDir == "" { - return "", ErrSharedCredentialsHomeNotFound - } - - p.Filename = filepath.Join(homeDir, ".aws", "credentials") - } - - return p.Filename, nil -} - -// profile returns the AWS shared credentials profile. If empty will read -// environment variable "AWS_PROFILE". If that is not set profile will -// return "default". 
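Tying the filename and profile lookup rules above together, a minimal usage sketch; passing empty strings defers to the environment-variable and home-directory fallbacks.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Empty arguments defer to the fallbacks described above:
	// AWS_SHARED_CREDENTIALS_FILE or $HOME/.aws/credentials for the
	// file, and AWS_PROFILE or "default" for the profile.
	creds := credentials.NewSharedCredentials("", "")

	v, err := creds.Get()
	if err != nil {
		fmt.Println("could not load shared credentials:", err)
		return
	}
	fmt.Println("loaded profile credentials via", v.ProviderName)
}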
-func (p *SharedCredentialsProvider) profile() string { - if p.Profile == "" { - p.Profile = os.Getenv("AWS_PROFILE") - } - if p.Profile == "" { - p.Profile = "default" - } - - return p.Profile -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go b/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go deleted file mode 100644 index 6f075604e2..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/credentials/static_provider.go +++ /dev/null @@ -1,48 +0,0 @@ -package credentials - -import ( - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// StaticProviderName provides a name of Static provider -const StaticProviderName = "StaticProvider" - -var ( - // ErrStaticCredentialsEmpty is emitted when static credentials are empty. - // - // @readonly - ErrStaticCredentialsEmpty = awserr.New("EmptyStaticCreds", "static credentials are empty", nil) -) - -// A StaticProvider is a set of credentials which are set programmatically, -// and will never expire. -type StaticProvider struct { - Value -} - -// NewStaticCredentials returns a pointer to a new Credentials object -// wrapping a static credentials value provider. -func NewStaticCredentials(id, secret, token string) *Credentials { - return NewCredentials(&StaticProvider{Value: Value{ - AccessKeyID: id, - SecretAccessKey: secret, - SessionToken: token, - }}) -} - -// Retrieve returns the credentials or error if the credentials are invalid. -func (s *StaticProvider) Retrieve() (Value, error) { - if s.AccessKeyID == "" || s.SecretAccessKey == "" { - return Value{ProviderName: StaticProviderName}, ErrStaticCredentialsEmpty - } - - s.Value.ProviderName = StaticProviderName - return s.Value, nil -} - -// IsExpired returns if the credentials are expired. -// -// For StaticProvider, the credentials never expired. -func (s *StaticProvider) IsExpired() bool { - return false -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go b/vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go deleted file mode 100644 index 12be1a5d77..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/defaults/defaults.go +++ /dev/null @@ -1,98 +0,0 @@ -// Package defaults is a collection of helpers to retrieve the SDK's default -// configuration and handlers. -// -// Generally this package shouldn't be used directly, but session.Session -// instead. This package is useful when you need to reset the defaults -// of a session or service client to the SDK defaults before setting -// additional parameters. -package defaults - -import ( - "net/http" - "os" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/credentials" - "github.com/aws/aws-sdk-go/aws/credentials/ec2rolecreds" - "github.com/aws/aws-sdk-go/aws/ec2metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/endpoints" -) - -// A Defaults provides a collection of default values for SDK clients. -type Defaults struct { - Config *aws.Config - Handlers request.Handlers -} - -// Get returns the SDK's default values with Config and handlers pre-configured. -func Get() Defaults { - cfg := Config() - handlers := Handlers() - cfg.Credentials = CredChain(cfg, handlers) - - return Defaults{ - Config: cfg, - Handlers: handlers, - } -} - -// Config returns the default configuration without credentials. -// To retrieve a config with credentials also included use -// `defaults.Get().Config` instead. 
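A small sketch of the static provider above; the id and secret are placeholders, and empty values would surface ErrStaticCredentialsEmpty from Get.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/credentials"
)

func main() {
	// Placeholder id and secret; an empty id or secret would surface
	// ErrStaticCredentialsEmpty from Get.
	creds := credentials.NewStaticCredentials("AKID", "SECRET", "")

	if _, err := creds.Get(); err != nil {
		fmt.Println(err)
		return
	}
	// Static credentials never expire, so this is always false.
	fmt.Println("expired:", creds.IsExpired())
}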
-// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the configuration of an -// existing service client or session. -func Config() *aws.Config { - return aws.NewConfig(). - WithCredentials(credentials.AnonymousCredentials). - WithRegion(os.Getenv("AWS_REGION")). - WithHTTPClient(http.DefaultClient). - WithMaxRetries(aws.UseServiceDefaultRetries). - WithLogger(aws.NewDefaultLogger()). - WithLogLevel(aws.LogOff). - WithSleepDelay(time.Sleep) -} - -// Handlers returns the default request handlers. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the request handlers of an -// existing service client or session. -func Handlers() request.Handlers { - var handlers request.Handlers - - handlers.Validate.PushBackNamed(corehandlers.ValidateEndpointHandler) - handlers.Validate.AfterEachFn = request.HandlerListStopOnError - handlers.Build.PushBackNamed(corehandlers.SDKVersionUserAgentHandler) - handlers.Build.AfterEachFn = request.HandlerListStopOnError - handlers.Sign.PushBackNamed(corehandlers.BuildContentLengthHandler) - handlers.Send.PushBackNamed(corehandlers.SendHandler) - handlers.AfterRetry.PushBackNamed(corehandlers.AfterRetryHandler) - handlers.ValidateResponse.PushBackNamed(corehandlers.ValidateResponseHandler) - - return handlers -} - -// CredChain returns the default credential chain. -// -// Generally you shouldn't need to use this method directly, but -// is available if you need to reset the credentials of an -// existing service client or session's Config. -func CredChain(cfg *aws.Config, handlers request.Handlers) *credentials.Credentials { - endpoint, signingRegion := endpoints.EndpointForRegion(ec2metadata.ServiceName, *cfg.Region, true) - - return credentials.NewCredentials(&credentials.ChainProvider{ - VerboseErrors: aws.BoolValue(cfg.CredentialsChainVerboseErrors), - Providers: []credentials.Provider{ - &credentials.EnvProvider{}, - &credentials.SharedCredentialsProvider{Filename: "", Profile: ""}, - &ec2rolecreds.EC2RoleProvider{ - Client: ec2metadata.NewClient(*cfg, handlers, endpoint, signingRegion), - ExpiryWindow: 5 * time.Minute, - }, - }}) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go b/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go deleted file mode 100644 index 669c813a00..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/api.go +++ /dev/null @@ -1,140 +0,0 @@ -package ec2metadata - -import ( - "encoding/json" - "fmt" - "path" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// GetMetadata uses the path provided to request information from the EC2 -// instance metdata service. The content will be returned as a string, or -// error if the request failed. -func (c *EC2Metadata) GetMetadata(p string) (string, error) { - op := &request.Operation{ - Name: "GetMetadata", - HTTPMethod: "GET", - HTTPPath: path.Join("/", "meta-data", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - return output.Content, req.Send() -} - -// GetDynamicData uses the path provided to request information from the EC2 -// instance metadata service for dynamic data. The content will be returned -// as a string, or error if the request failed. 
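A brief sketch of consuming these defaults, assuming nothing beyond the defaults package itself; on a machine with no credentials configured, Get's chain simply returns the error discussed earlier.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/defaults"
)

func main() {
	// Get returns Config and Handlers pre-wired, including the default
	// chain: environment, shared credentials file, then EC2 role.
	d := defaults.Get()

	v, err := d.Config.Credentials.Get()
	if err != nil {
		fmt.Println("default chain found no credentials:", err)
		return
	}
	fmt.Println("credentials from", v.ProviderName)
}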
-func (c *EC2Metadata) GetDynamicData(p string) (string, error) { - op := &request.Operation{ - Name: "GetDynamicData", - HTTPMethod: "GET", - HTTPPath: path.Join("/", "dynamic", p), - } - - output := &metadataOutput{} - req := c.NewRequest(op, nil, output) - - return output.Content, req.Send() -} - -// GetInstanceIdentityDocument retrieves an identity document describing an -// instance. Error is returned if the request fails or is unable to parse -// the response. -func (c *EC2Metadata) GetInstanceIdentityDocument() (EC2InstanceIdentityDocument, error) { - resp, err := c.GetDynamicData("instance-identity/document") - if err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 instance identity document", err) - } - - doc := EC2InstanceIdentityDocument{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&doc); err != nil { - return EC2InstanceIdentityDocument{}, - awserr.New("SerializationError", - "failed to decode EC2 instance identity document", err) - } - - return doc, nil -} - -// IAMInfo retrieves IAM info from the metadata API -func (c *EC2Metadata) IAMInfo() (EC2IAMInfo, error) { - resp, err := c.GetMetadata("iam/info") - if err != nil { - return EC2IAMInfo{}, - awserr.New("EC2MetadataRequestError", - "failed to get EC2 IAM info", err) - } - - info := EC2IAMInfo{} - if err := json.NewDecoder(strings.NewReader(resp)).Decode(&info); err != nil { - return EC2IAMInfo{}, - awserr.New("SerializationError", - "failed to decode EC2 IAM info", err) - } - - if info.Code != "Success" { - errMsg := fmt.Sprintf("failed to get EC2 IAM Info (%s)", info.Code) - return EC2IAMInfo{}, - awserr.New("EC2MetadataError", errMsg, nil) - } - - return info, nil -} - -// Region returns the region the instance is running in. -func (c *EC2Metadata) Region() (string, error) { - resp, err := c.GetMetadata("placement/availability-zone") - if err != nil { - return "", err - } - - // returns region without the suffix. Eg: us-west-2a becomes us-west-2 - return resp[:len(resp)-1], nil -} - -// Available returns if the application has access to the EC2 Metadata service. -// Can be used to determine if application is running within an EC2 Instance and -// the metadata service is available. 
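Pulling the metadata helpers above together in one runnable sketch; session.New is assumed from the SDK's session package, and off EC2 the program simply reports that the service is unreachable.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/ec2metadata"
	"github.com/aws/aws-sdk-go/aws/session"
)

func main() {
	meta := ec2metadata.New(session.New())

	// Available probes the instance-id path, so it doubles as an
	// "am I on EC2?" check before further metadata calls.
	if !meta.Available() {
		fmt.Println("not running on an EC2 instance")
		return
	}

	region, err := meta.Region()
	if err != nil {
		fmt.Println(err)
		return
	}
	doc, _ := meta.GetInstanceIdentityDocument()
	fmt.Println("region:", region, "instance:", doc.InstanceID)
}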
-func (c *EC2Metadata) Available() bool { - if _, err := c.GetMetadata("instance-id"); err != nil { - return false - } - - return true -} - -// An EC2IAMInfo provides the shape for unmarshalling -// an IAM info from the metadata API -type EC2IAMInfo struct { - Code string - LastUpdated time.Time - InstanceProfileArn string - InstanceProfileID string -} - -// An EC2InstanceIdentityDocument provides the shape for unmarshalling -// an instance identity document -type EC2InstanceIdentityDocument struct { - DevpayProductCodes []string `json:"devpayProductCodes"` - AvailabilityZone string `json:"availabilityZone"` - PrivateIP string `json:"privateIp"` - Version string `json:"version"` - Region string `json:"region"` - InstanceID string `json:"instanceId"` - BillingProducts []string `json:"billingProducts"` - InstanceType string `json:"instanceType"` - AccountID string `json:"accountId"` - PendingTime time.Time `json:"pendingTime"` - ImageID string `json:"imageId"` - KernelID string `json:"kernelId"` - RamdiskID string `json:"ramdiskId"` - Architecture string `json:"architecture"` -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go b/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go deleted file mode 100644 index 5b4379dbd8..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/ec2metadata/service.go +++ /dev/null @@ -1,124 +0,0 @@ -// Package ec2metadata provides the client for making API calls to the -// EC2 Metadata service. -package ec2metadata - -import ( - "bytes" - "errors" - "io" - "net/http" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" -) - -// ServiceName is the name of the service. -const ServiceName = "ec2metadata" - -// A EC2Metadata is an EC2 Metadata service Client. -type EC2Metadata struct { - *client.Client -} - -// New creates a new instance of the EC2Metadata client with a session. -// This client is safe to use across multiple goroutines. -// -// -// Example: -// // Create a EC2Metadata client from just a session. -// svc := ec2metadata.New(mySession) -// -// // Create a EC2Metadata client with additional configuration -// svc := ec2metadata.New(mySession, aws.NewConfig().WithLogLevel(aws.LogDebugHTTPBody)) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *EC2Metadata { - c := p.ClientConfig(ServiceName, cfgs...) - return NewClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// NewClient returns a new EC2Metadata client. Should be used to create -// a client when not using a session. Generally using just New with a session -// is preferred. -// -// If an unmodified HTTP client is provided from the stdlib default, or no client -// the EC2RoleProvider's EC2Metadata HTTP client's timeout will be shortened. -// To disable this set Config.EC2MetadataDisableTimeoutOverride to false. Enabled by default. -func NewClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string, opts ...func(*client.Client)) *EC2Metadata { - if !aws.BoolValue(cfg.EC2MetadataDisableTimeoutOverride) && httpClientZero(cfg.HTTPClient) { - // If the http client is unmodified and this feature is not disabled - // set custom timeouts for EC2Metadata requests. - cfg.HTTPClient = &http.Client{ - // use a shorter timeout than default because the metadata - // service is local if it is running, and to fail faster - // if not running on an ec2 instance. 
- Timeout: 5 * time.Second, - } - } - - svc := &EC2Metadata{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - Endpoint: endpoint, - APIVersion: "latest", - }, - handlers, - ), - } - - svc.Handlers.Unmarshal.PushBack(unmarshalHandler) - svc.Handlers.UnmarshalError.PushBack(unmarshalError) - svc.Handlers.Validate.Clear() - svc.Handlers.Validate.PushBack(validateEndpointHandler) - - // Add additional options to the service config - for _, option := range opts { - option(svc.Client) - } - - return svc -} - -func httpClientZero(c *http.Client) bool { - return c == nil || (c.Transport == nil && c.CheckRedirect == nil && c.Jar == nil && c.Timeout == 0) -} - -type metadataOutput struct { - Content string -} - -func unmarshalHandler(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata respose", err) - return - } - - if data, ok := r.Data.(*metadataOutput); ok { - data.Content = b.String() - } -} - -func unmarshalError(r *request.Request) { - defer r.HTTPResponse.Body.Close() - b := &bytes.Buffer{} - if _, err := io.Copy(b, r.HTTPResponse.Body); err != nil { - r.Error = awserr.New("SerializationError", "unable to unmarshal EC2 metadata error respose", err) - return - } - - // Response body format is not consistent between metadata endpoints. - // Grab the error message as a string and include that as the source error - r.Error = awserr.New("EC2MetadataError", "failed to make EC2Metadata request", errors.New(b.String())) -} - -func validateEndpointHandler(r *request.Request) { - if r.ClientInfo.Endpoint == "" { - r.Error = aws.ErrMissingEndpoint - } -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/errors.go b/vendor/src/github.com/aws/aws-sdk-go/aws/errors.go deleted file mode 100644 index 5766361686..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/errors.go +++ /dev/null @@ -1,17 +0,0 @@ -package aws - -import "github.com/aws/aws-sdk-go/aws/awserr" - -var ( - // ErrMissingRegion is an error that is returned if region configuration is - // not found. - // - // @readonly - ErrMissingRegion = awserr.New("MissingRegion", "could not find region configuration", nil) - - // ErrMissingEndpoint is an error that is returned if an endpoint cannot be - // resolved for a service. - // - // @readonly - ErrMissingEndpoint = awserr.New("MissingEndpoint", "'Endpoint' configuration is required for this service", nil) -) diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/logger.go b/vendor/src/github.com/aws/aws-sdk-go/aws/logger.go deleted file mode 100644 index db87188e20..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/logger.go +++ /dev/null @@ -1,112 +0,0 @@ -package aws - -import ( - "log" - "os" -) - -// A LogLevelType defines the level logging should be performed at. Used to instruct -// the SDK which statements should be logged. -type LogLevelType uint - -// LogLevel returns the pointer to a LogLevel. Should be used to workaround -// not being able to take the address of a non-composite literal. -func LogLevel(l LogLevelType) *LogLevelType { - return &l -} - -// Value returns the LogLevel value or the default value LogOff if the LogLevel -// is nil. Safe to use on nil value LogLevelTypes. -func (l *LogLevelType) Value() LogLevelType { - if l != nil { - return *l - } - return LogOff -} - -// Matches returns true if the v LogLevel is enabled by this LogLevel. 
Should be -// used with logging sub levels. Is safe to use on nil value LogLevelTypes. If -// LogLevel is nill, will default to LogOff comparison. -func (l *LogLevelType) Matches(v LogLevelType) bool { - c := l.Value() - return c&v == v -} - -// AtLeast returns true if this LogLevel is at least high enough to satisfies v. -// Is safe to use on nil value LogLevelTypes. If LogLevel is nill, will default -// to LogOff comparison. -func (l *LogLevelType) AtLeast(v LogLevelType) bool { - c := l.Value() - return c >= v -} - -const ( - // LogOff states that no logging should be performed by the SDK. This is the - // default state of the SDK, and should be use to disable all logging. - LogOff LogLevelType = iota * 0x1000 - - // LogDebug state that debug output should be logged by the SDK. This should - // be used to inspect request made and responses received. - LogDebug -) - -// Debug Logging Sub Levels -const ( - // LogDebugWithSigning states that the SDK should log request signing and - // presigning events. This should be used to log the signing details of - // requests for debugging. Will also enable LogDebug. - LogDebugWithSigning LogLevelType = LogDebug | (1 << iota) - - // LogDebugWithHTTPBody states the SDK should log HTTP request and response - // HTTP bodys in addition to the headers and path. This should be used to - // see the body content of requests and responses made while using the SDK - // Will also enable LogDebug. - LogDebugWithHTTPBody - - // LogDebugWithRequestRetries states the SDK should log when service requests will - // be retried. This should be used to log when you want to log when service - // requests are being retried. Will also enable LogDebug. - LogDebugWithRequestRetries - - // LogDebugWithRequestErrors states the SDK should log when service requests fail - // to build, send, validate, or unmarshal. - LogDebugWithRequestErrors -) - -// A Logger is a minimalistic interface for the SDK to log messages to. Should -// be used to provide custom logging writers for the SDK to use. -type Logger interface { - Log(...interface{}) -} - -// A LoggerFunc is a convenience type to convert a function taking a variadic -// list of arguments and wrap it so the Logger interface can be used. -// -// Example: -// s3.New(sess, &aws.Config{Logger: aws.LoggerFunc(func(args ...interface{}) { -// fmt.Fprintln(os.Stdout, args...) -// })}) -type LoggerFunc func(...interface{}) - -// Log calls the wrapped function with the arguments provided -func (f LoggerFunc) Log(args ...interface{}) { - f(args...) -} - -// NewDefaultLogger returns a Logger which will write log messages to stdout, and -// use same formatting runes as the stdlib log.Logger -func NewDefaultLogger() Logger { - return &defaultLogger{ - logger: log.New(os.Stdout, "", log.LstdFlags), - } -} - -// A defaultLogger provides a minimalistic logger satisfying the Logger interface. -type defaultLogger struct { - logger *log.Logger -} - -// Log logs the parameters to the stdlib logger. See log.Println. -func (l defaultLogger) Log(args ...interface{}) { - l.logger.Println(args...) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go deleted file mode 100644 index 5279c19c09..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/handlers.go +++ /dev/null @@ -1,187 +0,0 @@ -package request - -import ( - "fmt" - "strings" -) - -// A Handlers provides a collection of request handlers for various -// stages of handling requests. 
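A compact sketch exercising the LogLevel helpers and the LoggerFunc adapter defined above; the levels chosen are arbitrary.

package main

import (
	"fmt"
	"os"

	"github.com/aws/aws-sdk-go/aws"
)

func main() {
	// LogLevel returns a pointer, matching the aws.Config.LogLevel field.
	level := aws.LogLevel(aws.LogDebug | aws.LogDebugWithRequestRetries)

	// Matches tests a sub level bit; AtLeast compares overall severity.
	fmt.Println(level.Matches(aws.LogDebugWithRequestRetries)) // true
	fmt.Println(level.AtLeast(aws.LogDebug))                   // true

	// LoggerFunc adapts any func(...interface{}) to the Logger interface.
	logger := aws.LoggerFunc(func(args ...interface{}) {
		fmt.Fprintln(os.Stderr, args...)
	})
	logger.Log("DEBUG:", "custom logger wired up")
}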
-type Handlers struct { - Validate HandlerList - Build HandlerList - Sign HandlerList - Send HandlerList - ValidateResponse HandlerList - Unmarshal HandlerList - UnmarshalMeta HandlerList - UnmarshalError HandlerList - Retry HandlerList - AfterRetry HandlerList -} - -// Copy returns of this handler's lists. -func (h *Handlers) Copy() Handlers { - return Handlers{ - Validate: h.Validate.copy(), - Build: h.Build.copy(), - Sign: h.Sign.copy(), - Send: h.Send.copy(), - ValidateResponse: h.ValidateResponse.copy(), - Unmarshal: h.Unmarshal.copy(), - UnmarshalError: h.UnmarshalError.copy(), - UnmarshalMeta: h.UnmarshalMeta.copy(), - Retry: h.Retry.copy(), - AfterRetry: h.AfterRetry.copy(), - } -} - -// Clear removes callback functions for all handlers -func (h *Handlers) Clear() { - h.Validate.Clear() - h.Build.Clear() - h.Send.Clear() - h.Sign.Clear() - h.Unmarshal.Clear() - h.UnmarshalMeta.Clear() - h.UnmarshalError.Clear() - h.ValidateResponse.Clear() - h.Retry.Clear() - h.AfterRetry.Clear() -} - -// A HandlerListRunItem represents an entry in the HandlerList which -// is being run. -type HandlerListRunItem struct { - Index int - Handler NamedHandler - Request *Request -} - -// A HandlerList manages zero or more handlers in a list. -type HandlerList struct { - list []NamedHandler - - // Called after each request handler in the list is called. If set - // and the func returns true the HandlerList will continue to iterate - // over the request handlers. If false is returned the HandlerList - // will stop iterating. - // - // Should be used if extra logic to be performed between each handler - // in the list. This can be used to terminate a list's iteration - // based on a condition such as error like, HandlerListStopOnError. - // Or for logging like HandlerListLogItem. - AfterEachFn func(item HandlerListRunItem) bool -} - -// A NamedHandler is a struct that contains a name and function callback. -type NamedHandler struct { - Name string - Fn func(*Request) -} - -// copy creates a copy of the handler list. -func (l *HandlerList) copy() HandlerList { - n := HandlerList{ - AfterEachFn: l.AfterEachFn, - } - n.list = append([]NamedHandler{}, l.list...) - return n -} - -// Clear clears the handler list. -func (l *HandlerList) Clear() { - l.list = []NamedHandler{} -} - -// Len returns the number of handlers in the list. -func (l *HandlerList) Len() int { - return len(l.list) -} - -// PushBack pushes handler f to the back of the handler list. -func (l *HandlerList) PushBack(f func(*Request)) { - l.list = append(l.list, NamedHandler{"__anonymous", f}) -} - -// PushFront pushes handler f to the front of the handler list. -func (l *HandlerList) PushFront(f func(*Request)) { - l.list = append([]NamedHandler{{"__anonymous", f}}, l.list...) -} - -// PushBackNamed pushes named handler f to the back of the handler list. -func (l *HandlerList) PushBackNamed(n NamedHandler) { - l.list = append(l.list, n) -} - -// PushFrontNamed pushes named handler f to the front of the handler list. -func (l *HandlerList) PushFrontNamed(n NamedHandler) { - l.list = append([]NamedHandler{n}, l.list...) -} - -// Remove removes a NamedHandler n -func (l *HandlerList) Remove(n NamedHandler) { - newlist := []NamedHandler{} - for _, m := range l.list { - if m.Name != n.Name { - newlist = append(newlist, m) - } - } - l.list = newlist -} - -// Run executes all handlers in the list with a given request object. 
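To illustrate the HandlerList mechanics above, a minimal sketch that registers handlers and stops iteration on error via HandlerListStopOnError; the handler name is hypothetical, and a zero-value Request suffices because these handlers never touch it.

package main

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	var l request.HandlerList

	l.PushFront(func(r *request.Request) { fmt.Println("runs first") })
	l.PushBackNamed(request.NamedHandler{
		Name: "example.log", // hypothetical handler name
		Fn:   func(r *request.Request) { fmt.Println("runs second") },
	})

	// Stop iterating as soon as a handler records an error on the request.
	l.AfterEachFn = request.HandlerListStopOnError

	fmt.Println("handlers registered:", l.Len())
	l.Run(&request.Request{}) // a zero Request suffices for this sketch
}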
-func (l *HandlerList) Run(r *Request) { - for i, h := range l.list { - h.Fn(r) - item := HandlerListRunItem{ - Index: i, Handler: h, Request: r, - } - if l.AfterEachFn != nil && !l.AfterEachFn(item) { - return - } - } -} - -// HandlerListLogItem logs the request handler and the state of the -// request's Error value. Always returns true to continue iterating -// request handlers in a HandlerList. -func HandlerListLogItem(item HandlerListRunItem) bool { - if item.Request.Config.Logger == nil { - return true - } - item.Request.Config.Logger.Log("DEBUG: RequestHandler", - item.Index, item.Handler.Name, item.Request.Error) - - return true -} - -// HandlerListStopOnError returns false to stop the HandlerList iterating -// over request handlers if Request.Error is not nil. True otherwise -// to continue iterating. -func HandlerListStopOnError(item HandlerListRunItem) bool { - return item.Request.Error == nil -} - -// MakeAddToUserAgentHandler will add the name/version pair to the User-Agent request -// header. If the extra parameters are provided they will be added as metadata to the -// name/version pair resulting in the following format. -// "name/version (extra0; extra1; ...)" -// The user agent part will be concatenated with this current request's user agent string. -func MakeAddToUserAgentHandler(name, version string, extra ...string) func(*Request) { - ua := fmt.Sprintf("%s/%s", name, version) - if len(extra) > 0 { - ua += fmt.Sprintf(" (%s)", strings.Join(extra, "; ")) - } - return func(r *Request) { - AddToUserAgent(r, ua) - } -} - -// MakeAddToUserAgentFreeFormHandler adds the input to the User-Agent request header. -// The input string will be concatenated with the current request's user agent string. -func MakeAddToUserAgentFreeFormHandler(s string) func(*Request) { - return func(r *Request) { - AddToUserAgent(r, s) - } -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request.go deleted file mode 100644 index a4087f20e8..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request.go +++ /dev/null @@ -1,33 +0,0 @@ -// +build go1.5 - -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - Close: r.Close, - Body: body, - Host: r.Host, - Method: r.Method, - Proto: r.Proto, - ContentLength: r.ContentLength, - // Cancel will be deprecated in 1.7 and will be replaced with Context - Cancel: r.Cancel, - } - - *req.URL = *r.URL - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go deleted file mode 100644 index 75da021efe..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/http_request_1_4.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build !go1.5 - -package request - -import ( - "io" - "net/http" - "net/url" -) - -func copyHTTPRequest(r *http.Request, body io.ReadCloser) *http.Request { - req := &http.Request{ - URL: &url.URL{}, - Header: http.Header{}, - Close: r.Close, - Body: body, - Host: r.Host, - Method: r.Method, - Proto: r.Proto, - ContentLength: r.ContentLength, - } - - *req.URL = *r.URL - for k, v := range r.Header { - for _, vv := range v { - req.Header.Add(k, vv) - } - } - - return req -} diff --git 
a/vendor/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go deleted file mode 100644 index da6396d2d9..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/offset_reader.go +++ /dev/null @@ -1,49 +0,0 @@ -package request - -import ( - "io" - "sync" -) - -// offsetReader is a thread-safe io.ReadCloser to prevent racing -// with retrying requests -type offsetReader struct { - buf io.ReadSeeker - lock sync.RWMutex - closed bool -} - -func newOffsetReader(buf io.ReadSeeker, offset int64) *offsetReader { - reader := &offsetReader{} - buf.Seek(offset, 0) - - reader.buf = buf - return reader -} - -// Close is a thread-safe close. Uses the write lock. -func (o *offsetReader) Close() error { - o.lock.Lock() - defer o.lock.Unlock() - o.closed = true - return nil -} - -// Read is a thread-safe read using a read lock. -func (o *offsetReader) Read(p []byte) (int, error) { - o.lock.RLock() - defer o.lock.RUnlock() - - if o.closed { - return 0, io.EOF - } - - return o.buf.Read(p) -} - -// CloseAndCopy will return a new offsetReader with a copy of the old buffer -// and close the old buffer. -func (o *offsetReader) CloseAndCopy(offset int64) *offsetReader { - o.Close() - return newOffsetReader(o.buf, offset) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go deleted file mode 100644 index ce8087024c..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/request.go +++ /dev/null @@ -1,329 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "reflect" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/client/metadata" -) - -// A Request is the service request to be made. -type Request struct { - Config aws.Config - ClientInfo metadata.ClientInfo - Handlers Handlers - - Retryer - Time time.Time - ExpireTime time.Duration - Operation *Operation - HTTPRequest *http.Request - HTTPResponse *http.Response - Body io.ReadSeeker - BodyStart int64 // offset from beginning of Body that the request body starts - Params interface{} - Error error - Data interface{} - RequestID string - RetryCount int - Retryable *bool - RetryDelay time.Duration - NotHoist bool - SignedHeaderVals http.Header - - built bool -} - -// An Operation is the service API operation to be made. -type Operation struct { - Name string - HTTPMethod string - HTTPPath string - *Paginator -} - -// Paginator keeps track of pagination configuration for an API operation. -type Paginator struct { - InputTokens []string - OutputTokens []string - LimitToken string - TruncationToken string -} - -// New returns a new Request pointer for the service API -// operation and parameters. -// -// Params is any value of input parameters to be the request payload. -// Data is pointer value to an object which the request's response -// payload will be deserialized to. 
-func New(cfg aws.Config, clientInfo metadata.ClientInfo, handlers Handlers, - retryer Retryer, operation *Operation, params interface{}, data interface{}) *Request { - - method := operation.HTTPMethod - if method == "" { - method = "POST" - } - p := operation.HTTPPath - if p == "" { - p = "/" - } - - httpReq, _ := http.NewRequest(method, "", nil) - - var err error - httpReq.URL, err = url.Parse(clientInfo.Endpoint + p) - if err != nil { - httpReq.URL = &url.URL{} - err = awserr.New("InvalidEndpointURL", "invalid endpoint uri", err) - } - - r := &Request{ - Config: cfg, - ClientInfo: clientInfo, - Handlers: handlers.Copy(), - - Retryer: retryer, - Time: time.Now(), - ExpireTime: 0, - Operation: operation, - HTTPRequest: httpReq, - Body: nil, - Params: params, - Error: err, - Data: data, - } - r.SetBufferBody([]byte{}) - - return r -} - -// WillRetry returns if the request's can be retried. -func (r *Request) WillRetry() bool { - return r.Error != nil && aws.BoolValue(r.Retryable) && r.RetryCount < r.MaxRetries() -} - -// ParamsFilled returns if the request's parameters have been populated -// and the parameters are valid. False is returned if no parameters are -// provided or invalid. -func (r *Request) ParamsFilled() bool { - return r.Params != nil && reflect.ValueOf(r.Params).Elem().IsValid() -} - -// DataFilled returns true if the request's data for response deserialization -// target has been set and is a valid. False is returned if data is not -// set, or is invalid. -func (r *Request) DataFilled() bool { - return r.Data != nil && reflect.ValueOf(r.Data).Elem().IsValid() -} - -// SetBufferBody will set the request's body bytes that will be sent to -// the service API. -func (r *Request) SetBufferBody(buf []byte) { - r.SetReaderBody(bytes.NewReader(buf)) -} - -// SetStringBody sets the body of the request to be backed by a string. -func (r *Request) SetStringBody(s string) { - r.SetReaderBody(strings.NewReader(s)) -} - -// SetReaderBody will set the request's body reader. -func (r *Request) SetReaderBody(reader io.ReadSeeker) { - r.HTTPRequest.Body = newOffsetReader(reader, 0) - r.Body = reader -} - -// Presign returns the request's signed URL. Error will be returned -// if the signing fails. -func (r *Request) Presign(expireTime time.Duration) (string, error) { - r.ExpireTime = expireTime - r.NotHoist = false - r.Sign() - if r.Error != nil { - return "", r.Error - } - return r.HTTPRequest.URL.String(), nil -} - -// PresignRequest behaves just like presign, but hoists all headers and signs them. -// Also returns the signed hash back to the user -func (r *Request) PresignRequest(expireTime time.Duration) (string, http.Header, error) { - r.ExpireTime = expireTime - r.NotHoist = true - r.Sign() - if r.Error != nil { - return "", nil, r.Error - } - return r.HTTPRequest.URL.String(), r.SignedHeaderVals, nil -} - -func debugLogReqError(r *Request, stage string, retrying bool, err error) { - if !r.Config.LogLevel.Matches(aws.LogDebugWithRequestErrors) { - return - } - - retryStr := "not retrying" - if retrying { - retryStr = "will retry" - } - - r.Config.Logger.Log(fmt.Sprintf("DEBUG: %s %s/%s failed, %s, error %v", - stage, r.ClientInfo.ServiceName, r.Operation.Name, retryStr, err)) -} - -// Build will build the request's object so it can be signed and sent -// to the service. Build will also validate all the request's parameters. -// Anny additional build Handlers set on this request will be run -// in the order they were set. -// -// The request will only be built once. 
Multiple calls to build will have -// no effect. -// -// If any Validate or Build errors occur the build will stop and the error -// which occurred will be returned. -func (r *Request) Build() error { - if !r.built { - r.Handlers.Validate.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Request", false, r.Error) - return r.Error - } - r.Handlers.Build.Run(r) - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - r.built = true - } - - return r.Error -} - -// Sign will sign the request retuning error if errors are encountered. -// -// Send will build the request prior to signing. All Sign Handlers will -// be executed in the order they were set. -func (r *Request) Sign() error { - r.Build() - if r.Error != nil { - debugLogReqError(r, "Build Request", false, r.Error) - return r.Error - } - - r.Handlers.Sign.Run(r) - return r.Error -} - -// Send will send the request returning error if errors are encountered. -// -// Send will sign the request prior to sending. All Send Handlers will -// be executed in the order they were set. -// -// Canceling a request is non-deterministic. If a request has been canceled, -// then the transport will choose, randomly, one of the state channels during -// reads or getting the connection. -// -// readLoop() and getConn(req *Request, cm connectMethod) -// https://github.com/golang/go/blob/master/src/net/http/transport.go -func (r *Request) Send() error { - for { - if aws.BoolValue(r.Retryable) { - if r.Config.LogLevel.Matches(aws.LogDebugWithRequestRetries) { - r.Config.Logger.Log(fmt.Sprintf("DEBUG: Retrying Request %s/%s, attempt %d", - r.ClientInfo.ServiceName, r.Operation.Name, r.RetryCount)) - } - - var body io.ReadCloser - if reader, ok := r.HTTPRequest.Body.(*offsetReader); ok { - body = reader.CloseAndCopy(r.BodyStart) - } else { - if r.Config.Logger != nil { - r.Config.Logger.Log("Request body type has been overwritten. May cause race conditions") - } - r.Body.Seek(r.BodyStart, 0) - body = ioutil.NopCloser(r.Body) - } - - r.HTTPRequest = copyHTTPRequest(r.HTTPRequest, body) - if r.HTTPResponse != nil && r.HTTPResponse.Body != nil { - // Closing response body. Since we are setting a new request to send off, this - // response will get squashed and leaked. 
- r.HTTPResponse.Body.Close() - } - } - - r.Sign() - if r.Error != nil { - return r.Error - } - - r.Retryable = nil - - r.Handlers.Send.Run(r) - if r.Error != nil { - if strings.Contains(r.Error.Error(), "net/http: request canceled") { - return r.Error - } - - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Send Request", false, r.Error) - return r.Error - } - debugLogReqError(r, "Send Request", true, err) - continue - } - - r.Handlers.UnmarshalMeta.Run(r) - r.Handlers.ValidateResponse.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.UnmarshalError.Run(r) - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Validate Response", false, r.Error) - return r.Error - } - debugLogReqError(r, "Validate Response", true, err) - continue - } - - r.Handlers.Unmarshal.Run(r) - if r.Error != nil { - err := r.Error - r.Handlers.Retry.Run(r) - r.Handlers.AfterRetry.Run(r) - if r.Error != nil { - debugLogReqError(r, "Unmarshal Response", false, r.Error) - return r.Error - } - debugLogReqError(r, "Unmarshal Response", true, err) - continue - } - - break - } - - return nil -} - -// AddToUserAgent adds the string to the end of the request's current user agent. -func AddToUserAgent(r *Request, s string) { - curUA := r.HTTPRequest.Header.Get("User-Agent") - if len(curUA) > 0 { - s = curUA + " " + s - } - r.HTTPRequest.Header.Set("User-Agent", s) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go deleted file mode 100644 index 2939ec473f..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/request_pagination.go +++ /dev/null @@ -1,104 +0,0 @@ -package request - -import ( - "reflect" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awsutil" -) - -//type Paginater interface { -// HasNextPage() bool -// NextPage() *Request -// EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error -//} - -// HasNextPage returns true if this request has more pages of data available. -func (r *Request) HasNextPage() bool { - return len(r.nextPageTokens()) > 0 -} - -// nextPageTokens returns the tokens to use when asking for the next page of -// data. -func (r *Request) nextPageTokens() []interface{} { - if r.Operation.Paginator == nil { - return nil - } - - if r.Operation.TruncationToken != "" { - tr, _ := awsutil.ValuesAtPath(r.Data, r.Operation.TruncationToken) - if len(tr) == 0 { - return nil - } - - switch v := tr[0].(type) { - case *bool: - if !aws.BoolValue(v) { - return nil - } - case bool: - if v == false { - return nil - } - } - } - - tokens := []interface{}{} - tokenAdded := false - for _, outToken := range r.Operation.OutputTokens { - v, _ := awsutil.ValuesAtPath(r.Data, outToken) - if len(v) > 0 { - tokens = append(tokens, v[0]) - tokenAdded = true - } else { - tokens = append(tokens, nil) - } - } - if !tokenAdded { - return nil - } - - return tokens -} - -// NextPage returns a new Request that can be executed to return the next -// page of result data. Call .Send() on this request to execute it. 
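A small sketch combining AddToUserAgent above with the MakeAddToUserAgentHandler helper from handlers.go; the names and versions are placeholders.

package main

import (
	"fmt"
	"net/http"

	"github.com/aws/aws-sdk-go/aws/request"
)

func main() {
	httpReq, _ := http.NewRequest("GET", "https://example.com", nil)
	r := &request.Request{HTTPRequest: httpReq}

	// AddToUserAgent appends to whatever User-Agent is already set.
	request.AddToUserAgent(r, "docs-example/0.1")

	// The Build-phase helper from handlers.go produces the
	// "name/version (extra; ...)" form.
	h := request.MakeAddToUserAgentHandler("aws-sdk-go", "1.x", "go1.6")
	h(r)

	fmt.Println(r.HTTPRequest.Header.Get("User-Agent"))
}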
-func (r *Request) NextPage() *Request { - tokens := r.nextPageTokens() - if len(tokens) == 0 { - return nil - } - - data := reflect.New(reflect.TypeOf(r.Data).Elem()).Interface() - nr := New(r.Config, r.ClientInfo, r.Handlers, r.Retryer, r.Operation, awsutil.CopyOf(r.Params), data) - for i, intok := range nr.Operation.InputTokens { - awsutil.SetValueAtPath(nr.Params, intok, tokens[i]) - } - return nr -} - -// EachPage iterates over each page of a paginated request object. The fn -// parameter should be a function with the following sample signature: -// -// func(page *T, lastPage bool) bool { -// return true // return false to stop iterating -// } -// -// Where "T" is the structure type matching the output structure of the given -// operation. For example, a request object generated by -// DynamoDB.ListTablesRequest() would expect to see dynamodb.ListTablesOutput -// as the structure "T". The lastPage value represents whether the page is -// the last page of data or not. The return value of this function should -// return true to keep iterating or false to stop. -func (r *Request) EachPage(fn func(data interface{}, isLastPage bool) (shouldContinue bool)) error { - for page := r; page != nil; page = page.NextPage() { - if err := page.Send(); err != nil { - return err - } - if getNextPage := fn(page.Data, !page.HasNextPage()); !getNextPage { - return page.Error - } - } - - return nil -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go deleted file mode 100644 index 8cc8b015ae..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/retryer.go +++ /dev/null @@ -1,101 +0,0 @@ -package request - -import ( - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" -) - -// Retryer is an interface to control retry logic for a given service. -// The default implementation used by most services is the service.DefaultRetryer -// structure, which contains basic retry logic using exponential backoff. -type Retryer interface { - RetryRules(*Request) time.Duration - ShouldRetry(*Request) bool - MaxRetries() int -} - -// WithRetryer sets a config Retryer value to the given Config returning it -// for chaining. -func WithRetryer(cfg *aws.Config, retryer Retryer) *aws.Config { - cfg.Retryer = retryer - return cfg -} - -// retryableCodes is a collection of service response codes which are retry-able -// without any further action. -var retryableCodes = map[string]struct{}{ - "RequestError": {}, - "RequestTimeout": {}, -} - -var throttleCodes = map[string]struct{}{ - "ProvisionedThroughputExceededException": {}, - "Throttling": {}, - "ThrottlingException": {}, - "RequestLimitExceeded": {}, - "RequestThrottled": {}, - "LimitExceededException": {}, // Deleting 10+ DynamoDb tables at once - "TooManyRequestsException": {}, // Lambda functions -} - -// credsExpiredCodes is a collection of error codes which signify the credentials -// need to be refreshed. Expired tokens require refreshing of credentials, and -// resigning before the request can be retried. 
-var credsExpiredCodes = map[string]struct{}{ - "ExpiredToken": {}, - "ExpiredTokenException": {}, - "RequestExpired": {}, // EC2 Only -} - -func isCodeThrottle(code string) bool { - _, ok := throttleCodes[code] - return ok -} - -func isCodeRetryable(code string) bool { - if _, ok := retryableCodes[code]; ok { - return true - } - - return isCodeExpiredCreds(code) -} - -func isCodeExpiredCreds(code string) bool { - _, ok := credsExpiredCodes[code] - return ok -} - -// IsErrorRetryable returns whether the error is retryable, based on its Code. -// Returns false if the request has no Error set. -func (r *Request) IsErrorRetryable() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeRetryable(err.Code()) - } - } - return false -} - -// IsErrorThrottle returns whether the error is to be throttled based on its code. -// Returns false if the request has no Error set -func (r *Request) IsErrorThrottle() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeThrottle(err.Code()) - } - } - return false -} - -// IsErrorExpired returns whether the error code is a credential expiry error. -// Returns false if the request has no Error set. -func (r *Request) IsErrorExpired() bool { - if r.Error != nil { - if err, ok := r.Error.(awserr.Error); ok { - return isCodeExpiredCreds(err.Code()) - } - } - return false -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/request/validation.go b/vendor/src/github.com/aws/aws-sdk-go/aws/request/validation.go deleted file mode 100644 index 2520286b75..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/request/validation.go +++ /dev/null @@ -1,234 +0,0 @@ -package request - -import ( - "bytes" - "fmt" - - "github.com/aws/aws-sdk-go/aws/awserr" -) - -const ( - // InvalidParameterErrCode is the error code for invalid parameters errors - InvalidParameterErrCode = "InvalidParameter" - // ParamRequiredErrCode is the error code for required parameter errors - ParamRequiredErrCode = "ParamRequiredError" - // ParamMinValueErrCode is the error code for fields with too low of a - // number value. - ParamMinValueErrCode = "ParamMinValueError" - // ParamMinLenErrCode is the error code for fields without enough elements. - ParamMinLenErrCode = "ParamMinLenError" -) - -// Validator provides a way for types to perform validation logic on their -// input values that external code can use to determine if a type's values -// are valid. -type Validator interface { - Validate() error -} - -// An ErrInvalidParams provides wrapping of invalid parameter errors found when -// validating API operation input parameters. -type ErrInvalidParams struct { - // Context is the base context of the invalid parameter group. - Context string - errs []ErrInvalidParam -} - -// Add adds a new invalid parameter error to the collection of invalid -// parameters. The context of the invalid parameter will be updated to reflect -// this collection. -func (e *ErrInvalidParams) Add(err ErrInvalidParam) { - err.SetContext(e.Context) - e.errs = append(e.errs, err) -} - -// AddNested adds the invalid parameter errors from another ErrInvalidParams -// value into this collection. The nested errors will have their nested context -// updated and base context to reflect the merging. -// -// Use for nested validations errors. 
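A custom Retryer (see the interface above) typically leans on these classification helpers. A minimal sketch with an illustrative fixed one-second backoff:

    package main

    import (
        "time"

        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/request"
    )

    // fixedRetryer retries retryable errors up to three times with a
    // constant delay; the numbers are illustrative, not a recommendation.
    type fixedRetryer struct{}

    func (fixedRetryer) RetryRules(*request.Request) time.Duration { return time.Second }
    func (fixedRetryer) ShouldRetry(r *request.Request) bool       { return r.IsErrorRetryable() }
    func (fixedRetryer) MaxRetries() int                           { return 3 }

    func main() {
        // WithRetryer wires the retryer into a config for chaining into
        // a session or service client constructor.
        cfg := request.WithRetryer(aws.NewConfig(), fixedRetryer{})
        _ = cfg
    }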
-func (e *ErrInvalidParams) AddNested(nestedCtx string, nested ErrInvalidParams) {
-	for _, err := range nested.errs {
-		err.SetContext(e.Context)
-		err.AddNestedContext(nestedCtx)
-		e.errs = append(e.errs, err)
-	}
-}
-
-// Len returns the number of invalid parameter errors
-func (e ErrInvalidParams) Len() int {
-	return len(e.errs)
-}
-
-// Code returns the code of the error
-func (e ErrInvalidParams) Code() string {
-	return InvalidParameterErrCode
-}
-
-// Message returns the message of the error
-func (e ErrInvalidParams) Message() string {
-	return fmt.Sprintf("%d validation error(s) found.", len(e.errs))
-}
-
-// Error returns the string formatted form of the invalid parameters.
-func (e ErrInvalidParams) Error() string {
-	w := &bytes.Buffer{}
-	fmt.Fprintf(w, "%s: %s\n", e.Code(), e.Message())
-
-	for _, err := range e.errs {
-		fmt.Fprintf(w, "- %s\n", err.Message())
-	}
-
-	return w.String()
-}
-
-// OrigErr returns the invalid parameters as an awserr.BatchedErrors value
-func (e ErrInvalidParams) OrigErr() error {
-	return awserr.NewBatchError(
-		InvalidParameterErrCode, e.Message(), e.OrigErrs())
-}
-
-// OrigErrs returns a slice of the invalid parameters
-func (e ErrInvalidParams) OrigErrs() []error {
-	errs := make([]error, len(e.errs))
-	for i := 0; i < len(errs); i++ {
-		errs[i] = e.errs[i]
-	}
-
-	return errs
-}
-
-// An ErrInvalidParam represents an invalid parameter error type.
-type ErrInvalidParam interface {
-	awserr.Error
-
-	// Field name the error occurred on.
-	Field() string
-
-	// SetContext updates the context of the error.
-	SetContext(string)
-
-	// AddNestedContext updates the error's context to include a nested level.
-	AddNestedContext(string)
-}
-
-type errInvalidParam struct {
-	context       string
-	nestedContext string
-	field         string
-	code          string
-	msg           string
-}
-
-// Code returns the error code for the type of invalid parameter.
-func (e *errInvalidParam) Code() string {
-	return e.code
-}
-
-// Message returns the reason the parameter was invalid, and its context.
-func (e *errInvalidParam) Message() string {
-	return fmt.Sprintf("%s, %s.", e.msg, e.Field())
-}
-
-// Error returns the string version of the invalid parameter error.
-func (e *errInvalidParam) Error() string {
-	return fmt.Sprintf("%s: %s", e.code, e.Message())
-}
-
-// OrigErr returns nil. Implemented to satisfy the awserr.Error interface.
-func (e *errInvalidParam) OrigErr() error {
-	return nil
-}
-
-// Field returns the field and context in which the error occurred.
-func (e *errInvalidParam) Field() string {
-	field := e.context
-	if len(field) > 0 {
-		field += "."
-	}
-	if len(e.nestedContext) > 0 {
-		field += fmt.Sprintf("%s.", e.nestedContext)
-	}
-	field += e.field
-
-	return field
-}
-
-// SetContext updates the base context of the error.
-func (e *errInvalidParam) SetContext(ctx string) {
-	e.context = ctx
-}
-
-// AddNestedContext prepends a context to the field's path.
-func (e *errInvalidParam) AddNestedContext(ctx string) {
-	if len(e.nestedContext) == 0 {
-		e.nestedContext = ctx
-	} else {
-		e.nestedContext = fmt.Sprintf("%s.%s", ctx, e.nestedContext)
-	}
-
-}
-
-// An ErrParamRequired represents a required parameter error.
-type ErrParamRequired struct {
-	errInvalidParam
-}
-
-// NewErrParamRequired creates a new required parameter error.
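A sketch of how the pieces above compose; the CreateThingInput context and field names are hypothetical:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/aws/request"
    )

    func main() {
        // Collect validation failures for a hypothetical operation input.
        errs := request.ErrInvalidParams{Context: "CreateThingInput"}
        errs.Add(request.NewErrParamRequired("Name"))
        errs.Add(request.NewErrParamMinLen("Tags", 1))

        if errs.Len() > 0 {
            fmt.Print(errs.Error())
            // InvalidParameter: 2 validation error(s) found.
            // - missing required field, CreateThingInput.Name.
            // - minimum field size of 1, CreateThingInput.Tags.
        }
    }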
-func NewErrParamRequired(field string) *ErrParamRequired { - return &ErrParamRequired{ - errInvalidParam{ - code: ParamRequiredErrCode, - field: field, - msg: fmt.Sprintf("missing required field"), - }, - } -} - -// An ErrParamMinValue represents a minimum value parameter error. -type ErrParamMinValue struct { - errInvalidParam - min float64 -} - -// NewErrParamMinValue creates a new minimum value parameter error. -func NewErrParamMinValue(field string, min float64) *ErrParamMinValue { - return &ErrParamMinValue{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field value of %v", min), - }, - min: min, - } -} - -// MinValue returns the field's require minimum value. -// -// float64 is returned for both int and float min values. -func (e *ErrParamMinValue) MinValue() float64 { - return e.min -} - -// An ErrParamMinLen represents a minimum length parameter error. -type ErrParamMinLen struct { - errInvalidParam - min int -} - -// NewErrParamMinLen creates a new minimum length parameter error. -func NewErrParamMinLen(field string, min int) *ErrParamMinLen { - return &ErrParamMinLen{ - errInvalidParam: errInvalidParam{ - code: ParamMinValueErrCode, - field: field, - msg: fmt.Sprintf("minimum field size of %v", min), - }, - min: min, - } -} - -// MinLen returns the field's required minimum length. -func (e *ErrParamMinLen) MinLen() int { - return e.min -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go b/vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go deleted file mode 100644 index 6bc8f1be95..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/aws/session/session.go +++ /dev/null @@ -1,120 +0,0 @@ -// Package session provides a way to create service clients with shared configuration -// and handlers. -// -// Generally this package should be used instead of the `defaults` package. -// -// A session should be used to share configurations and request handlers between multiple -// service clients. When service clients need specific configuration aws.Config can be -// used to provide additional configuration directly to the service client. -package session - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/corehandlers" - "github.com/aws/aws-sdk-go/aws/defaults" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/endpoints" -) - -// A Session provides a central location to create service clients from and -// store configurations and request handlers for those services. -// -// Sessions are safe to create service clients concurrently, but it is not safe -// to mutate the session concurrently. -type Session struct { - Config *aws.Config - Handlers request.Handlers -} - -// New creates a new instance of the handlers merging in the provided Configs -// on top of the SDK's default configurations. Once the session is created it -// can be mutated to modify Configs or Handlers. The session is safe to be read -// concurrently, but it should not be written to concurrently. -// -// Example: -// // Create a session with the default config and request handlers. -// sess := session.New() -// -// // Create a session with a custom region -// sess := session.New(&aws.Config{Region: aws.String("us-east-1")}) -// -// // Create a session, and add additional handlers for all service -// // clients created with the session to inherit. Adds logging handler. 
-// sess := session.New()
-// sess.Handlers.Send.PushFront(func(r *request.Request) {
-// // Log every request made and its payload
-// logger.Printf("Request: %s/%s, Payload: %s", r.ClientInfo.ServiceName, r.Operation, r.Params)
-// })
-//
-// // Create an S3 client instance from a session
-// sess := session.New()
-// svc := s3.New(sess)
-func New(cfgs ...*aws.Config) *Session {
-	cfg := defaults.Config()
-	handlers := defaults.Handlers()
-
-	// Apply the passed in configs so the configuration can be applied to the
-	// default credential chain
-	cfg.MergeIn(cfgs...)
-	cfg.Credentials = defaults.CredChain(cfg, handlers)
-
-	// Reapply any passed in configs to override credentials if set
-	cfg.MergeIn(cfgs...)
-
-	s := &Session{
-		Config:   cfg,
-		Handlers: handlers,
-	}
-
-	initHandlers(s)
-
-	return s
-}
-
-func initHandlers(s *Session) {
-	// Add the Validate parameter handler if it is not disabled.
-	s.Handlers.Validate.Remove(corehandlers.ValidateParametersHandler)
-	if !aws.BoolValue(s.Config.DisableParamValidation) {
-		s.Handlers.Validate.PushBackNamed(corehandlers.ValidateParametersHandler)
-	}
-}
-
-// Copy creates and returns a copy of the current session, copying the config
-// and handlers. If any additional configs are provided they will be merged
-// on top of the session's copied config.
-//
-// Example:
-// // Create a copy of the current session, configured for the us-west-2 region.
-// sess.Copy(&aws.Config{Region: aws.String("us-west-2")})
-func (s *Session) Copy(cfgs ...*aws.Config) *Session {
-	newSession := &Session{
-		Config:   s.Config.Copy(cfgs...),
-		Handlers: s.Handlers.Copy(),
-	}
-
-	initHandlers(newSession)
-
-	return newSession
-}
-
-// ClientConfig satisfies the client.ConfigProvider interface and is used to
-// configure the service client instances. Passing the Session to the service
-// client's constructor (New) will use this method to configure the client.
-//
-// Example:
-// sess := session.New()
-// s3.New(sess)
-func (s *Session) ClientConfig(serviceName string, cfgs ...*aws.Config) client.Config {
-	s = s.Copy(cfgs...)
-	endpoint, signingRegion := endpoints.NormalizeEndpoint(
-		aws.StringValue(s.Config.Endpoint), serviceName,
-		aws.StringValue(s.Config.Region), aws.BoolValue(s.Config.DisableSSL))
-
-	return client.Config{
-		Config:        s.Config,
-		Handlers:      s.Handlers,
-		Endpoint:      endpoint,
-		SigningRegion: signingRegion,
-	}
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/types.go b/vendor/src/github.com/aws/aws-sdk-go/aws/types.go
deleted file mode 100644
index fa014b49e1..0000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/types.go
+++ /dev/null
@@ -1,106 +0,0 @@
-package aws
-
-import (
-	"io"
-	"sync"
-)
-
-// ReadSeekCloser wraps an io.Reader returning a ReaderSeekerCloser
-func ReadSeekCloser(r io.Reader) ReaderSeekerCloser {
-	return ReaderSeekerCloser{r}
-}
-
-// ReaderSeekerCloser represents a reader that can also delegate io.Seeker and
-// io.Closer interfaces to the underlying object if they are available.
-type ReaderSeekerCloser struct {
-	r io.Reader
-}
-
-// Read reads from the reader up to the size of p. The number of bytes read and
-// any error encountered are returned.
-//
-// If the wrapped reader is not an io.Reader, zero bytes read and a nil error
-// will be returned.
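A short sketch of the Copy pattern above, assuming the s3 service package from the same vendor tree:

    package main

    import (
        "github.com/aws/aws-sdk-go/aws"
        "github.com/aws/aws-sdk-go/aws/session"
        "github.com/aws/aws-sdk-go/service/s3"
    )

    func main() {
        // One base session shared by all clients...
        sess := session.New(&aws.Config{Region: aws.String("us-east-1")})

        // ...and a copy retargeted at another region. Copy duplicates the
        // config and handlers, leaving the base session untouched.
        west := sess.Copy(&aws.Config{Region: aws.String("us-west-2")})

        _ = s3.New(sess) // us-east-1 client
        _ = s3.New(west) // us-west-2 client
    }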
-//
-// Performs the same functionality as io.Reader's Read.
-func (r ReaderSeekerCloser) Read(p []byte) (int, error) {
-	switch t := r.r.(type) {
-	case io.Reader:
-		return t.Read(p)
-	}
-	return 0, nil
-}
-
-// Seek sets the offset for the next Read to offset, interpreted according to
-// whence: 0 means relative to the origin of the file, 1 means relative to the
-// current offset, and 2 means relative to the end. Seek returns the new offset
-// and an error, if any.
-//
-// If the ReaderSeekerCloser is not an io.Seeker nothing will be done.
-func (r ReaderSeekerCloser) Seek(offset int64, whence int) (int64, error) {
-	switch t := r.r.(type) {
-	case io.Seeker:
-		return t.Seek(offset, whence)
-	}
-	return int64(0), nil
-}
-
-// Close closes the ReaderSeekerCloser.
-//
-// If the ReaderSeekerCloser is not an io.Closer nothing will be done.
-func (r ReaderSeekerCloser) Close() error {
-	switch t := r.r.(type) {
-	case io.Closer:
-		return t.Close()
-	}
-	return nil
-}
-
-// A WriteAtBuffer provides an in-memory buffer supporting the io.WriterAt
-// interface. It can be used with the s3manager.Downloader to download content
-// to a buffer in memory. Safe to use concurrently.
-type WriteAtBuffer struct {
-	buf []byte
-	m   sync.Mutex
-
-	// GrowthCoeff defines the growth rate of the internal buffer. By
-	// default, the growth rate is 1, where expanding the internal
-	// buffer will allocate only enough capacity to fit the new expected
-	// length.
-	GrowthCoeff float64
-}
-
-// NewWriteAtBuffer creates a WriteAtBuffer with an internal buffer
-// provided by buf.
-func NewWriteAtBuffer(buf []byte) *WriteAtBuffer {
-	return &WriteAtBuffer{buf: buf}
-}
-
-// WriteAt writes a slice of bytes to a buffer starting at the position provided.
-// The number of bytes written, or an error, will be returned. It can overwrite
-// previously written slices if the writes overlap.
-func (b *WriteAtBuffer) WriteAt(p []byte, pos int64) (n int, err error) {
-	pLen := len(p)
-	expLen := pos + int64(pLen)
-	b.m.Lock()
-	defer b.m.Unlock()
-	if int64(len(b.buf)) < expLen {
-		if int64(cap(b.buf)) < expLen {
-			if b.GrowthCoeff < 1 {
-				b.GrowthCoeff = 1
-			}
-			newBuf := make([]byte, expLen, int64(b.GrowthCoeff*float64(expLen)))
-			copy(newBuf, b.buf)
-			b.buf = newBuf
-		}
-		b.buf = b.buf[:expLen]
-	}
-	copy(b.buf[pos:], p)
-	return pLen, nil
-}
-
-// Bytes returns a slice of bytes written to the buffer.
-func (b *WriteAtBuffer) Bytes() []byte {
-	b.m.Lock()
-	defer b.m.Unlock()
-	return b.buf[:len(b.buf):len(b.buf)]
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/aws/version.go b/vendor/src/github.com/aws/aws-sdk-go/aws/version.go
deleted file mode 100644
index 84c62becd1..0000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/aws/version.go
+++ /dev/null
@@ -1,8 +0,0 @@
-// Package aws provides core functionality for making requests to AWS services.
-package aws
-
-// SDKName is the name of this AWS SDK
-const SDKName = "aws-sdk-go"
-
-// SDKVersion is the version of this SDK
-const SDKVersion = "1.1.30"
diff --git a/vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE b/vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE
deleted file mode 100644
index 6a66aea5ea..0000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/awsmigrate/awsmigrate-renamer/vendor/golang.org/x/tools/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
-   * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-   * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
-   * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go b/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
deleted file mode 100644
index 2b279e6599..0000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.go
+++ /dev/null
@@ -1,65 +0,0 @@
-// Package endpoints validates regional endpoints for services.
-package endpoints
-
-//go:generate go run ../model/cli/gen-endpoints/main.go endpoints.json endpoints_map.go
-//go:generate gofmt -s -w endpoints_map.go
-
-import (
-	"fmt"
-	"regexp"
-	"strings"
-)
-
-// NormalizeEndpoint takes an endpoint and service API information to return a
-// normalized endpoint and signing region. If the endpoint is an empty string,
-// the service name and region will be used to look up the service's API
-// endpoint. If the endpoint is provided, the scheme will be added to it if it
-// is not already present.
-func NormalizeEndpoint(endpoint, serviceName, region string, disableSSL bool) (normEndpoint, signingRegion string) {
-	if endpoint == "" {
-		return EndpointForRegion(serviceName, region, disableSSL)
-	}
-
-	return AddScheme(endpoint, disableSSL), ""
-}
-
-// EndpointForRegion returns an endpoint and its signing region for a service and region.
-// If the service and region pair are not found, endpoint and signingRegion will be empty.
-func EndpointForRegion(svcName, region string, disableSSL bool) (endpoint, signingRegion string) {
-	derivedKeys := []string{
-		region + "/" + svcName,
-		region + "/*",
-		"*/" + svcName,
-		"*/*",
-	}
-
-	for _, key := range derivedKeys {
-		if val, ok := endpointsMap.Endpoints[key]; ok {
-			ep := val.Endpoint
-			ep = strings.Replace(ep, "{region}", region, -1)
-			ep = strings.Replace(ep, "{service}", svcName, -1)
-
-			endpoint = ep
-			signingRegion = val.SigningRegion
-			break
-		}
-	}
-
-	return AddScheme(endpoint, disableSSL), signingRegion
-}
-
-// Regular expression to determine if the endpoint string is prefixed with a scheme.
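A sketch of the two lookup paths above (endpoints is a private package of the SDK, so this is for illustration only; the region and local endpoint are arbitrary):

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/private/endpoints"
    )

    func main() {
        // Derived lookup, most specific key first:
        // region/service, region/*, */service, */*.
        ep, sr := endpoints.EndpointForRegion("dynamodb", "us-west-2", false)
        fmt.Printf("%s %q\n", ep, sr)
        // https://dynamodb.us-west-2.amazonaws.com ""

        // Explicit endpoints pass through, gaining a scheme if missing.
        ep, _ = endpoints.NormalizeEndpoint("localhost:8000", "dynamodb", "us-west-2", true)
        fmt.Println(ep)
        // http://localhost:8000
    }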
-var schemeRE = regexp.MustCompile("^([^:]+)://") - -// AddScheme adds the HTTP or HTTPS schemes to a endpoint URL if there is no -// scheme. If disableSSL is true HTTP will be added instead of the default HTTPS. -func AddScheme(endpoint string, disableSSL bool) string { - if endpoint != "" && !schemeRE.MatchString(endpoint) { - scheme := "https" - if disableSSL { - scheme = "http" - } - endpoint = fmt.Sprintf("%s://%s", scheme, endpoint) - } - - return endpoint -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json b/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json deleted file mode 100644 index 5f4991c2bd..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints.json +++ /dev/null @@ -1,75 +0,0 @@ -{ - "version": 2, - "endpoints": { - "*/*": { - "endpoint": "{service}.{region}.amazonaws.com" - }, - "cn-north-1/*": { - "endpoint": "{service}.{region}.amazonaws.com.cn", - "signatureVersion": "v4" - }, - "cn-north-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "us-gov-west-1/iam": { - "endpoint": "iam.us-gov.amazonaws.com" - }, - "us-gov-west-1/sts": { - "endpoint": "sts.us-gov-west-1.amazonaws.com" - }, - "us-gov-west-1/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-gov-west-1/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/cloudfront": { - "endpoint": "cloudfront.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/cloudsearchdomain": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/data.iot": { - "endpoint": "", - "signingRegion": "us-east-1" - }, - "*/ec2metadata": { - "endpoint": "http://169.254.169.254/latest" - }, - "*/iam": { - "endpoint": "iam.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/importexport": { - "endpoint": "importexport.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/route53": { - "endpoint": "route53.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/sts": { - "endpoint": "sts.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/waf": { - "endpoint": "waf.amazonaws.com", - "signingRegion": "us-east-1" - }, - "us-east-1/sdb": { - "endpoint": "sdb.amazonaws.com", - "signingRegion": "us-east-1" - }, - "*/s3": { - "endpoint": "s3-{region}.amazonaws.com" - }, - "us-east-1/s3": { - "endpoint": "s3.amazonaws.com" - }, - "eu-central-1/s3": { - "endpoint": "{service}.{region}.amazonaws.com" - } - } -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go b/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go deleted file mode 100644 index e995315ab8..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/endpoints/endpoints_map.go +++ /dev/null @@ -1,88 +0,0 @@ -package endpoints - -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
-
-type endpointStruct struct {
-	Version   int
-	Endpoints map[string]endpointEntry
-}
-
-type endpointEntry struct {
-	Endpoint      string
-	SigningRegion string
-}
-
-var endpointsMap = endpointStruct{
-	Version: 2,
-	Endpoints: map[string]endpointEntry{
-		"*/*": {
-			Endpoint: "{service}.{region}.amazonaws.com",
-		},
-		"*/cloudfront": {
-			Endpoint:      "cloudfront.amazonaws.com",
-			SigningRegion: "us-east-1",
-		},
-		"*/cloudsearchdomain": {
-			Endpoint:      "",
-			SigningRegion: "us-east-1",
-		},
-		"*/data.iot": {
-			Endpoint:      "",
-			SigningRegion: "us-east-1",
-		},
-		"*/ec2metadata": {
-			Endpoint: "http://169.254.169.254/latest",
-		},
-		"*/iam": {
-			Endpoint:      "iam.amazonaws.com",
-			SigningRegion: "us-east-1",
-		},
-		"*/importexport": {
-			Endpoint:      "importexport.amazonaws.com",
-			SigningRegion: "us-east-1",
-		},
-		"*/route53": {
-			Endpoint:      "route53.amazonaws.com",
-			SigningRegion: "us-east-1",
-		},
-		"*/s3": {
-			Endpoint: "s3-{region}.amazonaws.com",
-		},
-		"*/sts": {
-			Endpoint:      "sts.amazonaws.com",
-			SigningRegion: "us-east-1",
-		},
-		"*/waf": {
-			Endpoint:      "waf.amazonaws.com",
-			SigningRegion: "us-east-1",
-		},
-		"cn-north-1/*": {
-			Endpoint: "{service}.{region}.amazonaws.com.cn",
-		},
-		"cn-north-1/ec2metadata": {
-			Endpoint: "http://169.254.169.254/latest",
-		},
-		"eu-central-1/s3": {
-			Endpoint: "{service}.{region}.amazonaws.com",
-		},
-		"us-east-1/s3": {
-			Endpoint: "s3.amazonaws.com",
-		},
-		"us-east-1/sdb": {
-			Endpoint:      "sdb.amazonaws.com",
-			SigningRegion: "us-east-1",
-		},
-		"us-gov-west-1/ec2metadata": {
-			Endpoint: "http://169.254.169.254/latest",
-		},
-		"us-gov-west-1/iam": {
-			Endpoint: "iam.us-gov.amazonaws.com",
-		},
-		"us-gov-west-1/s3": {
-			Endpoint: "s3-{region}.amazonaws.com",
-		},
-		"us-gov-west-1/sts": {
-			Endpoint: "sts.us-gov-west-1.amazonaws.com",
-		},
-	},
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
deleted file mode 100644
index 53831dff98..0000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/idempotency.go
+++ /dev/null
@@ -1,75 +0,0 @@
-package protocol
-
-import (
-	"crypto/rand"
-	"fmt"
-	"reflect"
-)
-
-// RandReader is the random reader the protocol package will use to read
-// random bytes from. This is exported for testing, and should not be used.
-var RandReader = rand.Reader
-
-const idempotencyTokenFillTag = `idempotencyToken`
-
-// CanSetIdempotencyToken returns true if the struct field should be
-// automatically populated with an idempotency token.
-//
-// Only *string and string type fields that are tagged with idempotencyToken
-// which are not already set can be auto filled.
-func CanSetIdempotencyToken(v reflect.Value, f reflect.StructField) bool {
-	switch u := v.Interface().(type) {
-	// To auto fill an idempotency token the field must be a string,
-	// tagged for auto fill, and have a zero value.
-	case *string:
-		return u == nil && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
-	case string:
-		return len(u) == 0 && len(f.Tag.Get(idempotencyTokenFillTag)) != 0
-	}
-
-	return false
-}
-
-// GetIdempotencyToken returns a randomly generated idempotency token.
-func GetIdempotencyToken() string {
-	b := make([]byte, 16)
-	RandReader.Read(b)
-
-	return UUIDVersion4(b)
-}
-
-// SetIdempotencyToken will set the value provided with an idempotency token,
-// given that the value can be set. It will panic if the value is not settable.
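A minimal sketch of the autofill flow above; the input struct and its tag are hypothetical stand-ins for a generated API shape:

    package main

    import (
        "fmt"
        "reflect"

        "github.com/aws/aws-sdk-go/private/protocol"
    )

    // input mimics a generated shape whose ClientToken is tagged for autofill.
    type input struct {
        ClientToken *string `idempotencyToken:"true"`
    }

    func main() {
        in := input{}
        v := reflect.ValueOf(&in).Elem().Field(0)
        f := reflect.TypeOf(in).Field(0)

        // Unset and tagged, so the token may be filled in.
        if protocol.CanSetIdempotencyToken(v, f) {
            protocol.SetIdempotencyToken(v)
        }
        fmt.Println(*in.ClientToken) // a random version-4 UUID
    }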
-func SetIdempotencyToken(v reflect.Value) { - if v.Kind() == reflect.Ptr { - if v.IsNil() && v.CanSet() { - v.Set(reflect.New(v.Type().Elem())) - } - v = v.Elem() - } - v = reflect.Indirect(v) - - if !v.CanSet() { - panic(fmt.Sprintf("unable to set idempotnecy token %v", v)) - } - - b := make([]byte, 16) - _, err := rand.Read(b) - if err != nil { - // TODO handle error - return - } - - v.Set(reflect.ValueOf(UUIDVersion4(b))) -} - -// UUIDVersion4 returns a Version 4 random UUID from the byte slice provided -func UUIDVersion4(u []byte) string { - // https://en.wikipedia.org/wiki/Universally_unique_identifier#Version_4_.28random.29 - // 13th character is "4" - u[6] = (u[6] | 0x40) & 0x4F - // 17th character is "8", "9", "a", or "b" - u[8] = (u[8] | 0x80) & 0xBF - - return fmt.Sprintf(`%X-%X-%X-%X-%X`, u[0:4], u[4:6], u[6:8], u[8:10], u[10:]) -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go deleted file mode 100644 index 7ad6742785..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/build.go +++ /dev/null @@ -1,254 +0,0 @@ -// Package jsonutil provides JSON serialisation of AWS requests and responses. -package jsonutil - -import ( - "bytes" - "encoding/base64" - "fmt" - "reflect" - "sort" - "strconv" - "time" - - "github.com/aws/aws-sdk-go/private/protocol" -) - -var timeType = reflect.ValueOf(time.Time{}).Type() -var byteSliceType = reflect.ValueOf([]byte{}).Type() - -// BuildJSON builds a JSON string for a given object v. -func BuildJSON(v interface{}) ([]byte, error) { - var buf bytes.Buffer - - err := buildAny(reflect.ValueOf(v), &buf, "") - return buf.Bytes(), err -} - -func buildAny(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - value = reflect.Indirect(value) - if !value.IsValid() { - return nil - } - - vtype := value.Type() - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if value.Type() != timeType { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return buildStruct(value, buf, tag) - case "list": - return buildList(value, buf, tag) - case "map": - return buildMap(value, buf, tag) - default: - return buildScalar(value, buf, tag) - } -} - -func buildStruct(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - if !value.IsValid() { - return nil - } - - // unwrap payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := value.Type().FieldByName(payload) - tag = field.Tag - value = elemOf(value.FieldByName(payload)) - - if !value.IsValid() { - return nil - } - } - - buf.WriteByte('{') - - t := value.Type() - first := true - for i := 0; i < t.NumField(); i++ { - member := value.Field(i) - field := t.Field(i) - - if field.PkgPath != "" { - continue // ignore unexported fields - } - if field.Tag.Get("json") == "-" { - continue - } - if field.Tag.Get("location") != "" { - continue // ignore non-body elements - } - - if protocol.CanSetIdempotencyToken(member, field) { - token := protocol.GetIdempotencyToken() - member = reflect.ValueOf(&token) - } - - if (member.Kind() == reflect.Ptr || member.Kind() == reflect.Slice || member.Kind() == reflect.Map) && 
member.IsNil() { - continue // ignore unset fields - } - - if first { - first = false - } else { - buf.WriteByte(',') - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - writeString(name, buf) - buf.WriteString(`:`) - - err := buildAny(member, buf, field.Tag) - if err != nil { - return err - } - - } - - buf.WriteString("}") - - return nil -} - -func buildList(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("[") - - for i := 0; i < value.Len(); i++ { - buildAny(value.Index(i), buf, "") - - if i < value.Len()-1 { - buf.WriteString(",") - } - } - - buf.WriteString("]") - - return nil -} - -type sortedValues []reflect.Value - -func (sv sortedValues) Len() int { return len(sv) } -func (sv sortedValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } -func (sv sortedValues) Less(i, j int) bool { return sv[i].String() < sv[j].String() } - -func buildMap(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - buf.WriteString("{") - - var sv sortedValues = value.MapKeys() - sort.Sort(sv) - - for i, k := range sv { - if i > 0 { - buf.WriteByte(',') - } - - writeString(k.String(), buf) - buf.WriteString(`:`) - - buildAny(value.MapIndex(k), buf, "") - } - - buf.WriteString("}") - - return nil -} - -func buildScalar(value reflect.Value, buf *bytes.Buffer, tag reflect.StructTag) error { - switch value.Kind() { - case reflect.String: - writeString(value.String(), buf) - case reflect.Bool: - buf.WriteString(strconv.FormatBool(value.Bool())) - case reflect.Int64: - buf.WriteString(strconv.FormatInt(value.Int(), 10)) - case reflect.Float64: - buf.WriteString(strconv.FormatFloat(value.Float(), 'f', -1, 64)) - default: - switch value.Type() { - case timeType: - converted := value.Interface().(time.Time) - buf.WriteString(strconv.FormatInt(converted.UTC().Unix(), 10)) - case byteSliceType: - if !value.IsNil() { - converted := value.Interface().([]byte) - buf.WriteByte('"') - if len(converted) < 1024 { - // for small buffers, using Encode directly is much faster. - dst := make([]byte, base64.StdEncoding.EncodedLen(len(converted))) - base64.StdEncoding.Encode(dst, converted) - buf.Write(dst) - } else { - // for large buffers, avoid unnecessary extra temporary - // buffer space. - enc := base64.NewEncoder(base64.StdEncoding, buf) - enc.Write(converted) - enc.Close() - } - buf.WriteByte('"') - } - default: - return fmt.Errorf("unsupported JSON value %v (%s)", value.Interface(), value.Type()) - } - } - return nil -} - -func writeString(s string, buf *bytes.Buffer) { - buf.WriteByte('"') - for _, r := range s { - if r == '"' { - buf.WriteString(`\"`) - } else if r == '\\' { - buf.WriteString(`\\`) - } else if r == '\b' { - buf.WriteString(`\b`) - } else if r == '\f' { - buf.WriteString(`\f`) - } else if r == '\r' { - buf.WriteString(`\r`) - } else if r == '\t' { - buf.WriteString(`\t`) - } else if r == '\n' { - buf.WriteString(`\n`) - } else if r < 32 { - fmt.Fprintf(buf, "\\u%0.4x", r) - } else { - buf.WriteRune(r) - } - } - buf.WriteByte('"') -} - -// Returns the reflection element of a value, if it is a pointer. 
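A sketch of the builder above applied to a hand-written shape; the struct and its tags are hypothetical stand-ins for generated code:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
    )

    // shape mimics a generated structure: locationName renames the key,
    // and unset pointer fields are omitted from the output.
    type shape struct {
        Name  *string `locationName:"name" type:"string"`
        Count *int64  `locationName:"count" type:"integer"`
        Notes *string `locationName:"notes" type:"string"`
    }

    func main() {
        name, count := "demo", int64(2)
        b, err := jsonutil.BuildJSON(&shape{Name: &name, Count: &count})
        if err != nil {
            panic(err)
        }
        fmt.Println(string(b)) // {"name":"demo","count":2}
    }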
-func elemOf(value reflect.Value) reflect.Value { - for value.Kind() == reflect.Ptr { - value = value.Elem() - } - return value -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go deleted file mode 100644 index fea5356136..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/json/jsonutil/unmarshal.go +++ /dev/null @@ -1,213 +0,0 @@ -package jsonutil - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "io/ioutil" - "reflect" - "time" -) - -// UnmarshalJSON reads a stream and unmarshals the results in object v. -func UnmarshalJSON(v interface{}, stream io.Reader) error { - var out interface{} - - b, err := ioutil.ReadAll(stream) - if err != nil { - return err - } - - if len(b) == 0 { - return nil - } - - if err := json.Unmarshal(b, &out); err != nil { - return err - } - - return unmarshalAny(reflect.ValueOf(v), out, "") -} - -func unmarshalAny(value reflect.Value, data interface{}, tag reflect.StructTag) error { - vtype := value.Type() - if vtype.Kind() == reflect.Ptr { - vtype = vtype.Elem() // check kind of actual element type - } - - t := tag.Get("type") - if t == "" { - switch vtype.Kind() { - case reflect.Struct: - // also it can't be a time object - if _, ok := value.Interface().(*time.Time); !ok { - t = "structure" - } - case reflect.Slice: - // also it can't be a byte slice - if _, ok := value.Interface().([]byte); !ok { - t = "list" - } - case reflect.Map: - t = "map" - } - } - - switch t { - case "structure": - if field, ok := vtype.FieldByName("_"); ok { - tag = field.Tag - } - return unmarshalStruct(value, data, tag) - case "list": - return unmarshalList(value, data, tag) - case "map": - return unmarshalMap(value, data, tag) - default: - return unmarshalScalar(value, data, tag) - } -} - -func unmarshalStruct(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a structure (%#v)", data) - } - - t := value.Type() - if value.Kind() == reflect.Ptr { - if value.IsNil() { // create the structure if it's nil - s := reflect.New(value.Type().Elem()) - value.Set(s) - value = s - } - - value = value.Elem() - t = t.Elem() - } - - // unwrap any payloads - if payload := tag.Get("payload"); payload != "" { - field, _ := t.FieldByName(payload) - return unmarshalAny(value.FieldByName(payload), data, field.Tag) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - if field.PkgPath != "" { - continue // ignore unexported fields - } - - // figure out what this field is called - name := field.Name - if locName := field.Tag.Get("locationName"); locName != "" { - name = locName - } - - member := value.FieldByIndex(field.Index) - err := unmarshalAny(member, mapData[name], field.Tag) - if err != nil { - return err - } - } - return nil -} - -func unmarshalList(value reflect.Value, data interface{}, tag reflect.StructTag) error { - if data == nil { - return nil - } - listData, ok := data.([]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a list (%#v)", data) - } - - if value.IsNil() { - l := len(listData) - value.Set(reflect.MakeSlice(value.Type(), l, l)) - } - - for i, c := range listData { - err := unmarshalAny(value.Index(i), c, "") - if err != nil { - return err - } - } - - return nil -} - -func unmarshalMap(value reflect.Value, data interface{}, tag 
reflect.StructTag) error { - if data == nil { - return nil - } - mapData, ok := data.(map[string]interface{}) - if !ok { - return fmt.Errorf("JSON value is not a map (%#v)", data) - } - - if value.IsNil() { - value.Set(reflect.MakeMap(value.Type())) - } - - for k, v := range mapData { - kvalue := reflect.ValueOf(k) - vvalue := reflect.New(value.Type().Elem()).Elem() - - unmarshalAny(vvalue, v, "") - value.SetMapIndex(kvalue, vvalue) - } - - return nil -} - -func unmarshalScalar(value reflect.Value, data interface{}, tag reflect.StructTag) error { - errf := func() error { - return fmt.Errorf("unsupported value: %v (%s)", value.Interface(), value.Type()) - } - - switch d := data.(type) { - case nil: - return nil // nothing to do here - case string: - switch value.Interface().(type) { - case *string: - value.Set(reflect.ValueOf(&d)) - case []byte: - b, err := base64.StdEncoding.DecodeString(d) - if err != nil { - return err - } - value.Set(reflect.ValueOf(b)) - default: - return errf() - } - case float64: - switch value.Interface().(type) { - case *int64: - di := int64(d) - value.Set(reflect.ValueOf(&di)) - case *float64: - value.Set(reflect.ValueOf(&d)) - case *time.Time: - t := time.Unix(int64(d), 0).UTC() - value.Set(reflect.ValueOf(&t)) - default: - return errf() - } - case bool: - switch value.Interface().(type) { - case *bool: - value.Set(reflect.ValueOf(&d)) - default: - return errf() - } - default: - return fmt.Errorf("unsupported JSON value (%v)", data) - } - return nil -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go deleted file mode 100644 index 7aff0e0fa4..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/jsonrpc/jsonrpc.go +++ /dev/null @@ -1,111 +0,0 @@ -// Package jsonrpc provides JSON RPC utilities for serialisation of AWS -// requests and responses. -package jsonrpc - -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/input/json.json build_test.go -//go:generate go run ../../../models/protocol_tests/generate.go ../../../models/protocol_tests/output/json.json unmarshal_test.go - -import ( - "encoding/json" - "io/ioutil" - "strings" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil" - "github.com/aws/aws-sdk-go/private/protocol/rest" -) - -var emptyJSON = []byte("{}") - -// BuildHandler is a named request handler for building jsonrpc protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Build", Fn: Build} - -// UnmarshalHandler is a named request handler for unmarshaling jsonrpc protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.jsonrpc.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling jsonrpc protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalMeta", Fn: UnmarshalMeta} - -// UnmarshalErrorHandler is a named request handler for unmarshaling jsonrpc protocol request errors -var UnmarshalErrorHandler = request.NamedHandler{Name: "awssdk.jsonrpc.UnmarshalError", Fn: UnmarshalError} - -// Build builds a JSON payload for a JSON RPC request. 
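The matching decode direction for the jsonutil package above, using the same hypothetical shape style:

    package main

    import (
        "fmt"
        "strings"

        "github.com/aws/aws-sdk-go/private/protocol/json/jsonutil"
    )

    type output struct {
        Name  *string `locationName:"name" type:"string"`
        Count *int64  `locationName:"count" type:"integer"`
    }

    func main() {
        body := strings.NewReader(`{"name":"demo","count":2}`)

        out := &output{}
        if err := jsonutil.UnmarshalJSON(out, body); err != nil {
            panic(err)
        }
        fmt.Println(*out.Name, *out.Count) // demo 2
    }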
-func Build(req *request.Request) { - var buf []byte - var err error - if req.ParamsFilled() { - buf, err = jsonutil.BuildJSON(req.Params) - if err != nil { - req.Error = awserr.New("SerializationError", "failed encoding JSON RPC request", err) - return - } - } else { - buf = emptyJSON - } - - if req.ClientInfo.TargetPrefix != "" || string(buf) != "{}" { - req.SetBufferBody(buf) - } - - if req.ClientInfo.TargetPrefix != "" { - target := req.ClientInfo.TargetPrefix + "." + req.Operation.Name - req.HTTPRequest.Header.Add("X-Amz-Target", target) - } - if req.ClientInfo.JSONVersion != "" { - jsonVersion := req.ClientInfo.JSONVersion - req.HTTPRequest.Header.Add("Content-Type", "application/x-amz-json-"+jsonVersion) - } -} - -// Unmarshal unmarshals a response for a JSON RPC service. -func Unmarshal(req *request.Request) { - defer req.HTTPResponse.Body.Close() - if req.DataFilled() { - err := jsonutil.UnmarshalJSON(req.Data, req.HTTPResponse.Body) - if err != nil { - req.Error = awserr.New("SerializationError", "failed decoding JSON RPC response", err) - } - } - return -} - -// UnmarshalMeta unmarshals headers from a response for a JSON RPC service. -func UnmarshalMeta(req *request.Request) { - rest.UnmarshalMeta(req) -} - -// UnmarshalError unmarshals an error response for a JSON RPC service. -func UnmarshalError(req *request.Request) { - defer req.HTTPResponse.Body.Close() - bodyBytes, err := ioutil.ReadAll(req.HTTPResponse.Body) - if err != nil { - req.Error = awserr.New("SerializationError", "failed reading JSON RPC error response", err) - return - } - if len(bodyBytes) == 0 { - req.Error = awserr.NewRequestFailure( - awserr.New("SerializationError", req.HTTPResponse.Status, nil), - req.HTTPResponse.StatusCode, - "", - ) - return - } - var jsonErr jsonErrorResponse - if err := json.Unmarshal(bodyBytes, &jsonErr); err != nil { - req.Error = awserr.New("SerializationError", "failed decoding JSON RPC error response", err) - return - } - - codes := strings.SplitN(jsonErr.Code, "#", 2) - req.Error = awserr.NewRequestFailure( - awserr.New(codes[len(codes)-1], jsonErr.Message, nil), - req.HTTPResponse.StatusCode, - req.RequestID, - ) -} - -type jsonErrorResponse struct { - Code string `json:"__type"` - Message string `json:"message"` -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go deleted file mode 100644 index 5f412516dc..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/build.go +++ /dev/null @@ -1,256 +0,0 @@ -// Package rest provides RESTful serialization of AWS requests and responses. -package rest - -import ( - "bytes" - "encoding/base64" - "fmt" - "io" - "net/http" - "net/url" - "path" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// RFC822 returns an RFC822 formatted timestamp for AWS protocols -const RFC822 = "Mon, 2 Jan 2006 15:04:05 GMT" - -// Whether the byte value can be sent without escaping in AWS URLs -var noEscape [256]bool - -var errValueNotSet = fmt.Errorf("value not set") - -func init() { - for i := 0; i < len(noEscape); i++ { - // AWS expects every character except these to be escaped - noEscape[i] = (i >= 'A' && i <= 'Z') || - (i >= 'a' && i <= 'z') || - (i >= '0' && i <= '9') || - i == '-' || - i == '.' 
|| - i == '_' || - i == '~' - } -} - -// BuildHandler is a named request handler for building rest protocol requests -var BuildHandler = request.NamedHandler{Name: "awssdk.rest.Build", Fn: Build} - -// Build builds the REST component of a service request. -func Build(r *request.Request) { - if r.ParamsFilled() { - v := reflect.ValueOf(r.Params).Elem() - buildLocationElements(r, v) - buildBody(r, v) - } -} - -func buildLocationElements(r *request.Request, v reflect.Value) { - query := r.HTTPRequest.URL.Query() - - for i := 0; i < v.NumField(); i++ { - m := v.Field(i) - if n := v.Type().Field(i).Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - field := v.Type().Field(i) - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - if m.Kind() == reflect.Ptr { - m = m.Elem() - } - if !m.IsValid() { - continue - } - - var err error - switch field.Tag.Get("location") { - case "headers": // header maps - err = buildHeaderMap(&r.HTTPRequest.Header, m, field.Tag.Get("locationName")) - case "header": - err = buildHeader(&r.HTTPRequest.Header, m, name) - case "uri": - err = buildURI(r.HTTPRequest.URL, m, name) - case "querystring": - err = buildQueryString(query, m, name) - } - r.Error = err - } - if r.Error != nil { - return - } - } - - r.HTTPRequest.URL.RawQuery = query.Encode() - updatePath(r.HTTPRequest.URL, r.HTTPRequest.URL.Path) -} - -func buildBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := reflect.Indirect(v.FieldByName(payloadName)) - if payload.IsValid() && payload.Interface() != nil { - switch reader := payload.Interface().(type) { - case io.ReadSeeker: - r.SetReaderBody(reader) - case []byte: - r.SetBufferBody(reader) - case string: - r.SetStringBody(reader) - default: - r.Error = awserr.New("SerializationError", - "failed to encode REST request", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } -} - -func buildHeader(header *http.Header, v reflect.Value, name string) error { - str, err := convertType(v) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - header.Add(name, str) - - return nil -} - -func buildHeaderMap(header *http.Header, v reflect.Value, prefix string) error { - for _, key := range v.MapKeys() { - str, err := convertType(v.MapIndex(key)) - if err == errValueNotSet { - continue - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - - } - - header.Add(prefix+key.String(), str) - } - return nil -} - -func buildURI(u *url.URL, v reflect.Value, name string) error { - value, err := convertType(v) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - - uri := u.Path - uri = strings.Replace(uri, "{"+name+"}", EscapePath(value, true), -1) - uri = strings.Replace(uri, "{"+name+"+}", EscapePath(value, false), -1) - u.Path = uri - - return nil -} - -func buildQueryString(query url.Values, v reflect.Value, name string) error { - switch value := v.Interface().(type) { - case []*string: - for _, item := range value { - query.Add(name, *item) - } - case map[string]*string: - for key, item := range value { - 
query.Add(key, *item) - } - case map[string][]*string: - for key, items := range value { - for _, item := range items { - query.Add(key, *item) - } - } - default: - str, err := convertType(v) - if err == errValueNotSet { - return nil - } else if err != nil { - return awserr.New("SerializationError", "failed to encode REST request", err) - } - query.Set(name, str) - } - - return nil -} - -func updatePath(url *url.URL, urlPath string) { - scheme, query := url.Scheme, url.RawQuery - - hasSlash := strings.HasSuffix(urlPath, "/") - - // clean up path - urlPath = path.Clean(urlPath) - if hasSlash && !strings.HasSuffix(urlPath, "/") { - urlPath += "/" - } - - // get formatted URL minus scheme so we can build this into Opaque - url.Scheme, url.Path, url.RawQuery = "", "", "" - s := url.String() - url.Scheme = scheme - url.RawQuery = query - - // build opaque URI - url.Opaque = s + urlPath -} - -// EscapePath escapes part of a URL path in Amazon style -func EscapePath(path string, encodeSep bool) string { - var buf bytes.Buffer - for i := 0; i < len(path); i++ { - c := path[i] - if noEscape[c] || (c == '/' && !encodeSep) { - buf.WriteByte(c) - } else { - fmt.Fprintf(&buf, "%%%02X", c) - } - } - return buf.String() -} - -func convertType(v reflect.Value) (string, error) { - v = reflect.Indirect(v) - if !v.IsValid() { - return "", errValueNotSet - } - - var str string - switch value := v.Interface().(type) { - case string: - str = value - case []byte: - str = base64.StdEncoding.EncodeToString(value) - case bool: - str = strconv.FormatBool(value) - case int64: - str = strconv.FormatInt(value, 10) - case float64: - str = strconv.FormatFloat(value, 'f', -1, 64) - case time.Time: - str = value.UTC().Format(RFC822) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return "", err - } - return str, nil -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go deleted file mode 100644 index 4366de2e1e..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/payload.go +++ /dev/null @@ -1,45 +0,0 @@ -package rest - -import "reflect" - -// PayloadMember returns the payload field member of i if there is one, or nil. -func PayloadMember(i interface{}) interface{} { - if i == nil { - return nil - } - - v := reflect.ValueOf(i).Elem() - if !v.IsValid() { - return nil - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - field, _ := v.Type().FieldByName(payloadName) - if field.Tag.Get("type") != "structure" { - return nil - } - - payload := v.FieldByName(payloadName) - if payload.IsValid() || (payload.Kind() == reflect.Ptr && !payload.IsNil()) { - return payload.Interface() - } - } - } - return nil -} - -// PayloadType returns the type of a payload field member of i if there is one, or "". 
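EscapePath above is self-contained enough to show directly; the object key is arbitrary:

    package main

    import (
        "fmt"

        "github.com/aws/aws-sdk-go/private/protocol/rest"
    )

    func main() {
        // encodeSep=true escapes "/" too, for {key}-style URI placeholders;
        // greedy {key+} placeholders keep the separator.
        fmt.Println(rest.EscapePath("photos/2016/dog.jpg", true))
        // photos%2F2016%2Fdog.jpg
        fmt.Println(rest.EscapePath("photos/2016/dog.jpg", false))
        // photos/2016/dog.jpg
    }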
-func PayloadType(i interface{}) string { - v := reflect.Indirect(reflect.ValueOf(i)) - if !v.IsValid() { - return "" - } - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - if member, ok := v.Type().FieldByName(payloadName); ok { - return member.Tag.Get("type") - } - } - } - return "" -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go deleted file mode 100644 index 2cba1d9aa7..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/rest/unmarshal.go +++ /dev/null @@ -1,198 +0,0 @@ -package rest - -import ( - "encoding/base64" - "fmt" - "io" - "io/ioutil" - "net/http" - "reflect" - "strconv" - "strings" - "time" - - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/awserr" - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalHandler is a named request handler for unmarshaling rest protocol requests -var UnmarshalHandler = request.NamedHandler{Name: "awssdk.rest.Unmarshal", Fn: Unmarshal} - -// UnmarshalMetaHandler is a named request handler for unmarshaling rest protocol request metadata -var UnmarshalMetaHandler = request.NamedHandler{Name: "awssdk.rest.UnmarshalMeta", Fn: UnmarshalMeta} - -// Unmarshal unmarshals the REST component of a response in a REST service. -func Unmarshal(r *request.Request) { - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalBody(r, v) - } -} - -// UnmarshalMeta unmarshals the REST metadata of a response in a REST service -func UnmarshalMeta(r *request.Request) { - r.RequestID = r.HTTPResponse.Header.Get("X-Amzn-Requestid") - if r.RequestID == "" { - // Alternative version of request id in the header - r.RequestID = r.HTTPResponse.Header.Get("X-Amz-Request-Id") - } - if r.DataFilled() { - v := reflect.Indirect(reflect.ValueOf(r.Data)) - unmarshalLocationElements(r, v) - } -} - -func unmarshalBody(r *request.Request, v reflect.Value) { - if field, ok := v.Type().FieldByName("_"); ok { - if payloadName := field.Tag.Get("payload"); payloadName != "" { - pfield, _ := v.Type().FieldByName(payloadName) - if ptag := pfield.Tag.Get("type"); ptag != "" && ptag != "structure" { - payload := v.FieldByName(payloadName) - if payload.IsValid() { - switch payload.Interface().(type) { - case []byte: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - payload.Set(reflect.ValueOf(b)) - } - case *string: - defer r.HTTPResponse.Body.Close() - b, err := ioutil.ReadAll(r.HTTPResponse.Body) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - } else { - str := string(b) - payload.Set(reflect.ValueOf(&str)) - } - default: - switch payload.Type().String() { - case "io.ReadSeeker": - payload.Set(reflect.ValueOf(aws.ReadSeekCloser(r.HTTPResponse.Body))) - case "aws.ReadSeekCloser", "io.ReadCloser": - payload.Set(reflect.ValueOf(r.HTTPResponse.Body)) - default: - io.Copy(ioutil.Discard, r.HTTPResponse.Body) - defer r.HTTPResponse.Body.Close() - r.Error = awserr.New("SerializationError", - "failed to decode REST response", - fmt.Errorf("unknown payload type %s", payload.Type())) - } - } - } - } - } - } -} - -func unmarshalLocationElements(r *request.Request, v reflect.Value) { - for i := 0; i < v.NumField(); i++ { - m, field := v.Field(i), 
v.Type().Field(i) - if n := field.Name; n[0:1] == strings.ToLower(n[0:1]) { - continue - } - - if m.IsValid() { - name := field.Tag.Get("locationName") - if name == "" { - name = field.Name - } - - switch field.Tag.Get("location") { - case "statusCode": - unmarshalStatusCode(m, r.HTTPResponse.StatusCode) - case "header": - err := unmarshalHeader(m, r.HTTPResponse.Header.Get(name)) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break - } - case "headers": - prefix := field.Tag.Get("locationName") - err := unmarshalHeaderMap(m, r.HTTPResponse.Header, prefix) - if err != nil { - r.Error = awserr.New("SerializationError", "failed to decode REST response", err) - break - } - } - } - if r.Error != nil { - return - } - } -} - -func unmarshalStatusCode(v reflect.Value, statusCode int) { - if !v.IsValid() { - return - } - - switch v.Interface().(type) { - case *int64: - s := int64(statusCode) - v.Set(reflect.ValueOf(&s)) - } -} - -func unmarshalHeaderMap(r reflect.Value, headers http.Header, prefix string) error { - switch r.Interface().(type) { - case map[string]*string: // we only support string map value types - out := map[string]*string{} - for k, v := range headers { - k = http.CanonicalHeaderKey(k) - if strings.HasPrefix(strings.ToLower(k), strings.ToLower(prefix)) { - out[k[len(prefix):]] = &v[0] - } - } - r.Set(reflect.ValueOf(out)) - } - return nil -} - -func unmarshalHeader(v reflect.Value, header string) error { - if !v.IsValid() || (header == "" && v.Elem().Kind() != reflect.String) { - return nil - } - - switch v.Interface().(type) { - case *string: - v.Set(reflect.ValueOf(&header)) - case []byte: - b, err := base64.StdEncoding.DecodeString(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *bool: - b, err := strconv.ParseBool(header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&b)) - case *int64: - i, err := strconv.ParseInt(header, 10, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&i)) - case *float64: - f, err := strconv.ParseFloat(header, 64) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&f)) - case *time.Time: - t, err := time.Parse(RFC822, header) - if err != nil { - return err - } - v.Set(reflect.ValueOf(&t)) - default: - err := fmt.Errorf("Unsupported value for param %v (%s)", v.Interface(), v.Type()) - return err - } - return nil -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go b/vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go deleted file mode 100644 index da1a68111d..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/private/protocol/unmarshal.go +++ /dev/null @@ -1,21 +0,0 @@ -package protocol - -import ( - "io" - "io/ioutil" - - "github.com/aws/aws-sdk-go/aws/request" -) - -// UnmarshalDiscardBodyHandler is a named request handler to empty and close a response's body -var UnmarshalDiscardBodyHandler = request.NamedHandler{Name: "awssdk.shared.UnmarshalDiscardBody", Fn: UnmarshalDiscardBody} - -// UnmarshalDiscardBody is a request handler to empty a response's body and closing it. 
-func UnmarshalDiscardBody(r *request.Request) {
-	if r.HTTPResponse == nil || r.HTTPResponse.Body == nil {
-		return
-	}
-
-	io.Copy(ioutil.Discard, r.HTTPResponse.Body)
-	r.HTTPResponse.Body.Close()
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go b/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go
deleted file mode 100644
index 244c86da05..0000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/header_rules.go
+++ /dev/null
@@ -1,82 +0,0 @@
-package v4
-
-import (
-	"net/http"
-	"strings"
-)
-
-// rules houses a set of rules needed for the validation of a
-// string value
-type rules []rule
-
-// rule interface allows for more flexible rules and just simply
-// checks whether or not a value adheres to that rule
-type rule interface {
-	IsValid(value string) bool
-}
-
-// IsValid will iterate through all rules and see if any rules
-// apply to the value and supports nested rules
-func (r rules) IsValid(value string) bool {
-	for _, rule := range r {
-		if rule.IsValid(value) {
-			return true
-		}
-	}
-	return false
-}
-
-// mapRule generic rule for maps
-type mapRule map[string]struct{}
-
-// IsValid for the map rule satisfies whether it exists in the map
-func (m mapRule) IsValid(value string) bool {
-	_, ok := m[value]
-	return ok
-}
-
-// whitelist is a generic rule for whitelisting
-type whitelist struct {
-	rule
-}
-
-// IsValid for whitelist checks if the value is within the whitelist
-func (w whitelist) IsValid(value string) bool {
-	return w.rule.IsValid(value)
-}
-
-// blacklist is a generic rule for blacklisting
-type blacklist struct {
-	rule
-}
-
-// IsValid for blacklist checks that the value is not within the blacklist
-func (b blacklist) IsValid(value string) bool {
-	return !b.rule.IsValid(value)
-}
-
-type patterns []string
-
-// IsValid for patterns checks each pattern and returns if a match has
-// been found
-func (p patterns) IsValid(value string) bool {
-	for _, pattern := range p {
-		if strings.HasPrefix(http.CanonicalHeaderKey(value), pattern) {
-			return true
-		}
-	}
-	return false
-}
-
-// inclusiveRules allows rules to depend on one another
-type inclusiveRules []rule
-
-// IsValid will return true if all rules are true
-func (r inclusiveRules) IsValid(value string) bool {
-	for _, rule := range r {
-		if !rule.IsValid(value) {
-			return false
-		}
-	}
-	return true
-}
diff --git a/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go b/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
deleted file mode 100644
index 4765800562..0000000000
--- a/vendor/src/github.com/aws/aws-sdk-go/private/signer/v4/v4.go
+++ /dev/null
@@ -1,465 +0,0 @@
-// Package v4 implements signing for the AWS Signature Version 4 signer.
-package v4
-
-import (
-	"crypto/hmac"
-	"crypto/sha256"
-	"encoding/hex"
-	"fmt"
-	"io"
-	"net/http"
-	"net/url"
-	"sort"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/aws/aws-sdk-go/aws"
-	"github.com/aws/aws-sdk-go/aws/credentials"
-	"github.com/aws/aws-sdk-go/aws/request"
-	"github.com/aws/aws-sdk-go/private/protocol/rest"
-)
-
-const (
-	authHeaderPrefix = "AWS4-HMAC-SHA256"
-	timeFormat       = "20060102T150405Z"
-	shortTimeFormat  = "20060102"
-)
-
-var ignoredHeaders = rules{
-	blacklist{
-		mapRule{
-			"Authorization": struct{}{},
-			"User-Agent":    struct{}{},
-		},
-	},
-}
-
-// requiredSignedHeaders is a whitelist for building canonical headers.
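The rule/whitelist/blacklist pattern above in miniature, as a standalone sketch (these types shadow the unexported ones in the package):

    package main

    import "fmt"

    type rule interface{ IsValid(string) bool }

    type mapRule map[string]struct{}

    func (m mapRule) IsValid(v string) bool { _, ok := m[v]; return ok }

    // blacklist inverts whatever rule it wraps.
    type blacklist struct{ rule }

    func (b blacklist) IsValid(v string) bool { return !b.rule.IsValid(v) }

    func main() {
        ignored := blacklist{mapRule{"Authorization": {}, "User-Agent": {}}}
        fmt.Println(ignored.IsValid("Content-Type"))  // true: include in signing
        fmt.Println(ignored.IsValid("Authorization")) // false: skip
    }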
-var requiredSignedHeaders = rules{ - whitelist{ - mapRule{ - "Cache-Control": struct{}{}, - "Content-Disposition": struct{}{}, - "Content-Encoding": struct{}{}, - "Content-Language": struct{}{}, - "Content-Md5": struct{}{}, - "Content-Type": struct{}{}, - "Expires": struct{}{}, - "If-Match": struct{}{}, - "If-Modified-Since": struct{}{}, - "If-None-Match": struct{}{}, - "If-Unmodified-Since": struct{}{}, - "Range": struct{}{}, - "X-Amz-Acl": struct{}{}, - "X-Amz-Copy-Source": struct{}{}, - "X-Amz-Copy-Source-If-Match": struct{}{}, - "X-Amz-Copy-Source-If-Modified-Since": struct{}{}, - "X-Amz-Copy-Source-If-None-Match": struct{}{}, - "X-Amz-Copy-Source-If-Unmodified-Since": struct{}{}, - "X-Amz-Copy-Source-Range": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Copy-Source-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Grant-Full-control": struct{}{}, - "X-Amz-Grant-Read": struct{}{}, - "X-Amz-Grant-Read-Acp": struct{}{}, - "X-Amz-Grant-Write": struct{}{}, - "X-Amz-Grant-Write-Acp": struct{}{}, - "X-Amz-Metadata-Directive": struct{}{}, - "X-Amz-Mfa": struct{}{}, - "X-Amz-Request-Payer": struct{}{}, - "X-Amz-Server-Side-Encryption": struct{}{}, - "X-Amz-Server-Side-Encryption-Aws-Kms-Key-Id": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Algorithm": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key": struct{}{}, - "X-Amz-Server-Side-Encryption-Customer-Key-Md5": struct{}{}, - "X-Amz-Storage-Class": struct{}{}, - "X-Amz-Website-Redirect-Location": struct{}{}, - }, - }, - patterns{"X-Amz-Meta-"}, -} - -// allowedHoisting is a whitelist for build query headers. The boolean value -// represents whether or not it is a pattern. -var allowedQueryHoisting = inclusiveRules{ - blacklist{requiredSignedHeaders}, - patterns{"X-Amz-"}, -} - -type signer struct { - Request *http.Request - Time time.Time - ExpireTime time.Duration - ServiceName string - Region string - CredValues credentials.Value - Credentials *credentials.Credentials - Query url.Values - Body io.ReadSeeker - Debug aws.LogLevelType - Logger aws.Logger - - isPresign bool - formattedTime string - formattedShortTime string - - signedHeaders string - canonicalHeaders string - canonicalString string - credentialString string - stringToSign string - signature string - authorization string - notHoist bool - signedHeaderVals http.Header -} - -// Sign requests with signature version 4. -// -// Will sign the requests with the service config's Credentials object -// Signing is skipped if the credentials is the credentials.AnonymousCredentials -// object. -func Sign(req *request.Request) { - // If the request does not need to be signed ignore the signing of the - // request if the AnonymousCredentials object is used. 
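[Editor's example] The timeFormat and shortTimeFormat constants at the top of this file feed buildTime further down: the long form becomes the X-Amz-Date value, the short form anchors the credential scope. A quick standard-library illustration of the two layouts:

    package main

    import (
    	"fmt"
    	"time"
    )

    func main() {
    	t := time.Date(2016, 9, 29, 11, 19, 31, 0, time.UTC)
    	fmt.Println(t.Format("20060102T150405Z")) // 20160929T111931Z -> X-Amz-Date
    	fmt.Println(t.Format("20060102"))         // 20160929        -> credential scope
    }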
- if req.Config.Credentials == credentials.AnonymousCredentials { - return - } - - region := req.ClientInfo.SigningRegion - if region == "" { - region = aws.StringValue(req.Config.Region) - } - - name := req.ClientInfo.SigningName - if name == "" { - name = req.ClientInfo.ServiceName - } - - s := signer{ - Request: req.HTTPRequest, - Time: req.Time, - ExpireTime: req.ExpireTime, - Query: req.HTTPRequest.URL.Query(), - Body: req.Body, - ServiceName: name, - Region: region, - Credentials: req.Config.Credentials, - Debug: req.Config.LogLevel.Value(), - Logger: req.Config.Logger, - notHoist: req.NotHoist, - } - - req.Error = s.sign() - req.Time = s.Time - req.SignedHeaderVals = s.signedHeaderVals -} - -func (v4 *signer) sign() error { - if v4.ExpireTime != 0 { - v4.isPresign = true - } - - if v4.isRequestSigned() { - if !v4.Credentials.IsExpired() && time.Now().Before(v4.Time.Add(10*time.Minute)) { - // If the request is already signed, and the credentials have not - // expired, and the request is not too old ignore the signing request. - return nil - } - v4.Time = time.Now() - - // The credentials have expired for this request. The current signing - // is invalid, and needs to be request because the request will fail. - if v4.isPresign { - v4.removePresign() - // Update the request's query string to ensure the values stays in - // sync in the case retrieving the new credentials fails. - v4.Request.URL.RawQuery = v4.Query.Encode() - } - } - - var err error - v4.CredValues, err = v4.Credentials.Get() - if err != nil { - return err - } - - if v4.isPresign { - v4.Query.Set("X-Amz-Algorithm", authHeaderPrefix) - if v4.CredValues.SessionToken != "" { - v4.Query.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) - } else { - v4.Query.Del("X-Amz-Security-Token") - } - } else if v4.CredValues.SessionToken != "" { - v4.Request.Header.Set("X-Amz-Security-Token", v4.CredValues.SessionToken) - } - - v4.build() - - if v4.Debug.Matches(aws.LogDebugWithSigning) { - v4.logSigningInfo() - } - - return nil -} - -const logSignInfoMsg = `DEBUG: Request Signiture: ----[ CANONICAL STRING ]----------------------------- -%s ----[ STRING TO SIGN ]-------------------------------- -%s%s ------------------------------------------------------` -const logSignedURLMsg = ` ----[ SIGNED URL ]------------------------------------ -%s` - -func (v4 *signer) logSigningInfo() { - signedURLMsg := "" - if v4.isPresign { - signedURLMsg = fmt.Sprintf(logSignedURLMsg, v4.Request.URL.String()) - } - msg := fmt.Sprintf(logSignInfoMsg, v4.canonicalString, v4.stringToSign, signedURLMsg) - v4.Logger.Log(msg) -} - -func (v4 *signer) build() { - - v4.buildTime() // no depends - v4.buildCredentialString() // no depends - - unsignedHeaders := v4.Request.Header - if v4.isPresign { - if !v4.notHoist { - urlValues := url.Values{} - urlValues, unsignedHeaders = buildQuery(allowedQueryHoisting, unsignedHeaders) // no depends - for k := range urlValues { - v4.Query[k] = urlValues[k] - } - } - } - - v4.buildCanonicalHeaders(ignoredHeaders, unsignedHeaders) - v4.buildCanonicalString() // depends on canon headers / signed headers - v4.buildStringToSign() // depends on canon string - v4.buildSignature() // depends on string to sign - - if v4.isPresign { - v4.Request.URL.RawQuery += "&X-Amz-Signature=" + v4.signature - } else { - parts := []string{ - authHeaderPrefix + " Credential=" + v4.CredValues.AccessKeyID + "/" + v4.credentialString, - "SignedHeaders=" + v4.signedHeaders, - "Signature=" + v4.signature, - } - 
v4.Request.Header.Set("Authorization", strings.Join(parts, ", ")) - } -} - -func (v4 *signer) buildTime() { - v4.formattedTime = v4.Time.UTC().Format(timeFormat) - v4.formattedShortTime = v4.Time.UTC().Format(shortTimeFormat) - - if v4.isPresign { - duration := int64(v4.ExpireTime / time.Second) - v4.Query.Set("X-Amz-Date", v4.formattedTime) - v4.Query.Set("X-Amz-Expires", strconv.FormatInt(duration, 10)) - } else { - v4.Request.Header.Set("X-Amz-Date", v4.formattedTime) - } -} - -func (v4 *signer) buildCredentialString() { - v4.credentialString = strings.Join([]string{ - v4.formattedShortTime, - v4.Region, - v4.ServiceName, - "aws4_request", - }, "/") - - if v4.isPresign { - v4.Query.Set("X-Amz-Credential", v4.CredValues.AccessKeyID+"/"+v4.credentialString) - } -} - -func buildQuery(r rule, header http.Header) (url.Values, http.Header) { - query := url.Values{} - unsignedHeaders := http.Header{} - for k, h := range header { - if r.IsValid(k) { - query[k] = h - } else { - unsignedHeaders[k] = h - } - } - - return query, unsignedHeaders -} -func (v4 *signer) buildCanonicalHeaders(r rule, header http.Header) { - var headers []string - headers = append(headers, "host") - for k, v := range header { - canonicalKey := http.CanonicalHeaderKey(k) - if !r.IsValid(canonicalKey) { - continue // ignored header - } - if v4.signedHeaderVals == nil { - v4.signedHeaderVals = make(http.Header) - } - - lowerCaseKey := strings.ToLower(k) - if _, ok := v4.signedHeaderVals[lowerCaseKey]; ok { - // include additional values - v4.signedHeaderVals[lowerCaseKey] = append(v4.signedHeaderVals[lowerCaseKey], v...) - continue - } - - headers = append(headers, lowerCaseKey) - v4.signedHeaderVals[lowerCaseKey] = v - } - sort.Strings(headers) - - v4.signedHeaders = strings.Join(headers, ";") - - if v4.isPresign { - v4.Query.Set("X-Amz-SignedHeaders", v4.signedHeaders) - } - - headerValues := make([]string, len(headers)) - for i, k := range headers { - if k == "host" { - headerValues[i] = "host:" + v4.Request.URL.Host - } else { - headerValues[i] = k + ":" + - strings.Join(v4.signedHeaderVals[k], ",") - } - } - - v4.canonicalHeaders = strings.Join(stripExcessSpaces(headerValues), "\n") -} - -func (v4 *signer) buildCanonicalString() { - v4.Request.URL.RawQuery = strings.Replace(v4.Query.Encode(), "+", "%20", -1) - uri := v4.Request.URL.Opaque - if uri != "" { - uri = "/" + strings.Join(strings.Split(uri, "/")[3:], "/") - } else { - uri = v4.Request.URL.Path - } - if uri == "" { - uri = "/" - } - - if v4.ServiceName != "s3" { - uri = rest.EscapePath(uri, false) - } - - v4.canonicalString = strings.Join([]string{ - v4.Request.Method, - uri, - v4.Request.URL.RawQuery, - v4.canonicalHeaders + "\n", - v4.signedHeaders, - v4.bodyDigest(), - }, "\n") -} - -func (v4 *signer) buildStringToSign() { - v4.stringToSign = strings.Join([]string{ - authHeaderPrefix, - v4.formattedTime, - v4.credentialString, - hex.EncodeToString(makeSha256([]byte(v4.canonicalString))), - }, "\n") -} - -func (v4 *signer) buildSignature() { - secret := v4.CredValues.SecretAccessKey - date := makeHmac([]byte("AWS4"+secret), []byte(v4.formattedShortTime)) - region := makeHmac(date, []byte(v4.Region)) - service := makeHmac(region, []byte(v4.ServiceName)) - credentials := makeHmac(service, []byte("aws4_request")) - signature := makeHmac(credentials, []byte(v4.stringToSign)) - v4.signature = hex.EncodeToString(signature) -} - -func (v4 *signer) bodyDigest() string { - hash := v4.Request.Header.Get("X-Amz-Content-Sha256") - if hash == "" { - if v4.isPresign && 
v4.ServiceName == "s3" { - hash = "UNSIGNED-PAYLOAD" - } else if v4.Body == nil { - hash = hex.EncodeToString(makeSha256([]byte{})) - } else { - hash = hex.EncodeToString(makeSha256Reader(v4.Body)) - } - v4.Request.Header.Add("X-Amz-Content-Sha256", hash) - } - return hash -} - -// isRequestSigned returns if the request is currently signed or presigned -func (v4 *signer) isRequestSigned() bool { - if v4.isPresign && v4.Query.Get("X-Amz-Signature") != "" { - return true - } - if v4.Request.Header.Get("Authorization") != "" { - return true - } - - return false -} - -// unsign removes signing flags for both signed and presigned requests. -func (v4 *signer) removePresign() { - v4.Query.Del("X-Amz-Algorithm") - v4.Query.Del("X-Amz-Signature") - v4.Query.Del("X-Amz-Security-Token") - v4.Query.Del("X-Amz-Date") - v4.Query.Del("X-Amz-Expires") - v4.Query.Del("X-Amz-Credential") - v4.Query.Del("X-Amz-SignedHeaders") -} - -func makeHmac(key []byte, data []byte) []byte { - hash := hmac.New(sha256.New, key) - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256(data []byte) []byte { - hash := sha256.New() - hash.Write(data) - return hash.Sum(nil) -} - -func makeSha256Reader(reader io.ReadSeeker) []byte { - hash := sha256.New() - start, _ := reader.Seek(0, 1) - defer reader.Seek(start, 0) - - io.Copy(hash, reader) - return hash.Sum(nil) -} - -func stripExcessSpaces(headerVals []string) []string { - vals := make([]string, len(headerVals)) - for i, str := range headerVals { - stripped := "" - found := false - str = strings.TrimSpace(str) - for _, c := range str { - if !found && c == ' ' { - stripped += string(c) - found = true - } else if c != ' ' { - stripped += string(c) - found = false - } - } - vals[i] = stripped - } - return vals -} diff --git a/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go b/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go deleted file mode 100644 index d5be83323f..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/api.go +++ /dev/null @@ -1,3141 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. - -// Package cloudwatchlogs provides a client for Amazon CloudWatch Logs. -package cloudwatchlogs - -import ( - "fmt" - - "github.com/aws/aws-sdk-go/aws/awsutil" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" -) - -const opCancelExportTask = "CancelExportTask" - -// CancelExportTaskRequest generates a request for the CancelExportTask operation. -func (c *CloudWatchLogs) CancelExportTaskRequest(input *CancelExportTaskInput) (req *request.Request, output *CancelExportTaskOutput) { - op := &request.Operation{ - Name: opCancelExportTask, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CancelExportTaskInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &CancelExportTaskOutput{} - req.Data = output - return -} - -// Cancels an export task if it is in PENDING or RUNNING state. -func (c *CloudWatchLogs) CancelExportTask(input *CancelExportTaskInput) (*CancelExportTaskOutput, error) { - req, out := c.CancelExportTaskRequest(input) - err := req.Send() - return out, err -} - -const opCreateExportTask = "CreateExportTask" - -// CreateExportTaskRequest generates a request for the CreateExportTask operation. 
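[Editor's example] Putting buildStringToSign, buildSignature, and the makeHmac/makeSha256 helpers above together: the signing key is derived by chaining HMAC-SHA256 over the short date, region, service, and the literal "aws4_request", then applied to the string to sign. A condensed end-to-end sketch with made-up credentials and a toy canonical request (the join formats mirror the deleted code):

    package main

    import (
    	"crypto/hmac"
    	"crypto/sha256"
    	"encoding/hex"
    	"fmt"
    	"strings"
    )

    func hmacSHA256(key, data []byte) []byte {
    	h := hmac.New(sha256.New, key)
    	h.Write(data)
    	return h.Sum(nil)
    }

    func sha256Sum(b []byte) []byte {
    	s := sha256.Sum256(b)
    	return s[:]
    }

    func main() {
    	secret := "EXAMPLESECRETKEY" // toy credential
    	shortDate, region, service := "20160929", "us-east-1", "logs"

    	// Canonical string -> string to sign, as in buildCanonicalString/buildStringToSign.
    	canonical := strings.Join([]string{
    		"POST", "/", "", "host:logs.us-east-1.amazonaws.com\n", "host",
    		hex.EncodeToString(sha256Sum([]byte("{}"))),
    	}, "\n")
    	credScope := strings.Join([]string{shortDate, region, service, "aws4_request"}, "/")
    	stringToSign := strings.Join([]string{
    		"AWS4-HMAC-SHA256", "20160929T111931Z", credScope,
    		hex.EncodeToString(sha256Sum([]byte(canonical))),
    	}, "\n")

    	// Key-derivation chain, exactly as in buildSignature.
    	k := hmacSHA256([]byte("AWS4"+secret), []byte(shortDate))
    	k = hmacSHA256(k, []byte(region))
    	k = hmacSHA256(k, []byte(service))
    	k = hmacSHA256(k, []byte("aws4_request"))
    	fmt.Println("Signature=" + hex.EncodeToString(hmacSHA256(k, []byte(stringToSign))))
    }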
-func (c *CloudWatchLogs) CreateExportTaskRequest(input *CreateExportTaskInput) (req *request.Request, output *CreateExportTaskOutput) { - op := &request.Operation{ - Name: opCreateExportTask, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateExportTaskInput{} - } - - req = c.newRequest(op, input, output) - output = &CreateExportTaskOutput{} - req.Data = output - return -} - -// Creates an ExportTask which allows you to efficiently export data from a -// Log Group to your Amazon S3 bucket. -// -// This is an asynchronous call. If all the required information is provided, -// this API will initiate an export task and respond with the task Id. Once -// started, DescribeExportTasks can be used to get the status of an export task. -// You can only have one active (RUNNING or PENDING) export task at a time, -// per account. -// -// You can export logs from multiple log groups or multiple time ranges to -// the same Amazon S3 bucket. To separate out log data for each export task, -// you can specify a prefix that will be used as the Amazon S3 key prefix for -// all exported objects. -func (c *CloudWatchLogs) CreateExportTask(input *CreateExportTaskInput) (*CreateExportTaskOutput, error) { - req, out := c.CreateExportTaskRequest(input) - err := req.Send() - return out, err -} - -const opCreateLogGroup = "CreateLogGroup" - -// CreateLogGroupRequest generates a request for the CreateLogGroup operation. -func (c *CloudWatchLogs) CreateLogGroupRequest(input *CreateLogGroupInput) (req *request.Request, output *CreateLogGroupOutput) { - op := &request.Operation{ - Name: opCreateLogGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateLogGroupInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &CreateLogGroupOutput{} - req.Data = output - return -} - -// Creates a new log group with the specified name. The name of the log group -// must be unique within a region for an AWS account. You can create up to 500 -// log groups per account. -// -// You must use the following guidelines when naming a log group: Log group -// names can be between 1 and 512 characters long. Allowed characters are a-z, -// A-Z, 0-9, '_' (underscore), '-' (hyphen), '/' (forward slash), and '.' (period). -func (c *CloudWatchLogs) CreateLogGroup(input *CreateLogGroupInput) (*CreateLogGroupOutput, error) { - req, out := c.CreateLogGroupRequest(input) - err := req.Send() - return out, err -} - -const opCreateLogStream = "CreateLogStream" - -// CreateLogStreamRequest generates a request for the CreateLogStream operation. -func (c *CloudWatchLogs) CreateLogStreamRequest(input *CreateLogStreamInput) (req *request.Request, output *CreateLogStreamOutput) { - op := &request.Operation{ - Name: opCreateLogStream, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &CreateLogStreamInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &CreateLogStreamOutput{} - req.Data = output - return -} - -// Creates a new log stream in the specified log group. The name of the log -// stream must be unique within the log group. There is no limit on the number -// of log streams that can exist in a log group. 
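[Editor's example] As the doc comment above notes, CreateExportTask is asynchronous: the call returns a task Id and the export proceeds in the background, to be polled with DescribeExportTasks. A caller-side sketch; the client constructor (session.New plus cloudwatchlogs.New) is the conventional setup for this SDK vintage and is an assumption, while the From/To millisecond fields match the CreateExportTaskInput struct shown later in this file:

    package main

    import (
    	"fmt"
    	"log"
    	"time"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
    	svc := cloudwatchlogs.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

    	now := time.Now()
    	out, err := svc.CreateExportTask(&cloudwatchlogs.CreateExportTaskInput{
    		LogGroupName: aws.String("my-app"),
    		Destination:  aws.String("my-export-bucket"), // S3 bucket in the same region
    		From:         aws.Int64(now.Add(-24*time.Hour).UnixNano() / int64(time.Millisecond)),
    		To:           aws.Int64(now.UnixNano() / int64(time.Millisecond)),
    	})
    	if err != nil {
    		log.Fatal(err) // e.g. another export task is already PENDING/RUNNING
    	}
    	// Poll DescribeExportTasks with this Id until the task completes.
    	fmt.Println("task id:", aws.StringValue(out.TaskId))
    }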
-// -// You must use the following guidelines when naming a log stream: Log stream -// names can be between 1 and 512 characters long. The ':' colon character is -// not allowed. -func (c *CloudWatchLogs) CreateLogStream(input *CreateLogStreamInput) (*CreateLogStreamOutput, error) { - req, out := c.CreateLogStreamRequest(input) - err := req.Send() - return out, err -} - -const opDeleteDestination = "DeleteDestination" - -// DeleteDestinationRequest generates a request for the DeleteDestination operation. -func (c *CloudWatchLogs) DeleteDestinationRequest(input *DeleteDestinationInput) (req *request.Request, output *DeleteDestinationOutput) { - op := &request.Operation{ - Name: opDeleteDestination, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteDestinationInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteDestinationOutput{} - req.Data = output - return -} - -// Deletes the destination with the specified name and eventually disables all -// the subscription filters that publish to it. This will not delete the physical -// resource encapsulated by the destination. -func (c *CloudWatchLogs) DeleteDestination(input *DeleteDestinationInput) (*DeleteDestinationOutput, error) { - req, out := c.DeleteDestinationRequest(input) - err := req.Send() - return out, err -} - -const opDeleteLogGroup = "DeleteLogGroup" - -// DeleteLogGroupRequest generates a request for the DeleteLogGroup operation. -func (c *CloudWatchLogs) DeleteLogGroupRequest(input *DeleteLogGroupInput) (req *request.Request, output *DeleteLogGroupOutput) { - op := &request.Operation{ - Name: opDeleteLogGroup, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteLogGroupInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteLogGroupOutput{} - req.Data = output - return -} - -// Deletes the log group with the specified name and permanently deletes all -// the archived log events associated with it. -func (c *CloudWatchLogs) DeleteLogGroup(input *DeleteLogGroupInput) (*DeleteLogGroupOutput, error) { - req, out := c.DeleteLogGroupRequest(input) - err := req.Send() - return out, err -} - -const opDeleteLogStream = "DeleteLogStream" - -// DeleteLogStreamRequest generates a request for the DeleteLogStream operation. -func (c *CloudWatchLogs) DeleteLogStreamRequest(input *DeleteLogStreamInput) (req *request.Request, output *DeleteLogStreamOutput) { - op := &request.Operation{ - Name: opDeleteLogStream, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteLogStreamInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteLogStreamOutput{} - req.Data = output - return -} - -// Deletes a log stream and permanently deletes all the archived log events -// associated with it. -func (c *CloudWatchLogs) DeleteLogStream(input *DeleteLogStreamInput) (*DeleteLogStreamOutput, error) { - req, out := c.DeleteLogStreamRequest(input) - err := req.Send() - return out, err -} - -const opDeleteMetricFilter = "DeleteMetricFilter" - -// DeleteMetricFilterRequest generates a request for the DeleteMetricFilter operation. 
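[Editor's example] The generated wrappers above are driven by building an input struct and calling the operation method; group and stream names must follow the guidelines quoted above (1-512 characters, no ':' in stream names). A usage sketch, again assuming the conventional session.New constructor rather than anything shown in this hunk:

    package main

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
    	svc := cloudwatchlogs.New(session.New(aws.NewConfig().WithRegion("us-east-1")))

    	if _, err := svc.CreateLogGroup(&cloudwatchlogs.CreateLogGroupInput{
    		LogGroupName: aws.String("my-app"),
    	}); err != nil {
    		log.Fatal(err)
    	}
    	if _, err := svc.CreateLogStream(&cloudwatchlogs.CreateLogStreamInput{
    		LogGroupName:  aws.String("my-app"),
    		LogStreamName: aws.String("instance-1"), // unique within the group
    	}); err != nil {
    		log.Fatal(err)
    	}
    }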
-func (c *CloudWatchLogs) DeleteMetricFilterRequest(input *DeleteMetricFilterInput) (req *request.Request, output *DeleteMetricFilterOutput) { - op := &request.Operation{ - Name: opDeleteMetricFilter, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteMetricFilterInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteMetricFilterOutput{} - req.Data = output - return -} - -// Deletes a metric filter associated with the specified log group. -func (c *CloudWatchLogs) DeleteMetricFilter(input *DeleteMetricFilterInput) (*DeleteMetricFilterOutput, error) { - req, out := c.DeleteMetricFilterRequest(input) - err := req.Send() - return out, err -} - -const opDeleteRetentionPolicy = "DeleteRetentionPolicy" - -// DeleteRetentionPolicyRequest generates a request for the DeleteRetentionPolicy operation. -func (c *CloudWatchLogs) DeleteRetentionPolicyRequest(input *DeleteRetentionPolicyInput) (req *request.Request, output *DeleteRetentionPolicyOutput) { - op := &request.Operation{ - Name: opDeleteRetentionPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteRetentionPolicyInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteRetentionPolicyOutput{} - req.Data = output - return -} - -// Deletes the retention policy of the specified log group. Log events would -// not expire if they belong to log groups without a retention policy. -func (c *CloudWatchLogs) DeleteRetentionPolicy(input *DeleteRetentionPolicyInput) (*DeleteRetentionPolicyOutput, error) { - req, out := c.DeleteRetentionPolicyRequest(input) - err := req.Send() - return out, err -} - -const opDeleteSubscriptionFilter = "DeleteSubscriptionFilter" - -// DeleteSubscriptionFilterRequest generates a request for the DeleteSubscriptionFilter operation. -func (c *CloudWatchLogs) DeleteSubscriptionFilterRequest(input *DeleteSubscriptionFilterInput) (req *request.Request, output *DeleteSubscriptionFilterOutput) { - op := &request.Operation{ - Name: opDeleteSubscriptionFilter, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DeleteSubscriptionFilterInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &DeleteSubscriptionFilterOutput{} - req.Data = output - return -} - -// Deletes a subscription filter associated with the specified log group. -func (c *CloudWatchLogs) DeleteSubscriptionFilter(input *DeleteSubscriptionFilterInput) (*DeleteSubscriptionFilterOutput, error) { - req, out := c.DeleteSubscriptionFilterRequest(input) - err := req.Send() - return out, err -} - -const opDescribeDestinations = "DescribeDestinations" - -// DescribeDestinationsRequest generates a request for the DescribeDestinations operation. 
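[Editor's example] Every empty-body operation above swaps the JSON-RPC unmarshal handler for UnmarshalDiscardBodyHandler via Remove and PushBackNamed. A toy re-implementation of that named-handler list, only to show what those two calls do (the real one lives in the SDK's request package and is richer than this):

    package main

    import "fmt"

    type NamedHandler struct {
    	Name string
    	Fn   func()
    }

    type HandlerList struct{ list []NamedHandler }

    // PushBackNamed appends a handler to the end of the list.
    func (l *HandlerList) PushBackNamed(n NamedHandler) { l.list = append(l.list, n) }

    // Remove drops every handler registered under the same name.
    func (l *HandlerList) Remove(n NamedHandler) {
    	kept := l.list[:0]
    	for _, h := range l.list {
    		if h.Name != n.Name {
    			kept = append(kept, h)
    		}
    	}
    	l.list = kept
    }

    func main() {
    	jsonrpcUnmarshal := NamedHandler{"jsonrpc.Unmarshal", func() { fmt.Println("decode JSON body") }}
    	discardBody := NamedHandler{"awssdk.shared.UnmarshalDiscardBody", func() { fmt.Println("drain and close body") }}

    	var unmarshal HandlerList
    	unmarshal.PushBackNamed(jsonrpcUnmarshal)
    	unmarshal.Remove(jsonrpcUnmarshal) // as in the generated *Request funcs above
    	unmarshal.PushBackNamed(discardBody)
    	for _, h := range unmarshal.list {
    		h.Fn() // prints "drain and close body"
    	}
    }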
-func (c *CloudWatchLogs) DescribeDestinationsRequest(input *DescribeDestinationsInput) (req *request.Request, output *DescribeDestinationsOutput) { - op := &request.Operation{ - Name: opDescribeDestinations, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeDestinationsInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeDestinationsOutput{} - req.Data = output - return -} - -// Returns all the destinations that are associated with the AWS account making -// the request. The list returned in the response is ASCII-sorted by destination -// name. -// -// By default, this operation returns up to 50 destinations. If there are -// more destinations to list, the response would contain a nextToken value in -// the response body. You can also limit the number of destinations returned -// in the response by specifying the limit parameter in the request. -func (c *CloudWatchLogs) DescribeDestinations(input *DescribeDestinationsInput) (*DescribeDestinationsOutput, error) { - req, out := c.DescribeDestinationsRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) DescribeDestinationsPages(input *DescribeDestinationsInput, fn func(p *DescribeDestinationsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeDestinationsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeDestinationsOutput), lastPage) - }) -} - -const opDescribeExportTasks = "DescribeExportTasks" - -// DescribeExportTasksRequest generates a request for the DescribeExportTasks operation. -func (c *CloudWatchLogs) DescribeExportTasksRequest(input *DescribeExportTasksInput) (req *request.Request, output *DescribeExportTasksOutput) { - op := &request.Operation{ - Name: opDescribeExportTasks, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &DescribeExportTasksInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeExportTasksOutput{} - req.Data = output - return -} - -// Returns all the export tasks that are associated with the AWS account making -// the request. The export tasks can be filtered based on TaskId or TaskStatus. -// -// By default, this operation returns up to 50 export tasks that satisfy the -// specified filters. If there are more export tasks to list, the response would -// contain a nextToken value in the response body. You can also limit the number -// of export tasks returned in the response by specifying the limit parameter -// in the request. -func (c *CloudWatchLogs) DescribeExportTasks(input *DescribeExportTasksInput) (*DescribeExportTasksOutput, error) { - req, out := c.DescribeExportTasksRequest(input) - err := req.Send() - return out, err -} - -const opDescribeLogGroups = "DescribeLogGroups" - -// DescribeLogGroupsRequest generates a request for the DescribeLogGroups operation. 
-func (c *CloudWatchLogs) DescribeLogGroupsRequest(input *DescribeLogGroupsInput) (req *request.Request, output *DescribeLogGroupsOutput) { - op := &request.Operation{ - Name: opDescribeLogGroups, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeLogGroupsInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeLogGroupsOutput{} - req.Data = output - return -} - -// Returns all the log groups that are associated with the AWS account making -// the request. The list returned in the response is ASCII-sorted by log group -// name. -// -// By default, this operation returns up to 50 log groups. If there are more -// log groups to list, the response would contain a nextToken value in the response -// body. You can also limit the number of log groups returned in the response -// by specifying the limit parameter in the request. -func (c *CloudWatchLogs) DescribeLogGroups(input *DescribeLogGroupsInput) (*DescribeLogGroupsOutput, error) { - req, out := c.DescribeLogGroupsRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) DescribeLogGroupsPages(input *DescribeLogGroupsInput, fn func(p *DescribeLogGroupsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeLogGroupsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeLogGroupsOutput), lastPage) - }) -} - -const opDescribeLogStreams = "DescribeLogStreams" - -// DescribeLogStreamsRequest generates a request for the DescribeLogStreams operation. -func (c *CloudWatchLogs) DescribeLogStreamsRequest(input *DescribeLogStreamsInput) (req *request.Request, output *DescribeLogStreamsOutput) { - op := &request.Operation{ - Name: opDescribeLogStreams, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeLogStreamsInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeLogStreamsOutput{} - req.Data = output - return -} - -// Returns all the log streams that are associated with the specified log group. -// The list returned in the response is ASCII-sorted by log stream name. -// -// By default, this operation returns up to 50 log streams. If there are more -// log streams to list, the response would contain a nextToken value in the -// response body. You can also limit the number of log streams returned in the -// response by specifying the limit parameter in the request. This operation -// has a limit of five transactions per second, after which transactions are -// throttled. 
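[Editor's example] DescribeLogStreamsPages (defined just below) drives the paginator declared in the Operation above, invoking the callback once per page until it returns false or nextToken is exhausted. A usage sketch; the LogStreams and LogStreamName output fields follow the SDK's standard shapes and are assumed here rather than shown in this hunk:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
    	svc := cloudwatchlogs.New(session.New())
    	err := svc.DescribeLogStreamsPages(
    		&cloudwatchlogs.DescribeLogStreamsInput{LogGroupName: aws.String("my-app")},
    		func(page *cloudwatchlogs.DescribeLogStreamsOutput, lastPage bool) bool {
    			for _, s := range page.LogStreams { // LogStreams/LogStreamName: assumed shapes
    				fmt.Println(aws.StringValue(s.LogStreamName))
    			}
    			return true // keep going until the paginator runs out of nextToken
    		})
    	if err != nil {
    		log.Fatal(err) // mind the five-transactions-per-second limit noted above
    	}
    }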
-func (c *CloudWatchLogs) DescribeLogStreams(input *DescribeLogStreamsInput) (*DescribeLogStreamsOutput, error) { - req, out := c.DescribeLogStreamsRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) DescribeLogStreamsPages(input *DescribeLogStreamsInput, fn func(p *DescribeLogStreamsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeLogStreamsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeLogStreamsOutput), lastPage) - }) -} - -const opDescribeMetricFilters = "DescribeMetricFilters" - -// DescribeMetricFiltersRequest generates a request for the DescribeMetricFilters operation. -func (c *CloudWatchLogs) DescribeMetricFiltersRequest(input *DescribeMetricFiltersInput) (req *request.Request, output *DescribeMetricFiltersOutput) { - op := &request.Operation{ - Name: opDescribeMetricFilters, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeMetricFiltersInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeMetricFiltersOutput{} - req.Data = output - return -} - -// Returns all the metrics filters associated with the specified log group. -// The list returned in the response is ASCII-sorted by filter name. -// -// By default, this operation returns up to 50 metric filters. If there are -// more metric filters to list, the response would contain a nextToken value -// in the response body. You can also limit the number of metric filters returned -// in the response by specifying the limit parameter in the request. -func (c *CloudWatchLogs) DescribeMetricFilters(input *DescribeMetricFiltersInput) (*DescribeMetricFiltersOutput, error) { - req, out := c.DescribeMetricFiltersRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) DescribeMetricFiltersPages(input *DescribeMetricFiltersInput, fn func(p *DescribeMetricFiltersOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeMetricFiltersRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeMetricFiltersOutput), lastPage) - }) -} - -const opDescribeSubscriptionFilters = "DescribeSubscriptionFilters" - -// DescribeSubscriptionFiltersRequest generates a request for the DescribeSubscriptionFilters operation. -func (c *CloudWatchLogs) DescribeSubscriptionFiltersRequest(input *DescribeSubscriptionFiltersInput) (req *request.Request, output *DescribeSubscriptionFiltersOutput) { - op := &request.Operation{ - Name: opDescribeSubscriptionFilters, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &DescribeSubscriptionFiltersInput{} - } - - req = c.newRequest(op, input, output) - output = &DescribeSubscriptionFiltersOutput{} - req.Data = output - return -} - -// Returns all the subscription filters associated with the specified log group. -// The list returned in the response is ASCII-sorted by filter name. -// -// By default, this operation returns up to 50 subscription filters. 
If there -// are more subscription filters to list, the response would contain a nextToken -// value in the response body. You can also limit the number of subscription -// filters returned in the response by specifying the limit parameter in the -// request. -func (c *CloudWatchLogs) DescribeSubscriptionFilters(input *DescribeSubscriptionFiltersInput) (*DescribeSubscriptionFiltersOutput, error) { - req, out := c.DescribeSubscriptionFiltersRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) DescribeSubscriptionFiltersPages(input *DescribeSubscriptionFiltersInput, fn func(p *DescribeSubscriptionFiltersOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.DescribeSubscriptionFiltersRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*DescribeSubscriptionFiltersOutput), lastPage) - }) -} - -const opFilterLogEvents = "FilterLogEvents" - -// FilterLogEventsRequest generates a request for the FilterLogEvents operation. -func (c *CloudWatchLogs) FilterLogEventsRequest(input *FilterLogEventsInput) (req *request.Request, output *FilterLogEventsOutput) { - op := &request.Operation{ - Name: opFilterLogEvents, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &FilterLogEventsInput{} - } - - req = c.newRequest(op, input, output) - output = &FilterLogEventsOutput{} - req.Data = output - return -} - -// Retrieves log events, optionally filtered by a filter pattern from the specified -// log group. You can provide an optional time range to filter the results on -// the event timestamp. You can limit the streams searched to an explicit list -// of logStreamNames. -// -// By default, this operation returns as much matching log events as can fit -// in a response size of 1MB, up to 10,000 log events, or all the events found -// within a time-bounded scan window. If the response includes a nextToken, -// then there is more data to search, and the search can be resumed with a new -// request providing the nextToken. The response will contain a list of searchedLogStreams -// that contains information about which streams were searched in the request -// and whether they have been searched completely or require further pagination. -// The limit parameter in the request. can be used to specify the maximum number -// of events to return in a page. -func (c *CloudWatchLogs) FilterLogEvents(input *FilterLogEventsInput) (*FilterLogEventsOutput, error) { - req, out := c.FilterLogEventsRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) FilterLogEventsPages(input *FilterLogEventsInput, fn func(p *FilterLogEventsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.FilterLogEventsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*FilterLogEventsOutput), lastPage) - }) -} - -const opGetLogEvents = "GetLogEvents" - -// GetLogEventsRequest generates a request for the GetLogEvents operation. 
-func (c *CloudWatchLogs) GetLogEventsRequest(input *GetLogEventsInput) (req *request.Request, output *GetLogEventsOutput) { - op := &request.Operation{ - Name: opGetLogEvents, - HTTPMethod: "POST", - HTTPPath: "/", - Paginator: &request.Paginator{ - InputTokens: []string{"nextToken"}, - OutputTokens: []string{"nextForwardToken"}, - LimitToken: "limit", - TruncationToken: "", - }, - } - - if input == nil { - input = &GetLogEventsInput{} - } - - req = c.newRequest(op, input, output) - output = &GetLogEventsOutput{} - req.Data = output - return -} - -// Retrieves log events from the specified log stream. You can provide an optional -// time range to filter the results on the event timestamp. -// -// By default, this operation returns as much log events as can fit in a response -// size of 1MB, up to 10,000 log events. The response will always include a -// nextForwardToken and a nextBackwardToken in the response body. You can use -// any of these tokens in subsequent GetLogEvents requests to paginate through -// events in either forward or backward direction. You can also limit the number -// of log events returned in the response by specifying the limit parameter -// in the request. -func (c *CloudWatchLogs) GetLogEvents(input *GetLogEventsInput) (*GetLogEventsOutput, error) { - req, out := c.GetLogEventsRequest(input) - err := req.Send() - return out, err -} - -func (c *CloudWatchLogs) GetLogEventsPages(input *GetLogEventsInput, fn func(p *GetLogEventsOutput, lastPage bool) (shouldContinue bool)) error { - page, _ := c.GetLogEventsRequest(input) - page.Handlers.Build.PushBack(request.MakeAddToUserAgentFreeFormHandler("Paginator")) - return page.EachPage(func(p interface{}, lastPage bool) bool { - return fn(p.(*GetLogEventsOutput), lastPage) - }) -} - -const opPutDestination = "PutDestination" - -// PutDestinationRequest generates a request for the PutDestination operation. -func (c *CloudWatchLogs) PutDestinationRequest(input *PutDestinationInput) (req *request.Request, output *PutDestinationOutput) { - op := &request.Operation{ - Name: opPutDestination, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutDestinationInput{} - } - - req = c.newRequest(op, input, output) - output = &PutDestinationOutput{} - req.Data = output - return -} - -// Creates or updates a Destination. A destination encapsulates a physical resource -// (such as a Kinesis stream) and allows you to subscribe to a real-time stream -// of log events of a different account, ingested through PutLogEvents requests. -// Currently, the only supported physical resource is a Amazon Kinesis stream -// belonging to the same account as the destination. -// -// A destination controls what is written to its Amazon Kinesis stream through -// an access policy. By default, PutDestination does not set any access policy -// with the destination, which means a cross-account user will not be able to -// call PutSubscriptionFilter against this destination. To enable that, the -// destination owner must call PutDestinationPolicy after PutDestination. -func (c *CloudWatchLogs) PutDestination(input *PutDestinationInput) (*PutDestinationOutput, error) { - req, out := c.PutDestinationRequest(input) - err := req.Send() - return out, err -} - -const opPutDestinationPolicy = "PutDestinationPolicy" - -// PutDestinationPolicyRequest generates a request for the PutDestinationPolicy operation. 
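[Editor's example] The nextForwardToken contract described above means a caller pages forward by feeding each response's token back in; when the token stops changing, the end of the stream has been reached. A sketch of that loop; the Events, Message, NextToken, StartFromHead, and NextForwardToken field names are the SDK's usual shapes, assumed here since only the doc comment appears in this hunk:

    package main

    import (
    	"fmt"
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
    	svc := cloudwatchlogs.New(session.New())
    	input := &cloudwatchlogs.GetLogEventsInput{
    		LogGroupName:  aws.String("my-app"),
    		LogStreamName: aws.String("instance-1"),
    		StartFromHead: aws.Bool(true), // assumed field: read oldest events first
    	}
    	for {
    		out, err := svc.GetLogEvents(input)
    		if err != nil {
    			log.Fatal(err)
    		}
    		for _, e := range out.Events {
    			fmt.Println(aws.StringValue(e.Message))
    		}
    		if aws.StringValue(out.NextForwardToken) == aws.StringValue(input.NextToken) {
    			return // token repeated: end of stream reached
    		}
    		input.NextToken = out.NextForwardToken
    	}
    }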
-func (c *CloudWatchLogs) PutDestinationPolicyRequest(input *PutDestinationPolicyInput) (req *request.Request, output *PutDestinationPolicyOutput) { - op := &request.Operation{ - Name: opPutDestinationPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutDestinationPolicyInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutDestinationPolicyOutput{} - req.Data = output - return -} - -// Creates or updates an access policy associated with an existing Destination. -// An access policy is an IAM policy document (http://docs.aws.amazon.com/IAM/latest/UserGuide/policies_overview.html) -// that is used to authorize claims to register a subscription filter against -// a given destination. -func (c *CloudWatchLogs) PutDestinationPolicy(input *PutDestinationPolicyInput) (*PutDestinationPolicyOutput, error) { - req, out := c.PutDestinationPolicyRequest(input) - err := req.Send() - return out, err -} - -const opPutLogEvents = "PutLogEvents" - -// PutLogEventsRequest generates a request for the PutLogEvents operation. -func (c *CloudWatchLogs) PutLogEventsRequest(input *PutLogEventsInput) (req *request.Request, output *PutLogEventsOutput) { - op := &request.Operation{ - Name: opPutLogEvents, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutLogEventsInput{} - } - - req = c.newRequest(op, input, output) - output = &PutLogEventsOutput{} - req.Data = output - return -} - -// Uploads a batch of log events to the specified log stream. -// -// Every PutLogEvents request must include the sequenceToken obtained from -// the response of the previous request. An upload in a newly created log stream -// does not require a sequenceToken. -// -// The batch of events must satisfy the following constraints: The maximum -// batch size is 1,048,576 bytes, and this size is calculated as the sum of -// all event messages in UTF-8, plus 26 bytes for each log event. None of the -// log events in the batch can be more than 2 hours in the future. None of the -// log events in the batch can be older than 14 days or the retention period -// of the log group. The log events in the batch must be in chronological ordered -// by their timestamp. The maximum number of log events in a batch is 10,000. -// A batch of log events in a single PutLogEvents request cannot span more than -// 24 hours. Otherwise, the PutLogEvents operation will fail. -func (c *CloudWatchLogs) PutLogEvents(input *PutLogEventsInput) (*PutLogEventsOutput, error) { - req, out := c.PutLogEventsRequest(input) - err := req.Send() - return out, err -} - -const opPutMetricFilter = "PutMetricFilter" - -// PutMetricFilterRequest generates a request for the PutMetricFilter operation. -func (c *CloudWatchLogs) PutMetricFilterRequest(input *PutMetricFilterInput) (req *request.Request, output *PutMetricFilterOutput) { - op := &request.Operation{ - Name: opPutMetricFilter, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutMetricFilterInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutMetricFilterOutput{} - req.Data = output - return -} - -// Creates or updates a metric filter and associates it with the specified log -// group. 
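[Editor's example] The PutLogEvents contract quoted above (every call after the first must echo back the previous response's sequence token) typically looks like the following on the caller's side; the InputLogEvent and NextSequenceToken shapes are assumed from the SDK's standard model, since this hunk only shows the doc comment:

    package main

    import (
    	"log"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/aws/session"
    	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
    	svc := cloudwatchlogs.New(session.New())
    	var token *string // a freshly created stream needs no sequenceToken

    	// Each batch: chronological order, <= 10,000 events, <= 1,048,576 bytes.
    	batches := [][]*cloudwatchlogs.InputLogEvent{}
    	for _, events := range batches {
    		out, err := svc.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
    			LogGroupName:  aws.String("my-app"),
    			LogStreamName: aws.String("instance-1"),
    			LogEvents:     events,
    			SequenceToken: token,
    		})
    		if err != nil {
    			log.Fatal(err)
    		}
    		token = out.NextSequenceToken // echo back on the next call, per the doc above
    	}
    }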
Metric filters allow you to configure rules to extract metric data -// from log events ingested through PutLogEvents requests. -// -// The maximum number of metric filters that can be associated with a log -// group is 100. -func (c *CloudWatchLogs) PutMetricFilter(input *PutMetricFilterInput) (*PutMetricFilterOutput, error) { - req, out := c.PutMetricFilterRequest(input) - err := req.Send() - return out, err -} - -const opPutRetentionPolicy = "PutRetentionPolicy" - -// PutRetentionPolicyRequest generates a request for the PutRetentionPolicy operation. -func (c *CloudWatchLogs) PutRetentionPolicyRequest(input *PutRetentionPolicyInput) (req *request.Request, output *PutRetentionPolicyOutput) { - op := &request.Operation{ - Name: opPutRetentionPolicy, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutRetentionPolicyInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutRetentionPolicyOutput{} - req.Data = output - return -} - -// Sets the retention of the specified log group. A retention policy allows -// you to configure the number of days you want to retain log events in the -// specified log group. -func (c *CloudWatchLogs) PutRetentionPolicy(input *PutRetentionPolicyInput) (*PutRetentionPolicyOutput, error) { - req, out := c.PutRetentionPolicyRequest(input) - err := req.Send() - return out, err -} - -const opPutSubscriptionFilter = "PutSubscriptionFilter" - -// PutSubscriptionFilterRequest generates a request for the PutSubscriptionFilter operation. -func (c *CloudWatchLogs) PutSubscriptionFilterRequest(input *PutSubscriptionFilterInput) (req *request.Request, output *PutSubscriptionFilterOutput) { - op := &request.Operation{ - Name: opPutSubscriptionFilter, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &PutSubscriptionFilterInput{} - } - - req = c.newRequest(op, input, output) - req.Handlers.Unmarshal.Remove(jsonrpc.UnmarshalHandler) - req.Handlers.Unmarshal.PushBackNamed(protocol.UnmarshalDiscardBodyHandler) - output = &PutSubscriptionFilterOutput{} - req.Data = output - return -} - -// Creates or updates a subscription filter and associates it with the specified -// log group. Subscription filters allow you to subscribe to a real-time stream -// of log events ingested through PutLogEvents requests and have them delivered -// to a specific destination. Currently, the supported destinations are: An -// Amazon Kinesis stream belonging to the same account as the subscription filter, -// for same-account delivery. A logical destination (used via an ARN of Destination) -// belonging to a different account, for cross-account delivery. An Amazon -// Kinesis Firehose stream belonging to the same account as the subscription -// filter, for same-account delivery. An AWS Lambda function belonging to -// the same account as the subscription filter, for same-account delivery. -// -// -// Currently there can only be one subscription filter associated with a log -// group. -func (c *CloudWatchLogs) PutSubscriptionFilter(input *PutSubscriptionFilterInput) (*PutSubscriptionFilterOutput, error) { - req, out := c.PutSubscriptionFilterRequest(input) - err := req.Send() - return out, err -} - -const opTestMetricFilter = "TestMetricFilter" - -// TestMetricFilterRequest generates a request for the TestMetricFilter operation. 
-func (c *CloudWatchLogs) TestMetricFilterRequest(input *TestMetricFilterInput) (req *request.Request, output *TestMetricFilterOutput) { - op := &request.Operation{ - Name: opTestMetricFilter, - HTTPMethod: "POST", - HTTPPath: "/", - } - - if input == nil { - input = &TestMetricFilterInput{} - } - - req = c.newRequest(op, input, output) - output = &TestMetricFilterOutput{} - req.Data = output - return -} - -// Tests the filter pattern of a metric filter against a sample of log event -// messages. You can use this operation to validate the correctness of a metric -// filter pattern. -func (c *CloudWatchLogs) TestMetricFilter(input *TestMetricFilterInput) (*TestMetricFilterOutput, error) { - req, out := c.TestMetricFilterRequest(input) - err := req.Send() - return out, err -} - -type CancelExportTaskInput struct { - _ struct{} `type:"structure"` - - // Id of the export task to cancel. - TaskId *string `locationName:"taskId" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CancelExportTaskInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CancelExportTaskInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CancelExportTaskInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CancelExportTaskInput"} - if s.TaskId == nil { - invalidParams.Add(request.NewErrParamRequired("TaskId")) - } - if s.TaskId != nil && len(*s.TaskId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type CancelExportTaskOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CancelExportTaskOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CancelExportTaskOutput) GoString() string { - return s.String() -} - -type CreateExportTaskInput struct { - _ struct{} `type:"structure"` - - // Name of Amazon S3 bucket to which the log data will be exported. - // - // Note: Only buckets in the same AWS region are supported. - Destination *string `locationName:"destination" min:"1" type:"string" required:"true"` - - // Prefix that will be used as the start of Amazon S3 key for every object exported. - // If not specified, this defaults to 'exportedlogs'. - DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. It indicates the start time of the range for the request. Events - // with a timestamp prior to this time will not be exported. - From *int64 `locationName:"from" type:"long" required:"true"` - - // The name of the log group to export. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // Will only export log streams that match the provided logStreamNamePrefix. - // If you don't specify a value, no prefix filter is applied. - LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` - - // The name of the export task. - TaskName *string `locationName:"taskName" min:"1" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. It indicates the end time of the range for the request. Events - // with a timestamp later than this time will not be exported. 
- To *int64 `locationName:"to" type:"long" required:"true"` -} - -// String returns the string representation -func (s CreateExportTaskInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateExportTaskInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateExportTaskInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateExportTaskInput"} - if s.Destination == nil { - invalidParams.Add(request.NewErrParamRequired("Destination")) - } - if s.Destination != nil && len(*s.Destination) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Destination", 1)) - } - if s.From == nil { - invalidParams.Add(request.NewErrParamRequired("From")) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) - } - if s.TaskName != nil && len(*s.TaskName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TaskName", 1)) - } - if s.To == nil { - invalidParams.Add(request.NewErrParamRequired("To")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type CreateExportTaskOutput struct { - _ struct{} `type:"structure"` - - // Id of the export task that got created. - TaskId *string `locationName:"taskId" min:"1" type:"string"` -} - -// String returns the string representation -func (s CreateExportTaskOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateExportTaskOutput) GoString() string { - return s.String() -} - -type CreateLogGroupInput struct { - _ struct{} `type:"structure"` - - // The name of the log group to create. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateLogGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateLogGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateLogGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateLogGroupInput"} - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type CreateLogGroupOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateLogGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateLogGroupOutput) GoString() string { - return s.String() -} - -type CreateLogStreamInput struct { - _ struct{} `type:"structure"` - - // The name of the log group under which the log stream is to be created. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // The name of the log stream to create. 
- LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s CreateLogStreamInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateLogStreamInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *CreateLogStreamInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "CreateLogStreamInput"} - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.LogStreamName == nil { - invalidParams.Add(request.NewErrParamRequired("LogStreamName")) - } - if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type CreateLogStreamOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s CreateLogStreamOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s CreateLogStreamOutput) GoString() string { - return s.String() -} - -type DeleteDestinationInput struct { - _ struct{} `type:"structure"` - - // The name of destination to delete. - DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteDestinationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteDestinationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteDestinationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteDestinationInput"} - if s.DestinationName == nil { - invalidParams.Add(request.NewErrParamRequired("DestinationName")) - } - if s.DestinationName != nil && len(*s.DestinationName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DeleteDestinationOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteDestinationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteDestinationOutput) GoString() string { - return s.String() -} - -type DeleteLogGroupInput struct { - _ struct{} `type:"structure"` - - // The name of the log group to delete. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteLogGroupInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteLogGroupInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
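[Editor's example] Every generated input type carries the same Validate shape shown above: required-field and min-length checks accumulate into one ErrInvalidParams before any request is sent. A quick demonstration against CreateLogStreamInput, whose Validate is quoted above (the exact error message text may vary):

    package main

    import (
    	"fmt"

    	"github.com/aws/aws-sdk-go/aws"
    	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
    )

    func main() {
    	// Missing LogStreamName and an empty LogGroupName: both recorded at once.
    	in := &cloudwatchlogs.CreateLogStreamInput{LogGroupName: aws.String("")}
    	if err := in.Validate(); err != nil {
    		fmt.Println(err) // two validation errors reported together
    	}
    }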
-func (s *DeleteLogGroupInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteLogGroupInput"} - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DeleteLogGroupOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteLogGroupOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteLogGroupOutput) GoString() string { - return s.String() -} - -type DeleteLogStreamInput struct { - _ struct{} `type:"structure"` - - // The name of the log group that contains the log stream to delete. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // The name of the log stream to delete. - LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteLogStreamInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteLogStreamInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteLogStreamInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteLogStreamInput"} - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.LogStreamName == nil { - invalidParams.Add(request.NewErrParamRequired("LogStreamName")) - } - if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DeleteLogStreamOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteLogStreamOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteLogStreamOutput) GoString() string { - return s.String() -} - -type DeleteMetricFilterInput struct { - _ struct{} `type:"structure"` - - // The name of the metric filter to delete. - FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` - - // The name of the log group that is associated with the metric filter to delete. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteMetricFilterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMetricFilterInput) GoString() string { - return s.String() -} -
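Every input shape in this file repeats the same client-side validation pattern, so a short sketch of what Validate surfaces may be useful. The group name is hypothetical, and LogStreamName is deliberately left unset to trigger a violation.

import (
	"fmt"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func validateSketch() {
	in := &cloudwatchlogs.DeleteLogStreamInput{
		LogGroupName: aws.String("docker-logs"), // hypothetical
		// LogStreamName omitted: a required parameter
	}
	if err := in.Validate(); err != nil {
		// err is an ErrInvalidParams value aggregating every violation,
		// caught before any request is sent to the service.
		fmt.Println(err)
	}
}

-// Validate inspects the fields of the type to determine if they are valid.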
-func (s *DeleteMetricFilterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteMetricFilterInput"} - if s.FilterName == nil { - invalidParams.Add(request.NewErrParamRequired("FilterName")) - } - if s.FilterName != nil && len(*s.FilterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DeleteMetricFilterOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteMetricFilterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteMetricFilterOutput) GoString() string { - return s.String() -} - -type DeleteRetentionPolicyInput struct { - _ struct{} `type:"structure"` - - // The name of the log group that is associated with the retention policy to - // delete. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteRetentionPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteRetentionPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DeleteRetentionPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteRetentionPolicyInput"} - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DeleteRetentionPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteRetentionPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteRetentionPolicyOutput) GoString() string { - return s.String() -} - -type DeleteSubscriptionFilterInput struct { - _ struct{} `type:"structure"` - - // The name of the subscription filter to delete. - FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` - - // The name of the log group that is associated with the subscription filter - // to delete. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s DeleteSubscriptionFilterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSubscriptionFilterInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DeleteSubscriptionFilterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DeleteSubscriptionFilterInput"} - if s.FilterName == nil { - invalidParams.Add(request.NewErrParamRequired("FilterName")) - } - if s.FilterName != nil && len(*s.FilterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DeleteSubscriptionFilterOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s DeleteSubscriptionFilterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DeleteSubscriptionFilterOutput) GoString() string { - return s.String() -} - -type DescribeDestinationsInput struct { - _ struct{} `type:"structure"` - - // Will only return destinations that match the provided destinationNamePrefix. - // If you don't specify a value, no prefix is applied. - DestinationNamePrefix *string `min:"1" type:"string"` - - // The maximum number of results to return. - Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeDestinationsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeDestinationsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeDestinationsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeDestinationsInput"} - if s.DestinationNamePrefix != nil && len(*s.DestinationNamePrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DestinationNamePrefix", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DescribeDestinationsOutput struct { - _ struct{} `type:"structure"` - - Destinations []*Destination `locationName:"destinations" type:"list"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeDestinationsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeDestinationsOutput) GoString() string { - return s.String() -} - -type DescribeExportTasksInput struct { - _ struct{} `type:"structure"` - - // The maximum number of items returned in the response. If you don't specify - // a value, the request would return up to 50 items. 
- Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous DescribeExportTasks - // request. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - // All export tasks that match the specified status code will be returned. - // This can return zero or more export tasks. - StatusCode *string `locationName:"statusCode" type:"string" enum:"ExportTaskStatusCode"` - - // The export task that matches the specified task ID will be returned. This can - // result in zero or one export task. - TaskId *string `locationName:"taskId" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeExportTasksInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeExportTasksInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *DescribeExportTasksInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeExportTasksInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - if s.TaskId != nil && len(*s.TaskId) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TaskId", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DescribeExportTasksOutput struct { - _ struct{} `type:"structure"` - - // A list of export tasks. - ExportTasks []*ExportTask `locationName:"exportTasks" type:"list"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeExportTasksOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeExportTasksOutput) GoString() string { - return s.String() -} - -type DescribeLogGroupsInput struct { - _ struct{} `type:"structure"` - - // The maximum number of items returned in the response. If you don't specify - // a value, the request would return up to 50 items. - Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // Will only return log groups that match the provided logGroupNamePrefix. If - // you don't specify a value, no prefix filter is applied. - LogGroupNamePrefix *string `locationName:"logGroupNamePrefix" min:"1" type:"string"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous DescribeLogGroups - // request. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeLogGroupsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeLogGroupsInput) GoString() string { - return s.String() -} -
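The NextToken fields defined here drive pagination, and the 24-hour expiry means tokens should be consumed promptly. A minimal sketch of walking every page of DescribeLogGroups, assuming an already-constructed client and a hypothetical "docker" prefix:

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func listAllLogGroups(svc *cloudwatchlogs.CloudWatchLogs) ([]*cloudwatchlogs.LogGroup, error) {
	var groups []*cloudwatchlogs.LogGroup
	in := &cloudwatchlogs.DescribeLogGroupsInput{
		LogGroupNamePrefix: aws.String("docker"), // hypothetical prefix
	}
	for {
		out, err := svc.DescribeLogGroups(in)
		if err != nil {
			return nil, err
		}
		groups = append(groups, out.LogGroups...)
		if out.NextToken == nil {
			return groups, nil // a nil token marks the last page
		}
		in.NextToken = out.NextToken // feed the token back for the next page
	}
}

-// Validate inspects the fields of the type to determine if they are valid.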
-func (s *DescribeLogGroupsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeLogGroupsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.LogGroupNamePrefix != nil && len(*s.LogGroupNamePrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupNamePrefix", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DescribeLogGroupsOutput struct { - _ struct{} `type:"structure"` - - // A list of log groups. - LogGroups []*LogGroup `locationName:"logGroups" type:"list"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeLogGroupsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeLogGroupsOutput) GoString() string { - return s.String() -} - -type DescribeLogStreamsInput struct { - _ struct{} `type:"structure"` - - // If set to true, results are returned in descending order. If you don't specify - // a value or set it to false, results are returned in ascending order. - Descending *bool `locationName:"descending" type:"boolean"` - - // The maximum number of items returned in the response. If you don't specify - // a value, the request would return up to 50 items. - Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // The log group name for which log streams are to be listed. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // Will only return log streams that match the provided logStreamNamePrefix. - // If you don't specify a value, no prefix filter is applied. - LogStreamNamePrefix *string `locationName:"logStreamNamePrefix" min:"1" type:"string"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous DescribeLogStreams - // request. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - // Specifies what to order the returned log streams by. Valid arguments are - // 'LogStreamName' or 'LastEventTime'. If you don't specify a value, results - // are ordered by LogStreamName. If 'LastEventTime' is chosen, the request cannot - // also contain a logStreamNamePrefix. - OrderBy *string `locationName:"orderBy" type:"string" enum:"OrderBy"` -} - -// String returns the string representation -func (s DescribeLogStreamsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeLogStreamsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeLogStreamsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeLogStreamsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.LogStreamNamePrefix != nil && len(*s.LogStreamNamePrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogStreamNamePrefix", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DescribeLogStreamsOutput struct { - _ struct{} `type:"structure"` - - // A list of log streams. - LogStreams []*LogStream `locationName:"logStreams" type:"list"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeLogStreamsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeLogStreamsOutput) GoString() string { - return s.String() -} - -type DescribeMetricFiltersInput struct { - _ struct{} `type:"structure"` - - // Will only return metric filters that match the provided filterNamePrefix. - // If you don't specify a value, no prefix filter is applied. - FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` - - // The maximum number of items returned in the response. If you don't specify - // a value, the request would return up to 50 items. - Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // The log group name for which metric filters are to be listed. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous DescribeMetricFilters - // request. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeMetricFiltersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeMetricFiltersInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeMetricFiltersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeMetricFiltersInput"} - if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DescribeMetricFiltersOutput struct { - _ struct{} `type:"structure"` - - MetricFilters []*MetricFilter `locationName:"metricFilters" type:"list"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeMetricFiltersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeMetricFiltersOutput) GoString() string { - return s.String() -} - -type DescribeSubscriptionFiltersInput struct { - _ struct{} `type:"structure"` - - // Will only return subscription filters that match the provided filterNamePrefix. - // If you don't specify a value, no prefix filter is applied. - FilterNamePrefix *string `locationName:"filterNamePrefix" min:"1" type:"string"` - - // The maximum number of results to return. - Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // The log group name for which subscription filters are to be listed. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s DescribeSubscriptionFiltersInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeSubscriptionFiltersInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *DescribeSubscriptionFiltersInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "DescribeSubscriptionFiltersInput"} - if s.FilterNamePrefix != nil && len(*s.FilterNamePrefix) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterNamePrefix", 1)) - } - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type DescribeSubscriptionFiltersOutput struct { - _ struct{} `type:"structure"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - SubscriptionFilters []*SubscriptionFilter `locationName:"subscriptionFilters" type:"list"` -} - -// String returns the string representation -func (s DescribeSubscriptionFiltersOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s DescribeSubscriptionFiltersOutput) GoString() string { - return s.String() -} - -// A cross account destination that is the recipient of subscription log events. -type Destination struct { - _ struct{} `type:"structure"` - - // An IAM policy document that governs which AWS accounts can create subscription - // filters against this destination. - AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string"` - - // ARN of this destination. - Arn *string `locationName:"arn" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC specifying when this destination was created. - CreationTime *int64 `locationName:"creationTime" type:"long"` - - // Name of the destination. - DestinationName *string `locationName:"destinationName" min:"1" type:"string"` - - // A role for impersonation for delivering log events to the target. - RoleArn *string `locationName:"roleArn" min:"1" type:"string"` - - // ARN of the physical target where the log events will be delivered (eg. ARN - // of a Kinesis stream). - TargetArn *string `locationName:"targetArn" min:"1" type:"string"` -} - -// String returns the string representation -func (s Destination) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s Destination) GoString() string { - return s.String() -} - -// Represents an export task. -type ExportTask struct { - _ struct{} `type:"structure"` - - // Name of Amazon S3 bucket to which the log data was exported. - Destination *string `locationName:"destination" min:"1" type:"string"` - - // Prefix that was used as the start of Amazon S3 key for every object exported. - DestinationPrefix *string `locationName:"destinationPrefix" type:"string"` - - // Execution info about the export task. - ExecutionInfo *ExportTaskExecutionInfo `locationName:"executionInfo" type:"structure"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. Events with a timestamp prior to this time are not exported. 
- From *int64 `locationName:"from" type:"long"` - - // The name of the log group from which log data was exported. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` - - // Status of the export task. - Status *ExportTaskStatus `locationName:"status" type:"structure"` - - // The ID of the export task. - TaskId *string `locationName:"taskId" min:"1" type:"string"` - - // The name of the export task. - TaskName *string `locationName:"taskName" min:"1" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. Events with a timestamp later than this time are not exported. - To *int64 `locationName:"to" type:"long"` -} - -// String returns the string representation -func (s ExportTask) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ExportTask) GoString() string { - return s.String() -} - -// Represents the execution information of an export task. -type ExportTaskExecutionInfo struct { - _ struct{} `type:"structure"` - - // A point in time when the export task was completed. - CompletionTime *int64 `locationName:"completionTime" type:"long"` - - // A point in time when the export task was created. - CreationTime *int64 `locationName:"creationTime" type:"long"` -} - -// String returns the string representation -func (s ExportTaskExecutionInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ExportTaskExecutionInfo) GoString() string { - return s.String() -} - -// Represents the status of an export task. -type ExportTaskStatus struct { - _ struct{} `type:"structure"` - - // Status code of the export task. - Code *string `locationName:"code" type:"string" enum:"ExportTaskStatusCode"` - - // Status message related to the code. - Message *string `locationName:"message" type:"string"` -} - -// String returns the string representation -func (s ExportTaskStatus) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s ExportTaskStatus) GoString() string { - return s.String() -} - -type FilterLogEventsInput struct { - _ struct{} `type:"structure"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. If provided, events with a timestamp later than this time are - // not returned. - EndTime *int64 `locationName:"endTime" type:"long"` - - // A valid CloudWatch Logs filter pattern to use for filtering the response. - // If not provided, all the events are matched. - FilterPattern *string `locationName:"filterPattern" type:"string"` - - // If provided, the API will make a best effort to provide responses that contain - // events from multiple log streams within the log group interleaved in a single - // response. If not provided, all the matched log events in the first log stream - // will be searched first, then those in the next log stream, etc. - Interleaved *bool `locationName:"interleaved" type:"boolean"` - - // The maximum number of events to return in a page of results. Default is 10,000 - // events. - Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // The name of the log group to query. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // Optional list of log stream names within the specified log group to search. - // Defaults to all the log streams in the log group.
- LogStreamNames []*string `locationName:"logStreamNames" min:"1" type:"list"` - - // A pagination token obtained from a FilterLogEvents response to continue paginating - // the FilterLogEvents results. This token is omitted from the response when - // there are no other events to display. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. If provided, events with a timestamp prior to this time are - // not returned. - StartTime *int64 `locationName:"startTime" type:"long"` -} - -// String returns the string representation -func (s FilterLogEventsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FilterLogEventsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *FilterLogEventsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "FilterLogEventsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.LogStreamNames != nil && len(s.LogStreamNames) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogStreamNames", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type FilterLogEventsOutput struct { - _ struct{} `type:"structure"` - - // A list of FilteredLogEvent objects representing the matched events from the - // request. - Events []*FilteredLogEvent `locationName:"events" type:"list"` - - // A pagination token obtained from a FilterLogEvents response to continue paginating - // the FilterLogEvents results. This token is omitted from the response when - // there are no other events to display. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - // A list of SearchedLogStream objects indicating which log streams have been - // searched in this request and whether each has been searched completely or - // still has more to be paginated. - SearchedLogStreams []*SearchedLogStream `locationName:"searchedLogStreams" type:"list"` -} - -// String returns the string representation -func (s FilterLogEventsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FilterLogEventsOutput) GoString() string { - return s.String() -} - -// Represents a matched event from a FilterLogEvents request. -type FilteredLogEvent struct { - _ struct{} `type:"structure"` - - // A unique identifier for this event. - EventId *string `locationName:"eventId" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - IngestionTime *int64 `locationName:"ingestionTime" type:"long"` - - // The name of the log stream this event belongs to. - LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` - - // The data contained in the log event. - Message *string `locationName:"message" min:"1" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. 
- Timestamp *int64 `locationName:"timestamp" type:"long"` -} - -// String returns the string representation -func (s FilteredLogEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s FilteredLogEvent) GoString() string { - return s.String() -} - -type GetLogEventsInput struct { - _ struct{} `type:"structure"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - EndTime *int64 `locationName:"endTime" type:"long"` - - // The maximum number of log events returned in the response. If you don't specify - // a value, the request would return as many log events as can fit in a response - // size of 1MB, up to 10,000 log events. - Limit *int64 `locationName:"limit" min:"1" type:"integer"` - - // The name of the log group to query. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // The name of the log stream to query. - LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the nextForwardToken or nextBackwardToken - // fields in the response of the previous GetLogEvents request. - NextToken *string `locationName:"nextToken" min:"1" type:"string"` - - // If set to true, the earliest log events would be returned first. The default - // is false (the latest log events are returned first). - StartFromHead *bool `locationName:"startFromHead" type:"boolean"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - StartTime *int64 `locationName:"startTime" type:"long"` -} - -// String returns the string representation -func (s GetLogEventsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetLogEventsInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *GetLogEventsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "GetLogEventsInput"} - if s.Limit != nil && *s.Limit < 1 { - invalidParams.Add(request.NewErrParamMinValue("Limit", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.LogStreamName == nil { - invalidParams.Add(request.NewErrParamRequired("LogStreamName")) - } - if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) - } - if s.NextToken != nil && len(*s.NextToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("NextToken", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type GetLogEventsOutput struct { - _ struct{} `type:"structure"` - - Events []*OutputLogEvent `locationName:"events" type:"list"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. - NextBackwardToken *string `locationName:"nextBackwardToken" min:"1" type:"string"` - - // A string token used for pagination that points to the next page of results. - // It must be a value obtained from the response of the previous request. The - // token expires after 24 hours. 
- NextForwardToken *string `locationName:"nextForwardToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s GetLogEventsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s GetLogEventsOutput) GoString() string { - return s.String() -} - -// A log event is a record of some activity that was recorded by the application -// or resource being monitored. The log event record that CloudWatch Logs understands -// contains two properties: the timestamp of when the event occurred, and the -// raw event message. -type InputLogEvent struct { - _ struct{} `type:"structure"` - - Message *string `locationName:"message" min:"1" type:"string" required:"true"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - Timestamp *int64 `locationName:"timestamp" type:"long" required:"true"` -} - -// String returns the string representation -func (s InputLogEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s InputLogEvent) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *InputLogEvent) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "InputLogEvent"} - if s.Message == nil { - invalidParams.Add(request.NewErrParamRequired("Message")) - } - if s.Message != nil && len(*s.Message) < 1 { - invalidParams.Add(request.NewErrParamMinLen("Message", 1)) - } - if s.Timestamp == nil { - invalidParams.Add(request.NewErrParamRequired("Timestamp")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type LogGroup struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - CreationTime *int64 `locationName:"creationTime" type:"long"` - - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` - - // The number of metric filters associated with the log group. - MetricFilterCount *int64 `locationName:"metricFilterCount" type:"integer"` - - // Specifies the number of days you want to retain log events in the specified - // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, - // 365, 400, 545, 731, 1827, 3653. - RetentionInDays *int64 `locationName:"retentionInDays" type:"integer"` - - StoredBytes *int64 `locationName:"storedBytes" type:"long"` -} - -// String returns the string representation -func (s LogGroup) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LogGroup) GoString() string { - return s.String() -} - -// A log stream is a sequence of log events from a single emitter of logs. -type LogStream struct { - _ struct{} `type:"structure"` - - Arn *string `locationName:"arn" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - CreationTime *int64 `locationName:"creationTime" type:"long"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - FirstEventTimestamp *int64 `locationName:"firstEventTimestamp" type:"long"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC.
- LastEventTimestamp *int64 `locationName:"lastEventTimestamp" type:"long"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - LastIngestionTime *int64 `locationName:"lastIngestionTime" type:"long"` - - LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` - - StoredBytes *int64 `locationName:"storedBytes" type:"long"` - - // A string token used for making PutLogEvents requests. A sequenceToken can - // only be used once, and PutLogEvents requests must include the sequenceToken - // obtained from the response of the previous request. - UploadSequenceToken *string `locationName:"uploadSequenceToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s LogStream) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s LogStream) GoString() string { - return s.String() -} - -// Metric filters can be used to express how CloudWatch Logs would extract metric -// observations from ingested log events and transform them to metric data in -// a CloudWatch metric. -type MetricFilter struct { - _ struct{} `type:"structure"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - CreationTime *int64 `locationName:"creationTime" type:"long"` - - // A name for a metric or subscription filter. - FilterName *string `locationName:"filterName" min:"1" type:"string"` - - // A symbolic description of how CloudWatch Logs should interpret the data in - // each log event. For example, a log event may contain timestamps, IP addresses, - // strings, and so on. You use the filter pattern to specify what to look for - // in the log event message. - FilterPattern *string `locationName:"filterPattern" type:"string"` - - MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list"` -} - -// String returns the string representation -func (s MetricFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MetricFilter) GoString() string { - return s.String() -} - -type MetricFilterMatchRecord struct { - _ struct{} `type:"structure"` - - EventMessage *string `locationName:"eventMessage" min:"1" type:"string"` - - EventNumber *int64 `locationName:"eventNumber" type:"long"` - - ExtractedValues map[string]*string `locationName:"extractedValues" type:"map"` -} - -// String returns the string representation -func (s MetricFilterMatchRecord) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MetricFilterMatchRecord) GoString() string { - return s.String() -} - -type MetricTransformation struct { - _ struct{} `type:"structure"` - - // The name of the CloudWatch metric to which the monitored log information - // should be published. For example, you may publish to a metric called ErrorCount. - MetricName *string `locationName:"metricName" type:"string" required:"true"` - - // The destination namespace of the new CloudWatch metric. - MetricNamespace *string `locationName:"metricNamespace" type:"string" required:"true"` - - // What to publish to the metric. For example, if you're counting the occurrences - // of a particular term like "Error", the value will be "1" for each occurrence. - // If you're counting the bytes transferred the published value will be the - // value in the log event. 
- MetricValue *string `locationName:"metricValue" type:"string" required:"true"` -} - -// String returns the string representation -func (s MetricTransformation) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s MetricTransformation) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *MetricTransformation) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "MetricTransformation"} - if s.MetricName == nil { - invalidParams.Add(request.NewErrParamRequired("MetricName")) - } - if s.MetricNamespace == nil { - invalidParams.Add(request.NewErrParamRequired("MetricNamespace")) - } - if s.MetricValue == nil { - invalidParams.Add(request.NewErrParamRequired("MetricValue")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type OutputLogEvent struct { - _ struct{} `type:"structure"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - IngestionTime *int64 `locationName:"ingestionTime" type:"long"` - - Message *string `locationName:"message" min:"1" type:"string"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - Timestamp *int64 `locationName:"timestamp" type:"long"` -} - -// String returns the string representation -func (s OutputLogEvent) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s OutputLogEvent) GoString() string { - return s.String() -} - -type PutDestinationInput struct { - _ struct{} `type:"structure"` - - // A name for the destination. - DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` - - // The ARN of an IAM role that grants CloudWatch Logs permissions to do Amazon - // Kinesis PutRecord requests on the destination stream. - RoleArn *string `locationName:"roleArn" min:"1" type:"string" required:"true"` - - // The ARN of an Amazon Kinesis stream to deliver matching log events to. - TargetArn *string `locationName:"targetArn" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s PutDestinationInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutDestinationInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutDestinationInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutDestinationInput"} - if s.DestinationName == nil { - invalidParams.Add(request.NewErrParamRequired("DestinationName")) - } - if s.DestinationName != nil && len(*s.DestinationName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) - } - if s.RoleArn == nil { - invalidParams.Add(request.NewErrParamRequired("RoleArn")) - } - if s.RoleArn != nil && len(*s.RoleArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) - } - if s.TargetArn == nil { - invalidParams.Add(request.NewErrParamRequired("TargetArn")) - } - if s.TargetArn != nil && len(*s.TargetArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("TargetArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutDestinationOutput struct { - _ struct{} `type:"structure"` - - // A cross account destination that is the recipient of subscription log events.
- Destination *Destination `locationName:"destination" type:"structure"` -} - -// String returns the string representation -func (s PutDestinationOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutDestinationOutput) GoString() string { - return s.String() -} - -type PutDestinationPolicyInput struct { - _ struct{} `type:"structure"` - - // An IAM policy document that authorizes cross-account users to deliver their - // log events to the associated destination. - AccessPolicy *string `locationName:"accessPolicy" min:"1" type:"string" required:"true"` - - // A name for an existing destination. - DestinationName *string `locationName:"destinationName" min:"1" type:"string" required:"true"` -} - -// String returns the string representation -func (s PutDestinationPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutDestinationPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutDestinationPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutDestinationPolicyInput"} - if s.AccessPolicy == nil { - invalidParams.Add(request.NewErrParamRequired("AccessPolicy")) - } - if s.AccessPolicy != nil && len(*s.AccessPolicy) < 1 { - invalidParams.Add(request.NewErrParamMinLen("AccessPolicy", 1)) - } - if s.DestinationName == nil { - invalidParams.Add(request.NewErrParamRequired("DestinationName")) - } - if s.DestinationName != nil && len(*s.DestinationName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DestinationName", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutDestinationPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutDestinationPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutDestinationPolicyOutput) GoString() string { - return s.String() -} - -type PutLogEventsInput struct { - _ struct{} `type:"structure"` - - // A list of log events belonging to a log stream. - LogEvents []*InputLogEvent `locationName:"logEvents" min:"1" type:"list" required:"true"` - - // The name of the log group to put log events to. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // The name of the log stream to put log events to. - LogStreamName *string `locationName:"logStreamName" min:"1" type:"string" required:"true"` - - // A string token that must be obtained from the response of the previous PutLogEvents - // request. - SequenceToken *string `locationName:"sequenceToken" min:"1" type:"string"` -} - -// String returns the string representation -func (s PutLogEventsInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutLogEventsInput) GoString() string { - return s.String() -} -
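The sequenceToken handshake these comments describe is easiest to see in code. A minimal sketch with hypothetical group and stream names: the first call on a fresh stream passes a nil token, and every later call must pass the NextSequenceToken returned by the previous one.

import (
	"time"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func putOneEvent(svc *cloudwatchlogs.CloudWatchLogs, token *string) (*string, error) {
	out, err := svc.PutLogEvents(&cloudwatchlogs.PutLogEventsInput{
		LogGroupName:  aws.String("docker-logs"), // hypothetical
		LogStreamName: aws.String("container-1"), // hypothetical
		SequenceToken: token,                     // nil on the first call for a new stream
		LogEvents: []*cloudwatchlogs.InputLogEvent{{
			Message:   aws.String("hello from docker"),
			Timestamp: aws.Int64(time.Now().UnixNano() / int64(time.Millisecond)),
		}},
	})
	if err != nil {
		return nil, err
	}
	// Each response carries the token the next PutLogEvents call must use.
	return out.NextSequenceToken, nil
}

-// Validate inspects the fields of the type to determine if they are valid.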
-func (s *PutLogEventsInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutLogEventsInput"} - if s.LogEvents == nil { - invalidParams.Add(request.NewErrParamRequired("LogEvents")) - } - if s.LogEvents != nil && len(s.LogEvents) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogEvents", 1)) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.LogStreamName == nil { - invalidParams.Add(request.NewErrParamRequired("LogStreamName")) - } - if s.LogStreamName != nil && len(*s.LogStreamName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogStreamName", 1)) - } - if s.SequenceToken != nil && len(*s.SequenceToken) < 1 { - invalidParams.Add(request.NewErrParamMinLen("SequenceToken", 1)) - } - if s.LogEvents != nil { - for i, v := range s.LogEvents { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "LogEvents", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutLogEventsOutput struct { - _ struct{} `type:"structure"` - - // A string token used for making PutLogEvents requests. A sequenceToken can - // only be used once, and PutLogEvents requests must include the sequenceToken - // obtained from the response of the previous request. - NextSequenceToken *string `locationName:"nextSequenceToken" min:"1" type:"string"` - - RejectedLogEventsInfo *RejectedLogEventsInfo `locationName:"rejectedLogEventsInfo" type:"structure"` -} - -// String returns the string representation -func (s PutLogEventsOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutLogEventsOutput) GoString() string { - return s.String() -} - -type PutMetricFilterInput struct { - _ struct{} `type:"structure"` - - // A name for the metric filter. - FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` - - // A valid CloudWatch Logs filter pattern for extracting metric data out of - // ingested log events. - FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` - - // The name of the log group to associate the metric filter with. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // A collection of information needed to define how metric data is emitted. - MetricTransformations []*MetricTransformation `locationName:"metricTransformations" min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s PutMetricFilterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutMetricFilterInput) GoString() string { - return s.String() -} -
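To connect PutMetricFilterInput with the MetricTransformation shape it carries, here is a hedged sketch that counts log events containing a term; the filter name, pattern, namespace, and group name are all assumptions for illustration.

import (
	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
)

func putErrorCountFilter(svc *cloudwatchlogs.CloudWatchLogs) error {
	_, err := svc.PutMetricFilter(&cloudwatchlogs.PutMetricFilterInput{
		FilterName:    aws.String("errors"),      // hypothetical
		FilterPattern: aws.String("ERROR"),       // match events containing ERROR
		LogGroupName:  aws.String("docker-logs"), // hypothetical
		MetricTransformations: []*cloudwatchlogs.MetricTransformation{{
			MetricName:      aws.String("ErrorCount"),
			MetricNamespace: aws.String("DockerApp"), // hypothetical namespace
			MetricValue:     aws.String("1"),         // emit 1 per matched event
		}},
	})
	return err
}

-// Validate inspects the fields of the type to determine if they are valid.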
-func (s *PutMetricFilterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutMetricFilterInput"} - if s.FilterName == nil { - invalidParams.Add(request.NewErrParamRequired("FilterName")) - } - if s.FilterName != nil && len(*s.FilterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) - } - if s.FilterPattern == nil { - invalidParams.Add(request.NewErrParamRequired("FilterPattern")) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.MetricTransformations == nil { - invalidParams.Add(request.NewErrParamRequired("MetricTransformations")) - } - if s.MetricTransformations != nil && len(s.MetricTransformations) < 1 { - invalidParams.Add(request.NewErrParamMinLen("MetricTransformations", 1)) - } - if s.MetricTransformations != nil { - for i, v := range s.MetricTransformations { - if v == nil { - continue - } - if err := v.Validate(); err != nil { - invalidParams.AddNested(fmt.Sprintf("%s[%v]", "MetricTransformations", i), err.(request.ErrInvalidParams)) - } - } - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutMetricFilterOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutMetricFilterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutMetricFilterOutput) GoString() string { - return s.String() -} - -type PutRetentionPolicyInput struct { - _ struct{} `type:"structure"` - - // The name of the log group to associate the retention policy with. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // Specifies the number of days you want to retain log events in the specified - // log group. Possible values are: 1, 3, 5, 7, 14, 30, 60, 90, 120, 150, 180, - // 365, 400, 545, 731, 1827, 3653. - RetentionInDays *int64 `locationName:"retentionInDays" type:"integer" required:"true"` -} - -// String returns the string representation -func (s PutRetentionPolicyInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutRetentionPolicyInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *PutRetentionPolicyInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutRetentionPolicyInput"} - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.RetentionInDays == nil { - invalidParams.Add(request.NewErrParamRequired("RetentionInDays")) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutRetentionPolicyOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutRetentionPolicyOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutRetentionPolicyOutput) GoString() string { - return s.String() -} - -type PutSubscriptionFilterInput struct { - _ struct{} `type:"structure"` - - // The ARN of the destination to deliver matching log events to. 
Currently, - // the supported destinations are: An Amazon Kinesis stream belonging to the - // same account as the subscription filter, for same-account delivery. A logical - // destination (used via an ARN of Destination) belonging to a different account, - // for cross-account delivery. An Amazon Kinesis Firehose stream belonging - // to the same account as the subscription filter, for same-account delivery. - // An AWS Lambda function belonging to the same account as the subscription - // filter, for same-account delivery. - DestinationArn *string `locationName:"destinationArn" min:"1" type:"string" required:"true"` - - // A name for the subscription filter. - FilterName *string `locationName:"filterName" min:"1" type:"string" required:"true"` - - // A valid CloudWatch Logs filter pattern for subscribing to a filtered stream - // of log events. - FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` - - // The name of the log group to associate the subscription filter with. - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string" required:"true"` - - // The ARN of an IAM role that grants CloudWatch Logs permissions to deliver - // ingested log events to the destination stream. You don't need to provide - // the ARN when you are working with a logical destination (used via an ARN - // of Destination) for cross-account delivery. - RoleArn *string `locationName:"roleArn" min:"1" type:"string"` -} - -// String returns the string representation -func (s PutSubscriptionFilterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutSubscriptionFilterInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. 
-func (s *PutSubscriptionFilterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "PutSubscriptionFilterInput"} - if s.DestinationArn == nil { - invalidParams.Add(request.NewErrParamRequired("DestinationArn")) - } - if s.DestinationArn != nil && len(*s.DestinationArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("DestinationArn", 1)) - } - if s.FilterName == nil { - invalidParams.Add(request.NewErrParamRequired("FilterName")) - } - if s.FilterName != nil && len(*s.FilterName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("FilterName", 1)) - } - if s.FilterPattern == nil { - invalidParams.Add(request.NewErrParamRequired("FilterPattern")) - } - if s.LogGroupName == nil { - invalidParams.Add(request.NewErrParamRequired("LogGroupName")) - } - if s.LogGroupName != nil && len(*s.LogGroupName) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogGroupName", 1)) - } - if s.RoleArn != nil && len(*s.RoleArn) < 1 { - invalidParams.Add(request.NewErrParamMinLen("RoleArn", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type PutSubscriptionFilterOutput struct { - _ struct{} `type:"structure"` -} - -// String returns the string representation -func (s PutSubscriptionFilterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s PutSubscriptionFilterOutput) GoString() string { - return s.String() -} - -type RejectedLogEventsInfo struct { - _ struct{} `type:"structure"` - - ExpiredLogEventEndIndex *int64 `locationName:"expiredLogEventEndIndex" type:"integer"` - - TooNewLogEventStartIndex *int64 `locationName:"tooNewLogEventStartIndex" type:"integer"` - - TooOldLogEventEndIndex *int64 `locationName:"tooOldLogEventEndIndex" type:"integer"` -} - -// String returns the string representation -func (s RejectedLogEventsInfo) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s RejectedLogEventsInfo) GoString() string { - return s.String() -} - -// An object indicating the search status of a log stream in a FilterLogEvents -// request. -type SearchedLogStream struct { - _ struct{} `type:"structure"` - - // The name of the log stream. - LogStreamName *string `locationName:"logStreamName" min:"1" type:"string"` - - // Indicates whether all the events in this log stream were searched or more - // data exists to search by paginating further. - SearchedCompletely *bool `locationName:"searchedCompletely" type:"boolean"` -} - -// String returns the string representation -func (s SearchedLogStream) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SearchedLogStream) GoString() string { - return s.String() -} - -type SubscriptionFilter struct { - _ struct{} `type:"structure"` - - // A point in time expressed as the number of milliseconds since Jan 1, 1970 - // 00:00:00 UTC. - CreationTime *int64 `locationName:"creationTime" type:"long"` - - DestinationArn *string `locationName:"destinationArn" min:"1" type:"string"` - - // A name for a metric or subscription filter. - FilterName *string `locationName:"filterName" min:"1" type:"string"` - - // A symbolic description of how CloudWatch Logs should interpret the data in - // each log event. For example, a log event may contain timestamps, IP addresses, - // strings, and so on. You use the filter pattern to specify what to look for - // in the log event message. 
- FilterPattern *string `locationName:"filterPattern" type:"string"` - - LogGroupName *string `locationName:"logGroupName" min:"1" type:"string"` - - RoleArn *string `locationName:"roleArn" min:"1" type:"string"` -} - -// String returns the string representation -func (s SubscriptionFilter) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s SubscriptionFilter) GoString() string { - return s.String() -} - -type TestMetricFilterInput struct { - _ struct{} `type:"structure"` - - // A symbolic description of how CloudWatch Logs should interpret the data in - // each log event. For example, a log event may contain timestamps, IP addresses, - // strings, and so on. You use the filter pattern to specify what to look for - // in the log event message. - FilterPattern *string `locationName:"filterPattern" type:"string" required:"true"` - - // A list of log event messages to test. - LogEventMessages []*string `locationName:"logEventMessages" min:"1" type:"list" required:"true"` -} - -// String returns the string representation -func (s TestMetricFilterInput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TestMetricFilterInput) GoString() string { - return s.String() -} - -// Validate inspects the fields of the type to determine if they are valid. -func (s *TestMetricFilterInput) Validate() error { - invalidParams := request.ErrInvalidParams{Context: "TestMetricFilterInput"} - if s.FilterPattern == nil { - invalidParams.Add(request.NewErrParamRequired("FilterPattern")) - } - if s.LogEventMessages == nil { - invalidParams.Add(request.NewErrParamRequired("LogEventMessages")) - } - if s.LogEventMessages != nil && len(s.LogEventMessages) < 1 { - invalidParams.Add(request.NewErrParamMinLen("LogEventMessages", 1)) - } - - if invalidParams.Len() > 0 { - return invalidParams - } - return nil -} - -type TestMetricFilterOutput struct { - _ struct{} `type:"structure"` - - Matches []*MetricFilterMatchRecord `locationName:"matches" type:"list"` -} - -// String returns the string representation -func (s TestMetricFilterOutput) String() string { - return awsutil.Prettify(s) -} - -// GoString returns the string representation -func (s TestMetricFilterOutput) GoString() string { - return s.String() -} - -const ( - // @enum ExportTaskStatusCode - ExportTaskStatusCodeCancelled = "CANCELLED" - // @enum ExportTaskStatusCode - ExportTaskStatusCodeCompleted = "COMPLETED" - // @enum ExportTaskStatusCode - ExportTaskStatusCodeFailed = "FAILED" - // @enum ExportTaskStatusCode - ExportTaskStatusCodePending = "PENDING" - // @enum ExportTaskStatusCode - ExportTaskStatusCodePendingCancel = "PENDING_CANCEL" - // @enum ExportTaskStatusCode - ExportTaskStatusCodeRunning = "RUNNING" -) - -const ( - // @enum OrderBy - OrderByLogStreamName = "LogStreamName" - // @enum OrderBy - OrderByLastEventTime = "LastEventTime" -) diff --git a/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go b/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go deleted file mode 100644 index e5a47270c5..0000000000 --- a/vendor/src/github.com/aws/aws-sdk-go/service/cloudwatchlogs/service.go +++ /dev/null @@ -1,116 +0,0 @@ -// THIS FILE IS AUTOMATICALLY GENERATED. DO NOT EDIT. 
- -package cloudwatchlogs - -import ( - "github.com/aws/aws-sdk-go/aws" - "github.com/aws/aws-sdk-go/aws/client" - "github.com/aws/aws-sdk-go/aws/client/metadata" - "github.com/aws/aws-sdk-go/aws/request" - "github.com/aws/aws-sdk-go/private/protocol/jsonrpc" - "github.com/aws/aws-sdk-go/private/signer/v4" -) - -// You can use Amazon CloudWatch Logs to monitor, store, and access your log -// files from Amazon Elastic Compute Cloud (Amazon EC2) instances, Amazon CloudTrail, -// or other sources. You can then retrieve the associated log data from CloudWatch -// Logs using the Amazon CloudWatch console, the CloudWatch Logs commands in -// the AWS CLI, the CloudWatch Logs API, or the CloudWatch Logs SDK. -// -// You can use CloudWatch Logs to: -// -// Monitor Logs from Amazon EC2 Instances in Real-time: You can use CloudWatch -// Logs to monitor applications and systems using log data. For example, CloudWatch -// Logs can track the number of errors that occur in your application logs and -// send you a notification whenever the rate of errors exceeds a threshold you -// specify. CloudWatch Logs uses your log data for monitoring; so, no code changes -// are required. For example, you can monitor application logs for specific -// literal terms (such as "NullReferenceException") or count the number of occurrences -// of a literal term at a particular position in log data (such as "404" status -// codes in an Apache access log). When the term you are searching for is found, -// CloudWatch Logs reports the data to an Amazon CloudWatch metric that you specify. -// -// Monitor Amazon CloudTrail Logged Events: You can create alarms in Amazon -// CloudWatch and receive notifications of particular API activity as captured -// by CloudTrail and use the notification to perform troubleshooting. -// -// Archive Log Data: You can use CloudWatch Logs to store your log data in -// highly durable storage. You can change the log retention setting so that -// any log events older than this setting are automatically deleted. The CloudWatch -// Logs agent makes it easy to quickly send both rotated and non-rotated log -// data off of a host and into the log service. You can then access the raw -// log data when you need it. -// The service client's operations are safe to be used concurrently. -// It is not safe to mutate any of the client's properties though. -type CloudWatchLogs struct { - *client.Client -} - -// Used for custom client initialization logic -var initClient func(*client.Client) - -// Used for custom request initialization logic -var initRequest func(*request.Request) - -// A ServiceName is the name of the service the client will make API calls to. -const ServiceName = "logs" - -// New creates a new instance of the CloudWatchLogs client with a session. -// If additional configuration is needed for the client instance use the optional -// aws.Config parameter to add your extra config. -// -// Example: -// // Create a CloudWatchLogs client from just a session. -// svc := cloudwatchlogs.New(mySession) -// -// // Create a CloudWatchLogs client with additional configuration -// svc := cloudwatchlogs.New(mySession, aws.NewConfig().WithRegion("us-west-2")) -func New(p client.ConfigProvider, cfgs ...*aws.Config) *CloudWatchLogs { - c := p.ClientConfig(ServiceName, cfgs...) - return newClient(*c.Config, c.Handlers, c.Endpoint, c.SigningRegion) -} - -// newClient creates, initializes and returns a new service client instance.
-func newClient(cfg aws.Config, handlers request.Handlers, endpoint, signingRegion string) *CloudWatchLogs { - svc := &CloudWatchLogs{ - Client: client.New( - cfg, - metadata.ClientInfo{ - ServiceName: ServiceName, - SigningRegion: signingRegion, - Endpoint: endpoint, - APIVersion: "2014-03-28", - JSONVersion: "1.1", - TargetPrefix: "Logs_20140328", - }, - handlers, - ), - } - - // Handlers - svc.Handlers.Sign.PushBack(v4.Sign) - svc.Handlers.Build.PushBackNamed(jsonrpc.BuildHandler) - svc.Handlers.Unmarshal.PushBackNamed(jsonrpc.UnmarshalHandler) - svc.Handlers.UnmarshalMeta.PushBackNamed(jsonrpc.UnmarshalMetaHandler) - svc.Handlers.UnmarshalError.PushBackNamed(jsonrpc.UnmarshalErrorHandler) - - // Run custom client initialization if present - if initClient != nil { - initClient(svc.Client) - } - - return svc -} - -// newRequest creates a new request for a CloudWatchLogs operation and runs any -// custom request initialization. -func (c *CloudWatchLogs) newRequest(op *request.Operation, params, data interface{}) *request.Request { - req := c.NewRequest(op, params, data) - - // Run custom request initialization if present - if initRequest != nil { - initRequest(req) - } - - return req -} diff --git a/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt deleted file mode 100644 index 1602287d7c..0000000000 --- a/vendor/src/github.com/beorn7/perks/quantile/exampledata.txt +++ /dev/null @@ -1,2388 +0,0 @@ -8 -5 -26 -12 -5 -235 -13 -6 -28 -30 -3 -3 -3 -3 -5 -2 -33 -7 -2 -4 -7 -12 -14 -5 -8 -3 -10 -4 -5 -3 -6 -6 -209 -20 -3 -10 -14 -3 -4 -6 -8 -5 -11 -7 -3 -2 -3 -3 -212 -5 -222 -4 -10 -10 -5 -6 -3 -8 -3 -10 -254 -220 -2 -3 -5 -24 -5 -4 -222 -7 -3 -3 -223 -8 -15 -12 -14 -14 -3 -2 -2 -3 -13 -3 -11 -4 -4 -6 -5 -7 -13 -5 -3 -5 -2 -5 -3 -5 -2 -7 -15 -17 -14 -3 -6 -6 -3 -17 -5 -4 -7 -6 -4 -4 -8 -6 -8 -3 -9 -3 -6 -3 -4 -5 -3 -3 -660 -4 -6 -10 -3 -6 -3 -2 -5 -13 -2 -4 -4 -10 -4 -8 -4 -3 -7 -9 -9 -3 -10 -37 -3 -13 -4 -12 -3 -6 -10 -8 -5 -21 -2 -3 -8 -3 -2 -3 -3 -4 -12 -2 -4 -8 -8 -4 -3 -2 -20 -1 -6 -32 -2 -11 -6 -18 -3 -8 -11 -3 -212 -3 -4 -2 -6 -7 -12 -11 -3 -2 -16 -10 -6 -4 -6 -3 -2 -7 -3 -2 -2 -2 -2 -5 -6 -4 -3 -10 -3 -4 -6 -5 -3 -4 -4 -5 -6 -4 -3 -4 -4 -5 -7 -5 -5 -3 -2 -7 -2 -4 -12 -4 -5 -6 -2 -4 -4 -8 -4 -15 -13 -7 -16 -5 -3 -23 -5 -5 -7 -3 -2 -9 -8 -7 -5 -8 -11 -4 -10 -76 -4 -47 -4 -3 -2 -7 -4 -2 -3 -37 -10 -4 -2 -20 -5 -4 -4 -10 -10 -4 -3 -7 -23 -240 -7 -13 -5 -5 -3 -3 -2 -5 -4 -2 -8 -7 -19 -2 -23 -8 -7 -2 -5 -3 -8 -3 -8 -13 -5 -5 -5 -2 -3 -23 -4 -9 -8 -4 -3 -3 -5 -220 -2 -3 -4 -6 -14 -3 -53 -6 -2 -5 -18 -6 -3 -219 -6 -5 -2 -5 -3 -6 -5 -15 -4 -3 -17 -3 -2 -4 -7 -2 -3 -3 -4 -4 -3 -2 -664 -6 -3 -23 -5 -5 -16 -5 -8 -2 -4 -2 -24 -12 -3 -2 -3 -5 -8 -3 -5 -4 -3 -14 -3 -5 -8 -2 -3 -7 -9 -4 -2 -3 -6 -8 -4 -3 -4 -6 -5 -3 -3 -6 -3 -19 -4 -4 -6 -3 -6 -3 -5 -22 -5 -4 -4 -3 -8 -11 -4 -9 -7 -6 -13 -4 -4 -4 -6 -17 -9 -3 -3 -3 -4 -3 -221 -5 -11 -3 -4 -2 -12 -6 -3 -5 -7 -5 -7 -4 -9 -7 -14 -37 -19 -217 -16 -3 -5 -2 -2 -7 -19 -7 -6 -7 -4 -24 -5 -11 -4 -7 -7 -9 -13 -3 -4 -3 -6 -28 -4 -4 -5 -5 -2 -5 -6 -4 -4 -6 -10 -5 -4 -3 -2 -3 -3 -6 -5 -5 -4 -3 -2 -3 -7 -4 -6 -18 -16 -8 -16 -4 -5 -8 -6 -9 -13 -1545 -6 -215 -6 -5 -6 -3 -45 -31 -5 -2 -2 -4 -3 -3 -2 -5 -4 -3 -5 -7 -7 -4 -5 -8 -5 -4 -749 -2 -31 -9 -11 -2 -11 -5 -4 -4 -7 -9 -11 -4 -5 -4 -7 -3 -4 -6 -2 -15 -3 -4 -3 -4 -3 -5 -2 -13 -5 -5 -3 -3 -23 -4 -4 -5 -7 -4 -13 -2 -4 -3 -4 -2 -6 -2 -7 -3 -5 -5 -3 -29 -5 -4 -4 -3 -10 -2 -3 -79 -16 -6 -6 -7 -7 -3 -5 -5 -7 -4 -3 -7 -9 -5 -6 -5 -9 -6 -3 -6 -4 -17 -2 
-10 -9 -3 -6 -2 -3 -21 -22 -5 -11 -4 -2 -17 -2 -224 -2 -14 -3 -4 -4 -2 -4 -4 -4 -4 -5 -3 -4 -4 -10 -2 -6 -3 -3 -5 -7 -2 -7 -5 -6 -3 -218 -2 -2 -5 -2 -6 -3 -5 -222 -14 -6 -33 -3 -2 -5 -3 -3 -3 -9 -5 -3 -3 -2 -7 -4 -3 -4 -3 -5 -6 -5 -26 -4 -13 -9 -7 -3 -221 -3 -3 -4 -4 -4 -4 -2 -18 -5 -3 -7 -9 -6 -8 -3 -10 -3 -11 -9 -5 -4 -17 -5 -5 -6 -6 -3 -2 -4 -12 -17 -6 -7 -218 -4 -2 -4 -10 -3 -5 -15 -3 -9 -4 -3 -3 -6 -29 -3 -3 -4 -5 -5 -3 -8 -5 -6 -6 -7 -5 -3 -5 -3 -29 -2 -31 -5 -15 -24 -16 -5 -207 -4 -3 -3 -2 -15 -4 -4 -13 -5 -5 -4 -6 -10 -2 -7 -8 -4 -6 -20 -5 -3 -4 -3 -12 -12 -5 -17 -7 -3 -3 -3 -6 -10 -3 -5 -25 -80 -4 -9 -3 -2 -11 -3 -3 -2 -3 -8 -7 -5 -5 -19 -5 -3 -3 -12 -11 -2 -6 -5 -5 -5 -3 -3 -3 -4 -209 -14 -3 -2 -5 -19 -4 -4 -3 -4 -14 -5 -6 -4 -13 -9 -7 -4 -7 -10 -2 -9 -5 -7 -2 -8 -4 -6 -5 -5 -222 -8 -7 -12 -5 -216 -3 -4 -4 -6 -3 -14 -8 -7 -13 -4 -3 -3 -3 -3 -17 -5 -4 -3 -33 -6 -6 -33 -7 -5 -3 -8 -7 -5 -2 -9 -4 -2 -233 -24 -7 -4 -8 -10 -3 -4 -15 -2 -16 -3 -3 -13 -12 -7 -5 -4 -207 -4 -2 -4 -27 -15 -2 -5 -2 -25 -6 -5 -5 -6 -13 -6 -18 -6 -4 -12 -225 -10 -7 -5 -2 -2 -11 -4 -14 -21 -8 -10 -3 -5 -4 -232 -2 -5 -5 -3 -7 -17 -11 -6 -6 -23 -4 -6 -3 -5 -4 -2 -17 -3 -6 -5 -8 -3 -2 -2 -14 -9 -4 -4 -2 -5 -5 -3 -7 -6 -12 -6 -10 -3 -6 -2 -2 -19 -5 -4 -4 -9 -2 -4 -13 -3 -5 -6 -3 -6 -5 -4 -9 -6 -3 -5 -7 -3 -6 -6 -4 -3 -10 -6 -3 -221 -3 -5 -3 -6 -4 -8 -5 -3 -6 -4 -4 -2 -54 -5 -6 -11 -3 -3 -4 -4 -4 -3 -7 -3 -11 -11 -7 -10 -6 -13 -223 -213 -15 -231 -7 -3 -7 -228 -2 -3 -4 -4 -5 -6 -7 -4 -13 -3 -4 -5 -3 -6 -4 -6 -7 -2 -4 -3 -4 -3 -3 -6 -3 -7 -3 -5 -18 -5 -6 -8 -10 -3 -3 -3 -2 -4 -2 -4 -4 -5 -6 -6 -4 -10 -13 -3 -12 -5 -12 -16 -8 -4 -19 -11 -2 -4 -5 -6 -8 -5 -6 -4 -18 -10 -4 -2 -216 -6 -6 -6 -2 -4 -12 -8 -3 -11 -5 -6 -14 -5 -3 -13 -4 -5 -4 -5 -3 -28 -6 -3 -7 -219 -3 -9 -7 -3 -10 -6 -3 -4 -19 -5 -7 -11 -6 -15 -19 -4 -13 -11 -3 -7 -5 -10 -2 -8 -11 -2 -6 -4 -6 -24 -6 -3 -3 -3 -3 -6 -18 -4 -11 -4 -2 -5 -10 -8 -3 -9 -5 -3 -4 -5 -6 -2 -5 -7 -4 -4 -14 -6 -4 -4 -5 -5 -7 -2 -4 -3 -7 -3 -3 -6 -4 -5 -4 -4 -4 -3 -3 -3 -3 -8 -14 -2 -3 -5 -3 -2 -4 -5 -3 -7 -3 -3 -18 -3 -4 -4 -5 -7 -3 -3 -3 -13 -5 -4 -8 -211 -5 -5 -3 -5 -2 -5 -4 -2 -655 -6 -3 -5 -11 -2 -5 -3 -12 -9 -15 -11 -5 -12 -217 -2 -6 -17 -3 -3 -207 -5 -5 -4 -5 -9 -3 -2 -8 -5 -4 -3 -2 -5 -12 -4 -14 -5 -4 -2 -13 -5 -8 -4 -225 -4 -3 -4 -5 -4 -3 -3 -6 -23 -9 -2 -6 -7 -233 -4 -4 -6 -18 -3 -4 -6 -3 -4 -4 -2 -3 -7 -4 -13 -227 -4 -3 -5 -4 -2 -12 -9 -17 -3 -7 -14 -6 -4 -5 -21 -4 -8 -9 -2 -9 -25 -16 -3 -6 -4 -7 -8 -5 -2 -3 -5 -4 -3 -3 -5 -3 -3 -3 -2 -3 -19 -2 -4 -3 -4 -2 -3 -4 -4 -2 -4 -3 -3 -3 -2 -6 -3 -17 -5 -6 -4 -3 -13 -5 -3 -3 -3 -4 -9 -4 -2 -14 -12 -4 -5 -24 -4 -3 -37 -12 -11 -21 -3 -4 -3 -13 -4 -2 -3 -15 -4 -11 -4 -4 -3 -8 -3 -4 -4 -12 -8 -5 -3 -3 -4 -2 -220 -3 -5 -223 -3 -3 -3 -10 -3 -15 -4 -241 -9 -7 -3 -6 -6 -23 -4 -13 -7 -3 -4 -7 -4 -9 -3 -3 -4 -10 -5 -5 -1 -5 -24 -2 -4 -5 -5 -6 -14 -3 -8 -2 -3 -5 -13 -13 -3 -5 -2 -3 -15 -3 -4 -2 -10 -4 -4 -4 -5 -5 -3 -5 -3 -4 -7 -4 -27 -3 -6 -4 -15 -3 -5 -6 -6 -5 -4 -8 -3 -9 -2 -6 -3 -4 -3 -7 -4 -18 -3 -11 -3 -3 -8 -9 -7 -24 -3 -219 -7 -10 -4 -5 -9 -12 -2 -5 -4 -4 -4 -3 -3 -19 -5 -8 -16 -8 -6 -22 -3 -23 -3 -242 -9 -4 -3 -3 -5 -7 -3 -3 -5 -8 -3 -7 -5 -14 -8 -10 -3 -4 -3 -7 -4 -6 -7 -4 -10 -4 -3 -11 -3 -7 -10 -3 -13 -6 -8 -12 -10 -5 -7 -9 -3 -4 -7 -7 -10 -8 -30 -9 -19 -4 -3 -19 -15 -4 -13 -3 -215 -223 -4 -7 -4 -8 -17 -16 -3 -7 -6 -5 -5 -4 -12 -3 -7 -4 -4 -13 -4 -5 -2 -5 -6 -5 -6 -6 -7 -10 -18 -23 -9 -3 -3 -6 -5 -2 -4 -2 -7 -3 -3 -2 -5 -5 -14 -10 -224 -6 -3 -4 -3 -7 -5 -9 -3 -6 -4 -2 -5 -11 -4 -3 -3 -2 -8 -4 -7 -4 -10 -7 -3 -3 -18 -18 -17 -3 -3 -3 
-4 -5 -3 -3 -4 -12 -7 -3 -11 -13 -5 -4 -7 -13 -5 -4 -11 -3 -12 -3 -6 -4 -4 -21 -4 -6 -9 -5 -3 -10 -8 -4 -6 -4 -4 -6 -5 -4 -8 -6 -4 -6 -4 -4 -5 -9 -6 -3 -4 -2 -9 -3 -18 -2 -4 -3 -13 -3 -6 -6 -8 -7 -9 -3 -2 -16 -3 -4 -6 -3 -2 -33 -22 -14 -4 -9 -12 -4 -5 -6 -3 -23 -9 -4 -3 -5 -5 -3 -4 -5 -3 -5 -3 -10 -4 -5 -5 -8 -4 -4 -6 -8 -5 -4 -3 -4 -6 -3 -3 -3 -5 -9 -12 -6 -5 -9 -3 -5 -3 -2 -2 -2 -18 -3 -2 -21 -2 -5 -4 -6 -4 -5 -10 -3 -9 -3 -2 -10 -7 -3 -6 -6 -4 -4 -8 -12 -7 -3 -7 -3 -3 -9 -3 -4 -5 -4 -4 -5 -5 -10 -15 -4 -4 -14 -6 -227 -3 -14 -5 -216 -22 -5 -4 -2 -2 -6 -3 -4 -2 -9 -9 -4 -3 -28 -13 -11 -4 -5 -3 -3 -2 -3 -3 -5 -3 -4 -3 -5 -23 -26 -3 -4 -5 -6 -4 -6 -3 -5 -5 -3 -4 -3 -2 -2 -2 -7 -14 -3 -6 -7 -17 -2 -2 -15 -14 -16 -4 -6 -7 -13 -6 -4 -5 -6 -16 -3 -3 -28 -3 -6 -15 -3 -9 -2 -4 -6 -3 -3 -22 -4 -12 -6 -7 -2 -5 -4 -10 -3 -16 -6 -9 -2 -5 -12 -7 -5 -5 -5 -5 -2 -11 -9 -17 -4 -3 -11 -7 -3 -5 -15 -4 -3 -4 -211 -8 -7 -5 -4 -7 -6 -7 -6 -3 -6 -5 -6 -5 -3 -4 -4 -26 -4 -6 -10 -4 -4 -3 -2 -3 -3 -4 -5 -9 -3 -9 -4 -4 -5 -5 -8 -2 -4 -2 -3 -8 -4 -11 -19 -5 -8 -6 -3 -5 -6 -12 -3 -2 -4 -16 -12 -3 -4 -4 -8 -6 -5 -6 -6 -219 -8 -222 -6 -16 -3 -13 -19 -5 -4 -3 -11 -6 -10 -4 -7 -7 -12 -5 -3 -3 -5 -6 -10 -3 -8 -2 -5 -4 -7 -2 -4 -4 -2 -12 -9 -6 -4 -2 -40 -2 -4 -10 -4 -223 -4 -2 -20 -6 -7 -24 -5 -4 -5 -2 -20 -16 -6 -5 -13 -2 -3 -3 -19 -3 -2 -4 -5 -6 -7 -11 -12 -5 -6 -7 -7 -3 -5 -3 -5 -3 -14 -3 -4 -4 -2 -11 -1 -7 -3 -9 -6 -11 -12 -5 -8 -6 -221 -4 -2 -12 -4 -3 -15 -4 -5 -226 -7 -218 -7 -5 -4 -5 -18 -4 -5 -9 -4 -4 -2 -9 -18 -18 -9 -5 -6 -6 -3 -3 -7 -3 -5 -4 -4 -4 -12 -3 -6 -31 -5 -4 -7 -3 -6 -5 -6 -5 -11 -2 -2 -11 -11 -6 -7 -5 -8 -7 -10 -5 -23 -7 -4 -3 -5 -34 -2 -5 -23 -7 -3 -6 -8 -4 -4 -4 -2 -5 -3 -8 -5 -4 -8 -25 -2 -3 -17 -8 -3 -4 -8 -7 -3 -15 -6 -5 -7 -21 -9 -5 -6 -6 -5 -3 -2 -3 -10 -3 -6 -3 -14 -7 -4 -4 -8 -7 -8 -2 -6 -12 -4 -213 -6 -5 -21 -8 -2 -5 -23 -3 -11 -2 -3 -6 -25 -2 -3 -6 -7 -6 -6 -4 -4 -6 -3 -17 -9 -7 -6 -4 -3 -10 -7 -2 -3 -3 -3 -11 -8 -3 -7 -6 -4 -14 -36 -3 -4 -3 -3 -22 -13 -21 -4 -2 -7 -4 -4 -17 -15 -3 -7 -11 -2 -4 -7 -6 -209 -6 -3 -2 -2 -24 -4 -9 -4 -3 -3 -3 -29 -2 -2 -4 -3 -3 -5 -4 -6 -3 -3 -2 -4 diff --git a/vendor/src/github.com/beorn7/perks/quantile/stream.go b/vendor/src/github.com/beorn7/perks/quantile/stream.go deleted file mode 100644 index 587b1fc5ba..0000000000 --- a/vendor/src/github.com/beorn7/perks/quantile/stream.go +++ /dev/null @@ -1,292 +0,0 @@ -// Package quantile computes approximate quantiles over an unbounded data -// stream within low memory and CPU bounds. -// -// A small amount of accuracy is traded to achieve the above properties. -// -// Multiple streams can be merged before calling Query to generate a single set -// of results. This is meaningful when the streams represent the same type of -// data. See Merge and Samples. -// -// For more detailed information about the algorithm used, see: -// -// Effective Computation of Biased Quantiles over Data Streams -// -// http://www.cs.rutgers.edu/~muthu/bquant.pdf -package quantile - -import ( - "math" - "sort" -) - -// Sample holds an observed value and meta information for compression. JSON -// tags have been added for convenience. -type Sample struct { - Value float64 `json:",string"` - Width float64 `json:",string"` - Delta float64 `json:",string"` -} - -// Samples represents a slice of samples. It implements sort.Interface. 
-type Samples []Sample - -func (a Samples) Len() int { return len(a) } -func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } -func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } - -type invariant func(s *stream, r float64) float64 - -// NewLowBiased returns an initialized Stream for low-biased quantiles -// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the lower ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewLowBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * r - } - return newStream(ƒ) -} - -// NewHighBiased returns an initialized Stream for high-biased quantiles -// (e.g. 0.95, 0.99) where the needed quantiles are not known a priori, but -// error guarantees can still be given even for the higher ranks of the data -// distribution. -// -// The provided epsilon is a relative error, i.e. the true quantile of a value -// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error -// properties. -func NewHighBiased(epsilon float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - return 2 * epsilon * (s.n - r) - } - return newStream(ƒ) -} - -// NewTargeted returns an initialized Stream concerned with a particular set of -// quantile values that are supplied a priori. Knowing these a priori reduces -// space and computation time. The targets map maps the desired quantiles to -// their absolute errors, i.e. the true quantile of a value returned by a query -// is guaranteed to be within (Quantile±Epsilon). -// -// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. -func NewTargeted(targets map[float64]float64) *Stream { - ƒ := func(s *stream, r float64) float64 { - var m = math.MaxFloat64 - var f float64 - for quantile, epsilon := range targets { - if quantile*s.n <= r { - f = (2 * epsilon * r) / quantile - } else { - f = (2 * epsilon * (s.n - r)) / (1 - quantile) - } - if f < m { - m = f - } - } - return m - } - return newStream(ƒ) -} - -// Stream computes quantiles for a stream of float64s. It is not thread-safe by -// design. Take care when using across multiple goroutines. -type Stream struct { - *stream - b Samples - sorted bool -} - -func newStream(ƒ invariant) *Stream { - x := &stream{ƒ: ƒ} - return &Stream{x, make(Samples, 0, 500), true} -} - -// Insert inserts v into the stream. -func (s *Stream) Insert(v float64) { - s.insert(Sample{Value: v, Width: 1}) -} - -func (s *Stream) insert(sample Sample) { - s.b = append(s.b, sample) - s.sorted = false - if len(s.b) == cap(s.b) { - s.flush() - } -} - -// Query returns the computed qth percentile value. If s was created with -// NewTargeted, and q is not in the set of quantiles provided a priori, Query -// will return an unspecified result. -func (s *Stream) Query(q float64) float64 { - if !s.flushed() { - // Fast path when there hasn't been enough data for a flush; - // this also yields better accuracy for small sets of data.
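- // Answer directly from the buffered samples: pick the one whose rank corresponds to quantile q.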
- l := len(s.b) - if l == 0 { - return 0 - } - i := int(float64(l) * q) - if i > 0 { - i -= 1 - } - s.maybeSort() - return s.b[i].Value - } - s.flush() - return s.stream.query(q) -} - -// Merge merges samples into the underlying stream's samples. This is handy when -// merging multiple streams from separate threads, database shards, etc. -// -// ATTENTION: This method is broken and does not yield correct results. The -// underlying algorithm is not capable of merging streams correctly. -func (s *Stream) Merge(samples Samples) { - sort.Sort(samples) - s.stream.merge(samples) -} - -// Reset reinitializes and clears the list, reusing the samples buffer memory. -func (s *Stream) Reset() { - s.stream.reset() - s.b = s.b[:0] -} - -// Samples returns stream samples held by s. -func (s *Stream) Samples() Samples { - if !s.flushed() { - return s.b - } - s.flush() - return s.stream.samples() -} - -// Count returns the total number of samples observed in the stream -// since initialization. -func (s *Stream) Count() int { - return len(s.b) + s.stream.count() -} - -func (s *Stream) flush() { - s.maybeSort() - s.stream.merge(s.b) - s.b = s.b[:0] -} - -func (s *Stream) maybeSort() { - if !s.sorted { - s.sorted = true - sort.Sort(s.b) - } -} - -func (s *Stream) flushed() bool { - return len(s.stream.l) > 0 -} - -type stream struct { - n float64 - l []Sample - ƒ invariant -} - -func (s *stream) reset() { - s.l = s.l[:0] - s.n = 0 -} - -func (s *stream) insert(v float64) { - s.merge(Samples{{v, 1, 0}}) -} - -func (s *stream) merge(samples Samples) { - // TODO(beorn7): This tries to merge not only individual samples, but - // whole summaries. The paper doesn't mention merging summaries at - // all. Unittests show that the merging is inaccurate. Find out how to - // do merges properly. - var r float64 - i := 0 - for _, sample := range samples { - for ; i < len(s.l); i++ { - c := s.l[i] - if c.Value > sample.Value { - // Insert at position i. - s.l = append(s.l, Sample{}) - copy(s.l[i+1:], s.l[i:]) - s.l[i] = Sample{ - sample.Value, - sample.Width, - math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), - // TODO(beorn7): How to calculate delta correctly? - } - i++ - goto inserted - } - r += c.Width - } - s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) - i++ - inserted: - s.n += sample.Width - r += sample.Width - } - s.compress() -} - -func (s *stream) count() int { - return int(s.n) -} - -func (s *stream) query(q float64) float64 { - t := math.Ceil(q * s.n) - t += math.Ceil(s.ƒ(s, t) / 2) - p := s.l[0] - var r float64 - for _, c := range s.l[1:] { - r += p.Width - if r+c.Width+c.Delta > t { - return p.Value - } - p = c - } - return p.Value -} - -func (s *stream) compress() { - if len(s.l) < 2 { - return - } - x := s.l[len(s.l)-1] - xi := len(s.l) - 1 - r := s.n - 1 - x.Width - - for i := len(s.l) - 2; i >= 0; i-- { - c := s.l[i] - if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { - x.Width += c.Width - s.l[xi] = x - // Remove element at i.
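- // copy shifts the tail left by one; the truncation below drops the now-duplicated last element.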
- copy(s.l[i:], s.l[i+1:]) - s.l = s.l[:len(s.l)-1] - xi -= 1 - } else { - x = c - xi = i - } - r -= c.Width - } -} - -func (s *stream) samples() Samples { - samples := make(Samples, len(s.l)) - copy(samples, s.l) - return samples -} diff --git a/vendor/src/github.com/boltdb/bolt/.gitignore b/vendor/src/github.com/boltdb/bolt/.gitignore deleted file mode 100644 index c7bd2b7a5b..0000000000 --- a/vendor/src/github.com/boltdb/bolt/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -*.prof -*.test -*.swp -/bin/ diff --git a/vendor/src/github.com/boltdb/bolt/LICENSE b/vendor/src/github.com/boltdb/bolt/LICENSE deleted file mode 100644 index 004e77fe5d..0000000000 --- a/vendor/src/github.com/boltdb/bolt/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -The MIT License (MIT) - -Copyright (c) 2013 Ben Johnson - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of -the Software, and to permit persons to whom the Software is furnished to do so, -subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS -FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR -COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER -IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN -CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/src/github.com/boltdb/bolt/Makefile b/vendor/src/github.com/boltdb/bolt/Makefile deleted file mode 100644 index e035e63adc..0000000000 --- a/vendor/src/github.com/boltdb/bolt/Makefile +++ /dev/null @@ -1,18 +0,0 @@ -BRANCH=`git rev-parse --abbrev-ref HEAD` -COMMIT=`git rev-parse --short HEAD` -GOLDFLAGS="-X main.branch $(BRANCH) -X main.commit $(COMMIT)" - -default: build - -race: - @go test -v -race -test.run="TestSimulate_(100op|1000op)" - -# go get github.com/kisielk/errcheck -errcheck: - @errcheck -ignorepkg=bytes -ignore=os:Remove github.com/boltdb/bolt - -test: - @go test -v -cover . - @go test -v ./cmd/bolt - -.PHONY: fmt test diff --git a/vendor/src/github.com/boltdb/bolt/README.md b/vendor/src/github.com/boltdb/bolt/README.md deleted file mode 100644 index 2a69d95e78..0000000000 --- a/vendor/src/github.com/boltdb/bolt/README.md +++ /dev/null @@ -1,857 +0,0 @@ -Bolt [![Coverage Status](https://coveralls.io/repos/boltdb/bolt/badge.svg?branch=master)](https://coveralls.io/r/boltdb/bolt?branch=master) [![GoDoc](https://godoc.org/github.com/boltdb/bolt?status.svg)](https://godoc.org/github.com/boltdb/bolt) ![Version](https://img.shields.io/badge/version-1.2.1-green.svg) -==== - -Bolt is a pure Go key/value store inspired by [Howard Chu's][hyc_symas] -[LMDB project][lmdb]. The goal of the project is to provide a simple, -fast, and reliable database for projects that don't require a full database -server such as Postgres or MySQL. - -Since Bolt is meant to be used as such a low-level piece of functionality, -simplicity is key. The API will be small and only focus on getting values -and setting values. That's it. 
- -[hyc_symas]: https://twitter.com/hyc_symas -[lmdb]: http://symas.com/mdb/ - -## Project Status - -Bolt is stable, the API is fixed, and the file format is fixed. Full unit -test coverage and randomized black box testing are used to ensure database -consistency and thread safety. Bolt is currently in high-load production -environments serving databases as large as 1TB. Many companies such as -Shopify and Heroku use Bolt-backed services every day. - -## Table of Contents - -- [Getting Started](#getting-started) - - [Installing](#installing) - - [Opening a database](#opening-a-database) - - [Transactions](#transactions) - - [Read-write transactions](#read-write-transactions) - - [Read-only transactions](#read-only-transactions) - - [Batch read-write transactions](#batch-read-write-transactions) - - [Managing transactions manually](#managing-transactions-manually) - - [Using buckets](#using-buckets) - - [Using key/value pairs](#using-keyvalue-pairs) - - [Autoincrementing integer for the bucket](#autoincrementing-integer-for-the-bucket) - - [Iterating over keys](#iterating-over-keys) - - [Prefix scans](#prefix-scans) - - [Range scans](#range-scans) - - [ForEach()](#foreach) - - [Nested buckets](#nested-buckets) - - [Database backups](#database-backups) - - [Statistics](#statistics) - - [Read-Only Mode](#read-only-mode) - - [Mobile Use (iOS/Android)](#mobile-use-iosandroid) -- [Resources](#resources) -- [Comparison with other databases](#comparison-with-other-databases) - - [Postgres, MySQL, & other relational databases](#postgres-mysql--other-relational-databases) - - [LevelDB, RocksDB](#leveldb-rocksdb) - - [LMDB](#lmdb) -- [Caveats & Limitations](#caveats--limitations) -- [Reading the Source](#reading-the-source) -- [Other Projects Using Bolt](#other-projects-using-bolt) - -## Getting Started - -### Installing - -To start using Bolt, install Go and run `go get`: - -```sh -$ go get github.com/boltdb/bolt/... -``` - -This will retrieve the library and install the `bolt` command line utility into -your `$GOBIN` path. - - -### Opening a database - -The top-level object in Bolt is a `DB`. It is represented as a single file on -your disk and represents a consistent snapshot of your data. - -To open your database, simply use the `bolt.Open()` function: - -```go -package main - -import ( - "log" - - "github.com/boltdb/bolt" -) - -func main() { - // Open the my.db data file in your current directory. - // It will be created if it doesn't exist. - db, err := bolt.Open("my.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - defer db.Close() - - ... -} -``` - -Please note that Bolt obtains a file lock on the data file so multiple processes -cannot open the same database at the same time. Opening an already open Bolt -database will cause it to hang until the other process closes it. To prevent -an indefinite wait you can pass a timeout option to the `Open()` function: - -```go -db, err := bolt.Open("my.db", 0600, &bolt.Options{Timeout: 1 * time.Second}) -``` - - -### Transactions - -Bolt allows only one read-write transaction at a time but allows as many -read-only transactions as you want at a time. Each transaction has a consistent -view of the data as it existed when the transaction started. - -Individual transactions and all objects created from them (e.g. buckets, keys) -are not thread safe. To work with data in multiple goroutines you must start -a transaction for each one or use locking to ensure only one goroutine accesses -a transaction at a time. 
Creating a transaction from the `DB` is thread safe. - -Read-only transactions and read-write transactions should not depend on one -another and generally shouldn't be opened simultaneously in the same goroutine. -This can cause a deadlock as the read-write transaction needs to periodically -re-map the data file but it cannot do so while a read-only transaction is open. - - -#### Read-write transactions - -To start a read-write transaction, you can use the `DB.Update()` function: - -```go -err := db.Update(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Inside the closure, you have a consistent view of the database. You commit the -transaction by returning `nil` at the end. You can also roll back the transaction -at any point by returning an error. All database operations are allowed inside -a read-write transaction. - -Always check the return error as it will report any disk failures that can cause -your transaction to not complete. If you return an error within your closure -it will be passed through. - - -#### Read-only transactions - -To start a read-only transaction, you can use the `DB.View()` function: - -```go -err := db.View(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -You also get a consistent view of the database within this closure, however, -no mutating operations are allowed within a read-only transaction. You can only -retrieve buckets, retrieve values, and copy the database within a read-only -transaction. - - -#### Batch read-write transactions - -Each `DB.Update()` waits for disk to commit the writes. This overhead -can be minimized by combining multiple updates with the `DB.Batch()` -function: - -```go -err := db.Batch(func(tx *bolt.Tx) error { - ... - return nil -}) -``` - -Concurrent Batch calls are opportunistically combined into larger -transactions. Batch is only useful when there are multiple goroutines -calling it. - -The trade-off is that `Batch` can call the given -function multiple times, if parts of the transaction fail. The -function must be idempotent and side effects must take effect only -after a successful return from `DB.Batch()`. - -For example: don't display messages from inside the function, instead -set variables in the enclosing scope: - -```go -var id uint64 -err := db.Batch(func(tx *bolt.Tx) error { - // Find last key in bucket, decode as bigendian uint64, increment - // by one, encode back to []byte, and add new key. - ... - id = newValue - return nil -}) -if err != nil { - return ... -} -fmt.Printf("Allocated ID %d\n", id) -``` - - -#### Managing transactions manually - -The `DB.View()` and `DB.Update()` functions are wrappers around the `DB.Begin()` -function. These helper functions will start the transaction, execute a function, -and then safely close your transaction if an error is returned. This is the -recommended way to use Bolt transactions. - -However, sometimes you may want to manually start and end your transactions. -You can use the `DB.Begin()` function directly but **please** be sure to close -the transaction. - -```go -// Start a writable transaction. -tx, err := db.Begin(true) -if err != nil { - return err -} -defer tx.Rollback() - -// Use the transaction... -_, err = tx.CreateBucket([]byte("MyBucket")) -if err != nil { - return err -} - -// Commit the transaction and check for error. -if err := tx.Commit(); err != nil { - return err -} -``` - -The first argument to `DB.Begin()` is a boolean stating if the transaction -should be writable.
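- -If you only need to read, pass `false` to `DB.Begin()` to manage a read-only -transaction by hand. As a minimal sketch (assuming `db` is an open `*bolt.DB` -and that `MyBucket` may exist), note that a read-only transaction is closed with -`Rollback()`, since it cannot be committed: - -```go -// Start a read-only transaction. -tx, err := db.Begin(false) -if err != nil { - return err -} -// Read-only transactions must be closed with Rollback(), not Commit(). -defer tx.Rollback() - -if b := tx.Bucket([]byte("MyBucket")); b != nil { - fmt.Printf("answer=%s\n", b.Get([]byte("answer"))) -} -```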
- - -### Using buckets - -Buckets are collections of key/value pairs within the database. All keys in a -bucket must be unique. You can create a bucket using the `DB.CreateBucket()` -function: - -```go -db.Update(func(tx *bolt.Tx) error { - b, err := tx.CreateBucket([]byte("MyBucket")) - if err != nil { - return fmt.Errorf("create bucket: %s", err) - } - return nil -}) -``` - -You can also create a bucket only if it doesn't exist by using the -`Tx.CreateBucketIfNotExists()` function. It's a common pattern to call this -function for all your top-level buckets after you open your database so you can -guarantee that they exist for future transactions. - -To delete a bucket, simply call the `Tx.DeleteBucket()` function. - - -### Using key/value pairs - -To save a key/value pair to a bucket, use the `Bucket.Put()` function: - -```go -db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - err := b.Put([]byte("answer"), []byte("42")) - return err -}) -``` - -This will set the value of the `"answer"` key to `"42"` in the `MyBucket` -bucket. To retrieve this value, we can use the `Bucket.Get()` function: - -```go -db.View(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - v := b.Get([]byte("answer")) - fmt.Printf("The answer is: %s\n", v) - return nil -}) -``` - -The `Get()` function does not return an error because its operation is -guaranteed to work (unless there is some kind of system failure). If the key -exists then it will return its byte slice value. If it doesn't exist then it -will return `nil`. It's important to note that you can have a zero-length value -set to a key which is different than the key not existing. - -Use the `Bucket.Delete()` function to delete a key from the bucket. - -Please note that values returned from `Get()` are only valid while the -transaction is open. If you need to use a value outside of the transaction -then you must use `copy()` to copy it to another byte slice. - - -### Autoincrementing integer for the bucket -By using the `NextSequence()` function, you can let Bolt determine a sequence -which can be used as the unique identifier for your key/value pairs. See the -example below. - -```go -// CreateUser saves u to the store. The new user ID is set on u once the data is persisted. -func (s *Store) CreateUser(u *User) error { - return s.db.Update(func(tx *bolt.Tx) error { - // Retrieve the users bucket. - // This should be created when the DB is first opened. - b := tx.Bucket([]byte("users")) - - // Generate ID for the user. - // This returns an error only if the Tx is closed or not writeable. - // That can't happen in an Update() call so I ignore the error check. - id, _ := b.NextSequence() - u.ID = int(id) - - // Marshal user data into bytes. - buf, err := json.Marshal(u) - if err != nil { - return err - } - - // Persist bytes to users bucket. - return b.Put(itob(u.ID), buf) - }) -} - -// itob returns an 8-byte big endian representation of v. -func itob(v int) []byte { - b := make([]byte, 8) - binary.BigEndian.PutUint64(b, uint64(v)) - return b -} - -type User struct { - ID int - ... -} -``` - -### Iterating over keys - -Bolt stores its keys in byte-sorted order within a bucket. This makes sequential -iteration over these keys extremely fast. 
To iterate over keys we'll use a -`Cursor`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - c := b.Cursor() - - for k, v := c.First(); k != nil; k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -The cursor allows you to move to a specific point in the list of keys and move -forward or backward through the keys one at a time. - -The following functions are available on the cursor: - -``` -First() Move to the first key. -Last() Move to the last key. -Seek() Move to a specific key. -Next() Move to the next key. -Prev() Move to the previous key. -``` - -Each of those functions has a return signature of `(key []byte, value []byte)`. -When you have iterated to the end of the cursor then `Next()` will return a -`nil` key. You must seek to a position using `First()`, `Last()`, or `Seek()` -before calling `Next()` or `Prev()`. If you do not seek to a position then -these functions will return a `nil` key. - -During iteration, if the key is non-`nil` but the value is `nil`, that means -the key refers to a bucket rather than a value. Use `Bucket.Bucket()` to -access the sub-bucket. - - -#### Prefix scans - -To iterate over a key prefix, you can combine `Seek()` and `bytes.HasPrefix()`: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - c := tx.Bucket([]byte("MyBucket")).Cursor() - - prefix := []byte("1234") - for k, v := c.Seek(prefix); bytes.HasPrefix(k, prefix); k, v = c.Next() { - fmt.Printf("key=%s, value=%s\n", k, v) - } - - return nil -}) -``` - -#### Range scans - -Another common use case is scanning over a range such as a time range. If you -use a sortable time encoding such as RFC3339 then you can query a specific -date range like this: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume our events bucket exists and has RFC3339 encoded time keys. - c := tx.Bucket([]byte("Events")).Cursor() - - // Our time range spans the 90's decade. - min := []byte("1990-01-01T00:00:00Z") - max := []byte("2000-01-01T00:00:00Z") - - // Iterate over the 90's. - for k, v := c.Seek(min); k != nil && bytes.Compare(k, max) <= 0; k, v = c.Next() { - fmt.Printf("%s: %s\n", k, v) - } - - return nil -}) -``` - -Note that, while RFC3339 is sortable, the Golang implementation of RFC3339Nano does not use a fixed number of digits after the decimal point and is therefore not sortable. - - -#### ForEach() - -You can also use the function `ForEach()` if you know you'll be iterating over -all the keys in a bucket: - -```go -db.View(func(tx *bolt.Tx) error { - // Assume bucket exists and has keys - b := tx.Bucket([]byte("MyBucket")) - - b.ForEach(func(k, v []byte) error { - fmt.Printf("key=%s, value=%s\n", k, v) - return nil - }) - return nil -}) -``` - -Please note that keys and values in `ForEach()` are only valid while -the transaction is open. If you need to use a key or value outside of -the transaction, you must use `copy()` to copy it to another byte -slice. - -### Nested buckets - -You can also store a bucket in a key to create nested buckets. The API is the -same as the bucket management API on the `DB` object: - -```go -func (*Bucket) CreateBucket(key []byte) (*Bucket, error) -func (*Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) -func (*Bucket) DeleteBucket(key []byte) error -``` - - -### Database backups - -Bolt is a single file so it's easy to backup. You can use the `Tx.WriteTo()` -function to write a consistent view of the database to a writer. 
If you call -this from a read-only transaction, it will perform a hot backup and not block -your other database reads and writes. - -By default, it will use a regular file handle which will utilize the operating -system's page cache. See the [`Tx`](https://godoc.org/github.com/boltdb/bolt#Tx) -documentation for information about optimizing for larger-than-RAM datasets. - -One common use case is to back up over HTTP so you can use tools like `cURL` to -do database backups: - -```go -func BackupHandleFunc(w http.ResponseWriter, req *http.Request) { - err := db.View(func(tx *bolt.Tx) error { - w.Header().Set("Content-Type", "application/octet-stream") - w.Header().Set("Content-Disposition", `attachment; filename="my.db"`) - w.Header().Set("Content-Length", strconv.Itoa(int(tx.Size()))) - _, err := tx.WriteTo(w) - return err - }) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} -``` - -Then you can back up using this command: - -```sh -$ curl http://localhost/backup > my.db -``` - -Or you can open your browser to `http://localhost/backup` and it will download -automatically. - -If you want to back up to another file you can use the `Tx.CopyFile()` helper -function. - - -### Statistics - -The database keeps a running count of many of the internal operations it -performs so you can better understand what's going on. By grabbing a snapshot -of these stats at two points in time we can see what operations were performed -in that time range. - -For example, we could start a goroutine to log stats every 10 seconds: - -```go -go func() { - // Grab the initial stats. - prev := db.Stats() - - for { - // Wait for 10s. - time.Sleep(10 * time.Second) - - // Grab the current stats and diff them. - stats := db.Stats() - diff := stats.Sub(&prev) - - // Encode stats to JSON and print to STDERR. - json.NewEncoder(os.Stderr).Encode(diff) - - // Save stats for the next loop. - prev = stats - } -}() -``` - -It's also useful to pipe these stats to a service such as statsd for monitoring -or to provide an HTTP endpoint that will perform a fixed-length sample. - - -### Read-Only Mode - -Sometimes it is useful to create a shared, read-only Bolt database. To do this, -set the `Options.ReadOnly` flag when opening your database. Read-only mode -uses a shared lock to allow multiple processes to read from the database but -it will block any processes from opening the database in read-write mode. - -```go -db, err := bolt.Open("my.db", 0666, &bolt.Options{ReadOnly: true}) -if err != nil { - log.Fatal(err) -} -``` - -### Mobile Use (iOS/Android) - -Bolt is able to run on mobile devices by leveraging the binding feature of the -[gomobile](https://github.com/golang/mobile) tool. Create a struct that will -contain your database logic and a reference to a `*bolt.DB` with an initializing -constructor that takes in a filepath where the database file will be stored. -Neither Android nor iOS requires extra permissions or cleanup from using this method. - -```go -func NewBoltDB(filepath string) *BoltDB { - db, err := bolt.Open(filepath+"/demo.db", 0600, nil) - if err != nil { - log.Fatal(err) - } - - return &BoltDB{db} -} - -type BoltDB struct { - db *bolt.DB - ... -} - -func (b *BoltDB) Path() string { - return b.db.Path() -} - -func (b *BoltDB) Close() { - b.db.Close() -} -``` - -Database logic should be defined as methods on this wrapper struct. - -To initialize this struct from the native language (both platforms now sync -their local storage to the cloud.
These snippets disable that functionality for the -database file): - -#### Android - -```java -String path; -if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.LOLLIPOP) { - path = getNoBackupFilesDir().getAbsolutePath(); -} else { - path = getFilesDir().getAbsolutePath(); -} -Boltmobiledemo.BoltDB boltDB = Boltmobiledemo.NewBoltDB(path); -``` - -#### iOS - -```objc -- (void)demo { - NSString* path = [NSSearchPathForDirectoriesInDomains(NSLibraryDirectory, - NSUserDomainMask, - YES) objectAtIndex:0]; - GoBoltmobiledemoBoltDB * demo = GoBoltmobiledemoNewBoltDB(path); - [self addSkipBackupAttributeToItemAtPath:demo.path]; - // Some DB logic would go here - [demo close]; -} - -- (BOOL)addSkipBackupAttributeToItemAtPath:(NSString *) filePathString -{ - NSURL* URL = [NSURL fileURLWithPath: filePathString]; - assert([[NSFileManager defaultManager] fileExistsAtPath: [URL path]]); - - NSError *error = nil; - BOOL success = [URL setResourceValue: [NSNumber numberWithBool: YES] - forKey: NSURLIsExcludedFromBackupKey error: &error]; - if (!success) { - NSLog(@"Error excluding %@ from backup %@", [URL lastPathComponent], error); - } - return success; -} - -``` - -## Resources - -For more information on getting started with Bolt, check out the following articles: - -* [Intro to BoltDB: Painless Performant Persistence](http://npf.io/2014/07/intro-to-boltdb-painless-performant-persistence/) by [Nate Finch](https://github.com/natefinch). -* [Bolt -- an embedded key/value database for Go](https://www.progville.com/go/bolt-embedded-db-golang/) by Progville - - -## Comparison with other databases - -### Postgres, MySQL, & other relational databases - -Relational databases structure data into rows and are only accessible through -the use of SQL. This approach provides flexibility in how you store and query -your data but also incurs overhead in parsing and planning SQL statements. Bolt -accesses all data by a byte slice key. This makes Bolt fast to read and write -data by key but provides no built-in support for joining values together. - -Most relational databases (with the exception of SQLite) are standalone servers -that run separately from your application. This gives your systems -flexibility to connect multiple application servers to a single database -server but also adds overhead in serializing and transporting data over the -network. Bolt runs as a library included in your application so all data access -has to go through your application's process. This brings data closer to your -application but limits multi-process access to the data. - - -### LevelDB, RocksDB - -LevelDB and its derivatives (RocksDB, HyperLevelDB) are similar to Bolt in that -they are libraries bundled into the application; however, their underlying -structure is a log-structured merge-tree (LSM tree). An LSM tree optimizes -random writes by using a write-ahead log and multi-tiered, sorted files called -SSTables. Bolt uses a B+tree internally and only a single file. Both approaches -have trade-offs. - -If you require a high random write throughput (>10,000 w/sec) or you need to use -spinning disks then LevelDB could be a good choice. If your application is -read-heavy or does a lot of range scans then Bolt could be a good choice. - -One other important consideration is that LevelDB does not have transactions. -It supports batch writing of key/value pairs and it supports read snapshots -but it will not give you the ability to do a compare-and-swap operation safely. -Bolt supports fully serializable ACID transactions.
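- -To make the compare-and-swap point concrete, here is a minimal sketch (it -assumes a bucket named `MyBucket` exists; the `casPut` helper is illustrative, -not part of the Bolt API): - -```go -// casPut replaces key's value with newVal only if the current value equals -// oldVal. The read and the write share one serializable read-write -// transaction, so no other writer can interleave between them. -func casPut(db *bolt.DB, key, oldVal, newVal []byte) error { - return db.Update(func(tx *bolt.Tx) error { - b := tx.Bucket([]byte("MyBucket")) - if !bytes.Equal(b.Get(key), oldVal) { - return errors.New("value changed; aborting swap") - } - return b.Put(key, newVal) - }) -} -```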
- - -### LMDB - -Bolt was originally a port of LMDB so it is architecturally similar. Both use -a B+tree, have ACID semantics with fully serializable transactions, and support -lock-free MVCC using a single writer and multiple readers. - -The two projects have somewhat diverged. LMDB heavily focuses on raw performance -while Bolt has focused on simplicity and ease of use. For example, LMDB allows -several unsafe actions such as direct writes for the sake of performance. Bolt -opts to disallow actions which can leave the database in a corrupted state. The -only exception to this in Bolt is `DB.NoSync`. - -There are also a few differences in API. LMDB requires a maximum mmap size when -opening an `mdb_env` whereas Bolt will handle incremental mmap resizing -automatically. LMDB overloads the getter and setter functions with multiple -flags whereas Bolt splits these specialized cases into their own functions. - - -## Caveats & Limitations - -It's important to pick the right tool for the job and Bolt is no exception. -Here are a few things to note when evaluating and using Bolt: - -* Bolt is good for read-intensive workloads. Sequential write performance is - also fast but random writes can be slow. You can use `DB.Batch()` or add a - write-ahead log to help mitigate this issue. - -* Bolt uses a B+tree internally so there can be a lot of random page access. - SSDs provide a significant performance boost over spinning disks. - -* Try to avoid long-running read transactions. Bolt uses copy-on-write so - old pages cannot be reclaimed while an old transaction is using them. - -* Byte slices returned from Bolt are only valid during a transaction. Once the - transaction has been committed or rolled back then the memory they point to - can be reused by a new page or can be unmapped from virtual memory and you'll - see an `unexpected fault address` panic when accessing it. - -* Be careful when using `Bucket.FillPercent`. Setting a high fill percent for - buckets that have random inserts will cause your database to have very poor - page utilization. - -* Use larger buckets in general. Smaller buckets cause poor page utilization - once they become larger than the page size (typically 4KB). - -* Bulk loading a lot of random writes into a new bucket can be slow as the - page will not split until the transaction is committed. Randomly inserting - more than 100,000 key/value pairs into a single new bucket in a single - transaction is not advised. - -* Bolt uses a memory-mapped file so the underlying operating system handles the - caching of the data. Typically, the OS will cache as much of the file as it - can in memory and will release memory as needed to other processes. This means - that Bolt can show very high memory usage when working with large databases. - However, this is expected and the OS will release memory as needed. Bolt can - handle databases much larger than the available physical RAM, provided its - memory-map fits in the process virtual address space. It may be problematic - on 32-bit systems. - -* The data structures in the Bolt database are memory mapped so the data file - will be endian specific. This means that you cannot copy a Bolt file from a - little endian machine to a big endian machine and have it work. For most - users this is not a concern since most modern CPUs are little endian. - -* Because of the way pages are laid out on disk, Bolt cannot truncate data files - and return free pages back to the disk.
Instead, Bolt maintains a free list - of unused pages within its data file. These free pages can be reused by later - transactions. This works well for many use cases as databases generally tend - to grow. However, it's important to note that deleting large chunks of data - will not allow you to reclaim that space on disk. - - For more information on page allocation, [see this comment][page-allocation]. - -[page-allocation]: https://github.com/boltdb/bolt/issues/308#issuecomment-74811638 - - -## Reading the Source - -Bolt is a relatively small code base (<3KLOC) for an embedded, serializable, -transactional key/value database so it can be a good starting point for people -interested in how databases work. - -The best places to start are the main entry points into Bolt: - -- `Open()` - Initializes the reference to the database. It's responsible for - creating the database if it doesn't exist, obtaining an exclusive lock on the - file, reading the meta pages, & memory-mapping the file. - -- `DB.Begin()` - Starts a read-only or read-write transaction depending on the - value of the `writable` argument. This requires briefly obtaining the "meta" - lock to keep track of open transactions. Only one read-write transaction can - exist at a time so the "rwlock" is acquired during the life of a read-write - transaction. - -- `Bucket.Put()` - Writes a key/value pair into a bucket. After validating the - arguments, a cursor is used to traverse the B+tree to the page and position - where the key & value will be written. Once the position is found, the bucket - materializes the underlying page and the page's parent pages into memory as - "nodes". These nodes are where mutations occur during read-write transactions. - These changes get flushed to disk during commit. - -- `Bucket.Get()` - Retrieves a key/value pair from a bucket. This uses a cursor - to move to the page & position of a key/value pair. During a read-only - transaction, the key and value data is returned as a direct reference to the - underlying mmap file so there's no allocation overhead. For read-write - transactions, this data may reference the mmap file or one of the in-memory - node values. - -- `Cursor` - This object is simply for traversing the B+tree of on-disk pages - or in-memory nodes. It can seek to a specific key, move to the first or last - value, or it can move forward or backward. The cursor handles the movement up - and down the B+tree transparently to the end user. - -- `Tx.Commit()` - Converts the in-memory dirty nodes and the list of free pages - into pages to be written to disk. Writing to disk then occurs in two phases. - First, the dirty pages are written to disk and an `fsync()` occurs. Second, a - new meta page with an incremented transaction ID is written and another - `fsync()` occurs. This two-phase write ensures that partially written data - pages are ignored in the event of a crash since the meta page pointing to them - is never written. Partially written meta pages are invalidated because they - are written with a checksum. - -If you have additional notes that could be helpful for others, please submit -them via pull request. - - -## Other Projects Using Bolt - -Below is a list of public, open source projects that use Bolt: - -* [BoltDbWeb](https://github.com/evnix/boltdbweb) - A web-based GUI for BoltDB files. -* [Operation Go: A Routine Mission](http://gocode.io) - An online programming game for Golang using Bolt for user accounts and a leaderboard.
-* [Bazil](https://bazil.org/) - A file system that lets your data reside where it is most convenient for it to reside. -* [DVID](https://github.com/janelia-flyem/dvid) - Added Bolt as optional storage engine and testing it against Basho-tuned leveldb. -* [Skybox Analytics](https://github.com/skybox/skybox) - A standalone funnel analysis tool for web analytics. -* [Scuttlebutt](https://github.com/benbjohnson/scuttlebutt) - Uses Bolt to store and process all Twitter mentions of GitHub projects. -* [Wiki](https://github.com/peterhellberg/wiki) - A tiny wiki using Goji, BoltDB and Blackfriday. -* [ChainStore](https://github.com/pressly/chainstore) - Simple key-value interface to a variety of storage engines organized as a chain of operations. -* [MetricBase](https://github.com/msiebuhr/MetricBase) - Single-binary version of Graphite. -* [Gitchain](https://github.com/gitchain/gitchain) - Decentralized, peer-to-peer Git repositories aka "Git meets Bitcoin". -* [event-shuttle](https://github.com/sclasen/event-shuttle) - A Unix system service to collect and reliably deliver messages to Kafka. -* [ipxed](https://github.com/kelseyhightower/ipxed) - Web interface and api for ipxed. -* [BoltStore](https://github.com/yosssi/boltstore) - Session store using Bolt. -* [photosite/session](https://godoc.org/bitbucket.org/kardianos/photosite/session) - Sessions for a photo viewing site. -* [LedisDB](https://github.com/siddontang/ledisdb) - A high performance NoSQL, using Bolt as optional storage. -* [ipLocator](https://github.com/AndreasBriese/ipLocator) - A fast ip-geo-location-server using bolt with bloom filters. -* [cayley](https://github.com/google/cayley) - Cayley is an open-source graph database using Bolt as optional backend. -* [bleve](http://www.blevesearch.com/) - A pure Go search engine similar to ElasticSearch that uses Bolt as the default storage backend. -* [tentacool](https://github.com/optiflows/tentacool) - REST api server to manage system stuff (IP, DNS, Gateway...) on a linux server. -* [Seaweed File System](https://github.com/chrislusf/seaweedfs) - Highly scalable distributed key~file system with O(1) disk read. -* [InfluxDB](https://influxdata.com) - Scalable datastore for metrics, events, and real-time analytics. -* [Freehold](http://tshannon.bitbucket.org/freehold/) - An open, secure, and lightweight platform for your files and data. -* [Prometheus Annotation Server](https://github.com/oliver006/prom_annotation_server) - Annotation server for PromDash & Prometheus service monitoring system. -* [Consul](https://github.com/hashicorp/consul) - Consul is service discovery and configuration made easy. Distributed, highly available, and datacenter-aware. -* [Kala](https://github.com/ajvb/kala) - Kala is a modern job scheduler optimized to run on a single node. It is persistent, JSON over HTTP API, ISO 8601 duration notation, and dependent jobs. -* [drive](https://github.com/odeke-em/drive) - drive is an unofficial Google Drive command line client for \*NIX operating systems. -* [stow](https://github.com/djherbis/stow) - a persistence manager for objects - backed by boltdb. -* [buckets](https://github.com/joyrexus/buckets) - a bolt wrapper streamlining - simple tx and key scans. -* [mbuckets](https://github.com/abhigupta912/mbuckets) - A Bolt wrapper that allows easy operations on multi level (nested) buckets. 
-* [Request Baskets](https://github.com/darklynx/request-baskets) - A web service to collect arbitrary HTTP requests and inspect them via REST API or simple web UI, similar to the [RequestBin](http://requestb.in/) service.
-* [Go Report Card](https://goreportcard.com/) - Go code quality report cards as a (free and open source) service.
-* [Boltdb Boilerplate](https://github.com/bobintornado/boltdb-boilerplate) - Boilerplate wrapper around bolt aiming to make simple calls one-liners.
-* [lru](https://github.com/crowdriff/lru) - Easy to use Bolt-backed Least-Recently-Used (LRU) read-through cache with chainable remote stores.
-* [Storm](https://github.com/asdine/storm) - Simple and powerful ORM for BoltDB.
-* [GoWebApp](https://github.com/josephspurrier/gowebapp) - A basic MVC web application in Go using BoltDB.
-* [SimpleBolt](https://github.com/xyproto/simplebolt) - A simple way to use BoltDB. Deals mainly with strings.
-* [Algernon](https://github.com/xyproto/algernon) - An HTTP/2 web server with built-in support for Lua. Uses BoltDB as the default database backend.
-* [MuLiFS](https://github.com/dankomiocevic/mulifs) - Music Library Filesystem creates a filesystem to organise your music files.
-* [GoShort](https://github.com/pankajkhairnar/goShort) - GoShort is a URL shortener written in Golang, with BoltDB for persistent key/value storage; for routing it uses the high-performance HTTPRouter.
-* [torrent](https://github.com/anacrolix/torrent) - Full-featured BitTorrent client package and utilities in Go. BoltDB is a storage backend in development.
-
-If you are using Bolt in a project, please send a pull request to add it to the list.
diff --git a/vendor/src/github.com/boltdb/bolt/appveyor.yml b/vendor/src/github.com/boltdb/bolt/appveyor.yml
deleted file mode 100644
index 6e26e941d6..0000000000
--- a/vendor/src/github.com/boltdb/bolt/appveyor.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-version: "{build}"
-
-os: Windows Server 2012 R2
-
-clone_folder: c:\gopath\src\github.com\boltdb\bolt
-
-environment:
-  GOPATH: c:\gopath
-
-install:
-  - echo %PATH%
-  - echo %GOPATH%
-  - go version
-  - go env
-  - go get -v -t ./...
-
-build_script:
-  - go test -v ./...
diff --git a/vendor/src/github.com/boltdb/bolt/bolt_386.go b/vendor/src/github.com/boltdb/bolt/bolt_386.go
deleted file mode 100644
index 820d533c15..0000000000
--- a/vendor/src/github.com/boltdb/bolt/bolt_386.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0x7FFFFFFF // 2GB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0xFFFFFFF
-
-// Are unaligned load/stores broken on this arch?
-var brokenUnaligned = false
diff --git a/vendor/src/github.com/boltdb/bolt/bolt_amd64.go b/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
deleted file mode 100644
index 98fafdb47d..0000000000
--- a/vendor/src/github.com/boltdb/bolt/bolt_amd64.go
+++ /dev/null
@@ -1,10 +0,0 @@
-package bolt
-
-// maxMapSize represents the largest mmap size supported by Bolt.
-const maxMapSize = 0xFFFFFFFFFFFF // 256TB
-
-// maxAllocSize is the size used when creating array pointers.
-const maxAllocSize = 0x7FFFFFFF
-
-// Are unaligned load/stores broken on this arch?
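-// amd64 handles unaligned loads and stores at the ISA level, so this can be
-// false unconditionally here.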
-var brokenUnaligned = false diff --git a/vendor/src/github.com/boltdb/bolt/bolt_arm.go b/vendor/src/github.com/boltdb/bolt/bolt_arm.go deleted file mode 100644 index 7e5cb4b941..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_arm.go +++ /dev/null @@ -1,28 +0,0 @@ -package bolt - -import "unsafe" - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0xFFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned bool - -func init() { - // Simple check to see whether this arch handles unaligned load/stores - // correctly. - - // ARM9 and older devices require load/stores to be from/to aligned - // addresses. If not, the lower 2 bits are cleared and that address is - // read in a jumbled up order. - - // See http://infocenter.arm.com/help/index.jsp?topic=/com.arm.doc.faqs/ka15414.html - - raw := [6]byte{0xfe, 0xef, 0x11, 0x22, 0x22, 0x11} - val := *(*uint32)(unsafe.Pointer(uintptr(unsafe.Pointer(&raw)) + 2)) - - brokenUnaligned = val != 0x11222211 -} diff --git a/vendor/src/github.com/boltdb/bolt/bolt_arm64.go b/vendor/src/github.com/boltdb/bolt/bolt_arm64.go deleted file mode 100644 index b26d84f91b..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_arm64.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build arm64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/src/github.com/boltdb/bolt/bolt_linux.go b/vendor/src/github.com/boltdb/bolt/bolt_linux.go deleted file mode 100644 index 2b67666140..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_linux.go +++ /dev/null @@ -1,10 +0,0 @@ -package bolt - -import ( - "syscall" -) - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return syscall.Fdatasync(int(db.file.Fd())) -} diff --git a/vendor/src/github.com/boltdb/bolt/bolt_openbsd.go b/vendor/src/github.com/boltdb/bolt/bolt_openbsd.go deleted file mode 100644 index 7058c3d734..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_openbsd.go +++ /dev/null @@ -1,27 +0,0 @@ -package bolt - -import ( - "syscall" - "unsafe" -) - -const ( - msAsync = 1 << iota // perform asynchronous writes - msSync // perform synchronous writes - msInvalidate // invalidate cached data -) - -func msync(db *DB) error { - _, _, errno := syscall.Syscall(syscall.SYS_MSYNC, uintptr(unsafe.Pointer(db.data)), uintptr(db.datasz), msInvalidate) - if errno != 0 { - return errno - } - return nil -} - -func fdatasync(db *DB) error { - if db.data != nil { - return msync(db) - } - return db.file.Sync() -} diff --git a/vendor/src/github.com/boltdb/bolt/bolt_ppc.go b/vendor/src/github.com/boltdb/bolt/bolt_ppc.go deleted file mode 100644 index 645ddc3edc..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_ppc.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0x7FFFFFFF // 2GB - -// maxAllocSize is the size used when creating array pointers. 
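-// 0xFFFFFFF is roughly 256MB, keeping allocations comfortably below the
-// 2GB maxMapSize above.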
-const maxAllocSize = 0xFFFFFFF diff --git a/vendor/src/github.com/boltdb/bolt/bolt_ppc64.go b/vendor/src/github.com/boltdb/bolt/bolt_ppc64.go deleted file mode 100644 index 2dc6be02e3..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_ppc64.go +++ /dev/null @@ -1,9 +0,0 @@ -// +build ppc64 - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF diff --git a/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go b/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go deleted file mode 100644 index 8c143bc5d1..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_ppc64le.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build ppc64le - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/src/github.com/boltdb/bolt/bolt_s390x.go b/vendor/src/github.com/boltdb/bolt/bolt_s390x.go deleted file mode 100644 index d7c39af925..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_s390x.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build s390x - -package bolt - -// maxMapSize represents the largest mmap size supported by Bolt. -const maxMapSize = 0xFFFFFFFFFFFF // 256TB - -// maxAllocSize is the size used when creating array pointers. -const maxAllocSize = 0x7FFFFFFF - -// Are unaligned load/stores broken on this arch? -var brokenUnaligned = false diff --git a/vendor/src/github.com/boltdb/bolt/bolt_unix.go b/vendor/src/github.com/boltdb/bolt/bolt_unix.go deleted file mode 100644 index cad62dda1e..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_unix.go +++ /dev/null @@ -1,89 +0,0 @@ -// +build !windows,!plan9,!solaris - -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - flag := syscall.LOCK_SH - if exclusive { - flag = syscall.LOCK_EX - } - - // Otherwise attempt to obtain an exclusive lock. - err := syscall.Flock(int(db.file.Fd()), flag|syscall.LOCK_NB) - if err == nil { - return nil - } else if err != syscall.EWOULDBLOCK { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - return syscall.Flock(int(db.file.Fd()), syscall.LOCK_UN) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := syscall.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. 
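-	// dataref keeps the exact slice returned by Mmap so munmap can later hand
-	// it back to the kernel; data reinterprets the same memory as a large
-	// fixed-size array so pages can be addressed by offset.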
- db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. - err := syscall.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} - -// NOTE: This function is copied from stdlib because it is not available on darwin. -func madvise(b []byte, advice int) (err error) { - _, _, e1 := syscall.Syscall(syscall.SYS_MADVISE, uintptr(unsafe.Pointer(&b[0])), uintptr(len(b)), uintptr(advice)) - if e1 != 0 { - err = e1 - } - return -} diff --git a/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go b/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go deleted file mode 100644 index 307bf2b3ee..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_unix_solaris.go +++ /dev/null @@ -1,90 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" - - "golang.org/x/sys/unix" -) - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Whence = 0 - lock.Pid = 0 - if exclusive { - lock.Type = syscall.F_WRLCK - } else { - lock.Type = syscall.F_RDLCK - } - err := syscall.FcntlFlock(db.file.Fd(), syscall.F_SETLK, &lock) - if err == nil { - return nil - } else if err != syscall.EAGAIN { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - return syscall.FcntlFlock(uintptr(db.file.Fd()), syscall.F_SETLK, &lock) -} - -// mmap memory maps a DB's data file. -func mmap(db *DB, sz int) error { - // Map the data file to memory. - b, err := unix.Mmap(int(db.file.Fd()), 0, sz, syscall.PROT_READ, syscall.MAP_SHARED|db.MmapFlags) - if err != nil { - return err - } - - // Advise the kernel that the mmap is accessed randomly. - if err := unix.Madvise(b, syscall.MADV_RANDOM); err != nil { - return fmt.Errorf("madvise: %s", err) - } - - // Save the original byte slice and convert to a byte array pointer. - db.dataref = b - db.data = (*[maxMapSize]byte)(unsafe.Pointer(&b[0])) - db.datasz = sz - return nil -} - -// munmap unmaps a DB's data file from memory. -func munmap(db *DB) error { - // Ignore the unmap if we have no mapped data. - if db.dataref == nil { - return nil - } - - // Unmap using the original byte slice. 
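-	// unix.Munmap must be given the original slice returned by unix.Mmap,
-	// not the db.data array view.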
- err := unix.Munmap(db.dataref) - db.dataref = nil - db.data = nil - db.datasz = 0 - return err -} diff --git a/vendor/src/github.com/boltdb/bolt/bolt_windows.go b/vendor/src/github.com/boltdb/bolt/bolt_windows.go deleted file mode 100644 index d538e6afd7..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bolt_windows.go +++ /dev/null @@ -1,144 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "syscall" - "time" - "unsafe" -) - -// LockFileEx code derived from golang build filemutex_windows.go @ v1.5.1 -var ( - modkernel32 = syscall.NewLazyDLL("kernel32.dll") - procLockFileEx = modkernel32.NewProc("LockFileEx") - procUnlockFileEx = modkernel32.NewProc("UnlockFileEx") -) - -const ( - lockExt = ".lock" - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/aa365203(v=vs.85).aspx - flagLockExclusive = 2 - flagLockFailImmediately = 1 - - // see https://msdn.microsoft.com/en-us/library/windows/desktop/ms681382(v=vs.85).aspx - errLockViolation syscall.Errno = 0x21 -) - -func lockFileEx(h syscall.Handle, flags, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procLockFileEx.Call(uintptr(h), uintptr(flags), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol))) - if r == 0 { - return err - } - return nil -} - -func unlockFileEx(h syscall.Handle, reserved, locklow, lockhigh uint32, ol *syscall.Overlapped) (err error) { - r, _, err := procUnlockFileEx.Call(uintptr(h), uintptr(reserved), uintptr(locklow), uintptr(lockhigh), uintptr(unsafe.Pointer(ol)), 0) - if r == 0 { - return err - } - return nil -} - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} - -// flock acquires an advisory lock on a file descriptor. -func flock(db *DB, mode os.FileMode, exclusive bool, timeout time.Duration) error { - // Create a separate lock file on windows because a process - // cannot share an exclusive lock on the same file. This is - // needed during Tx.WriteTo(). - f, err := os.OpenFile(db.path+lockExt, os.O_CREATE, mode) - if err != nil { - return err - } - db.lockfile = f - - var t time.Time - for { - // If we're beyond our timeout then return an error. - // This can only occur after we've attempted a flock once. - if t.IsZero() { - t = time.Now() - } else if timeout > 0 && time.Since(t) > timeout { - return ErrTimeout - } - - var flag uint32 = flagLockFailImmediately - if exclusive { - flag |= flagLockExclusive - } - - err := lockFileEx(syscall.Handle(db.lockfile.Fd()), flag, 0, 1, 0, &syscall.Overlapped{}) - if err == nil { - return nil - } else if err != errLockViolation { - return err - } - - // Wait for a bit and try again. - time.Sleep(50 * time.Millisecond) - } -} - -// funlock releases an advisory lock on a file descriptor. -func funlock(db *DB) error { - err := unlockFileEx(syscall.Handle(db.lockfile.Fd()), 0, 1, 0, &syscall.Overlapped{}) - db.lockfile.Close() - os.Remove(db.path+lockExt) - return err -} - -// mmap memory maps a DB's data file. -// Based on: https://github.com/edsrzf/mmap-go -func mmap(db *DB, sz int) error { - if !db.readOnly { - // Truncate the database to the size of the mmap. - if err := db.file.Truncate(int64(sz)); err != nil { - return fmt.Errorf("truncate: %s", err) - } - } - - // Open a file mapping handle. 
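-	// Despite the names below, sizelo holds the high 32 bits of the size and
-	// sizehi the low 32 bits; CreateFileMapping expects (high, low), so the
-	// argument order still works out correctly.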
- sizelo := uint32(sz >> 32) - sizehi := uint32(sz) & 0xffffffff - h, errno := syscall.CreateFileMapping(syscall.Handle(db.file.Fd()), nil, syscall.PAGE_READONLY, sizelo, sizehi, nil) - if h == 0 { - return os.NewSyscallError("CreateFileMapping", errno) - } - - // Create the memory map. - addr, errno := syscall.MapViewOfFile(h, syscall.FILE_MAP_READ, 0, 0, uintptr(sz)) - if addr == 0 { - return os.NewSyscallError("MapViewOfFile", errno) - } - - // Close mapping handle. - if err := syscall.CloseHandle(syscall.Handle(h)); err != nil { - return os.NewSyscallError("CloseHandle", err) - } - - // Convert to a byte array. - db.data = ((*[maxMapSize]byte)(unsafe.Pointer(addr))) - db.datasz = sz - - return nil -} - -// munmap unmaps a pointer from a file. -// Based on: https://github.com/edsrzf/mmap-go -func munmap(db *DB) error { - if db.data == nil { - return nil - } - - addr := (uintptr)(unsafe.Pointer(&db.data[0])) - if err := syscall.UnmapViewOfFile(addr); err != nil { - return os.NewSyscallError("UnmapViewOfFile", err) - } - return nil -} diff --git a/vendor/src/github.com/boltdb/bolt/boltsync_unix.go b/vendor/src/github.com/boltdb/bolt/boltsync_unix.go deleted file mode 100644 index f50442523c..0000000000 --- a/vendor/src/github.com/boltdb/bolt/boltsync_unix.go +++ /dev/null @@ -1,8 +0,0 @@ -// +build !windows,!plan9,!linux,!openbsd - -package bolt - -// fdatasync flushes written data to a file descriptor. -func fdatasync(db *DB) error { - return db.file.Sync() -} diff --git a/vendor/src/github.com/boltdb/bolt/bucket.go b/vendor/src/github.com/boltdb/bolt/bucket.go deleted file mode 100644 index 511ce72d33..0000000000 --- a/vendor/src/github.com/boltdb/bolt/bucket.go +++ /dev/null @@ -1,778 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "unsafe" -) - -const ( - // MaxKeySize is the maximum length of a key, in bytes. - MaxKeySize = 32768 - - // MaxValueSize is the maximum length of a value, in bytes. - MaxValueSize = (1 << 31) - 2 -) - -const ( - maxUint = ^uint(0) - minUint = 0 - maxInt = int(^uint(0) >> 1) - minInt = -maxInt - 1 -) - -const bucketHeaderSize = int(unsafe.Sizeof(bucket{})) - -const ( - minFillPercent = 0.1 - maxFillPercent = 1.0 -) - -// DefaultFillPercent is the percentage that split pages are filled. -// This value can be changed by setting Bucket.FillPercent. -const DefaultFillPercent = 0.5 - -// Bucket represents a collection of key/value pairs inside the database. -type Bucket struct { - *bucket - tx *Tx // the associated transaction - buckets map[string]*Bucket // subbucket cache - page *page // inline page reference - rootNode *node // materialized node for the root page. - nodes map[pgid]*node // node cache - - // Sets the threshold for filling nodes when they split. By default, - // the bucket will fill to 50% but it can be useful to increase this - // amount if you know that your write workloads are mostly append-only. - // - // This is non-persisted across transactions so it must be set in every Tx. - FillPercent float64 -} - -// bucket represents the on-file representation of a bucket. -// This is stored as the "value" of a bucket key. If the bucket is small enough, -// then its root page can be stored inline in the "value", after the bucket -// header. In the case of inline buckets, the "root" will be 0. -type bucket struct { - root pgid // page id of the bucket's root-level page - sequence uint64 // monotonically incrementing, used by NextSequence() -} - -// newBucket returns a new bucket associated with a transaction. 
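-// Buckets in writable transactions carry node and sub-bucket caches so that
-// mutations can be materialized in memory before commit; read-only
-// transactions skip those allocations.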
-func newBucket(tx *Tx) Bucket { - var b = Bucket{tx: tx, FillPercent: DefaultFillPercent} - if tx.writable { - b.buckets = make(map[string]*Bucket) - b.nodes = make(map[pgid]*node) - } - return b -} - -// Tx returns the tx of the bucket. -func (b *Bucket) Tx() *Tx { - return b.tx -} - -// Root returns the root of the bucket. -func (b *Bucket) Root() pgid { - return b.root -} - -// Writable returns whether the bucket is writable. -func (b *Bucket) Writable() bool { - return b.tx.writable -} - -// Cursor creates a cursor associated with the bucket. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (b *Bucket) Cursor() *Cursor { - // Update transaction statistics. - b.tx.stats.CursorCount++ - - // Allocate and return a cursor. - return &Cursor{ - bucket: b, - stack: make([]elemRef, 0), - } -} - -// Bucket retrieves a nested bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) Bucket(name []byte) *Bucket { - if b.buckets != nil { - if child := b.buckets[string(name)]; child != nil { - return child - } - } - - // Move cursor to key. - c := b.Cursor() - k, v, flags := c.seek(name) - - // Return nil if the key doesn't exist or it is not a bucket. - if !bytes.Equal(name, k) || (flags&bucketLeafFlag) == 0 { - return nil - } - - // Otherwise create a bucket and cache it. - var child = b.openBucket(v) - if b.buckets != nil { - b.buckets[string(name)] = child - } - - return child -} - -// Helper method that re-interprets a sub-bucket value -// from a parent into a Bucket -func (b *Bucket) openBucket(value []byte) *Bucket { - var child = newBucket(b.tx) - - // If unaligned load/stores are broken on this arch and value is - // unaligned simply clone to an aligned byte array. - unaligned := brokenUnaligned && uintptr(unsafe.Pointer(&value[0]))&3 != 0 - - if unaligned { - value = cloneBytes(value) - } - - // If this is a writable transaction then we need to copy the bucket entry. - // Read-only transactions can point directly at the mmap entry. - if b.tx.writable && !unaligned { - child.bucket = &bucket{} - *child.bucket = *(*bucket)(unsafe.Pointer(&value[0])) - } else { - child.bucket = (*bucket)(unsafe.Pointer(&value[0])) - } - - // Save a reference to the inline page if the bucket is inline. - if child.root == 0 { - child.page = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - } - - return &child -} - -// CreateBucket creates a new bucket at the given key and returns the new bucket. -// Returns an error if the key already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (b *Bucket) CreateBucket(key []byte) (*Bucket, error) { - if b.tx.db == nil { - return nil, ErrTxClosed - } else if !b.tx.writable { - return nil, ErrTxNotWritable - } else if len(key) == 0 { - return nil, ErrBucketNameRequired - } - - // Move cursor to correct position. - c := b.Cursor() - k, _, flags := c.seek(key) - - // Return an error if there is an existing key. - if bytes.Equal(key, k) { - if (flags & bucketLeafFlag) != 0 { - return nil, ErrBucketExists - } else { - return nil, ErrIncompatibleValue - } - } - - // Create empty, inline bucket. - var bucket = Bucket{ - bucket: &bucket{}, - rootNode: &node{isLeaf: true}, - FillPercent: DefaultFillPercent, - } - var value = bucket.write() - - // Insert into node. 
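-	// node.put takes (oldKey, newKey, value, pgid, flags); both keys are the
-	// bucket name here since this is a fresh insert rather than a rename.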
-	key = cloneBytes(key)
-	c.node().put(key, key, value, 0, bucketLeafFlag)
-
-	// Since subbuckets are not allowed on inline buckets, we need to
-	// dereference the inline page, if it exists. This will cause the bucket
-	// to be treated as a regular, non-inline bucket for the rest of the tx.
-	b.page = nil
-
-	return b.Bucket(key), nil
-}
-
-// CreateBucketIfNotExists creates a new bucket if it doesn't already exist and returns a reference to it.
-// Returns an error if the bucket name is blank, or if the bucket name is too long.
-// The bucket instance is only valid for the lifetime of the transaction.
-func (b *Bucket) CreateBucketIfNotExists(key []byte) (*Bucket, error) {
-	child, err := b.CreateBucket(key)
-	if err == ErrBucketExists {
-		return b.Bucket(key), nil
-	} else if err != nil {
-		return nil, err
-	}
-	return child, nil
-}
-
-// DeleteBucket deletes a bucket at the given key.
-// Returns an error if the bucket does not exist, or if the key represents a non-bucket value.
-func (b *Bucket) DeleteBucket(key []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if bucket doesn't exist or is not a bucket.
-	if !bytes.Equal(key, k) {
-		return ErrBucketNotFound
-	} else if (flags & bucketLeafFlag) == 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Recursively delete all child buckets.
-	child := b.Bucket(key)
-	err := child.ForEach(func(k, v []byte) error {
-		if v == nil {
-			if err := child.DeleteBucket(k); err != nil {
-				return fmt.Errorf("delete bucket: %s", err)
-			}
-		}
-		return nil
-	})
-	if err != nil {
-		return err
-	}
-
-	// Remove cached copy.
-	delete(b.buckets, string(key))
-
-	// Release all bucket pages to freelist.
-	child.nodes = nil
-	child.rootNode = nil
-	child.free()
-
-	// Delete the node if we have a matching key.
-	c.node().del(key)
-
-	return nil
-}
-
-// Get retrieves the value for a key in the bucket.
-// Returns a nil value if the key does not exist or if the key is a nested bucket.
-// The returned value is only valid for the life of the transaction.
-func (b *Bucket) Get(key []byte) []byte {
-	k, v, flags := b.Cursor().seek(key)
-
-	// Return nil if this is a bucket.
-	if (flags & bucketLeafFlag) != 0 {
-		return nil
-	}
-
-	// If our target node isn't the same key as what's passed in then return nil.
-	if !bytes.Equal(key, k) {
-		return nil
-	}
-	return v
-}
-
-// Put sets the value for a key in the bucket.
-// If the key exists then its previous value will be overwritten.
-// Supplied value must remain valid for the life of the transaction.
-// Returns an error if the bucket was created from a read-only transaction, if the key is blank, if the key is too large, or if the value is too large.
-func (b *Bucket) Put(key []byte, value []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	} else if len(key) == 0 {
-		return ErrKeyRequired
-	} else if len(key) > MaxKeySize {
-		return ErrKeyTooLarge
-	} else if int64(len(value)) > MaxValueSize {
-		return ErrValueTooLarge
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	k, _, flags := c.seek(key)
-
-	// Return an error if there is an existing key with a bucket value.
-	if bytes.Equal(key, k) && (flags&bucketLeafFlag) != 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Insert into node.
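-	// The key is cloned because the node keeps a reference to it for the rest
-	// of the transaction, while the caller's slice may be reused or may point
-	// into an mmap that could be remapped.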
-	key = cloneBytes(key)
-	c.node().put(key, key, value, 0, 0)
-
-	return nil
-}
-
-// Delete removes a key from the bucket.
-// If the key does not exist then nothing is done and a nil error is returned.
-// Returns an error if the bucket was created from a read-only transaction.
-func (b *Bucket) Delete(key []byte) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	}
-
-	// Move cursor to correct position.
-	c := b.Cursor()
-	_, _, flags := c.seek(key)
-
-	// Return an error if there is already existing bucket value.
-	if (flags & bucketLeafFlag) != 0 {
-		return ErrIncompatibleValue
-	}
-
-	// Delete the node if we have a matching key.
-	c.node().del(key)
-
-	return nil
-}
-
-// Sequence returns the current integer for the bucket without incrementing it.
-func (b *Bucket) Sequence() uint64 { return b.bucket.sequence }
-
-// SetSequence updates the sequence number for the bucket.
-func (b *Bucket) SetSequence(v uint64) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	} else if !b.Writable() {
-		return ErrTxNotWritable
-	}
-
-	// Materialize the root node if it hasn't been already so that the
-	// bucket will be saved during commit.
-	if b.rootNode == nil {
-		_ = b.node(b.root, nil)
-	}
-
-	// Set the sequence.
-	b.bucket.sequence = v
-	return nil
-}
-
-// NextSequence returns an autoincrementing integer for the bucket.
-func (b *Bucket) NextSequence() (uint64, error) {
-	if b.tx.db == nil {
-		return 0, ErrTxClosed
-	} else if !b.Writable() {
-		return 0, ErrTxNotWritable
-	}
-
-	// Materialize the root node if it hasn't been already so that the
-	// bucket will be saved during commit.
-	if b.rootNode == nil {
-		_ = b.node(b.root, nil)
-	}
-
-	// Increment and return the sequence.
-	b.bucket.sequence++
-	return b.bucket.sequence, nil
-}
-
-// ForEach executes a function for each key/value pair in a bucket.
-// If the provided function returns an error then the iteration is stopped and
-// the error is returned to the caller. The provided function must not modify
-// the bucket; this will result in undefined behavior.
-func (b *Bucket) ForEach(fn func(k, v []byte) error) error {
-	if b.tx.db == nil {
-		return ErrTxClosed
-	}
-	c := b.Cursor()
-	for k, v := c.First(); k != nil; k, v = c.Next() {
-		if err := fn(k, v); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// Stats returns stats on a bucket.
-func (b *Bucket) Stats() BucketStats {
-	var s, subStats BucketStats
-	pageSize := b.tx.db.pageSize
-	s.BucketN += 1
-	if b.root == 0 {
-		s.InlineBucketN += 1
-	}
-	b.forEachPage(func(p *page, depth int) {
-		if (p.flags & leafPageFlag) != 0 {
-			s.KeyN += int(p.count)
-
-			// used totals the used bytes for the page
-			used := pageHeaderSize
-
-			if p.count != 0 {
-				// If page has any elements, add all element headers.
-				used += leafPageElementSize * int(p.count-1)
-
-				// Add all element key, value sizes.
-				// The computation takes advantage of the fact that the position
-				// of the last element's key/value equals the total of the sizes
-				// of all previous elements' keys and values.
-				// It also includes the last element's header.
- lastElement := p.leafPageElement(p.count - 1) - used += int(lastElement.pos + lastElement.ksize + lastElement.vsize) - } - - if b.root == 0 { - // For inlined bucket just update the inline stats - s.InlineBucketInuse += used - } else { - // For non-inlined bucket update all the leaf stats - s.LeafPageN++ - s.LeafInuse += used - s.LeafOverflowN += int(p.overflow) - - // Collect stats from sub-buckets. - // Do that by iterating over all element headers - // looking for the ones with the bucketLeafFlag. - for i := uint16(0); i < p.count; i++ { - e := p.leafPageElement(i) - if (e.flags & bucketLeafFlag) != 0 { - // For any bucket element, open the element value - // and recursively call Stats on the contained bucket. - subStats.Add(b.openBucket(e.value()).Stats()) - } - } - } - } else if (p.flags & branchPageFlag) != 0 { - s.BranchPageN++ - lastElement := p.branchPageElement(p.count - 1) - - // used totals the used bytes for the page - // Add header and all element headers. - used := pageHeaderSize + (branchPageElementSize * int(p.count-1)) - - // Add size of all keys and values. - // Again, use the fact that last element's position equals to - // the total of key, value sizes of all previous elements. - used += int(lastElement.pos + lastElement.ksize) - s.BranchInuse += used - s.BranchOverflowN += int(p.overflow) - } - - // Keep track of maximum page depth. - if depth+1 > s.Depth { - s.Depth = (depth + 1) - } - }) - - // Alloc stats can be computed from page counts and pageSize. - s.BranchAlloc = (s.BranchPageN + s.BranchOverflowN) * pageSize - s.LeafAlloc = (s.LeafPageN + s.LeafOverflowN) * pageSize - - // Add the max depth of sub-buckets to get total nested depth. - s.Depth += subStats.Depth - // Add the stats for all sub-buckets - s.Add(subStats) - return s -} - -// forEachPage iterates over every page in a bucket, including inline pages. -func (b *Bucket) forEachPage(fn func(*page, int)) { - // If we have an inline page then just use that. - if b.page != nil { - fn(b.page, 0) - return - } - - // Otherwise traverse the page hierarchy. - b.tx.forEachPage(b.root, 0, fn) -} - -// forEachPageNode iterates over every page (or node) in a bucket. -// This also includes inline pages. -func (b *Bucket) forEachPageNode(fn func(*page, *node, int)) { - // If we have an inline page or root node then just use that. - if b.page != nil { - fn(b.page, nil, 0) - return - } - b._forEachPageNode(b.root, 0, fn) -} - -func (b *Bucket) _forEachPageNode(pgid pgid, depth int, fn func(*page, *node, int)) { - var p, n = b.pageNode(pgid) - - // Execute function. - fn(p, n, depth) - - // Recursively loop over children. - if p != nil { - if (p.flags & branchPageFlag) != 0 { - for i := 0; i < int(p.count); i++ { - elem := p.branchPageElement(uint16(i)) - b._forEachPageNode(elem.pgid, depth+1, fn) - } - } - } else { - if !n.isLeaf { - for _, inode := range n.inodes { - b._forEachPageNode(inode.pgid, depth+1, fn) - } - } - } -} - -// spill writes all the nodes for this bucket to dirty pages. -func (b *Bucket) spill() error { - // Spill all child buckets first. - for name, child := range b.buckets { - // If the child bucket is small enough and it has no child buckets then - // write it inline into the parent bucket's page. Otherwise spill it - // like a normal bucket and make the parent value a pointer to the page. - var value []byte - if child.inlineable() { - child.free() - value = child.write() - } else { - if err := child.spill(); err != nil { - return err - } - - // Update the child bucket header in this bucket. 
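-			// For a non-inline child the parent's leaf value is just this small
-			// header: the child's root page id and its sequence number.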
- value = make([]byte, unsafe.Sizeof(bucket{})) - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *child.bucket - } - - // Skip writing the bucket if there are no materialized nodes. - if child.rootNode == nil { - continue - } - - // Update parent node. - var c = b.Cursor() - k, _, flags := c.seek([]byte(name)) - if !bytes.Equal([]byte(name), k) { - panic(fmt.Sprintf("misplaced bucket header: %x -> %x", []byte(name), k)) - } - if flags&bucketLeafFlag == 0 { - panic(fmt.Sprintf("unexpected bucket header flag: %x", flags)) - } - c.node().put([]byte(name), []byte(name), value, 0, bucketLeafFlag) - } - - // Ignore if there's not a materialized root node. - if b.rootNode == nil { - return nil - } - - // Spill nodes. - if err := b.rootNode.spill(); err != nil { - return err - } - b.rootNode = b.rootNode.root() - - // Update the root node for this bucket. - if b.rootNode.pgid >= b.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", b.rootNode.pgid, b.tx.meta.pgid)) - } - b.root = b.rootNode.pgid - - return nil -} - -// inlineable returns true if a bucket is small enough to be written inline -// and if it contains no subbuckets. Otherwise returns false. -func (b *Bucket) inlineable() bool { - var n = b.rootNode - - // Bucket must only contain a single leaf node. - if n == nil || !n.isLeaf { - return false - } - - // Bucket is not inlineable if it contains subbuckets or if it goes beyond - // our threshold for inline bucket size. - var size = pageHeaderSize - for _, inode := range n.inodes { - size += leafPageElementSize + len(inode.key) + len(inode.value) - - if inode.flags&bucketLeafFlag != 0 { - return false - } else if size > b.maxInlineBucketSize() { - return false - } - } - - return true -} - -// Returns the maximum total size of a bucket to make it a candidate for inlining. -func (b *Bucket) maxInlineBucketSize() int { - return b.tx.db.pageSize / 4 -} - -// write allocates and writes a bucket to a byte slice. -func (b *Bucket) write() []byte { - // Allocate the appropriate size. - var n = b.rootNode - var value = make([]byte, bucketHeaderSize+n.size()) - - // Write a bucket header. - var bucket = (*bucket)(unsafe.Pointer(&value[0])) - *bucket = *b.bucket - - // Convert byte slice to a fake page and write the root node. - var p = (*page)(unsafe.Pointer(&value[bucketHeaderSize])) - n.write(p) - - return value -} - -// rebalance attempts to balance all nodes. -func (b *Bucket) rebalance() { - for _, n := range b.nodes { - n.rebalance() - } - for _, child := range b.buckets { - child.rebalance() - } -} - -// node creates a node from a page and associates it with a given parent. -func (b *Bucket) node(pgid pgid, parent *node) *node { - _assert(b.nodes != nil, "nodes map expected") - - // Retrieve node if it's already been created. - if n := b.nodes[pgid]; n != nil { - return n - } - - // Otherwise create a node and cache it. - n := &node{bucket: b, parent: parent} - if parent == nil { - b.rootNode = n - } else { - parent.children = append(parent.children, n) - } - - // Use the inline page if this is an inline bucket. - var p = b.page - if p == nil { - p = b.tx.page(pgid) - } - - // Read the page into the node and cache it. - n.read(p) - b.nodes[pgid] = n - - // Update statistics. - b.tx.stats.NodeCount++ - - return n -} - -// free recursively frees all pages in the bucket. 
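-// Pages are handed to the freelist tagged with the current txid, so they are
-// only reused once no open read transaction can still reference them.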
-func (b *Bucket) free() {
-	if b.root == 0 {
-		return
-	}
-
-	var tx = b.tx
-	b.forEachPageNode(func(p *page, n *node, _ int) {
-		if p != nil {
-			tx.db.freelist.free(tx.meta.txid, p)
-		} else {
-			n.free()
-		}
-	})
-	b.root = 0
-}
-
-// dereference removes all references to the old mmap.
-func (b *Bucket) dereference() {
-	if b.rootNode != nil {
-		b.rootNode.root().dereference()
-	}
-
-	for _, child := range b.buckets {
-		child.dereference()
-	}
-}
-
-// pageNode returns the in-memory node, if it exists.
-// Otherwise returns the underlying page.
-func (b *Bucket) pageNode(id pgid) (*page, *node) {
-	// Inline buckets have a fake page embedded in their value so treat them
-	// differently. We'll return the rootNode (if available) or the fake page.
-	if b.root == 0 {
-		if id != 0 {
-			panic(fmt.Sprintf("inline bucket non-zero page access(2): %d != 0", id))
-		}
-		if b.rootNode != nil {
-			return nil, b.rootNode
-		}
-		return b.page, nil
-	}
-
-	// Check the node cache for non-inline buckets.
-	if b.nodes != nil {
-		if n := b.nodes[id]; n != nil {
-			return nil, n
-		}
-	}
-
-	// Finally lookup the page from the transaction if no node is materialized.
-	return b.tx.page(id), nil
-}
-
-// BucketStats records statistics about resources used by a bucket.
-type BucketStats struct {
-	// Page count statistics.
-	BranchPageN     int // number of logical branch pages
-	BranchOverflowN int // number of physical branch overflow pages
-	LeafPageN       int // number of logical leaf pages
-	LeafOverflowN   int // number of physical leaf overflow pages
-
-	// Tree statistics.
-	KeyN  int // number of key/value pairs
-	Depth int // number of levels in B+tree
-
-	// Page size utilization.
-	BranchAlloc int // bytes allocated for physical branch pages
-	BranchInuse int // bytes actually used for branch data
-	LeafAlloc   int // bytes allocated for physical leaf pages
-	LeafInuse   int // bytes actually used for leaf data
-
-	// Bucket statistics
-	BucketN           int // total number of buckets including the top bucket
-	InlineBucketN     int // total number of inlined buckets
-	InlineBucketInuse int // bytes used for inlined buckets (also accounted for in LeafInuse)
-}
-
-func (s *BucketStats) Add(other BucketStats) {
-	s.BranchPageN += other.BranchPageN
-	s.BranchOverflowN += other.BranchOverflowN
-	s.LeafPageN += other.LeafPageN
-	s.LeafOverflowN += other.LeafOverflowN
-	s.KeyN += other.KeyN
-	if s.Depth < other.Depth {
-		s.Depth = other.Depth
-	}
-	s.BranchAlloc += other.BranchAlloc
-	s.BranchInuse += other.BranchInuse
-	s.LeafAlloc += other.LeafAlloc
-	s.LeafInuse += other.LeafInuse
-
-	s.BucketN += other.BucketN
-	s.InlineBucketN += other.InlineBucketN
-	s.InlineBucketInuse += other.InlineBucketInuse
-}
-
-// cloneBytes returns a copy of a given slice.
-func cloneBytes(v []byte) []byte {
-	var clone = make([]byte, len(v))
-	copy(clone, v)
-	return clone
-}
diff --git a/vendor/src/github.com/boltdb/bolt/cursor.go b/vendor/src/github.com/boltdb/bolt/cursor.go
deleted file mode 100644
index 1be9f35e3e..0000000000
--- a/vendor/src/github.com/boltdb/bolt/cursor.go
+++ /dev/null
@@ -1,400 +0,0 @@
-package bolt
-
-import (
-	"bytes"
-	"fmt"
-	"sort"
-)
-
-// Cursor represents an iterator that can traverse over all key/value pairs in a bucket in sorted order.
-// Cursors see nested buckets with value == nil.
-// Cursors can be obtained from a transaction and are valid as long as the transaction is open.
-//
-// Keys and values returned from the cursor are only valid for the life of the transaction.
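-//
-// A typical full scan looks like this (an illustrative sketch, not part of
-// the original documentation):
-//
-//	c := bucket.Cursor()
-//	for k, v := c.First(); k != nil; k, v = c.Next() {
-//		// process k and v
-//	}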
-// -// Changing data while traversing with a cursor may cause it to be invalidated -// and return unexpected keys and/or values. You must reposition your cursor -// after mutating data. -type Cursor struct { - bucket *Bucket - stack []elemRef -} - -// Bucket returns the bucket that this cursor was created from. -func (c *Cursor) Bucket() *Bucket { - return c.bucket -} - -// First moves the cursor to the first item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) First() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - c.first() - - // If we land on an empty page then move to the next value. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - c.next() - } - - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v - -} - -// Last moves the cursor to the last item in the bucket and returns its key and value. -// If the bucket is empty then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Last() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - c.stack = c.stack[:0] - p, n := c.bucket.pageNode(c.bucket.root) - ref := elemRef{page: p, node: n} - ref.index = ref.count() - 1 - c.stack = append(c.stack, ref) - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Next moves the cursor to the next item in the bucket and returns its key and value. -// If the cursor is at the end of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Next() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - k, v, flags := c.next() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Prev moves the cursor to the previous item in the bucket and returns its key and value. -// If the cursor is at the beginning of the bucket then a nil key and value are returned. -// The returned key and value are only valid for the life of the transaction. -func (c *Cursor) Prev() (key []byte, value []byte) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Attempt to move back one element until we're successful. - // Move up the stack as we hit the beginning of each page in our stack. - for i := len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index > 0 { - elem.index-- - break - } - c.stack = c.stack[:i] - } - - // If we've hit the end then return nil. - if len(c.stack) == 0 { - return nil, nil - } - - // Move down the stack to find the last element of the last leaf under this branch. - c.last() - k, v, flags := c.keyValue() - if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. If no keys -// follow, a nil key is returned. -// The returned key and value are only valid for the life of the transaction. 
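-//
-// Seek combines naturally with bytes.HasPrefix for prefix scans, as in this
-// illustrative sketch (the "user:" prefix is made up):
-//
-//	prefix := []byte("user:")
-//	for k, v := c.Seek(prefix); k != nil && bytes.HasPrefix(k, prefix); k, v = c.Next() {
-//		// process k and v
-//	}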
-func (c *Cursor) Seek(seek []byte) (key []byte, value []byte) { - k, v, flags := c.seek(seek) - - // If we ended up after the last element of a page then move to the next one. - if ref := &c.stack[len(c.stack)-1]; ref.index >= ref.count() { - k, v, flags = c.next() - } - - if k == nil { - return nil, nil - } else if (flags & uint32(bucketLeafFlag)) != 0 { - return k, nil - } - return k, v -} - -// Delete removes the current key/value under the cursor from the bucket. -// Delete fails if current key/value is a bucket or if the transaction is not writable. -func (c *Cursor) Delete() error { - if c.bucket.tx.db == nil { - return ErrTxClosed - } else if !c.bucket.Writable() { - return ErrTxNotWritable - } - - key, _, flags := c.keyValue() - // Return an error if current value is a bucket. - if (flags & bucketLeafFlag) != 0 { - return ErrIncompatibleValue - } - c.node().del(key) - - return nil -} - -// seek moves the cursor to a given key and returns it. -// If the key does not exist then the next key is used. -func (c *Cursor) seek(seek []byte) (key []byte, value []byte, flags uint32) { - _assert(c.bucket.tx.db != nil, "tx closed") - - // Start from root page/node and traverse to correct page. - c.stack = c.stack[:0] - c.search(seek, c.bucket.root) - ref := &c.stack[len(c.stack)-1] - - // If the cursor is pointing to the end of page/node then return nil. - if ref.index >= ref.count() { - return nil, nil, 0 - } - - // If this is a bucket then return a nil value. - return c.keyValue() -} - -// first moves the cursor to the first leaf element under the last page in the stack. -func (c *Cursor) first() { - for { - // Exit when we hit a leaf page. - var ref = &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the first element to the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - c.stack = append(c.stack, elemRef{page: p, node: n, index: 0}) - } -} - -// last moves the cursor to the last leaf element under the last page in the stack. -func (c *Cursor) last() { - for { - // Exit when we hit a leaf page. - ref := &c.stack[len(c.stack)-1] - if ref.isLeaf() { - break - } - - // Keep adding pages pointing to the last element in the stack. - var pgid pgid - if ref.node != nil { - pgid = ref.node.inodes[ref.index].pgid - } else { - pgid = ref.page.branchPageElement(uint16(ref.index)).pgid - } - p, n := c.bucket.pageNode(pgid) - - var nextRef = elemRef{page: p, node: n} - nextRef.index = nextRef.count() - 1 - c.stack = append(c.stack, nextRef) - } -} - -// next moves to the next leaf element and returns the key and value. -// If the cursor is at the last leaf element then it stays there and returns nil. -func (c *Cursor) next() (key []byte, value []byte, flags uint32) { - for { - // Attempt to move over one element until we're successful. - // Move up the stack as we hit the end of each page in our stack. - var i int - for i = len(c.stack) - 1; i >= 0; i-- { - elem := &c.stack[i] - if elem.index < elem.count()-1 { - elem.index++ - break - } - } - - // If we've hit the root page then stop and return. This will leave the - // cursor on the last element of the last page. - if i == -1 { - return nil, nil, 0 - } - - // Otherwise start from where we left off in the stack and find the - // first element of the first leaf page. 
- c.stack = c.stack[:i+1] - c.first() - - // If this is an empty page then restart and move back up the stack. - // https://github.com/boltdb/bolt/issues/450 - if c.stack[len(c.stack)-1].count() == 0 { - continue - } - - return c.keyValue() - } -} - -// search recursively performs a binary search against a given page/node until it finds a given key. -func (c *Cursor) search(key []byte, pgid pgid) { - p, n := c.bucket.pageNode(pgid) - if p != nil && (p.flags&(branchPageFlag|leafPageFlag)) == 0 { - panic(fmt.Sprintf("invalid page type: %d: %x", p.id, p.flags)) - } - e := elemRef{page: p, node: n} - c.stack = append(c.stack, e) - - // If we're on a leaf page/node then find the specific node. - if e.isLeaf() { - c.nsearch(key) - return - } - - if n != nil { - c.searchNode(key, n) - return - } - c.searchPage(key, p) -} - -func (c *Cursor) searchNode(key []byte, n *node) { - var exact bool - index := sort.Search(len(n.inodes), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(n.inodes[i].key, key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, n.inodes[index].pgid) -} - -func (c *Cursor) searchPage(key []byte, p *page) { - // Binary search for the correct range. - inodes := p.branchPageElements() - - var exact bool - index := sort.Search(int(p.count), func(i int) bool { - // TODO(benbjohnson): Optimize this range search. It's a bit hacky right now. - // sort.Search() finds the lowest index where f() != -1 but we need the highest index. - ret := bytes.Compare(inodes[i].key(), key) - if ret == 0 { - exact = true - } - return ret != -1 - }) - if !exact && index > 0 { - index-- - } - c.stack[len(c.stack)-1].index = index - - // Recursively search to the next page. - c.search(key, inodes[index].pgid) -} - -// nsearch searches the leaf node on the top of the stack for a key. -func (c *Cursor) nsearch(key []byte) { - e := &c.stack[len(c.stack)-1] - p, n := e.page, e.node - - // If we have a node then search its inodes. - if n != nil { - index := sort.Search(len(n.inodes), func(i int) bool { - return bytes.Compare(n.inodes[i].key, key) != -1 - }) - e.index = index - return - } - - // If we have a page then search its leaf elements. - inodes := p.leafPageElements() - index := sort.Search(int(p.count), func(i int) bool { - return bytes.Compare(inodes[i].key(), key) != -1 - }) - e.index = index -} - -// keyValue returns the key and value of the current leaf element. -func (c *Cursor) keyValue() ([]byte, []byte, uint32) { - ref := &c.stack[len(c.stack)-1] - if ref.count() == 0 || ref.index >= ref.count() { - return nil, nil, 0 - } - - // Retrieve value from node. - if ref.node != nil { - inode := &ref.node.inodes[ref.index] - return inode.key, inode.value, inode.flags - } - - // Or retrieve value from page. - elem := ref.page.leafPageElement(uint16(ref.index)) - return elem.key(), elem.value(), elem.flags -} - -// node returns the node that the cursor is currently positioned on. -func (c *Cursor) node() *node { - _assert(len(c.stack) > 0, "accessing a node with a zero-length cursor stack") - - // If the top of the stack is a leaf node then just return it. 
- if ref := &c.stack[len(c.stack)-1]; ref.node != nil && ref.isLeaf() { - return ref.node - } - - // Start from root and traverse down the hierarchy. - var n = c.stack[0].node - if n == nil { - n = c.bucket.node(c.stack[0].page.id, nil) - } - for _, ref := range c.stack[:len(c.stack)-1] { - _assert(!n.isLeaf, "expected branch node") - n = n.childAt(int(ref.index)) - } - _assert(n.isLeaf, "expected leaf node") - return n -} - -// elemRef represents a reference to an element on a given page/node. -type elemRef struct { - page *page - node *node - index int -} - -// isLeaf returns whether the ref is pointing at a leaf page/node. -func (r *elemRef) isLeaf() bool { - if r.node != nil { - return r.node.isLeaf - } - return (r.page.flags & leafPageFlag) != 0 -} - -// count returns the number of inodes or page elements. -func (r *elemRef) count() int { - if r.node != nil { - return len(r.node.inodes) - } - return int(r.page.count) -} diff --git a/vendor/src/github.com/boltdb/bolt/db.go b/vendor/src/github.com/boltdb/bolt/db.go deleted file mode 100644 index 1223493ca7..0000000000 --- a/vendor/src/github.com/boltdb/bolt/db.go +++ /dev/null @@ -1,1036 +0,0 @@ -package bolt - -import ( - "errors" - "fmt" - "hash/fnv" - "log" - "os" - "runtime" - "runtime/debug" - "strings" - "sync" - "time" - "unsafe" -) - -// The largest step that can be taken when remapping the mmap. -const maxMmapStep = 1 << 30 // 1GB - -// The data file format version. -const version = 2 - -// Represents a marker value to indicate that a file is a Bolt DB. -const magic uint32 = 0xED0CDAED - -// IgnoreNoSync specifies whether the NoSync field of a DB is ignored when -// syncing changes to a file. This is required as some operating systems, -// such as OpenBSD, do not have a unified buffer cache (UBC) and writes -// must be synchronized using the msync(2) syscall. -const IgnoreNoSync = runtime.GOOS == "openbsd" - -// Default values if not set in a DB instance. -const ( - DefaultMaxBatchSize int = 1000 - DefaultMaxBatchDelay = 10 * time.Millisecond - DefaultAllocSize = 16 * 1024 * 1024 -) - -// default page size for db is set to the OS page size. -var defaultPageSize = os.Getpagesize() - -// DB represents a collection of buckets persisted to a file on disk. -// All data access is performed through transactions which can be obtained through the DB. -// All the functions on DB will return a ErrDatabaseNotOpen if accessed before Open() is called. -type DB struct { - // When enabled, the database will perform a Check() after every commit. - // A panic is issued if the database is in an inconsistent state. This - // flag has a large performance impact so it should only be used for - // debugging purposes. - StrictMode bool - - // Setting the NoSync flag will cause the database to skip fsync() - // calls after each commit. This can be useful when bulk loading data - // into a database and you can restart the bulk load in the event of - // a system failure or database corruption. Do not set this flag for - // normal use. - // - // If the package global IgnoreNoSync constant is true, this value is - // ignored. See the comment on that constant for more details. - // - // THIS IS UNSAFE. PLEASE USE WITH CAUTION. - NoSync bool - - // When true, skips the truncate call when growing the database. - // Setting this to true is only safe on non-ext3/ext4 systems. - // Skipping truncation avoids preallocation of hard drive space and - // bypasses a truncate() and fsync() syscall on remapping. 
- // - // https://github.com/boltdb/bolt/issues/284 - NoGrowSync bool - - // If you want to read the entire database fast, you can set MmapFlag to - // syscall.MAP_POPULATE on Linux 2.6.23+ for sequential read-ahead. - MmapFlags int - - // MaxBatchSize is the maximum size of a batch. Default value is - // copied from DefaultMaxBatchSize in Open. - // - // If <=0, disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchSize int - - // MaxBatchDelay is the maximum delay before a batch starts. - // Default value is copied from DefaultMaxBatchDelay in Open. - // - // If <=0, effectively disables batching. - // - // Do not change concurrently with calls to Batch. - MaxBatchDelay time.Duration - - // AllocSize is the amount of space allocated when the database - // needs to create new pages. This is done to amortize the cost - // of truncate() and fsync() when growing the data file. - AllocSize int - - path string - file *os.File - lockfile *os.File // windows only - dataref []byte // mmap'ed readonly, write throws SEGV - data *[maxMapSize]byte - datasz int - filesz int // current on disk file size - meta0 *meta - meta1 *meta - pageSize int - opened bool - rwtx *Tx - txs []*Tx - freelist *freelist - stats Stats - - pagePool sync.Pool - - batchMu sync.Mutex - batch *batch - - rwlock sync.Mutex // Allows only one writer at a time. - metalock sync.Mutex // Protects meta page access. - mmaplock sync.RWMutex // Protects mmap access during remapping. - statlock sync.RWMutex // Protects stats access. - - ops struct { - writeAt func(b []byte, off int64) (n int, err error) - } - - // Read only mode. - // When true, Update() and Begin(true) return ErrDatabaseReadOnly immediately. - readOnly bool -} - -// Path returns the path to currently open database file. -func (db *DB) Path() string { - return db.path -} - -// GoString returns the Go string representation of the database. -func (db *DB) GoString() string { - return fmt.Sprintf("bolt.DB{path:%q}", db.path) -} - -// String returns the string representation of the database. -func (db *DB) String() string { - return fmt.Sprintf("DB<%q>", db.path) -} - -// Open creates and opens a database at the given path. -// If the file does not exist then it will be created automatically. -// Passing in nil options will cause Bolt to open the database with the default options. -func Open(path string, mode os.FileMode, options *Options) (*DB, error) { - var db = &DB{opened: true} - - // Set default options if no options are provided. - if options == nil { - options = DefaultOptions - } - db.NoGrowSync = options.NoGrowSync - db.MmapFlags = options.MmapFlags - - // Set default values for later DB operations. - db.MaxBatchSize = DefaultMaxBatchSize - db.MaxBatchDelay = DefaultMaxBatchDelay - db.AllocSize = DefaultAllocSize - - flag := os.O_RDWR - if options.ReadOnly { - flag = os.O_RDONLY - db.readOnly = true - } - - // Open data file and separate sync handler for metadata writes. - db.path = path - var err error - if db.file, err = os.OpenFile(db.path, flag|os.O_CREATE, mode); err != nil { - _ = db.close() - return nil, err - } - - // Lock file so that other processes using Bolt in read-write mode cannot - // use the database at the same time. This would cause corruption since - // the two processes would write meta pages and free pages separately. - // The database file is locked exclusively (only one process can grab the lock) - // if !options.ReadOnly. 
- // The database file is locked using the shared lock (more than one process may - // hold a lock at the same time) otherwise (options.ReadOnly is set). - if err := flock(db, mode, !db.readOnly, options.Timeout); err != nil { - _ = db.close() - return nil, err - } - - // Default values for test hooks - db.ops.writeAt = db.file.WriteAt - - // Initialize the database if it doesn't exist. - if info, err := db.file.Stat(); err != nil { - return nil, err - } else if info.Size() == 0 { - // Initialize new files with meta pages. - if err := db.init(); err != nil { - return nil, err - } - } else { - // Read the first meta page to determine the page size. - var buf [0x1000]byte - if _, err := db.file.ReadAt(buf[:], 0); err == nil { - m := db.pageInBuffer(buf[:], 0).meta() - if err := m.validate(); err != nil { - // If we can't read the page size, we can assume it's the same - // as the OS -- since that's how the page size was chosen in the - // first place. - // - // If the first page is invalid and this OS uses a different - // page size than what the database was created with then we - // are out of luck and cannot access the database. - db.pageSize = os.Getpagesize() - } else { - db.pageSize = int(m.pageSize) - } - } - } - - // Initialize page pool. - db.pagePool = sync.Pool{ - New: func() interface{} { - return make([]byte, db.pageSize) - }, - } - - // Memory map the data file. - if err := db.mmap(options.InitialMmapSize); err != nil { - _ = db.close() - return nil, err - } - - // Read in the freelist. - db.freelist = newFreelist() - db.freelist.read(db.page(db.meta().freelist)) - - // Mark the database as opened and return. - return db, nil -} - -// mmap opens the underlying memory-mapped file and initializes the meta references. -// minsz is the minimum size that the new mmap can be. -func (db *DB) mmap(minsz int) error { - db.mmaplock.Lock() - defer db.mmaplock.Unlock() - - info, err := db.file.Stat() - if err != nil { - return fmt.Errorf("mmap stat error: %s", err) - } else if int(info.Size()) < db.pageSize*2 { - return fmt.Errorf("file size too small") - } - - // Ensure the size is at least the minimum size. - var size = int(info.Size()) - if size < minsz { - size = minsz - } - size, err = db.mmapSize(size) - if err != nil { - return err - } - - // Dereference all mmap references before unmapping. - if db.rwtx != nil { - db.rwtx.root.dereference() - } - - // Unmap existing data before continuing. - if err := db.munmap(); err != nil { - return err - } - - // Memory-map the data file as a byte slice. - if err := mmap(db, size); err != nil { - return err - } - - // Save references to the meta pages. - db.meta0 = db.page(0).meta() - db.meta1 = db.page(1).meta() - - // Validate the meta pages. We only return an error if both meta pages fail - // validation, since meta0 failing validation means that it wasn't saved - // properly -- but we can recover using meta1. And vice-versa. - err0 := db.meta0.validate() - err1 := db.meta1.validate() - if err0 != nil && err1 != nil { - return err0 - } - - return nil -} - -// munmap unmaps the data file from memory. -func (db *DB) munmap() error { - if err := munmap(db); err != nil { - return fmt.Errorf("unmap error: " + err.Error()) - } - return nil -} - -// mmapSize determines the appropriate size for the mmap given the current size -// of the database. The minimum size is 32KB and doubles until it reaches 1GB. -// Returns an error if the new mmap size is greater than the max allowed. 
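-// For example (sizes are illustrative): a request of 50,000 bytes falls in
-// the doubling range and returns 1<<16 (64KB), while a request just over 1GB
-// is rounded up to the next 1GB step and then to a multiple of the page size.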
-func (db *DB) mmapSize(size int) (int, error) {
-	// Double the size from 32KB until 1GB.
-	for i := uint(15); i <= 30; i++ {
-		if size <= 1<<i {
-			return 1 << i, nil
-		}
-	}
-
-	// Verify the requested size is not above the maximum allowed.
-	if size > maxMapSize {
-		return 0, fmt.Errorf("mmap too large")
-	}
-
-	// If larger than 1GB then grow by 1GB at a time.
-	sz := int64(size)
-	if remainder := sz % int64(maxMmapStep); remainder > 0 {
-		sz += int64(maxMmapStep) - remainder
-	}
-
-	// Ensure that the mmap size is a multiple of the page size.
-	// This should always be true since we're incrementing in MBs.
-	pageSize := int64(db.pageSize)
-	if (sz % pageSize) != 0 {
-		sz = ((sz / pageSize) + 1) * pageSize
-	}
-
-	// If we've exceeded the max size then only grow up to the max size.
-	if sz > maxMapSize {
-		sz = maxMapSize
-	}
-
-	return int(sz), nil
-}
-
-// init creates a new database file and initializes its meta pages.
-func (db *DB) init() error {
-	// Set the page size to the OS page size.
-	db.pageSize = os.Getpagesize()
-
-	// Create two meta pages on a buffer.
-	buf := make([]byte, db.pageSize*4)
-	for i := 0; i < 2; i++ {
-		p := db.pageInBuffer(buf[:], pgid(i))
-		p.id = pgid(i)
-		p.flags = metaPageFlag
-
-		// Initialize the meta page.
-		m := p.meta()
-		m.magic = magic
-		m.version = version
-		m.pageSize = uint32(db.pageSize)
-		m.freelist = 2
-		m.root = bucket{root: 3}
-		m.pgid = 4
-		m.txid = txid(i)
-		m.checksum = m.sum64()
-	}
-
-	// Write an empty freelist at page 3.
-	p := db.pageInBuffer(buf[:], pgid(2))
-	p.id = pgid(2)
-	p.flags = freelistPageFlag
-	p.count = 0
-
-	// Write an empty leaf page at page 4.
-	p = db.pageInBuffer(buf[:], pgid(3))
-	p.id = pgid(3)
-	p.flags = leafPageFlag
-	p.count = 0
-
-	// Write the buffer to our data file.
-	if _, err := db.ops.writeAt(buf, 0); err != nil {
-		return err
-	}
-	if err := fdatasync(db); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Close releases all database resources.
-// All transactions must be closed before closing the database.
-func (db *DB) Close() error {
-	db.rwlock.Lock()
-	defer db.rwlock.Unlock()
-
-	db.metalock.Lock()
-	defer db.metalock.Unlock()
-
-	db.mmaplock.RLock()
-	defer db.mmaplock.RUnlock()
-
-	return db.close()
-}
-
-func (db *DB) close() error {
-	if !db.opened {
-		return nil
-	}
-
-	db.opened = false
-
-	db.freelist = nil
-
-	// Clear ops.
-	db.ops.writeAt = nil
-
-	// Close the mmap.
-	if err := db.munmap(); err != nil {
-		return err
-	}
-
-	// Close file handles.
-	if db.file != nil {
-		// No need to unlock read-only file.
-		if !db.readOnly {
-			// Unlock the file.
-			if err := funlock(db); err != nil {
-				log.Printf("bolt.Close(): funlock error: %s", err)
-			}
-		}
-
-		// Close the file descriptor.
-		if err := db.file.Close(); err != nil {
-			return fmt.Errorf("db file close: %s", err)
-		}
-		db.file = nil
-	}
-
-	db.path = ""
-	return nil
-}
-
-// Begin starts a new transaction.
-// Multiple read-only transactions can be used concurrently but only one
-// write transaction can be used at a time. Starting multiple write transactions
-// will cause the calls to block and be serialized until the current write
-// transaction finishes.
-//
-// Transactions should not be dependent on one another. Opening a read
-// transaction and a write transaction in the same goroutine can cause the
-// writer to deadlock because the database periodically needs to re-mmap itself
-// as it grows and it cannot do that while a read transaction is open.
-// -// If a long running read transaction (for example, a snapshot transaction) is -// needed, you might want to set DB.InitialMmapSize to a large enough value -// to avoid potential blocking of write transaction. -// -// IMPORTANT: You must close read-only transactions after you are finished or -// else the database will not reclaim old pages. -func (db *DB) Begin(writable bool) (*Tx, error) { - if writable { - return db.beginRWTx() - } - return db.beginTx() -} - -func (db *DB) beginTx() (*Tx, error) { - // Lock the meta pages while we initialize the transaction. We obtain - // the meta lock before the mmap lock because that's the order that the - // write transaction will obtain them. - db.metalock.Lock() - - // Obtain a read-only lock on the mmap. When the mmap is remapped it will - // obtain a write lock so all transactions must finish before it can be - // remapped. - db.mmaplock.RLock() - - // Exit if the database is not open yet. - if !db.opened { - db.mmaplock.RUnlock() - db.metalock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{} - t.init(db) - - // Keep track of transaction until it closes. - db.txs = append(db.txs, t) - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Update the transaction stats. - db.statlock.Lock() - db.stats.TxN++ - db.stats.OpenTxN = n - db.statlock.Unlock() - - return t, nil -} - -func (db *DB) beginRWTx() (*Tx, error) { - // If the database was opened with Options.ReadOnly, return an error. - if db.readOnly { - return nil, ErrDatabaseReadOnly - } - - // Obtain writer lock. This is released by the transaction when it closes. - // This enforces only one writer transaction at a time. - db.rwlock.Lock() - - // Once we have the writer lock then we can lock the meta pages so that - // we can set up the transaction. - db.metalock.Lock() - defer db.metalock.Unlock() - - // Exit if the database is not open yet. - if !db.opened { - db.rwlock.Unlock() - return nil, ErrDatabaseNotOpen - } - - // Create a transaction associated with the database. - t := &Tx{writable: true} - t.init(db) - db.rwtx = t - - // Free any pages associated with closed read-only transactions. - var minid txid = 0xFFFFFFFFFFFFFFFF - for _, t := range db.txs { - if t.meta.txid < minid { - minid = t.meta.txid - } - } - if minid > 0 { - db.freelist.release(minid - 1) - } - - return t, nil -} - -// removeTx removes a transaction from the database. -func (db *DB) removeTx(tx *Tx) { - // Release the read lock on the mmap. - db.mmaplock.RUnlock() - - // Use the meta lock to restrict access to the DB object. - db.metalock.Lock() - - // Remove the transaction. - for i, t := range db.txs { - if t == tx { - db.txs = append(db.txs[:i], db.txs[i+1:]...) - break - } - } - n := len(db.txs) - - // Unlock the meta pages. - db.metalock.Unlock() - - // Merge statistics. - db.statlock.Lock() - db.stats.OpenTxN = n - db.stats.TxStats.add(&tx.stats) - db.statlock.Unlock() -} - -// Update executes a function within the context of a read-write managed transaction. -// If no error is returned from the function then the transaction is committed. -// If an error is returned then the entire transaction is rolled back. -// Any error that is returned from the function or returned from the commit is -// returned from the Update() method. -// -// Attempting to manually commit or rollback within the function will cause a panic. 
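-//
-// A minimal usage sketch (the bucket and key names are illustrative):
-//
-//	err := db.Update(func(tx *bolt.Tx) error {
-//		b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
-//		if err != nil {
-//			return err
-//		}
-//		return b.Put([]byte("foo"), []byte("bar"))
-//	})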
-func (db *DB) Update(fn func(*Tx) error) error {
-	t, err := db.Begin(true)
-	if err != nil {
-		return err
-	}
-
-	// Make sure the transaction rolls back in the event of a panic.
-	defer func() {
-		if t.db != nil {
-			t.rollback()
-		}
-	}()
-
-	// Mark as a managed tx so that the inner function cannot manually commit.
-	t.managed = true
-
-	// If an error is returned from the function then roll back and return the error.
-	err = fn(t)
-	t.managed = false
-	if err != nil {
-		_ = t.Rollback()
-		return err
-	}
-
-	return t.Commit()
-}
-
-// View executes a function within the context of a managed read-only transaction.
-// Any error that is returned from the function is returned from the View() method.
-//
-// Attempting to manually rollback within the function will cause a panic.
-func (db *DB) View(fn func(*Tx) error) error {
-	t, err := db.Begin(false)
-	if err != nil {
-		return err
-	}
-
-	// Make sure the transaction rolls back in the event of a panic.
-	defer func() {
-		if t.db != nil {
-			t.rollback()
-		}
-	}()
-
-	// Mark as a managed tx so that the inner function cannot manually rollback.
-	t.managed = true
-
-	// If an error is returned from the function then pass it through.
-	err = fn(t)
-	t.managed = false
-	if err != nil {
-		_ = t.Rollback()
-		return err
-	}
-
-	if err := t.Rollback(); err != nil {
-		return err
-	}
-
-	return nil
-}
-
-// Batch calls fn as part of a batch. It behaves similarly to Update,
-// except:
-//
-// 1. concurrent Batch calls can be combined into a single Bolt
-// transaction.
-//
-// 2. the function passed to Batch may be called multiple times,
-// regardless of whether it returns an error or not.
-//
-// This means that Batch function side effects must be idempotent and
-// take permanent effect only after a successful return is seen by the
-// caller.
-//
-// The maximum batch size and delay can be adjusted with DB.MaxBatchSize
-// and DB.MaxBatchDelay, respectively.
-//
-// Batch is only useful when there are multiple goroutines calling it.
-func (db *DB) Batch(fn func(*Tx) error) error {
-	errCh := make(chan error, 1)
-
-	db.batchMu.Lock()
-	if (db.batch == nil) || (db.batch != nil && len(db.batch.calls) >= db.MaxBatchSize) {
-		// There is no existing batch, or the existing batch is full; start a new one.
-		db.batch = &batch{
-			db: db,
-		}
-		db.batch.timer = time.AfterFunc(db.MaxBatchDelay, db.batch.trigger)
-	}
-	db.batch.calls = append(db.batch.calls, call{fn: fn, err: errCh})
-	if len(db.batch.calls) >= db.MaxBatchSize {
-		// Wake up the batch; it's ready to run.
-		go db.batch.trigger()
-	}
-	db.batchMu.Unlock()
-
-	err := <-errCh
-	if err == trySolo {
-		err = db.Update(fn)
-	}
-	return err
-}
-
-type call struct {
-	fn  func(*Tx) error
-	err chan<- error
-}
-
-type batch struct {
-	db    *DB
-	timer *time.Timer
-	start sync.Once
-	calls []call
-}
-
-// trigger runs the batch if it hasn't already been run.
-func (b *batch) trigger() {
-	b.start.Do(b.run)
-}
-
-// run performs the transactions in the batch and communicates results
-// back to DB.Batch.
-func (b *batch) run() {
-	b.db.batchMu.Lock()
-	b.timer.Stop()
-	// Make sure no new work is added to this batch, but don't break
-	// other batches.
-	if b.db.batch == b {
-		b.db.batch = nil
-	}
-	b.db.batchMu.Unlock()
-
-retry:
-	for len(b.calls) > 0 {
-		var failIdx = -1
-		err := b.db.Update(func(tx *Tx) error {
-			for i, c := range b.calls {
-				if err := safelyCall(c.fn, tx); err != nil {
-					failIdx = i
-					return err
-				}
-			}
-			return nil
-		})
-
-		if failIdx >= 0 {
-			// Take the failing transaction out of the batch. It's
-			// safe to shorten b.calls here because db.batch no longer
-			// points to us, and we hold the mutex anyway.
-			c := b.calls[failIdx]
-			b.calls[failIdx], b.calls = b.calls[len(b.calls)-1], b.calls[:len(b.calls)-1]
-			// Tell the submitter to re-run it solo and continue with the rest of the batch.
-			c.err <- trySolo
-			continue retry
-		}
-
-		// Pass success, or bolt internal errors, to all callers.
-		for _, c := range b.calls {
-			if c.err != nil {
-				c.err <- err
-			}
-		}
-		break retry
-	}
-}
-
-// trySolo is a special sentinel error value used for signaling that a
-// transaction function should be re-run. It should never be seen by
-// callers.
-var trySolo = errors.New("batch function returned an error and should be re-run solo")
-
-type panicked struct {
-	reason interface{}
-}
-
-func (p panicked) Error() string {
-	if err, ok := p.reason.(error); ok {
-		return err.Error()
-	}
-	return fmt.Sprintf("panic: %v", p.reason)
-}
-
-func safelyCall(fn func(*Tx) error, tx *Tx) (err error) {
-	defer func() {
-		if p := recover(); p != nil {
-			err = panicked{p}
-		}
-	}()
-	return fn(tx)
-}
-
-// Sync executes fdatasync() against the database file handle.
-//
-// This is not necessary under normal operation. However, if you use NoSync,
-// Sync allows you to force the database file to sync against the disk.
-func (db *DB) Sync() error { return fdatasync(db) }
-
-// Stats retrieves ongoing performance stats for the database.
-// This is only updated when a transaction closes.
-func (db *DB) Stats() Stats {
-	db.statlock.RLock()
-	defer db.statlock.RUnlock()
-	return db.stats
-}
-
-// Info provides internal access to the raw data bytes for the C cursor; use
-// carefully, or not at all.
-func (db *DB) Info() *Info {
-	return &Info{uintptr(unsafe.Pointer(&db.data[0])), db.pageSize}
-}
-
-// page retrieves a page reference from the mmap based on the current page size.
-func (db *DB) page(id pgid) *page {
-	pos := id * pgid(db.pageSize)
-	return (*page)(unsafe.Pointer(&db.data[pos]))
-}
-
-// pageInBuffer retrieves a page reference from a given byte array based on the current page size.
-func (db *DB) pageInBuffer(b []byte, id pgid) *page {
-	return (*page)(unsafe.Pointer(&b[id*pgid(db.pageSize)]))
-}
-
-// meta retrieves the current meta page reference.
-func (db *DB) meta() *meta {
-	// We have to return the meta with the highest txid which doesn't fail
-	// validation. Otherwise, we can cause errors when in fact the database is
-	// in a consistent state. metaA is the one with the higher txid.
-	metaA := db.meta0
-	metaB := db.meta1
-	if db.meta1.txid > db.meta0.txid {
-		metaA = db.meta1
-		metaB = db.meta0
-	}
-
-	// Use the higher meta page if valid. Otherwise fall back to the previous, if valid.
-	if err := metaA.validate(); err == nil {
-		return metaA
-	} else if err := metaB.validate(); err == nil {
-		return metaB
-	}
-
-	// This should never be reached, because both meta1 and meta0 were validated
-	// on mmap() and we do fsync() on every write.
-	panic("bolt.DB.meta(): invalid meta pages")
-}
-
-// allocate returns a contiguous block of memory starting at a given page.
-func (db *DB) allocate(count int) (*page, error) {
-	// Allocate a temporary buffer for the page.
-	var buf []byte
-	if count == 1 {
-		buf = db.pagePool.Get().([]byte)
-	} else {
-		buf = make([]byte, count*db.pageSize)
-	}
-	p := (*page)(unsafe.Pointer(&buf[0]))
-	p.overflow = uint32(count - 1)
-
-	// Use pages from the freelist if they are available.
-	if p.id = db.freelist.allocate(count); p.id != 0 {
-		return p, nil
-	}
-
-	// Resize mmap() if we're at the end.
-	p.id = db.rwtx.meta.pgid
-	var minsz = int((p.id+pgid(count))+1) * db.pageSize
-	if minsz >= db.datasz {
-		if err := db.mmap(minsz); err != nil {
-			return nil, fmt.Errorf("mmap allocate error: %s", err)
-		}
-	}
-
-	// Move the page id high water mark.
-	db.rwtx.meta.pgid += pgid(count)
-
-	return p, nil
-}
-
-// grow grows the size of the database to the given sz.
-func (db *DB) grow(sz int) error {
-	// Ignore if the new size is less than the available file size.
-	if sz <= db.filesz {
-		return nil
-	}
-
-	// If the data is smaller than the alloc size then only allocate what's needed.
-	// Once it goes over the allocation size then allocate in chunks.
-	if db.datasz < db.AllocSize {
-		sz = db.datasz
-	} else {
-		sz += db.AllocSize
-	}
-
-	// Truncate and fsync to ensure file size metadata is flushed.
-	// https://github.com/boltdb/bolt/issues/284
-	if !db.NoGrowSync && !db.readOnly {
-		if runtime.GOOS != "windows" {
-			if err := db.file.Truncate(int64(sz)); err != nil {
-				return fmt.Errorf("file resize error: %s", err)
-			}
-		}
-		if err := db.file.Sync(); err != nil {
-			return fmt.Errorf("file sync error: %s", err)
-		}
-	}
-
-	db.filesz = sz
-	return nil
-}
-
-// IsReadOnly returns whether the database was opened with the ReadOnly option.
-func (db *DB) IsReadOnly() bool {
-	return db.readOnly
-}
-
-// Options represents the options that can be set when opening a database.
-type Options struct {
-	// Timeout is the amount of time to wait to obtain a file lock.
-	// When set to zero it will wait indefinitely. This option is only
-	// available on Darwin and Linux.
-	Timeout time.Duration
-
-	// Sets the DB.NoGrowSync flag before memory mapping the file.
-	NoGrowSync bool
-
-	// Open database in read-only mode. Uses flock(..., LOCK_SH | LOCK_NB) to
-	// grab a shared lock (UNIX).
-	ReadOnly bool
-
-	// Sets the DB.MmapFlags flag before memory mapping the file.
-	MmapFlags int
-
-	// InitialMmapSize is the initial mmap size of the database
-	// in bytes. Read transactions won't block write transactions
-	// if the InitialMmapSize is large enough to hold the database mmap
-	// size. (See DB.Begin for more information.)
-	//
-	// If <=0, the initial map size is 0.
-	// If InitialMmapSize is smaller than the previous database size,
-	// it has no effect.
-	InitialMmapSize int
-}
-
-// DefaultOptions represents the options used if nil options are passed into Open().
-// No timeout is used, which will cause Bolt to wait indefinitely for a lock.
-var DefaultOptions = &Options{
-	Timeout:    0,
-	NoGrowSync: false,
-}
-
-// Stats represents statistics about the database.
-type Stats struct {
-	// Freelist stats
-	FreePageN     int // total number of free pages on the freelist
-	PendingPageN  int // total number of pending pages on the freelist
-	FreeAlloc     int // total bytes allocated in free pages
-	FreelistInuse int // total bytes used by the freelist
-
-	// Transaction stats
-	TxN     int // total number of started read transactions
-	OpenTxN int // number of currently open read transactions
-
-	TxStats TxStats // global, ongoing stats.
-}
-
-// Sub calculates and returns the difference between two sets of database stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
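-//
-// A minimal usage sketch (the sampling interval is illustrative):
-//
-//	prev := db.Stats()
-//	time.Sleep(10 * time.Second)
-//	diff := db.Stats().Sub(&prev)
-//	// diff now holds the counters that accrued between the two snapshots.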
-func (s *Stats) Sub(other *Stats) Stats { - if other == nil { - return *s - } - var diff Stats - diff.FreePageN = s.FreePageN - diff.PendingPageN = s.PendingPageN - diff.FreeAlloc = s.FreeAlloc - diff.FreelistInuse = s.FreelistInuse - diff.TxN = other.TxN - s.TxN - diff.TxStats = s.TxStats.Sub(&other.TxStats) - return diff -} - -func (s *Stats) add(other *Stats) { - s.TxStats.add(&other.TxStats) -} - -type Info struct { - Data uintptr - PageSize int -} - -type meta struct { - magic uint32 - version uint32 - pageSize uint32 - flags uint32 - root bucket - freelist pgid - pgid pgid - txid txid - checksum uint64 -} - -// validate checks the marker bytes and version of the meta page to ensure it matches this binary. -func (m *meta) validate() error { - if m.magic != magic { - return ErrInvalid - } else if m.version != version { - return ErrVersionMismatch - } else if m.checksum != 0 && m.checksum != m.sum64() { - return ErrChecksum - } - return nil -} - -// copy copies one meta object to another. -func (m *meta) copy(dest *meta) { - *dest = *m -} - -// write writes the meta onto a page. -func (m *meta) write(p *page) { - if m.root.root >= m.pgid { - panic(fmt.Sprintf("root bucket pgid (%d) above high water mark (%d)", m.root.root, m.pgid)) - } else if m.freelist >= m.pgid { - panic(fmt.Sprintf("freelist pgid (%d) above high water mark (%d)", m.freelist, m.pgid)) - } - - // Page id is either going to be 0 or 1 which we can determine by the transaction ID. - p.id = pgid(m.txid % 2) - p.flags |= metaPageFlag - - // Calculate the checksum. - m.checksum = m.sum64() - - m.copy(p.meta()) -} - -// generates the checksum for the meta. -func (m *meta) sum64() uint64 { - var h = fnv.New64a() - _, _ = h.Write((*[unsafe.Offsetof(meta{}.checksum)]byte)(unsafe.Pointer(m))[:]) - return h.Sum64() -} - -// _assert will panic with a given formatted message if the given condition is false. -func _assert(condition bool, msg string, v ...interface{}) { - if !condition { - panic(fmt.Sprintf("assertion failed: "+msg, v...)) - } -} - -func warn(v ...interface{}) { fmt.Fprintln(os.Stderr, v...) } -func warnf(msg string, v ...interface{}) { fmt.Fprintf(os.Stderr, msg+"\n", v...) } - -func printstack() { - stack := strings.Join(strings.Split(string(debug.Stack()), "\n")[2:], "\n") - fmt.Fprintln(os.Stderr, stack) -} diff --git a/vendor/src/github.com/boltdb/bolt/doc.go b/vendor/src/github.com/boltdb/bolt/doc.go deleted file mode 100644 index cc937845db..0000000000 --- a/vendor/src/github.com/boltdb/bolt/doc.go +++ /dev/null @@ -1,44 +0,0 @@ -/* -Package bolt implements a low-level key/value store in pure Go. It supports -fully serializable transactions, ACID semantics, and lock-free MVCC with -multiple readers and a single writer. Bolt can be used for projects that -want a simple data store without the need to add large dependencies such as -Postgres or MySQL. - -Bolt is a single-level, zero-copy, B+tree data store. This means that Bolt is -optimized for fast read access and does not require recovery in the event of a -system crash. Transactions which have not finished committing will simply be -rolled back in the event of a crash. - -The design of Bolt is based on Howard Chu's LMDB database project. - -Bolt currently works on Windows, Mac OS X, and Linux. - - -Basics - -There are only a few types in Bolt: DB, Bucket, Tx, and Cursor. The DB is -a collection of buckets and is represented by a single file on disk. A bucket is -a collection of unique keys that are associated with values. 
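-
-A brief, illustrative sketch (the file, bucket, and key names are arbitrary
-examples):
-
-	db, err := bolt.Open("my.db", 0600, nil)
-	if err != nil {
-		log.Fatal(err)
-	}
-	defer db.Close()
-
-	err = db.View(func(tx *bolt.Tx) error {
-		if b := tx.Bucket([]byte("widgets")); b != nil {
-			fmt.Printf("foo=%s\n", b.Get([]byte("foo")))
-		}
-		return nil
-	})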
- -Transactions provide either read-only or read-write access to the database. -Read-only transactions can retrieve key/value pairs and can use Cursors to -iterate over the dataset sequentially. Read-write transactions can create and -delete buckets and can insert and remove keys. Only one read-write transaction -is allowed at a time. - - -Caveats - -The database uses a read-only, memory-mapped data file to ensure that -applications cannot corrupt the database, however, this means that keys and -values returned from Bolt cannot be changed. Writing to a read-only byte slice -will cause Go to panic. - -Keys and values retrieved from the database are only valid for the life of -the transaction. When used outside the transaction, these byte slices can -point to different data or can point to invalid memory which will cause a panic. - - -*/ -package bolt diff --git a/vendor/src/github.com/boltdb/bolt/errors.go b/vendor/src/github.com/boltdb/bolt/errors.go deleted file mode 100644 index a3620a3ebb..0000000000 --- a/vendor/src/github.com/boltdb/bolt/errors.go +++ /dev/null @@ -1,71 +0,0 @@ -package bolt - -import "errors" - -// These errors can be returned when opening or calling methods on a DB. -var ( - // ErrDatabaseNotOpen is returned when a DB instance is accessed before it - // is opened or after it is closed. - ErrDatabaseNotOpen = errors.New("database not open") - - // ErrDatabaseOpen is returned when opening a database that is - // already open. - ErrDatabaseOpen = errors.New("database already open") - - // ErrInvalid is returned when both meta pages on a database are invalid. - // This typically occurs when a file is not a bolt database. - ErrInvalid = errors.New("invalid database") - - // ErrVersionMismatch is returned when the data file was created with a - // different version of Bolt. - ErrVersionMismatch = errors.New("version mismatch") - - // ErrChecksum is returned when either meta page checksum does not match. - ErrChecksum = errors.New("checksum error") - - // ErrTimeout is returned when a database cannot obtain an exclusive lock - // on the data file after the timeout passed to Open(). - ErrTimeout = errors.New("timeout") -) - -// These errors can occur when beginning or committing a Tx. -var ( - // ErrTxNotWritable is returned when performing a write operation on a - // read-only transaction. - ErrTxNotWritable = errors.New("tx not writable") - - // ErrTxClosed is returned when committing or rolling back a transaction - // that has already been committed or rolled back. - ErrTxClosed = errors.New("tx closed") - - // ErrDatabaseReadOnly is returned when a mutating transaction is started on a - // read-only database. - ErrDatabaseReadOnly = errors.New("database is in read-only mode") -) - -// These errors can occur when putting or deleting a value or a bucket. -var ( - // ErrBucketNotFound is returned when trying to access a bucket that has - // not been created yet. - ErrBucketNotFound = errors.New("bucket not found") - - // ErrBucketExists is returned when creating a bucket that already exists. - ErrBucketExists = errors.New("bucket already exists") - - // ErrBucketNameRequired is returned when creating a bucket with a blank name. - ErrBucketNameRequired = errors.New("bucket name required") - - // ErrKeyRequired is returned when inserting a zero-length key. - ErrKeyRequired = errors.New("key required") - - // ErrKeyTooLarge is returned when inserting a key that is larger than MaxKeySize. 
- ErrKeyTooLarge = errors.New("key too large") - - // ErrValueTooLarge is returned when inserting a value that is larger than MaxValueSize. - ErrValueTooLarge = errors.New("value too large") - - // ErrIncompatibleValue is returned when trying create or delete a bucket - // on an existing non-bucket key or when trying to create or delete a - // non-bucket key on an existing bucket key. - ErrIncompatibleValue = errors.New("incompatible value") -) diff --git a/vendor/src/github.com/boltdb/bolt/freelist.go b/vendor/src/github.com/boltdb/bolt/freelist.go deleted file mode 100644 index d32f6cd937..0000000000 --- a/vendor/src/github.com/boltdb/bolt/freelist.go +++ /dev/null @@ -1,248 +0,0 @@ -package bolt - -import ( - "fmt" - "sort" - "unsafe" -) - -// freelist represents a list of all pages that are available for allocation. -// It also tracks pages that have been freed but are still in use by open transactions. -type freelist struct { - ids []pgid // all free and available free page ids. - pending map[txid][]pgid // mapping of soon-to-be free page ids by tx. - cache map[pgid]bool // fast lookup of all free and pending page ids. -} - -// newFreelist returns an empty, initialized freelist. -func newFreelist() *freelist { - return &freelist{ - pending: make(map[txid][]pgid), - cache: make(map[pgid]bool), - } -} - -// size returns the size of the page after serialization. -func (f *freelist) size() int { - return pageHeaderSize + (int(unsafe.Sizeof(pgid(0))) * f.count()) -} - -// count returns count of pages on the freelist -func (f *freelist) count() int { - return f.free_count() + f.pending_count() -} - -// free_count returns count of free pages -func (f *freelist) free_count() int { - return len(f.ids) -} - -// pending_count returns count of pending pages -func (f *freelist) pending_count() int { - var count int - for _, list := range f.pending { - count += len(list) - } - return count -} - -// all returns a list of all free ids and all pending ids in one sorted list. -func (f *freelist) all() []pgid { - m := make(pgids, 0) - - for _, list := range f.pending { - m = append(m, list...) - } - - sort.Sort(m) - return pgids(f.ids).merge(m) -} - -// allocate returns the starting page id of a contiguous list of pages of a given size. -// If a contiguous block cannot be found then 0 is returned. -func (f *freelist) allocate(n int) pgid { - if len(f.ids) == 0 { - return 0 - } - - var initial, previd pgid - for i, id := range f.ids { - if id <= 1 { - panic(fmt.Sprintf("invalid page allocation: %d", id)) - } - - // Reset initial page if this is not contiguous. - if previd == 0 || id-previd != 1 { - initial = id - } - - // If we found a contiguous block then remove it and return it. - if (id-initial)+1 == pgid(n) { - // If we're allocating off the beginning then take the fast path - // and just adjust the existing slice. This will use extra memory - // temporarily but the append() in free() will realloc the slice - // as is necessary. - if (i + 1) == n { - f.ids = f.ids[i+1:] - } else { - copy(f.ids[i-n+1:], f.ids[i+1:]) - f.ids = f.ids[:len(f.ids)-n] - } - - // Remove from the free cache. - for i := pgid(0); i < pgid(n); i++ { - delete(f.cache, initial+i) - } - - return initial - } - - previd = id - } - return 0 -} - -// free releases a page and its overflow for a given transaction id. -// If the page is already free then a panic will occur. 
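-// For example (page numbers are illustrative): freeing page 12 with two
-// overflow pages under txid 7 records pages 12, 13, and 14 in pending[7];
-// a later release(7) moves them onto the free list.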
-func (f *freelist) free(txid txid, p *page) { - if p.id <= 1 { - panic(fmt.Sprintf("cannot free page 0 or 1: %d", p.id)) - } - - // Free page and all its overflow pages. - var ids = f.pending[txid] - for id := p.id; id <= p.id+pgid(p.overflow); id++ { - // Verify that page is not already free. - if f.cache[id] { - panic(fmt.Sprintf("page %d already freed", id)) - } - - // Add to the freelist and cache. - ids = append(ids, id) - f.cache[id] = true - } - f.pending[txid] = ids -} - -// release moves all page ids for a transaction id (or older) to the freelist. -func (f *freelist) release(txid txid) { - m := make(pgids, 0) - for tid, ids := range f.pending { - if tid <= txid { - // Move transaction's pending pages to the available freelist. - // Don't remove from the cache since the page is still free. - m = append(m, ids...) - delete(f.pending, tid) - } - } - sort.Sort(m) - f.ids = pgids(f.ids).merge(m) -} - -// rollback removes the pages from a given pending tx. -func (f *freelist) rollback(txid txid) { - // Remove page ids from cache. - for _, id := range f.pending[txid] { - delete(f.cache, id) - } - - // Remove pages from pending list. - delete(f.pending, txid) -} - -// freed returns whether a given page is in the free list. -func (f *freelist) freed(pgid pgid) bool { - return f.cache[pgid] -} - -// read initializes the freelist from a freelist page. -func (f *freelist) read(p *page) { - // If the page.count is at the max uint16 value (64k) then it's considered - // an overflow and the size of the freelist is stored as the first element. - idx, count := 0, int(p.count) - if count == 0xFFFF { - idx = 1 - count = int(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0]) - } - - // Copy the list of page ids from the freelist. - if count == 0 { - f.ids = nil - } else { - ids := ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[idx:count] - f.ids = make([]pgid, len(ids)) - copy(f.ids, ids) - - // Make sure they're sorted. - sort.Sort(pgids(f.ids)) - } - - // Rebuild the page cache. - f.reindex() -} - -// write writes the page ids onto a freelist page. All free and pending ids are -// saved to disk since in the event of a program crash, all pending ids will -// become free. -func (f *freelist) write(p *page) error { - // Combine the old free pgids and pgids waiting on an open transaction. - ids := f.all() - - // Update the header flag. - p.flags |= freelistPageFlag - - // The page.count can only hold up to 64k elements so if we overflow that - // number then we handle it by putting the size in the first element. - if len(ids) == 0 { - p.count = uint16(len(ids)) - } else if len(ids) < 0xFFFF { - p.count = uint16(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[:], ids) - } else { - p.count = 0xFFFF - ((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[0] = pgid(len(ids)) - copy(((*[maxAllocSize]pgid)(unsafe.Pointer(&p.ptr)))[1:], ids) - } - - return nil -} - -// reload reads the freelist from a page and filters out pending items. -func (f *freelist) reload(p *page) { - f.read(p) - - // Build a cache of only pending pages. - pcache := make(map[pgid]bool) - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - pcache[pendingID] = true - } - } - - // Check each page in the freelist and build a new available freelist - // with any pages not in the pending lists. 
- var a []pgid - for _, id := range f.ids { - if !pcache[id] { - a = append(a, id) - } - } - f.ids = a - - // Once the available list is rebuilt then rebuild the free cache so that - // it includes the available and pending free pages. - f.reindex() -} - -// reindex rebuilds the free cache based on available and pending free lists. -func (f *freelist) reindex() { - f.cache = make(map[pgid]bool, len(f.ids)) - for _, id := range f.ids { - f.cache[id] = true - } - for _, pendingIDs := range f.pending { - for _, pendingID := range pendingIDs { - f.cache[pendingID] = true - } - } -} diff --git a/vendor/src/github.com/boltdb/bolt/node.go b/vendor/src/github.com/boltdb/bolt/node.go deleted file mode 100644 index 159318b229..0000000000 --- a/vendor/src/github.com/boltdb/bolt/node.go +++ /dev/null @@ -1,604 +0,0 @@ -package bolt - -import ( - "bytes" - "fmt" - "sort" - "unsafe" -) - -// node represents an in-memory, deserialized page. -type node struct { - bucket *Bucket - isLeaf bool - unbalanced bool - spilled bool - key []byte - pgid pgid - parent *node - children nodes - inodes inodes -} - -// root returns the top-level node this node is attached to. -func (n *node) root() *node { - if n.parent == nil { - return n - } - return n.parent.root() -} - -// minKeys returns the minimum number of inodes this node should have. -func (n *node) minKeys() int { - if n.isLeaf { - return 1 - } - return 2 -} - -// size returns the size of the node after serialization. -func (n *node) size() int { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - } - return sz -} - -// sizeLessThan returns true if the node is less than a given size. -// This is an optimization to avoid calculating a large node when we only need -// to know if it fits inside a certain page size. -func (n *node) sizeLessThan(v int) bool { - sz, elsz := pageHeaderSize, n.pageElementSize() - for i := 0; i < len(n.inodes); i++ { - item := &n.inodes[i] - sz += elsz + len(item.key) + len(item.value) - if sz >= v { - return false - } - } - return true -} - -// pageElementSize returns the size of each page element based on the type of node. -func (n *node) pageElementSize() int { - if n.isLeaf { - return leafPageElementSize - } - return branchPageElementSize -} - -// childAt returns the child node at a given index. -func (n *node) childAt(index int) *node { - if n.isLeaf { - panic(fmt.Sprintf("invalid childAt(%d) on a leaf node", index)) - } - return n.bucket.node(n.inodes[index].pgid, n) -} - -// childIndex returns the index of a given child node. -func (n *node) childIndex(child *node) int { - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, child.key) != -1 }) - return index -} - -// numChildren returns the number of children. -func (n *node) numChildren() int { - return len(n.inodes) -} - -// nextSibling returns the next node with the same parent. -func (n *node) nextSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index >= n.parent.numChildren()-1 { - return nil - } - return n.parent.childAt(index + 1) -} - -// prevSibling returns the previous node with the same parent. -func (n *node) prevSibling() *node { - if n.parent == nil { - return nil - } - index := n.parent.childIndex(n) - if index == 0 { - return nil - } - return n.parent.childAt(index - 1) -} - -// put inserts a key/value. 
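-// oldKey selects the existing slot to replace (or the sort position to insert
-// at) while newKey is what is actually stored; spill() uses this to update a
-// parent's separator key, e.g. put(oldSep, firstChildKey, nil, childPgid, 0)
-// (the names in that call are illustrative).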
-func (n *node) put(oldKey, newKey, value []byte, pgid pgid, flags uint32) { - if pgid >= n.bucket.tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", pgid, n.bucket.tx.meta.pgid)) - } else if len(oldKey) <= 0 { - panic("put: zero-length old key") - } else if len(newKey) <= 0 { - panic("put: zero-length new key") - } - - // Find insertion index. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, oldKey) != -1 }) - - // Add capacity and shift nodes if we don't have an exact match and need to insert. - exact := (len(n.inodes) > 0 && index < len(n.inodes) && bytes.Equal(n.inodes[index].key, oldKey)) - if !exact { - n.inodes = append(n.inodes, inode{}) - copy(n.inodes[index+1:], n.inodes[index:]) - } - - inode := &n.inodes[index] - inode.flags = flags - inode.key = newKey - inode.value = value - inode.pgid = pgid - _assert(len(inode.key) > 0, "put: zero-length inode key") -} - -// del removes a key from the node. -func (n *node) del(key []byte) { - // Find index of key. - index := sort.Search(len(n.inodes), func(i int) bool { return bytes.Compare(n.inodes[i].key, key) != -1 }) - - // Exit if the key isn't found. - if index >= len(n.inodes) || !bytes.Equal(n.inodes[index].key, key) { - return - } - - // Delete inode from the node. - n.inodes = append(n.inodes[:index], n.inodes[index+1:]...) - - // Mark the node as needing rebalancing. - n.unbalanced = true -} - -// read initializes the node from a page. -func (n *node) read(p *page) { - n.pgid = p.id - n.isLeaf = ((p.flags & leafPageFlag) != 0) - n.inodes = make(inodes, int(p.count)) - - for i := 0; i < int(p.count); i++ { - inode := &n.inodes[i] - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - inode.flags = elem.flags - inode.key = elem.key() - inode.value = elem.value() - } else { - elem := p.branchPageElement(uint16(i)) - inode.pgid = elem.pgid - inode.key = elem.key() - } - _assert(len(inode.key) > 0, "read: zero-length inode key") - } - - // Save first key so we can find the node in the parent when we spill. - if len(n.inodes) > 0 { - n.key = n.inodes[0].key - _assert(len(n.key) > 0, "read: zero-length node key") - } else { - n.key = nil - } -} - -// write writes the items onto one or more pages. -func (n *node) write(p *page) { - // Initialize page. - if n.isLeaf { - p.flags |= leafPageFlag - } else { - p.flags |= branchPageFlag - } - - if len(n.inodes) >= 0xFFFF { - panic(fmt.Sprintf("inode overflow: %d (pgid=%d)", len(n.inodes), p.id)) - } - p.count = uint16(len(n.inodes)) - - // Stop here if there are no items to write. - if p.count == 0 { - return - } - - // Loop over each item and write it to the page. - b := (*[maxAllocSize]byte)(unsafe.Pointer(&p.ptr))[n.pageElementSize()*len(n.inodes):] - for i, item := range n.inodes { - _assert(len(item.key) > 0, "write: zero-length inode key") - - // Write the page element. - if n.isLeaf { - elem := p.leafPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.flags = item.flags - elem.ksize = uint32(len(item.key)) - elem.vsize = uint32(len(item.value)) - } else { - elem := p.branchPageElement(uint16(i)) - elem.pos = uint32(uintptr(unsafe.Pointer(&b[0])) - uintptr(unsafe.Pointer(elem))) - elem.ksize = uint32(len(item.key)) - elem.pgid = item.pgid - _assert(elem.pgid != p.id, "write: circular dependency occurred") - } - - // If the length of key+value is larger than the max allocation size - // then we need to reallocate the byte array pointer. 
- // - // See: https://github.com/boltdb/bolt/pull/335 - klen, vlen := len(item.key), len(item.value) - if len(b) < klen+vlen { - b = (*[maxAllocSize]byte)(unsafe.Pointer(&b[0]))[:] - } - - // Write data for the element to the end of the page. - copy(b[0:], item.key) - b = b[klen:] - copy(b[0:], item.value) - b = b[vlen:] - } - - // DEBUG ONLY: n.dump() -} - -// split breaks up a node into multiple smaller nodes, if appropriate. -// This should only be called from the spill() function. -func (n *node) split(pageSize int) []*node { - var nodes []*node - - node := n - for { - // Split node into two. - a, b := node.splitTwo(pageSize) - nodes = append(nodes, a) - - // If we can't split then exit the loop. - if b == nil { - break - } - - // Set node to b so it gets split on the next iteration. - node = b - } - - return nodes -} - -// splitTwo breaks up a node into two smaller nodes, if appropriate. -// This should only be called from the split() function. -func (n *node) splitTwo(pageSize int) (*node, *node) { - // Ignore the split if the page doesn't have at least enough nodes for - // two pages or if the nodes can fit in a single page. - if len(n.inodes) <= (minKeysPerPage*2) || n.sizeLessThan(pageSize) { - return n, nil - } - - // Determine the threshold before starting a new node. - var fillPercent = n.bucket.FillPercent - if fillPercent < minFillPercent { - fillPercent = minFillPercent - } else if fillPercent > maxFillPercent { - fillPercent = maxFillPercent - } - threshold := int(float64(pageSize) * fillPercent) - - // Determine split position and sizes of the two pages. - splitIndex, _ := n.splitIndex(threshold) - - // Split node into two separate nodes. - // If there's no parent then we'll need to create one. - if n.parent == nil { - n.parent = &node{bucket: n.bucket, children: []*node{n}} - } - - // Create a new node and add it to the parent. - next := &node{bucket: n.bucket, isLeaf: n.isLeaf, parent: n.parent} - n.parent.children = append(n.parent.children, next) - - // Split inodes across two nodes. - next.inodes = n.inodes[splitIndex:] - n.inodes = n.inodes[:splitIndex] - - // Update the statistics. - n.bucket.tx.stats.Split++ - - return n, next -} - -// splitIndex finds the position where a page will fill a given threshold. -// It returns the index as well as the size of the first page. -// This is only be called from split(). -func (n *node) splitIndex(threshold int) (index, sz int) { - sz = pageHeaderSize - - // Loop until we only have the minimum number of keys required for the second page. - for i := 0; i < len(n.inodes)-minKeysPerPage; i++ { - index = i - inode := n.inodes[i] - elsize := n.pageElementSize() + len(inode.key) + len(inode.value) - - // If we have at least the minimum number of keys and adding another - // node would put us over the threshold then exit and return. - if i >= minKeysPerPage && sz+elsize > threshold { - break - } - - // Add the element size to the total size. - sz += elsize - } - - return -} - -// spill writes the nodes to dirty pages and splits nodes as it goes. -// Returns an error if dirty pages cannot be allocated. -func (n *node) spill() error { - var tx = n.bucket.tx - if n.spilled { - return nil - } - - // Spill child nodes first. Child nodes can materialize sibling nodes in - // the case of split-merge so we cannot use a range loop. We have to check - // the children size on every loop iteration. 
- sort.Sort(n.children) - for i := 0; i < len(n.children); i++ { - if err := n.children[i].spill(); err != nil { - return err - } - } - - // We no longer need the child list because it's only used for spill tracking. - n.children = nil - - // Split nodes into appropriate sizes. The first node will always be n. - var nodes = n.split(tx.db.pageSize) - for _, node := range nodes { - // Add node's page to the freelist if it's not new. - if node.pgid > 0 { - tx.db.freelist.free(tx.meta.txid, tx.page(node.pgid)) - node.pgid = 0 - } - - // Allocate contiguous space for the node. - p, err := tx.allocate((node.size() / tx.db.pageSize) + 1) - if err != nil { - return err - } - - // Write the node. - if p.id >= tx.meta.pgid { - panic(fmt.Sprintf("pgid (%d) above high water mark (%d)", p.id, tx.meta.pgid)) - } - node.pgid = p.id - node.write(p) - node.spilled = true - - // Insert into parent inodes. - if node.parent != nil { - var key = node.key - if key == nil { - key = node.inodes[0].key - } - - node.parent.put(key, node.inodes[0].key, nil, node.pgid, 0) - node.key = node.inodes[0].key - _assert(len(node.key) > 0, "spill: zero-length node key") - } - - // Update the statistics. - tx.stats.Spill++ - } - - // If the root node split and created a new root then we need to spill that - // as well. We'll clear out the children to make sure it doesn't try to respill. - if n.parent != nil && n.parent.pgid == 0 { - n.children = nil - return n.parent.spill() - } - - return nil -} - -// rebalance attempts to combine the node with sibling nodes if the node fill -// size is below a threshold or if there are not enough keys. -func (n *node) rebalance() { - if !n.unbalanced { - return - } - n.unbalanced = false - - // Update statistics. - n.bucket.tx.stats.Rebalance++ - - // Ignore if node is above threshold (25%) and has enough keys. - var threshold = n.bucket.tx.db.pageSize / 4 - if n.size() > threshold && len(n.inodes) > n.minKeys() { - return - } - - // Root node has special handling. - if n.parent == nil { - // If root node is a branch and only has one node then collapse it. - if !n.isLeaf && len(n.inodes) == 1 { - // Move root's child up. - child := n.bucket.node(n.inodes[0].pgid, n) - n.isLeaf = child.isLeaf - n.inodes = child.inodes[:] - n.children = child.children - - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent = n - } - } - - // Remove old child. - child.parent = nil - delete(n.bucket.nodes, child.pgid) - child.free() - } - - return - } - - // If node has no keys then just remove it. - if n.numChildren() == 0 { - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - n.parent.rebalance() - return - } - - _assert(n.parent.numChildren() > 1, "parent must have at least 2 children") - - // Destination node is right sibling if idx == 0, otherwise left sibling. - var target *node - var useNextSibling = (n.parent.childIndex(n) == 0) - if useNextSibling { - target = n.nextSibling() - } else { - target = n.prevSibling() - } - - // If both this node and the target node are too small then merge them. - if useNextSibling { - // Reparent all child nodes being moved. - for _, inode := range target.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = n - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes from target and remove target. - n.inodes = append(n.inodes, target.inodes...) 
- n.parent.del(target.key) - n.parent.removeChild(target) - delete(n.bucket.nodes, target.pgid) - target.free() - } else { - // Reparent all child nodes being moved. - for _, inode := range n.inodes { - if child, ok := n.bucket.nodes[inode.pgid]; ok { - child.parent.removeChild(child) - child.parent = target - child.parent.children = append(child.parent.children, child) - } - } - - // Copy over inodes to target and remove node. - target.inodes = append(target.inodes, n.inodes...) - n.parent.del(n.key) - n.parent.removeChild(n) - delete(n.bucket.nodes, n.pgid) - n.free() - } - - // Either this node or the target node was deleted from the parent so rebalance it. - n.parent.rebalance() -} - -// removes a node from the list of in-memory children. -// This does not affect the inodes. -func (n *node) removeChild(target *node) { - for i, child := range n.children { - if child == target { - n.children = append(n.children[:i], n.children[i+1:]...) - return - } - } -} - -// dereference causes the node to copy all its inode key/value references to heap memory. -// This is required when the mmap is reallocated so inodes are not pointing to stale data. -func (n *node) dereference() { - if n.key != nil { - key := make([]byte, len(n.key)) - copy(key, n.key) - n.key = key - _assert(n.pgid == 0 || len(n.key) > 0, "dereference: zero-length node key on existing node") - } - - for i := range n.inodes { - inode := &n.inodes[i] - - key := make([]byte, len(inode.key)) - copy(key, inode.key) - inode.key = key - _assert(len(inode.key) > 0, "dereference: zero-length inode key") - - value := make([]byte, len(inode.value)) - copy(value, inode.value) - inode.value = value - } - - // Recursively dereference children. - for _, child := range n.children { - child.dereference() - } - - // Update statistics. - n.bucket.tx.stats.NodeDeref++ -} - -// free adds the node's underlying page to the freelist. -func (n *node) free() { - if n.pgid != 0 { - n.bucket.tx.db.freelist.free(n.bucket.tx.meta.txid, n.bucket.tx.page(n.pgid)) - n.pgid = 0 - } -} - -// dump writes the contents of the node to STDERR for debugging purposes. -/* -func (n *node) dump() { - // Write node header. - var typ = "branch" - if n.isLeaf { - typ = "leaf" - } - warnf("[NODE %d {type=%s count=%d}]", n.pgid, typ, len(n.inodes)) - - // Write out abbreviated version of each item. - for _, item := range n.inodes { - if n.isLeaf { - if item.flags&bucketLeafFlag != 0 { - bucket := (*bucket)(unsafe.Pointer(&item.value[0])) - warnf("+L %08x -> (bucket root=%d)", trunc(item.key, 4), bucket.root) - } else { - warnf("+L %08x -> %08x", trunc(item.key, 4), trunc(item.value, 4)) - } - } else { - warnf("+B %08x -> pgid=%d", trunc(item.key, 4), item.pgid) - } - } - warn("") -} -*/ - -type nodes []*node - -func (s nodes) Len() int { return len(s) } -func (s nodes) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s nodes) Less(i, j int) bool { return bytes.Compare(s[i].inodes[0].key, s[j].inodes[0].key) == -1 } - -// inode represents an internal node inside of a node. -// It can be used to point to elements in a page or point -// to an element which hasn't been added to a page yet. 
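-// In a leaf node an inode carries flags, key, and value (bucketLeafFlag in
-// flags marks a nested bucket); in a branch node it carries the key and the
-// pgid of the child page, as populated by read() above.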
-type inode struct { - flags uint32 - pgid pgid - key []byte - value []byte -} - -type inodes []inode diff --git a/vendor/src/github.com/boltdb/bolt/page.go b/vendor/src/github.com/boltdb/bolt/page.go deleted file mode 100644 index 7651a6bf7d..0000000000 --- a/vendor/src/github.com/boltdb/bolt/page.go +++ /dev/null @@ -1,178 +0,0 @@ -package bolt - -import ( - "fmt" - "os" - "sort" - "unsafe" -) - -const pageHeaderSize = int(unsafe.Offsetof(((*page)(nil)).ptr)) - -const minKeysPerPage = 2 - -const branchPageElementSize = int(unsafe.Sizeof(branchPageElement{})) -const leafPageElementSize = int(unsafe.Sizeof(leafPageElement{})) - -const ( - branchPageFlag = 0x01 - leafPageFlag = 0x02 - metaPageFlag = 0x04 - freelistPageFlag = 0x10 -) - -const ( - bucketLeafFlag = 0x01 -) - -type pgid uint64 - -type page struct { - id pgid - flags uint16 - count uint16 - overflow uint32 - ptr uintptr -} - -// typ returns a human readable page type string used for debugging. -func (p *page) typ() string { - if (p.flags & branchPageFlag) != 0 { - return "branch" - } else if (p.flags & leafPageFlag) != 0 { - return "leaf" - } else if (p.flags & metaPageFlag) != 0 { - return "meta" - } else if (p.flags & freelistPageFlag) != 0 { - return "freelist" - } - return fmt.Sprintf("unknown<%02x>", p.flags) -} - -// meta returns a pointer to the metadata section of the page. -func (p *page) meta() *meta { - return (*meta)(unsafe.Pointer(&p.ptr)) -} - -// leafPageElement retrieves the leaf node by index -func (p *page) leafPageElement(index uint16) *leafPageElement { - n := &((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[index] - return n -} - -// leafPageElements retrieves a list of leaf nodes. -func (p *page) leafPageElements() []leafPageElement { - if p.count == 0 { - return nil - } - return ((*[0x7FFFFFF]leafPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// branchPageElement retrieves the branch node by index -func (p *page) branchPageElement(index uint16) *branchPageElement { - return &((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[index] -} - -// branchPageElements retrieves a list of branch nodes. -func (p *page) branchPageElements() []branchPageElement { - if p.count == 0 { - return nil - } - return ((*[0x7FFFFFF]branchPageElement)(unsafe.Pointer(&p.ptr)))[:] -} - -// dump writes n bytes of the page to STDERR as hex output. -func (p *page) hexdump(n int) { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:n] - fmt.Fprintf(os.Stderr, "%x\n", buf) -} - -type pages []*page - -func (s pages) Len() int { return len(s) } -func (s pages) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pages) Less(i, j int) bool { return s[i].id < s[j].id } - -// branchPageElement represents a node on a branch page. -type branchPageElement struct { - pos uint32 - ksize uint32 - pgid pgid -} - -// key returns a byte slice of the node key. -func (n *branchPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize] -} - -// leafPageElement represents a node on a leaf page. -type leafPageElement struct { - flags uint32 - pos uint32 - ksize uint32 - vsize uint32 -} - -// key returns a byte slice of the node key. -func (n *leafPageElement) key() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos]))[:n.ksize:n.ksize] -} - -// value returns a byte slice of the node value. 
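-// As with key(), the returned slice aliases the mmap'd page data, so it is
-// only valid for the life of the transaction (see the package-level Caveats).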
-func (n *leafPageElement) value() []byte { - buf := (*[maxAllocSize]byte)(unsafe.Pointer(n)) - return (*[maxAllocSize]byte)(unsafe.Pointer(&buf[n.pos+n.ksize]))[:n.vsize:n.vsize] -} - -// PageInfo represents human readable information about a page. -type PageInfo struct { - ID int - Type string - Count int - OverflowCount int -} - -type pgids []pgid - -func (s pgids) Len() int { return len(s) } -func (s pgids) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s pgids) Less(i, j int) bool { return s[i] < s[j] } - -// merge returns the sorted union of a and b. -func (a pgids) merge(b pgids) pgids { - // Return the opposite slice if one is nil. - if len(a) == 0 { - return b - } else if len(b) == 0 { - return a - } - - // Create a list to hold all elements from both lists. - merged := make(pgids, 0, len(a)+len(b)) - - // Assign lead to the slice with a lower starting value, follow to the higher value. - lead, follow := a, b - if b[0] < a[0] { - lead, follow = b, a - } - - // Continue while there are elements in the lead. - for len(lead) > 0 { - // Merge largest prefix of lead that is ahead of follow[0]. - n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] }) - merged = append(merged, lead[:n]...) - if n >= len(lead) { - break - } - - // Swap lead and follow. - lead, follow = follow, lead[n:] - } - - // Append what's left in follow. - merged = append(merged, follow...) - - return merged -} diff --git a/vendor/src/github.com/boltdb/bolt/tx.go b/vendor/src/github.com/boltdb/bolt/tx.go deleted file mode 100644 index 1cfb4cde85..0000000000 --- a/vendor/src/github.com/boltdb/bolt/tx.go +++ /dev/null @@ -1,682 +0,0 @@ -package bolt - -import ( - "fmt" - "io" - "os" - "sort" - "strings" - "time" - "unsafe" -) - -// txid represents the internal transaction identifier. -type txid uint64 - -// Tx represents a read-only or read/write transaction on the database. -// Read-only transactions can be used for retrieving values for keys and creating cursors. -// Read/write transactions can create and remove buckets and create and remove keys. -// -// IMPORTANT: You must commit or rollback transactions when you are done with -// them. Pages can not be reclaimed by the writer until no more transactions -// are using them. A long running read transaction can cause the database to -// quickly grow. -type Tx struct { - writable bool - managed bool - db *DB - meta *meta - root Bucket - pages map[pgid]*page - stats TxStats - commitHandlers []func() - - // WriteFlag specifies the flag for write-related methods like WriteTo(). - // Tx opens the database file with the specified flag to copy the data. - // - // By default, the flag is unset, which works well for mostly in-memory - // workloads. For databases that are much larger than available RAM, - // set the flag to syscall.O_DIRECT to avoid trashing the page cache. - WriteFlag int -} - -// init initializes the transaction. -func (tx *Tx) init(db *DB) { - tx.db = db - tx.pages = nil - - // Copy the meta page since it can be changed by the writer. - tx.meta = &meta{} - db.meta().copy(tx.meta) - - // Copy over the root bucket. - tx.root = newBucket(tx) - tx.root.bucket = &bucket{} - *tx.root.bucket = tx.meta.root - - // Increment the transaction id and add a page cache for writable transactions. - if tx.writable { - tx.pages = make(map[pgid]*page) - tx.meta.txid += txid(1) - } -} - -// ID returns the transaction id. -func (tx *Tx) ID() int { - return int(tx.meta.txid) -} - -// DB returns a reference to the database that created the transaction. 
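The `pgids.merge` helper earlier avoids element-by-element comparison: it binary-searches (`sort.Search`) for the longest prefix of the leading slice that stays at or below the follower's head, copies that run in a single `append`, then swaps roles. The same logic extracted into a runnable sketch over plain `uint64` slices:

    package main

    import (
        "fmt"
        "sort"
    )

    // merge returns the sorted union of two already-sorted slices.
    func merge(a, b []uint64) []uint64 {
        if len(a) == 0 {
            return b
        } else if len(b) == 0 {
            return a
        }
        merged := make([]uint64, 0, len(a)+len(b))
        lead, follow := a, b
        if b[0] < a[0] {
            lead, follow = b, a
        }
        for len(lead) > 0 {
            // Copy the largest prefix of lead that is not ahead of follow[0].
            n := sort.Search(len(lead), func(i int) bool { return lead[i] > follow[0] })
            merged = append(merged, lead[:n]...)
            if n >= len(lead) {
                break
            }
            lead, follow = follow, lead[n:]
        }
        return append(merged, follow...)
    }

    func main() {
        fmt.Println(merge([]uint64{1, 4, 9}, []uint64{2, 3, 10}))
        // [1 2 3 4 9 10]
    }
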
-func (tx *Tx) DB() *DB { - return tx.db -} - -// Size returns current database size in bytes as seen by this transaction. -func (tx *Tx) Size() int64 { - return int64(tx.meta.pgid) * int64(tx.db.pageSize) -} - -// Writable returns whether the transaction can perform write operations. -func (tx *Tx) Writable() bool { - return tx.writable -} - -// Cursor creates a cursor associated with the root bucket. -// All items in the cursor will return a nil value because all root bucket keys point to buckets. -// The cursor is only valid as long as the transaction is open. -// Do not use a cursor after the transaction is closed. -func (tx *Tx) Cursor() *Cursor { - return tx.root.Cursor() -} - -// Stats retrieves a copy of the current transaction statistics. -func (tx *Tx) Stats() TxStats { - return tx.stats -} - -// Bucket retrieves a bucket by name. -// Returns nil if the bucket does not exist. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) Bucket(name []byte) *Bucket { - return tx.root.Bucket(name) -} - -// CreateBucket creates a new bucket. -// Returns an error if the bucket already exists, if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucket(name []byte) (*Bucket, error) { - return tx.root.CreateBucket(name) -} - -// CreateBucketIfNotExists creates a new bucket if it doesn't already exist. -// Returns an error if the bucket name is blank, or if the bucket name is too long. -// The bucket instance is only valid for the lifetime of the transaction. -func (tx *Tx) CreateBucketIfNotExists(name []byte) (*Bucket, error) { - return tx.root.CreateBucketIfNotExists(name) -} - -// DeleteBucket deletes a bucket. -// Returns an error if the bucket cannot be found or if the key represents a non-bucket value. -func (tx *Tx) DeleteBucket(name []byte) error { - return tx.root.DeleteBucket(name) -} - -// ForEach executes a function for each bucket in the root. -// If the provided function returns an error then the iteration is stopped and -// the error is returned to the caller. -func (tx *Tx) ForEach(fn func(name []byte, b *Bucket) error) error { - return tx.root.ForEach(func(k, v []byte) error { - if err := fn(k, tx.root.Bucket(k)); err != nil { - return err - } - return nil - }) -} - -// OnCommit adds a handler function to be executed after the transaction successfully commits. -func (tx *Tx) OnCommit(fn func()) { - tx.commitHandlers = append(tx.commitHandlers, fn) -} - -// Commit writes all changes to disk and updates the meta page. -// Returns an error if a disk write error occurs, or if Commit is -// called on a read-only transaction. -func (tx *Tx) Commit() error { - _assert(!tx.managed, "managed tx commit not allowed") - if tx.db == nil { - return ErrTxClosed - } else if !tx.writable { - return ErrTxNotWritable - } - - // TODO(benbjohnson): Use vectorized I/O to write out dirty pages. - - // Rebalance nodes which have had deletions. - var startTime = time.Now() - tx.root.rebalance() - if tx.stats.Rebalance > 0 { - tx.stats.RebalanceTime += time.Since(startTime) - } - - // spill data onto dirty pages. - startTime = time.Now() - if err := tx.root.spill(); err != nil { - tx.rollback() - return err - } - tx.stats.SpillTime += time.Since(startTime) - - // Free the old root bucket. - tx.meta.root.root = tx.root.root - - opgid := tx.meta.pgid - - // Free the freelist and allocate new pages for it. 
This will overestimate - // the size of the freelist but not underestimate the size (which would be bad). - tx.db.freelist.free(tx.meta.txid, tx.db.page(tx.meta.freelist)) - p, err := tx.allocate((tx.db.freelist.size() / tx.db.pageSize) + 1) - if err != nil { - tx.rollback() - return err - } - if err := tx.db.freelist.write(p); err != nil { - tx.rollback() - return err - } - tx.meta.freelist = p.id - - // If the high water mark has moved up then attempt to grow the database. - if tx.meta.pgid > opgid { - if err := tx.db.grow(int(tx.meta.pgid+1) * tx.db.pageSize); err != nil { - tx.rollback() - return err - } - } - - // Write dirty pages to disk. - startTime = time.Now() - if err := tx.write(); err != nil { - tx.rollback() - return err - } - - // If strict mode is enabled then perform a consistency check. - // Only the first consistency error is reported in the panic. - if tx.db.StrictMode { - ch := tx.Check() - var errs []string - for { - err, ok := <-ch - if !ok { - break - } - errs = append(errs, err.Error()) - } - if len(errs) > 0 { - panic("check fail: " + strings.Join(errs, "\n")) - } - } - - // Write meta to disk. - if err := tx.writeMeta(); err != nil { - tx.rollback() - return err - } - tx.stats.WriteTime += time.Since(startTime) - - // Finalize the transaction. - tx.close() - - // Execute commit handlers now that the locks have been removed. - for _, fn := range tx.commitHandlers { - fn() - } - - return nil -} - -// Rollback closes the transaction and ignores all previous updates. Read-only -// transactions must be rolled back and not committed. -func (tx *Tx) Rollback() error { - _assert(!tx.managed, "managed tx rollback not allowed") - if tx.db == nil { - return ErrTxClosed - } - tx.rollback() - return nil -} - -func (tx *Tx) rollback() { - if tx.db == nil { - return - } - if tx.writable { - tx.db.freelist.rollback(tx.meta.txid) - tx.db.freelist.reload(tx.db.page(tx.db.meta().freelist)) - } - tx.close() -} - -func (tx *Tx) close() { - if tx.db == nil { - return - } - if tx.writable { - // Grab freelist stats. - var freelistFreeN = tx.db.freelist.free_count() - var freelistPendingN = tx.db.freelist.pending_count() - var freelistAlloc = tx.db.freelist.size() - - // Remove transaction ref & writer lock. - tx.db.rwtx = nil - tx.db.rwlock.Unlock() - - // Merge statistics. - tx.db.statlock.Lock() - tx.db.stats.FreePageN = freelistFreeN - tx.db.stats.PendingPageN = freelistPendingN - tx.db.stats.FreeAlloc = (freelistFreeN + freelistPendingN) * tx.db.pageSize - tx.db.stats.FreelistInuse = freelistAlloc - tx.db.stats.TxStats.add(&tx.stats) - tx.db.statlock.Unlock() - } else { - tx.db.removeTx(tx) - } - - // Clear all references. - tx.db = nil - tx.meta = nil - tx.root = Bucket{tx: tx} - tx.pages = nil -} - -// Copy writes the entire database to a writer. -// This function exists for backwards compatibility. Use WriteTo() instead. -func (tx *Tx) Copy(w io.Writer) error { - _, err := tx.WriteTo(w) - return err -} - -// WriteTo writes the entire database to a writer. -// If err == nil then exactly tx.Size() bytes will be written into the writer. -func (tx *Tx) WriteTo(w io.Writer) (n int64, err error) { - // Attempt to open reader with WriteFlag - f, err := os.OpenFile(tx.db.path, os.O_RDONLY|tx.WriteFlag, 0) - if err != nil { - return 0, err - } - defer func() { _ = f.Close() }() - - // Generate a meta page. We use the same page data for both meta pages. 
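Putting the transaction lifecycle together: a writable transaction is created, mutated, and then either committed or rolled back, and a deferred `Rollback` becomes a harmless no-op (it merely reports `ErrTxClosed`) once `Commit` has succeeded. A hedged usage sketch, assuming the `bolt.Open` and `DB.Begin` entry points from `db.go` and the bucket `Put` method from `bucket.go`, neither of which appears in this diff:

    package main

    import (
        "log"

        "github.com/boltdb/bolt"
    )

    func main() {
        db, err := bolt.Open("my.db", 0600, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        tx, err := db.Begin(true) // writable transaction
        if err != nil {
            log.Fatal(err)
        }
        // Safe even on the success path: Rollback on a committed
        // transaction just returns ErrTxClosed.
        defer tx.Rollback()

        b, err := tx.CreateBucketIfNotExists([]byte("widgets"))
        if err != nil {
            log.Fatal(err)
        }
        if err := b.Put([]byte("answer"), []byte("42")); err != nil {
            log.Fatal(err)
        }
        if err := tx.Commit(); err != nil {
            log.Fatal(err)
        }
    }
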
- buf := make([]byte, tx.db.pageSize) - page := (*page)(unsafe.Pointer(&buf[0])) - page.flags = metaPageFlag - *page.meta() = *tx.meta - - // Write meta 0. - page.id = 0 - page.meta().checksum = page.meta().sum64() - nn, err := w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 0 copy: %s", err) - } - - // Write meta 1 with a lower transaction id. - page.id = 1 - page.meta().txid -= 1 - page.meta().checksum = page.meta().sum64() - nn, err = w.Write(buf) - n += int64(nn) - if err != nil { - return n, fmt.Errorf("meta 1 copy: %s", err) - } - - // Move past the meta pages in the file. - if _, err := f.Seek(int64(tx.db.pageSize*2), os.SEEK_SET); err != nil { - return n, fmt.Errorf("seek: %s", err) - } - - // Copy data pages. - wn, err := io.CopyN(w, f, tx.Size()-int64(tx.db.pageSize*2)) - n += wn - if err != nil { - return n, err - } - - return n, f.Close() -} - -// CopyFile copies the entire database to file at the given path. -// A reader transaction is maintained during the copy so it is safe to continue -// using the database while a copy is in progress. -func (tx *Tx) CopyFile(path string, mode os.FileMode) error { - f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, mode) - if err != nil { - return err - } - - err = tx.Copy(f) - if err != nil { - _ = f.Close() - return err - } - return f.Close() -} - -// Check performs several consistency checks on the database for this transaction. -// An error is returned if any inconsistency is found. -// -// It can be safely run concurrently on a writable transaction. However, this -// incurs a high cost for large databases and databases with a lot of subbuckets -// because of caching. This overhead can be removed if running on a read-only -// transaction, however, it is not safe to execute other writer transactions at -// the same time. -func (tx *Tx) Check() <-chan error { - ch := make(chan error) - go tx.check(ch) - return ch -} - -func (tx *Tx) check(ch chan error) { - // Check if any pages are double freed. - freed := make(map[pgid]bool) - for _, id := range tx.db.freelist.all() { - if freed[id] { - ch <- fmt.Errorf("page %d: already freed", id) - } - freed[id] = true - } - - // Track every reachable page. - reachable := make(map[pgid]*page) - reachable[0] = tx.page(0) // meta0 - reachable[1] = tx.page(1) // meta1 - for i := uint32(0); i <= tx.page(tx.meta.freelist).overflow; i++ { - reachable[tx.meta.freelist+pgid(i)] = tx.page(tx.meta.freelist) - } - - // Recursively check buckets. - tx.checkBucket(&tx.root, reachable, freed, ch) - - // Ensure all pages below high water mark are either reachable or freed. - for i := pgid(0); i < tx.meta.pgid; i++ { - _, isReachable := reachable[i] - if !isReachable && !freed[i] { - ch <- fmt.Errorf("page %d: unreachable unfreed", int(i)) - } - } - - // Close the channel to signal completion. - close(ch) -} - -func (tx *Tx) checkBucket(b *Bucket, reachable map[pgid]*page, freed map[pgid]bool, ch chan error) { - // Ignore inline buckets. - if b.root == 0 { - return - } - - // Check every page used by this bucket. - b.tx.forEachPage(b.root, 0, func(p *page, _ int) { - if p.id > tx.meta.pgid { - ch <- fmt.Errorf("page %d: out of bounds: %d", int(p.id), int(b.tx.meta.pgid)) - } - - // Ensure each page is only referenced once. - for i := pgid(0); i <= pgid(p.overflow); i++ { - var id = p.id + i - if _, ok := reachable[id]; ok { - ch <- fmt.Errorf("page %d: multiple references", int(id)) - } - reachable[id] = p - } - - // We should only encounter un-freed leaf and branch pages. 
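`WriteTo` above is what makes hot backups cheap: a read-only transaction pins a consistent snapshot while the meta and data pages are streamed straight out of the file. A hedged sketch of the usual pattern, serving a backup over HTTP; it assumes an already-opened `*bolt.DB` and the `DB.Begin` entry point from `db.go`, which is not shown here:

    package main

    import (
        "log"
        "net/http"
        "strconv"

        "github.com/boltdb/bolt"
    )

    func backupHandler(db *bolt.DB) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            tx, err := db.Begin(false) // read-only snapshot
            if err != nil {
                http.Error(w, err.Error(), http.StatusInternalServerError)
                return
            }
            defer tx.Rollback()

            w.Header().Set("Content-Type", "application/octet-stream")
            w.Header().Set("Content-Length", strconv.FormatInt(tx.Size(), 10))
            if _, err := tx.WriteTo(w); err != nil {
                log.Println("backup failed:", err)
            }
        }
    }

    func main() {
        db, err := bolt.Open("my.db", 0600, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
        log.Fatal(http.ListenAndServe("127.0.0.1:6060", backupHandler(db)))
    }
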
- if freed[p.id] { - ch <- fmt.Errorf("page %d: reachable freed", int(p.id)) - } else if (p.flags&branchPageFlag) == 0 && (p.flags&leafPageFlag) == 0 { - ch <- fmt.Errorf("page %d: invalid type: %s", int(p.id), p.typ()) - } - }) - - // Check each bucket within this bucket. - _ = b.ForEach(func(k, v []byte) error { - if child := b.Bucket(k); child != nil { - tx.checkBucket(child, reachable, freed, ch) - } - return nil - }) -} - -// allocate returns a contiguous block of memory starting at a given page. -func (tx *Tx) allocate(count int) (*page, error) { - p, err := tx.db.allocate(count) - if err != nil { - return nil, err - } - - // Save to our page cache. - tx.pages[p.id] = p - - // Update statistics. - tx.stats.PageCount++ - tx.stats.PageAlloc += count * tx.db.pageSize - - return p, nil -} - -// write writes any dirty pages to disk. -func (tx *Tx) write() error { - // Sort pages by id. - pages := make(pages, 0, len(tx.pages)) - for _, p := range tx.pages { - pages = append(pages, p) - } - // Clear out page cache early. - tx.pages = make(map[pgid]*page) - sort.Sort(pages) - - // Write pages to disk in order. - for _, p := range pages { - size := (int(p.overflow) + 1) * tx.db.pageSize - offset := int64(p.id) * int64(tx.db.pageSize) - - // Write out page in "max allocation" sized chunks. - ptr := (*[maxAllocSize]byte)(unsafe.Pointer(p)) - for { - // Limit our write to our max allocation size. - sz := size - if sz > maxAllocSize-1 { - sz = maxAllocSize - 1 - } - - // Write chunk to disk. - buf := ptr[:sz] - if _, err := tx.db.ops.writeAt(buf, offset); err != nil { - return err - } - - // Update statistics. - tx.stats.Write++ - - // Exit inner for loop if we've written all the chunks. - size -= sz - if size == 0 { - break - } - - // Otherwise move offset forward and move pointer to next chunk. - offset += int64(sz) - ptr = (*[maxAllocSize]byte)(unsafe.Pointer(&ptr[sz])) - } - } - - // Ignore file sync if flag is set on DB. - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Put small pages back to page pool. - for _, p := range pages { - // Ignore page sizes over 1 page. - // These are allocated using make() instead of the page pool. - if int(p.overflow) != 0 { - continue - } - - buf := (*[maxAllocSize]byte)(unsafe.Pointer(p))[:tx.db.pageSize] - - // See https://go.googlesource.com/go/+/f03c9202c43e0abb130669852082117ca50aa9b1 - for i := range buf { - buf[i] = 0 - } - tx.db.pagePool.Put(buf) - } - - return nil -} - -// writeMeta writes the meta to the disk. -func (tx *Tx) writeMeta() error { - // Create a temporary buffer for the meta page. - buf := make([]byte, tx.db.pageSize) - p := tx.db.pageInBuffer(buf, 0) - tx.meta.write(p) - - // Write the meta page to file. - if _, err := tx.db.ops.writeAt(buf, int64(p.id)*int64(tx.db.pageSize)); err != nil { - return err - } - if !tx.db.NoSync || IgnoreNoSync { - if err := fdatasync(tx.db); err != nil { - return err - } - } - - // Update statistics. - tx.stats.Write++ - - return nil -} - -// page returns a reference to the page with a given id. -// If page has been written to then a temporary buffered page is returned. -func (tx *Tx) page(id pgid) *page { - // Check the dirty pages first. - if tx.pages != nil { - if p, ok := tx.pages[id]; ok { - return p - } - } - - // Otherwise return directly from the mmap. - return tx.db.page(id) -} - -// forEachPage iterates over every page within a given page and executes a function. 
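`Check` above reports every inconsistency on an unbuffered channel and closes it when the page walk finishes, so callers must drain the channel to completion rather than bail at the first error. A hedged usage sketch, assuming the `DB.View` helper from `db.go` (not part of this diff):

    package main

    import (
        "fmt"
        "log"
        "strings"

        "github.com/boltdb/bolt"
    )

    // verify drains Tx.Check to completion; the checker closes the channel
    // once every reachable page has been visited.
    func verify(db *bolt.DB) error {
        return db.View(func(tx *bolt.Tx) error {
            var errs []string
            for err := range tx.Check() {
                errs = append(errs, err.Error())
            }
            if len(errs) > 0 {
                return fmt.Errorf("consistency check failed:\n%s", strings.Join(errs, "\n"))
            }
            return nil
        })
    }

    func main() {
        db, err := bolt.Open("my.db", 0600, nil)
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()
        if err := verify(db); err != nil {
            log.Fatal(err)
        }
        fmt.Println("database is consistent")
    }
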
-func (tx *Tx) forEachPage(pgid pgid, depth int, fn func(*page, int)) {
-	p := tx.page(pgid)
-
-	// Execute function.
-	fn(p, depth)
-
-	// Recursively loop over children.
-	if (p.flags & branchPageFlag) != 0 {
-		for i := 0; i < int(p.count); i++ {
-			elem := p.branchPageElement(uint16(i))
-			tx.forEachPage(elem.pgid, depth+1, fn)
-		}
-	}
-}
-
-// Page returns page information for a given page number.
-// This is only safe for concurrent use when used by a writable transaction.
-func (tx *Tx) Page(id int) (*PageInfo, error) {
-	if tx.db == nil {
-		return nil, ErrTxClosed
-	} else if pgid(id) >= tx.meta.pgid {
-		return nil, nil
-	}
-
-	// Build the page info.
-	p := tx.db.page(pgid(id))
-	info := &PageInfo{
-		ID:            id,
-		Count:         int(p.count),
-		OverflowCount: int(p.overflow),
-	}
-
-	// Determine the type (or if it's free).
-	if tx.db.freelist.freed(pgid(id)) {
-		info.Type = "free"
-	} else {
-		info.Type = p.typ()
-	}
-
-	return info, nil
-}
-
-// TxStats represents statistics about the actions performed by the transaction.
-type TxStats struct {
-	// Page statistics.
-	PageCount int // number of page allocations
-	PageAlloc int // total bytes allocated
-
-	// Cursor statistics.
-	CursorCount int // number of cursors created
-
-	// Node statistics.
-	NodeCount int // number of node allocations
-	NodeDeref int // number of node dereferences
-
-	// Rebalance statistics.
-	Rebalance     int           // number of node rebalances
-	RebalanceTime time.Duration // total time spent rebalancing
-
-	// Split/Spill statistics.
-	Split     int           // number of nodes split
-	Spill     int           // number of nodes spilled
-	SpillTime time.Duration // total time spent spilling
-
-	// Write statistics.
-	Write     int           // number of writes performed
-	WriteTime time.Duration // total time spent writing to disk
-}
-
-func (s *TxStats) add(other *TxStats) {
-	s.PageCount += other.PageCount
-	s.PageAlloc += other.PageAlloc
-	s.CursorCount += other.CursorCount
-	s.NodeCount += other.NodeCount
-	s.NodeDeref += other.NodeDeref
-	s.Rebalance += other.Rebalance
-	s.RebalanceTime += other.RebalanceTime
-	s.Split += other.Split
-	s.Spill += other.Spill
-	s.SpillTime += other.SpillTime
-	s.Write += other.Write
-	s.WriteTime += other.WriteTime
-}
-
-// Sub calculates and returns the difference between two sets of transaction stats.
-// This is useful when obtaining stats at two different points in time and
-// you need the performance counters that occurred within that time span.
-func (s *TxStats) Sub(other *TxStats) TxStats {
-	var diff TxStats
-	diff.PageCount = s.PageCount - other.PageCount
-	diff.PageAlloc = s.PageAlloc - other.PageAlloc
-	diff.CursorCount = s.CursorCount - other.CursorCount
-	diff.NodeCount = s.NodeCount - other.NodeCount
-	diff.NodeDeref = s.NodeDeref - other.NodeDeref
-	diff.Rebalance = s.Rebalance - other.Rebalance
-	diff.RebalanceTime = s.RebalanceTime - other.RebalanceTime
-	diff.Split = s.Split - other.Split
-	diff.Spill = s.Spill - other.Spill
-	diff.SpillTime = s.SpillTime - other.SpillTime
-	diff.Write = s.Write - other.Write
-	diff.WriteTime = s.WriteTime - other.WriteTime
-	return diff
-}
diff --git a/vendor/src/github.com/cloudflare/cfssl/LICENSE b/vendor/src/github.com/cloudflare/cfssl/LICENSE
deleted file mode 100644
index bc5841fa55..0000000000
--- a/vendor/src/github.com/cloudflare/cfssl/LICENSE
+++ /dev/null
@@ -1,24 +0,0 @@
-Copyright (c) 2014 CloudFlare Inc.
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions -are met: - -Redistributions of source code must retain the above copyright notice, -this list of conditions and the following disclaimer. - -Redistributions in binary form must reproduce the above copyright notice, -this list of conditions and the following disclaimer in the documentation -and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED -TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF -LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING -NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/cloudflare/cfssl/api/api.go b/vendor/src/github.com/cloudflare/cfssl/api/api.go deleted file mode 100644 index f1040caded..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/api/api.go +++ /dev/null @@ -1,231 +0,0 @@ -// Package api implements an HTTP-based API and server for CFSSL. -package api - -import ( - "encoding/json" - "io/ioutil" - "net/http" - - "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/log" -) - -// Handler is an interface providing a generic mechanism for handling HTTP requests. -type Handler interface { - Handle(w http.ResponseWriter, r *http.Request) error -} - -// HTTPHandler is a wrapper that encapsulates Handler interface as http.Handler. -// HTTPHandler also enforces that the Handler only responds to requests with registered HTTP methods. -type HTTPHandler struct { - Handler // CFSSL handler - Methods []string // The associated HTTP methods -} - -// HandlerFunc is similar to the http.HandlerFunc type; it serves as -// an adapter allowing the use of ordinary functions as Handlers. If -// f is a function with the appropriate signature, HandlerFunc(f) is a -// Handler object that calls f. -type HandlerFunc func(http.ResponseWriter, *http.Request) error - -// Handle calls f(w, r) -func (f HandlerFunc) Handle(w http.ResponseWriter, r *http.Request) error { - w.Header().Set("Content-Type", "application/json") - return f(w, r) -} - -// handleError is the centralised error handling and reporting. -func handleError(w http.ResponseWriter, err error) (code int) { - if err == nil { - return http.StatusOK - } - msg := err.Error() - httpCode := http.StatusInternalServerError - - // If it is recognized as HttpError emitted from cfssl, - // we rewrite the status code accordingly. 
If it is a - // cfssl error, set the http status to StatusBadRequest - switch err := err.(type) { - case *errors.HTTPError: - httpCode = err.StatusCode - code = err.StatusCode - case *errors.Error: - httpCode = http.StatusBadRequest - code = err.ErrorCode - msg = err.Message - } - - response := NewErrorResponse(msg, code) - jsonMessage, err := json.Marshal(response) - if err != nil { - log.Errorf("Failed to marshal JSON: %v", err) - } else { - msg = string(jsonMessage) - } - http.Error(w, msg, httpCode) - return code -} - -// ServeHTTP encapsulates the call to underlying Handler to handle the request -// and return the response with proper HTTP status code -func (h HTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var err error - var match bool - // Throw 405 when requested with an unsupported verb. - for _, m := range h.Methods { - if m == r.Method { - match = true - } - } - if match { - err = h.Handle(w, r) - } else { - err = errors.NewMethodNotAllowed(r.Method) - } - status := handleError(w, err) - log.Infof("%s - \"%s %s\" %d", r.RemoteAddr, r.Method, r.URL, status) -} - -// readRequestBlob takes a JSON-blob-encoded response body in the form -// map[string]string and returns it, the list of keywords presented, -// and any error that occurred. -func readRequestBlob(r *http.Request) (map[string]string, error) { - var blob map[string]string - - body, err := ioutil.ReadAll(r.Body) - if err != nil { - return nil, err - } - r.Body.Close() - - err = json.Unmarshal(body, &blob) - if err != nil { - return nil, err - } - return blob, nil -} - -// ProcessRequestOneOf reads a JSON blob for the request and makes -// sure it contains one of a set of keywords. For example, a request -// might have the ('foo' && 'bar') keys, OR it might have the 'baz' -// key. In either case, we want to accept the request; however, if -// none of these sets shows up, the request is a bad request, and it -// should be returned. -func ProcessRequestOneOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) { - blob, err := readRequestBlob(r) - if err != nil { - return nil, nil, err - } - - var matched []string - for _, set := range keywordSets { - if matchKeywords(blob, set) { - if matched != nil { - return nil, nil, errors.NewBadRequestString("mismatched parameters") - } - matched = set - } - } - if matched == nil { - return nil, nil, errors.NewBadRequestString("no valid parameter sets found") - } - return blob, matched, nil -} - -// ProcessRequestFirstMatchOf reads a JSON blob for the request and returns -// the first match of a set of keywords. For example, a request -// might have one of the following combinations: (foo=1, bar=2), (foo=1), and (bar=2) -// By giving a specific ordering of those combinations, we could decide how to accept -// the request. -func ProcessRequestFirstMatchOf(r *http.Request, keywordSets [][]string) (map[string]string, []string, error) { - blob, err := readRequestBlob(r) - if err != nil { - return nil, nil, err - } - - for _, set := range keywordSets { - if matchKeywords(blob, set) { - return blob, set, nil - } - } - return nil, nil, errors.NewBadRequestString("no valid parameter sets found") -} - -func matchKeywords(blob map[string]string, keywords []string) bool { - for _, keyword := range keywords { - if _, ok := blob[keyword]; !ok { - return false - } - } - return true -} - -// ResponseMessage implements the standard for response errors and -// messages. A message has a code and a string message. 
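`HTTPHandler` above is the glue between CFSSL's error-returning `Handler` interface and `net/http`: `ServeHTTP` enforces the method whitelist, runs the handler, and funnels any returned error through `handleError` as a JSON error response. A hedged sketch of wiring up an endpoint this way; the `/api/v1/cfssl/ping` path and the response body are illustrative, and `SendResponse` is defined just below in this file:

    package main

    import (
        "net/http"

        "github.com/cloudflare/cfssl/api"
    )

    func main() {
        // HandlerFunc adapts a plain function; returning a non-nil error
        // here would be rendered by handleError as a JSON error response.
        ping := api.HandlerFunc(func(w http.ResponseWriter, r *http.Request) error {
            return api.SendResponse(w, map[string]string{"status": "ok"})
        })

        // Requests with any verb other than GET get a 405 via
        // errors.NewMethodNotAllowed.
        http.Handle("/api/v1/cfssl/ping", api.HTTPHandler{Handler: ping, Methods: []string{"GET"}})
        http.ListenAndServe("127.0.0.1:8888", nil)
    }
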
-type ResponseMessage struct {
-	Code    int    `json:"code"`
-	Message string `json:"message"`
-}
-
-// Response implements the CloudFlare standard for API
-// responses.
-type Response struct {
-	Success  bool              `json:"success"`
-	Result   interface{}       `json:"result"`
-	Errors   []ResponseMessage `json:"errors"`
-	Messages []ResponseMessage `json:"messages"`
-}
-
-// NewSuccessResponse is a shortcut for creating new successful API
-// responses.
-func NewSuccessResponse(result interface{}) Response {
-	return Response{
-		Success:  true,
-		Result:   result,
-		Errors:   []ResponseMessage{},
-		Messages: []ResponseMessage{},
-	}
-}
-
-// NewSuccessResponseWithMessage is a shortcut for creating new successful API
-// responses that include a message.
-func NewSuccessResponseWithMessage(result interface{}, message string, code int) Response {
-	return Response{
-		Success:  true,
-		Result:   result,
-		Errors:   []ResponseMessage{},
-		Messages: []ResponseMessage{{code, message}},
-	}
-}
-
-// NewErrorResponse is a shortcut for creating an error response for a
-// single error.
-func NewErrorResponse(message string, code int) Response {
-	return Response{
-		Success:  false,
-		Result:   nil,
-		Errors:   []ResponseMessage{{code, message}},
-		Messages: []ResponseMessage{},
-	}
-}
-
-// SendResponse builds a response from the result, sets the JSON
-// header, and writes to the http.ResponseWriter.
-func SendResponse(w http.ResponseWriter, result interface{}) error {
-	response := NewSuccessResponse(result)
-	w.Header().Set("Content-Type", "application/json")
-	enc := json.NewEncoder(w)
-	err := enc.Encode(response)
-	return err
-}
-
-// SendResponseWithMessage builds a response from the result and the
-// provided message, sets the JSON header, and writes to the
-// http.ResponseWriter.
-func SendResponseWithMessage(w http.ResponseWriter, result interface{}, message string, code int) error {
-	response := NewSuccessResponseWithMessage(result, message, code)
-	w.Header().Set("Content-Type", "application/json")
-	enc := json.NewEncoder(w)
-	err := enc.Encode(response)
-	return err
-}
diff --git a/vendor/src/github.com/cloudflare/cfssl/auth/auth.go b/vendor/src/github.com/cloudflare/cfssl/auth/auth.go
deleted file mode 100644
index ecd5e5fefd..0000000000
--- a/vendor/src/github.com/cloudflare/cfssl/auth/auth.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Package auth implements an interface for providing CFSSL
-// authentication. This is meant to authenticate a client CFSSL to a
-// remote CFSSL in order to prevent unauthorised use of the signature
-// capabilities. This package provides both the interface and a
-// standard HMAC-based implementation.
-package auth
-
-import (
-	"crypto/hmac"
-	"crypto/sha256"
-	"encoding/hex"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"strings"
-)
-
-// An AuthenticatedRequest contains a request and authentication
-// token. The Provider may determine whether to validate the timestamp
-// and remote address.
-type AuthenticatedRequest struct {
-	// An Authenticator decides whether to use this field.
-	Timestamp     int64  `json:"timestamp,omitempty"`
-	RemoteAddress []byte `json:"remote_address,omitempty"`
-	Token         []byte `json:"token"`
-	Request       []byte `json:"request"`
-}
-
-// A Provider can generate tokens from a request and verify a
-// request. The handling of additional authentication data (such as
-// the IP address) is handled by the concrete type, as is any
-// serialisation and state-keeping.
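A hedged usage sketch for the `Provider` interface that follows and its HMAC-based `Standard` implementation: mint a token for a request payload and verify it. The hex key is purely illustrative; production keys should be random and supplied via the `env:` or `file:` references that `New` understands:

    package main

    import (
        "fmt"
        "log"

        "github.com/cloudflare/cfssl/auth"
    )

    func main() {
        provider, err := auth.New("000102030405060708090a0b0c0d0e0f", nil)
        if err != nil {
            log.Fatal(err)
        }

        req := []byte(`{"hosts":["www.example.net"]}`)
        token, err := provider.Token(req)
        if err != nil {
            log.Fatal(err)
        }

        ok := provider.Verify(&auth.AuthenticatedRequest{Token: token, Request: req})
        fmt.Println("token verified:", ok) // true
    }
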
-type Provider interface { - Token(req []byte) (token []byte, err error) - Verify(aReq *AuthenticatedRequest) bool -} - -// Standard implements an HMAC-SHA-256 authentication provider. It may -// be supplied additional data at creation time that will be used as -// request || additional-data with the HMAC. -type Standard struct { - key []byte - ad []byte -} - -// New generates a new standard authentication provider from the key -// and additional data. The additional data will be used when -// generating a new token. -func New(key string, ad []byte) (*Standard, error) { - if splitKey := strings.SplitN(key, ":", 2); len(splitKey) == 2 { - switch splitKey[0] { - case "env": - key = os.Getenv(splitKey[1]) - case "file": - data, err := ioutil.ReadFile(splitKey[1]) - if err != nil { - return nil, err - } - key = string(data) - default: - return nil, fmt.Errorf("unknown key prefix: %s", splitKey[0]) - } - } - - keyBytes, err := hex.DecodeString(key) - if err != nil { - return nil, err - } - - return &Standard{keyBytes, ad}, nil -} - -// Token generates a new authentication token from the request. -func (p Standard) Token(req []byte) (token []byte, err error) { - h := hmac.New(sha256.New, p.key) - h.Write(req) - h.Write(p.ad) - return h.Sum(nil), nil -} - -// Verify determines whether an authenticated request is valid. -func (p Standard) Verify(ad *AuthenticatedRequest) bool { - if ad == nil { - return false - } - - // Standard token generation returns no error. - token, _ := p.Token(ad.Request) - if len(ad.Token) != len(token) { - return false - } - - return hmac.Equal(token, ad.Token) -} diff --git a/vendor/src/github.com/cloudflare/cfssl/certdb/README.md b/vendor/src/github.com/cloudflare/cfssl/certdb/README.md deleted file mode 100644 index 31eff57ca3..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/certdb/README.md +++ /dev/null @@ -1,71 +0,0 @@ -# certdb usage - -Using a database enables additional functionality for existing commands when a -db config is provided: - - - `sign` and `gencert` add a certificate to the certdb after signing it - - `serve` enables database functionality for the sign and revoke endpoints - -A database is required for the following: - - - `revoke` marks certificates revoked in the database with an optional reason - - `ocsprefresh` refreshes the table of cached OCSP responses - - `ocspdump` outputs cached OCSP responses in a concatenated base64-encoded format - -## Setup/Migration - -This directory stores [goose](https://bitbucket.org/liamstask/goose/) db migration scripts for various DB backends. -Currently supported: - - MySQL in mysql - - PostgreSQL in pg - - SQLite in sqlite - -### Get goose - - go get bitbucket.org/liamstask/goose/cmd/goose - -### Use goose to start and terminate a MySQL DB -To start a MySQL using goose: - - goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql up - -To tear down a MySQL DB using goose - - goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/mysql down - -Note: the administration of MySQL DB is not included. We assume -the databases being connected to are already created and access control -is properly handled. - -### Use goose to start and terminate a PostgreSQL DB -To start a PostgreSQL using goose: - - goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg up - -To tear down a PostgreSQL DB using goose - - goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/pg down - -Note: the administration of PostgreSQL DB is not included. 
We assume
-the databases being connected to are already created and access control
-is properly handled.
-
-### Use goose to start and terminate a SQLite DB
-To start a SQLite DB using goose:
-
-    goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite up
-
-To tear down a SQLite DB using goose
-
-    goose -path $GOPATH/src/github.com/cloudflare/cfssl/certdb/sqlite down
-
-## CFSSL Configuration
-
-Several cfssl commands take a -db-config flag. Create a file with a
-JSON dictionary:
-
-    {"driver":"sqlite3","data_source":"certs.db"}
-
-or
-
-    {"driver":"postgres","data_source":"postgres://user:password@host/db"}
diff --git a/vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go b/vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go
deleted file mode 100644
index 96694f7685..0000000000
--- a/vendor/src/github.com/cloudflare/cfssl/certdb/certdb.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package certdb
-
-import (
-	"time"
-)
-
-// CertificateRecord encodes a certificate and its metadata
-// that will be recorded in a database.
-type CertificateRecord struct {
-	Serial    string    `db:"serial_number"`
-	AKI       string    `db:"authority_key_identifier"`
-	CALabel   string    `db:"ca_label"`
-	Status    string    `db:"status"`
-	Reason    int       `db:"reason"`
-	Expiry    time.Time `db:"expiry"`
-	RevokedAt time.Time `db:"revoked_at"`
-	PEM       string    `db:"pem"`
-}
-
-// OCSPRecord encodes an OCSP response body and its metadata
-// that will be recorded in a database.
-type OCSPRecord struct {
-	Serial string    `db:"serial_number"`
-	AKI    string    `db:"authority_key_identifier"`
-	Body   string    `db:"body"`
-	Expiry time.Time `db:"expiry"`
-}
-
-// Accessor abstracts the CRUD of certdb objects from a DB.
-type Accessor interface {
-	InsertCertificate(cr CertificateRecord) error
-	GetCertificate(serial, aki string) ([]CertificateRecord, error)
-	GetUnexpiredCertificates() ([]CertificateRecord, error)
-	RevokeCertificate(serial, aki string, reasonCode int) error
-	InsertOCSP(rr OCSPRecord) error
-	GetOCSP(serial, aki string) ([]OCSPRecord, error)
-	GetUnexpiredOCSPs() ([]OCSPRecord, error)
-	UpdateOCSP(serial, aki, body string, expiry time.Time) error
-	UpsertOCSP(serial, aki, body string, expiry time.Time) error
-}
diff --git a/vendor/src/github.com/cloudflare/cfssl/config/config.go b/vendor/src/github.com/cloudflare/cfssl/config/config.go
deleted file mode 100644
index a6837eb0db..0000000000
--- a/vendor/src/github.com/cloudflare/cfssl/config/config.go
+++ /dev/null
@@ -1,563 +0,0 @@
-// Package config contains the configuration logic for CFSSL.
-package config
-
-import (
-	"crypto/x509"
-	"encoding/asn1"
-	"encoding/json"
-	"errors"
-	"fmt"
-	"io/ioutil"
-	"regexp"
-	"strconv"
-	"strings"
-	"time"
-
-	"github.com/cloudflare/cfssl/auth"
-	cferr "github.com/cloudflare/cfssl/errors"
-	"github.com/cloudflare/cfssl/helpers"
-	"github.com/cloudflare/cfssl/log"
-	ocspConfig "github.com/cloudflare/cfssl/ocsp/config"
-)
-
-// A CSRWhitelist stores booleans for fields in the CSR. If a CSRWhitelist is
-// not present in a SigningProfile, all of these fields may be copied from the
-// CSR into the signed certificate. If a CSRWhitelist *is* present in a
-// SigningProfile, only those fields with a `true` value in the CSRWhitelist may
-// be copied from the CSR to the signed certificate. Note that some of these
-// fields, like Subject, can be provided or partially provided through the API.
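Returning briefly to the certdb `Accessor` interface above before the config structures continue: a hedged sketch of recording a freshly signed certificate. The helper's shape and the `"good"` status string are assumptions; concrete `Accessor` implementations live in certdb's SQL backends, which this diff does not include:

    package certutil

    import (
        "time"

        "github.com/cloudflare/cfssl/certdb"
    )

    // recordCertificate stores a signed certificate through any certdb backend.
    func recordCertificate(db certdb.Accessor, serial, aki, pemCert string, expiry time.Time) error {
        return db.InsertCertificate(certdb.CertificateRecord{
            Serial: serial,
            AKI:    aki,
            Status: "good", // assumed status convention
            Expiry: expiry,
            PEM:    pemCert,
        })
    }
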
-// Since API clients are expected to be trusted, but CSRs are not, fields -// provided through the API are not subject to whitelisting through this -// mechanism. -type CSRWhitelist struct { - Subject, PublicKeyAlgorithm, PublicKey, SignatureAlgorithm bool - DNSNames, IPAddresses, EmailAddresses bool -} - -// OID is our own version of asn1's ObjectIdentifier, so we can define a custom -// JSON marshal / unmarshal. -type OID asn1.ObjectIdentifier - -// CertificatePolicy represents the ASN.1 PolicyInformation structure from -// https://tools.ietf.org/html/rfc3280.html#page-106. -// Valid values of Type are "id-qt-unotice" and "id-qt-cps" -type CertificatePolicy struct { - ID OID - Qualifiers []CertificatePolicyQualifier -} - -// CertificatePolicyQualifier represents a single qualifier from an ASN.1 -// PolicyInformation structure. -type CertificatePolicyQualifier struct { - Type string - Value string -} - -// AuthRemote is an authenticated remote signer. -type AuthRemote struct { - RemoteName string `json:"remote"` - AuthKeyName string `json:"auth_key"` -} - -// A SigningProfile stores information that the CA needs to store -// signature policy. -type SigningProfile struct { - Usage []string `json:"usages"` - IssuerURL []string `json:"issuer_urls"` - OCSP string `json:"ocsp_url"` - CRL string `json:"crl_url"` - CA bool `json:"is_ca"` - OCSPNoCheck bool `json:"ocsp_no_check"` - ExpiryString string `json:"expiry"` - BackdateString string `json:"backdate"` - AuthKeyName string `json:"auth_key"` - RemoteName string `json:"remote"` - NotBefore time.Time `json:"not_before"` - NotAfter time.Time `json:"not_after"` - NameWhitelistString string `json:"name_whitelist"` - AuthRemote AuthRemote `json:"auth_remote"` - CTLogServers []string `json:"ct_log_servers"` - AllowedExtensions []OID `json:"allowed_extensions"` - CertStore string `json:"cert_store"` - - Policies []CertificatePolicy - Expiry time.Duration - Backdate time.Duration - Provider auth.Provider - RemoteProvider auth.Provider - RemoteServer string - CSRWhitelist *CSRWhitelist - NameWhitelist *regexp.Regexp - ExtensionWhitelist map[string]bool - ClientProvidesSerialNumbers bool -} - -// UnmarshalJSON unmarshals a JSON string into an OID. -func (oid *OID) UnmarshalJSON(data []byte) (err error) { - if data[0] != '"' || data[len(data)-1] != '"' { - return errors.New("OID JSON string not wrapped in quotes." + string(data)) - } - data = data[1 : len(data)-1] - parsedOid, err := parseObjectIdentifier(string(data)) - if err != nil { - return err - } - *oid = OID(parsedOid) - return -} - -// MarshalJSON marshals an oid into a JSON string. -func (oid OID) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf(`"%v"`, asn1.ObjectIdentifier(oid))), nil -} - -func parseObjectIdentifier(oidString string) (oid asn1.ObjectIdentifier, err error) { - validOID, err := regexp.MatchString("\\d(\\.\\d+)*", oidString) - if err != nil { - return - } - if !validOID { - err = errors.New("Invalid OID") - return - } - - segments := strings.Split(oidString, ".") - oid = make(asn1.ObjectIdentifier, len(segments)) - for i, intString := range segments { - oid[i], err = strconv.Atoi(intString) - if err != nil { - return - } - } - return -} - -const timeFormat = "2006-01-02T15:04:05" - -// populate is used to fill in the fields that are not in JSON -// -// First, the ExpiryString parameter is needed to parse -// expiration timestamps from JSON. 
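The custom `UnmarshalJSON`/`MarshalJSON` pair above is what lets OIDs appear as plain dotted strings in policy files. A quick round-trip sketch:

    package main

    import (
        "encoding/json"
        "fmt"
        "log"

        "github.com/cloudflare/cfssl/config"
    )

    func main() {
        // Decode a quoted dotted string into an OID...
        var oid config.OID
        if err := json.Unmarshal([]byte(`"2.5.29.17"`), &oid); err != nil {
            log.Fatal(err)
        }

        // ...and encode it back to the same string form.
        out, err := json.Marshal(oid)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(string(out)) // "2.5.29.17"
    }
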
The JSON decoder is not able to -// decode a string time duration to a time.Duration, so this is called -// when loading the configuration to properly parse and fill out the -// Expiry parameter. -// This function is also used to create references to the auth key -// and default remote for the profile. -// It returns true if ExpiryString is a valid representation of a -// time.Duration, and the AuthKeyString and RemoteName point to -// valid objects. It returns false otherwise. -func (p *SigningProfile) populate(cfg *Config) error { - if p == nil { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("can't parse nil profile")) - } - - var err error - if p.RemoteName == "" && p.AuthRemote.RemoteName == "" { - log.Debugf("parse expiry in profile") - if p.ExpiryString == "" { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("empty expiry string")) - } - - dur, err := time.ParseDuration(p.ExpiryString) - if err != nil { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err) - } - - log.Debugf("expiry is valid") - p.Expiry = dur - - if p.BackdateString != "" { - dur, err = time.ParseDuration(p.BackdateString) - if err != nil { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err) - } - - p.Backdate = dur - } - - if !p.NotBefore.IsZero() && !p.NotAfter.IsZero() && p.NotAfter.Before(p.NotBefore) { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err) - } - - if len(p.Policies) > 0 { - for _, policy := range p.Policies { - for _, qualifier := range policy.Qualifiers { - if qualifier.Type != "" && qualifier.Type != "id-qt-unotice" && qualifier.Type != "id-qt-cps" { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, - errors.New("invalid policy qualifier type")) - } - } - } - } - } else if p.RemoteName != "" { - log.Debug("match remote in profile to remotes section") - if p.AuthRemote.RemoteName != "" { - log.Error("profile has both a remote and an auth remote specified") - return cferr.New(cferr.PolicyError, cferr.InvalidPolicy) - } - if remote := cfg.Remotes[p.RemoteName]; remote != "" { - if err := p.updateRemote(remote); err != nil { - return err - } - } else { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, - errors.New("failed to find remote in remotes section")) - } - } else { - log.Debug("match auth remote in profile to remotes section") - if remote := cfg.Remotes[p.AuthRemote.RemoteName]; remote != "" { - if err := p.updateRemote(remote); err != nil { - return err - } - } else { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, - errors.New("failed to find remote in remotes section")) - } - } - - if p.AuthKeyName != "" { - log.Debug("match auth key in profile to auth_keys section") - if key, ok := cfg.AuthKeys[p.AuthKeyName]; ok == true { - if key.Type == "standard" { - p.Provider, err = auth.New(key.Key, nil) - if err != nil { - log.Debugf("failed to create new standard auth provider: %v", err) - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, - errors.New("failed to create new standard auth provider")) - } - } else { - log.Debugf("unknown authentication type %v", key.Type) - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, - errors.New("unknown authentication type")) - } - } else { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, - errors.New("failed to find auth_key in auth_keys section")) - } - } - - if p.AuthRemote.AuthKeyName != "" { - log.Debug("match auth remote key in profile to auth_keys section") - if key, ok := 
cfg.AuthKeys[p.AuthRemote.AuthKeyName]; ok == true {
-			if key.Type == "standard" {
-				p.RemoteProvider, err = auth.New(key.Key, nil)
-				if err != nil {
-					log.Debugf("failed to create new standard auth provider: %v", err)
-					return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-						errors.New("failed to create new standard auth provider"))
-				}
-			} else {
-				log.Debugf("unknown authentication type %v", key.Type)
-				return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-					errors.New("unknown authentication type"))
-			}
-		} else {
-			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-				errors.New("failed to find auth_remote's auth_key in auth_keys section"))
-		}
-	}
-
-	if p.NameWhitelistString != "" {
-		log.Debug("compiling whitelist regular expression")
-		rule, err := regexp.Compile(p.NameWhitelistString)
-		if err != nil {
-			return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy,
-				errors.New("failed to compile name whitelist section"))
-		}
-		p.NameWhitelist = rule
-	}
-
-	p.ExtensionWhitelist = map[string]bool{}
-	for _, oid := range p.AllowedExtensions {
-		p.ExtensionWhitelist[asn1.ObjectIdentifier(oid).String()] = true
-	}
-
-	return nil
-}
-
-// updateRemote takes a signing profile and initializes the remote server object
-// to the hostname:port combination sent by remote.
-func (p *SigningProfile) updateRemote(remote string) error {
-	if remote != "" {
-		p.RemoteServer = remote
-	}
-	return nil
-}
-
-// OverrideRemotes takes a signing configuration and updates the remote server object
-// to the hostname:port combination sent by remote.
-func (p *Signing) OverrideRemotes(remote string) error {
-	if remote != "" {
-		var err error
-		for _, profile := range p.Profiles {
-			err = profile.updateRemote(remote)
-			if err != nil {
-				return err
-			}
-		}
-		err = p.Default.updateRemote(remote)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-// NeedsRemoteSigner returns true if one of the profiles has a remote set.
-func (p *Signing) NeedsRemoteSigner() bool {
-	for _, profile := range p.Profiles {
-		if profile.RemoteServer != "" {
-			return true
-		}
-	}
-
-	if p.Default.RemoteServer != "" {
-		return true
-	}
-
-	return false
-}
-
-// NeedsLocalSigner returns true if one of the profiles does not have a remote set.
-func (p *Signing) NeedsLocalSigner() bool {
-	for _, profile := range p.Profiles {
-		if profile.RemoteServer == "" {
-			return true
-		}
-	}
-
-	if p.Default.RemoteServer == "" {
-		return true
-	}
-
-	return false
-}
-
-// Usages parses the list of key uses in the profile, translating them
-// to a list of X.509 key usages and extended key usages. The unknown
-// uses are collected into a slice that is also returned.
-func (p *SigningProfile) Usages() (ku x509.KeyUsage, eku []x509.ExtKeyUsage, unk []string) {
-	for _, keyUse := range p.Usage {
-		if kuse, ok := KeyUsage[keyUse]; ok {
-			ku |= kuse
-		} else if ekuse, ok := ExtKeyUsage[keyUse]; ok {
-			eku = append(eku, ekuse)
-		} else {
-			unk = append(unk, keyUse)
-		}
-	}
-	return
-}
-
-// A valid profile must be a valid local profile or a valid remote profile.
-// A valid local profile has defined at least key usages to be used, and a
-// valid local default profile has defined at least a default expiration.
-// A valid remote profile (default or not) has a remote signer initialized.
-// In addition, a remote profile must have a valid auth provider if an auth
-// key is defined.
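Before `validProfile` below, a hedged sketch of `Usages` above in action: strings recognized by the `KeyUsage` and `ExtKeyUsage` maps set key-usage bits or extend the EKU list, and everything else is reported back in the unknown slice:

    package main

    import (
        "fmt"

        "github.com/cloudflare/cfssl/config"
    )

    func main() {
        p := &config.SigningProfile{
            Usage: []string{"cert sign", "crl sign", "server auth", "bogus use"},
        }
        ku, eku, unk := p.Usages()
        // "cert sign" and "crl sign" set KeyUsage bits, "server auth"
        // becomes an ExtKeyUsage, and "bogus use" lands in unk.
        fmt.Printf("ku=%b eku=%v unk=%v\n", ku, eku, unk)
    }
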
-func (p *SigningProfile) validProfile(isDefault bool) bool { - if p == nil { - return false - } - - if p.RemoteName != "" { - log.Debugf("validate remote profile") - - if p.RemoteServer == "" { - log.Debugf("invalid remote profile: no remote signer specified") - return false - } - - if p.AuthKeyName != "" && p.Provider == nil { - log.Debugf("invalid remote profile: auth key name is defined but no auth provider is set") - return false - } - - if p.AuthRemote.RemoteName != "" { - log.Debugf("invalid remote profile: auth remote is also specified") - } - } else if p.AuthRemote.RemoteName != "" { - log.Debugf("validate auth remote profile") - if p.RemoteServer == "" { - log.Debugf("invalid auth remote profile: no remote signer specified") - return false - } - - if p.AuthRemote.AuthKeyName == "" || p.RemoteProvider == nil { - log.Debugf("invalid auth remote profile: no auth key is defined") - return false - } - } else { - log.Debugf("validate local profile") - if !isDefault { - if len(p.Usage) == 0 { - log.Debugf("invalid local profile: no usages specified") - return false - } else if _, _, unk := p.Usages(); len(unk) == len(p.Usage) { - log.Debugf("invalid local profile: no valid usages") - return false - } - } else { - if p.Expiry == 0 { - log.Debugf("invalid local profile: no expiry set") - return false - } - } - } - - log.Debugf("profile is valid") - return true -} - -// Signing codifies the signature configuration policy for a CA. -type Signing struct { - Profiles map[string]*SigningProfile `json:"profiles"` - Default *SigningProfile `json:"default"` -} - -// Config stores configuration information for the CA. -type Config struct { - Signing *Signing `json:"signing"` - OCSP *ocspConfig.Config `json:"ocsp"` - AuthKeys map[string]AuthKey `json:"auth_keys,omitempty"` - Remotes map[string]string `json:"remotes,omitempty"` -} - -// Valid ensures that Config is a valid configuration. It should be -// called immediately after parsing a configuration file. -func (c *Config) Valid() bool { - return c.Signing.Valid() -} - -// Valid checks the signature policies, ensuring they are valid -// policies. A policy is valid if it has defined at least key usages -// to be used, and a valid default profile has defined at least a -// default expiration. -func (p *Signing) Valid() bool { - if p == nil { - return false - } - - log.Debugf("validating configuration") - if !p.Default.validProfile(true) { - log.Debugf("default profile is invalid") - return false - } - - for _, sp := range p.Profiles { - if !sp.validProfile(false) { - log.Debugf("invalid profile") - return false - } - } - return true -} - -// KeyUsage contains a mapping of string names to key usages. -var KeyUsage = map[string]x509.KeyUsage{ - "signing": x509.KeyUsageDigitalSignature, - "digital signature": x509.KeyUsageDigitalSignature, - "content committment": x509.KeyUsageContentCommitment, - "key encipherment": x509.KeyUsageKeyEncipherment, - "key agreement": x509.KeyUsageKeyAgreement, - "data encipherment": x509.KeyUsageDataEncipherment, - "cert sign": x509.KeyUsageCertSign, - "crl sign": x509.KeyUsageCRLSign, - "encipher only": x509.KeyUsageEncipherOnly, - "decipher only": x509.KeyUsageDecipherOnly, -} - -// ExtKeyUsage contains a mapping of string names to extended key -// usages. 
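Tying the validation rules above together: a minimal policy that `LoadConfig` (defined a little further below) accepts, since its default profile names recognized usages and a parseable expiry. A hedged sketch:

    package main

    import (
        "fmt"
        "log"

        "github.com/cloudflare/cfssl/config"
    )

    func main() {
        raw := []byte(`{
          "signing": {
            "default": {
              "usages": ["signing", "key encipherment", "server auth"],
              "expiry": "8760h"
            }
          }
        }`)

        cfg, err := config.LoadConfig(raw)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("default expiry:", cfg.Signing.Default.Expiry) // 8760h0m0s
    }
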
-var ExtKeyUsage = map[string]x509.ExtKeyUsage{ - "any": x509.ExtKeyUsageAny, - "server auth": x509.ExtKeyUsageServerAuth, - "client auth": x509.ExtKeyUsageClientAuth, - "code signing": x509.ExtKeyUsageCodeSigning, - "email protection": x509.ExtKeyUsageEmailProtection, - "s/mime": x509.ExtKeyUsageEmailProtection, - "ipsec end system": x509.ExtKeyUsageIPSECEndSystem, - "ipsec tunnel": x509.ExtKeyUsageIPSECTunnel, - "ipsec user": x509.ExtKeyUsageIPSECUser, - "timestamping": x509.ExtKeyUsageTimeStamping, - "ocsp signing": x509.ExtKeyUsageOCSPSigning, - "microsoft sgc": x509.ExtKeyUsageMicrosoftServerGatedCrypto, - "netscape sgc": x509.ExtKeyUsageNetscapeServerGatedCrypto, -} - -// An AuthKey contains an entry for a key used for authentication. -type AuthKey struct { - // Type contains information needed to select the appropriate - // constructor. For example, "standard" for HMAC-SHA-256, - // "standard-ip" for HMAC-SHA-256 incorporating the client's - // IP. - Type string `json:"type"` - // Key contains the key information, such as a hex-encoded - // HMAC key. - Key string `json:"key"` -} - -// DefaultConfig returns a default configuration specifying basic key -// usage and a 1 year expiration time. The key usages chosen are -// signing, key encipherment, client auth and server auth. -func DefaultConfig() *SigningProfile { - d := helpers.OneYear - return &SigningProfile{ - Usage: []string{"signing", "key encipherment", "server auth", "client auth"}, - Expiry: d, - ExpiryString: "8760h", - } -} - -// LoadFile attempts to load the configuration file stored at the path -// and returns the configuration. On error, it returns nil. -func LoadFile(path string) (*Config, error) { - log.Debugf("loading configuration file from %s", path) - if path == "" { - return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid path")) - } - - body, err := ioutil.ReadFile(path) - if err != nil { - return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("could not read configuration file")) - } - - return LoadConfig(body) -} - -// LoadConfig attempts to load the configuration from a byte slice. -// On error, it returns nil. -func LoadConfig(config []byte) (*Config, error) { - var cfg = &Config{} - err := json.Unmarshal(config, &cfg) - if err != nil { - return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, - errors.New("failed to unmarshal configuration: "+err.Error())) - } - - if cfg.Signing == nil { - return nil, errors.New("No \"signing\" field present") - } - - if cfg.Signing.Default == nil { - log.Debugf("no default given: using default config") - cfg.Signing.Default = DefaultConfig() - } else { - if err := cfg.Signing.Default.populate(cfg); err != nil { - return nil, err - } - } - - for k := range cfg.Signing.Profiles { - if err := cfg.Signing.Profiles[k].populate(cfg); err != nil { - return nil, err - } - } - - if !cfg.Valid() { - return nil, cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, errors.New("invalid configuration")) - } - - log.Debugf("configuration ok") - return cfg, nil -} diff --git a/vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go b/vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go deleted file mode 100644 index 8db547fce5..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/crypto/pkcs7/pkcs7.go +++ /dev/null @@ -1,188 +0,0 @@ -// Package pkcs7 implements the subset of the CMS PKCS #7 datatype that is typically -// used to package certificates and CRLs. 
Using openssl, every certificate converted
-// to PKCS #7 format from another encoding such as PEM conforms to this implementation.
-// reference: https://www.openssl.org/docs/apps/crl2pkcs7.html
-//
-// PKCS #7 Data type, reference: https://tools.ietf.org/html/rfc2315
-//
-// The full PKCS #7 cryptographic message syntax allows for cryptographic enhancements;
-// for example, data can be encrypted and signed and then packaged through PKCS #7 to be
-// sent over a network and then verified and decrypted. It is ASN.1, and the type of the
-// PKCS #7 ContentInfo, which comprises the PKCS #7 structure, is:
-//
-//	ContentInfo ::= SEQUENCE {
-//		contentType ContentType,
-//		content [0] EXPLICIT ANY DEFINED BY contentType OPTIONAL
-//	}
-//
-// There are 6 possible ContentTypes: data, signedData, envelopedData,
-// signedAndEnvelopedData, digestedData, and encryptedData. Here signedData, data, and
-// encryptedData are implemented, as the degenerate case of signedData without a signature
-// is the typical format for transferring certificates and CRLs, and data and encryptedData
-// are used in PKCS #12 formats.
-// The ContentType signedData has the form:
-//
-//	signedData ::= SEQUENCE {
-//		version Version,
-//		digestAlgorithms DigestAlgorithmIdentifiers,
-//		contentInfo ContentInfo,
-//		certificates [0] IMPLICIT ExtendedCertificatesAndCertificates OPTIONAL,
-//		crls [1] IMPLICIT CertificateRevocationLists OPTIONAL,
-//		signerInfos SignerInfos
-//	}
-//
-// As of yet, signerInfos and digestAlgorithms are not parsed, as they are not relevant to
-// this system's use of PKCS #7 data. Version is an integer type; note that PKCS #7 is
-// recursive, and this second layer of ContentInfo is similarly ignored for our degenerate
-// usage. The ExtendedCertificatesAndCertificates type consists of a sequence of choices
-// between PKCS #6 extended certificates and x509 certificates. Any sequence consisting
-// of any number of extended certificates is not yet supported in this implementation.
-//
-// The ContentType data is simply a raw octet string and is parsed directly into a Go []byte slice.
-//
-// The ContentType encryptedData is the most complicated and its form can be gathered from
-// the Go type below. It essentially contains a raw octet string of encrypted data and an
-// algorithm identifier for use in decrypting this data.
-package pkcs7
-
-import (
-	"crypto/x509"
-	"crypto/x509/pkix"
-	"encoding/asn1"
-	"errors"
-
-	cferr "github.com/cloudflare/cfssl/errors"
-)
-
-// Types used for asn1 Unmarshaling.
-
-type signedData struct {
-	Version          int
-	DigestAlgorithms asn1.RawValue
-	ContentInfo      asn1.RawValue
-	Certificates     asn1.RawValue `asn1:"optional" asn1:"tag:0"`
-	Crls             asn1.RawValue `asn1:"optional"`
-	SignerInfos      asn1.RawValue
-}
-
-type initPKCS7 struct {
-	Raw         asn1.RawContent
-	ContentType asn1.ObjectIdentifier
-	Content     asn1.RawValue `asn1:"tag:0,explicit,optional"`
-}
-
-// Object identifier strings of the three implemented PKCS7 types.
-const (
-	ObjIDData          = "1.2.840.113549.1.7.1"
-	ObjIDSignedData    = "1.2.840.113549.1.7.2"
-	ObjIDEncryptedData = "1.2.840.113549.1.7.6"
-)
-
-// PKCS7 represents the ASN.1 PKCS #7 Content type. It contains one of three
-// possible types of Content objects, as denoted by the object identifier in
-// the ContentInfo field, the other two being nil. SignedData
-// is the degenerate SignedData content without a signature, used
-// to hold certificates and CRLs. Data is raw bytes, and EncryptedData
-// is as defined in the PKCS #7 standard.
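A hedged usage sketch for `ParsePKCS7` (defined further below): pull the certificate list out of a DER-encoded degenerate signedData blob, the shape produced by `crl2pkcs7`-style bundling. The helper and its error messages are illustrative:

    package certutil

    import (
        "crypto/x509"
        "fmt"

        "github.com/cloudflare/cfssl/crypto/pkcs7"
    )

    // certsFromPKCS7 returns the certificates held in a degenerate
    // signedData structure, rejecting the other content types.
    func certsFromPKCS7(der []byte) ([]*x509.Certificate, error) {
        msg, err := pkcs7.ParsePKCS7(der)
        if err != nil {
            return nil, err
        }
        if msg.ContentInfo != "SignedData" {
            return nil, fmt.Errorf("unexpected PKCS #7 content type: %s", msg.ContentInfo)
        }
        return msg.Content.SignedData.Certificates, nil
    }
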
-type PKCS7 struct { - Raw asn1.RawContent - ContentInfo string - Content Content -} - -// Content implements three of the six possible PKCS7 data types. Only one is non-nil. -type Content struct { - Data []byte - SignedData SignedData - EncryptedData EncryptedData -} - -// SignedData defines the typical carrier of certificates and crls. -type SignedData struct { - Raw asn1.RawContent - Version int - Certificates []*x509.Certificate - Crl *pkix.CertificateList -} - -// Data contains raw bytes. Used as a subtype in PKCS12. -type Data struct { - Bytes []byte -} - -// EncryptedData contains encrypted data. Used as a subtype in PKCS12. -type EncryptedData struct { - Raw asn1.RawContent - Version int - EncryptedContentInfo EncryptedContentInfo -} - -// EncryptedContentInfo is a subtype of PKCS7EncryptedData. -type EncryptedContentInfo struct { - Raw asn1.RawContent - ContentType asn1.ObjectIdentifier - ContentEncryptionAlgorithm pkix.AlgorithmIdentifier - EncryptedContent []byte `asn1:"tag:0,optional"` -} - -// ParsePKCS7 attempts to parse the DER encoded bytes of a -// PKCS7 structure. -func ParsePKCS7(raw []byte) (msg *PKCS7, err error) { - - var pkcs7 initPKCS7 - _, err = asn1.Unmarshal(raw, &pkcs7) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - - msg = new(PKCS7) - msg.Raw = pkcs7.Raw - msg.ContentInfo = pkcs7.ContentType.String() - switch { - case msg.ContentInfo == ObjIDData: - msg.ContentInfo = "Data" - _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &msg.Content.Data) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - case msg.ContentInfo == ObjIDSignedData: - msg.ContentInfo = "SignedData" - var signedData signedData - _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &signedData) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - if len(signedData.Certificates.Bytes) != 0 { - msg.Content.SignedData.Certificates, err = x509.ParseCertificates(signedData.Certificates.Bytes) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - } - if len(signedData.Crls.Bytes) != 0 { - msg.Content.SignedData.Crl, err = x509.ParseDERCRL(signedData.Crls.Bytes) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - } - msg.Content.SignedData.Version = signedData.Version - msg.Content.SignedData.Raw = pkcs7.Content.Bytes - case msg.ContentInfo == ObjIDEncryptedData: - msg.ContentInfo = "EncryptedData" - var encryptedData EncryptedData - _, err = asn1.Unmarshal(pkcs7.Content.Bytes, &encryptedData) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - if encryptedData.Version != 0 { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Only support for PKCS #7 encryptedData version 0")) - } - msg.Content.EncryptedData = encryptedData - - default: - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("Attempt to parse PKCS# 7 Content not of type data, signed data or encrypted data")) - } - - return msg, nil - -} diff --git a/vendor/src/github.com/cloudflare/cfssl/csr/csr.go b/vendor/src/github.com/cloudflare/cfssl/csr/csr.go deleted file mode 100644 index 4329b79564..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/csr/csr.go +++ /dev/null @@ -1,431 +0,0 @@ -// Package csr implements certificate requests for CFSSL. 
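For orientation before moving into the csr package: the pkcs7 parser removed above was typically driven as in the following sketch (not part of this patch). The file name certs.p7b is hypothetical, and any PEM wrapper must be stripped first, since ParsePKCS7 consumes raw DER bytes.

package main

import (
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/crypto/pkcs7"
)

func main() {
	// certs.p7b is a hypothetical PEM-encoded PKCS #7 bundle, e.g. as
	// produced by `openssl crl2pkcs7 -nocrl -certfile cert.pem`.
	raw, err := ioutil.ReadFile("certs.p7b")
	if err != nil {
		log.Fatal(err)
	}
	block, _ := pem.Decode(raw)
	if block == nil {
		log.Fatal("no PEM block found")
	}

	// ParsePKCS7 expects DER; the PEM block's Bytes field is the DER body.
	msg, err := pkcs7.ParsePKCS7(block.Bytes)
	if err != nil {
		log.Fatal(err)
	}
	// For the degenerate SignedData form, the certificates are available
	// directly; signerInfos are not parsed by this implementation.
	if msg.ContentInfo == "SignedData" {
		for _, cert := range msg.Content.SignedData.Certificates {
			fmt.Println(cert.Subject.CommonName)
		}
	}
}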
-package csr - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/pem" - "errors" - "net" - "net/mail" - "strings" - - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers" - "github.com/cloudflare/cfssl/log" -) - -const ( - curveP256 = 256 - curveP384 = 384 - curveP521 = 521 -) - -// A Name contains the SubjectInfo fields. -type Name struct { - C string // Country - ST string // State - L string // Locality - O string // OrganisationName - OU string // OrganisationalUnitName - SerialNumber string -} - -// A KeyRequest is a generic request for a new key. -type KeyRequest interface { - Algo() string - Size() int - Generate() (crypto.PrivateKey, error) - SigAlgo() x509.SignatureAlgorithm -} - -// A BasicKeyRequest contains the algorithm and key size for a new private key. -type BasicKeyRequest struct { - A string `json:"algo"` - S int `json:"size"` -} - -// NewBasicKeyRequest returns a default BasicKeyRequest. -func NewBasicKeyRequest() *BasicKeyRequest { - return &BasicKeyRequest{"ecdsa", curveP256} -} - -// Algo returns the requested key algorithm represented as a string. -func (kr *BasicKeyRequest) Algo() string { - return kr.A -} - -// Size returns the requested key size. -func (kr *BasicKeyRequest) Size() int { - return kr.S -} - -// Generate generates a key as specified in the request. Currently, -// only ECDSA and RSA are supported. -func (kr *BasicKeyRequest) Generate() (crypto.PrivateKey, error) { - log.Debugf("generate key from request: algo=%s, size=%d", kr.Algo(), kr.Size()) - switch kr.Algo() { - case "rsa": - if kr.Size() < 2048 { - return nil, errors.New("RSA key is too weak") - } - if kr.Size() > 8192 { - return nil, errors.New("RSA key size too large") - } - return rsa.GenerateKey(rand.Reader, kr.Size()) - case "ecdsa": - var curve elliptic.Curve - switch kr.Size() { - case curveP256: - curve = elliptic.P256() - case curveP384: - curve = elliptic.P384() - case curveP521: - curve = elliptic.P521() - default: - return nil, errors.New("invalid curve") - } - return ecdsa.GenerateKey(curve, rand.Reader) - default: - return nil, errors.New("invalid algorithm") - } -} - -// SigAlgo returns an appropriate X.509 signature algorithm given the -// key request's type and size. -func (kr *BasicKeyRequest) SigAlgo() x509.SignatureAlgorithm { - switch kr.Algo() { - case "rsa": - switch { - case kr.Size() >= 4096: - return x509.SHA512WithRSA - case kr.Size() >= 3072: - return x509.SHA384WithRSA - case kr.Size() >= 2048: - return x509.SHA256WithRSA - default: - return x509.SHA1WithRSA - } - case "ecdsa": - switch kr.Size() { - case curveP521: - return x509.ECDSAWithSHA512 - case curveP384: - return x509.ECDSAWithSHA384 - case curveP256: - return x509.ECDSAWithSHA256 - default: - return x509.ECDSAWithSHA1 - } - default: - return x509.UnknownSignatureAlgorithm - } -} - -// CAConfig is a section used in the requests initialising a new CA. -type CAConfig struct { - PathLength int `json:"pathlen"` - PathLenZero bool `json:"pathlenzero"` - Expiry string `json:"expiry"` -} - -// A CertificateRequest encapsulates the API interface to the -// certificate request functionality. 
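The key-request half documented above can be exercised on its own. A minimal sketch against the removed csr package, with no external inputs assumed:

package main

import (
	"fmt"
	"log"

	"github.com/cloudflare/cfssl/csr"
)

func main() {
	// The default request is ECDSA on the P-256 curve.
	kr := csr.NewBasicKeyRequest()
	key, err := kr.Generate()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("generated %s-%d key (%T)\n", kr.Algo(), kr.Size(), key)

	// An RSA request below 2048 bits is rejected by Generate.
	weak := &csr.BasicKeyRequest{A: "rsa", S: 1024}
	if _, err := weak.Generate(); err != nil {
		fmt.Println("rejected:", err)
	}
}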
-type CertificateRequest struct { - CN string - Names []Name `json:"names"` - Hosts []string `json:"hosts"` - KeyRequest KeyRequest `json:"key,omitempty"` - CA *CAConfig `json:"ca,omitempty"` - SerialNumber string `json:"serialnumber,omitempty"` -} - -// New returns a new, empty CertificateRequest with a -// BasicKeyRequest. -func New() *CertificateRequest { - return &CertificateRequest{ - KeyRequest: NewBasicKeyRequest(), - } -} - -// appendIf appends to a if s is not an empty string. -func appendIf(s string, a *[]string) { - if s != "" { - *a = append(*a, s) - } -} - -// Name returns the PKIX name for the request. -func (cr *CertificateRequest) Name() pkix.Name { - var name pkix.Name - name.CommonName = cr.CN - - for _, n := range cr.Names { - appendIf(n.C, &name.Country) - appendIf(n.ST, &name.Province) - appendIf(n.L, &name.Locality) - appendIf(n.O, &name.Organization) - appendIf(n.OU, &name.OrganizationalUnit) - } - name.SerialNumber = cr.SerialNumber - return name -} - -// BasicConstraints CSR information RFC 5280, 4.2.1.9 -type BasicConstraints struct { - IsCA bool `asn1:"optional"` - MaxPathLen int `asn1:"optional,default:-1"` -} - -// ParseRequest takes a certificate request and generates a key and -// CSR from it. It does no validation -- caveat emptor. It will, -// however, fail if the key request is not valid (i.e., an unsupported -// curve or RSA key size). The lack of validation was specifically -// chosen to allow the end user to define a policy and validate the -// request appropriately before calling this function. -func ParseRequest(req *CertificateRequest) (csr, key []byte, err error) { - log.Info("received CSR") - if req.KeyRequest == nil { - req.KeyRequest = NewBasicKeyRequest() - } - - log.Infof("generating key: %s-%d", req.KeyRequest.Algo(), req.KeyRequest.Size()) - priv, err := req.KeyRequest.Generate() - if err != nil { - err = cferr.Wrap(cferr.PrivateKeyError, cferr.GenerationFailed, err) - return - } - - switch priv := priv.(type) { - case *rsa.PrivateKey: - key = x509.MarshalPKCS1PrivateKey(priv) - block := pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: key, - } - key = pem.EncodeToMemory(&block) - case *ecdsa.PrivateKey: - key, err = x509.MarshalECPrivateKey(priv) - if err != nil { - err = cferr.Wrap(cferr.PrivateKeyError, cferr.Unknown, err) - return - } - block := pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: key, - } - key = pem.EncodeToMemory(&block) - default: - panic("Generate should have failed to produce a valid key.") - } - - csr, err = Generate(priv.(crypto.Signer), req) - if err != nil { - log.Errorf("failed to generate a CSR: %v", err) - err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err) - } - return -} - -// ExtractCertificateRequest extracts a CertificateRequest from -// x509.Certificate. It is aimed to used for generating a new certificate -// from an existing certificate. For a root certificate, the CA expiry -// length is calculated as the duration between cert.NotAfter and cert.NotBefore. -func ExtractCertificateRequest(cert *x509.Certificate) *CertificateRequest { - req := New() - req.CN = cert.Subject.CommonName - req.Names = getNames(cert.Subject) - req.Hosts = getHosts(cert) - req.SerialNumber = cert.Subject.SerialNumber - - if cert.IsCA { - req.CA = new(CAConfig) - // CA expiry length is calculated based on the input cert - // issue date and expiry date. 
-		req.CA.Expiry = cert.NotAfter.Sub(cert.NotBefore).String()
-		req.CA.PathLength = cert.MaxPathLen
-		req.CA.PathLenZero = cert.MaxPathLenZero
-	}
-
-	return req
-}
-
-func getHosts(cert *x509.Certificate) []string {
-	var hosts []string
-	for _, ip := range cert.IPAddresses {
-		hosts = append(hosts, ip.String())
-	}
-	for _, dns := range cert.DNSNames {
-		hosts = append(hosts, dns)
-	}
-	for _, email := range cert.EmailAddresses {
-		hosts = append(hosts, email)
-	}
-
-	return hosts
-}
-
-// getNames returns an array of Names from the certificate.
-// It only cares about Country, Organization, OrganizationalUnit, Locality, Province.
-func getNames(sub pkix.Name) []Name {
-	// anonymous func for finding the max of a list of integers
-	max := func(v1 int, vn ...int) (max int) {
-		max = v1
-		for i := 0; i < len(vn); i++ {
-			if vn[i] > max {
-				max = vn[i]
-			}
-		}
-		return max
-	}
-
-	nc := len(sub.Country)
-	norg := len(sub.Organization)
-	nou := len(sub.OrganizationalUnit)
-	nl := len(sub.Locality)
-	np := len(sub.Province)
-
-	n := max(nc, norg, nou, nl, np)
-
-	names := make([]Name, n)
-	for i := range names {
-		if i < nc {
-			names[i].C = sub.Country[i]
-		}
-		if i < norg {
-			names[i].O = sub.Organization[i]
-		}
-		if i < nou {
-			names[i].OU = sub.OrganizationalUnit[i]
-		}
-		if i < nl {
-			names[i].L = sub.Locality[i]
-		}
-		if i < np {
-			names[i].ST = sub.Province[i]
-		}
-	}
-	return names
-}
-
-// A Generator is responsible for validating certificate requests.
-type Generator struct {
-	Validator func(*CertificateRequest) error
-}
-
-// ProcessRequest validates and processes the incoming request. It is
-// a wrapper around a validator and the ParseRequest function.
-func (g *Generator) ProcessRequest(req *CertificateRequest) (csr, key []byte, err error) {
-
-	log.Info("generate received request")
-	err = g.Validator(req)
-	if err != nil {
-		log.Warningf("invalid request: %v", err)
-		return
-	}
-
-	csr, key, err = ParseRequest(req)
-	if err != nil {
-		return nil, nil, err
-	}
-	return
-}
-
-// IsNameEmpty returns true if the name has no identifying information in it.
-func IsNameEmpty(n Name) bool {
-	empty := func(s string) bool { return strings.TrimSpace(s) == "" }
-
-	if empty(n.C) && empty(n.ST) && empty(n.L) && empty(n.O) && empty(n.OU) {
-		return true
-	}
-	return false
-}
-
-// Regenerate uses the provided CSR as a template for signing a new
-// CSR using priv.
-func Regenerate(priv crypto.Signer, csr []byte) ([]byte, error) {
-	req, extra, err := helpers.ParseCSR(csr)
-	if err != nil {
-		return nil, err
-	} else if len(extra) > 0 {
-		return nil, errors.New("csr: trailing data in certificate request")
-	}
-
-	return x509.CreateCertificateRequest(rand.Reader, req, priv)
-}
-
-// Generate creates a new CSR from a CertificateRequest structure and
-// an existing key. The KeyRequest field is ignored.
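As a usage sketch for ParseRequest and Generate (the latter is defined just below), the following drives the removed csr package end to end; the CN, hosts, and subject values are invented for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/cloudflare/cfssl/csr"
)

func main() {
	req := &csr.CertificateRequest{
		CN:    "example.internal", // hypothetical host
		Hosts: []string{"example.internal", "10.0.0.1"},
		Names: []csr.Name{{C: "US", O: "Example Org"}},
		// KeyRequest is left nil, so ParseRequest falls back to
		// NewBasicKeyRequest (ECDSA P-256).
	}

	// ParseRequest generates a fresh key and a CSR, both PEM-encoded.
	csrPEM, keyPEM, err := csr.ParseRequest(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s%s", csrPEM, keyPEM)
}

Wrapping the same call in a Generator adds the policy validation hook before any key material is produced.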
-func Generate(priv crypto.Signer, req *CertificateRequest) (csr []byte, err error) { - sigAlgo := helpers.SignerAlgo(priv) - if sigAlgo == x509.UnknownSignatureAlgorithm { - return nil, cferr.New(cferr.PrivateKeyError, cferr.Unavailable) - } - - var tpl = x509.CertificateRequest{ - Subject: req.Name(), - SignatureAlgorithm: sigAlgo, - } - - for i := range req.Hosts { - if ip := net.ParseIP(req.Hosts[i]); ip != nil { - tpl.IPAddresses = append(tpl.IPAddresses, ip) - } else if email, err := mail.ParseAddress(req.Hosts[i]); err == nil && email != nil { - tpl.EmailAddresses = append(tpl.EmailAddresses, email.Address) - } else { - tpl.DNSNames = append(tpl.DNSNames, req.Hosts[i]) - } - } - - if req.CA != nil { - err = appendCAInfoToCSR(req.CA, &tpl) - if err != nil { - err = cferr.Wrap(cferr.CSRError, cferr.GenerationFailed, err) - return - } - } - - csr, err = x509.CreateCertificateRequest(rand.Reader, &tpl, priv) - if err != nil { - log.Errorf("failed to generate a CSR: %v", err) - err = cferr.Wrap(cferr.CSRError, cferr.BadRequest, err) - return - } - block := pem.Block{ - Type: "CERTIFICATE REQUEST", - Bytes: csr, - } - - log.Info("encoded CSR") - csr = pem.EncodeToMemory(&block) - return -} - -// appendCAInfoToCSR appends CAConfig BasicConstraint extension to a CSR -func appendCAInfoToCSR(reqConf *CAConfig, csr *x509.CertificateRequest) error { - pathlen := reqConf.PathLength - if pathlen == 0 && !reqConf.PathLenZero { - pathlen = -1 - } - val, err := asn1.Marshal(BasicConstraints{true, pathlen}) - - if err != nil { - return err - } - - csr.ExtraExtensions = []pkix.Extension{ - { - Id: asn1.ObjectIdentifier{2, 5, 29, 19}, - Value: val, - Critical: true, - }, - } - - return nil -} diff --git a/vendor/src/github.com/cloudflare/cfssl/errors/doc.go b/vendor/src/github.com/cloudflare/cfssl/errors/doc.go deleted file mode 100644 index 1910e2662f..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/errors/doc.go +++ /dev/null @@ -1,46 +0,0 @@ -/* -Package errors provides error types returned in CF SSL. - -1. Type Error is intended for errors produced by CF SSL packages. -It formats to a json object that consists of an error message and a 4-digit code for error reasoning. - -Example: {"code":1002, "message": "Failed to decode certificate"} - -The index of codes are listed below: - 1XXX: CertificateError - 1000: Unknown - 1001: ReadFailed - 1002: DecodeFailed - 1003: ParseFailed - 1100: SelfSigned - 12XX: VerifyFailed - 121X: CertificateInvalid - 1210: NotAuthorizedToSign - 1211: Expired - 1212: CANotAuthorizedForThisName - 1213: TooManyIntermediates - 1214: IncompatibleUsage - 1220: UnknownAuthority - 2XXX: PrivatekeyError - 2000: Unknown - 2001: ReadFailed - 2002: DecodeFailed - 2003: ParseFailed - 2100: Encrypted - 2200: NotRSA - 2300: KeyMismatch - 2400: GenerationFailed - 2500: Unavailable - 3XXX: IntermediatesError - 4XXX: RootError - 5XXX: PolicyError - 5100: NoKeyUsages - 5200: InvalidPolicy - 5300: InvalidRequest - 5400: UnknownProfile - 6XXX: DialError - -2. Type HttpError is intended for CF SSL API to consume. It contains a HTTP status code that will be read and returned -by the API server. 
-*/ -package errors diff --git a/vendor/src/github.com/cloudflare/cfssl/errors/error.go b/vendor/src/github.com/cloudflare/cfssl/errors/error.go deleted file mode 100644 index 88663b2c67..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/errors/error.go +++ /dev/null @@ -1,420 +0,0 @@ -package errors - -import ( - "crypto/x509" - "encoding/json" - "fmt" -) - -// Error is the error type usually returned by functions in CF SSL package. -// It contains a 4-digit error code where the most significant digit -// describes the category where the error occurred and the rest 3 digits -// describe the specific error reason. -type Error struct { - ErrorCode int `json:"code"` - Message string `json:"message"` -} - -// Category is the most significant digit of the error code. -type Category int - -// Reason is the last 3 digits of the error code. -type Reason int - -const ( - // Success indicates no error occurred. - Success Category = 1000 * iota // 0XXX - - // CertificateError indicates a fault in a certificate. - CertificateError // 1XXX - - // PrivateKeyError indicates a fault in a private key. - PrivateKeyError // 2XXX - - // IntermediatesError indicates a fault in an intermediate. - IntermediatesError // 3XXX - - // RootError indicates a fault in a root. - RootError // 4XXX - - // PolicyError indicates an error arising from a malformed or - // non-existent policy, or a breach of policy. - PolicyError // 5XXX - - // DialError indicates a network fault. - DialError // 6XXX - - // APIClientError indicates a problem with the API client. - APIClientError // 7XXX - - // OCSPError indicates a problem with OCSP signing - OCSPError // 8XXX - - // CSRError indicates a problem with CSR parsing - CSRError // 9XXX - - // CTError indicates a problem with the certificate transparency process - CTError // 10XXX - - // CertStoreError indicates a problem with the certificate store - CertStoreError // 11XXX -) - -// None is a non-specified error. -const ( - None Reason = iota -) - -// Warning code for a success -const ( - BundleExpiringBit int = 1 << iota // 0x01 - BundleNotUbiquitousBit // 0x02 -) - -// Parsing errors -const ( - Unknown Reason = iota // X000 - ReadFailed // X001 - DecodeFailed // X002 - ParseFailed // X003 -) - -// The following represent certificate non-parsing errors, and must be -// specified along with CertificateError. -const ( - // SelfSigned indicates that a certificate is self-signed and - // cannot be used in the manner being attempted. - SelfSigned Reason = 100 * (iota + 1) // Code 11XX - - // VerifyFailed is an X.509 verification failure. The least two - // significant digits of 12XX is determined as the actual x509 - // error is examined. - VerifyFailed // Code 12XX - - // BadRequest indicates that the certificate request is invalid. - BadRequest // Code 13XX - - // MissingSerial indicates that the profile specified - // 'ClientProvidesSerialNumbers', but the SignRequest did not include a serial - // number. - MissingSerial // Code 14XX -) - -const ( - certificateInvalid = 10 * (iota + 1) //121X - unknownAuthority //122x -) - -// The following represent private-key non-parsing errors, and must be -// specified with PrivateKeyError. -const ( - // Encrypted indicates that the private key is a PKCS #8 encrypted - // private key. At this time, CFSSL does not support decrypting - // these keys. 
-	Encrypted Reason = 100 * (iota + 1) //21XX
-
-	// NotRSAOrECC indicates that the key is not an RSA or ECC
-	// private key; these are the only two private key types supported
-	// at this time by CFSSL.
-	NotRSAOrECC //22XX
-
-	// KeyMismatch indicates that the private key does not match
-	// the public key or certificate being presented with the key.
-	KeyMismatch //23XX
-
-	// GenerationFailed indicates that a private key could not
-	// be generated.
-	GenerationFailed //24XX
-
-	// Unavailable indicates that a private key mechanism (such as
-	// PKCS #11) was requested but support for that mechanism is
-	// not available.
-	Unavailable
-)
-
-// The following are policy-related non-parsing errors, and must be
-// specified along with PolicyError.
-const (
-	// NoKeyUsages indicates that the profile does not permit any
-	// key usages for the certificate.
-	NoKeyUsages Reason = 100 * (iota + 1) // 51XX
-
-	// InvalidPolicy indicates that the policy being requested is not
-	// a valid policy or does not exist.
-	InvalidPolicy // 52XX
-
-	// InvalidRequest indicates a certificate request violated the
-	// constraints of the policy being applied to the request.
-	InvalidRequest // 53XX
-
-	// UnknownProfile indicates that the profile does not exist.
-	UnknownProfile // 54XX
-)
-
-// The following are API client related errors, and should be
-// specified with APIClientError.
-const (
-	// AuthenticationFailure occurs when the client is unable
-	// to obtain an authentication token for the request.
-	AuthenticationFailure Reason = 100 * (iota + 1)
-
-	// JSONError wraps an encoding/json error.
-	JSONError
-
-	// IOError wraps an io/ioutil error.
-	IOError
-
-	// ClientHTTPError wraps a net/http error.
-	ClientHTTPError
-
-	// ServerRequestFailed covers any other failures from the API
-	// client.
-	ServerRequestFailed
-)
-
-// The following are OCSP related errors, and should be
-// specified with OCSPError.
-const (
-	// IssuerMismatch occurs when the certificate in the OCSP signing
-	// request was not issued by the CA that this responder responds for.
-	IssuerMismatch Reason = 100 * (iota + 1) // 81XX
-
-	// InvalidStatus occurs when the OCSP signing request includes an
-	// invalid value for the certificate status.
-	InvalidStatus
-)
-
-// Certificate transparency related errors, specified with CTError.
-const (
-	// PrecertSubmissionFailed occurs when submitting a precertificate to
-	// a log server fails.
-	PrecertSubmissionFailed = 100 * (iota + 1)
-)
-
-// Certificate persistence related errors, specified with CertStoreError.
-const (
-	// InsertionFailed occurs when a SQL insert query fails to complete.
-	InsertionFailed = 100 * (iota + 1)
-	// RecordNotFound occurs when a SQL query targeting one unique
-	// record fails to update the specified row in the table.
-	RecordNotFound
-)
-
-// Error implements the error interface; it formats to a JSON object string.
-func (e *Error) Error() string {
-	marshaled, err := json.Marshal(e)
-	if err != nil {
-		panic(err)
-	}
-	return string(marshaled)
-
-}
-
-// New returns an error that contains an error code and message derived from
-// the given category, reason.
Currently, to avoid confusion, it is not -// allowed to create an error of category Success -func New(category Category, reason Reason) *Error { - errorCode := int(category) + int(reason) - var msg string - switch category { - case OCSPError: - switch reason { - case ReadFailed: - msg = "No certificate provided" - case IssuerMismatch: - msg = "Certificate not issued by this issuer" - case InvalidStatus: - msg = "Invalid revocation status" - } - case CertificateError: - switch reason { - case Unknown: - msg = "Unknown certificate error" - case ReadFailed: - msg = "Failed to read certificate" - case DecodeFailed: - msg = "Failed to decode certificate" - case ParseFailed: - msg = "Failed to parse certificate" - case SelfSigned: - msg = "Certificate is self signed" - case VerifyFailed: - msg = "Unable to verify certificate" - case BadRequest: - msg = "Invalid certificate request" - case MissingSerial: - msg = "Missing serial number in request" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category CertificateError.", - reason)) - - } - case PrivateKeyError: - switch reason { - case Unknown: - msg = "Unknown private key error" - case ReadFailed: - msg = "Failed to read private key" - case DecodeFailed: - msg = "Failed to decode private key" - case ParseFailed: - msg = "Failed to parse private key" - case Encrypted: - msg = "Private key is encrypted." - case NotRSAOrECC: - msg = "Private key algorithm is not RSA or ECC" - case KeyMismatch: - msg = "Private key does not match public key" - case GenerationFailed: - msg = "Failed to new private key" - case Unavailable: - msg = "Private key is unavailable" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PrivateKeyError.", - reason)) - } - case IntermediatesError: - switch reason { - case Unknown: - msg = "Unknown intermediate certificate error" - case ReadFailed: - msg = "Failed to read intermediate certificate" - case DecodeFailed: - msg = "Failed to decode intermediate certificate" - case ParseFailed: - msg = "Failed to parse intermediate certificate" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category IntermediatesError.", - reason)) - } - case RootError: - switch reason { - case Unknown: - msg = "Unknown root certificate error" - case ReadFailed: - msg = "Failed to read root certificate" - case DecodeFailed: - msg = "Failed to decode root certificate" - case ParseFailed: - msg = "Failed to parse root certificate" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category RootError.", - reason)) - } - case PolicyError: - switch reason { - case Unknown: - msg = "Unknown policy error" - case NoKeyUsages: - msg = "Invalid policy: no key usage available" - case InvalidPolicy: - msg = "Invalid or unknown policy" - case InvalidRequest: - msg = "Policy violation request" - case UnknownProfile: - msg = "Unknown policy profile" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category PolicyError.", - reason)) - } - case DialError: - switch reason { - case Unknown: - msg = "Failed to dial remote server" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category DialError.", - reason)) - } - case APIClientError: - switch reason { - case AuthenticationFailure: - msg = "API client authentication failure" - case JSONError: - msg = "API client JSON config error" - case ClientHTTPError: - msg = "API client HTTP error" - case IOError: - msg = "API client IO error" - case ServerRequestFailed: - msg = "API 
client error: Server request failed" - default: - panic(fmt.Sprintf("Unsupported CFSSL error reason %d under category APIClientError.", - reason)) - } - case CSRError: - switch reason { - case Unknown: - msg = "CSR parsing failed due to unknown error" - case ReadFailed: - msg = "CSR file read failed" - case ParseFailed: - msg = "CSR Parsing failed" - case DecodeFailed: - msg = "CSR Decode failed" - case BadRequest: - msg = "CSR Bad request" - default: - panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category APIClientError.", reason)) - } - case CTError: - switch reason { - case Unknown: - msg = "Certificate transparency parsing failed due to unknown error" - case PrecertSubmissionFailed: - msg = "Certificate transparency precertificate submission failed" - default: - panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CTError.", reason)) - } - case CertStoreError: - switch reason { - case Unknown: - msg = "Certificate store action failed due to unknown error" - default: - panic(fmt.Sprintf("Unsupported CF-SSL error reason %d under category CertStoreError.", reason)) - } - - default: - panic(fmt.Sprintf("Unsupported CFSSL error type: %d.", - category)) - } - return &Error{ErrorCode: errorCode, Message: msg} -} - -// Wrap returns an error that contains the given error and an error code derived from -// the given category, reason and the error. Currently, to avoid confusion, it is not -// allowed to create an error of category Success -func Wrap(category Category, reason Reason, err error) *Error { - errorCode := int(category) + int(reason) - if err == nil { - panic("Wrap needs a supplied error to initialize.") - } - - // do not double wrap a error - switch err.(type) { - case *Error: - panic("Unable to wrap a wrapped error.") - } - - switch category { - case CertificateError: - // given VerifyFailed , report the status with more detailed status code - // for some certificate errors we care. - if reason == VerifyFailed { - switch errorType := err.(type) { - case x509.CertificateInvalidError: - errorCode += certificateInvalid + int(errorType.Reason) - case x509.UnknownAuthorityError: - errorCode += unknownAuthority - } - } - case PrivateKeyError, IntermediatesError, RootError, PolicyError, DialError, - APIClientError, CSRError, CTError, CertStoreError: - // no-op, just use the error - default: - panic(fmt.Sprintf("Unsupported CFSSL error type: %d.", - category)) - } - - return &Error{ErrorCode: errorCode, Message: err.Error()} - -} diff --git a/vendor/src/github.com/cloudflare/cfssl/errors/http.go b/vendor/src/github.com/cloudflare/cfssl/errors/http.go deleted file mode 100644 index c9c0a39c70..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/errors/http.go +++ /dev/null @@ -1,47 +0,0 @@ -package errors - -import ( - "errors" - "net/http" -) - -// HTTPError is an augmented error with a HTTP status code. -type HTTPError struct { - StatusCode int - error -} - -// Error implements the error interface. -func (e *HTTPError) Error() string { - return e.error.Error() -} - -// NewMethodNotAllowed returns an appropriate error in the case that -// an HTTP client uses an invalid method (i.e. a GET in place of a POST) -// on an API endpoint. -func NewMethodNotAllowed(method string) *HTTPError { - return &HTTPError{http.StatusMethodNotAllowed, errors.New(`Method is not allowed:"` + method + `"`)} -} - -// NewBadRequest creates a HttpError with the given error and error code 400. 
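Putting the error types above together, a brief sketch (not part of the patch) of how callers observe the JSON form, the wrapped form, and the HTTP-facing helpers; the expected output follows the code tables in doc.go:

package main

import (
	"fmt"

	cferr "github.com/cloudflare/cfssl/errors"
)

func main() {
	// A categorized error; Error() renders the JSON form, e.g.
	// {"code":1002,"message":"Failed to decode certificate"}.
	err := cferr.New(cferr.CertificateError, cferr.DecodeFailed)
	fmt.Println(err)

	// Wrap keeps the 4-digit code but carries the concrete cause
	// as the message (code 2003 here).
	wrapped := cferr.Wrap(cferr.PrivateKeyError, cferr.ParseFailed,
		fmt.Errorf("bad PEM header"))
	fmt.Println(wrapped)

	// The HTTP-facing variant carries a status code for the API server.
	httpErr := cferr.NewBadRequestString("missing CSR body")
	fmt.Println(httpErr.StatusCode, httpErr.Error())
}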
-func NewBadRequest(err error) *HTTPError { - return &HTTPError{http.StatusBadRequest, err} -} - -// NewBadRequestString returns a HttpError with the supplied message -// and error code 400. -func NewBadRequestString(s string) *HTTPError { - return NewBadRequest(errors.New(s)) -} - -// NewBadRequestMissingParameter returns a 400 HttpError as a required -// parameter is missing in the HTTP request. -func NewBadRequestMissingParameter(s string) *HTTPError { - return NewBadRequestString(`Missing parameter "` + s + `"`) -} - -// NewBadRequestUnwantedParameter returns a 400 HttpError as a unnecessary -// parameter is present in the HTTP request. -func NewBadRequestUnwantedParameter(s string) *HTTPError { - return NewBadRequestString(`Unwanted parameter "` + s + `"`) -} diff --git a/vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go b/vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go deleted file mode 100644 index bcc7418508..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/helpers/derhelpers/derhelpers.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package derhelpers implements common functionality -// on DER encoded data -package derhelpers - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - - cferr "github.com/cloudflare/cfssl/errors" -) - -// ParsePrivateKeyDER parses a PKCS #1, PKCS #8, or elliptic curve -// DER-encoded private key. The key must not be in PEM format. -func ParsePrivateKeyDER(keyDER []byte) (key crypto.Signer, err error) { - generalKey, err := x509.ParsePKCS8PrivateKey(keyDER) - if err != nil { - generalKey, err = x509.ParsePKCS1PrivateKey(keyDER) - if err != nil { - generalKey, err = x509.ParseECPrivateKey(keyDER) - if err != nil { - // We don't include the actual error into - // the final error. The reason might be - // we don't want to leak any info about - // the private key. - return nil, cferr.New(cferr.PrivateKeyError, - cferr.ParseFailed) - } - } - } - - switch generalKey.(type) { - case *rsa.PrivateKey: - return generalKey.(*rsa.PrivateKey), nil - case *ecdsa.PrivateKey: - return generalKey.(*ecdsa.PrivateKey), nil - } - - // should never reach here - return nil, cferr.New(cferr.PrivateKeyError, cferr.ParseFailed) -} diff --git a/vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go b/vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go deleted file mode 100644 index 85b0d4a314..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/helpers/helpers.go +++ /dev/null @@ -1,479 +0,0 @@ -// Package helpers implements utility functionality common to many -// CFSSL packages. -package helpers - -import ( - "bytes" - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/x509" - "encoding/asn1" - "encoding/pem" - "errors" - "io/ioutil" - "math/big" - - "strings" - "time" - - "github.com/cloudflare/cfssl/crypto/pkcs7" - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers/derhelpers" - "github.com/cloudflare/cfssl/log" - "golang.org/x/crypto/pkcs12" -) - -// OneYear is a time.Duration representing a year's worth of seconds. -const OneYear = 8760 * time.Hour - -// OneDay is a time.Duration representing a day's worth of seconds. -const OneDay = 24 * time.Hour - -// InclusiveDate returns the time.Time representation of a date - 1 -// nanosecond. This allows time.After to be used inclusively. 
-func InclusiveDate(year int, month time.Month, day int) time.Time { - return time.Date(year, month, day, 0, 0, 0, 0, time.UTC).Add(-1 * time.Nanosecond) -} - -// Jul2012 is the July 2012 CAB Forum deadline for when CAs must stop -// issuing certificates valid for more than 5 years. -var Jul2012 = InclusiveDate(2012, time.July, 01) - -// Apr2015 is the April 2015 CAB Forum deadline for when CAs must stop -// issuing certificates valid for more than 39 months. -var Apr2015 = InclusiveDate(2015, time.April, 01) - -// KeyLength returns the bit size of ECDSA or RSA PublicKey -func KeyLength(key interface{}) int { - if key == nil { - return 0 - } - if ecdsaKey, ok := key.(*ecdsa.PublicKey); ok { - return ecdsaKey.Curve.Params().BitSize - } else if rsaKey, ok := key.(*rsa.PublicKey); ok { - return rsaKey.N.BitLen() - } - - return 0 -} - -// ExpiryTime returns the time when the certificate chain is expired. -func ExpiryTime(chain []*x509.Certificate) (notAfter time.Time) { - if len(chain) == 0 { - return - } - - notAfter = chain[0].NotAfter - for _, cert := range chain { - if notAfter.After(cert.NotAfter) { - notAfter = cert.NotAfter - } - } - return -} - -// MonthsValid returns the number of months for which a certificate is valid. -func MonthsValid(c *x509.Certificate) int { - issued := c.NotBefore - expiry := c.NotAfter - years := (expiry.Year() - issued.Year()) - months := years*12 + int(expiry.Month()) - int(issued.Month()) - - // Round up if valid for less than a full month - if expiry.Day() > issued.Day() { - months++ - } - return months -} - -// ValidExpiry determines if a certificate is valid for an acceptable -// length of time per the CA/Browser Forum baseline requirements. -// See https://cabforum.org/wp-content/uploads/CAB-Forum-BR-1.3.0.pdf -func ValidExpiry(c *x509.Certificate) bool { - issued := c.NotBefore - - var maxMonths int - switch { - case issued.After(Apr2015): - maxMonths = 39 - case issued.After(Jul2012): - maxMonths = 60 - case issued.Before(Jul2012): - maxMonths = 120 - } - - if MonthsValid(c) > maxMonths { - return false - } - return true -} - -// SignatureString returns the TLS signature string corresponding to -// an X509 signature algorithm. -func SignatureString(alg x509.SignatureAlgorithm) string { - switch alg { - case x509.MD2WithRSA: - return "MD2WithRSA" - case x509.MD5WithRSA: - return "MD5WithRSA" - case x509.SHA1WithRSA: - return "SHA1WithRSA" - case x509.SHA256WithRSA: - return "SHA256WithRSA" - case x509.SHA384WithRSA: - return "SHA384WithRSA" - case x509.SHA512WithRSA: - return "SHA512WithRSA" - case x509.DSAWithSHA1: - return "DSAWithSHA1" - case x509.DSAWithSHA256: - return "DSAWithSHA256" - case x509.ECDSAWithSHA1: - return "ECDSAWithSHA1" - case x509.ECDSAWithSHA256: - return "ECDSAWithSHA256" - case x509.ECDSAWithSHA384: - return "ECDSAWithSHA384" - case x509.ECDSAWithSHA512: - return "ECDSAWithSHA512" - default: - return "Unknown Signature" - } -} - -// HashAlgoString returns the hash algorithm name contains in the signature -// method. 
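A small sketch of the inspection helpers above (KeyLength and SignatureString); the throwaway key exists only for illustration:

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"fmt"
	"log"

	"github.com/cloudflare/cfssl/helpers"
)

func main() {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		log.Fatal(err)
	}

	// KeyLength reports the curve size for ECDSA keys: 256 here.
	fmt.Println(helpers.KeyLength(&priv.PublicKey))

	// SignatureString maps x509 algorithm constants to readable names.
	fmt.Println(helpers.SignatureString(x509.ECDSAWithSHA256))
}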
-func HashAlgoString(alg x509.SignatureAlgorithm) string {
-	switch alg {
-	case x509.MD2WithRSA:
-		return "MD2"
-	case x509.MD5WithRSA:
-		return "MD5"
-	case x509.SHA1WithRSA:
-		return "SHA1"
-	case x509.SHA256WithRSA:
-		return "SHA256"
-	case x509.SHA384WithRSA:
-		return "SHA384"
-	case x509.SHA512WithRSA:
-		return "SHA512"
-	case x509.DSAWithSHA1:
-		return "SHA1"
-	case x509.DSAWithSHA256:
-		return "SHA256"
-	case x509.ECDSAWithSHA1:
-		return "SHA1"
-	case x509.ECDSAWithSHA256:
-		return "SHA256"
-	case x509.ECDSAWithSHA384:
-		return "SHA384"
-	case x509.ECDSAWithSHA512:
-		return "SHA512"
-	default:
-		return "Unknown Hash Algorithm"
-	}
-}
-
-// EncodeCertificatesPEM encodes a number of x509 certificates to PEM.
-func EncodeCertificatesPEM(certs []*x509.Certificate) []byte {
-	var buffer bytes.Buffer
-	for _, cert := range certs {
-		pem.Encode(&buffer, &pem.Block{
-			Type:  "CERTIFICATE",
-			Bytes: cert.Raw,
-		})
-	}
-
-	return buffer.Bytes()
-}
-
-// EncodeCertificatePEM encodes a single x509 certificate to PEM.
-func EncodeCertificatePEM(cert *x509.Certificate) []byte {
-	return EncodeCertificatesPEM([]*x509.Certificate{cert})
-}
-
-// ParseCertificatesPEM parses a sequence of PEM-encoded certificates and
-// returns them; it can handle PEM-encoded PKCS #7 structures.
-func ParseCertificatesPEM(certsPEM []byte) ([]*x509.Certificate, error) {
-	var certs []*x509.Certificate
-	var err error
-	certsPEM = bytes.TrimSpace(certsPEM)
-	for len(certsPEM) > 0 {
-		var cert []*x509.Certificate
-		cert, certsPEM, err = ParseOneCertificateFromPEM(certsPEM)
-		if err != nil {
-			return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed)
-		} else if cert == nil {
-			break
-		}
-
-		certs = append(certs, cert...)
-	}
-	if len(certsPEM) > 0 {
-		return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
-	}
-	return certs, nil
-}
-
-// ParseCertificatesDER parses a DER encoding of a certificate object and possibly private key,
-// either PKCS #7, PKCS #12, or raw x509.
-func ParseCertificatesDER(certsDER []byte, password string) (certs []*x509.Certificate, key crypto.Signer, err error) {
-	certsDER = bytes.TrimSpace(certsDER)
-	pkcs7data, err := pkcs7.ParsePKCS7(certsDER)
-	if err != nil {
-		var pkcs12data interface{}
-		certs = make([]*x509.Certificate, 1)
-		pkcs12data, certs[0], err = pkcs12.Decode(certsDER, password)
-		if err != nil {
-			certs, err = x509.ParseCertificates(certsDER)
-			if err != nil {
-				return nil, nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
-			}
-		} else {
-			key = pkcs12data.(crypto.Signer)
-		}
-	} else {
-		if pkcs7data.ContentInfo != "SignedData" {
-			return nil, nil, cferr.Wrap(cferr.CertificateError, cferr.DecodeFailed, errors.New("can only extract certificates from signed data content info"))
-		}
-		certs = pkcs7data.Content.SignedData.Certificates
-	}
-	if certs == nil {
-		return nil, key, cferr.New(cferr.CertificateError, cferr.DecodeFailed)
-	}
-	return certs, key, nil
-}
-
-// ParseSelfSignedCertificatePEM parses a PEM-encoded certificate and checks whether it is self-signed.
-func ParseSelfSignedCertificatePEM(certPEM []byte) (*x509.Certificate, error) {
-	cert, err := ParseCertificatePEM(certPEM)
-	if err != nil {
-		return nil, err
-	}
-
-	if err := cert.CheckSignature(cert.SignatureAlgorithm, cert.RawTBSCertificate, cert.Signature); err != nil {
-		return nil, cferr.Wrap(cferr.CertificateError, cferr.VerifyFailed, err)
-	}
-	return cert, nil
-}
-
-// ParseCertificatePEM parses and returns a PEM-encoded certificate;
-// it can handle PEM-encoded PKCS #7 structures.
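For the certificate-parsing helpers above (and the function defined just below), a hedged usage sketch; bundle.pem is a hypothetical file containing one or more PEM certificates, or a PEM-wrapped PKCS #7 blob:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/helpers"
)

func main() {
	pemBytes, err := ioutil.ReadFile("bundle.pem")
	if err != nil {
		log.Fatal(err)
	}

	// ParseCertificatesPEM walks the input block by block, accepting
	// both raw x509 certificates and degenerate PKCS #7 SignedData.
	certs, err := helpers.ParseCertificatesPEM(pemBytes)
	if err != nil {
		log.Fatal(err)
	}
	for _, c := range certs {
		fmt.Println(c.Subject.CommonName, c.NotAfter)
	}
}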
-func ParseCertificatePEM(certPEM []byte) (*x509.Certificate, error) { - certPEM = bytes.TrimSpace(certPEM) - cert, rest, err := ParseOneCertificateFromPEM(certPEM) - if err != nil { - // Log the actual parsing error but throw a default parse error message. - log.Debugf("Certificate parsing error: %v", err) - return nil, cferr.New(cferr.CertificateError, cferr.ParseFailed) - } else if cert == nil { - return nil, cferr.New(cferr.CertificateError, cferr.DecodeFailed) - } else if len(rest) > 0 { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PEM file should contain only one object")) - } else if len(cert) > 1 { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, errors.New("the PKCS7 object in the PEM file should contain only one certificate")) - } - return cert[0], nil -} - -// ParseOneCertificateFromPEM attempts to parse one PEM encoded certificate object, -// either a raw x509 certificate or a PKCS #7 structure possibly containing -// multiple certificates, from the top of certsPEM, which itself may -// contain multiple PEM encoded certificate objects. -func ParseOneCertificateFromPEM(certsPEM []byte) ([]*x509.Certificate, []byte, error) { - - block, rest := pem.Decode(certsPEM) - if block == nil { - return nil, rest, nil - } - - cert, err := x509.ParseCertificate(block.Bytes) - if err != nil { - pkcs7data, err := pkcs7.ParsePKCS7(block.Bytes) - if err != nil { - return nil, rest, err - } - if pkcs7data.ContentInfo != "SignedData" { - return nil, rest, errors.New("only PKCS #7 Signed Data Content Info supported for certificate parsing") - } - certs := pkcs7data.Content.SignedData.Certificates - if certs == nil { - return nil, rest, errors.New("PKCS #7 structure contains no certificates") - } - return certs, rest, nil - } - var certs = []*x509.Certificate{cert} - return certs, rest, nil -} - -// LoadPEMCertPool loads a pool of PEM certificates from file. -func LoadPEMCertPool(certsFile string) (*x509.CertPool, error) { - pemCerts, err := ioutil.ReadFile(certsFile) - if err != nil { - return nil, err - } - - certPool := x509.NewCertPool() - if !certPool.AppendCertsFromPEM(pemCerts) { - return nil, errors.New("failed to load cert pool") - } - - return certPool, nil -} - -// ParsePrivateKeyPEM parses and returns a PEM-encoded private -// key. The private key may be either an unencrypted PKCS#8, PKCS#1, -// or elliptic private key. -func ParsePrivateKeyPEM(keyPEM []byte) (key crypto.Signer, err error) { - return ParsePrivateKeyPEMWithPassword(keyPEM, nil) -} - -// ParsePrivateKeyPEMWithPassword parses and returns a PEM-encoded private -// key. The private key may be a potentially encrypted PKCS#8, PKCS#1, -// or elliptic private key. -func ParsePrivateKeyPEMWithPassword(keyPEM []byte, password []byte) (key crypto.Signer, err error) { - keyDER, err := GetKeyDERFromPEM(keyPEM, password) - if err != nil { - return nil, err - } - - return derhelpers.ParsePrivateKeyDER(keyDER) -} - -// GetKeyDERFromPEM parses a PEM-encoded private key and returns DER-format key bytes. 
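The password-aware key loader documented above (and defined just below) can be driven as in this sketch; the key.pem path and the password are assumptions for illustration:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/helpers"
)

func main() {
	// key.pem is a hypothetical PEM private key; if its Proc-Type header
	// marks it ENCRYPTED, the password is used to decrypt it first.
	keyPEM, err := ioutil.ReadFile("key.pem")
	if err != nil {
		log.Fatal(err)
	}

	key, err := helpers.ParsePrivateKeyPEMWithPassword(keyPEM, []byte("s3cret"))
	if err != nil {
		log.Fatal(err)
	}
	// key is a crypto.Signer; the concrete type depends on the input.
	fmt.Printf("loaded %T\n", key)
}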
-func GetKeyDERFromPEM(in []byte, password []byte) ([]byte, error) { - keyDER, _ := pem.Decode(in) - if keyDER != nil { - if procType, ok := keyDER.Headers["Proc-Type"]; ok { - if strings.Contains(procType, "ENCRYPTED") { - if password != nil { - return x509.DecryptPEMBlock(keyDER, password) - } - return nil, cferr.New(cferr.PrivateKeyError, cferr.Encrypted) - } - } - return keyDER.Bytes, nil - } - - return nil, cferr.New(cferr.PrivateKeyError, cferr.DecodeFailed) -} - -// CheckSignature verifies a signature made by the key on a CSR, such -// as on the CSR itself. -func CheckSignature(csr *x509.CertificateRequest, algo x509.SignatureAlgorithm, signed, signature []byte) error { - var hashType crypto.Hash - - switch algo { - case x509.SHA1WithRSA, x509.ECDSAWithSHA1: - hashType = crypto.SHA1 - case x509.SHA256WithRSA, x509.ECDSAWithSHA256: - hashType = crypto.SHA256 - case x509.SHA384WithRSA, x509.ECDSAWithSHA384: - hashType = crypto.SHA384 - case x509.SHA512WithRSA, x509.ECDSAWithSHA512: - hashType = crypto.SHA512 - default: - return x509.ErrUnsupportedAlgorithm - } - - if !hashType.Available() { - return x509.ErrUnsupportedAlgorithm - } - h := hashType.New() - - h.Write(signed) - digest := h.Sum(nil) - - switch pub := csr.PublicKey.(type) { - case *rsa.PublicKey: - return rsa.VerifyPKCS1v15(pub, hashType, digest, signature) - case *ecdsa.PublicKey: - ecdsaSig := new(struct{ R, S *big.Int }) - if _, err := asn1.Unmarshal(signature, ecdsaSig); err != nil { - return err - } - if ecdsaSig.R.Sign() <= 0 || ecdsaSig.S.Sign() <= 0 { - return errors.New("x509: ECDSA signature contained zero or negative values") - } - if !ecdsa.Verify(pub, digest, ecdsaSig.R, ecdsaSig.S) { - return errors.New("x509: ECDSA verification failure") - } - return nil - } - return x509.ErrUnsupportedAlgorithm -} - -// ParseCSR parses a PEM- or DER-encoded PKCS #10 certificate signing request. -func ParseCSR(in []byte) (csr *x509.CertificateRequest, rest []byte, err error) { - in = bytes.TrimSpace(in) - p, rest := pem.Decode(in) - if p != nil { - if p.Type != "NEW CERTIFICATE REQUEST" && p.Type != "CERTIFICATE REQUEST" { - return nil, rest, cferr.New(cferr.CSRError, cferr.BadRequest) - } - - csr, err = x509.ParseCertificateRequest(p.Bytes) - } else { - csr, err = x509.ParseCertificateRequest(in) - } - - if err != nil { - return nil, rest, err - } - - err = CheckSignature(csr, csr.SignatureAlgorithm, csr.RawTBSCertificateRequest, csr.Signature) - if err != nil { - return nil, rest, err - } - - return csr, rest, nil -} - -// ParseCSRPEM parses a PEM-encoded certificiate signing request. -// It does not check the signature. This is useful for dumping data from a CSR -// locally. -func ParseCSRPEM(csrPEM []byte) (*x509.CertificateRequest, error) { - block, _ := pem.Decode([]byte(csrPEM)) - der := block.Bytes - csrObject, err := x509.ParseCertificateRequest(der) - - if err != nil { - return nil, err - } - - return csrObject, nil -} - -// SignerAlgo returns an X.509 signature algorithm from a crypto.Signer. 
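A short sketch of the CSR helpers above; request.csr is a hypothetical PEM-encoded PKCS #10 request. Note that ParseCSR also verifies the request's self-signature, while ParseCSRPEM deliberately skips that check:

package main

import (
	"fmt"
	"io/ioutil"
	"log"

	"github.com/cloudflare/cfssl/helpers"
)

func main() {
	in, err := ioutil.ReadFile("request.csr")
	if err != nil {
		log.Fatal(err)
	}

	// ParseCSR accepts PEM or raw DER and checks the signature via
	// CheckSignature before returning.
	req, _, err := helpers.ParseCSR(in)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(req.Subject.CommonName, req.DNSNames)
}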
-func SignerAlgo(priv crypto.Signer) x509.SignatureAlgorithm { - switch pub := priv.Public().(type) { - case *rsa.PublicKey: - bitLength := pub.N.BitLen() - switch { - case bitLength >= 4096: - return x509.SHA512WithRSA - case bitLength >= 3072: - return x509.SHA384WithRSA - case bitLength >= 2048: - return x509.SHA256WithRSA - default: - return x509.SHA1WithRSA - } - case *ecdsa.PublicKey: - switch pub.Curve { - case elliptic.P521(): - return x509.ECDSAWithSHA512 - case elliptic.P384(): - return x509.ECDSAWithSHA384 - case elliptic.P256(): - return x509.ECDSAWithSHA256 - default: - return x509.ECDSAWithSHA1 - } - default: - return x509.UnknownSignatureAlgorithm - } -} diff --git a/vendor/src/github.com/cloudflare/cfssl/info/info.go b/vendor/src/github.com/cloudflare/cfssl/info/info.go deleted file mode 100644 index 926a411ffb..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/info/info.go +++ /dev/null @@ -1,15 +0,0 @@ -// Package info contains the definitions for the info endpoint -package info - -// Req is the request struct for an info API request. -type Req struct { - Label string `json:"label"` - Profile string `json:"profile"` -} - -// Resp is the response for an Info API request. -type Resp struct { - Certificate string `json:"certificate"` - Usage []string `json:"usages"` - ExpiryString string `json:"expiry"` -} diff --git a/vendor/src/github.com/cloudflare/cfssl/initca/initca.go b/vendor/src/github.com/cloudflare/cfssl/initca/initca.go deleted file mode 100644 index 320ffb70b2..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/initca/initca.go +++ /dev/null @@ -1,223 +0,0 @@ -// Package initca contains code to initialise a certificate authority, -// generating a new root key and certificate. -package initca - -import ( - "crypto" - "crypto/ecdsa" - "crypto/rsa" - "crypto/x509" - "errors" - "io/ioutil" - "time" - - "github.com/cloudflare/cfssl/config" - "github.com/cloudflare/cfssl/csr" - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers" - "github.com/cloudflare/cfssl/log" - "github.com/cloudflare/cfssl/signer" - "github.com/cloudflare/cfssl/signer/local" -) - -// validator contains the default validation logic for certificate -// authority certificates. The only requirement here is that the -// certificate have a non-empty subject field. -func validator(req *csr.CertificateRequest) error { - if req.CN != "" { - return nil - } - - if len(req.Names) == 0 { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information")) - } - - for i := range req.Names { - if csr.IsNameEmpty(req.Names[i]) { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidRequest, errors.New("missing subject information")) - } - } - - return nil -} - -// New creates a new root certificate from the certificate request. 
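As a usage sketch for New (defined just below), bootstrapping a root CA from a request; the subject values, expiry, and path length are illustrative only:

package main

import (
	"fmt"
	"log"

	"github.com/cloudflare/cfssl/csr"
	"github.com/cloudflare/cfssl/initca"
)

func main() {
	req := &csr.CertificateRequest{
		CN:    "Example Internal Root", // hypothetical CA name
		Names: []csr.Name{{C: "US", O: "Example Org"}},
		CA:    &csr.CAConfig{Expiry: "43800h", PathLength: 1},
	}

	// New generates a key, self-signs a root certificate under
	// CAPolicy, and returns all three artifacts PEM-encoded.
	cert, csrPEM, key, err := initca.New(req)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("cert: %d bytes, csr: %d bytes, key: %d bytes\n",
		len(cert), len(csrPEM), len(key))
}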
-func New(req *csr.CertificateRequest) (cert, csrPEM, key []byte, err error) { - policy := CAPolicy() - if req.CA != nil { - if req.CA.Expiry != "" { - policy.Default.ExpiryString = req.CA.Expiry - policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry) - } - - signer.MaxPathLen = req.CA.PathLength - if req.CA.PathLength != 0 && req.CA.PathLenZero == true { - log.Infof("ignore invalid 'pathlenzero' value") - } else { - signer.MaxPathLenZero = req.CA.PathLenZero - } - } - - g := &csr.Generator{Validator: validator} - csrPEM, key, err = g.ProcessRequest(req) - if err != nil { - log.Errorf("failed to process request: %v", err) - key = nil - return - } - - priv, err := helpers.ParsePrivateKeyPEM(key) - if err != nil { - log.Errorf("failed to parse private key: %v", err) - return - } - - s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil) - if err != nil { - log.Errorf("failed to create signer: %v", err) - return - } - s.SetPolicy(policy) - - signReq := signer.SignRequest{Hosts: req.Hosts, Request: string(csrPEM)} - cert, err = s.Sign(signReq) - - return - -} - -// NewFromPEM creates a new root certificate from the key file passed in. -func NewFromPEM(req *csr.CertificateRequest, keyFile string) (cert, csrPEM []byte, err error) { - privData, err := ioutil.ReadFile(keyFile) - if err != nil { - return nil, nil, err - } - - priv, err := helpers.ParsePrivateKeyPEM(privData) - if err != nil { - return nil, nil, err - } - - return NewFromSigner(req, priv) -} - -// RenewFromPEM re-creates a root certificate from the CA cert and key -// files. The resulting root certificate will have the input CA certificate -// as the template and have the same expiry length. E.g. the exsiting CA -// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate -// will be valid from now and expire in one year as well. -func RenewFromPEM(caFile, keyFile string) ([]byte, error) { - caBytes, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, err - } - - ca, err := helpers.ParseCertificatePEM(caBytes) - if err != nil { - return nil, err - } - - keyBytes, err := ioutil.ReadFile(keyFile) - if err != nil { - return nil, err - } - - key, err := helpers.ParsePrivateKeyPEM(keyBytes) - if err != nil { - return nil, err - } - - return RenewFromSigner(ca, key) - -} - -// NewFromSigner creates a new root certificate from a crypto.Signer. -func NewFromSigner(req *csr.CertificateRequest, priv crypto.Signer) (cert, csrPEM []byte, err error) { - policy := CAPolicy() - if req.CA != nil { - if req.CA.Expiry != "" { - policy.Default.ExpiryString = req.CA.Expiry - policy.Default.Expiry, err = time.ParseDuration(req.CA.Expiry) - if err != nil { - return nil, nil, err - } - } - - signer.MaxPathLen = req.CA.PathLength - if req.CA.PathLength != 0 && req.CA.PathLenZero == true { - log.Infof("ignore invalid 'pathlenzero' value") - } else { - signer.MaxPathLenZero = req.CA.PathLenZero - } - } - - csrPEM, err = csr.Generate(priv, req) - if err != nil { - return nil, nil, err - } - - s, err := local.NewSigner(priv, nil, signer.DefaultSigAlgo(priv), nil) - if err != nil { - log.Errorf("failed to create signer: %v", err) - return - } - s.SetPolicy(policy) - - signReq := signer.SignRequest{Request: string(csrPEM)} - cert, err = s.Sign(signReq) - return -} - -// RenewFromSigner re-creates a root certificate from the CA cert and crypto.Signer. -// The resulting root certificate will have ca certificate -// as the template and have the same expiry length. E.g. 
the exsiting CA -// is valid for a year from Jan 01 2015 to Jan 01 2016, the renewed certificate -// will be valid from now and expire in one year as well. -func RenewFromSigner(ca *x509.Certificate, priv crypto.Signer) ([]byte, error) { - if !ca.IsCA { - return nil, errors.New("input certificate is not a CA cert") - } - - // matching certificate public key vs private key - switch { - case ca.PublicKeyAlgorithm == x509.RSA: - - var rsaPublicKey *rsa.PublicKey - var ok bool - if rsaPublicKey, ok = priv.Public().(*rsa.PublicKey); !ok { - return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch) - } - if ca.PublicKey.(*rsa.PublicKey).N.Cmp(rsaPublicKey.N) != 0 { - return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch) - } - case ca.PublicKeyAlgorithm == x509.ECDSA: - var ecdsaPublicKey *ecdsa.PublicKey - var ok bool - if ecdsaPublicKey, ok = priv.Public().(*ecdsa.PublicKey); !ok { - return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch) - } - if ca.PublicKey.(*ecdsa.PublicKey).X.Cmp(ecdsaPublicKey.X) != 0 { - return nil, cferr.New(cferr.PrivateKeyError, cferr.KeyMismatch) - } - default: - return nil, cferr.New(cferr.PrivateKeyError, cferr.NotRSAOrECC) - } - - req := csr.ExtractCertificateRequest(ca) - - cert, _, err := NewFromSigner(req, priv) - return cert, err - -} - -// CAPolicy contains the CA issuing policy as default policy. -var CAPolicy = func() *config.Signing { - return &config.Signing{ - Default: &config.SigningProfile{ - Usage: []string{"cert sign", "crl sign"}, - ExpiryString: "43800h", - Expiry: 5 * helpers.OneYear, - CA: true, - }, - } -} diff --git a/vendor/src/github.com/cloudflare/cfssl/log/log.go b/vendor/src/github.com/cloudflare/cfssl/log/log.go deleted file mode 100644 index 4ceacc9e2b..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/log/log.go +++ /dev/null @@ -1,170 +0,0 @@ -// Package log implements a wrapper around the Go standard library's -// logging package. Clients should set the current log level; only -// messages below that level will actually be logged. For example, if -// Level is set to LevelWarning, only log messages at the Warning, -// Error, and Critical levels will be logged. -package log - -import ( - "flag" - "fmt" - "log" - "os" -) - -// The following constants represent logging levels in increasing levels of seriousness. -const ( - // LevelDebug is the log level for Debug statements. - LevelDebug = iota - // LevelInfo is the log level for Info statements. - LevelInfo - // LevelWarning is the log level for Warning statements. - LevelWarning - // LevelError is the log level for Error statements. - LevelError - // LevelCritical is the log level for Critical statements. - LevelCritical - // LevelFatal is the log level for Fatal statements. - LevelFatal -) - -var levelPrefix = [...]string{ - LevelDebug: "DEBUG", - LevelInfo: "INFO", - LevelWarning: "WARNING", - LevelError: "ERROR", - LevelCritical: "CRITICAL", - LevelFatal: "FATAL", -} - -// Level stores the current logging level. -var Level = LevelInfo - -// SyslogWriter specifies the necessary methods for an alternate output -// destination passed in via SetLogger. -// -// SyslogWriter is satisfied by *syslog.Writer. -type SyslogWriter interface { - Debug(string) - Info(string) - Warning(string) - Err(string) - Crit(string) - Emerg(string) -} - -// syslogWriter stores the SetLogger() parameter. -var syslogWriter SyslogWriter - -// SetLogger sets the output used for output by this package. -// A *syslog.Writer is a good choice for the logger parameter. 
-// Call with a nil parameter to revert to default behavior. -func SetLogger(logger SyslogWriter) { - syslogWriter = logger -} - -func init() { - // Only define loglevel flag once. - if flag.Lookup("loglevel") == nil { - flag.IntVar(&Level, "loglevel", LevelInfo, "Log level (0 = DEBUG, 5 = FATAL)") - } -} - -func print(l int, msg string) { - if l >= Level { - if syslogWriter != nil { - switch l { - case LevelDebug: - syslogWriter.Debug(msg) - case LevelInfo: - syslogWriter.Info(msg) - case LevelWarning: - syslogWriter.Warning(msg) - case LevelError: - syslogWriter.Err(msg) - case LevelCritical: - syslogWriter.Crit(msg) - case LevelFatal: - syslogWriter.Emerg(msg) - } - } else { - log.Printf("[%s] %s", levelPrefix[l], msg) - } - } -} - -func outputf(l int, format string, v []interface{}) { - print(l, fmt.Sprintf(format, v...)) -} - -func output(l int, v []interface{}) { - print(l, fmt.Sprint(v...)) -} - -// Fatalf logs a formatted message at the "fatal" level and then exits. The -// arguments are handled in the same manner as fmt.Printf. -func Fatalf(format string, v ...interface{}) { - outputf(LevelFatal, format, v) - os.Exit(1) -} - -// Fatal logs its arguments at the "fatal" level and then exits. -func Fatal(v ...interface{}) { - output(LevelFatal, v) - os.Exit(1) -} - -// Criticalf logs a formatted message at the "critical" level. The -// arguments are handled in the same manner as fmt.Printf. -func Criticalf(format string, v ...interface{}) { - outputf(LevelCritical, format, v) -} - -// Critical logs its arguments at the "critical" level. -func Critical(v ...interface{}) { - output(LevelCritical, v) -} - -// Errorf logs a formatted message at the "error" level. The arguments -// are handled in the same manner as fmt.Printf. -func Errorf(format string, v ...interface{}) { - outputf(LevelError, format, v) -} - -// Error logs its arguments at the "error" level. -func Error(v ...interface{}) { - output(LevelError, v) -} - -// Warningf logs a formatted message at the "warning" level. The -// arguments are handled in the same manner as fmt.Printf. -func Warningf(format string, v ...interface{}) { - outputf(LevelWarning, format, v) -} - -// Warning logs its arguments at the "warning" level. -func Warning(v ...interface{}) { - output(LevelWarning, v) -} - -// Infof logs a formatted message at the "info" level. The arguments -// are handled in the same manner as fmt.Printf. -func Infof(format string, v ...interface{}) { - outputf(LevelInfo, format, v) -} - -// Info logs its arguments at the "info" level. -func Info(v ...interface{}) { - output(LevelInfo, v) -} - -// Debugf logs a formatted message at the "debug" level. The arguments -// are handled in the same manner as fmt.Printf. -func Debugf(format string, v ...interface{}) { - outputf(LevelDebug, format, v) -} - -// Debug logs its arguments at the "debug" level. -func Debug(v ...interface{}) { - output(LevelDebug, v) -} diff --git a/vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go b/vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go deleted file mode 100644 index a19b113d4e..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/ocsp/config/config.go +++ /dev/null @@ -1,13 +0,0 @@ -// Package config in the ocsp directory provides configuration data for an OCSP -// signer. -package config - -import "time" - -// Config contains configuration information required to set up an OCSP signer. 
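Returning to the log package above, its level gate can be seen in a few lines; this sketch assumes no syslog writer has been installed, so output goes through the standard library logger:

package main

import (
	"github.com/cloudflare/cfssl/log"
)

func main() {
	// Only messages at or above the current level are emitted, so this
	// Debug call is dropped under the default LevelInfo.
	log.Debug("not shown at the default level")

	log.Level = log.LevelDebug
	log.Debugf("now visible: level=%d", log.Level)
	log.Warning("warnings pass at either level")
}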
-type Config struct { - CACertFile string - ResponderCertFile string - KeyFile string - Interval time.Duration -} diff --git a/vendor/src/github.com/cloudflare/cfssl/signer/local/local.go b/vendor/src/github.com/cloudflare/cfssl/signer/local/local.go deleted file mode 100644 index d565bc4f4f..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/signer/local/local.go +++ /dev/null @@ -1,469 +0,0 @@ -// Package local implements certificate signature functionality for CFSSL. -package local - -import ( - "bytes" - "crypto" - "crypto/rand" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "encoding/binary" - "encoding/hex" - "encoding/pem" - "errors" - "io" - "io/ioutil" - "math/big" - "net" - "net/mail" - "os" - - "github.com/cloudflare/cfssl/certdb" - "github.com/cloudflare/cfssl/config" - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers" - "github.com/cloudflare/cfssl/info" - "github.com/cloudflare/cfssl/log" - "github.com/cloudflare/cfssl/signer" - "github.com/google/certificate-transparency/go" - "github.com/google/certificate-transparency/go/client" -) - -// Signer contains a signer that uses the standard library to -// support both ECDSA and RSA CA keys. -type Signer struct { - ca *x509.Certificate - priv crypto.Signer - policy *config.Signing - sigAlgo x509.SignatureAlgorithm - dbAccessor certdb.Accessor -} - -// NewSigner creates a new Signer directly from a -// private key and certificate, with optional policy. -func NewSigner(priv crypto.Signer, cert *x509.Certificate, sigAlgo x509.SignatureAlgorithm, policy *config.Signing) (*Signer, error) { - if policy == nil { - policy = &config.Signing{ - Profiles: map[string]*config.SigningProfile{}, - Default: config.DefaultConfig()} - } - - if !policy.Valid() { - return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy) - } - - return &Signer{ - ca: cert, - priv: priv, - sigAlgo: sigAlgo, - policy: policy, - }, nil -} - -// NewSignerFromFile generates a new local signer from a caFile -// and a caKey file, both PEM encoded. 
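-//
-// If the CA key is encrypted, the passphrase is taken from the
-// CFSSL_CA_PK_PASSWORD environment variable.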
-func NewSignerFromFile(caFile, caKeyFile string, policy *config.Signing) (*Signer, error) { - log.Debug("Loading CA: ", caFile) - ca, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, err - } - log.Debug("Loading CA key: ", caKeyFile) - cakey, err := ioutil.ReadFile(caKeyFile) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ReadFailed, err) - } - - parsedCa, err := helpers.ParseCertificatePEM(ca) - if err != nil { - return nil, err - } - - strPassword := os.Getenv("CFSSL_CA_PK_PASSWORD") - password := []byte(strPassword) - if strPassword == "" { - password = nil - } - - priv, err := helpers.ParsePrivateKeyPEMWithPassword(cakey, password) - if err != nil { - log.Debug("Malformed private key %v", err) - return nil, err - } - - return NewSigner(priv, parsedCa, signer.DefaultSigAlgo(priv), policy) -} - -func (s *Signer) sign(template *x509.Certificate, profile *config.SigningProfile) (cert []byte, err error) { - var distPoints = template.CRLDistributionPoints - err = signer.FillTemplate(template, s.policy.Default, profile) - if distPoints != nil && len(distPoints) > 0 { - template.CRLDistributionPoints = distPoints - } - if err != nil { - return - } - - var initRoot bool - if s.ca == nil { - if !template.IsCA { - err = cferr.New(cferr.PolicyError, cferr.InvalidRequest) - return - } - template.DNSNames = nil - template.EmailAddresses = nil - s.ca = template - initRoot = true - } else if template.IsCA { - template.DNSNames = nil - template.EmailAddresses = nil - } - - derBytes, err := x509.CreateCertificate(rand.Reader, template, s.ca, template.PublicKey, s.priv) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err) - } - if initRoot { - s.ca, err = x509.ParseCertificate(derBytes) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.ParseFailed, err) - } - } - - cert = pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: derBytes}) - log.Infof("signed certificate with serial number %d", template.SerialNumber) - return -} - -// replaceSliceIfEmpty replaces the contents of replaced with newContents if -// the slice referenced by replaced is empty -func replaceSliceIfEmpty(replaced, newContents *[]string) { - if len(*replaced) == 0 { - *replaced = *newContents - } -} - -// PopulateSubjectFromCSR has functionality similar to Name, except -// it fills the fields of the resulting pkix.Name with req's if the -// subject's corresponding fields are empty -func PopulateSubjectFromCSR(s *signer.Subject, req pkix.Name) pkix.Name { - // if no subject, use req - if s == nil { - return req - } - - name := s.Name() - - if name.CommonName == "" { - name.CommonName = req.CommonName - } - - replaceSliceIfEmpty(&name.Country, &req.Country) - replaceSliceIfEmpty(&name.Province, &req.Province) - replaceSliceIfEmpty(&name.Locality, &req.Locality) - replaceSliceIfEmpty(&name.Organization, &req.Organization) - replaceSliceIfEmpty(&name.OrganizationalUnit, &req.OrganizationalUnit) - if name.SerialNumber == "" { - name.SerialNumber = req.SerialNumber - } - return name -} - -// OverrideHosts fills template's IPAddresses, EmailAddresses, and DNSNames with the -// content of hosts, if it is not nil. 
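-//
-// For example (a sketch; tmpl is a hypothetical *x509.Certificate):
-//
-//	OverrideHosts(tmpl, []string{"example.com", "10.0.0.1", "ops@example.com"})
-//
-// clears the template's SANs and repopulates them with one DNS name, one
-// IP address, and one email address; each host is classified as an IP
-// address, an email address, or a DNS name, in that order.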
-func OverrideHosts(template *x509.Certificate, hosts []string) { - if hosts != nil { - template.IPAddresses = []net.IP{} - template.EmailAddresses = []string{} - template.DNSNames = []string{} - } - - for i := range hosts { - if ip := net.ParseIP(hosts[i]); ip != nil { - template.IPAddresses = append(template.IPAddresses, ip) - } else if email, err := mail.ParseAddress(hosts[i]); err == nil && email != nil { - template.EmailAddresses = append(template.EmailAddresses, email.Address) - } else { - template.DNSNames = append(template.DNSNames, hosts[i]) - } - } - -} - -// Sign signs a new certificate based on the PEM-encoded client -// certificate or certificate request with the signing profile, -// specified by profileName. -func (s *Signer) Sign(req signer.SignRequest) (cert []byte, err error) { - profile, err := signer.Profile(s, req.Profile) - if err != nil { - return - } - - block, _ := pem.Decode([]byte(req.Request)) - if block == nil { - return nil, cferr.New(cferr.CSRError, cferr.DecodeFailed) - } - - if block.Type != "NEW CERTIFICATE REQUEST" && block.Type != "CERTIFICATE REQUEST" { - return nil, cferr.Wrap(cferr.CSRError, - cferr.BadRequest, errors.New("not a certificate or csr")) - } - - csrTemplate, err := signer.ParseCertificateRequest(s, block.Bytes) - if err != nil { - return nil, err - } - - // Copy out only the fields from the CSR authorized by policy. - safeTemplate := x509.Certificate{} - // If the profile contains no explicit whitelist, assume that all fields - // should be copied from the CSR. - if profile.CSRWhitelist == nil { - safeTemplate = *csrTemplate - } else { - if profile.CSRWhitelist.Subject { - safeTemplate.Subject = csrTemplate.Subject - } - if profile.CSRWhitelist.PublicKeyAlgorithm { - safeTemplate.PublicKeyAlgorithm = csrTemplate.PublicKeyAlgorithm - } - if profile.CSRWhitelist.PublicKey { - safeTemplate.PublicKey = csrTemplate.PublicKey - } - if profile.CSRWhitelist.SignatureAlgorithm { - safeTemplate.SignatureAlgorithm = csrTemplate.SignatureAlgorithm - } - if profile.CSRWhitelist.DNSNames { - safeTemplate.DNSNames = csrTemplate.DNSNames - } - if profile.CSRWhitelist.IPAddresses { - safeTemplate.IPAddresses = csrTemplate.IPAddresses - } - if profile.CSRWhitelist.EmailAddresses { - safeTemplate.EmailAddresses = csrTemplate.EmailAddresses - } - } - - if req.CRLOverride != "" { - safeTemplate.CRLDistributionPoints = []string{req.CRLOverride} - } - - if safeTemplate.IsCA { - if !profile.CA { - return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest) - } - - if s.ca != nil && s.ca.MaxPathLen > 0 { - if safeTemplate.MaxPathLen >= s.ca.MaxPathLen { - // do not sign a cert with pathlen > current - return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest) - } - } else if s.ca != nil && s.ca.MaxPathLen == 0 && s.ca.MaxPathLenZero { - // signer has pathlen of 0, do not sign more intermediate CAs - return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest) - } - } - - OverrideHosts(&safeTemplate, req.Hosts) - safeTemplate.Subject = PopulateSubjectFromCSR(req.Subject, safeTemplate.Subject) - - // If there is a whitelist, ensure that both the Common Name and SAN DNSNames match - if profile.NameWhitelist != nil { - if safeTemplate.Subject.CommonName != "" { - if profile.NameWhitelist.Find([]byte(safeTemplate.Subject.CommonName)) == nil { - return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy) - } - } - for _, name := range safeTemplate.DNSNames { - if profile.NameWhitelist.Find([]byte(name)) == nil { - return nil, 
cferr.New(cferr.PolicyError, cferr.InvalidPolicy) - } - } - for _, name := range safeTemplate.EmailAddresses { - if profile.NameWhitelist.Find([]byte(name)) == nil { - return nil, cferr.New(cferr.PolicyError, cferr.InvalidPolicy) - } - } - } - - if profile.ClientProvidesSerialNumbers { - if req.Serial == nil { - return nil, cferr.New(cferr.CertificateError, cferr.MissingSerial) - } - safeTemplate.SerialNumber = req.Serial - } else { - // RFC 5280 4.1.2.2: - // Certificate users MUST be able to handle serialNumber - // values up to 20 octets. Conforming CAs MUST NOT use - // serialNumber values longer than 20 octets. - // - // If CFSSL is providing the serial numbers, it makes - // sense to use the max supported size. - serialNumber := make([]byte, 20) - _, err = io.ReadFull(rand.Reader, serialNumber) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.Unknown, err) - } - - // SetBytes interprets buf as the bytes of a big-endian - // unsigned integer. The leading byte should be masked - // off to ensure it isn't negative. - serialNumber[0] &= 0x7F - - safeTemplate.SerialNumber = new(big.Int).SetBytes(serialNumber) - } - - if len(req.Extensions) > 0 { - for _, ext := range req.Extensions { - oid := asn1.ObjectIdentifier(ext.ID) - if !profile.ExtensionWhitelist[oid.String()] { - return nil, cferr.New(cferr.CertificateError, cferr.InvalidRequest) - } - - rawValue, err := hex.DecodeString(ext.Value) - if err != nil { - return nil, cferr.Wrap(cferr.CertificateError, cferr.InvalidRequest, err) - } - - safeTemplate.ExtraExtensions = append(safeTemplate.ExtraExtensions, pkix.Extension{ - Id: oid, - Critical: ext.Critical, - Value: rawValue, - }) - } - } - - var certTBS = safeTemplate - - if len(profile.CTLogServers) > 0 { - // Add a poison extension which prevents validation - var poisonExtension = pkix.Extension{Id: signer.CTPoisonOID, Critical: true, Value: []byte{0x05, 0x00}} - var poisonedPreCert = certTBS - poisonedPreCert.ExtraExtensions = append(safeTemplate.ExtraExtensions, poisonExtension) - cert, err = s.sign(&poisonedPreCert, profile) - if err != nil { - return - } - - derCert, _ := pem.Decode(cert) - prechain := []ct.ASN1Cert{derCert.Bytes, s.ca.Raw} - var sctList []ct.SignedCertificateTimestamp - - for _, server := range profile.CTLogServers { - log.Infof("submitting poisoned precertificate to %s", server) - var ctclient = client.New(server) - var resp *ct.SignedCertificateTimestamp - resp, err = ctclient.AddPreChain(prechain) - if err != nil { - return nil, cferr.Wrap(cferr.CTError, cferr.PrecertSubmissionFailed, err) - } - sctList = append(sctList, *resp) - } - - var serializedSCTList []byte - serializedSCTList, err = serializeSCTList(sctList) - if err != nil { - return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err) - } - - // Serialize again as an octet string before embedding - serializedSCTList, err = asn1.Marshal(serializedSCTList) - if err != nil { - return nil, cferr.Wrap(cferr.CTError, cferr.Unknown, err) - } - - var SCTListExtension = pkix.Extension{Id: signer.SCTListOID, Critical: false, Value: serializedSCTList} - certTBS.ExtraExtensions = append(certTBS.ExtraExtensions, SCTListExtension) - } - var signedCert []byte - signedCert, err = s.sign(&certTBS, profile) - if err != nil { - return nil, err - } - - if s.dbAccessor != nil { - var certRecord = certdb.CertificateRecord{ - Serial: certTBS.SerialNumber.String(), - // this relies on the specific behavior of x509.CreateCertificate - // which updates certTBS AuthorityKeyId from the signer's 
SubjectKeyId - AKI: hex.EncodeToString(certTBS.AuthorityKeyId), - CALabel: req.Label, - Status: "good", - Expiry: certTBS.NotAfter, - PEM: string(signedCert), - } - - err = s.dbAccessor.InsertCertificate(certRecord) - if err != nil { - return nil, err - } - log.Debug("saved certificate with serial number ", certTBS.SerialNumber) - } - - return signedCert, nil -} - -func serializeSCTList(sctList []ct.SignedCertificateTimestamp) ([]byte, error) { - var buf bytes.Buffer - for _, sct := range sctList { - sct, err := ct.SerializeSCT(sct) - if err != nil { - return nil, err - } - binary.Write(&buf, binary.BigEndian, uint16(len(sct))) - buf.Write(sct) - } - - var sctListLengthField = make([]byte, 2) - binary.BigEndian.PutUint16(sctListLengthField, uint16(buf.Len())) - return bytes.Join([][]byte{sctListLengthField, buf.Bytes()}, nil), nil -} - -// Info return a populated info.Resp struct or an error. -func (s *Signer) Info(req info.Req) (resp *info.Resp, err error) { - cert, err := s.Certificate(req.Label, req.Profile) - if err != nil { - return - } - - profile, err := signer.Profile(s, req.Profile) - if err != nil { - return - } - - resp = new(info.Resp) - if cert.Raw != nil { - resp.Certificate = string(bytes.TrimSpace(pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}))) - } - resp.Usage = profile.Usage - resp.ExpiryString = profile.ExpiryString - - return -} - -// SigAlgo returns the RSA signer's signature algorithm. -func (s *Signer) SigAlgo() x509.SignatureAlgorithm { - return s.sigAlgo -} - -// Certificate returns the signer's certificate. -func (s *Signer) Certificate(label, profile string) (*x509.Certificate, error) { - cert := *s.ca - return &cert, nil -} - -// SetPolicy sets the signer's signature policy. -func (s *Signer) SetPolicy(policy *config.Signing) { - s.policy = policy -} - -// SetDBAccessor sets the signers' cert db accessor -func (s *Signer) SetDBAccessor(dba certdb.Accessor) { - s.dbAccessor = dba -} - -// Policy returns the signer's policy. -func (s *Signer) Policy() *config.Signing { - return s.policy -} diff --git a/vendor/src/github.com/cloudflare/cfssl/signer/signer.go b/vendor/src/github.com/cloudflare/cfssl/signer/signer.go deleted file mode 100644 index 6b0c3ea6f1..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/signer/signer.go +++ /dev/null @@ -1,410 +0,0 @@ -// Package signer implements certificate signature functionality for CFSSL. -package signer - -import ( - "crypto" - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rsa" - "crypto/sha1" - "crypto/x509" - "crypto/x509/pkix" - "encoding/asn1" - "errors" - "math/big" - "strings" - "time" - - "github.com/cloudflare/cfssl/certdb" - "github.com/cloudflare/cfssl/config" - "github.com/cloudflare/cfssl/csr" - cferr "github.com/cloudflare/cfssl/errors" - "github.com/cloudflare/cfssl/helpers" - "github.com/cloudflare/cfssl/info" -) - -// MaxPathLen is the default path length for a new CA certificate. -var MaxPathLen = 2 - -// MaxPathLenZero indicates whether a new CA certificate has pathlen=0 -var MaxPathLenZero = false - -// Subject contains the information that should be used to override the -// subject information when signing a certificate. -type Subject struct { - CN string - Names []csr.Name `json:"names"` - SerialNumber string -} - -// Extension represents a raw extension to be included in the certificate. The -// "value" field must be hex encoded. 
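-//
-// For example (OID and value are illustrative only):
-//
-//	Extension{
-//		ID:       config.OID{1, 3, 6, 1, 4, 1, 99999, 1},
-//		Critical: false,
-//		Value:    "0500", // hex for the DER encoding of ASN.1 NULL
-//	}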
-type Extension struct {
-	ID       config.OID `json:"id"`
-	Critical bool       `json:"critical"`
-	Value    string     `json:"value"`
-}
-
-// SignRequest stores a signature request, which contains the hostname,
-// the CSR, optional subject information, and the signature profile.
-//
-// Extensions provided in the SignRequest are copied into the certificate, as
-// long as they are in the ExtensionWhitelist for the signer's policy.
-// Extensions requested in the CSR are ignored, except for those processed by
-// ParseCertificateRequest (mainly subjectAltName).
-type SignRequest struct {
-	Hosts       []string    `json:"hosts"`
-	Request     string      `json:"certificate_request"`
-	Subject     *Subject    `json:"subject,omitempty"`
-	Profile     string      `json:"profile"`
-	CRLOverride string      `json:"crl_override"`
-	Label       string      `json:"label"`
-	Serial      *big.Int    `json:"serial,omitempty"`
-	Extensions  []Extension `json:"extensions,omitempty"`
-}
-
-// appendIf appends s to *a if s is not an empty string.
-func appendIf(s string, a *[]string) {
-	if s != "" {
-		*a = append(*a, s)
-	}
-}
-
-// Name returns the PKIX name for the subject.
-func (s *Subject) Name() pkix.Name {
-	var name pkix.Name
-	name.CommonName = s.CN
-
-	for _, n := range s.Names {
-		appendIf(n.C, &name.Country)
-		appendIf(n.ST, &name.Province)
-		appendIf(n.L, &name.Locality)
-		appendIf(n.O, &name.Organization)
-		appendIf(n.OU, &name.OrganizationalUnit)
-	}
-	name.SerialNumber = s.SerialNumber
-	return name
-}
-
-// SplitHosts takes a comma-separated list of hosts and returns a slice
-// of the individual hosts.
-func SplitHosts(hostList string) []string {
-	if hostList == "" {
-		return nil
-	}
-
-	return strings.Split(hostList, ",")
-}
-
-// A Signer contains a CA's certificate and private key for signing
-// certificates, a Signing policy to refer to, and a SignatureAlgorithm.
-type Signer interface {
-	Info(info.Req) (*info.Resp, error)
-	Policy() *config.Signing
-	SetDBAccessor(certdb.Accessor)
-	SetPolicy(*config.Signing)
-	SigAlgo() x509.SignatureAlgorithm
-	Sign(req SignRequest) (cert []byte, err error)
-}
-
-// Profile gets the specific profile from the signer.
-func Profile(s Signer, profile string) (*config.SigningProfile, error) {
-	var p *config.SigningProfile
-	policy := s.Policy()
-	if policy != nil && policy.Profiles != nil && profile != "" {
-		p = policy.Profiles[profile]
-	}
-
-	if p == nil && policy != nil {
-		p = policy.Default
-	}
-
-	if p == nil {
-		return nil, cferr.Wrap(cferr.APIClientError, cferr.ClientHTTPError, errors.New("profile must not be nil"))
-	}
-	return p, nil
-}
-
-// DefaultSigAlgo returns an appropriate X.509 signature algorithm given
-// the CA's private key.
-func DefaultSigAlgo(priv crypto.Signer) x509.SignatureAlgorithm {
-	pub := priv.Public()
-	switch pub := pub.(type) {
-	case *rsa.PublicKey:
-		keySize := pub.N.BitLen()
-		switch {
-		case keySize >= 4096:
-			return x509.SHA512WithRSA
-		case keySize >= 3072:
-			return x509.SHA384WithRSA
-		case keySize >= 2048:
-			return x509.SHA256WithRSA
-		default:
-			return x509.SHA1WithRSA
-		}
-	case *ecdsa.PublicKey:
-		switch pub.Curve {
-		case elliptic.P256():
-			return x509.ECDSAWithSHA256
-		case elliptic.P384():
-			return x509.ECDSAWithSHA384
-		case elliptic.P521():
-			return x509.ECDSAWithSHA512
-		default:
-			return x509.ECDSAWithSHA1
-		}
-	default:
-		return x509.UnknownSignatureAlgorithm
-	}
-}
-
-// ParseCertificateRequest takes an incoming certificate request and
-// builds a certificate template from it.
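-// The CSR's signature is verified before any fields are copied, and a
-// BasicConstraints extension present in the CSR is carried over into the
-// template's IsCA and MaxPathLen fields.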
-func ParseCertificateRequest(s Signer, csrBytes []byte) (template *x509.Certificate, err error) { - csrv, err := x509.ParseCertificateRequest(csrBytes) - if err != nil { - err = cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err) - return - } - - err = helpers.CheckSignature(csrv, csrv.SignatureAlgorithm, csrv.RawTBSCertificateRequest, csrv.Signature) - if err != nil { - err = cferr.Wrap(cferr.CSRError, cferr.KeyMismatch, err) - return - } - - template = &x509.Certificate{ - Subject: csrv.Subject, - PublicKeyAlgorithm: csrv.PublicKeyAlgorithm, - PublicKey: csrv.PublicKey, - SignatureAlgorithm: s.SigAlgo(), - DNSNames: csrv.DNSNames, - IPAddresses: csrv.IPAddresses, - EmailAddresses: csrv.EmailAddresses, - } - - for _, val := range csrv.Extensions { - // Check the CSR for the X.509 BasicConstraints (RFC 5280, 4.2.1.9) - // extension and append to template if necessary - if val.Id.Equal(asn1.ObjectIdentifier{2, 5, 29, 19}) { - var constraints csr.BasicConstraints - var rest []byte - - if rest, err = asn1.Unmarshal(val.Value, &constraints); err != nil { - return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, err) - } else if len(rest) != 0 { - return nil, cferr.Wrap(cferr.CSRError, cferr.ParseFailed, errors.New("x509: trailing data after X.509 BasicConstraints")) - } - - template.BasicConstraintsValid = true - template.IsCA = constraints.IsCA - template.MaxPathLen = constraints.MaxPathLen - template.MaxPathLenZero = template.MaxPathLen == 0 - } - } - - return -} - -type subjectPublicKeyInfo struct { - Algorithm pkix.AlgorithmIdentifier - SubjectPublicKey asn1.BitString -} - -// ComputeSKI derives an SKI from the certificate's public key in a -// standard manner. This is done by computing the SHA-1 digest of the -// SubjectPublicKeyInfo component of the certificate. -func ComputeSKI(template *x509.Certificate) ([]byte, error) { - pub := template.PublicKey - encodedPub, err := x509.MarshalPKIXPublicKey(pub) - if err != nil { - return nil, err - } - - var subPKI subjectPublicKeyInfo - _, err = asn1.Unmarshal(encodedPub, &subPKI) - if err != nil { - return nil, err - } - - pubHash := sha1.Sum(subPKI.SubjectPublicKey.Bytes) - return pubHash[:], nil -} - -// FillTemplate is a utility function that tries to load as much of -// the certificate template as possible from the profiles and current -// template. It fills in the key uses, expiration, revocation URLs -// and SKI. -func FillTemplate(template *x509.Certificate, defaultProfile, profile *config.SigningProfile) error { - ski, err := ComputeSKI(template) - - var ( - eku []x509.ExtKeyUsage - ku x509.KeyUsage - backdate time.Duration - expiry time.Duration - notBefore time.Time - notAfter time.Time - crlURL, ocspURL string - issuerURL = profile.IssuerURL - ) - - // The third value returned from Usages is a list of unknown key usages. - // This should be used when validating the profile at load, and isn't used - // here. 
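-	// Values the named profile leaves unset (issuer URL, expiry, CRL and
-	// OCSP URLs) fall back to the corresponding defaultProfile values below.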
- ku, eku, _ = profile.Usages() - if profile.IssuerURL == nil { - issuerURL = defaultProfile.IssuerURL - } - - if ku == 0 && len(eku) == 0 { - return cferr.New(cferr.PolicyError, cferr.NoKeyUsages) - } - - if expiry = profile.Expiry; expiry == 0 { - expiry = defaultProfile.Expiry - } - - if crlURL = profile.CRL; crlURL == "" { - crlURL = defaultProfile.CRL - } - if ocspURL = profile.OCSP; ocspURL == "" { - ocspURL = defaultProfile.OCSP - } - if backdate = profile.Backdate; backdate == 0 { - backdate = -5 * time.Minute - } else { - backdate = -1 * profile.Backdate - } - - if !profile.NotBefore.IsZero() { - notBefore = profile.NotBefore.UTC() - } else { - notBefore = time.Now().Round(time.Minute).Add(backdate).UTC() - } - - if !profile.NotAfter.IsZero() { - notAfter = profile.NotAfter.UTC() - } else { - notAfter = notBefore.Add(expiry).UTC() - } - - template.NotBefore = notBefore - template.NotAfter = notAfter - template.KeyUsage = ku - template.ExtKeyUsage = eku - template.BasicConstraintsValid = true - template.IsCA = profile.CA - template.SubjectKeyId = ski - - if ocspURL != "" { - template.OCSPServer = []string{ocspURL} - } - if crlURL != "" { - template.CRLDistributionPoints = []string{crlURL} - } - - if len(issuerURL) != 0 { - template.IssuingCertificateURL = issuerURL - } - if len(profile.Policies) != 0 { - err = addPolicies(template, profile.Policies) - if err != nil { - return cferr.Wrap(cferr.PolicyError, cferr.InvalidPolicy, err) - } - } - if profile.OCSPNoCheck { - ocspNoCheckExtension := pkix.Extension{ - Id: asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 48, 1, 5}, - Critical: false, - Value: []byte{0x05, 0x00}, - } - template.ExtraExtensions = append(template.ExtraExtensions, ocspNoCheckExtension) - } - - return nil -} - -type policyInformation struct { - PolicyIdentifier asn1.ObjectIdentifier - Qualifiers []interface{} `asn1:"tag:optional,omitempty"` -} - -type cpsPolicyQualifier struct { - PolicyQualifierID asn1.ObjectIdentifier - Qualifier string `asn1:"tag:optional,ia5"` -} - -type userNotice struct { - ExplicitText string `asn1:"tag:optional,utf8"` -} -type userNoticePolicyQualifier struct { - PolicyQualifierID asn1.ObjectIdentifier - Qualifier userNotice -} - -var ( - // Per https://tools.ietf.org/html/rfc3280.html#page-106, this represents: - // iso(1) identified-organization(3) dod(6) internet(1) security(5) - // mechanisms(5) pkix(7) id-qt(2) id-qt-cps(1) - iDQTCertificationPracticeStatement = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 1} - // iso(1) identified-organization(3) dod(6) internet(1) security(5) - // mechanisms(5) pkix(7) id-qt(2) id-qt-unotice(2) - iDQTUserNotice = asn1.ObjectIdentifier{1, 3, 6, 1, 5, 5, 7, 2, 2} - - // CTPoisonOID is the object ID of the critical poison extension for precertificates - // https://tools.ietf.org/html/rfc6962#page-9 - CTPoisonOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 3} - - // SCTListOID is the object ID for the Signed Certificate Timestamp certificate extension - // https://tools.ietf.org/html/rfc6962#page-14 - SCTListOID = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11129, 2, 4, 2} -) - -// addPolicies adds Certificate Policies and optional Policy Qualifiers to a -// certificate, based on the input config. Go's x509 library allows setting -// Certificate Policies easily, but does not support nested Policy Qualifiers -// under those policies. So we need to construct the ASN.1 structure ourselves. 
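-//
-// For example (a sketch; the qualifier element's type name is assumed):
-//
-//	config.CertificatePolicy{
-//		ID: config.OID{1, 3, 6, 1, 4, 1, 99999, 1, 1},
-//		Qualifiers: []config.CertificatePolicyQualifier{
-//			{Type: "id-qt-cps", Value: "https://example.com/cps"},
-//		},
-//	}
-//
-// is marshaled into a certificatePolicies extension (OID 2.5.29.32)
-// holding a single policy with a CPS URI qualifier.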
-func addPolicies(template *x509.Certificate, policies []config.CertificatePolicy) error { - asn1PolicyList := []policyInformation{} - - for _, policy := range policies { - pi := policyInformation{ - // The PolicyIdentifier is an OID assigned to a given issuer. - PolicyIdentifier: asn1.ObjectIdentifier(policy.ID), - } - for _, qualifier := range policy.Qualifiers { - switch qualifier.Type { - case "id-qt-unotice": - pi.Qualifiers = append(pi.Qualifiers, - userNoticePolicyQualifier{ - PolicyQualifierID: iDQTUserNotice, - Qualifier: userNotice{ - ExplicitText: qualifier.Value, - }, - }) - case "id-qt-cps": - pi.Qualifiers = append(pi.Qualifiers, - cpsPolicyQualifier{ - PolicyQualifierID: iDQTCertificationPracticeStatement, - Qualifier: qualifier.Value, - }) - default: - return errors.New("Invalid qualifier type in Policies " + qualifier.Type) - } - } - asn1PolicyList = append(asn1PolicyList, pi) - } - - asn1Bytes, err := asn1.Marshal(asn1PolicyList) - if err != nil { - return err - } - - template.ExtraExtensions = append(template.ExtraExtensions, pkix.Extension{ - Id: asn1.ObjectIdentifier{2, 5, 29, 32}, - Critical: false, - Value: asn1Bytes, - }) - return nil -} diff --git a/vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE b/vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE deleted file mode 100644 index 2387f30269..0000000000 --- a/vendor/src/github.com/cloudflare/cfssl/whitelist/LICENSE +++ /dev/null @@ -1,13 +0,0 @@ -Copyright (c) 2014 Kyle Isom - -Permission to use, copy, modify, and distribute this software for any -purpose with or without fee is hereby granted, provided that the above -copyright notice and this permission notice appear in all copies. - -THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES -WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR -ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES -WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN -ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF -OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/vendor/src/github.com/coreos/etcd/LICENSE b/vendor/src/github.com/coreos/etcd/LICENSE deleted file mode 100644 index d645695673..0000000000 --- a/vendor/src/github.com/coreos/etcd/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/src/github.com/coreos/etcd/client/README.md b/vendor/src/github.com/coreos/etcd/client/README.md deleted file mode 100644 index e9e4be468e..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/README.md +++ /dev/null @@ -1,110 +0,0 @@ -# etcd/client - -etcd/client is the Go client library for etcd. 
-
-[![GoDoc](https://godoc.org/github.com/coreos/etcd/client?status.png)](https://godoc.org/github.com/coreos/etcd/client)
-
-## Install
-
-```bash
-go get github.com/coreos/etcd/client
-```
-
-## Usage
-
-```go
-package main
-
-import (
-	"log"
-	"time"
-
-	"github.com/coreos/etcd/Godeps/_workspace/src/golang.org/x/net/context"
-	"github.com/coreos/etcd/client"
-)
-
-func main() {
-	cfg := client.Config{
-		Endpoints: []string{"http://127.0.0.1:2379"},
-		Transport: client.DefaultTransport,
-		// set timeout per request to fail fast when the target endpoint is unavailable
-		HeaderTimeoutPerRequest: time.Second,
-	}
-	c, err := client.New(cfg)
-	if err != nil {
-		log.Fatal(err)
-	}
-	kapi := client.NewKeysAPI(c)
-	// set "/foo" key with "bar" value
-	log.Print("Setting '/foo' key with 'bar' value")
-	resp, err := kapi.Set(context.Background(), "/foo", "bar", nil)
-	if err != nil {
-		log.Fatal(err)
-	} else {
-		// print common key info
-		log.Printf("Set is done. Metadata is %q\n", resp)
-	}
-	// get "/foo" key's value
-	log.Print("Getting '/foo' key value")
-	resp, err = kapi.Get(context.Background(), "/foo", nil)
-	if err != nil {
-		log.Fatal(err)
-	} else {
-		// print common key info
-		log.Printf("Get is done. Metadata is %q\n", resp)
-		// print value
-		log.Printf("%q key has %q value\n", resp.Node.Key, resp.Node.Value)
-	}
-}
-```
-
-## Error Handling
-
-etcd/client may return three types of errors.
-
-- context error
-
-Each API call takes a `context` as its first parameter. A context can be canceled or have an attached deadline. If the context is canceled or reaches its deadline, the corresponding context error will be returned no matter what internal errors the API call has already encountered.
-
-- cluster error
-
-Each API call tries the cluster endpoints one by one until it successfully gets a response. If a request to an endpoint fails, due to exceeding the per-request timeout or connection issues, the error is added to a list of errors. If all possible endpoints fail, a cluster error that includes all encountered errors will be returned.
-
-- response error
-
-If the response received from the cluster is invalid, a plain string error will be returned. For example, it might be an invalid JSON error.
-
-Here is example code that handles client errors:
-
-```go
-cfg := client.Config{Endpoints: []string{"http://etcd1:2379", "http://etcd2:2379", "http://etcd3:2379"}}
-c, err := client.New(cfg)
-if err != nil {
-	log.Fatal(err)
-}
-
-kapi := client.NewKeysAPI(c)
-resp, err := kapi.Set(ctx, "test", "bar", nil)
-if err != nil {
-	if err == context.Canceled {
-		// ctx is canceled by another routine
-	} else if err == context.DeadlineExceeded {
-		// ctx is attached with a deadline and it exceeded
-	} else if cerr, ok := err.(*client.ClusterError); ok {
-		// process (cerr.Errors)
-	} else {
-		// bad cluster endpoints, which are not etcd servers
-	}
-}
-```
-
-## Caveat
-
-1. etcd/client prefers to use the same endpoint as long as that endpoint continues to work well. This saves socket resources and improves efficiency for both the client and the server. This preference doesn't weaken the consistency of the data consumed by the client, because data replicated to each etcd member has already passed through the consensus process.
-
-2. etcd/client does round-robin rotation on other available endpoints if the preferred endpoint isn't functioning properly.
For example, if the member that etcd/client connects to is hard killed, etcd/client will fail on the first attempt with the killed member, and succeed on the second attempt with another member. If it fails to talk to all available endpoints, it will return all the errors it encountered.
-
-3. By default, etcd/client cannot handle the case where the remote server is SIGSTOPed. The TCP keepalive mechanism doesn't help here, because the operating system on the stopped server may still answer keep-alive packets on behalf of the stopped process. Over time we'd like to improve this, but solving the issue isn't a high priority, because a real-life case in which a server is stopped but the connection is kept alive hasn't been brought to our attention.
-
-4. etcd/client cannot detect whether the member in use is healthy when doing read requests. If the member is isolated from the cluster, etcd/client may retrieve outdated data. As a workaround, users can monitor the experimental /health endpoint for member health information. We are improving this at [#3265](https://github.com/coreos/etcd/issues/3265).
diff --git a/vendor/src/github.com/coreos/etcd/client/auth_role.go b/vendor/src/github.com/coreos/etcd/client/auth_role.go
deleted file mode 100644
index 0f6748bdfa..0000000000
--- a/vendor/src/github.com/coreos/etcd/client/auth_role.go
+++ /dev/null
@@ -1,237 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package client
-
-import (
-	"bytes"
-	"encoding/json"
-	"net/http"
-	"net/url"
-
-	"golang.org/x/net/context"
-)
-
-type Role struct {
-	Role        string       `json:"role"`
-	Permissions Permissions  `json:"permissions"`
-	Grant       *Permissions `json:"grant,omitempty"`
-	Revoke      *Permissions `json:"revoke,omitempty"`
-}
-
-type Permissions struct {
-	KV rwPermission `json:"kv"`
-}
-
-type rwPermission struct {
-	Read  []string `json:"read"`
-	Write []string `json:"write"`
-}
-
-type PermissionType int
-
-const (
-	ReadPermission PermissionType = iota
-	WritePermission
-	ReadWritePermission
-)
-
-// NewAuthRoleAPI constructs a new AuthRoleAPI that uses HTTP to
-// interact with etcd's role creation and modification features.
-func NewAuthRoleAPI(c Client) AuthRoleAPI {
-	return &httpAuthRoleAPI{
-		client: c,
-	}
-}
-
-type AuthRoleAPI interface {
-	// AddRole adds a role.
-	AddRole(ctx context.Context, role string) error
-
-	// RemoveRole removes a role.
-	RemoveRole(ctx context.Context, role string) error
-
-	// GetRole retrieves role details.
-	GetRole(ctx context.Context, role string) (*Role, error)
-
-	// GrantRoleKV grants a role some permission prefixes for the KV store.
-	GrantRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
-	// RevokeRoleKV revokes some permission prefixes for a role on the KV store.
-	RevokeRoleKV(ctx context.Context, role string, prefixes []string, permType PermissionType) (*Role, error)
-
-	// ListRoles lists roles.
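-	// Only role names are returned; use GetRole for full details.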
- ListRoles(ctx context.Context) ([]string, error) -} - -type httpAuthRoleAPI struct { - client httpClient -} - -type authRoleAPIAction struct { - verb string - name string - role *Role -} - -type authRoleAPIList struct{} - -func (list *authRoleAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authRoleAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "roles", l.name) - if l.role == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.role) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (r *httpAuthRoleAPI) ListRoles(ctx context.Context) ([]string, error) { - resp, body, err := r.client.Do(ctx, &authRoleAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - var roleList struct { - Roles []Role `json:"roles"` - } - if err = json.Unmarshal(body, &roleList); err != nil { - return nil, err - } - ret := make([]string, 0, len(roleList.Roles)) - for _, r := range roleList.Roles { - ret = append(ret, r.Role) - } - return ret, nil -} - -func (r *httpAuthRoleAPI) AddRole(ctx context.Context, rolename string) error { - role := &Role{ - Role: rolename, - } - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RemoveRole(ctx context.Context, rolename string) error { - return r.addRemoveRole(ctx, &authRoleAPIAction{ - verb: "DELETE", - name: rolename, - }) -} - -func (r *httpAuthRoleAPI) addRemoveRole(ctx context.Context, req *authRoleAPIAction) error { - resp, body, err := r.client.Do(ctx, req) - if err != nil { - return err - } - if err := assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err := json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (r *httpAuthRoleAPI) GetRole(ctx context.Context, rolename string) (*Role, error) { - return r.modRole(ctx, &authRoleAPIAction{ - verb: "GET", - name: rolename, - }) -} - -func buildRWPermission(prefixes []string, permType PermissionType) rwPermission { - var out rwPermission - switch permType { - case ReadPermission: - out.Read = prefixes - case WritePermission: - out.Write = prefixes - case ReadWritePermission: - out.Read = prefixes - out.Write = prefixes - } - return out -} - -func (r *httpAuthRoleAPI) GrantRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Grant: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) RevokeRoleKV(ctx context.Context, rolename string, prefixes []string, permType PermissionType) (*Role, error) { - rwp := buildRWPermission(prefixes, permType) - role := &Role{ - Role: rolename, - Revoke: &Permissions{ - KV: rwp, - }, - } - return r.modRole(ctx, &authRoleAPIAction{ - verb: "PUT", - name: rolename, - role: role, - }) -} - -func (r *httpAuthRoleAPI) modRole(ctx context.Context, req *authRoleAPIAction) (*Role, error) { - resp, body, err 
:= r.client.Do(ctx, req) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var role Role - if err = json.Unmarshal(body, &role); err != nil { - return nil, err - } - return &role, nil -} diff --git a/vendor/src/github.com/coreos/etcd/client/auth_user.go b/vendor/src/github.com/coreos/etcd/client/auth_user.go deleted file mode 100644 index 0b0e091551..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/auth_user.go +++ /dev/null @@ -1,324 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "net/http" - "net/url" - "path" - - "golang.org/x/net/context" -) - -var ( - defaultV2AuthPrefix = "/v2/auth" -) - -type User struct { - User string `json:"user"` - Password string `json:"password,omitempty"` - Roles []string `json:"roles"` - Grant []string `json:"grant,omitempty"` - Revoke []string `json:"revoke,omitempty"` -} - -// userListEntry is the user representation given by the server for ListUsers -type userListEntry struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -type UserRoles struct { - User string `json:"user"` - Roles []Role `json:"roles"` -} - -type userName struct { - User string `json:"user"` -} - -func v2AuthURL(ep url.URL, action string, name string) *url.URL { - if name != "" { - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action, name) - return &ep - } - ep.Path = path.Join(ep.Path, defaultV2AuthPrefix, action) - return &ep -} - -// NewAuthAPI constructs a new AuthAPI that uses HTTP to -// interact with etcd's general auth features. -func NewAuthAPI(c Client) AuthAPI { - return &httpAuthAPI{ - client: c, - } -} - -type AuthAPI interface { - // Enable auth. - Enable(ctx context.Context) error - - // Disable auth. 
- Disable(ctx context.Context) error -} - -type httpAuthAPI struct { - client httpClient -} - -func (s *httpAuthAPI) Enable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"PUT"}) -} - -func (s *httpAuthAPI) Disable(ctx context.Context) error { - return s.enableDisable(ctx, &authAPIAction{"DELETE"}) -} - -func (s *httpAuthAPI) enableDisable(ctx context.Context, req httpAction) error { - resp, body, err := s.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -type authAPIAction struct { - verb string -} - -func (l *authAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "enable", "") - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req -} - -type authError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e authError) Error() string { - return e.Message -} - -// NewAuthUserAPI constructs a new AuthUserAPI that uses HTTP to -// interact with etcd's user creation and modification features. -func NewAuthUserAPI(c Client) AuthUserAPI { - return &httpAuthUserAPI{ - client: c, - } -} - -type AuthUserAPI interface { - // AddUser adds a user. - AddUser(ctx context.Context, username string, password string) error - - // RemoveUser removes a user. - RemoveUser(ctx context.Context, username string) error - - // GetUser retrieves user details. - GetUser(ctx context.Context, username string) (*User, error) - - // GrantUser grants a user some permission roles. - GrantUser(ctx context.Context, username string, roles []string) (*User, error) - - // RevokeUser revokes some permission roles from a user. - RevokeUser(ctx context.Context, username string, roles []string) (*User, error) - - // ChangePassword changes the user's password. - ChangePassword(ctx context.Context, username string, password string) (*User, error) - - // ListUsers lists the users. 
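-	// Only usernames are returned; use GetUser for full details.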
- ListUsers(ctx context.Context) ([]string, error) -} - -type httpAuthUserAPI struct { - client httpClient -} - -type authUserAPIAction struct { - verb string - username string - user *User -} - -type authUserAPIList struct{} - -func (list *authUserAPIList) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", "") - req, _ := http.NewRequest("GET", u.String(), nil) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (l *authUserAPIAction) HTTPRequest(ep url.URL) *http.Request { - u := v2AuthURL(ep, "users", l.username) - if l.user == nil { - req, _ := http.NewRequest(l.verb, u.String(), nil) - return req - } - b, err := json.Marshal(l.user) - if err != nil { - panic(err) - } - body := bytes.NewReader(b) - req, _ := http.NewRequest(l.verb, u.String(), body) - req.Header.Set("Content-Type", "application/json") - return req -} - -func (u *httpAuthUserAPI) ListUsers(ctx context.Context) ([]string, error) { - resp, body, err := u.client.Do(ctx, &authUserAPIList{}) - if err != nil { - return nil, err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - - var userList struct { - Users []userListEntry `json:"users"` - } - - if err = json.Unmarshal(body, &userList); err != nil { - return nil, err - } - - ret := make([]string, 0, len(userList.Users)) - for _, u := range userList.Users { - ret = append(ret, u.User) - } - return ret, nil -} - -func (u *httpAuthUserAPI) AddUser(ctx context.Context, username string, password string) error { - user := &User{ - User: username, - Password: password, - } - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RemoveUser(ctx context.Context, username string) error { - return u.addRemoveUser(ctx, &authUserAPIAction{ - verb: "DELETE", - username: username, - }) -} - -func (u *httpAuthUserAPI) addRemoveUser(ctx context.Context, req *authUserAPIAction) error { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK, http.StatusCreated); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return err - } - return sec - } - return nil -} - -func (u *httpAuthUserAPI) GetUser(ctx context.Context, username string) (*User, error) { - return u.modUser(ctx, &authUserAPIAction{ - verb: "GET", - username: username, - }) -} - -func (u *httpAuthUserAPI) GrantUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Grant: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) RevokeUser(ctx context.Context, username string, roles []string) (*User, error) { - user := &User{ - User: username, - Revoke: roles, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) ChangePassword(ctx context.Context, username string, password string) (*User, error) { - user := &User{ - User: username, - Password: password, - } - return u.modUser(ctx, &authUserAPIAction{ - verb: "PUT", - username: username, - user: user, - }) -} - -func (u *httpAuthUserAPI) modUser(ctx context.Context, req *authUserAPIAction) (*User, error) { - resp, body, err := u.client.Do(ctx, req) - if err != nil { - return nil, 
err - } - if err = assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - var sec authError - err = json.Unmarshal(body, &sec) - if err != nil { - return nil, err - } - return nil, sec - } - var user User - if err = json.Unmarshal(body, &user); err != nil { - var userR UserRoles - if urerr := json.Unmarshal(body, &userR); urerr != nil { - return nil, err - } - user.User = userR.User - for _, r := range userR.Roles { - user.Roles = append(user.Roles, r.Role) - } - } - return &user, nil -} diff --git a/vendor/src/github.com/coreos/etcd/client/cancelreq.go b/vendor/src/github.com/coreos/etcd/client/cancelreq.go deleted file mode 100644 index fefdb40e42..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/cancelreq.go +++ /dev/null @@ -1,20 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq.go - -// +build go1.5 - -package client - -import "net/http" - -func requestCanceler(tr CancelableTransport, req *http.Request) func() { - ch := make(chan struct{}) - req.Cancel = ch - - return func() { - close(ch) - } -} diff --git a/vendor/src/github.com/coreos/etcd/client/cancelreq_go14.go b/vendor/src/github.com/coreos/etcd/client/cancelreq_go14.go deleted file mode 100644 index 2bed38a418..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/cancelreq_go14.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// borrowed from golang/net/context/ctxhttp/cancelreq_go14.go - -// +build !go1.5 - -package client - -import "net/http" - -func requestCanceler(tr CancelableTransport, req *http.Request) func() { - return func() { - tr.CancelRequest(req) - } -} diff --git a/vendor/src/github.com/coreos/etcd/client/client.go b/vendor/src/github.com/coreos/etcd/client/client.go deleted file mode 100644 index 2aaa112ed3..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/client.go +++ /dev/null @@ -1,598 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
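
The two cancelreq variants removed above exist only to abort an in-flight HTTP request portably across Go versions: on Go 1.5+ the client closes a per-request req.Cancel channel, while pre-1.5 builds fall back to Transport.CancelRequest. A minimal standalone sketch of the newer mechanism they wrap (the endpoint URL is a placeholder; needs "net/http", "time", and "fmt"):

    req, _ := http.NewRequest("GET", "http://127.0.0.1:2379/v2/keys/foo", nil)
    cancel := make(chan struct{})
    req.Cancel = cancel // closing this channel aborts the round trip (Go 1.5+)
    go func() {
        time.Sleep(100 * time.Millisecond)
        close(cancel) // equivalent to calling the func returned by requestCanceler
    }()
    if _, err := http.DefaultTransport.RoundTrip(req); err != nil {
        fmt.Println("round trip ended:", err) // typically "net/http: request canceled"
    }
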
- -package client - -import ( - "errors" - "fmt" - "io/ioutil" - "math/rand" - "net" - "net/http" - "net/url" - "reflect" - "sort" - "strconv" - "sync" - "time" - - "golang.org/x/net/context" -) - -var ( - ErrNoEndpoints = errors.New("client: no endpoints available") - ErrTooManyRedirects = errors.New("client: too many redirects") - ErrClusterUnavailable = errors.New("client: etcd cluster is unavailable or misconfigured") - ErrNoLeaderEndpoint = errors.New("client: no leader endpoint available") - errTooManyRedirectChecks = errors.New("client: too many redirect checks") -) - -var DefaultRequestTimeout = 5 * time.Second - -var DefaultTransport CancelableTransport = &http.Transport{ - Proxy: http.ProxyFromEnvironment, - Dial: (&net.Dialer{ - Timeout: 30 * time.Second, - KeepAlive: 30 * time.Second, - }).Dial, - TLSHandshakeTimeout: 10 * time.Second, -} - -type EndpointSelectionMode int - -const ( - // EndpointSelectionRandom is the default value of the 'SelectionMode'. - // As the name implies, the client object will pick a node from the members - // of the cluster in a random fashion. If the cluster has three members, A, B, - // and C, the client picks any node from its three members as its request - // destination. - EndpointSelectionRandom EndpointSelectionMode = iota - - // If 'SelectionMode' is set to 'EndpointSelectionPrioritizeLeader', - // requests are sent directly to the cluster leader. This reduces - // forwarding roundtrips compared to making requests to etcd followers - // who then forward them to the cluster leader. In the event of a leader - // failure, however, clients configured this way cannot prioritize among - // the remaining etcd followers. Therefore, when a client sets 'SelectionMode' - // to 'EndpointSelectionPrioritizeLeader', it must use 'client.AutoSync()' to - // maintain its knowledge of current cluster state. - // - // This mode should be used with Client.AutoSync(). - EndpointSelectionPrioritizeLeader -) - -type Config struct { - // Endpoints defines a set of URLs (schemes, hosts and ports only) - // that can be used to communicate with a logical etcd cluster. For - // example, a three-node cluster could be provided like so: - // - // Endpoints: []string{ - // "http://node1.example.com:2379", - // "http://node2.example.com:2379", - // "http://node3.example.com:2379", - // } - // - // If multiple endpoints are provided, the Client will attempt to - // use them all in the event that one or more of them are unusable. - // - // If Client.Sync is ever called, the Client may cache an alternate - // set of endpoints to continue operation. - Endpoints []string - - // Transport is used by the Client to drive HTTP requests. If not - // provided, DefaultTransport will be used. - Transport CancelableTransport - - // CheckRedirect specifies the policy for handling HTTP redirects. - // If CheckRedirect is not nil, the Client calls it before - // following an HTTP redirect. The sole argument is the number of - // requests that have already been made. If CheckRedirect returns - // an error, Client.Do will not make any further requests and return - // the error back it to the caller. - // - // If CheckRedirect is nil, the Client uses its default policy, - // which is to stop after 10 consecutive requests. - CheckRedirect CheckRedirectFunc - - // Username specifies the user credential to add as an authorization header - Username string - - // Password is the password for the specified user to add as an authorization header - // to the request. 
- Password string - - // HeaderTimeoutPerRequest specifies the time limit to wait for response - // header in a single request made by the Client. The timeout includes - // connection time, any redirects, and header wait time. - // - // For non-watch GET request, server returns the response body immediately. - // For PUT/POST/DELETE request, server will attempt to commit request - // before responding, which is expected to take `100ms + 2 * RTT`. - // For watch request, server returns the header immediately to notify Client - // watch start. But if server is behind some kind of proxy, the response - // header may be cached at proxy, and Client cannot rely on this behavior. - // - // Especially, wait request will ignore this timeout. - // - // One API call may send multiple requests to different etcd servers until it - // succeeds. Use context of the API to specify the overall timeout. - // - // A HeaderTimeoutPerRequest of zero means no timeout. - HeaderTimeoutPerRequest time.Duration - - // SelectionMode is an EndpointSelectionMode enum that specifies the - // policy for choosing the etcd cluster node to which requests are sent. - SelectionMode EndpointSelectionMode -} - -func (cfg *Config) transport() CancelableTransport { - if cfg.Transport == nil { - return DefaultTransport - } - return cfg.Transport -} - -func (cfg *Config) checkRedirect() CheckRedirectFunc { - if cfg.CheckRedirect == nil { - return DefaultCheckRedirect - } - return cfg.CheckRedirect -} - -// CancelableTransport mimics net/http.Transport, but requires that -// the object also support request cancellation. -type CancelableTransport interface { - http.RoundTripper - CancelRequest(req *http.Request) -} - -type CheckRedirectFunc func(via int) error - -// DefaultCheckRedirect follows up to 10 redirects, but no more. -var DefaultCheckRedirect CheckRedirectFunc = func(via int) error { - if via > 10 { - return ErrTooManyRedirects - } - return nil -} - -type Client interface { - // Sync updates the internal cache of the etcd cluster's membership. - Sync(context.Context) error - - // AutoSync periodically calls Sync() every given interval. - // The recommended sync interval is 10 seconds to 1 minute, which does - // not bring too much overhead to server and makes client catch up the - // cluster change in time. - // - // The example to use it: - // - // for { - // err := client.AutoSync(ctx, 10*time.Second) - // if err == context.DeadlineExceeded || err == context.Canceled { - // break - // } - // log.Print(err) - // } - AutoSync(context.Context, time.Duration) error - - // Endpoints returns a copy of the current set of API endpoints used - // by Client to resolve HTTP requests. If Sync has ever been called, - // this may differ from the initial Endpoints provided in the Config. - Endpoints() []string - - // SetEndpoints sets the set of API endpoints used by Client to resolve - // HTTP requests. 
If the given endpoints are not valid, an error will be - // returned - SetEndpoints(eps []string) error - - httpClient -} - -func New(cfg Config) (Client, error) { - c := &httpClusterClient{ - clientFactory: newHTTPClientFactory(cfg.transport(), cfg.checkRedirect(), cfg.HeaderTimeoutPerRequest), - rand: rand.New(rand.NewSource(int64(time.Now().Nanosecond()))), - selectionMode: cfg.SelectionMode, - } - if cfg.Username != "" { - c.credentials = &credentials{ - username: cfg.Username, - password: cfg.Password, - } - } - if err := c.SetEndpoints(cfg.Endpoints); err != nil { - return nil, err - } - return c, nil -} - -type httpClient interface { - Do(context.Context, httpAction) (*http.Response, []byte, error) -} - -func newHTTPClientFactory(tr CancelableTransport, cr CheckRedirectFunc, headerTimeout time.Duration) httpClientFactory { - return func(ep url.URL) httpClient { - return &redirectFollowingHTTPClient{ - checkRedirect: cr, - client: &simpleHTTPClient{ - transport: tr, - endpoint: ep, - headerTimeout: headerTimeout, - }, - } - } -} - -type credentials struct { - username string - password string -} - -type httpClientFactory func(url.URL) httpClient - -type httpAction interface { - HTTPRequest(url.URL) *http.Request -} - -type httpClusterClient struct { - clientFactory httpClientFactory - endpoints []url.URL - pinned int - credentials *credentials - sync.RWMutex - rand *rand.Rand - selectionMode EndpointSelectionMode -} - -func (c *httpClusterClient) getLeaderEndpoint() (string, error) { - mAPI := NewMembersAPI(c) - leader, err := mAPI.Leader(context.Background()) - if err != nil { - return "", err - } - - return leader.ClientURLs[0], nil // TODO: how to handle multiple client URLs? -} - -func (c *httpClusterClient) SetEndpoints(eps []string) error { - if len(eps) == 0 { - return ErrNoEndpoints - } - - neps := make([]url.URL, len(eps)) - for i, ep := range eps { - u, err := url.Parse(ep) - if err != nil { - return err - } - neps[i] = *u - } - - switch c.selectionMode { - case EndpointSelectionRandom: - c.endpoints = shuffleEndpoints(c.rand, neps) - c.pinned = 0 - case EndpointSelectionPrioritizeLeader: - c.endpoints = neps - lep, err := c.getLeaderEndpoint() - if err != nil { - return ErrNoLeaderEndpoint - } - - for i := range c.endpoints { - if c.endpoints[i].String() == lep { - c.pinned = i - break - } - } - // If endpoints doesn't have the lu, just keep c.pinned = 0. - // Forwarding between follower and leader would be required but it works. 
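
Taken together, Config, New, and the AutoSync contract documented above define the intended construction pattern for this client. A minimal sketch under the assumption of a reachable cluster (the endpoint URL and sync interval are placeholders):

    cfg := client.Config{
        Endpoints:               []string{"http://127.0.0.1:2379"},
        Transport:               client.DefaultTransport,
        HeaderTimeoutPerRequest: time.Second,
        SelectionMode:           client.EndpointSelectionPrioritizeLeader,
    }
    c, err := client.New(cfg)
    if err != nil {
        log.Fatal(err)
    }
    // Per the AutoSync documentation above, a leader-prioritizing client
    // must keep its membership view fresh.
    go func() {
        for {
            err := c.AutoSync(context.Background(), 10*time.Second)
            if err == context.DeadlineExceeded || err == context.Canceled {
                return
            }
            log.Print(err)
        }
    }()
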
- default: - return errors.New(fmt.Sprintf("invalid endpoint selection mode: %d", c.selectionMode)) - } - - return nil -} - -func (c *httpClusterClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - action := act - c.RLock() - leps := len(c.endpoints) - eps := make([]url.URL, leps) - n := copy(eps, c.endpoints) - pinned := c.pinned - - if c.credentials != nil { - action = &authedAction{ - act: act, - credentials: *c.credentials, - } - } - c.RUnlock() - - if leps == 0 { - return nil, nil, ErrNoEndpoints - } - - if leps != n { - return nil, nil, errors.New("unable to pick endpoint: copy failed") - } - - var resp *http.Response - var body []byte - var err error - cerr := &ClusterError{} - - for i := pinned; i < leps+pinned; i++ { - k := i % leps - hc := c.clientFactory(eps[k]) - resp, body, err = hc.Do(ctx, action) - if err != nil { - cerr.Errors = append(cerr.Errors, err) - if err == ctx.Err() { - return nil, nil, ctx.Err() - } - if err == context.Canceled || err == context.DeadlineExceeded { - return nil, nil, err - } - continue - } - if resp.StatusCode/100 == 5 { - switch resp.StatusCode { - case http.StatusInternalServerError, http.StatusServiceUnavailable: - // TODO: make sure this is a no leader response - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s has no leader", eps[k].String())) - default: - cerr.Errors = append(cerr.Errors, fmt.Errorf("client: etcd member %s returns server error [%s]", eps[k].String(), http.StatusText(resp.StatusCode))) - } - continue - } - if k != pinned { - c.Lock() - c.pinned = k - c.Unlock() - } - return resp, body, nil - } - - return nil, nil, cerr -} - -func (c *httpClusterClient) Endpoints() []string { - c.RLock() - defer c.RUnlock() - - eps := make([]string, len(c.endpoints)) - for i, ep := range c.endpoints { - eps[i] = ep.String() - } - - return eps -} - -func (c *httpClusterClient) Sync(ctx context.Context) error { - mAPI := NewMembersAPI(c) - ms, err := mAPI.List(ctx) - if err != nil { - return err - } - - c.Lock() - defer c.Unlock() - - eps := make([]string, 0) - for _, m := range ms { - eps = append(eps, m.ClientURLs...) 
- } - sort.Sort(sort.StringSlice(eps)) - - ceps := make([]string, len(c.endpoints)) - for i, cep := range c.endpoints { - ceps[i] = cep.String() - } - sort.Sort(sort.StringSlice(ceps)) - // fast path if no change happens - // this helps client to pin the endpoint when no cluster change - if reflect.DeepEqual(eps, ceps) { - return nil - } - - return c.SetEndpoints(eps) -} - -func (c *httpClusterClient) AutoSync(ctx context.Context, interval time.Duration) error { - ticker := time.NewTicker(interval) - defer ticker.Stop() - for { - err := c.Sync(ctx) - if err != nil { - return err - } - select { - case <-ctx.Done(): - return ctx.Err() - case <-ticker.C: - } - } -} - -type roundTripResponse struct { - resp *http.Response - err error -} - -type simpleHTTPClient struct { - transport CancelableTransport - endpoint url.URL - headerTimeout time.Duration -} - -func (c *simpleHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - req := act.HTTPRequest(c.endpoint) - - if err := printcURL(req); err != nil { - return nil, nil, err - } - - isWait := false - if req != nil && req.URL != nil { - ws := req.URL.Query().Get("wait") - if len(ws) != 0 { - var err error - isWait, err = strconv.ParseBool(ws) - if err != nil { - return nil, nil, fmt.Errorf("wrong wait value %s (%v for %+v)", ws, err, req) - } - } - } - - var hctx context.Context - var hcancel context.CancelFunc - if !isWait && c.headerTimeout > 0 { - hctx, hcancel = context.WithTimeout(ctx, c.headerTimeout) - } else { - hctx, hcancel = context.WithCancel(ctx) - } - defer hcancel() - - reqcancel := requestCanceler(c.transport, req) - - rtchan := make(chan roundTripResponse, 1) - go func() { - resp, err := c.transport.RoundTrip(req) - rtchan <- roundTripResponse{resp: resp, err: err} - close(rtchan) - }() - - var resp *http.Response - var err error - - select { - case rtresp := <-rtchan: - resp, err = rtresp.resp, rtresp.err - case <-hctx.Done(): - // cancel and wait for request to actually exit before continuing - reqcancel() - rtresp := <-rtchan - resp = rtresp.resp - switch { - case ctx.Err() != nil: - err = ctx.Err() - case hctx.Err() != nil: - err = fmt.Errorf("client: endpoint %s exceeded header timeout", c.endpoint.String()) - default: - panic("failed to get error from context") - } - } - - // always check for resp nil-ness to deal with possible - // race conditions between channels above - defer func() { - if resp != nil { - resp.Body.Close() - } - }() - - if err != nil { - return nil, nil, err - } - - var body []byte - done := make(chan struct{}) - go func() { - body, err = ioutil.ReadAll(resp.Body) - done <- struct{}{} - }() - - select { - case <-ctx.Done(): - resp.Body.Close() - <-done - return nil, nil, ctx.Err() - case <-done: - } - - return resp, body, err -} - -type authedAction struct { - act httpAction - credentials credentials -} - -func (a *authedAction) HTTPRequest(url url.URL) *http.Request { - r := a.act.HTTPRequest(url) - r.SetBasicAuth(a.credentials.username, a.credentials.password) - return r -} - -type redirectFollowingHTTPClient struct { - client httpClient - checkRedirect CheckRedirectFunc -} - -func (r *redirectFollowingHTTPClient) Do(ctx context.Context, act httpAction) (*http.Response, []byte, error) { - next := act - for i := 0; i < 100; i++ { - if i > 0 { - if err := r.checkRedirect(i); err != nil { - return nil, nil, err - } - } - resp, body, err := r.client.Do(ctx, next) - if err != nil { - return nil, nil, err - } - if resp.StatusCode/100 == 3 { - hdr := 
resp.Header.Get("Location") - if hdr == "" { - return nil, nil, fmt.Errorf("Location header not set") - } - loc, err := url.Parse(hdr) - if err != nil { - return nil, nil, fmt.Errorf("Location header not valid URL: %s", hdr) - } - next = &redirectedHTTPAction{ - action: act, - location: *loc, - } - continue - } - return resp, body, nil - } - - return nil, nil, errTooManyRedirectChecks -} - -type redirectedHTTPAction struct { - action httpAction - location url.URL -} - -func (r *redirectedHTTPAction) HTTPRequest(ep url.URL) *http.Request { - orig := r.action.HTTPRequest(ep) - orig.URL = &r.location - return orig -} - -func shuffleEndpoints(r *rand.Rand, eps []url.URL) []url.URL { - p := r.Perm(len(eps)) - neps := make([]url.URL, len(eps)) - for i, k := range p { - neps[i] = eps[k] - } - return neps -} diff --git a/vendor/src/github.com/coreos/etcd/client/cluster_error.go b/vendor/src/github.com/coreos/etcd/client/cluster_error.go deleted file mode 100644 index 957ed46245..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/cluster_error.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import "fmt" - -type ClusterError struct { - Errors []error -} - -func (ce *ClusterError) Error() string { - return ErrClusterUnavailable.Error() -} - -func (ce *ClusterError) Detail() string { - s := "" - for i, e := range ce.Errors { - s += fmt.Sprintf("error #%d: %s\n", i, e) - } - return s -} diff --git a/vendor/src/github.com/coreos/etcd/client/curl.go b/vendor/src/github.com/coreos/etcd/client/curl.go deleted file mode 100644 index 5a5a69a941..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/curl.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "fmt" - "io/ioutil" - "net/http" - "os" -) - -var ( - cURLDebug = false -) - -func EnablecURLDebug() { - cURLDebug = true -} - -func DisablecURLDebug() { - cURLDebug = false -} - -// printcURL prints the cURL equivalent request to stderr. -// It returns an error if the body of the request cannot -// be read. -// The caller MUST cancel the request if there is an error. 
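
ClusterError and the cURL-debug toggles above are this package's two diagnostic surfaces: the former aggregates one error per endpoint tried, the latter echoes each request to stderr as a copy-pastable curl command. A short usage sketch; kAPI stands in for a KeysAPI built with NewKeysAPI as shown later in keys.go:

    client.EnablecURLDebug()
    if _, err := kAPI.Get(ctx, "/foo", nil); err != nil {
        if cerr, ok := err.(*client.ClusterError); ok {
            log.Print(cerr.Detail()) // one "error #N: ..." line per failed endpoint
        }
    }
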
-func printcURL(req *http.Request) error { - if !cURLDebug { - return nil - } - var ( - command string - b []byte - err error - ) - - if req.URL != nil { - command = fmt.Sprintf("curl -X %s %s", req.Method, req.URL.String()) - } - - if req.Body != nil { - b, err = ioutil.ReadAll(req.Body) - if err != nil { - return err - } - command += fmt.Sprintf(" -d %q", string(b)) - } - - fmt.Fprintf(os.Stderr, "cURL Command: %s\n", command) - - // reset body - body := bytes.NewBuffer(b) - req.Body = ioutil.NopCloser(body) - - return nil -} diff --git a/vendor/src/github.com/coreos/etcd/client/discover.go b/vendor/src/github.com/coreos/etcd/client/discover.go deleted file mode 100644 index ae88659f42..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/discover.go +++ /dev/null @@ -1,21 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -// Discoverer is an interface that wraps the Discover method. -type Discoverer interface { - // Discover looks up the etcd servers for the domain. - Discover(domain string) ([]string, error) -} diff --git a/vendor/src/github.com/coreos/etcd/client/doc.go b/vendor/src/github.com/coreos/etcd/client/doc.go deleted file mode 100644 index 70111caceb..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/doc.go +++ /dev/null @@ -1,71 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package client provides bindings for the etcd APIs. 
- -Create a Config and exchange it for a Client: - - import ( - "net/http" - - "github.com/coreos/etcd/client" - "golang.org/x/net/context" - ) - - cfg := client.Config{ - Endpoints: []string{"http://127.0.0.1:2379"}, - Transport: DefaultTransport, - } - - c, err := client.New(cfg) - if err != nil { - // handle error - } - -Create a KeysAPI using the Client, then use it to interact with etcd: - - kAPI := client.NewKeysAPI(c) - - // create a new key /foo with the value "bar" - _, err = kAPI.Create(context.Background(), "/foo", "bar") - if err != nil { - // handle error - } - - // delete the newly created key only if the value is still "bar" - _, err = kAPI.Delete(context.Background(), "/foo", &DeleteOptions{PrevValue: "bar"}) - if err != nil { - // handle error - } - -Use a custom context to set timeouts on your operations: - - import "time" - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - // set a new key, ignoring it's previous state - _, err := kAPI.Set(ctx, "/ping", "pong", nil) - if err != nil { - if err == context.DeadlineExceeded { - // request took longer than 5s - } else { - // handle error - } - } - -*/ -package client diff --git a/vendor/src/github.com/coreos/etcd/client/keys.generated.go b/vendor/src/github.com/coreos/etcd/client/keys.generated.go deleted file mode 100644 index 748283aa95..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/keys.generated.go +++ /dev/null @@ -1,1000 +0,0 @@ -// ************************************************************ -// DO NOT EDIT. -// THIS FILE IS AUTO-GENERATED BY codecgen. -// ************************************************************ - -package client - -import ( - "errors" - "fmt" - codec1978 "github.com/ugorji/go/codec" - "reflect" - "runtime" - time "time" -) - -const ( - // ----- content types ---- - codecSelferC_UTF81819 = 1 - codecSelferC_RAW1819 = 0 - // ----- value types used ---- - codecSelferValueTypeArray1819 = 10 - codecSelferValueTypeMap1819 = 9 - // ----- containerStateValues ---- - codecSelfer_containerMapKey1819 = 2 - codecSelfer_containerMapValue1819 = 3 - codecSelfer_containerMapEnd1819 = 4 - codecSelfer_containerArrayElem1819 = 6 - codecSelfer_containerArrayEnd1819 = 7 -) - -var ( - codecSelferBitsize1819 = uint8(reflect.TypeOf(uint(0)).Bits()) - codecSelferOnlyMapOrArrayEncodeToStructErr1819 = errors.New(`only encoded map or array can be decoded into a struct`) -) - -type codecSelfer1819 struct{} - -func init() { - if codec1978.GenVersion != 5 { - _, file, _, _ := runtime.Caller(0) - err := fmt.Errorf("codecgen version mismatch: current: %v, need %v. 
Re-generate file: %v", - 5, codec1978.GenVersion, file) - panic(err) - } - if false { // reference the types, but skip this branch at build/run time - var v0 time.Time - _ = v0 - } -} - -func (x *Response) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym1 := z.EncBinary() - _ = yym1 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep2 := !z.EncBinary() - yy2arr2 := z.EncBasicHandle().StructToArray - var yyq2 [3]bool - _, _, _ = yysep2, yyq2, yy2arr2 - const yyr2 bool = false - var yynn2 int - if yyr2 || yy2arr2 { - r.EncodeArrayStart(3) - } else { - yynn2 = 3 - for _, b := range yyq2 { - if b { - yynn2++ - } - } - r.EncodeMapStart(yynn2) - yynn2 = 0 - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym4 := z.EncBinary() - _ = yym4 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("action")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym5 := z.EncBinary() - _ = yym5 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Action)) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("node")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Node == nil { - r.EncodeNil() - } else { - x.Node.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("prevNode")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.PrevNode == nil { - r.EncodeNil() - } else { - x.PrevNode.CodecEncodeSelf(e) - } - } - if yyr2 || yy2arr2 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Response) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym8 := z.DecBinary() - _ = yym8 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct9 := r.ContainerType() - if yyct9 == codecSelferValueTypeMap1819 { - yyl9 := r.ReadMapStart() - if yyl9 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl9, d) - } - } else if yyct9 == codecSelferValueTypeArray1819 { - yyl9 := r.ReadArrayStart() - if yyl9 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl9, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Response) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys10Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys10Slc - var yyhl10 bool = l >= 0 - for yyj10 := 0; ; yyj10++ { - if yyhl10 { - if yyj10 >= l { - break - } - } else { - if r.CheckBreak() { - break - } 
- } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys10Slc = r.DecodeBytes(yys10Slc, true, true) - yys10 := string(yys10Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch yys10 { - case "action": - if r.TryDecodeAsNil() { - x.Action = "" - } else { - x.Action = string(r.DecodeString()) - } - case "node": - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - case "prevNode": - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - default: - z.DecStructFieldNotFound(-1, yys10) - } // end switch yys10 - } // end for yyj10 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Response) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj14 int - var yyb14 bool - var yyhl14 bool = l >= 0 - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Action = "" - } else { - x.Action = string(r.DecodeString()) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Node != nil { - x.Node = nil - } - } else { - if x.Node == nil { - x.Node = new(Node) - } - x.Node.CodecDecodeSelf(d) - } - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.PrevNode != nil { - x.PrevNode = nil - } - } else { - if x.PrevNode == nil { - x.PrevNode = new(Node) - } - x.PrevNode.CodecDecodeSelf(d) - } - for { - yyj14++ - if yyhl14 { - yyb14 = yyj14 > l - } else { - yyb14 = r.CheckBreak() - } - if yyb14 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - z.DecStructFieldNotFound(yyj14-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x *Node) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym18 := z.EncBinary() - _ = yym18 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - yysep19 := !z.EncBinary() - yy2arr19 := z.EncBasicHandle().StructToArray - var yyq19 [8]bool - _, _, _ = yysep19, yyq19, yy2arr19 - const yyr19 bool = false - yyq19[1] = x.Dir != false - yyq19[6] = x.Expiration != nil - yyq19[7] = x.TTL != 0 - var yynn19 int - if yyr19 || yy2arr19 { - r.EncodeArrayStart(8) - } else { - yynn19 = 5 - for _, b := range yyq19 { - if b { - yynn19++ - } - } - r.EncodeMapStart(yynn19) - yynn19 = 0 - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym21 := z.EncBinary() - _ = yym21 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("key")) - 
z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym22 := z.EncBinary() - _ = yym22 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Key)) - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq19[1] { - yym24 := z.EncBinary() - _ = yym24 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } else { - r.EncodeBool(false) - } - } else { - if yyq19[1] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("dir")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym25 := z.EncBinary() - _ = yym25 - if false { - } else { - r.EncodeBool(bool(x.Dir)) - } - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym27 := z.EncBinary() - _ = yym27 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("value")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym28 := z.EncBinary() - _ = yym28 - if false { - } else { - r.EncodeString(codecSelferC_UTF81819, string(x.Value)) - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("nodes")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Nodes == nil { - r.EncodeNil() - } else { - x.Nodes.CodecEncodeSelf(e) - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym31 := z.EncBinary() - _ = yym31 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("createdIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym32 := z.EncBinary() - _ = yym32 - if false { - } else { - r.EncodeUint(uint64(x.CreatedIndex)) - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - yym34 := z.EncBinary() - _ = yym34 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } else { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("modifiedIndex")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym35 := z.EncBinary() - _ = yym35 - if false { - } else { - r.EncodeUint(uint64(x.ModifiedIndex)) - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq19[6] { - if x.Expiration == nil { - r.EncodeNil() - } else { - yym37 := z.EncBinary() - _ = yym37 - if false { - } else if yym38 := z.TimeRtidIfBinc(); yym38 != 0 { - r.EncodeBuiltin(yym38, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym37 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym37 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } else { - r.EncodeNil() - } - } else { - if yyq19[6] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("expiration")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - if x.Expiration == nil { - r.EncodeNil() - } else { - yym39 := z.EncBinary() - _ = yym39 - if false 
{ - } else if yym40 := z.TimeRtidIfBinc(); yym40 != 0 { - r.EncodeBuiltin(yym40, x.Expiration) - } else if z.HasExtensions() && z.EncExt(x.Expiration) { - } else if yym39 { - z.EncBinaryMarshal(x.Expiration) - } else if !yym39 && z.IsJSONHandle() { - z.EncJSONMarshal(x.Expiration) - } else { - z.EncFallback(x.Expiration) - } - } - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyq19[7] { - yym42 := z.EncBinary() - _ = yym42 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } else { - r.EncodeInt(0) - } - } else { - if yyq19[7] { - z.EncSendContainerState(codecSelfer_containerMapKey1819) - r.EncodeString(codecSelferC_UTF81819, string("ttl")) - z.EncSendContainerState(codecSelfer_containerMapValue1819) - yym43 := z.EncBinary() - _ = yym43 - if false { - } else { - r.EncodeInt(int64(x.TTL)) - } - } - } - if yyr19 || yy2arr19 { - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - z.EncSendContainerState(codecSelfer_containerMapEnd1819) - } - } - } -} - -func (x *Node) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym44 := z.DecBinary() - _ = yym44 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - yyct45 := r.ContainerType() - if yyct45 == codecSelferValueTypeMap1819 { - yyl45 := r.ReadMapStart() - if yyl45 == 0 { - z.DecSendContainerState(codecSelfer_containerMapEnd1819) - } else { - x.codecDecodeSelfFromMap(yyl45, d) - } - } else if yyct45 == codecSelferValueTypeArray1819 { - yyl45 := r.ReadArrayStart() - if yyl45 == 0 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - } else { - x.codecDecodeSelfFromArray(yyl45, d) - } - } else { - panic(codecSelferOnlyMapOrArrayEncodeToStructErr1819) - } - } -} - -func (x *Node) codecDecodeSelfFromMap(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yys46Slc = z.DecScratchBuffer() // default slice to decode into - _ = yys46Slc - var yyhl46 bool = l >= 0 - for yyj46 := 0; ; yyj46++ { - if yyhl46 { - if yyj46 >= l { - break - } - } else { - if r.CheckBreak() { - break - } - } - z.DecSendContainerState(codecSelfer_containerMapKey1819) - yys46Slc = r.DecodeBytes(yys46Slc, true, true) - yys46 := string(yys46Slc) - z.DecSendContainerState(codecSelfer_containerMapValue1819) - switch yys46 { - case "key": - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - case "dir": - if r.TryDecodeAsNil() { - x.Dir = false - } else { - x.Dir = bool(r.DecodeBool()) - } - case "value": - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - case "nodes": - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv50 := &x.Nodes - yyv50.CodecDecodeSelf(d) - } - case "createdIndex": - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - x.CreatedIndex = uint64(r.DecodeUint(64)) - } - case "modifiedIndex": - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - x.ModifiedIndex = uint64(r.DecodeUint(64)) - } - case "expiration": - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym54 := z.DecBinary() - _ = yym54 - if false { - } else if yym55 := z.TimeRtidIfBinc(); yym55 != 0 { - r.DecodeBuiltin(yym55, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym54 { - z.DecBinaryUnmarshal(x.Expiration) - } 
else if !yym54 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - case "ttl": - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - x.TTL = int64(r.DecodeInt(64)) - } - default: - z.DecStructFieldNotFound(-1, yys46) - } // end switch yys46 - } // end for yyj46 - z.DecSendContainerState(codecSelfer_containerMapEnd1819) -} - -func (x *Node) codecDecodeSelfFromArray(l int, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - var yyj57 int - var yyb57 bool - var yyhl57 bool = l >= 0 - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Key = "" - } else { - x.Key = string(r.DecodeString()) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Dir = false - } else { - x.Dir = bool(r.DecodeBool()) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Value = "" - } else { - x.Value = string(r.DecodeString()) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.Nodes = nil - } else { - yyv61 := &x.Nodes - yyv61.CodecDecodeSelf(d) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.CreatedIndex = 0 - } else { - x.CreatedIndex = uint64(r.DecodeUint(64)) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.ModifiedIndex = 0 - } else { - x.ModifiedIndex = uint64(r.DecodeUint(64)) - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - if x.Expiration != nil { - x.Expiration = nil - } - } else { - if x.Expiration == nil { - x.Expiration = new(time.Time) - } - yym65 := z.DecBinary() - _ = yym65 - if false { - } else if yym66 := z.TimeRtidIfBinc(); yym66 != 0 { - r.DecodeBuiltin(yym66, x.Expiration) - } else if z.HasExtensions() && z.DecExt(x.Expiration) { - } else if yym65 { - z.DecBinaryUnmarshal(x.Expiration) - } else if !yym65 && z.IsJSONHandle() { - z.DecJSONUnmarshal(x.Expiration) - } else { - z.DecFallback(x.Expiration, false) - } - } - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) - return - } - 
z.DecSendContainerState(codecSelfer_containerArrayElem1819) - if r.TryDecodeAsNil() { - x.TTL = 0 - } else { - x.TTL = int64(r.DecodeInt(64)) - } - for { - yyj57++ - if yyhl57 { - yyb57 = yyj57 > l - } else { - yyb57 = r.CheckBreak() - } - if yyb57 { - break - } - z.DecSendContainerState(codecSelfer_containerArrayElem1819) - z.DecStructFieldNotFound(yyj57-1, "") - } - z.DecSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x Nodes) CodecEncodeSelf(e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - if x == nil { - r.EncodeNil() - } else { - yym68 := z.EncBinary() - _ = yym68 - if false { - } else if z.HasExtensions() && z.EncExt(x) { - } else { - h.encNodes((Nodes)(x), e) - } - } -} - -func (x *Nodes) CodecDecodeSelf(d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - yym69 := z.DecBinary() - _ = yym69 - if false { - } else if z.HasExtensions() && z.DecExt(x) { - } else { - h.decNodes((*Nodes)(x), d) - } -} - -func (x codecSelfer1819) encNodes(v Nodes, e *codec1978.Encoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperEncoder(e) - _, _, _ = h, z, r - r.EncodeArrayStart(len(v)) - for _, yyv70 := range v { - z.EncSendContainerState(codecSelfer_containerArrayElem1819) - if yyv70 == nil { - r.EncodeNil() - } else { - yyv70.CodecEncodeSelf(e) - } - } - z.EncSendContainerState(codecSelfer_containerArrayEnd1819) -} - -func (x codecSelfer1819) decNodes(v *Nodes, d *codec1978.Decoder) { - var h codecSelfer1819 - z, r := codec1978.GenHelperDecoder(d) - _, _, _ = h, z, r - - yyv71 := *v - yyh71, yyl71 := z.DecSliceHelperStart() - var yyc71 bool - if yyl71 == 0 { - if yyv71 == nil { - yyv71 = []*Node{} - yyc71 = true - } else if len(yyv71) != 0 { - yyv71 = yyv71[:0] - yyc71 = true - } - } else if yyl71 > 0 { - var yyrr71, yyrl71 int - var yyrt71 bool - if yyl71 > cap(yyv71) { - - yyrg71 := len(yyv71) > 0 - yyv271 := yyv71 - yyrl71, yyrt71 = z.DecInferLen(yyl71, z.DecBasicHandle().MaxInitLen, 8) - if yyrt71 { - if yyrl71 <= cap(yyv71) { - yyv71 = yyv71[:yyrl71] - } else { - yyv71 = make([]*Node, yyrl71) - } - } else { - yyv71 = make([]*Node, yyrl71) - } - yyc71 = true - yyrr71 = len(yyv71) - if yyrg71 { - copy(yyv71, yyv271) - } - } else if yyl71 != len(yyv71) { - yyv71 = yyv71[:yyl71] - yyc71 = true - } - yyj71 := 0 - for ; yyj71 < yyrr71; yyj71++ { - yyh71.ElemContainerState(yyj71) - if r.TryDecodeAsNil() { - if yyv71[yyj71] != nil { - *yyv71[yyj71] = Node{} - } - } else { - if yyv71[yyj71] == nil { - yyv71[yyj71] = new(Node) - } - yyw72 := yyv71[yyj71] - yyw72.CodecDecodeSelf(d) - } - - } - if yyrt71 { - for ; yyj71 < yyl71; yyj71++ { - yyv71 = append(yyv71, nil) - yyh71.ElemContainerState(yyj71) - if r.TryDecodeAsNil() { - if yyv71[yyj71] != nil { - *yyv71[yyj71] = Node{} - } - } else { - if yyv71[yyj71] == nil { - yyv71[yyj71] = new(Node) - } - yyw73 := yyv71[yyj71] - yyw73.CodecDecodeSelf(d) - } - - } - } - - } else { - yyj71 := 0 - for ; !r.CheckBreak(); yyj71++ { - - if yyj71 >= len(yyv71) { - yyv71 = append(yyv71, nil) // var yyz71 *Node - yyc71 = true - } - yyh71.ElemContainerState(yyj71) - if yyj71 < len(yyv71) { - if r.TryDecodeAsNil() { - if yyv71[yyj71] != nil { - *yyv71[yyj71] = Node{} - } - } else { - if yyv71[yyj71] == nil { - yyv71[yyj71] = new(Node) - } - yyw74 := yyv71[yyj71] - yyw74.CodecDecodeSelf(d) - } - - } else { - z.DecSwallow() - } - - } - if yyj71 < len(yyv71) { - yyv71 = yyv71[:yyj71] - yyc71 = true - } else if yyj71 == 0 && yyv71 == 
nil { - yyv71 = []*Node{} - yyc71 = true - } - } - yyh71.End() - if yyc71 { - *v = yyv71 - } -} diff --git a/vendor/src/github.com/coreos/etcd/client/keys.go b/vendor/src/github.com/coreos/etcd/client/keys.go deleted file mode 100644 index 69dd83c665..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/keys.go +++ /dev/null @@ -1,663 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -//go:generate codecgen -d 1819 -r "Node|Response|Nodes" -o keys.generated.go keys.go - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/ugorji/go/codec" - "golang.org/x/net/context" - "github.com/coreos/etcd/pkg/pathutil" -) - -const ( - ErrorCodeKeyNotFound = 100 - ErrorCodeTestFailed = 101 - ErrorCodeNotFile = 102 - ErrorCodeNotDir = 104 - ErrorCodeNodeExist = 105 - ErrorCodeRootROnly = 107 - ErrorCodeDirNotEmpty = 108 - ErrorCodeUnauthorized = 110 - - ErrorCodePrevValueRequired = 201 - ErrorCodeTTLNaN = 202 - ErrorCodeIndexNaN = 203 - ErrorCodeInvalidField = 209 - ErrorCodeInvalidForm = 210 - - ErrorCodeRaftInternal = 300 - ErrorCodeLeaderElect = 301 - - ErrorCodeWatcherCleared = 400 - ErrorCodeEventIndexCleared = 401 -) - -type Error struct { - Code int `json:"errorCode"` - Message string `json:"message"` - Cause string `json:"cause"` - Index uint64 `json:"index"` -} - -func (e Error) Error() string { - return fmt.Sprintf("%v: %v (%v) [%v]", e.Code, e.Message, e.Cause, e.Index) -} - -var ( - ErrInvalidJSON = errors.New("client: response is invalid json. The endpoint is probably not valid etcd cluster endpoint.") - ErrEmptyBody = errors.New("client: response body is empty") -) - -// PrevExistType is used to define an existence condition when setting -// or deleting Nodes. -type PrevExistType string - -const ( - PrevIgnore = PrevExistType("") - PrevExist = PrevExistType("true") - PrevNoExist = PrevExistType("false") -) - -var ( - defaultV2KeysPrefix = "/v2/keys" -) - -// NewKeysAPI builds a KeysAPI that interacts with etcd's key-value -// API over HTTP. -func NewKeysAPI(c Client) KeysAPI { - return NewKeysAPIWithPrefix(c, defaultV2KeysPrefix) -} - -// NewKeysAPIWithPrefix acts like NewKeysAPI, but allows the caller -// to provide a custom base URL path. This should only be used in -// very rare cases. -func NewKeysAPIWithPrefix(c Client, p string) KeysAPI { - return &httpKeysAPI{ - client: c, - prefix: p, - } -} - -type KeysAPI interface { - // Get retrieves a set of Nodes from etcd - Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) - - // Set assigns a new value to a Node identified by a given key. The caller - // may define a set of conditions in the SetOptions. If SetOptions.Dir=true - // then value is ignored. - Set(ctx context.Context, key, value string, opts *SetOptions) (*Response, error) - - // Delete removes a Node identified by the given key, optionally destroying - // all of its children as well. 
The caller may define a set of required - // conditions in an DeleteOptions object. - Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) - - // Create is an alias for Set w/ PrevExist=false - Create(ctx context.Context, key, value string) (*Response, error) - - // CreateInOrder is used to atomically create in-order keys within the given directory. - CreateInOrder(ctx context.Context, dir, value string, opts *CreateInOrderOptions) (*Response, error) - - // Update is an alias for Set w/ PrevExist=true - Update(ctx context.Context, key, value string) (*Response, error) - - // Watcher builds a new Watcher targeted at a specific Node identified - // by the given key. The Watcher may be configured at creation time - // through a WatcherOptions object. The returned Watcher is designed - // to emit events that happen to a Node, and optionally to its children. - Watcher(key string, opts *WatcherOptions) Watcher -} - -type WatcherOptions struct { - // AfterIndex defines the index after-which the Watcher should - // start emitting events. For example, if a value of 5 is - // provided, the first event will have an index >= 6. - // - // Setting AfterIndex to 0 (default) means that the Watcher - // should start watching for events starting at the current - // index, whatever that may be. - AfterIndex uint64 - - // Recursive specifies whether or not the Watcher should emit - // events that occur in children of the given keyspace. If set - // to false (default), events will be limited to those that - // occur for the exact key. - Recursive bool -} - -type CreateInOrderOptions struct { - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration -} - -type SetOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Set operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. - // - // PrevValue is ignored if Dir=true - PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Set operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // PrevExist specifies whether the Node must currently exist - // (PrevExist) or not (PrevNoExist). If the caller does not - // care about existence, set PrevExist to PrevIgnore, or simply - // leave it unset. - PrevExist PrevExistType - - // TTL defines a period of time after-which the Node should - // expire and no longer exist. Values <= 0 are ignored. Given - // that the zero-value is ignored, TTL cannot be used to set - // a TTL of 0. - TTL time.Duration - - // Refresh set to true means a TTL value can be updated - // without firing a watch or changing the node value. A - // value must not be provided when refreshing a key. - Refresh bool - - // Dir specifies whether or not this Node should be created as a directory. - Dir bool -} - -type GetOptions struct { - // Recursive defines whether or not all children of the Node - // should be returned. - Recursive bool - - // Sort instructs the server whether or not to sort the Nodes. - // If true, the Nodes are sorted alphabetically by key in - // ascending order (A to z). 
If false (default), the Nodes will - // not be sorted and the ordering used should not be considered - // predictable. - Sort bool - - // Quorum specifies whether it gets the latest committed value that - // has been applied in quorum of members, which ensures external - // consistency (or linearizability). - Quorum bool -} - -type DeleteOptions struct { - // PrevValue specifies what the current value of the Node must - // be in order for the Delete operation to succeed. - // - // Leaving this field empty means that the caller wishes to - // ignore the current value of the Node. This cannot be used - // to compare the Node's current value to an empty string. - PrevValue string - - // PrevIndex indicates what the current ModifiedIndex of the - // Node must be in order for the Delete operation to succeed. - // - // If PrevIndex is set to 0 (default), no comparison is made. - PrevIndex uint64 - - // Recursive defines whether or not all children of the Node - // should be deleted. If set to true, all children of the Node - // identified by the given key will be deleted. If left unset - // or explicitly set to false, only a single Node will be - // deleted. - Recursive bool - - // Dir specifies whether or not this Node should be removed as a directory. - Dir bool -} - -type Watcher interface { - // Next blocks until an etcd event occurs, then returns a Response - // representing that event. The behavior of Next depends on the - // WatcherOptions used to construct the Watcher. Next is designed to - // be called repeatedly, each time blocking until a subsequent event - // is available. - // - // If the provided context is cancelled, Next will return a non-nil - // error. Any other failures encountered while waiting for the next - // event (connection issues, deserialization failures, etc) will - // also result in a non-nil error. - Next(context.Context) (*Response, error) -} - -type Response struct { - // Action is the name of the operation that occurred. Possible values - // include get, set, delete, update, create, compareAndSwap, - // compareAndDelete and expire. - Action string `json:"action"` - - // Node represents the state of the relevant etcd Node. - Node *Node `json:"node"` - - // PrevNode represents the previous state of the Node. PrevNode is non-nil - // only if the Node existed before the action occurred and the action - // caused a change to the Node. - PrevNode *Node `json:"prevNode"` - - // Index holds the cluster-level index at the time the Response was generated. - // This index is not tied to the Node(s) contained in this Response. - Index uint64 `json:"-"` -} - -type Node struct { - // Key represents the unique location of this Node (e.g. "/foo/bar"). - Key string `json:"key"` - - // Dir reports whether node describes a directory. - Dir bool `json:"dir,omitempty"` - - // Value is the current data stored on this Node. If this Node - // is a directory, Value will be empty. - Value string `json:"value"` - - // Nodes holds the children of this Node, only if this Node is a directory. - // This slice of will be arbitrarily deep (children, grandchildren, great- - // grandchildren, etc.) if a recursive Get or Watch request were made. - Nodes Nodes `json:"nodes"` - - // CreatedIndex is the etcd index at-which this Node was created. - CreatedIndex uint64 `json:"createdIndex"` - - // ModifiedIndex is the etcd index at-which this Node was last modified. - ModifiedIndex uint64 `json:"modifiedIndex"` - - // Expiration is the server side expiration time of the key. 
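
The option structs above (SetOptions, GetOptions, DeleteOptions, WatcherOptions) carry all of the v2 API's conditional semantics. A compact sketch of the compare-and-swap and watch flows they enable, assuming a KeysAPI named kAPI and treating the key names as placeholders; failed comparisons surface as the client.Error value defined earlier in this file:

    // Compare-and-swap: update /job/owner only if we still hold it.
    _, err := kAPI.Set(ctx, "/job/owner", "worker-2",
        &client.SetOptions{PrevValue: "worker-1"})
    if cerr, ok := err.(client.Error); ok && cerr.Code == client.ErrorCodeTestFailed {
        // Another writer changed the key first.
    }

    // Watch /job and its children for events after the current index.
    w := kAPI.Watcher("/job", &client.WatcherOptions{Recursive: true})
    for {
        resp, err := w.Next(ctx)
        if err != nil {
            break
        }
        fmt.Println(resp.Action, resp.Node.Key, resp.Node.Value)
    }
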
- Expiration *time.Time `json:"expiration,omitempty"` - - // TTL is the time to live of the key in seconds. - TTL int64 `json:"ttl,omitempty"` -} - -func (n *Node) String() string { - return fmt.Sprintf("{Key: %s, CreatedIndex: %d, ModifiedIndex: %d, TTL: %d}", n.Key, n.CreatedIndex, n.ModifiedIndex, n.TTL) -} - -// TTLDuration returns the Node's TTL as a time.Duration object -func (n *Node) TTLDuration() time.Duration { - return time.Duration(n.TTL) * time.Second -} - -type Nodes []*Node - -// interfaces for sorting - -func (ns Nodes) Len() int { return len(ns) } -func (ns Nodes) Less(i, j int) bool { return ns[i].Key < ns[j].Key } -func (ns Nodes) Swap(i, j int) { ns[i], ns[j] = ns[j], ns[i] } - -type httpKeysAPI struct { - client httpClient - prefix string -} - -func (k *httpKeysAPI) Set(ctx context.Context, key, val string, opts *SetOptions) (*Response, error) { - act := &setAction{ - Prefix: k.prefix, - Key: key, - Value: val, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.PrevExist = opts.PrevExist - act.TTL = opts.TTL - act.Refresh = opts.Refresh - act.Dir = opts.Dir - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Create(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevNoExist}) -} - -func (k *httpKeysAPI) CreateInOrder(ctx context.Context, dir, val string, opts *CreateInOrderOptions) (*Response, error) { - act := &createInOrderAction{ - Prefix: k.prefix, - Dir: dir, - Value: val, - } - - if opts != nil { - act.TTL = opts.TTL - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Update(ctx context.Context, key, val string) (*Response, error) { - return k.Set(ctx, key, val, &SetOptions{PrevExist: PrevExist}) -} - -func (k *httpKeysAPI) Delete(ctx context.Context, key string, opts *DeleteOptions) (*Response, error) { - act := &deleteAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.PrevValue = opts.PrevValue - act.PrevIndex = opts.PrevIndex - act.Dir = opts.Dir - act.Recursive = opts.Recursive - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Get(ctx context.Context, key string, opts *GetOptions) (*Response, error) { - act := &getAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - act.Sorted = opts.Sort - act.Quorum = opts.Quorum - } - - resp, body, err := k.client.Do(ctx, act) - if err != nil { - return nil, err - } - - return unmarshalHTTPResponse(resp.StatusCode, resp.Header, body) -} - -func (k *httpKeysAPI) Watcher(key string, opts *WatcherOptions) Watcher { - act := waitAction{ - Prefix: k.prefix, - Key: key, - } - - if opts != nil { - act.Recursive = opts.Recursive - if opts.AfterIndex > 0 { - act.WaitIndex = opts.AfterIndex + 1 - } - } - - return &httpWatcher{ - client: k.client, - nextWait: act, - } -} - -type httpWatcher struct { - client httpClient - nextWait waitAction -} - -func (hw *httpWatcher) Next(ctx context.Context) (*Response, error) { - for { - httpresp, body, err := hw.client.Do(ctx, &hw.nextWait) - if err != nil { - return nil, err - } - - resp, err := 
unmarshalHTTPResponse(httpresp.StatusCode, httpresp.Header, body) - if err != nil { - if err == ErrEmptyBody { - continue - } - return nil, err - } - - hw.nextWait.WaitIndex = resp.Node.ModifiedIndex + 1 - return resp, nil - } -} - -// v2KeysURL forms a URL representing the location of a key. -// The endpoint argument represents the base URL of an etcd -// server. The prefix is the path needed to route from the -// provided endpoint's path to the root of the keys API -// (typically "/v2/keys"). -func v2KeysURL(ep url.URL, prefix, key string) *url.URL { - // We concatenate all parts together manually. We cannot use - // path.Join because it does not preserve trailing slashes. - // We call CanonicalURLPath to further clean up the path. - if prefix != "" && prefix[0] != '/' { - prefix = "/" + prefix - } - if key != "" && key[0] != '/' { - key = "/" + key - } - ep.Path = pathutil.CanonicalURLPath(ep.Path + prefix + key) - return &ep -} - -type getAction struct { - Prefix string - Key string - Recursive bool - Sorted bool - Quorum bool -} - -func (g *getAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, g.Prefix, g.Key) - - params := u.Query() - params.Set("recursive", strconv.FormatBool(g.Recursive)) - params.Set("sorted", strconv.FormatBool(g.Sorted)) - params.Set("quorum", strconv.FormatBool(g.Quorum)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type waitAction struct { - Prefix string - Key string - WaitIndex uint64 - Recursive bool -} - -func (w *waitAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, w.Prefix, w.Key) - - params := u.Query() - params.Set("wait", "true") - params.Set("waitIndex", strconv.FormatUint(w.WaitIndex, 10)) - params.Set("recursive", strconv.FormatBool(w.Recursive)) - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type setAction struct { - Prefix string - Key string - Value string - PrevValue string - PrevIndex uint64 - PrevExist PrevExistType - TTL time.Duration - Refresh bool - Dir bool -} - -func (a *setAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - form := url.Values{} - - // we're either creating a directory or setting a key - if a.Dir { - params.Set("dir", strconv.FormatBool(a.Dir)) - } else { - // These options are only valid for setting a key - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - form.Add("value", a.Value) - } - - // Options which apply to both setting a key and creating a dir - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.PrevExist != PrevIgnore { - params.Set("prevExist", string(a.PrevExist)) - } - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - - if a.Refresh { - form.Add("refresh", "true") - } - - u.RawQuery = params.Encode() - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("PUT", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type deleteAction struct { - Prefix string - Key string - PrevValue string - PrevIndex uint64 - Dir bool - Recursive bool -} - -func (a *deleteAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Key) - - params := u.Query() - if a.PrevValue != "" { - params.Set("prevValue", a.PrevValue) - } - if a.PrevIndex != 0 { - params.Set("prevIndex", strconv.FormatUint(a.PrevIndex, 10)) - } - if a.Dir 
{ - params.Set("dir", "true") - } - if a.Recursive { - params.Set("recursive", "true") - } - u.RawQuery = params.Encode() - - req, _ := http.NewRequest("DELETE", u.String(), nil) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - - return req -} - -type createInOrderAction struct { - Prefix string - Dir string - Value string - TTL time.Duration -} - -func (a *createInOrderAction) HTTPRequest(ep url.URL) *http.Request { - u := v2KeysURL(ep, a.Prefix, a.Dir) - - form := url.Values{} - form.Add("value", a.Value) - if a.TTL > 0 { - form.Add("ttl", strconv.FormatUint(uint64(a.TTL.Seconds()), 10)) - } - body := strings.NewReader(form.Encode()) - - req, _ := http.NewRequest("POST", u.String(), body) - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return req -} - -func unmarshalHTTPResponse(code int, header http.Header, body []byte) (res *Response, err error) { - switch code { - case http.StatusOK, http.StatusCreated: - if len(body) == 0 { - return nil, ErrEmptyBody - } - res, err = unmarshalSuccessfulKeysResponse(header, body) - default: - err = unmarshalFailedKeysResponse(body) - } - - return -} - -func unmarshalSuccessfulKeysResponse(header http.Header, body []byte) (*Response, error) { - var res Response - err := codec.NewDecoderBytes(body, new(codec.JsonHandle)).Decode(&res) - if err != nil { - return nil, ErrInvalidJSON - } - if header.Get("X-Etcd-Index") != "" { - res.Index, err = strconv.ParseUint(header.Get("X-Etcd-Index"), 10, 64) - if err != nil { - return nil, err - } - } - return &res, nil -} - -func unmarshalFailedKeysResponse(body []byte) error { - var etcdErr Error - if err := json.Unmarshal(body, &etcdErr); err != nil { - return ErrInvalidJSON - } - return etcdErr -} diff --git a/vendor/src/github.com/coreos/etcd/client/members.go b/vendor/src/github.com/coreos/etcd/client/members.go deleted file mode 100644 index 8b602db54b..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/members.go +++ /dev/null @@ -1,304 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -import ( - "bytes" - "encoding/json" - "fmt" - "net/http" - "net/url" - "path" - - "golang.org/x/net/context" - - "github.com/coreos/etcd/pkg/types" -) - -var ( - defaultV2MembersPrefix = "/v2/members" - defaultLeaderSuffix = "/leader" -) - -type Member struct { - // ID is the unique identifier of this Member. - ID string `json:"id"` - - // Name is a human-readable, non-unique identifier of this Member. - Name string `json:"name"` - - // PeerURLs represents the HTTP(S) endpoints this Member uses to - // participate in etcd's consensus protocol. - PeerURLs []string `json:"peerURLs"` - - // ClientURLs represents the HTTP(S) endpoints on which this Member - // serves its client-facing APIs. 
- ClientURLs []string `json:"clientURLs"` -} - -type memberCollection []Member - -func (c *memberCollection) UnmarshalJSON(data []byte) error { - d := struct { - Members []Member - }{} - - if err := json.Unmarshal(data, &d); err != nil { - return err - } - - if d.Members == nil { - *c = make([]Member, 0) - return nil - } - - *c = d.Members - return nil -} - -type memberCreateOrUpdateRequest struct { - PeerURLs types.URLs -} - -func (m *memberCreateOrUpdateRequest) MarshalJSON() ([]byte, error) { - s := struct { - PeerURLs []string `json:"peerURLs"` - }{ - PeerURLs: make([]string, len(m.PeerURLs)), - } - - for i, u := range m.PeerURLs { - s.PeerURLs[i] = u.String() - } - - return json.Marshal(&s) -} - -// NewMembersAPI constructs a new MembersAPI that uses HTTP to -// interact with etcd's membership API. -func NewMembersAPI(c Client) MembersAPI { - return &httpMembersAPI{ - client: c, - } -} - -type MembersAPI interface { - // List enumerates the current cluster membership. - List(ctx context.Context) ([]Member, error) - - // Add instructs etcd to accept a new Member into the cluster. - Add(ctx context.Context, peerURL string) (*Member, error) - - // Remove removes an existing Member from the cluster. - Remove(ctx context.Context, mID string) error - - // Update instructs etcd to update an existing Member in the cluster. - Update(ctx context.Context, mID string, peerURLs []string) error - - // Leader gets the current leader of the cluster - Leader(ctx context.Context) (*Member, error) -} - -type httpMembersAPI struct { - client httpClient -} - -func (m *httpMembersAPI) List(ctx context.Context) ([]Member, error) { - req := &membersAPIActionList{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var mCollection memberCollection - if err := json.Unmarshal(body, &mCollection); err != nil { - return nil, err - } - - return []Member(mCollection), nil -} - -func (m *httpMembersAPI) Add(ctx context.Context, peerURL string) (*Member, error) { - urls, err := types.NewURLs([]string{peerURL}) - if err != nil { - return nil, err - } - - req := &membersAPIActionAdd{peerURLs: urls} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusCreated, http.StatusConflict); err != nil { - return nil, err - } - - if resp.StatusCode != http.StatusCreated { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return nil, err - } - return nil, merr - } - - var memb Member - if err := json.Unmarshal(body, &memb); err != nil { - return nil, err - } - - return &memb, nil -} - -func (m *httpMembersAPI) Update(ctx context.Context, memberID string, peerURLs []string) error { - urls, err := types.NewURLs(peerURLs) - if err != nil { - return err - } - - req := &membersAPIActionUpdate{peerURLs: urls, memberID: memberID} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusNotFound, http.StatusConflict); err != nil { - return err - } - - if resp.StatusCode != http.StatusNoContent { - var merr membersError - if err := json.Unmarshal(body, &merr); err != nil { - return err - } - return merr - } - - return nil -} - -func (m *httpMembersAPI) Remove(ctx context.Context, memberID string) error { - req := &membersAPIActionRemove{memberID: memberID} - resp, _, err := m.client.Do(ctx, 
req) - if err != nil { - return err - } - - return assertStatusCode(resp.StatusCode, http.StatusNoContent, http.StatusGone) -} - -func (m *httpMembersAPI) Leader(ctx context.Context) (*Member, error) { - req := &membersAPIActionLeader{} - resp, body, err := m.client.Do(ctx, req) - if err != nil { - return nil, err - } - - if err := assertStatusCode(resp.StatusCode, http.StatusOK); err != nil { - return nil, err - } - - var leader Member - if err := json.Unmarshal(body, &leader); err != nil { - return nil, err - } - - return &leader, nil -} - -type membersAPIActionList struct{} - -func (l *membersAPIActionList) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -type membersAPIActionRemove struct { - memberID string -} - -func (d *membersAPIActionRemove) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, d.memberID) - req, _ := http.NewRequest("DELETE", u.String(), nil) - return req -} - -type membersAPIActionAdd struct { - peerURLs types.URLs -} - -func (a *membersAPIActionAdd) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("POST", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -type membersAPIActionUpdate struct { - memberID string - peerURLs types.URLs -} - -func (a *membersAPIActionUpdate) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - m := memberCreateOrUpdateRequest{PeerURLs: a.peerURLs} - u.Path = path.Join(u.Path, a.memberID) - b, _ := json.Marshal(&m) - req, _ := http.NewRequest("PUT", u.String(), bytes.NewReader(b)) - req.Header.Set("Content-Type", "application/json") - return req -} - -func assertStatusCode(got int, want ...int) (err error) { - for _, w := range want { - if w == got { - return nil - } - } - return fmt.Errorf("unexpected status code %d", got) -} - -type membersAPIActionLeader struct{} - -func (l *membersAPIActionLeader) HTTPRequest(ep url.URL) *http.Request { - u := v2MembersURL(ep) - u.Path = path.Join(u.Path, defaultLeaderSuffix) - req, _ := http.NewRequest("GET", u.String(), nil) - return req -} - -// v2MembersURL adds the necessary path to the provided endpoint -// to route requests to the default v2 members API. -func v2MembersURL(ep url.URL) *url.URL { - ep.Path = path.Join(ep.Path, defaultV2MembersPrefix) - return &ep -} - -type membersError struct { - Message string `json:"message"` - Code int `json:"-"` -} - -func (e membersError) Error() string { - return e.Message -} diff --git a/vendor/src/github.com/coreos/etcd/client/srv.go b/vendor/src/github.com/coreos/etcd/client/srv.go deleted file mode 100644 index 06197967ca..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/srv.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
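For context on the SRV discovery that the removed file below implements, a minimal usage sketch: `NewSRVDiscover` and `Discover` are the functions defined in this file, while the domain `example.com` is a hypothetical placeholder.

```
package main

import (
	"fmt"

	"github.com/coreos/etcd/client"
)

func main() {
	// Discover looks up _etcd-client-ssl._tcp.<domain> and
	// _etcd-client._tcp.<domain> SRV records and turns the
	// results into https:// and http:// endpoint URLs.
	d := client.NewSRVDiscover()
	endpoints, err := d.Discover("example.com")
	if err != nil {
		fmt.Println("discovery failed:", err)
		return
	}
	fmt.Println(endpoints)
}
```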
- -package client - -import ( - "fmt" - "net" - "net/url" -) - -var ( - // indirection for testing - lookupSRV = net.LookupSRV -) - -type srvDiscover struct{} - -// NewSRVDiscover constructs a new Discoverer that uses the stdlib to look up SRV records. -func NewSRVDiscover() Discoverer { - return &srvDiscover{} -} - -// Discover looks up the etcd servers for the domain. -func (d *srvDiscover) Discover(domain string) ([]string, error) { - var urls []*url.URL - - updateURLs := func(service, scheme string) error { - _, addrs, err := lookupSRV(service, "tcp", domain) - if err != nil { - return err - } - for _, srv := range addrs { - urls = append(urls, &url.URL{ - Scheme: scheme, - Host: net.JoinHostPort(srv.Target, fmt.Sprintf("%d", srv.Port)), - }) - } - return nil - } - - errHTTPS := updateURLs("etcd-client-ssl", "https") - errHTTP := updateURLs("etcd-client", "http") - - if errHTTPS != nil && errHTTP != nil { - return nil, fmt.Errorf("dns lookup errors: %s and %s", errHTTPS, errHTTP) - } - - endpoints := make([]string, len(urls)) - for i := range urls { - endpoints[i] = urls[i].String() - } - return endpoints, nil -} diff --git a/vendor/src/github.com/coreos/etcd/client/util.go b/vendor/src/github.com/coreos/etcd/client/util.go deleted file mode 100644 index fc0800b3d2..0000000000 --- a/vendor/src/github.com/coreos/etcd/client/util.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package client - -// IsKeyNotFound returns true if the error code is ErrorCodeKeyNotFound. -func IsKeyNotFound(err error) bool { - if cErr, ok := err.(Error); ok { - return cErr.Code == ErrorCodeKeyNotFound - } - return false -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/crc/crc.go b/vendor/src/github.com/coreos/etcd/pkg/crc/crc.go deleted file mode 100644 index 4b998a4845..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/crc/crc.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package crc provides utility functions for cyclic redundancy check -// algorithms. -package crc - -import ( - "hash" - "hash/crc32" -) - -// The size of a CRC-32 checksum in bytes. -const Size = 4 - -type digest struct { - crc uint32 - tab *crc32.Table -} - -// New creates a new hash.Hash32 computing the CRC-32 checksum -// using the polynomial represented by the Table. -// Modified by xiangli to take a prevcrc. 
-func New(prev uint32, tab *crc32.Table) hash.Hash32 { return &digest{prev, tab} } - -func (d *digest) Size() int { return Size } - -func (d *digest) BlockSize() int { return 1 } - -func (d *digest) Reset() { d.crc = 0 } - -func (d *digest) Write(p []byte) (n int, err error) { - d.crc = crc32.Update(d.crc, d.tab, p) - return len(p), nil -} - -func (d *digest) Sum32() uint32 { return d.crc } - -func (d *digest) Sum(in []byte) []byte { - s := d.Sum32() - return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s)) -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go deleted file mode 100644 index 145886a1a0..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/fileutil.go +++ /dev/null @@ -1,75 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package fileutil implements utility functions related to files and paths. -package fileutil - -import ( - "io/ioutil" - "os" - "path" - "sort" - - "github.com/coreos/pkg/capnslog" -) - -const ( - privateFileMode = 0600 - // owner can make/remove files inside the directory - privateDirMode = 0700 -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "fileutil") -) - -// IsDirWriteable checks if dir is writable by writing and removing a file -// to dir. It returns nil if dir is writable. -func IsDirWriteable(dir string) error { - f := path.Join(dir, ".touch") - if err := ioutil.WriteFile(f, []byte(""), privateFileMode); err != nil { - return err - } - return os.Remove(f) -} - -// ReadDir returns the filenames in the given directory in sorted order. -func ReadDir(dirpath string) ([]string, error) { - dir, err := os.Open(dirpath) - if err != nil { - return nil, err - } - defer dir.Close() - names, err := dir.Readdirnames(-1) - if err != nil { - return nil, err - } - sort.Strings(names) - return names, nil -} - -// TouchDirAll is similar to os.MkdirAll. It creates directories with 0700 permission if any directory -// does not exist. TouchDirAll also ensures the given directory is writable. -func TouchDirAll(dir string) error { - err := os.MkdirAll(dir, privateDirMode) - if err != nil && err != os.ErrExist { - return err - } - return IsDirWriteable(dir) -} - -func Exist(name string) bool { - _, err := os.Stat(name) - return err == nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go deleted file mode 100644 index bf411d3a17..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -type Lock interface { - // Name returns the name of the file. - Name() string - // TryLock acquires exclusivity on the lock without blocking. - TryLock() error - // Lock acquires exclusivity on the lock. - Lock() error - // Unlock unlocks the lock. - Unlock() error - // Destroy should be called after Unlock to clean up - // the resources. - Destroy() error -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go deleted file mode 100644 index bd2bc86764..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_plan9.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "errors" - "os" - "syscall" - "time" -) - -var ( - ErrLocked = errors.New("file already locked") -) - -type lock struct { - fname string - file *os.File -} - -func (l *lock) Name() string { - return l.fname -} - -func (l *lock) TryLock() error { - err := os.Chmod(l.fname, syscall.DMEXCL|0600) - if err != nil { - return err - } - - f, err := os.Open(l.fname) - if err != nil { - return ErrLocked - } - - l.file = f - return nil -} - -func (l *lock) Lock() error { - err := os.Chmod(l.fname, syscall.DMEXCL|0600) - if err != nil { - return err - } - - for { - f, err := os.Open(l.fname) - if err == nil { - l.file = f - return nil - } - time.Sleep(10 * time.Millisecond) - } -} - -func (l *lock) Unlock() error { - return l.file.Close() -} - -func (l *lock) Destroy() error { - return nil -} - -func NewLock(file string) (Lock, error) { - l := &lock{fname: file} - return l, nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go deleted file mode 100644 index e3b0a01768..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_solaris.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build solaris - -package fileutil - -import ( - "errors" - "os" - "syscall" -) - -var ( - ErrLocked = errors.New("file already locked") -) - -type lock struct { - fd int - file *os.File -} - -func (l *lock) Name() string { - return l.file.Name() -} - -func (l *lock) TryLock() error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Pid = 0 - lock.Type = syscall.F_WRLCK - lock.Whence = 0 - lock.Pid = 0 - err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock) - if err != nil && err == syscall.EAGAIN { - return ErrLocked - } - return err -} - -func (l *lock) Lock() error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_WRLCK - lock.Whence = 0 - lock.Pid = 0 - return syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock) -} - -func (l *lock) Unlock() error { - var lock syscall.Flock_t - lock.Start = 0 - lock.Len = 0 - lock.Type = syscall.F_UNLCK - lock.Whence = 0 - err := syscall.FcntlFlock(uintptr(l.fd), syscall.F_SETLK, &lock) - if err != nil && err == syscall.EAGAIN { - return ErrLocked - } - return err -} - -func (l *lock) Destroy() error { - return l.file.Close() -} - -func NewLock(file string) (Lock, error) { - f, err := os.OpenFile(file, os.O_WRONLY, 0600) - if err != nil { - return nil, err - } - l := &lock{int(f.Fd()), f} - return l, nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go deleted file mode 100644 index 4f90e42ace..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_unix.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !windows,!plan9,!solaris - -package fileutil - -import ( - "errors" - "os" - "syscall" -) - -var ( - ErrLocked = errors.New("file already locked") -) - -type lock struct { - fd int - file *os.File -} - -func (l *lock) Name() string { - return l.file.Name() -} - -func (l *lock) TryLock() error { - err := syscall.Flock(l.fd, syscall.LOCK_EX|syscall.LOCK_NB) - if err != nil && err == syscall.EWOULDBLOCK { - return ErrLocked - } - return err -} - -func (l *lock) Lock() error { - return syscall.Flock(l.fd, syscall.LOCK_EX) -} - -func (l *lock) Unlock() error { - return syscall.Flock(l.fd, syscall.LOCK_UN) -} - -func (l *lock) Destroy() error { - return l.file.Close() -} - -func NewLock(file string) (Lock, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - l := &lock{int(f.Fd()), f} - return l, nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go deleted file mode 100644 index ddca9a6695..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/lock_windows.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build windows - -package fileutil - -import ( - "errors" - "os" -) - -var ( - ErrLocked = errors.New("file already locked") -) - -type lock struct { - fd int - file *os.File -} - -func (l *lock) Name() string { - return l.file.Name() -} - -func (l *lock) TryLock() error { - return nil -} - -func (l *lock) Lock() error { - return nil -} - -func (l *lock) Unlock() error { - return nil -} - -func (l *lock) Destroy() error { - return l.file.Close() -} - -func NewLock(file string) (Lock, error) { - f, err := os.Open(file) - if err != nil { - return nil, err - } - l := &lock{int(f.Fd()), f} - return l, nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go deleted file mode 100644 index c1a952bb79..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/perallocate_unsupported.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build !linux - -package fileutil - -import "os" - -// Preallocate tries to allocate the space for given -// file. This operation is only supported on linux by a -// few filesystems (btrfs, ext4, etc.). -// If the operation is unsupported, no error will be returned. -// Otherwise, the error encountered will be returned. -func Preallocate(f *os.File, sizeInBytes int) error { - return nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go deleted file mode 100644 index c4bd4f4c81..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/preallocate.go +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package fileutil - -import ( - "os" - "syscall" -) - -// Preallocate tries to allocate the space for given -// file. This operation is only supported on linux by a -// few filesystems (btrfs, ext4, etc.). -// If the operation is unsupported, no error will be returned. 
-// Otherwise, the error encountered will be returned. -func Preallocate(f *os.File, sizeInBytes int) error { - // use mode = 1 to keep size - // see FALLOC_FL_KEEP_SIZE - err := syscall.Fallocate(int(f.Fd()), 1, 0, int64(sizeInBytes)) - if err != nil { - errno, ok := err.(syscall.Errno) - // treat not support as nil error - if ok && errno == syscall.ENOTSUP { - return nil - } - return err - } - return nil -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go deleted file mode 100644 index 375aa97197..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/purge.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package fileutil - -import ( - "os" - "path" - "sort" - "strings" - "time" -) - -func PurgeFile(dirname string, suffix string, max uint, interval time.Duration, stop <-chan struct{}) <-chan error { - errC := make(chan error, 1) - go func() { - for { - fnames, err := ReadDir(dirname) - if err != nil { - errC <- err - return - } - newfnames := make([]string, 0) - for _, fname := range fnames { - if strings.HasSuffix(fname, suffix) { - newfnames = append(newfnames, fname) - } - } - sort.Strings(newfnames) - for len(newfnames) > int(max) { - f := path.Join(dirname, newfnames[0]) - l, err := NewLock(f) - if err != nil { - errC <- err - return - } - err = l.TryLock() - if err != nil { - break - } - err = os.Remove(f) - if err != nil { - errC <- err - return - } - err = l.Unlock() - if err != nil { - plog.Errorf("error unlocking %s when purging file (%v)", l.Name(), err) - errC <- err - return - } - err = l.Destroy() - if err != nil { - plog.Errorf("error destroying lock %s when purging file (%v)", l.Name(), err) - errC <- err - return - } - plog.Infof("purged file %s successfully", f) - newfnames = newfnames[1:] - } - select { - case <-time.After(interval): - case <-stop: - return - } - } - }() - return errC -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go deleted file mode 100644 index cd7fff08f6..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync.go +++ /dev/null @@ -1,26 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -// +build !linux - -package fileutil - -import "os" - -// Fdatasync is similar to fsync(), but does not flush modified metadata -// unless that metadata is needed in order to allow a subsequent data retrieval -// to be correctly handled. -func Fdatasync(f *os.File) error { - return f.Sync() -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go b/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go deleted file mode 100644 index 14c4b4808e..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/fileutil/sync_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2016 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// +build linux - -package fileutil - -import ( - "os" - "syscall" -) - -// Fdatasync is similar to fsync(), but does not flush modified metadata -// unless that metadata is needed in order to allow a subsequent data retrieval -// to be correctly handled. -func Fdatasync(f *os.File) error { - return syscall.Fdatasync(int(f.Fd())) -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/idutil/id.go b/vendor/src/github.com/coreos/etcd/pkg/idutil/id.go deleted file mode 100644 index 6f1d379112..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/idutil/id.go +++ /dev/null @@ -1,78 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package idutil implements utility functions for generating unique, -// randomized ids. -package idutil - -import ( - "math" - "sync" - "time" -) - -const ( - tsLen = 5 * 8 - cntLen = 8 - suffixLen = tsLen + cntLen -) - -// Generator generates unique identifiers based on counters, timestamps, and -// a node member ID. -// -// The initial id is in this format: -// The high order 2 bytes are the memberID, the next 5 bytes are from the -// timestamp, and the low order byte starts at 0. -// | prefix | suffix | -// | 2 bytes | 5 bytes | 1 byte | -// | memberID | timestamp | cnt | -// -// The 5-byte timestamp differs as long as the machine restarts -// after at least 1 ms and within 35 years. -// -// It increases the suffix to generate the next id. -// The count field may overflow into the timestamp field, which is intentional. -// It helps to extend the event window to 2^56. This doesn't break the -// guarantee that ids generated after a restart are unique, because etcd -// throughput is << 256 req/ms (250k reqs/second). 
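As a small, self-contained illustration of the id layout described in the comment above: `firstID` below is a hypothetical helper written for this sketch, not part of the package, and it reproduces only the initial-id computation (member id shifted into the high 2 bytes, the low 40 bits of the millisecond timestamp in the middle, a zero counter byte at the bottom).

```
package main

import (
	"fmt"
	"time"
)

// layout: | 2 bytes memberID | 5 bytes timestamp (ms) | 1 byte counter |
func firstID(memberID uint16, now time.Time) uint64 {
	ms := uint64(now.UnixNano()) / uint64(time.Millisecond)
	ts := ms & ((1 << 40) - 1) // keep the low 5 bytes of the timestamp
	return uint64(memberID)<<48 | ts<<8
}

func main() {
	id := firstID(0x1234, time.Now())
	fmt.Printf("%016x\n", id) // 1234 prefix, then timestamp, then a 00 counter byte
}
```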
-type Generator struct { - mu sync.Mutex - // high order 2 bytes - prefix uint64 - // low order 6 bytes - suffix uint64 -} - -func NewGenerator(memberID uint16, now time.Time) *Generator { - prefix := uint64(memberID) << suffixLen - unixMilli := uint64(now.UnixNano()) / uint64(time.Millisecond/time.Nanosecond) - suffix := lowbit(unixMilli, tsLen) << cntLen - return &Generator{ - prefix: prefix, - suffix: suffix, - } -} - -// Next generates an id that is unique. -func (g *Generator) Next() uint64 { - g.mu.Lock() - defer g.mu.Unlock() - g.suffix++ - id := g.prefix | lowbit(g.suffix, suffixLen) - return id -} - -func lowbit(x uint64, n uint) uint64 { - return x & (math.MaxUint64 >> (64 - n)) -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/pathutil/path.go b/vendor/src/github.com/coreos/etcd/pkg/pathutil/path.go deleted file mode 100644 index f26254ba93..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/pathutil/path.go +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright 2009 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package pathutil implements utility functions for handling slash-separated -// paths. -package pathutil - -import "path" - -// CanonicalURLPath returns the canonical url path for p, which follows the rules: -// 1. the path always starts with "/" -// 2. replace multiple slashes with a single slash -// 3. replace each '.' or '..' path name element with its equivalent -// 4. keep the trailing slash -// The function is borrowed from stdlib http.cleanPath in server.go. -func CanonicalURLPath(p string) string { - if p == "" { - return "/" - } - if p[0] != '/' { - p = "/" + p - } - np := path.Clean(p) - // path.Clean removes trailing slash except for root, - // put the trailing slash back if necessary. - if p[len(p)-1] == '/' && np != "/" { - np += "/" - } - return np -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go b/vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go deleted file mode 100644 index 9d640a8af6..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/pbutil/pbutil.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package pbutil defines interfaces for handling Protocol Buffer objects. 
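A brief sketch of how the `MustMarshal` and `MustUnmarshal` helpers defined in the file below are typically used: the `rawMsg` type here is a hypothetical stand-in for a real protobuf message satisfying the `Marshaler` and `Unmarshaler` interfaces.

```
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/pbutil"
)

// rawMsg is a toy type satisfying pbutil.Marshaler and pbutil.Unmarshaler.
type rawMsg struct{ b []byte }

func (m *rawMsg) Marshal() ([]byte, error) { return m.b, nil }
func (m *rawMsg) Unmarshal(b []byte) error { m.b = append([]byte(nil), b...); return nil }

func main() {
	src := &rawMsg{b: []byte("hello")}
	data := pbutil.MustMarshal(src) // panics (via plog) if Marshal fails

	var dst rawMsg
	pbutil.MustUnmarshal(&dst, data)
	fmt.Println(string(dst.b))
}
```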
-package pbutil - -import "github.com/coreos/pkg/capnslog" - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd/pkg", "flags") -) - -type Marshaler interface { - Marshal() (data []byte, err error) -} - -type Unmarshaler interface { - Unmarshal(data []byte) error -} - -func MustMarshal(m Marshaler) []byte { - d, err := m.Marshal() - if err != nil { - plog.Panicf("marshal should never fail (%v)", err) - } - return d -} - -func MustUnmarshal(um Unmarshaler, data []byte) { - if err := um.Unmarshal(data); err != nil { - plog.Panicf("unmarshal should never fail (%v)", err) - } -} - -func MaybeUnmarshal(um Unmarshaler, data []byte) bool { - if err := um.Unmarshal(data); err != nil { - return false - } - return true -} - -func GetBool(v *bool) (vv bool, set bool) { - if v == nil { - return false, false - } - return *v, true -} - -func Boolp(b bool) *bool { return &b } diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/doc.go b/vendor/src/github.com/coreos/etcd/pkg/types/doc.go deleted file mode 100644 index 04b4c38d1c..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/doc.go +++ /dev/null @@ -1,17 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package types declares various data types and implements type-checking -// functions. -package types diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/id.go b/vendor/src/github.com/coreos/etcd/pkg/types/id.go deleted file mode 100644 index 88cb9e6349..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/id.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "strconv" -) - -// ID represents a generic identifier which is canonically -// stored as a uint64 but is typically represented as a -// base-16 string for input/output -type ID uint64 - -func (i ID) String() string { - return strconv.FormatUint(uint64(i), 16) -} - -// IDFromString attempts to create an ID from a base-16 string. 
-func IDFromString(s string) (ID, error) { - i, err := strconv.ParseUint(s, 16, 64) - return ID(i), err -} - -// IDSlice implements the sort interface -type IDSlice []ID - -func (p IDSlice) Len() int { return len(p) } -func (p IDSlice) Less(i, j int) bool { return uint64(p[i]) < uint64(p[j]) } -func (p IDSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/set.go b/vendor/src/github.com/coreos/etcd/pkg/types/set.go deleted file mode 100644 index bb997174c7..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/set.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "reflect" - "sort" - "sync" -) - -type Set interface { - Add(string) - Remove(string) - Contains(string) bool - Equals(Set) bool - Length() int - Values() []string - Copy() Set - Sub(Set) Set -} - -func NewUnsafeSet(values ...string) *unsafeSet { - set := &unsafeSet{make(map[string]struct{})} - for _, v := range values { - set.Add(v) - } - return set -} - -func NewThreadsafeSet(values ...string) *tsafeSet { - us := NewUnsafeSet(values...) - return &tsafeSet{us, sync.RWMutex{}} -} - -type unsafeSet struct { - d map[string]struct{} -} - -// Add adds a new value to the set (no-op if the value is already present) -func (us *unsafeSet) Add(value string) { - us.d[value] = struct{}{} -} - -// Remove removes the given value from the set -func (us *unsafeSet) Remove(value string) { - delete(us.d, value) -} - -// Contains returns whether the set contains the given value -func (us *unsafeSet) Contains(value string) (exists bool) { - _, exists = us.d[value] - return -} - -// ContainsAll returns whether the set contains all given values -func (us *unsafeSet) ContainsAll(values []string) bool { - for _, s := range values { - if !us.Contains(s) { - return false - } - } - return true -} - -// Equals returns whether the contents of two sets are identical -func (us *unsafeSet) Equals(other Set) bool { - v1 := sort.StringSlice(us.Values()) - v2 := sort.StringSlice(other.Values()) - v1.Sort() - v2.Sort() - return reflect.DeepEqual(v1, v2) -} - -// Length returns the number of elements in the set -func (us *unsafeSet) Length() int { - return len(us.d) -} - -// Values returns the values of the Set in an unspecified order. 
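For illustration, a minimal usage sketch of the `Set` interface defined in this file, using the `NewUnsafeSet` constructor from above; the values are arbitrary.

```
package main

import (
	"fmt"

	"github.com/coreos/etcd/pkg/types"
)

func main() {
	a := types.NewUnsafeSet("x", "y", "z")
	b := types.NewUnsafeSet("y")

	fmt.Println(a.Contains("x"))   // true
	fmt.Println(a.Sub(b).Values()) // "x" and "z", in unspecified order
	fmt.Println(a.Length())        // 3; Sub works on a copy, so a is unchanged
}
```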
-func (us *unsafeSet) Values() (values []string) { - values = make([]string, 0) - for val := range us.d { - values = append(values, val) - } - return -} - -// Copy creates a new Set containing the values of the first -func (us *unsafeSet) Copy() Set { - cp := NewUnsafeSet() - for val := range us.d { - cp.Add(val) - } - - return cp -} - -// Sub removes all elements in other from the set -func (us *unsafeSet) Sub(other Set) Set { - oValues := other.Values() - result := us.Copy().(*unsafeSet) - - for _, val := range oValues { - if _, ok := result.d[val]; !ok { - continue - } - delete(result.d, val) - } - - return result -} - -type tsafeSet struct { - us *unsafeSet - m sync.RWMutex -} - -func (ts *tsafeSet) Add(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Add(value) -} - -func (ts *tsafeSet) Remove(value string) { - ts.m.Lock() - defer ts.m.Unlock() - ts.us.Remove(value) -} - -func (ts *tsafeSet) Contains(value string) (exists bool) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Contains(value) -} - -func (ts *tsafeSet) Equals(other Set) bool { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Equals(other) -} - -func (ts *tsafeSet) Length() int { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Length() -} - -func (ts *tsafeSet) Values() (values []string) { - ts.m.RLock() - defer ts.m.RUnlock() - return ts.us.Values() -} - -func (ts *tsafeSet) Copy() Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Copy().(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} - -func (ts *tsafeSet) Sub(other Set) Set { - ts.m.RLock() - defer ts.m.RUnlock() - usResult := ts.us.Sub(other).(*unsafeSet) - return &tsafeSet{usResult, sync.RWMutex{}} -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/slice.go b/vendor/src/github.com/coreos/etcd/pkg/types/slice.go deleted file mode 100644 index 0327950f70..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/slice.go +++ /dev/null @@ -1,22 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -// Uint64Slice implements sort interface -type Uint64Slice []uint64 - -func (p Uint64Slice) Len() int { return len(p) } -func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/urls.go b/vendor/src/github.com/coreos/etcd/pkg/types/urls.go deleted file mode 100644 index ce2483ffaa..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/urls.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "errors" - "fmt" - "net" - "net/url" - "sort" - "strings" -) - -type URLs []url.URL - -func NewURLs(strs []string) (URLs, error) { - all := make([]url.URL, len(strs)) - if len(all) == 0 { - return nil, errors.New("no valid URLs given") - } - for i, in := range strs { - in = strings.TrimSpace(in) - u, err := url.Parse(in) - if err != nil { - return nil, err - } - if u.Scheme != "http" && u.Scheme != "https" { - return nil, fmt.Errorf("URL scheme must be http or https: %s", in) - } - if _, _, err := net.SplitHostPort(u.Host); err != nil { - return nil, fmt.Errorf(`URL address does not have the form "host:port": %s`, in) - } - if u.Path != "" { - return nil, fmt.Errorf("URL must not contain a path: %s", in) - } - all[i] = *u - } - us := URLs(all) - us.Sort() - - return us, nil -} - -func (us URLs) String() string { - return strings.Join(us.StringSlice(), ",") -} - -func (us *URLs) Sort() { - sort.Sort(us) -} -func (us URLs) Len() int { return len(us) } -func (us URLs) Less(i, j int) bool { return us[i].String() < us[j].String() } -func (us URLs) Swap(i, j int) { us[i], us[j] = us[j], us[i] } - -func (us URLs) StringSlice() []string { - out := make([]string, len(us)) - for i := range us { - out[i] = us[i].String() - } - - return out -} diff --git a/vendor/src/github.com/coreos/etcd/pkg/types/urlsmap.go b/vendor/src/github.com/coreos/etcd/pkg/types/urlsmap.go deleted file mode 100644 index 4fe9218c74..0000000000 --- a/vendor/src/github.com/coreos/etcd/pkg/types/urlsmap.go +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package types - -import ( - "fmt" - "sort" - "strings" -) - -// URLsMap is a map from a name to its URLs. -type URLsMap map[string]URLs - -// NewURLsMap returns a URLsMap instantiated from the given string, -// which consists of discovery-formatted names-to-URLs, like: -// mach0=http://1.1.1.1:2380,mach0=http://2.2.2.2::2380,mach1=http://3.3.3.3:2380,mach2=http://4.4.4.4:2380 -func NewURLsMap(s string) (URLsMap, error) { - m := parse(s) - - cl := URLsMap{} - for name, urls := range m { - us, err := NewURLs(urls) - if err != nil { - return nil, err - } - cl[name] = us - } - return cl, nil -} - -// String turns URLsMap into discovery-formatted name-to-URLs sorted by name. -func (c URLsMap) String() string { - var pairs []string - for name, urls := range c { - for _, url := range urls { - pairs = append(pairs, fmt.Sprintf("%s=%s", name, url.String())) - } - } - sort.Strings(pairs) - return strings.Join(pairs, ",") -} - -// URLs returns a list of all URLs. 
-// The returned list is sorted in ascending lexicographical order. -func (c URLsMap) URLs() []string { - var urls []string - for _, us := range c { - for _, u := range us { - urls = append(urls, u.String()) - } - } - sort.Strings(urls) - return urls -} - -// Len returns the size of URLsMap. -func (c URLsMap) Len() int { - return len(c) -} - -// parse parses the given string and returns a map listing the values specified for each key. -func parse(s string) map[string][]string { - m := make(map[string][]string) - for s != "" { - key := s - if i := strings.IndexAny(key, ","); i >= 0 { - key, s = key[:i], key[i+1:] - } else { - s = "" - } - if key == "" { - continue - } - value := "" - if i := strings.Index(key, "="); i >= 0 { - key, value = key[:i], key[i+1:] - } - m[key] = append(m[key], value) - } - return m -} diff --git a/vendor/src/github.com/coreos/etcd/raft/design.md b/vendor/src/github.com/coreos/etcd/raft/design.md deleted file mode 100644 index 7bc0531dce..0000000000 --- a/vendor/src/github.com/coreos/etcd/raft/design.md +++ /dev/null @@ -1,57 +0,0 @@ -## Progress - -Progress represents a follower’s progress in the view of the leader. The leader maintains the progresses of all followers, and sends `replication message` to the follower based on its progress. - -`replication message` is a `msgApp` with log entries. - -A progress has two attributes: `match` and `next`. `match` is the index of the highest known matched entry. If the leader knows nothing about the follower’s replication status, `match` is set to zero. `next` is the index of the first entry that will be replicated to the follower. The leader puts entries from `next` up to its latest entry in the next `replication message`. - -A progress is in one of three states: `probe`, `replicate`, `snapshot`. - -``` - +--------------------------------------------------------+ - | send snapshot | - | | - +---------+----------+ +----------v---------+ - +---> probe | | snapshot | - | | max inflight = 1 <----------------------------------+ max inflight = 0 | - | +---------+----------+ +--------------------+ - | | 1. snapshot success - | | (next=snapshot.index + 1) - | | 2. snapshot failure - | | (no change) - | | 3. receives msgAppResp(rej=false&&index>lastsnap.index) - | | (match=m.index,next=match+1) -receives msgAppResp(rej=true) -(next=match+1)| | - | | - | | - | | receives msgAppResp(rej=false&&index>match) - | | (match=m.index,next=match+1) - | | - | | - | | - | +---------v----------+ - | | replicate | - +---+ max inflight = n | - +--------------------+ -``` - -When the progress of a follower is in `probe` state, the leader sends at most one `replication message` per heartbeat interval. The leader sends the `replication message` slowly, probing the actual progress of the follower. A `msgHeartbeatResp` or a `msgAppResp` with reject might trigger the sending of the next `replication message`. - -When the progress of a follower is in `replicate` state, the leader sends the `replication message`, then optimistically increases `next` to the latest entry sent. This is an optimized state for replicating log entries to the follower quickly. - -When the progress of a follower is in `snapshot` state, the leader stops sending any `replication message`. - -A newly elected leader sets the progresses of all the followers to `probe` state with `match` = 0 and `next` = last index. The leader slowly (at most once per heartbeat) sends `replication message` to the follower and probes its progress. 
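To make these transitions concrete, here is a minimal, hypothetical sketch of a follower progress tracker; the `Progress` type and `OnAppResp` method below are illustrative only and are not the actual types used by this package.

```
package main

import "fmt"

// StateType mirrors the three states described above.
type StateType int

const (
	StateProbe StateType = iota
	StateReplicate
	StateSnapshot
)

// Progress tracks a single follower, as seen by the leader.
type Progress struct {
	Match, Next uint64 // highest known matched index; next index to send
	State       StateType
}

// OnAppResp applies a msgAppResp following the transitions in the diagram:
// a non-rejection with index > match moves toward (or stays in) replicate;
// a rejection falls back to probe with next aggressively reset to match+1.
func (pr *Progress) OnAppResp(rejected bool, index uint64) {
	if rejected {
		pr.State = StateProbe
		pr.Next = pr.Match + 1
		return
	}
	if index > pr.Match {
		pr.Match = index
		pr.Next = index + 1
		pr.State = StateReplicate
	}
}

func main() {
	pr := &Progress{Next: 10, State: StateProbe} // newly elected leader: match=0, next=last index
	pr.OnAppResp(false, 12)                      // follower matched index 12
	fmt.Printf("state=%v match=%d next=%d\n", pr.State, pr.Match, pr.Next)
}
```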
-
-A progress changes to `replicate` when the follower replies with a non-rejection `msgAppResp`, which implies that it has matched the index sent. At this point, the leader starts to stream log entries to the follower fast. The progress will fall back to `probe` when the follower replies with a rejection `msgAppResp` or the link layer reports the follower is unreachable. We aggressively reset `next` to `match`+1 since, if we receive any `msgAppResp` soon, both `match` and `next` will increase directly to the `index` in the `msgAppResp`. (We might end up sending some duplicate entries when we aggressively reset `next` too low; see the open question.)
-
-A progress changes from `probe` to `snapshot` when the follower falls very far behind and requires a snapshot. After sending `msgSnap`, the leader waits until the snapshot it sent succeeds, fails, or is aborted. The progress will go back to `probe` after the sending result is applied.
-
-### Flow Control
-
-1. Limit the max size of each message sent. The max should be configurable.
-This lowers the cost of the probing state, since we limit the size per message; it also lowers the penalty of aggressively decreasing `next` to a value that is too low.
-
-2. Limit the number of in-flight messages to less than N when in `replicate` state. N should be configurable. Most implementations will have a sending buffer on top of the actual network transport layer (so the raft node is not blocked). We want to make sure raft does not overflow that buffer, which could cause messages to be dropped and trigger repeated, unnecessary resending.
diff --git a/vendor/src/github.com/coreos/etcd/raft/doc.go b/vendor/src/github.com/coreos/etcd/raft/doc.go
deleted file mode 100644
index 6ed0bcfadb..0000000000
--- a/vendor/src/github.com/coreos/etcd/raft/doc.go
+++ /dev/null
@@ -1,293 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-/*
-Package raft sends and receives messages in the Protocol Buffer format
-defined in the raftpb package.
-
-Raft is a protocol with which a cluster of nodes can maintain a replicated state machine.
-The state machine is kept in sync through the use of a replicated log.
-For more details on Raft, see "In Search of an Understandable Consensus Algorithm"
-(https://ramcloud.stanford.edu/raft.pdf) by Diego Ongaro and John Ousterhout.
-
-A simple example application, _raftexample_, is also available to help illustrate
-how to use this package in practice:
-https://github.com/coreos/etcd/tree/master/contrib/raftexample
-
-Usage
-
-The primary object in raft is a Node. You either start a Node from scratch
-using raft.StartNode or start a Node from some initial state using raft.RestartNode.
- -To start a node from scratch: - - storage := raft.NewMemoryStorage() - c := &Config{ - ID: 0x01, - ElectionTick: 10, - HeartbeatTick: 1, - Storage: storage, - MaxSizePerMsg: 4096, - MaxInflightMsgs: 256, - } - n := raft.StartNode(c, []raft.Peer{{ID: 0x02}, {ID: 0x03}}) - -To restart a node from previous state: - - storage := raft.NewMemoryStorage() - - // recover the in-memory storage from persistent - // snapshot, state and entries. - storage.ApplySnapshot(snapshot) - storage.SetHardState(state) - storage.Append(entries) - - c := &Config{ - ID: 0x01, - ElectionTick: 10, - HeartbeatTick: 1, - Storage: storage, - MaxSizePerMsg: 4096, - MaxInflightMsgs: 256, - } - - // restart raft without peer information. - // peer information is already included in the storage. - n := raft.RestartNode(c) - -Now that you are holding onto a Node you have a few responsibilities: - -First, you must read from the Node.Ready() channel and process the updates -it contains. These steps may be performed in parallel, except as noted in step -2. - -1. Write HardState, Entries, and Snapshot to persistent storage if they are -not empty. Note that when writing an Entry with Index i, any -previously-persisted entries with Index >= i must be discarded. - -2. Send all Messages to the nodes named in the To field. It is important that -no messages be sent until after the latest HardState has been persisted to disk, -and all Entries written by any previous Ready batch (Messages may be sent while -entries from the same batch are being persisted). To reduce the I/O latency, an -optimization can be applied to make leader write to disk in parallel with its -followers (as explained at section 10.2.1 in Raft thesis). If any Message has type -MsgSnap, call Node.ReportSnapshot() after it has been sent (these messages may be -large). - -Note: Marshalling messages is not thread-safe; it is important that you -make sure that no new entries are persisted while marshalling. -The easiest way to achieve this is to serialise the messages directly inside -your main raft loop. - -3. Apply Snapshot (if any) and CommittedEntries to the state machine. -If any committed Entry has Type EntryConfChange, call Node.ApplyConfChange() -to apply it to the node. The configuration change may be cancelled at this point -by setting the NodeID field to zero before calling ApplyConfChange -(but ApplyConfChange must be called one way or the other, and the decision to cancel -must be based solely on the state machine and not external information such as -the observed health of the node). - -4. Call Node.Advance() to signal readiness for the next batch of updates. -This may be done at any time after step 1, although all updates must be processed -in the order they were returned by Ready. - -Second, all persisted log entries must be made available via an -implementation of the Storage interface. The provided MemoryStorage -type can be used for this (if you repopulate its state upon a -restart), or you can supply your own disk-backed implementation. - -Third, when you receive a message from another node, pass it to Node.Step: - - func recvRaftRPC(ctx context.Context, m raftpb.Message) { - n.Step(ctx, m) - } - -Finally, you need to call Node.Tick() at regular intervals (probably -via a time.Ticker). Raft has two important timeouts: heartbeat and the -election timeout. However, internally to the raft package time is -represented by an abstract "tick". 
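For the clock-driving step alone, a minimal sketch (assuming a Node `n` as constructed above, an arbitrary 100ms tick interval, and an import of "time") could forward ticker events from a dedicated goroutine; the full handling loop shown next folds the same call into its select statement instead:

	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()
	go func() {
		for range ticker.C {
			n.Tick() // advance raft's abstract clock by one tick
		}
	}()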
-
-The total state machine handling loop will look something like this:
-
-  for {
-    select {
-    case <-s.Ticker:
-      n.Tick()
-    case rd := <-s.Node.Ready():
-      saveToStorage(rd.State, rd.Entries, rd.Snapshot)
-      send(rd.Messages)
-      if !raft.IsEmptySnap(rd.Snapshot) {
-        processSnapshot(rd.Snapshot)
-      }
-      for _, entry := range rd.CommittedEntries {
-        process(entry)
-        if entry.Type == raftpb.EntryConfChange {
-          var cc raftpb.ConfChange
-          cc.Unmarshal(entry.Data)
-          s.Node.ApplyConfChange(cc)
-        }
-      }
-      s.Node.Advance()
-    case <-s.done:
-      return
-    }
-  }
-
-To propose changes to the state machine from your node, take your application
-data, serialize it into a byte slice, and call:
-
-  n.Propose(ctx, data)
-
-If the proposal is committed, data will appear in committed entries with type
-raftpb.EntryNormal. There is no guarantee that a proposed command will be
-committed; you may have to re-propose after a timeout.
-
-To add or remove a node in a cluster, build a ConfChange struct 'cc' and call:
-
-  n.ProposeConfChange(ctx, cc)
-
-After the config change is committed, some committed entry with type
-raftpb.EntryConfChange will be returned. You must apply it to the node through:
-
-  var cc raftpb.ConfChange
-  cc.Unmarshal(data)
-  n.ApplyConfChange(cc)
-
-Note: An ID represents a unique node in a cluster for all time. A
-given ID MUST be used only once even if the old node has been removed.
-This means that for example IP addresses make poor node IDs since they
-may be reused. Node IDs must be non-zero.
-
-Implementation notes
-
-This implementation is up to date with the final Raft thesis
-(https://ramcloud.stanford.edu/~ongaro/thesis.pdf), although our
-implementation of the membership change protocol differs somewhat from
-that described in chapter 4. The key invariant that membership changes
-happen one node at a time is preserved, but in our implementation the
-membership change takes effect when its entry is applied, not when it
-is added to the log (so the entry is committed under the old
-membership instead of the new). This is equivalent in terms of safety,
-since the old and new configurations are guaranteed to overlap.
-
-To ensure that we do not attempt to commit two membership changes at
-once by matching log positions (which would be unsafe since they
-should have different quorum requirements), we simply disallow any
-proposed membership change while any uncommitted change appears in
-the leader's log.
-
-This approach introduces a problem when you try to remove a member
-from a two-member cluster: If one of the members dies before the
-other one receives the commit of the confchange entry, then the member
-cannot be removed any more since the cluster cannot make progress.
-For this reason it is highly recommended to use three or more nodes in
-every cluster.
-
-MessageType
-
-Package raft sends and receives messages in Protocol Buffer format (defined
-in the raftpb package). Each state (follower, candidate, leader) implements its
-own 'step' method ('stepFollower', 'stepCandidate', 'stepLeader') when
-advancing with the given raftpb.Message. Each step is determined by its
-raftpb.MessageType. Note that every step is checked by one common method
-'Step' that safety-checks the terms of the node and the incoming message to
-prevent stale log entries:
-
-	'MsgHup' is used for election. If a node is a follower or candidate, the
-	'tick' function in the 'raft' struct is set to 'tickElection'.
-	If a follower or candidate has not received any heartbeat before the
-	election timeout, it passes 'MsgHup' to its Step method and becomes
-	(or remains) a candidate to start a new election.
-
-	'MsgBeat' is an internal type that signals the leader to send a heartbeat
-	of the 'MsgHeartbeat' type. If a node is a leader, the 'tick' function in
-	the 'raft' struct is set to 'tickHeartbeat', and the leader periodically
-	sends heartbeat messages of the 'MsgHeartbeat' type to its followers.
-
-	'MsgProp' proposes to append data to its log entries. This is a special
-	type to redirect proposals to the leader. Therefore, the send method
-	overwrites raftpb.Message's term with its HardState's term to avoid
-	attaching its local term to 'MsgProp'. When 'MsgProp' is passed to the
-	leader's 'Step' method, the leader first calls the 'appendEntry' method
-	to append entries to its log, and then calls the 'bcastAppend' method to
-	send those entries to its peers. When passed to a candidate, 'MsgProp' is
-	dropped. When passed to a follower, 'MsgProp' is stored in the follower's
-	mailbox (msgs) by the send method. It is stored with the sender's ID and
-	later forwarded to the leader by the rafthttp package.
-
-	'MsgApp' contains log entries to replicate. A leader calls bcastAppend,
-	which calls sendAppend, which sends soon-to-be-replicated logs in messages
-	of the 'MsgApp' type. When 'MsgApp' is passed to a candidate's Step method,
-	the candidate reverts to follower, because it indicates that there is a
-	valid leader sending 'MsgApp' messages. Candidates and followers respond
-	to this message with a 'MsgAppResp'.
-
-	'MsgAppResp' is the response to a log replication request ('MsgApp'). When
-	'MsgApp' is passed to a candidate's or follower's Step method, it responds
-	by calling the 'handleAppendEntries' method, which sends a 'MsgAppResp' to
-	the raft mailbox.
-
-	'MsgVote' requests votes for an election. When a node is a follower or
-	candidate and 'MsgHup' is passed to its Step method, the node calls the
-	'campaign' method to campaign to become the leader. Once the 'campaign'
-	method is called, the node becomes a candidate and sends 'MsgVote' to
-	peers in the cluster to request votes. When passed to a leader's or
-	candidate's Step method with a Term lower than the leader's or candidate's,
-	'MsgVote' will be rejected ('MsgVoteResp' is returned with Reject set to
-	true). If a leader or candidate receives a 'MsgVote' with a higher term,
-	it reverts to follower. When 'MsgVote' is passed to a follower, it votes
-	for the sender only when the sender's log is at least as up-to-date as its
-	own: the sender's last log term is greater than the follower's, or the
-	last log terms are equal and the sender's last log index is greater than
-	or equal to the follower's.
-
-	'MsgVoteResp' contains responses to a vote request. When 'MsgVoteResp' is
-	passed to a candidate, the candidate calculates how many votes it has won.
-	If it is more than a majority (quorum), it becomes the leader and calls
-	'bcastAppend'. If a candidate receives a majority of denial votes, it
-	reverts to follower.
-
-	'MsgSnap' requests the installation of a snapshot. When a node has just
-	become the leader, or the leader receives a 'MsgProp' message, it calls
-	the 'bcastAppend' method, which then calls the 'sendAppend' method for
-	each follower. In 'sendAppend', if the leader fails to get the term or
-	entries, it requests a snapshot by sending a 'MsgSnap' message.
-
-	'MsgSnapStatus' reports the result of a snapshot installation.
-	When a follower rejects 'MsgSnap', it indicates that the snapshot request
-	failed, usually because network issues prevented the network layer from
-	delivering the snapshot to the follower. The leader then considers the
-	follower's progress to be in the probe state. When 'MsgSnap' is not
-	rejected, it indicates that the snapshot succeeded, and the leader sets
-	the follower's progress back to probe and resumes its log replication.
-
-	'MsgHeartbeat' sends a heartbeat from the leader. When 'MsgHeartbeat' is
-	passed to a candidate and the message's term is higher than the candidate's,
-	the candidate reverts to follower, updates its committed index from the
-	one in this heartbeat, and sends the message to its mailbox. When
-	'MsgHeartbeat' is passed to a follower's Step method and the message's
-	term is higher than the follower's, the follower updates its leaderID
-	with the ID from the message.
-
-	'MsgHeartbeatResp' is a response to 'MsgHeartbeat'. When 'MsgHeartbeatResp'
-	is passed to the leader's Step method, the leader knows which follower
-	responded. Only when the leader's last committed index is greater than the
-	follower's Match index does the leader run the 'sendAppend' method.
-
-	'MsgUnreachable' reports that a request (message) wasn't delivered. When
-	'MsgUnreachable' is passed to the leader's Step method, the leader discovers
-	that the follower that sent this 'MsgUnreachable' is not reachable, often
-	indicating that a 'MsgApp' was lost. When the follower's progress state is
-	replicate, the leader sets it back to probe.
-
-*/
-package raft
diff --git a/vendor/src/github.com/coreos/etcd/raft/log.go b/vendor/src/github.com/coreos/etcd/raft/log.go
deleted file mode 100644
index 99cd1b31ef..0000000000
--- a/vendor/src/github.com/coreos/etcd/raft/log.go
+++ /dev/null
@@ -1,361 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"fmt"
-	"log"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type raftLog struct {
-	// storage contains all stable entries since the last snapshot.
-	storage Storage
-
-	// unstable contains all unstable entries and snapshot.
-	// they will be saved into storage.
-	unstable unstable
-
-	// committed is the highest log position that is known to be in
-	// stable storage on a quorum of nodes.
-	committed uint64
-	// applied is the highest log position that the application has
-	// been instructed to apply to its state machine.
-	// Invariant: applied <= committed
-	applied uint64
-
-	logger Logger
-}
-
-// newLog returns a log using the given storage. It recovers the log to the
-// state where it has just committed and applied the latest snapshot.
-func newLog(storage Storage, logger Logger) *raftLog {
-	if storage == nil {
-		log.Panic("storage must not be nil")
-	}
-	log := &raftLog{
-		storage: storage,
-		logger:  logger,
-	}
-	firstIndex, err := storage.FirstIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	lastIndex, err := storage.LastIndex()
-	if err != nil {
-		panic(err) // TODO(bdarnell)
-	}
-	log.unstable.offset = lastIndex + 1
-	log.unstable.logger = logger
-	// Initialize our committed and applied pointers to the time of the last compaction.
-	log.committed = firstIndex - 1
-	log.applied = firstIndex - 1
-
-	return log
-}
-
-func (l *raftLog) String() string {
-	return fmt.Sprintf("committed=%d, applied=%d, unstable.offset=%d, len(unstable.Entries)=%d", l.committed, l.applied, l.unstable.offset, len(l.unstable.entries))
-}
-
-// maybeAppend returns (0, false) if the entries cannot be appended. Otherwise,
-// it returns (last index of new entries, true).
-func (l *raftLog) maybeAppend(index, logTerm, committed uint64, ents ...pb.Entry) (lastnewi uint64, ok bool) {
-	lastnewi = index + uint64(len(ents))
-	if l.matchTerm(index, logTerm) {
-		ci := l.findConflict(ents)
-		switch {
-		case ci == 0:
-		case ci <= l.committed:
-			l.logger.Panicf("entry %d conflict with committed entry [committed(%d)]", ci, l.committed)
-		default:
-			offset := index + 1
-			l.append(ents[ci-offset:]...)
-		}
-		l.commitTo(min(committed, lastnewi))
-		return lastnewi, true
-	}
-	return 0, false
-}
-
-func (l *raftLog) append(ents ...pb.Entry) uint64 {
-	if len(ents) == 0 {
-		return l.lastIndex()
-	}
-	if after := ents[0].Index - 1; after < l.committed {
-		l.logger.Panicf("after(%d) is out of range [committed(%d)]", after, l.committed)
-	}
-	l.unstable.truncateAndAppend(ents)
-	return l.lastIndex()
-}
-
-// findConflict finds the index of the conflict.
-// It returns the first pair of conflicting entries between the existing
-// entries and the given entries, if there are any.
-// If there are no conflicting entries, and the existing entries contain
-// all the given entries, zero will be returned.
-// If there are no conflicting entries, but the given entries contain new
-// entries, the index of the first new entry will be returned.
-// An entry is considered to be conflicting if it has the same index but
-// a different term.
-// The first entry MUST have an index equal to the argument 'from'.
-// The index of the given entries MUST be continuously increasing.
-func (l *raftLog) findConflict(ents []pb.Entry) uint64 {
-	for _, ne := range ents {
-		if !l.matchTerm(ne.Index, ne.Term) {
-			if ne.Index <= l.lastIndex() {
-				l.logger.Infof("found conflict at index %d [existing term: %d, conflicting term: %d]",
-					ne.Index, l.zeroTermOnErrCompacted(l.term(ne.Index)), ne.Term)
-			}
-			return ne.Index
-		}
-	}
-	return 0
-}
-
-func (l *raftLog) unstableEntries() []pb.Entry {
-	if len(l.unstable.entries) == 0 {
-		return nil
-	}
-	return l.unstable.entries
-}
-
-// nextEnts returns all the available entries for execution.
-// If applied is smaller than the index of snapshot, it returns all committed
-// entries after the index of snapshot.
-func (l *raftLog) nextEnts() (ents []pb.Entry) {
-	off := max(l.applied+1, l.firstIndex())
-	if l.committed+1 > off {
-		ents, err := l.slice(off, l.committed+1, noLimit)
-		if err != nil {
-			l.logger.Panicf("unexpected error when getting unapplied entries (%v)", err)
-		}
-		return ents
-	}
-	return nil
-}
-
-// hasNextEnts returns whether there are any available entries for execution.
This -// is a fast check without heavy raftLog.slice() in raftLog.nextEnts(). -func (l *raftLog) hasNextEnts() bool { - off := max(l.applied+1, l.firstIndex()) - if l.committed+1 > off { - return true - } - return false -} - -func (l *raftLog) snapshot() (pb.Snapshot, error) { - if l.unstable.snapshot != nil { - return *l.unstable.snapshot, nil - } - return l.storage.Snapshot() -} - -func (l *raftLog) firstIndex() uint64 { - if i, ok := l.unstable.maybeFirstIndex(); ok { - return i - } - index, err := l.storage.FirstIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - return index -} - -func (l *raftLog) lastIndex() uint64 { - if i, ok := l.unstable.maybeLastIndex(); ok { - return i - } - i, err := l.storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - return i -} - -func (l *raftLog) commitTo(tocommit uint64) { - // never decrease commit - if l.committed < tocommit { - if l.lastIndex() < tocommit { - l.logger.Panicf("tocommit(%d) is out of range [lastIndex(%d)]. Was the raft log corrupted, truncated, or lost?", tocommit, l.lastIndex()) - } - l.committed = tocommit - } -} - -func (l *raftLog) appliedTo(i uint64) { - if i == 0 { - return - } - if l.committed < i || i < l.applied { - l.logger.Panicf("applied(%d) is out of range [prevApplied(%d), committed(%d)]", i, l.applied, l.committed) - } - l.applied = i -} - -func (l *raftLog) stableTo(i, t uint64) { l.unstable.stableTo(i, t) } - -func (l *raftLog) stableSnapTo(i uint64) { l.unstable.stableSnapTo(i) } - -func (l *raftLog) lastTerm() uint64 { - t, err := l.term(l.lastIndex()) - if err != nil { - l.logger.Panicf("unexpected error when getting the last term (%v)", err) - } - return t -} - -func (l *raftLog) term(i uint64) (uint64, error) { - // the valid term range is [index of dummy entry, last index] - dummyIndex := l.firstIndex() - 1 - if i < dummyIndex || i > l.lastIndex() { - // TODO: return an error instead? - return 0, nil - } - - if t, ok := l.unstable.maybeTerm(i); ok { - return t, nil - } - - t, err := l.storage.Term(i) - if err == nil { - return t, nil - } - if err == ErrCompacted { - return 0, err - } - panic(err) // TODO(bdarnell) -} - -func (l *raftLog) entries(i, maxsize uint64) ([]pb.Entry, error) { - if i > l.lastIndex() { - return nil, nil - } - return l.slice(i, l.lastIndex()+1, maxsize) -} - -// allEntries returns all entries in the log. -func (l *raftLog) allEntries() []pb.Entry { - ents, err := l.entries(l.firstIndex(), noLimit) - if err == nil { - return ents - } - if err == ErrCompacted { // try again if there was a racing compaction - return l.allEntries() - } - // TODO (xiangli): handle error? - panic(err) -} - -// isUpToDate determines if the given (lastIndex,term) log is more up-to-date -// by comparing the index and term of the last entries in the existing logs. -// If the logs have last entries with different terms, then the log with the -// later term is more up-to-date. If the logs end with the same term, then -// whichever log has the larger lastIndex is more up-to-date. If the logs are -// the same, the given log is up-to-date. 
-func (l *raftLog) isUpToDate(lasti, term uint64) bool { - return term > l.lastTerm() || (term == l.lastTerm() && lasti >= l.lastIndex()) -} - -func (l *raftLog) matchTerm(i, term uint64) bool { - t, err := l.term(i) - if err != nil { - return false - } - return t == term -} - -func (l *raftLog) maybeCommit(maxIndex, term uint64) bool { - if maxIndex > l.committed && l.zeroTermOnErrCompacted(l.term(maxIndex)) == term { - l.commitTo(maxIndex) - return true - } - return false -} - -func (l *raftLog) restore(s pb.Snapshot) { - l.logger.Infof("log [%s] starts to restore snapshot [index: %d, term: %d]", l, s.Metadata.Index, s.Metadata.Term) - l.committed = s.Metadata.Index - l.unstable.restore(s) -} - -// slice returns a slice of log entries from lo through hi-1, inclusive. -func (l *raftLog) slice(lo, hi, maxSize uint64) ([]pb.Entry, error) { - err := l.mustCheckOutOfBounds(lo, hi) - if err != nil { - return nil, err - } - if lo == hi { - return nil, nil - } - var ents []pb.Entry - if lo < l.unstable.offset { - storedEnts, err := l.storage.Entries(lo, min(hi, l.unstable.offset), maxSize) - if err == ErrCompacted { - return nil, err - } else if err == ErrUnavailable { - l.logger.Panicf("entries[%d:%d) is unavailable from storage", lo, min(hi, l.unstable.offset)) - } else if err != nil { - panic(err) // TODO(bdarnell) - } - - // check if ents has reached the size limitation - if uint64(len(storedEnts)) < min(hi, l.unstable.offset)-lo { - return storedEnts, nil - } - - ents = storedEnts - } - if hi > l.unstable.offset { - unstable := l.unstable.slice(max(lo, l.unstable.offset), hi) - if len(ents) > 0 { - ents = append([]pb.Entry{}, ents...) - ents = append(ents, unstable...) - } else { - ents = unstable - } - } - return limitSize(ents, maxSize), nil -} - -// l.firstIndex <= lo <= hi <= l.firstIndex + len(l.entries) -func (l *raftLog) mustCheckOutOfBounds(lo, hi uint64) error { - if lo > hi { - l.logger.Panicf("invalid slice %d > %d", lo, hi) - } - fi := l.firstIndex() - if lo < fi { - return ErrCompacted - } - - length := l.lastIndex() - fi + 1 - if lo < fi || hi > fi+length { - l.logger.Panicf("slice[%d,%d) out of bound [%d,%d]", lo, hi, fi, l.lastIndex()) - } - return nil -} - -func (l *raftLog) zeroTermOnErrCompacted(t uint64, err error) uint64 { - if err == nil { - return t - } - if err == ErrCompacted { - return 0 - } - l.logger.Panicf("unexpected error (%v)", err) - return 0 -} diff --git a/vendor/src/github.com/coreos/etcd/raft/log_unstable.go b/vendor/src/github.com/coreos/etcd/raft/log_unstable.go deleted file mode 100644 index df90178f9a..0000000000 --- a/vendor/src/github.com/coreos/etcd/raft/log_unstable.go +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import pb "github.com/coreos/etcd/raft/raftpb" - -// unstable.entries[i] has raft log position i+unstable.offset. 
-// Note that unstable.offset may be less than the highest log -// position in storage; this means that the next write to storage -// might need to truncate the log before persisting unstable.entries. -type unstable struct { - // the incoming unstable snapshot, if any. - snapshot *pb.Snapshot - // all entries that have not yet been written to storage. - entries []pb.Entry - offset uint64 - - logger Logger -} - -// maybeFirstIndex returns the index of the first possible entry in entries -// if it has a snapshot. -func (u *unstable) maybeFirstIndex() (uint64, bool) { - if u.snapshot != nil { - return u.snapshot.Metadata.Index + 1, true - } - return 0, false -} - -// maybeLastIndex returns the last index if it has at least one -// unstable entry or snapshot. -func (u *unstable) maybeLastIndex() (uint64, bool) { - if l := len(u.entries); l != 0 { - return u.offset + uint64(l) - 1, true - } - if u.snapshot != nil { - return u.snapshot.Metadata.Index, true - } - return 0, false -} - -// maybeTerm returns the term of the entry at index i, if there -// is any. -func (u *unstable) maybeTerm(i uint64) (uint64, bool) { - if i < u.offset { - if u.snapshot == nil { - return 0, false - } - if u.snapshot.Metadata.Index == i { - return u.snapshot.Metadata.Term, true - } - return 0, false - } - - last, ok := u.maybeLastIndex() - if !ok { - return 0, false - } - if i > last { - return 0, false - } - return u.entries[i-u.offset].Term, true -} - -func (u *unstable) stableTo(i, t uint64) { - gt, ok := u.maybeTerm(i) - if !ok { - return - } - // if i < offset, term is matched with the snapshot - // only update the unstable entries if term is matched with - // an unstable entry. - if gt == t && i >= u.offset { - u.entries = u.entries[i+1-u.offset:] - u.offset = i + 1 - } -} - -func (u *unstable) stableSnapTo(i uint64) { - if u.snapshot != nil && u.snapshot.Metadata.Index == i { - u.snapshot = nil - } -} - -func (u *unstable) restore(s pb.Snapshot) { - u.offset = s.Metadata.Index + 1 - u.entries = nil - u.snapshot = &s -} - -func (u *unstable) truncateAndAppend(ents []pb.Entry) { - after := ents[0].Index - 1 - switch { - case after == u.offset+uint64(len(u.entries))-1: - // after is the last index in the u.entries - // directly append - u.entries = append(u.entries, ents...) - case after < u.offset: - u.logger.Infof("replace the unstable entries from index %d", after+1) - // The log is being truncated to before our current offset - // portion, so set the offset and replace the entries - u.offset = after + 1 - u.entries = ents - default: - // truncate to after and copy to u.entries - // then append - u.logger.Infof("truncate the unstable entries to index %d", after) - u.entries = append([]pb.Entry{}, u.slice(u.offset, after+1)...) - u.entries = append(u.entries, ents...) 
-	}
-}
-
-func (u *unstable) slice(lo uint64, hi uint64) []pb.Entry {
-	u.mustCheckOutOfBounds(lo, hi)
-	return u.entries[lo-u.offset : hi-u.offset]
-}
-
-// u.offset <= lo <= hi <= u.offset+len(u.entries)
-func (u *unstable) mustCheckOutOfBounds(lo, hi uint64) {
-	if lo > hi {
-		u.logger.Panicf("invalid unstable.slice %d > %d", lo, hi)
-	}
-	upper := u.offset + uint64(len(u.entries))
-	if lo < u.offset || hi > upper {
-		u.logger.Panicf("unstable.slice[%d,%d) out of bound [%d,%d]", lo, hi, u.offset, upper)
-	}
-}
diff --git a/vendor/src/github.com/coreos/etcd/raft/logger.go b/vendor/src/github.com/coreos/etcd/raft/logger.go
deleted file mode 100644
index 31c194a06b..0000000000
--- a/vendor/src/github.com/coreos/etcd/raft/logger.go
+++ /dev/null
@@ -1,126 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"fmt"
-	"io/ioutil"
-	"log"
-	"os"
-)
-
-type Logger interface {
-	Debug(v ...interface{})
-	Debugf(format string, v ...interface{})
-
-	Error(v ...interface{})
-	Errorf(format string, v ...interface{})
-
-	Info(v ...interface{})
-	Infof(format string, v ...interface{})
-
-	Warning(v ...interface{})
-	Warningf(format string, v ...interface{})
-
-	Fatal(v ...interface{})
-	Fatalf(format string, v ...interface{})
-
-	Panic(v ...interface{})
-	Panicf(format string, v ...interface{})
-}
-
-func SetLogger(l Logger) { raftLogger = l }
-
-var (
-	defaultLogger = &DefaultLogger{Logger: log.New(os.Stderr, "raft", log.LstdFlags)}
-	discardLogger = &DefaultLogger{Logger: log.New(ioutil.Discard, "", 0)}
-	raftLogger    = Logger(defaultLogger)
-)
-
-const (
-	calldepth = 2
-)
-
-// DefaultLogger is a default implementation of the Logger interface.
-type DefaultLogger struct {
-	*log.Logger
-	debug bool
-}
-
-func (l *DefaultLogger) EnableTimestamps() {
-	l.SetFlags(l.Flags() | log.Ldate | log.Ltime)
-}
-
-func (l *DefaultLogger) EnableDebug() {
-	l.debug = true
-}
-
-func (l *DefaultLogger) Debug(v ...interface{}) {
-	if l.debug {
-		l.Output(calldepth, header("DEBUG", fmt.Sprint(v...)))
-	}
-}
-
-func (l *DefaultLogger) Debugf(format string, v ...interface{}) {
-	if l.debug {
-		l.Output(calldepth, header("DEBUG", fmt.Sprintf(format, v...)))
-	}
-}
-
-func (l *DefaultLogger) Info(v ...interface{}) {
-	l.Output(calldepth, header("INFO", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Infof(format string, v ...interface{}) {
-	l.Output(calldepth, header("INFO", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Error(v ...interface{}) {
-	l.Output(calldepth, header("ERROR", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Errorf(format string, v ...interface{}) {
-	l.Output(calldepth, header("ERROR", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Warning(v ...interface{}) {
-	l.Output(calldepth, header("WARN", fmt.Sprint(v...)))
-}
-
-func (l *DefaultLogger) Warningf(format string, v ...interface{}) {
-	l.Output(calldepth, header("WARN", fmt.Sprintf(format, v...)))
-}
-
-func (l *DefaultLogger) Fatal(v ...interface{}) {
-	l.Output(calldepth, header("FATAL", fmt.Sprint(v...)))
-	os.Exit(1)
-}
-
-func (l *DefaultLogger) Fatalf(format string, v ...interface{}) {
-	l.Output(calldepth, header("FATAL", fmt.Sprintf(format, v...)))
-	os.Exit(1)
-}
-
-func (l *DefaultLogger) Panic(v ...interface{}) {
-	l.Logger.Panic(v...)
-}
-
-func (l *DefaultLogger) Panicf(format string, v ...interface{}) {
-	l.Logger.Panicf(format, v...)
-}
-
-func header(lvl, msg string) string {
-	return fmt.Sprintf("%s: %s", lvl, msg)
-}
diff --git a/vendor/src/github.com/coreos/etcd/raft/node.go b/vendor/src/github.com/coreos/etcd/raft/node.go
deleted file mode 100644
index c80dbc4b8a..0000000000
--- a/vendor/src/github.com/coreos/etcd/raft/node.go
+++ /dev/null
@@ -1,488 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"errors"
-
-	"golang.org/x/net/context"
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-type SnapshotStatus int
-
-const (
-	SnapshotFinish  SnapshotStatus = 1
-	SnapshotFailure SnapshotStatus = 2
-)
-
-var (
-	emptyState = pb.HardState{}
-
-	// ErrStopped is returned by methods on Nodes that have been stopped.
-	ErrStopped = errors.New("raft: stopped")
-)
-
-// SoftState provides state that is useful for logging and debugging.
-// The state is volatile and does not need to be persisted to the WAL.
-type SoftState struct {
-	Lead      uint64
-	RaftState StateType
-}
-
-func (a *SoftState) equal(b *SoftState) bool {
-	return a.Lead == b.Lead && a.RaftState == b.RaftState
-}
-
-// Ready encapsulates the entries and messages that are ready to read,
-// be saved to stable storage, committed or sent to other peers.
-// All fields in Ready are read-only.
-type Ready struct {
-	// The current volatile state of a Node.
-	// SoftState will be nil if there is no update.
-	// It is not required to consume or store SoftState.
-	*SoftState
-
-	// The current state of a Node to be saved to stable storage BEFORE
-	// Messages are sent.
-	// HardState will be equal to empty state if there is no update.
-	pb.HardState
-
-	// Entries specifies entries to be saved to stable storage BEFORE
-	// Messages are sent.
-	Entries []pb.Entry
-
-	// Snapshot specifies the snapshot to be saved to stable storage.
-	Snapshot pb.Snapshot
-
-	// CommittedEntries specifies entries to be committed to a
-	// store/state-machine. These have previously been committed to stable
-	// store.
-	CommittedEntries []pb.Entry
-
-	// Messages specifies outbound messages to be sent AFTER Entries are
-	// committed to stable storage.
-	// If it contains a MsgSnap message, the application MUST report back to raft
-	// when the snapshot has been received or has failed by calling ReportSnapshot.
-	Messages []pb.Message
-}
-
-func isHardStateEqual(a, b pb.HardState) bool {
-	return a.Term == b.Term && a.Vote == b.Vote && a.Commit == b.Commit
-}
-
-// IsEmptyHardState returns true if the given HardState is empty.
-func IsEmptyHardState(st pb.HardState) bool {
-	return isHardStateEqual(st, emptyState)
-}
-
-// IsEmptySnap returns true if the given Snapshot is empty.
-func IsEmptySnap(sp pb.Snapshot) bool {
-	return sp.Metadata.Index == 0
-}
-
-func (rd Ready) containsUpdates() bool {
-	return rd.SoftState != nil || !IsEmptyHardState(rd.HardState) ||
-		!IsEmptySnap(rd.Snapshot) || len(rd.Entries) > 0 ||
-		len(rd.CommittedEntries) > 0 || len(rd.Messages) > 0
-}
-
-// Node represents a node in a raft cluster.
-type Node interface {
-	// Tick increments the internal logical clock for the Node by a single tick. Election
-	// timeouts and heartbeat timeouts are in units of ticks.
-	Tick()
-	// Campaign causes the Node to transition to candidate state and start campaigning to become leader.
-	Campaign(ctx context.Context) error
-	// Propose proposes that data be appended to the log.
-	Propose(ctx context.Context, data []byte) error
-	// ProposeConfChange proposes a config change.
-	// At most one ConfChange can be in the process of going through consensus.
-	// The application needs to call ApplyConfChange when applying an
-	// EntryConfChange type entry.
-	ProposeConfChange(ctx context.Context, cc pb.ConfChange) error
-	// Step advances the state machine using the given message. ctx.Err() will be returned, if any.
-	Step(ctx context.Context, msg pb.Message) error
-
-	// Ready returns a channel that returns the current point-in-time state.
-	// Users of the Node must call Advance after retrieving the state returned by Ready.
-	//
-	// NOTE: No committed entries from the next Ready may be applied until all committed entries
-	// and snapshots from the previous one have finished.
-	Ready() <-chan Ready
-
-	// Advance notifies the Node that the application has saved progress up to the last Ready.
-	// It prepares the node to return the next available Ready.
-	//
-	// The application should generally call Advance after it applies the entries in last Ready.
-	//
-	// However, as an optimization, the application may call Advance while it is applying the
-	// commands. For example, when the last Ready contains a snapshot, the application might take
-	// a long time to apply the snapshot data. To continue receiving Ready without blocking raft
-	// progress, it can call Advance before it finishes applying the last Ready.
-	// To make this optimization work safely, when the application receives a Ready
-	// with softState.RaftState equal to Candidate, it MUST apply all pending
-	// configuration changes, if there are any.
-	//
-	// Here is a simple solution that waits for ALL pending entries to be applied.
-	// ```
-	// ...
-	// rd := <-n.Ready()
-	// go apply(rd.CommittedEntries) // optimization to apply asynchronously in FIFO order.
-	// if rd.SoftState.RaftState == StateCandidate {
-	//     waitAllApplied()
-	// }
-	// n.Advance()
-	// ...
-	// ```
-	Advance()
-	// ApplyConfChange applies a config change to the local node.
-	// Returns an opaque ConfState protobuf which must be recorded
-	// in snapshots. Will never return nil; it returns a pointer only
-	// to match MemoryStorage.Compact.
-	ApplyConfChange(cc pb.ConfChange) *pb.ConfState
-	// Status returns the current status of the raft state machine.
-	Status() Status
-	// ReportUnreachable reports that the given node is not reachable for the last send.
-	ReportUnreachable(id uint64)
-	// ReportSnapshot reports the status of the sent snapshot.
-	ReportSnapshot(id uint64, status SnapshotStatus)
-	// Stop performs any necessary termination of the Node.
-	Stop()
-}
-
-type Peer struct {
-	ID      uint64
-	Context []byte
-}
-
-// StartNode returns a new Node given configuration and a list of raft peers.
-// It appends a ConfChangeAddNode entry for each given peer to the initial log.
-func StartNode(c *Config, peers []Peer) Node {
-	r := newRaft(c)
-	// become the follower at term 1 and apply initial configuration
-	// entries of term 1
-	r.becomeFollower(1, None)
-	for _, peer := range peers {
-		cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context}
-		d, err := cc.Marshal()
-		if err != nil {
-			panic("unexpected marshal error")
-		}
-		e := pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: r.raftLog.lastIndex() + 1, Data: d}
-		r.raftLog.append(e)
-	}
-	// Mark these initial entries as committed.
-	// TODO(bdarnell): These entries are still unstable; do we need to preserve
-	// the invariant that committed < unstable?
-	r.raftLog.committed = r.raftLog.lastIndex()
-	// Now apply them, mainly so that the application can call Campaign
-	// immediately after StartNode in tests. Note that these nodes will
-	// be added to raft twice: here and when the application's Ready
-	// loop calls ApplyConfChange. The calls to addNode must come after
-	// all calls to raftLog.append so progress.next is set after these
-	// bootstrapping entries (it is an error if we try to append these
-	// entries since they have already been committed).
-	// We do not set raftLog.applied so the application will be able
-	// to observe all conf changes via Ready.CommittedEntries.
-	for _, peer := range peers {
-		r.addNode(peer.ID)
-	}
-
-	n := newNode()
-	go n.run(r)
-	return &n
-}
-
-// RestartNode is similar to StartNode but does not take a list of peers.
-// The current membership of the cluster will be restored from the Storage.
-// If the caller has an existing state machine, pass in the last log index that
-// has been applied to it; otherwise use zero.
-func RestartNode(c *Config) Node { - r := newRaft(c) - - n := newNode() - go n.run(r) - return &n -} - -// node is the canonical implementation of the Node interface -type node struct { - propc chan pb.Message - recvc chan pb.Message - confc chan pb.ConfChange - confstatec chan pb.ConfState - readyc chan Ready - advancec chan struct{} - tickc chan struct{} - done chan struct{} - stop chan struct{} - status chan chan Status -} - -func newNode() node { - return node{ - propc: make(chan pb.Message), - recvc: make(chan pb.Message), - confc: make(chan pb.ConfChange), - confstatec: make(chan pb.ConfState), - readyc: make(chan Ready), - advancec: make(chan struct{}), - tickc: make(chan struct{}), - done: make(chan struct{}), - stop: make(chan struct{}), - status: make(chan chan Status), - } -} - -func (n *node) Stop() { - select { - case n.stop <- struct{}{}: - // Not already stopped, so trigger it - case <-n.done: - // Node has already been stopped - no need to do anything - return - } - // Block until the stop has been acknowledged by run() - <-n.done -} - -func (n *node) run(r *raft) { - var propc chan pb.Message - var readyc chan Ready - var advancec chan struct{} - var prevLastUnstablei, prevLastUnstablet uint64 - var havePrevLastUnstablei bool - var prevSnapi uint64 - var rd Ready - - lead := None - prevSoftSt := r.softState() - prevHardSt := emptyState - - for { - if advancec != nil { - readyc = nil - } else { - rd = newReady(r, prevSoftSt, prevHardSt) - if rd.containsUpdates() { - readyc = n.readyc - } else { - readyc = nil - } - } - - if lead != r.lead { - if r.hasLeader() { - if lead == None { - r.logger.Infof("raft.node: %x elected leader %x at term %d", r.id, r.lead, r.Term) - } else { - r.logger.Infof("raft.node: %x changed leader from %x to %x at term %d", r.id, lead, r.lead, r.Term) - } - propc = n.propc - } else { - r.logger.Infof("raft.node: %x lost leader %x at term %d", r.id, lead, r.Term) - propc = nil - } - lead = r.lead - } - - select { - // TODO: maybe buffer the config propose if there exists one (the way - // described in raft dissertation) - // Currently it is dropped in Step silently. - case m := <-propc: - m.From = r.id - r.Step(m) - case m := <-n.recvc: - // filter out response message from unknown From. 
-		if _, ok := r.prs[m.From]; ok || !IsResponseMsg(m) {
-			r.Step(m) // raft never returns an error
-		}
-		case cc := <-n.confc:
-			if cc.NodeID == None {
-				r.resetPendingConf()
-				select {
-				case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
-				case <-n.done:
-				}
-				break
-			}
-			switch cc.Type {
-			case pb.ConfChangeAddNode:
-				r.addNode(cc.NodeID)
-			case pb.ConfChangeRemoveNode:
-				// block incoming proposal when local node is
-				// removed
-				if cc.NodeID == r.id {
-					n.propc = nil
-				}
-				r.removeNode(cc.NodeID)
-			case pb.ConfChangeUpdateNode:
-				r.resetPendingConf()
-			default:
-				panic("unexpected conf type")
-			}
-			select {
-			case n.confstatec <- pb.ConfState{Nodes: r.nodes()}:
-			case <-n.done:
-			}
-		case <-n.tickc:
-			r.tick()
-		case readyc <- rd:
-			if rd.SoftState != nil {
-				prevSoftSt = rd.SoftState
-			}
-			if len(rd.Entries) > 0 {
-				prevLastUnstablei = rd.Entries[len(rd.Entries)-1].Index
-				prevLastUnstablet = rd.Entries[len(rd.Entries)-1].Term
-				havePrevLastUnstablei = true
-			}
-			if !IsEmptyHardState(rd.HardState) {
-				prevHardSt = rd.HardState
-			}
-			if !IsEmptySnap(rd.Snapshot) {
-				prevSnapi = rd.Snapshot.Metadata.Index
-			}
-			r.msgs = nil
-			advancec = n.advancec
-		case <-advancec:
-			if prevHardSt.Commit != 0 {
-				r.raftLog.appliedTo(prevHardSt.Commit)
-			}
-			if havePrevLastUnstablei {
-				r.raftLog.stableTo(prevLastUnstablei, prevLastUnstablet)
-				havePrevLastUnstablei = false
-			}
-			r.raftLog.stableSnapTo(prevSnapi)
-			advancec = nil
-		case c := <-n.status:
-			c <- getStatus(r)
-		case <-n.stop:
-			close(n.done)
-			return
-		}
-	}
-}
-
-// Tick increments the internal logical clock for this Node. Election timeouts
-// and heartbeat timeouts are in units of ticks.
-func (n *node) Tick() {
-	select {
-	case n.tickc <- struct{}{}:
-	case <-n.done:
-	}
-}
-
-func (n *node) Campaign(ctx context.Context) error { return n.step(ctx, pb.Message{Type: pb.MsgHup}) }
-
-func (n *node) Propose(ctx context.Context, data []byte) error {
-	return n.step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Data: data}}})
-}
-
-func (n *node) Step(ctx context.Context, m pb.Message) error {
-	// ignore unexpected local messages received over the network
-	if IsLocalMsg(m) {
-		// TODO: return an error?
-		return nil
-	}
-	return n.step(ctx, m)
-}
-
-func (n *node) ProposeConfChange(ctx context.Context, cc pb.ConfChange) error {
-	data, err := cc.Marshal()
-	if err != nil {
-		return err
-	}
-	return n.Step(ctx, pb.Message{Type: pb.MsgProp, Entries: []pb.Entry{{Type: pb.EntryConfChange, Data: data}}})
-}
-
-// step advances the state machine using the given message. ctx.Err() will be
-// returned, if any.
-func (n *node) step(ctx context.Context, m pb.Message) error { - ch := n.recvc - if m.Type == pb.MsgProp { - ch = n.propc - } - - select { - case ch <- m: - return nil - case <-ctx.Done(): - return ctx.Err() - case <-n.done: - return ErrStopped - } -} - -func (n *node) Ready() <-chan Ready { return n.readyc } - -func (n *node) Advance() { - select { - case n.advancec <- struct{}{}: - case <-n.done: - } -} - -func (n *node) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { - var cs pb.ConfState - select { - case n.confc <- cc: - case <-n.done: - } - select { - case cs = <-n.confstatec: - case <-n.done: - } - return &cs -} - -func (n *node) Status() Status { - c := make(chan Status) - n.status <- c - return <-c -} - -func (n *node) ReportUnreachable(id uint64) { - select { - case n.recvc <- pb.Message{Type: pb.MsgUnreachable, From: id}: - case <-n.done: - } -} - -func (n *node) ReportSnapshot(id uint64, status SnapshotStatus) { - rej := status == SnapshotFailure - - select { - case n.recvc <- pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}: - case <-n.done: - } -} - -func newReady(r *raft, prevSoftSt *SoftState, prevHardSt pb.HardState) Ready { - rd := Ready{ - Entries: r.raftLog.unstableEntries(), - CommittedEntries: r.raftLog.nextEnts(), - Messages: r.msgs, - } - if softSt := r.softState(); !softSt.equal(prevSoftSt) { - rd.SoftState = softSt - } - if hardSt := r.hardState(); !isHardStateEqual(hardSt, prevHardSt) { - rd.HardState = hardSt - } - if r.raftLog.unstable.snapshot != nil { - rd.Snapshot = *r.raftLog.unstable.snapshot - } - return rd -} diff --git a/vendor/src/github.com/coreos/etcd/raft/progress.go b/vendor/src/github.com/coreos/etcd/raft/progress.go deleted file mode 100644 index 11f53409d4..0000000000 --- a/vendor/src/github.com/coreos/etcd/raft/progress.go +++ /dev/null @@ -1,245 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import "fmt" - -const ( - ProgressStateProbe ProgressStateType = iota - ProgressStateReplicate - ProgressStateSnapshot -) - -type ProgressStateType uint64 - -var prstmap = [...]string{ - "ProgressStateProbe", - "ProgressStateReplicate", - "ProgressStateSnapshot", -} - -func (st ProgressStateType) String() string { return prstmap[uint64(st)] } - -// Progress represents a follower’s progress in the view of the leader. Leader maintains -// progresses of all followers, and sends entries to the follower based on its progress. -type Progress struct { - Match, Next uint64 - // State defines how the leader should interact with the follower. - // - // When in ProgressStateProbe, leader sends at most one replication message - // per heartbeat interval. It also probes actual progress of the follower. - // - // When in ProgressStateReplicate, leader optimistically increases next - // to the latest entry sent after sending replication message. This is - // an optimized state for fast replicating log entries to the follower. 
-	//
-	// When in ProgressStateSnapshot, the leader should have sent out a snapshot
-	// before, and stops sending any replication message.
-	State ProgressStateType
-	// Paused is used in ProgressStateProbe.
-	// When Paused is true, raft should pause sending replication messages to this peer.
-	Paused bool
-	// PendingSnapshot is used in ProgressStateSnapshot.
-	// If there is a pending snapshot, the pendingSnapshot will be set to the
-	// index of the snapshot. If pendingSnapshot is set, the replication process of
-	// this Progress will be paused. raft will not resend snapshot until the pending one
-	// is reported to be failed.
-	PendingSnapshot uint64
-
-	// RecentActive is true if the progress is recently active. Receiving any messages
-	// from the corresponding follower indicates the progress is active.
-	// RecentActive can be reset to false after an election timeout.
-	RecentActive bool
-
-	// inflights is a sliding window for the inflight messages.
-	// When inflights is full, no more messages should be sent.
-	// When a leader sends out a message, the index of the last
-	// entry should be added to inflights. The index MUST be added
-	// into inflights in order.
-	// When a leader receives a reply, the previous inflights should
-	// be freed by calling inflights.freeTo.
-	ins *inflights
-}
-
-func (pr *Progress) resetState(state ProgressStateType) {
-	pr.Paused = false
-	pr.RecentActive = false
-	pr.PendingSnapshot = 0
-	pr.State = state
-	pr.ins.reset()
-}
-
-func (pr *Progress) becomeProbe() {
-	// If the original state is ProgressStateSnapshot, progress knows that
-	// the pending snapshot has been sent to this peer successfully, then
-	// probes from pendingSnapshot + 1.
-	if pr.State == ProgressStateSnapshot {
-		pendingSnapshot := pr.PendingSnapshot
-		pr.resetState(ProgressStateProbe)
-		pr.Next = max(pr.Match+1, pendingSnapshot+1)
-	} else {
-		pr.resetState(ProgressStateProbe)
-		pr.Next = pr.Match + 1
-	}
-}
-
-func (pr *Progress) becomeReplicate() {
-	pr.resetState(ProgressStateReplicate)
-	pr.Next = pr.Match + 1
-}
-
-func (pr *Progress) becomeSnapshot(snapshoti uint64) {
-	pr.resetState(ProgressStateSnapshot)
-	pr.PendingSnapshot = snapshoti
-}
-
-// maybeUpdate returns false if the given index n comes from an outdated message.
-// Otherwise it updates the progress and returns true.
-func (pr *Progress) maybeUpdate(n uint64) bool {
-	var updated bool
-	if pr.Match < n {
-		pr.Match = n
-		updated = true
-		pr.resume()
-	}
-	if pr.Next < n+1 {
-		pr.Next = n + 1
-	}
-	return updated
-}
-
-func (pr *Progress) optimisticUpdate(n uint64) { pr.Next = n + 1 }
-
-// maybeDecrTo returns false if the given rejected index comes from an out-of-order
-// message. Otherwise it decreases the progress's next index to min(rejected, last+1)
-// and returns true.
-func (pr *Progress) maybeDecrTo(rejected, last uint64) bool {
-	if pr.State == ProgressStateReplicate {
-		// the rejection must be stale if the progress has matched and "rejected"
-		// is smaller than "match".
-		if rejected <= pr.Match {
-			return false
-		}
-		// directly decrease next to match + 1
-		pr.Next = pr.Match + 1
-		return true
-	}
-
-	// the rejection must be stale if "rejected" does not match next - 1
-	if pr.Next-1 != rejected {
-		return false
-	}
-
-	if pr.Next = min(rejected, last+1); pr.Next < 1 {
-		pr.Next = 1
-	}
-	pr.resume()
-	return true
-}
-
-func (pr *Progress) pause()  { pr.Paused = true }
-func (pr *Progress) resume() { pr.Paused = false }
-
-// isPaused returns whether the progress has stopped sending messages.
-func (pr *Progress) isPaused() bool {
-	switch pr.State {
-	case ProgressStateProbe:
-		return pr.Paused
-	case ProgressStateReplicate:
-		return pr.ins.full()
-	case ProgressStateSnapshot:
-		return true
-	default:
-		panic("unexpected state")
-	}
-}
-
-func (pr *Progress) snapshotFailure() { pr.PendingSnapshot = 0 }
-
-// maybeSnapshotAbort unsets pendingSnapshot if Match is equal to or higher than
-// the pendingSnapshot.
-func (pr *Progress) maybeSnapshotAbort() bool {
-	return pr.State == ProgressStateSnapshot && pr.Match >= pr.PendingSnapshot
-}
-
-func (pr *Progress) String() string {
-	return fmt.Sprintf("next = %d, match = %d, state = %s, waiting = %v, pendingSnapshot = %d", pr.Next, pr.Match, pr.State, pr.isPaused(), pr.PendingSnapshot)
-}
-
-type inflights struct {
-	// the starting index in the buffer
-	start int
-	// number of inflights in the buffer
-	count int
-
-	// the size of the buffer
-	size   int
-	buffer []uint64
-}
-
-func newInflights(size int) *inflights {
-	return &inflights{
-		size:   size,
-		buffer: make([]uint64, size),
-	}
-}
-
-// add adds an inflight into inflights
-func (in *inflights) add(inflight uint64) {
-	if in.full() {
-		panic("cannot add into a full inflights")
-	}
-	next := in.start + in.count
-	if next >= in.size {
-		next -= in.size
-	}
-	in.buffer[next] = inflight
-	in.count++
-}
-
-// freeTo frees the inflights smaller than or equal to the given `to` flight.
-func (in *inflights) freeTo(to uint64) {
-	if in.count == 0 || to < in.buffer[in.start] {
-		// out of the left side of the window
-		return
-	}
-
-	i, idx := 0, in.start
-	for i = 0; i < in.count; i++ {
-		if to < in.buffer[idx] { // found the first large inflight
-			break
-		}
-
-		// increase index and maybe rotate
-		if idx++; idx >= in.size {
-			idx -= in.size
-		}
-	}
-	// free i inflights and set new start index
-	in.count -= i
-	in.start = idx
-}
-
-func (in *inflights) freeFirstOne() { in.freeTo(in.buffer[in.start]) }
-
-// full returns true if the inflights is full.
-func (in *inflights) full() bool {
-	return in.count == in.size
-}
-
-// reset frees all inflights.
-func (in *inflights) reset() {
-	in.count = 0
-	in.start = 0
-}
diff --git a/vendor/src/github.com/coreos/etcd/raft/raft.go b/vendor/src/github.com/coreos/etcd/raft/raft.go
deleted file mode 100644
index 5639fcb8f3..0000000000
--- a/vendor/src/github.com/coreos/etcd/raft/raft.go
+++ /dev/null
@@ -1,898 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package raft
-
-import (
-	"errors"
-	"fmt"
-	"math"
-	"math/rand"
-	"sort"
-	"strings"
-
-	pb "github.com/coreos/etcd/raft/raftpb"
-)
-
-// None is a placeholder node ID used when there is no leader.
-const None uint64 = 0
-const noLimit = math.MaxUint64
-
-var errNoLeader = errors.New("no leader")
-
-var ErrSnapshotTemporarilyUnavailable = errors.New("snapshot is temporarily unavailable")
-
-// Possible values for StateType.
-const ( - StateFollower StateType = iota - StateCandidate - StateLeader -) - -// StateType represents the role of a node in a cluster. -type StateType uint64 - -var stmap = [...]string{ - "StateFollower", - "StateCandidate", - "StateLeader", -} - -func (st StateType) String() string { - return stmap[uint64(st)] -} - -// Config contains the parameters to start a raft. -type Config struct { - // ID is the identity of the local raft. ID cannot be 0. - ID uint64 - - // peers contains the IDs of all nodes (including self) in the raft cluster. It - // should only be set when starting a new raft cluster. Restarting raft from - // previous configuration will panic if peers is set. peer is private and only - // used for testing right now. - peers []uint64 - - // ElectionTick is the number of Node.Tick invocations that must pass between - // elections. That is, if a follower does not receive any message from the - // leader of current term before ElectionTick has elapsed, it will become - // candidate and start an election. ElectionTick must be greater than - // HeartbeatTick. We suggest ElectionTick = 10 * HeartbeatTick to avoid - // unnecessary leader switching. - ElectionTick int - // HeartbeatTick is the number of Node.Tick invocations that must pass between - // heartbeats. That is, a leader sends heartbeat messages to maintain its - // leadership every HeartbeatTick ticks. - HeartbeatTick int - - // Storage is the storage for raft. raft generates entries and states to be - // stored in storage. raft reads the persisted entries and states out of - // Storage when it needs. raft reads out the previous state and configuration - // out of storage when restarting. - Storage Storage - // Applied is the last applied index. It should only be set when restarting - // raft. raft will not return entries to the application smaller or equal to - // Applied. If Applied is unset when restarting, raft might return previous - // applied entries. This is a very application dependent configuration. - Applied uint64 - - // MaxSizePerMsg limits the max size of each append message. Smaller value - // lowers the raft recovery cost(initial probing and message lost during normal - // operation). On the other side, it might affect the throughput during normal - // replication. Note: math.MaxUint64 for unlimited, 0 for at most one entry per - // message. - MaxSizePerMsg uint64 - // MaxInflightMsgs limits the max number of in-flight append messages during - // optimistic replication phase. The application transportation layer usually - // has its own sending buffer over TCP/UDP. Setting MaxInflightMsgs to avoid - // overflowing that sending buffer. TODO (xiangli): feedback to application to - // limit the proposal rate? - MaxInflightMsgs int - - // CheckQuorum specifies if the leader should check quorum activity. Leader - // steps down when quorum is not active for an electionTimeout. - CheckQuorum bool - - // Logger is the logger used for raft log. 
For multinode, which can host - // multiple raft groups, each group can have its own logger. - Logger Logger -} - -func (c *Config) validate() error { - if c.ID == None { - return errors.New("cannot use none as id") - } - - if c.HeartbeatTick <= 0 { - return errors.New("heartbeat tick must be greater than 0") - } - - if c.ElectionTick <= c.HeartbeatTick { - return errors.New("election tick must be greater than heartbeat tick") - } - - if c.Storage == nil { - return errors.New("storage cannot be nil") - } - - if c.MaxInflightMsgs <= 0 { - return errors.New("max inflight messages must be greater than 0") - } - - if c.Logger == nil { - c.Logger = raftLogger - } - - return nil -} - -type raft struct { - id uint64 - - Term uint64 - Vote uint64 - - // the log - raftLog *raftLog - - maxInflight int - maxMsgSize uint64 - prs map[uint64]*Progress - - state StateType - - votes map[uint64]bool - - msgs []pb.Message - - // the leader id - lead uint64 - - // New configuration is ignored if there exists unapplied configuration. - pendingConf bool - - // number of ticks since it reached last electionTimeout when it is leader - // or candidate. - // number of ticks since it reached last electionTimeout or received a - // valid message from current leader when it is a follower. - electionElapsed int - - // number of ticks since it reached last heartbeatTimeout. - // only leader keeps heartbeatElapsed. - heartbeatElapsed int - - checkQuorum bool - - heartbeatTimeout int - electionTimeout int - rand *rand.Rand - tick func() - step stepFunc - - logger Logger -} - -func newRaft(c *Config) *raft { - if err := c.validate(); err != nil { - panic(err.Error()) - } - raftlog := newLog(c.Storage, c.Logger) - hs, cs, err := c.Storage.InitialState() - if err != nil { - panic(err) // TODO(bdarnell) - } - peers := c.peers - if len(cs.Nodes) > 0 { - if len(peers) > 0 { - // TODO(bdarnell): the peers argument is always nil except in - // tests; the argument should be removed and these tests should be - // updated to specify their nodes through a snapshot.
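The validate rules above pin down the shape of a usable Config. A minimal sketch using the package's public entry points (NewMemoryStorage and StartNode are part of this package's public API; the tick values are arbitrary but respect ElectionTick > HeartbeatTick):

package main

import "github.com/coreos/etcd/raft"

func main() {
	storage := raft.NewMemoryStorage()
	c := &raft.Config{
		ID:              0x01, // must be non-zero
		ElectionTick:    10,   // the suggested 10 * HeartbeatTick
		HeartbeatTick:   1,
		Storage:         storage, // must be non-nil
		MaxSizePerMsg:   4096 * 1024,
		MaxInflightMsgs: 256, // must be positive
	}
	n := raft.StartNode(c, []raft.Peer{{ID: 0x01}})
	defer n.Stop()
}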
- panic("cannot specify both newRaft(peers) and ConfState.Nodes)") - } - peers = cs.Nodes - } - r := &raft{ - id: c.ID, - lead: None, - raftLog: raftlog, - maxMsgSize: c.MaxSizePerMsg, - maxInflight: c.MaxInflightMsgs, - prs: make(map[uint64]*Progress), - electionTimeout: c.ElectionTick, - heartbeatTimeout: c.HeartbeatTick, - logger: c.Logger, - checkQuorum: c.CheckQuorum, - } - r.rand = rand.New(rand.NewSource(int64(c.ID))) - for _, p := range peers { - r.prs[p] = &Progress{Next: 1, ins: newInflights(r.maxInflight)} - } - if !isHardStateEqual(hs, emptyState) { - r.loadState(hs) - } - if c.Applied > 0 { - raftlog.appliedTo(c.Applied) - } - r.becomeFollower(r.Term, None) - - nodesStrs := make([]string, 0) - for _, n := range r.nodes() { - nodesStrs = append(nodesStrs, fmt.Sprintf("%x", n)) - } - - r.logger.Infof("newRaft %x [peers: [%s], term: %d, commit: %d, applied: %d, lastindex: %d, lastterm: %d]", - r.id, strings.Join(nodesStrs, ","), r.Term, r.raftLog.committed, r.raftLog.applied, r.raftLog.lastIndex(), r.raftLog.lastTerm()) - return r -} - -func (r *raft) hasLeader() bool { return r.lead != None } - -func (r *raft) softState() *SoftState { return &SoftState{Lead: r.lead, RaftState: r.state} } - -func (r *raft) hardState() pb.HardState { - return pb.HardState{ - Term: r.Term, - Vote: r.Vote, - Commit: r.raftLog.committed, - } -} - -func (r *raft) quorum() int { return len(r.prs)/2 + 1 } - -func (r *raft) nodes() []uint64 { - nodes := make([]uint64, 0, len(r.prs)) - for id := range r.prs { - nodes = append(nodes, id) - } - sort.Sort(uint64Slice(nodes)) - return nodes -} - -// send persists state to stable storage and then sends to its mailbox. -func (r *raft) send(m pb.Message) { - m.From = r.id - // do not attach term to MsgProp - // proposals are a way to forward to the leader and - // should be treated as local message. - if m.Type != pb.MsgProp { - m.Term = r.Term - } - r.msgs = append(r.msgs, m) -} - -// sendAppend sends RPC, with entries to the given peer. 
-func (r *raft) sendAppend(to uint64) { - pr := r.prs[to] - if pr.isPaused() { - return - } - m := pb.Message{} - m.To = to - - term, errt := r.raftLog.term(pr.Next - 1) - ents, erre := r.raftLog.entries(pr.Next, r.maxMsgSize) - - if errt != nil || erre != nil { // send snapshot if we failed to get term or entries - if !pr.RecentActive { - r.logger.Debugf("ignore sending snapshot to %x since it is not recently active", to) - return - } - - m.Type = pb.MsgSnap - snapshot, err := r.raftLog.snapshot() - if err != nil { - if err == ErrSnapshotTemporarilyUnavailable { - r.logger.Debugf("%x failed to send snapshot to %x because snapshot is temporarily unavailable", r.id, to) - return - } - panic(err) // TODO(bdarnell) - } - if IsEmptySnap(snapshot) { - panic("need non-empty snapshot") - } - m.Snapshot = snapshot - sindex, sterm := snapshot.Metadata.Index, snapshot.Metadata.Term - r.logger.Debugf("%x [firstindex: %d, commit: %d] sent snapshot[index: %d, term: %d] to %x [%s]", - r.id, r.raftLog.firstIndex(), r.raftLog.committed, sindex, sterm, to, pr) - pr.becomeSnapshot(sindex) - r.logger.Debugf("%x paused sending replication messages to %x [%s]", r.id, to, pr) - } else { - m.Type = pb.MsgApp - m.Index = pr.Next - 1 - m.LogTerm = term - m.Entries = ents - m.Commit = r.raftLog.committed - if n := len(m.Entries); n != 0 { - switch pr.State { - // optimistically increase the next when in ProgressStateReplicate - case ProgressStateReplicate: - last := m.Entries[n-1].Index - pr.optimisticUpdate(last) - pr.ins.add(last) - case ProgressStateProbe: - pr.pause() - default: - r.logger.Panicf("%x is sending append in unhandled state %s", r.id, pr.State) - } - } - } - r.send(m) -} - -// sendHeartbeat sends an empty MsgApp -func (r *raft) sendHeartbeat(to uint64) { - // Attach the commit as min(to.matched, r.committed). - // When the leader sends out heartbeat message, - // the receiver(follower) might not be matched with the leader - // or it might not have all the committed entries. - // The leader MUST NOT forward the follower's commit to - // an unmatched index. - commit := min(r.prs[to].Match, r.raftLog.committed) - m := pb.Message{ - To: to, - Type: pb.MsgHeartbeat, - Commit: commit, - } - r.send(m) -} - -// bcastAppend sends RPC, with entries to all peers that are not up-to-date -// according to the progress recorded in r.prs. -func (r *raft) bcastAppend() { - for id := range r.prs { - if id == r.id { - continue - } - r.sendAppend(id) - } -} - -// bcastHeartbeat sends RPC, without entries to all the peers. -func (r *raft) bcastHeartbeat() { - for id := range r.prs { - if id == r.id { - continue - } - r.sendHeartbeat(id) - r.prs[id].resume() - } -} - -// maybeCommit attempts to advance the commit index. Returns true if -// the commit index changed (in which case the caller should call -// r.bcastAppend). -func (r *raft) maybeCommit() bool { - // TODO(bmizerany): optimize.. 
Currently naive - mis := make(uint64Slice, 0, len(r.prs)) - for id := range r.prs { - mis = append(mis, r.prs[id].Match) - } - sort.Sort(sort.Reverse(mis)) - mci := mis[r.quorum()-1] - return r.raftLog.maybeCommit(mci, r.Term) -} - -func (r *raft) reset(term uint64) { - if r.Term != term { - r.Term = term - r.Vote = None - } - r.lead = None - - r.electionElapsed = 0 - r.heartbeatElapsed = 0 - - r.votes = make(map[uint64]bool) - for id := range r.prs { - r.prs[id] = &Progress{Next: r.raftLog.lastIndex() + 1, ins: newInflights(r.maxInflight)} - if id == r.id { - r.prs[id].Match = r.raftLog.lastIndex() - } - } - r.pendingConf = false -} - -func (r *raft) appendEntry(es ...pb.Entry) { - li := r.raftLog.lastIndex() - for i := range es { - es[i].Term = r.Term - es[i].Index = li + 1 + uint64(i) - } - r.raftLog.append(es...) - r.prs[r.id].maybeUpdate(r.raftLog.lastIndex()) - // Regardless of maybeCommit's return, our caller will call bcastAppend. - r.maybeCommit() -} - -// tickElection is run by followers and candidates after r.electionTimeout. -func (r *raft) tickElection() { - if !r.promotable() { - r.electionElapsed = 0 - return - } - r.electionElapsed++ - if r.isElectionTimeout() { - r.electionElapsed = 0 - r.Step(pb.Message{From: r.id, Type: pb.MsgHup}) - } -} - -// tickHeartbeat is run by leaders to send a MsgBeat after r.heartbeatTimeout. -func (r *raft) tickHeartbeat() { - r.heartbeatElapsed++ - r.electionElapsed++ - - if r.electionElapsed >= r.electionTimeout { - r.electionElapsed = 0 - if r.checkQuorum { - r.Step(pb.Message{From: r.id, Type: pb.MsgCheckQuorum}) - } - } - - if r.state != StateLeader { - return - } - - if r.heartbeatElapsed >= r.heartbeatTimeout { - r.heartbeatElapsed = 0 - r.Step(pb.Message{From: r.id, Type: pb.MsgBeat}) - } -} - -func (r *raft) becomeFollower(term uint64, lead uint64) { - r.step = stepFollower - r.reset(term) - r.tick = r.tickElection - r.lead = lead - r.state = StateFollower - r.logger.Infof("%x became follower at term %d", r.id, r.Term) -} - -func (r *raft) becomeCandidate() { - // TODO(xiangli) remove the panic when the raft implementation is stable - if r.state == StateLeader { - panic("invalid transition [leader -> candidate]") - } - r.step = stepCandidate - r.reset(r.Term + 1) - r.tick = r.tickElection - r.Vote = r.id - r.state = StateCandidate - r.logger.Infof("%x became candidate at term %d", r.id, r.Term) -} - -func (r *raft) becomeLeader() { - // TODO(xiangli) remove the panic when the raft implementation is stable - if r.state == StateFollower { - panic("invalid transition [follower -> leader]") - } - r.step = stepLeader - r.reset(r.Term) - r.tick = r.tickHeartbeat - r.lead = r.id - r.state = StateLeader - ents, err := r.raftLog.entries(r.raftLog.committed+1, noLimit) - if err != nil { - r.logger.Panicf("unexpected error getting uncommitted entries (%v)", err) - } - - for _, e := range ents { - if e.Type != pb.EntryConfChange { - continue - } - if r.pendingConf { - panic("unexpected double uncommitted config entry") - } - r.pendingConf = true - } - r.appendEntry(pb.Entry{Data: nil}) - r.logger.Infof("%x became leader at term %d", r.id, r.Term) -} - -func (r *raft) campaign() { - r.becomeCandidate() - if r.quorum() == r.poll(r.id, true) { - r.becomeLeader() - return - } - for id := range r.prs { - if id == r.id { - continue - } - r.logger.Infof("%x [logterm: %d, index: %d] sent vote request to %x at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), id, r.Term) - r.send(pb.Message{To: id, Type: pb.MsgVote, Index: 
r.raftLog.lastIndex(), LogTerm: r.raftLog.lastTerm()}) - } -} - -func (r *raft) poll(id uint64, v bool) (granted int) { - if v { - r.logger.Infof("%x received vote from %x at term %d", r.id, id, r.Term) - } else { - r.logger.Infof("%x received vote rejection from %x at term %d", r.id, id, r.Term) - } - if _, ok := r.votes[id]; !ok { - r.votes[id] = v - } - for _, vv := range r.votes { - if vv { - granted++ - } - } - return granted -} - -func (r *raft) Step(m pb.Message) error { - if m.Type == pb.MsgHup { - if r.state != StateLeader { - r.logger.Infof("%x is starting a new election at term %d", r.id, r.Term) - r.campaign() - } else { - r.logger.Debugf("%x ignoring MsgHup because already leader", r.id) - } - return nil - } - - switch { - case m.Term == 0: - // local message - case m.Term > r.Term: - lead := m.From - if m.Type == pb.MsgVote { - lead = None - } - r.logger.Infof("%x [term: %d] received a %s message with higher term from %x [term: %d]", - r.id, r.Term, m.Type, m.From, m.Term) - r.becomeFollower(m.Term, lead) - case m.Term < r.Term: - // ignore - r.logger.Infof("%x [term: %d] ignored a %s message with lower term from %x [term: %d]", - r.id, r.Term, m.Type, m.From, m.Term) - return nil - } - r.step(r, m) - return nil -} - -type stepFunc func(r *raft, m pb.Message) - -func stepLeader(r *raft, m pb.Message) { - - // These message types do not require any progress for m.From. - switch m.Type { - case pb.MsgBeat: - r.bcastHeartbeat() - return - case pb.MsgCheckQuorum: - if !r.checkQuorumActive() { - r.logger.Warningf("%x stepped down to follower since quorum is not active", r.id) - r.becomeFollower(r.Term, None) - } - return - case pb.MsgProp: - if len(m.Entries) == 0 { - r.logger.Panicf("%x stepped empty MsgProp", r.id) - } - if _, ok := r.prs[r.id]; !ok { - // If we are not currently a member of the range (i.e. this node - // was removed from the configuration while serving as leader), - // drop any new proposals. - return - } - for i, e := range m.Entries { - if e.Type == pb.EntryConfChange { - if r.pendingConf { - m.Entries[i] = pb.Entry{Type: pb.EntryNormal} - } - r.pendingConf = true - } - } - r.appendEntry(m.Entries...) - r.bcastAppend() - return - case pb.MsgVote: - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term) - r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true}) - return - } - - // All other message types require a progress for m.From (pr). 
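quorum() above is simple majority arithmetic, and maybeCommit (earlier) applies it as an order statistic over Match indexes. A standalone sketch with hypothetical values:

package main

import (
	"fmt"
	"sort"
)

func main() {
	// Match indexes for a 5-node cluster, leader included.
	matches := []int{12, 9, 15, 9, 11}
	sort.Sort(sort.Reverse(sort.IntSlice(matches))) // 15 12 11 9 9
	quorum := len(matches)/2 + 1                    // 3
	// The quorum-th highest Match is committable: at least 3 nodes hold
	// every entry up to index 11.
	fmt.Println(matches[quorum-1]) // 11
}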
- pr, prOk := r.prs[m.From] - if !prOk { - r.logger.Debugf("no progress available for %x", m.From) - return - } - switch m.Type { - case pb.MsgAppResp: - pr.RecentActive = true - - if m.Reject { - r.logger.Debugf("%x received msgApp rejection(lastindex: %d) from %x for index %d", - r.id, m.RejectHint, m.From, m.Index) - if pr.maybeDecrTo(m.Index, m.RejectHint) { - r.logger.Debugf("%x decreased progress of %x to [%s]", r.id, m.From, pr) - if pr.State == ProgressStateReplicate { - pr.becomeProbe() - } - r.sendAppend(m.From) - } - } else { - oldPaused := pr.isPaused() - if pr.maybeUpdate(m.Index) { - switch { - case pr.State == ProgressStateProbe: - pr.becomeReplicate() - case pr.State == ProgressStateSnapshot && pr.maybeSnapshotAbort(): - r.logger.Debugf("%x snapshot aborted, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - pr.becomeProbe() - case pr.State == ProgressStateReplicate: - pr.ins.freeTo(m.Index) - } - - if r.maybeCommit() { - r.bcastAppend() - } else if oldPaused { - // update() reset the wait state on this node. If we had delayed sending - // an update before, send it now. - r.sendAppend(m.From) - } - } - } - case pb.MsgHeartbeatResp: - pr.RecentActive = true - - // free one slot for the full inflights window to allow progress. - if pr.State == ProgressStateReplicate && pr.ins.full() { - pr.ins.freeFirstOne() - } - if pr.Match < r.raftLog.lastIndex() { - r.sendAppend(m.From) - } - case pb.MsgSnapStatus: - if pr.State != ProgressStateSnapshot { - return - } - if !m.Reject { - pr.becomeProbe() - r.logger.Debugf("%x snapshot succeeded, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - } else { - pr.snapshotFailure() - pr.becomeProbe() - r.logger.Debugf("%x snapshot failed, resumed sending replication messages to %x [%s]", r.id, m.From, pr) - } - // If the snapshot finished, wait for the msgAppResp from the remote node before sending - // out the next msgApp. - // If the snapshot failed, wait for a heartbeat interval before the next try. - pr.pause() - case pb.MsgUnreachable: - // During optimistic replication, if the remote becomes unreachable, - // there is a high probability that a MsgApp is lost.
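The MsgAppResp rejection path above composes maybeDecrTo with a state downgrade. A sketch of that composition, assuming a helper compiled inside package raft:

package raft

// handleRejectionSketch returns true if the rejection was fresh and the
// caller should immediately re-send an append at the lowered Next.
func handleRejectionSketch(pr *Progress, rejectedIndex, lastHint uint64) bool {
	if !pr.maybeDecrTo(rejectedIndex, lastHint) {
		return false // stale or out-of-order rejection: ignore it
	}
	if pr.State == ProgressStateReplicate {
		pr.becomeProbe() // back to one unacknowledged probe at a time
	}
	return true
}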
- if pr.State == ProgressStateReplicate { - pr.becomeProbe() - } - r.logger.Debugf("%x failed to send message to %x because it is unreachable [%s]", r.id, m.From, pr) - } -} - -func stepCandidate(r *raft, m pb.Message) { - switch m.Type { - case pb.MsgProp: - r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) - return - case pb.MsgApp: - r.becomeFollower(r.Term, m.From) - r.handleAppendEntries(m) - case pb.MsgHeartbeat: - r.becomeFollower(r.Term, m.From) - r.handleHeartbeat(m) - case pb.MsgSnap: - r.becomeFollower(m.Term, m.From) - r.handleSnapshot(m) - case pb.MsgVote: - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term) - r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true}) - case pb.MsgVoteResp: - gr := r.poll(m.From, !m.Reject) - r.logger.Infof("%x [quorum:%d] has received %d votes and %d vote rejections", r.id, r.quorum(), gr, len(r.votes)-gr) - switch r.quorum() { - case gr: - r.becomeLeader() - r.bcastAppend() - case len(r.votes) - gr: - r.becomeFollower(r.Term, None) - } - } -} - -func stepFollower(r *raft, m pb.Message) { - switch m.Type { - case pb.MsgProp: - if r.lead == None { - r.logger.Infof("%x no leader at term %d; dropping proposal", r.id, r.Term) - return - } - m.To = r.lead - r.send(m) - case pb.MsgApp: - r.electionElapsed = 0 - r.lead = m.From - r.handleAppendEntries(m) - case pb.MsgHeartbeat: - r.electionElapsed = 0 - r.lead = m.From - r.handleHeartbeat(m) - case pb.MsgSnap: - r.electionElapsed = 0 - r.handleSnapshot(m) - case pb.MsgVote: - if (r.Vote == None || r.Vote == m.From) && r.raftLog.isUpToDate(m.Index, m.LogTerm) { - r.electionElapsed = 0 - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] voted for %x [logterm: %d, index: %d] at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term) - r.Vote = m.From - r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp}) - } else { - r.logger.Infof("%x [logterm: %d, index: %d, vote: %x] rejected vote from %x [logterm: %d, index: %d] at term %d", - r.id, r.raftLog.lastTerm(), r.raftLog.lastIndex(), r.Vote, m.From, m.LogTerm, m.Index, r.Term) - r.send(pb.Message{To: m.From, Type: pb.MsgVoteResp, Reject: true}) - } - } -} - -func (r *raft) handleAppendEntries(m pb.Message) { - if m.Index < r.raftLog.committed { - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) - return - } - - if mlastIndex, ok := r.raftLog.maybeAppend(m.Index, m.LogTerm, m.Commit, m.Entries...); ok { - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: mlastIndex}) - } else { - r.logger.Debugf("%x [logterm: %d, index: %d] rejected msgApp [logterm: %d, index: %d] from %x", - r.id, r.raftLog.zeroTermOnErrCompacted(r.raftLog.term(m.Index)), m.Index, m.LogTerm, m.Index, m.From) - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: m.Index, Reject: true, RejectHint: r.raftLog.lastIndex()}) - } -} - -func (r *raft) handleHeartbeat(m pb.Message) { - r.raftLog.commitTo(m.Commit) - r.send(pb.Message{To: m.From, Type: pb.MsgHeartbeatResp}) -} - -func (r *raft) handleSnapshot(m pb.Message) { - sindex, sterm := m.Snapshot.Metadata.Index, m.Snapshot.Metadata.Term - if r.restore(m.Snapshot) { - r.logger.Infof("%x [commit: %d] restored snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, sindex, sterm) - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: 
r.raftLog.lastIndex()}) - } else { - r.logger.Infof("%x [commit: %d] ignored snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, sindex, sterm) - r.send(pb.Message{To: m.From, Type: pb.MsgAppResp, Index: r.raftLog.committed}) - } -} - -// restore recovers the state machine from a snapshot. It restores the log and the -// configuration of state machine. -func (r *raft) restore(s pb.Snapshot) bool { - if s.Metadata.Index <= r.raftLog.committed { - return false - } - if r.raftLog.matchTerm(s.Metadata.Index, s.Metadata.Term) { - r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] fast-forwarded commit to snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) - r.raftLog.commitTo(s.Metadata.Index) - return false - } - - r.logger.Infof("%x [commit: %d, lastindex: %d, lastterm: %d] starts to restore snapshot [index: %d, term: %d]", - r.id, r.raftLog.committed, r.raftLog.lastIndex(), r.raftLog.lastTerm(), s.Metadata.Index, s.Metadata.Term) - - r.raftLog.restore(s) - r.prs = make(map[uint64]*Progress) - for _, n := range s.Metadata.ConfState.Nodes { - match, next := uint64(0), uint64(r.raftLog.lastIndex())+1 - if n == r.id { - match = next - 1 - } else { - match = 0 - } - r.setProgress(n, match, next) - r.logger.Infof("%x restored progress of %x [%s]", r.id, n, r.prs[n]) - } - return true -} - -// promotable indicates whether state machine can be promoted to leader, -// which is true when its own id is in progress list. -func (r *raft) promotable() bool { - _, ok := r.prs[r.id] - return ok -} - -func (r *raft) addNode(id uint64) { - if _, ok := r.prs[id]; ok { - // Ignore any redundant addNode calls (which can happen because the - // initial bootstrapping entries are applied twice). - return - } - - r.setProgress(id, 0, r.raftLog.lastIndex()+1) - r.pendingConf = false -} - -func (r *raft) removeNode(id uint64) { - r.delProgress(id) - r.pendingConf = false - // The quorum size is now smaller, so see if any pending entries can - // be committed. - if r.maybeCommit() { - r.bcastAppend() - } -} - -func (r *raft) resetPendingConf() { r.pendingConf = false } - -func (r *raft) setProgress(id, match, next uint64) { - r.prs[id] = &Progress{Next: next, Match: match, ins: newInflights(r.maxInflight)} -} - -func (r *raft) delProgress(id uint64) { - delete(r.prs, id) -} - -func (r *raft) loadState(state pb.HardState) { - if state.Commit < r.raftLog.committed || state.Commit > r.raftLog.lastIndex() { - r.logger.Panicf("%x state.commit %d is out of range [%d, %d]", r.id, state.Commit, r.raftLog.committed, r.raftLog.lastIndex()) - } - r.raftLog.committed = state.Commit - r.Term = state.Term - r.Vote = state.Vote -} - -// isElectionTimeout returns true if r.electionElapsed is greater than the -// randomized election timeout in (electiontimeout, 2 * electiontimeout - 1). -// Otherwise, it returns false. -func (r *raft) isElectionTimeout() bool { - d := r.electionElapsed - r.electionTimeout - if d < 0 { - return false - } - return d > r.rand.Int()%r.electionTimeout -} - -// checkQuorumActive returns true if the quorum is active from -// the view of the local raft state machine. Otherwise, it returns -// false. -// checkQuorumActive also resets all RecentActive to false. 
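isElectionTimeout above re-rolls a random threshold on every tick, so each node's effective timeout lands somewhere past electionTimeout and the cluster rarely sees simultaneous candidates. A standalone sketch of the same check driven by a tick loop (the seed is arbitrary here; newRaft seeds from the node ID):

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	const electionTimeout = 10
	rnd := rand.New(rand.NewSource(42))
	for elapsed := 1; ; elapsed++ {
		d := elapsed - electionTimeout
		if d >= 0 && d > rnd.Int()%electionTimeout {
			fmt.Println("start election at tick", elapsed) // between 11 and 20
			return
		}
	}
}

checkQuorumActive itself follows.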
-func (r *raft) checkQuorumActive() bool { - var act int - - for id := range r.prs { - if id == r.id { // self is always active - act++ - continue - } - - if r.prs[id].RecentActive { - act++ - } - - r.prs[id].RecentActive = false - } - - return act >= r.quorum() -} diff --git a/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go b/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go deleted file mode 100644 index 319134cdeb..0000000000 --- a/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.pb.go +++ /dev/null @@ -1,1768 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: raft.proto -// DO NOT EDIT! - -/* - Package raftpb is a generated protocol buffer package. - - It is generated from these files: - raft.proto - - It has these top-level messages: - Entry - SnapshotMetadata - Snapshot - Message - HardState - ConfState - ConfChange -*/ -package raftpb - -import ( - "fmt" - - proto "github.com/gogo/protobuf/proto" -) - -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type EntryType int32 - -const ( - EntryNormal EntryType = 0 - EntryConfChange EntryType = 1 -) - -var EntryType_name = map[int32]string{ - 0: "EntryNormal", - 1: "EntryConfChange", -} -var EntryType_value = map[string]int32{ - "EntryNormal": 0, - "EntryConfChange": 1, -} - -func (x EntryType) Enum() *EntryType { - p := new(EntryType) - *p = x - return p -} -func (x EntryType) String() string { - return proto.EnumName(EntryType_name, int32(x)) -} -func (x *EntryType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(EntryType_value, data, "EntryType") - if err != nil { - return err - } - *x = EntryType(value) - return nil -} - -type MessageType int32 - -const ( - MsgHup MessageType = 0 - MsgBeat MessageType = 1 - MsgProp MessageType = 2 - MsgApp MessageType = 3 - MsgAppResp MessageType = 4 - MsgVote MessageType = 5 - MsgVoteResp MessageType = 6 - MsgSnap MessageType = 7 - MsgHeartbeat MessageType = 8 - MsgHeartbeatResp MessageType = 9 - MsgUnreachable MessageType = 10 - MsgSnapStatus MessageType = 11 - MsgCheckQuorum MessageType = 12 -) - -var MessageType_name = map[int32]string{ - 0: "MsgHup", - 1: "MsgBeat", - 2: "MsgProp", - 3: "MsgApp", - 4: "MsgAppResp", - 5: "MsgVote", - 6: "MsgVoteResp", - 7: "MsgSnap", - 8: "MsgHeartbeat", - 9: "MsgHeartbeatResp", - 10: "MsgUnreachable", - 11: "MsgSnapStatus", - 12: "MsgCheckQuorum", -} -var MessageType_value = map[string]int32{ - "MsgHup": 0, - "MsgBeat": 1, - "MsgProp": 2, - "MsgApp": 3, - "MsgAppResp": 4, - "MsgVote": 5, - "MsgVoteResp": 6, - "MsgSnap": 7, - "MsgHeartbeat": 8, - "MsgHeartbeatResp": 9, - "MsgUnreachable": 10, - "MsgSnapStatus": 11, - "MsgCheckQuorum": 12, -} - -func (x MessageType) Enum() *MessageType { - p := new(MessageType) - *p = x - return p -} -func (x MessageType) String() string { - return proto.EnumName(MessageType_name, int32(x)) -} -func (x *MessageType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(MessageType_value, data, "MessageType") - if err != nil { - return err - } - *x = MessageType(value) - return nil -} - -type ConfChangeType int32 - -const ( - ConfChangeAddNode ConfChangeType = 0 - ConfChangeRemoveNode ConfChangeType = 1 - ConfChangeUpdateNode ConfChangeType = 2 -) - -var ConfChangeType_name = map[int32]string{ - 0: "ConfChangeAddNode", - 1: "ConfChangeRemoveNode", - 2: "ConfChangeUpdateNode", -} -var ConfChangeType_value = map[string]int32{ - 
"ConfChangeAddNode": 0, - "ConfChangeRemoveNode": 1, - "ConfChangeUpdateNode": 2, -} - -func (x ConfChangeType) Enum() *ConfChangeType { - p := new(ConfChangeType) - *p = x - return p -} -func (x ConfChangeType) String() string { - return proto.EnumName(ConfChangeType_name, int32(x)) -} -func (x *ConfChangeType) UnmarshalJSON(data []byte) error { - value, err := proto.UnmarshalJSONEnum(ConfChangeType_value, data, "ConfChangeType") - if err != nil { - return err - } - *x = ConfChangeType(value) - return nil -} - -type Entry struct { - Type EntryType `protobuf:"varint,1,opt,name=Type,enum=raftpb.EntryType" json:"Type"` - Term uint64 `protobuf:"varint,2,opt,name=Term" json:"Term"` - Index uint64 `protobuf:"varint,3,opt,name=Index" json:"Index"` - Data []byte `protobuf:"bytes,4,opt,name=Data" json:"Data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Entry) Reset() { *m = Entry{} } -func (m *Entry) String() string { return proto.CompactTextString(m) } -func (*Entry) ProtoMessage() {} - -type SnapshotMetadata struct { - ConfState ConfState `protobuf:"bytes,1,opt,name=conf_state" json:"conf_state"` - Index uint64 `protobuf:"varint,2,opt,name=index" json:"index"` - Term uint64 `protobuf:"varint,3,opt,name=term" json:"term"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *SnapshotMetadata) Reset() { *m = SnapshotMetadata{} } -func (m *SnapshotMetadata) String() string { return proto.CompactTextString(m) } -func (*SnapshotMetadata) ProtoMessage() {} - -type Snapshot struct { - Data []byte `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"` - Metadata SnapshotMetadata `protobuf:"bytes,2,opt,name=metadata" json:"metadata"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -type Message struct { - Type MessageType `protobuf:"varint,1,opt,name=type,enum=raftpb.MessageType" json:"type"` - To uint64 `protobuf:"varint,2,opt,name=to" json:"to"` - From uint64 `protobuf:"varint,3,opt,name=from" json:"from"` - Term uint64 `protobuf:"varint,4,opt,name=term" json:"term"` - LogTerm uint64 `protobuf:"varint,5,opt,name=logTerm" json:"logTerm"` - Index uint64 `protobuf:"varint,6,opt,name=index" json:"index"` - Entries []Entry `protobuf:"bytes,7,rep,name=entries" json:"entries"` - Commit uint64 `protobuf:"varint,8,opt,name=commit" json:"commit"` - Snapshot Snapshot `protobuf:"bytes,9,opt,name=snapshot" json:"snapshot"` - Reject bool `protobuf:"varint,10,opt,name=reject" json:"reject"` - RejectHint uint64 `protobuf:"varint,11,opt,name=rejectHint" json:"rejectHint"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Message) Reset() { *m = Message{} } -func (m *Message) String() string { return proto.CompactTextString(m) } -func (*Message) ProtoMessage() {} - -type HardState struct { - Term uint64 `protobuf:"varint,1,opt,name=term" json:"term"` - Vote uint64 `protobuf:"varint,2,opt,name=vote" json:"vote"` - Commit uint64 `protobuf:"varint,3,opt,name=commit" json:"commit"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *HardState) Reset() { *m = HardState{} } -func (m *HardState) String() string { return proto.CompactTextString(m) } -func (*HardState) ProtoMessage() {} - -type ConfState struct { - Nodes []uint64 `protobuf:"varint,1,rep,name=nodes" json:"nodes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ConfState) Reset() { *m = ConfState{} } -func (m *ConfState) String() string { return proto.CompactTextString(m) } -func 
(*ConfState) ProtoMessage() {} - -type ConfChange struct { - ID uint64 `protobuf:"varint,1,opt,name=ID" json:"ID"` - Type ConfChangeType `protobuf:"varint,2,opt,name=Type,enum=raftpb.ConfChangeType" json:"Type"` - NodeID uint64 `protobuf:"varint,3,opt,name=NodeID" json:"NodeID"` - Context []byte `protobuf:"bytes,4,opt,name=Context" json:"Context,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ConfChange) Reset() { *m = ConfChange{} } -func (m *ConfChange) String() string { return proto.CompactTextString(m) } -func (*ConfChange) ProtoMessage() {} - -func init() { - proto.RegisterType((*Entry)(nil), "raftpb.Entry") - proto.RegisterType((*SnapshotMetadata)(nil), "raftpb.SnapshotMetadata") - proto.RegisterType((*Snapshot)(nil), "raftpb.Snapshot") - proto.RegisterType((*Message)(nil), "raftpb.Message") - proto.RegisterType((*HardState)(nil), "raftpb.HardState") - proto.RegisterType((*ConfState)(nil), "raftpb.ConfState") - proto.RegisterType((*ConfChange)(nil), "raftpb.ConfChange") - proto.RegisterEnum("raftpb.EntryType", EntryType_name, EntryType_value) - proto.RegisterEnum("raftpb.MessageType", MessageType_name, MessageType_value) - proto.RegisterEnum("raftpb.ConfChangeType", ConfChangeType_name, ConfChangeType_value) -} -func (m *Entry) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Entry) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRaft(data, i, uint64(m.Type)) - data[i] = 0x10 - i++ - i = encodeVarintRaft(data, i, uint64(m.Term)) - data[i] = 0x18 - i++ - i = encodeVarintRaft(data, i, uint64(m.Index)) - if m.Data != nil { - data[i] = 0x22 - i++ - i = encodeVarintRaft(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *SnapshotMetadata) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *SnapshotMetadata) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0xa - i++ - i = encodeVarintRaft(data, i, uint64(m.ConfState.Size())) - n1, err := m.ConfState.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n1 - data[i] = 0x10 - i++ - i = encodeVarintRaft(data, i, uint64(m.Index)) - data[i] = 0x18 - i++ - i = encodeVarintRaft(data, i, uint64(m.Term)) - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Snapshot) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Snapshot) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Data != nil { - data[i] = 0xa - i++ - i = encodeVarintRaft(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - data[i] = 0x12 - i++ - i = encodeVarintRaft(data, i, uint64(m.Metadata.Size())) - n2, err := m.Metadata.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n2 - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Message) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := 
m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Message) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRaft(data, i, uint64(m.Type)) - data[i] = 0x10 - i++ - i = encodeVarintRaft(data, i, uint64(m.To)) - data[i] = 0x18 - i++ - i = encodeVarintRaft(data, i, uint64(m.From)) - data[i] = 0x20 - i++ - i = encodeVarintRaft(data, i, uint64(m.Term)) - data[i] = 0x28 - i++ - i = encodeVarintRaft(data, i, uint64(m.LogTerm)) - data[i] = 0x30 - i++ - i = encodeVarintRaft(data, i, uint64(m.Index)) - if len(m.Entries) > 0 { - for _, msg := range m.Entries { - data[i] = 0x3a - i++ - i = encodeVarintRaft(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n - } - } - data[i] = 0x40 - i++ - i = encodeVarintRaft(data, i, uint64(m.Commit)) - data[i] = 0x4a - i++ - i = encodeVarintRaft(data, i, uint64(m.Snapshot.Size())) - n3, err := m.Snapshot.MarshalTo(data[i:]) - if err != nil { - return 0, err - } - i += n3 - data[i] = 0x50 - i++ - if m.Reject { - data[i] = 1 - } else { - data[i] = 0 - } - i++ - data[i] = 0x58 - i++ - i = encodeVarintRaft(data, i, uint64(m.RejectHint)) - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *HardState) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *HardState) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRaft(data, i, uint64(m.Term)) - data[i] = 0x10 - i++ - i = encodeVarintRaft(data, i, uint64(m.Vote)) - data[i] = 0x18 - i++ - i = encodeVarintRaft(data, i, uint64(m.Commit)) - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ConfState) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ConfState) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if len(m.Nodes) > 0 { - for _, num := range m.Nodes { - data[i] = 0x8 - i++ - i = encodeVarintRaft(data, i, uint64(num)) - } - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ConfChange) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *ConfChange) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRaft(data, i, uint64(m.ID)) - data[i] = 0x10 - i++ - i = encodeVarintRaft(data, i, uint64(m.Type)) - data[i] = 0x18 - i++ - i = encodeVarintRaft(data, i, uint64(m.NodeID)) - if m.Context != nil { - data[i] = 0x22 - i++ - i = encodeVarintRaft(data, i, uint64(len(m.Context))) - i += copy(data[i:], m.Context) - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Raft(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - 
data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Raft(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintRaft(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Entry) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.Type)) - n += 1 + sovRaft(uint64(m.Term)) - n += 1 + sovRaft(uint64(m.Index)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovRaft(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *SnapshotMetadata) Size() (n int) { - var l int - _ = l - l = m.ConfState.Size() - n += 1 + l + sovRaft(uint64(l)) - n += 1 + sovRaft(uint64(m.Index)) - n += 1 + sovRaft(uint64(m.Term)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Snapshot) Size() (n int) { - var l int - _ = l - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovRaft(uint64(l)) - } - l = m.Metadata.Size() - n += 1 + l + sovRaft(uint64(l)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Message) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.Type)) - n += 1 + sovRaft(uint64(m.To)) - n += 1 + sovRaft(uint64(m.From)) - n += 1 + sovRaft(uint64(m.Term)) - n += 1 + sovRaft(uint64(m.LogTerm)) - n += 1 + sovRaft(uint64(m.Index)) - if len(m.Entries) > 0 { - for _, e := range m.Entries { - l = e.Size() - n += 1 + l + sovRaft(uint64(l)) - } - } - n += 1 + sovRaft(uint64(m.Commit)) - l = m.Snapshot.Size() - n += 1 + l + sovRaft(uint64(l)) - n += 2 - n += 1 + sovRaft(uint64(m.RejectHint)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *HardState) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.Term)) - n += 1 + sovRaft(uint64(m.Vote)) - n += 1 + sovRaft(uint64(m.Commit)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConfState) Size() (n int) { - var l int - _ = l - if len(m.Nodes) > 0 { - for _, e := range m.Nodes { - n += 1 + sovRaft(uint64(e)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ConfChange) Size() (n int) { - var l int - _ = l - n += 1 + sovRaft(uint64(m.ID)) - n += 1 + sovRaft(uint64(m.Type)) - n += 1 + sovRaft(uint64(m.NodeID)) - if m.Context != nil { - l = len(m.Context) - n += 1 + l + sovRaft(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRaft(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRaft(x uint64) (n int) { - return sovRaft(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Entry) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Entry: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Entry: 
illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (EntryType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *SnapshotMetadata) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: SnapshotMetadata: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: SnapshotMetadata: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ConfState", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.ConfState.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Snapshot) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Metadata.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Message) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Message: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Message: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (MessageType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field To", wireType) - } - m.To = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.To |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field From", wireType) - } - m.From = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.From |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LogTerm", wireType) - } - m.LogTerm = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.LogTerm |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 6: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Entries = append(m.Entries, Entry{}) - if err := 
m.Entries[len(m.Entries)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 8: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) - } - m.Commit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Commit |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 9: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if err := m.Snapshot.Unmarshal(data[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 10: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Reject", wireType) - } - var v int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Reject = bool(v != 0) - case 11: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field RejectHint", wireType) - } - m.RejectHint = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.RejectHint |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *HardState) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HardState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HardState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Vote", wireType) - } - m.Vote = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Vote |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Commit", wireType) - } - m.Commit = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Commit |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfState) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfState: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfState: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Nodes", wireType) - } - var v uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - v |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.Nodes = append(m.Nodes, v) - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ConfChange) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ConfChange: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ConfChange: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) - } - m.ID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.ID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (ConfChangeType(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeID", wireType) - } - m.NodeID = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.NodeID |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Context", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRaft - } - if 
iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRaft - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Context = append(m.Context[:0], data[iNdEx:postIndex]...) - if m.Context == nil { - m.Context = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRaft(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRaft - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRaft(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRaft - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRaft - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRaft(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRaft = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRaft = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto b/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto deleted file mode 100644 index 0a98b8cfa5..0000000000 --- a/vendor/src/github.com/coreos/etcd/raft/raftpb/raft.proto +++ /dev/null @@ -1,86 +0,0 @@ -syntax = "proto2"; -package raftpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; -option (gogoproto.goproto_enum_prefix_all) = false; - -enum EntryType { - EntryNormal = 0; - EntryConfChange = 1; -} - -message Entry { - optional EntryType Type = 1 [(gogoproto.nullable) = false]; - optional uint64 Term = 2 [(gogoproto.nullable) = false]; - optional uint64 Index = 3 [(gogoproto.nullable) = 
false]; - optional bytes Data = 4; -} - -message SnapshotMetadata { - optional ConfState conf_state = 1 [(gogoproto.nullable) = false]; - optional uint64 index = 2 [(gogoproto.nullable) = false]; - optional uint64 term = 3 [(gogoproto.nullable) = false]; -} - -message Snapshot { - optional bytes data = 1; - optional SnapshotMetadata metadata = 2 [(gogoproto.nullable) = false]; -} - -enum MessageType { - MsgHup = 0; - MsgBeat = 1; - MsgProp = 2; - MsgApp = 3; - MsgAppResp = 4; - MsgVote = 5; - MsgVoteResp = 6; - MsgSnap = 7; - MsgHeartbeat = 8; - MsgHeartbeatResp = 9; - MsgUnreachable = 10; - MsgSnapStatus = 11; - MsgCheckQuorum = 12; -} - -message Message { - optional MessageType type = 1 [(gogoproto.nullable) = false]; - optional uint64 to = 2 [(gogoproto.nullable) = false]; - optional uint64 from = 3 [(gogoproto.nullable) = false]; - optional uint64 term = 4 [(gogoproto.nullable) = false]; - optional uint64 logTerm = 5 [(gogoproto.nullable) = false]; - optional uint64 index = 6 [(gogoproto.nullable) = false]; - repeated Entry entries = 7 [(gogoproto.nullable) = false]; - optional uint64 commit = 8 [(gogoproto.nullable) = false]; - optional Snapshot snapshot = 9 [(gogoproto.nullable) = false]; - optional bool reject = 10 [(gogoproto.nullable) = false]; - optional uint64 rejectHint = 11 [(gogoproto.nullable) = false]; -} - -message HardState { - optional uint64 term = 1 [(gogoproto.nullable) = false]; - optional uint64 vote = 2 [(gogoproto.nullable) = false]; - optional uint64 commit = 3 [(gogoproto.nullable) = false]; -} - -message ConfState { - repeated uint64 nodes = 1; -} - -enum ConfChangeType { - ConfChangeAddNode = 0; - ConfChangeRemoveNode = 1; - ConfChangeUpdateNode = 2; -} - -message ConfChange { - optional uint64 ID = 1 [(gogoproto.nullable) = false]; - optional ConfChangeType Type = 2 [(gogoproto.nullable) = false]; - optional uint64 NodeID = 3 [(gogoproto.nullable) = false]; - optional bytes Context = 4; -} diff --git a/vendor/src/github.com/coreos/etcd/raft/rawnode.go b/vendor/src/github.com/coreos/etcd/raft/rawnode.go deleted file mode 100644 index 8cf0858917..0000000000 --- a/vendor/src/github.com/coreos/etcd/raft/rawnode.go +++ /dev/null @@ -1,228 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "errors" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -// ErrStepLocalMsg is returned when try to step a local raft message -var ErrStepLocalMsg = errors.New("raft: cannot step raft local message") - -// ErrStepPeerNotFound is returned when try to step a response message -// but there is no peer found in raft.prs for that node. -var ErrStepPeerNotFound = errors.New("raft: cannot step as peer not found") - -// RawNode is a thread-unsafe Node. -// The methods of this struct correspond to the methods of Node and are described -// more fully there. 
-type RawNode struct { - raft *raft - prevSoftSt *SoftState - prevHardSt pb.HardState -} - -func (rn *RawNode) newReady() Ready { - return newReady(rn.raft, rn.prevSoftSt, rn.prevHardSt) -} - -func (rn *RawNode) commitReady(rd Ready) { - if rd.SoftState != nil { - rn.prevSoftSt = rd.SoftState - } - if !IsEmptyHardState(rd.HardState) { - rn.prevHardSt = rd.HardState - } - if rn.prevHardSt.Commit != 0 { - // In most cases, prevHardSt and rd.HardState will be the same - // because when there are new entries to apply we just sent a - // HardState with an updated Commit value. However, on initial - // startup the two are different because we don't send a HardState - // until something changes, but we do send any un-applied but - // committed entries (and previously-committed entries may be - // incorporated into the snapshot, even if rd.CommittedEntries is - // empty). Therefore we mark all committed entries as applied - // whether they were included in rd.HardState or not. - rn.raft.raftLog.appliedTo(rn.prevHardSt.Commit) - } - if len(rd.Entries) > 0 { - e := rd.Entries[len(rd.Entries)-1] - rn.raft.raftLog.stableTo(e.Index, e.Term) - } - if !IsEmptySnap(rd.Snapshot) { - rn.raft.raftLog.stableSnapTo(rd.Snapshot.Metadata.Index) - } -} - -// NewRawNode returns a new RawNode given configuration and a list of raft peers. -func NewRawNode(config *Config, peers []Peer) (*RawNode, error) { - if config.ID == 0 { - panic("config.ID must not be zero") - } - r := newRaft(config) - rn := &RawNode{ - raft: r, - } - lastIndex, err := config.Storage.LastIndex() - if err != nil { - panic(err) // TODO(bdarnell) - } - // If the log is empty, this is a new RawNode (like StartNode); otherwise it's - // restoring an existing RawNode (like RestartNode). - // TODO(bdarnell): rethink RawNode initialization and whether the application needs - // to be able to tell us when it expects the RawNode to exist. - if lastIndex == 0 { - r.becomeFollower(1, None) - ents := make([]pb.Entry, len(peers)) - for i, peer := range peers { - cc := pb.ConfChange{Type: pb.ConfChangeAddNode, NodeID: peer.ID, Context: peer.Context} - data, err := cc.Marshal() - if err != nil { - panic("unexpected marshal error") - } - - ents[i] = pb.Entry{Type: pb.EntryConfChange, Term: 1, Index: uint64(i + 1), Data: data} - } - r.raftLog.append(ents...) - r.raftLog.committed = uint64(len(ents)) - for _, peer := range peers { - r.addNode(peer.ID) - } - } - // Set the initial hard and soft states after performing all initialization. - rn.prevSoftSt = r.softState() - rn.prevHardSt = r.hardState() - - return rn, nil -} - -// Tick advances the internal logical clock by a single tick. -func (rn *RawNode) Tick() { - rn.raft.tick() -} - -// Campaign causes this RawNode to transition to candidate state. -func (rn *RawNode) Campaign() error { - return rn.raft.Step(pb.Message{ - Type: pb.MsgHup, - }) -} - -// Propose proposes data be appended to the raft log. -func (rn *RawNode) Propose(data []byte) error { - return rn.raft.Step(pb.Message{ - Type: pb.MsgProp, - From: rn.raft.id, - Entries: []pb.Entry{ - {Data: data}, - }}) -} - -// ProposeConfChange proposes a config change. -func (rn *RawNode) ProposeConfChange(cc pb.ConfChange) error { - data, err := cc.Marshal() - if err != nil { - return err - } - return rn.raft.Step(pb.Message{ - Type: pb.MsgProp, - Entries: []pb.Entry{ - {Type: pb.EntryConfChange, Data: data}, - }, - }) -} - -// ApplyConfChange applies a config change to the local node. 
-func (rn *RawNode) ApplyConfChange(cc pb.ConfChange) *pb.ConfState { - if cc.NodeID == None { - rn.raft.resetPendingConf() - return &pb.ConfState{Nodes: rn.raft.nodes()} - } - switch cc.Type { - case pb.ConfChangeAddNode: - rn.raft.addNode(cc.NodeID) - case pb.ConfChangeRemoveNode: - rn.raft.removeNode(cc.NodeID) - case pb.ConfChangeUpdateNode: - rn.raft.resetPendingConf() - default: - panic("unexpected conf type") - } - return &pb.ConfState{Nodes: rn.raft.nodes()} -} - -// Step advances the state machine using the given message. -func (rn *RawNode) Step(m pb.Message) error { - // ignore unexpected local messages receiving over network - if IsLocalMsg(m) { - return ErrStepLocalMsg - } - if _, ok := rn.raft.prs[m.From]; ok || !IsResponseMsg(m) { - return rn.raft.Step(m) - } - return ErrStepPeerNotFound -} - -// Ready returns the current point-in-time state of this RawNode. -func (rn *RawNode) Ready() Ready { - rd := rn.newReady() - rn.raft.msgs = nil - return rd -} - -// HasReady called when RawNode user need to check if any Ready pending. -// Checking logic in this method should be consistent with Ready.containsUpdates(). -func (rn *RawNode) HasReady() bool { - r := rn.raft - if !r.softState().equal(rn.prevSoftSt) { - return true - } - if hardSt := r.hardState(); !IsEmptyHardState(hardSt) && !isHardStateEqual(hardSt, rn.prevHardSt) { - return true - } - if r.raftLog.unstable.snapshot != nil && !IsEmptySnap(*r.raftLog.unstable.snapshot) { - return true - } - if len(r.msgs) > 0 || len(r.raftLog.unstableEntries()) > 0 || r.raftLog.hasNextEnts() { - return true - } - return false -} - -// Advance notifies the RawNode that the application has applied and saved progress in the -// last Ready results. -func (rn *RawNode) Advance(rd Ready) { - rn.commitReady(rd) -} - -// Status returns the current status of the given group. -func (rn *RawNode) Status() *Status { - status := getStatus(rn.raft) - return &status -} - -// ReportUnreachable reports the given node is not reachable for the last send. -func (rn *RawNode) ReportUnreachable(id uint64) { - _ = rn.raft.Step(pb.Message{Type: pb.MsgUnreachable, From: id}) -} - -// ReportSnapshot reports the status of the sent snapshot. -func (rn *RawNode) ReportSnapshot(id uint64, status SnapshotStatus) { - rej := status == SnapshotFailure - - _ = rn.raft.Step(pb.Message{Type: pb.MsgSnapStatus, From: id, Reject: rej}) -} diff --git a/vendor/src/github.com/coreos/etcd/raft/status.go b/vendor/src/github.com/coreos/etcd/raft/status.go deleted file mode 100644 index d18a489541..0000000000 --- a/vendor/src/github.com/coreos/etcd/raft/status.go +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "fmt" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -type Status struct { - ID uint64 - - pb.HardState - SoftState - - Applied uint64 - Progress map[uint64]Progress -} - -// getStatus gets a copy of the current raft status. 
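RawNode, deleted above, is the poll-driven variant of Node: the caller owns the event loop, asking HasReady, draining a Ready, persisting and transmitting its contents, then acknowledging with Advance. A minimal sketch of that loop under stated assumptions: driveRawNode is an illustrative name, persistence and transport are stubbed out, and the Config fields used (ElectionTick, HeartbeatTick, MaxSizePerMsg, MaxInflightMsgs) are the ones this vintage of the package exposes.

    package main

    import (
        "github.com/coreos/etcd/raft"
    )

    // driveRawNode shows the canonical single-threaded loop around a
    // RawNode: tick, check HasReady, take a Ready, persist and send
    // its contents, then Advance. A real application must make the
    // persistence durable before sending messages.
    func driveRawNode(rn *raft.RawNode, storage *raft.MemoryStorage) {
        for i := 0; i < 10; i++ { // bounded here; a real loop runs on a ticker
            rn.Tick()
            if !rn.HasReady() {
                continue
            }
            rd := rn.Ready()
            if !raft.IsEmptySnap(rd.Snapshot) {
                storage.ApplySnapshot(rd.Snapshot)
            }
            storage.Append(rd.Entries) // persist newly appended entries
            if !raft.IsEmptyHardState(rd.HardState) {
                storage.SetHardState(rd.HardState) // persist term/vote/commit
            }
            // send rd.Messages to peers and apply rd.CommittedEntries here
            rn.Advance(rd)
        }
    }

    func main() {
        storage := raft.NewMemoryStorage()
        cfg := &raft.Config{
            ID: 1, ElectionTick: 10, HeartbeatTick: 1,
            Storage: storage, MaxSizePerMsg: 4096, MaxInflightMsgs: 256,
        }
        rn, err := raft.NewRawNode(cfg, []raft.Peer{{ID: 1}})
        if err != nil {
            panic(err)
        }
        driveRawNode(rn, storage)
    }

Every call in the sketch (NewRawNode, Tick, HasReady, Ready, Advance, IsEmptySnap, IsEmptyHardState, and the MemoryStorage methods) appears in the files removed by this patch.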
-func getStatus(r *raft) Status { - s := Status{ID: r.id} - s.HardState = r.hardState() - s.SoftState = *r.softState() - - s.Applied = r.raftLog.applied - - if s.RaftState == StateLeader { - s.Progress = make(map[uint64]Progress) - for id, p := range r.prs { - s.Progress[id] = *p - } - } - - return s -} - -// MarshalJSON translates the raft status into JSON. -// TODO: try to simplify this by introducing ID type into raft -func (s Status) MarshalJSON() ([]byte, error) { - j := fmt.Sprintf(`{"id":"%x","term":%d,"vote":"%x","commit":%d,"lead":"%x","raftState":%q,"progress":{`, - s.ID, s.Term, s.Vote, s.Commit, s.Lead, s.RaftState) - - if len(s.Progress) == 0 { - j += "}}" - } else { - for k, v := range s.Progress { - subj := fmt.Sprintf(`"%x":{"match":%d,"next":%d,"state":%q},`, k, v.Match, v.Next, v.State) - j += subj - } - // remove the trailing "," - j = j[:len(j)-1] + "}}" - } - return []byte(j), nil -} - -func (s Status) String() string { - b, err := s.MarshalJSON() - if err != nil { - raftLogger.Panicf("unexpected error: %v", err) - } - return string(b) -} diff --git a/vendor/src/github.com/coreos/etcd/raft/storage.go b/vendor/src/github.com/coreos/etcd/raft/storage.go deleted file mode 100644 index f3724162ce..0000000000 --- a/vendor/src/github.com/coreos/etcd/raft/storage.go +++ /dev/null @@ -1,252 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package raft - -import ( - "errors" - "sync" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -// ErrCompacted is returned by Storage.Entries/Compact when a requested -// index is unavailable because it predates the last snapshot. -var ErrCompacted = errors.New("requested index is unavailable due to compaction") - -// ErrSnapOutOfDate is returned by Storage.CreateSnapshot when a requested -// index is older than the existing snapshot. -var ErrSnapOutOfDate = errors.New("requested index is older than the existing snapshot") - -var ErrUnavailable = errors.New("requested entry at index is unavailable") - -// Storage is an interface that may be implemented by the application -// to retrieve log entries from storage. -// -// If any Storage method returns an error, the raft instance will -// become inoperable and refuse to participate in elections; the -// application is responsible for cleanup and recovery in this case. -type Storage interface { - // InitialState returns the saved HardState and ConfState information. - InitialState() (pb.HardState, pb.ConfState, error) - // Entries returns a slice of log entries in the range [lo,hi). - // MaxSize limits the total size of the log entries returned, but - // Entries returns at least one entry if any. - Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) - // Term returns the term of entry i, which must be in the range - // [FirstIndex()-1, LastIndex()]. The term of the entry before - // FirstIndex is retained for matching purposes even though the - // rest of that entry may not be available. 
- Term(i uint64) (uint64, error) - // LastIndex returns the index of the last entry in the log. - LastIndex() (uint64, error) - // FirstIndex returns the index of the first log entry that is - // possibly available via Entries (older entries have been incorporated - // into the latest Snapshot; if storage only contains the dummy entry the - // first log entry is not available). - FirstIndex() (uint64, error) - // Snapshot returns the most recent snapshot. - // If snapshot is temporarily unavailable, it should return ErrSnapshotTemporarilyUnavailable, - // so raft state machine could know that Storage needs some time to prepare - // snapshot and call Snapshot later. - Snapshot() (pb.Snapshot, error) -} - -// MemoryStorage implements the Storage interface backed by an -// in-memory array. -type MemoryStorage struct { - // Protects access to all fields. Most methods of MemoryStorage are - // run on the raft goroutine, but Append() is run on an application - // goroutine. - sync.Mutex - - hardState pb.HardState - snapshot pb.Snapshot - // ents[i] has raft log position i+snapshot.Metadata.Index - ents []pb.Entry -} - -// NewMemoryStorage creates an empty MemoryStorage. -func NewMemoryStorage() *MemoryStorage { - return &MemoryStorage{ - // When starting from scratch populate the list with a dummy entry at term zero. - ents: make([]pb.Entry, 1), - } -} - -// InitialState implements the Storage interface. -func (ms *MemoryStorage) InitialState() (pb.HardState, pb.ConfState, error) { - return ms.hardState, ms.snapshot.Metadata.ConfState, nil -} - -// SetHardState saves the current HardState. -func (ms *MemoryStorage) SetHardState(st pb.HardState) error { - ms.hardState = st - return nil -} - -// Entries implements the Storage interface. -func (ms *MemoryStorage) Entries(lo, hi, maxSize uint64) ([]pb.Entry, error) { - ms.Lock() - defer ms.Unlock() - offset := ms.ents[0].Index - if lo <= offset { - return nil, ErrCompacted - } - if hi > ms.lastIndex()+1 { - raftLogger.Panicf("entries' hi(%d) is out of bound lastindex(%d)", hi, ms.lastIndex()) - } - // only contains dummy entries. - if len(ms.ents) == 1 { - return nil, ErrUnavailable - } - - ents := ms.ents[lo-offset : hi-offset] - return limitSize(ents, maxSize), nil -} - -// Term implements the Storage interface. -func (ms *MemoryStorage) Term(i uint64) (uint64, error) { - ms.Lock() - defer ms.Unlock() - offset := ms.ents[0].Index - if i < offset { - return 0, ErrCompacted - } - return ms.ents[i-offset].Term, nil -} - -// LastIndex implements the Storage interface. -func (ms *MemoryStorage) LastIndex() (uint64, error) { - ms.Lock() - defer ms.Unlock() - return ms.lastIndex(), nil -} - -func (ms *MemoryStorage) lastIndex() uint64 { - return ms.ents[0].Index + uint64(len(ms.ents)) - 1 -} - -// FirstIndex implements the Storage interface. -func (ms *MemoryStorage) FirstIndex() (uint64, error) { - ms.Lock() - defer ms.Unlock() - return ms.firstIndex(), nil -} - -func (ms *MemoryStorage) firstIndex() uint64 { - return ms.ents[0].Index + 1 -} - -// Snapshot implements the Storage interface. -func (ms *MemoryStorage) Snapshot() (pb.Snapshot, error) { - ms.Lock() - defer ms.Unlock() - return ms.snapshot, nil -} - -// ApplySnapshot overwrites the contents of this Storage object with -// those of the given snapshot. -func (ms *MemoryStorage) ApplySnapshot(snap pb.Snapshot) error { - ms.Lock() - defer ms.Unlock() - - // TODO: return ErrSnapOutOfDate? 
- ms.snapshot = snap - ms.ents = []pb.Entry{{Term: snap.Metadata.Term, Index: snap.Metadata.Index}} - return nil -} - -// CreateSnapshot makes a snapshot which can be retrieved with Snapshot() and -// can be used to reconstruct the state at that point. -// If any configuration changes have been made since the last compaction, -// the result of the last ApplyConfChange must be passed in. -func (ms *MemoryStorage) CreateSnapshot(i uint64, cs *pb.ConfState, data []byte) (pb.Snapshot, error) { - ms.Lock() - defer ms.Unlock() - if i <= ms.snapshot.Metadata.Index { - return pb.Snapshot{}, ErrSnapOutOfDate - } - - offset := ms.ents[0].Index - if i > ms.lastIndex() { - raftLogger.Panicf("snapshot %d is out of bound lastindex(%d)", i, ms.lastIndex()) - } - - ms.snapshot.Metadata.Index = i - ms.snapshot.Metadata.Term = ms.ents[i-offset].Term - if cs != nil { - ms.snapshot.Metadata.ConfState = *cs - } - ms.snapshot.Data = data - return ms.snapshot, nil -} - -// Compact discards all log entries prior to compactIndex. -// It is the application's responsibility to not attempt to compact an index -// greater than raftLog.applied. -func (ms *MemoryStorage) Compact(compactIndex uint64) error { - ms.Lock() - defer ms.Unlock() - offset := ms.ents[0].Index - if compactIndex <= offset { - return ErrCompacted - } - if compactIndex > ms.lastIndex() { - raftLogger.Panicf("compact %d is out of bound lastindex(%d)", compactIndex, ms.lastIndex()) - } - - i := compactIndex - offset - ents := make([]pb.Entry, 1, 1+uint64(len(ms.ents))-i) - ents[0].Index = ms.ents[i].Index - ents[0].Term = ms.ents[i].Term - ents = append(ents, ms.ents[i+1:]...) - ms.ents = ents - return nil -} - -// Append the new entries to storage. -// TODO (xiangli): ensure the entries are continuous and -// entries[0].Index > ms.entries[0].Index -func (ms *MemoryStorage) Append(entries []pb.Entry) error { - ms.Lock() - defer ms.Unlock() - if len(entries) == 0 { - return nil - } - first := ms.ents[0].Index + 1 - last := entries[0].Index + uint64(len(entries)) - 1 - - // shortcut if there is no new entry. - if last < first { - return nil - } - // truncate compacted entries - if first > entries[0].Index { - entries = entries[first-entries[0].Index:] - } - - offset := entries[0].Index - ms.ents[0].Index - switch { - case uint64(len(ms.ents)) > offset: - ms.ents = append([]pb.Entry{}, ms.ents[:offset]...) - ms.ents = append(ms.ents, entries...) - case uint64(len(ms.ents)) == offset: - ms.ents = append(ms.ents, entries...) - default: - raftLogger.Panicf("missing log entry [last: %d, append at: %d]", - ms.lastIndex(), entries[0].Index) - } - return nil -} diff --git a/vendor/src/github.com/coreos/etcd/raft/util.go b/vendor/src/github.com/coreos/etcd/raft/util.go deleted file mode 100644 index 8d4c419001..0000000000 --- a/vendor/src/github.com/coreos/etcd/raft/util.go +++ /dev/null @@ -1,116 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package raft - -import ( - "bytes" - "fmt" - - pb "github.com/coreos/etcd/raft/raftpb" -) - -func (st StateType) MarshalJSON() ([]byte, error) { - return []byte(fmt.Sprintf("%q", st.String())), nil -} - -// uint64Slice implements sort interface -type uint64Slice []uint64 - -func (p uint64Slice) Len() int { return len(p) } -func (p uint64Slice) Less(i, j int) bool { return p[i] < p[j] } -func (p uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } - -func min(a, b uint64) uint64 { - if a > b { - return b - } - return a -} - -func max(a, b uint64) uint64 { - if a > b { - return a - } - return b -} - -func IsLocalMsg(m pb.Message) bool { - return m.Type == pb.MsgHup || m.Type == pb.MsgBeat || m.Type == pb.MsgUnreachable || m.Type == pb.MsgSnapStatus || m.Type == pb.MsgCheckQuorum -} - -func IsResponseMsg(m pb.Message) bool { - return m.Type == pb.MsgAppResp || m.Type == pb.MsgVoteResp || m.Type == pb.MsgHeartbeatResp || m.Type == pb.MsgUnreachable -} - -// EntryFormatter can be implemented by the application to provide human-readable formatting -// of entry data. Nil is a valid EntryFormatter and will use a default format. -type EntryFormatter func([]byte) string - -// DescribeMessage returns a concise human-readable description of a -// Message for debugging. -func DescribeMessage(m pb.Message, f EntryFormatter) string { - var buf bytes.Buffer - fmt.Fprintf(&buf, "%x->%x %v Term:%d Log:%d/%d", m.From, m.To, m.Type, m.Term, m.LogTerm, m.Index) - if m.Reject { - fmt.Fprintf(&buf, " Rejected") - if m.RejectHint != 0 { - fmt.Fprintf(&buf, "(Hint:%d)", m.RejectHint) - } - } - if m.Commit != 0 { - fmt.Fprintf(&buf, " Commit:%d", m.Commit) - } - if len(m.Entries) > 0 { - fmt.Fprintf(&buf, " Entries:[") - for i, e := range m.Entries { - if i != 0 { - buf.WriteString(", ") - } - buf.WriteString(DescribeEntry(e, f)) - } - fmt.Fprintf(&buf, "]") - } - if !IsEmptySnap(m.Snapshot) { - fmt.Fprintf(&buf, " Snapshot:%v", m.Snapshot) - } - return buf.String() -} - -// DescribeEntry returns a concise human-readable description of an -// Entry for debugging. -func DescribeEntry(e pb.Entry, f EntryFormatter) string { - var formatted string - if e.Type == pb.EntryNormal && f != nil { - formatted = f(e.Data) - } else { - formatted = fmt.Sprintf("%q", e.Data) - } - return fmt.Sprintf("%d/%d %s %s", e.Term, e.Index, e.Type, formatted) -} - -func limitSize(ents []pb.Entry, maxSize uint64) []pb.Entry { - if len(ents) == 0 { - return ents - } - size := ents[0].Size() - var limit int - for limit = 1; limit < len(ents); limit++ { - size += ents[limit].Size() - if uint64(size) > maxSize { - break - } - } - return ents[:limit] -} diff --git a/vendor/src/github.com/coreos/etcd/snap/db.go b/vendor/src/github.com/coreos/etcd/snap/db.go deleted file mode 100644 index ca68837cb1..0000000000 --- a/vendor/src/github.com/coreos/etcd/snap/db.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package snap - -import ( - "fmt" - "io" - "io/ioutil" - "os" - "path" - - "github.com/coreos/etcd/pkg/fileutil" -) - -// SaveDBFrom saves snapshot of the database from the given reader. It -// guarantees the save operation is atomic. -func (s *Snapshotter) SaveDBFrom(r io.Reader, id uint64) error { - f, err := ioutil.TempFile(s.dir, "tmp") - if err != nil { - return err - } - var n int64 - n, err = io.Copy(f, r) - if err == nil { - err = f.Sync() - } - f.Close() - if err != nil { - os.Remove(f.Name()) - return err - } - fn := path.Join(s.dir, fmt.Sprintf("%016x.snap.db", id)) - if fileutil.Exist(fn) { - os.Remove(f.Name()) - return nil - } - err = os.Rename(f.Name(), fn) - if err != nil { - os.Remove(f.Name()) - return err - } - - plog.Infof("saved database snapshot to disk [total bytes: %d]", n) - - return nil -} - -// DBFilePath returns the file path for the snapshot of the database with -// given id. If the snapshot does not exist, it returns error. -func (s *Snapshotter) DBFilePath(id uint64) (string, error) { - fns, err := fileutil.ReadDir(s.dir) - if err != nil { - return "", err - } - wfn := fmt.Sprintf("%016x.snap.db", id) - for _, fn := range fns { - if fn == wfn { - return path.Join(s.dir, fn), nil - } - } - return "", fmt.Errorf("snap: snapshot file doesn't exist") -} diff --git a/vendor/src/github.com/coreos/etcd/snap/message.go b/vendor/src/github.com/coreos/etcd/snap/message.go deleted file mode 100644 index 2d2b211061..0000000000 --- a/vendor/src/github.com/coreos/etcd/snap/message.go +++ /dev/null @@ -1,59 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package snap - -import ( - "io" - - "github.com/coreos/etcd/raft/raftpb" -) - -// Message is a struct that contains a raft Message and a ReadCloser. The type -// of raft message MUST be MsgSnap, which contains the raft meta-data and an -// additional data []byte field that contains the snapshot of the actual state -// machine. -// Message contains the ReadCloser field for handling large snapshot. This avoid -// copying the entire snapshot into a byte array, which consumes a lot of memory. -// -// User of Message should close the Message after sending it. -type Message struct { - raftpb.Message - ReadCloser io.ReadCloser - closeC chan bool -} - -func NewMessage(rs raftpb.Message, rc io.ReadCloser) *Message { - return &Message{ - Message: rs, - ReadCloser: rc, - closeC: make(chan bool, 1), - } -} - -// CloseNotify returns a channel that receives a single value -// when the message sent is finished. true indicates the sent -// is successful. 
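SaveDBFrom above encodes a durability recipe worth naming: stream into a temp file in the destination directory, fsync, close, and only then rename onto the final path, so readers observe either the old file or the complete new one. A sketch of the same pattern in isolation (atomicWriteFile is a hypothetical helper, not part of the snap package):

    package main

    import (
        "io"
        "io/ioutil"
        "os"
        "path/filepath"
        "strings"
    )

    // atomicWriteFile mirrors the discipline used by SaveDBFrom:
    // write to a temp file in the target directory, fsync it, then
    // rename over the final path. Rename within one filesystem is
    // atomic, so a crash never leaves a half-written file at name.
    func atomicWriteFile(dir, name string, r io.Reader) error {
        f, err := ioutil.TempFile(dir, "tmp")
        if err != nil {
            return err
        }
        _, err = io.Copy(f, r)
        if err == nil {
            err = f.Sync() // flush to stable storage before publishing
        }
        f.Close()
        if err != nil {
            os.Remove(f.Name())
            return err
        }
        return os.Rename(f.Name(), filepath.Join(dir, name))
    }

    func main() {
        if err := atomicWriteFile(os.TempDir(), "example.db", strings.NewReader("payload")); err != nil {
            panic(err)
        }
    }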
-func (m Message) CloseNotify() <-chan bool { - return m.closeC -} - -func (m Message) CloseWithError(err error) { - m.ReadCloser.Close() - if err == nil { - m.closeC <- true - } else { - m.closeC <- false - } -} diff --git a/vendor/src/github.com/coreos/etcd/snap/metrics.go b/vendor/src/github.com/coreos/etcd/snap/metrics.go deleted file mode 100644 index 88aad5dc9c..0000000000 --- a/vendor/src/github.com/coreos/etcd/snap/metrics.go +++ /dev/null @@ -1,41 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package snap - -import "github.com/prometheus/client_golang/prometheus" - -var ( - // TODO: save_fsync latency? - saveDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "snapshot", - Name: "save_total_durations_seconds", - Help: "The total latency distributions of save called by snapshot.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - - marshallingDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "snapshot", - Name: "save_marshalling_durations_seconds", - Help: "The marshalling cost distributions of save called by snapshot.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) -) - -func init() { - prometheus.MustRegister(saveDurations) - prometheus.MustRegister(marshallingDurations) -} diff --git a/vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go b/vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go deleted file mode 100644 index 5d1d21ab31..0000000000 --- a/vendor/src/github.com/coreos/etcd/snap/snappb/snap.pb.go +++ /dev/null @@ -1,332 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: snap.proto -// DO NOT EDIT! - -/* - Package snappb is a generated protocol buffer package. - - It is generated from these files: - snap.proto - - It has these top-level messages: - Snapshot -*/ -package snappb - -import ( - "fmt" - - proto "github.com/gogo/protobuf/proto" -) - -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. 
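The snap/metrics.go file above shows the conventional client_golang pattern also used elsewhere in the patch: package-level histograms with exponential buckets, registered once in init, fed durations converted to float64 seconds. A small self-contained sketch of the same pattern (the demo/store namespace and the opDurations variable are illustrative):

    package main

    import (
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    // opDurations mirrors saveDurations above: a histogram whose
    // buckets double from 1ms upward across 14 steps.
    var opDurations = prometheus.NewHistogram(prometheus.HistogramOpts{
        Namespace: "demo",
        Subsystem: "store",
        Name:      "op_durations_seconds",
        Help:      "Latency distribution of demo operations.",
        Buckets:   prometheus.ExponentialBuckets(0.001, 2, 14),
    })

    func init() {
        prometheus.MustRegister(opDurations)
    }

    func main() {
        start := time.Now()
        time.Sleep(5 * time.Millisecond) // stand-in for real work
        opDurations.Observe(float64(time.Since(start)) / float64(time.Second))
    }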
-var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Snapshot struct { - Crc uint32 `protobuf:"varint,1,opt,name=crc" json:"crc"` - Data []byte `protobuf:"bytes,2,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -func init() { - proto.RegisterType((*Snapshot)(nil), "snappb.snapshot") -} -func (m *Snapshot) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Snapshot) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintSnap(data, i, uint64(m.Crc)) - if m.Data != nil { - data[i] = 0x12 - i++ - i = encodeVarintSnap(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeFixed64Snap(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Snap(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintSnap(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Snapshot) Size() (n int) { - var l int - _ = l - n += 1 + sovSnap(uint64(m.Crc)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovSnap(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovSnap(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozSnap(x uint64) (n int) { - return sovSnap(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Snapshot) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) - } - m.Crc = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Crc |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowSnap - } 
- if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthSnap - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) - if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipSnap(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthSnap - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipSnap(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthSnap - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowSnap - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipSnap(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthSnap = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowSnap = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto b/vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto deleted file mode 100644 index cd3d21d0ee..0000000000 --- a/vendor/src/github.com/coreos/etcd/snap/snappb/snap.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto2"; -package snappb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message snapshot { - optional uint32 crc = 1 [(gogoproto.nullable) = false]; - optional bytes data = 2; -} diff --git a/vendor/src/github.com/coreos/etcd/snap/snapshotter.go b/vendor/src/github.com/coreos/etcd/snap/snapshotter.go deleted file mode 100644 index 4e06483a88..0000000000 --- 
a/vendor/src/github.com/coreos/etcd/snap/snapshotter.go +++ /dev/null @@ -1,189 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package snap stores raft nodes' states with snapshots. -package snap - -import ( - "errors" - "fmt" - "hash/crc32" - "io/ioutil" - "os" - "path" - "sort" - "strings" - "time" - - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/snap/snappb" - - "github.com/coreos/pkg/capnslog" -) - -const ( - snapSuffix = ".snap" -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "snap") - - ErrNoSnapshot = errors.New("snap: no available snapshot") - ErrEmptySnapshot = errors.New("snap: empty snapshot") - ErrCRCMismatch = errors.New("snap: crc mismatch") - crcTable = crc32.MakeTable(crc32.Castagnoli) -) - -type Snapshotter struct { - dir string -} - -func New(dir string) *Snapshotter { - return &Snapshotter{ - dir: dir, - } -} - -func (s *Snapshotter) SaveSnap(snapshot raftpb.Snapshot) error { - if raft.IsEmptySnap(snapshot) { - return nil - } - return s.save(&snapshot) -} - -func (s *Snapshotter) save(snapshot *raftpb.Snapshot) error { - start := time.Now() - - fname := fmt.Sprintf("%016x-%016x%s", snapshot.Metadata.Term, snapshot.Metadata.Index, snapSuffix) - b := pbutil.MustMarshal(snapshot) - crc := crc32.Update(0, crcTable, b) - snap := snappb.Snapshot{Crc: crc, Data: b} - d, err := snap.Marshal() - if err != nil { - return err - } else { - marshallingDurations.Observe(float64(time.Since(start)) / float64(time.Second)) - } - - err = ioutil.WriteFile(path.Join(s.dir, fname), d, 0666) - if err == nil { - saveDurations.Observe(float64(time.Since(start)) / float64(time.Second)) - } - return err -} - -func (s *Snapshotter) Load() (*raftpb.Snapshot, error) { - names, err := s.snapNames() - if err != nil { - return nil, err - } - var snap *raftpb.Snapshot - for _, name := range names { - if snap, err = loadSnap(s.dir, name); err == nil { - break - } - } - if err != nil { - return nil, ErrNoSnapshot - } - return snap, nil -} - -func loadSnap(dir, name string) (*raftpb.Snapshot, error) { - fpath := path.Join(dir, name) - snap, err := Read(fpath) - if err != nil { - renameBroken(fpath) - } - return snap, err -} - -// Read reads the snapshot named by snapname and returns the snapshot. 
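The save and Read functions in snapshotter.go bracket every snapshot with a CRC-32C (Castagnoli) checksum: compute it over the marshaled bytes before writing, recompute and compare before unmarshaling. A sketch of that integrity envelope in isolation (seal and open are hypothetical names for the two halves):

    package main

    import (
        "errors"
        "fmt"
        "hash/crc32"
    )

    var castagnoli = crc32.MakeTable(crc32.Castagnoli)

    // seal pairs a payload with its CRC-32C, the same check the
    // Snapshotter stores in the snappb envelope.
    func seal(data []byte) (uint32, []byte) {
        return crc32.Update(0, castagnoli, data), data
    }

    // open validates the CRC before trusting the payload, as Read does
    // before unmarshaling the inner raftpb.Snapshot.
    func open(crc uint32, data []byte) ([]byte, error) {
        if crc32.Update(0, castagnoli, data) != crc {
            return nil, errors.New("crc mismatch")
        }
        return data, nil
    }

    func main() {
        crc, data := seal([]byte("snapshot bytes"))
        if _, err := open(crc, data); err != nil {
            panic(err)
        }
        data[0] ^= 0xFF // corrupt one byte
        _, err := open(crc, data)
        fmt.Println(err) // crc mismatch
    }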
-func Read(snapname string) (*raftpb.Snapshot, error) { - b, err := ioutil.ReadFile(snapname) - if err != nil { - plog.Errorf("cannot read file %v: %v", snapname, err) - return nil, err - } - - if len(b) == 0 { - plog.Errorf("unexpected empty snapshot") - return nil, ErrEmptySnapshot - } - - var serializedSnap snappb.Snapshot - if err = serializedSnap.Unmarshal(b); err != nil { - plog.Errorf("corrupted snapshot file %v: %v", snapname, err) - return nil, err - } - - if len(serializedSnap.Data) == 0 || serializedSnap.Crc == 0 { - plog.Errorf("unexpected empty snapshot") - return nil, ErrEmptySnapshot - } - - crc := crc32.Update(0, crcTable, serializedSnap.Data) - if crc != serializedSnap.Crc { - plog.Errorf("corrupted snapshot file %v: crc mismatch", snapname) - return nil, ErrCRCMismatch - } - - var snap raftpb.Snapshot - if err = snap.Unmarshal(serializedSnap.Data); err != nil { - plog.Errorf("corrupted snapshot file %v: %v", snapname, err) - return nil, err - } - return &snap, nil -} - -// snapNames returns the filename of the snapshots in logical time order (from newest to oldest). -// If there is no available snapshots, an ErrNoSnapshot will be returned. -func (s *Snapshotter) snapNames() ([]string, error) { - dir, err := os.Open(s.dir) - if err != nil { - return nil, err - } - defer dir.Close() - names, err := dir.Readdirnames(-1) - if err != nil { - return nil, err - } - snaps := checkSuffix(names) - if len(snaps) == 0 { - return nil, ErrNoSnapshot - } - sort.Sort(sort.Reverse(sort.StringSlice(snaps))) - return snaps, nil -} - -func checkSuffix(names []string) []string { - snaps := []string{} - for i := range names { - if strings.HasSuffix(names[i], snapSuffix) { - snaps = append(snaps, names[i]) - } else { - plog.Warningf("skipped unexpected non snapshot file %v", names[i]) - } - } - return snaps -} - -func renameBroken(path string) { - brokenPath := path + ".broken" - if err := os.Rename(path, brokenPath); err != nil { - plog.Warningf("cannot rename broken snapshot file %v to %v: %v", path, brokenPath, err) - } -} diff --git a/vendor/src/github.com/coreos/etcd/wal/decoder.go b/vendor/src/github.com/coreos/etcd/wal/decoder.go deleted file mode 100644 index f75c919fba..0000000000 --- a/vendor/src/github.com/coreos/etcd/wal/decoder.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package wal - -import ( - "bufio" - "encoding/binary" - "hash" - "io" - "sync" - - "github.com/coreos/etcd/pkg/crc" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/wal/walpb" -) - -type decoder struct { - mu sync.Mutex - br *bufio.Reader - - c io.Closer - crc hash.Hash32 -} - -func newDecoder(rc io.ReadCloser) *decoder { - return &decoder{ - br: bufio.NewReader(rc), - c: rc, - crc: crc.New(0, crcTable), - } -} - -func (d *decoder) decode(rec *walpb.Record) error { - d.mu.Lock() - defer d.mu.Unlock() - - rec.Reset() - l, err := readInt64(d.br) - if err != nil { - return err - } - data := make([]byte, l) - if _, err = io.ReadFull(d.br, data); err != nil { - // ReadFull returns io.EOF only if no bytes were read - // the decoder should treat this as an ErrUnexpectedEOF instead. - if err == io.EOF { - err = io.ErrUnexpectedEOF - } - return err - } - if err := rec.Unmarshal(data); err != nil { - return err - } - // skip crc checking if the record type is crcType - if rec.Type == crcType { - return nil - } - d.crc.Write(rec.Data) - return rec.Validate(d.crc.Sum32()) -} - -func (d *decoder) updateCRC(prevCrc uint32) { - d.crc = crc.New(prevCrc, crcTable) -} - -func (d *decoder) lastCRC() uint32 { - return d.crc.Sum32() -} - -func (d *decoder) close() error { - return d.c.Close() -} - -func mustUnmarshalEntry(d []byte) raftpb.Entry { - var e raftpb.Entry - pbutil.MustUnmarshal(&e, d) - return e -} - -func mustUnmarshalState(d []byte) raftpb.HardState { - var s raftpb.HardState - pbutil.MustUnmarshal(&s, d) - return s -} - -func readInt64(r io.Reader) (int64, error) { - var n int64 - err := binary.Read(r, binary.LittleEndian, &n) - return n, err -} diff --git a/vendor/src/github.com/coreos/etcd/wal/doc.go b/vendor/src/github.com/coreos/etcd/wal/doc.go deleted file mode 100644 index 769b522f04..0000000000 --- a/vendor/src/github.com/coreos/etcd/wal/doc.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -/* -Package wal provides an implementation of a write ahead log that is used by -etcd. - -A WAL is created at a particular directory and is made up of a number of -segmented WAL files. Inside of each file the raft state and entries are appended -to it with the Save method: - - metadata := []byte{} - w, err := wal.Create("/var/lib/etcd", metadata) - ... - err := w.Save(s, ents) - -After saving an raft snapshot to disk, SaveSnapshot method should be called to -record it. So WAL can match with the saved snapshot when restarting. - - err := w.SaveSnapshot(walpb.Snapshot{Index: 10, Term: 2}) - -When a user has finished using a WAL it must be closed: - - w.Close() - -WAL files are placed inside of the directory in the following format: -$seq-$index.wal - -The first WAL file to be created will be 0000000000000000-0000000000000000.wal -indicating an initial sequence of 0 and an initial raft index of 0. The first -entry written to WAL MUST have raft index 0. 
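The wal package comment above sketches the write path and the read path separately; stitched together they look roughly like the following. This is a sketch under assumptions: it uses only the vendored API quoted in that comment (Create, Save, Open, ReadAll), the directory and indices are illustrative, and error handling is compressed to panics.

    package main

    import (
        "io/ioutil"
        "os"

        "github.com/coreos/etcd/raft/raftpb"
        "github.com/coreos/etcd/wal"
        "github.com/coreos/etcd/wal/walpb"
    )

    func main() {
        dir, _ := ioutil.TempDir("", "waldemo")
        defer os.RemoveAll(dir)

        // Write path: create the WAL, save state plus entries, close.
        w, err := wal.Create(dir, []byte("metadata"))
        if err != nil {
            panic(err)
        }
        st := raftpb.HardState{Term: 1, Commit: 1}
        ents := []raftpb.Entry{{Term: 1, Index: 1, Data: []byte("op")}}
        if err := w.Save(st, ents); err != nil {
            panic(err)
        }
        w.Close()

        // Read path: reopen from the beginning (empty snapshot) and
        // replay everything before appending again, as the doc requires.
        w, err = wal.Open(dir, walpb.Snapshot{})
        if err != nil {
            panic(err)
        }
        defer w.Close()
        metadata, state, entries, err := w.ReadAll()
        if err != nil {
            panic(err)
        }
        _, _, _ = metadata, state, entries
    }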
- -WAL will cuts its current wal files if its size exceeds 8MB. This will increment an internal -sequence number and cause a new file to be created. If the last raft index saved -was 0x20 and this is the first time cut has been called on this WAL then the sequence will -increment from 0x0 to 0x1. The new file will be: 0000000000000001-0000000000000021.wal. -If a second cut issues 0x10 entries with incremental index later then the file will be called: -0000000000000002-0000000000000031.wal. - -At a later time a WAL can be opened at a particular snapshot. If there is no -snapshot, an empty snapshot should be passed in. - - w, err := wal.Open("/var/lib/etcd", walpb.Snapshot{Index: 10, Term: 2}) - ... - -The snapshot must have been written to the WAL. - -Additional items cannot be Saved to this WAL until all of the items from the given -snapshot to the end of the WAL are read first: - - metadata, state, ents, err := w.ReadAll() - -This will give you the metadata, the last raft.State and the slice of -raft.Entry items in the log. - -*/ -package wal diff --git a/vendor/src/github.com/coreos/etcd/wal/encoder.go b/vendor/src/github.com/coreos/etcd/wal/encoder.go deleted file mode 100644 index f5b73fe12b..0000000000 --- a/vendor/src/github.com/coreos/etcd/wal/encoder.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "bufio" - "encoding/binary" - "hash" - "io" - "sync" - - "github.com/coreos/etcd/pkg/crc" - "github.com/coreos/etcd/wal/walpb" -) - -type encoder struct { - mu sync.Mutex - bw *bufio.Writer - - crc hash.Hash32 - buf []byte - uint64buf []byte -} - -func newEncoder(w io.Writer, prevCrc uint32) *encoder { - return &encoder{ - bw: bufio.NewWriter(w), - crc: crc.New(prevCrc, crcTable), - // 1MB buffer - buf: make([]byte, 1024*1024), - uint64buf: make([]byte, 8), - } -} - -func (e *encoder) encode(rec *walpb.Record) error { - e.mu.Lock() - defer e.mu.Unlock() - - e.crc.Write(rec.Data) - rec.Crc = e.crc.Sum32() - var ( - data []byte - err error - n int - ) - - if rec.Size() > len(e.buf) { - data, err = rec.Marshal() - if err != nil { - return err - } - } else { - n, err = rec.MarshalTo(e.buf) - if err != nil { - return err - } - data = e.buf[:n] - } - if err = writeInt64(e.bw, int64(len(data)), e.uint64buf); err != nil { - return err - } - _, err = e.bw.Write(data) - return err -} - -func (e *encoder) flush() error { - e.mu.Lock() - defer e.mu.Unlock() - return e.bw.Flush() -} - -func writeInt64(w io.Writer, n int64, buf []byte) error { - // http://golang.org/src/encoding/binary/binary.go - binary.LittleEndian.PutUint64(buf, uint64(n)) - _, err := w.Write(buf) - return err -} diff --git a/vendor/src/github.com/coreos/etcd/wal/metrics.go b/vendor/src/github.com/coreos/etcd/wal/metrics.go deleted file mode 100644 index ed270fac63..0000000000 --- a/vendor/src/github.com/coreos/etcd/wal/metrics.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2015 CoreOS, Inc. 
-// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import "github.com/prometheus/client_golang/prometheus" - -var ( - syncDurations = prometheus.NewHistogram(prometheus.HistogramOpts{ - Namespace: "etcd", - Subsystem: "wal", - Name: "fsync_durations_seconds", - Help: "The latency distributions of fsync called by wal.", - Buckets: prometheus.ExponentialBuckets(0.001, 2, 14), - }) - lastIndexSaved = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "etcd", - Subsystem: "wal", - Name: "last_index_saved", - Help: "The index of the last entry saved by wal.", - }) -) - -func init() { - prometheus.MustRegister(syncDurations) - prometheus.MustRegister(lastIndexSaved) -} diff --git a/vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go b/vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go deleted file mode 100644 index 513c6d17d9..0000000000 --- a/vendor/src/github.com/coreos/etcd/wal/multi_readcloser.go +++ /dev/null @@ -1,45 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import "io" - -type multiReadCloser struct { - closers []io.Closer - reader io.Reader -} - -func (mc *multiReadCloser) Close() error { - var err error - for i := range mc.closers { - err = mc.closers[i].Close() - } - return err -} - -func (mc *multiReadCloser) Read(p []byte) (int, error) { - return mc.reader.Read(p) -} - -func MultiReadCloser(readClosers ...io.ReadCloser) io.ReadCloser { - cs := make([]io.Closer, len(readClosers)) - rs := make([]io.Reader, len(readClosers)) - for i := range readClosers { - cs[i] = readClosers[i] - rs[i] = readClosers[i] - } - r := io.MultiReader(rs...) - return &multiReadCloser{cs, r} -} diff --git a/vendor/src/github.com/coreos/etcd/wal/repair.go b/vendor/src/github.com/coreos/etcd/wal/repair.go deleted file mode 100644 index bcc22ef081..0000000000 --- a/vendor/src/github.com/coreos/etcd/wal/repair.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
-	"io"
-	"os"
-	"path"
-
-	"github.com/coreos/etcd/pkg/fileutil"
-	"github.com/coreos/etcd/wal/walpb"
-)
-
-// Repair tries to repair an io.ErrUnexpectedEOF in the
-// last wal file by truncating it.
-func Repair(dirpath string) bool {
-	f, err := openLast(dirpath)
-	if err != nil {
-		return false
-	}
-	defer f.Close()
-
-	n := 0
-	rec := &walpb.Record{}
-
-	decoder := newDecoder(f)
-	defer decoder.close()
-	for {
-		err := decoder.decode(rec)
-		switch err {
-		case nil:
-			n += 8 + rec.Size()
-			// update crc of the decoder when necessary
-			switch rec.Type {
-			case crcType:
-				crc := decoder.crc.Sum32()
-				// the current crc of the decoder must match the crc of the record.
-				// no need to match a zero crc, since the decoder is new in this case.
-				if crc != 0 && rec.Validate(crc) != nil {
-					return false
-				}
-				decoder.updateCRC(rec.Crc)
-			}
-			continue
-		case io.EOF:
-			return true
-		case io.ErrUnexpectedEOF:
-			plog.Noticef("repairing %v", f.Name())
-			bf, bferr := os.Create(f.Name() + ".broken")
-			if bferr != nil {
-				plog.Errorf("could not repair %v, failed to create backup file", f.Name())
-				return false
-			}
-			defer bf.Close()
-
-			if _, err = f.Seek(0, os.SEEK_SET); err != nil {
-				plog.Errorf("could not repair %v, failed to read file", f.Name())
-				return false
-			}
-
-			if _, err = io.Copy(bf, f); err != nil {
-				plog.Errorf("could not repair %v, failed to copy file", f.Name())
-				return false
-			}
-
-			if err = f.Truncate(int64(n)); err != nil {
-				plog.Errorf("could not repair %v, failed to truncate file", f.Name())
-				return false
-			}
-			if err = f.Sync(); err != nil {
-				plog.Errorf("could not repair %v, failed to sync file", f.Name())
-				return false
-			}
-			return true
-		default:
-			plog.Errorf("could not repair error (%v)", err)
-			return false
-		}
-	}
-}
-
-// openLast opens the last wal file for read and write.
-func openLast(dirpath string) (*os.File, error) {
-	names, err := fileutil.ReadDir(dirpath)
-	if err != nil {
-		return nil, err
-	}
-	names = checkWalNames(names)
-	if len(names) == 0 {
-		return nil, ErrFileNotFound
-	}
-	last := path.Join(dirpath, names[len(names)-1])
-	return os.OpenFile(last, os.O_RDWR, 0)
-}
diff --git a/vendor/src/github.com/coreos/etcd/wal/util.go b/vendor/src/github.com/coreos/etcd/wal/util.go
deleted file mode 100644
index 9588b6ec08..0000000000
--- a/vendor/src/github.com/coreos/etcd/wal/util.go
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-package wal
-
-import (
-	"errors"
-	"fmt"
-	"strings"
-
-	"github.com/coreos/etcd/pkg/fileutil"
-)
-
-var (
-	badWalName = errors.New("bad wal name")
-)
-
-func Exist(dirpath string) bool {
-	names, err := fileutil.ReadDir(dirpath)
-	if err != nil {
-		return false
-	}
-	return len(names) != 0
-}
-
-// searchIndex returns the last array index of names whose raft index section is
-// equal to or smaller than the given index.
-// The given names MUST be sorted. -func searchIndex(names []string, index uint64) (int, bool) { - for i := len(names) - 1; i >= 0; i-- { - name := names[i] - _, curIndex, err := parseWalName(name) - if err != nil { - plog.Panicf("parse correct name should never fail: %v", err) - } - if index >= curIndex { - return i, true - } - } - return -1, false -} - -// names should have been sorted based on sequence number. -// isValidSeq checks whether seq increases continuously. -func isValidSeq(names []string) bool { - var lastSeq uint64 - for _, name := range names { - curSeq, _, err := parseWalName(name) - if err != nil { - plog.Panicf("parse correct name should never fail: %v", err) - } - if lastSeq != 0 && lastSeq != curSeq-1 { - return false - } - lastSeq = curSeq - } - return true -} - -func checkWalNames(names []string) []string { - wnames := make([]string, 0) - for _, name := range names { - if _, _, err := parseWalName(name); err != nil { - plog.Warningf("ignored file %v in wal", name) - continue - } - wnames = append(wnames, name) - } - return wnames -} - -func parseWalName(str string) (seq, index uint64, err error) { - if !strings.HasSuffix(str, ".wal") { - return 0, 0, badWalName - } - _, err = fmt.Sscanf(str, "%016x-%016x.wal", &seq, &index) - return seq, index, err -} - -func walName(seq, index uint64) string { - return fmt.Sprintf("%016x-%016x.wal", seq, index) -} diff --git a/vendor/src/github.com/coreos/etcd/wal/wal.go b/vendor/src/github.com/coreos/etcd/wal/wal.go deleted file mode 100644 index f9a58ca38b..0000000000 --- a/vendor/src/github.com/coreos/etcd/wal/wal.go +++ /dev/null @@ -1,562 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package wal - -import ( - "errors" - "fmt" - "hash/crc32" - "io" - "os" - "path" - "reflect" - "sync" - "time" - - "github.com/coreos/etcd/pkg/fileutil" - "github.com/coreos/etcd/pkg/pbutil" - "github.com/coreos/etcd/raft" - "github.com/coreos/etcd/raft/raftpb" - "github.com/coreos/etcd/wal/walpb" - - "github.com/coreos/pkg/capnslog" -) - -const ( - metadataType int64 = iota + 1 - entryType - stateType - crcType - snapshotType - - // the owner can make/remove files inside the directory - privateDirMode = 0700 - - // the expected size of each wal segment file. - // the actual size might be bigger than it. - segmentSizeBytes = 64 * 1000 * 1000 // 64MB -) - -var ( - plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "wal") - - ErrMetadataConflict = errors.New("wal: conflicting metadata found") - ErrFileNotFound = errors.New("wal: file not found") - ErrCRCMismatch = errors.New("wal: crc mismatch") - ErrSnapshotMismatch = errors.New("wal: snapshot mismatch") - ErrSnapshotNotFound = errors.New("wal: snapshot not found") - crcTable = crc32.MakeTable(crc32.Castagnoli) -) - -// WAL is a logical representation of the stable storage. -// WAL is either in read mode or append mode but not both. -// A newly created WAL is in append mode, and ready for appending records. 
-// A just opened WAL is in read mode, and ready for reading records. -// The WAL will be ready for appending after reading out all the previous records. -type WAL struct { - dir string // the living directory of the underlay files - metadata []byte // metadata recorded at the head of each WAL - state raftpb.HardState // hardstate recorded at the head of WAL - - start walpb.Snapshot // snapshot to start reading - decoder *decoder // decoder to decode records - - mu sync.Mutex - f *os.File // underlay file opened for appending, sync - seq uint64 // sequence of the wal file currently used for writes - enti uint64 // index of the last entry saved to the wal - encoder *encoder // encoder to encode records - - locks []fileutil.Lock // the file locks the WAL is holding (the name is increasing) -} - -// Create creates a WAL ready for appending records. The given metadata is -// recorded at the head of each WAL file, and can be retrieved with ReadAll. -func Create(dirpath string, metadata []byte) (*WAL, error) { - if Exist(dirpath) { - return nil, os.ErrExist - } - - if err := os.MkdirAll(dirpath, privateDirMode); err != nil { - return nil, err - } - - p := path.Join(dirpath, walName(0, 0)) - f, err := os.OpenFile(p, os.O_WRONLY|os.O_APPEND|os.O_CREATE, 0600) - if err != nil { - return nil, err - } - l, err := fileutil.NewLock(f.Name()) - if err != nil { - return nil, err - } - if err = l.Lock(); err != nil { - return nil, err - } - - w := &WAL{ - dir: dirpath, - metadata: metadata, - seq: 0, - f: f, - encoder: newEncoder(f, 0), - } - w.locks = append(w.locks, l) - if err := w.saveCrc(0); err != nil { - return nil, err - } - if err := w.encoder.encode(&walpb.Record{Type: metadataType, Data: metadata}); err != nil { - return nil, err - } - if err := w.SaveSnapshot(walpb.Snapshot{}); err != nil { - return nil, err - } - return w, nil -} - -// Open opens the WAL at the given snap. -// The snap SHOULD have been previously saved to the WAL, or the following -// ReadAll will fail. -// The returned WAL is ready to read and the first record will be the one after -// the given snap. The WAL cannot be appended to before reading out all of its -// previous records. -func Open(dirpath string, snap walpb.Snapshot) (*WAL, error) { - return openAtIndex(dirpath, snap, true) -} - -// OpenForRead only opens the wal files for read. -// Write on a read only wal panics. -func OpenForRead(dirpath string, snap walpb.Snapshot) (*WAL, error) { - return openAtIndex(dirpath, snap, false) -} - -func openAtIndex(dirpath string, snap walpb.Snapshot, write bool) (*WAL, error) { - names, err := fileutil.ReadDir(dirpath) - if err != nil { - return nil, err - } - names = checkWalNames(names) - if len(names) == 0 { - return nil, ErrFileNotFound - } - - nameIndex, ok := searchIndex(names, snap.Index) - if !ok || !isValidSeq(names[nameIndex:]) { - return nil, ErrFileNotFound - } - - // open the wal files for reading - rcs := make([]io.ReadCloser, 0) - ls := make([]fileutil.Lock, 0) - for _, name := range names[nameIndex:] { - f, err := os.Open(path.Join(dirpath, name)) - if err != nil { - return nil, err - } - l, err := fileutil.NewLock(f.Name()) - if err != nil { - return nil, err - } - err = l.TryLock() - if err != nil { - if write { - return nil, err - } - } - rcs = append(rcs, f) - ls = append(ls, l) - } - rc := MultiReadCloser(rcs...) 
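-	// rc chains the selected segment readers into one logical stream;
-	// closing it closes every underlying segment file (see MultiReadCloser
-	// in multi_readcloser.go above). Note that TryLock failures in the loop
-	// above are tolerated when the WAL is opened read-only; they are fatal
-	// only when write is true.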
-
-	// create a WAL ready for reading
-	w := &WAL{
-		dir:     dirpath,
-		start:   snap,
-		decoder: newDecoder(rc),
-		locks:   ls,
-	}
-
-	if write {
-		// open the last wal file for appending
-		seq, _, err := parseWalName(names[len(names)-1])
-		if err != nil {
-			rc.Close()
-			return nil, err
-		}
-		last := path.Join(dirpath, names[len(names)-1])
-
-		f, err := os.OpenFile(last, os.O_WRONLY|os.O_APPEND, 0)
-		if err != nil {
-			rc.Close()
-			return nil, err
-		}
-		err = fileutil.Preallocate(f, segmentSizeBytes)
-		if err != nil {
-			rc.Close()
-			plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
-			return nil, err
-		}
-
-		w.f = f
-		w.seq = seq
-	}
-
-	return w, nil
-}
-
-// ReadAll reads out all records of the current WAL.
-// If opened in write mode, it must read out all records until EOF,
-// or an error will be returned.
-// If opened in read mode, it will try to read all records if possible.
-// If it cannot read out the expected snap, it will return ErrSnapshotNotFound.
-// If the loaded snap doesn't match the expected one, it will return
-// all the records and the error ErrSnapshotMismatch.
-// TODO: detect not-last-snap error.
-// TODO: maybe loosen the matching check.
-// After ReadAll, the WAL will be ready for appending new records.
-func (w *WAL) ReadAll() (metadata []byte, state raftpb.HardState, ents []raftpb.Entry, err error) {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-
-	rec := &walpb.Record{}
-	decoder := w.decoder
-
-	var match bool
-	for err = decoder.decode(rec); err == nil; err = decoder.decode(rec) {
-		switch rec.Type {
-		case entryType:
-			e := mustUnmarshalEntry(rec.Data)
-			if e.Index > w.start.Index {
-				ents = append(ents[:e.Index-w.start.Index-1], e)
-			}
-			w.enti = e.Index
-		case stateType:
-			state = mustUnmarshalState(rec.Data)
-		case metadataType:
-			if metadata != nil && !reflect.DeepEqual(metadata, rec.Data) {
-				state.Reset()
-				return nil, state, nil, ErrMetadataConflict
-			}
-			metadata = rec.Data
-		case crcType:
-			crc := decoder.crc.Sum32()
-			// the current crc of the decoder must match the crc of the record.
-			// no need to match a zero crc, since the decoder is new in this case.
-			if crc != 0 && rec.Validate(crc) != nil {
-				state.Reset()
-				return nil, state, nil, ErrCRCMismatch
-			}
-			decoder.updateCRC(rec.Crc)
-		case snapshotType:
-			var snap walpb.Snapshot
-			pbutil.MustUnmarshal(&snap, rec.Data)
-			if snap.Index == w.start.Index {
-				if snap.Term != w.start.Term {
-					state.Reset()
-					return nil, state, nil, ErrSnapshotMismatch
-				}
-				match = true
-			}
-		default:
-			state.Reset()
-			return nil, state, nil, fmt.Errorf("unexpected block type %d", rec.Type)
-		}
-	}
-
-	switch w.f {
-	case nil:
-		// We do not have to read out all entries in read mode.
-		// The last record may be a partially written one, so
-		// io.ErrUnexpectedEOF might be returned.
-		if err != io.EOF && err != io.ErrUnexpectedEOF {
-			state.Reset()
-			return nil, state, nil, err
-		}
-	default:
-		// We must read all of the entries if WAL is opened in write mode.
-		if err != io.EOF {
-			state.Reset()
-			return nil, state, nil, err
-		}
-	}
-
-	err = nil
-	if !match {
-		err = ErrSnapshotNotFound
-	}
-
-	// close decoder, disable reading
-	w.decoder.close()
-	w.start = walpb.Snapshot{}
-
-	w.metadata = metadata
-
-	if w.f != nil {
-		// create encoder (chain crc with the decoder), enable appending
-		w.encoder = newEncoder(w.f, w.decoder.lastCRC())
-		w.decoder = nil
-		lastIndexSaved.Set(float64(w.enti))
-	}
-
-	return metadata, state, ents, err
-}
-
-// cut closes the current file being written and creates a new one ready to append.
-// cut first creates a temp wal file and writes the necessary headers into it.
-// Then cut atomically renames the temp wal file to a wal file.
-func (w *WAL) cut() error {
-	// close old wal file
-	if err := w.sync(); err != nil {
-		return err
-	}
-	if err := w.f.Close(); err != nil {
-		return err
-	}
-
-	fpath := path.Join(w.dir, walName(w.seq+1, w.enti+1))
-	ftpath := fpath + ".tmp"
-
-	// create a temp wal file with name sequence + 1, or truncate the existing one
-	ft, err := os.OpenFile(ftpath, os.O_WRONLY|os.O_APPEND|os.O_CREATE|os.O_TRUNC, 0600)
-	if err != nil {
-		return err
-	}
-
-	// update writer and save the previous crc
-	w.f = ft
-	prevCrc := w.encoder.crc.Sum32()
-	w.encoder = newEncoder(w.f, prevCrc)
-	if err = w.saveCrc(prevCrc); err != nil {
-		return err
-	}
-	if err = w.encoder.encode(&walpb.Record{Type: metadataType, Data: w.metadata}); err != nil {
-		return err
-	}
-	if err = w.saveState(&w.state); err != nil {
-		return err
-	}
-	// close temp wal file
-	if err = w.sync(); err != nil {
-		return err
-	}
-	if err = w.f.Close(); err != nil {
-		return err
-	}
-
-	// atomically move temp wal file to wal file
-	if err = os.Rename(ftpath, fpath); err != nil {
-		return err
-	}
-
-	// open the wal file and update writer again
-	f, err := os.OpenFile(fpath, os.O_WRONLY|os.O_APPEND, 0600)
-	if err != nil {
-		return err
-	}
-	if err = fileutil.Preallocate(f, segmentSizeBytes); err != nil {
-		plog.Errorf("failed to allocate space when creating new wal file (%v)", err)
-		return err
-	}
-
-	w.f = f
-	prevCrc = w.encoder.crc.Sum32()
-	w.encoder = newEncoder(w.f, prevCrc)
-
-	// lock the new wal file
-	l, err := fileutil.NewLock(f.Name())
-	if err != nil {
-		return err
-	}
-
-	if err := l.Lock(); err != nil {
-		return err
-	}
-	w.locks = append(w.locks, l)
-
-	// increase the wal seq
-	w.seq++
-
-	plog.Infof("segmented wal file %v is created", fpath)
-	return nil
-}
-
-func (w *WAL) sync() error {
-	if w.encoder != nil {
-		if err := w.encoder.flush(); err != nil {
-			return err
-		}
-	}
-	start := time.Now()
-	err := fileutil.Fdatasync(w.f)
-	syncDurations.Observe(float64(time.Since(start)) / float64(time.Second))
-	return err
-}
-
-// ReleaseLockTo releases the locks whose index is smaller than the given
-// index, except the largest one among them.
-// For example, if the WAL is holding locks 1,2,3,4,5,6, ReleaseLockTo(4) will
-// release locks 1 and 2 but keep 3. ReleaseLockTo(5) will release 1, 2, and 3
-// but keep 4.
-func (w *WAL) ReleaseLockTo(index uint64) error {
-	w.mu.Lock()
-	defer w.mu.Unlock()
-
-	var smaller int
-	found := false
-
-	for i, l := range w.locks {
-		_, lockIndex, err := parseWalName(path.Base(l.Name()))
-		if err != nil {
-			return err
-		}
-		if lockIndex >= index {
-			smaller = i - 1
-			found = true
-			break
-		}
-	}
-
-	// if no lock index is greater than or equal to the release index, we can
-	// release locks up to the last one (exclusive).
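-	// (Reaching here with found == false means every held lock has an index
-	// smaller than the given one, so only the most recent lock is kept.)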
- if !found && len(w.locks) != 0 { - smaller = len(w.locks) - 1 - } - - if smaller <= 0 { - return nil - } - - for i := 0; i < smaller; i++ { - w.locks[i].Unlock() - w.locks[i].Destroy() - } - w.locks = w.locks[smaller:] - - return nil -} - -func (w *WAL) Close() error { - w.mu.Lock() - defer w.mu.Unlock() - - if w.f != nil { - if err := w.sync(); err != nil { - return err - } - if err := w.f.Close(); err != nil { - return err - } - } - for _, l := range w.locks { - err := l.Unlock() - if err != nil { - plog.Errorf("failed to unlock during closing wal: %s", err) - } - err = l.Destroy() - if err != nil { - plog.Errorf("failed to destroy lock during closing wal: %s", err) - } - } - return nil -} - -func (w *WAL) saveEntry(e *raftpb.Entry) error { - // TODO: add MustMarshalTo to reduce one allocation. - b := pbutil.MustMarshal(e) - rec := &walpb.Record{Type: entryType, Data: b} - if err := w.encoder.encode(rec); err != nil { - return err - } - w.enti = e.Index - lastIndexSaved.Set(float64(w.enti)) - return nil -} - -func (w *WAL) saveState(s *raftpb.HardState) error { - if raft.IsEmptyHardState(*s) { - return nil - } - w.state = *s - b := pbutil.MustMarshal(s) - rec := &walpb.Record{Type: stateType, Data: b} - return w.encoder.encode(rec) -} - -func (w *WAL) Save(st raftpb.HardState, ents []raftpb.Entry) error { - w.mu.Lock() - defer w.mu.Unlock() - - // short cut, do not call sync - if raft.IsEmptyHardState(st) && len(ents) == 0 { - return nil - } - - mustSync := mustSync(st, w.state, len(ents)) - - // TODO(xiangli): no more reference operator - for i := range ents { - if err := w.saveEntry(&ents[i]); err != nil { - return err - } - } - if err := w.saveState(&st); err != nil { - return err - } - - fstat, err := w.f.Stat() - if err != nil { - return err - } - if fstat.Size() < segmentSizeBytes { - if mustSync { - return w.sync() - } - return nil - } - // TODO: add a test for this code path when refactoring the tests - return w.cut() -} - -func (w *WAL) SaveSnapshot(e walpb.Snapshot) error { - w.mu.Lock() - defer w.mu.Unlock() - - b := pbutil.MustMarshal(&e) - rec := &walpb.Record{Type: snapshotType, Data: b} - if err := w.encoder.encode(rec); err != nil { - return err - } - // update enti only when snapshot is ahead of last index - if w.enti < e.Index { - w.enti = e.Index - } - lastIndexSaved.Set(float64(w.enti)) - return w.sync() -} - -func (w *WAL) saveCrc(prevCrc uint32) error { - return w.encoder.encode(&walpb.Record{Type: crcType, Crc: prevCrc}) -} - -func mustSync(st, prevst raftpb.HardState, entsnum int) bool { - // Persistent state on all servers: - // (Updated on stable storage before responding to RPCs) - // currentTerm - // votedFor - // log entries[] - if entsnum != 0 || st.Vote != prevst.Vote || st.Term != prevst.Term { - return true - } - return false -} diff --git a/vendor/src/github.com/coreos/etcd/wal/walpb/record.go b/vendor/src/github.com/coreos/etcd/wal/walpb/record.go deleted file mode 100644 index bb53685697..0000000000 --- a/vendor/src/github.com/coreos/etcd/wal/walpb/record.go +++ /dev/null @@ -1,29 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package walpb - -import "errors" - -var ( - ErrCRCMismatch = errors.New("walpb: crc mismatch") -) - -func (rec *Record) Validate(crc uint32) error { - if rec.Crc == crc { - return nil - } - rec.Reset() - return ErrCRCMismatch -} diff --git a/vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go b/vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go deleted file mode 100644 index 638bdc3b69..0000000000 --- a/vendor/src/github.com/coreos/etcd/wal/walpb/record.pb.go +++ /dev/null @@ -1,495 +0,0 @@ -// Code generated by protoc-gen-gogo. -// source: record.proto -// DO NOT EDIT! - -/* - Package walpb is a generated protocol buffer package. - - It is generated from these files: - record.proto - - It has these top-level messages: - Record - Snapshot -*/ -package walpb - -import ( - "fmt" - - proto "github.com/gogo/protobuf/proto" -) - -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -type Record struct { - Type int64 `protobuf:"varint,1,opt,name=type" json:"type"` - Crc uint32 `protobuf:"varint,2,opt,name=crc" json:"crc"` - Data []byte `protobuf:"bytes,3,opt,name=data" json:"data,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Record) Reset() { *m = Record{} } -func (m *Record) String() string { return proto.CompactTextString(m) } -func (*Record) ProtoMessage() {} - -type Snapshot struct { - Index uint64 `protobuf:"varint,1,opt,name=index" json:"index"` - Term uint64 `protobuf:"varint,2,opt,name=term" json:"term"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *Snapshot) Reset() { *m = Snapshot{} } -func (m *Snapshot) String() string { return proto.CompactTextString(m) } -func (*Snapshot) ProtoMessage() {} - -func init() { - proto.RegisterType((*Record)(nil), "walpb.Record") - proto.RegisterType((*Snapshot)(nil), "walpb.Snapshot") -} -func (m *Record) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Record) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRecord(data, i, uint64(m.Type)) - data[i] = 0x10 - i++ - i = encodeVarintRecord(data, i, uint64(m.Crc)) - if m.Data != nil { - data[i] = 0x1a - i++ - i = encodeVarintRecord(data, i, uint64(len(m.Data))) - i += copy(data[i:], m.Data) - } - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *Snapshot) Marshal() (data []byte, err error) { - size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) - if err != nil { - return nil, err - } - return data[:n], nil -} - -func (m *Snapshot) MarshalTo(data []byte) (int, error) { - var i int - _ = i - var l int - _ = l - data[i] = 0x8 - i++ - i = encodeVarintRecord(data, i, uint64(m.Index)) - data[i] = 0x10 - i++ - i = encodeVarintRecord(data, i, uint64(m.Term)) - if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) - } - return i, nil 
-} - -func encodeFixed64Record(data []byte, offset int, v uint64) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - data[offset+4] = uint8(v >> 32) - data[offset+5] = uint8(v >> 40) - data[offset+6] = uint8(v >> 48) - data[offset+7] = uint8(v >> 56) - return offset + 8 -} -func encodeFixed32Record(data []byte, offset int, v uint32) int { - data[offset] = uint8(v) - data[offset+1] = uint8(v >> 8) - data[offset+2] = uint8(v >> 16) - data[offset+3] = uint8(v >> 24) - return offset + 4 -} -func encodeVarintRecord(data []byte, offset int, v uint64) int { - for v >= 1<<7 { - data[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - data[offset] = uint8(v) - return offset + 1 -} -func (m *Record) Size() (n int) { - var l int - _ = l - n += 1 + sovRecord(uint64(m.Type)) - n += 1 + sovRecord(uint64(m.Crc)) - if m.Data != nil { - l = len(m.Data) - n += 1 + l + sovRecord(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *Snapshot) Size() (n int) { - var l int - _ = l - n += 1 + sovRecord(uint64(m.Index)) - n += 1 + sovRecord(uint64(m.Term)) - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovRecord(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} -func sozRecord(x uint64) (n int) { - return sovRecord(uint64((x << 1) ^ uint64((int64(x) >> 63)))) -} -func (m *Record) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Record: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Record: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) - } - m.Type = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Type |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Crc", wireType) - } - m.Crc = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Crc |= (uint32(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthRecord - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Data = append(m.Data[:0], data[iNdEx:postIndex]...) 
- if m.Data == nil { - m.Data = []byte{} - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipRecord(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRecord - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *Snapshot) Unmarshal(data []byte) error { - l := len(data) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) - } - m.Index = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Index |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - case 2: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field Term", wireType) - } - m.Term = 0 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowRecord - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - m.Term |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - default: - iNdEx = preIndex - skippy, err := skipRecord(data[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthRecord - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipRecord(data []byte) (n int, err error) { - l := len(data) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if data[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthRecord - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowRecord - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := data[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipRecord(data[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthRecord = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowRecord = fmt.Errorf("proto: integer overflow") -) diff --git a/vendor/src/github.com/coreos/etcd/wal/walpb/record.proto b/vendor/src/github.com/coreos/etcd/wal/walpb/record.proto deleted file mode 100644 index b694cb2338..0000000000 --- a/vendor/src/github.com/coreos/etcd/wal/walpb/record.proto +++ /dev/null @@ -1,20 +0,0 @@ -syntax = "proto2"; -package walpb; - -import "gogoproto/gogo.proto"; - -option (gogoproto.marshaler_all) = true; -option (gogoproto.sizer_all) = true; -option (gogoproto.unmarshaler_all) = true; -option (gogoproto.goproto_getters_all) = false; - -message Record { - optional int64 type = 1 [(gogoproto.nullable) = false]; - optional uint32 crc = 2 [(gogoproto.nullable) = false]; - optional bytes data = 3; -} - -message Snapshot { - optional uint64 index = 1 [(gogoproto.nullable) = false]; - optional uint64 term = 2 [(gogoproto.nullable) = false]; -} diff --git a/vendor/src/github.com/coreos/go-systemd/LICENSE b/vendor/src/github.com/coreos/go-systemd/LICENSE deleted file mode 100644 index 37ec93a14f..0000000000 --- a/vendor/src/github.com/coreos/go-systemd/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ -Apache License -Version 2.0, January 2004 -http://www.apache.org/licenses/ - -TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - -1. Definitions. - -"License" shall mean the terms and conditions for use, reproduction, and -distribution as defined by Sections 1 through 9 of this document. - -"Licensor" shall mean the copyright owner or entity authorized by the copyright -owner that is granting the License. 
- -"Legal Entity" shall mean the union of the acting entity and all other entities -that control, are controlled by, or are under common control with that entity. -For the purposes of this definition, "control" means (i) the power, direct or -indirect, to cause the direction or management of such entity, whether by -contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the -outstanding shares, or (iii) beneficial ownership of such entity. - -"You" (or "Your") shall mean an individual or Legal Entity exercising -permissions granted by this License. - -"Source" form shall mean the preferred form for making modifications, including -but not limited to software source code, documentation source, and configuration -files. - -"Object" form shall mean any form resulting from mechanical transformation or -translation of a Source form, including but not limited to compiled object code, -generated documentation, and conversions to other media types. - -"Work" shall mean the work of authorship, whether in Source or Object form, made -available under the License, as indicated by a copyright notice that is included -in or attached to the work (an example is provided in the Appendix below). - -"Derivative Works" shall mean any work, whether in Source or Object form, that -is based on (or derived from) the Work and for which the editorial revisions, -annotations, elaborations, or other modifications represent, as a whole, an -original work of authorship. For the purposes of this License, Derivative Works -shall not include works that remain separable from, or merely link (or bind by -name) to the interfaces of, the Work and Derivative Works thereof. - -"Contribution" shall mean any work of authorship, including the original version -of the Work and any modifications or additions to that Work or Derivative Works -thereof, that is intentionally submitted to Licensor for inclusion in the Work -by the copyright owner or by an individual or Legal Entity authorized to submit -on behalf of the copyright owner. For the purposes of this definition, -"submitted" means any form of electronic, verbal, or written communication sent -to the Licensor or its representatives, including but not limited to -communication on electronic mailing lists, source code control systems, and -issue tracking systems that are managed by, or on behalf of, the Licensor for -the purpose of discussing and improving the Work, but excluding communication -that is conspicuously marked or otherwise designated in writing by the copyright -owner as "Not a Contribution." - -"Contributor" shall mean Licensor and any individual or Legal Entity on behalf -of whom a Contribution has been received by Licensor and subsequently -incorporated within the Work. - -2. Grant of Copyright License. - -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable copyright license to reproduce, prepare Derivative Works of, -publicly display, publicly perform, sublicense, and distribute the Work and such -Derivative Works in Source or Object form. - -3. Grant of Patent License. 
- -Subject to the terms and conditions of this License, each Contributor hereby -grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, -irrevocable (except as stated in this section) patent license to make, have -made, use, offer to sell, sell, import, and otherwise transfer the Work, where -such license applies only to those patent claims licensable by such Contributor -that are necessarily infringed by their Contribution(s) alone or by combination -of their Contribution(s) with the Work to which such Contribution(s) was -submitted. If You institute patent litigation against any entity (including a -cross-claim or counterclaim in a lawsuit) alleging that the Work or a -Contribution incorporated within the Work constitutes direct or contributory -patent infringement, then any patent licenses granted to You under this License -for that Work shall terminate as of the date such litigation is filed. - -4. Redistribution. - -You may reproduce and distribute copies of the Work or Derivative Works thereof -in any medium, with or without modifications, and in Source or Object form, -provided that You meet the following conditions: - -You must give any other recipients of the Work or Derivative Works a copy of -this License; and -You must cause any modified files to carry prominent notices stating that You -changed the files; and -You must retain, in the Source form of any Derivative Works that You distribute, -all copyright, patent, trademark, and attribution notices from the Source form -of the Work, excluding those notices that do not pertain to any part of the -Derivative Works; and -If the Work includes a "NOTICE" text file as part of its distribution, then any -Derivative Works that You distribute must include a readable copy of the -attribution notices contained within such NOTICE file, excluding those notices -that do not pertain to any part of the Derivative Works, in at least one of the -following places: within a NOTICE text file distributed as part of the -Derivative Works; within the Source form or documentation, if provided along -with the Derivative Works; or, within a display generated by the Derivative -Works, if and wherever such third-party notices normally appear. The contents of -the NOTICE file are for informational purposes only and do not modify the -License. You may add Your own attribution notices within Derivative Works that -You distribute, alongside or as an addendum to the NOTICE text from the Work, -provided that such additional attribution notices cannot be construed as -modifying the License. -You may add Your own copyright statement to Your modifications and may provide -additional or different license terms and conditions for use, reproduction, or -distribution of Your modifications, or for any such Derivative Works as a whole, -provided Your use, reproduction, and distribution of the Work otherwise complies -with the conditions stated in this License. - -5. Submission of Contributions. - -Unless You explicitly state otherwise, any Contribution intentionally submitted -for inclusion in the Work by You to the Licensor shall be under the terms and -conditions of this License, without any additional terms or conditions. -Notwithstanding the above, nothing herein shall supersede or modify the terms of -any separate license agreement you may have executed with Licensor regarding -such Contributions. - -6. Trademarks. 
- -This License does not grant permission to use the trade names, trademarks, -service marks, or product names of the Licensor, except as required for -reasonable and customary use in describing the origin of the Work and -reproducing the content of the NOTICE file. - -7. Disclaimer of Warranty. - -Unless required by applicable law or agreed to in writing, Licensor provides the -Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, -including, without limitation, any warranties or conditions of TITLE, -NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are -solely responsible for determining the appropriateness of using or -redistributing the Work and assume any risks associated with Your exercise of -permissions under this License. - -8. Limitation of Liability. - -In no event and under no legal theory, whether in tort (including negligence), -contract, or otherwise, unless required by applicable law (such as deliberate -and grossly negligent acts) or agreed to in writing, shall any Contributor be -liable to You for damages, including any direct, indirect, special, incidental, -or consequential damages of any character arising as a result of this License or -out of the use or inability to use the Work (including but not limited to -damages for loss of goodwill, work stoppage, computer failure or malfunction, or -any and all other commercial damages or losses), even if such Contributor has -been advised of the possibility of such damages. - -9. Accepting Warranty or Additional Liability. - -While redistributing the Work or Derivative Works thereof, You may choose to -offer, and charge a fee for, acceptance of support, warranty, indemnity, or -other liability obligations and/or rights consistent with this License. However, -in accepting such obligations, You may act only on Your own behalf and on Your -sole responsibility, not on behalf of any other Contributor, and only if You -agree to indemnify, defend, and hold each Contributor harmless for any liability -incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. - -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
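The go-systemd files removed below implement systemd socket activation via the LISTEN_PID and LISTEN_FDS environment variables. For context, a minimal sketch of how a daemon typically consumes these helpers; this is an editor's illustration using only the Files/Listeners API shown in the files that follow, and the HTTP server is illustrative only.

	package main

	import (
		"fmt"
		"net/http"

		"github.com/coreos/go-systemd/activation"
	)

	func main() {
		// Listeners(true) reads LISTEN_PID/LISTEN_FDS set by systemd and
		// unsets them so child processes do not inherit stale values.
		listeners, err := activation.Listeners(true)
		if err != nil || len(listeners) == 0 {
			panic("not started via systemd socket activation")
		}

		// Serve on the first activated socket; nil uses http.DefaultServeMux.
		fmt.Println("serving on", listeners[0].Addr())
		http.Serve(listeners[0], nil)
	}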
diff --git a/vendor/src/github.com/coreos/go-systemd/activation/files.go b/vendor/src/github.com/coreos/go-systemd/activation/files.go deleted file mode 100644 index c8e85fcd58..0000000000 --- a/vendor/src/github.com/coreos/go-systemd/activation/files.go +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package activation implements primitives for systemd socket activation. -package activation - -import ( - "os" - "strconv" - "syscall" -) - -// based on: https://gist.github.com/alberts/4640792 -const ( - listenFdsStart = 3 -) - -func Files(unsetEnv bool) []*os.File { - if unsetEnv { - defer os.Unsetenv("LISTEN_PID") - defer os.Unsetenv("LISTEN_FDS") - } - - pid, err := strconv.Atoi(os.Getenv("LISTEN_PID")) - if err != nil || pid != os.Getpid() { - return nil - } - - nfds, err := strconv.Atoi(os.Getenv("LISTEN_FDS")) - if err != nil || nfds == 0 { - return nil - } - - files := make([]*os.File, 0, nfds) - for fd := listenFdsStart; fd < listenFdsStart+nfds; fd++ { - syscall.CloseOnExec(fd) - files = append(files, os.NewFile(uintptr(fd), "LISTEN_FD_"+strconv.Itoa(fd))) - } - - return files -} diff --git a/vendor/src/github.com/coreos/go-systemd/activation/listeners.go b/vendor/src/github.com/coreos/go-systemd/activation/listeners.go deleted file mode 100644 index df27c29e9e..0000000000 --- a/vendor/src/github.com/coreos/go-systemd/activation/listeners.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package activation - -import ( - "crypto/tls" - "net" -) - -// Listeners returns a slice containing a net.Listener for each matching socket type -// passed to this process. -// -// The order of the file descriptors is preserved in the returned slice. -// Nil values are used to fill any gaps. For example if systemd were to return file descriptors -// corresponding with "udp, tcp, tcp", then the slice would contain {nil, net.Listener, net.Listener} -func Listeners(unsetEnv bool) ([]net.Listener, error) { - files := Files(unsetEnv) - listeners := make([]net.Listener, len(files)) - - for i, f := range files { - if pc, err := net.FileListener(f); err == nil { - listeners[i] = pc - } - } - return listeners, nil -} - -// TLSListeners returns a slice containing a net.listener for each matching TCP socket type -// passed to this process. -// It uses default Listeners func and forces TCP sockets handlers to use TLS based on tlsConfig. 
-func TLSListeners(unsetEnv bool, tlsConfig *tls.Config) ([]net.Listener, error) { - listeners, err := Listeners(unsetEnv) - - if listeners == nil || err != nil { - return nil, err - } - - if tlsConfig != nil && err == nil { - tlsConfig.NextProtos = []string{"http/1.1"} - - for i, l := range listeners { - // Activate TLS only for TCP sockets - if l.Addr().Network() == "tcp" { - listeners[i] = tls.NewListener(l, tlsConfig) - } - } - } - - return listeners, err -} diff --git a/vendor/src/github.com/coreos/go-systemd/activation/packetconns.go b/vendor/src/github.com/coreos/go-systemd/activation/packetconns.go deleted file mode 100644 index 48b2ca029d..0000000000 --- a/vendor/src/github.com/coreos/go-systemd/activation/packetconns.go +++ /dev/null @@ -1,37 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package activation - -import ( - "net" -) - -// PacketConns returns a slice containing a net.PacketConn for each matching socket type -// passed to this process. -// -// The order of the file descriptors is preserved in the returned slice. -// Nil values are used to fill any gaps. For example if systemd were to return file descriptors -// corresponding with "udp, tcp, udp", then the slice would contain {net.PacketConn, nil, net.PacketConn} -func PacketConns(unsetEnv bool) ([]net.PacketConn, error) { - files := Files(unsetEnv) - conns := make([]net.PacketConn, len(files)) - - for i, f := range files { - if pc, err := net.FilePacketConn(f); err == nil { - conns[i] = pc - } - } - return conns, nil -} diff --git a/vendor/src/github.com/coreos/go-systemd/daemon/sdnotify.go b/vendor/src/github.com/coreos/go-systemd/daemon/sdnotify.go deleted file mode 100644 index b92b1911c1..0000000000 --- a/vendor/src/github.com/coreos/go-systemd/daemon/sdnotify.go +++ /dev/null @@ -1,31 +0,0 @@ -// Code forked from Docker project -package daemon - -import ( - "errors" - "net" - "os" -) - -var SdNotifyNoSocket = errors.New("No socket") - -// SdNotify sends a message to the init daemon. It is common to ignore the error. -func SdNotify(state string) error { - socketAddr := &net.UnixAddr{ - Name: os.Getenv("NOTIFY_SOCKET"), - Net: "unixgram", - } - - if socketAddr.Name == "" { - return SdNotifyNoSocket - } - - conn, err := net.DialUnix(socketAddr.Net, nil, socketAddr) - if err != nil { - return err - } - defer conn.Close() - - _, err = conn.Write([]byte(state)) - return err -} diff --git a/vendor/src/github.com/coreos/go-systemd/journal/journal.go b/vendor/src/github.com/coreos/go-systemd/journal/journal.go deleted file mode 100644 index 6c3f5b94df..0000000000 --- a/vendor/src/github.com/coreos/go-systemd/journal/journal.go +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package journal provides write bindings to the local systemd journal. -// It is implemented in pure Go and connects to the journal directly over its -// unix socket. -// -// To read from the journal, see the "sdjournal" package, which wraps the -// sd-journal a C API. -// -// http://www.freedesktop.org/software/systemd/man/systemd-journald.service.html -package journal - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "net" - "os" - "strconv" - "strings" - "syscall" -) - -// Priority of a journal message -type Priority int - -const ( - PriEmerg Priority = iota - PriAlert - PriCrit - PriErr - PriWarning - PriNotice - PriInfo - PriDebug -) - -var conn net.Conn - -func init() { - var err error - conn, err = net.Dial("unixgram", "/run/systemd/journal/socket") - if err != nil { - conn = nil - } -} - -// Enabled returns true if the local systemd journal is available for logging -func Enabled() bool { - return conn != nil -} - -// Send a message to the local systemd journal. vars is a map of journald -// fields to values. Fields must be composed of uppercase letters, numbers, -// and underscores, but must not start with an underscore. Within these -// restrictions, any arbitrary field name may be used. Some names have special -// significance: see the journalctl documentation -// (http://www.freedesktop.org/software/systemd/man/systemd.journal-fields.html) -// for more details. vars may be nil. -func Send(message string, priority Priority, vars map[string]string) error { - if conn == nil { - return journalError("could not connect to journald socket") - } - - data := new(bytes.Buffer) - appendVariable(data, "PRIORITY", strconv.Itoa(int(priority))) - appendVariable(data, "MESSAGE", message) - for k, v := range vars { - appendVariable(data, k, v) - } - - _, err := io.Copy(conn, data) - if err != nil && isSocketSpaceError(err) { - file, err := tempFd() - if err != nil { - return journalError(err.Error()) - } - _, err = io.Copy(file, data) - if err != nil { - return journalError(err.Error()) - } - - rights := syscall.UnixRights(int(file.Fd())) - - /* this connection should always be a UnixConn, but better safe than sorry */ - unixConn, ok := conn.(*net.UnixConn) - if !ok { - return journalError("can't send file through non-Unix connection") - } - unixConn.WriteMsgUnix([]byte{}, rights, nil) - } else if err != nil { - return journalError(err.Error()) - } - return nil -} - -// Print prints a message to the local systemd journal using Send(). 
-func Print(priority Priority, format string, a ...interface{}) error { - return Send(fmt.Sprintf(format, a...), priority, nil) -} - -func appendVariable(w io.Writer, name, value string) { - if !validVarName(name) { - journalError("variable name contains invalid character, ignoring") - } - if strings.ContainsRune(value, '\n') { - /* When the value contains a newline, we write: - * - the variable name, followed by a newline - * - the size (in 64bit little endian format) - * - the data, followed by a newline - */ - fmt.Fprintln(w, name) - binary.Write(w, binary.LittleEndian, uint64(len(value))) - fmt.Fprintln(w, value) - } else { - /* just write the variable and value all on one line */ - fmt.Fprintf(w, "%s=%s\n", name, value) - } -} - -func validVarName(name string) bool { - /* The variable name must be in uppercase and consist only of characters, - * numbers and underscores, and may not begin with an underscore. (from the docs) - */ - - valid := name[0] != '_' - for _, c := range name { - valid = valid && ('A' <= c && c <= 'Z') || ('0' <= c && c <= '9') || c == '_' - } - return valid -} - -func isSocketSpaceError(err error) bool { - opErr, ok := err.(*net.OpError) - if !ok { - return false - } - - sysErr, ok := opErr.Err.(syscall.Errno) - if !ok { - return false - } - - return sysErr == syscall.EMSGSIZE || sysErr == syscall.ENOBUFS -} - -func tempFd() (*os.File, error) { - file, err := ioutil.TempFile("/dev/shm/", "journal.XXXXX") - if err != nil { - return nil, err - } - syscall.Unlink(file.Name()) - if err != nil { - return nil, err - } - return file, nil -} - -func journalError(s string) error { - s = "journal error: " + s - fmt.Fprintln(os.Stderr, s) - return errors.New(s) -} diff --git a/vendor/src/github.com/coreos/pkg/LICENSE b/vendor/src/github.com/coreos/pkg/LICENSE deleted file mode 100644 index e06d208186..0000000000 --- a/vendor/src/github.com/coreos/pkg/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory,
-      whether in tort (including negligence), contract, or otherwise,
-      unless required by applicable law (such as deliberate and grossly
-      negligent acts) or agreed to in writing, shall any Contributor be
-      liable to You for damages, including any direct, indirect, special,
-      incidental, or consequential damages of any character arising as a
-      result of this License or out of the use or inability to use the
-      Work (including but not limited to damages for loss of goodwill,
-      work stoppage, computer failure or malfunction, or any and all
-      other commercial damages or losses), even if such Contributor
-      has been advised of the possibility of such damages.
-
-   9. Accepting Warranty or Additional Liability. While redistributing
-      the Work or Derivative Works thereof, You may choose to offer,
-      and charge a fee for, acceptance of support, warranty, indemnity,
-      or other liability obligations and/or rights consistent with this
-      License. However, in accepting such obligations, You may act only
-      on Your own behalf and on Your sole responsibility, not on behalf
-      of any other Contributor, and only if You agree to indemnify,
-      defend, and hold each Contributor harmless for any liability
-      incurred by, or claims asserted against, such Contributor by reason
-      of your accepting any such warranty or additional liability.
-
-   END OF TERMS AND CONDITIONS
-
-   APPENDIX: How to apply the Apache License to your work.
-
-      To apply the Apache License to your work, attach the following
-      boilerplate notice, with the fields enclosed by brackets "{}"
-      replaced with your own identifying information. (Don't include
-      the brackets!) The text should be enclosed in the appropriate
-      comment syntax for the file format. We also recommend that a
-      file or class name and description of purpose be included on the
-      same "printed page" as the copyright notice for easier
-      identification within third-party archives.
-
-   Copyright {yyyy} {name of copyright owner}
-
-   Licensed under the Apache License, Version 2.0 (the "License");
-   you may not use this file except in compliance with the License.
-   You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
-
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/README.md b/vendor/src/github.com/coreos/pkg/capnslog/README.md
deleted file mode 100644
index 81efb1fb6a..0000000000
--- a/vendor/src/github.com/coreos/pkg/capnslog/README.md
+++ /dev/null
@@ -1,39 +0,0 @@
-# capnslog, the CoreOS logging package
-
-There are far too many logging packages out there: they come with varying licenses, carry far too many features (colorization, entire log frameworks), or are simply a pain to use (no `Fatalln()`?).
-capnslog provides a simple but consistent logging interface suitable for all kinds of projects.
-
-### Design Principles
-
-##### `package main` is the place where logging gets turned on and routed
-
-A library should not touch log options, only generate log entries. Libraries are silent until main lets them speak.
-
-##### All log options are runtime-configurable.
-
-It is still the job of `main` to expose these configurations.
`main` may delegate this to, say, a configuration webhook, but does so explicitly.
-
-##### There is one log object per package. It is registered under its repository and package name.
-
-`main` activates logging for its repository and any dependency repositories it would also like to have output in its logstream. `main` also dictates at which level each subpackage logs.
-
-##### There is *one* output stream, and it is an `io.Writer` composed with a formatter.
-
-Splitting streams is probably not the job of your program, but rather that of your log aggregation framework. If you must split output streams, again, `main` configures this, and you can write a very simple two-output struct that satisfies `io.Writer`.
-
-Fancy colorful formatting and JSON output are beyond the scope of a basic logging framework -- they're application/log-collector dependent. These are, at best, provided as options, but more likely, provided by your application.
-
-##### Log objects are an interface
-
-An object knows best how to print itself. Log objects can collect more interesting metadata if they wish; however, because text isn't going away anytime soon, they must all be marshalable to text. The simplest log object is a string, which returns itself. If you wish to do fancier tricks for printing your log objects, see also JSON output -- introspect and write a formatter which can handle your advanced log interface. Making strings is the only thing guaranteed.
-
-##### Log levels have specific meanings:
-
-  * Critical: Unrecoverable. Must fail.
-  * Error: Data has been lost, a request has failed for a bad reason, or a required resource has been lost.
-  * Warning: (Hopefully) Temporary conditions that may cause errors, but may work fine. A replica disappearing (that may reconnect) is a warning.
-  * Notice: Normal, but important (uncommon) log information.
-  * Info: Normal, working log information; everything is fine, but helpful notices for auditing or common operations.
-  * Debug: Everything is still fine, but even common operations may be logged, producing less helpful but higher-volume notices.
-  * Trace: Anything goes, from logging every function call as part of a common operation, to tracing execution of a query.
-
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/formatters.go b/vendor/src/github.com/coreos/pkg/capnslog/formatters.go
deleted file mode 100644
index 99ec6f824b..0000000000
--- a/vendor/src/github.com/coreos/pkg/capnslog/formatters.go
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
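-//
-// Editorial sketch (hedged): a Formatter needs only Format and Flush (the
-// interface is defined just below), so a minimal custom formatter over any
-// io.Writer could look like:
-//
-//	type lineFormatter struct{ w io.Writer }
-//
-//	func (f *lineFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) {
-//		fmt.Fprintln(f.w, l.Char(), pkg, fmt.Sprint(entries...))
-//	}
-//
-//	func (f *lineFormatter) Flush() {}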
- -package capnslog - -import ( - "bufio" - "fmt" - "io" - "runtime" - "strings" - "time" -) - -type Formatter interface { - Format(pkg string, level LogLevel, depth int, entries ...interface{}) - Flush() -} - -func NewStringFormatter(w io.Writer) *StringFormatter { - return &StringFormatter{ - w: bufio.NewWriter(w), - } -} - -type StringFormatter struct { - w *bufio.Writer -} - -func (s *StringFormatter) Format(pkg string, l LogLevel, i int, entries ...interface{}) { - now := time.Now().UTC() - s.w.WriteString(now.Format(time.RFC3339)) - s.w.WriteByte(' ') - writeEntries(s.w, pkg, l, i, entries...) - s.Flush() -} - -func writeEntries(w *bufio.Writer, pkg string, _ LogLevel, _ int, entries ...interface{}) { - if pkg != "" { - w.WriteString(pkg + ": ") - } - str := fmt.Sprint(entries...) - endsInNL := strings.HasSuffix(str, "\n") - w.WriteString(str) - if !endsInNL { - w.WriteString("\n") - } -} - -func (s *StringFormatter) Flush() { - s.w.Flush() -} - -func NewPrettyFormatter(w io.Writer, debug bool) Formatter { - return &PrettyFormatter{ - w: bufio.NewWriter(w), - debug: debug, - } -} - -type PrettyFormatter struct { - w *bufio.Writer - debug bool -} - -func (c *PrettyFormatter) Format(pkg string, l LogLevel, depth int, entries ...interface{}) { - now := time.Now() - ts := now.Format("2006-01-02 15:04:05") - c.w.WriteString(ts) - ms := now.Nanosecond() / 1000 - c.w.WriteString(fmt.Sprintf(".%06d", ms)) - if c.debug { - _, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call. - if !ok { - file = "???" - line = 1 - } else { - slash := strings.LastIndex(file, "/") - if slash >= 0 { - file = file[slash+1:] - } - } - if line < 0 { - line = 0 // not a real line number - } - c.w.WriteString(fmt.Sprintf(" [%s:%d]", file, line)) - } - c.w.WriteString(fmt.Sprint(" ", l.Char(), " | ")) - writeEntries(c.w, pkg, l, depth, entries...) - c.Flush() -} - -func (c *PrettyFormatter) Flush() { - c.w.Flush() -} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go b/vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go deleted file mode 100644 index 426603ef30..0000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/glog_formatter.go +++ /dev/null @@ -1,96 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "bufio" - "bytes" - "io" - "os" - "runtime" - "strconv" - "strings" - "time" -) - -var pid = os.Getpid() - -type GlogFormatter struct { - StringFormatter -} - -func NewGlogFormatter(w io.Writer) *GlogFormatter { - g := &GlogFormatter{} - g.w = bufio.NewWriter(w) - return g -} - -func (g GlogFormatter) Format(pkg string, level LogLevel, depth int, entries ...interface{}) { - g.w.Write(GlogHeader(level, depth+1)) - g.StringFormatter.Format(pkg, level, depth+1, entries...) 
-}
-
-func GlogHeader(level LogLevel, depth int) []byte {
-	// Lmmdd hh:mm:ss.uuuuuu threadid file:line]
-	now := time.Now().UTC()
-	_, file, line, ok := runtime.Caller(depth) // It's always the same number of frames to the user's call.
-	if !ok {
-		file = "???"
-		line = 1
-	} else {
-		slash := strings.LastIndex(file, "/")
-		if slash >= 0 {
-			file = file[slash+1:]
-		}
-	}
-	if line < 0 {
-		line = 0 // not a real line number
-	}
-	buf := &bytes.Buffer{}
-	buf.Grow(30)
-	_, month, day := now.Date()
-	hour, minute, second := now.Clock()
-	buf.WriteString(level.Char())
-	twoDigits(buf, int(month))
-	twoDigits(buf, day)
-	buf.WriteByte(' ')
-	twoDigits(buf, hour)
-	buf.WriteByte(':')
-	twoDigits(buf, minute)
-	buf.WriteByte(':')
-	twoDigits(buf, second)
-	buf.WriteByte('.')
-	buf.WriteString(strconv.Itoa(now.Nanosecond() / 1000))
-	buf.WriteByte('Z')
-	buf.WriteByte(' ')
-	buf.WriteString(strconv.Itoa(pid))
-	buf.WriteByte(' ')
-	buf.WriteString(file)
-	buf.WriteByte(':')
-	buf.WriteString(strconv.Itoa(line))
-	buf.WriteByte(']')
-	buf.WriteByte(' ')
-	return buf.Bytes()
-}
-
-const digits = "0123456789"
-
-// twoDigits writes d as exactly two decimal digits, zero-padded.
-func twoDigits(b *bytes.Buffer, d int) {
-	c2 := digits[d%10]
-	d /= 10
-	c1 := digits[d%10]
-	b.WriteByte(c1)
-	b.WriteByte(c2)
-}
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/init.go b/vendor/src/github.com/coreos/pkg/capnslog/init.go
deleted file mode 100644
index 44b8cd361b..0000000000
--- a/vendor/src/github.com/coreos/pkg/capnslog/init.go
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-//
-// +build !windows
-
-package capnslog
-
-import (
-	"io"
-	"os"
-	"syscall"
-)
-
-// Here's where the opinionation comes in. We need some sensible defaults,
-// especially after taking over the log package. Your project (whatever it may
-// be) may see things differently. That's okay; there should be no defaults in
-// the main package that cannot be controlled or overridden programmatically,
-// otherwise it's a bug. The way to override them is to create your own
-// init_log.go file, much like this one.
-
-func init() {
-	initHijack()
-
-	// Go `log` package uses os.Stderr.
-	SetFormatter(NewDefaultFormatter(os.Stderr))
-	SetGlobalLogLevel(INFO)
-}
-
-func NewDefaultFormatter(out io.Writer) Formatter {
-	if syscall.Getppid() == 1 {
-		// We're running under init, which may be systemd.
-		f, err := NewJournaldFormatter()
-		if err == nil {
-			return f
-		}
-	}
-	return NewPrettyFormatter(out, false)
-}
diff --git a/vendor/src/github.com/coreos/pkg/capnslog/init_windows.go b/vendor/src/github.com/coreos/pkg/capnslog/init_windows.go
deleted file mode 100644
index 4553050653..0000000000
--- a/vendor/src/github.com/coreos/pkg/capnslog/init_windows.go
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2015 CoreOS, Inc.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import "os" - -func init() { - initHijack() - - // Go `log` package uses os.Stderr. - SetFormatter(NewPrettyFormatter(os.Stderr, false)) - SetGlobalLogLevel(INFO) -} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go b/vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go deleted file mode 100644 index 72e05207c5..0000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/journald_formatter.go +++ /dev/null @@ -1,68 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// -// +build !windows - -package capnslog - -import ( - "errors" - "fmt" - "os" - "path/filepath" - - "github.com/coreos/go-systemd/journal" -) - -func NewJournaldFormatter() (Formatter, error) { - if !journal.Enabled() { - return nil, errors.New("No systemd detected") - } - return &journaldFormatter{}, nil -} - -type journaldFormatter struct{} - -func (j *journaldFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { - var pri journal.Priority - switch l { - case CRITICAL: - pri = journal.PriCrit - case ERROR: - pri = journal.PriErr - case WARNING: - pri = journal.PriWarning - case NOTICE: - pri = journal.PriNotice - case INFO: - pri = journal.PriInfo - case DEBUG: - pri = journal.PriDebug - case TRACE: - pri = journal.PriDebug - default: - panic("Unhandled loglevel") - } - msg := fmt.Sprint(entries...) - tags := map[string]string{ - "PACKAGE": pkg, - "SYSLOG_IDENTIFIER": filepath.Base(os.Args[0]), - } - err := journal.Send(msg, pri, tags) - if err != nil { - fmt.Fprintln(os.Stderr, err) - } -} - -func (j *journaldFormatter) Flush() {} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go b/vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go deleted file mode 100644 index 970086b9f9..0000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/log_hijack.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
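-//
-// Editorial note (hedged): once initHijack below has run, writes made through
-// the standard library's log package are re-emitted at INFO level via a
-// PackageLogger registered under the repo name "log", so
-//
-//	log.Printf("migrated %d rows", n)
-//
-// surfaces in capnslog's single output stream.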
- -package capnslog - -import ( - "log" -) - -func initHijack() { - pkg := NewPackageLogger("log", "") - w := packageWriter{pkg} - log.SetFlags(0) - log.SetPrefix("") - log.SetOutput(w) -} - -type packageWriter struct { - pl *PackageLogger -} - -func (p packageWriter) Write(b []byte) (int, error) { - if p.pl.level < INFO { - return 0, nil - } - p.pl.internalLog(calldepth+2, INFO, string(b)) - return len(b), nil -} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/logmap.go b/vendor/src/github.com/coreos/pkg/capnslog/logmap.go deleted file mode 100644 index 8495448830..0000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/logmap.go +++ /dev/null @@ -1,240 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "errors" - "strings" - "sync" -) - -// LogLevel is the set of all log levels. -type LogLevel int8 - -const ( - // CRITICAL is the lowest log level; only errors which will end the program will be propagated. - CRITICAL LogLevel = iota - 1 - // ERROR is for errors that are not fatal but lead to troubling behavior. - ERROR - // WARNING is for errors which are not fatal and not errors, but are unusual. Often sourced from misconfigurations. - WARNING - // NOTICE is for normal but significant conditions. - NOTICE - // INFO is a log level for common, everyday log updates. - INFO - // DEBUG is the default hidden level for more verbose updates about internal processes. - DEBUG - // TRACE is for (potentially) call by call tracing of programs. - TRACE -) - -// Char returns a single-character representation of the log level. -func (l LogLevel) Char() string { - switch l { - case CRITICAL: - return "C" - case ERROR: - return "E" - case WARNING: - return "W" - case NOTICE: - return "N" - case INFO: - return "I" - case DEBUG: - return "D" - case TRACE: - return "T" - default: - panic("Unhandled loglevel") - } -} - -// String returns a multi-character representation of the log level. -func (l LogLevel) String() string { - switch l { - case CRITICAL: - return "CRITICAL" - case ERROR: - return "ERROR" - case WARNING: - return "WARNING" - case NOTICE: - return "NOTICE" - case INFO: - return "INFO" - case DEBUG: - return "DEBUG" - case TRACE: - return "TRACE" - default: - panic("Unhandled loglevel") - } -} - -// Update using the given string value. Fulfills the flag.Value interface. -func (l *LogLevel) Set(s string) error { - value, err := ParseLevel(s) - if err != nil { - return err - } - - *l = value - return nil -} - -// ParseLevel translates some potential loglevel strings into their corresponding levels. 
-func ParseLevel(s string) (LogLevel, error) { - switch s { - case "CRITICAL", "C": - return CRITICAL, nil - case "ERROR", "0", "E": - return ERROR, nil - case "WARNING", "1", "W": - return WARNING, nil - case "NOTICE", "2", "N": - return NOTICE, nil - case "INFO", "3", "I": - return INFO, nil - case "DEBUG", "4", "D": - return DEBUG, nil - case "TRACE", "5", "T": - return TRACE, nil - } - return CRITICAL, errors.New("couldn't parse log level " + s) -} - -type RepoLogger map[string]*PackageLogger - -type loggerStruct struct { - sync.Mutex - repoMap map[string]RepoLogger - formatter Formatter -} - -// logger is the global logger -var logger = new(loggerStruct) - -// SetGlobalLogLevel sets the log level for all packages in all repositories -// registered with capnslog. -func SetGlobalLogLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - for _, r := range logger.repoMap { - r.setRepoLogLevelInternal(l) - } -} - -// GetRepoLogger may return the handle to the repository's set of packages' loggers. -func GetRepoLogger(repo string) (RepoLogger, error) { - logger.Lock() - defer logger.Unlock() - r, ok := logger.repoMap[repo] - if !ok { - return nil, errors.New("no packages registered for repo " + repo) - } - return r, nil -} - -// MustRepoLogger returns the handle to the repository's packages' loggers. -func MustRepoLogger(repo string) RepoLogger { - r, err := GetRepoLogger(repo) - if err != nil { - panic(err) - } - return r -} - -// SetRepoLogLevel sets the log level for all packages in the repository. -func (r RepoLogger) SetRepoLogLevel(l LogLevel) { - logger.Lock() - defer logger.Unlock() - r.setRepoLogLevelInternal(l) -} - -func (r RepoLogger) setRepoLogLevelInternal(l LogLevel) { - for _, v := range r { - v.level = l - } -} - -// ParseLogLevelConfig parses a comma-separated string of "package=loglevel", in -// order, and returns a map of the results, for use in SetLogLevel. -func (r RepoLogger) ParseLogLevelConfig(conf string) (map[string]LogLevel, error) { - setlist := strings.Split(conf, ",") - out := make(map[string]LogLevel) - for _, setstring := range setlist { - setting := strings.Split(setstring, "=") - if len(setting) != 2 { - return nil, errors.New("oddly structured `pkg=level` option: " + setstring) - } - l, err := ParseLevel(setting[1]) - if err != nil { - return nil, err - } - out[setting[0]] = l - } - return out, nil -} - -// SetLogLevel takes a map of package names within a repository to their desired -// loglevel, and sets the levels appropriately. Unknown packages are ignored. -// "*" is a special package name that corresponds to all packages, and will be -// processed first. -func (r RepoLogger) SetLogLevel(m map[string]LogLevel) { - logger.Lock() - defer logger.Unlock() - if l, ok := m["*"]; ok { - r.setRepoLogLevelInternal(l) - } - for k, v := range m { - l, ok := r[k] - if !ok { - continue - } - l.level = v - } -} - -// SetFormatter sets the formatting function for all logs. -func SetFormatter(f Formatter) { - logger.Lock() - defer logger.Unlock() - logger.formatter = f -} - -// NewPackageLogger creates a package logger object. -// This should be defined as a global var in your package, referencing your repo. 
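-// For example (an editorial sketch; the repo and package names are invented):
-//
-//	var plog = capnslog.NewPackageLogger("github.com/example/project", "server")
-//
-// Log levels can later be adjusted through the repo handle, e.g.
-//
-//	capnslog.MustRepoLogger("github.com/example/project").SetRepoLogLevel(capnslog.DEBUG)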
-func NewPackageLogger(repo string, pkg string) (p *PackageLogger) { - logger.Lock() - defer logger.Unlock() - if logger.repoMap == nil { - logger.repoMap = make(map[string]RepoLogger) - } - r, rok := logger.repoMap[repo] - if !rok { - logger.repoMap[repo] = make(RepoLogger) - r = logger.repoMap[repo] - } - p, pok := r[pkg] - if !pok { - r[pkg] = &PackageLogger{ - pkg: pkg, - level: INFO, - } - p = r[pkg] - } - return -} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go b/vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go deleted file mode 100644 index 32d2f16a98..0000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/pkg_logger.go +++ /dev/null @@ -1,158 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package capnslog - -import ( - "fmt" - "os" -) - -type PackageLogger struct { - pkg string - level LogLevel -} - -const calldepth = 2 - -func (p *PackageLogger) internalLog(depth int, inLevel LogLevel, entries ...interface{}) { - if inLevel != CRITICAL && p.level < inLevel { - return - } - logger.Lock() - defer logger.Unlock() - if logger.formatter != nil { - logger.formatter.Format(p.pkg, inLevel, depth+1, entries...) - } -} - -func (p *PackageLogger) LevelAt(l LogLevel) bool { - return p.level >= l -} - -// Log a formatted string at any level between ERROR and TRACE -func (p *PackageLogger) Logf(l LogLevel, format string, args ...interface{}) { - p.internalLog(calldepth, l, fmt.Sprintf(format, args...)) -} - -// Log a message at any level between ERROR and TRACE -func (p *PackageLogger) Log(l LogLevel, args ...interface{}) { - p.internalLog(calldepth, l, fmt.Sprint(args...)) -} - -// log stdlib compatibility - -func (p *PackageLogger) Println(args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprintln(args...)) -} - -func (p *PackageLogger) Printf(format string, args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Print(args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprint(args...)) -} - -// Panic and fatal - -func (p *PackageLogger) Panicf(format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Panic(args ...interface{}) { - s := fmt.Sprint(args...) - p.internalLog(calldepth, CRITICAL, s) - panic(s) -} - -func (p *PackageLogger) Fatalf(format string, args ...interface{}) { - s := fmt.Sprintf(format, args...) - p.internalLog(calldepth, CRITICAL, s) - os.Exit(1) -} - -func (p *PackageLogger) Fatal(args ...interface{}) { - s := fmt.Sprint(args...) - p.internalLog(calldepth, CRITICAL, s) - os.Exit(1) -} - -// Error Functions - -func (p *PackageLogger) Errorf(format string, args ...interface{}) { - p.internalLog(calldepth, ERROR, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Error(entries ...interface{}) { - p.internalLog(calldepth, ERROR, entries...) 
-} - -// Warning Functions - -func (p *PackageLogger) Warningf(format string, args ...interface{}) { - p.internalLog(calldepth, WARNING, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Warning(entries ...interface{}) { - p.internalLog(calldepth, WARNING, entries...) -} - -// Notice Functions - -func (p *PackageLogger) Noticef(format string, args ...interface{}) { - p.internalLog(calldepth, NOTICE, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Notice(entries ...interface{}) { - p.internalLog(calldepth, NOTICE, entries...) -} - -// Info Functions - -func (p *PackageLogger) Infof(format string, args ...interface{}) { - p.internalLog(calldepth, INFO, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Info(entries ...interface{}) { - p.internalLog(calldepth, INFO, entries...) -} - -// Debug Functions - -func (p *PackageLogger) Debugf(format string, args ...interface{}) { - p.internalLog(calldepth, DEBUG, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Debug(entries ...interface{}) { - p.internalLog(calldepth, DEBUG, entries...) -} - -// Trace Functions - -func (p *PackageLogger) Tracef(format string, args ...interface{}) { - p.internalLog(calldepth, TRACE, fmt.Sprintf(format, args...)) -} - -func (p *PackageLogger) Trace(entries ...interface{}) { - p.internalLog(calldepth, TRACE, entries...) -} - -func (p *PackageLogger) Flush() { - logger.Lock() - defer logger.Unlock() - logger.formatter.Flush() -} diff --git a/vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go b/vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go deleted file mode 100644 index 4be5a1f2de..0000000000 --- a/vendor/src/github.com/coreos/pkg/capnslog/syslog_formatter.go +++ /dev/null @@ -1,65 +0,0 @@ -// Copyright 2015 CoreOS, Inc. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// +build !windows - -package capnslog - -import ( - "fmt" - "log/syslog" -) - -func NewSyslogFormatter(w *syslog.Writer) Formatter { - return &syslogFormatter{w} -} - -func NewDefaultSyslogFormatter(tag string) (Formatter, error) { - w, err := syslog.New(syslog.LOG_DEBUG, tag) - if err != nil { - return nil, err - } - return NewSyslogFormatter(w), nil -} - -type syslogFormatter struct { - w *syslog.Writer -} - -func (s *syslogFormatter) Format(pkg string, l LogLevel, _ int, entries ...interface{}) { - for _, entry := range entries { - str := fmt.Sprint(entry) - switch l { - case CRITICAL: - s.w.Crit(str) - case ERROR: - s.w.Err(str) - case WARNING: - s.w.Warning(str) - case NOTICE: - s.w.Notice(str) - case INFO: - s.w.Info(str) - case DEBUG: - s.w.Debug(str) - case TRACE: - s.w.Debug(str) - default: - panic("Unhandled loglevel") - } - } -} - -func (s *syslogFormatter) Flush() { -} diff --git a/vendor/src/github.com/deckarep/golang-set/.gitignore b/vendor/src/github.com/deckarep/golang-set/.gitignore deleted file mode 100644 index 00268614f0..0000000000 --- a/vendor/src/github.com/deckarep/golang-set/.gitignore +++ /dev/null @@ -1,22 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe diff --git a/vendor/src/github.com/deckarep/golang-set/.travis.yml b/vendor/src/github.com/deckarep/golang-set/.travis.yml deleted file mode 100644 index db1359c72e..0000000000 --- a/vendor/src/github.com/deckarep/golang-set/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.2 - -script: - - go test ./... - #- go test -race ./... - diff --git a/vendor/src/github.com/deckarep/golang-set/LICENSE b/vendor/src/github.com/deckarep/golang-set/LICENSE deleted file mode 100644 index b5768f89cf..0000000000 --- a/vendor/src/github.com/deckarep/golang-set/LICENSE +++ /dev/null @@ -1,22 +0,0 @@ -Open Source Initiative OSI - The MIT License (MIT):Licensing - -The MIT License (MIT) -Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. 
\ No newline at end of file
diff --git a/vendor/src/github.com/deckarep/golang-set/README.md b/vendor/src/github.com/deckarep/golang-set/README.md
deleted file mode 100644
index 744b1841cd..0000000000
--- a/vendor/src/github.com/deckarep/golang-set/README.md
+++ /dev/null
@@ -1,94 +0,0 @@
-[![Build Status](https://travis-ci.org/deckarep/golang-set.png?branch=master)](https://travis-ci.org/deckarep/golang-set)
-[![GoDoc](https://godoc.org/github.com/deckarep/golang-set?status.png)](http://godoc.org/github.com/deckarep/golang-set)
-
-## golang-set
-
-
-The missing set collection for the Go language. Until Go has sets built-in...use this.
-
-Coming from Python, one of the things I miss is the superbly wonderful set collection. This is my attempt to mimic the primary features of the set from Python.
-You can of course argue that there is no need for a set in Go, otherwise the creators would have added one to the standard library. To those I say: simply ignore this repository and carry on. To the rest who find this useful, please contribute to making it better by:
-
-* Helping to make more idiomatic improvements to the code.
-* Helping to increase the performance of it. ~~(So far, no attempt has been made, but since it uses a map internally, I expect it to be mostly performant.)~~
-* Helping to make the unit-tests more robust and kick-ass.
-* Helping to fill in the [documentation.](http://godoc.org/github.com/deckarep/golang-set)
-* Simply offering feedback and suggestions. (Positive, constructive feedback is appreciated.)
-
-I have to give some credit for helping seed the idea with this post on [stackoverflow.](http://programmers.stackexchange.com/questions/177428/sets-data-structure-in-golang)
-
-*Update* - as of 3/9/2014, you can use a compile-time generic version of this package in the [gen](http://clipperhouse.github.io/gen/) framework. This framework allows you to use golang-set in a completely generic and type-safe way by letting you generate a supporting .go file based on your custom types.
-
-## Features (as of 9/22/2014)
-
-* a CartesianProduct() method has been added with unit-tests: [Read more about the Cartesian product](http://en.wikipedia.org/wiki/Cartesian_product)
-
-## Features (as of 9/15/2014)
-
-* a PowerSet() method has been added with unit-tests: [Read more about the Power set](http://en.wikipedia.org/wiki/Power_set)
-
-## Features (as of 4/22/2014)
-
-* One common interface to both implementations
-* Two set implementations to choose from
-  * a thread-safe implementation designed for concurrent use
-  * a non-thread-safe implementation designed for performance
-* 75 benchmarks for both implementations
-* 35 unit tests for both implementations
-* 14 concurrent tests for the thread-safe implementation
-
-
-
-Please see the unit test file for additional usage examples. The Python set documentation will also do a better job than I can of explaining how a set typically [works.](http://docs.python.org/2/library/sets.html) Please keep in mind, however, that the Python set is a built-in type and supports additional features and syntax that make it awesome.
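-
-For code that never shares a set across goroutines, the non-thread-safe variant trades safety for speed (a hedged editorial sketch; `mapset` is this package's import name):
-
-```go
-s := mapset.NewThreadUnsafeSet()
-s.Add("alpha")
-fmt.Println(s.Contains("alpha")) // true
-```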
- -## Examples but not exhaustive: - -```go -requiredClasses := mapset.NewSet() -requiredClasses.Add("Cooking") -requiredClasses.Add("English") -requiredClasses.Add("Math") -requiredClasses.Add("Biology") - -scienceSlice := []interface{}{"Biology", "Chemistry"} -scienceClasses := mapset.NewSetFromSlice(scienceSlice) - -electiveClasses := mapset.NewSet() -electiveClasses.Add("Welding") -electiveClasses.Add("Music") -electiveClasses.Add("Automotive") - -bonusClasses := mapset.NewSet() -bonusClasses.Add("Go Programming") -bonusClasses.Add("Python Programming") - -//Show me all the available classes I can take -allClasses := requiredClasses.Union(scienceClasses).Union(electiveClasses).Union(bonusClasses) -fmt.Println(allClasses) //Set{Cooking, English, Math, Chemistry, Welding, Biology, Music, Automotive, Go Programming, Python Programming} - - -//Is cooking considered a science class? -fmt.Println(scienceClasses.Contains("Cooking")) //false - -//Show me all classes that are not science classes, since I hate science. -fmt.Println(allClasses.Difference(scienceClasses)) //Set{Music, Automotive, Go Programming, Python Programming, Cooking, English, Math, Welding} - -//Which science classes are also required classes? -fmt.Println(scienceClasses.Intersect(requiredClasses)) //Set{Biology} - -//How many bonus classes do you offer? -fmt.Println(bonusClasses.Cardinality()) //2 - -//Do you have the following classes? Welding, Automotive and English? -fmt.Println(allClasses.IsSuperset(mapset.NewSetFromSlice([]interface{}{"Welding", "Automotive", "English"}))) //true -``` - -Thanks! - --Ralph - -[![Bitdeli Badge](https://d2weczhvl823v0.cloudfront.net/deckarep/golang-set/trend.png)](https://bitdeli.com/free "Bitdeli Badge") - -[![Analytics](https://ga-beacon.appspot.com/UA-42584447-2/deckarep/golang-set)](https://github.com/igrigorik/ga-beacon) diff --git a/vendor/src/github.com/deckarep/golang-set/set.go b/vendor/src/github.com/deckarep/golang-set/set.go deleted file mode 100644 index eccba70e44..0000000000 --- a/vendor/src/github.com/deckarep/golang-set/set.go +++ /dev/null @@ -1,168 +0,0 @@ -/* -Open Source Initiative OSI - The MIT License (MIT):Licensing - -The MIT License (MIT) -Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -// Package mapset implements a simple and generic set collection. -// Items stored within it are unordered and unique. 
It supports
-// typical set operations: membership testing, intersection, union,
-// difference, symmetric difference and cloning.
-//
-// Package mapset provides two implementations. The default
-// implementation is safe for concurrent access. There is a non-threadsafe
-// implementation which is slightly more performant.
-package mapset
-
-type Set interface {
-	// Adds an element to the set. Returns whether
-	// the item was added.
-	Add(i interface{}) bool
-
-	// Returns the number of elements in the set.
-	Cardinality() int
-
-	// Removes all elements from the set, leaving
-	// the empty set.
-	Clear()
-
-	// Returns a clone of the set using the same
-	// implementation, duplicating all keys.
-	Clone() Set
-
-	// Returns whether the given items
-	// are all in the set.
-	Contains(i ...interface{}) bool
-
-	// Returns the difference between this set
-	// and other. The returned set will contain
-	// all elements of this set that are not also
-	// elements of other.
-	//
-	// Note that the argument to Difference
-	// must be of the same type as the receiver
-	// of the method. Otherwise, Difference will
-	// panic.
-	Difference(other Set) Set
-
-	// Determines if two sets are equal to each
-	// other. If they have the same cardinality
-	// and contain the same elements, they are
-	// considered equal. The order in which
-	// the elements were added is irrelevant.
-	//
-	// Note that the argument to Equal must be
-	// of the same type as the receiver of the
-	// method. Otherwise, Equal will panic.
-	Equal(other Set) bool
-
-	// Returns a new set containing only the elements
-	// that exist in both sets.
-	//
-	// Note that the argument to Intersect
-	// must be of the same type as the receiver
-	// of the method. Otherwise, Intersect will
-	// panic.
-	Intersect(other Set) Set
-
-	// Determines if every element in the other set
-	// is in this set.
-	//
-	// Note that the argument to IsSubset
-	// must be of the same type as the receiver
-	// of the method. Otherwise, IsSubset will
-	// panic.
-	IsSubset(other Set) bool
-
-	// Determines if every element in this set is in
-	// the other set.
-	//
-	// Note that the argument to IsSuperset
-	// must be of the same type as the receiver
-	// of the method. Otherwise, IsSuperset will
-	// panic.
-	IsSuperset(other Set) bool
-
-	// Returns a channel of elements that you can
-	// range over.
-	Iter() <-chan interface{}
-
-	// Remove a single element from the set.
-	Remove(i interface{})
-
-	// Provides a convenient string representation
-	// of the current state of the set.
-	String() string
-
-	// Returns a new set with all elements which are
-	// in either this set or the other set but not in both.
-	//
-	// Note that the argument to SymmetricDifference
-	// must be of the same type as the receiver
-	// of the method. Otherwise, SymmetricDifference
-	// will panic.
-	SymmetricDifference(other Set) Set
-
-	// Returns a new set with all elements from both sets.
-	//
-	// Note that the argument to Union must be of the
-	// same type as the receiver of the method.
-	// Otherwise, Union will panic.
-	Union(other Set) Set
-
-	// Returns all subsets of a given set (Power Set).
-	PowerSet() Set
-
-	// Returns the Cartesian Product of two sets.
-	CartesianProduct(other Set) Set
-
-	// Returns the members of the set as a slice.
-	ToSlice() []interface{}
-}
-
-// Creates and returns a reference to an empty set.
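-// A hedged editorial example:
-//
-//	s := mapset.NewSet()
-//	s.Add(1984)
-//	s.Add("sputnik")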
-func NewSet() Set { - set := newThreadSafeSet() - return &set -} - -// Creates and returns a reference to a set from an existing slice -func NewSetFromSlice(s []interface{}) Set { - a := NewSet() - for _, item := range s { - a.Add(item) - } - return a -} - -func NewThreadUnsafeSet() Set { - set := newThreadUnsafeSet() - return &set -} - -func NewThreadUnsafeSetFromSlice(s []interface{}) Set { - a := NewThreadUnsafeSet() - for _, item := range s { - a.Add(item) - } - return a -} diff --git a/vendor/src/github.com/deckarep/golang-set/threadsafe.go b/vendor/src/github.com/deckarep/golang-set/threadsafe.go deleted file mode 100644 index 9dca94af73..0000000000 --- a/vendor/src/github.com/deckarep/golang-set/threadsafe.go +++ /dev/null @@ -1,204 +0,0 @@ -/* -Open Source Initiative OSI - The MIT License (MIT):Licensing - -The MIT License (MIT) -Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in -the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -package mapset - -import "sync" - -type threadSafeSet struct { - s threadUnsafeSet - sync.RWMutex -} - -func newThreadSafeSet() threadSafeSet { - return threadSafeSet{s: newThreadUnsafeSet()} -} - -func (set *threadSafeSet) Add(i interface{}) bool { - set.Lock() - ret := set.s.Add(i) - set.Unlock() - return ret -} - -func (set *threadSafeSet) Contains(i ...interface{}) bool { - set.RLock() - ret := set.s.Contains(i...) 
- set.RUnlock() - return ret -} - -func (set *threadSafeSet) IsSubset(other Set) bool { - o := other.(*threadSafeSet) - - set.RLock() - o.RLock() - - ret := set.s.IsSubset(&o.s) - set.RUnlock() - o.RUnlock() - return ret -} - -func (set *threadSafeSet) IsSuperset(other Set) bool { - return other.IsSubset(set) -} - -func (set *threadSafeSet) Union(other Set) Set { - o := other.(*threadSafeSet) - - set.RLock() - o.RLock() - - unsafeUnion := set.s.Union(&o.s).(*threadUnsafeSet) - ret := &threadSafeSet{s: *unsafeUnion} - set.RUnlock() - o.RUnlock() - return ret -} - -func (set *threadSafeSet) Intersect(other Set) Set { - o := other.(*threadSafeSet) - - set.RLock() - o.RLock() - - unsafeIntersection := set.s.Intersect(&o.s).(*threadUnsafeSet) - ret := &threadSafeSet{s: *unsafeIntersection} - set.RUnlock() - o.RUnlock() - return ret -} - -func (set *threadSafeSet) Difference(other Set) Set { - o := other.(*threadSafeSet) - - set.RLock() - o.RLock() - - unsafeDifference := set.s.Difference(&o.s).(*threadUnsafeSet) - ret := &threadSafeSet{s: *unsafeDifference} - set.RUnlock() - o.RUnlock() - return ret -} - -func (set *threadSafeSet) SymmetricDifference(other Set) Set { - o := other.(*threadSafeSet) - - unsafeDifference := set.s.SymmetricDifference(&o.s).(*threadUnsafeSet) - return &threadSafeSet{s: *unsafeDifference} -} - -func (set *threadSafeSet) Clear() { - set.Lock() - set.s = newThreadUnsafeSet() - set.Unlock() -} - -func (set *threadSafeSet) Remove(i interface{}) { - set.Lock() - delete(set.s, i) - set.Unlock() -} - -func (set *threadSafeSet) Cardinality() int { - set.RLock() - defer set.RUnlock() - return len(set.s) -} - -func (set *threadSafeSet) Iter() <-chan interface{} { - ch := make(chan interface{}) - go func() { - set.RLock() - - for elem := range set.s { - ch <- elem - } - close(ch) - set.RUnlock() - }() - - return ch -} - -func (set *threadSafeSet) Equal(other Set) bool { - o := other.(*threadSafeSet) - - set.RLock() - o.RLock() - - ret := set.s.Equal(&o.s) - set.RUnlock() - o.RUnlock() - return ret -} - -func (set *threadSafeSet) Clone() Set { - set.RLock() - - unsafeClone := set.s.Clone().(*threadUnsafeSet) - ret := &threadSafeSet{s: *unsafeClone} - set.RUnlock() - return ret -} - -func (set *threadSafeSet) String() string { - set.RLock() - ret := set.s.String() - set.RUnlock() - return ret -} - -func (set *threadSafeSet) PowerSet() Set { - set.RLock() - ret := set.s.PowerSet() - set.RUnlock() - return ret -} - -func (set *threadSafeSet) CartesianProduct(other Set) Set { - o := other.(*threadSafeSet) - - set.RLock() - o.RLock() - - unsafeCartProduct := set.s.CartesianProduct(&o.s).(*threadUnsafeSet) - ret := &threadSafeSet{s: *unsafeCartProduct} - set.RUnlock() - o.RUnlock() - return ret -} - -func (set *threadSafeSet) ToSlice() []interface{} { - set.RLock() - keys := make([]interface{}, 0, set.Cardinality()) - for elem := range set.s { - keys = append(keys, elem) - } - set.RUnlock() - return keys -} diff --git a/vendor/src/github.com/deckarep/golang-set/threadunsafe.go b/vendor/src/github.com/deckarep/golang-set/threadunsafe.go deleted file mode 100644 index 124521e2ee..0000000000 --- a/vendor/src/github.com/deckarep/golang-set/threadunsafe.go +++ /dev/null @@ -1,246 +0,0 @@ -/* -Open Source Initiative OSI - The MIT License (MIT):Licensing - -The MIT License (MIT) -Copyright (c) 2013 Ralph Caraveo (deckarep@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of -this software and associated documentation files (the "Software"), to deal in 
-the Software without restriction, including without limitation the rights to -use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies -of the Software, and to permit persons to whom the Software is furnished to do -so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. -*/ - -package mapset - -import ( - "fmt" - "reflect" - "strings" -) - -type threadUnsafeSet map[interface{}]struct{} - -type orderedPair struct { - first interface{} - second interface{} -} - -func newThreadUnsafeSet() threadUnsafeSet { - return make(threadUnsafeSet) -} - -func (pair *orderedPair) Equal(other orderedPair) bool { - if pair.first == other.first && - pair.second == other.second { - return true - } - - return false -} - -func (set *threadUnsafeSet) Add(i interface{}) bool { - _, found := (*set)[i] - (*set)[i] = struct{}{} - return !found //False if it existed already -} - -func (set *threadUnsafeSet) Contains(i ...interface{}) bool { - for _, val := range i { - if _, ok := (*set)[val]; !ok { - return false - } - } - return true -} - -func (set *threadUnsafeSet) IsSubset(other Set) bool { - _ = other.(*threadUnsafeSet) - for elem := range *set { - if !other.Contains(elem) { - return false - } - } - return true -} - -func (set *threadUnsafeSet) IsSuperset(other Set) bool { - return other.IsSubset(set) -} - -func (set *threadUnsafeSet) Union(other Set) Set { - o := other.(*threadUnsafeSet) - - unionedSet := newThreadUnsafeSet() - - for elem := range *set { - unionedSet.Add(elem) - } - for elem := range *o { - unionedSet.Add(elem) - } - return &unionedSet -} - -func (set *threadUnsafeSet) Intersect(other Set) Set { - o := other.(*threadUnsafeSet) - - intersection := newThreadUnsafeSet() - // loop over smaller set - if set.Cardinality() < other.Cardinality() { - for elem := range *set { - if other.Contains(elem) { - intersection.Add(elem) - } - } - } else { - for elem := range *o { - if set.Contains(elem) { - intersection.Add(elem) - } - } - } - return &intersection -} - -func (set *threadUnsafeSet) Difference(other Set) Set { - _ = other.(*threadUnsafeSet) - - difference := newThreadUnsafeSet() - for elem := range *set { - if !other.Contains(elem) { - difference.Add(elem) - } - } - return &difference -} - -func (set *threadUnsafeSet) SymmetricDifference(other Set) Set { - _ = other.(*threadUnsafeSet) - - aDiff := set.Difference(other) - bDiff := other.Difference(set) - return aDiff.Union(bDiff) -} - -func (set *threadUnsafeSet) Clear() { - *set = newThreadUnsafeSet() -} - -func (set *threadUnsafeSet) Remove(i interface{}) { - delete(*set, i) -} - -func (set *threadUnsafeSet) Cardinality() int { - return len(*set) -} - -func (set *threadUnsafeSet) Iter() <-chan interface{} { - ch := make(chan interface{}) - go func() { - for elem := range *set { - ch <- elem - } - close(ch) - }() - - return ch -} - -func (set *threadUnsafeSet) Equal(other Set) bool { - _ = other.(*threadUnsafeSet) - - if set.Cardinality() != 
other.Cardinality() { - return false - } - for elem := range *set { - if !other.Contains(elem) { - return false - } - } - return true -} - -func (set *threadUnsafeSet) Clone() Set { - clonedSet := newThreadUnsafeSet() - for elem := range *set { - clonedSet.Add(elem) - } - return &clonedSet -} - -func (set *threadUnsafeSet) String() string { - items := make([]string, 0, len(*set)) - - for elem := range *set { - items = append(items, fmt.Sprintf("%v", elem)) - } - return fmt.Sprintf("Set{%s}", strings.Join(items, ", ")) -} - -func (pair orderedPair) String() string { - return fmt.Sprintf("(%v, %v)", pair.first, pair.second) -} - -func (set *threadUnsafeSet) PowerSet() Set { - powSet := NewThreadUnsafeSet() - nullset := newThreadUnsafeSet() - powSet.Add(&nullset) - - for es := range *set { - u := newThreadUnsafeSet() - j := powSet.Iter() - for er := range j { - p := newThreadUnsafeSet() - if reflect.TypeOf(er).Name() == "" { - k := er.(*threadUnsafeSet) - for ek := range *(k) { - p.Add(ek) - } - } else { - p.Add(er) - } - p.Add(es) - u.Add(&p) - } - - powSet = powSet.Union(&u) - } - - return powSet -} - -func (set *threadUnsafeSet) CartesianProduct(other Set) Set { - o := other.(*threadUnsafeSet) - cartProduct := NewThreadUnsafeSet() - - for i := range *set { - for j := range *o { - elem := orderedPair{first: i, second: j} - cartProduct.Add(elem) - } - } - - return cartProduct -} - -func (set *threadUnsafeSet) ToSlice() []interface{} { - keys := make([]interface{}, 0, set.Cardinality()) - for elem := range *set { - keys = append(keys, elem) - } - - return keys -} diff --git a/vendor/src/github.com/docker/containerd/LICENSE.code b/vendor/src/github.com/docker/containerd/LICENSE.code deleted file mode 100644 index 8f3fee627a..0000000000 --- a/vendor/src/github.com/docker/containerd/LICENSE.code +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
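Both Iter implementations above push elements from a helper goroutine over an unbuffered channel and close the channel only after ranging the entire map; the thread-safe variant additionally holds its read lock until then. Abandoning the loop early therefore leaks the goroutine and, for threadSafeSet, pins the read lock, so callers are expected to drain the channel completely. A small self-contained sketch of that intended usage (same assumed mapset import as the previous example):

    package main

    import (
        "fmt"

        mapset "github.com/deckarep/golang-set"
    )

    func main() {
        s := mapset.NewSet()
        for i := 1; i <= 3; i++ {
            s.Add(i)
        }

        sum := 0
        // Drain the channel completely: the producer goroutine closes it
        // (and threadSafeSet releases its read lock) only after ranging
        // the whole map, so breaking out early would leak both.
        for v := range s.Iter() {
            sum += v.(int)
        }
        fmt.Println(sum) // 6
    }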
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2013-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/src/github.com/docker/containerd/LICENSE.docs b/vendor/src/github.com/docker/containerd/LICENSE.docs deleted file mode 100644 index e26cd4fc8e..0000000000 --- a/vendor/src/github.com/docker/containerd/LICENSE.docs +++ /dev/null @@ -1,425 +0,0 @@ -Attribution-ShareAlike 4.0 International - -======================================================================= - -Creative Commons Corporation ("Creative Commons") is not a law firm and -does not provide legal services or legal advice. Distribution of -Creative Commons public licenses does not create a lawyer-client or -other relationship. Creative Commons makes its licenses and related -information available on an "as-is" basis. Creative Commons gives no -warranties regarding its licenses, any material licensed under their -terms and conditions, or any related information. Creative Commons -disclaims all liability for damages resulting from their use to the -fullest extent possible. - -Using Creative Commons Public Licenses - -Creative Commons public licenses provide a standard set of terms and -conditions that creators and other rights holders may use to share -original works of authorship and other material subject to copyright -and certain other rights specified in the public license below. The -following considerations are for informational purposes only, are not -exhaustive, and do not form part of our licenses. 
- - Considerations for licensors: Our public licenses are - intended for use by those authorized to give the public - permission to use material in ways otherwise restricted by - copyright and certain other rights. Our licenses are - irrevocable. Licensors should read and understand the terms - and conditions of the license they choose before applying it. - Licensors should also secure all rights necessary before - applying our licenses so that the public can reuse the - material as expected. Licensors should clearly mark any - material not subject to the license. This includes other CC- - licensed material, or material used under an exception or - limitation to copyright. More considerations for licensors: - wiki.creativecommons.org/Considerations_for_licensors - - Considerations for the public: By using one of our public - licenses, a licensor grants the public permission to use the - licensed material under specified terms and conditions. If - the licensor's permission is not necessary for any reason--for - example, because of any applicable exception or limitation to - copyright--then that use is not regulated by the license. Our - licenses grant only permissions under copyright and certain - other rights that a licensor has authority to grant. Use of - the licensed material may still be restricted for other - reasons, including because others have copyright or other - rights in the material. A licensor may make special requests, - such as asking that all changes be marked or described. - Although not required by our licenses, you are encouraged to - respect those requests where reasonable. More_considerations - for the public: - wiki.creativecommons.org/Considerations_for_licensees - -======================================================================= - -Creative Commons Attribution-ShareAlike 4.0 International Public -License - -By exercising the Licensed Rights (defined below), You accept and agree -to be bound by the terms and conditions of this Creative Commons -Attribution-ShareAlike 4.0 International Public License ("Public -License"). To the extent this Public License may be interpreted as a -contract, You are granted the Licensed Rights in consideration of Your -acceptance of these terms and conditions, and the Licensor grants You -such rights in consideration of benefits the Licensor receives from -making the Licensed Material available under these terms and -conditions. - - -Section 1 -- Definitions. - - a. Adapted Material means material subject to Copyright and Similar - Rights that is derived from or based upon the Licensed Material - and in which the Licensed Material is translated, altered, - arranged, transformed, or otherwise modified in a manner requiring - permission under the Copyright and Similar Rights held by the - Licensor. For purposes of this Public License, where the Licensed - Material is a musical work, performance, or sound recording, - Adapted Material is always produced where the Licensed Material is - synched in timed relation with a moving image. - - b. Adapter's License means the license You apply to Your Copyright - and Similar Rights in Your contributions to Adapted Material in - accordance with the terms and conditions of this Public License. - - c. BY-SA Compatible License means a license listed at - creativecommons.org/compatiblelicenses, approved by Creative - Commons as essentially the equivalent of this Public License. - - d. 
Copyright and Similar Rights means copyright and/or similar rights - closely related to copyright including, without limitation, - performance, broadcast, sound recording, and Sui Generis Database - Rights, without regard to how the rights are labeled or - categorized. For purposes of this Public License, the rights - specified in Section 2(b)(1)-(2) are not Copyright and Similar - Rights. - - e. Effective Technological Measures means those measures that, in the - absence of proper authority, may not be circumvented under laws - fulfilling obligations under Article 11 of the WIPO Copyright - Treaty adopted on December 20, 1996, and/or similar international - agreements. - - f. Exceptions and Limitations means fair use, fair dealing, and/or - any other exception or limitation to Copyright and Similar Rights - that applies to Your use of the Licensed Material. - - g. License Elements means the license attributes listed in the name - of a Creative Commons Public License. The License Elements of this - Public License are Attribution and ShareAlike. - - h. Licensed Material means the artistic or literary work, database, - or other material to which the Licensor applied this Public - License. - - i. Licensed Rights means the rights granted to You subject to the - terms and conditions of this Public License, which are limited to - all Copyright and Similar Rights that apply to Your use of the - Licensed Material and that the Licensor has authority to license. - - j. Licensor means the individual(s) or entity(ies) granting rights - under this Public License. - - k. Share means to provide material to the public by any means or - process that requires permission under the Licensed Rights, such - as reproduction, public display, public performance, distribution, - dissemination, communication, or importation, and to make material - available to the public including in ways that members of the - public may access the material from a place and at a time - individually chosen by them. - - l. Sui Generis Database Rights means rights other than copyright - resulting from Directive 96/9/EC of the European Parliament and of - the Council of 11 March 1996 on the legal protection of databases, - as amended and/or succeeded, as well as other essentially - equivalent rights anywhere in the world. - - m. You means the individual or entity exercising the Licensed Rights - under this Public License. Your has a corresponding meaning. - - -Section 2 -- Scope. - - a. License grant. - - 1. Subject to the terms and conditions of this Public License, - the Licensor hereby grants You a worldwide, royalty-free, - non-sublicensable, non-exclusive, irrevocable license to - exercise the Licensed Rights in the Licensed Material to: - - a. reproduce and Share the Licensed Material, in whole or - in part; and - - b. produce, reproduce, and Share Adapted Material. - - 2. Exceptions and Limitations. For the avoidance of doubt, where - Exceptions and Limitations apply to Your use, this Public - License does not apply, and You do not need to comply with - its terms and conditions. - - 3. Term. The term of this Public License is specified in Section - 6(a). - - 4. Media and formats; technical modifications allowed. The - Licensor authorizes You to exercise the Licensed Rights in - all media and formats whether now known or hereafter created, - and to make technical modifications necessary to do so. 
The - Licensor waives and/or agrees not to assert any right or - authority to forbid You from making technical modifications - necessary to exercise the Licensed Rights, including - technical modifications necessary to circumvent Effective - Technological Measures. For purposes of this Public License, - simply making modifications authorized by this Section 2(a) - (4) never produces Adapted Material. - - 5. Downstream recipients. - - a. Offer from the Licensor -- Licensed Material. Every - recipient of the Licensed Material automatically - receives an offer from the Licensor to exercise the - Licensed Rights under the terms and conditions of this - Public License. - - b. Additional offer from the Licensor -- Adapted Material. - Every recipient of Adapted Material from You - automatically receives an offer from the Licensor to - exercise the Licensed Rights in the Adapted Material - under the conditions of the Adapter's License You apply. - - c. No downstream restrictions. You may not offer or impose - any additional or different terms or conditions on, or - apply any Effective Technological Measures to, the - Licensed Material if doing so restricts exercise of the - Licensed Rights by any recipient of the Licensed - Material. - - 6. No endorsement. Nothing in this Public License constitutes or - may be construed as permission to assert or imply that You - are, or that Your use of the Licensed Material is, connected - with, or sponsored, endorsed, or granted official status by, - the Licensor or others designated to receive attribution as - provided in Section 3(a)(1)(A)(i). - - b. Other rights. - - 1. Moral rights, such as the right of integrity, are not - licensed under this Public License, nor are publicity, - privacy, and/or other similar personality rights; however, to - the extent possible, the Licensor waives and/or agrees not to - assert any such rights held by the Licensor to the limited - extent necessary to allow You to exercise the Licensed - Rights, but not otherwise. - - 2. Patent and trademark rights are not licensed under this - Public License. - - 3. To the extent possible, the Licensor waives any right to - collect royalties from You for the exercise of the Licensed - Rights, whether directly or through a collecting society - under any voluntary or waivable statutory or compulsory - licensing scheme. In all other cases the Licensor expressly - reserves any right to collect such royalties. - - -Section 3 -- License Conditions. - -Your exercise of the Licensed Rights is expressly made subject to the -following conditions. - - a. Attribution. - - 1. If You Share the Licensed Material (including in modified - form), You must: - - a. retain the following if it is supplied by the Licensor - with the Licensed Material: - - i. identification of the creator(s) of the Licensed - Material and any others designated to receive - attribution, in any reasonable manner requested by - the Licensor (including by pseudonym if - designated); - - ii. a copyright notice; - - iii. a notice that refers to this Public License; - - iv. a notice that refers to the disclaimer of - warranties; - - v. a URI or hyperlink to the Licensed Material to the - extent reasonably practicable; - - b. indicate if You modified the Licensed Material and - retain an indication of any previous modifications; and - - c. indicate the Licensed Material is licensed under this - Public License, and include the text of, or the URI or - hyperlink to, this Public License. - - 2. 
You may satisfy the conditions in Section 3(a)(1) in any - reasonable manner based on the medium, means, and context in - which You Share the Licensed Material. For example, it may be - reasonable to satisfy the conditions by providing a URI or - hyperlink to a resource that includes the required - information. - - 3. If requested by the Licensor, You must remove any of the - information required by Section 3(a)(1)(A) to the extent - reasonably practicable. - - b. ShareAlike. - - In addition to the conditions in Section 3(a), if You Share - Adapted Material You produce, the following conditions also apply. - - 1. The Adapter's License You apply must be a Creative Commons - license with the same License Elements, this version or - later, or a BY-SA Compatible License. - - 2. You must include the text of, or the URI or hyperlink to, the - Adapter's License You apply. You may satisfy this condition - in any reasonable manner based on the medium, means, and - context in which You Share Adapted Material. - - 3. You may not offer or impose any additional or different terms - or conditions on, or apply any Effective Technological - Measures to, Adapted Material that restrict exercise of the - rights granted under the Adapter's License You apply. - - -Section 4 -- Sui Generis Database Rights. - -Where the Licensed Rights include Sui Generis Database Rights that -apply to Your use of the Licensed Material: - - a. for the avoidance of doubt, Section 2(a)(1) grants You the right - to extract, reuse, reproduce, and Share all or a substantial - portion of the contents of the database; - - b. if You include all or a substantial portion of the database - contents in a database in which You have Sui Generis Database - Rights, then the database in which You have Sui Generis Database - Rights (but not its individual contents) is Adapted Material, - - including for purposes of Section 3(b); and - c. You must comply with the conditions in Section 3(a) if You Share - all or a substantial portion of the contents of the database. - -For the avoidance of doubt, this Section 4 supplements and does not -replace Your obligations under this Public License where the Licensed -Rights include other Copyright and Similar Rights. - - -Section 5 -- Disclaimer of Warranties and Limitation of Liability. - - a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE - EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS - AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF - ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, - IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, - WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, - ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT - KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT - ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. - - b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE - TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, - NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, - INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, - COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR - USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN - ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR - DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR - IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. - - c. 
The disclaimer of warranties and limitation of liability provided - above shall be interpreted in a manner that, to the extent - possible, most closely approximates an absolute disclaimer and - waiver of all liability. - - -Section 6 -- Term and Termination. - - a. This Public License applies for the term of the Copyright and - Similar Rights licensed here. However, if You fail to comply with - this Public License, then Your rights under this Public License - terminate automatically. - - b. Where Your right to use the Licensed Material has terminated under - Section 6(a), it reinstates: - - 1. automatically as of the date the violation is cured, provided - it is cured within 30 days of Your discovery of the - violation; or - - 2. upon express reinstatement by the Licensor. - - For the avoidance of doubt, this Section 6(b) does not affect any - right the Licensor may have to seek remedies for Your violations - of this Public License. - - c. For the avoidance of doubt, the Licensor may also offer the - Licensed Material under separate terms or conditions or stop - distributing the Licensed Material at any time; however, doing so - will not terminate this Public License. - - d. Sections 1, 5, 6, 7, and 8 survive termination of this Public - License. - - -Section 7 -- Other Terms and Conditions. - - a. The Licensor shall not be bound by any additional or different - terms or conditions communicated by You unless expressly agreed. - - b. Any arrangements, understandings, or agreements regarding the - Licensed Material not stated herein are separate from and - independent of the terms and conditions of this Public License. - - -Section 8 -- Interpretation. - - a. For the avoidance of doubt, this Public License does not, and - shall not be interpreted to, reduce, limit, restrict, or impose - conditions on any use of the Licensed Material that could lawfully - be made without permission under this Public License. - - b. To the extent possible, if any provision of this Public License is - deemed unenforceable, it shall be automatically reformed to the - minimum extent necessary to make it enforceable. If the provision - cannot be reformed, it shall be severed from this Public License - without affecting the enforceability of the remaining terms and - conditions. - - c. No term or condition of this Public License will be waived and no - failure to comply consented to unless expressly agreed to by the - Licensor. - - d. Nothing in this Public License constitutes or may be interpreted - as a limitation upon, or waiver of, any privileges and immunities - that apply to the Licensor or You, including from the legal - processes of any jurisdiction or authority. - - -======================================================================= - -Creative Commons is not a party to its public licenses. -Notwithstanding, Creative Commons may elect to apply one of its public -licenses to material it publishes and in those instances will be -considered the "Licensor." 
Except for the limited purpose of indicating -that material is shared under a Creative Commons public license or as -otherwise permitted by the Creative Commons policies published at -creativecommons.org/policies, Creative Commons does not authorize the -use of the trademark "Creative Commons" or any other trademark or logo -of Creative Commons without its prior written consent including, -without limitation, in connection with any unauthorized modifications -to any of its public licenses or any other arrangements, -understandings, or agreements concerning use of licensed material. For -the avoidance of doubt, this paragraph does not form part of the public -licenses. - -Creative Commons may be contacted at creativecommons.org. diff --git a/vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go b/vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go deleted file mode 100644 index 8d5f6bf5d2..0000000000 --- a/vendor/src/github.com/docker/containerd/api/grpc/types/api.pb.go +++ /dev/null @@ -1,1498 +0,0 @@ -// Code generated by protoc-gen-go. -// source: api.proto -// DO NOT EDIT! - -/* -Package types is a generated protocol buffer package. - -It is generated from these files: - api.proto - -It has these top-level messages: - GetServerVersionRequest - GetServerVersionResponse - UpdateProcessRequest - UpdateProcessResponse - CreateContainerRequest - CreateContainerResponse - SignalRequest - SignalResponse - AddProcessRequest - Rlimit - User - AddProcessResponse - CreateCheckpointRequest - CreateCheckpointResponse - DeleteCheckpointRequest - DeleteCheckpointResponse - ListCheckpointRequest - Checkpoint - ListCheckpointResponse - StateRequest - ContainerState - Process - Container - Machine - StateResponse - UpdateContainerRequest - UpdateResource - UpdateContainerResponse - EventsRequest - Event - NetworkStats - CpuUsage - ThrottlingData - CpuStats - PidsStats - MemoryData - MemoryStats - BlkioStatsEntry - BlkioStats - HugetlbStats - CgroupStats - StatsResponse - StatsRequest -*/ -package types - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import google_protobuf "github.com/golang/protobuf/ptypes/timestamp" - -import ( - context "golang.org/x/net/context" - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. 
-const _ = proto.ProtoPackageIsVersion1 - -type GetServerVersionRequest struct { -} - -func (m *GetServerVersionRequest) Reset() { *m = GetServerVersionRequest{} } -func (m *GetServerVersionRequest) String() string { return proto.CompactTextString(m) } -func (*GetServerVersionRequest) ProtoMessage() {} -func (*GetServerVersionRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type GetServerVersionResponse struct { - Major uint32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor uint32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Patch uint32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` - Revision string `protobuf:"bytes,4,opt,name=revision" json:"revision,omitempty"` -} - -func (m *GetServerVersionResponse) Reset() { *m = GetServerVersionResponse{} } -func (m *GetServerVersionResponse) String() string { return proto.CompactTextString(m) } -func (*GetServerVersionResponse) ProtoMessage() {} -func (*GetServerVersionResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -type UpdateProcessRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` - CloseStdin bool `protobuf:"varint,3,opt,name=closeStdin" json:"closeStdin,omitempty"` - Width uint32 `protobuf:"varint,4,opt,name=width" json:"width,omitempty"` - Height uint32 `protobuf:"varint,5,opt,name=height" json:"height,omitempty"` -} - -func (m *UpdateProcessRequest) Reset() { *m = UpdateProcessRequest{} } -func (m *UpdateProcessRequest) String() string { return proto.CompactTextString(m) } -func (*UpdateProcessRequest) ProtoMessage() {} -func (*UpdateProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -type UpdateProcessResponse struct { -} - -func (m *UpdateProcessResponse) Reset() { *m = UpdateProcessResponse{} } -func (m *UpdateProcessResponse) String() string { return proto.CompactTextString(m) } -func (*UpdateProcessResponse) ProtoMessage() {} -func (*UpdateProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -type CreateContainerRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` - Checkpoint string `protobuf:"bytes,3,opt,name=checkpoint" json:"checkpoint,omitempty"` - Stdin string `protobuf:"bytes,4,opt,name=stdin" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,5,opt,name=stdout" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,6,opt,name=stderr" json:"stderr,omitempty"` - Labels []string `protobuf:"bytes,7,rep,name=labels" json:"labels,omitempty"` - NoPivotRoot bool `protobuf:"varint,8,opt,name=noPivotRoot" json:"noPivotRoot,omitempty"` - Runtime string `protobuf:"bytes,9,opt,name=runtime" json:"runtime,omitempty"` - RuntimeArgs []string `protobuf:"bytes,10,rep,name=runtimeArgs" json:"runtimeArgs,omitempty"` - CheckpointDir string `protobuf:"bytes,11,opt,name=checkpointDir" json:"checkpointDir,omitempty"` -} - -func (m *CreateContainerRequest) Reset() { *m = CreateContainerRequest{} } -func (m *CreateContainerRequest) String() string { return proto.CompactTextString(m) } -func (*CreateContainerRequest) ProtoMessage() {} -func (*CreateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } - -type CreateContainerResponse struct { - Container *Container `protobuf:"bytes,1,opt,name=container" json:"container,omitempty"` 
-} - -func (m *CreateContainerResponse) Reset() { *m = CreateContainerResponse{} } -func (m *CreateContainerResponse) String() string { return proto.CompactTextString(m) } -func (*CreateContainerResponse) ProtoMessage() {} -func (*CreateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } - -func (m *CreateContainerResponse) GetContainer() *Container { - if m != nil { - return m.Container - } - return nil -} - -type SignalRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` - Signal uint32 `protobuf:"varint,3,opt,name=signal" json:"signal,omitempty"` -} - -func (m *SignalRequest) Reset() { *m = SignalRequest{} } -func (m *SignalRequest) String() string { return proto.CompactTextString(m) } -func (*SignalRequest) ProtoMessage() {} -func (*SignalRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } - -type SignalResponse struct { -} - -func (m *SignalResponse) Reset() { *m = SignalResponse{} } -func (m *SignalResponse) String() string { return proto.CompactTextString(m) } -func (*SignalResponse) ProtoMessage() {} -func (*SignalResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } - -type AddProcessRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` - User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` - Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` - Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` - Pid string `protobuf:"bytes,7,opt,name=pid" json:"pid,omitempty"` - Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` - Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` - ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` - SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` - NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` - Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` -} - -func (m *AddProcessRequest) Reset() { *m = AddProcessRequest{} } -func (m *AddProcessRequest) String() string { return proto.CompactTextString(m) } -func (*AddProcessRequest) ProtoMessage() {} -func (*AddProcessRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } - -func (m *AddProcessRequest) GetUser() *User { - if m != nil { - return m.User - } - return nil -} - -func (m *AddProcessRequest) GetRlimits() []*Rlimit { - if m != nil { - return m.Rlimits - } - return nil -} - -type Rlimit struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Soft uint64 `protobuf:"varint,2,opt,name=soft" json:"soft,omitempty"` - Hard uint64 `protobuf:"varint,3,opt,name=hard" json:"hard,omitempty"` -} - -func (m *Rlimit) Reset() { *m = Rlimit{} } -func (m *Rlimit) String() string { return proto.CompactTextString(m) } -func (*Rlimit) ProtoMessage() {} -func (*Rlimit) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } - -type User struct { - Uid uint32 
`protobuf:"varint,1,opt,name=uid" json:"uid,omitempty"` - Gid uint32 `protobuf:"varint,2,opt,name=gid" json:"gid,omitempty"` - AdditionalGids []uint32 `protobuf:"varint,3,rep,name=additionalGids" json:"additionalGids,omitempty"` -} - -func (m *User) Reset() { *m = User{} } -func (m *User) String() string { return proto.CompactTextString(m) } -func (*User) ProtoMessage() {} -func (*User) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } - -type AddProcessResponse struct { -} - -func (m *AddProcessResponse) Reset() { *m = AddProcessResponse{} } -func (m *AddProcessResponse) String() string { return proto.CompactTextString(m) } -func (*AddProcessResponse) ProtoMessage() {} -func (*AddProcessResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } - -type CreateCheckpointRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Checkpoint *Checkpoint `protobuf:"bytes,2,opt,name=checkpoint" json:"checkpoint,omitempty"` - CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` -} - -func (m *CreateCheckpointRequest) Reset() { *m = CreateCheckpointRequest{} } -func (m *CreateCheckpointRequest) String() string { return proto.CompactTextString(m) } -func (*CreateCheckpointRequest) ProtoMessage() {} -func (*CreateCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } - -func (m *CreateCheckpointRequest) GetCheckpoint() *Checkpoint { - if m != nil { - return m.Checkpoint - } - return nil -} - -type CreateCheckpointResponse struct { -} - -func (m *CreateCheckpointResponse) Reset() { *m = CreateCheckpointResponse{} } -func (m *CreateCheckpointResponse) String() string { return proto.CompactTextString(m) } -func (*CreateCheckpointResponse) ProtoMessage() {} -func (*CreateCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } - -type DeleteCheckpointRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` - CheckpointDir string `protobuf:"bytes,3,opt,name=checkpointDir" json:"checkpointDir,omitempty"` -} - -func (m *DeleteCheckpointRequest) Reset() { *m = DeleteCheckpointRequest{} } -func (m *DeleteCheckpointRequest) String() string { return proto.CompactTextString(m) } -func (*DeleteCheckpointRequest) ProtoMessage() {} -func (*DeleteCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } - -type DeleteCheckpointResponse struct { -} - -func (m *DeleteCheckpointResponse) Reset() { *m = DeleteCheckpointResponse{} } -func (m *DeleteCheckpointResponse) String() string { return proto.CompactTextString(m) } -func (*DeleteCheckpointResponse) ProtoMessage() {} -func (*DeleteCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } - -type ListCheckpointRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - CheckpointDir string `protobuf:"bytes,2,opt,name=checkpointDir" json:"checkpointDir,omitempty"` -} - -func (m *ListCheckpointRequest) Reset() { *m = ListCheckpointRequest{} } -func (m *ListCheckpointRequest) String() string { return proto.CompactTextString(m) } -func (*ListCheckpointRequest) ProtoMessage() {} -func (*ListCheckpointRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } - -type Checkpoint struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Exit bool `protobuf:"varint,2,opt,name=exit" 
json:"exit,omitempty"` - Tcp bool `protobuf:"varint,3,opt,name=tcp" json:"tcp,omitempty"` - UnixSockets bool `protobuf:"varint,4,opt,name=unixSockets" json:"unixSockets,omitempty"` - Shell bool `protobuf:"varint,5,opt,name=shell" json:"shell,omitempty"` - EmptyNS []string `protobuf:"bytes,6,rep,name=emptyNS" json:"emptyNS,omitempty"` -} - -func (m *Checkpoint) Reset() { *m = Checkpoint{} } -func (m *Checkpoint) String() string { return proto.CompactTextString(m) } -func (*Checkpoint) ProtoMessage() {} -func (*Checkpoint) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } - -type ListCheckpointResponse struct { - Checkpoints []*Checkpoint `protobuf:"bytes,1,rep,name=checkpoints" json:"checkpoints,omitempty"` -} - -func (m *ListCheckpointResponse) Reset() { *m = ListCheckpointResponse{} } -func (m *ListCheckpointResponse) String() string { return proto.CompactTextString(m) } -func (*ListCheckpointResponse) ProtoMessage() {} -func (*ListCheckpointResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } - -func (m *ListCheckpointResponse) GetCheckpoints() []*Checkpoint { - if m != nil { - return m.Checkpoints - } - return nil -} - -type StateRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` -} - -func (m *StateRequest) Reset() { *m = StateRequest{} } -func (m *StateRequest) String() string { return proto.CompactTextString(m) } -func (*StateRequest) ProtoMessage() {} -func (*StateRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } - -type ContainerState struct { - Status string `protobuf:"bytes,1,opt,name=status" json:"status,omitempty"` -} - -func (m *ContainerState) Reset() { *m = ContainerState{} } -func (m *ContainerState) String() string { return proto.CompactTextString(m) } -func (*ContainerState) ProtoMessage() {} -func (*ContainerState) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{20} } - -type Process struct { - Pid string `protobuf:"bytes,1,opt,name=pid" json:"pid,omitempty"` - Terminal bool `protobuf:"varint,2,opt,name=terminal" json:"terminal,omitempty"` - User *User `protobuf:"bytes,3,opt,name=user" json:"user,omitempty"` - Args []string `protobuf:"bytes,4,rep,name=args" json:"args,omitempty"` - Env []string `protobuf:"bytes,5,rep,name=env" json:"env,omitempty"` - Cwd string `protobuf:"bytes,6,opt,name=cwd" json:"cwd,omitempty"` - SystemPid uint32 `protobuf:"varint,7,opt,name=systemPid" json:"systemPid,omitempty"` - Stdin string `protobuf:"bytes,8,opt,name=stdin" json:"stdin,omitempty"` - Stdout string `protobuf:"bytes,9,opt,name=stdout" json:"stdout,omitempty"` - Stderr string `protobuf:"bytes,10,opt,name=stderr" json:"stderr,omitempty"` - Capabilities []string `protobuf:"bytes,11,rep,name=capabilities" json:"capabilities,omitempty"` - ApparmorProfile string `protobuf:"bytes,12,opt,name=apparmorProfile" json:"apparmorProfile,omitempty"` - SelinuxLabel string `protobuf:"bytes,13,opt,name=selinuxLabel" json:"selinuxLabel,omitempty"` - NoNewPrivileges bool `protobuf:"varint,14,opt,name=noNewPrivileges" json:"noNewPrivileges,omitempty"` - Rlimits []*Rlimit `protobuf:"bytes,15,rep,name=rlimits" json:"rlimits,omitempty"` -} - -func (m *Process) Reset() { *m = Process{} } -func (m *Process) String() string { return proto.CompactTextString(m) } -func (*Process) ProtoMessage() {} -func (*Process) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{21} } - -func (m *Process) GetUser() *User { - if m != nil { - return m.User - } - return nil -} - -func (m *Process) 
GetRlimits() []*Rlimit { - if m != nil { - return m.Rlimits - } - return nil -} - -type Container struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - BundlePath string `protobuf:"bytes,2,opt,name=bundlePath" json:"bundlePath,omitempty"` - Processes []*Process `protobuf:"bytes,3,rep,name=processes" json:"processes,omitempty"` - Status string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` - Labels []string `protobuf:"bytes,5,rep,name=labels" json:"labels,omitempty"` - Pids []uint32 `protobuf:"varint,6,rep,name=pids" json:"pids,omitempty"` - Runtime string `protobuf:"bytes,7,opt,name=runtime" json:"runtime,omitempty"` -} - -func (m *Container) Reset() { *m = Container{} } -func (m *Container) String() string { return proto.CompactTextString(m) } -func (*Container) ProtoMessage() {} -func (*Container) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{22} } - -func (m *Container) GetProcesses() []*Process { - if m != nil { - return m.Processes - } - return nil -} - -// Machine is information about machine on which containerd is run -type Machine struct { - Cpus uint32 `protobuf:"varint,1,opt,name=cpus" json:"cpus,omitempty"` - Memory uint64 `protobuf:"varint,2,opt,name=memory" json:"memory,omitempty"` -} - -func (m *Machine) Reset() { *m = Machine{} } -func (m *Machine) String() string { return proto.CompactTextString(m) } -func (*Machine) ProtoMessage() {} -func (*Machine) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{23} } - -// StateResponse is information about containerd daemon -type StateResponse struct { - Containers []*Container `protobuf:"bytes,1,rep,name=containers" json:"containers,omitempty"` - Machine *Machine `protobuf:"bytes,2,opt,name=machine" json:"machine,omitempty"` -} - -func (m *StateResponse) Reset() { *m = StateResponse{} } -func (m *StateResponse) String() string { return proto.CompactTextString(m) } -func (*StateResponse) ProtoMessage() {} -func (*StateResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{24} } - -func (m *StateResponse) GetContainers() []*Container { - if m != nil { - return m.Containers - } - return nil -} - -func (m *StateResponse) GetMachine() *Machine { - if m != nil { - return m.Machine - } - return nil -} - -type UpdateContainerRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` - Pid string `protobuf:"bytes,2,opt,name=pid" json:"pid,omitempty"` - Status string `protobuf:"bytes,3,opt,name=status" json:"status,omitempty"` - Resources *UpdateResource `protobuf:"bytes,4,opt,name=resources" json:"resources,omitempty"` -} - -func (m *UpdateContainerRequest) Reset() { *m = UpdateContainerRequest{} } -func (m *UpdateContainerRequest) String() string { return proto.CompactTextString(m) } -func (*UpdateContainerRequest) ProtoMessage() {} -func (*UpdateContainerRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{25} } - -func (m *UpdateContainerRequest) GetResources() *UpdateResource { - if m != nil { - return m.Resources - } - return nil -} - -type UpdateResource struct { - BlkioWeight uint64 `protobuf:"varint,1,opt,name=blkioWeight" json:"blkioWeight,omitempty"` - CpuShares uint64 `protobuf:"varint,2,opt,name=cpuShares" json:"cpuShares,omitempty"` - CpuPeriod uint64 `protobuf:"varint,3,opt,name=cpuPeriod" json:"cpuPeriod,omitempty"` - CpuQuota uint64 `protobuf:"varint,4,opt,name=cpuQuota" json:"cpuQuota,omitempty"` - CpusetCpus string `protobuf:"bytes,5,opt,name=cpusetCpus" json:"cpusetCpus,omitempty"` - CpusetMems string 
`protobuf:"bytes,6,opt,name=cpusetMems" json:"cpusetMems,omitempty"` - MemoryLimit uint64 `protobuf:"varint,7,opt,name=memoryLimit" json:"memoryLimit,omitempty"` - MemorySwap uint64 `protobuf:"varint,8,opt,name=memorySwap" json:"memorySwap,omitempty"` - MemoryReservation uint64 `protobuf:"varint,9,opt,name=memoryReservation" json:"memoryReservation,omitempty"` - KernelMemoryLimit uint64 `protobuf:"varint,10,opt,name=kernelMemoryLimit" json:"kernelMemoryLimit,omitempty"` - KernelTCPMemoryLimit uint64 `protobuf:"varint,11,opt,name=kernelTCPMemoryLimit" json:"kernelTCPMemoryLimit,omitempty"` -} - -func (m *UpdateResource) Reset() { *m = UpdateResource{} } -func (m *UpdateResource) String() string { return proto.CompactTextString(m) } -func (*UpdateResource) ProtoMessage() {} -func (*UpdateResource) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{26} } - -type UpdateContainerResponse struct { -} - -func (m *UpdateContainerResponse) Reset() { *m = UpdateContainerResponse{} } -func (m *UpdateContainerResponse) String() string { return proto.CompactTextString(m) } -func (*UpdateContainerResponse) ProtoMessage() {} -func (*UpdateContainerResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{27} } - -type EventsRequest struct { - // Tag 1 is deprecated (old uint64 timestamp) - Timestamp *google_protobuf.Timestamp `protobuf:"bytes,2,opt,name=timestamp" json:"timestamp,omitempty"` - StoredOnly bool `protobuf:"varint,3,opt,name=storedOnly" json:"storedOnly,omitempty"` - Id string `protobuf:"bytes,4,opt,name=id" json:"id,omitempty"` -} - -func (m *EventsRequest) Reset() { *m = EventsRequest{} } -func (m *EventsRequest) String() string { return proto.CompactTextString(m) } -func (*EventsRequest) ProtoMessage() {} -func (*EventsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{28} } - -func (m *EventsRequest) GetTimestamp() *google_protobuf.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -type Event struct { - Type string `protobuf:"bytes,1,opt,name=type" json:"type,omitempty"` - Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` - Status uint32 `protobuf:"varint,3,opt,name=status" json:"status,omitempty"` - Pid string `protobuf:"bytes,4,opt,name=pid" json:"pid,omitempty"` - // Tag 5 is deprecated (old uint64 timestamp) - Timestamp *google_protobuf.Timestamp `protobuf:"bytes,6,opt,name=timestamp" json:"timestamp,omitempty"` -} - -func (m *Event) Reset() { *m = Event{} } -func (m *Event) String() string { return proto.CompactTextString(m) } -func (*Event) ProtoMessage() {} -func (*Event) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{29} } - -func (m *Event) GetTimestamp() *google_protobuf.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -type NetworkStats struct { - Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - RxBytes uint64 `protobuf:"varint,2,opt,name=rx_bytes,json=rxBytes" json:"rx_bytes,omitempty"` - Rx_Packets uint64 `protobuf:"varint,3,opt,name=rx_Packets,json=rxPackets" json:"rx_Packets,omitempty"` - RxErrors uint64 `protobuf:"varint,4,opt,name=Rx_errors,json=rxErrors" json:"Rx_errors,omitempty"` - RxDropped uint64 `protobuf:"varint,5,opt,name=Rx_dropped,json=rxDropped" json:"Rx_dropped,omitempty"` - TxBytes uint64 `protobuf:"varint,6,opt,name=Tx_bytes,json=txBytes" json:"Tx_bytes,omitempty"` - TxPackets uint64 `protobuf:"varint,7,opt,name=Tx_packets,json=txPackets" json:"Tx_packets,omitempty"` - TxErrors uint64 
`protobuf:"varint,8,opt,name=Tx_errors,json=txErrors" json:"Tx_errors,omitempty"` - TxDropped uint64 `protobuf:"varint,9,opt,name=Tx_dropped,json=txDropped" json:"Tx_dropped,omitempty"` -} - -func (m *NetworkStats) Reset() { *m = NetworkStats{} } -func (m *NetworkStats) String() string { return proto.CompactTextString(m) } -func (*NetworkStats) ProtoMessage() {} -func (*NetworkStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{30} } - -type CpuUsage struct { - TotalUsage uint64 `protobuf:"varint,1,opt,name=total_usage,json=totalUsage" json:"total_usage,omitempty"` - PercpuUsage []uint64 `protobuf:"varint,2,rep,name=percpu_usage,json=percpuUsage" json:"percpu_usage,omitempty"` - UsageInKernelmode uint64 `protobuf:"varint,3,opt,name=usage_in_kernelmode,json=usageInKernelmode" json:"usage_in_kernelmode,omitempty"` - UsageInUsermode uint64 `protobuf:"varint,4,opt,name=usage_in_usermode,json=usageInUsermode" json:"usage_in_usermode,omitempty"` -} - -func (m *CpuUsage) Reset() { *m = CpuUsage{} } -func (m *CpuUsage) String() string { return proto.CompactTextString(m) } -func (*CpuUsage) ProtoMessage() {} -func (*CpuUsage) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{31} } - -type ThrottlingData struct { - Periods uint64 `protobuf:"varint,1,opt,name=periods" json:"periods,omitempty"` - ThrottledPeriods uint64 `protobuf:"varint,2,opt,name=throttled_periods,json=throttledPeriods" json:"throttled_periods,omitempty"` - ThrottledTime uint64 `protobuf:"varint,3,opt,name=throttled_time,json=throttledTime" json:"throttled_time,omitempty"` -} - -func (m *ThrottlingData) Reset() { *m = ThrottlingData{} } -func (m *ThrottlingData) String() string { return proto.CompactTextString(m) } -func (*ThrottlingData) ProtoMessage() {} -func (*ThrottlingData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{32} } - -type CpuStats struct { - CpuUsage *CpuUsage `protobuf:"bytes,1,opt,name=cpu_usage,json=cpuUsage" json:"cpu_usage,omitempty"` - ThrottlingData *ThrottlingData `protobuf:"bytes,2,opt,name=throttling_data,json=throttlingData" json:"throttling_data,omitempty"` - SystemUsage uint64 `protobuf:"varint,3,opt,name=system_usage,json=systemUsage" json:"system_usage,omitempty"` -} - -func (m *CpuStats) Reset() { *m = CpuStats{} } -func (m *CpuStats) String() string { return proto.CompactTextString(m) } -func (*CpuStats) ProtoMessage() {} -func (*CpuStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{33} } - -func (m *CpuStats) GetCpuUsage() *CpuUsage { - if m != nil { - return m.CpuUsage - } - return nil -} - -func (m *CpuStats) GetThrottlingData() *ThrottlingData { - if m != nil { - return m.ThrottlingData - } - return nil -} - -type PidsStats struct { - Current uint64 `protobuf:"varint,1,opt,name=current" json:"current,omitempty"` - Limit uint64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"` -} - -func (m *PidsStats) Reset() { *m = PidsStats{} } -func (m *PidsStats) String() string { return proto.CompactTextString(m) } -func (*PidsStats) ProtoMessage() {} -func (*PidsStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{34} } - -type MemoryData struct { - Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` - MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"` - Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` - Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` -} - -func (m *MemoryData) Reset() { *m = 
MemoryData{} } -func (m *MemoryData) String() string { return proto.CompactTextString(m) } -func (*MemoryData) ProtoMessage() {} -func (*MemoryData) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{35} } - -type MemoryStats struct { - Cache uint64 `protobuf:"varint,1,opt,name=cache" json:"cache,omitempty"` - Usage *MemoryData `protobuf:"bytes,2,opt,name=usage" json:"usage,omitempty"` - SwapUsage *MemoryData `protobuf:"bytes,3,opt,name=swap_usage,json=swapUsage" json:"swap_usage,omitempty"` - KernelUsage *MemoryData `protobuf:"bytes,4,opt,name=kernel_usage,json=kernelUsage" json:"kernel_usage,omitempty"` - Stats map[string]uint64 `protobuf:"bytes,5,rep,name=stats" json:"stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` -} - -func (m *MemoryStats) Reset() { *m = MemoryStats{} } -func (m *MemoryStats) String() string { return proto.CompactTextString(m) } -func (*MemoryStats) ProtoMessage() {} -func (*MemoryStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{36} } - -func (m *MemoryStats) GetUsage() *MemoryData { - if m != nil { - return m.Usage - } - return nil -} - -func (m *MemoryStats) GetSwapUsage() *MemoryData { - if m != nil { - return m.SwapUsage - } - return nil -} - -func (m *MemoryStats) GetKernelUsage() *MemoryData { - if m != nil { - return m.KernelUsage - } - return nil -} - -func (m *MemoryStats) GetStats() map[string]uint64 { - if m != nil { - return m.Stats - } - return nil -} - -type BlkioStatsEntry struct { - Major uint64 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` - Minor uint64 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` - Op string `protobuf:"bytes,3,opt,name=op" json:"op,omitempty"` - Value uint64 `protobuf:"varint,4,opt,name=value" json:"value,omitempty"` -} - -func (m *BlkioStatsEntry) Reset() { *m = BlkioStatsEntry{} } -func (m *BlkioStatsEntry) String() string { return proto.CompactTextString(m) } -func (*BlkioStatsEntry) ProtoMessage() {} -func (*BlkioStatsEntry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{37} } - -type BlkioStats struct { - IoServiceBytesRecursive []*BlkioStatsEntry `protobuf:"bytes,1,rep,name=io_service_bytes_recursive,json=ioServiceBytesRecursive" json:"io_service_bytes_recursive,omitempty"` - IoServicedRecursive []*BlkioStatsEntry `protobuf:"bytes,2,rep,name=io_serviced_recursive,json=ioServicedRecursive" json:"io_serviced_recursive,omitempty"` - IoQueuedRecursive []*BlkioStatsEntry `protobuf:"bytes,3,rep,name=io_queued_recursive,json=ioQueuedRecursive" json:"io_queued_recursive,omitempty"` - IoServiceTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,4,rep,name=io_service_time_recursive,json=ioServiceTimeRecursive" json:"io_service_time_recursive,omitempty"` - IoWaitTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,5,rep,name=io_wait_time_recursive,json=ioWaitTimeRecursive" json:"io_wait_time_recursive,omitempty"` - IoMergedRecursive []*BlkioStatsEntry `protobuf:"bytes,6,rep,name=io_merged_recursive,json=ioMergedRecursive" json:"io_merged_recursive,omitempty"` - IoTimeRecursive []*BlkioStatsEntry `protobuf:"bytes,7,rep,name=io_time_recursive,json=ioTimeRecursive" json:"io_time_recursive,omitempty"` - SectorsRecursive []*BlkioStatsEntry `protobuf:"bytes,8,rep,name=sectors_recursive,json=sectorsRecursive" json:"sectors_recursive,omitempty"` -} - -func (m *BlkioStats) Reset() { *m = BlkioStats{} } -func (m *BlkioStats) String() string { return proto.CompactTextString(m) } -func (*BlkioStats) ProtoMessage() {} -func (*BlkioStats) 
Descriptor() ([]byte, []int) { return fileDescriptor0, []int{38} } - -func (m *BlkioStats) GetIoServiceBytesRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoServiceBytesRecursive - } - return nil -} - -func (m *BlkioStats) GetIoServicedRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoServicedRecursive - } - return nil -} - -func (m *BlkioStats) GetIoQueuedRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoQueuedRecursive - } - return nil -} - -func (m *BlkioStats) GetIoServiceTimeRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoServiceTimeRecursive - } - return nil -} - -func (m *BlkioStats) GetIoWaitTimeRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoWaitTimeRecursive - } - return nil -} - -func (m *BlkioStats) GetIoMergedRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoMergedRecursive - } - return nil -} - -func (m *BlkioStats) GetIoTimeRecursive() []*BlkioStatsEntry { - if m != nil { - return m.IoTimeRecursive - } - return nil -} - -func (m *BlkioStats) GetSectorsRecursive() []*BlkioStatsEntry { - if m != nil { - return m.SectorsRecursive - } - return nil -} - -type HugetlbStats struct { - Usage uint64 `protobuf:"varint,1,opt,name=usage" json:"usage,omitempty"` - MaxUsage uint64 `protobuf:"varint,2,opt,name=max_usage,json=maxUsage" json:"max_usage,omitempty"` - Failcnt uint64 `protobuf:"varint,3,opt,name=failcnt" json:"failcnt,omitempty"` - Limit uint64 `protobuf:"varint,4,opt,name=limit" json:"limit,omitempty"` -} - -func (m *HugetlbStats) Reset() { *m = HugetlbStats{} } -func (m *HugetlbStats) String() string { return proto.CompactTextString(m) } -func (*HugetlbStats) ProtoMessage() {} -func (*HugetlbStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{39} } - -type CgroupStats struct { - CpuStats *CpuStats `protobuf:"bytes,1,opt,name=cpu_stats,json=cpuStats" json:"cpu_stats,omitempty"` - MemoryStats *MemoryStats `protobuf:"bytes,2,opt,name=memory_stats,json=memoryStats" json:"memory_stats,omitempty"` - BlkioStats *BlkioStats `protobuf:"bytes,3,opt,name=blkio_stats,json=blkioStats" json:"blkio_stats,omitempty"` - HugetlbStats map[string]*HugetlbStats `protobuf:"bytes,4,rep,name=hugetlb_stats,json=hugetlbStats" json:"hugetlb_stats,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - PidsStats *PidsStats `protobuf:"bytes,5,opt,name=pids_stats,json=pidsStats" json:"pids_stats,omitempty"` -} - -func (m *CgroupStats) Reset() { *m = CgroupStats{} } -func (m *CgroupStats) String() string { return proto.CompactTextString(m) } -func (*CgroupStats) ProtoMessage() {} -func (*CgroupStats) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{40} } - -func (m *CgroupStats) GetCpuStats() *CpuStats { - if m != nil { - return m.CpuStats - } - return nil -} - -func (m *CgroupStats) GetMemoryStats() *MemoryStats { - if m != nil { - return m.MemoryStats - } - return nil -} - -func (m *CgroupStats) GetBlkioStats() *BlkioStats { - if m != nil { - return m.BlkioStats - } - return nil -} - -func (m *CgroupStats) GetHugetlbStats() map[string]*HugetlbStats { - if m != nil { - return m.HugetlbStats - } - return nil -} - -func (m *CgroupStats) GetPidsStats() *PidsStats { - if m != nil { - return m.PidsStats - } - return nil -} - -type StatsResponse struct { - NetworkStats []*NetworkStats `protobuf:"bytes,1,rep,name=network_stats,json=networkStats" json:"network_stats,omitempty"` - CgroupStats *CgroupStats `protobuf:"bytes,2,opt,name=cgroup_stats,json=cgroupStats" 
json:"cgroup_stats,omitempty"` - // Tag 3 is deprecated (old uint64 timestamp) - Timestamp *google_protobuf.Timestamp `protobuf:"bytes,4,opt,name=timestamp" json:"timestamp,omitempty"` -} - -func (m *StatsResponse) Reset() { *m = StatsResponse{} } -func (m *StatsResponse) String() string { return proto.CompactTextString(m) } -func (*StatsResponse) ProtoMessage() {} -func (*StatsResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{41} } - -func (m *StatsResponse) GetNetworkStats() []*NetworkStats { - if m != nil { - return m.NetworkStats - } - return nil -} - -func (m *StatsResponse) GetCgroupStats() *CgroupStats { - if m != nil { - return m.CgroupStats - } - return nil -} - -func (m *StatsResponse) GetTimestamp() *google_protobuf.Timestamp { - if m != nil { - return m.Timestamp - } - return nil -} - -type StatsRequest struct { - Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` -} - -func (m *StatsRequest) Reset() { *m = StatsRequest{} } -func (m *StatsRequest) String() string { return proto.CompactTextString(m) } -func (*StatsRequest) ProtoMessage() {} -func (*StatsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{42} } - -func init() { - proto.RegisterType((*GetServerVersionRequest)(nil), "types.GetServerVersionRequest") - proto.RegisterType((*GetServerVersionResponse)(nil), "types.GetServerVersionResponse") - proto.RegisterType((*UpdateProcessRequest)(nil), "types.UpdateProcessRequest") - proto.RegisterType((*UpdateProcessResponse)(nil), "types.UpdateProcessResponse") - proto.RegisterType((*CreateContainerRequest)(nil), "types.CreateContainerRequest") - proto.RegisterType((*CreateContainerResponse)(nil), "types.CreateContainerResponse") - proto.RegisterType((*SignalRequest)(nil), "types.SignalRequest") - proto.RegisterType((*SignalResponse)(nil), "types.SignalResponse") - proto.RegisterType((*AddProcessRequest)(nil), "types.AddProcessRequest") - proto.RegisterType((*Rlimit)(nil), "types.Rlimit") - proto.RegisterType((*User)(nil), "types.User") - proto.RegisterType((*AddProcessResponse)(nil), "types.AddProcessResponse") - proto.RegisterType((*CreateCheckpointRequest)(nil), "types.CreateCheckpointRequest") - proto.RegisterType((*CreateCheckpointResponse)(nil), "types.CreateCheckpointResponse") - proto.RegisterType((*DeleteCheckpointRequest)(nil), "types.DeleteCheckpointRequest") - proto.RegisterType((*DeleteCheckpointResponse)(nil), "types.DeleteCheckpointResponse") - proto.RegisterType((*ListCheckpointRequest)(nil), "types.ListCheckpointRequest") - proto.RegisterType((*Checkpoint)(nil), "types.Checkpoint") - proto.RegisterType((*ListCheckpointResponse)(nil), "types.ListCheckpointResponse") - proto.RegisterType((*StateRequest)(nil), "types.StateRequest") - proto.RegisterType((*ContainerState)(nil), "types.ContainerState") - proto.RegisterType((*Process)(nil), "types.Process") - proto.RegisterType((*Container)(nil), "types.Container") - proto.RegisterType((*Machine)(nil), "types.Machine") - proto.RegisterType((*StateResponse)(nil), "types.StateResponse") - proto.RegisterType((*UpdateContainerRequest)(nil), "types.UpdateContainerRequest") - proto.RegisterType((*UpdateResource)(nil), "types.UpdateResource") - proto.RegisterType((*UpdateContainerResponse)(nil), "types.UpdateContainerResponse") - proto.RegisterType((*EventsRequest)(nil), "types.EventsRequest") - proto.RegisterType((*Event)(nil), "types.Event") - proto.RegisterType((*NetworkStats)(nil), "types.NetworkStats") - proto.RegisterType((*CpuUsage)(nil), "types.CpuUsage") - 
proto.RegisterType((*ThrottlingData)(nil), "types.ThrottlingData") - proto.RegisterType((*CpuStats)(nil), "types.CpuStats") - proto.RegisterType((*PidsStats)(nil), "types.PidsStats") - proto.RegisterType((*MemoryData)(nil), "types.MemoryData") - proto.RegisterType((*MemoryStats)(nil), "types.MemoryStats") - proto.RegisterType((*BlkioStatsEntry)(nil), "types.BlkioStatsEntry") - proto.RegisterType((*BlkioStats)(nil), "types.BlkioStats") - proto.RegisterType((*HugetlbStats)(nil), "types.HugetlbStats") - proto.RegisterType((*CgroupStats)(nil), "types.CgroupStats") - proto.RegisterType((*StatsResponse)(nil), "types.StatsResponse") - proto.RegisterType((*StatsRequest)(nil), "types.StatsRequest") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion2 - -// Client API for API service - -type APIClient interface { - GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) - CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) - UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) - Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) - UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) - AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) - CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) - DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) - ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) - State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) - Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) - Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) -} - -type aPIClient struct { - cc *grpc.ClientConn -} - -func NewAPIClient(cc *grpc.ClientConn) APIClient { - return &aPIClient{cc} -} - -func (c *aPIClient) GetServerVersion(ctx context.Context, in *GetServerVersionRequest, opts ...grpc.CallOption) (*GetServerVersionResponse, error) { - out := new(GetServerVersionResponse) - err := grpc.Invoke(ctx, "/types.API/GetServerVersion", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) CreateContainer(ctx context.Context, in *CreateContainerRequest, opts ...grpc.CallOption) (*CreateContainerResponse, error) { - out := new(CreateContainerResponse) - err := grpc.Invoke(ctx, "/types.API/CreateContainer", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) UpdateContainer(ctx context.Context, in *UpdateContainerRequest, opts ...grpc.CallOption) (*UpdateContainerResponse, error) { - out := new(UpdateContainerResponse) - err := grpc.Invoke(ctx, "/types.API/UpdateContainer", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) Signal(ctx context.Context, in *SignalRequest, opts ...grpc.CallOption) (*SignalResponse, error) { - out := new(SignalResponse) - err := grpc.Invoke(ctx, "/types.API/Signal", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) UpdateProcess(ctx context.Context, in *UpdateProcessRequest, opts ...grpc.CallOption) (*UpdateProcessResponse, error) { - out := new(UpdateProcessResponse) - err := grpc.Invoke(ctx, "/types.API/UpdateProcess", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) AddProcess(ctx context.Context, in *AddProcessRequest, opts ...grpc.CallOption) (*AddProcessResponse, error) { - out := new(AddProcessResponse) - err := grpc.Invoke(ctx, "/types.API/AddProcess", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) CreateCheckpoint(ctx context.Context, in *CreateCheckpointRequest, opts ...grpc.CallOption) (*CreateCheckpointResponse, error) { - out := new(CreateCheckpointResponse) - err := grpc.Invoke(ctx, "/types.API/CreateCheckpoint", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) DeleteCheckpoint(ctx context.Context, in *DeleteCheckpointRequest, opts ...grpc.CallOption) (*DeleteCheckpointResponse, error) { - out := new(DeleteCheckpointResponse) - err := grpc.Invoke(ctx, "/types.API/DeleteCheckpoint", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) ListCheckpoint(ctx context.Context, in *ListCheckpointRequest, opts ...grpc.CallOption) (*ListCheckpointResponse, error) { - out := new(ListCheckpointResponse) - err := grpc.Invoke(ctx, "/types.API/ListCheckpoint", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) State(ctx context.Context, in *StateRequest, opts ...grpc.CallOption) (*StateResponse, error) { - out := new(StateResponse) - err := grpc.Invoke(ctx, "/types.API/State", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -func (c *aPIClient) Events(ctx context.Context, in *EventsRequest, opts ...grpc.CallOption) (API_EventsClient, error) { - stream, err := grpc.NewClientStream(ctx, &_API_serviceDesc.Streams[0], c.cc, "/types.API/Events", opts...) - if err != nil { - return nil, err - } - x := &aPIEventsClient{stream} - if err := x.ClientStream.SendMsg(in); err != nil { - return nil, err - } - if err := x.ClientStream.CloseSend(); err != nil { - return nil, err - } - return x, nil -} - -type API_EventsClient interface { - Recv() (*Event, error) - grpc.ClientStream -} - -type aPIEventsClient struct { - grpc.ClientStream -} - -func (x *aPIEventsClient) Recv() (*Event, error) { - m := new(Event) - if err := x.ClientStream.RecvMsg(m); err != nil { - return nil, err - } - return m, nil -} - -func (c *aPIClient) Stats(ctx context.Context, in *StatsRequest, opts ...grpc.CallOption) (*StatsResponse, error) { - out := new(StatsResponse) - err := grpc.Invoke(ctx, "/types.API/Stats", in, out, c.cc, opts...) 
- if err != nil { - return nil, err - } - return out, nil -} - -// Server API for API service - -type APIServer interface { - GetServerVersion(context.Context, *GetServerVersionRequest) (*GetServerVersionResponse, error) - CreateContainer(context.Context, *CreateContainerRequest) (*CreateContainerResponse, error) - UpdateContainer(context.Context, *UpdateContainerRequest) (*UpdateContainerResponse, error) - Signal(context.Context, *SignalRequest) (*SignalResponse, error) - UpdateProcess(context.Context, *UpdateProcessRequest) (*UpdateProcessResponse, error) - AddProcess(context.Context, *AddProcessRequest) (*AddProcessResponse, error) - CreateCheckpoint(context.Context, *CreateCheckpointRequest) (*CreateCheckpointResponse, error) - DeleteCheckpoint(context.Context, *DeleteCheckpointRequest) (*DeleteCheckpointResponse, error) - ListCheckpoint(context.Context, *ListCheckpointRequest) (*ListCheckpointResponse, error) - State(context.Context, *StateRequest) (*StateResponse, error) - Events(*EventsRequest, API_EventsServer) error - Stats(context.Context, *StatsRequest) (*StatsResponse, error) -} - -func RegisterAPIServer(s *grpc.Server, srv APIServer) { - s.RegisterService(&_API_serviceDesc, srv) -} - -func _API_GetServerVersion_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(GetServerVersionRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).GetServerVersion(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/GetServerVersion", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).GetServerVersion(ctx, req.(*GetServerVersionRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_CreateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).CreateContainer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/CreateContainer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).CreateContainer(ctx, req.(*CreateContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_UpdateContainer_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateContainerRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).UpdateContainer(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/UpdateContainer", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).UpdateContainer(ctx, req.(*UpdateContainerRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_Signal_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(SignalRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).Signal(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/Signal", - } - 
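Setting the handler plumbing aside for a moment, the client surface above is what Docker's libcontainerd layer drives. A sketch of dialing containerd and tailing its event stream; the unix socket path and the dial options are illustrative assumptions, not anything this file prescribes (the context import matches the golang.org/x/net/context package this generation of code was built against):

    package main

    import (
        "fmt"
        "log"
        "net"
        "time"

        "github.com/docker/containerd/api/grpc/types"
        "golang.org/x/net/context"
        "google.golang.org/grpc"
    )

    func main() {
        // Assumed socket path; a real deployment configures its own.
        conn, err := grpc.Dial("/run/containerd/containerd.sock",
            grpc.WithInsecure(),
            grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
                return net.DialTimeout("unix", addr, timeout)
            }))
        if err != nil {
            log.Fatal(err)
        }
        defer conn.Close()

        client := types.NewAPIClient(conn)

        v, err := client.GetServerVersion(context.Background(), &types.GetServerVersionRequest{})
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("containerd %d.%d.%d (%s)\n", v.Major, v.Minor, v.Patch, v.Revision)

        // Events is the one server-streaming method; Recv blocks until the
        // next Event arrives or the stream closes.
        events, err := client.Events(context.Background(), &types.EventsRequest{})
        if err != nil {
            log.Fatal(err)
        }
        for {
            e, err := events.Recv()
            if err != nil {
                log.Fatal(err)
            }
            fmt.Printf("%s: container %s (status %d)\n", e.Type, e.Id, e.Status)
        }
    }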
handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).Signal(ctx, req.(*SignalRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_UpdateProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(UpdateProcessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).UpdateProcess(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/UpdateProcess", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).UpdateProcess(ctx, req.(*UpdateProcessRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_AddProcess_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(AddProcessRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).AddProcess(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/AddProcess", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).AddProcess(ctx, req.(*AddProcessRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_CreateCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(CreateCheckpointRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).CreateCheckpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/CreateCheckpoint", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).CreateCheckpoint(ctx, req.(*CreateCheckpointRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_DeleteCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(DeleteCheckpointRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).DeleteCheckpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/DeleteCheckpoint", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).DeleteCheckpoint(ctx, req.(*DeleteCheckpointRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_ListCheckpoint_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ListCheckpointRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).ListCheckpoint(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/ListCheckpoint", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).ListCheckpoint(ctx, req.(*ListCheckpointRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_State_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := 
new(StateRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).State(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/State", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).State(ctx, req.(*StateRequest)) - } - return interceptor(ctx, in, info, handler) -} - -func _API_Events_Handler(srv interface{}, stream grpc.ServerStream) error { - m := new(EventsRequest) - if err := stream.RecvMsg(m); err != nil { - return err - } - return srv.(APIServer).Events(m, &aPIEventsServer{stream}) -} - -type API_EventsServer interface { - Send(*Event) error - grpc.ServerStream -} - -type aPIEventsServer struct { - grpc.ServerStream -} - -func (x *aPIEventsServer) Send(m *Event) error { - return x.ServerStream.SendMsg(m) -} - -func _API_Stats_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(StatsRequest) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(APIServer).Stats(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/types.API/Stats", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(APIServer).Stats(ctx, req.(*StatsRequest)) - } - return interceptor(ctx, in, info, handler) -} - -var _API_serviceDesc = grpc.ServiceDesc{ - ServiceName: "types.API", - HandlerType: (*APIServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "GetServerVersion", - Handler: _API_GetServerVersion_Handler, - }, - { - MethodName: "CreateContainer", - Handler: _API_CreateContainer_Handler, - }, - { - MethodName: "UpdateContainer", - Handler: _API_UpdateContainer_Handler, - }, - { - MethodName: "Signal", - Handler: _API_Signal_Handler, - }, - { - MethodName: "UpdateProcess", - Handler: _API_UpdateProcess_Handler, - }, - { - MethodName: "AddProcess", - Handler: _API_AddProcess_Handler, - }, - { - MethodName: "CreateCheckpoint", - Handler: _API_CreateCheckpoint_Handler, - }, - { - MethodName: "DeleteCheckpoint", - Handler: _API_DeleteCheckpoint_Handler, - }, - { - MethodName: "ListCheckpoint", - Handler: _API_ListCheckpoint_Handler, - }, - { - MethodName: "State", - Handler: _API_State_Handler, - }, - { - MethodName: "Stats", - Handler: _API_Stats_Handler, - }, - }, - Streams: []grpc.StreamDesc{ - { - StreamName: "Events", - Handler: _API_Events_Handler, - ServerStreams: true, - }, - }, -} - -var fileDescriptor0 = []byte{ - // 2414 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xec, 0x59, 0xcd, 0x73, 0x1b, 0x4b, - 0x11, 0x8f, 0xa4, 0xb5, 0x6c, 0xb5, 0x3e, 0x6c, 0x6f, 0xfc, 0xa1, 0xe8, 0xbd, 0x7c, 0xb0, 0xf5, - 0x80, 0x00, 0xaf, 0x94, 0xa0, 0xbc, 0x57, 0xa4, 0xa0, 0x8a, 0xaa, 0xc4, 0x0e, 0x8f, 0xf0, 0xe2, - 0x3c, 0x65, 0x6d, 0xf3, 0x8e, 0xaa, 0xb5, 0x76, 0x22, 0x2d, 0x5e, 0xed, 0x6e, 0x76, 0x47, 0xb6, - 0x7c, 0xe1, 0xc0, 0x01, 0x6e, 0x70, 0xa5, 0x0a, 0x6e, 0xdc, 0xb8, 0x73, 0x80, 0x7f, 0x80, 0x2a, - 0xfe, 0x10, 0x6e, 0xdc, 0x39, 0xd2, 0xf3, 0xb9, 0xb3, 0xfa, 0xb0, 0x93, 0x03, 0xc5, 0x85, 0x8b, - 0x6a, 0xfa, 0x37, 0x3d, 0xdd, 0x3d, 0x3d, 0xdd, 0x3d, 0xbd, 0x23, 0xa8, 0x79, 0x49, 0xd0, 0x4d, - 0xd2, 0x98, 0xc6, 0xf6, 0x1a, 0xbd, 0x4a, 0x48, 0xd6, 0xb9, 0x3f, 0x8a, 0xe3, 0x51, 0x48, 0x1e, - 0x71, 0xf0, 0x6c, 0xfa, 0xf6, 0x11, 0x0d, 0x26, 0x24, 0xa3, 0xde, 0x24, 0x11, 0x7c, 0xce, 0x1d, - 0xd8, 0xff, 0x82, 
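Before the raw descriptor bytes close out the generated file, note how little a daemon needs in order to expose this service: implement the twelve APIServer methods and hand the result to RegisterAPIServer, which binds it to the _API_serviceDesc table above. A sketch, with the package name and socket handling as assumptions:

    package apiserve // hypothetical wrapper package

    import (
        "net"

        "github.com/docker/containerd/api/grpc/types"
        "google.golang.org/grpc"
    )

    // Serve exposes an APIServer implementation on a unix socket. Each
    // incoming "/types.API/..." call is routed through the matching
    // handler registered in _API_serviceDesc.
    func Serve(srv types.APIServer, socket string) error {
        l, err := net.Listen("unix", socket)
        if err != nil {
            return err
        }
        s := grpc.NewServer()
        types.RegisterAPIServer(s, srv)
        return s.Serve(l)
    }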
0xd0, 0x63, 0x92, 0x5e, 0x90, 0xf4, 0xe7, 0x24, 0xcd, 0x82, 0x38, 0x72, 0xc9, - 0xbb, 0x29, 0xf2, 0x38, 0x33, 0x68, 0x2f, 0x4e, 0x65, 0x49, 0x1c, 0x65, 0xc4, 0xde, 0x81, 0xb5, - 0x89, 0xf7, 0x8b, 0x38, 0x6d, 0x97, 0x1e, 0x94, 0x1e, 0x36, 0x5d, 0x41, 0x70, 0x34, 0x88, 0x10, - 0x2d, 0x4b, 0x94, 0x11, 0x0c, 0x4d, 0x3c, 0x3a, 0x1c, 0xb7, 0x2b, 0x02, 0xe5, 0x84, 0xdd, 0x81, - 0x8d, 0x94, 0x5c, 0x04, 0x4c, 0x6a, 0xdb, 0xc2, 0x89, 0x9a, 0xab, 0x69, 0xe7, 0xd7, 0x25, 0xd8, - 0x39, 0x4d, 0x7c, 0x8f, 0x92, 0x7e, 0x1a, 0x0f, 0x49, 0x96, 0x49, 0x93, 0xec, 0x16, 0x94, 0x03, - 0x9f, 0xeb, 0xac, 0xb9, 0x38, 0xb2, 0xb7, 0xa0, 0x92, 0x20, 0x50, 0xe6, 0x00, 0x1b, 0xda, 0xf7, - 0x00, 0x86, 0x61, 0x9c, 0x91, 0x63, 0xea, 0x07, 0x11, 0xd7, 0xb8, 0xe1, 0x1a, 0x08, 0x33, 0xe6, - 0x32, 0xf0, 0xe9, 0x98, 0xeb, 0x44, 0x63, 0x38, 0x61, 0xef, 0x41, 0x75, 0x4c, 0x82, 0xd1, 0x98, - 0xb6, 0xd7, 0x38, 0x2c, 0x29, 0x67, 0x1f, 0x76, 0xe7, 0xec, 0x10, 0xfb, 0x77, 0xfe, 0x51, 0x86, - 0xbd, 0x83, 0x94, 0xe0, 0xcc, 0x41, 0x1c, 0x51, 0x2f, 0x88, 0x48, 0xba, 0xca, 0x46, 0xb4, 0xe8, - 0x6c, 0x1a, 0xf9, 0x21, 0xe9, 0x7b, 0xa8, 0x56, 0x98, 0x6a, 0x20, 0xdc, 0xe2, 0x31, 0x19, 0x9e, - 0x27, 0x71, 0x10, 0x51, 0x6e, 0x31, 0xce, 0xe7, 0x08, 0xb3, 0x38, 0xe3, 0x9b, 0x11, 0x5e, 0x12, - 0x04, 0xb3, 0x18, 0x07, 0xf1, 0x54, 0x58, 0x5c, 0x73, 0x25, 0x25, 0x71, 0x92, 0xa6, 0xed, 0xaa, - 0xc6, 0x91, 0x62, 0x78, 0xe8, 0x9d, 0x91, 0x30, 0x6b, 0xaf, 0x3f, 0xa8, 0x30, 0x5c, 0x50, 0xf6, - 0x03, 0xa8, 0x47, 0x71, 0x3f, 0xb8, 0x88, 0xa9, 0x1b, 0xc7, 0xb4, 0xbd, 0xc1, 0x1d, 0x66, 0x42, - 0x76, 0x1b, 0xd6, 0xd3, 0x69, 0xc4, 0xe2, 0xa6, 0x5d, 0xe3, 0x22, 0x15, 0xc9, 0xd6, 0xca, 0xe1, - 0xb3, 0x74, 0x94, 0xb5, 0x81, 0x0b, 0x36, 0x21, 0xfb, 0x13, 0x68, 0xe6, 0x3b, 0x39, 0x0c, 0xd2, - 0x76, 0x9d, 0x4b, 0x28, 0x82, 0xce, 0x4b, 0xd8, 0x5f, 0xf0, 0xa5, 0x8c, 0xb3, 0x2e, 0xd4, 0x86, - 0x0a, 0xe4, 0x3e, 0xad, 0xf7, 0xb6, 0xba, 0x3c, 0xb4, 0xbb, 0x39, 0x73, 0xce, 0x82, 0xa2, 0x9a, - 0xc7, 0xc1, 0x28, 0xf2, 0xc2, 0xf7, 0x8f, 0x18, 0xe6, 0x31, 0xbe, 0x44, 0xc6, 0xa7, 0xa4, 0x9c, - 0x2d, 0x68, 0x29, 0x51, 0xf2, 0xd0, 0xff, 0x52, 0x81, 0xed, 0x67, 0xbe, 0x7f, 0x43, 0x4c, 0x62, - 0x60, 0x53, 0x92, 0x62, 0xe8, 0xa3, 0xc4, 0x32, 0x77, 0xa7, 0xa6, 0xed, 0xfb, 0x60, 0x4d, 0x33, - 0xdc, 0x49, 0x85, 0xef, 0xa4, 0x2e, 0x77, 0x72, 0x8a, 0x90, 0xcb, 0x27, 0x6c, 0x1b, 0x2c, 0x8f, - 0xf9, 0xd2, 0xe2, 0xbe, 0xe4, 0x63, 0x66, 0x32, 0x89, 0x2e, 0xf0, 0x9c, 0x19, 0xc4, 0x86, 0x0c, - 0x19, 0x5e, 0xfa, 0xf2, 0x84, 0xd9, 0x50, 0x6d, 0x6b, 0x3d, 0xdf, 0x96, 0x0e, 0x9b, 0x8d, 0xe5, - 0x61, 0x53, 0x5b, 0x11, 0x36, 0x50, 0x08, 0x1b, 0x07, 0x1a, 0x43, 0x2f, 0xf1, 0xce, 0x82, 0x30, - 0xa0, 0x01, 0xc9, 0xf0, 0xfc, 0x98, 0x11, 0x05, 0xcc, 0x7e, 0x08, 0x9b, 0x5e, 0x92, 0x78, 0xe9, - 0x24, 0x4e, 0xd1, 0x35, 0x6f, 0x83, 0x90, 0xb4, 0x1b, 0x5c, 0xc8, 0x3c, 0xcc, 0xa4, 0x65, 0x24, - 0x0c, 0xa2, 0xe9, 0xec, 0x15, 0x8b, 0xbe, 0x76, 0x93, 0xb3, 0x15, 0x30, 0x26, 0x2d, 0x8a, 0x5f, - 0x93, 0xcb, 0x7e, 0x1a, 0x5c, 0xe0, 0x9a, 0x11, 0x2a, 0x6d, 0x71, 0x2f, 0xce, 0xc3, 0xf6, 0xb7, - 0x31, 0x30, 0xc3, 0x60, 0x12, 0xd0, 0xac, 0xbd, 0x89, 0x66, 0xd5, 0x7b, 0x4d, 0xe9, 0x4f, 0x97, - 0xa3, 0xae, 0x9a, 0x75, 0x0e, 0xa1, 0x2a, 0x20, 0xe6, 0x5e, 0xc6, 0x22, 0x4f, 0x8b, 0x8f, 0x19, - 0x96, 0xc5, 0x6f, 0x29, 0x3f, 0x2b, 0xcb, 0xe5, 0x63, 0x86, 0x8d, 0xbd, 0xd4, 0xe7, 0xe7, 0x84, - 0x18, 0x1b, 0x3b, 0x2e, 0x58, 0xec, 0xa0, 0x98, 0xab, 0xa7, 0xf2, 0xc0, 0x9b, 0x2e, 0x1b, 0x32, - 0x64, 0x24, 0x63, 0x0a, 0x11, 0x1c, 0xda, 0xdf, 0x82, 0x96, 0xe7, 0xfb, 0xe8, 0x9e, 0x18, 0x4f, - 0xfd, 0x8b, 0xc0, 0xcf, 0x50, 0x52, 0x05, 
0x27, 0xe7, 0x50, 0x67, 0x07, 0x6c, 0x33, 0xa0, 0x64, - 0x9c, 0xfd, 0xaa, 0xa4, 0x13, 0x42, 0xe7, 0xc9, 0xaa, 0x68, 0xfb, 0x7e, 0xa1, 0x7a, 0x94, 0x79, - 0x5c, 0x6d, 0xab, 0x0c, 0xc9, 0x57, 0x9b, 0x05, 0x65, 0x21, 0x29, 0x2b, 0xcb, 0x92, 0xb2, 0x03, - 0xed, 0x45, 0x1b, 0xa4, 0x81, 0x43, 0xd8, 0x3f, 0x24, 0x21, 0x79, 0x1f, 0xfb, 0xd0, 0x93, 0x91, - 0x87, 0xa5, 0x43, 0x24, 0x1c, 0x1f, 0xbf, 0xbf, 0x01, 0x8b, 0x4a, 0xa4, 0x01, 0x47, 0xb0, 0xfb, - 0x2a, 0xc8, 0xe8, 0xcd, 0xea, 0x17, 0x54, 0x95, 0x97, 0xa9, 0xfa, 0x7d, 0x09, 0x20, 0x97, 0xa5, - 0x6d, 0x2e, 0x19, 0x36, 0x23, 0x46, 0x66, 0x01, 0x95, 0x19, 0xcd, 0xc7, 0xec, 0xdc, 0xe9, 0x30, - 0x91, 0x97, 0x0c, 0x1b, 0xb2, 0x8a, 0x38, 0x8d, 0x82, 0xd9, 0x71, 0x3c, 0x3c, 0x27, 0x34, 0xe3, - 0x15, 0x1b, 0xab, 0xa9, 0x01, 0xf1, 0xb4, 0x1c, 0x93, 0x30, 0xe4, 0x65, 0x7b, 0xc3, 0x15, 0x04, - 0xab, 0xb1, 0x64, 0x92, 0xd0, 0xab, 0xd7, 0xc7, 0x98, 0xd4, 0x2c, 0xc3, 0x14, 0x89, 0x3b, 0xdd, - 0x9b, 0xdf, 0xa9, 0x2c, 0x8d, 0x4f, 0xa0, 0x9e, 0xef, 0x22, 0x43, 0x63, 0x2b, 0xcb, 0x8f, 0xde, - 0xe4, 0x72, 0xee, 0x41, 0xe3, 0x98, 0xe2, 0xa1, 0xae, 0xf0, 0x97, 0xf3, 0x10, 0x5a, 0xba, 0xae, - 0x72, 0x46, 0x51, 0x19, 0x3c, 0x3a, 0xcd, 0x24, 0x97, 0xa4, 0x9c, 0xbf, 0x56, 0x60, 0x5d, 0x06, - 0xae, 0xaa, 0x3e, 0xa5, 0xbc, 0xfa, 0xfc, 0x4f, 0x8a, 0xe0, 0xc7, 0x50, 0xcb, 0xae, 0x32, 0x4a, - 0x26, 0x7d, 0x59, 0x0a, 0x9b, 0x6e, 0x0e, 0xfc, 0xbf, 0x20, 0xe6, 0x05, 0xf1, 0xef, 0x25, 0xa8, - 0xe9, 0x63, 0xfe, 0xe0, 0x86, 0xe5, 0x53, 0xa8, 0x25, 0xe2, 0xe0, 0x89, 0xa8, 0x6b, 0xf5, 0x5e, - 0x4b, 0x2a, 0x52, 0x95, 0x2c, 0x67, 0x30, 0xe2, 0xc7, 0x32, 0xe3, 0xc7, 0x68, 0x48, 0xd6, 0x0a, - 0x0d, 0x09, 0x1e, 0x7e, 0xc2, 0x0a, 0x66, 0x95, 0x17, 0x4c, 0x3e, 0x36, 0x5b, 0x90, 0xf5, 0x42, - 0x0b, 0xe2, 0x7c, 0x0e, 0xeb, 0x47, 0xde, 0x70, 0x8c, 0xfb, 0x60, 0x0b, 0x87, 0x89, 0x0c, 0x53, - 0x5c, 0xc8, 0xc6, 0x4c, 0xc9, 0x84, 0xa0, 0xbf, 0xaf, 0x64, 0x75, 0x97, 0x94, 0x73, 0x8e, 0x6d, - 0x82, 0x48, 0x03, 0x99, 0x4c, 0x8f, 0xb1, 0x8c, 0x2a, 0x87, 0xa8, 0x5c, 0x5a, 0x6c, 0x34, 0x0c, - 0x1e, 0x3c, 0x96, 0xf5, 0x89, 0xd0, 0x2c, 0xab, 0xae, 0xf2, 0x81, 0xb4, 0xc7, 0x55, 0xd3, 0xce, - 0x6f, 0x4a, 0xb0, 0x27, 0xba, 0xc8, 0x1b, 0x7b, 0xc5, 0xe5, 0xdd, 0x89, 0x70, 0x5f, 0xa5, 0xe0, - 0xbe, 0x27, 0x50, 0x4b, 0x49, 0x16, 0x4f, 0x53, 0x74, 0x33, 0xf7, 0x6c, 0xbd, 0xb7, 0xab, 0x32, - 0x89, 0xeb, 0x72, 0xe5, 0xac, 0x9b, 0xf3, 0x39, 0x7f, 0xac, 0x40, 0xab, 0x38, 0xcb, 0x2a, 0xd6, - 0x59, 0x78, 0x1e, 0xc4, 0x5f, 0x8b, 0xf6, 0xb7, 0xc4, 0xdd, 0x64, 0x42, 0x2c, 0xab, 0xd0, 0x97, - 0xc7, 0x78, 0x07, 0xa2, 0x26, 0xe1, 0xc6, 0x1c, 0x90, 0xb3, 0x7d, 0x92, 0x06, 0xb1, 0xba, 0x2e, - 0x73, 0x80, 0x95, 0x01, 0x24, 0xde, 0x4c, 0x63, 0xea, 0x71, 0x23, 0x2d, 0x57, 0xd3, 0xbc, 0xef, - 0xc5, 0x33, 0x22, 0xf4, 0x80, 0x9d, 0xda, 0x9a, 0xec, 0x7b, 0x35, 0x92, 0xcf, 0x1f, 0x91, 0x49, - 0x26, 0xd3, 0xdc, 0x40, 0x98, 0xe5, 0xe2, 0x34, 0x5f, 0xb1, 0xa0, 0xe6, 0x81, 0x81, 0x96, 0x1b, - 0x10, 0x93, 0x20, 0xc8, 0xe3, 0x4b, 0x2f, 0xe1, 0x69, 0x6f, 0xb9, 0x06, 0x82, 0x81, 0xbc, 0x2d, - 0x28, 0xf4, 0x06, 0x7e, 0xe5, 0x78, 0xec, 0x62, 0xe6, 0x65, 0xc0, 0x72, 0x17, 0x27, 0x18, 0xf7, - 0x39, 0x49, 0x23, 0x12, 0x1e, 0x19, 0x5a, 0x41, 0x70, 0x2f, 0x4c, 0xd8, 0x3d, 0xd8, 0x11, 0xe0, - 0xc9, 0x41, 0xdf, 0x5c, 0x50, 0xe7, 0x0b, 0x96, 0xce, 0xb1, 0x6f, 0xb1, 0x85, 0x38, 0x91, 0x17, - 0xde, 0x15, 0x34, 0x5f, 0x5c, 0x10, 0xac, 0xe0, 0x2a, 0x72, 0x9e, 0x42, 0x4d, 0x7f, 0xca, 0xc9, - 0x00, 0xec, 0x74, 0xc5, 0xc7, 0x5e, 0x57, 0x7d, 0xec, 0x75, 0x4f, 0x14, 0x87, 0x9b, 0x33, 0x33, - 0xaf, 0x64, 0x34, 0x4e, 0x89, 0xff, 0x55, 0x14, 0x5e, 0xa9, 0x2f, 
0xa4, 0x1c, 0x91, 0x31, 0x69, - 0xe9, 0x2b, 0xe1, 0x77, 0x25, 0x58, 0xe3, 0xba, 0x97, 0x76, 0x4f, 0x82, 0xbb, 0xac, 0x23, 0xb8, - 0x18, 0xaf, 0x4d, 0x1d, 0xaf, 0x32, 0xb2, 0xad, 0x3c, 0xb2, 0x0b, 0x3b, 0xa8, 0x7e, 0xc0, 0x0e, - 0x9c, 0xdf, 0x96, 0xa1, 0xf1, 0x9a, 0xd0, 0xcb, 0x38, 0x3d, 0x67, 0x59, 0x9c, 0x2d, 0xbd, 0xb0, - 0xef, 0xe0, 0xf7, 0xe5, 0x6c, 0x70, 0x76, 0x45, 0x75, 0xd4, 0xae, 0xa7, 0xb3, 0xe7, 0x8c, 0xb4, - 0xef, 0x02, 0xe0, 0x54, 0xdf, 0x13, 0x97, 0xb4, 0x0c, 0xda, 0x74, 0x26, 0x01, 0xfb, 0x23, 0xa8, - 0xb9, 0xb3, 0x01, 0x16, 0xfb, 0x38, 0xcd, 0x54, 0xd4, 0xa6, 0xb3, 0x17, 0x9c, 0x66, 0x6b, 0x71, - 0xd2, 0x4f, 0xe3, 0x24, 0x21, 0x3e, 0x8f, 0x5a, 0xbe, 0xf6, 0x50, 0x00, 0x4c, 0xeb, 0x89, 0xd2, - 0x5a, 0x15, 0x5a, 0x69, 0xae, 0x15, 0xa7, 0x12, 0xa9, 0x55, 0x84, 0x6b, 0x8d, 0x9a, 0x5a, 0x4f, - 0xb4, 0x56, 0x11, 0xab, 0x1b, 0xd4, 0xd0, 0x7a, 0x92, 0x6b, 0xad, 0xa9, 0xb5, 0x52, 0xab, 0xf3, - 0xe7, 0x12, 0x6c, 0x60, 0xce, 0x9c, 0x66, 0xde, 0x88, 0xe0, 0xf5, 0x5a, 0xa7, 0x98, 0x5f, 0xe1, - 0x60, 0xca, 0x48, 0x99, 0xd1, 0xc0, 0x21, 0xc1, 0xf0, 0x0d, 0x68, 0x24, 0x24, 0xc5, 0x4c, 0x92, - 0x1c, 0x65, 0xac, 0x76, 0x98, 0x39, 0x02, 0x13, 0x2c, 0x5d, 0xb8, 0xcd, 0xe7, 0x06, 0x41, 0x34, - 0x10, 0xa1, 0x3a, 0x89, 0x7d, 0x22, 0x5d, 0xb5, 0xcd, 0xa7, 0x5e, 0x46, 0x5f, 0xea, 0x09, 0xfb, - 0xbb, 0xb0, 0xad, 0xf9, 0xd9, 0x15, 0xce, 0xb9, 0x85, 0xeb, 0x36, 0x25, 0xf7, 0xa9, 0x84, 0x9d, - 0x5f, 0x42, 0xeb, 0x64, 0x8c, 0xe7, 0x4b, 0xf1, 0x8e, 0x1b, 0x1d, 0x7a, 0x58, 0x09, 0xb0, 0xbc, - 0x27, 0xbc, 0x5e, 0x64, 0xd2, 0x5a, 0x45, 0xda, 0xdf, 0x83, 0x6d, 0x2a, 0x78, 0x89, 0x3f, 0x50, - 0x3c, 0xe2, 0x34, 0xb7, 0xf4, 0x44, 0x5f, 0x32, 0x7f, 0x13, 0x5a, 0x39, 0x33, 0xbf, 0x2c, 0x84, - 0xbd, 0x4d, 0x8d, 0xb2, 0x68, 0x72, 0xfe, 0x20, 0x9c, 0x25, 0x22, 0xe7, 0x53, 0x5e, 0xbe, 0x0c, - 0x57, 0xd5, 0x7b, 0x9b, 0xaa, 0xec, 0x4b, 0x67, 0xf0, 0x92, 0x25, 0xdc, 0xf2, 0x63, 0xd8, 0xa4, - 0xda, 0xf4, 0x01, 0x66, 0xaa, 0x27, 0x53, 0x4f, 0x95, 0xde, 0xe2, 0xc6, 0xdc, 0x16, 0x2d, 0x6e, - 0x14, 0x3d, 0x2f, 0xfa, 0x11, 0xa9, 0x50, 0xd8, 0x57, 0x17, 0x18, 0x57, 0xe1, 0xfc, 0x08, 0x6a, - 0xd8, 0xac, 0x64, 0xc2, 0x3a, 0x74, 0xcc, 0x70, 0x9a, 0xa6, 0x98, 0x7b, 0xca, 0x31, 0x92, 0x64, - 0xcd, 0x0c, 0xbf, 0xcb, 0xa5, 0x33, 0x04, 0xe1, 0xc4, 0x00, 0xa2, 0x9e, 0x70, 0x6d, 0xc8, 0x63, - 0x86, 0x80, 0x20, 0x58, 0x9c, 0x4d, 0xbc, 0x99, 0x3e, 0x7a, 0x1e, 0x67, 0x08, 0x88, 0x0d, 0xa2, - 0xc2, 0xb7, 0x5e, 0x10, 0x0e, 0xe5, 0x43, 0x04, 0x2a, 0x94, 0x64, 0xae, 0xd0, 0x32, 0x15, 0xfe, - 0xa9, 0x0c, 0x75, 0xa1, 0x51, 0x18, 0x8c, 0x5c, 0x43, 0xbc, 0xf5, 0xb4, 0x4a, 0x4e, 0x60, 0x5f, - 0xb2, 0x96, 0xab, 0xcb, 0x7b, 0xd4, 0xdc, 0x54, 0x65, 0x1b, 0xde, 0xc2, 0x19, 0x16, 0x66, 0xc3, - 0x3b, 0x4b, 0xb9, 0x6b, 0x8c, 0x49, 0x18, 0xfc, 0x19, 0x34, 0x44, 0x7c, 0xca, 0x35, 0xd6, 0xaa, - 0x35, 0x75, 0xc1, 0x26, 0x56, 0x3d, 0x61, 0xad, 0x20, 0xda, 0xcb, 0x5b, 0x8f, 0x7a, 0xef, 0x6e, - 0x81, 0x9d, 0xef, 0xa4, 0xcb, 0x7f, 0x5f, 0x44, 0x14, 0xef, 0x00, 0xc1, 0xdb, 0x79, 0x0a, 0x90, - 0x83, 0xac, 0x9e, 0x9d, 0x93, 0x2b, 0xd5, 0xf2, 0xe2, 0x90, 0xed, 0xfd, 0xc2, 0x0b, 0xa7, 0xca, - 0xa9, 0x82, 0xf8, 0x61, 0xf9, 0x69, 0x09, 0x3f, 0x97, 0x36, 0x9f, 0xb3, 0x0b, 0xd5, 0x58, 0x5e, - 0x78, 0x3f, 0xb3, 0x96, 0xbe, 0x9f, 0x59, 0xea, 0xfd, 0x0c, 0x4b, 0x6c, 0x9c, 0xc8, 0xeb, 0x1f, - 0x47, 0xb9, 0x22, 0xcb, 0x50, 0xe4, 0xfc, 0xd3, 0x02, 0xc8, 0xb5, 0xd8, 0xc7, 0xd0, 0x09, 0xe2, - 0x01, 0xbb, 0xbd, 0x82, 0x21, 0x11, 0x05, 0x69, 0x90, 0x12, 0x0c, 0x9f, 0x2c, 0xb8, 0x20, 0xb2, - 0xc1, 0xd9, 0x93, 0xfb, 0x9e, 0x33, 0xce, 0xdd, 0x47, 0x4a, 0x2c, 0xe4, 0x95, 0xcb, 0x55, 
0xcb, - 0xec, 0x9f, 0xc1, 0x6e, 0x2e, 0xd4, 0x37, 0xe4, 0x95, 0xaf, 0x95, 0x77, 0x5b, 0xcb, 0xf3, 0x73, - 0x59, 0x3f, 0x01, 0x84, 0x07, 0x78, 0x99, 0x4d, 0x0b, 0x92, 0x2a, 0xd7, 0x4a, 0xda, 0x0e, 0xe2, - 0x37, 0x7c, 0x45, 0x2e, 0xe7, 0x0d, 0xdc, 0x31, 0x36, 0xca, 0xd2, 0xde, 0x90, 0x66, 0x5d, 0x2b, - 0x6d, 0x4f, 0xdb, 0xc5, 0x0a, 0x43, 0x2e, 0xf2, 0x4b, 0xc0, 0x99, 0xc1, 0xa5, 0x17, 0xd0, 0x79, - 0x79, 0x6b, 0x37, 0xed, 0xf3, 0x6b, 0x5c, 0x54, 0x14, 0x26, 0xf6, 0x39, 0x21, 0xe9, 0xa8, 0xb0, - 0xcf, 0xea, 0x4d, 0xfb, 0x3c, 0xe2, 0x2b, 0x72, 0x39, 0xcf, 0x01, 0xc1, 0x79, 0x7b, 0xd6, 0xaf, - 0x95, 0xb2, 0x19, 0xc4, 0x45, 0x5b, 0x0e, 0x60, 0x3b, 0x23, 0x43, 0xbc, 0xea, 0xcd, 0x58, 0xd8, - 0xb8, 0x56, 0xc6, 0x96, 0x5c, 0xa0, 0x85, 0x38, 0xef, 0xa0, 0xf1, 0xd3, 0xe9, 0x88, 0xd0, 0xf0, - 0x4c, 0xe7, 0xfc, 0x7f, 0xbb, 0xcc, 0xfc, 0x1b, 0xcb, 0xcc, 0xc1, 0x28, 0x8d, 0xa7, 0x49, 0xa1, - 0x6a, 0x8b, 0x1c, 0x5e, 0xa8, 0xda, 0x9c, 0x87, 0x57, 0x6d, 0xc1, 0xfd, 0x39, 0x34, 0x44, 0x37, - 0x27, 0x17, 0x88, 0x2a, 0x64, 0x2f, 0x26, 0xbd, 0xea, 0x1e, 0xc5, 0xb2, 0x9e, 0xec, 0x8c, 0xe5, - 0xaa, 0x62, 0x35, 0xca, 0xdd, 0x84, 0x9f, 0x46, 0x79, 0xd6, 0xbd, 0x84, 0xe6, 0x58, 0xf8, 0x46, - 0xae, 0x12, 0x01, 0xf8, 0x89, 0x32, 0x2e, 0xdf, 0x43, 0xd7, 0xf4, 0xa1, 0x70, 0x75, 0x63, 0x6c, - 0xba, 0xf5, 0x11, 0x00, 0xfb, 0xf6, 0x19, 0xa8, 0x42, 0x65, 0x3e, 0x7d, 0xea, 0x1b, 0x02, 0x3f, - 0xb4, 0xd4, 0xb0, 0x73, 0x02, 0xdb, 0x0b, 0x32, 0x97, 0x94, 0xa9, 0xef, 0x98, 0x65, 0xaa, 0xde, - 0xbb, 0x2d, 0x45, 0x9a, 0x4b, 0xcd, 0xda, 0xf5, 0xb7, 0x92, 0xf8, 0x54, 0xd2, 0xaf, 0x53, 0xd8, - 0xb7, 0x35, 0x23, 0xd1, 0x7c, 0xe9, 0x03, 0xa8, 0x18, 0x82, 0xcc, 0xc6, 0xcc, 0x6d, 0x44, 0x66, - 0x9b, 0x86, 0x07, 0x31, 0xe4, 0x1e, 0x58, 0x7a, 0x10, 0x86, 0x73, 0xdc, 0xfa, 0xd0, 0x38, 0xed, - 0x42, 0xa3, 0x68, 0x7d, 0x48, 0xa3, 0x28, 0x5f, 0x3b, 0x56, 0x3d, 0xd5, 0xf6, 0xfe, 0x55, 0x85, - 0xca, 0xb3, 0xfe, 0x4b, 0xfb, 0x14, 0xb6, 0xe6, 0xff, 0xe9, 0xb0, 0xef, 0x49, 0xb3, 0x56, 0xfc, - 0x3b, 0xd2, 0xb9, 0xbf, 0x72, 0x5e, 0xb6, 0xec, 0xb7, 0x6c, 0x17, 0x36, 0xe7, 0xde, 0xb5, 0x6d, - 0x75, 0xd5, 0x2c, 0xff, 0xef, 0xa0, 0x73, 0x6f, 0xd5, 0xb4, 0x29, 0x73, 0xee, 0x1b, 0x41, 0xcb, - 0x5c, 0xfe, 0x8d, 0xa9, 0x65, 0xae, 0xfa, 0xb4, 0xb8, 0x65, 0xff, 0x00, 0xaa, 0xe2, 0xa5, 0xdb, - 0xde, 0x91, 0xbc, 0x85, 0x37, 0xf4, 0xce, 0xee, 0x1c, 0xaa, 0x17, 0xbe, 0x82, 0x66, 0xe1, 0xef, - 0x11, 0xfb, 0xa3, 0x82, 0xae, 0xe2, 0x43, 0x79, 0xe7, 0xe3, 0xe5, 0x93, 0x5a, 0xda, 0x01, 0x40, - 0xfe, 0x18, 0x6a, 0xb7, 0x25, 0xf7, 0xc2, 0x83, 0x7b, 0xe7, 0xce, 0x92, 0x19, 0x2d, 0x04, 0x8f, - 0x72, 0xfe, 0xd9, 0xd2, 0x9e, 0xf3, 0xea, 0xfc, 0xa3, 0xa1, 0x3e, 0xca, 0x95, 0xef, 0x9d, 0x5c, - 0xec, 0xfc, 0x63, 0xa4, 0x16, 0xbb, 0xe2, 0x29, 0x54, 0x8b, 0x5d, 0xf9, 0x8a, 0x79, 0xcb, 0xfe, - 0x0a, 0x5a, 0xc5, 0xd7, 0x3d, 0x5b, 0x39, 0x69, 0xe9, 0xf3, 0x66, 0xe7, 0xee, 0x8a, 0x59, 0x2d, - 0xf0, 0x33, 0x58, 0x13, 0xcf, 0x76, 0x2a, 0x1d, 0xcd, 0xd7, 0xbe, 0xce, 0x4e, 0x11, 0xd4, 0xab, - 0x1e, 0x43, 0x55, 0x7c, 0x5d, 0xea, 0x00, 0x28, 0x7c, 0x6c, 0x76, 0x1a, 0x26, 0xea, 0xdc, 0x7a, - 0x5c, 0x52, 0x7a, 0xb2, 0x82, 0x9e, 0x6c, 0x99, 0x1e, 0xe3, 0x70, 0xce, 0xaa, 0x3c, 0x5d, 0x9f, - 0xfc, 0x27, 0x00, 0x00, 0xff, 0xff, 0xc9, 0x06, 0x1e, 0xda, 0xa8, 0x1c, 0x00, 0x00, -} diff --git a/vendor/src/github.com/docker/containerd/api/grpc/types/api.proto b/vendor/src/github.com/docker/containerd/api/grpc/types/api.proto deleted file mode 100644 index 170510131e..0000000000 --- a/vendor/src/github.com/docker/containerd/api/grpc/types/api.proto +++ /dev/null @@ -1,320 +0,0 @@ -syntax = "proto3"; - 
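The api.pb.go removed above is machine-generated from the api.proto that follows; neither should be edited by hand. This patch does not record the exact invocation containerd used, but with the protoc-gen-go of this era a directive along these lines, kept beside the proto, would regenerate it (the include path for google/protobuf/timestamp.proto is an assumption):

    package types

    // plugins=grpc makes protoc-gen-go emit the APIClient/APIServer
    // bindings seen above alongside the plain message types.
    //go:generate protoc -I . -I /usr/local/include --go_out=plugins=grpc:. api.proto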
-package types; - -import "google/protobuf/timestamp.proto"; - -service API { - rpc GetServerVersion(GetServerVersionRequest) returns (GetServerVersionResponse) {} - rpc CreateContainer(CreateContainerRequest) returns (CreateContainerResponse) {} - rpc UpdateContainer(UpdateContainerRequest) returns (UpdateContainerResponse) {} - rpc Signal(SignalRequest) returns (SignalResponse) {} - rpc UpdateProcess(UpdateProcessRequest) returns (UpdateProcessResponse) {} - rpc AddProcess(AddProcessRequest) returns (AddProcessResponse) {} - rpc CreateCheckpoint(CreateCheckpointRequest) returns (CreateCheckpointResponse) {} - rpc DeleteCheckpoint(DeleteCheckpointRequest) returns (DeleteCheckpointResponse) {} - rpc ListCheckpoint(ListCheckpointRequest) returns (ListCheckpointResponse) {} - rpc State(StateRequest) returns (StateResponse) {} - rpc Events(EventsRequest) returns (stream Event) {} - rpc Stats(StatsRequest) returns (StatsResponse) {} -} - -message GetServerVersionRequest { -} - -message GetServerVersionResponse { - uint32 major = 1; - uint32 minor = 2; - uint32 patch = 3; - string revision = 4; -} - -message UpdateProcessRequest { - string id = 1; - string pid = 2; - bool closeStdin = 3; // Close stdin of the container - uint32 width = 4; - uint32 height = 5; -} - -message UpdateProcessResponse { -} - -message CreateContainerRequest { - string id = 1; // ID of container - string bundlePath = 2; // path to OCI bundle - string checkpoint = 3; // checkpoint name if you want to create immediate checkpoint (optional) - string stdin = 4; // path to the file where stdin will be read (optional) - string stdout = 5; // path to file where stdout will be written (optional) - string stderr = 6; // path to file where stderr will be written (optional) - repeated string labels = 7; - bool noPivotRoot = 8; - string runtime = 9; - repeated string runtimeArgs = 10; - string checkpointDir = 11; // Directory where checkpoints are stored -} - -message CreateContainerResponse { - Container container = 1; -} - -message SignalRequest { - string id = 1; // ID of container - string pid = 2; // PID of process inside container - uint32 signal = 3; // Signal which will be sent, you can find value in "man 7 signal" -} - -message SignalResponse { -} - -message AddProcessRequest { - string id = 1; // ID of container - bool terminal = 2; // Use tty for container stdio - User user = 3; // User under which process will be run - repeated string args = 4; // Arguments for process, first is binary path itself - repeated string env = 5; // List of environment variables for process - string cwd = 6; // Workind directory of process - string pid = 7; // Process ID - string stdin = 8; // path to the file where stdin will be read (optional) - string stdout = 9; // path to file where stdout will be written (optional) - string stderr = 10; // path to file where stderr will be written (optional) - repeated string capabilities = 11; - string apparmorProfile = 12; - string selinuxLabel = 13; - bool noNewPrivileges = 14; - repeated Rlimit rlimits = 15; -} - -message Rlimit { - string type = 1; - uint64 soft = 2; - uint64 hard = 3; -} - -message User { - uint32 uid = 1; // UID of user - uint32 gid = 2; // GID of user - repeated uint32 additionalGids = 3; // Additional groups to which user will be added -} - -message AddProcessResponse { -} - -message CreateCheckpointRequest { - string id = 1; // ID of container - Checkpoint checkpoint = 2; // Checkpoint configuration - string checkpointDir = 3; // Directory where checkpoints are stored -} - 
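CreateCheckpointRequest pairs with the Checkpoint message defined a little further down. A sketch of assembling one from the Go side; the struct field names are assumed from protoc-gen-go's usual camel-casing of the proto fields, and the name, id, and directory values are placeholders:

    package checkpoints // hypothetical helper package

    import "github.com/docker/containerd/api/grpc/types"

    // newCheckpointRequest builds a checkpoint request for a running
    // container. Exit: false asks containerd to keep the container
    // running after the checkpoint is taken.
    func newCheckpointRequest(containerID string) *types.CreateCheckpointRequest {
        return &types.CreateCheckpointRequest{
            Id: containerID,
            Checkpoint: &types.Checkpoint{
                Name:        "pre-upgrade", // placeholder checkpoint name
                Exit:        false,
                Tcp:         true,  // allow open tcp connections
                UnixSockets: true,  // allow external unix sockets
                Shell:       false,
            },
            CheckpointDir: "/tmp/checkpoints", // placeholder directory
        }
    }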
-message CreateCheckpointResponse { -} - -message DeleteCheckpointRequest { - string id = 1; // ID of container - string name = 2; // Name of checkpoint - string checkpointDir = 3; // Directory where checkpoints are stored -} - -message DeleteCheckpointResponse { -} - -message ListCheckpointRequest { - string id = 1; // ID of container - string checkpointDir = 2; // Directory where checkpoints are stored -} - -message Checkpoint { - string name = 1; // Name of checkpoint - bool exit = 2; // checkpoint configuration: should container exit on checkpoint or not - bool tcp = 3; // allow open tcp connections - bool unixSockets = 4; // allow external unix sockets - bool shell = 5; // allow shell-jobs - repeated string emptyNS = 6; -} - -message ListCheckpointResponse { - repeated Checkpoint checkpoints = 1; // List of checkpoints -} - -message StateRequest { - string id = 1; // container id for a single container -} - -message ContainerState { - string status = 1; -} - -message Process { - string pid = 1; - bool terminal = 2; // Use tty for container stdio - User user = 3; // User under which process will be run - repeated string args = 4; // Arguments for process, first is binary path itself - repeated string env = 5; // List of environment variables for process - string cwd = 6; // Workind directory of process - uint32 systemPid = 7; - string stdin = 8; // path to the file where stdin will be read (optional) - string stdout = 9; // path to file where stdout will be written (optional) - string stderr = 10; // path to file where stderr will be written (optional) - repeated string capabilities = 11; - string apparmorProfile = 12; - string selinuxLabel = 13; - bool noNewPrivileges = 14; - repeated Rlimit rlimits = 15; -} - -message Container { - string id = 1; // ID of container - string bundlePath = 2; // Path to OCI bundle - repeated Process processes = 3; // List of processes which run in container - string status = 4; // Container status ("running", "paused", etc.) 
- repeated string labels = 5; - repeated uint32 pids = 6; - string runtime = 7; // runtime used to execute the container -} - -// Machine is information about machine on which containerd is run -message Machine { - uint32 cpus = 1; // number of cpus - uint64 memory = 2; // amount of memory -} - -// StateResponse is information about containerd daemon -message StateResponse { - repeated Container containers = 1; - Machine machine = 2; -} - -message UpdateContainerRequest { - string id = 1; // ID of container - string pid = 2; - string status = 3; // Status to which containerd will try to change - UpdateResource resources =4; -} - -message UpdateResource { - uint64 blkioWeight =1; - uint64 cpuShares = 2; - uint64 cpuPeriod = 3; - uint64 cpuQuota = 4; - string cpusetCpus = 5; - string cpusetMems = 6; - uint64 memoryLimit = 7; - uint64 memorySwap = 8; - uint64 memoryReservation = 9; - uint64 kernelMemoryLimit = 10; - uint64 kernelTCPMemoryLimit = 11; -} - -message UpdateContainerResponse { -} - -message EventsRequest { - // Tag 1 is deprecated (old uint64 timestamp) - google.protobuf.Timestamp timestamp = 2; - bool storedOnly = 3; - string id = 4; -} - -message Event { - string type = 1; - string id = 2; - uint32 status = 3; - string pid = 4; - // Tag 5 is deprecated (old uint64 timestamp) - google.protobuf.Timestamp timestamp = 6; -} - -message NetworkStats { - string name = 1; // name of network interface - uint64 rx_bytes = 2; - uint64 rx_Packets = 3; - uint64 Rx_errors = 4; - uint64 Rx_dropped = 5; - uint64 Tx_bytes = 6; - uint64 Tx_packets = 7; - uint64 Tx_errors = 8; - uint64 Tx_dropped = 9; -} - -message CpuUsage { - uint64 total_usage = 1; - repeated uint64 percpu_usage = 2; - uint64 usage_in_kernelmode = 3; - uint64 usage_in_usermode = 4; -} - -message ThrottlingData { - uint64 periods = 1; - uint64 throttled_periods = 2; - uint64 throttled_time = 3; -} - -message CpuStats { - CpuUsage cpu_usage = 1; - ThrottlingData throttling_data = 2; - uint64 system_usage = 3; -} - -message PidsStats { - uint64 current = 1; - uint64 limit = 2; -} - -message MemoryData { - uint64 usage = 1; - uint64 max_usage = 2; - uint64 failcnt = 3; - uint64 limit = 4; -} - -message MemoryStats { - uint64 cache = 1; - MemoryData usage = 2; - MemoryData swap_usage = 3; - MemoryData kernel_usage = 4; - map<string, uint64> stats = 5; -} - -message BlkioStatsEntry { - uint64 major = 1; - uint64 minor = 2; - string op = 3; - uint64 value = 4; -} - -message BlkioStats { - repeated BlkioStatsEntry io_service_bytes_recursive = 1; // number of bytes transferred to and from the block device - repeated BlkioStatsEntry io_serviced_recursive = 2; - repeated BlkioStatsEntry io_queued_recursive = 3; - repeated BlkioStatsEntry io_service_time_recursive = 4; - repeated BlkioStatsEntry io_wait_time_recursive = 5; - repeated BlkioStatsEntry io_merged_recursive = 6; - repeated BlkioStatsEntry io_time_recursive = 7; - repeated BlkioStatsEntry sectors_recursive = 8; -} - -message HugetlbStats { - uint64 usage = 1; - uint64 max_usage = 2; - uint64 failcnt = 3; - uint64 limit = 4; -} - -message CgroupStats { - CpuStats cpu_stats = 1; - MemoryStats memory_stats = 2; - BlkioStats blkio_stats = 3; - map<string, HugetlbStats> hugetlb_stats = 4; // the map is in the format "size of hugepage: stats of the hugepage" - PidsStats pids_stats = 5; -} - -message StatsResponse { - repeated NetworkStats network_stats = 1; - CgroupStats cgroup_stats = 2; - // Tag 3 is deprecated (old uint64 timestamp) - google.protobuf.Timestamp timestamp = 4; -}; - -message StatsRequest { - string id = 
1; -} diff --git a/vendor/src/github.com/docker/distribution/.gitignore b/vendor/src/github.com/docker/distribution/.gitignore deleted file mode 100644 index 1c3ae0a773..0000000000 --- a/vendor/src/github.com/docker/distribution/.gitignore +++ /dev/null @@ -1,37 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof - -# never checkin from the bin file (for now) -bin/* - -# Test key files -*.pem - -# Cover profiles -*.out - -# Editor/IDE specific files. -*.sublime-project -*.sublime-workspace diff --git a/vendor/src/github.com/docker/distribution/.mailmap b/vendor/src/github.com/docker/distribution/.mailmap deleted file mode 100644 index d991060198..0000000000 --- a/vendor/src/github.com/docker/distribution/.mailmap +++ /dev/null @@ -1,18 +0,0 @@ -Stephen J Day Stephen Day -Stephen J Day Stephen Day -Olivier Gambier Olivier Gambier -Brian Bland Brian Bland -Brian Bland Brian Bland -Josh Hawn Josh Hawn -Richard Scothern Richard -Richard Scothern Richard Scothern -Andrew Meredith Andrew Meredith -harche harche -Jessie Frazelle -Sharif Nassar Sharif Nassar -Sven Dowideit Sven Dowideit -Vincent Giersch Vincent Giersch -davidli davidli -Omer Cohen Omer Cohen -Eric Yang Eric Yang -Nikita Tarasov Nikita diff --git a/vendor/src/github.com/docker/distribution/AUTHORS b/vendor/src/github.com/docker/distribution/AUTHORS deleted file mode 100644 index 9e80e062bb..0000000000 --- a/vendor/src/github.com/docker/distribution/AUTHORS +++ /dev/null @@ -1,147 +0,0 @@ -Aaron Lehmann -Aaron Schlesinger -Aaron Vinson -Adam Enger -Adrian Mouat -Ahmet Alp Balkan -Alex Chan -Alex Elman -Alexey Gladkov -allencloud -amitshukla -Amy Lindburg -Andrew Hsu -Andrew Meredith -Andrew T Nguyen -Andrey Kostov -Andy Goldstein -Anis Elleuch -Anton Tiurin -Antonio Mercado -Antonio Murdaca -Arien Holthuizen -Arnaud Porterie -Arthur Baars -Asuka Suzuki -Avi Miller -Ayose Cazorla -BadZen -Ben Firshman -bin liu -Brian Bland -burnettk -Carson A -Chris Dillon -cyli -Daisuke Fujita -Daniel Huhn -Darren Shepherd -Dave Trombley -Dave Tucker -David Lawrence -David Verhasselt -David Xia -davidli -Dejan Golja -Derek McGowan -Diogo Mónica -DJ Enriquez -Donald Huang -Doug Davis -Eric Yang -Fabio Huser -farmerworking -Felix Yan -Florentin Raud -Frederick F. 
Kautz IV -gabriell nascimento -Gleb Schukin -harche -Henri Gomez -Hu Keping -Hua Wang -HuKeping -Ian Babrou -igayoso -Jack Griffin -Jason Freidman -Jeff Nickoloff -Jessie Frazelle -jhaohai -Jianqing Wang -John Starks -Jon Johnson -Jon Poler -Jonathan Boulle -Jordan Liggitt -Josh Hawn -Julien Fernandez -Ke Xu -Keerthan Mala -Kelsey Hightower -Kenneth Lim -Kenny Leung -Li Yi -Liu Hua -liuchang0812 -Louis Kottmann -Luke Carpenter -Mary Anthony -Matt Bentley -Matt Duch -Matt Moore -Matt Robenolt -Michael Prokop -Michal Minar -Miquel Sabaté -Morgan Bauer -moxiegirl -Nathan Sullivan -nevermosby -Nghia Tran -Nikita Tarasov -Nuutti Kotivuori -Oilbeater -Olivier Gambier -Olivier Jacques -Omer Cohen -Patrick Devine -Phil Estes -Philip Misiowiec -Richard Scothern -Rodolfo Carvalho -Rusty Conover -Sean Boran -Sebastiaan van Stijn -Serge Dubrouski -Sharif Nassar -Shawn Falkner-Horine -Shreyas Karnik -Simon Thulbourn -Spencer Rinehart -Stefan Majewsky -Stefan Weil -Stephen J Day -Sungho Moon -Sven Dowideit -Sylvain Baubeau -Ted Reed -tgic -Thomas Sjögren -Tianon Gravi -Tibor Vass -Tonis Tiigi -Tony Holdstock-Brown -Trevor Pounds -Troels Thomsen -Vincent Batts -Vincent Demeester -Vincent Giersch -W. Trevor King -weiyuan.yl -xg.song -xiekeyang -Yann ROBERT -yuzou -zhouhaibing089 -姜继忠 diff --git a/vendor/src/github.com/docker/distribution/BUILDING.md b/vendor/src/github.com/docker/distribution/BUILDING.md deleted file mode 100644 index d9577022b6..0000000000 --- a/vendor/src/github.com/docker/distribution/BUILDING.md +++ /dev/null @@ -1,119 +0,0 @@ - -# Building the registry source - -## Use-case - -This is useful if you intend to actively work on the registry. - -### Alternatives - -Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/). - -People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`. - -OS X users who want to run natively can do so following [the instructions here](osx-setup-guide.md). - -### Gotchas - -You are expected to know your way around with go & git. - -If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you. - -## Build the development environment - -The first prerequisite of properly building distribution targets is to have a Go -development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html) -for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the -environment. - -If a Go development environment is setup, one can use `go get` to install the -`registry` command from the current latest: - - go get github.com/docker/distribution/cmd/registry - -The above will install the source repository into the `GOPATH`. - -Now create the directory for the registry data (this might require you to set permissions properly) - - mkdir -p /var/lib/registry - -... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location. - -The `registry` -binary can then be run with the following: - - $ $GOPATH/bin/registry --version - $GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown - -> __NOTE:__ While you do not need to use `go get` to checkout the distribution -> project, for these build instructions to work, the project must be checked -> out in the correct location in the `GOPATH`. 
This should almost always be -> `$GOPATH/src/github.com/docker/distribution`. - -The registry can be run with the default config using the following -incantation: - - $ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml - INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown - INFO[0000] debug server listening localhost:5001 - -If it is working, one should see the above log messages. - -### Repeatable Builds - -For the full development experience, one should `cd` into -`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go` -commands, such as `go test`, should work per package (please see -[Developing](#developing) if they don't work). - -A `Makefile` has been provided as a convenience to support repeatable builds. -Please install the following into `GOPATH` for it to work: - - go get github.com/tools/godep github.com/golang/lint/golint - -**TODO(stevvooe):** Add a `make setup` command to Makefile to run this. Have to think about how to interact with Godeps properly. - -Once these commands are available in the `GOPATH`, run `make` to get a full -build: - - $ make - + clean - + fmt - + vet - + lint - + build - github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar - github.com/Sirupsen/logrus - github.com/docker/libtrust - ... - github.com/yvasiyarov/gorelic - github.com/docker/distribution/registry/handlers - github.com/docker/distribution/cmd/registry - + test - ... - ok github.com/docker/distribution/digest 7.875s - ok github.com/docker/distribution/manifest 0.028s - ok github.com/docker/distribution/notifications 17.322s - ? github.com/docker/distribution/registry [no test files] - ok github.com/docker/distribution/registry/api/v2 0.101s - ? github.com/docker/distribution/registry/auth [no test files] - ok github.com/docker/distribution/registry/auth/silly 0.011s - ... - + /Users/sday/go/src/github.com/docker/distribution/bin/registry - + /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template - + binaries - -The above provides a repeatable build using the contents of the vendored -Godeps directory. This includes formatting, vetting, linting, building, -testing and generating tagged binaries. We can verify this worked by running -the registry binary generated in the "./bin" directory: - - $ ./bin/registry -version - ./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m - -### Optional build tags - -Optional [build tags](http://golang.org/pkg/go/build/) can be provided using -the environment variable `DOCKER_BUILDTAGS`. 
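For illustration only: `DOCKER_BUILDTAGS` works because the project's Makefile (included later in this patch) passes it straight through to `go build -tags`. A minimal, hypothetical sketch of a tag-gated file — the package name and constant are invented for the example, not taken from the distribution codebase:

```go
// +build include_oss

// This file is compiled only when the "include_oss" build tag is set,
// for example via:
//
//     DOCKER_BUILDTAGS="include_oss include_gcs" make binaries
//
// which the Makefile forwards as `go build -tags "$DOCKER_BUILDTAGS"`.
package storage

// ossEnabled is a hypothetical marker; a real driver file would register
// itself from an init() function instead.
const ossEnabled = true
```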
diff --git a/vendor/src/github.com/docker/distribution/CHANGELOG.md b/vendor/src/github.com/docker/distribution/CHANGELOG.md deleted file mode 100644 index 3445c090c2..0000000000 --- a/vendor/src/github.com/docker/distribution/CHANGELOG.md +++ /dev/null @@ -1,35 +0,0 @@ -# Changelog - -## 2.5.0 (2016-06-14) - -### Storage -- Ensure uploads directory is cleaned after upload is committed -- Add ability to cap concurrent operations in filesystem driver -- S3: Add 'us-gov-west-1' to the valid region list -- Swift: Handle ceph not returning Last-Modified header for HEAD requests -- Add redirect middleware - -### Registry -- Add support for blobAccessController middleware -- Add support for layers from foreign sources -- Remove signature store -- Add support for Let's Encrypt -- Correct yaml key names in configuration - -### Client -- Add option to get content digest from manifest get - -### Spec -- Update the auth spec scope grammar to reflect the fact that hostnames are optionally supported -- Clarify API documentation around catalog fetch behavior - -### API -- Support returning HTTP 429 (Too Many Requests) - -### Documentation -- Update auth documentation examples to show "expires in" as int - -### Docker Image -- Use Alpine Linux as base image - - diff --git a/vendor/src/github.com/docker/distribution/CONTRIBUTING.md b/vendor/src/github.com/docker/distribution/CONTRIBUTING.md deleted file mode 100644 index 7cc7aedffe..0000000000 --- a/vendor/src/github.com/docker/distribution/CONTRIBUTING.md +++ /dev/null @@ -1,140 +0,0 @@ -# Contributing to the registry - -## Before reporting an issue... - -### If your problem is with... - - - automated builds - - your account on the [Docker Hub](https://hub.docker.com/) - - any other [Docker Hub](https://hub.docker.com/) issue - -Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com) - -### If you... - - - need help setting up your registry - - can't figure out something - - are not sure what's going on or what your problem is - -Then please do not open an issue here yet - you should first try one of the following support forums: - - - irc: #docker-distribution on freenode - - mailing-list: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution - -## Reporting an issue properly - -By following these simple rules you will get better and faster feedback on your issue. - - - search the bugtracker for an already reported issue - -### If you found an issue that describes your problem: - - - please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments - - please refrain from adding "same thing here" or "+1" comments - - you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button - - comment if you have some new, technical and relevant information to add to the case - - __DO NOT__ comment on closed issues or merged PRs. If you think you have a related problem, open up a new issue and reference the PR or issue. - -### If you have not found an existing issue that describes your problem: - - 1. create a new issue, with a succinct title that describes your issue: - - bad title: "It doesn't work with my docker" - - good title: "Private registry push fail: 400 error with E_INVALID_DIGEST" - 2. copy the output of: - - `docker version` - - `docker info` - - `docker exec registry -version` - 3.
copy the command line you used to launch your Registry - 4. restart your docker daemon in debug mode (add `-D` to the daemon launch arguments) - 5. reproduce your problem and get your docker daemon logs showing the error - 6. if relevant, copy your registry logs that show the error - 7. provide any relevant detail about your specific Registry configuration (e.g., storage backend used) - 8. indicate if you are using an enterprise proxy, Nginx, or anything else between you and your Registry - -## Contributing a patch for a known bug, or a small correction - -You should follow the basic GitHub workflow: - - 1. fork - 2. commit a change - 3. make sure the tests pass - 4. PR - -Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple: - - - configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com` - - sign your commits using `-s`: `git commit -s -m "My commit"` - -Some simple rules to ensure quick merge: - - - clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`) - - prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once - - if you need to amend your PR following comments, please squash instead of adding more commits - -## Contributing new features - -You are heavily encouraged to first discuss what you want to do. You can do so on the IRC channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve. - -If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning. -If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work. - -Then you should submit your implementation, clearly linking to the issue (and possible proposal). - -Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged. - -It's mandatory to: - - - interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines) - - address maintainers' comments and modify your submission accordingly - - write tests for any new code - -Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the Registry. - -Have a look at a great, successful contribution: the [Swift driver PR](https://github.com/docker/distribution/pull/493). - -## Coding Style - -Unless explicitly stated, we follow all coding guidelines from the Go -community. While some of these standards may seem arbitrary, they somehow seem -to result in a solid, consistent codebase. - -It is possible that the code base does not currently comply with these -guidelines. We are not looking for a massive PR that fixes this, since that -goes against the spirit of the guidelines. All new contributions should make a -best effort to clean up and make the code base better than they left it. -Obviously, apply your best judgement. Remember, the goal here is to make the -code base easier for humans to navigate and understand. Always keep that in -mind when nudging others to comply. - -The rules: - -1. All code should be formatted with `gofmt -s`. -2.
All code should pass the default levels of - [`golint`](https://github.com/golang/lint). -3. All code should follow the guidelines covered in [Effective - Go](http://golang.org/doc/effective_go.html) and [Go Code Review - Comments](https://github.com/golang/go/wiki/CodeReviewComments). -4. Comment the code. Tell us the why, the history and the context. -5. Document _all_ declarations and methods, even private ones. Declare - expectations, caveats and anything else that may be important. If a type - gets exported, having the comments already there will ensure it's ready. -6. Variable name length should be proportional to its context and no longer. - `noCommaALongVariableNameLikeThisIsNotMoreClearWhenASimpleCommentWouldDo`. - In practice, short methods will have short variable names and globals will - have longer names. -7. No underscores in package names. If you need a compound name, step back, - and re-examine why you need a compound name. If you still think you need a - compound name, lose the underscore. -8. No utils or helpers packages. If a function is not general enough to - warrant its own package, it has not been written generally enough to be a - part of a util package. Just leave it unexported and well-documented. -9. All tests should run with `go test` and outside tooling should not be - required. No, we don't need another unit testing framework. Assertion - packages are acceptable if they provide _real_ incremental value. -10. Even though we call these "rules" above, they are actually just - guidelines. Since you've read all the rules, you now know that. - -If you are having trouble getting into the mood of idiomatic Go, we recommend -reading through [Effective Go](http://golang.org/doc/effective_go.html). The -[Go Blog](http://blog.golang.org/) is also a great resource. Drinking the -kool-aid is a lot easier than going thirsty. diff --git a/vendor/src/github.com/docker/distribution/Dockerfile b/vendor/src/github.com/docker/distribution/Dockerfile deleted file mode 100644 index fa9cd4627e..0000000000 --- a/vendor/src/github.com/docker/distribution/Dockerfile +++ /dev/null @@ -1,18 +0,0 @@ -FROM golang:1.6-alpine - -ENV DISTRIBUTION_DIR /go/src/github.com/docker/distribution -ENV DOCKER_BUILDTAGS include_oss include_gcs - -WORKDIR $DISTRIBUTION_DIR -COPY . $DISTRIBUTION_DIR -COPY cmd/registry/config-dev.yml /etc/docker/registry/config.yml - -RUN set -ex \ - && apk add --no-cache make git - -RUN make PREFIX=/go clean binaries - -VOLUME ["/var/lib/registry"] -EXPOSE 5000 -ENTRYPOINT ["registry"] -CMD ["serve", "/etc/docker/registry/config.yml"] diff --git a/vendor/src/github.com/docker/distribution/LICENSE b/vendor/src/github.com/docker/distribution/LICENSE deleted file mode 100644 index e06d208186..0000000000 --- a/vendor/src/github.com/docker/distribution/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- diff --git a/vendor/src/github.com/docker/distribution/MAINTAINERS b/vendor/src/github.com/docker/distribution/MAINTAINERS deleted file mode 100644 index bda400150c..0000000000 --- a/vendor/src/github.com/docker/distribution/MAINTAINERS +++ /dev/null @@ -1,58 +0,0 @@ -# Distribution maintainers file -# -# This file describes who runs the docker/distribution project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "aaronlehmann", - "dmcgowan", - "dmp42", - "richardscothern", - "shykes", - "stevvooe", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. - - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.dmcgowan] - Name = "Derek McGowan" - Email = "derek@mcgstyle.net" - GitHub = "dmcgowan" - - [people.dmp42] - Name = "Olivier Gambier" - Email = "olivier@docker.com" - GitHub = "dmp42" - - [people.richardscothern] - Name = "Richard Scothern" - Email = "richard.scothern@gmail.com" - GitHub = "richardscothern" - - [people.shykes] - Name = "Solomon Hykes" - Email = "solomon@docker.com" - GitHub = "shykes" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/vendor/src/github.com/docker/distribution/Makefile b/vendor/src/github.com/docker/distribution/Makefile deleted file mode 100644 index a0602d0b2c..0000000000 --- a/vendor/src/github.com/docker/distribution/Makefile +++ /dev/null @@ -1,106 +0,0 @@ -# Set an output prefix, which is the local directory if not specified -PREFIX?=$(shell pwd) - - -# Used to populate version variable in main package. -VERSION=$(shell git describe --match 'v[0-9]*' --dirty='.m' --always) - -# Allow turning off function inlining and variable registerization -ifeq (${DISABLE_OPTIMIZATION},true) - GO_GCFLAGS=-gcflags "-N -l" - VERSION:="$(VERSION)-noopt" -endif - -GO_LDFLAGS=-ldflags "-X `go list ./version`.Version=$(VERSION)" - -.PHONY: clean all fmt vet lint build test binaries -.DEFAULT: all -all: fmt vet lint build test binaries - -AUTHORS: .mailmap .git/HEAD - git log --format='%aN <%aE>' | sort -fu > $@ - -# This only needs to be generated by hand when cutting full releases. -version/version.go: - ./version/version.sh > $@ - -# Required for go 1.5 to build -GO15VENDOREXPERIMENT := 1 - -# Package list -PKGS := $(shell go list -tags "${DOCKER_BUILDTAGS}" ./... 
| grep -v ^github.com/docker/distribution/vendor/) - -# Resolving binary dependencies for specific targets -GOLINT := $(shell which golint || echo '') -GODEP := $(shell which godep || echo '') - -${PREFIX}/bin/registry: $(wildcard **/*.go) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry - -${PREFIX}/bin/digest: $(wildcard **/*.go) - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/digest - -${PREFIX}/bin/registry-api-descriptor-template: $(wildcard **/*.go) - @echo "+ $@" - @go build -o $@ ${GO_LDFLAGS} ${GO_GCFLAGS} ./cmd/registry-api-descriptor-template - -docs/spec/api.md: docs/spec/api.md.tmpl ${PREFIX}/bin/registry-api-descriptor-template - ./bin/registry-api-descriptor-template $< > $@ - -vet: - @echo "+ $@" - @go vet -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -fmt: - @echo "+ $@" - @test -z "$$(gofmt -s -l . 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" || \ - (echo >&2 "+ please format Go code with 'gofmt -s'" && false) - -lint: - @echo "+ $@" - $(if $(GOLINT), , \ - $(error Please install golint: `go get -u github.com/golang/lint/golint`)) - @test -z "$$($(GOLINT) ./... 2>&1 | grep -v ^vendor/ | tee /dev/stderr)" - -build: - @echo "+ $@" - @go build -tags "${DOCKER_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS) - -test: - @echo "+ $@" - @go test -test.short -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -test-full: - @echo "+ $@" - @go test -tags "${DOCKER_BUILDTAGS}" $(PKGS) - -binaries: ${PREFIX}/bin/registry ${PREFIX}/bin/digest ${PREFIX}/bin/registry-api-descriptor-template - @echo "+ $@" - -clean: - @echo "+ $@" - @rm -rf "${PREFIX}/bin/registry" "${PREFIX}/bin/digest" "${PREFIX}/bin/registry-api-descriptor-template" - -dep-save: - @echo "+ $@" - $(if $(GODEP), , \ - $(error Please install godep: go get github.com/tools/godep)) - @$(GODEP) save $(PKGS) - -dep-restore: - @echo "+ $@" - $(if $(GODEP), , \ - $(error Please install godep: go get github.com/tools/godep)) - @$(GODEP) restore -v - -dep-validate: dep-restore - @echo "+ $@" - @rm -Rf .vendor.bak - @mv vendor .vendor.bak - @rm -Rf Godeps - @$(GODEP) save ./... - @test -z "$$(diff -r vendor .vendor.bak 2>&1 | tee /dev/stderr)" || \ - (echo >&2 "+ borked dependencies! what you have in Godeps/Godeps.json does not match with what you have in vendor" && false) - @rm -Rf .vendor.bak diff --git a/vendor/src/github.com/docker/distribution/README.md b/vendor/src/github.com/docker/distribution/README.md deleted file mode 100644 index d35bcb682d..0000000000 --- a/vendor/src/github.com/docker/distribution/README.md +++ /dev/null @@ -1,131 +0,0 @@ -# Distribution - -The Docker toolset to pack, ship, store, and deliver content. - -This repository's main product is the Docker Registry 2.0 implementation -for storing and distributing Docker images. It supersedes the -[docker/docker-registry](https://github.com/docker/docker-registry) -project with a new API design, focused around security and performance. 
- - -[![Circle CI](https://circleci.com/gh/docker/distribution/tree/master.svg?style=svg)](https://circleci.com/gh/docker/distribution/tree/master) -[![GoDoc](https://godoc.org/github.com/docker/distribution?status.svg)](https://godoc.org/github.com/docker/distribution) - -This repository contains the following components: - -|**Component** |Description | |--------------------|----------------------------------------------------------------------------------------------------| | **registry** | An implementation of the [Docker Registry HTTP API V2](docs/spec/api.md) for use with docker 1.6+. | | **libraries** | A rich set of libraries for interacting with distribution components. Please see [godoc](https://godoc.org/github.com/docker/distribution) for details. **Note**: These libraries are **unstable**. | | **specifications** | _Distribution_ related specifications are available in [docs/spec](docs/spec) | | **documentation** | Docker's full documentation set is available at [docs.docker.com](https://docs.docker.com). This repository [contains the subset](docs/index.md) related just to the registry. | - -### How does this integrate with Docker engine? - -This project should provide an implementation of a V2 API for use in the [Docker core project](https://github.com/docker/docker). The API should be embeddable and simplify the process of securely pulling and pushing content from `docker` daemons. - -### What are the long term goals of the Distribution project? - -The _Distribution_ project has the further long-term goal of providing a secure tool chain for distributing content. The specifications, APIs and tools should be as useful with Docker as they are without. - -Our goal is to design a professional-grade and extensible content distribution system that allows users to: - -* Enjoy an efficient, secure and reliable way to store, manage, package and exchange content -* Hack/roll their own on top of healthy open-source components -* Implement their own homemade solutions through good specs and solid extension mechanisms. - -## More about Registry 2.0 - -The new registry implementation provides the following benefits: - -- faster push and pull -- new, more efficient implementation -- simplified deployment -- pluggable storage backend -- webhook notifications - -For information on upcoming functionality, please see [ROADMAP.md](ROADMAP.md). - -### Who needs to deploy a registry? - -By default, Docker users pull images from Docker's public registry instance. [Installing Docker](https://docs.docker.com/engine/installation/) gives users this ability. Users can also push images to a repository on Docker's public registry, if they have a [Docker Hub](https://hub.docker.com/) account. - -For some users and even companies, this default behavior is sufficient. For others, it is not. - -For example, users with their own software products may want to maintain a registry for private, company images. Also, you may wish to deploy your own image repository for images used for testing or continuous integration. For these use cases and others, [deploying your own registry instance](docs/deploying.md) may be the better choice. - -### Migration to Registry 2.0 - -For those who have previously deployed their own registry based on the Registry 1.0 implementation and wish to deploy a Registry 2.0 while retaining images, data migration is required.
A tool to assist with migration efforts has been created. For more information see [docker/migrator](https://github.com/docker/migrator). - -## Contribute - -Please see [CONTRIBUTING.md](CONTRIBUTING.md) for details on how to contribute issues, fixes, and patches to this project. If you are contributing code, see the instructions for [building a development environment](docs/recipes/building.md). - -## Support - -If any issues are encountered while using the _Distribution_ project, several avenues are available for support:
- IRC: #docker-distribution on FreeNode
- Issue Tracker: https://github.com/docker/distribution/issues
- Google Groups: https://groups.google.com/a/dockerproject.org/forum/#!forum/distribution
- Mailing List: docker@dockerproject.org
- - -## License - -This project is distributed under [Apache License, Version 2.0](LICENSE). diff --git a/vendor/src/github.com/docker/distribution/ROADMAP.md b/vendor/src/github.com/docker/distribution/ROADMAP.md deleted file mode 100644 index 701127afec..0000000000 --- a/vendor/src/github.com/docker/distribution/ROADMAP.md +++ /dev/null @@ -1,267 +0,0 @@ -# Roadmap - -The Distribution Project consists of several components, some of which are -still being defined. This document defines the high-level goals of the -project, identifies the current components, and defines the release- -relationship to the Docker Platform. - -* [Distribution Goals](#distribution-goals) -* [Distribution Components](#distribution-components) -* [Project Planning](#project-planning): release-relationship to the Docker Platform. - -This road map is a living document, providing an overview of the goals and -considerations made in respect of the future of the project. - -## Distribution Goals - -- Replace the existing [docker registry](github.com/docker/docker-registry) - implementation as the primary implementation. -- Replace the existing push and pull code in the docker engine with the - distribution package. -- Define a strong data model for distributing docker images -- Provide a flexible distribution tool kit for use in the docker platform -- Unlock new distribution models - -## Distribution Components - -Components of the Distribution Project are managed via github [milestones](https://github.com/docker/distribution/milestones). Upcoming -features and bugfixes for a component will be added to the relevant milestone. If a feature or -bugfix is not part of a milestone, it is currently unscheduled for -implementation. - -* [Registry](#registry) -* [Distribution Package](#distribution-package) - -*** - -### Registry - -The new Docker registry is the main portion of the distribution repository. -Registry 2.0 is the first release of the next-generation registry. This was -primarily focused on implementing the [new registry -API](https://github.com/docker/distribution/blob/master/docs/spec/api.md), -with a focus on security and performance. - -Following from the Distribution project goals above, we have a set of goals -for registry v2 that we would like to follow in the design. New features -should be compared against these goals. - -#### Data Storage and Distribution First - -The registry's first goal is to provide a reliable, consistent storage -location for Docker images. The registry should only provide the minimal -amount of indexing required to fetch image data and no more. - -This means we should be selective in new features and API additions, including -those that may require expensive, ever growing indexes. Requests should be -servable in "constant time". - -#### Content Addressability - -All data objects used in the registry API should be content addressable. -Content identifiers should be secure and verifiable. This provides a secure, -reliable base from which to build more advanced content distribution systems. - -#### Content Agnostic - -In the past, changes to the image format would require large changes in Docker -and the Registry. By decoupling the distribution and image format, we can -allow the formats to progress without having to coordinate between the two. -This means that we should be focused on decoupling Docker from the registry -just as much as decoupling the registry from Docker. Such an approach will -allow us to unlock new distribution models that haven't been possible before. 
- -We can take this further by saying that the new registry should be content -agnostic. The registry provides a model of names, tags, manifests and content -addresses and that model can be used to work with content. - -#### Simplicity - -The new registry should be closer to a microservice component than its -predecessor. This means it should have a narrower API and a low number of -service dependencies. It should be easy to deploy. - -This means that other solutions should be explored before changing the API or -adding extra dependencies. If functionality is required, can it be added as an -extension or companion service? - -#### Extensibility - -The registry should provide extension points to add functionality, keeping the -scope narrow while still making it possible to add new capabilities. - -Features like search, indexing, synchronization and registry explorers fall -into this category. No such feature should be added unless we've found it -impossible to do through an extension. - -#### Active Feature Discussions - -The following are feature discussions that are currently active. - -If you don't see your favorite, unimplemented feature, feel free to contact us -via IRC or the mailing list and we can talk about adding it. The goal here is -to make sure that new features go through a rigorous design process before -landing in the registry. - -##### Proxying to other Registries - -A _pull-through caching_ mode exists for the registry, but is restricted from -within the docker client to only mirror the official Docker Hub. This functionality -can be expanded when image provenance has been specified and implemented in the -distribution project. - -##### Metadata storage - -Metadata for the registry is currently stored with the manifest and layer data on -the storage backend. While this is a big win for simplicity and reliably maintaining -state, it comes with the cost of consistency and high latency. The mutable registry -metadata operations should be abstracted behind an API which will allow ACID compliant -storage systems to handle metadata. - -##### Peer to Peer transfer - -Discussion has started here: https://docs.google.com/document/d/1rYDpSpJiQWmCQy8Cuiaa3NH-Co33oK_SC9HeXYo87QA/edit - -##### Indexing, Search and Discovery - -The original registry provided some implementation of search for use with -private registries. Support has been elided from V2 since we'd like to -decouple search functionality from the registry. This makes the registry -simpler to deploy, especially in use cases where search is not needed, and -lets us decouple the image format from the registry. - -There are explorations into using the catalog API and notification system to -build external indexes. The current line of thought is that we will define a -common search API to index and query docker images. Such a system could be run -as a companion to a registry or set of registries to power discovery. - -The main issue with search and discovery is that there are so many ways to -accomplish it. There are two aspects to this project. The first is deciding on -how it will be done, including an API definition that can work with changing -data formats. The second is the process of integrating with `docker search`. -We expect that someone will attempt to address the problem with the existing tools, -propose a standard search API, or use the experience to inform a standardization -process. Once this has been explored, we will integrate with the docker client.
- -Please see the following for more detail: - -- https://github.com/docker/distribution/issues/206 - -##### Deletes - -> __NOTE:__ Deletes are a much asked for feature. Before requesting this -feature or participating in discussion, we ask that you read this section in -full and understand the problems behind deletes. - -While, at first glance, implementing deleting seems simple, there are a number -of mitigating factors that make many solutions not ideal or even pathological in -the context of a registry. The following paragraphs discuss the background and -approaches that could be applied to arrive at a solution. - -The goal of deletes in any system is to remove unused or unneeded data. Only -data requested for deletion should be removed and no other data. Removing -unintended data is worse than _not_ removing data that was requested for -removal, but ideally both are supported. Generally, according to this rule, we -err on the side of holding data longer than needed, ensuring that it is only removed when -we can be certain that it can be removed. With the current behavior, we opt to -hold onto the data forever, ensuring that data cannot be incorrectly removed. - -To understand the problems with implementing deletes, one must understand the -data model. All registry data is stored in a filesystem layout, implemented on -a "storage driver", effectively a _virtual file system_ (VFS). The storage -system must assume that this VFS layer will be eventually consistent and has -poor read-after-write consistency, since this is the lowest common denominator -among the storage drivers. This is mitigated by writing values in -reverse-dependent order, but makes wider transactional operations unsafe. - -Layered on the VFS model is a content-addressable _directed, acyclic graph_ -(DAG) made up of blobs. Manifests reference layers. Tags reference manifests. -Since the same data can be referenced by multiple manifests, we only store -data once, even if it is in different repositories. Thus, we have a set of -blobs, referenced by tags and manifests. If we want to delete a blob we need -to be certain that it is no longer referenced by another manifest or tag. When -we delete a manifest, we also can try to delete the referenced blobs. Deciding -whether or not a blob has an active reference is the crux of the problem. - -Conceptually, deleting a manifest and its resources is quite simple. Just find -all the manifests, enumerate the referenced blobs and delete the blobs not in -that set. An astute observer will recognize this as a garbage collection -problem. As with garbage collection in programming languages, this is very -simple when one always has a consistent view. When one adds parallelism and an -inconsistent view of data, it becomes very challenging. - -A simple example can demonstrate this. Let's say we are deleting a manifest -_A_ in one process. We scan the manifest and decide that all the blobs are -ready for deletion. Concurrently, we have another process accepting a new -manifest _B_ referencing one or more blobs from the manifest _A_. Manifest _B_ -is accepted and all the blobs are considered present, so the operation -proceeds. The original process then deletes the referenced blobs, assuming -they were unreferenced. The manifest _B_, which we thought had all of its data -present, can no longer be served by the registry, since the dependent data has -been deleted. - -Deleting data from the registry safely requires some way to coordinate this -operation.
The following approaches are being considered: - -- _Reference Counting_ - Maintain a count of references to each blob. This is - challenging for a number of reasons: (1) maintaining a consistent consensus - of reference counts across a set of Registries and (2) building the initial - list of reference counts for an existing registry. These challenges can be - met with a consensus protocol like Paxos or Raft in the first case, and with a - necessary but simple scan in the second. -- _Lock the World GC_ - Halt all writes to the data store. Walk the data store - and find all blob references. Delete all unreferenced blobs. This approach - is very simple but requires disabling writes for a period of time while the - service reads all data. This is slow and expensive but very accurate and - effective. -- _Generational GC_ - Do something similar to above but instead of blocking - writes, writes are sent to another storage backend while reads are broadcast - to the new and old backends. GC is then performed on the read-only portion. - Because writes land in the new backend, the data in the read-only section - can be safely deleted. The main drawbacks of this approach are complexity - and coordination. -- _Centralized Oracle_ - Using a centralized, transactional database, we can - know exactly which data is referenced at any given time. This avoids the - coordination problem by managing this data in a single location. We trade - off metadata scalability for simplicity and performance. This is a very good - option for most registry deployments. This would create a bottleneck for - registry metadata. However, metadata is generally not the main bottleneck - when serving images. - -Please let us know if other solutions exist that we have yet to enumerate. -Note that for any approach, implementation is a massive consideration. For -example, a mark-sweep based solution may seem simple but the amount of work in -coordination may offset the extra work it would take to build a _Centralized -Oracle_. We'll accept proposals for any solution but please coordinate with us -before dropping code. - -At this time, we have traded off simplicity and ease of deployment for disk -space. Simplicity and ease of deployment tend to reduce developer involvement, -which is currently the most expensive resource in software engineering. Taking -on any solution for deletes will greatly affect these factors, trading off -very cheap disk space for a complex deployment and operational story. - -Please see the following issues for more detail: - -- https://github.com/docker/distribution/issues/422 -- https://github.com/docker/distribution/issues/461 -- https://github.com/docker/distribution/issues/462 - -### Distribution Package - -At its core, the Distribution Project is a set of Go packages that make up -Distribution Components. At this time, most of these packages make up the -Registry implementation. - -The package itself is considered unstable. If you're using it, please take care to vendor the dependent version. - -For feature additions, please see the Registry section. In the future, we may break out a -separate Roadmap for distribution-specific features that apply to more than -just the registry. - -*** - -### Project Planning - -An [Open-Source Planning Process](https://github.com/docker/distribution/wiki/Open-Source-Planning-Process) is used to define the Roadmap. [Project Pages](https://github.com/docker/distribution/wiki) define the goals for each Milestone and identify current progress.
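To make the trade-offs above concrete, here is a toy mark-and-sweep sketch in Go. It assumes a fully consistent snapshot of manifests and blobs — hypothetical types, not the registry's actual data structures — and the discussion above is precisely about why that assumption does not hold on an eventually consistent VFS:

```go
package gc

// Digest identifies a blob by content address.
type Digest string

// Manifest lists the blobs (layers, config) it references.
type Manifest struct {
	Blobs []Digest
}

// MarkAndSweep returns the blobs referenced by no manifest, which are
// therefore deletion candidates -- valid only if no manifest can be
// written concurrently.
func MarkAndSweep(manifests []Manifest, allBlobs []Digest) []Digest {
	marked := make(map[Digest]bool)
	for _, m := range manifests {
		for _, d := range m.Blobs {
			marked[d] = true // mark: every referenced blob survives
		}
	}
	var garbage []Digest
	for _, d := range allBlobs {
		if !marked[d] {
			garbage = append(garbage, d) // sweep: unreferenced blobs
		}
	}
	return garbage
}
```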
- diff --git a/vendor/src/github.com/docker/distribution/blobs.go b/vendor/src/github.com/docker/distribution/blobs.go deleted file mode 100644 index d125330117..0000000000 --- a/vendor/src/github.com/docker/distribution/blobs.go +++ /dev/null @@ -1,245 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "io" - "net/http" - "time" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" -) - -var ( - // ErrBlobExists is returned when the blob already exists. - ErrBlobExists = errors.New("blob exists") - - // ErrBlobDigestUnsupported is returned when the blob digest is an unsupported version. - ErrBlobDigestUnsupported = errors.New("unsupported blob digest") - - // ErrBlobUnknown is returned when the blob is not found. - ErrBlobUnknown = errors.New("unknown blob") - - // ErrBlobUploadUnknown is returned when the upload is not found. - ErrBlobUploadUnknown = errors.New("blob upload unknown") - - // ErrBlobInvalidLength is returned when the length of a blob on commit is - // mismatched with the descriptor or is an invalid value. - ErrBlobInvalidLength = errors.New("blob invalid length") -) - -// ErrBlobInvalidDigest is returned when the digest check fails. -type ErrBlobInvalidDigest struct { - Digest digest.Digest - Reason error -} - -func (err ErrBlobInvalidDigest) Error() string { - return fmt.Sprintf("invalid digest for referenced layer: %v, %v", - err.Digest, err.Reason) -} - -// ErrBlobMounted is returned when a blob is mounted from another repository -// instead of initiating an upload session. -type ErrBlobMounted struct { - From reference.Canonical - Descriptor Descriptor -} - -func (err ErrBlobMounted) Error() string { - return fmt.Sprintf("blob mounted from: %v to: %v", - err.From, err.Descriptor) -} - -// Descriptor describes targeted content. Used in conjunction with a blob -// store, a descriptor can be used to fetch, store and target any kind of -// blob. The struct also describes the wire protocol format. Fields should -// only be added but never changed. -type Descriptor struct { - // MediaType describes the type of the content. All text based formats are - // encoded as utf-8. - MediaType string `json:"mediaType,omitempty"` - - // Size in bytes of content. - Size int64 `json:"size,omitempty"` - - // Digest uniquely identifies the content. A byte stream can be verified - // against this digest. - Digest digest.Digest `json:"digest,omitempty"` - - // URLs contains the source URLs of this content. - URLs []string `json:"urls,omitempty"` - - // NOTE: Before adding a field here, please ensure that all - // other options have been exhausted. Much of the type relationships - // depend on the simplicity of this type. -} - -// Descriptor returns the descriptor, to make it satisfy the Describable -// interface. Note that implementations of Describable are generally objects -// which can be described, not simply descriptors; this exception is in place -// to make it more convenient to pass actual descriptors to functions that -// expect Describable objects. -func (d Descriptor) Descriptor() Descriptor { - return d -} - -// BlobStatter makes blob descriptors available by digest. The service may -// provide a descriptor of a different digest if the provided digest is not -// canonical. -type BlobStatter interface { - // Stat provides metadata about a blob identified by the digest. If the - // blob is unknown to the describer, ErrBlobUnknown will be returned.
- Stat(ctx context.Context, dgst digest.Digest) (Descriptor, error) -} - -// BlobDeleter enables deleting blobs from storage. -type BlobDeleter interface { - Delete(ctx context.Context, dgst digest.Digest) error -} - -// BlobEnumerator enables iterating over blobs from storage -type BlobEnumerator interface { - Enumerate(ctx context.Context, ingester func(dgst digest.Digest) error) error -} - -// BlobDescriptorService manages metadata about a blob by digest. Most -// implementations will not expose such an interface explicitly. Such mappings -// should be maintained by interacting with the BlobIngester. Hence, this is -// left off of BlobService and BlobStore. -type BlobDescriptorService interface { - BlobStatter - - // SetDescriptor assigns the descriptor to the digest. The provided digest and - // the digest in the descriptor must map to identical content but they may - // differ on their algorithm. The descriptor must have the canonical - // digest of the content and the digest algorithm must match the - // annotator's canonical algorithm. - // - // Such a facility can be used to map blobs between digest domains, with - // the restriction that the algorithm of the descriptor must match the - // canonical algorithm (i.e. sha256) of the annotator. - SetDescriptor(ctx context.Context, dgst digest.Digest, desc Descriptor) error - - // Clear enables descriptors to be unlinked - Clear(ctx context.Context, dgst digest.Digest) error -} - -// BlobDescriptorServiceFactory creates middleware for BlobDescriptorService. -type BlobDescriptorServiceFactory interface { - BlobAccessController(svc BlobDescriptorService) BlobDescriptorService -} - -// ReadSeekCloser is the primary reader type for blob data, combining -// io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// BlobProvider describes operations for getting blob data. -type BlobProvider interface { - // Get returns the entire blob identified by digest along with the descriptor. - Get(ctx context.Context, dgst digest.Digest) ([]byte, error) - - // Open provides a ReadSeekCloser to the blob identified by the provided - // descriptor. If the blob is not known to the service, an error will be - // returned. - Open(ctx context.Context, dgst digest.Digest) (ReadSeekCloser, error) -} - -// BlobServer can serve blobs via http. -type BlobServer interface { - // ServeBlob attempts to serve the blob, identified by dgst, via http. The - // service may decide to redirect the client elsewhere or serve the data - // directly. - // - // This handler only issues successful responses, such as 2xx or 3xx, - // meaning it serves data or issues a redirect. If the blob is not - // available, an error will be returned and the caller may still issue a - // response. - // - // The implementation may serve the same blob from a different digest - // domain. The appropriate headers will be set for the blob, unless they - // have already been set by the caller. - ServeBlob(ctx context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error -} - -// BlobIngester ingests blob data. -type BlobIngester interface { - // Put inserts the content p into the blob service, returning a descriptor - // or an error. - Put(ctx context.Context, mediaType string, p []byte) (Descriptor, error) - - // Create allocates a new blob writer to add a blob to this service. The - // returned handle can be written to and later resumed using an opaque - // identifier.
With this approach, one can Close and Resume a BlobWriter - // multiple times until the BlobWriter is committed or cancelled. - Create(ctx context.Context, options ...BlobCreateOption) (BlobWriter, error) - - // Resume attempts to resume a write to a blob, identified by an id. - Resume(ctx context.Context, id string) (BlobWriter, error) -} - -// BlobCreateOption is a general extensible function argument for blob creation -// methods. A BlobIngester may choose to honor any or none of the given -// BlobCreateOptions, which can be specific to the implementation of the -// BlobIngester receiving them. -// TODO (brianbland): unify this with ManifestServiceOption in the future -type BlobCreateOption interface { - Apply(interface{}) error -} - -// BlobWriter provides a handle for inserting data into a blob store. -// Instances should be obtained from BlobWriteService.Writer and -// BlobWriteService.Resume. If supported by the store, a writer can be -// recovered with the id. -type BlobWriter interface { - io.WriteCloser - io.ReaderFrom - - // Size returns the number of bytes written to this blob. - Size() int64 - - // ID returns the identifier for this writer. The ID can be used with the - // Blob service to later resume the write. - ID() string - - // StartedAt returns the time this blob write was started. - StartedAt() time.Time - - // Commit completes the blob writer process. The content is verified - // against the provided provisional descriptor, which may result in an - // error. Depending on the implementation, written data may be validated - // against the provisional descriptor fields. If MediaType is not present, - // the implementation may reject the commit or assign "application/octet-stream" - // to the blob. The returned descriptor may have a different - // digest depending on the blob store, referred to as the canonical - // descriptor. - Commit(ctx context.Context, provisional Descriptor) (canonical Descriptor, err error) - - // Cancel ends the blob write without storing any data and frees any - // associated resources. Any data written thus far will be lost. Cancel - // implementations should allow multiple calls, even after a commit, that - // result in a no-op. This allows use of Cancel in a defer statement, - // increasing the assurance that it is correctly called. - Cancel(ctx context.Context) error -} - -// BlobService combines the operations to access, read and write blobs. This -// can be used to describe remote blob services. -type BlobService interface { - BlobStatter - BlobProvider - BlobIngester -} - -// BlobStore represents the entire suite of blob related operations. Such an -// implementation can access, read, write, delete and serve blobs. -type BlobStore interface { - BlobService - BlobServer - BlobDeleter -} diff --git a/vendor/src/github.com/docker/distribution/circle.yml b/vendor/src/github.com/docker/distribution/circle.yml deleted file mode 100644 index 3d1ffd2f06..0000000000 --- a/vendor/src/github.com/docker/distribution/circle.yml +++ /dev/null @@ -1,89 +0,0 @@ -# Pony-up!
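As a usage note for the blobs.go interfaces above: a hedged sketch of pushing a blob through the BlobIngester/BlobWriter contract. The caller supplies the BlobStore and context; `digest.FromBytes` is assumed to be available in the vendored digest package, and error handling is abbreviated:

```go
package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
)

// putBlob writes payload into the store and returns the canonical descriptor.
func putBlob(ctx context.Context, blobs distribution.BlobStore, payload []byte) (distribution.Descriptor, error) {
	bw, err := blobs.Create(ctx)
	if err != nil {
		return distribution.Descriptor{}, err
	}
	// Safe in a defer: Cancel after a successful Commit is a no-op.
	defer bw.Cancel(ctx)

	if _, err := bw.Write(payload); err != nil {
		return distribution.Descriptor{}, err
	}

	// Commit verifies the written bytes against the provisional descriptor
	// and returns the canonical descriptor, whose digest may differ in
	// algorithm from the provisional one.
	return bw.Commit(ctx, distribution.Descriptor{
		MediaType: "application/octet-stream",
		Size:      int64(len(payload)),
		Digest:    digest.FromBytes(payload),
	})
}
```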
-machine: - pre: - # Install gvm - - bash < <(curl -s -S -L https://raw.githubusercontent.com/moovweb/gvm/1.0.22/binscripts/gvm-installer) - # Install codecov for coverage - - pip install --user codecov - - post: - # go - - gvm install go1.6 --prefer-binary --name=stable - - environment: - # Convenient shortcuts to "common" locations - CHECKOUT: /home/ubuntu/$CIRCLE_PROJECT_REPONAME - BASE_DIR: src/github.com/$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME - # Trick circle brainflat "no absolute path" behavior - BASE_STABLE: ../../../$HOME/.gvm/pkgsets/stable/global/$BASE_DIR - DOCKER_BUILDTAGS: "include_oss include_gcs" - # Workaround Circle parsing dumb bugs and/or YAML wonkyness - CIRCLE_PAIN: "mode: set" - - hosts: - # Not used yet - fancy: 127.0.0.1 - -dependencies: - pre: - # Copy the code to the gopath of all go versions - - > - gvm use stable && - mkdir -p "$(dirname $BASE_STABLE)" && - cp -R "$CHECKOUT" "$BASE_STABLE" - - override: - # Install dependencies for every copied clone/go version - - gvm use stable && go get github.com/tools/godep: - pwd: $BASE_STABLE - - post: - # For the stable go version, additionally install linting tools - - > - gvm use stable && - go get github.com/axw/gocov/gocov github.com/golang/lint/golint - -test: - pre: - # Output the go versions we are going to test - # - gvm use old && go version - - gvm use stable && go version - - # Ensure validation of dependencies - - gvm use stable && if test -n "`git diff --stat=1000 master | grep -Ei \"vendor|godeps\"`"; then make dep-validate; fi: - pwd: $BASE_STABLE - - # First thing: build everything. This will catch compile errors, and it's - # also necessary for go vet to work properly (see #807). - - gvm use stable && godep go install $(go list ./... | grep -v "/vendor/"): - pwd: $BASE_STABLE - - # FMT - - gvm use stable && make fmt: - pwd: $BASE_STABLE - - # VET - - gvm use stable && make vet: - pwd: $BASE_STABLE - - # LINT - - gvm use stable && make lint: - pwd: $BASE_STABLE - - override: - # Test stable, and report - - gvm use stable; export ROOT_PACKAGE=$(go list .); go list -tags "$DOCKER_BUILDTAGS" ./... | grep -v "/vendor/" | xargs -L 1 -I{} bash -c 'export PACKAGE={}; godep go test -tags "$DOCKER_BUILDTAGS" -test.short -coverprofile=$GOPATH/src/$PACKAGE/coverage.out -coverpkg=$(./coverpkg.sh $PACKAGE $ROOT_PACKAGE) $PACKAGE': - timeout: 600 - pwd: $BASE_STABLE - - post: - # Report to codecov - - bash <(curl -s https://codecov.io/bash): - pwd: $BASE_STABLE - - ## Notes - # Disabled the -race detector due to massive memory usage. - # Do we want these as well? - # - go get code.google.com/p/go.tools/cmd/goimports - # - test -z "$(goimports -l -w ./... | tee /dev/stderr)" - # http://labix.org/gocheck diff --git a/vendor/src/github.com/docker/distribution/context/context.go b/vendor/src/github.com/docker/distribution/context/context.go deleted file mode 100644 index 23cbf5b545..0000000000 --- a/vendor/src/github.com/docker/distribution/context/context.go +++ /dev/null @@ -1,85 +0,0 @@ -package context - -import ( - "sync" - - "github.com/docker/distribution/uuid" - "golang.org/x/net/context" -) - -// Context is a copy of Context from the golang.org/x/net/context package. -type Context interface { - context.Context -} - -// instanceContext is a context that provides only an instance id. It is -// provided as the main background context. 
-type instanceContext struct { - Context - id string // id of context, logged as "instance.id" - once sync.Once // once protect generation of the id -} - -func (ic *instanceContext) Value(key interface{}) interface{} { - if key == "instance.id" { - ic.once.Do(func() { - // We want to lazy initialize the UUID such that we don't - // call a random generator from the package initialization - // code. For various reasons random could not be available - // https://github.com/docker/distribution/issues/782 - ic.id = uuid.Generate().String() - }) - return ic.id - } - - return ic.Context.Value(key) -} - -var background = &instanceContext{ - Context: context.Background(), -} - -// Background returns a non-nil, empty Context. The background context -// provides a single key, "instance.id" that is globally unique to the -// process. -func Background() Context { - return background -} - -// WithValue returns a copy of parent in which the value associated with key is -// val. Use context Values only for request-scoped data that transits processes -// and APIs, not for passing optional parameters to functions. -func WithValue(parent Context, key, val interface{}) Context { - return context.WithValue(parent, key, val) -} - -// stringMapContext is a simple context implementation that checks a map for a -// key, falling back to a parent if not present. -type stringMapContext struct { - context.Context - m map[string]interface{} -} - -// WithValues returns a context that proxies lookups through a map. Only -// supports string keys. -func WithValues(ctx context.Context, m map[string]interface{}) context.Context { - mo := make(map[string]interface{}, len(m)) // make our own copy. - for k, v := range m { - mo[k] = v - } - - return stringMapContext{ - Context: ctx, - m: mo, - } -} - -func (smc stringMapContext) Value(key interface{}) interface{} { - if ks, ok := key.(string); ok { - if v, ok := smc.m[ks]; ok { - return v - } - } - - return smc.Context.Value(key) -} diff --git a/vendor/src/github.com/docker/distribution/context/doc.go b/vendor/src/github.com/docker/distribution/context/doc.go deleted file mode 100644 index 3b4ab8882f..0000000000 --- a/vendor/src/github.com/docker/distribution/context/doc.go +++ /dev/null @@ -1,89 +0,0 @@ -// Package context provides several utilities for working with -// golang.org/x/net/context in http requests. Primarily, the focus is on -// logging relevant request information but this package is not limited to -// that purpose. -// -// The easiest way to get started is to get the background context: -// -// ctx := context.Background() -// -// The returned context should be passed around your application and be the -// root of all other context instances. If the application has a version, this -// line should be called before anything else: -// -// ctx := context.WithVersion(context.Background(), version) -// -// The above will store the version in the context and will be available to -// the logger. -// -// Logging -// -// The most useful aspect of this package is GetLogger. This function takes -// any context.Context interface and returns the current logger from the -// context. Canonical usage looks like this: -// -// GetLogger(ctx).Infof("something interesting happened") -// -// GetLogger also takes optional key arguments. The keys will be looked up in -// the context and reported with the logger. 
The following example would
-// return a logger that prints the version with each log message:
-//
-//	ctx := context.WithValue(context.Background(), "version", version)
-//	GetLogger(ctx, "version").Infof("this log message has a version field")
-//
-// The above would print out a log message like this:
-//
-//	INFO[0000] this log message has a version field    version=v2.0.0-alpha.2.m
-//
-// When used with WithLogger, we gain the ability to decorate the context with
-// loggers that have information from disparate parts of the call stack.
-// Following from the version example, we can build a new context with the
-// configured logger such that we always print the version field:
-//
-//	ctx = WithLogger(ctx, GetLogger(ctx, "version"))
-//
-// Since the logger has been pushed to the context, we can now get the version
-// field for free with our log messages. Future calls to GetLogger on the new
-// context will have the version field:
-//
-//	GetLogger(ctx).Infof("this log message has a version field")
-//
-// This becomes more powerful when we start stacking loggers. Let's say we
-// have the version logger from above but also want a request id. Using the
-// context above, in our request scoped function, we place another logger in
-// the context:
-//
-//	ctx = context.WithValue(ctx, "http.request.id", "unique id") // called when building request context
-//	ctx = WithLogger(ctx, GetLogger(ctx, "http.request.id"))
-//
-// When GetLogger is called on the new context, "http.request.id" will be
-// included as a logger field, along with the original "version" field:
-//
-//	INFO[0000] this log message has a version field    http.request.id=unique id version=v2.0.0-alpha.2.m
-//
-// Note that this only affects the new context; the previous context, with the
-// version field, can be used independently. Put another way, the new logger,
-// added to the request context, is unique to that context and can have
-// request scoped variables.
-//
-// HTTP Requests
-//
-// This package also contains several methods for working with http requests.
-// The concepts are very similar to those described above. We simply place the
-// request in the context using WithRequest. This makes the request variables
-// available. GetRequestLogger can then be called to get request specific
-// variables in a log line:
-//
-//	ctx = WithRequest(ctx, req)
-//	GetRequestLogger(ctx).Infof("request variables")
-//
-// Like above, if we want to include the request data in all log messages in
-// the context, we push the logger to a new context and use that one:
-//
-//	ctx = WithLogger(ctx, GetRequestLogger(ctx))
-//
-// The concept is fairly powerful and ensures that calls throughout the stack
-// can be traced in log messages. Using the fields like "http.request.id", one
-// can analyze call flow for a particular request with a simple grep of the
-// logs.
-package context
diff --git a/vendor/src/github.com/docker/distribution/context/http.go b/vendor/src/github.com/docker/distribution/context/http.go
deleted file mode 100644
index 2cb1d04175..0000000000
--- a/vendor/src/github.com/docker/distribution/context/http.go
+++ /dev/null
@@ -1,364 +0,0 @@
-package context
-
-import (
-	"errors"
-	"net"
-	"net/http"
-	"strings"
-	"sync"
-	"time"
-
-	log "github.com/Sirupsen/logrus"
-	"github.com/docker/distribution/uuid"
-	"github.com/gorilla/mux"
-)
-
-// Common errors used with this package.
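Condensing the package comment above into one flow, a sketch that stacks the
version and request loggers, where req is a hypothetical *http.Request and
every helper is one defined in this package (WithVersion appears in
version.go later in this patch):

    ctx := context.WithVersion(context.Background(), "v2.0.0")
    ctx = context.WithRequest(ctx, req)
    ctx = context.WithLogger(ctx, context.GetRequestLogger(ctx))
    context.GetLogger(ctx).Infof("handling request") // carries version and http.request.* fields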
-var ( - ErrNoRequestContext = errors.New("no http request in context") - ErrNoResponseWriterContext = errors.New("no http response in context") -) - -func parseIP(ipStr string) net.IP { - ip := net.ParseIP(ipStr) - if ip == nil { - log.Warnf("invalid remote IP address: %q", ipStr) - } - return ip -} - -// RemoteAddr extracts the remote address of the request, taking into -// account proxy headers. -func RemoteAddr(r *http.Request) string { - if prior := r.Header.Get("X-Forwarded-For"); prior != "" { - proxies := strings.Split(prior, ",") - if len(proxies) > 0 { - remoteAddr := strings.Trim(proxies[0], " ") - if parseIP(remoteAddr) != nil { - return remoteAddr - } - } - } - // X-Real-Ip is less supported, but worth checking in the - // absence of X-Forwarded-For - if realIP := r.Header.Get("X-Real-Ip"); realIP != "" { - if parseIP(realIP) != nil { - return realIP - } - } - - return r.RemoteAddr -} - -// RemoteIP extracts the remote IP of the request, taking into -// account proxy headers. -func RemoteIP(r *http.Request) string { - addr := RemoteAddr(r) - - // Try parsing it as "IP:port" - if ip, _, err := net.SplitHostPort(addr); err == nil { - return ip - } - - return addr -} - -// WithRequest places the request on the context. The context of the request -// is assigned a unique id, available at "http.request.id". The request itself -// is available at "http.request". Other common attributes are available under -// the prefix "http.request.". If a request is already present on the context, -// this method will panic. -func WithRequest(ctx Context, r *http.Request) Context { - if ctx.Value("http.request") != nil { - // NOTE(stevvooe): This needs to be considered a programming error. It - // is unlikely that we'd want to have more than one request in - // context. - panic("only one request per context") - } - - return &httpRequestContext{ - Context: ctx, - startedAt: time.Now(), - id: uuid.Generate().String(), - r: r, - } -} - -// GetRequest returns the http request in the given context. Returns -// ErrNoRequestContext if the context does not have an http request associated -// with it. -func GetRequest(ctx Context) (*http.Request, error) { - if r, ok := ctx.Value("http.request").(*http.Request); r != nil && ok { - return r, nil - } - return nil, ErrNoRequestContext -} - -// GetRequestID attempts to resolve the current request id, if possible. An -// error is return if it is not available on the context. -func GetRequestID(ctx Context) string { - return GetStringValue(ctx, "http.request.id") -} - -// WithResponseWriter returns a new context and response writer that makes -// interesting response statistics available within the context. -func WithResponseWriter(ctx Context, w http.ResponseWriter) (Context, http.ResponseWriter) { - irw := instrumentedResponseWriter{ - ResponseWriter: w, - Context: ctx, - } - - if closeNotifier, ok := w.(http.CloseNotifier); ok { - irwCN := &instrumentedResponseWriterCN{ - instrumentedResponseWriter: irw, - CloseNotifier: closeNotifier, - } - - return irwCN, irwCN - } - - return &irw, &irw -} - -// GetResponseWriter returns the http.ResponseWriter from the provided -// context. If not present, ErrNoResponseWriterContext is returned. The -// returned instance provides instrumentation in the context. 
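The precedence RemoteAddr implements above — the first valid X-Forwarded-For
entry, then X-Real-Ip, then the raw RemoteAddr — can be seen with a
hand-built request; a small hypothetical sketch using only the functions
defined in this file:

    req, _ := http.NewRequest("GET", "http://registry.example/v2/", nil)
    req.RemoteAddr = "10.0.0.1:34567"
    req.Header.Set("X-Forwarded-For", "203.0.113.7, 10.0.0.1")

    RemoteAddr(req) // "203.0.113.7": first proxy entry wins
    RemoteIP(req)   // "203.0.113.7": already a bare IP, returned as-is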
-func GetResponseWriter(ctx Context) (http.ResponseWriter, error) { - v := ctx.Value("http.response") - - rw, ok := v.(http.ResponseWriter) - if !ok || rw == nil { - return nil, ErrNoResponseWriterContext - } - - return rw, nil -} - -// getVarsFromRequest let's us change request vars implementation for testing -// and maybe future changes. -var getVarsFromRequest = mux.Vars - -// WithVars extracts gorilla/mux vars and makes them available on the returned -// context. Variables are available at keys with the prefix "vars.". For -// example, if looking for the variable "name", it can be accessed as -// "vars.name". Implementations that are accessing values need not know that -// the underlying context is implemented with gorilla/mux vars. -func WithVars(ctx Context, r *http.Request) Context { - return &muxVarsContext{ - Context: ctx, - vars: getVarsFromRequest(r), - } -} - -// GetRequestLogger returns a logger that contains fields from the request in -// the current context. If the request is not available in the context, no -// fields will display. Request loggers can safely be pushed onto the context. -func GetRequestLogger(ctx Context) Logger { - return GetLogger(ctx, - "http.request.id", - "http.request.method", - "http.request.host", - "http.request.uri", - "http.request.referer", - "http.request.useragent", - "http.request.remoteaddr", - "http.request.contenttype") -} - -// GetResponseLogger reads the current response stats and builds a logger. -// Because the values are read at call time, pushing a logger returned from -// this function on the context will lead to missing or invalid data. Only -// call this at the end of a request, after the response has been written. -func GetResponseLogger(ctx Context) Logger { - l := getLogrusLogger(ctx, - "http.response.written", - "http.response.status", - "http.response.contenttype") - - duration := Since(ctx, "http.request.startedat") - - if duration > 0 { - l = l.WithField("http.response.duration", duration.String()) - } - - return l -} - -// httpRequestContext makes information about a request available to context. -type httpRequestContext struct { - Context - - startedAt time.Time - id string - r *http.Request -} - -// Value returns a keyed element of the request for use in the context. To get -// the request itself, query "request". For other components, access them as -// "request.". 
For example, r.RequestURI -func (ctx *httpRequestContext) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.request" { - return ctx.r - } - - if !strings.HasPrefix(keyStr, "http.request.") { - goto fallback - } - - parts := strings.Split(keyStr, ".") - - if len(parts) != 3 { - goto fallback - } - - switch parts[2] { - case "uri": - return ctx.r.RequestURI - case "remoteaddr": - return RemoteAddr(ctx.r) - case "method": - return ctx.r.Method - case "host": - return ctx.r.Host - case "referer": - referer := ctx.r.Referer() - if referer != "" { - return referer - } - case "useragent": - return ctx.r.UserAgent() - case "id": - return ctx.id - case "startedat": - return ctx.startedAt - case "contenttype": - ct := ctx.r.Header.Get("Content-Type") - if ct != "" { - return ct - } - } - } - -fallback: - return ctx.Context.Value(key) -} - -type muxVarsContext struct { - Context - vars map[string]string -} - -func (ctx *muxVarsContext) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "vars" { - return ctx.vars - } - - if strings.HasPrefix(keyStr, "vars.") { - keyStr = strings.TrimPrefix(keyStr, "vars.") - } - - if v, ok := ctx.vars[keyStr]; ok { - return v - } - } - - return ctx.Context.Value(key) -} - -// instrumentedResponseWriterCN provides response writer information in a -// context. It implements http.CloseNotifier so that users can detect -// early disconnects. -type instrumentedResponseWriterCN struct { - instrumentedResponseWriter - http.CloseNotifier -} - -// instrumentedResponseWriter provides response writer information in a -// context. This variant is only used in the case where CloseNotifier is not -// implemented by the parent ResponseWriter. -type instrumentedResponseWriter struct { - http.ResponseWriter - Context - - mu sync.Mutex - status int - written int64 -} - -func (irw *instrumentedResponseWriter) Write(p []byte) (n int, err error) { - n, err = irw.ResponseWriter.Write(p) - - irw.mu.Lock() - irw.written += int64(n) - - // Guess the likely status if not set. 
- if irw.status == 0 { - irw.status = http.StatusOK - } - - irw.mu.Unlock() - - return -} - -func (irw *instrumentedResponseWriter) WriteHeader(status int) { - irw.ResponseWriter.WriteHeader(status) - - irw.mu.Lock() - irw.status = status - irw.mu.Unlock() -} - -func (irw *instrumentedResponseWriter) Flush() { - if flusher, ok := irw.ResponseWriter.(http.Flusher); ok { - flusher.Flush() - } -} - -func (irw *instrumentedResponseWriter) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw - } - - if !strings.HasPrefix(keyStr, "http.response.") { - goto fallback - } - - parts := strings.Split(keyStr, ".") - - if len(parts) != 3 { - goto fallback - } - - irw.mu.Lock() - defer irw.mu.Unlock() - - switch parts[2] { - case "written": - return irw.written - case "status": - return irw.status - case "contenttype": - contentType := irw.Header().Get("Content-Type") - if contentType != "" { - return contentType - } - } - } - -fallback: - return irw.Context.Value(key) -} - -func (irw *instrumentedResponseWriterCN) Value(key interface{}) interface{} { - if keyStr, ok := key.(string); ok { - if keyStr == "http.response" { - return irw - } - } - - return irw.instrumentedResponseWriter.Value(key) -} diff --git a/vendor/src/github.com/docker/distribution/context/logger.go b/vendor/src/github.com/docker/distribution/context/logger.go deleted file mode 100644 index fbb6a0511f..0000000000 --- a/vendor/src/github.com/docker/distribution/context/logger.go +++ /dev/null @@ -1,116 +0,0 @@ -package context - -import ( - "fmt" - - "github.com/Sirupsen/logrus" - "runtime" -) - -// Logger provides a leveled-logging interface. -type Logger interface { - // standard logger methods - Print(args ...interface{}) - Printf(format string, args ...interface{}) - Println(args ...interface{}) - - Fatal(args ...interface{}) - Fatalf(format string, args ...interface{}) - Fatalln(args ...interface{}) - - Panic(args ...interface{}) - Panicf(format string, args ...interface{}) - Panicln(args ...interface{}) - - // Leveled methods, from logrus - Debug(args ...interface{}) - Debugf(format string, args ...interface{}) - Debugln(args ...interface{}) - - Error(args ...interface{}) - Errorf(format string, args ...interface{}) - Errorln(args ...interface{}) - - Info(args ...interface{}) - Infof(format string, args ...interface{}) - Infoln(args ...interface{}) - - Warn(args ...interface{}) - Warnf(format string, args ...interface{}) - Warnln(args ...interface{}) -} - -// WithLogger creates a new context with provided logger. -func WithLogger(ctx Context, logger Logger) Context { - return WithValue(ctx, "logger", logger) -} - -// GetLoggerWithField returns a logger instance with the specified field key -// and value without affecting the context. Extra specified keys will be -// resolved from the context. -func GetLoggerWithField(ctx Context, key, value interface{}, keys ...interface{}) Logger { - return getLogrusLogger(ctx, keys...).WithField(fmt.Sprint(key), value) -} - -// GetLoggerWithFields returns a logger instance with the specified fields -// without affecting the context. Extra specified keys will be resolved from -// the context. -func GetLoggerWithFields(ctx Context, fields map[interface{}]interface{}, keys ...interface{}) Logger { - // must convert from interface{} -> interface{} to string -> interface{} for logrus. 
-	lfields := make(logrus.Fields, len(fields))
-	for key, value := range fields {
-		lfields[fmt.Sprint(key)] = value
-	}
-
-	return getLogrusLogger(ctx, keys...).WithFields(lfields)
-}
-
-// GetLogger returns the logger from the current context, if present. If one
-// or more keys are provided, they will be resolved on the context and
-// included in the logger. While context.Value takes an interface, any key
-// argument passed to GetLogger will be passed to fmt.Sprint when expanded as
-// a logging key field. If context keys are integer constants, for example,
-// it's recommended that a String method is implemented.
-func GetLogger(ctx Context, keys ...interface{}) Logger {
-	return getLogrusLogger(ctx, keys...)
-}
-
-// getLogrusLogger returns the logrus logger for the context. If one or more
-// keys are provided, they will be resolved on the context and included in the
-// logger. Only use this function if specific logrus functionality is
-// required.
-func getLogrusLogger(ctx Context, keys ...interface{}) *logrus.Entry {
-	var logger *logrus.Entry
-
-	// Get a logger, if it is present.
-	loggerInterface := ctx.Value("logger")
-	if loggerInterface != nil {
-		if lgr, ok := loggerInterface.(*logrus.Entry); ok {
-			logger = lgr
-		}
-	}
-
-	if logger == nil {
-		fields := logrus.Fields{}
-
-		// Fill in the instance id, if we have it.
-		instanceID := ctx.Value("instance.id")
-		if instanceID != nil {
-			fields["instance.id"] = instanceID
-		}
-
-		fields["go.version"] = runtime.Version()
-		// If no logger is found, just return the standard logger.
-		logger = logrus.StandardLogger().WithFields(fields)
-	}
-
-	fields := logrus.Fields{}
-	for _, key := range keys {
-		v := ctx.Value(key)
-		if v != nil {
-			fields[fmt.Sprint(key)] = v
-		}
-	}
-
-	return logger.WithFields(fields)
-}
diff --git a/vendor/src/github.com/docker/distribution/context/trace.go b/vendor/src/github.com/docker/distribution/context/trace.go
deleted file mode 100644
index 721964a848..0000000000
--- a/vendor/src/github.com/docker/distribution/context/trace.go
+++ /dev/null
@@ -1,104 +0,0 @@
-package context
-
-import (
-	"runtime"
-	"time"
-
-	"github.com/docker/distribution/uuid"
-)
-
-// WithTrace allocates a traced timing span in a new context. This allows a
-// caller to track the time between calling WithTrace and the returned done
-// function. When the done function is called, a log message is emitted with a
-// "trace.duration" field, corresponding to the elapsed time, and a
-// "trace.func" field, corresponding to the function that called WithTrace.
-//
-// The logging keys "trace.id" and "trace.parent.id" are provided to implement
-// Dapper-like tracing. This function should be complemented with a WithSpan
-// method that could be used for tracing distributed RPC calls.
-//
-// The main benefit of this function is to post-process log messages or
-// intercept them in a hook to provide timing data. Trace ids and parent ids
-// can also be linked to provide call tracing, if so required.
-//
-// Here is an example of the usage:
-//
-//	func timedOperation(ctx Context) {
-//		ctx, done := WithTrace(ctx)
-//		defer done("this will be the log message")
-//		// ... function body ...
-//	}
-//
-// If the function ran for roughly 1s, such a usage would emit a log message
-// as follows:
-//
-//	INFO[0001] this will be the log message  trace.duration=1.004575763s trace.func=github.com/docker/distribution/context.traceOperation trace.id= ...
-// -// Notice that the function name is automatically resolved, along with the -// package and a trace id is emitted that can be linked with parent ids. -func WithTrace(ctx Context) (Context, func(format string, a ...interface{})) { - if ctx == nil { - ctx = Background() - } - - pc, file, line, _ := runtime.Caller(1) - f := runtime.FuncForPC(pc) - ctx = &traced{ - Context: ctx, - id: uuid.Generate().String(), - start: time.Now(), - parent: GetStringValue(ctx, "trace.id"), - fnname: f.Name(), - file: file, - line: line, - } - - return ctx, func(format string, a ...interface{}) { - GetLogger(ctx, - "trace.duration", - "trace.id", - "trace.parent.id", - "trace.func", - "trace.file", - "trace.line"). - Debugf(format, a...) - } -} - -// traced represents a context that is traced for function call timing. It -// also provides fast lookup for the various attributes that are available on -// the trace. -type traced struct { - Context - id string - parent string - start time.Time - fnname string - file string - line int -} - -func (ts *traced) Value(key interface{}) interface{} { - switch key { - case "trace.start": - return ts.start - case "trace.duration": - return time.Since(ts.start) - case "trace.id": - return ts.id - case "trace.parent.id": - if ts.parent == "" { - return nil // must return nil to signal no parent. - } - - return ts.parent - case "trace.func": - return ts.fnname - case "trace.file": - return ts.file - case "trace.line": - return ts.line - } - - return ts.Context.Value(key) -} diff --git a/vendor/src/github.com/docker/distribution/context/util.go b/vendor/src/github.com/docker/distribution/context/util.go deleted file mode 100644 index cb9ef52e38..0000000000 --- a/vendor/src/github.com/docker/distribution/context/util.go +++ /dev/null @@ -1,24 +0,0 @@ -package context - -import ( - "time" -) - -// Since looks up key, which should be a time.Time, and returns the duration -// since that time. If the key is not found, the value returned will be zero. -// This is helpful when inferring metrics related to context execution times. -func Since(ctx Context, key interface{}) time.Duration { - if startedAt, ok := ctx.Value(key).(time.Time); ok { - return time.Since(startedAt) - } - return 0 -} - -// GetStringValue returns a string value from the context. The empty string -// will be returned if not found. -func GetStringValue(ctx Context, key interface{}) (value string) { - if valuev, ok := ctx.Value(key).(string); ok { - value = valuev - } - return value -} diff --git a/vendor/src/github.com/docker/distribution/context/version.go b/vendor/src/github.com/docker/distribution/context/version.go deleted file mode 100644 index 746cda02ec..0000000000 --- a/vendor/src/github.com/docker/distribution/context/version.go +++ /dev/null @@ -1,16 +0,0 @@ -package context - -// WithVersion stores the application version in the context. The new context -// gets a logger to ensure log messages are marked with the application -// version. -func WithVersion(ctx Context, version string) Context { - ctx = WithValue(ctx, "version", version) - // push a new logger onto the stack - return WithLogger(ctx, GetLogger(ctx, "version")) -} - -// GetVersion returns the application version from the context. An empty -// string may returned if the version was not set on the context. 
-func GetVersion(ctx Context) string { - return GetStringValue(ctx, "version") -} diff --git a/vendor/src/github.com/docker/distribution/coverpkg.sh b/vendor/src/github.com/docker/distribution/coverpkg.sh deleted file mode 100755 index 25d419ae82..0000000000 --- a/vendor/src/github.com/docker/distribution/coverpkg.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/usr/bin/env bash -# Given a subpackage and the containing package, figures out which packages -# need to be passed to `go test -coverpkg`: this includes all of the -# subpackage's dependencies within the containing package, as well as the -# subpackage itself. -DEPENDENCIES="$(go list -f $'{{range $f := .Deps}}{{$f}}\n{{end}}' ${1} | grep ${2} | grep -v github.com/docker/distribution/vendor)" -echo "${1} ${DEPENDENCIES}" | xargs echo -n | tr ' ' ',' diff --git a/vendor/src/github.com/docker/distribution/digest/digest.go b/vendor/src/github.com/docker/distribution/digest/digest.go deleted file mode 100644 index 31d821bba7..0000000000 --- a/vendor/src/github.com/docker/distribution/digest/digest.go +++ /dev/null @@ -1,139 +0,0 @@ -package digest - -import ( - "fmt" - "hash" - "io" - "regexp" - "strings" -) - -const ( - // DigestSha256EmptyTar is the canonical sha256 digest of empty data - DigestSha256EmptyTar = "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" -) - -// Digest allows simple protection of hex formatted digest strings, prefixed -// by their algorithm. Strings of type Digest have some guarantee of being in -// the correct format and it provides quick access to the components of a -// digest string. -// -// The following is an example of the contents of Digest types: -// -// sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc -// -// This allows to abstract the digest behind this type and work only in those -// terms. -type Digest string - -// NewDigest returns a Digest from alg and a hash.Hash object. -func NewDigest(alg Algorithm, h hash.Hash) Digest { - return NewDigestFromBytes(alg, h.Sum(nil)) -} - -// NewDigestFromBytes returns a new digest from the byte contents of p. -// Typically, this can come from hash.Hash.Sum(...) or xxx.SumXXX(...) -// functions. This is also useful for rebuilding digests from binary -// serializations. -func NewDigestFromBytes(alg Algorithm, p []byte) Digest { - return Digest(fmt.Sprintf("%s:%x", alg, p)) -} - -// NewDigestFromHex returns a Digest from alg and a the hex encoded digest. -func NewDigestFromHex(alg, hex string) Digest { - return Digest(fmt.Sprintf("%s:%s", alg, hex)) -} - -// DigestRegexp matches valid digest types. -var DigestRegexp = regexp.MustCompile(`[a-zA-Z0-9-_+.]+:[a-fA-F0-9]+`) - -// DigestRegexpAnchored matches valid digest types, anchored to the start and end of the match. -var DigestRegexpAnchored = regexp.MustCompile(`^` + DigestRegexp.String() + `$`) - -var ( - // ErrDigestInvalidFormat returned when digest format invalid. - ErrDigestInvalidFormat = fmt.Errorf("invalid checksum digest format") - - // ErrDigestInvalidLength returned when digest has invalid length. - ErrDigestInvalidLength = fmt.Errorf("invalid checksum digest length") - - // ErrDigestUnsupported returned when the digest algorithm is unsupported. - ErrDigestUnsupported = fmt.Errorf("unsupported digest algorithm") -) - -// ParseDigest parses s and returns the validated digest object. An error will -// be returned if the format is invalid. 
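The accessors that follow decompose a validated digest into its two halves; a
short sketch using the example digest from the comment above (ParseDigest is
the function defined just below):

    d, err := ParseDigest("sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc")
    if err != nil {
        // ErrDigestInvalidFormat, ErrDigestInvalidLength or ErrDigestUnsupported
    }
    d.Algorithm() // "sha256"
    d.Hex()       // "7173b809ca12..."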
-func ParseDigest(s string) (Digest, error) {
-	d := Digest(s)
-
-	return d, d.Validate()
-}
-
-// FromReader returns the most valid digest for the underlying content using
-// the canonical digest algorithm.
-func FromReader(rd io.Reader) (Digest, error) {
-	return Canonical.FromReader(rd)
-}
-
-// FromBytes digests the input and returns a Digest.
-func FromBytes(p []byte) Digest {
-	return Canonical.FromBytes(p)
-}
-
-// Validate checks that the contents of d form a valid digest, returning an
-// error if not.
-func (d Digest) Validate() error {
-	s := string(d)
-
-	if !DigestRegexpAnchored.MatchString(s) {
-		return ErrDigestInvalidFormat
-	}
-
-	i := strings.Index(s, ":")
-	if i < 0 {
-		return ErrDigestInvalidFormat
-	}
-
-	// case: "sha256:" with no hex.
-	if i+1 == len(s) {
-		return ErrDigestInvalidFormat
-	}
-
-	switch algorithm := Algorithm(s[:i]); algorithm {
-	case SHA256, SHA384, SHA512:
-		if algorithm.Size()*2 != len(s[i+1:]) {
-			return ErrDigestInvalidLength
-		}
-		break
-	default:
-		return ErrDigestUnsupported
-	}
-
-	return nil
-}
-
-// Algorithm returns the algorithm portion of the digest. This will panic if
-// the underlying digest is not in a valid format.
-func (d Digest) Algorithm() Algorithm {
-	return Algorithm(d[:d.sepIndex()])
-}
-
-// Hex returns the hex digest portion of the digest. This will panic if the
-// underlying digest is not in a valid format.
-func (d Digest) Hex() string {
-	return string(d[d.sepIndex()+1:])
-}
-
-func (d Digest) String() string {
-	return string(d)
-}
-
-func (d Digest) sepIndex() int {
-	i := strings.Index(string(d), ":")
-
-	if i < 0 {
-		panic("could not find ':' in digest: " + d)
-	}
-
-	return i
-}
diff --git a/vendor/src/github.com/docker/distribution/digest/digester.go b/vendor/src/github.com/docker/distribution/digest/digester.go
deleted file mode 100644
index f3105a45b6..0000000000
--- a/vendor/src/github.com/docker/distribution/digest/digester.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package digest
-
-import (
-	"crypto"
-	"fmt"
-	"hash"
-	"io"
-)
-
-// Algorithm identifies an implementation of a digester by an identifier.
-// Note that this defines both the hash algorithm used and the string
-// encoding.
-type Algorithm string
-
-// supported digest types
-const (
-	SHA256 Algorithm = "sha256" // sha256 with hex encoding
-	SHA384 Algorithm = "sha384" // sha384 with hex encoding
-	SHA512 Algorithm = "sha512" // sha512 with hex encoding
-
-	// Canonical is the primary digest algorithm used with the distribution
-	// project. Other digests may be used but this one is the primary storage
-	// digest.
-	Canonical = SHA256
-)
-
-var (
-	// TODO(stevvooe): Follow the pattern of the standard crypto package for
-	// registration of digests. Effectively, we are a registerable set and
-	// common symbol access.
-
-	// algorithms maps values to hash.Hash implementations. Other algorithms
-	// may be available but they cannot be calculated by the digest package.
-	algorithms = map[Algorithm]crypto.Hash{
-		SHA256: crypto.SHA256,
-		SHA384: crypto.SHA384,
-		SHA512: crypto.SHA512,
-	}
-)
-
-// Available returns true if the digest type is available for use. If this
-// returns false, New and Hash will return nil.
-func (a Algorithm) Available() bool {
-	h, ok := algorithms[a]
-	if !ok {
-		return false
-	}
-
-	// check availability of the hash, as well
-	return h.Available()
-}
-
-func (a Algorithm) String() string {
-	return string(a)
-}
-
-// Size returns the number of bytes returned by the hash.
-func (a Algorithm) Size() int { - h, ok := algorithms[a] - if !ok { - return 0 - } - return h.Size() -} - -// Set implemented to allow use of Algorithm as a command line flag. -func (a *Algorithm) Set(value string) error { - if value == "" { - *a = Canonical - } else { - // just do a type conversion, support is queried with Available. - *a = Algorithm(value) - } - - return nil -} - -// New returns a new digester for the specified algorithm. If the algorithm -// does not have a digester implementation, nil will be returned. This can be -// checked by calling Available before calling New. -func (a Algorithm) New() Digester { - return &digester{ - alg: a, - hash: a.Hash(), - } -} - -// Hash returns a new hash as used by the algorithm. If not available, the -// method will panic. Check Algorithm.Available() before calling. -func (a Algorithm) Hash() hash.Hash { - if !a.Available() { - // NOTE(stevvooe): A missing hash is usually a programming error that - // must be resolved at compile time. We don't import in the digest - // package to allow users to choose their hash implementation (such as - // when using stevvooe/resumable or a hardware accelerated package). - // - // Applications that may want to resolve the hash at runtime should - // call Algorithm.Available before call Algorithm.Hash(). - panic(fmt.Sprintf("%v not available (make sure it is imported)", a)) - } - - return algorithms[a].New() -} - -// FromReader returns the digest of the reader using the algorithm. -func (a Algorithm) FromReader(rd io.Reader) (Digest, error) { - digester := a.New() - - if _, err := io.Copy(digester.Hash(), rd); err != nil { - return "", err - } - - return digester.Digest(), nil -} - -// FromBytes digests the input and returns a Digest. -func (a Algorithm) FromBytes(p []byte) Digest { - digester := a.New() - - if _, err := digester.Hash().Write(p); err != nil { - // Writes to a Hash should never fail. None of the existing - // hash implementations in the stdlib or hashes vendored - // here can return errors from Write. Having a panic in this - // condition instead of having FromBytes return an error value - // avoids unnecessary error handling paths in all callers. - panic("write to hash function returned error: " + err.Error()) - } - - return digester.Digest() -} - -// TODO(stevvooe): Allow resolution of verifiers using the digest type and -// this registration system. - -// Digester calculates the digest of written data. Writes should go directly -// to the return value of Hash, while calling Digest will return the current -// value of the digest. -type Digester interface { - Hash() hash.Hash // provides direct access to underlying hash instance. - Digest() Digest -} - -// digester provides a simple digester definition that embeds a hasher. -type digester struct { - alg Algorithm - hash hash.Hash -} - -func (d *digester) Hash() hash.Hash { - return d.hash -} - -func (d *digester) Digest() Digest { - return NewDigest(d.alg, d.hash) -} diff --git a/vendor/src/github.com/docker/distribution/digest/doc.go b/vendor/src/github.com/docker/distribution/digest/doc.go deleted file mode 100644 index f64b0db32b..0000000000 --- a/vendor/src/github.com/docker/distribution/digest/doc.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package digest provides a generalized type to opaquely represent message -// digests and their operations within the registry. The Digest type is -// designed to serve as a flexible identifier in a content-addressable system. 
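For streaming content, the Digester returned by Algorithm.New (removed above)
accumulates writes until Digest is called; a minimal sketch, where rd is a
hypothetical io.Reader — this is essentially what Algorithm.FromReader does
internally:

    dgr := digest.Canonical.New() // sha256-backed digester
    if _, err := io.Copy(dgr.Hash(), rd); err != nil {
        return err
    }
    dgst := dgr.Digest() // e.g. "sha256:..."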
-// More importantly, it provides tools and wrappers to work with
-// hash.Hash-based digests with little effort.
-//
-// Basics
-//
-// The format of a digest is simply a string with two parts, dubbed the
-// "algorithm" and the "digest", separated by a colon:
-//
-//	<algorithm>:<digest>
-//
-// An example of a sha256 digest representation follows:
-//
-//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
-//
-// In this case, the string "sha256" is the algorithm and the hex bytes are
-// the "digest".
-//
-// Because the Digest type is simply a string, once a valid Digest is
-// obtained, comparisons are cheap, quick and simple to express with the
-// standard equality operator.
-//
-// Verification
-//
-// The main benefit of using the Digest type is simple verification against a
-// given digest. The Verifier interface, modeled after the stdlib hash.Hash
-// interface, provides a common write sink for digest verification. After
-// writing is complete, calling the Verifier.Verified method will indicate
-// whether or not the stream of bytes matches the target digest.
-//
-// Missing Features
-//
-// In addition to the above, we intend to add the following features to this
-// package:
-//
-// 1. A Digester type that supports write sink digest calculation.
-//
-// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
-//
-package digest
diff --git a/vendor/src/github.com/docker/distribution/digest/set.go b/vendor/src/github.com/docker/distribution/digest/set.go
deleted file mode 100644
index 4b9313c1ae..0000000000
--- a/vendor/src/github.com/docker/distribution/digest/set.go
+++ /dev/null
@@ -1,245 +0,0 @@
-package digest
-
-import (
-	"errors"
-	"sort"
-	"strings"
-	"sync"
-)
-
-var (
-	// ErrDigestNotFound is used when a matching digest
-	// could not be found in a set.
-	ErrDigestNotFound = errors.New("digest not found")
-
-	// ErrDigestAmbiguous is used when multiple digests
-	// are found in a set. None of the matching digests
-	// should be considered valid matches.
-	ErrDigestAmbiguous = errors.New("ambiguous digest string")
-)
-
-// Set is used to hold a unique set of digests which
-// may be easily referenced by a string
-// representation of the digest as well as a short representation.
-// The uniqueness of the short representation is based on other
-// digests in the set. If digests are omitted from this set,
-// collisions in a larger set may not be detected, therefore it
-// is important to always do short representation lookups on
-// the complete set of digests. To mitigate collisions, an
-// appropriately long short code should be used.
-type Set struct {
-	mutex   sync.RWMutex
-	entries digestEntries
-}
-
-// NewSet creates an empty set of digests
-// which may have digests added.
-func NewSet() *Set {
-	return &Set{
-		entries: digestEntries{},
-	}
-}
-
-// checkShortMatch checks whether two digests match as either whole
-// values or short values. This function does not test equality,
-// rather whether the second value could match against the first
-// value.
-func checkShortMatch(alg Algorithm, hex, shortAlg, shortHex string) bool { - if len(hex) == len(shortHex) { - if hex != shortHex { - return false - } - if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - } else if !strings.HasPrefix(hex, shortHex) { - return false - } else if len(shortAlg) > 0 && string(alg) != shortAlg { - return false - } - return true -} - -// Lookup looks for a digest matching the given string representation. -// If no digests could be found ErrDigestNotFound will be returned -// with an empty digest value. If multiple matches are found -// ErrDigestAmbiguous will be returned with an empty digest value. -func (dst *Set) Lookup(d string) (Digest, error) { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - if len(dst.entries) == 0 { - return "", ErrDigestNotFound - } - var ( - searchFunc func(int) bool - alg Algorithm - hex string - ) - dgst, err := ParseDigest(d) - if err == ErrDigestInvalidFormat { - hex = d - searchFunc = func(i int) bool { - return dst.entries[i].val >= d - } - } else { - hex = dgst.Hex() - alg = dgst.Algorithm() - searchFunc = func(i int) bool { - if dst.entries[i].val == hex { - return dst.entries[i].alg >= alg - } - return dst.entries[i].val >= hex - } - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) || !checkShortMatch(dst.entries[idx].alg, dst.entries[idx].val, string(alg), hex) { - return "", ErrDigestNotFound - } - if dst.entries[idx].alg == alg && dst.entries[idx].val == hex { - return dst.entries[idx].digest, nil - } - if idx+1 < len(dst.entries) && checkShortMatch(dst.entries[idx+1].alg, dst.entries[idx+1].val, string(alg), hex) { - return "", ErrDigestAmbiguous - } - - return dst.entries[idx].digest, nil -} - -// Add adds the given digest to the set. An error will be returned -// if the given digest is invalid. If the digest already exists in the -// set, this operation will be a no-op. -func (dst *Set) Add(d Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - if idx == len(dst.entries) { - dst.entries = append(dst.entries, entry) - return nil - } else if dst.entries[idx].digest == d { - return nil - } - - entries := append(dst.entries, nil) - copy(entries[idx+1:], entries[idx:len(entries)-1]) - entries[idx] = entry - dst.entries = entries - return nil -} - -// Remove removes the given digest from the set. An err will be -// returned if the given digest is invalid. If the digest does -// not exist in the set, this operation will be a no-op. 
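Lookup above accepts a full digest string, a full hex value, or any
unambiguous hex prefix; a sketch of the resulting behavior (digest value
illustrative):

    s := NewSet()
    _ = s.Add(Digest("sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc"))

    d, err := s.Lookup("7173b8") // resolves: the prefix is unique in the set
    _, err = s.Lookup("beef")    // ErrDigestNotFound
    // Adding a second digest sharing the "7173b8" prefix would make the
    // first lookup fail with ErrDigestAmbiguous instead.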
-func (dst *Set) Remove(d Digest) error { - if err := d.Validate(); err != nil { - return err - } - dst.mutex.Lock() - defer dst.mutex.Unlock() - entry := &digestEntry{alg: d.Algorithm(), val: d.Hex(), digest: d} - searchFunc := func(i int) bool { - if dst.entries[i].val == entry.val { - return dst.entries[i].alg >= entry.alg - } - return dst.entries[i].val >= entry.val - } - idx := sort.Search(len(dst.entries), searchFunc) - // Not found if idx is after or value at idx is not digest - if idx == len(dst.entries) || dst.entries[idx].digest != d { - return nil - } - - entries := dst.entries - copy(entries[idx:], entries[idx+1:]) - entries = entries[:len(entries)-1] - dst.entries = entries - - return nil -} - -// All returns all the digests in the set -func (dst *Set) All() []Digest { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - retValues := make([]Digest, len(dst.entries)) - for i := range dst.entries { - retValues[i] = dst.entries[i].digest - } - - return retValues -} - -// ShortCodeTable returns a map of Digest to unique short codes. The -// length represents the minimum value, the maximum length may be the -// entire value of digest if uniqueness cannot be achieved without the -// full value. This function will attempt to make short codes as short -// as possible to be unique. -func ShortCodeTable(dst *Set, length int) map[Digest]string { - dst.mutex.RLock() - defer dst.mutex.RUnlock() - m := make(map[Digest]string, len(dst.entries)) - l := length - resetIdx := 0 - for i := 0; i < len(dst.entries); i++ { - var short string - extended := true - for extended { - extended = false - if len(dst.entries[i].val) <= l { - short = dst.entries[i].digest.String() - } else { - short = dst.entries[i].val[:l] - for j := i + 1; j < len(dst.entries); j++ { - if checkShortMatch(dst.entries[j].alg, dst.entries[j].val, "", short) { - if j > resetIdx { - resetIdx = j - } - extended = true - } else { - break - } - } - if extended { - l++ - } - } - } - m[dst.entries[i].digest] = short - if i >= resetIdx { - l = length - } - } - return m -} - -type digestEntry struct { - alg Algorithm - val string - digest Digest -} - -type digestEntries []*digestEntry - -func (d digestEntries) Len() int { - return len(d) -} - -func (d digestEntries) Less(i, j int) bool { - if d[i].val != d[j].val { - return d[i].val < d[j].val - } - return d[i].alg < d[j].alg -} - -func (d digestEntries) Swap(i, j int) { - d[i], d[j] = d[j], d[i] -} diff --git a/vendor/src/github.com/docker/distribution/digest/verifiers.go b/vendor/src/github.com/docker/distribution/digest/verifiers.go deleted file mode 100644 index 9af3be1341..0000000000 --- a/vendor/src/github.com/docker/distribution/digest/verifiers.go +++ /dev/null @@ -1,44 +0,0 @@ -package digest - -import ( - "hash" - "io" -) - -// Verifier presents a general verification interface to be used with message -// digests and other byte stream verifications. Users instantiate a Verifier -// from one of the various methods, write the data under test to it then check -// the result with the Verified method. -type Verifier interface { - io.Writer - - // Verified will return true if the content written to Verifier matches - // the digest. - Verified() bool -} - -// NewDigestVerifier returns a verifier that compares the written bytes -// against a passed in digest. 
-func NewDigestVerifier(d Digest) (Verifier, error) { - if err := d.Validate(); err != nil { - return nil, err - } - - return hashVerifier{ - hash: d.Algorithm().Hash(), - digest: d, - }, nil -} - -type hashVerifier struct { - digest Digest - hash hash.Hash -} - -func (hv hashVerifier) Write(p []byte) (n int, err error) { - return hv.hash.Write(p) -} - -func (hv hashVerifier) Verified() bool { - return hv.digest == NewDigest(hv.digest.Algorithm(), hv.hash) -} diff --git a/vendor/src/github.com/docker/distribution/doc.go b/vendor/src/github.com/docker/distribution/doc.go deleted file mode 100644 index bdd8cb708e..0000000000 --- a/vendor/src/github.com/docker/distribution/doc.go +++ /dev/null @@ -1,7 +0,0 @@ -// Package distribution will define the interfaces for the components of -// docker distribution. The goal is to allow users to reliably package, ship -// and store content related to docker images. -// -// This is currently a work in progress. More details are available in the -// README.md. -package distribution diff --git a/vendor/src/github.com/docker/distribution/errors.go b/vendor/src/github.com/docker/distribution/errors.go deleted file mode 100644 index c20f28113c..0000000000 --- a/vendor/src/github.com/docker/distribution/errors.go +++ /dev/null @@ -1,115 +0,0 @@ -package distribution - -import ( - "errors" - "fmt" - "strings" - - "github.com/docker/distribution/digest" -) - -// ErrAccessDenied is returned when an access to a requested resource is -// denied. -var ErrAccessDenied = errors.New("access denied") - -// ErrManifestNotModified is returned when a conditional manifest GetByTag -// returns nil due to the client indicating it has the latest version -var ErrManifestNotModified = errors.New("manifest not modified") - -// ErrUnsupported is returned when an unimplemented or unsupported action is -// performed -var ErrUnsupported = errors.New("operation unsupported") - -// ErrTagUnknown is returned if the given tag is not known by the tag service -type ErrTagUnknown struct { - Tag string -} - -func (err ErrTagUnknown) Error() string { - return fmt.Sprintf("unknown tag=%s", err.Tag) -} - -// ErrRepositoryUnknown is returned if the named repository is not known by -// the registry. -type ErrRepositoryUnknown struct { - Name string -} - -func (err ErrRepositoryUnknown) Error() string { - return fmt.Sprintf("unknown repository name=%s", err.Name) -} - -// ErrRepositoryNameInvalid should be used to denote an invalid repository -// name. Reason may set, indicating the cause of invalidity. -type ErrRepositoryNameInvalid struct { - Name string - Reason error -} - -func (err ErrRepositoryNameInvalid) Error() string { - return fmt.Sprintf("repository name %q invalid: %v", err.Name, err.Reason) -} - -// ErrManifestUnknown is returned if the manifest is not known by the -// registry. -type ErrManifestUnknown struct { - Name string - Tag string -} - -func (err ErrManifestUnknown) Error() string { - return fmt.Sprintf("unknown manifest name=%s tag=%s", err.Name, err.Tag) -} - -// ErrManifestUnknownRevision is returned when a manifest cannot be found by -// revision within a repository. -type ErrManifestUnknownRevision struct { - Name string - Revision digest.Digest -} - -func (err ErrManifestUnknownRevision) Error() string { - return fmt.Sprintf("unknown manifest name=%s revision=%s", err.Name, err.Revision) -} - -// ErrManifestUnverified is returned when the registry is unable to verify -// the manifest. 
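Typical use of the Verifier removed above is to copy the candidate bytes
through it and then consult Verified; a sketch, where expected is a
digest.Digest and rd a hypothetical io.Reader supplying the content:

    v, err := digest.NewDigestVerifier(expected)
    if err != nil {
        return err // the expected digest itself failed validation
    }
    if _, err := io.Copy(v, rd); err != nil {
        return err
    }
    if !v.Verified() {
        return fmt.Errorf("content does not match %s", expected)
    }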
-type ErrManifestUnverified struct{}
-
-func (ErrManifestUnverified) Error() string {
-	return fmt.Sprintf("unverified manifest")
-}
-
-// ErrManifestVerification provides a type to collect errors encountered
-// during manifest verification. Currently, it accepts errors of all types,
-// but it may be narrowed to those involving manifest verification.
-type ErrManifestVerification []error
-
-func (errs ErrManifestVerification) Error() string {
-	var parts []string
-	for _, err := range errs {
-		parts = append(parts, err.Error())
-	}
-
-	return fmt.Sprintf("errors verifying manifest: %v", strings.Join(parts, ","))
-}
-
-// ErrManifestBlobUnknown returned when a referenced blob cannot be found.
-type ErrManifestBlobUnknown struct {
-	Digest digest.Digest
-}
-
-func (err ErrManifestBlobUnknown) Error() string {
-	return fmt.Sprintf("unknown blob %v on manifest", err.Digest)
-}
-
-// ErrManifestNameInvalid should be used to denote an invalid manifest
-// name. Reason may be set, indicating the cause of invalidity.
-type ErrManifestNameInvalid struct {
-	Name   string
-	Reason error
-}
-
-func (err ErrManifestNameInvalid) Error() string {
-	return fmt.Sprintf("manifest name %q invalid: %v", err.Name, err.Reason)
-}
diff --git a/vendor/src/github.com/docker/distribution/manifest/doc.go b/vendor/src/github.com/docker/distribution/manifest/doc.go
deleted file mode 100644
index 88367b0a05..0000000000
--- a/vendor/src/github.com/docker/distribution/manifest/doc.go
+++ /dev/null
@@ -1 +0,0 @@
-package manifest
diff --git a/vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go b/vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go
deleted file mode 100644
index a2082ec02f..0000000000
--- a/vendor/src/github.com/docker/distribution/manifest/manifestlist/manifestlist.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package manifestlist
-
-import (
-	"encoding/json"
-	"errors"
-	"fmt"
-
-	"github.com/docker/distribution"
-	"github.com/docker/distribution/digest"
-	"github.com/docker/distribution/manifest"
-)
-
-// MediaTypeManifestList specifies the mediaType for manifest lists.
-const MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
-
-// SchemaVersion provides a pre-initialized version structure for this
-// package's version of the manifest.
-var SchemaVersion = manifest.Versioned{
-	SchemaVersion: 2,
-	MediaType:     MediaTypeManifestList,
-}
-
-func init() {
-	manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
-		m := new(DeserializedManifestList)
-		err := m.UnmarshalJSON(b)
-		if err != nil {
-			return nil, distribution.Descriptor{}, err
-		}
-
-		dgst := digest.FromBytes(b)
-		return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err
-	}
-	err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)
-	if err != nil {
-		panic(fmt.Sprintf("Unable to register manifest: %s", err))
-	}
-}
-
-// PlatformSpec specifies a platform where a particular image manifest is
-// applicable.
-type PlatformSpec struct {
-	// Architecture field specifies the CPU architecture, for example
-	// `amd64` or `ppc64`.
-	Architecture string `json:"architecture"`
-
-	// OS specifies the operating system, for example `linux` or `windows`.
-	OS string `json:"os"`
-
-	// OSVersion is an optional field specifying the operating system
-	// version, for example `10.0.10586`.
-	OSVersion string `json:"os.version,omitempty"`
-
-	// OSFeatures is an optional field specifying an array of strings,
-	// each listing a required OS feature (for example on Windows `win32k`).
-	OSFeatures []string `json:"os.features,omitempty"`
-
-	// Variant is an optional field specifying a variant of the CPU, for
-	// example `ppc64le` to specify a little-endian version of a PowerPC CPU.
-	Variant string `json:"variant,omitempty"`
-
-	// Features is an optional field specifying an array of strings, each
-	// listing a required CPU feature (for example `sse4` or `aes`).
-	Features []string `json:"features,omitempty"`
-}
-
-// A ManifestDescriptor references a platform-specific manifest.
-type ManifestDescriptor struct {
-	distribution.Descriptor
-
-	// Platform specifies which platform the manifest pointed to by the
-	// descriptor runs on.
-	Platform PlatformSpec `json:"platform"`
-}
-
-// ManifestList references manifests for various platforms.
-type ManifestList struct {
-	manifest.Versioned
-
-	// Manifests lists the descriptors of the platform-specific manifests
-	// referenced by this manifest list.
-	Manifests []ManifestDescriptor `json:"manifests"`
-}
-
-// References returns the distribution descriptors for the referenced image
-// manifests.
-func (m ManifestList) References() []distribution.Descriptor {
-	dependencies := make([]distribution.Descriptor, len(m.Manifests))
-	for i := range m.Manifests {
-		dependencies[i] = m.Manifests[i].Descriptor
-	}
-
-	return dependencies
-}
-
-// DeserializedManifestList wraps ManifestList with a copy of the original
-// JSON.
-type DeserializedManifestList struct {
-	ManifestList
-
-	// canonical is the canonical byte representation of the Manifest.
-	canonical []byte
-}
-
-// FromDescriptors takes a slice of descriptors and returns a
-// DeserializedManifestList which contains the resulting manifest list
-// and its JSON representation.
-func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {
-	m := ManifestList{
-		Versioned: SchemaVersion,
-	}
-
-	m.Manifests = make([]ManifestDescriptor, len(descriptors), len(descriptors))
-	copy(m.Manifests, descriptors)
-
-	deserialized := DeserializedManifestList{
-		ManifestList: m,
-	}
-
-	var err error
-	deserialized.canonical, err = json.MarshalIndent(&m, "", "   ")
-	return &deserialized, err
-}
-
-// UnmarshalJSON populates a new ManifestList struct from JSON data.
-func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {
-	m.canonical = make([]byte, len(b), len(b))
-	// store manifest list in canonical
-	copy(m.canonical, b)
-
-	// Unmarshal canonical JSON into ManifestList object
-	var manifestList ManifestList
-	if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
-		return err
-	}
-
-	m.ManifestList = manifestList
-
-	return nil
-}
-
-// MarshalJSON returns the contents of canonical. If canonical is empty,
-// marshals the inner contents.
-func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {
-	if len(m.canonical) > 0 {
-		return m.canonical, nil
-	}
-
-	return nil, errors.New("JSON representation not initialized in DeserializedManifestList")
-}
-
-// Payload returns the raw content of the manifest list. The contents can be
-// used to calculate the content identifier.
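Because FromDescriptors above captures the canonical JSON at construction
time, later Payload and MarshalJSON calls return byte-identical content,
which keeps the list's digest stable; a sketch with placeholder values:

    descs := []manifestlist.ManifestDescriptor{{
        Descriptor: distribution.Descriptor{
            Digest:    "sha256:...", // placeholder digest of a per-platform manifest
            Size:      7143,
            MediaType: "application/vnd.docker.distribution.manifest.v2+json",
        },
        Platform: manifestlist.PlatformSpec{Architecture: "amd64", OS: "linux"},
    }}
    ml, err := manifestlist.FromDescriptors(descs)
    // mediaType, canonical, _ := ml.Payload() — the bytes digested for the list's ID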
-func (m DeserializedManifestList) Payload() (string, []byte, error) { - return m.MediaType, m.canonical, nil -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go b/vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go deleted file mode 100644 index 5cdd76796c..0000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/config_builder.go +++ /dev/null @@ -1,283 +0,0 @@ -package schema1 - -import ( - "crypto/sha512" - "encoding/json" - "errors" - "fmt" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" - "github.com/docker/libtrust" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -type diffID digest.Digest - -// gzippedEmptyTar is a gzip-compressed version of an empty tar file -// (1024 NULL bytes) -var gzippedEmptyTar = []byte{ - 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88, - 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0, -} - -// digestSHA256GzippedEmptyTar is the canonical sha256 digest of -// gzippedEmptyTar -const digestSHA256GzippedEmptyTar = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4") - -// configManifestBuilder is a type for constructing manifests from an image -// configuration and generic descriptors. -type configManifestBuilder struct { - // bs is a BlobService used to create empty layer tars in the - // blob store if necessary. - bs distribution.BlobService - // pk is the libtrust private key used to sign the final manifest. - pk libtrust.PrivateKey - // configJSON is configuration supplied when the ManifestBuilder was - // created. - configJSON []byte - // ref contains the name and optional tag provided to NewConfigManifestBuilder. - ref reference.Named - // descriptors is the set of descriptors referencing the layers. - descriptors []distribution.Descriptor - // emptyTarDigest is set to a valid digest if an empty tar has been - // put in the blob store; otherwise it is empty. - emptyTarDigest digest.Digest -} - -// NewConfigManifestBuilder is used to build new manifests for the current -// schema version from an image configuration and a set of descriptors. -// It takes a BlobService so that it can add an empty tar to the blob store -// if the resulting manifest needs empty layers. 
-func NewConfigManifestBuilder(bs distribution.BlobService, pk libtrust.PrivateKey, ref reference.Named, configJSON []byte) distribution.ManifestBuilder { - return &configManifestBuilder{ - bs: bs, - pk: pk, - configJSON: configJSON, - ref: ref, - } -} - -// Build produces a final manifest from the given references -func (mb *configManifestBuilder) Build(ctx context.Context) (m distribution.Manifest, err error) { - type imageRootFS struct { - Type string `json:"type"` - DiffIDs []diffID `json:"diff_ids,omitempty"` - BaseLayer string `json:"base_layer,omitempty"` - } - - type imageHistory struct { - Created time.Time `json:"created"` - Author string `json:"author,omitempty"` - CreatedBy string `json:"created_by,omitempty"` - Comment string `json:"comment,omitempty"` - EmptyLayer bool `json:"empty_layer,omitempty"` - } - - type imageConfig struct { - RootFS *imageRootFS `json:"rootfs,omitempty"` - History []imageHistory `json:"history,omitempty"` - Architecture string `json:"architecture,omitempty"` - } - - var img imageConfig - - if err := json.Unmarshal(mb.configJSON, &img); err != nil { - return nil, err - } - - if len(img.History) == 0 { - return nil, errors.New("empty history when trying to create schema1 manifest") - } - - if len(img.RootFS.DiffIDs) != len(mb.descriptors) { - return nil, errors.New("number of descriptors and number of layers in rootfs must match") - } - - // Generate IDs for each layer - // For non-top-level layers, create fake V1Compatibility strings that - // fit the format and don't collide with anything else, but don't - // result in runnable images on their own. - type v1Compatibility struct { - ID string `json:"id"` - Parent string `json:"parent,omitempty"` - Comment string `json:"comment,omitempty"` - Created time.Time `json:"created"` - ContainerConfig struct { - Cmd []string - } `json:"container_config,omitempty"` - Author string `json:"author,omitempty"` - ThrowAway bool `json:"throwaway,omitempty"` - } - - fsLayerList := make([]FSLayer, len(img.History)) - history := make([]History, len(img.History)) - - parent := "" - layerCounter := 0 - for i, h := range img.History[:len(img.History)-1] { - var blobsum digest.Digest - if h.EmptyLayer { - if blobsum, err = mb.emptyTar(ctx); err != nil { - return nil, err - } - } else { - if len(img.RootFS.DiffIDs) <= layerCounter { - return nil, errors.New("too many non-empty layers in History section") - } - blobsum = mb.descriptors[layerCounter].Digest - layerCounter++ - } - - v1ID := digest.FromBytes([]byte(blobsum.Hex() + " " + parent)).Hex() - - if i == 0 && img.RootFS.BaseLayer != "" { - // windows-only baselayer setup - baseID := sha512.Sum384([]byte(img.RootFS.BaseLayer)) - parent = fmt.Sprintf("%x", baseID[:32]) - } - - v1Compatibility := v1Compatibility{ - ID: v1ID, - Parent: parent, - Comment: h.Comment, - Created: h.Created, - Author: h.Author, - } - v1Compatibility.ContainerConfig.Cmd = []string{img.History[i].CreatedBy} - if h.EmptyLayer { - v1Compatibility.ThrowAway = true - } - jsonBytes, err := json.Marshal(&v1Compatibility) - if err != nil { - return nil, err - } - - reversedIndex := len(img.History) - i - 1 - history[reversedIndex].V1Compatibility = string(jsonBytes) - fsLayerList[reversedIndex] = FSLayer{BlobSum: blobsum} - - parent = v1ID - } - - latestHistory := img.History[len(img.History)-1] - - var blobsum digest.Digest - if latestHistory.EmptyLayer { - if blobsum, err = mb.emptyTar(ctx); err != nil { - return nil, err - } - } else { - if len(img.RootFS.DiffIDs) <= layerCounter { - return nil, 
errors.New("too many non-empty layers in History section") - } - blobsum = mb.descriptors[layerCounter].Digest - } - - fsLayerList[0] = FSLayer{BlobSum: blobsum} - dgst := digest.FromBytes([]byte(blobsum.Hex() + " " + parent + " " + string(mb.configJSON))) - - // Top-level v1compatibility string should be a modified version of the - // image config. - transformedConfig, err := MakeV1ConfigFromConfig(mb.configJSON, dgst.Hex(), parent, latestHistory.EmptyLayer) - if err != nil { - return nil, err - } - - history[0].V1Compatibility = string(transformedConfig) - - tag := "" - if tagged, isTagged := mb.ref.(reference.Tagged); isTagged { - tag = tagged.Tag() - } - - mfst := Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: mb.ref.Name(), - Tag: tag, - Architecture: img.Architecture, - FSLayers: fsLayerList, - History: history, - } - - return Sign(&mfst, mb.pk) -} - -// emptyTar pushes a compressed empty tar to the blob store if one doesn't -// already exist, and returns its blobsum. -func (mb *configManifestBuilder) emptyTar(ctx context.Context) (digest.Digest, error) { - if mb.emptyTarDigest != "" { - // Already put an empty tar - return mb.emptyTarDigest, nil - } - - descriptor, err := mb.bs.Stat(ctx, digestSHA256GzippedEmptyTar) - switch err { - case nil: - mb.emptyTarDigest = descriptor.Digest - return descriptor.Digest, nil - case distribution.ErrBlobUnknown: - // nop - default: - return "", err - } - - // Add gzipped empty tar to the blob store - descriptor, err = mb.bs.Put(ctx, "", gzippedEmptyTar) - if err != nil { - return "", err - } - - mb.emptyTarDigest = descriptor.Digest - - return descriptor.Digest, nil -} - -// AppendReference adds a reference to the current ManifestBuilder -func (mb *configManifestBuilder) AppendReference(d distribution.Describable) error { - // todo: verification here? - mb.descriptors = append(mb.descriptors, d.Descriptor()) - return nil -} - -// References returns the current references added to this builder -func (mb *configManifestBuilder) References() []distribution.Descriptor { - return mb.descriptors -} - -// MakeV1ConfigFromConfig creates an legacy V1 image config from image config JSON -func MakeV1ConfigFromConfig(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) { - // Top-level v1compatibility string should be a modified version of the - // image config. 
- var configAsMap map[string]*json.RawMessage - if err := json.Unmarshal(configJSON, &configAsMap); err != nil { - return nil, err - } - - // Delete fields that didn't exist in old manifest - delete(configAsMap, "rootfs") - delete(configAsMap, "history") - configAsMap["id"] = rawJSON(v1ID) - if parentV1ID != "" { - configAsMap["parent"] = rawJSON(parentV1ID) - } - if throwaway { - configAsMap["throwaway"] = rawJSON(true) - } - - return json.Marshal(configAsMap) -} - -func rawJSON(value interface{}) *json.RawMessage { - jsonval, err := json.Marshal(value) - if err != nil { - return nil - } - return (*json.RawMessage)(&jsonval) -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go b/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go deleted file mode 100644 index bff47bde05..0000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/manifest.go +++ /dev/null @@ -1,184 +0,0 @@ -package schema1 - -import ( - "encoding/json" - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/libtrust" -) - -const ( - // MediaTypeManifest specifies the mediaType for the current version. Note - // that for schema version 1, the media is optionally "application/json". - MediaTypeManifest = "application/vnd.docker.distribution.manifest.v1+json" - // MediaTypeSignedManifest specifies the mediatype for the current SignedManifest version - MediaTypeSignedManifest = "application/vnd.docker.distribution.manifest.v1+prettyjws" - // MediaTypeManifestLayer specifies the media type for manifest layers - MediaTypeManifestLayer = "application/vnd.docker.container.image.rootfs.diff+x-gtar" -) - -var ( - // SchemaVersion provides a pre-initialized version structure for this - // package's version of the manifest. - SchemaVersion = manifest.Versioned{ - SchemaVersion: 1, - } -) - -func init() { - schema1Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { - sm := new(SignedManifest) - err := sm.UnmarshalJSON(b) - if err != nil { - return nil, distribution.Descriptor{}, err - } - - desc := distribution.Descriptor{ - Digest: digest.FromBytes(sm.Canonical), - Size: int64(len(sm.Canonical)), - MediaType: MediaTypeSignedManifest, - } - return sm, desc, err - } - err := distribution.RegisterManifestSchema(MediaTypeSignedManifest, schema1Func) - if err != nil { - panic(fmt.Sprintf("Unable to register manifest: %s", err)) - } - err = distribution.RegisterManifestSchema("", schema1Func) - if err != nil { - panic(fmt.Sprintf("Unable to register manifest: %s", err)) - } - err = distribution.RegisterManifestSchema("application/json", schema1Func) - if err != nil { - panic(fmt.Sprintf("Unable to register manifest: %s", err)) - } -} - -// FSLayer is a container struct for BlobSums defined in an image manifest -type FSLayer struct { - // BlobSum is the tarsum of the referenced filesystem image layer - BlobSum digest.Digest `json:"blobSum"` -} - -// History stores unstructured v1 compatibility information -type History struct { - // V1Compatibility is the raw v1 compatibility information - V1Compatibility string `json:"v1Compatibility"` -} - -// Manifest provides the base accessible fields for working with V2 image -// format in the registry.
-type Manifest struct { - manifest.Versioned - - // Name is the name of the image's repository - Name string `json:"name"` - - // Tag is the tag of the image specified by this manifest - Tag string `json:"tag"` - - // Architecture is the host architecture on which this image is intended to - // run - Architecture string `json:"architecture"` - - // FSLayers is a list of filesystem layer blobSums contained in this image - FSLayers []FSLayer `json:"fsLayers"` - - // History is a list of unstructured historical data for v1 compatibility - History []History `json:"history"` -} - -// SignedManifest provides an envelope for a signed image manifest, including -// the format sensitive raw bytes. -type SignedManifest struct { - Manifest - - // Canonical is the canonical byte representation of the ImageManifest, - // without any attached signatures. The manifest byte - // representation cannot change or it will have to be re-signed. - Canonical []byte `json:"-"` - - // all contains the byte representation of the Manifest including signatures - // and is returned by Payload() - all []byte -} - -// UnmarshalJSON populates a new SignedManifest struct from JSON data. -func (sm *SignedManifest) UnmarshalJSON(b []byte) error { - sm.all = make([]byte, len(b), len(b)) - // store manifest and signatures in all - copy(sm.all, b) - - jsig, err := libtrust.ParsePrettySignature(b, "signatures") - if err != nil { - return err - } - - // Resolve the payload in the manifest. - bytes, err := jsig.Payload() - if err != nil { - return err - } - - // sm.Canonical stores the canonical manifest JSON - sm.Canonical = make([]byte, len(bytes), len(bytes)) - copy(sm.Canonical, bytes) - - // Unmarshal canonical JSON into Manifest object - var manifest Manifest - if err := json.Unmarshal(sm.Canonical, &manifest); err != nil { - return err - } - - sm.Manifest = manifest - - return nil -} - -// References returns the descriptors of this manifest's references -func (sm SignedManifest) References() []distribution.Descriptor { - dependencies := make([]distribution.Descriptor, len(sm.FSLayers)) - for i, fsLayer := range sm.FSLayers { - dependencies[i] = distribution.Descriptor{ - MediaType: "application/vnd.docker.container.image.rootfs.diff+x-gtar", - Digest: fsLayer.BlobSum, - } - } - - return dependencies - -} - -// MarshalJSON returns the contents of all. If all is empty, marshals the inner -// contents. Applications requiring a marshaled signed manifest should use -// Payload instead, since the content produced by json.Marshal will be -// compacted and will fail signature checks. -func (sm *SignedManifest) MarshalJSON() ([]byte, error) { - if len(sm.all) > 0 { - return sm.all, nil - } - - // If the raw data is not available, just dump the inner content. - return json.Marshal(&sm.Manifest) -} - -// Payload returns the signed content of the signed manifest. -func (sm SignedManifest) Payload() (string, []byte, error) { - return MediaTypeSignedManifest, sm.all, nil -} - -// Signatures returns the signatures as provided by -// (*libtrust.JSONSignature).Signatures. The byte slices are opaque jws -// signatures. -func (sm *SignedManifest) Signatures() ([][]byte, error) { - jsig, err := libtrust.ParsePrettySignature(sm.all, "signatures") - if err != nil { - return nil, err - } - - // Resolve the payload in the manifest.
- return jsig.Signatures() -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go b/vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go deleted file mode 100644 index fc1045f9ea..0000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/reference_builder.go +++ /dev/null @@ -1,98 +0,0 @@ -package schema1 - -import ( - "fmt" - - "errors" - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" - "github.com/docker/distribution/reference" - "github.com/docker/libtrust" -) - -// referenceManifestBuilder is a type for constructing manifests from schema1 -// dependencies. -type referenceManifestBuilder struct { - Manifest - pk libtrust.PrivateKey -} - -// NewReferenceManifestBuilder is used to build new manifests for the current -// schema version using schema1 dependencies. -func NewReferenceManifestBuilder(pk libtrust.PrivateKey, ref reference.Named, architecture string) distribution.ManifestBuilder { - tag := "" - if tagged, isTagged := ref.(reference.Tagged); isTagged { - tag = tagged.Tag() - } - - return &referenceManifestBuilder{ - Manifest: Manifest{ - Versioned: manifest.Versioned{ - SchemaVersion: 1, - }, - Name: ref.Name(), - Tag: tag, - Architecture: architecture, - }, - pk: pk, - } -} - -func (mb *referenceManifestBuilder) Build(ctx context.Context) (distribution.Manifest, error) { - m := mb.Manifest - if len(m.FSLayers) == 0 { - return nil, errors.New("cannot build manifest with zero layers or history") - } - - m.FSLayers = make([]FSLayer, len(mb.Manifest.FSLayers)) - m.History = make([]History, len(mb.Manifest.History)) - copy(m.FSLayers, mb.Manifest.FSLayers) - copy(m.History, mb.Manifest.History) - - return Sign(&m, mb.pk) -} - -// AppendReference adds a reference to the current ManifestBuilder -func (mb *referenceManifestBuilder) AppendReference(d distribution.Describable) error { - r, ok := d.(Reference) - if !ok { - return fmt.Errorf("Unable to add non-reference type to v1 builder") - } - - // Entries need to be prepended - mb.Manifest.FSLayers = append([]FSLayer{{BlobSum: r.Digest}}, mb.Manifest.FSLayers...) - mb.Manifest.History = append([]History{r.History}, mb.Manifest.History...) - return nil - -} - -// References returns the current references added to this builder -func (mb *referenceManifestBuilder) References() []distribution.Descriptor { - refs := make([]distribution.Descriptor, len(mb.Manifest.FSLayers)) - for i := range mb.Manifest.FSLayers { - layerDigest := mb.Manifest.FSLayers[i].BlobSum - history := mb.Manifest.History[i] - ref := Reference{layerDigest, 0, history} - refs[i] = ref.Descriptor() - } - return refs -} - -// Reference describes a manifest v2, schema version 1 dependency. -// An FSLayer associated with a history entry. -type Reference struct { - Digest digest.Digest - Size int64 // if we know it, set it for the descriptor. 
- History History -} - -// Descriptor describes a reference -func (r Reference) Descriptor() distribution.Descriptor { - return distribution.Descriptor{ - MediaType: MediaTypeManifestLayer, - Digest: r.Digest, - Size: r.Size, - } -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go b/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go deleted file mode 100644 index c862dd8123..0000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/sign.go +++ /dev/null @@ -1,68 +0,0 @@ -package schema1 - -import ( - "crypto/x509" - "encoding/json" - - "github.com/docker/libtrust" -) - -// Sign signs the manifest with the provided private key, returning a -// SignedManifest. This typically won't be used within the registry, except -// for testing. -func Sign(m *Manifest, pk libtrust.PrivateKey) (*SignedManifest, error) { - p, err := json.MarshalIndent(m, "", " ") - if err != nil { - return nil, err - } - - js, err := libtrust.NewJSONSignature(p) - if err != nil { - return nil, err - } - - if err := js.Sign(pk); err != nil { - return nil, err - } - - pretty, err := js.PrettySignature("signatures") - if err != nil { - return nil, err - } - - return &SignedManifest{ - Manifest: *m, - all: pretty, - Canonical: p, - }, nil -} - -// SignWithChain signs the manifest with the given private key and x509 chain. -// The public key of the first element in the chain must be the public key -// corresponding to the signing key. -func SignWithChain(m *Manifest, key libtrust.PrivateKey, chain []*x509.Certificate) (*SignedManifest, error) { - p, err := json.MarshalIndent(m, "", " ") - if err != nil { - return nil, err - } - - js, err := libtrust.NewJSONSignature(p) - if err != nil { - return nil, err - } - - if err := js.SignWithChain(key, chain); err != nil { - return nil, err - } - - pretty, err := js.PrettySignature("signatures") - if err != nil { - return nil, err - } - - return &SignedManifest{ - Manifest: *m, - all: pretty, - Canonical: p, - }, nil -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go b/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go deleted file mode 100644 index fa8daa56f5..0000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema1/verify.go +++ /dev/null @@ -1,32 +0,0 @@ -package schema1 - -import ( - "crypto/x509" - - "github.com/Sirupsen/logrus" - "github.com/docker/libtrust" -) - -// Verify verifies the signature of the signed manifest returning the public -// keys used during signing. -func Verify(sm *SignedManifest) ([]libtrust.PublicKey, error) { - js, err := libtrust.ParsePrettySignature(sm.all, "signatures") - if err != nil { - logrus.WithField("err", err).Debugf("(*SignedManifest).Verify") - return nil, err - } - - return js.Verify() -} - -// VerifyChains verifies the signature of the signed manifest against the -// certificate pool returning the list of verified chains. Signatures without -// an x509 chain are not checked.
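A minimal sketch of the schema1 sign/verify round trip shown above, assuming an ephemeral libtrust key; the image name and tag are illustrative only:

package main

import (
	"fmt"

	"github.com/docker/distribution/manifest"
	"github.com/docker/distribution/manifest/schema1"
	"github.com/docker/libtrust"
)

func main() {
	// Generate a throwaway signing key for the sketch.
	pk, err := libtrust.GenerateECP256PrivateKey()
	if err != nil {
		panic(err)
	}

	m := schema1.Manifest{
		Versioned: manifest.Versioned{SchemaVersion: 1},
		Name:      "library/hello-world", // illustrative name
		Tag:       "latest",
	}

	// Sign produces a SignedManifest wrapping the canonical bytes.
	sm, err := schema1.Sign(&m, pk)
	if err != nil {
		panic(err)
	}

	// Verify recovers the public keys used during signing.
	keys, err := schema1.Verify(sm)
	if err != nil {
		panic(err)
	}
	fmt.Println("verified with", len(keys), "key(s)")
}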
-func VerifyChains(sm *SignedManifest, ca *x509.CertPool) ([][]*x509.Certificate, error) { - js, err := libtrust.ParsePrettySignature(sm.all, "signatures") - if err != nil { - return nil, err - } - - return js.VerifyChains(ca) -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema2/builder.go b/vendor/src/github.com/docker/distribution/manifest/schema2/builder.go deleted file mode 100644 index ec0bf858d1..0000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema2/builder.go +++ /dev/null @@ -1,80 +0,0 @@ -package schema2 - -import ( - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" -) - -// builder is a type for constructing manifests. -type builder struct { - // bs is a BlobService used to publish the configuration blob. - bs distribution.BlobService - - // configJSON is the image configuration, supplied when the builder is - // created. - configJSON []byte - - // layers is a list of layer descriptors that gets built by successive - // calls to AppendReference. - layers []distribution.Descriptor -} - -// NewManifestBuilder is used to build new manifests for the current schema -// version. It takes a BlobService so it can publish the configuration blob -// as part of the Build process. -func NewManifestBuilder(bs distribution.BlobService, configJSON []byte) distribution.ManifestBuilder { - mb := &builder{ - bs: bs, - configJSON: make([]byte, len(configJSON)), - } - copy(mb.configJSON, configJSON) - - return mb -} - -// Build produces a final manifest from the given references. -func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) { - m := Manifest{ - Versioned: SchemaVersion, - Layers: make([]distribution.Descriptor, len(mb.layers)), - } - copy(m.Layers, mb.layers) - - configDigest := digest.FromBytes(mb.configJSON) - - var err error - m.Config, err = mb.bs.Stat(ctx, configDigest) - switch err { - case nil: - // Override MediaType, since Put always replaces the specified media - // type with application/octet-stream in the descriptor it returns. - m.Config.MediaType = MediaTypeConfig - return FromStruct(m) - case distribution.ErrBlobUnknown: - // nop - default: - return nil, err - } - - // Add config to the blob store - m.Config, err = mb.bs.Put(ctx, MediaTypeConfig, mb.configJSON) - if err != nil { - return nil, err - } - // Override MediaType, since Put always replaces the specified media - // type with application/octet-stream in the descriptor it returns. - m.Config.MediaType = MediaTypeConfig - - return FromStruct(m) -} - -// AppendReference adds a reference to the current ManifestBuilder. -func (mb *builder) AppendReference(d distribution.Describable) error { - mb.layers = append(mb.layers, d.Descriptor()) - return nil -} - -// References returns the current references added to this builder. -func (mb *builder) References() []distribution.Descriptor { - return mb.layers -} diff --git a/vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go b/vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go deleted file mode 100644 index 355b5ad4ea..0000000000 --- a/vendor/src/github.com/docker/distribution/manifest/schema2/manifest.go +++ /dev/null @@ -1,128 +0,0 @@ -package schema2 - -import ( - "encoding/json" - "errors" - "fmt" - - "github.com/docker/distribution" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/manifest" -) - -const ( - // MediaTypeManifest specifies the mediaType for the current version.
- MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json" - - // MediaTypeConfig specifies the mediaType for the image configuration. - MediaTypeConfig = "application/vnd.docker.container.image.v1+json" - - // MediaTypeLayer is the mediaType used for layers referenced by the - // manifest. - MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip" - - // MediaTypeForeignLayer is the mediaType used for layers that must be - // downloaded from foreign URLs. - MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip" -) - -var ( - // SchemaVersion provides a pre-initialized version structure for this - // package's version of the manifest. - SchemaVersion = manifest.Versioned{ - SchemaVersion: 2, - MediaType: MediaTypeManifest, - } -) - -func init() { - schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) { - m := new(DeserializedManifest) - err := m.UnmarshalJSON(b) - if err != nil { - return nil, distribution.Descriptor{}, err - } - - dgst := digest.FromBytes(b) - return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err - } - err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func) - if err != nil { - panic(fmt.Sprintf("Unable to register manifest: %s", err)) - } -} - -// Manifest defines a schema2 manifest. -type Manifest struct { - manifest.Versioned - - // Config references the image configuration as a blob. - Config distribution.Descriptor `json:"config"` - - // Layers lists descriptors for the layers referenced by the - // configuration. - Layers []distribution.Descriptor `json:"layers"` -} - -// References returns the descriptors of this manifest's references. -func (m Manifest) References() []distribution.Descriptor { - return m.Layers -} - -// Target returns the target of this signed manifest. -func (m Manifest) Target() distribution.Descriptor { - return m.Config -} - -// DeserializedManifest wraps Manifest with a copy of the original JSON. -// It satisfies the distribution.Manifest interface. -type DeserializedManifest struct { - Manifest - - // canonical is the canonical byte representation of the Manifest. - canonical []byte -} - -// FromStruct takes a Manifest structure, marshals it to JSON, and returns a -// DeserializedManifest which contains the manifest and its JSON representation. -func FromStruct(m Manifest) (*DeserializedManifest, error) { - var deserialized DeserializedManifest - deserialized.Manifest = m - - var err error - deserialized.canonical, err = json.MarshalIndent(&m, "", " ") - return &deserialized, err -} - -// UnmarshalJSON populates a new Manifest struct from JSON data. -func (m *DeserializedManifest) UnmarshalJSON(b []byte) error { - m.canonical = make([]byte, len(b), len(b)) - // store manifest in canonical - copy(m.canonical, b) - - // Unmarshal canonical JSON into Manifest object - var manifest Manifest - if err := json.Unmarshal(m.canonical, &manifest); err != nil { - return err - } - - m.Manifest = manifest - - return nil -} - -// MarshalJSON returns the contents of canonical. If canonical is empty, -// marshals the inner contents. -func (m *DeserializedManifest) MarshalJSON() ([]byte, error) { - if len(m.canonical) > 0 { - return m.canonical, nil - } - - return nil, errors.New("JSON representation not initialized in DeserializedManifest") -} - -// Payload returns the raw content of the manifest. The contents can be used to -// calculate the content identifier.
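A minimal sketch of building a schema2 manifest in memory with FromStruct, assuming a toy configuration blob; unlike the builder above, no BlobService is involved:

package main

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/manifest/schema2"
)

func main() {
	// A toy image configuration; real configs carry rootfs and history.
	configJSON := []byte(`{"architecture":"amd64","os":"linux"}`)

	m := schema2.Manifest{
		Versioned: schema2.SchemaVersion,
		Config: distribution.Descriptor{
			MediaType: schema2.MediaTypeConfig,
			Digest:    digest.FromBytes(configJSON),
			Size:      int64(len(configJSON)),
		},
	}

	// FromStruct captures the canonical JSON alongside the struct.
	dm, err := schema2.FromStruct(m)
	if err != nil {
		panic(err)
	}
	mt, payload, _ := dm.Payload()
	fmt.Println(mt, len(payload))
}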
-func (m DeserializedManifest) Payload() (string, []byte, error) { - return m.MediaType, m.canonical, nil -} diff --git a/vendor/src/github.com/docker/distribution/manifest/versioned.go b/vendor/src/github.com/docker/distribution/manifest/versioned.go deleted file mode 100644 index c57398bde6..0000000000 --- a/vendor/src/github.com/docker/distribution/manifest/versioned.go +++ /dev/null @@ -1,12 +0,0 @@ -package manifest - -// Versioned provides a struct with the manifest schemaVersion and mediaType. -// Incoming content with unknown schema version can be decoded against this -// struct to check the version. -type Versioned struct { - // SchemaVersion is the image manifest schema that this image follows - SchemaVersion int `json:"schemaVersion"` - - // MediaType is the media type of this schema. - MediaType string `json:"mediaType,omitempty"` -} diff --git a/vendor/src/github.com/docker/distribution/manifests.go b/vendor/src/github.com/docker/distribution/manifests.go deleted file mode 100644 index 2ac7c8f211..0000000000 --- a/vendor/src/github.com/docker/distribution/manifests.go +++ /dev/null @@ -1,117 +0,0 @@ -package distribution - -import ( - "fmt" - "mime" - - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" -) - -// Manifest represents a registry object specifying a set of -// references and an optional target -type Manifest interface { - // References returns a list of objects which make up this manifest. - // The references are strictly ordered from base to head. A reference - // is anything which can be represented by a distribution.Descriptor - References() []Descriptor - - // Payload provides the serialized format of the manifest, in addition to - // the mediatype. - Payload() (mediatype string, payload []byte, err error) -} - -// ManifestBuilder creates a manifest allowing one to include dependencies. - // Instances can be obtained from a version-specific manifest package. Manifest -// specific data is passed into the function which creates the builder. -type ManifestBuilder interface { - // Build creates the manifest from this builder. - Build(ctx context.Context) (Manifest, error) - - // References returns a list of objects which have been added to this - // builder. The dependencies are returned in the order they were added, - // which should be from base to head. - References() []Descriptor - - // AppendReference includes the given object in the manifest after any - // existing dependencies. If the add fails, such as when adding an - // unsupported dependency, an error may be returned. - AppendReference(dependency Describable) error -} - -// ManifestService describes operations on image manifests. -type ManifestService interface { - // Exists returns true if the manifest exists. - Exists(ctx context.Context, dgst digest.Digest) (bool, error) - - // Get retrieves the manifest specified by the given digest - Get(ctx context.Context, dgst digest.Digest, options ...ManifestServiceOption) (Manifest, error) - - // Put creates or updates the given manifest returning the manifest digest - Put(ctx context.Context, manifest Manifest, options ...ManifestServiceOption) (digest.Digest, error) - - // Delete removes the manifest specified by the given digest. Deleting - // a manifest that doesn't exist will return ErrManifestNotFound - Delete(ctx context.Context, dgst digest.Digest) error -} - -// ManifestEnumerator enables iterating over manifests -type ManifestEnumerator interface { - // Enumerate calls ingester for each manifest.
- Enumerate(ctx context.Context, ingester func(digest.Digest) error) error -} - -// Describable is an interface for descriptors -type Describable interface { - Descriptor() Descriptor -} - -// ManifestMediaTypes returns the supported media types for manifests. -func ManifestMediaTypes() (mediaTypes []string) { - for t := range mappings { - if t != "" { - mediaTypes = append(mediaTypes, t) - } - } - return -} - -// UnmarshalFunc implements manifest unmarshalling for a given MediaType -type UnmarshalFunc func([]byte) (Manifest, Descriptor, error) - -var mappings = make(map[string]UnmarshalFunc, 0) - -// UnmarshalManifest looks up manifest unmarshal functions based on -// MediaType -func UnmarshalManifest(ctHeader string, p []byte) (Manifest, Descriptor, error) { - // Need to look up by the actual media type, not the raw contents of - // the header. Strip semicolons and anything following them. - var mediatype string - if ctHeader != "" { - var err error - mediatype, _, err = mime.ParseMediaType(ctHeader) - if err != nil { - return nil, Descriptor{}, err - } - } - - unmarshalFunc, ok := mappings[mediatype] - if !ok { - unmarshalFunc, ok = mappings[""] - if !ok { - return nil, Descriptor{}, fmt.Errorf("unsupported manifest mediatype and no default available: %s", mediatype) - } - } - - return unmarshalFunc(p) -} - -// RegisterManifestSchema registers an UnmarshalFunc for a given schema type. This -// should be called from specific manifest packages. -func RegisterManifestSchema(mediatype string, u UnmarshalFunc) error { - if _, ok := mappings[mediatype]; ok { - return fmt.Errorf("manifest mediatype registration would overwrite existing: %s", mediatype) - } - mappings[mediatype] = u - return nil -} diff --git a/vendor/src/github.com/docker/distribution/reference/reference.go b/vendor/src/github.com/docker/distribution/reference/reference.go deleted file mode 100644 index bb09fa25da..0000000000 --- a/vendor/src/github.com/docker/distribution/reference/reference.go +++ /dev/null @@ -1,334 +0,0 @@ -// Package reference provides a general type to represent any way of referencing images within the registry. -// Its main purpose is to abstract tags and digests (content-addressable hash). -// -// Grammar -// -// reference := name [ ":" tag ] [ "@" digest ] -// name := [hostname '/'] component ['/' component]* -// hostname := hostcomponent ['.' hostcomponent]* [':' port-number] -// hostcomponent := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/ -// port-number := /[0-9]+/ -// component := alpha-numeric [separator alpha-numeric]* -// alpha-numeric := /[a-z0-9]+/ -// separator := /[_.]|__|[-]*/ -// -// tag := /[\w][\w.-]{0,127}/ -// -// digest := digest-algorithm ":" digest-hex -// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ] -// digest-algorithm-separator := /[+.-_]/ -// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/ -// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value -package reference - -import ( - "errors" - "fmt" - - "github.com/docker/distribution/digest" -) - -const ( - // NameTotalLengthMax is the maximum total number of characters in a repository name. - NameTotalLengthMax = 255 -) - -var ( - // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference. - ErrReferenceInvalidFormat = errors.New("invalid reference format") - - // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
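A minimal sketch of consuming the UnmarshalManifest/RegisterManifestSchema machinery above; the blank import assumes the vendored schema2 package registers itself in init, and the body is a placeholder for what would normally come from a registry HTTP response:

package main

import (
	"fmt"

	"github.com/docker/distribution"
	_ "github.com/docker/distribution/manifest/schema2" // registers its UnmarshalFunc via init
)

func main() {
	// Parameters after the semicolon are stripped by mime.ParseMediaType.
	contentType := "application/vnd.docker.distribution.manifest.v2+json; charset=utf-8"
	body := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json","config":{},"layers":[]}`)

	m, desc, err := distribution.UnmarshalManifest(contentType, body)
	if err != nil {
		panic(err)
	}
	fmt.Println(desc.Digest, len(m.References()))
}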
- ErrTagInvalidFormat = errors.New("invalid tag format") - - // ErrDigestInvalidFormat represents an error while trying to parse a string as a digest. - ErrDigestInvalidFormat = errors.New("invalid digest format") - - // ErrNameEmpty is returned for empty, invalid repository names. - ErrNameEmpty = errors.New("repository name must have at least one component") - - // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax. - ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax) -) - -// Reference is an opaque object reference identifier that may include -// modifiers such as a hostname, name, tag, and digest. -type Reference interface { - // String returns the full reference - String() string -} - -// Field provides a wrapper type for resolving correct reference types when -// working with encoding. -type Field struct { - reference Reference -} - -// AsField wraps a reference in a Field for encoding. -func AsField(reference Reference) Field { - return Field{reference} -} - -// Reference unwraps the reference type from the field to -// return the Reference object. This object should be -// of the appropriate type to further check for different -// reference types. -func (f Field) Reference() Reference { - return f.reference -} - -// MarshalText serializes the field to byte text, which is the string form of -// the reference. -func (f Field) MarshalText() (p []byte, err error) { - return []byte(f.reference.String()), nil -} - -// UnmarshalText parses text bytes by invoking the -// reference parser to ensure the appropriately -// typed reference object is wrapped by the field. -func (f *Field) UnmarshalText(p []byte) error { - r, err := Parse(string(p)) - if err != nil { - return err - } - - f.reference = r - return nil -} - -// Named is an object with a full name -type Named interface { - Reference - Name() string -} - -// Tagged is an object which has a tag -type Tagged interface { - Reference - Tag() string -} - -// NamedTagged is an object including a name and tag. -type NamedTagged interface { - Named - Tag() string -} - -// Digested is an object which has a digest -// by which it can be referenced -type Digested interface { - Reference - Digest() digest.Digest -} - -// Canonical reference is an object with a fully unique name, including -// hostname and digest -type Canonical interface { - Named - Digest() digest.Digest -} - -// SplitHostname splits a named reference into a -// hostname and name string. If no valid hostname is -// found, the hostname is empty and the full value -// is returned as name -func SplitHostname(named Named) (string, string) { - name := named.Name() - match := anchoredNameRegexp.FindStringSubmatch(name) - if match == nil || len(match) != 3 { - return "", name - } - return match[1], match[2] -} - -// Parse parses s and returns a syntactically valid Reference. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: Parse will not handle short digests.
-func Parse(s string) (Reference, error) { - matches := ReferenceRegexp.FindStringSubmatch(s) - if matches == nil { - if s == "" { - return nil, ErrNameEmpty - } - // TODO(dmcgowan): Provide more specific and helpful error - return nil, ErrReferenceInvalidFormat - } - - if len(matches[1]) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - - ref := reference{ - name: matches[1], - tag: matches[2], - } - if matches[3] != "" { - var err error - ref.digest, err = digest.ParseDigest(matches[3]) - if err != nil { - return nil, err - } - } - - r := getBestReferenceType(ref) - if r == nil { - return nil, ErrNameEmpty - } - - return r, nil -} - -// ParseNamed parses s and returns a syntactically valid reference implementing -// the Named interface. The reference must have a name, otherwise an error is -// returned. -// If an error was encountered it is returned, along with a nil Reference. -// NOTE: ParseNamed will not handle short digests. -func ParseNamed(s string) (Named, error) { - ref, err := Parse(s) - if err != nil { - return nil, err - } - named, isNamed := ref.(Named) - if !isNamed { - return nil, fmt.Errorf("reference %s has no name", ref.String()) - } - return named, nil -} - -// WithName returns a named object representing the given string. If the input -// is invalid ErrReferenceInvalidFormat will be returned. -func WithName(name string) (Named, error) { - if len(name) > NameTotalLengthMax { - return nil, ErrNameTooLong - } - if !anchoredNameRegexp.MatchString(name) { - return nil, ErrReferenceInvalidFormat - } - return repository(name), nil -} - -// WithTag combines the name from "name" and the tag from "tag" to form a -// reference incorporating both the name and the tag. -func WithTag(name Named, tag string) (NamedTagged, error) { - if !anchoredTagRegexp.MatchString(tag) { - return nil, ErrTagInvalidFormat - } - return taggedReference{ - name: name.Name(), - tag: tag, - }, nil -} - -// WithDigest combines the name from "name" and the digest from "digest" to form -// a reference incorporating both the name and the digest. 
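A minimal sketch of the parsing helpers above; the registry hostname and repository name are illustrative placeholders:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Parse a fully qualified name (illustrative registry and repo).
	named, err := reference.ParseNamed("registry.example.com:5000/library/ubuntu")
	if err != nil {
		panic(err)
	}

	// Split out the hostname from the remainder of the name.
	hostname, remainder := reference.SplitHostname(named)
	fmt.Println(hostname, remainder) // registry.example.com:5000 library/ubuntu

	// Attach a tag to obtain a NamedTagged reference.
	tagged, err := reference.WithTag(named, "14.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(tagged.String()) // registry.example.com:5000/library/ubuntu:14.04
}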
-func WithDigest(name Named, digest digest.Digest) (Canonical, error) { - if !anchoredDigestRegexp.MatchString(digest.String()) { - return nil, ErrDigestInvalidFormat - } - return canonicalReference{ - name: name.Name(), - digest: digest, - }, nil -} - -func getBestReferenceType(ref reference) Reference { - if ref.name == "" { - // Allow digest only references - if ref.digest != "" { - return digestReference(ref.digest) - } - return nil - } - if ref.tag == "" { - if ref.digest != "" { - return canonicalReference{ - name: ref.name, - digest: ref.digest, - } - } - return repository(ref.name) - } - if ref.digest == "" { - return taggedReference{ - name: ref.name, - tag: ref.tag, - } - } - - return ref -} - -type reference struct { - name string - tag string - digest digest.Digest -} - -func (r reference) String() string { - return r.name + ":" + r.tag + "@" + r.digest.String() -} - -func (r reference) Name() string { - return r.name -} - -func (r reference) Tag() string { - return r.tag -} - -func (r reference) Digest() digest.Digest { - return r.digest -} - -type repository string - -func (r repository) String() string { - return string(r) -} - -func (r repository) Name() string { - return string(r) -} - -type digestReference digest.Digest - -func (d digestReference) String() string { - return digest.Digest(d).String() -} - -func (d digestReference) Digest() digest.Digest { - return digest.Digest(d) -} - -type taggedReference struct { - name string - tag string -} - -func (t taggedReference) String() string { - return t.name + ":" + t.tag -} - -func (t taggedReference) Name() string { - return t.name -} - -func (t taggedReference) Tag() string { - return t.tag -} - -type canonicalReference struct { - name string - digest digest.Digest -} - -func (c canonicalReference) String() string { - return c.name + "@" + c.digest.String() -} - -func (c canonicalReference) Name() string { - return c.name -} - -func (c canonicalReference) Digest() digest.Digest { - return c.digest -} diff --git a/vendor/src/github.com/docker/distribution/reference/regexp.go b/vendor/src/github.com/docker/distribution/reference/regexp.go deleted file mode 100644 index 9a7d366bc8..0000000000 --- a/vendor/src/github.com/docker/distribution/reference/regexp.go +++ /dev/null @@ -1,124 +0,0 @@ -package reference - -import "regexp" - -var ( - // alphaNumericRegexp defines the alpha numeric atom, typically a - // component of names. This only allows lower case characters and digits. - alphaNumericRegexp = match(`[a-z0-9]+`) - - // separatorRegexp defines the separators allowed to be embedded in name - // components. This allows one period, one or two underscores, and - // multiple dashes. - separatorRegexp = match(`(?:[._]|__|[-]*)`) - - // nameComponentRegexp restricts registry path component names to start - // with at least one letter or number, with following parts able to be - // separated by one period, one or two underscores, or multiple dashes. - nameComponentRegexp = expression( - alphaNumericRegexp, - optional(repeated(separatorRegexp, alphaNumericRegexp))) - - // hostnameComponentRegexp restricts a single component of the registry - // hostname to an alphanumeric label which may contain internal hyphens. - hostnameComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`) - - // hostnameRegexp defines the structure of potential hostname components - // that may be part of image names.
This is purposely a subset of what is - // allowed by DNS to ensure backwards compatibility with Docker image - // names. - hostnameRegexp = expression( - hostnameComponentRegexp, - optional(repeated(literal(`.`), hostnameComponentRegexp)), - optional(literal(`:`), match(`[0-9]+`))) - - // TagRegexp matches valid tag names. From docker/docker:graph/tags.go. - TagRegexp = match(`[\w][\w.-]{0,127}`) - - // anchoredTagRegexp matches valid tag names, anchored at the start and - // end of the matched string. - anchoredTagRegexp = anchored(TagRegexp) - - // DigestRegexp matches valid digests. - DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`) - - // anchoredDigestRegexp matches valid digests, anchored at the start and - // end of the matched string. - anchoredDigestRegexp = anchored(DigestRegexp) - - // NameRegexp is the format for the name component of references. The - // regexp has capturing groups for the hostname and name part omitting - // the separating forward slash from either. - NameRegexp = expression( - optional(hostnameRegexp, literal(`/`)), - nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp))) - - // anchoredNameRegexp is used to parse a name value, capturing the - // hostname and trailing components. - anchoredNameRegexp = anchored( - optional(capture(hostnameRegexp), literal(`/`)), - capture(nameComponentRegexp, - optional(repeated(literal(`/`), nameComponentRegexp)))) - - // ReferenceRegexp is the full supported format of a reference. The regexp - // is anchored and has capturing groups for name, tag, and digest - // components. - ReferenceRegexp = anchored(capture(NameRegexp), - optional(literal(":"), capture(TagRegexp)), - optional(literal("@"), capture(DigestRegexp))) -) - -// match compiles the string to a regular expression. -var match = regexp.MustCompile - -// literal compiles s into a literal regular expression, escaping any regexp -// reserved characters. -func literal(s string) *regexp.Regexp { - re := match(regexp.QuoteMeta(s)) - - if _, complete := re.LiteralPrefix(); !complete { - panic("must be a literal") - } - - return re -} - -// expression defines a full expression, where each regular expression must -// follow the previous. -func expression(res ...*regexp.Regexp) *regexp.Regexp { - var s string - for _, re := range res { - s += re.String() - } - - return match(s) -} - -// optional wraps the expression in a non-capturing group and makes the -// production optional. -func optional(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `?`) -} - -// repeated wraps the regexp in a non-capturing group to get one or more -// matches. -func repeated(res ...*regexp.Regexp) *regexp.Regexp { - return match(group(expression(res...)).String() + `+`) -} - -// group wraps the regexp in a non-capturing group. -func group(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(?:` + expression(res...).String() + `)`) -} - -// capture wraps the expression in a capturing group. -func capture(res ...*regexp.Regexp) *regexp.Regexp { - return match(`(` + expression(res...).String() + `)`) -} - -// anchored anchors the regular expression by adding start and end delimiters. 
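A minimal sketch of matching against the exported ReferenceRegexp built from the helpers above; the input reference is an illustrative example:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// Capturing groups: 1 = name, 2 = tag, 3 = digest (empty when absent).
	matches := reference.ReferenceRegexp.FindStringSubmatch("docker.io/library/ubuntu:14.04")
	fmt.Printf("name=%q tag=%q digest=%q\n", matches[1], matches[2], matches[3])
}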
-func anchored(res ...*regexp.Regexp) *regexp.Regexp { - return match(`^` + expression(res...).String() + `$`) -} diff --git a/vendor/src/github.com/docker/distribution/registry.go b/vendor/src/github.com/docker/distribution/registry.go deleted file mode 100644 index 1ede31ebb6..0000000000 --- a/vendor/src/github.com/docker/distribution/registry.go +++ /dev/null @@ -1,97 +0,0 @@ -package distribution - -import ( - "github.com/docker/distribution/context" - "github.com/docker/distribution/reference" -) - -// Scope defines the set of items that match a namespace. -type Scope interface { - // Contains returns true if the name belongs to the namespace. - Contains(name string) bool -} - -type fullScope struct{} - -func (f fullScope) Contains(string) bool { - return true -} - -// GlobalScope represents the full namespace scope which contains -// all other scopes. -var GlobalScope = Scope(fullScope{}) - -// Namespace represents a collection of repositories, addressable by name. -// Generally, a namespace is backed by a set of one or more services, -// providing facilities such as registry access, trust, and indexing. -type Namespace interface { - // Scope describes the names that can be used with this Namespace. The - // global namespace will have a scope that matches all names. The scope - // effectively provides an identity for the namespace. - Scope() Scope - - // Repository should return a reference to the named repository. The - // registry may or may not have the repository but should always return a - // reference. - Repository(ctx context.Context, name reference.Named) (Repository, error) - - // Repositories fills 'repos' with a lexicographically sorted catalog of repositories - // up to the size of 'repos' and returns the value 'n' for the number of entries - // which were filled. 'last' contains an offset in the catalog, and 'err' will be - // set to io.EOF if there are no more entries to obtain. - Repositories(ctx context.Context, repos []string, last string) (n int, err error) - - // Blobs returns a blob enumerator to access all blobs - Blobs() BlobEnumerator - - // BlobStatter returns a BlobStatter used to stat blobs. - BlobStatter() BlobStatter -} - -// RepositoryEnumerator describes an operation to enumerate repositories -type RepositoryEnumerator interface { - Enumerate(ctx context.Context, ingester func(string) error) error } - -// ManifestServiceOption is a function argument for Manifest Service methods -type ManifestServiceOption interface { - Apply(ManifestService) error -} - -// WithTag allows a tag to be passed into Put -func WithTag(tag string) ManifestServiceOption { - return WithTagOption{tag} -} - -// WithTagOption holds a tag -type WithTagOption struct{ Tag string } - -// Apply conforms to the ManifestServiceOption interface -func (o WithTagOption) Apply(m ManifestService) error { - // no implementation - return nil -} - -// Repository is a named collection of manifests and layers. -type Repository interface { - // Named returns the name of the repository. - Named() reference.Named - - // Manifests returns a reference to this repository's manifest service, - // with the supplied options applied. - Manifests(ctx context.Context, options ...ManifestServiceOption) (ManifestService, error) - - // Blobs returns a reference to this repository's blob service. - Blobs(ctx context.Context) BlobStore - - // TODO(stevvooe): The above BlobStore return can probably be relaxed to - // be a BlobService for use with clients.
This will allow - // implementations to avoid implementing ServeBlob. - - // Tags returns a reference to this repository's tag service - Tags(ctx context.Context) TagService -} - -// TODO(stevvooe): Must add close methods to all these. May want to change the -// way instances are created to better reflect internal dependency -// relationships. diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go deleted file mode 100644 index 6d9bb4b62a..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/errcode/errors.go +++ /dev/null @@ -1,267 +0,0 @@ -package errcode - -import ( - "encoding/json" - "fmt" - "strings" -) - -// ErrorCoder is the base interface for ErrorCode and Error, allowing -// users of either to call ErrorCode to get the real ID of each -type ErrorCoder interface { - ErrorCode() ErrorCode -} - -// ErrorCode represents the error type. The errors are serialized via strings -// and the integer format may change and should *never* be exported. -type ErrorCode int - -var _ error = ErrorCode(0) - -// ErrorCode just returns itself -func (ec ErrorCode) ErrorCode() ErrorCode { - return ec -} - -// Error returns the ID/Value -func (ec ErrorCode) Error() string { - // NOTE(stevvooe): Cannot use message here since it may have unpopulated args. - return strings.ToLower(strings.Replace(ec.String(), "_", " ", -1)) -} - -// Descriptor returns the descriptor for the error code. -func (ec ErrorCode) Descriptor() ErrorDescriptor { - d, ok := errorCodeToDescriptors[ec] - - if !ok { - return ErrorCodeUnknown.Descriptor() - } - - return d -} - -// String returns the canonical identifier for this error code. -func (ec ErrorCode) String() string { - return ec.Descriptor().Value -} - -// Message returns the human-readable error message for this error code. -func (ec ErrorCode) Message() string { - return ec.Descriptor().Message -} - -// MarshalText encodes the receiver into UTF-8-encoded text and returns the -// result. -func (ec ErrorCode) MarshalText() (text []byte, err error) { - return []byte(ec.String()), nil -} - -// UnmarshalText decodes the form generated by MarshalText. -func (ec *ErrorCode) UnmarshalText(text []byte) error { - desc, ok := idToDescriptors[string(text)] - - if !ok { - desc = ErrorCodeUnknown.Descriptor() - } - - *ec = desc.Code - - return nil -} - -// WithMessage creates a new Error struct based on the passed-in info and -// overrides the Message property. -func (ec ErrorCode) WithMessage(message string) Error { - return Error{ - Code: ec, - Message: message, - } -} - -// WithDetail creates a new Error struct based on the passed-in info and -// set the Detail property appropriately -func (ec ErrorCode) WithDetail(detail interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithDetail(detail) -} - -// WithArgs creates a new Error struct and sets the Args slice -func (ec ErrorCode) WithArgs(args ...interface{}) Error { - return Error{ - Code: ec, - Message: ec.Message(), - }.WithArgs(args...) -} - -// Error provides a wrapper around ErrorCode with extra Details provided.
-type Error struct { - Code ErrorCode `json:"code"` - Message string `json:"message"` - Detail interface{} `json:"detail,omitempty"` - - // TODO(duglin): See if we need an "args" property so we can do the - // variable substitution right before showing the message to the user -} - -var _ error = Error{} - -// ErrorCode returns the ID/Value of this Error -func (e Error) ErrorCode() ErrorCode { - return e.Code -} - -// Error returns a human readable representation of the error. -func (e Error) Error() string { - return fmt.Sprintf("%s: %s", e.Code.Error(), e.Message) -} - -// WithDetail will return a new Error, based on the current one, but with -// some Detail info added -func (e Error) WithDetail(detail interface{}) Error { - return Error{ - Code: e.Code, - Message: e.Message, - Detail: detail, - } -} - -// WithArgs uses the passed-in list of interface{} as the substitution -// variables in the Error's Message string, but returns a new Error -func (e Error) WithArgs(args ...interface{}) Error { - return Error{ - Code: e.Code, - Message: fmt.Sprintf(e.Code.Message(), args...), - Detail: e.Detail, - } -} - -// ErrorDescriptor provides relevant information about a given error code. -type ErrorDescriptor struct { - // Code is the error code that this descriptor describes. - Code ErrorCode - - // Value provides a unique string key, often capitalized with - // underscores, to identify the error code. This value is used as the - // keyed value when serializing api errors. - Value string - - // Message is a short, human readable description of the error condition - // included in API responses. - Message string - - // Description provides a complete account of the error's purpose, suitable - // for use in documentation. - Description string - - // HTTPStatusCode provides the http status code that is associated with - // this error condition. - HTTPStatusCode int -} - -// ParseErrorCode returns the value by the string error code. -// `ErrorCodeUnknown` will be returned if the error is not known. -func ParseErrorCode(value string) ErrorCode { - ed, ok := idToDescriptors[value] - if ok { - return ed.Code - } - - return ErrorCodeUnknown -} - -// Errors provides the envelope for multiple errors and a few sugar methods -// for use within the application. -type Errors []error - -var _ error = Errors{} - -func (errs Errors) Error() string { - switch len(errs) { - case 0: - return "" - case 1: - return errs[0].Error() - default: - msg := "errors:\n" - for _, err := range errs { - msg += err.Error() + "\n" - } - return msg - } -} - -// Len returns the current number of errors.
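A minimal sketch of the ErrorCode/Error API above; the "example" group, the EXAMPLE_DENIED value, and the message format are hypothetical, invented only for this illustration:

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/api/errcode"
)

func main() {
	// Register a hypothetical error code for this sketch.
	errDenied := errcode.Register("example", errcode.ErrorDescriptor{
		Value:          "EXAMPLE_DENIED",
		Message:        "access to %s was denied",
		Description:    "Returned by this example when access is denied.",
		HTTPStatusCode: http.StatusForbidden,
	})

	// WithArgs fills the message format; WithDetail attaches structured data.
	err := errDenied.WithArgs("library/ubuntu").WithDetail(map[string]string{"repo": "library/ubuntu"})
	fmt.Println(err.Error()) // example denied: access to library/ubuntu was denied
}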
-func (errs Errors) Len() int { - return len(errs) -} - -// MarshalJSON converts a slice of error, ErrorCode or Error into a -// slice of Error, then serializes it -func (errs Errors) MarshalJSON() ([]byte, error) { - var tmpErrs struct { - Errors []Error `json:"errors,omitempty"` - } - - for _, daErr := range errs { - var err Error - - switch daErr := daErr.(type) { - case ErrorCode: - err = daErr.WithDetail(nil) - case Error: - err = daErr - default: - err = ErrorCodeUnknown.WithDetail(daErr) - } - - // If the Error struct was set up and the Message field was left - // unset (meaning it's "") then grab it from the ErrCode - msg := err.Message - if msg == "" { - msg = err.Code.Message() - } - - tmpErrs.Errors = append(tmpErrs.Errors, Error{ - Code: err.Code, - Message: msg, - Detail: err.Detail, - }) - } - - return json.Marshal(tmpErrs) -} - -// UnmarshalJSON deserializes []Error and then converts it into a slice of -// Error or ErrorCode -func (errs *Errors) UnmarshalJSON(data []byte) error { - var tmpErrs struct { - Errors []Error - } - - if err := json.Unmarshal(data, &tmpErrs); err != nil { - return err - } - - var newErrs Errors - for _, daErr := range tmpErrs.Errors { - // If Message is empty or exactly matches the Code's message string - // then just use the Code, no need for a full Error struct - if daErr.Detail == nil && (daErr.Message == "" || daErr.Message == daErr.Code.Message()) { - // Errors without details get converted to ErrorCode - newErrs = append(newErrs, daErr.Code) - } else { - // Errors with details are untouched - newErrs = append(newErrs, Error{ - Code: daErr.Code, - Message: daErr.Message, - Detail: daErr.Detail, - }) - } - } - - *errs = newErrs - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go deleted file mode 100644 index 49a64a86eb..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/errcode/handler.go +++ /dev/null @@ -1,44 +0,0 @@ -package errcode - -import ( - "encoding/json" - "net/http" -) - -// ServeJSON attempts to serve the errcode in a JSON envelope. It marshals err -// and sets the content-type header to 'application/json'. It will handle -// ErrorCoder and Errors, and if necessary will create an envelope. -func ServeJSON(w http.ResponseWriter, err error) error { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - var sc int - - switch errs := err.(type) { - case Errors: - if len(errs) < 1 { - break - } - - if err, ok := errs[0].(ErrorCoder); ok { - sc = err.ErrorCode().Descriptor().HTTPStatusCode - } - case ErrorCoder: - sc = errs.ErrorCode().Descriptor().HTTPStatusCode - err = Errors{err} // create an envelope. - default: - // We just have an unhandled error type, so just place in an envelope - // and move along.
- err = Errors{err} - } - - if sc == 0 { - sc = http.StatusInternalServerError - } - - w.WriteHeader(sc) - - if err := json.NewEncoder(w).Encode(err); err != nil { - return err - } - - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/api/errcode/register.go b/vendor/src/github.com/docker/distribution/registry/api/errcode/register.go deleted file mode 100644 index d1e8826c6d..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/errcode/register.go +++ /dev/null @@ -1,138 +0,0 @@ -package errcode - -import ( - "fmt" - "net/http" - "sort" - "sync" -) - -var ( - errorCodeToDescriptors = map[ErrorCode]ErrorDescriptor{} - idToDescriptors = map[string]ErrorDescriptor{} - groupToDescriptors = map[string][]ErrorDescriptor{} -) - -var ( - // ErrorCodeUnknown is a generic error that can be used as a last - // resort if there is no situation-specific error message that can be used - ErrorCodeUnknown = Register("errcode", ErrorDescriptor{ - Value: "UNKNOWN", - Message: "unknown error", - Description: `Generic error returned when the error does not have an - API classification.`, - HTTPStatusCode: http.StatusInternalServerError, - }) - - // ErrorCodeUnsupported is returned when an operation is not supported. - ErrorCodeUnsupported = Register("errcode", ErrorDescriptor{ - Value: "UNSUPPORTED", - Message: "The operation is unsupported.", - Description: `The operation was unsupported due to a missing - implementation or invalid set of parameters.`, - HTTPStatusCode: http.StatusMethodNotAllowed, - }) - - // ErrorCodeUnauthorized is returned if a request requires - // authentication. - ErrorCodeUnauthorized = Register("errcode", ErrorDescriptor{ - Value: "UNAUTHORIZED", - Message: "authentication required", - Description: `The access controller was unable to authenticate - the client. Often this will be accompanied by a - Www-Authenticate HTTP response header indicating how to - authenticate.`, - HTTPStatusCode: http.StatusUnauthorized, - }) - - // ErrorCodeDenied is returned if a client does not have sufficient - // permission to perform an action. - ErrorCodeDenied = Register("errcode", ErrorDescriptor{ - Value: "DENIED", - Message: "requested access to the resource is denied", - Description: `The access controller denied access for the - operation on a resource.`, - HTTPStatusCode: http.StatusForbidden, - }) - - // ErrorCodeUnavailable provides a common error to report unavailability - // of a service or endpoint. - ErrorCodeUnavailable = Register("errcode", ErrorDescriptor{ - Value: "UNAVAILABLE", - Message: "service unavailable", - Description: "Returned when a service is not available", - HTTPStatusCode: http.StatusServiceUnavailable, - }) - - // ErrorCodeTooManyRequests is returned if a client attempts too many - // times to contact a service endpoint. 
- ErrorCodeTooManyRequests = Register("errcode", ErrorDescriptor{ - Value: "TOOMANYREQUESTS", - Message: "too many requests", - Description: `Returned when a client attempts to contact a - service too many times`, - HTTPStatusCode: http.StatusTooManyRequests, - }) -) - -var nextCode = 1000 -var registerLock sync.Mutex - -// Register will make the passed-in error known to the environment and -// return a new ErrorCode -func Register(group string, descriptor ErrorDescriptor) ErrorCode { - registerLock.Lock() - defer registerLock.Unlock() - - descriptor.Code = ErrorCode(nextCode) - - if _, ok := idToDescriptors[descriptor.Value]; ok { - panic(fmt.Sprintf("ErrorValue %q is already registered", descriptor.Value)) - } - if _, ok := errorCodeToDescriptors[descriptor.Code]; ok { - panic(fmt.Sprintf("ErrorCode %v is already registered", descriptor.Code)) - } - - groupToDescriptors[group] = append(groupToDescriptors[group], descriptor) - errorCodeToDescriptors[descriptor.Code] = descriptor - idToDescriptors[descriptor.Value] = descriptor - - nextCode++ - return descriptor.Code -} - -type byValue []ErrorDescriptor - -func (a byValue) Len() int { return len(a) } -func (a byValue) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byValue) Less(i, j int) bool { return a[i].Value < a[j].Value } - -// GetGroupNames returns the list of Error group names that are registered -func GetGroupNames() []string { - keys := []string{} - - for k := range groupToDescriptors { - keys = append(keys, k) - } - sort.Strings(keys) - return keys -} - -// GetErrorCodeGroup returns the named group of error descriptors -func GetErrorCodeGroup(name string) []ErrorDescriptor { - desc := groupToDescriptors[name] - sort.Sort(byValue(desc)) - return desc -} - -// GetErrorAllDescriptors returns a slice of all ErrorDescriptors that are -// registered, irrespective of what group they're in -func GetErrorAllDescriptors() []ErrorDescriptor { - result := []ErrorDescriptor{} - - for _, group := range GetGroupNames() { - result = append(result, GetErrorCodeGroup(group)...) - } - sort.Sort(byValue(result)) - return result -} diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go deleted file mode 100644 index fc42c1c410..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/v2/descriptors.go +++ /dev/null @@ -1,1569 +0,0 @@ -package v2 - -import ( - "net/http" - "regexp" - - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/errcode" -) - -var ( - nameParameterDescriptor = ParameterDescriptor{ - Name: "name", - Type: "string", - Format: reference.NameRegexp.String(), - Required: true, - Description: `Name of the target repository.`, - } - - referenceParameterDescriptor = ParameterDescriptor{ - Name: "reference", - Type: "string", - Format: reference.TagRegexp.String(), - Required: true, - Description: `Tag or digest of the target manifest.`, - } - - uuidParameterDescriptor = ParameterDescriptor{ - Name: "uuid", - Type: "opaque", - Required: true, - Description: "A uuid identifying the upload. 
This field can accept characters that match `[a-zA-Z0-9-_.=]+`.", - } - - digestPathParameter = ParameterDescriptor{ - Name: "digest", - Type: "path", - Required: true, - Format: digest.DigestRegexp.String(), - Description: `Digest of desired blob.`, - } - - hostHeader = ParameterDescriptor{ - Name: "Host", - Type: "string", - Description: "Standard HTTP Host Header. Should be set to the registry host.", - Format: "", - Examples: []string{"registry-1.docker.io"}, - } - - authHeader = ParameterDescriptor{ - Name: "Authorization", - Type: "string", - Description: "An RFC7235 compliant authorization header.", - Format: " ", - Examples: []string{"Bearer dGhpcyBpcyBhIGZha2UgYmVhcmVyIHRva2VuIQ=="}, - } - - authChallengeHeader = ParameterDescriptor{ - Name: "WWW-Authenticate", - Type: "string", - Description: "An RFC7235 compliant authentication challenge header.", - Format: ` realm="", ..."`, - Examples: []string{ - `Bearer realm="https://auth.docker.com/", service="registry.docker.com", scopes="repository:library/ubuntu:pull"`, - }, - } - - contentLengthZeroHeader = ParameterDescriptor{ - Name: "Content-Length", - Description: "The `Content-Length` header must be zero and the body must be empty.", - Type: "integer", - Format: "0", - } - - dockerUploadUUIDHeader = ParameterDescriptor{ - Name: "Docker-Upload-UUID", - Description: "Identifies the docker upload uuid for the current request.", - Type: "uuid", - Format: "", - } - - digestHeader = ParameterDescriptor{ - Name: "Docker-Content-Digest", - Description: "Digest of the targeted content for the request.", - Type: "digest", - Format: "", - } - - linkHeader = ParameterDescriptor{ - Name: "Link", - Type: "link", - Description: "RFC5988 compliant rel='next' with URL to next result set, if available", - Format: `<?n=&last=>; rel="next"`, - } - - paginationParameters = []ParameterDescriptor{ - { - Name: "n", - Type: "integer", - Description: "Limit the number of entries in each response. 
If not present, all entries will be returned.",
-			Format:      "",
-			Required:    false,
-		},
-		{
-			Name:        "last",
-			Type:        "string",
-			Description: "Result set will include values lexically after last.",
-			Format:      "",
-			Required:    false,
-		},
-	}
-
-	unauthorizedResponseDescriptor = ResponseDescriptor{
-		Name:        "Authentication Required",
-		StatusCode:  http.StatusUnauthorized,
-		Description: "The client is not authenticated.",
-		Headers: []ParameterDescriptor{
-			authChallengeHeader,
-			{
-				Name:        "Content-Length",
-				Type:        "integer",
-				Description: "Length of the JSON response body.",
-				Format:      "",
-			},
-		},
-		Body: BodyDescriptor{
-			ContentType: "application/json; charset=utf-8",
-			Format:      errorsBody,
-		},
-		ErrorCodes: []errcode.ErrorCode{
-			errcode.ErrorCodeUnauthorized,
-		},
-	}
-
-	repositoryNotFoundResponseDescriptor = ResponseDescriptor{
-		Name:        "No Such Repository Error",
-		StatusCode:  http.StatusNotFound,
-		Description: "The repository is not known to the registry.",
-		Headers: []ParameterDescriptor{
-			{
-				Name:        "Content-Length",
-				Type:        "integer",
-				Description: "Length of the JSON response body.",
-				Format:      "",
-			},
-		},
-		Body: BodyDescriptor{
-			ContentType: "application/json; charset=utf-8",
-			Format:      errorsBody,
-		},
-		ErrorCodes: []errcode.ErrorCode{
-			ErrorCodeNameUnknown,
-		},
-	}
-
-	deniedResponseDescriptor = ResponseDescriptor{
-		Name:        "Access Denied",
-		StatusCode:  http.StatusForbidden,
-		Description: "The client does not have required access to the repository.",
-		Headers: []ParameterDescriptor{
-			{
-				Name:        "Content-Length",
-				Type:        "integer",
-				Description: "Length of the JSON response body.",
-				Format:      "",
-			},
-		},
-		Body: BodyDescriptor{
-			ContentType: "application/json; charset=utf-8",
-			Format:      errorsBody,
-		},
-		ErrorCodes: []errcode.ErrorCode{
-			errcode.ErrorCodeDenied,
-		},
-	}
-)
-
-const (
-	manifestBody = `{
-   "name": <name>,
-   "tag": <tag>,
-   "fsLayers": [
-      {
-         "blobSum": "<digest>"
-      },
-      ...
-   ],
-   "history": <v1 images>,
-   "signature": <JWS>
-}`
-
-	errorsBody = `{
-	"errors": [
-	    {
-            "code": <error code>,
-            "message": "<error message>",
-            "detail": ...
-        },
-        ...
-    ]
-}`
-
-	unauthorizedErrorsBody = `{
-	"errors": [
-	    {
-            "code": "UNAUTHORIZED",
-            "message": "access to the requested resource is not authorized",
-            "detail": ...
-        },
-        ...
-    ]
-}`
-)
-
-// APIDescriptor exports descriptions of the layout of the v2 registry API.
-var APIDescriptor = struct {
-	// RouteDescriptors provides a list of the routes available in the API.
-	RouteDescriptors []RouteDescriptor
-}{
-	RouteDescriptors: routeDescriptors,
-}
-
-// RouteDescriptor describes a route specified by name.
-type RouteDescriptor struct {
-	// Name is the name of the route, as specified in RouteNameXXX exports.
-	// These names should be considered a unique reference for a route. If
-	// the route is registered with gorilla, this is the name that will be
-	// used.
-	Name string
-
-	// Path is a gorilla/mux-compatible regexp that can be used to match the
-	// route. For any incoming method and path, only one route descriptor
-	// should match.
-	Path string
-
-	// Entity should be a short, human-readable description of the object
-	// targeted by the endpoint.
-	Entity string
-
-	// Description should provide an accurate overview of the functionality
-	// provided by the route.
-	Description string
-
-	// Methods should describe the various HTTP methods that may be used on
-	// this route, including request and response formats.
-	Methods []MethodDescriptor
-}
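Since this file exists largely to drive generated documentation, a minimal sketch of consuming the exported descriptor tree may help orient readers; the main package and the print format are illustrative, not part of the vendored code:

    package main

    import (
    	"fmt"

    	v2 "github.com/docker/distribution/registry/api/v2"
    )

    func main() {
    	// Walk the exported descriptor tree, printing each route and the
    	// HTTP methods it supports. This is the same kind of traversal a
    	// documentation generator would perform over APIDescriptor.
    	for _, route := range v2.APIDescriptor.RouteDescriptors {
    		fmt.Printf("%s\t%s\n", route.Name, route.Path)
    		for _, method := range route.Methods {
    			fmt.Printf("  %s: %s\n", method.Method, method.Description)
    		}
    	}
    }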
-
-// MethodDescriptor provides a description of the requests that may be
-// conducted with the target method.
-type MethodDescriptor struct {
-
-	// Method is an HTTP method, such as GET, PUT or POST.
-	Method string
-
-	// Description should provide an overview of the functionality provided by
-	// the covered method, suitable for use in documentation. Use of markdown
-	// here is encouraged.
-	Description string
-
-	// Requests is a slice of request descriptors enumerating how this
-	// endpoint may be used.
-	Requests []RequestDescriptor
-}
-
-// RequestDescriptor covers a particular set of headers and parameters that
-// can be used with the parent method. It's most helpful to have one
-// RequestDescriptor per API use case.
-type RequestDescriptor struct {
-	// Name provides a short identifier for the request, usable as a title or
-	// to provide quick context for the particular request.
-	Name string
-
-	// Description should cover the request's purpose, covering any details for
-	// this particular use case.
-	Description string
-
-	// Headers describes headers that must be used with the HTTP request.
-	Headers []ParameterDescriptor
-
-	// PathParameters enumerate the parameterized path components for the
-	// given request, as defined in the route's regular expression.
-	PathParameters []ParameterDescriptor
-
-	// QueryParameters provides a list of query parameters for the given
-	// request.
-	QueryParameters []ParameterDescriptor
-
-	// Body describes the format of the request body.
-	Body BodyDescriptor
-
-	// Successes enumerates the possible responses that are considered to be
-	// the result of a successful request.
-	Successes []ResponseDescriptor
-
-	// Failures covers the possible failures from this particular request.
-	Failures []ResponseDescriptor
-}
-
-// ResponseDescriptor describes the components of an API response.
-type ResponseDescriptor struct {
-	// Name provides a short identifier for the response, usable as a title or
-	// to provide quick context for the particular response.
-	Name string
-
-	// Description should provide a brief overview of the role of the
-	// response.
-	Description string
-
-	// StatusCode specifies the status received by this particular response.
-	StatusCode int
-
-	// Headers covers any headers that may be returned from the response.
-	Headers []ParameterDescriptor
-
-	// Fields describes any fields that may be present in the response.
-	Fields []ParameterDescriptor
-
-	// ErrorCodes enumerates the error codes that may be returned along with
-	// the response.
-	ErrorCodes []errcode.ErrorCode
-
-	// Body describes the body of the response, if any.
-	Body BodyDescriptor
-}
-
-// BodyDescriptor describes a request body and its expected content type. For
-// the most part, it should be example json or some placeholder for body
-// data in documentation.
-type BodyDescriptor struct {
-	ContentType string
-	Format      string
-}
-
-// ParameterDescriptor describes the format of a request parameter, which may
-// be a header, path parameter or query parameter.
-type ParameterDescriptor struct {
-	// Name is the name of the parameter, either of the path component or
-	// query parameter.
-	Name string
-
-	// Type specifies the type of the parameter, such as string, integer, etc.
-	Type string
-
-	// Description provides a human-readable description of the parameter.
-	Description string
-
-	// Required means the field is required when set.
- Required bool - - // Format is a specifying the string format accepted by this parameter. - Format string - - // Regexp is a compiled regular expression that can be used to validate - // the contents of the parameter. - Regexp *regexp.Regexp - - // Examples provides multiple examples for the values that might be valid - // for this parameter. - Examples []string -} - -var routeDescriptors = []RouteDescriptor{ - { - Name: RouteNameBase, - Path: "/v2/", - Entity: "Base", - Description: `Base V2 API route. Typically, this can be used for lightweight version checks and to validate registry authentication.`, - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Check that the endpoint implements Docker Registry API V2.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Successes: []ResponseDescriptor{ - { - Description: "The API implements V2 protocol and is accessible.", - StatusCode: http.StatusOK, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The registry does not implement the V2 API.", - StatusCode: http.StatusNotFound, - }, - unauthorizedResponseDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameTags, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/tags/list", - Entity: "Tags", - Description: "Retrieve information about tags.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the tags under the repository identified by `name`.", - Requests: []RequestDescriptor{ - { - Name: "Tags", - Description: "Return all tags for the repository", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... - ] -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - { - Name: "Tags Paginated", - Description: "Return a portion of the tags for the specified repository.", - PathParameters: []ParameterDescriptor{nameParameterDescriptor}, - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Description: "A list of tags for the named repository.", - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "name": , - "tags": [ - , - ... 
- ], -}`, - }, - }, - }, - Failures: []ResponseDescriptor{ - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameManifest, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/manifests/{reference:" + reference.TagRegexp.String() + "|" + digest.DigestRegexp.String() + "}", - Entity: "Manifest", - Description: "Create, update, delete and retrieve manifests.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Fetch the manifest identified by `name` and `reference` where `reference` can be a tag or digest. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest identified by `name` and `reference`. The contents can be used to identify and resolve resources required to run the specified image.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "The name or reference was invalid.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Put the manifest identified by `name` and `reference` where `reference` can be a tag or digest.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Body: BodyDescriptor{ - ContentType: "", - Format: manifestBody, - }, - Successes: []ResponseDescriptor{ - { - Description: "The manifest has been accepted by the registry and is stored under the specified `name` and `tag`.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The canonical location url of the uploaded manifest.", - Format: "", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Manifest", - Description: "The received manifest was invalid in some way, as described by the error codes. The client should resolve the issue and retry the request.", - StatusCode: http.StatusBadRequest, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - ErrorCodeManifestInvalid, - ErrorCodeManifestUnverified, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - { - Name: "Missing Layer(s)", - Description: "One or more layers may be missing during a manifest upload. 
If so, the missing layers will be enumerated in the error response.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "errors:" [{ - "code": "BLOB_UNKNOWN", - "message": "blob unknown to registry", - "detail": { - "digest": "" - } - }, - ... - ] -}`, - }, - }, - { - Name: "Not allowed", - Description: "Manifest put is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the manifest identified by `name` and `reference`. Note that a manifest can _only_ be deleted by `digest`.", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - referenceParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Reference", - Description: "The specified `name` or `reference` were invalid and the delete was unable to proceed.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeTagInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - { - Name: "Unknown Manifest", - Description: "The specified `name` or `reference` are unknown to the registry and the delete was unable to proceed. Clients can assume the manifest was already deleted if this response is returned.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeManifestUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Name: "Not allowed", - Description: "Manifest delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled.", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlob, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/{digest:" + digest.DigestRegexp.String() + "}", - Entity: "Blob", - Description: "Operations on blobs identified by `name` and `digest`. Used to fetch or delete layers by digest.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve the blob from the registry identified by `digest`. A `HEAD` request can also be issued to this endpoint to obtain resource information without receiving all data.", - Requests: []RequestDescriptor{ - { - Name: "Fetch Blob", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. 
The blob content will be present in the body of the request.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob content.", - Format: "", - }, - digestHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - { - Description: "The blob identified by `digest` is available at the provided location.", - StatusCode: http.StatusTemporaryRedirect, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Description: "The location where the layer should be accessible.", - Format: "", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - { - Name: "Fetch Blob Part", - Description: "This endpoint may also support RFC7233 compliant range requests. Support can be detected by issuing a HEAD request. If the header `Accept-Range: bytes` is returned, range requests can be used to fetch partial content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Range", - Type: "string", - Description: "HTTP Range header specifying blob chunk.", - Format: "bytes=-", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob identified by `digest` is available. The specified chunk of blob content will be present in the body of the request.", - StatusCode: http.StatusPartialContent, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "The length of the requested blob chunk.", - Format: "", - }, - { - Name: "Content-Range", - Type: "byte range", - Description: "Content range of blob chunk.", - Format: "bytes -/", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was a problem with the request that needs to be addressed by the client, such as an invalid `name` or `tag`.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeDigestInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The range specification cannot be satisfied for the requested content. 
This can happen when the range is not formatted correctly or if the range is outside of the valid size of the content.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Delete the blob identified by `name` and `digest`", - Requests: []RequestDescriptor{ - { - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - digestPathParameter, - }, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "0", - Format: "0", - }, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Description: "The blob, identified by `name` and `digest`, is unknown to the registry.", - StatusCode: http.StatusNotFound, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameUnknown, - ErrorCodeBlobUnknown, - }, - }, - { - Description: "Blob delete is not allowed because the registry is configured as a pull-through cache or `delete` has been disabled", - StatusCode: http.StatusMethodNotAllowed, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - - // TODO(stevvooe): We may want to add a PUT request here to - // kickoff an upload of a blob, integrated with the blob upload - // API. - }, - }, - - { - Name: RouteNameBlobUpload, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/", - Entity: "Initiate Blob Upload", - Description: "Initiate a blob upload. This endpoint can be used to create resumable uploads or monolithic uploads.", - Methods: []MethodDescriptor{ - { - Method: "POST", - Description: "Initiate a resumable blob upload. If successful, an upload location will be provided to complete the upload. Optionally, if the `digest` parameter is present, the request body will be used to complete the upload in a single request.", - Requests: []RequestDescriptor{ - { - Name: "Initiate Monolithic Blob Upload", - Description: "Upload a blob identified by the `digest` parameter in single request. This upload will not be resumable unless a recoverable error is returned.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of uploaded blob. 
If present, the upload will be completed, in a single request, with contents of the request body as the resulting blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octect-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been created in the registry and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob upload is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - { - Name: "Initiate Resumable Blob Upload", - Description: "Initiate a resumable blob upload with an empty request body.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Description: "The upload has been created. The `Location` header must be used to complete the upload. The response should be identical to a `GET` request on the contents of the returned `Location` header.", - StatusCode: http.StatusAccepted, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the created upload. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Format: "0-0", - Description: "Range header indicating the progress of the upload. 
When starting an upload, it will return an empty range, since no content has been received.", - }, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - { - Name: "Mount Blob", - Description: "Mount a blob identified by the `mount` parameter from another repository.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "mount", - Type: "query", - Format: "", - Regexp: digest.DigestRegexp, - Description: `Digest of blob to mount from the source repository.`, - }, - { - Name: "from", - Type: "query", - Format: "", - Regexp: reference.NameRegexp, - Description: `Name of the source repository.`, - }, - }, - Successes: []ResponseDescriptor{ - { - Description: "The blob has been mounted in the repository and is available at the provided location.", - StatusCode: http.StatusCreated, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Name: "Invalid Name or Digest", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - }, - }, - { - Name: "Not allowed", - Description: "Blob mount is not allowed because the registry is configured as a pull-through cache or for some other reason", - StatusCode: http.StatusMethodNotAllowed, - ErrorCodes: []errcode.ErrorCode{ - errcode.ErrorCodeUnsupported, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - }, - }, - - { - Name: RouteNameBlobUploadChunk, - Path: "/v2/{name:" + reference.NameRegexp.String() + "}/blobs/uploads/{uuid:[a-zA-Z0-9-_.=]+}", - Entity: "Blob Upload", - Description: "Interact with blob uploads. Clients should never assemble URLs for this endpoint and should only take it through the `Location` header on related API requests. The `Location` header and its parameters should be preserved by clients, using the latest value returned via upload related API calls.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve status of upload identified by `uuid`. The primary purpose of this endpoint is to resolve the current status of a resumable upload.", - Requests: []RequestDescriptor{ - { - Description: "Retrieve the progress of the current upload, as reported by the `Range` header.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Progress", - Description: "The upload is known and in progress. 
The last received offset is available in the `Range` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - { - Method: "PATCH", - Description: "Upload a chunk of data for the specified upload.", - Requests: []RequestDescriptor{ - { - Name: "Stream upload", - Description: "Upload a stream of data to upload without completing the upload.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Data Accepted", - Description: "The stream of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - { - Name: "Chunked upload", - Description: "Upload a chunk of data to specified upload without completing the upload. 
The data will be uploaded to the specified Content Range.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Required: true, - Description: "Range of bytes identifying the desired block of content represented by the body. Start must the end offset retrieved via status check plus one. Note that this is a non-standard use of the `Content-Range` header.", - }, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the chunk being uploaded, corresponding the length of the request body.", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Chunk Accepted", - Description: "The chunk of data has been accepted and the current progress is available in the range header. The updated upload location is available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "/v2//blobs/uploads/", - Description: "The location of the upload. Clients should assume this changes after each request. Clients should use the contents verbatim to complete the upload, adding parameters where required.", - }, - { - Name: "Range", - Type: "header", - Format: "0-", - Description: "Range indicating the current progress of the upload.", - }, - contentLengthZeroHeader, - dockerUploadUUIDHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The `Content-Range` specification cannot be accepted, either because it does not overlap with the current progress or it is invalid.", - StatusCode: http.StatusRequestedRangeNotSatisfiable, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - { - Method: "PUT", - Description: "Complete the upload specified by `uuid`, optionally appending the body as the final chunk.", - Requests: []RequestDescriptor{ - { - Description: "Complete the upload, providing all the data in the body, if necessary. A request without a body will just complete the upload with previously uploaded content.", - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - { - Name: "Content-Length", - Type: "integer", - Format: "", - Description: "Length of the data being uploaded, corresponding to the length of the request body. 
May be zero if no data is provided.", - }, - }, - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - QueryParameters: []ParameterDescriptor{ - { - Name: "digest", - Type: "string", - Format: "", - Regexp: digest.DigestRegexp, - Required: true, - Description: `Digest of uploaded blob.`, - }, - }, - Body: BodyDescriptor{ - ContentType: "application/octet-stream", - Format: "", - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Complete", - Description: "The upload has been completed and accepted by the registry. The canonical location will be available in the `Location` header.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - { - Name: "Location", - Type: "url", - Format: "", - Description: "The canonical location of the blob for retrieval", - }, - { - Name: "Content-Range", - Type: "header", - Format: "-", - Description: "Range of bytes identifying the desired block of content represented by the body. Start must match the end of offset retrieved via status check. Note that this is a non-standard use of the `Content-Range` header.", - }, - contentLengthZeroHeader, - digestHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "There was an error processing the upload and it must be restarted.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeDigestInvalid, - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - errcode.ErrorCodeUnsupported, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. The upload must be restarted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - { - Method: "DELETE", - Description: "Cancel outstanding upload processes, releasing associated resources. If this is not called, the unfinished uploads will eventually timeout.", - Requests: []RequestDescriptor{ - { - Description: "Cancel the upload specified by `uuid`.", - PathParameters: []ParameterDescriptor{ - nameParameterDescriptor, - uuidParameterDescriptor, - }, - Headers: []ParameterDescriptor{ - hostHeader, - authHeader, - contentLengthZeroHeader, - }, - Successes: []ResponseDescriptor{ - { - Name: "Upload Deleted", - Description: "The upload has been successfully deleted.", - StatusCode: http.StatusNoContent, - Headers: []ParameterDescriptor{ - contentLengthZeroHeader, - }, - }, - }, - Failures: []ResponseDescriptor{ - { - Description: "An error was encountered processing the delete. The client may ignore this error.", - StatusCode: http.StatusBadRequest, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeNameInvalid, - ErrorCodeBlobUploadInvalid, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - { - Description: "The upload is unknown to the registry. 
The client may ignore this error and assume the upload has been deleted.", - StatusCode: http.StatusNotFound, - ErrorCodes: []errcode.ErrorCode{ - ErrorCodeBlobUploadUnknown, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: errorsBody, - }, - }, - unauthorizedResponseDescriptor, - repositoryNotFoundResponseDescriptor, - deniedResponseDescriptor, - }, - }, - }, - }, - }, - }, - { - Name: RouteNameCatalog, - Path: "/v2/_catalog", - Entity: "Catalog", - Description: "List a set of available repositories in the local registry cluster. Does not provide any indication of what may be available upstream. Applications can only determine if a repository is available but not if it is not available.", - Methods: []MethodDescriptor{ - { - Method: "GET", - Description: "Retrieve a sorted, json list of repositories available in the registry.", - Requests: []RequestDescriptor{ - { - Name: "Catalog Fetch", - Description: "Request an unabridged list of repositories available. The implementation may impose a maximum limit and return a partial set with pagination links.", - Successes: []ResponseDescriptor{ - { - Description: "Returns the unabridged list of repositories as a json response.", - StatusCode: http.StatusOK, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - }, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] -}`, - }, - }, - }, - }, - { - Name: "Catalog Fetch Paginated", - Description: "Return the specified portion of repositories.", - QueryParameters: paginationParameters, - Successes: []ResponseDescriptor{ - { - StatusCode: http.StatusOK, - Body: BodyDescriptor{ - ContentType: "application/json; charset=utf-8", - Format: `{ - "repositories": [ - , - ... - ] - "next": "?last=&n=" -}`, - }, - Headers: []ParameterDescriptor{ - { - Name: "Content-Length", - Type: "integer", - Description: "Length of the JSON response body.", - Format: "", - }, - linkHeader, - }, - }, - }, - }, - }, - }, - }, - }, -} - -var routeDescriptorsMap map[string]RouteDescriptor - -func init() { - routeDescriptorsMap = make(map[string]RouteDescriptor, len(routeDescriptors)) - - for _, descriptor := range routeDescriptors { - routeDescriptorsMap[descriptor.Name] = descriptor - } -} diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go b/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go deleted file mode 100644 index cde0119594..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/api/v2/doc.go +++ /dev/null @@ -1,9 +0,0 @@ -// Package v2 describes routes, urls and the error codes used in the Docker -// Registry JSON HTTP API V2. In addition to declarations, descriptors are -// provided for routes and error codes that can be used for implementation and -// automatically generating documentation. -// -// Definitions here are considered to be locked down for the V2 registry api. -// Any changes must be considered carefully and should not proceed without a -// change proposal in docker core. 
-package v2
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go b/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go
deleted file mode 100644
index 97d6923aa0..0000000000
--- a/vendor/src/github.com/docker/distribution/registry/api/v2/errors.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package v2
-
-import (
-	"net/http"
-
-	"github.com/docker/distribution/registry/api/errcode"
-)
-
-const errGroup = "registry.api.v2"
-
-var (
-	// ErrorCodeDigestInvalid is returned when uploading a blob if the
-	// provided digest does not match the blob contents.
-	ErrorCodeDigestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "DIGEST_INVALID",
-		Message: "provided digest did not match uploaded content",
-		Description: `When a blob is uploaded, the registry will check that
-		the content matches the digest provided by the client. The error may
-		include a detail structure with the key "digest", including the
-		invalid digest string. This error may also be returned when a manifest
-		includes an invalid layer digest.`,
-		HTTPStatusCode: http.StatusBadRequest,
-	})
-
-	// ErrorCodeSizeInvalid is returned when uploading a blob if the provided
-	// length does not match the content length.
-	ErrorCodeSizeInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "SIZE_INVALID",
-		Message: "provided length did not match content length",
-		Description: `When a layer is uploaded, the provided size will be
-		checked against the uploaded content. If they do not match, this error
-		will be returned.`,
-		HTTPStatusCode: http.StatusBadRequest,
-	})
-
-	// ErrorCodeNameInvalid is returned when the name in the manifest does not
-	// match the provided name.
-	ErrorCodeNameInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "NAME_INVALID",
-		Message: "invalid repository name",
-		Description: `Invalid repository name encountered either during
-		manifest validation or any API operation.`,
-		HTTPStatusCode: http.StatusBadRequest,
-	})
-
-	// ErrorCodeTagInvalid is returned when the tag in the manifest does not
-	// match the provided tag.
-	ErrorCodeTagInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "TAG_INVALID",
-		Message: "manifest tag did not match URI",
-		Description: `During a manifest upload, if the tag in the manifest
-		does not match the uri tag, this error will be returned.`,
-		HTTPStatusCode: http.StatusBadRequest,
-	})
-
-	// ErrorCodeNameUnknown when the repository name is not known.
-	ErrorCodeNameUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "NAME_UNKNOWN",
-		Message: "repository name not known to registry",
-		Description: `This is returned if the name used during an operation is
-		unknown to the registry.`,
-		HTTPStatusCode: http.StatusNotFound,
-	})
-
-	// ErrorCodeManifestUnknown returned when image manifest is unknown.
-	ErrorCodeManifestUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "MANIFEST_UNKNOWN",
-		Message: "manifest unknown",
-		Description: `This error is returned when the manifest, identified by
-		name and tag, is unknown to the repository.`,
-		HTTPStatusCode: http.StatusNotFound,
-	})
-
-	// ErrorCodeManifestInvalid returned when an image manifest is invalid,
-	// typically during a PUT operation. This error encompasses all errors
-	// encountered during manifest validation that aren't signature errors.
-	ErrorCodeManifestInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "MANIFEST_INVALID",
-		Message: "manifest invalid",
-		Description: `During upload, manifests undergo several checks ensuring
-		validity. If those checks fail, this error may be returned, unless a
-		more specific error is included. The detail will contain information
-		about the failed validation.`,
-		HTTPStatusCode: http.StatusBadRequest,
-	})
-
-	// ErrorCodeManifestUnverified is returned when the manifest fails
-	// signature verification.
-	ErrorCodeManifestUnverified = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "MANIFEST_UNVERIFIED",
-		Message: "manifest failed signature verification",
-		Description: `During manifest upload, if the manifest fails signature
-		verification, this error will be returned.`,
-		HTTPStatusCode: http.StatusBadRequest,
-	})
-
-	// ErrorCodeManifestBlobUnknown is returned when a manifest blob is
-	// unknown to the registry.
-	ErrorCodeManifestBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "MANIFEST_BLOB_UNKNOWN",
-		Message: "blob unknown to registry",
-		Description: `This error may be returned when a manifest blob is
-		unknown to the registry.`,
-		HTTPStatusCode: http.StatusBadRequest,
-	})
-
-	// ErrorCodeBlobUnknown is returned when a blob is unknown to the
-	// registry. This can happen when the manifest references a nonexistent
-	// layer or the result is not found by a blob fetch.
-	ErrorCodeBlobUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "BLOB_UNKNOWN",
-		Message: "blob unknown to registry",
-		Description: `This error may be returned when a blob is unknown to the
-		registry in a specified repository. This can be returned with a
-		standard get or if a manifest references an unknown layer during
-		upload.`,
-		HTTPStatusCode: http.StatusNotFound,
-	})
-
-	// ErrorCodeBlobUploadUnknown is returned when an upload is unknown.
-	ErrorCodeBlobUploadUnknown = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "BLOB_UPLOAD_UNKNOWN",
-		Message: "blob upload unknown to registry",
-		Description: `If a blob upload has been cancelled or was never
-		started, this error code may be returned.`,
-		HTTPStatusCode: http.StatusNotFound,
-	})
-
-	// ErrorCodeBlobUploadInvalid is returned when an upload is invalid.
-	ErrorCodeBlobUploadInvalid = errcode.Register(errGroup, errcode.ErrorDescriptor{
-		Value:   "BLOB_UPLOAD_INVALID",
-		Message: "blob upload invalid",
-		Description: `The blob upload encountered an error and can no
-		longer proceed.`,
-		HTTPStatusCode: http.StatusNotFound,
-	})
-)
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go b/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go
deleted file mode 100644
index 5b80d5be76..0000000000
--- a/vendor/src/github.com/docker/distribution/registry/api/v2/routes.go
+++ /dev/null
@@ -1,49 +0,0 @@
-package v2
-
-import "github.com/gorilla/mux"
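Before the route definitions that follow, a short sketch of resolving one of the named routes into a concrete URL path; the repository name and tag are illustrative values, and the main package is not part of the vendored code:

    package main

    import (
    	"fmt"

    	v2 "github.com/docker/distribution/registry/api/v2"
    )

    func main() {
    	router := v2.Router()

    	// Resolve the named manifest route into a concrete path. The
    	// supplied vars must satisfy the route's regular expressions.
    	u, err := router.GetRoute(v2.RouteNameManifest).URL(
    		"name", "library/ubuntu",
    		"reference", "latest")
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(u.Path) // /v2/library/ubuntu/manifests/latest
    }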
-
-// The following are definitions of the name under which all V2 routes are
-// registered. These symbols can be used to look up a route based on the name.
-const (
-	RouteNameBase            = "base"
-	RouteNameManifest        = "manifest"
-	RouteNameTags            = "tags"
-	RouteNameBlob            = "blob"
-	RouteNameBlobUpload      = "blob-upload"
-	RouteNameBlobUploadChunk = "blob-upload-chunk"
-	RouteNameCatalog         = "catalog"
-)
-
-var allEndpoints = []string{
-	RouteNameManifest,
-	RouteNameCatalog,
-	RouteNameTags,
-	RouteNameBlob,
-	RouteNameBlobUpload,
-	RouteNameBlobUploadChunk,
-}
-
-// Router builds a gorilla router with named routes for the various API
-// methods. This can be used directly by both server implementations and
-// clients.
-func Router() *mux.Router {
-	return RouterWithPrefix("")
-}
-
-// RouterWithPrefix builds a gorilla router with a configured prefix
-// on all routes.
-func RouterWithPrefix(prefix string) *mux.Router {
-	rootRouter := mux.NewRouter()
-	router := rootRouter
-	if prefix != "" {
-		router = router.PathPrefix(prefix).Subrouter()
-	}
-
-	router.StrictSlash(true)
-
-	for _, descriptor := range routeDescriptors {
-		router.Path(descriptor.Path).Name(descriptor.Name)
-	}
-
-	return rootRouter
-}
diff --git a/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go b/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
deleted file mode 100644
index a959aaa897..0000000000
--- a/vendor/src/github.com/docker/distribution/registry/api/v2/urls.go
+++ /dev/null
@@ -1,251 +0,0 @@
-package v2
-
-import (
-	"net/http"
-	"net/url"
-	"strings"
-
-	"github.com/docker/distribution/reference"
-	"github.com/gorilla/mux"
-)
-
-// URLBuilder creates registry API urls from a single base endpoint. It can be
-// used to create urls for use in a registry client or server.
-//
-// All urls will be created from the given base, including the api version.
-// For example, if a root of "/foo/" is provided, urls generated will fall
-// under "/foo/v2/...". Most applications will only provide a schema, host and
-// port, such as "https://localhost:5000/".
-type URLBuilder struct {
-	root     *url.URL // url root (i.e. http://localhost/)
-	router   *mux.Router
-	relative bool
-}
-
-// NewURLBuilder creates a URLBuilder with provided root url object.
-func NewURLBuilder(root *url.URL, relative bool) *URLBuilder {
-	return &URLBuilder{
-		root:     root,
-		router:   Router(),
-		relative: relative,
-	}
-}
-
-// NewURLBuilderFromString works identically to NewURLBuilder except it takes
-// a string argument for the root, returning an error if it is not a valid
-// url.
-func NewURLBuilderFromString(root string, relative bool) (*URLBuilder, error) {
-	u, err := url.Parse(root)
-	if err != nil {
-		return nil, err
-	}
-
-	return NewURLBuilder(u, relative), nil
-}
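A hypothetical client-side use of the builder defined above; the registry host and repository name are assumptions for illustration, and BuildTagsURL is defined just below:

    package main

    import (
    	"fmt"

    	"github.com/docker/distribution/reference"
    	v2 "github.com/docker/distribution/registry/api/v2"
    )

    func main() {
    	// Build absolute URLs against an assumed registry endpoint.
    	ub, err := v2.NewURLBuilderFromString("https://registry.example.com", false)
    	if err != nil {
    		panic(err)
    	}

    	named, err := reference.WithName("library/ubuntu")
    	if err != nil {
    		panic(err)
    	}

    	tagsURL, err := ub.BuildTagsURL(named)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(tagsURL) // https://registry.example.com/v2/library/ubuntu/tags/list
    }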
- hosts := strings.SplitN(forwardedHost, ",", 2) - host = strings.TrimSpace(hosts[0]) - } - - basePath := routeDescriptorsMap[RouteNameBase].Path - - requestPath := r.URL.Path - index := strings.Index(requestPath, basePath) - - u := &url.URL{ - Scheme: scheme, - Host: host, - } - - if index > 0 { - // N.B. index+1 is important because we want to include the trailing / - u.Path = requestPath[0 : index+1] - } - - return NewURLBuilder(u, relative) -} - -// BuildBaseURL constructs a base url for the API, typically just "/v2/". -func (ub *URLBuilder) BuildBaseURL() (string, error) { - route := ub.cloneRoute(RouteNameBase) - - baseURL, err := route.URL() - if err != nil { - return "", err - } - - return baseURL.String(), nil -} - -// BuildCatalogURL constructs a url to get a catalog of repositories -func (ub *URLBuilder) BuildCatalogURL(values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameCatalog) - - catalogURL, err := route.URL() - if err != nil { - return "", err - } - - return appendValuesURL(catalogURL, values...).String(), nil -} - -// BuildTagsURL constructs a url to list the tags in the named repository. -func (ub *URLBuilder) BuildTagsURL(name reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameTags) - - tagsURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return tagsURL.String(), nil -} - -// BuildManifestURL constructs a url for the manifest identified by name and -// reference. The argument reference may be either a tag or digest. -func (ub *URLBuilder) BuildManifestURL(ref reference.Named) (string, error) { - route := ub.cloneRoute(RouteNameManifest) - - tagOrDigest := "" - switch v := ref.(type) { - case reference.Tagged: - tagOrDigest = v.Tag() - case reference.Digested: - tagOrDigest = v.Digest().String() - } - - manifestURL, err := route.URL("name", ref.Name(), "reference", tagOrDigest) - if err != nil { - return "", err - } - - return manifestURL.String(), nil -} - -// BuildBlobURL constructs the url for the blob identified by name and dgst. -func (ub *URLBuilder) BuildBlobURL(ref reference.Canonical) (string, error) { - route := ub.cloneRoute(RouteNameBlob) - - layerURL, err := route.URL("name", ref.Name(), "digest", ref.Digest().String()) - if err != nil { - return "", err - } - - return layerURL.String(), nil -} - -// BuildBlobUploadURL constructs a url to begin a blob upload in the -// repository identified by name. -func (ub *URLBuilder) BuildBlobUploadURL(name reference.Named, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUpload) - - uploadURL, err := route.URL("name", name.Name()) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// BuildBlobUploadChunkURL constructs a url for the upload identified by uuid, -// including any url values. This should generally not be used by clients, as -// this url is provided by server implementations during the blob upload -// process. -func (ub *URLBuilder) BuildBlobUploadChunkURL(name reference.Named, uuid string, values ...url.Values) (string, error) { - route := ub.cloneRoute(RouteNameBlobUploadChunk) - - uploadURL, err := route.URL("name", name.Name(), "uuid", uuid) - if err != nil { - return "", err - } - - return appendValuesURL(uploadURL, values...).String(), nil -} - -// clonedRoute returns a clone of the named route from the router. Routes -// must be cloned to avoid modifying them during url generation.
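Taken together, these Build*URL methods let a client derive every V2 endpoint from a single base. A brief sketch of typical use, assuming reference.ParseNamed from the vendored reference package (the host and repository name are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/reference"
	v2 "github.com/docker/distribution/registry/api/v2"
)

func main() {
	ub, err := v2.NewURLBuilderFromString("https://registry-1.docker.io", false)
	if err != nil {
		log.Fatal(err)
	}
	name, err := reference.ParseNamed("library/alpine")
	if err != nil {
		log.Fatal(err)
	}
	// e.g. https://registry-1.docker.io/v2/library/alpine/tags/list
	tagsURL, err := ub.BuildTagsURL(name)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tagsURL)
}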
-func (ub *URLBuilder) cloneRoute(name string) clonedRoute { - route := new(mux.Route) - root := new(url.URL) - - *route = *ub.router.GetRoute(name) // clone the route - *root = *ub.root - - return clonedRoute{Route: route, root: root, relative: ub.relative} -} - -type clonedRoute struct { - *mux.Route - root *url.URL - relative bool -} - -func (cr clonedRoute) URL(pairs ...string) (*url.URL, error) { - routeURL, err := cr.Route.URL(pairs...) - if err != nil { - return nil, err - } - - if cr.relative { - return routeURL, nil - } - - if routeURL.Scheme == "" && routeURL.User == nil && routeURL.Host == "" { - routeURL.Path = routeURL.Path[1:] - } - - url := cr.root.ResolveReference(routeURL) - url.Scheme = cr.root.Scheme - return url, nil -} - -// appendValuesURL appends the parameters to the url. -func appendValuesURL(u *url.URL, values ...url.Values) *url.URL { - merged := u.Query() - - for _, v := range values { - for k, vv := range v { - merged[k] = append(merged[k], vv...) - } - } - - u.RawQuery = merged.Encode() - return u -} - -// appendValues appends the parameters to the url. Panics if the string is not -// a url. -func appendValues(u string, values ...url.Values) string { - up, err := url.Parse(u) - - if err != nil { - panic(err) // should never happen - } - - return appendValuesURL(up, values...).String() -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/auth/api_version.go b/vendor/src/github.com/docker/distribution/registry/client/auth/api_version.go deleted file mode 100644 index 7d8f1d9576..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/auth/api_version.go +++ /dev/null @@ -1,58 +0,0 @@ -package auth - -import ( - "net/http" - "strings" -) - -// APIVersion represents a version of an API including its -// type and version number. -type APIVersion struct { - // Type refers to the name of a specific API specification - // such as "registry" - Type string - - // Version is the version of the API specification implemented. - // This may omit the revision number and only include - // the major and minor version, such as "2.0" - Version string -} - -// String returns the string formatted API Version -func (v APIVersion) String() string { - return v.Type + "/" + v.Version -} - -// APIVersions gets the API versions out of an HTTP response using the provided -// version header as the key for the HTTP header. -func APIVersions(resp *http.Response, versionHeader string) []APIVersion { - versions := []APIVersion{} - if versionHeader != "" { - for _, supportedVersions := range resp.Header[http.CanonicalHeaderKey(versionHeader)] { - for _, version := range strings.Fields(supportedVersions) { - versions = append(versions, ParseAPIVersion(version)) - } - } - } - return versions -} - -// ParseAPIVersion parses an API version string into an APIVersion -// Format (Expected, not enforced): -// API version string = <API type> '/' <API version> -// API type = [a-z][a-z0-9]* -// API version = [0-9]+(\.[0-9]+)?
-// TODO(dmcgowan): Enforce format, add error condition, remove unknown type -func ParseAPIVersion(versionStr string) APIVersion { - idx := strings.IndexRune(versionStr, '/') - if idx == -1 { - return APIVersion{ - Type: "unknown", - Version: versionStr, - } - } - return APIVersion{ - Type: strings.ToLower(versionStr[:idx]), - Version: versionStr[idx+1:], - } -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/auth/authchallenge.go b/vendor/src/github.com/docker/distribution/registry/client/auth/authchallenge.go deleted file mode 100644 index c8cd83bb97..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/auth/authchallenge.go +++ /dev/null @@ -1,220 +0,0 @@ -package auth - -import ( - "fmt" - "net/http" - "net/url" - "strings" -) - -// Challenge carries information from a WWW-Authenticate response header. -// See RFC 2617. -type Challenge struct { - // Scheme is the auth-scheme according to RFC 2617 - Scheme string - - // Parameters are the auth-params according to RFC 2617 - Parameters map[string]string -} - -// ChallengeManager manages the challenges for endpoints. -// The challenges are pulled out of HTTP responses. Only -// responses which expect challenges should be added to -// the manager, since a non-unauthorized request will be -// viewed as not requiring challenges. -type ChallengeManager interface { - // GetChallenges returns the challenges for the given - // endpoint URL. - GetChallenges(endpoint url.URL) ([]Challenge, error) - - // AddResponse adds the response to the challenge - // manager. The challenges will be parsed out of - // the WWW-Authenticate headers and added to the - // URL which produced the response. If the - // response was authorized, any challenges for the - // endpoint will be cleared. - AddResponse(resp *http.Response) error -} - -// NewSimpleChallengeManager returns an instance of -// ChallengeManager which only maps endpoints to challenges -// based on the responses which have been added to the -// manager. The simple manager will make no attempt to -// perform requests on the endpoints or cache the responses -// to a backend. -func NewSimpleChallengeManager() ChallengeManager { - return simpleChallengeManager{} -} - -type simpleChallengeManager map[string][]Challenge - -func (m simpleChallengeManager) GetChallenges(endpoint url.URL) ([]Challenge, error) { - endpoint.Host = strings.ToLower(endpoint.Host) - - challenges := m[endpoint.String()] - return challenges, nil -} - -func (m simpleChallengeManager) AddResponse(resp *http.Response) error { - challenges := ResponseChallenges(resp) - if resp.Request == nil { - return fmt.Errorf("missing request reference") - } - urlCopy := url.URL{ - Path: resp.Request.URL.Path, - Host: strings.ToLower(resp.Request.URL.Host), - Scheme: resp.Request.URL.Scheme, - } - m[urlCopy.String()] = challenges - return nil -} - -// Octet types from RFC 2616. -type octetType byte - -var octetTypes [256]octetType - -const ( - isToken octetType = 1 << iota - isSpace -) - -func init() { - // OCTET = <any 8-bit sequence of data> - // CHAR = <any US-ASCII character (octets 0 - 127)> - // CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)> - // CR = <US-ASCII CR, carriage return (13)> - // LF = <US-ASCII LF, linefeed (10)> - // SP = <US-ASCII SP, space (32)> - // HT = <US-ASCII HT, horizontal-tab (9)> - // <"> = <US-ASCII double-quote mark (34)> - // CRLF = CR LF - // LWS = [CRLF] 1*( SP | HT ) - // TEXT = <any OCTET except CTLs, but including LWS> - // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <"> - // | "/" | "[" | "]" | "?"
| "=" | "{" | "}" | SP | HT - // token = 1* - // qdtext = > - - for c := 0; c < 256; c++ { - var t octetType - isCtl := c <= 31 || c == 127 - isChar := 0 <= c && c <= 127 - isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0 - if strings.IndexRune(" \t\r\n", rune(c)) >= 0 { - t |= isSpace - } - if isChar && !isCtl && !isSeparator { - t |= isToken - } - octetTypes[c] = t - } -} - -// ResponseChallenges returns a list of authorization challenges -// for the given http Response. Challenges are only checked if -// the response status code was a 401. -func ResponseChallenges(resp *http.Response) []Challenge { - if resp.StatusCode == http.StatusUnauthorized { - // Parse the WWW-Authenticate Header and store the challenges - // on this endpoint object. - return parseAuthHeader(resp.Header) - } - - return nil -} - -func parseAuthHeader(header http.Header) []Challenge { - challenges := []Challenge{} - for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] { - v, p := parseValueAndParams(h) - if v != "" { - challenges = append(challenges, Challenge{Scheme: v, Parameters: p}) - } - } - return challenges -} - -func parseValueAndParams(header string) (value string, params map[string]string) { - params = make(map[string]string) - value, s := expectToken(header) - if value == "" { - return - } - value = strings.ToLower(value) - s = "," + skipSpace(s) - for strings.HasPrefix(s, ",") { - var pkey string - pkey, s = expectToken(skipSpace(s[1:])) - if pkey == "" { - return - } - if !strings.HasPrefix(s, "=") { - return - } - var pvalue string - pvalue, s = expectTokenOrQuoted(s[1:]) - if pvalue == "" { - return - } - pkey = strings.ToLower(pkey) - params[pkey] = pvalue - s = skipSpace(s) - } - return -} - -func skipSpace(s string) (rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isSpace == 0 { - break - } - } - return s[i:] -} - -func expectToken(s string) (token, rest string) { - i := 0 - for ; i < len(s); i++ { - if octetTypes[s[i]]&isToken == 0 { - break - } - } - return s[:i], s[i:] -} - -func expectTokenOrQuoted(s string) (value string, rest string) { - if !strings.HasPrefix(s, "\"") { - return expectToken(s) - } - s = s[1:] - for i := 0; i < len(s); i++ { - switch s[i] { - case '"': - return s[:i], s[i+1:] - case '\\': - p := make([]byte, len(s)-1) - j := copy(p, s[:i]) - escape := true - for i = i + 1; i < len(s); i++ { - b := s[i] - switch { - case escape: - escape = false - p[j] = b - j++ - case b == '\\': - escape = true - case b == '"': - return string(p[:j]), s[i+1:] - default: - p[j] = b - j++ - } - } - return "", "" - } - } - return "", "" -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/auth/session.go b/vendor/src/github.com/docker/distribution/registry/client/auth/session.go deleted file mode 100644 index d03d8ff0ed..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/auth/session.go +++ /dev/null @@ -1,497 +0,0 @@ -package auth - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/Sirupsen/logrus" - "github.com/docker/distribution/registry/client" - "github.com/docker/distribution/registry/client/transport" -) - -var ( - // ErrNoBasicAuthCredentials is returned if a request can't be authorized with - // basic auth due to lack of credentials. 
- ErrNoBasicAuthCredentials = errors.New("no basic auth credentials") - - // ErrNoToken is returned if a request is successful but the body does not - // contain an authorization token. - ErrNoToken = errors.New("authorization server did not include a token in the response") -) - -const defaultClientID = "registry-client" - -// AuthenticationHandler is an interface for authorizing a request using -// params from a "WWW-Authenticate" header for a single scheme. -type AuthenticationHandler interface { - // Scheme returns the scheme as expected from the "WWW-Authenticate" header. - Scheme() string - - // AuthorizeRequest adds the authorization header to a request (if needed) - // using the parameters from the "WWW-Authenticate" header. The parameter - // values depend on the scheme. - AuthorizeRequest(req *http.Request, params map[string]string) error -} - -// CredentialStore is an interface for getting credentials for -// a given URL -type CredentialStore interface { - // Basic returns basic auth for the given URL - Basic(*url.URL) (string, string) - - // RefreshToken returns a refresh token for the - // given URL and service - RefreshToken(*url.URL, string) string - - // SetRefreshToken sets the refresh token if none - // is provided for the given url and service - SetRefreshToken(realm *url.URL, service, token string) -} - -// NewAuthorizer creates an authorizer which can handle multiple authentication -// schemes. The handlers are tried in order, the higher priority authentication -// methods should be first. The challengeMap holds a list of challenges for -// a given root API endpoint (for example "https://registry-1.docker.io/v2/"). -func NewAuthorizer(manager ChallengeManager, handlers ...AuthenticationHandler) transport.RequestModifier { - return &endpointAuthorizer{ - challenges: manager, - handlers: handlers, - } -} - -type endpointAuthorizer struct { - challenges ChallengeManager - handlers []AuthenticationHandler - transport http.RoundTripper -} - -func (ea *endpointAuthorizer) ModifyRequest(req *http.Request) error { - pingPath := req.URL.Path - if v2Root := strings.Index(req.URL.Path, "/v2/"); v2Root != -1 { - pingPath = pingPath[:v2Root+4] - } else if v1Root := strings.Index(req.URL.Path, "/v1/"); v1Root != -1 { - pingPath = pingPath[:v1Root] + "/v2/" - } else { - return nil - } - - ping := url.URL{ - Host: req.URL.Host, - Scheme: req.URL.Scheme, - Path: pingPath, - } - - challenges, err := ea.challenges.GetChallenges(ping) - if err != nil { - return err - } - - if len(challenges) > 0 { - for _, handler := range ea.handlers { - for _, challenge := range challenges { - if challenge.Scheme != handler.Scheme() { - continue - } - if err := handler.AuthorizeRequest(req, challenge.Parameters); err != nil { - return err - } - } - } - } - - return nil -} - -// This is the minimum duration a token can last (in seconds). -// A token must not live less than 60 seconds because older versions -// of the Docker client didn't read their expiration from the token -// response and assumed 60 seconds. So to remain compatible with -// those implementations, a token must live at least this long. -const minimumTokenLifetimeSeconds = 60 - -// Private interface for time used by this package to enable tests to provide their own implementation.
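Before the token handler internals below, a hedged sketch of how these pieces compose: a challenge manager primed from the registry's /v2/ ping, handlers in priority order, and the result wrapped into a transport. The CredentialStore here is a deliberately toy implementation, and the registry host and scope are illustrative; real clients load credentials from the Docker config file.

package main

import (
	"net/http"
	"net/url"

	"github.com/docker/distribution/registry/client/auth"
	"github.com/docker/distribution/registry/client/transport"
)

// staticCreds is a toy CredentialStore satisfying the interface above.
type staticCreds struct{ user, pass string }

func (c staticCreds) Basic(*url.URL) (string, string)          { return c.user, c.pass }
func (c staticCreds) RefreshToken(*url.URL, string) string     { return "" }
func (c staticCreds) SetRefreshToken(*url.URL, string, string) {}

func newAuthedTransport(creds auth.CredentialStore) (http.RoundTripper, error) {
	cm := auth.NewSimpleChallengeManager()
	// Prime the manager with the registry's 401 challenge from /v2/.
	resp, err := http.Get("https://registry-1.docker.io/v2/")
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	if err := cm.AddResponse(resp); err != nil {
		return nil, err
	}
	// Bearer first, then basic, mirroring NewAuthorizer's priority order.
	th := auth.NewTokenHandler(nil, creds, "library/alpine", "pull")
	bh := auth.NewBasicHandler(creds)
	return transport.NewTransport(nil, auth.NewAuthorizer(cm, th, bh)), nil
}

func main() {
	rt, err := newAuthedTransport(staticCreds{})
	_, _ = rt, err
}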
-type clock interface { - Now() time.Time -} - -type tokenHandler struct { - header http.Header - creds CredentialStore - transport http.RoundTripper - clock clock - - offlineAccess bool - forceOAuth bool - clientID string - scopes []Scope - - tokenLock sync.Mutex - tokenCache string - tokenExpiration time.Time -} - -// Scope is a type which is serializable to a string -// using the scope grammar. -type Scope interface { - String() string -} - -// RepositoryScope represents a token scope for access -// to a repository. -type RepositoryScope struct { - Repository string - Actions []string -} - -// String returns the string representation of the repository -// using the scope grammar -func (rs RepositoryScope) String() string { - return fmt.Sprintf("repository:%s:%s", rs.Repository, strings.Join(rs.Actions, ",")) -} - -// RegistryScope represents a token scope for access -// to resources in the registry. -type RegistryScope struct { - Name string - Actions []string -} - -// String returns the string representation of the registry scope -// using the scope grammar -func (rs RegistryScope) String() string { - return fmt.Sprintf("registry:%s:%s", rs.Name, strings.Join(rs.Actions, ",")) -} - -// TokenHandlerOptions is used to configure a new token handler -type TokenHandlerOptions struct { - Transport http.RoundTripper - Credentials CredentialStore - - OfflineAccess bool - ForceOAuth bool - ClientID string - Scopes []Scope -} - -// An implementation of clock for providing real time data. -type realClock struct{} - -// Now implements clock -func (realClock) Now() time.Time { return time.Now() } - -// NewTokenHandler creates a new AuthenticationHandler which supports -// fetching tokens from a remote token server. -func NewTokenHandler(transport http.RoundTripper, creds CredentialStore, scope string, actions ...string) AuthenticationHandler { - // Create options... - return NewTokenHandlerWithOptions(TokenHandlerOptions{ - Transport: transport, - Credentials: creds, - Scopes: []Scope{ - RepositoryScope{ - Repository: scope, - Actions: actions, - }, - }, - }) -} - -// NewTokenHandlerWithOptions creates a new token handler using the provided -// options structure. -func NewTokenHandlerWithOptions(options TokenHandlerOptions) AuthenticationHandler { - handler := &tokenHandler{ - transport: options.Transport, - creds: options.Credentials, - offlineAccess: options.OfflineAccess, - forceOAuth: options.ForceOAuth, - clientID: options.ClientID, - scopes: options.Scopes, - clock: realClock{}, - } - - return handler -} - -func (th *tokenHandler) client() *http.Client { - return &http.Client{ - Transport: th.transport, - Timeout: 15 * time.Second, - } -} - -func (th *tokenHandler) Scheme() string { - return "bearer" -} - -func (th *tokenHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - var additionalScopes []string - if fromParam := req.URL.Query().Get("from"); fromParam != "" { - additionalScopes = append(additionalScopes, RepositoryScope{ - Repository: fromParam, - Actions: []string{"pull"}, - }.String()) - } - - token, err := th.getToken(params, additionalScopes...)
- if err != nil { - return err - } - - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", token)) - - return nil -} - -func (th *tokenHandler) getToken(params map[string]string, additionalScopes ...string) (string, error) { - th.tokenLock.Lock() - defer th.tokenLock.Unlock() - scopes := make([]string, 0, len(th.scopes)+len(additionalScopes)) - for _, scope := range th.scopes { - scopes = append(scopes, scope.String()) - } - var addedScopes bool - for _, scope := range additionalScopes { - scopes = append(scopes, scope) - addedScopes = true - } - - now := th.clock.Now() - if now.After(th.tokenExpiration) || addedScopes { - token, expiration, err := th.fetchToken(params, scopes) - if err != nil { - return "", err - } - - // do not update cache for added scope tokens - if !addedScopes { - th.tokenCache = token - th.tokenExpiration = expiration - } - - return token, nil - } - - return th.tokenCache, nil -} - -type postTokenResponse struct { - AccessToken string `json:"access_token"` - RefreshToken string `json:"refresh_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - Scope string `json:"scope"` -} - -func (th *tokenHandler) fetchTokenWithOAuth(realm *url.URL, refreshToken, service string, scopes []string) (token string, expiration time.Time, err error) { - form := url.Values{} - form.Set("scope", strings.Join(scopes, " ")) - form.Set("service", service) - - clientID := th.clientID - if clientID == "" { - // Use default client, this is a required field - clientID = defaultClientID - } - form.Set("client_id", clientID) - - if refreshToken != "" { - form.Set("grant_type", "refresh_token") - form.Set("refresh_token", refreshToken) - } else if th.creds != nil { - form.Set("grant_type", "password") - username, password := th.creds.Basic(realm) - form.Set("username", username) - form.Set("password", password) - - // attempt to get a refresh token - form.Set("access_type", "offline") - } else { - // refuse to do oauth without a grant type - return "", time.Time{}, fmt.Errorf("no supported grant type") - } - - resp, err := th.client().PostForm(realm.String(), form) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr postTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && tr.RefreshToken != refreshToken { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. 
- tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.AccessToken, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -type getTokenResponse struct { - Token string `json:"token"` - AccessToken string `json:"access_token"` - ExpiresIn int `json:"expires_in"` - IssuedAt time.Time `json:"issued_at"` - RefreshToken string `json:"refresh_token"` -} - -func (th *tokenHandler) fetchTokenWithBasicAuth(realm *url.URL, service string, scopes []string) (token string, expiration time.Time, err error) { - - req, err := http.NewRequest("GET", realm.String(), nil) - if err != nil { - return "", time.Time{}, err - } - - reqParams := req.URL.Query() - - if service != "" { - reqParams.Add("service", service) - } - - for _, scope := range scopes { - reqParams.Add("scope", scope) - } - - if th.offlineAccess { - reqParams.Add("offline_token", "true") - clientID := th.clientID - if clientID == "" { - clientID = defaultClientID - } - reqParams.Add("client_id", clientID) - } - - if th.creds != nil { - username, password := th.creds.Basic(realm) - if username != "" && password != "" { - reqParams.Add("account", username) - req.SetBasicAuth(username, password) - } - } - - req.URL.RawQuery = reqParams.Encode() - - resp, err := th.client().Do(req) - if err != nil { - return "", time.Time{}, err - } - defer resp.Body.Close() - - if !client.SuccessStatus(resp.StatusCode) { - err := client.HandleErrorResponse(resp) - return "", time.Time{}, err - } - - decoder := json.NewDecoder(resp.Body) - - var tr getTokenResponse - if err = decoder.Decode(&tr); err != nil { - return "", time.Time{}, fmt.Errorf("unable to decode token response: %s", err) - } - - if tr.RefreshToken != "" && th.creds != nil { - th.creds.SetRefreshToken(realm, service, tr.RefreshToken) - } - - // `access_token` is equivalent to `token` and if both are specified - // the choice is undefined. Canonicalize `access_token` by sticking - // things in `token`. - if tr.AccessToken != "" { - tr.Token = tr.AccessToken - } - - if tr.Token == "" { - return "", time.Time{}, ErrNoToken - } - - if tr.ExpiresIn < minimumTokenLifetimeSeconds { - // The default/minimum lifetime. - tr.ExpiresIn = minimumTokenLifetimeSeconds - logrus.Debugf("Increasing token expiration to: %d seconds", tr.ExpiresIn) - } - - if tr.IssuedAt.IsZero() { - // issued_at is optional in the token response. - tr.IssuedAt = th.clock.Now().UTC() - } - - return tr.Token, tr.IssuedAt.Add(time.Duration(tr.ExpiresIn) * time.Second), nil -} - -func (th *tokenHandler) fetchToken(params map[string]string, scopes []string) (token string, expiration time.Time, err error) { - realm, ok := params["realm"] - if !ok { - return "", time.Time{}, errors.New("no realm specified for token auth challenge") - } - - // TODO(dmcgowan): Handle empty scheme and relative realm - realmURL, err := url.Parse(realm) - if err != nil { - return "", time.Time{}, fmt.Errorf("invalid token auth challenge realm: %s", err) - } - - service := params["service"] - - var refreshToken string - - if th.creds != nil { - refreshToken = th.creds.RefreshToken(realmURL, service) - } - - if refreshToken != "" || th.forceOAuth { - return th.fetchTokenWithOAuth(realmURL, refreshToken, service, scopes) - } - - return th.fetchTokenWithBasicAuth(realmURL, service, scopes) -} - -type basicHandler struct { - creds CredentialStore -} - -// NewBasicHandler creates a new authentication handler which adds -// basic authentication credentials to a request.
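fetchTokenWithBasicAuth above reduces to a single GET against the realm advertised in the 401 challenge. A sketch of the equivalent raw request, with an illustrative realm/service pair of the shape Docker Hub advertises:

package main

import (
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Values a registry would advertise as: WWW-Authenticate: Bearer
	// realm="https://auth.docker.io/token",service="registry.docker.io"
	realm := "https://auth.docker.io/token"
	q := url.Values{}
	q.Set("service", "registry.docker.io")
	q.Add("scope", "repository:library/alpine:pull")

	req, _ := http.NewRequest("GET", realm+"?"+q.Encode(), nil)
	// With credentials, the handler additionally sets account= and basic auth.
	req.SetBasicAuth("user", "secret")
	fmt.Println(req.URL.String())
	// The JSON response carries token/access_token, expires_in and issued_at,
	// which getToken caches until expiration (minimum 60 seconds, per above).
}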
-func NewBasicHandler(creds CredentialStore) AuthenticationHandler { - return &basicHandler{ - creds: creds, - } -} - -func (*basicHandler) Scheme() string { - return "basic" -} - -func (bh *basicHandler) AuthorizeRequest(req *http.Request, params map[string]string) error { - if bh.creds != nil { - username, password := bh.creds.Basic(req.URL) - if username != "" && password != "" { - req.SetBasicAuth(username, password) - return nil - } - } - return ErrNoBasicAuthCredentials -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go b/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go deleted file mode 100644 index e3ffcb00fd..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/blob_writer.go +++ /dev/null @@ -1,162 +0,0 @@ -package client - -import ( - "bytes" - "fmt" - "io" - "io/ioutil" - "net/http" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" -) - -type httpBlobUpload struct { - statter distribution.BlobStatter - client *http.Client - - uuid string - startedAt time.Time - - location string // always the last value of the location header. - offset int64 - closed bool -} - -func (hbu *httpBlobUpload) Reader() (io.ReadCloser, error) { - panic("Not implemented") -} - -func (hbu *httpBlobUpload) handleErrorResponse(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUploadUnknown - } - return HandleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) ReadFrom(r io.Reader) (n int64, err error) { - req, err := http.NewRequest("PATCH", hbu.location, ioutil.NopCloser(r)) - if err != nil { - return 0, err - } - defer req.Body.Close() - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int64 - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Write(p []byte) (n int, err error) { - req, err := http.NewRequest("PATCH", hbu.location, bytes.NewReader(p)) - if err != nil { - return 0, err - } - req.Header.Set("Content-Range", fmt.Sprintf("%d-%d", hbu.offset, hbu.offset+int64(len(p)-1))) - req.Header.Set("Content-Length", fmt.Sprintf("%d", len(p))) - req.Header.Set("Content-Type", "application/octet-stream") - - resp, err := hbu.client.Do(req) - if err != nil { - return 0, err - } - - if !SuccessStatus(resp.StatusCode) { - return 0, hbu.handleErrorResponse(resp) - } - - hbu.uuid = resp.Header.Get("Docker-Upload-UUID") - hbu.location, err = sanitizeLocation(resp.Header.Get("Location"), hbu.location) - if err != nil { - return 0, err - } - rng := resp.Header.Get("Range") - var start, end int - if n, err := fmt.Sscanf(rng, "%d-%d", &start, &end); err != nil { - return 0, err - } else if n != 2 || end < start { - return 0, fmt.Errorf("bad range format: %s", rng) - } - - return (end - start + 1), nil - -} - -func (hbu *httpBlobUpload) Size() int64 { - return hbu.offset -} - -func (hbu *httpBlobUpload) ID() string { - return hbu.uuid -} - -func (hbu *httpBlobUpload) StartedAt() time.Time { - return hbu.startedAt -} - -func 
(hbu *httpBlobUpload) Commit(ctx context.Context, desc distribution.Descriptor) (distribution.Descriptor, error) { - // TODO(dmcgowan): Check if already finished, if so just fetch - req, err := http.NewRequest("PUT", hbu.location, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - values := req.URL.Query() - values.Set("digest", desc.Digest.String()) - req.URL.RawQuery = values.Encode() - - resp, err := hbu.client.Do(req) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if !SuccessStatus(resp.StatusCode) { - return distribution.Descriptor{}, hbu.handleErrorResponse(resp) - } - - return hbu.statter.Stat(ctx, desc.Digest) -} - -func (hbu *httpBlobUpload) Cancel(ctx context.Context) error { - req, err := http.NewRequest("DELETE", hbu.location, nil) - if err != nil { - return err - } - resp, err := hbu.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.StatusCode == http.StatusNotFound || SuccessStatus(resp.StatusCode) { - return nil - } - return hbu.handleErrorResponse(resp) -} - -func (hbu *httpBlobUpload) Close() error { - hbu.closed = true - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/errors.go b/vendor/src/github.com/docker/distribution/registry/client/errors.go deleted file mode 100644 index f73e3c2301..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/errors.go +++ /dev/null @@ -1,107 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - - "github.com/docker/distribution/registry/api/errcode" -) - -// ErrNoErrorsInBody is returned when an HTTP response body parses to an empty -// errcode.Errors slice. -var ErrNoErrorsInBody = errors.New("no error details found in HTTP response body") - -// UnexpectedHTTPStatusError is returned when an unexpected HTTP status is -// returned when making a registry api call. -type UnexpectedHTTPStatusError struct { - Status string -} - -func (e *UnexpectedHTTPStatusError) Error() string { - return fmt.Sprintf("received unexpected HTTP status: %s", e.Status) -} - -// UnexpectedHTTPResponseError is returned when an expected HTTP status code -// is returned, but the content was unexpected and failed to be parsed. -type UnexpectedHTTPResponseError struct { - ParseErr error - StatusCode int - Response []byte -} - -func (e *UnexpectedHTTPResponseError) Error() string { - return fmt.Sprintf("error parsing HTTP %d response body: %s: %q", e.StatusCode, e.ParseErr.Error(), string(e.Response)) -} - -func parseHTTPErrorResponse(statusCode int, r io.Reader) error { - var errors errcode.Errors - body, err := ioutil.ReadAll(r) - if err != nil { - return err - } - - // For backward compatibility, handle irregularly formatted - // messages that contain a "details" field. 
- var detailsErr struct { - Details string `json:"details"` - } - err = json.Unmarshal(body, &detailsErr) - if err == nil && detailsErr.Details != "" { - switch statusCode { - case http.StatusUnauthorized: - return errcode.ErrorCodeUnauthorized.WithMessage(detailsErr.Details) - case http.StatusTooManyRequests: - return errcode.ErrorCodeTooManyRequests.WithMessage(detailsErr.Details) - default: - return errcode.ErrorCodeUnknown.WithMessage(detailsErr.Details) - } - } - - if err := json.Unmarshal(body, &errors); err != nil { - return &UnexpectedHTTPResponseError{ - ParseErr: err, - StatusCode: statusCode, - Response: body, - } - } - - if len(errors) == 0 { - // If there was no error specified in the body, return - // UnexpectedHTTPResponseError. - return &UnexpectedHTTPResponseError{ - ParseErr: ErrNoErrorsInBody, - StatusCode: statusCode, - Response: body, - } - } - - return errors -} - -// HandleErrorResponse returns the error parsed from an HTTP response for an -// unsuccessful HTTP response code (in the range 400 - 499 inclusive). An -// UnexpectedHTTPStatusError is returned for response codes outside of the -// expected range. -func HandleErrorResponse(resp *http.Response) error { - if resp.StatusCode == 401 { - err := parseHTTPErrorResponse(resp.StatusCode, resp.Body) - if uErr, ok := err.(*UnexpectedHTTPResponseError); ok { - return errcode.ErrorCodeUnauthorized.WithDetail(uErr.Response) - } - return err - } - if resp.StatusCode >= 400 && resp.StatusCode < 500 { - return parseHTTPErrorResponse(resp.StatusCode, resp.Body) - } - return &UnexpectedHTTPStatusError{Status: resp.Status} -} - -// SuccessStatus returns true if the argument is a successful HTTP response -// code (in the range 200 - 399 inclusive). -func SuccessStatus(status int) bool { - return status >= 200 && status <= 399 -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/repository.go b/vendor/src/github.com/docker/distribution/registry/client/repository.go deleted file mode 100644 index 9731255615..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/repository.go +++ /dev/null @@ -1,863 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" - "strings" - "time" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/api/v2" - "github.com/docker/distribution/registry/client/transport" - "github.com/docker/distribution/registry/storage/cache" - "github.com/docker/distribution/registry/storage/cache/memory" -) - -// Registry provides an interface for calling Repositories, which returns a catalog of repositories. -type Registry interface { - Repositories(ctx context.Context, repos []string, last string) (n int, err error) -} - -// checkHTTPRedirect is a callback that can manipulate redirected HTTP -// requests. It is used to preserve Accept and Range headers. -func checkHTTPRedirect(req *http.Request, via []*http.Request) error { - if len(via) >= 10 { - return errors.New("stopped after 10 redirects") - } - - if len(via) > 0 { - for headerName, headerVals := range via[0].Header { - if headerName != "Accept" && headerName != "Range" { - continue - } - for _, val := range headerVals { - // Don't add to redirected request if redirected - // request already has a header with the same - // name and value.
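SuccessStatus and HandleErrorResponse form the error-handling idiom used by every client call in the files that follow. A minimal sketch of the calling pattern (the endpoint is illustrative):

package main

import (
	"fmt"
	"net/http"

	"github.com/docker/distribution/registry/client"
)

func ping(base string) error {
	resp, err := http.Get(base + "/v2/")
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if !client.SuccessStatus(resp.StatusCode) {
		// Parses the errcode JSON body, or wraps unexpected payloads in
		// UnexpectedHTTPResponseError / UnexpectedHTTPStatusError.
		return client.HandleErrorResponse(resp)
	}
	return nil
}

func main() {
	fmt.Println(ping("https://registry-1.docker.io"))
}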
- hasValue := false - for _, existingVal := range req.Header[headerName] { - if existingVal == val { - hasValue = true - break - } - } - if !hasValue { - req.Header.Add(headerName, val) - } - } - } - } - - return nil -} - -// NewRegistry creates a registry namespace which can be used to get a listing of repositories -func NewRegistry(ctx context.Context, baseURL string, transport http.RoundTripper) (Registry, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - Timeout: 1 * time.Minute, - CheckRedirect: checkHTTPRedirect, - } - - return &registry{ - client: client, - ub: ub, - context: ctx, - }, nil -} - -type registry struct { - client *http.Client - ub *v2.URLBuilder - context context.Context -} - -// Repositories returns a lexicographically sorted catalog given a base URL. The 'entries' slice will be filled up to the size -// of the slice, starting at the value provided in 'last'. The number of entries will be returned along with io.EOF if there -// are no more entries -func (r *registry) Repositories(ctx context.Context, entries []string, last string) (int, error) { - var numFilled int - var returnErr error - - values := buildCatalogValues(len(entries), last) - u, err := r.ub.BuildCatalogURL(values) - if err != nil { - return 0, err - } - - resp, err := r.client.Get(u) - if err != nil { - return 0, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - var ctlg struct { - Repositories []string `json:"repositories"` - } - decoder := json.NewDecoder(resp.Body) - - if err := decoder.Decode(&ctlg); err != nil { - return 0, err - } - - for cnt := range ctlg.Repositories { - entries[cnt] = ctlg.Repositories[cnt] - } - numFilled = len(ctlg.Repositories) - - link := resp.Header.Get("Link") - if link == "" { - returnErr = io.EOF - } - } else { - return 0, HandleErrorResponse(resp) - } - - return numFilled, returnErr -} - -// NewRepository creates a new Repository for the given repository name and base URL.
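NewRepository, defined next, is the usual entry point; the rest of this file hangs off the Repository it returns. A hedged usage sketch, assuming the registry allows anonymous pulls or that an authenticated RoundTripper is supplied instead of nil (host and image name are illustrative):

package main

import (
	"fmt"
	"log"

	"github.com/docker/distribution/context"
	"github.com/docker/distribution/reference"
	"github.com/docker/distribution/registry/client"
)

func main() {
	ctx := context.Background()
	name, err := reference.ParseNamed("library/alpine")
	if err != nil {
		log.Fatal(err)
	}
	repo, err := client.NewRepository(ctx, name, "https://registry-1.docker.io", nil)
	if err != nil {
		log.Fatal(err)
	}
	// All pages through the tags endpoint, following Link headers as the
	// loop below shows.
	tags, err := repo.Tags(ctx).All(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(tags)
}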
-func NewRepository(ctx context.Context, name reference.Named, baseURL string, transport http.RoundTripper) (distribution.Repository, error) { - ub, err := v2.NewURLBuilderFromString(baseURL, false) - if err != nil { - return nil, err - } - - client := &http.Client{ - Transport: transport, - CheckRedirect: checkHTTPRedirect, - // TODO(dmcgowan): create cookie jar - } - - return &repository{ - client: client, - ub: ub, - name: name, - context: ctx, - }, nil -} - -type repository struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -func (r *repository) Named() reference.Named { - return r.name -} - -func (r *repository) Blobs(ctx context.Context) distribution.BlobStore { - statter := &blobStatter{ - name: r.name, - ub: r.ub, - client: r.client, - } - return &blobs{ - name: r.name, - ub: r.ub, - client: r.client, - statter: cache.NewCachedBlobStatter(memory.NewInMemoryBlobDescriptorCacheProvider(), statter), - } -} - -func (r *repository) Manifests(ctx context.Context, options ...distribution.ManifestServiceOption) (distribution.ManifestService, error) { - // todo(richardscothern): options should be sent over the wire - return &manifests{ - name: r.name, - ub: r.ub, - client: r.client, - etags: make(map[string]string), - }, nil -} - -func (r *repository) Tags(ctx context.Context) distribution.TagService { - return &tags{ - client: r.client, - ub: r.ub, - context: r.context, - name: r.Named(), - } -} - -// tags implements remote tagging operations. -type tags struct { - client *http.Client - ub *v2.URLBuilder - context context.Context - name reference.Named -} - -// All returns all tags -func (t *tags) All(ctx context.Context) ([]string, error) { - var tags []string - - u, err := t.ub.BuildTagsURL(t.name) - if err != nil { - return tags, err - } - - for { - resp, err := t.client.Get(u) - if err != nil { - return tags, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - return tags, err - } - - tagsResponse := struct { - Tags []string `json:"tags"` - }{} - if err := json.Unmarshal(b, &tagsResponse); err != nil { - return tags, err - } - tags = append(tags, tagsResponse.Tags...) 
- if link := resp.Header.Get("Link"); link != "" { - u = strings.Trim(strings.Split(link, ";")[0], "<>") - } else { - return tags, nil - } - } else { - return tags, HandleErrorResponse(resp) - } - } -} - -func descriptorFromResponse(response *http.Response) (distribution.Descriptor, error) { - desc := distribution.Descriptor{} - headers := response.Header - - ctHeader := headers.Get("Content-Type") - if ctHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Type header") - } - desc.MediaType = ctHeader - - digestHeader := headers.Get("Docker-Content-Digest") - if digestHeader == "" { - bytes, err := ioutil.ReadAll(response.Body) - if err != nil { - return distribution.Descriptor{}, err - } - _, desc, err := distribution.UnmarshalManifest(ctHeader, bytes) - if err != nil { - return distribution.Descriptor{}, err - } - return desc, nil - } - - dgst, err := digest.ParseDigest(digestHeader) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Digest = dgst - - lengthHeader := headers.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, errors.New("missing or empty Content-Length header") - } - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, err - } - desc.Size = length - - return desc, nil - -} - -// Get issues a HEAD request for a Manifest against its named endpoint in order -// to construct a descriptor for the tag. If the registry doesn't support HEADing -// a manifest, fall back to GET. -func (t *tags) Get(ctx context.Context, tag string) (distribution.Descriptor, error) { - ref, err := reference.WithTag(t.name, tag) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := t.ub.BuildManifestURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - req, err := http.NewRequest("HEAD", u, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - var attempts int - resp, err := t.client.Do(req) -check: - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - switch { - case resp.StatusCode >= 200 && resp.StatusCode < 400: - return descriptorFromResponse(resp) - case resp.StatusCode == http.StatusMethodNotAllowed: - req, err = http.NewRequest("GET", u, nil) - if err != nil { - return distribution.Descriptor{}, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - resp, err = t.client.Do(req) - attempts++ - if attempts > 1 { - return distribution.Descriptor{}, err - } - goto check - default: - return distribution.Descriptor{}, HandleErrorResponse(resp) - } -} - -func (t *tags) Lookup(ctx context.Context, digest distribution.Descriptor) ([]string, error) { - panic("not implemented") -} - -func (t *tags) Tag(ctx context.Context, tag string, desc distribution.Descriptor) error { - panic("not implemented") -} - -func (t *tags) Untag(ctx context.Context, tag string) error { - panic("not implemented") -} - -type manifests struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - etags map[string]string -} - -func (ms *manifests) Exists(ctx context.Context, dgst digest.Digest) (bool, error) { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return false, err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return false, err - } - - resp, err := ms.client.Head(u) - if err != nil { - return
false, err - } - - if SuccessStatus(resp.StatusCode) { - return true, nil - } else if resp.StatusCode == http.StatusNotFound { - return false, nil - } - return false, HandleErrorResponse(resp) -} - -// AddEtagToTag allows a client to supply an eTag to Get which will be -// used for a conditional HTTP request. If the eTag matches, a nil manifest -// and ErrManifestNotModified error will be returned. etag is automatically -// quoted when added to this map. -func AddEtagToTag(tag, etag string) distribution.ManifestServiceOption { - return etagOption{tag, etag} -} - -type etagOption struct{ tag, etag string } - -func (o etagOption) Apply(ms distribution.ManifestService) error { - if ms, ok := ms.(*manifests); ok { - ms.etags[o.tag] = fmt.Sprintf(`"%s"`, o.etag) - return nil - } - return fmt.Errorf("etag options is a client-only option") -} - -// ReturnContentDigest allows a client to set the content digest on -// a successful request from the 'Docker-Content-Digest' header. This -// returned digest represents the digest which the registry uses -// to refer to the content and can be used to delete the content. -func ReturnContentDigest(dgst *digest.Digest) distribution.ManifestServiceOption { - return contentDigestOption{dgst} -} - -type contentDigestOption struct{ digest *digest.Digest } - -func (o contentDigestOption) Apply(ms distribution.ManifestService) error { - return nil -} - -func (ms *manifests) Get(ctx context.Context, dgst digest.Digest, options ...distribution.ManifestServiceOption) (distribution.Manifest, error) { - var ( - digestOrTag string - ref reference.Named - err error - contentDgst *digest.Digest - ) - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - digestOrTag = opt.Tag - ref, err = reference.WithTag(ms.name, opt.Tag) - if err != nil { - return nil, err - } - } else if opt, ok := option.(contentDigestOption); ok { - contentDgst = opt.digest - } else { - err := option.Apply(ms) - if err != nil { - return nil, err - } - } - } - - if digestOrTag == "" { - digestOrTag = dgst.String() - ref, err = reference.WithDigest(ms.name, dgst) - if err != nil { - return nil, err - } - } - - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return nil, err - } - - req, err := http.NewRequest("GET", u, nil) - if err != nil { - return nil, err - } - - for _, t := range distribution.ManifestMediaTypes() { - req.Header.Add("Accept", t) - } - - if _, ok := ms.etags[digestOrTag]; ok { - req.Header.Set("If-None-Match", ms.etags[digestOrTag]) - } - - resp, err := ms.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - if resp.StatusCode == http.StatusNotModified { - return nil, distribution.ErrManifestNotModified - } else if SuccessStatus(resp.StatusCode) { - if contentDgst != nil { - dgst, err := digest.ParseDigest(resp.Header.Get("Docker-Content-Digest")) - if err == nil { - *contentDgst = dgst - } - } - mt := resp.Header.Get("Content-Type") - body, err := ioutil.ReadAll(resp.Body) - - if err != nil { - return nil, err - } - m, _, err := distribution.UnmarshalManifest(mt, body) - if err != nil { - return nil, err - } - return m, nil - } - return nil, HandleErrorResponse(resp) -} - -// Put puts a manifest. A tag can be specified using an options parameter which uses some shared state to hold the -// tag name in order to build the correct upload URL.
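In practice the Get options above compose as follows. A sketch of a conditional manifest fetch, assuming distribution.WithTag (the constructor for the WithTagOption handled above) and an etag remembered from an earlier response; ms would come from repo.Manifests(ctx):

package example

import (
	"fmt"

	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
	"github.com/docker/distribution/digest"
	"github.com/docker/distribution/registry/client"
)

// fetchIfChanged re-pulls a tag only when its manifest changed, using the
// etag plumbing above.
func fetchIfChanged(ms distribution.ManifestService, etag string) error {
	ctx := context.Background()
	var dgst digest.Digest
	m, err := ms.Get(ctx, "",
		distribution.WithTag("latest"),      // resolve by tag rather than digest
		client.AddEtagToTag("latest", etag), // sent as If-None-Match
		client.ReturnContentDigest(&dgst),   // capture Docker-Content-Digest
	)
	if err == distribution.ErrManifestNotModified {
		return nil // cached copy is still current
	}
	if err != nil {
		return err
	}
	fmt.Println("new manifest", dgst, m)
	return nil
}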
-func (ms *manifests) Put(ctx context.Context, m distribution.Manifest, options ...distribution.ManifestServiceOption) (digest.Digest, error) { - ref := ms.name - var tagged bool - - for _, option := range options { - if opt, ok := option.(distribution.WithTagOption); ok { - var err error - ref, err = reference.WithTag(ref, opt.Tag) - if err != nil { - return "", err - } - tagged = true - } else { - err := option.Apply(ms) - if err != nil { - return "", err - } - } - } - mediaType, p, err := m.Payload() - if err != nil { - return "", err - } - - if !tagged { - // generate a canonical digest and Put by digest - _, d, err := distribution.UnmarshalManifest(mediaType, p) - if err != nil { - return "", err - } - ref, err = reference.WithDigest(ref, d.Digest) - if err != nil { - return "", err - } - } - - manifestURL, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return "", err - } - - putRequest, err := http.NewRequest("PUT", manifestURL, bytes.NewReader(p)) - if err != nil { - return "", err - } - - putRequest.Header.Set("Content-Type", mediaType) - - resp, err := ms.client.Do(putRequest) - if err != nil { - return "", err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - dgstHeader := resp.Header.Get("Docker-Content-Digest") - dgst, err := digest.ParseDigest(dgstHeader) - if err != nil { - return "", err - } - - return dgst, nil - } - - return "", HandleErrorResponse(resp) -} - -func (ms *manifests) Delete(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(ms.name, dgst) - if err != nil { - return err - } - u, err := ms.ub.BuildManifestURL(ref) - if err != nil { - return err - } - req, err := http.NewRequest("DELETE", u, nil) - if err != nil { - return err - } - - resp, err := ms.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -// todo(richardscothern): Restore interface and implementation with merge of #1050 -/*func (ms *manifests) Enumerate(ctx context.Context, manifests []distribution.Manifest, last distribution.Manifest) (n int, err error) { - panic("not supported") -}*/ - -type blobs struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client - - statter distribution.BlobDescriptorService - distribution.BlobDeleter -} - -func sanitizeLocation(location, base string) (string, error) { - baseURL, err := url.Parse(base) - if err != nil { - return "", err - } - - locationURL, err := url.Parse(location) - if err != nil { - return "", err - } - - return baseURL.ResolveReference(locationURL).String(), nil -} - -func (bs *blobs) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return bs.statter.Stat(ctx, dgst) - -} - -func (bs *blobs) Get(ctx context.Context, dgst digest.Digest) ([]byte, error) { - reader, err := bs.Open(ctx, dgst) - if err != nil { - return nil, err - } - defer reader.Close() - - return ioutil.ReadAll(reader) -} - -func (bs *blobs) Open(ctx context.Context, dgst digest.Digest) (distribution.ReadSeekCloser, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return nil, err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return nil, err - } - - return transport.NewHTTPReadSeeker(bs.client, blobURL, - func(resp *http.Response) error { - if resp.StatusCode == http.StatusNotFound { - return distribution.ErrBlobUnknown - } - return HandleErrorResponse(resp) - }), nil -} - -func (bs *blobs) ServeBlob(ctx 
context.Context, w http.ResponseWriter, r *http.Request, dgst digest.Digest) error { - panic("not implemented") -} - -func (bs *blobs) Put(ctx context.Context, mediaType string, p []byte) (distribution.Descriptor, error) { - writer, err := bs.Create(ctx) - if err != nil { - return distribution.Descriptor{}, err - } - dgstr := digest.Canonical.New() - n, err := io.Copy(writer, io.TeeReader(bytes.NewReader(p), dgstr.Hash())) - if err != nil { - return distribution.Descriptor{}, err - } - if n < int64(len(p)) { - return distribution.Descriptor{}, fmt.Errorf("short copy: wrote %d of %d", n, len(p)) - } - - desc := distribution.Descriptor{ - MediaType: mediaType, - Size: int64(len(p)), - Digest: dgstr.Digest(), - } - - return writer.Commit(ctx, desc) -} - -// createOptions is a collection of blob creation modifiers relevant to general -// blob storage intended to be configured by the BlobCreateOption.Apply method. -type createOptions struct { - Mount struct { - ShouldMount bool - From reference.Canonical - } -} - -type optionFunc func(interface{}) error - -func (f optionFunc) Apply(v interface{}) error { - return f(v) -} - -// WithMountFrom returns a BlobCreateOption which designates that the blob should be -// mounted from the given canonical reference. -func WithMountFrom(ref reference.Canonical) distribution.BlobCreateOption { - return optionFunc(func(v interface{}) error { - opts, ok := v.(*createOptions) - if !ok { - return fmt.Errorf("unexpected options type: %T", v) - } - - opts.Mount.ShouldMount = true - opts.Mount.From = ref - - return nil - }) -} - -func (bs *blobs) Create(ctx context.Context, options ...distribution.BlobCreateOption) (distribution.BlobWriter, error) { - var opts createOptions - - for _, option := range options { - err := option.Apply(&opts) - if err != nil { - return nil, err - } - } - - var values []url.Values - - if opts.Mount.ShouldMount { - values = append(values, url.Values{"from": {opts.Mount.From.Name()}, "mount": {opts.Mount.From.Digest().String()}}) - } - - u, err := bs.ub.BuildBlobUploadURL(bs.name, values...) 
- if err != nil { - return nil, err - } - - resp, err := bs.client.Post(u, "", nil) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - switch resp.StatusCode { - case http.StatusCreated: - desc, err := bs.statter.Stat(ctx, opts.Mount.From.Digest()) - if err != nil { - return nil, err - } - return nil, distribution.ErrBlobMounted{From: opts.Mount.From, Descriptor: desc} - case http.StatusAccepted: - // TODO(dmcgowan): Check for invalid UUID - uuid := resp.Header.Get("Docker-Upload-UUID") - location, err := sanitizeLocation(resp.Header.Get("Location"), u) - if err != nil { - return nil, err - } - - return &httpBlobUpload{ - statter: bs.statter, - client: bs.client, - uuid: uuid, - startedAt: time.Now(), - location: location, - }, nil - default: - return nil, HandleErrorResponse(resp) - } -} - -func (bs *blobs) Resume(ctx context.Context, id string) (distribution.BlobWriter, error) { - panic("not implemented") -} - -func (bs *blobs) Delete(ctx context.Context, dgst digest.Digest) error { - return bs.statter.Clear(ctx, dgst) -} - -type blobStatter struct { - name reference.Named - ub *v2.URLBuilder - client *http.Client -} - -func (bs *blobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return distribution.Descriptor{}, err - } - u, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return distribution.Descriptor{}, err - } - - resp, err := bs.client.Head(u) - if err != nil { - return distribution.Descriptor{}, err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - lengthHeader := resp.Header.Get("Content-Length") - if lengthHeader == "" { - return distribution.Descriptor{}, fmt.Errorf("missing content-length header for request: %s", u) - } - - length, err := strconv.ParseInt(lengthHeader, 10, 64) - if err != nil { - return distribution.Descriptor{}, fmt.Errorf("error parsing content-length: %v", err) - } - - return distribution.Descriptor{ - MediaType: resp.Header.Get("Content-Type"), - Size: length, - Digest: dgst, - }, nil - } else if resp.StatusCode == http.StatusNotFound { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - return distribution.Descriptor{}, HandleErrorResponse(resp) -} - -func buildCatalogValues(maxEntries int, last string) url.Values { - values := url.Values{} - - if maxEntries > 0 { - values.Add("n", strconv.Itoa(maxEntries)) - } - - if last != "" { - values.Add("last", last) - } - - return values -} - -func (bs *blobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - ref, err := reference.WithDigest(bs.name, dgst) - if err != nil { - return err - } - blobURL, err := bs.ub.BuildBlobURL(ref) - if err != nil { - return err - } - - req, err := http.NewRequest("DELETE", blobURL, nil) - if err != nil { - return err - } - - resp, err := bs.client.Do(req) - if err != nil { - return err - } - defer resp.Body.Close() - - if SuccessStatus(resp.StatusCode) { - return nil - } - return HandleErrorResponse(resp) -} - -func (bs *blobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go b/vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go deleted file mode 100644 index e1b17a03a0..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/transport/http_reader.go +++ /dev/null @@ -1,250 +0,0 @@ 
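The blob pieces above (Create, the httpBlobUpload writer, and Commit) implement the push-side upload flow. A compact sketch of the convenience path, where bs would come from repo.Blobs(ctx) and the media type is illustrative:

package example

import (
	"github.com/docker/distribution"
	"github.com/docker/distribution/context"
)

// pushBlob uploads a layer payload. Put wraps the flow above: Create POSTs
// to start the upload, the writer PATCHes the data while digesting it, and
// Commit PUTs the final digest.
func pushBlob(bs distribution.BlobStore, payload []byte) (distribution.Descriptor, error) {
	ctx := context.Background()
	return bs.Put(ctx, "application/octet-stream", payload)
}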
-package transport - -import ( - "errors" - "fmt" - "io" - "net/http" - "os" - "regexp" - "strconv" -) - -var ( - contentRangeRegexp = regexp.MustCompile(`bytes ([0-9]+)-([0-9]+)/([0-9]+|\*)`) - - // ErrWrongCodeForByteRange is returned if the client sends a request - // with a Range header but the server returns a 2xx or 3xx code other - // than 206 Partial Content. - ErrWrongCodeForByteRange = errors.New("expected HTTP 206 from byte range request") -) - -// ReadSeekCloser combines io.ReadSeeker with io.Closer. -type ReadSeekCloser interface { - io.ReadSeeker - io.Closer -} - -// NewHTTPReadSeeker handles reading from an HTTP endpoint using a GET -// request. When seeking and starting a read from a non-zero offset, -// a "Range" header will be added which sets the offset. -// TODO(dmcgowan): Move this into a separate utility package -func NewHTTPReadSeeker(client *http.Client, url string, errorHandler func(*http.Response) error) ReadSeekCloser { - return &httpReadSeeker{ - client: client, - url: url, - errorHandler: errorHandler, - } -} - -type httpReadSeeker struct { - client *http.Client - url string - - // errorHandler creates an error from an unsuccessful HTTP response. - // This allows the error to be created with the HTTP response body - // without leaking the body through a returned error. - errorHandler func(*http.Response) error - - size int64 - - // rc is the remote read closer. - rc io.ReadCloser - // readerOffset tracks the offset as of the last read. - readerOffset int64 - // seekOffset allows Seek to override the offset. Seek changes - // seekOffset instead of changing readerOffset directly so that - // connection resets can be delayed and possibly avoided if the - // seek is undone (i.e. seeking to the end and then back to the - // beginning). - seekOffset int64 - err error -} - -func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) { - if hrs.err != nil { - return 0, hrs.err - } - - // If we sought to a different position, we need to reset the - // connection. This logic is here instead of Seek so that if - // a seek is undone before the next read, the connection doesn't - // need to be closed and reopened. A common example of this is - // seeking to the end to determine the length, and then seeking - // back to the original position. - if hrs.readerOffset != hrs.seekOffset { - hrs.reset() - } - - hrs.readerOffset = hrs.seekOffset - - rd, err := hrs.reader() - if err != nil { - return 0, err - } - - n, err = rd.Read(p) - hrs.seekOffset += int64(n) - hrs.readerOffset += int64(n) - - return n, err -} - -func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) { - if hrs.err != nil { - return 0, hrs.err - } - - lastReaderOffset := hrs.readerOffset - - if whence == os.SEEK_SET && hrs.rc == nil { - // If no request has been made yet, and we are seeking to an - // absolute position, set the read offset as well to avoid an - // unnecessary request.
- hrs.readerOffset = offset - } - - _, err := hrs.reader() - if err != nil { - hrs.readerOffset = lastReaderOffset - return 0, err - } - - newOffset := hrs.seekOffset - - switch whence { - case os.SEEK_CUR: - newOffset += offset - case os.SEEK_END: - if hrs.size < 0 { - return 0, errors.New("content length not known") - } - newOffset = hrs.size + offset - case os.SEEK_SET: - newOffset = offset - } - - if newOffset < 0 { - err = errors.New("cannot seek to negative position") - } else { - hrs.seekOffset = newOffset - } - - return hrs.seekOffset, err -} - -func (hrs *httpReadSeeker) Close() error { - if hrs.err != nil { - return hrs.err - } - - // close and release reader chain - if hrs.rc != nil { - hrs.rc.Close() - } - - hrs.rc = nil - - hrs.err = errors.New("httpLayer: closed") - - return nil -} - -func (hrs *httpReadSeeker) reset() { - if hrs.err != nil { - return - } - if hrs.rc != nil { - hrs.rc.Close() - hrs.rc = nil - } -} - -func (hrs *httpReadSeeker) reader() (io.Reader, error) { - if hrs.err != nil { - return nil, hrs.err - } - - if hrs.rc != nil { - return hrs.rc, nil - } - - req, err := http.NewRequest("GET", hrs.url, nil) - if err != nil { - return nil, err - } - - if hrs.readerOffset > 0 { - // If we are at different offset, issue a range request from there. - req.Header.Add("Range", fmt.Sprintf("bytes=%d-", hrs.readerOffset)) - // TODO: get context in here - // context.GetLogger(hrs.context).Infof("Range: %s", req.Header.Get("Range")) - } - - resp, err := hrs.client.Do(req) - if err != nil { - return nil, err - } - - // Normally would use client.SuccessStatus, but that would be a cyclic - // import - if resp.StatusCode >= 200 && resp.StatusCode <= 399 { - if hrs.readerOffset > 0 { - if resp.StatusCode != http.StatusPartialContent { - return nil, ErrWrongCodeForByteRange - } - - contentRange := resp.Header.Get("Content-Range") - if contentRange == "" { - return nil, errors.New("no Content-Range header found in HTTP 206 response") - } - - submatches := contentRangeRegexp.FindStringSubmatch(contentRange) - if len(submatches) < 4 { - return nil, fmt.Errorf("could not parse Content-Range header: %s", contentRange) - } - - startByte, err := strconv.ParseUint(submatches[1], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse start of range in Content-Range header: %s", contentRange) - } - - if startByte != uint64(hrs.readerOffset) { - return nil, fmt.Errorf("received Content-Range starting at offset %d instead of requested %d", startByte, hrs.readerOffset) - } - - endByte, err := strconv.ParseUint(submatches[2], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse end of range in Content-Range header: %s", contentRange) - } - - if submatches[3] == "*" { - hrs.size = -1 - } else { - size, err := strconv.ParseUint(submatches[3], 10, 64) - if err != nil { - return nil, fmt.Errorf("could not parse total size in Content-Range header: %s", contentRange) - } - - if endByte+1 != size { - return nil, fmt.Errorf("range in Content-Range stops before the end of the content: %s", contentRange) - } - - hrs.size = int64(size) - } - } else if resp.StatusCode == http.StatusOK { - hrs.size = resp.ContentLength - } else { - hrs.size = -1 - } - hrs.rc = resp.Body - } else { - defer resp.Body.Close() - if hrs.errorHandler != nil { - return nil, hrs.errorHandler(resp) - } - return nil, fmt.Errorf("unexpected status resolving reader: %v", resp.Status) - } - - return hrs.rc, nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/client/transport/transport.go 
b/vendor/src/github.com/docker/distribution/registry/client/transport/transport.go deleted file mode 100644 index 30e45fab0f..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/client/transport/transport.go +++ /dev/null @@ -1,147 +0,0 @@ -package transport - -import ( - "io" - "net/http" - "sync" -) - -// RequestModifier represents an object which will do an inplace -// modification of an HTTP request. -type RequestModifier interface { - ModifyRequest(*http.Request) error -} - -type headerModifier http.Header - -// NewHeaderRequestModifier returns a new RequestModifier which will -// add the given headers to a request. -func NewHeaderRequestModifier(header http.Header) RequestModifier { - return headerModifier(header) -} - -func (h headerModifier) ModifyRequest(req *http.Request) error { - for k, s := range http.Header(h) { - req.Header[k] = append(req.Header[k], s...) - } - - return nil -} - -// NewTransport creates a new transport which will apply modifiers to -// the request on a RoundTrip call. -func NewTransport(base http.RoundTripper, modifiers ...RequestModifier) http.RoundTripper { - return &transport{ - Modifiers: modifiers, - Base: base, - } -} - -// transport is an http.RoundTripper that makes HTTP requests after -// copying and modifying the request -type transport struct { - Modifiers []RequestModifier - Base http.RoundTripper - - mu sync.Mutex // guards modReq - modReq map[*http.Request]*http.Request // original -> modified -} - -// RoundTrip authorizes and authenticates the request with an -// access token. If no token exists or token is expired, -// tries to refresh/fetch a new token. -func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) { - req2 := cloneRequest(req) - for _, modifier := range t.Modifiers { - if err := modifier.ModifyRequest(req2); err != nil { - return nil, err - } - } - - t.setModReq(req, req2) - res, err := t.base().RoundTrip(req2) - if err != nil { - t.setModReq(req, nil) - return nil, err - } - res.Body = &onEOFReader{ - rc: res.Body, - fn: func() { t.setModReq(req, nil) }, - } - return res, nil -} - -// CancelRequest cancels an in-flight request by closing its connection. -func (t *transport) CancelRequest(req *http.Request) { - type canceler interface { - CancelRequest(*http.Request) - } - if cr, ok := t.base().(canceler); ok { - t.mu.Lock() - modReq := t.modReq[req] - delete(t.modReq, req) - t.mu.Unlock() - cr.CancelRequest(modReq) - } -} - -func (t *transport) base() http.RoundTripper { - if t.Base != nil { - return t.Base - } - return http.DefaultTransport -} - -func (t *transport) setModReq(orig, mod *http.Request) { - t.mu.Lock() - defer t.mu.Unlock() - if t.modReq == nil { - t.modReq = make(map[*http.Request]*http.Request) - } - if mod == nil { - delete(t.modReq, orig) - } else { - t.modReq[orig] = mod - } -} - -// cloneRequest returns a clone of the provided *http.Request. -// The clone is a shallow copy of the struct and its Header map. -func cloneRequest(r *http.Request) *http.Request { - // shallow copy of the struct - r2 := new(http.Request) - *r2 = *r - // deep copy of the Header - r2.Header = make(http.Header, len(r.Header)) - for k, s := range r.Header { - r2.Header[k] = append([]string(nil), s...) 
- } - - return r2 -} - -type onEOFReader struct { - rc io.ReadCloser - fn func() -} - -func (r *onEOFReader) Read(p []byte) (n int, err error) { - n, err = r.rc.Read(p) - if err == io.EOF { - r.runFunc() - } - return -} - -func (r *onEOFReader) Close() error { - err := r.rc.Close() - r.runFunc() - return err -} - -func (r *onEOFReader) runFunc() { - if fn := r.fn; fn != nil { - fn() - r.fn = nil - } -} diff --git a/vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go b/vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go deleted file mode 100644 index 10a3909197..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/storage/cache/cache.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package cache provides facilities to speed up access to the storage -// backend. -package cache - -import ( - "fmt" - - "github.com/docker/distribution" -) - -// BlobDescriptorCacheProvider provides repository scoped -// BlobDescriptorService cache instances and a global descriptor cache. -type BlobDescriptorCacheProvider interface { - distribution.BlobDescriptorService - - RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) -} - -// ValidateDescriptor provides a helper function to ensure that caches have -// common criteria for admitting descriptors. -func ValidateDescriptor(desc distribution.Descriptor) error { - if err := desc.Digest.Validate(); err != nil { - return err - } - - if desc.Size < 0 { - return fmt.Errorf("cache: invalid length in descriptor: %v < 0", desc.Size) - } - - if desc.MediaType == "" { - return fmt.Errorf("cache: empty mediatype on descriptor: %v", desc) - } - - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go b/vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go deleted file mode 100644 index 94ca8a90c7..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/storage/cache/cachedblobdescriptorstore.go +++ /dev/null @@ -1,101 +0,0 @@ -package cache - -import ( - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - - "github.com/docker/distribution" -) - -// Metrics is used to hold metric counters -// related to the number of times a cache was -// hit or missed. -type Metrics struct { - Requests uint64 - Hits uint64 - Misses uint64 -} - -// MetricsTracker represents a metric tracker -// which simply counts the number of hits and misses. -type MetricsTracker interface { - Hit() - Miss() - Metrics() Metrics -} - -type cachedBlobStatter struct { - cache distribution.BlobDescriptorService - backend distribution.BlobDescriptorService - tracker MetricsTracker -} - -// NewCachedBlobStatter creates a new statter which prefers a cache and -// falls back to a backend. -func NewCachedBlobStatter(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService) distribution.BlobDescriptorService { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - } -} - -// NewCachedBlobStatterWithMetrics creates a new statter which prefers a cache and -// falls back to a backend. Hits and misses will send to the tracker. 
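The transport package deleted above is used through NewTransport and RequestModifier. A short usage sketch (the header value is illustrative): wrap http.DefaultTransport so every request carries an extra header; RoundTrip clones each request before the modifiers run, so the caller's request is never mutated:

package example

import (
	"net/http"

	"github.com/docker/distribution/registry/client/transport"
)

// newRegistryClient returns an http.Client whose requests all carry the
// given User-Agent header via a header-injecting RoundTripper.
func newRegistryClient() *http.Client {
	modifier := transport.NewHeaderRequestModifier(http.Header{
		"User-Agent": []string{"example-client/0.1"},
	})
	return &http.Client{
		Transport: transport.NewTransport(http.DefaultTransport, modifier),
	}
}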
-func NewCachedBlobStatterWithMetrics(cache distribution.BlobDescriptorService, backend distribution.BlobDescriptorService, tracker MetricsTracker) distribution.BlobStatter { - return &cachedBlobStatter{ - cache: cache, - backend: backend, - tracker: tracker, - } -} - -func (cbds *cachedBlobStatter) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - desc, err := cbds.cache.Stat(ctx, dgst) - if err != nil { - if err != distribution.ErrBlobUnknown { - context.GetLogger(ctx).Errorf("error retrieving descriptor from cache: %v", err) - } - - goto fallback - } - - if cbds.tracker != nil { - cbds.tracker.Hit() - } - return desc, nil -fallback: - if cbds.tracker != nil { - cbds.tracker.Miss() - } - desc, err = cbds.backend.Stat(ctx, dgst) - if err != nil { - return desc, err - } - - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) - } - - return desc, err - -} - -func (cbds *cachedBlobStatter) Clear(ctx context.Context, dgst digest.Digest) error { - err := cbds.cache.Clear(ctx, dgst) - if err != nil { - return err - } - - err = cbds.backend.Clear(ctx, dgst) - if err != nil { - return err - } - return nil -} - -func (cbds *cachedBlobStatter) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := cbds.cache.SetDescriptor(ctx, dgst, desc); err != nil { - context.GetLogger(ctx).Errorf("error adding descriptor %v to cache: %v", desc.Digest, err) - } - return nil -} diff --git a/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go b/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go deleted file mode 100644 index 68a68f081e..0000000000 --- a/vendor/src/github.com/docker/distribution/registry/storage/cache/memory/memory.go +++ /dev/null @@ -1,170 +0,0 @@ -package memory - -import ( - "sync" - - "github.com/docker/distribution" - "github.com/docker/distribution/context" - "github.com/docker/distribution/digest" - "github.com/docker/distribution/reference" - "github.com/docker/distribution/registry/storage/cache" -) - -type inMemoryBlobDescriptorCacheProvider struct { - global *mapBlobDescriptorCache - repositories map[string]*mapBlobDescriptorCache - mu sync.RWMutex -} - -// NewInMemoryBlobDescriptorCacheProvider returns a new mapped-based cache for -// storing blob descriptor data. 
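cachedBlobStatter.Stat above is a cache-aside read: consult the cache, fall back to the backend on a miss, then populate the cache without letting a cache failure break the read. The same shape in a generic, hypothetical sketch:

package example

type store interface {
	Get(key string) (string, error)
	Set(key, value string) error
}

// lookup tries the cache first and falls back to the backend on any miss;
// the cache write on the way out is best-effort, mirroring the way
// cachedBlobStatter.Stat logs but does not propagate cache errors.
func lookup(cache, backend store, key string) (string, error) {
	if v, err := cache.Get(key); err == nil {
		return v, nil // cache hit
	}
	v, err := backend.Get(key)
	if err != nil {
		return "", err
	}
	_ = cache.Set(key, v) // a failed cache write must not fail the read
	return v, nil
}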
-func NewInMemoryBlobDescriptorCacheProvider() cache.BlobDescriptorCacheProvider { - return &inMemoryBlobDescriptorCacheProvider{ - global: newMapBlobDescriptorCache(), - repositories: make(map[string]*mapBlobDescriptorCache), - } -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) RepositoryScoped(repo string) (distribution.BlobDescriptorService, error) { - if _, err := reference.ParseNamed(repo); err != nil { - return nil, err - } - - imbdcp.mu.RLock() - defer imbdcp.mu.RUnlock() - - return &repositoryScopedInMemoryBlobDescriptorCache{ - repo: repo, - parent: imbdcp, - repository: imbdcp.repositories[repo], - }, nil -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - return imbdcp.global.Stat(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) Clear(ctx context.Context, dgst digest.Digest) error { - return imbdcp.global.Clear(ctx, dgst) -} - -func (imbdcp *inMemoryBlobDescriptorCacheProvider) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - _, err := imbdcp.Stat(ctx, dgst) - if err == distribution.ErrBlobUnknown { - - if dgst.Algorithm() != desc.Digest.Algorithm() && dgst != desc.Digest { - // if the digests differ, set the other canonical mapping - if err := imbdcp.global.SetDescriptor(ctx, desc.Digest, desc); err != nil { - return err - } - } - - // unknown, just set it - return imbdcp.global.SetDescriptor(ctx, dgst, desc) - } - - // we already know it, do nothing - return err -} - -// repositoryScopedInMemoryBlobDescriptorCache provides the request scoped -// repository cache. Instances are not thread-safe but the delegated -// operations are. -type repositoryScopedInMemoryBlobDescriptorCache struct { - repo string - parent *inMemoryBlobDescriptorCacheProvider // allows lazy allocation of repo's map - repository *mapBlobDescriptorCache -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if rsimbdcp.repository == nil { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return rsimbdcp.repository.Stat(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - if rsimbdcp.repository == nil { - return distribution.ErrBlobUnknown - } - - return rsimbdcp.repository.Clear(ctx, dgst) -} - -func (rsimbdcp *repositoryScopedInMemoryBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if rsimbdcp.repository == nil { - // allocate map since we are setting it now. - rsimbdcp.parent.mu.Lock() - var ok bool - // have to read back value since we may have allocated elsewhere. - rsimbdcp.repository, ok = rsimbdcp.parent.repositories[rsimbdcp.repo] - if !ok { - rsimbdcp.repository = newMapBlobDescriptorCache() - rsimbdcp.parent.repositories[rsimbdcp.repo] = rsimbdcp.repository - } - - rsimbdcp.parent.mu.Unlock() - } - - if err := rsimbdcp.repository.SetDescriptor(ctx, dgst, desc); err != nil { - return err - } - - return rsimbdcp.parent.SetDescriptor(ctx, dgst, desc) -} - -// mapBlobDescriptorCache provides a simple map-based implementation of the -// descriptor cache. 
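SetDescriptor in the repository-scoped cache above allocates its map lazily, re-reading it under the parent lock in case another goroutine allocated it first. The same check-then-allocate pattern in isolation, with illustrative types:

package example

import "sync"

type registry struct {
	mu     sync.Mutex
	tables map[string]map[string]string
}

// table returns the named map, allocating it at most once even when
// called concurrently: the existence check happens under the lock.
func (r *registry) table(name string) map[string]string {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.tables == nil {
		r.tables = make(map[string]map[string]string)
	}
	t, ok := r.tables[name]
	if !ok {
		t = make(map[string]string)
		r.tables[name] = t
	}
	return t
}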
-type mapBlobDescriptorCache struct { - descriptors map[digest.Digest]distribution.Descriptor - mu sync.RWMutex -} - -var _ distribution.BlobDescriptorService = &mapBlobDescriptorCache{} - -func newMapBlobDescriptorCache() *mapBlobDescriptorCache { - return &mapBlobDescriptorCache{ - descriptors: make(map[digest.Digest]distribution.Descriptor), - } -} - -func (mbdc *mapBlobDescriptorCache) Stat(ctx context.Context, dgst digest.Digest) (distribution.Descriptor, error) { - if err := dgst.Validate(); err != nil { - return distribution.Descriptor{}, err - } - - mbdc.mu.RLock() - defer mbdc.mu.RUnlock() - - desc, ok := mbdc.descriptors[dgst] - if !ok { - return distribution.Descriptor{}, distribution.ErrBlobUnknown - } - - return desc, nil -} - -func (mbdc *mapBlobDescriptorCache) Clear(ctx context.Context, dgst digest.Digest) error { - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - delete(mbdc.descriptors, dgst) - return nil -} - -func (mbdc *mapBlobDescriptorCache) SetDescriptor(ctx context.Context, dgst digest.Digest, desc distribution.Descriptor) error { - if err := dgst.Validate(); err != nil { - return err - } - - if err := cache.ValidateDescriptor(desc); err != nil { - return err - } - - mbdc.mu.Lock() - defer mbdc.mu.Unlock() - - mbdc.descriptors[dgst] = desc - return nil -} diff --git a/vendor/src/github.com/docker/distribution/tags.go b/vendor/src/github.com/docker/distribution/tags.go deleted file mode 100644 index 5030565963..0000000000 --- a/vendor/src/github.com/docker/distribution/tags.go +++ /dev/null @@ -1,27 +0,0 @@ -package distribution - -import ( - "github.com/docker/distribution/context" -) - -// TagService provides access to information about tagged objects. -type TagService interface { - // Get retrieves the descriptor identified by the tag. Some - // implementations may differentiate between "trusted" tags and - // "untrusted" tags. If a tag is "untrusted", the mapping will be returned - // as an ErrTagUntrusted error, with the target descriptor. - Get(ctx context.Context, tag string) (Descriptor, error) - - // Tag associates the tag with the provided descriptor, updating the - // current association, if needed. - Tag(ctx context.Context, tag string, desc Descriptor) error - - // Untag removes the given tag association - Untag(ctx context.Context, tag string) error - - // All returns the set of tags managed by this tag service - All(ctx context.Context) ([]string, error) - - // Lookup returns the set of tags referencing the given digest. - Lookup(ctx context.Context, digest Descriptor) ([]string, error) -} diff --git a/vendor/src/github.com/docker/distribution/uuid/uuid.go b/vendor/src/github.com/docker/distribution/uuid/uuid.go deleted file mode 100644 index d433ccaf51..0000000000 --- a/vendor/src/github.com/docker/distribution/uuid/uuid.go +++ /dev/null @@ -1,126 +0,0 @@ -// Package uuid provides simple UUID generation. Only version 4 style UUIDs -// can be generated. -// -// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs. -package uuid - -import ( - "crypto/rand" - "fmt" - "io" - "os" - "syscall" - "time" -) - -const ( - // Bits is the number of bits in a UUID - Bits = 128 - - // Size is the number of bytes in a UUID - Size = Bits / 8 - - format = "%08x-%04x-%04x-%04x-%012x" -) - -var ( - // ErrUUIDInvalid indicates a parsed string is not a valid uuid. - ErrUUIDInvalid = fmt.Errorf("invalid uuid") - - // Loggerf can be used to override the default logging destination. Such - // log messages in this library should be logged at warning or higher. 
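The uuid package that begins above generates only version-4 UUIDs. A compact restatement of the bit layout that the Generate function below implements, without Generate's retry and backoff around transient entropy errors; a minimal sketch:

package example

import (
	"crypto/rand"
	"fmt"
)

// newV4 builds a version-4 UUID: 16 random bytes, with the version nibble
// forced to 4 and the RFC 4122 variant bits forced to 10.
func newV4() (string, error) {
	var b [16]byte
	if _, err := rand.Read(b[:]); err != nil {
		return "", err
	}
	b[6] = (b[6] & 0x0f) | 0x40 // version 4 in the high nibble of byte 6
	b[8] = (b[8] & 0x3f) | 0x80 // variant bits in the top two bits of byte 8
	return fmt.Sprintf("%x-%x-%x-%x-%x", b[:4], b[4:6], b[6:8], b[8:10], b[10:]), nil
}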
- Loggerf = func(format string, args ...interface{}) {} -) - -// UUID represents a UUID value. UUIDs can be compared and set to other values -// and accessed by byte. -type UUID [Size]byte - -// Generate creates a new, version 4 uuid. -func Generate() (u UUID) { - const ( - // ensures we backoff for less than 450ms total. Use the following to - // select new value, in units of 10ms: - // n*(n+1)/2 = d -> n^2 + n - 2d -> n = (sqrt(8d + 1) - 1)/2 - maxretries = 9 - backoff = time.Millisecond * 10 - ) - - var ( - totalBackoff time.Duration - count int - retries int - ) - - for { - // This should never block but the read may fail. Because of this, - // we just try to read the random number generator until we get - // something. This is a very rare condition but may happen. - b := time.Duration(retries) * backoff - time.Sleep(b) - totalBackoff += b - - n, err := io.ReadFull(rand.Reader, u[count:]) - if err != nil { - if retryOnError(err) && retries < maxretries { - count += n - retries++ - Loggerf("error generating version 4 uuid, retrying: %v", err) - continue - } - - // Any other errors represent a system problem. What did someone - // do to /dev/urandom? - panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err)) - } - - break - } - - u[6] = (u[6] & 0x0f) | 0x40 // set version byte - u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b} - - return u -} - -// Parse attempts to extract a uuid from the string or returns an error. -func Parse(s string) (u UUID, err error) { - if len(s) != 36 { - return UUID{}, ErrUUIDInvalid - } - - // create stack addresses for each section of the uuid. - p := make([][]byte, 5) - - if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil { - return u, err - } - - copy(u[0:4], p[0]) - copy(u[4:6], p[1]) - copy(u[6:8], p[2]) - copy(u[8:10], p[3]) - copy(u[10:16], p[4]) - - return -} - -func (u UUID) String() string { - return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:]) -} - -// retryOnError tries to detect whether or not retrying would be fruitful. -func retryOnError(err error) bool { - switch err := err.(type) { - case *os.PathError: - return retryOnError(err.Err) // unpack the target error - case syscall.Errno: - if err == syscall.EPERM { - // EPERM represents an entropy pool exhaustion, a condition under - // which we backoff and retry. - return true - } - } - - return false -} diff --git a/vendor/src/github.com/docker/docker-credential-helpers/LICENSE b/vendor/src/github.com/docker/docker-credential-helpers/LICENSE deleted file mode 100644 index 1ea555e2af..0000000000 --- a/vendor/src/github.com/docker/docker-credential-helpers/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2016 David Calavera - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
-CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
-TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
-SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/src/github.com/docker/docker-credential-helpers/client/client.go b/vendor/src/github.com/docker/docker-credential-helpers/client/client.go
deleted file mode 100644
index ddd30bbc88..0000000000
--- a/vendor/src/github.com/docker/docker-credential-helpers/client/client.go
+++ /dev/null
@@ -1,70 +0,0 @@
-package client
-
-import (
-	"bytes"
-	"encoding/json"
-	"fmt"
-	"strings"
-
-	"github.com/docker/docker-credential-helpers/credentials"
-)
-
-// Store uses an external program to save credentials.
-func Store(program ProgramFunc, credentials *credentials.Credentials) error {
-	cmd := program("store")
-
-	buffer := new(bytes.Buffer)
-	if err := json.NewEncoder(buffer).Encode(credentials); err != nil {
-		return err
-	}
-	cmd.Input(buffer)
-
-	out, err := cmd.Output()
-	if err != nil {
-		t := strings.TrimSpace(string(out))
-		return fmt.Errorf("error storing credentials - err: %v, out: `%s`", err, t)
-	}
-
-	return nil
-}
-
-// Get executes an external program to get the credentials from a native store.
-func Get(program ProgramFunc, serverURL string) (*credentials.Credentials, error) {
-	cmd := program("get")
-	cmd.Input(strings.NewReader(serverURL))
-
-	out, err := cmd.Output()
-	if err != nil {
-		t := strings.TrimSpace(string(out))
-
-		if credentials.IsErrCredentialsNotFoundMessage(t) {
-			return nil, credentials.NewErrCredentialsNotFound()
-		}
-
-		return nil, fmt.Errorf("error getting credentials - err: %v, out: `%s`", err, t)
-	}
-
-	resp := &credentials.Credentials{
-		ServerURL: serverURL,
-	}
-
-	if err := json.NewDecoder(bytes.NewReader(out)).Decode(resp); err != nil {
-		return nil, err
-	}
-
-	return resp, nil
-}
-
-// Erase executes a program to remove the server credentials from the native store.
-func Erase(program ProgramFunc, serverURL string) error {
-	cmd := program("erase")
-	cmd.Input(strings.NewReader(serverURL))
-
-	out, err := cmd.Output()
-	if err != nil {
-		t := strings.TrimSpace(string(out))
-		return fmt.Errorf("error erasing credentials - err: %v, out: `%s`", err, t)
-	}
-
-	return nil
-}
diff --git a/vendor/src/github.com/docker/docker-credential-helpers/client/command.go b/vendor/src/github.com/docker/docker-credential-helpers/client/command.go
deleted file mode 100644
index 8983da6947..0000000000
--- a/vendor/src/github.com/docker/docker-credential-helpers/client/command.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package client
-
-import (
-	"io"
-	"os/exec"
-)
-
-// Program is an interface to execute external programs.
-type Program interface {
-	Output() ([]byte, error)
-	Input(in io.Reader)
-}
-
-// ProgramFunc is a type of function that initializes programs based on arguments.
-type ProgramFunc func(args ...string) Program
-
-// NewShellProgramFunc creates programs that are executed in a Shell.
-func NewShellProgramFunc(name string) ProgramFunc {
-	return func(args ...string) Program {
-		return &Shell{cmd: exec.Command(name, args...)}
-	}
-}
-
-// Shell invokes shell commands to talk with a remote credentials helper.
-type Shell struct { - cmd *exec.Cmd -} - -// Output returns responses from the remote credentials helper. -func (s *Shell) Output() ([]byte, error) { - return s.cmd.Output() -} - -// Input sets the input to send to a remote credentials helper. -func (s *Shell) Input(in io.Reader) { - s.cmd.Stdin = in -} diff --git a/vendor/src/github.com/docker/docker-credential-helpers/credentials/credentials.go b/vendor/src/github.com/docker/docker-credential-helpers/credentials/credentials.go deleted file mode 100644 index b14f495660..0000000000 --- a/vendor/src/github.com/docker/docker-credential-helpers/credentials/credentials.go +++ /dev/null @@ -1,129 +0,0 @@ -package credentials - -import ( - "bufio" - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "strings" -) - -// Credentials holds the information shared between docker and the credentials store. -type Credentials struct { - ServerURL string - Username string - Secret string -} - -// Serve initializes the credentials helper and parses the action argument. -// This function is designed to be called from a command line interface. -// It uses os.Args[1] as the key for the action. -// It uses os.Stdin as input and os.Stdout as output. -// This function terminates the program with os.Exit(1) if there is an error. -func Serve(helper Helper) { - var err error - if len(os.Args) != 2 { - err = fmt.Errorf("Usage: %s ", os.Args[0]) - } - - if err == nil { - err = HandleCommand(helper, os.Args[1], os.Stdin, os.Stdout) - } - - if err != nil { - fmt.Fprintf(os.Stdout, "%v\n", err) - os.Exit(1) - } -} - -// HandleCommand uses a helper and a key to run a credential action. -func HandleCommand(helper Helper, key string, in io.Reader, out io.Writer) error { - switch key { - case "store": - return Store(helper, in) - case "get": - return Get(helper, in, out) - case "erase": - return Erase(helper, in) - } - return fmt.Errorf("Unknown credential action `%s`", key) -} - -// Store uses a helper and an input reader to save credentials. -// The reader must contain the JSON serialization of a Credentials struct. -func Store(helper Helper, reader io.Reader) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - var creds Credentials - if err := json.NewDecoder(buffer).Decode(&creds); err != nil { - return err - } - - return helper.Add(&creds) -} - -// Get retrieves the credentials for a given server url. -// The reader must contain the server URL to search. -// The writer is used to write the JSON serialization of the credentials. -func Get(helper Helper, reader io.Reader, writer io.Writer) error { - scanner := bufio.NewScanner(reader) - - buffer := new(bytes.Buffer) - for scanner.Scan() { - buffer.Write(scanner.Bytes()) - } - - if err := scanner.Err(); err != nil && err != io.EOF { - return err - } - - serverURL := strings.TrimSpace(buffer.String()) - - username, secret, err := helper.Get(serverURL) - if err != nil { - return err - } - - resp := Credentials{ - Username: username, - Secret: secret, - } - - buffer.Reset() - if err := json.NewEncoder(buffer).Encode(resp); err != nil { - return err - } - - fmt.Fprint(writer, buffer.String()) - return nil -} - -// Erase removes credentials from the store. -// The reader must contain the server URL to remove. 
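The client package above fixes the wire protocol between docker and a helper: run the helper binary with the action as its only argument, write the payload (JSON credentials, or a bare server URL) to stdin, and read JSON or an error message back from stdout. A self-contained sketch of the "get" side; the helper binary name passed in is illustrative:

package example

import (
	"bytes"
	"encoding/json"
	"os/exec"
	"strings"
)

// creds mirrors credentials.Credentials from the package above.
type creds struct {
	ServerURL string
	Username  string
	Secret    string
}

// getCredentials speaks the protocol sketched by client.Get above: action
// as the argument, server URL on stdin, JSON credentials on stdout.
func getCredentials(helper, serverURL string) (*creds, error) {
	cmd := exec.Command(helper, "get")
	cmd.Stdin = strings.NewReader(serverURL)

	var out bytes.Buffer
	cmd.Stdout = &out
	if err := cmd.Run(); err != nil {
		return nil, err
	}

	c := &creds{}
	if err := json.NewDecoder(&out).Decode(c); err != nil {
		return nil, err
	}
	return c, nil
}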
-func Erase(helper Helper, reader io.Reader) error {
-	scanner := bufio.NewScanner(reader)
-
-	buffer := new(bytes.Buffer)
-	for scanner.Scan() {
-		buffer.Write(scanner.Bytes())
-	}
-
-	if err := scanner.Err(); err != nil && err != io.EOF {
-		return err
-	}
-
-	serverURL := strings.TrimSpace(buffer.String())
-
-	return helper.Delete(serverURL)
-}
diff --git a/vendor/src/github.com/docker/docker-credential-helpers/credentials/error.go b/vendor/src/github.com/docker/docker-credential-helpers/credentials/error.go
deleted file mode 100644
index d24bf16f09..0000000000
--- a/vendor/src/github.com/docker/docker-credential-helpers/credentials/error.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package credentials
-
-// errCredentialsNotFoundMessage standardizes the not-found error, so every
-// helper returns the same message and docker can handle it properly.
-const errCredentialsNotFoundMessage = "credentials not found in native keychain"
-
-// errCredentialsNotFound represents an error
-// raised when credentials are not in the store.
-type errCredentialsNotFound struct{}
-
-// Error returns the standard error message
-// for when the credentials are not in the store.
-func (errCredentialsNotFound) Error() string {
-	return errCredentialsNotFoundMessage
-}
-
-// NewErrCredentialsNotFound creates a new error
-// for when the credentials are not in the store.
-func NewErrCredentialsNotFound() error {
-	return errCredentialsNotFound{}
-}
-
-// IsErrCredentialsNotFound returns true if the error
-// was caused by not having a set of credentials in a store.
-func IsErrCredentialsNotFound(err error) bool {
-	_, ok := err.(errCredentialsNotFound)
-	return ok
-}
-
-// IsErrCredentialsNotFoundMessage returns true if the error
-// was caused by not having a set of credentials in a store.
-//
-// This function helps to check messages returned by an
-// external program via its standard output.
-func IsErrCredentialsNotFoundMessage(err string) bool {
-	return err == errCredentialsNotFoundMessage
-}
diff --git a/vendor/src/github.com/docker/docker-credential-helpers/credentials/helper.go b/vendor/src/github.com/docker/docker-credential-helpers/credentials/helper.go
deleted file mode 100644
index 8a6967144e..0000000000
--- a/vendor/src/github.com/docker/docker-credential-helpers/credentials/helper.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package credentials
-
-// Helper is the interface a credentials store helper must implement.
-type Helper interface {
-	// Add appends credentials to the store.
-	Add(*Credentials) error
-	// Delete removes credentials from the store.
-	Delete(serverURL string) error
-	// Get retrieves credentials from the store.
-	// It returns username and secret as strings.
-	Get(serverURL string) (string, string, error)
-}
diff --git a/vendor/src/github.com/docker/engine-api/LICENSE b/vendor/src/github.com/docker/engine-api/LICENSE
deleted file mode 100644
index c157bff96a..0000000000
--- a/vendor/src/github.com/docker/engine-api/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        https://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015-2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
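Back to the docker-credential-helpers code above the license text: Add, Delete, and Get are all a native store must implement. A minimal in-memory Helper, suitable only as a sketch or for tests; the not-found message deliberately matches the constant in error.go:

package example

import "errors"

// Credentials mirrors credentials.Credentials from the package above.
type Credentials struct {
	ServerURL string
	Username  string
	Secret    string
}

// memoryHelper satisfies the Helper interface with a plain map; real
// helpers talk to an OS keychain or secret service instead.
type memoryHelper map[string]Credentials

func (m memoryHelper) Add(c *Credentials) error {
	m[c.ServerURL] = *c
	return nil
}

func (m memoryHelper) Delete(serverURL string) error {
	delete(m, serverURL)
	return nil
}

func (m memoryHelper) Get(serverURL string) (string, string, error) {
	c, ok := m[serverURL]
	if !ok {
		// The exact message lets IsErrCredentialsNotFoundMessage recognize it.
		return "", "", errors.New("credentials not found in native keychain")
	}
	return c.Username, c.Secret, nil
}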
diff --git a/vendor/src/github.com/docker/engine-api/client/checkpoint_create.go b/vendor/src/github.com/docker/engine-api/client/checkpoint_create.go
deleted file mode 100644
index 23883cc06c..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/checkpoint_create.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package client
-
-import (
-	"github.com/docker/engine-api/types"
-	"golang.org/x/net/context"
-)
-
-// CheckpointCreate creates a checkpoint from the given container with the given name
-func (cli *Client) CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error {
-	resp, err := cli.post(ctx, "/containers/"+container+"/checkpoints", nil, options, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/checkpoint_delete.go b/vendor/src/github.com/docker/engine-api/client/checkpoint_delete.go
deleted file mode 100644
index a4e9ed0c06..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/checkpoint_delete.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package client
-
-import (
-	"golang.org/x/net/context"
-)
-
-// CheckpointDelete deletes the checkpoint with the given name from the given container
-func (cli *Client) CheckpointDelete(ctx context.Context, containerID string, checkpointID string) error {
-	resp, err := cli.delete(ctx, "/containers/"+containerID+"/checkpoints/"+checkpointID, nil, nil)
-	ensureReaderClosed(resp)
-	return err
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/checkpoint_list.go b/vendor/src/github.com/docker/engine-api/client/checkpoint_list.go
deleted file mode 100644
index ef5ec261b6..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/checkpoint_list.go
+++ /dev/null
@@ -1,22 +0,0 @@
-package client
-
-import (
-	"encoding/json"
-
-	"github.com/docker/engine-api/types"
-	"golang.org/x/net/context"
-)
-
-// CheckpointList returns the checkpoints of the given container in the docker host.
-func (cli *Client) CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error) {
-	var checkpoints []types.Checkpoint
-
-	resp, err := cli.get(ctx, "/containers/"+container+"/checkpoints", nil, nil)
-	if err != nil {
-		return checkpoints, err
-	}
-
-	err = json.NewDecoder(resp.body).Decode(&checkpoints)
-	ensureReaderClosed(resp)
-	return checkpoints, err
-}
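A hedged usage sketch for the checkpoint calls above; the container and checkpoint names are illustrative, and the checkpoint API was experimental at the time:

package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// List whatever checkpoints the daemon holds for the container.
	checkpoints, err := cli.CheckpointList(ctx, "mycontainer")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %d checkpoint(s)\n", len(checkpoints))

	// Remove one by name (illustrative).
	if err := cli.CheckpointDelete(ctx, "mycontainer", "checkpoint1"); err != nil {
		log.Fatal(err)
	}
}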
diff --git a/vendor/src/github.com/docker/engine-api/client/client.go b/vendor/src/github.com/docker/engine-api/client/client.go
deleted file mode 100644
index f3ad2cf30d..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/client.go
+++ /dev/null
@@ -1,153 +0,0 @@
-package client
-
-import (
-	"fmt"
-	"net/http"
-	"net/url"
-	"os"
-	"path/filepath"
-	"strings"
-
-	"github.com/docker/engine-api/client/transport"
-	"github.com/docker/go-connections/tlsconfig"
-)
-
-// DefaultVersion is the version of the current stable API
-const DefaultVersion string = "1.23"
-
-// Client is the API client that performs all operations
-// against a docker server.
-type Client struct {
-	// proto holds the client protocol i.e. unix.
-	proto string
-	// addr holds the client address.
-	addr string
-	// basePath holds the path to prepend to the requests.
-	basePath string
-	// transport is the interface to send request with, it implements transport.Client.
-	transport transport.Client
-	// version of the server to talk to.
-	version string
-	// custom http headers configured by users.
-	customHTTPHeaders map[string]string
-}
-
-// NewEnvClient initializes a new API client based on environment variables.
-// Use DOCKER_HOST to set the url to the docker server.
-// Use DOCKER_API_VERSION to set the version of the API to reach, leave empty for latest.
-// Use DOCKER_CERT_PATH to load the tls certificates from.
-// Use DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
-func NewEnvClient() (*Client, error) {
-	var client *http.Client
-	if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
-		options := tlsconfig.Options{
-			CAFile:             filepath.Join(dockerCertPath, "ca.pem"),
-			CertFile:           filepath.Join(dockerCertPath, "cert.pem"),
-			KeyFile:            filepath.Join(dockerCertPath, "key.pem"),
-			InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
-		}
-		tlsc, err := tlsconfig.Client(options)
-		if err != nil {
-			return nil, err
-		}
-
-		client = &http.Client{
-			Transport: &http.Transport{
-				TLSClientConfig: tlsc,
-			},
-		}
-	}
-
-	host := os.Getenv("DOCKER_HOST")
-	if host == "" {
-		host = DefaultDockerHost
-	}
-
-	version := os.Getenv("DOCKER_API_VERSION")
-	if version == "" {
-		version = DefaultVersion
-	}
-
-	return NewClient(host, version, client, nil)
-}
-
-// NewClient initializes a new API client for the given host and API version.
-// It uses the given http client as transport.
-// It also initializes the custom http headers to add to each request.
-//
-// It won't send any version information if the version number is empty. It is
-// highly recommended that you set a version or your client may break if the
-// server is upgraded.
-func NewClient(host string, version string, client *http.Client, httpHeaders map[string]string) (*Client, error) {
-	proto, addr, basePath, err := ParseHost(host)
-	if err != nil {
-		return nil, err
-	}
-
-	transport, err := transport.NewTransportWithHTTP(proto, addr, client)
-	if err != nil {
-		return nil, err
-	}
-
-	return &Client{
-		proto:             proto,
-		addr:              addr,
-		basePath:          basePath,
-		transport:         transport,
-		version:           version,
-		customHTTPHeaders: httpHeaders,
-	}, nil
-}
-
-// getAPIPath returns the versioned request path to call the API.
-// It appends the query parameters to the path if they are not empty.
-func (cli *Client) getAPIPath(p string, query url.Values) string {
-	var apiPath string
-	if cli.version != "" {
-		v := strings.TrimPrefix(cli.version, "v")
-		apiPath = fmt.Sprintf("%s/v%s%s", cli.basePath, v, p)
-	} else {
-		apiPath = fmt.Sprintf("%s%s", cli.basePath, p)
-	}
-
-	u := &url.URL{
-		Path: apiPath,
-	}
-	if len(query) > 0 {
-		u.RawQuery = query.Encode()
-	}
-	return u.String()
-}
-
-// ClientVersion returns the version string associated with this
-// instance of the Client. Note that this value can be changed
-// via the DOCKER_API_VERSION env var.
-func (cli *Client) ClientVersion() string {
-	return cli.version
-}
-
-// UpdateClientVersion updates the version string associated with this
-// instance of the Client.
-func (cli *Client) UpdateClientVersion(v string) {
-	cli.version = v
-}
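To make getAPIPath above concrete, a standalone restatement plus a worked example: with an empty base path and version "1.23", a request for "/containers/json" with all=1 becomes "/v1.23/containers/json?all=1":

package example

import (
	"fmt"
	"net/url"
	"strings"
)

// apiPath reproduces the shape of getAPIPath above for illustration.
func apiPath(basePath, version, p string, query url.Values) string {
	path := basePath + p
	if version != "" {
		// A leading "v" in the configured version is tolerated.
		path = fmt.Sprintf("%s/v%s%s", basePath, strings.TrimPrefix(version, "v"), p)
	}
	u := &url.URL{Path: path}
	if len(query) > 0 {
		u.RawQuery = query.Encode()
	}
	return u.String()
}

// apiPath("", "1.23", "/containers/json", url.Values{"all": {"1"}})
// returns "/v1.23/containers/json?all=1".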
-
-// ParseHost verifies that the given host string is valid.
-func ParseHost(host string) (string, string, string, error) {
-	protoAddrParts := strings.SplitN(host, "://", 2)
-	if len(protoAddrParts) == 1 {
-		return "", "", "", fmt.Errorf("unable to parse docker host `%s`", host)
-	}
-
-	var basePath string
-	proto, addr := protoAddrParts[0], protoAddrParts[1]
-	if proto == "tcp" {
-		parsed, err := url.Parse("tcp://" + addr)
-		if err != nil {
-			return "", "", "", err
-		}
-		addr = parsed.Host
-		basePath = parsed.Path
-	}
-	return proto, addr, basePath, nil
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/client_darwin.go b/vendor/src/github.com/docker/engine-api/client/client_darwin.go
deleted file mode 100644
index 4b47a178c4..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/client_darwin.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package client
-
-// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
-const DefaultDockerHost = "tcp://127.0.0.1:2375"
diff --git a/vendor/src/github.com/docker/engine-api/client/client_unix.go b/vendor/src/github.com/docker/engine-api/client/client_unix.go
deleted file mode 100644
index 572c5f87a7..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/client_unix.go
+++ /dev/null
@@ -1,6 +0,0 @@
-// +build linux freebsd solaris openbsd
-
-package client
-
-// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
-const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/vendor/src/github.com/docker/engine-api/client/client_windows.go b/vendor/src/github.com/docker/engine-api/client/client_windows.go
deleted file mode 100644
index 07c0c7a774..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/client_windows.go
+++ /dev/null
@@ -1,4 +0,0 @@
-package client
-
-// DefaultDockerHost defines os specific default if DOCKER_HOST is unset
-const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/vendor/src/github.com/docker/engine-api/client/container_attach.go b/vendor/src/github.com/docker/engine-api/client/container_attach.go
deleted file mode 100644
index 1b616bf038..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/container_attach.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package client
-
-import (
-	"net/url"
-
-	"github.com/docker/engine-api/types"
-	"golang.org/x/net/context"
-)
-
-// ContainerAttach attaches a connection to a container in the server.
-// It returns a types.HijackedResponse with the hijacked connection
-// and a reader to get output. It's up to the caller to close
-// the hijacked connection by calling types.HijackedResponse.Close.
-func (cli *Client) ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) { - query := url.Values{} - if options.Stream { - query.Set("stream", "1") - } - if options.Stdin { - query.Set("stdin", "1") - } - if options.Stdout { - query.Set("stdout", "1") - } - if options.Stderr { - query.Set("stderr", "1") - } - if options.DetachKeys != "" { - query.Set("detachKeys", options.DetachKeys) - } - - headers := map[string][]string{"Content-Type": {"text/plain"}} - return cli.postHijacked(ctx, "/containers/"+container+"/attach", query, nil, headers) -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_commit.go b/vendor/src/github.com/docker/engine-api/client/container_commit.go deleted file mode 100644 index d5c4749906..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_commit.go +++ /dev/null @@ -1,53 +0,0 @@ -package client - -import ( - "encoding/json" - "errors" - "net/url" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" - "golang.org/x/net/context" -) - -// ContainerCommit applies changes into a container and creates a new tagged image. -func (cli *Client) ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) { - var repository, tag string - if options.Reference != "" { - distributionRef, err := distreference.ParseNamed(options.Reference) - if err != nil { - return types.ContainerCommitResponse{}, err - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return types.ContainerCommitResponse{}, errors.New("refusing to create a tag with a digest reference") - } - - tag = reference.GetTagFromNamedRef(distributionRef) - repository = distributionRef.Name() - } - - query := url.Values{} - query.Set("container", container) - query.Set("repo", repository) - query.Set("tag", tag) - query.Set("comment", options.Comment) - query.Set("author", options.Author) - for _, change := range options.Changes { - query.Add("changes", change) - } - if options.Pause != true { - query.Set("pause", "0") - } - - var response types.ContainerCommitResponse - resp, err := cli.post(ctx, "/commit", query, options.Config, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_copy.go b/vendor/src/github.com/docker/engine-api/client/container_copy.go deleted file mode 100644 index d3dd0b116c..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_copy.go +++ /dev/null @@ -1,97 +0,0 @@ -package client - -import ( - "encoding/base64" - "encoding/json" - "fmt" - "io" - "net/http" - "net/url" - "path/filepath" - "strings" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -// ContainerStatPath returns Stat information about a path inside the container filesystem. -func (cli *Client) ContainerStatPath(ctx context.Context, containerID, path string) (types.ContainerPathStat, error) { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. 
- - urlStr := fmt.Sprintf("/containers/%s/archive", containerID) - response, err := cli.head(ctx, urlStr, query, nil) - if err != nil { - return types.ContainerPathStat{}, err - } - defer ensureReaderClosed(response) - return getContainerPathStatFromHeader(response.header) -} - -// CopyToContainer copies content into the container filesystem. -func (cli *Client) CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error { - query := url.Values{} - query.Set("path", filepath.ToSlash(path)) // Normalize the paths used in the API. - // Do not allow for an existing directory to be overwritten by a non-directory and vice versa. - if !options.AllowOverwriteDirWithFile { - query.Set("noOverwriteDirNonDir", "true") - } - - apiPath := fmt.Sprintf("/containers/%s/archive", container) - - response, err := cli.putRaw(ctx, apiPath, query, content, nil) - if err != nil { - return err - } - defer ensureReaderClosed(response) - - if response.statusCode != http.StatusOK { - return fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - return nil -} - -// CopyFromContainer gets the content from the container and returns it as a Reader -// to manipulate it in the host. It's up to the caller to close the reader. -func (cli *Client) CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) { - query := make(url.Values, 1) - query.Set("path", filepath.ToSlash(srcPath)) // Normalize the paths used in the API. - - apiPath := fmt.Sprintf("/containers/%s/archive", container) - response, err := cli.get(ctx, apiPath, query, nil) - if err != nil { - return nil, types.ContainerPathStat{}, err - } - - if response.statusCode != http.StatusOK { - return nil, types.ContainerPathStat{}, fmt.Errorf("unexpected status code from daemon: %d", response.statusCode) - } - - // In order to get the copy behavior right, we need to know information - // about both the source and the destination. The response headers include - // stat info about the source that we can use in deciding exactly how to - // copy it locally. Along with the stat info about the local destination, - // we have everything we need to handle the multiple possibilities there - // can be when copying a file/dir from one location to another file/dir. 
- stat, err := getContainerPathStatFromHeader(response.header) - if err != nil { - return nil, stat, fmt.Errorf("unable to get resource stat from response: %s", err) - } - return response.body, stat, err -} - -func getContainerPathStatFromHeader(header http.Header) (types.ContainerPathStat, error) { - var stat types.ContainerPathStat - - encodedStat := header.Get("X-Docker-Container-Path-Stat") - statDecoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(encodedStat)) - - err := json.NewDecoder(statDecoder).Decode(&stat) - if err != nil { - err = fmt.Errorf("unable to decode container path stat header: %s", err) - } - - return stat, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_create.go b/vendor/src/github.com/docker/engine-api/client/container_create.go deleted file mode 100644 index 98935794da..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_create.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strings" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" - "golang.org/x/net/context" -) - -type configWrapper struct { - *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig -} - -// ContainerCreate creates a new container based on the given configuration. -// It can be associated with a name, but it's not mandatory. -func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) { - var response types.ContainerCreateResponse - query := url.Values{} - if containerName != "" { - query.Set("name", containerName) - } - - body := configWrapper{ - Config: config, - HostConfig: hostConfig, - NetworkingConfig: networkingConfig, - } - - serverResp, err := cli.post(ctx, "/containers/create", query, body, nil) - if err != nil { - if serverResp != nil && serverResp.statusCode == 404 && strings.Contains(err.Error(), "No such image") { - return response, imageNotFoundError{config.Image} - } - return response, err - } - - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_diff.go b/vendor/src/github.com/docker/engine-api/client/container_diff.go deleted file mode 100644 index f4bb3a46b9..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_diff.go +++ /dev/null @@ -1,23 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerDiff shows differences in a container filesystem since it was started.
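A minimal sketch of calling ContainerDiff through this client; the environment-configured daemon and the container name "web" are illustrative assumptions, not anything this patch defines:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/engine-api/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient() // configured from DOCKER_HOST and related variables
        if err != nil {
            log.Fatal(err)
        }
        changes, err := cli.ContainerDiff(context.Background(), "web") // "web" is hypothetical
        if err != nil {
            log.Fatal(err)
        }
        for _, c := range changes {
            fmt.Println(c.Kind, c.Path) // Kind: 0=modified, 1=added, 2=deleted
        }
    }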
-func (cli *Client) ContainerDiff(ctx context.Context, containerID string) ([]types.ContainerChange, error) { - var changes []types.ContainerChange - - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/changes", url.Values{}, nil) - if err != nil { - return changes, err - } - - err = json.NewDecoder(serverResp.body).Decode(&changes) - ensureReaderClosed(serverResp) - return changes, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_exec.go b/vendor/src/github.com/docker/engine-api/client/container_exec.go deleted file mode 100644 index ff7e1a9d05..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_exec.go +++ /dev/null @@ -1,49 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerExecCreate creates a new exec configuration to run an exec process. -func (cli *Client) ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) { - var response types.ContainerExecCreateResponse - resp, err := cli.post(ctx, "/containers/"+container+"/exec", nil, config, nil) - if err != nil { - return response, err - } - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} - -// ContainerExecStart starts an exec process already created in the docker host. -func (cli *Client) ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error { - resp, err := cli.post(ctx, "/exec/"+execID+"/start", nil, config, nil) - ensureReaderClosed(resp) - return err -} - -// ContainerExecAttach attaches a connection to an exec process in the server. -// It returns a types.HijackedConnection with the hijacked connection -// and a reader to get output. It's up to the caller to close -// the hijacked connection by calling types.HijackedResponse.Close. -func (cli *Client) ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) { - headers := map[string][]string{"Content-Type": {"application/json"}} - return cli.postHijacked(ctx, "/exec/"+execID+"/start", nil, config, headers) -} - -// ContainerExecInspect returns information about a specific exec process on the docker host. -func (cli *Client) ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) { - var response types.ContainerExecInspect - resp, err := cli.get(ctx, "/exec/"+execID+"/json", nil, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_export.go b/vendor/src/github.com/docker/engine-api/client/container_export.go deleted file mode 100644 index 52194f3d34..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_export.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// ContainerExport retrieves the raw contents of a container -// and returns them as an io.ReadCloser. It's up to the caller -// to close the stream.
-func (cli *Client) ContainerExport(ctx context.Context, containerID string) (io.ReadCloser, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/export", url.Values{}, nil) - if err != nil { - return nil, err - } - - return serverResp.body, nil -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_inspect.go b/vendor/src/github.com/docker/engine-api/client/container_inspect.go deleted file mode 100644 index 0fa096d38f..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_inspect.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerInspect returns the container information. -func (cli *Client) ContainerInspect(ctx context.Context, containerID string) (types.ContainerJSON, error) { - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", nil, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, err - } - - var response types.ContainerJSON - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} - -// ContainerInspectWithRaw returns the container information and its raw representation. -func (cli *Client) ContainerInspectWithRaw(ctx context.Context, containerID string, getSize bool) (types.ContainerJSON, []byte, error) { - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/containers/"+containerID+"/json", query, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ContainerJSON{}, nil, containerNotFoundError{containerID} - } - return types.ContainerJSON{}, nil, err - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ContainerJSON{}, nil, err - } - - var response types.ContainerJSON - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_kill.go b/vendor/src/github.com/docker/engine-api/client/container_kill.go deleted file mode 100644 index 29f80c73ad..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_kill.go +++ /dev/null @@ -1,17 +0,0 @@ -package client - -import ( - "net/url" - - "golang.org/x/net/context" -) - -// ContainerKill terminates the container process but does not remove the container from the docker host. -func (cli *Client) ContainerKill(ctx context.Context, containerID, signal string) error { - query := url.Values{} - query.Set("signal", signal) - - resp, err := cli.post(ctx, "/containers/"+containerID+"/kill", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_list.go b/vendor/src/github.com/docker/engine-api/client/container_list.go deleted file mode 100644 index 87f7333dc7..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_list.go +++ /dev/null @@ -1,56 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strconv" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// ContainerList returns the list of containers in the docker host. 
-func (cli *Client) ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) { - query := url.Values{} - - if options.All { - query.Set("all", "1") - } - - if options.Limit != -1 { - query.Set("limit", strconv.Itoa(options.Limit)) - } - - if options.Since != "" { - query.Set("since", options.Since) - } - - if options.Before != "" { - query.Set("before", options.Before) - } - - if options.Size { - query.Set("size", "1") - } - - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filter) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/containers/json", query, nil) - if err != nil { - return nil, err - } - - var containers []types.Container - err = json.NewDecoder(resp.body).Decode(&containers) - ensureReaderClosed(resp) - return containers, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_logs.go b/vendor/src/github.com/docker/engine-api/client/container_logs.go deleted file mode 100644 index 08b9b91876..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_logs.go +++ /dev/null @@ -1,52 +0,0 @@ -package client - -import ( - "io" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - timetypes "github.com/docker/engine-api/types/time" -) - -// ContainerLogs returns the logs generated by a container in an io.ReadCloser. -// It's up to the caller to close the stream. -func (cli *Client) ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) { - query := url.Values{} - if options.ShowStdout { - query.Set("stdout", "1") - } - - if options.ShowStderr { - query.Set("stderr", "1") - } - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, time.Now()) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - - if options.Timestamps { - query.Set("timestamps", "1") - } - - if options.Details { - query.Set("details", "1") - } - - if options.Follow { - query.Set("follow", "1") - } - query.Set("tail", options.Tail) - - resp, err := cli.get(ctx, "/containers/"+container+"/logs", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_pause.go b/vendor/src/github.com/docker/engine-api/client/container_pause.go deleted file mode 100644 index 412067a782..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_pause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// ContainerPause pauses the main process of a given container without terminating it. -func (cli *Client) ContainerPause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/pause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_remove.go b/vendor/src/github.com/docker/engine-api/client/container_remove.go deleted file mode 100644 index cef4b81220..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_remove.go +++ /dev/null @@ -1,27 +0,0 @@ -package client - -import ( - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerRemove kills and removes a container from the docker host. 
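A short sketch of invoking ContainerRemove; the container name "web" and the option values are illustrative assumptions:

    package main

    import (
        "log"

        "github.com/docker/engine-api/client"
        "github.com/docker/engine-api/types"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            log.Fatal(err)
        }
        // Force-remove the (hypothetical) container "web" along with its anonymous volumes.
        opts := types.ContainerRemoveOptions{RemoveVolumes: true, Force: true}
        if err := cli.ContainerRemove(context.Background(), "web", opts); err != nil {
            log.Fatal(err)
        }
    }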
-func (cli *Client) ContainerRemove(ctx context.Context, containerID string, options types.ContainerRemoveOptions) error { - query := url.Values{} - if options.RemoveVolumes { - query.Set("v", "1") - } - if options.RemoveLinks { - query.Set("link", "1") - } - - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/containers/"+containerID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_rename.go b/vendor/src/github.com/docker/engine-api/client/container_rename.go deleted file mode 100644 index 0e718da7c6..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_rename.go +++ /dev/null @@ -1,16 +0,0 @@ -package client - -import ( - "net/url" - - "golang.org/x/net/context" -) - -// ContainerRename changes the name of a given container. -func (cli *Client) ContainerRename(ctx context.Context, containerID, newContainerName string) error { - query := url.Values{} - query.Set("name", newContainerName) - resp, err := cli.post(ctx, "/containers/"+containerID+"/rename", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_resize.go b/vendor/src/github.com/docker/engine-api/client/container_resize.go deleted file mode 100644 index b95d26b335..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_resize.go +++ /dev/null @@ -1,29 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerResize changes the size of the tty for a container. -func (cli *Client) ContainerResize(ctx context.Context, containerID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/containers/"+containerID, options.Height, options.Width) -} - -// ContainerExecResize changes the size of the tty for an exec process running inside a container. -func (cli *Client) ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error { - return cli.resize(ctx, "/exec/"+execID, options.Height, options.Width) -} - -func (cli *Client) resize(ctx context.Context, basePath string, height, width int) error { - query := url.Values{} - query.Set("h", strconv.Itoa(height)) - query.Set("w", strconv.Itoa(width)) - - resp, err := cli.post(ctx, basePath+"/resize", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_restart.go b/vendor/src/github.com/docker/engine-api/client/container_restart.go deleted file mode 100644 index 93c042d085..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_restart.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "net/url" - "time" - - timetypes "github.com/docker/engine-api/types/time" - "golang.org/x/net/context" -) - -// ContainerRestart stops and starts a container again. -// It makes the daemon wait for the container to be up again for -// a specific amount of time, given the timeout.
-func (cli *Client) ContainerRestart(ctx context.Context, containerID string, timeout *time.Duration) error { - query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/restart", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_start.go b/vendor/src/github.com/docker/engine-api/client/container_start.go deleted file mode 100644 index 1e22eec641..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_start.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -import ( - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -// ContainerStart sends a request to the docker daemon to start a container. -func (cli *Client) ContainerStart(ctx context.Context, containerID string, options types.ContainerStartOptions) error { - query := url.Values{} - if len(options.CheckpointID) != 0 { - query.Set("checkpoint", options.CheckpointID) - } - - resp, err := cli.post(ctx, "/containers/"+containerID+"/start", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_stats.go b/vendor/src/github.com/docker/engine-api/client/container_stats.go deleted file mode 100644 index 2cc67c3af1..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_stats.go +++ /dev/null @@ -1,24 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// ContainerStats returns near realtime stats for a given container. -// It's up to the caller to close the io.ReadCloser returned. -func (cli *Client) ContainerStats(ctx context.Context, containerID string, stream bool) (io.ReadCloser, error) { - query := url.Values{} - query.Set("stream", "0") - if stream { - query.Set("stream", "1") - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/stats", query, nil) - if err != nil { - return nil, err - } - return resp.body, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_stop.go b/vendor/src/github.com/docker/engine-api/client/container_stop.go deleted file mode 100644 index 1fc577f2b9..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_stop.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -import ( - "net/url" - "time" - - timetypes "github.com/docker/engine-api/types/time" - "golang.org/x/net/context" -) - -// ContainerStop stops a container without terminating the process. -// The process is blocked until the container stops or the timeout expires. -func (cli *Client) ContainerStop(ctx context.Context, containerID string, timeout *time.Duration) error { - query := url.Values{} - if timeout != nil { - query.Set("t", timetypes.DurationToSecondsString(*timeout)) - } - resp, err := cli.post(ctx, "/containers/"+containerID+"/stop", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_top.go b/vendor/src/github.com/docker/engine-api/client/container_top.go deleted file mode 100644 index 5ad926ae08..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_top.go +++ /dev/null @@ -1,28 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - "strings" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ContainerTop shows process information from within a container. 
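A small sketch of calling ContainerTop; "web" is a hypothetical container name, and passing nil falls back to the daemon's default ps arguments:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/engine-api/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            log.Fatal(err)
        }
        procs, err := cli.ContainerTop(context.Background(), "web", nil)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(procs.Titles) // column headers, e.g. PID, USER, COMMAND
        for _, p := range procs.Processes {
            fmt.Println(p)
        }
    }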
-func (cli *Client) ContainerTop(ctx context.Context, containerID string, arguments []string) (types.ContainerProcessList, error) { - var response types.ContainerProcessList - query := url.Values{} - if len(arguments) > 0 { - query.Set("ps_args", strings.Join(arguments, " ")) - } - - resp, err := cli.get(ctx, "/containers/"+containerID+"/top", query, nil) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_unpause.go b/vendor/src/github.com/docker/engine-api/client/container_unpause.go deleted file mode 100644 index 5c76211256..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_unpause.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// ContainerUnpause resumes the process execution within a container. -func (cli *Client) ContainerUnpause(ctx context.Context, containerID string) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/unpause", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_update.go b/vendor/src/github.com/docker/engine-api/client/container_update.go deleted file mode 100644 index a5a1826dc4..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_update.go +++ /dev/null @@ -1,13 +0,0 @@ -package client - -import ( - "github.com/docker/engine-api/types/container" - "golang.org/x/net/context" -) - -// ContainerUpdate updates the resources of a container. -func (cli *Client) ContainerUpdate(ctx context.Context, containerID string, updateConfig container.UpdateConfig) error { - resp, err := cli.post(ctx, "/containers/"+containerID+"/update", nil, updateConfig, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/container_wait.go b/vendor/src/github.com/docker/engine-api/client/container_wait.go deleted file mode 100644 index c26ff3f378..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/container_wait.go +++ /dev/null @@ -1,26 +0,0 @@ -package client - -import ( - "encoding/json" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -// ContainerWait pauses execution until a container exits. -// It returns the container's exit status code. -func (cli *Client) ContainerWait(ctx context.Context, containerID string) (int, error) { - resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", nil, nil, nil) - if err != nil { - return -1, err - } - defer ensureReaderClosed(resp) - - var res types.ContainerWaitResponse - if err := json.NewDecoder(resp.body).Decode(&res); err != nil { - return -1, err - } - - return res.StatusCode, nil -} diff --git a/vendor/src/github.com/docker/engine-api/client/errors.go b/vendor/src/github.com/docker/engine-api/client/errors.go deleted file mode 100644 index e026320bbd..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/errors.go +++ /dev/null @@ -1,203 +0,0 @@ -package client - -import ( - "errors" - "fmt" -) - -// ErrConnectionFailed is an error raised when the connection between the client and the server failed. -var ErrConnectionFailed = errors.New("Cannot connect to the Docker daemon.
Is the docker daemon running on this host?") - -type notFound interface { - error - NotFound() bool // Is the error a NotFound error -} - -// IsErrNotFound returns true if the error is caused when an -// object (image, container, network, volume, …) is not found in the docker host. -func IsErrNotFound(err error) bool { - te, ok := err.(notFound) - return ok && te.NotFound() -} - -// imageNotFoundError implements an error returned when an image is not in the docker host. -type imageNotFoundError struct { - imageID string -} - -// NotFound indicates that this error is of type NotFound -func (e imageNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of an imageNotFoundError -func (e imageNotFoundError) Error() string { - return fmt.Sprintf("Error: No such image: %s", e.imageID) -} - -// IsErrImageNotFound returns true if the error is caused -// when an image is not found in the docker host. -func IsErrImageNotFound(err error) bool { - return IsErrNotFound(err) -} - -// containerNotFoundError implements an error returned when a container is not in the docker host. -type containerNotFoundError struct { - containerID string -} - -// NotFound indicates that this error is of type NotFound -func (e containerNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a containerNotFoundError -func (e containerNotFoundError) Error() string { - return fmt.Sprintf("Error: No such container: %s", e.containerID) -} - -// IsErrContainerNotFound returns true if the error is caused -// when a container is not found in the docker host. -func IsErrContainerNotFound(err error) bool { - return IsErrNotFound(err) -} - -// networkNotFoundError implements an error returned when a network is not in the docker host. -type networkNotFoundError struct { - networkID string -} - -// NotFound indicates that this error is of type NotFound -func (e networkNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a networkNotFoundError -func (e networkNotFoundError) Error() string { - return fmt.Sprintf("Error: No such network: %s", e.networkID) -} - -// IsErrNetworkNotFound returns true if the error is caused -// when a network is not found in the docker host. -func IsErrNetworkNotFound(err error) bool { - return IsErrNotFound(err) -} - -// volumeNotFoundError implements an error returned when a volume is not in the docker host. -type volumeNotFoundError struct { - volumeID string -} - -// NotFound indicates that this error is of type NotFound -func (e volumeNotFoundError) NotFound() bool { - return true -} - -// Error returns a string representation of a volumeNotFoundError -func (e volumeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such volume: %s", e.volumeID) -} - -// IsErrVolumeNotFound returns true if the error is caused -// when a volume is not found in the docker host. -func IsErrVolumeNotFound(err error) bool { - return IsErrNotFound(err) -} - -// unauthorizedError represents an authorization error in a remote registry. -type unauthorizedError struct { - cause error -} - -// Error returns a string representation of an unauthorizedError -func (u unauthorizedError) Error() string { - return u.cause.Error() -} - -// IsErrUnauthorized returns true if the error is caused -// when a remote registry authentication fails -func IsErrUnauthorized(err error) bool { - _, ok := err.(unauthorizedError) - return ok -} - -// nodeNotFoundError implements an error returned when a node is not found.
-type nodeNotFoundError struct { - nodeID string -} - -// Error returns a string representation of a nodeNotFoundError -func (e nodeNotFoundError) Error() string { - return fmt.Sprintf("Error: No such node: %s", e.nodeID) -} - -// NotFound indicates that this error is of type NotFound -func (e nodeNotFoundError) NotFound() bool { - return true -} - -// IsErrNodeNotFound returns true if the error is caused -// when a node is not found. -func IsErrNodeNotFound(err error) bool { - _, ok := err.(nodeNotFoundError) - return ok -} - -// serviceNotFoundError implements an error returned when a service is not found. -type serviceNotFoundError struct { - serviceID string -} - -// Error returns a string representation of a serviceNotFoundError -func (e serviceNotFoundError) Error() string { - return fmt.Sprintf("Error: No such service: %s", e.serviceID) -} - -// NotFound indicates that this error is of type NotFound -func (e serviceNotFoundError) NotFound() bool { - return true -} - -// IsErrServiceNotFound returns true if the error is caused -// when a service is not found. -func IsErrServiceNotFound(err error) bool { - _, ok := err.(serviceNotFoundError) - return ok -} - -// taskNotFoundError implements an error returned when a task is not found. -type taskNotFoundError struct { - taskID string -} - -// Error returns a string representation of a taskNotFoundError -func (e taskNotFoundError) Error() string { - return fmt.Sprintf("Error: No such task: %s", e.taskID) -} - -// NotFound indicates that this error is of type NotFound -func (e taskNotFoundError) NotFound() bool { - return true -} - -// IsErrTaskNotFound returns true if the error is caused -// when a task is not found. -func IsErrTaskNotFound(err error) bool { - _, ok := err.(taskNotFoundError) - return ok -} - -type pluginPermissionDenied struct { - name string -} - -func (e pluginPermissionDenied) Error() string { - return "Permission denied while installing plugin " + e.name -} - -// IsErrPluginPermissionDenied returns true if the error is caused -// when a user denies a plugin's permissions -func IsErrPluginPermissionDenied(err error) bool { - _, ok := err.(pluginPermissionDenied) - return ok -} diff --git a/vendor/src/github.com/docker/engine-api/client/events.go b/vendor/src/github.com/docker/engine-api/client/events.go deleted file mode 100644 index f22a18e1d3..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/events.go +++ /dev/null @@ -1,48 +0,0 @@ -package client - -import ( - "io" - "net/url" - "time" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - timetypes "github.com/docker/engine-api/types/time" -) - -// Events returns a stream of events in the daemon in a ReadCloser. -// It's up to the caller to close the stream.
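A minimal sketch of consuming the event stream; the empty EventsOptions (which subscribes to everything from now on) is an assumption made for brevity:

    package main

    import (
        "io"
        "log"
        "os"

        "github.com/docker/engine-api/client"
        "github.com/docker/engine-api/types"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            log.Fatal(err)
        }
        rc, err := cli.Events(context.Background(), types.EventsOptions{})
        if err != nil {
            log.Fatal(err)
        }
        defer rc.Close()       // the caller owns the stream
        io.Copy(os.Stdout, rc) // raw JSON event stream
    }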
-func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) { - query := url.Values{} - ref := time.Now() - - if options.Since != "" { - ts, err := timetypes.GetTimestamp(options.Since, ref) - if err != nil { - return nil, err - } - query.Set("since", ts) - } - if options.Until != "" { - ts, err := timetypes.GetTimestamp(options.Until, ref) - if err != nil { - return nil, err - } - query.Set("until", ts) - } - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - query.Set("filters", filterJSON) - } - - serverResponse, err := cli.get(ctx, "/events", query, nil) - if err != nil { - return nil, err - } - return serverResponse.body, nil -} diff --git a/vendor/src/github.com/docker/engine-api/client/hijack.go b/vendor/src/github.com/docker/engine-api/client/hijack.go deleted file mode 100644 index dbd91ef629..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/hijack.go +++ /dev/null @@ -1,174 +0,0 @@ -package client - -import ( - "crypto/tls" - "errors" - "fmt" - "net" - "net/http/httputil" - "net/url" - "strings" - "time" - - "github.com/docker/engine-api/types" - "github.com/docker/go-connections/sockets" - "golang.org/x/net/context" -) - -// tlsClientCon holds tls information and a dialed connection. -type tlsClientCon struct { - *tls.Conn - rawConn net.Conn -} - -func (c *tlsClientCon) CloseWrite() error { - // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it - // on its underlying connection. - if conn, ok := c.rawConn.(types.CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// postHijacked sends a POST request and hijacks the connection. -func (cli *Client) postHijacked(ctx context.Context, path string, query url.Values, body interface{}, headers map[string][]string) (types.HijackedResponse, error) { - bodyEncoded, err := encodeData(body) - if err != nil { - return types.HijackedResponse{}, err - } - - req, err := cli.newRequest("POST", path, query, bodyEncoded, headers) - if err != nil { - return types.HijackedResponse{}, err - } - req.Host = cli.addr - - req.Header.Set("Connection", "Upgrade") - req.Header.Set("Upgrade", "tcp") - - conn, err := dial(cli.proto, cli.addr, cli.transport.TLSConfig()) - if err != nil { - if strings.Contains(err.Error(), "connection refused") { - return types.HijackedResponse{}, fmt.Errorf("Cannot connect to the Docker daemon. Is 'docker daemon' running on this host?") - } - return types.HijackedResponse{}, err - } - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. 
Setting TCP KeepAlive on the socket connection will prevent - ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := conn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - clientconn := httputil.NewClientConn(conn, nil) - defer clientconn.Close() - - // Server hijacks the connection, error 'connection closed' expected - _, err = clientconn.Do(req) - - rwc, br := clientconn.Hijack() - - return types.HijackedResponse{Conn: rwc, Reader: br}, err -} - -func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) { - return tlsDialWithDialer(new(net.Dialer), network, addr, config) -} - -// We need to copy Go's implementation of tls.Dial (pkg/crypto/tls/tls.go) in -// order to return our custom tlsClientCon struct which holds both the tls.Conn -// object _and_ its underlying raw connection. The rationale for this is that -// we need to be able to close the write end of the connection when attaching, -// which tls.Conn does not provide. -func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) { - // We want the Timeout and Deadline values from dialer to cover the - // whole process: TCP connection and TLS handshake. This means that we - // also need to start our own timers now. - timeout := dialer.Timeout - - if !dialer.Deadline.IsZero() { - deadlineTimeout := dialer.Deadline.Sub(time.Now()) - if timeout == 0 || deadlineTimeout < timeout { - timeout = deadlineTimeout - } - } - - var errChannel chan error - - if timeout != 0 { - errChannel = make(chan error, 2) - time.AfterFunc(timeout, func() { - errChannel <- errors.New("") - }) - } - - proxyDialer, err := sockets.DialerFromEnvironment(dialer) - if err != nil { - return nil, err - } - - rawConn, err := proxyDialer.Dial(network, addr) - if err != nil { - return nil, err - } - - // When we set up a TCP connection for hijack, there could be long periods - // of inactivity (a long running command with no output) that in certain - // network setups may cause ECONNTIMEOUT, leaving the client in an unknown - // state. Setting TCP KeepAlive on the socket connection will prevent - // ECONNTIMEOUT unless the socket connection truly is broken - if tcpConn, ok := rawConn.(*net.TCPConn); ok { - tcpConn.SetKeepAlive(true) - tcpConn.SetKeepAlivePeriod(30 * time.Second) - } - - colonPos := strings.LastIndex(addr, ":") - if colonPos == -1 { - colonPos = len(addr) - } - hostname := addr[:colonPos] - - // If no ServerName is set, infer the ServerName - // from the hostname we're connecting to. - if config.ServerName == "" { - // Make a copy to avoid polluting argument or default. - c := *config - c.ServerName = hostname - config = &c - } - - conn := tls.Client(rawConn, config) - - if timeout == 0 { - err = conn.Handshake() - } else { - go func() { - errChannel <- conn.Handshake() - }() - - err = <-errChannel - } - - if err != nil { - rawConn.Close() - return nil, err - } - - // This is where we differ from the standard crypto/tls package: we return a - // wrapper which holds both the TLS and raw connections.
- return &tlsClientCon{conn, rawConn}, nil -} - -func dial(proto, addr string, tlsConfig *tls.Config) (net.Conn, error) { - if tlsConfig != nil && proto != "unix" && proto != "npipe" { - // Notice this isn't the standard library's tls.Dial function - return tlsDial(proto, addr, tlsConfig) - } - if proto == "npipe" { - return sockets.DialPipe(addr, 32*time.Second) - } - return net.Dial(proto, addr) -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_build.go b/vendor/src/github.com/docker/engine-api/client/image_build.go deleted file mode 100644 index 0ceb88cf68..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_build.go +++ /dev/null @@ -1,119 +0,0 @@ -package client - -import ( - "encoding/base64" - "encoding/json" - "io" - "net/http" - "net/url" - "regexp" - "strconv" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" -) - -var headerRegexp = regexp.MustCompile(`\ADocker/.+\s\((.+)\)\z`) - -// ImageBuild sends a request to the daemon to build images. -// The Body in the response implements an io.ReadCloser and it's up to the caller to -// close it. -func (cli *Client) ImageBuild(ctx context.Context, buildContext io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) { - query, err := imageBuildOptionsToQuery(options) - if err != nil { - return types.ImageBuildResponse{}, err - } - - headers := http.Header(make(map[string][]string)) - buf, err := json.Marshal(options.AuthConfigs) - if err != nil { - return types.ImageBuildResponse{}, err - } - headers.Add("X-Registry-Config", base64.URLEncoding.EncodeToString(buf)) - headers.Set("Content-Type", "application/tar") - - serverResp, err := cli.postRaw(ctx, "/build", query, buildContext, headers) - if err != nil { - return types.ImageBuildResponse{}, err - } - - osType := getDockerOS(serverResp.header.Get("Server")) - - return types.ImageBuildResponse{ - Body: serverResp.body, - OSType: osType, - }, nil -} - -func imageBuildOptionsToQuery(options types.ImageBuildOptions) (url.Values, error) { - query := url.Values{ - "t": options.Tags, - } - if options.SuppressOutput { - query.Set("q", "1") - } - if options.RemoteContext != "" { - query.Set("remote", options.RemoteContext) - } - if options.NoCache { - query.Set("nocache", "1") - } - if options.Remove { - query.Set("rm", "1") - } else { - query.Set("rm", "0") - } - - if options.ForceRemove { - query.Set("forcerm", "1") - } - - if options.PullParent { - query.Set("pull", "1") - } - - if !container.Isolation.IsDefault(options.Isolation) { - query.Set("isolation", string(options.Isolation)) - } - - query.Set("cpusetcpus", options.CPUSetCPUs) - query.Set("cpusetmems", options.CPUSetMems) - query.Set("cpushares", strconv.FormatInt(options.CPUShares, 10)) - query.Set("cpuquota", strconv.FormatInt(options.CPUQuota, 10)) - query.Set("cpuperiod", strconv.FormatInt(options.CPUPeriod, 10)) - query.Set("memory", strconv.FormatInt(options.Memory, 10)) - query.Set("memswap", strconv.FormatInt(options.MemorySwap, 10)) - query.Set("cgroupparent", options.CgroupParent) - query.Set("shmsize", strconv.FormatInt(options.ShmSize, 10)) - query.Set("dockerfile", options.Dockerfile) - - ulimitsJSON, err := json.Marshal(options.Ulimits) - if err != nil { - return query, err - } - query.Set("ulimits", string(ulimitsJSON)) - - buildArgsJSON, err := json.Marshal(options.BuildArgs) - if err != nil { - return query, err - } - query.Set("buildargs", string(buildArgsJSON)) - - labelsJSON, err :=
json.Marshal(options.Labels) - if err != nil { - return query, err - } - query.Set("labels", string(labelsJSON)) - return query, nil -} - -func getDockerOS(serverHeader string) string { - var osType string - matches := headerRegexp.FindStringSubmatch(serverHeader) - if len(matches) > 0 { - osType = matches[1] - } - return osType -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_create.go b/vendor/src/github.com/docker/engine-api/client/image_create.go deleted file mode 100644 index 6dfc0391c0..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_create.go +++ /dev/null @@ -1,34 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" -) - -// ImageCreate creates a new image based on the parent options. -// It returns the JSON content in the response body. -func (cli *Client) ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(parentReference) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", repository) - query.Set("tag", tag) - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImageCreate(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/create", query, nil, headers) -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_history.go b/vendor/src/github.com/docker/engine-api/client/image_history.go deleted file mode 100644 index b2840b5ed8..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_history.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImageHistory returns the changes in an image in history format. -func (cli *Client) ImageHistory(ctx context.Context, imageID string) ([]types.ImageHistory, error) { - var history []types.ImageHistory - serverResp, err := cli.get(ctx, "/images/"+imageID+"/history", url.Values{}, nil) - if err != nil { - return history, err - } - - err = json.NewDecoder(serverResp.body).Decode(&history) - ensureReaderClosed(serverResp) - return history, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_import.go b/vendor/src/github.com/docker/engine-api/client/image_import.go deleted file mode 100644 index 4e8749a01d..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_import.go +++ /dev/null @@ -1,37 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" -) - -// ImageImport creates a new image based on the source options. -// It returns the JSON content in the response body.
-func (cli *Client) ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) { - if ref != "" { - //Check if the given image name can be resolved - if _, err := reference.ParseNamed(ref); err != nil { - return nil, err - } - } - - query := url.Values{} - query.Set("fromSrc", source.SourceName) - query.Set("repo", ref) - query.Set("tag", options.Tag) - query.Set("message", options.Message) - for _, change := range options.Changes { - query.Add("changes", change) - } - - resp, err := cli.postRaw(ctx, "/images/create", query, source.Source, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_inspect.go b/vendor/src/github.com/docker/engine-api/client/image_inspect.go deleted file mode 100644 index 859ba64086..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImageInspectWithRaw returns the image information and its raw representation. -func (cli *Client) ImageInspectWithRaw(ctx context.Context, imageID string, getSize bool) (types.ImageInspect, []byte, error) { - query := url.Values{} - if getSize { - query.Set("size", "1") - } - serverResp, err := cli.get(ctx, "/images/"+imageID+"/json", query, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return types.ImageInspect{}, nil, imageNotFoundError{imageID} - } - return types.ImageInspect{}, nil, err - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return types.ImageInspect{}, nil, err - } - - var response types.ImageInspect - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_list.go b/vendor/src/github.com/docker/engine-api/client/image_list.go deleted file mode 100644 index 7408258231..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_list.go +++ /dev/null @@ -1,40 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// ImageList returns a list of images in the docker host. 
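A minimal sketch of listing images through this method; the option values here are illustrative:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/engine-api/client"
        "github.com/docker/engine-api/types"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            log.Fatal(err)
        }
        images, err := cli.ImageList(context.Background(), types.ImageListOptions{All: false})
        if err != nil {
            log.Fatal(err)
        }
        for _, img := range images {
            fmt.Println(img.ID, img.RepoTags)
        }
    }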
-func (cli *Client) ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) { - var images []types.Image - query := url.Values{} - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return images, err - } - query.Set("filters", filterJSON) - } - if options.MatchName != "" { - // FIXME rename this parameter, to not be confused with the filters flag - query.Set("filter", options.MatchName) - } - if options.All { - query.Set("all", "1") - } - - serverResp, err := cli.get(ctx, "/images/json", query, nil) - if err != nil { - return images, err - } - - err = json.NewDecoder(serverResp.body).Decode(&images) - ensureReaderClosed(serverResp) - return images, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_load.go b/vendor/src/github.com/docker/engine-api/client/image_load.go deleted file mode 100644 index 72f55fdc01..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_load.go +++ /dev/null @@ -1,30 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" -) - -// ImageLoad loads an image in the docker host from the client host. -// It's up to the caller to close the io.ReadCloser in the -// ImageLoadResponse returned by this function. -func (cli *Client) ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) { - v := url.Values{} - v.Set("quiet", "0") - if quiet { - v.Set("quiet", "1") - } - headers := map[string][]string{"Content-Type": {"application/x-tar"}} - resp, err := cli.postRaw(ctx, "/images/load", v, input, headers) - if err != nil { - return types.ImageLoadResponse{}, err - } - return types.ImageLoadResponse{ - Body: resp.body, - JSON: resp.header.Get("Content-Type") == "application/json", - }, nil -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_pull.go b/vendor/src/github.com/docker/engine-api/client/image_pull.go deleted file mode 100644 index e2c49ec52b..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_pull.go +++ /dev/null @@ -1,46 +0,0 @@ -package client - -import ( - "io" - "net/http" - "net/url" - - "golang.org/x/net/context" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/reference" -) - -// ImagePull requests the docker host to pull an image from a remote registry. -// It executes the privileged function if the operation is unauthorized -// and it tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. 
-// -// FIXME(vdemeester): this is currently used in a few ways in docker/docker -// - if not in trusted content, ref is used to pass the whole reference, and tag is empty -// - if in trusted content, ref is used to pass the reference name, and tag for the digest -func (cli *Client) ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) { - repository, tag, err := reference.Parse(ref) - if err != nil { - return nil, err - } - - query := url.Values{} - query.Set("fromImage", repository) - if tag != "" && !options.All { - query.Set("tag", tag) - } - - resp, err := cli.tryImageCreate(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImageCreate(ctx, query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_push.go b/vendor/src/github.com/docker/engine-api/client/image_push.go deleted file mode 100644 index 89191ee30d..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_push.go +++ /dev/null @@ -1,54 +0,0 @@ -package client - -import ( - "errors" - "io" - "net/http" - "net/url" - - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types" -) - -// ImagePush requests the docker host to push an image to a remote registry. -// It executes the privileged function if the operation is unauthorized -// and then tries one more time. -// It's up to the caller to handle the io.ReadCloser and close it properly. -func (cli *Client) ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return nil, err - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return nil, errors.New("cannot push a digest reference") - } - - var tag = "" - if nameTaggedRef, isNamedTagged := distributionRef.(distreference.NamedTagged); isNamedTagged { - tag = nameTaggedRef.Tag() - } - - query := url.Values{} - query.Set("tag", tag) - - resp, err := cli.tryImagePush(ctx, distributionRef.Name(), query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return nil, privilegeErr - } - resp, err = cli.tryImagePush(ctx, distributionRef.Name(), query, newAuthHeader) - } - if err != nil { - return nil, err - } - return resp.body, nil -} - -func (cli *Client) tryImagePush(ctx context.Context, imageID string, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/images/"+imageID+"/push", query, nil, headers) -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_remove.go b/vendor/src/github.com/docker/engine-api/client/image_remove.go deleted file mode 100644 index 47224326e0..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_remove.go +++ /dev/null @@ -1,31 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ImageRemove removes an image from the docker host.
-func (cli *Client) ImageRemove(ctx context.Context, imageID string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) { - query := url.Values{} - - if options.Force { - query.Set("force", "1") - } - if !options.PruneChildren { - query.Set("noprune", "1") - } - - resp, err := cli.delete(ctx, "/images/"+imageID, query, nil) - if err != nil { - return nil, err - } - - var dels []types.ImageDelete - err = json.NewDecoder(resp.body).Decode(&dels) - ensureReaderClosed(resp) - return dels, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_save.go b/vendor/src/github.com/docker/engine-api/client/image_save.go deleted file mode 100644 index ecac880a32..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_save.go +++ /dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "io" - "net/url" - - "golang.org/x/net/context" -) - -// ImageSave retrieves one or more images from the docker host as an io.ReadCloser. -// It's up to the caller to store the images and close the stream. -func (cli *Client) ImageSave(ctx context.Context, imageIDs []string) (io.ReadCloser, error) { - query := url.Values{ - "names": imageIDs, - } - - resp, err := cli.get(ctx, "/images/get", query, nil) - if err != nil { - return nil, err - } - return resp.body, nil -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_search.go b/vendor/src/github.com/docker/engine-api/client/image_search.go deleted file mode 100644 index 3940dfd799..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_search.go +++ /dev/null @@ -1,51 +0,0 @@ -package client - -import ( - "encoding/json" - "fmt" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/registry" - "golang.org/x/net/context" -) - -// ImageSearch makes the docker host search for a term in a remote registry. -// The list of results is not sorted in any fashion.
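A small sketch of a search call; the term "nginx" and the limit of 5 are illustrative assumptions:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/engine-api/client"
        "github.com/docker/engine-api/types"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            log.Fatal(err)
        }
        results, err := cli.ImageSearch(context.Background(), "nginx", types.ImageSearchOptions{Limit: 5})
        if err != nil {
            log.Fatal(err)
        }
        for _, r := range results {
            fmt.Printf("%s (%d stars)\n", r.Name, r.StarCount)
        }
    }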
-func (cli *Client) ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) { - var results []registry.SearchResult - query := url.Values{} - query.Set("term", term) - query.Set("limit", fmt.Sprintf("%d", options.Limit)) - - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filters) - if err != nil { - return results, err - } - query.Set("filters", filterJSON) - } - - resp, err := cli.tryImageSearch(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - return results, privilegeErr - } - resp, err = cli.tryImageSearch(ctx, query, newAuthHeader) - } - if err != nil { - return results, err - } - - err = json.NewDecoder(resp.body).Decode(&results) - ensureReaderClosed(resp) - return results, err -} - -func (cli *Client) tryImageSearch(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.get(ctx, "/images/search", query, headers) -} diff --git a/vendor/src/github.com/docker/engine-api/client/image_tag.go b/vendor/src/github.com/docker/engine-api/client/image_tag.go deleted file mode 100644 index 7182913672..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/image_tag.go +++ /dev/null @@ -1,34 +0,0 @@ -package client - -import ( - "errors" - "fmt" - "net/url" - - "golang.org/x/net/context" - - distreference "github.com/docker/distribution/reference" - "github.com/docker/engine-api/types/reference" -) - -// ImageTag tags an image in the docker host -func (cli *Client) ImageTag(ctx context.Context, imageID, ref string) error { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return fmt.Errorf("Error parsing reference: %q is not a valid repository/tag", ref) - } - - if _, isCanonical := distributionRef.(distreference.Canonical); isCanonical { - return errors.New("refusing to create a tag with a digest reference") - } - - tag := reference.GetTagFromNamedRef(distributionRef) - - query := url.Values{} - query.Set("repo", distributionRef.Name()) - query.Set("tag", tag) - - resp, err := cli.post(ctx, "/images/"+imageID+"/tag", query, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/info.go b/vendor/src/github.com/docker/engine-api/client/info.go deleted file mode 100644 index ff0958d65c..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/info.go +++ /dev/null @@ -1,26 +0,0 @@ -package client - -import ( - "encoding/json" - "fmt" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// Info returns information about the docker server. 
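A minimal sketch of fetching daemon information; the printed fields are a small illustrative subset of types.Info:

    package main

    import (
        "fmt"
        "log"

        "github.com/docker/engine-api/client"
        "golang.org/x/net/context"
    )

    func main() {
        cli, err := client.NewEnvClient()
        if err != nil {
            log.Fatal(err)
        }
        info, err := cli.Info(context.Background())
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(info.Name, info.ServerVersion, info.Containers)
    }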
-func (cli *Client) Info(ctx context.Context) (types.Info, error) { - var info types.Info - serverResp, err := cli.get(ctx, "/info", url.Values{}, nil) - if err != nil { - return info, err - } - defer ensureReaderClosed(serverResp) - - if err := json.NewDecoder(serverResp.body).Decode(&info); err != nil { - return info, fmt.Errorf("Error reading remote info: %v", err) - } - - return info, nil -} diff --git a/vendor/src/github.com/docker/engine-api/client/interface.go b/vendor/src/github.com/docker/engine-api/client/interface.go deleted file mode 100644 index 1b4fa4216f..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/interface.go +++ /dev/null @@ -1,135 +0,0 @@ -package client - -import ( - "io" - "time" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/registry" - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// CommonAPIClient is the common methods between stable and experimental versions of APIClient. -type CommonAPIClient interface { - ContainerAPIClient - ImageAPIClient - NodeAPIClient - NetworkAPIClient - ServiceAPIClient - SwarmAPIClient - SystemAPIClient - VolumeAPIClient - ClientVersion() string - ServerVersion(ctx context.Context) (types.Version, error) - UpdateClientVersion(v string) -} - -// ContainerAPIClient defines API client methods for the containers -type ContainerAPIClient interface { - ContainerAttach(ctx context.Context, container string, options types.ContainerAttachOptions) (types.HijackedResponse, error) - ContainerCommit(ctx context.Context, container string, options types.ContainerCommitOptions) (types.ContainerCommitResponse, error) - ContainerCreate(ctx context.Context, config *container.Config, hostConfig *container.HostConfig, networkingConfig *network.NetworkingConfig, containerName string) (types.ContainerCreateResponse, error) - ContainerDiff(ctx context.Context, container string) ([]types.ContainerChange, error) - ContainerExecAttach(ctx context.Context, execID string, config types.ExecConfig) (types.HijackedResponse, error) - ContainerExecCreate(ctx context.Context, container string, config types.ExecConfig) (types.ContainerExecCreateResponse, error) - ContainerExecInspect(ctx context.Context, execID string) (types.ContainerExecInspect, error) - ContainerExecResize(ctx context.Context, execID string, options types.ResizeOptions) error - ContainerExecStart(ctx context.Context, execID string, config types.ExecStartCheck) error - ContainerExport(ctx context.Context, container string) (io.ReadCloser, error) - ContainerInspect(ctx context.Context, container string) (types.ContainerJSON, error) - ContainerInspectWithRaw(ctx context.Context, container string, getSize bool) (types.ContainerJSON, []byte, error) - ContainerKill(ctx context.Context, container, signal string) error - ContainerList(ctx context.Context, options types.ContainerListOptions) ([]types.Container, error) - ContainerLogs(ctx context.Context, container string, options types.ContainerLogsOptions) (io.ReadCloser, error) - ContainerPause(ctx context.Context, container string) error - ContainerRemove(ctx context.Context, container string, options types.ContainerRemoveOptions) error - ContainerRename(ctx context.Context, container, newContainerName string) error - ContainerResize(ctx context.Context, container string, options types.ResizeOptions) error - ContainerRestart(ctx 
context.Context, container string, timeout *time.Duration) error - ContainerStatPath(ctx context.Context, container, path string) (types.ContainerPathStat, error) - ContainerStats(ctx context.Context, container string, stream bool) (io.ReadCloser, error) - ContainerStart(ctx context.Context, container string, options types.ContainerStartOptions) error - ContainerStop(ctx context.Context, container string, timeout *time.Duration) error - ContainerTop(ctx context.Context, container string, arguments []string) (types.ContainerProcessList, error) - ContainerUnpause(ctx context.Context, container string) error - ContainerUpdate(ctx context.Context, container string, updateConfig container.UpdateConfig) error - ContainerWait(ctx context.Context, container string) (int, error) - CopyFromContainer(ctx context.Context, container, srcPath string) (io.ReadCloser, types.ContainerPathStat, error) - CopyToContainer(ctx context.Context, container, path string, content io.Reader, options types.CopyToContainerOptions) error -} - -// ImageAPIClient defines API client methods for the images -type ImageAPIClient interface { - ImageBuild(ctx context.Context, context io.Reader, options types.ImageBuildOptions) (types.ImageBuildResponse, error) - ImageCreate(ctx context.Context, parentReference string, options types.ImageCreateOptions) (io.ReadCloser, error) - ImageHistory(ctx context.Context, image string) ([]types.ImageHistory, error) - ImageImport(ctx context.Context, source types.ImageImportSource, ref string, options types.ImageImportOptions) (io.ReadCloser, error) - ImageInspectWithRaw(ctx context.Context, image string, getSize bool) (types.ImageInspect, []byte, error) - ImageList(ctx context.Context, options types.ImageListOptions) ([]types.Image, error) - ImageLoad(ctx context.Context, input io.Reader, quiet bool) (types.ImageLoadResponse, error) - ImagePull(ctx context.Context, ref string, options types.ImagePullOptions) (io.ReadCloser, error) - ImagePush(ctx context.Context, ref string, options types.ImagePushOptions) (io.ReadCloser, error) - ImageRemove(ctx context.Context, image string, options types.ImageRemoveOptions) ([]types.ImageDelete, error) - ImageSearch(ctx context.Context, term string, options types.ImageSearchOptions) ([]registry.SearchResult, error) - ImageSave(ctx context.Context, images []string) (io.ReadCloser, error) - ImageTag(ctx context.Context, image, ref string) error -} - -// NetworkAPIClient defines API client methods for the networks -type NetworkAPIClient interface { - NetworkConnect(ctx context.Context, networkID, container string, config *network.EndpointSettings) error - NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) - NetworkDisconnect(ctx context.Context, networkID, container string, force bool) error - NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) - NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) - NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) - NetworkRemove(ctx context.Context, networkID string) error -} - -// NodeAPIClient defines API client methods for the nodes -type NodeAPIClient interface { - NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) - NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) - NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error - 
NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error -} - -// ServiceAPIClient defines API client methods for the services -type ServiceAPIClient interface { - ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) - ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) - ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) - ServiceRemove(ctx context.Context, serviceID string) error - ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) error - TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) - TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) -} - -// SwarmAPIClient defines API client methods for the swarm -type SwarmAPIClient interface { - SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) - SwarmJoin(ctx context.Context, req swarm.JoinRequest) error - SwarmLeave(ctx context.Context, force bool) error - SwarmInspect(ctx context.Context) (swarm.Swarm, error) - SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error -} - -// SystemAPIClient defines API client methods for the system -type SystemAPIClient interface { - Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) - Info(ctx context.Context) (types.Info, error) - RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) -} - -// VolumeAPIClient defines API client methods for the volumes -type VolumeAPIClient interface { - VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) - VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) - VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) - VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) - VolumeRemove(ctx context.Context, volumeID string) error -} diff --git a/vendor/src/github.com/docker/engine-api/client/interface_experimental.go b/vendor/src/github.com/docker/engine-api/client/interface_experimental.go deleted file mode 100644 index eb0cd7bf14..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/interface_experimental.go +++ /dev/null @@ -1,37 +0,0 @@ -// +build experimental - -package client - -import ( - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// APIClient is an interface that clients that talk with a docker server must implement. 
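Because the client's surface is split into per-domain interfaces, a consumer can depend on just the slice it needs and swap in a test double. A sketch under that assumption; fakeSystem is an invented stand-in, not part of the package:

```go
package main

import (
	"fmt"
	"io"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

// printServerName needs only SystemAPIClient, not the full APIClient,
// so tests can hand it a three-method fake instead of a live daemon.
func printServerName(ctx context.Context, c client.SystemAPIClient) error {
	info, err := c.Info(ctx)
	if err != nil {
		return err
	}
	fmt.Println(info.Name)
	return nil
}

// fakeSystem is an invented test double implementing SystemAPIClient.
type fakeSystem struct{}

func (fakeSystem) Events(ctx context.Context, options types.EventsOptions) (io.ReadCloser, error) {
	return nil, nil
}

func (fakeSystem) Info(ctx context.Context) (types.Info, error) {
	return types.Info{Name: "test-node"}, nil
}

func (fakeSystem) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) {
	return types.AuthResponse{}, nil
}

func main() {
	if err := printServerName(context.Background(), fakeSystem{}); err != nil {
		fmt.Println(err)
	}
}
```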
-type APIClient interface {
- CommonAPIClient
- CheckpointAPIClient
- PluginAPIClient
-}
-
-// CheckpointAPIClient defines API client methods for the checkpoints
-type CheckpointAPIClient interface {
- CheckpointCreate(ctx context.Context, container string, options types.CheckpointCreateOptions) error
- CheckpointDelete(ctx context.Context, container string, checkpointID string) error
- CheckpointList(ctx context.Context, container string) ([]types.Checkpoint, error)
-}
-
-// PluginAPIClient defines API client methods for the plugins
-type PluginAPIClient interface {
- PluginList(ctx context.Context) (types.PluginsListResponse, error)
- PluginRemove(ctx context.Context, name string) error
- PluginEnable(ctx context.Context, name string) error
- PluginDisable(ctx context.Context, name string) error
- PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error
- PluginPush(ctx context.Context, name string, registryAuth string) error
- PluginSet(ctx context.Context, name string, args []string) error
- PluginInspect(ctx context.Context, name string) (*types.Plugin, error)
-}
-
-// Ensure that Client always implements APIClient.
-var _ APIClient = &Client{}
diff --git a/vendor/src/github.com/docker/engine-api/client/interface_stable.go b/vendor/src/github.com/docker/engine-api/client/interface_stable.go
deleted file mode 100644
index 496f522d51..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/interface_stable.go
+++ /dev/null
@@ -1,11 +0,0 @@
-// +build !experimental
-
-package client
-
-// APIClient is an interface that clients that talk with a docker server must implement.
-type APIClient interface {
- CommonAPIClient
-}
-
-// Ensure that Client always implements APIClient.
-var _ APIClient = &Client{}
diff --git a/vendor/src/github.com/docker/engine-api/client/login.go b/vendor/src/github.com/docker/engine-api/client/login.go
deleted file mode 100644
index 482f94789f..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/login.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/http"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "golang.org/x/net/context"
-)
-
-// RegistryLogin authenticates the docker server with a given docker registry.
-// It returns unauthorizedError when the authentication fails.
-func (cli *Client) RegistryLogin(ctx context.Context, auth types.AuthConfig) (types.AuthResponse, error) {
- resp, err := cli.post(ctx, "/auth", url.Values{}, auth, nil)
-
- if resp != nil && resp.statusCode == http.StatusUnauthorized {
- return types.AuthResponse{}, unauthorizedError{err}
- }
- if err != nil {
- return types.AuthResponse{}, err
- }
-
- var response types.AuthResponse
- err = json.NewDecoder(resp.body).Decode(&response)
- ensureReaderClosed(resp)
- return response, err
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/network_connect.go b/vendor/src/github.com/docker/engine-api/client/network_connect.go
deleted file mode 100644
index 9a402a3e63..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/network_connect.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client
-
-import (
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/network"
- "golang.org/x/net/context"
-)
-
-// NetworkConnect connects a container to an existent network in the docker host.
-func (cli *Client) NetworkConnect(ctx context.Context, networkID, containerID string, config *network.EndpointSettings) error { - nc := types.NetworkConnect{ - Container: containerID, - EndpointConfig: config, - } - resp, err := cli.post(ctx, "/networks/"+networkID+"/connect", nil, nc, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/network_create.go b/vendor/src/github.com/docker/engine-api/client/network_create.go deleted file mode 100644 index c9c0b9fde7..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/network_create.go +++ /dev/null @@ -1,25 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// NetworkCreate creates a new network in the docker host. -func (cli *Client) NetworkCreate(ctx context.Context, name string, options types.NetworkCreate) (types.NetworkCreateResponse, error) { - networkCreateRequest := types.NetworkCreateRequest{ - NetworkCreate: options, - Name: name, - } - var response types.NetworkCreateResponse - serverResp, err := cli.post(ctx, "/networks/create", nil, networkCreateRequest, nil) - if err != nil { - return response, err - } - - json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/network_disconnect.go b/vendor/src/github.com/docker/engine-api/client/network_disconnect.go deleted file mode 100644 index a3e33672fe..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/network_disconnect.go +++ /dev/null @@ -1,14 +0,0 @@ -package client - -import ( - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// NetworkDisconnect disconnects a container from an existent network in the docker host. -func (cli *Client) NetworkDisconnect(ctx context.Context, networkID, containerID string, force bool) error { - nd := types.NetworkDisconnect{Container: containerID, Force: force} - resp, err := cli.post(ctx, "/networks/"+networkID+"/disconnect", nil, nd, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/network_inspect.go b/vendor/src/github.com/docker/engine-api/client/network_inspect.go deleted file mode 100644 index e22fcd6710..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/network_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// NetworkInspect returns the information for a specific network configured in the docker host. -func (cli *Client) NetworkInspect(ctx context.Context, networkID string) (types.NetworkResource, error) { - networkResource, _, err := cli.NetworkInspectWithRaw(ctx, networkID) - return networkResource, err -} - -// NetworkInspectWithRaw returns the information for a specific network configured in the docker host and its raw representation. 
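Taken together, NetworkCreate and NetworkConnect support the usual create-then-attach flow. A short sketch, assuming NewEnvClient and placeholder names for the network and container:

```go
package main

import (
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Create a user-defined bridge network; CheckDuplicate asks the
	// daemon to reject a second network with the same name.
	resp, err := cli.NetworkCreate(ctx, "app-net", types.NetworkCreate{
		Driver:         "bridge",
		CheckDuplicate: true,
	})
	if err != nil {
		log.Fatal(err)
	}

	// A nil *network.EndpointSettings asks the daemon for defaults.
	if err := cli.NetworkConnect(ctx, resp.ID, "my-container", nil); err != nil {
		log.Fatal(err)
	}
}
```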
-func (cli *Client) NetworkInspectWithRaw(ctx context.Context, networkID string) (types.NetworkResource, []byte, error) { - var networkResource types.NetworkResource - resp, err := cli.get(ctx, "/networks/"+networkID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return networkResource, nil, networkNotFoundError{networkID} - } - return networkResource, nil, err - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return networkResource, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&networkResource) - return networkResource, body, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/network_list.go b/vendor/src/github.com/docker/engine-api/client/network_list.go deleted file mode 100644 index 0569552496..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/network_list.go +++ /dev/null @@ -1,31 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// NetworkList returns the list of networks configured in the docker host. -func (cli *Client) NetworkList(ctx context.Context, options types.NetworkListOptions) ([]types.NetworkResource, error) { - query := url.Values{} - if options.Filters.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, options.Filters) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - var networkResources []types.NetworkResource - resp, err := cli.get(ctx, "/networks", query, nil) - if err != nil { - return networkResources, err - } - err = json.NewDecoder(resp.body).Decode(&networkResources) - ensureReaderClosed(resp) - return networkResources, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/network_remove.go b/vendor/src/github.com/docker/engine-api/client/network_remove.go deleted file mode 100644 index 6bd6748924..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/network_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// NetworkRemove removes an existent network from the docker host. -func (cli *Client) NetworkRemove(ctx context.Context, networkID string) error { - resp, err := cli.delete(ctx, "/networks/"+networkID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/node_inspect.go b/vendor/src/github.com/docker/engine-api/client/node_inspect.go deleted file mode 100644 index 5f555bb357..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/node_inspect.go +++ /dev/null @@ -1,33 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// NodeInspectWithRaw returns the node information. 
-func (cli *Client) NodeInspectWithRaw(ctx context.Context, nodeID string) (swarm.Node, []byte, error) { - serverResp, err := cli.get(ctx, "/nodes/"+nodeID, nil, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Node{}, nil, nodeNotFoundError{nodeID} - } - return swarm.Node{}, nil, err - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return swarm.Node{}, nil, err - } - - var response swarm.Node - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/node_list.go b/vendor/src/github.com/docker/engine-api/client/node_list.go deleted file mode 100644 index 57cf14827d..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/node_list.go +++ /dev/null @@ -1,36 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// NodeList returns the list of nodes. -func (cli *Client) NodeList(ctx context.Context, options types.NodeListOptions) ([]swarm.Node, error) { - query := url.Values{} - - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filter) - - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/nodes", query, nil) - if err != nil { - return nil, err - } - - var nodes []swarm.Node - err = json.NewDecoder(resp.body).Decode(&nodes) - ensureReaderClosed(resp) - return nodes, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/node_remove.go b/vendor/src/github.com/docker/engine-api/client/node_remove.go deleted file mode 100644 index a9cf8ba857..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/node_remove.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -import ( - "net/url" - - "github.com/docker/engine-api/types" - - "golang.org/x/net/context" -) - -// NodeRemove removes a Node. -func (cli *Client) NodeRemove(ctx context.Context, nodeID string, options types.NodeRemoveOptions) error { - query := url.Values{} - if options.Force { - query.Set("force", "1") - } - - resp, err := cli.delete(ctx, "/nodes/"+nodeID, query, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/node_update.go b/vendor/src/github.com/docker/engine-api/client/node_update.go deleted file mode 100644 index 4722211517..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/node_update.go +++ /dev/null @@ -1,18 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// NodeUpdate updates a Node. 
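NodeList forwards a filters.Args under the (singular) Filter field of NodeListOptions. A sketch that lists only manager nodes, assuming the daemon supports the role filter:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"github.com/docker/engine-api/types/filters"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// Filters are serialized to JSON and passed as a single query
	// parameter, exactly as NodeList does above.
	f := filters.NewArgs()
	f.Add("role", "manager")

	nodes, err := cli.NodeList(context.Background(), types.NodeListOptions{Filter: f})
	if err != nil {
		log.Fatal(err)
	}
	for _, n := range nodes {
		fmt.Println(n.ID, n.Spec.Role)
	}
}
```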
-func (cli *Client) NodeUpdate(ctx context.Context, nodeID string, version swarm.Version, node swarm.NodeSpec) error { - query := url.Values{} - query.Set("version", strconv.FormatUint(version.Index, 10)) - resp, err := cli.post(ctx, "/nodes/"+nodeID+"/update", query, node, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_disable.go b/vendor/src/github.com/docker/engine-api/client/plugin_disable.go deleted file mode 100644 index 893fc6e823..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/plugin_disable.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build experimental - -package client - -import ( - "golang.org/x/net/context" -) - -// PluginDisable disables a plugin -func (cli *Client) PluginDisable(ctx context.Context, name string) error { - resp, err := cli.post(ctx, "/plugins/"+name+"/disable", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_enable.go b/vendor/src/github.com/docker/engine-api/client/plugin_enable.go deleted file mode 100644 index 84422abc79..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/plugin_enable.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build experimental - -package client - -import ( - "golang.org/x/net/context" -) - -// PluginEnable enables a plugin -func (cli *Client) PluginEnable(ctx context.Context, name string) error { - resp, err := cli.post(ctx, "/plugins/"+name+"/enable", nil, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_inspect.go b/vendor/src/github.com/docker/engine-api/client/plugin_inspect.go deleted file mode 100644 index b4bcc20069..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/plugin_inspect.go +++ /dev/null @@ -1,22 +0,0 @@ -// +build experimental - -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// PluginInspect inspects an existing plugin -func (cli *Client) PluginInspect(ctx context.Context, name string) (*types.Plugin, error) { - var p types.Plugin - resp, err := cli.get(ctx, "/plugins/"+name, nil, nil) - if err != nil { - return nil, err - } - err = json.NewDecoder(resp.body).Decode(&p) - ensureReaderClosed(resp) - return &p, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_install.go b/vendor/src/github.com/docker/engine-api/client/plugin_install.go deleted file mode 100644 index 3f5e59ff5d..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/plugin_install.go +++ /dev/null @@ -1,59 +0,0 @@ -// +build experimental - -package client - -import ( - "encoding/json" - "net/http" - "net/url" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// PluginInstall installs a plugin -func (cli *Client) PluginInstall(ctx context.Context, name string, options types.PluginInstallOptions) error { - // FIXME(vdemeester) name is a ref, we might want to parse/validate it here. 
- query := url.Values{} - query.Set("name", name) - resp, err := cli.tryPluginPull(ctx, query, options.RegistryAuth) - if resp.statusCode == http.StatusUnauthorized && options.PrivilegeFunc != nil { - newAuthHeader, privilegeErr := options.PrivilegeFunc() - if privilegeErr != nil { - ensureReaderClosed(resp) - return privilegeErr - } - resp, err = cli.tryPluginPull(ctx, query, newAuthHeader) - } - if err != nil { - ensureReaderClosed(resp) - return err - } - var privileges types.PluginPrivileges - if err := json.NewDecoder(resp.body).Decode(&privileges); err != nil { - ensureReaderClosed(resp) - return err - } - ensureReaderClosed(resp) - - if !options.AcceptAllPermissions && options.AcceptPermissionsFunc != nil && len(privileges) > 0 { - accept, err := options.AcceptPermissionsFunc(privileges) - if err != nil { - return err - } - if !accept { - resp, _ := cli.delete(ctx, "/plugins/"+name, nil, nil) - ensureReaderClosed(resp) - return pluginPermissionDenied{name} - } - } - if options.Disabled { - return nil - } - return cli.PluginEnable(ctx, name) -} - -func (cli *Client) tryPluginPull(ctx context.Context, query url.Values, registryAuth string) (*serverResponse, error) { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - return cli.post(ctx, "/plugins/pull", query, nil, headers) -} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_list.go b/vendor/src/github.com/docker/engine-api/client/plugin_list.go deleted file mode 100644 index 7f2e2f21f3..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/plugin_list.go +++ /dev/null @@ -1,23 +0,0 @@ -// +build experimental - -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// PluginList returns the installed plugins -func (cli *Client) PluginList(ctx context.Context) (types.PluginsListResponse, error) { - var plugins types.PluginsListResponse - resp, err := cli.get(ctx, "/plugins", nil, nil) - if err != nil { - return plugins, err - } - - err = json.NewDecoder(resp.body).Decode(&plugins) - ensureReaderClosed(resp) - return plugins, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_push.go b/vendor/src/github.com/docker/engine-api/client/plugin_push.go deleted file mode 100644 index 3afea5ed79..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/plugin_push.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build experimental - -package client - -import ( - "golang.org/x/net/context" -) - -// PluginPush pushes a plugin to a registry -func (cli *Client) PluginPush(ctx context.Context, name string, registryAuth string) error { - headers := map[string][]string{"X-Registry-Auth": {registryAuth}} - resp, err := cli.post(ctx, "/plugins/"+name+"/push", nil, nil, headers) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_remove.go b/vendor/src/github.com/docker/engine-api/client/plugin_remove.go deleted file mode 100644 index baf666556b..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/plugin_remove.go +++ /dev/null @@ -1,14 +0,0 @@ -// +build experimental - -package client - -import ( - "golang.org/x/net/context" -) - -// PluginRemove removes a plugin -func (cli *Client) PluginRemove(ctx context.Context, name string) error { - resp, err := cli.delete(ctx, "/plugins/"+name, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/plugin_set.go 
b/vendor/src/github.com/docker/engine-api/client/plugin_set.go
deleted file mode 100644
index fb40f38b22..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/plugin_set.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// +build experimental
-
-package client
-
-import (
- "golang.org/x/net/context"
-)
-
-// PluginSet modifies settings for an existing plugin
-func (cli *Client) PluginSet(ctx context.Context, name string, args []string) error {
- resp, err := cli.post(ctx, "/plugins/"+name+"/set", nil, args, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/request.go b/vendor/src/github.com/docker/engine-api/client/request.go
deleted file mode 100644
index 854901559b..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/request.go
+++ /dev/null
@@ -1,207 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "fmt"
- "io"
- "io/ioutil"
- "net/http"
- "net/url"
- "strings"
-
- "github.com/docker/engine-api/client/transport/cancellable"
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/versions"
- "golang.org/x/net/context"
-)
-
-// serverResponse is a wrapper for http API responses.
-type serverResponse struct {
- body io.ReadCloser
- header http.Header
- statusCode int
-}
-
-// head sends an http request to the docker API using the method HEAD.
-func (cli *Client) head(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
- return cli.sendRequest(ctx, "HEAD", path, query, nil, headers)
-}
-
-// get sends an http request to the docker API using the method GET with a specific go context.
-func (cli *Client) get(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) {
- return cli.sendRequest(ctx, "GET", path, query, nil, headers)
-}
-
-// post sends an http request to the docker API using the method POST with a specific go context.
-func (cli *Client) post(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
- return cli.sendRequest(ctx, "POST", path, query, obj, headers)
-}
-
-func (cli *Client) postRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
- return cli.sendClientRequest(ctx, "POST", path, query, body, headers)
-}
-
-// put sends an http request to the docker API using the method PUT.
-func (cli *Client) put(ctx context.Context, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) {
- return cli.sendRequest(ctx, "PUT", path, query, obj, headers)
-}
-
-// putRaw sends an http request to the docker API using the method PUT with a raw body.
-func (cli *Client) putRaw(ctx context.Context, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) {
- return cli.sendClientRequest(ctx, "PUT", path, query, body, headers)
-}
-
-// delete sends an http request to the docker API using the method DELETE.
-func (cli *Client) delete(ctx context.Context, path string, query url.Values, headers map[string][]string) (*serverResponse, error) { - return cli.sendRequest(ctx, "DELETE", path, query, nil, headers) -} - -func (cli *Client) sendRequest(ctx context.Context, method, path string, query url.Values, obj interface{}, headers map[string][]string) (*serverResponse, error) { - var body io.Reader - - if obj != nil { - var err error - body, err = encodeData(obj) - if err != nil { - return nil, err - } - if headers == nil { - headers = make(map[string][]string) - } - headers["Content-Type"] = []string{"application/json"} - } - - return cli.sendClientRequest(ctx, method, path, query, body, headers) -} - -func (cli *Client) sendClientRequest(ctx context.Context, method, path string, query url.Values, body io.Reader, headers map[string][]string) (*serverResponse, error) { - serverResp := &serverResponse{ - body: nil, - statusCode: -1, - } - - expectedPayload := (method == "POST" || method == "PUT") - if expectedPayload && body == nil { - body = bytes.NewReader([]byte{}) - } - - req, err := cli.newRequest(method, path, query, body, headers) - if err != nil { - return serverResp, err - } - - if cli.proto == "unix" || cli.proto == "npipe" { - // For local communications, it doesn't matter what the host is. We just - // need a valid and meaningful host name. (See #189) - req.Host = "docker" - } - req.URL.Host = cli.addr - req.URL.Scheme = cli.transport.Scheme() - - if expectedPayload && req.Header.Get("Content-Type") == "" { - req.Header.Set("Content-Type", "text/plain") - } - - resp, err := cancellable.Do(ctx, cli.transport, req) - if err != nil { - if isTimeout(err) || strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") { - return serverResp, ErrConnectionFailed - } - - if !cli.transport.Secure() && strings.Contains(err.Error(), "malformed HTTP response") { - return serverResp, fmt.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err) - } - - if cli.transport.Secure() && strings.Contains(err.Error(), "bad certificate") { - return serverResp, fmt.Errorf("The server probably has client authentication (--tlsverify) enabled. 
Please check your TLS client certification settings: %v", err) - } - - return serverResp, fmt.Errorf("An error occurred trying to connect: %v", err) - } - - if resp != nil { - serverResp.statusCode = resp.StatusCode - } - - if serverResp.statusCode < 200 || serverResp.statusCode >= 400 { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return serverResp, err - } - if len(body) == 0 { - return serverResp, fmt.Errorf("Error: request returned %s for API route and version %s, check if the server supports the requested API version", http.StatusText(serverResp.statusCode), req.URL) - } - - var errorMessage string - if (cli.version == "" || versions.GreaterThan(cli.version, "1.23")) && - resp.Header.Get("Content-Type") == "application/json" { - var errorResponse types.ErrorResponse - if err := json.Unmarshal(body, &errorResponse); err != nil { - return serverResp, fmt.Errorf("Error reading JSON: %v", err) - } - errorMessage = errorResponse.Message - } else { - errorMessage = string(body) - } - - return serverResp, fmt.Errorf("Error response from daemon: %s", strings.TrimSpace(errorMessage)) - } - - serverResp.body = resp.Body - serverResp.header = resp.Header - return serverResp, nil -} - -func (cli *Client) newRequest(method, path string, query url.Values, body io.Reader, headers map[string][]string) (*http.Request, error) { - apiPath := cli.getAPIPath(path, query) - req, err := http.NewRequest(method, apiPath, body) - if err != nil { - return nil, err - } - - // Add CLI Config's HTTP Headers BEFORE we set the Docker headers - // then the user can't change OUR headers - for k, v := range cli.customHTTPHeaders { - req.Header.Set(k, v) - } - - if headers != nil { - for k, v := range headers { - req.Header[k] = v - } - } - - return req, nil -} - -func encodeData(data interface{}) (*bytes.Buffer, error) { - params := bytes.NewBuffer(nil) - if data != nil { - if err := json.NewEncoder(params).Encode(data); err != nil { - return nil, err - } - } - return params, nil -} - -func ensureReaderClosed(response *serverResponse) { - if response != nil && response.body != nil { - // Drain up to 512 bytes and close the body to let the Transport reuse the connection - io.CopyN(ioutil.Discard, response.body, 512) - response.body.Close() - } -} - -func isTimeout(err error) bool { - type timeout interface { - Timeout() bool - } - e := err - switch urlErr := err.(type) { - case *url.Error: - e = urlErr.Err - } - t, ok := e.(timeout) - return ok && t.Timeout() -} diff --git a/vendor/src/github.com/docker/engine-api/client/service_create.go b/vendor/src/github.com/docker/engine-api/client/service_create.go deleted file mode 100644 index 7349a984e7..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/service_create.go +++ /dev/null @@ -1,30 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// ServiceCreate creates a new Service. 
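The ensureReaderClosed helper in request.go drains at most 512 bytes before closing the body, which lets net/http return the keep-alive connection to its idle pool instead of discarding it. The idiom is independent of the Docker client; a standard-library-only sketch:

```go
package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
)

// drainAndClose mirrors ensureReaderClosed: reading the (bounded)
// remainder of the body before Close lets net/http reuse the
// keep-alive connection instead of tearing it down.
func drainAndClose(body io.ReadCloser) {
	if body == nil {
		return
	}
	io.CopyN(ioutil.Discard, body, 512)
	body.Close()
}

func main() {
	resp, err := http.Get("http://example.com/")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer drainAndClose(resp.Body)
	fmt.Println(resp.Status)
}
```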
-func (cli *Client) ServiceCreate(ctx context.Context, service swarm.ServiceSpec, options types.ServiceCreateOptions) (types.ServiceCreateResponse, error) { - var headers map[string][]string - - if options.EncodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": []string{options.EncodedRegistryAuth}, - } - } - - var response types.ServiceCreateResponse - resp, err := cli.post(ctx, "/services/create", nil, service, headers) - if err != nil { - return response, err - } - - err = json.NewDecoder(resp.body).Decode(&response) - ensureReaderClosed(resp) - return response, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/service_inspect.go b/vendor/src/github.com/docker/engine-api/client/service_inspect.go deleted file mode 100644 index 958cd662ea..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/service_inspect.go +++ /dev/null @@ -1,33 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// ServiceInspectWithRaw returns the service information and the raw data. -func (cli *Client) ServiceInspectWithRaw(ctx context.Context, serviceID string) (swarm.Service, []byte, error) { - serverResp, err := cli.get(ctx, "/services/"+serviceID, nil, nil) - if err != nil { - if serverResp.statusCode == http.StatusNotFound { - return swarm.Service{}, nil, serviceNotFoundError{serviceID} - } - return swarm.Service{}, nil, err - } - defer ensureReaderClosed(serverResp) - - body, err := ioutil.ReadAll(serverResp.body) - if err != nil { - return swarm.Service{}, nil, err - } - - var response swarm.Service - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&response) - return response, body, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/service_list.go b/vendor/src/github.com/docker/engine-api/client/service_list.go deleted file mode 100644 index b48964aa0f..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/service_list.go +++ /dev/null @@ -1,35 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// ServiceList returns the list of services. -func (cli *Client) ServiceList(ctx context.Context, options types.ServiceListOptions) ([]swarm.Service, error) { - query := url.Values{} - - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filter) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/services", query, nil) - if err != nil { - return nil, err - } - - var services []swarm.Service - err = json.NewDecoder(resp.body).Decode(&services) - ensureReaderClosed(resp) - return services, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/service_remove.go b/vendor/src/github.com/docker/engine-api/client/service_remove.go deleted file mode 100644 index a9331f92c2..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/service_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// ServiceRemove kills and removes a service. 
-func (cli *Client) ServiceRemove(ctx context.Context, serviceID string) error { - resp, err := cli.delete(ctx, "/services/"+serviceID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/service_update.go b/vendor/src/github.com/docker/engine-api/client/service_update.go deleted file mode 100644 index ee8b46126b..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/service_update.go +++ /dev/null @@ -1,30 +0,0 @@ -package client - -import ( - "net/url" - "strconv" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// ServiceUpdate updates a Service. -func (cli *Client) ServiceUpdate(ctx context.Context, serviceID string, version swarm.Version, service swarm.ServiceSpec, options types.ServiceUpdateOptions) error { - var ( - headers map[string][]string - query = url.Values{} - ) - - if options.EncodedRegistryAuth != "" { - headers = map[string][]string{ - "X-Registry-Auth": []string{options.EncodedRegistryAuth}, - } - } - - query.Set("version", strconv.FormatUint(version.Index, 10)) - - resp, err := cli.post(ctx, "/services/"+serviceID+"/update", query, service, headers) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_init.go b/vendor/src/github.com/docker/engine-api/client/swarm_init.go deleted file mode 100644 index 68f0a744a2..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/swarm_init.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// SwarmInit initializes the Swarm. -func (cli *Client) SwarmInit(ctx context.Context, req swarm.InitRequest) (string, error) { - serverResp, err := cli.post(ctx, "/swarm/init", nil, req, nil) - if err != nil { - return "", err - } - - var response string - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_inspect.go b/vendor/src/github.com/docker/engine-api/client/swarm_inspect.go deleted file mode 100644 index d67c7c010b..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/swarm_inspect.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// SwarmInspect inspects the Swarm. -func (cli *Client) SwarmInspect(ctx context.Context) (swarm.Swarm, error) { - serverResp, err := cli.get(ctx, "/swarm", nil, nil) - if err != nil { - return swarm.Swarm{}, err - } - - var response swarm.Swarm - err = json.NewDecoder(serverResp.body).Decode(&response) - ensureReaderClosed(serverResp) - return response, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_join.go b/vendor/src/github.com/docker/engine-api/client/swarm_join.go deleted file mode 100644 index a9b14e0c48..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/swarm_join.go +++ /dev/null @@ -1,13 +0,0 @@ -package client - -import ( - "github.com/docker/engine-api/types/swarm" - "golang.org/x/net/context" -) - -// SwarmJoin joins the Swarm. 
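ServiceUpdate takes an explicit swarm.Version, so the usual pattern is read-modify-write with optimistic locking: inspect the service, mutate the returned spec, and send it back with the version you read. A sketch scaling a hypothetical "my-service" to three replicas:

```go
package main

import (
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// The daemon rejects the update unless the version we send back
	// matches its current one, so read the spec and version first.
	service, _, err := cli.ServiceInspectWithRaw(ctx, "my-service")
	if err != nil {
		log.Fatal(err)
	}
	if service.Spec.Mode.Replicated == nil {
		log.Fatal("not a replicated service")
	}

	replicas := uint64(3)
	service.Spec.Mode.Replicated.Replicas = &replicas

	err = cli.ServiceUpdate(ctx, service.ID, service.Version,
		service.Spec, types.ServiceUpdateOptions{})
	if err != nil {
		log.Fatal(err)
	}
}
```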
-func (cli *Client) SwarmJoin(ctx context.Context, req swarm.JoinRequest) error {
- resp, err := cli.post(ctx, "/swarm/join", nil, req, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_leave.go b/vendor/src/github.com/docker/engine-api/client/swarm_leave.go
deleted file mode 100644
index a4df732174..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/swarm_leave.go
+++ /dev/null
@@ -1,18 +0,0 @@
-package client
-
-import (
- "net/url"
-
- "golang.org/x/net/context"
-)
-
-// SwarmLeave leaves the Swarm.
-func (cli *Client) SwarmLeave(ctx context.Context, force bool) error {
- query := url.Values{}
- if force {
- query.Set("force", "1")
- }
- resp, err := cli.post(ctx, "/swarm/leave", query, nil, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/swarm_update.go b/vendor/src/github.com/docker/engine-api/client/swarm_update.go
deleted file mode 100644
index 5adec81ca4..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/swarm_update.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package client
-
-import (
- "fmt"
- "net/url"
- "strconv"
-
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// SwarmUpdate updates the Swarm.
-func (cli *Client) SwarmUpdate(ctx context.Context, version swarm.Version, swarm swarm.Spec, flags swarm.UpdateFlags) error {
- query := url.Values{}
- query.Set("version", strconv.FormatUint(version.Index, 10))
- query.Set("rotateWorkerToken", fmt.Sprintf("%v", flags.RotateWorkerToken))
- query.Set("rotateManagerToken", fmt.Sprintf("%v", flags.RotateManagerToken))
- resp, err := cli.post(ctx, "/swarm/update", query, swarm, nil)
- ensureReaderClosed(resp)
- return err
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/task_inspect.go b/vendor/src/github.com/docker/engine-api/client/task_inspect.go
deleted file mode 100644
index 3cac8882ef..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/task_inspect.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package client
-
-import (
- "bytes"
- "encoding/json"
- "io/ioutil"
- "net/http"
-
- "github.com/docker/engine-api/types/swarm"
-
- "golang.org/x/net/context"
-)
-
-// TaskInspectWithRaw returns the task information and its raw representation.
-func (cli *Client) TaskInspectWithRaw(ctx context.Context, taskID string) (swarm.Task, []byte, error) {
- serverResp, err := cli.get(ctx, "/tasks/"+taskID, nil, nil)
- if err != nil {
- if serverResp.statusCode == http.StatusNotFound {
- return swarm.Task{}, nil, taskNotFoundError{taskID}
- }
- return swarm.Task{}, nil, err
- }
- defer ensureReaderClosed(serverResp)
-
- body, err := ioutil.ReadAll(serverResp.body)
- if err != nil {
- return swarm.Task{}, nil, err
- }
-
- var response swarm.Task
- rdr := bytes.NewReader(body)
- err = json.NewDecoder(rdr).Decode(&response)
- return response, body, err
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/task_list.go b/vendor/src/github.com/docker/engine-api/client/task_list.go
deleted file mode 100644
index 4604513caf..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/task_list.go
+++ /dev/null
@@ -1,35 +0,0 @@
-package client
-
-import (
- "encoding/json"
- "net/url"
-
- "github.com/docker/engine-api/types"
- "github.com/docker/engine-api/types/filters"
- "github.com/docker/engine-api/types/swarm"
- "golang.org/x/net/context"
-)
-
-// TaskList returns the list of tasks.
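SwarmUpdate above threads the rotate flags through as query parameters, so rotating a join token is just an inspect followed by an update with the current spec left unchanged. A sketch under the same NewEnvClient assumption:

```go
package main

import (
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types/swarm"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}
	ctx := context.Background()

	// Token rotation reuses the existing spec; only the flags and the
	// version (for optimistic locking) matter here.
	sw, err := cli.SwarmInspect(ctx)
	if err != nil {
		log.Fatal(err)
	}

	flags := swarm.UpdateFlags{RotateWorkerToken: true}
	if err := cli.SwarmUpdate(ctx, sw.Version, sw.Spec, flags); err != nil {
		log.Fatal(err)
	}
}
```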
-func (cli *Client) TaskList(ctx context.Context, options types.TaskListOptions) ([]swarm.Task, error) { - query := url.Values{} - - if options.Filter.Len() > 0 { - filterJSON, err := filters.ToParam(options.Filter) - if err != nil { - return nil, err - } - - query.Set("filters", filterJSON) - } - - resp, err := cli.get(ctx, "/tasks", query, nil) - if err != nil { - return nil, err - } - - var tasks []swarm.Task - err = json.NewDecoder(resp.body).Decode(&tasks) - ensureReaderClosed(resp) - return tasks, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE deleted file mode 100644 index 6a66aea5ea..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go deleted file mode 100644 index 11dff60026..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.5 - -package cancellable - -import ( - "net/http" - - "github.com/docker/engine-api/client/transport" -) - -func canceler(client transport.Sender, req *http.Request) func() { - // TODO(djd): Respect any existing value of req.Cancel. 
- ch := make(chan struct{})
- req.Cancel = ch
-
- return func() {
- close(ch)
- }
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go
deleted file mode 100644
index 8ff2845c28..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/canceler_go14.go
+++ /dev/null
@@ -1,27 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !go1.5
-
-package cancellable
-
-import (
- "net/http"
-
- "github.com/docker/engine-api/client/transport"
-)
-
-type requestCanceler interface {
- CancelRequest(*http.Request)
-}
-
-func canceler(client transport.Sender, req *http.Request) func() {
- rc, ok := client.(requestCanceler)
- if !ok {
- return func() {}
- }
- return func() {
- rc.CancelRequest(req)
- }
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go b/vendor/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go
deleted file mode 100644
index 1394149574..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/transport/cancellable/cancellable.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright 2015 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package cancellable provides helper functions to cancel http requests.
-package cancellable
-
-import (
- "io"
- "net/http"
-
- "github.com/docker/engine-api/client/transport"
-
- "golang.org/x/net/context"
-)
-
-func nop() {}
-
-var (
- testHookContextDoneBeforeHeaders = nop
- testHookDoReturned = nop
- testHookDidBodyClose = nop
-)
-
-// Do sends an HTTP request with the provided transport.Sender and returns an HTTP response.
-// If the client is nil, http.DefaultClient is used.
-// If the context is canceled or times out, ctx.Err() will be returned.
-//
-// FORK INFORMATION:
-//
-// This function deviates from the upstream version in golang.org/x/net/context/ctxhttp by
-// taking a Sender interface rather than a *http.Client directly. That allows us to use
-// this function with mocked clients and hijacked connections.
-func Do(ctx context.Context, client transport.Sender, req *http.Request) (*http.Response, error) {
- if client == nil {
- client = http.DefaultClient
- }
-
- // Request cancelation changed in Go 1.5, see canceler.go and canceler_go14.go.
- cancel := canceler(client, req)
-
- type responseAndError struct {
- resp *http.Response
- err error
- }
- result := make(chan responseAndError, 1)
-
- go func() {
- resp, err := client.Do(req)
- testHookDoReturned()
- result <- responseAndError{resp, err}
- }()
-
- var resp *http.Response
-
- select {
- case <-ctx.Done():
- testHookContextDoneBeforeHeaders()
- cancel()
- // Clean up after the goroutine calling client.Do:
- go func() {
- if r := <-result; r.resp != nil && r.resp.Body != nil {
- testHookDidBodyClose()
- r.resp.Body.Close()
- }
- }()
- return nil, ctx.Err()
- case r := <-result:
- var err error
- resp, err = r.resp, r.err
- if err != nil {
- return resp, err
- }
- }
-
- c := make(chan struct{})
- go func() {
- select {
- case <-ctx.Done():
- cancel()
- case <-c:
- // The response's Body is closed.
- }()
- resp.Body = &notifyingReader{resp.Body, c}
-
- return resp, nil
-}
-
-// notifyingReader is an io.ReadCloser that closes the notify channel after
-// Close is called or a Read fails on the underlying ReadCloser.
-type notifyingReader struct {
- io.ReadCloser
- notify chan<- struct{}
-}
-
-func (r *notifyingReader) Read(p []byte) (int, error) {
- n, err := r.ReadCloser.Read(p)
- if err != nil && r.notify != nil {
- close(r.notify)
- r.notify = nil
- }
- return n, err
-}
-
-func (r *notifyingReader) Close() error {
- err := r.ReadCloser.Close()
- if r.notify != nil {
- close(r.notify)
- r.notify = nil
- }
- return err
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/transport/client.go b/vendor/src/github.com/docker/engine-api/client/transport/client.go
deleted file mode 100644
index 13d4b3ab3d..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/transport/client.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package transport
-
-import (
- "crypto/tls"
- "net/http"
-)
-
-// Sender is an interface that clients must implement
-// to be able to send requests to a remote connection.
-type Sender interface {
- // Do sends request to a remote endpoint.
- Do(*http.Request) (*http.Response, error)
-}
-
-// Client is an interface that abstracts all remote connections.
-type Client interface {
- Sender
- // Secure tells whether the connection is secure or not.
- Secure() bool
- // Scheme returns the connection protocol the client uses.
- Scheme() string
- // TLSConfig returns any TLS configuration the client uses.
- TLSConfig() *tls.Config
-}
-
-// tlsInfo returns information about the TLS configuration.
-type tlsInfo struct {
- tlsConfig *tls.Config
-}
-
-// TLSConfig returns the TLS configuration.
-func (t *tlsInfo) TLSConfig() *tls.Config {
- return t.tlsConfig
-}
-
-// Scheme returns protocol scheme to use.
-func (t *tlsInfo) Scheme() string {
- if t.tlsConfig != nil {
- return "https"
- }
- return "http"
-}
-
-// Secure returns true if there is a TLS configuration.
-func (t *tlsInfo) Secure() bool {
- return t.tlsConfig != nil
-}
diff --git a/vendor/src/github.com/docker/engine-api/client/transport/transport.go b/vendor/src/github.com/docker/engine-api/client/transport/transport.go
deleted file mode 100644
index ff28af1855..0000000000
--- a/vendor/src/github.com/docker/engine-api/client/transport/transport.go
+++ /dev/null
@@ -1,57 +0,0 @@
-// Package transport provides functions to send requests to remote endpoints.
-package transport
-
-import (
- "fmt"
- "net/http"
-
- "github.com/docker/go-connections/sockets"
-)
-
-// apiTransport holds information about the http transport to connect with the API.
-type apiTransport struct {
- *http.Client
- *tlsInfo
- transport *http.Transport
-}
-
-// NewTransportWithHTTP creates a new transport based on the provided proto, address and http client.
-// It uses Docker's default http transport configuration if the client is nil.
-// It does not modify the client's transport if it's not nil.
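Since *http.Client satisfies the transport.Sender interface, the forked cancellable.Do above can be exercised directly with a plain HTTP client; cancellation and deadlines arrive through the x/net context. A sketch:

```go
package main

import (
	"fmt"
	"net/http"
	"time"

	"github.com/docker/engine-api/client/transport/cancellable"
	"golang.org/x/net/context"
)

func main() {
	req, err := http.NewRequest("GET", "http://example.com/", nil)
	if err != nil {
		fmt.Println(err)
		return
	}

	// *http.Client implements transport.Sender, so the fork can be
	// driven directly; a deadline surfaces as ctx.Err().
	ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second)
	defer cancel()

	resp, err := cancellable.Do(ctx, http.DefaultClient, req)
	if err != nil {
		fmt.Println("request failed or timed out:", err)
		return
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```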
-func NewTransportWithHTTP(proto, addr string, client *http.Client) (Client, error) { - var transport *http.Transport - - if client != nil { - tr, ok := client.Transport.(*http.Transport) - if !ok { - return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", client.Transport) - } - transport = tr - } else { - transport = defaultTransport(proto, addr) - client = &http.Client{ - Transport: transport, - } - } - - return &apiTransport{ - Client: client, - tlsInfo: &tlsInfo{transport.TLSClientConfig}, - transport: transport, - }, nil -} - -// CancelRequest stops a request execution. -func (a *apiTransport) CancelRequest(req *http.Request) { - a.transport.CancelRequest(req) -} - -// defaultTransport creates a new http.Transport with Docker's -// default transport configuration. -func defaultTransport(proto, addr string) *http.Transport { - tr := new(http.Transport) - sockets.ConfigureTransport(tr, proto, addr) - return tr -} - -var _ Client = &apiTransport{} diff --git a/vendor/src/github.com/docker/engine-api/client/version.go b/vendor/src/github.com/docker/engine-api/client/version.go deleted file mode 100644 index e037551a21..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/version.go +++ /dev/null @@ -1,21 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// ServerVersion returns information of the docker client and server host. -func (cli *Client) ServerVersion(ctx context.Context) (types.Version, error) { - resp, err := cli.get(ctx, "/version", nil, nil) - if err != nil { - return types.Version{}, err - } - - var server types.Version - err = json.NewDecoder(resp.body).Decode(&server) - ensureReaderClosed(resp) - return server, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/volume_create.go b/vendor/src/github.com/docker/engine-api/client/volume_create.go deleted file mode 100644 index cc1e1c1772..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/volume_create.go +++ /dev/null @@ -1,20 +0,0 @@ -package client - -import ( - "encoding/json" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// VolumeCreate creates a volume in the docker host. -func (cli *Client) VolumeCreate(ctx context.Context, options types.VolumeCreateRequest) (types.Volume, error) { - var volume types.Volume - resp, err := cli.post(ctx, "/volumes/create", nil, options, nil) - if err != nil { - return volume, err - } - err = json.NewDecoder(resp.body).Decode(&volume) - ensureReaderClosed(resp) - return volume, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/volume_inspect.go b/vendor/src/github.com/docker/engine-api/client/volume_inspect.go deleted file mode 100644 index 2eaebfafa1..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/volume_inspect.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - - "github.com/docker/engine-api/types" - "golang.org/x/net/context" -) - -// VolumeInspect returns the information about a specific volume in the docker host. 
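VolumeCreate above posts a VolumeCreateRequest and decodes the resulting Volume. A sketch creating a named local volume; the volume name is a placeholder:

```go
package main

import (
	"fmt"
	"log"

	"github.com/docker/engine-api/client"
	"github.com/docker/engine-api/types"
	"golang.org/x/net/context"
)

func main() {
	cli, err := client.NewEnvClient()
	if err != nil {
		log.Fatal(err)
	}

	// VolumeCreateRequest mirrors `docker volume create`; the daemon
	// falls back to the local driver when Driver is left empty.
	vol, err := cli.VolumeCreate(context.Background(), types.VolumeCreateRequest{
		Name:   "app-data",
		Driver: "local",
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("created", vol.Name, "at", vol.Mountpoint)
}
```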
-func (cli *Client) VolumeInspect(ctx context.Context, volumeID string) (types.Volume, error) { - volume, _, err := cli.VolumeInspectWithRaw(ctx, volumeID) - return volume, err -} - -// VolumeInspectWithRaw returns the information about a specific volume in the docker host and its raw representation -func (cli *Client) VolumeInspectWithRaw(ctx context.Context, volumeID string) (types.Volume, []byte, error) { - var volume types.Volume - resp, err := cli.get(ctx, "/volumes/"+volumeID, nil, nil) - if err != nil { - if resp.statusCode == http.StatusNotFound { - return volume, nil, volumeNotFoundError{volumeID} - } - return volume, nil, err - } - defer ensureReaderClosed(resp) - - body, err := ioutil.ReadAll(resp.body) - if err != nil { - return volume, nil, err - } - rdr := bytes.NewReader(body) - err = json.NewDecoder(rdr).Decode(&volume) - return volume, body, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/volume_list.go b/vendor/src/github.com/docker/engine-api/client/volume_list.go deleted file mode 100644 index 7c6ccf834f..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/volume_list.go +++ /dev/null @@ -1,32 +0,0 @@ -package client - -import ( - "encoding/json" - "net/url" - - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/filters" - "golang.org/x/net/context" -) - -// VolumeList returns the volumes configured in the docker host. -func (cli *Client) VolumeList(ctx context.Context, filter filters.Args) (types.VolumesListResponse, error) { - var volumes types.VolumesListResponse - query := url.Values{} - - if filter.Len() > 0 { - filterJSON, err := filters.ToParamWithVersion(cli.version, filter) - if err != nil { - return volumes, err - } - query.Set("filters", filterJSON) - } - resp, err := cli.get(ctx, "/volumes", query, nil) - if err != nil { - return volumes, err - } - - err = json.NewDecoder(resp.body).Decode(&volumes) - ensureReaderClosed(resp) - return volumes, err -} diff --git a/vendor/src/github.com/docker/engine-api/client/volume_remove.go b/vendor/src/github.com/docker/engine-api/client/volume_remove.go deleted file mode 100644 index 0dce24c79b..0000000000 --- a/vendor/src/github.com/docker/engine-api/client/volume_remove.go +++ /dev/null @@ -1,10 +0,0 @@ -package client - -import "golang.org/x/net/context" - -// VolumeRemove removes a volume from the docker host. -func (cli *Client) VolumeRemove(ctx context.Context, volumeID string) error { - resp, err := cli.delete(ctx, "/volumes/"+volumeID, nil, nil) - ensureReaderClosed(resp) - return err -} diff --git a/vendor/src/github.com/docker/engine-api/types/auth.go b/vendor/src/github.com/docker/engine-api/types/auth.go deleted file mode 100644 index 056af6b842..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/auth.go +++ /dev/null @@ -1,22 +0,0 @@ -package types - -// AuthConfig contains authorization information for connecting to a Registry -type AuthConfig struct { - Username string `json:"username,omitempty"` - Password string `json:"password,omitempty"` - Auth string `json:"auth,omitempty"` - - // Email is an optional value associated with the username. - // This field is deprecated and will be removed in a later - // version of docker. - Email string `json:"email,omitempty"` - - ServerAddress string `json:"serveraddress,omitempty"` - - // IdentityToken is used to authenticate the user and get - // an access token for the registry. 
- IdentityToken string `json:"identitytoken,omitempty"` - - // RegistryToken is a bearer token to be sent to a registry - RegistryToken string `json:"registrytoken,omitempty"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/blkiodev/blkio.go b/vendor/src/github.com/docker/engine-api/types/blkiodev/blkio.go deleted file mode 100644 index 931ae10ab1..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/blkiodev/blkio.go +++ /dev/null @@ -1,23 +0,0 @@ -package blkiodev - -import "fmt" - -// WeightDevice is a structure that holds device:weight pair -type WeightDevice struct { - Path string - Weight uint16 -} - -func (w *WeightDevice) String() string { - return fmt.Sprintf("%s:%d", w.Path, w.Weight) -} - -// ThrottleDevice is a structure that holds device:rate_per_second pair -type ThrottleDevice struct { - Path string - Rate uint64 -} - -func (t *ThrottleDevice) String() string { - return fmt.Sprintf("%s:%d", t.Path, t.Rate) -} diff --git a/vendor/src/github.com/docker/engine-api/types/client.go b/vendor/src/github.com/docker/engine-api/types/client.go deleted file mode 100644 index c6d244d316..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/client.go +++ /dev/null @@ -1,291 +0,0 @@ -package types - -import ( - "bufio" - "io" - "net" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/filters" - "github.com/docker/go-units" -) - -// CheckpointCreateOptions holds parameters to create a checkpoint from a container -type CheckpointCreateOptions struct { - CheckpointID string - Exit bool -} - -// ContainerAttachOptions holds parameters to attach to a container. -type ContainerAttachOptions struct { - Stream bool - Stdin bool - Stdout bool - Stderr bool - DetachKeys string -} - -// ContainerCommitOptions holds parameters to commit changes into a container. -type ContainerCommitOptions struct { - Reference string - Comment string - Author string - Changes []string - Pause bool - Config *container.Config -} - -// ContainerExecInspect holds information returned by exec inspect. -type ContainerExecInspect struct { - ExecID string - ContainerID string - Running bool - ExitCode int -} - -// ContainerListOptions holds parameters to list containers with. -type ContainerListOptions struct { - Quiet bool - Size bool - All bool - Latest bool - Since string - Before string - Limit int - Filter filters.Args -} - -// ContainerLogsOptions holds parameters to filter logs with. -type ContainerLogsOptions struct { - ShowStdout bool - ShowStderr bool - Since string - Timestamps bool - Follow bool - Tail string - Details bool -} - -// ContainerRemoveOptions holds parameters to remove containers. -type ContainerRemoveOptions struct { - RemoveVolumes bool - RemoveLinks bool - Force bool -} - -// ContainerStartOptions holds parameters to start containers. -type ContainerStartOptions struct { - CheckpointID string -} - -// CopyToContainerOptions holds information -// about files to copy into a container -type CopyToContainerOptions struct { - AllowOverwriteDirWithFile bool -} - -// EventsOptions hold parameters to filter events with. -type EventsOptions struct { - Since string - Until string - Filters filters.Args -} - -// NetworkListOptions holds parameters to filter the list of networks with. -type NetworkListOptions struct { - Filters filters.Args -} - -// HijackedResponse holds connection information for a hijacked request. 
-type HijackedResponse struct { - Conn net.Conn - Reader *bufio.Reader -} - -// Close closes the hijacked connection and reader. -func (h *HijackedResponse) Close() { - h.Conn.Close() -} - -// CloseWriter is an interface that implements structs -// that close input streams to prevent from writing. -type CloseWriter interface { - CloseWrite() error -} - -// CloseWrite closes a readWriter for writing. -func (h *HijackedResponse) CloseWrite() error { - if conn, ok := h.Conn.(CloseWriter); ok { - return conn.CloseWrite() - } - return nil -} - -// ImageBuildOptions holds the information -// necessary to build images. -type ImageBuildOptions struct { - Tags []string - SuppressOutput bool - RemoteContext string - NoCache bool - Remove bool - ForceRemove bool - PullParent bool - Isolation container.Isolation - CPUSetCPUs string - CPUSetMems string - CPUShares int64 - CPUQuota int64 - CPUPeriod int64 - Memory int64 - MemorySwap int64 - CgroupParent string - ShmSize int64 - Dockerfile string - Ulimits []*units.Ulimit - BuildArgs map[string]string - AuthConfigs map[string]AuthConfig - Context io.Reader - Labels map[string]string -} - -// ImageBuildResponse holds information -// returned by a server after building -// an image. -type ImageBuildResponse struct { - Body io.ReadCloser - OSType string -} - -// ImageCreateOptions holds information to create images. -type ImageCreateOptions struct { - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry -} - -// ImageImportSource holds source information for ImageImport -type ImageImportSource struct { - Source io.Reader // Source is the data to send to the server to create this image from (mutually exclusive with SourceName) - SourceName string // SourceName is the name of the image to pull (mutually exclusive with Source) -} - -// ImageImportOptions holds information to import images from the client host. -type ImageImportOptions struct { - Tag string // Tag is the name to tag this image with. This attribute is deprecated. - Message string // Message is the message to tag the image with - Changes []string // Changes are the raw changes to apply to this image -} - -// ImageListOptions holds parameters to filter the list of images with. -type ImageListOptions struct { - MatchName string - All bool - Filters filters.Args -} - -// ImageLoadResponse returns information to the client about a load process. -type ImageLoadResponse struct { - // Body must be closed to avoid a resource leak - Body io.ReadCloser - JSON bool -} - -// ImagePullOptions holds information to pull images. -type ImagePullOptions struct { - All bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc -} - -// RequestPrivilegeFunc is a function interface that -// clients can supply to retry operations after -// getting an authorization error. -// This function returns the registry authentication -// header value in base 64 format, or an error -// if the privilege request fails. -type RequestPrivilegeFunc func() (string, error) - -//ImagePushOptions holds information to push images. -type ImagePushOptions ImagePullOptions - -// ImageRemoveOptions holds parameters to remove images. -type ImageRemoveOptions struct { - Force bool - PruneChildren bool -} - -// ImageSearchOptions holds parameters to search images with. 
-type ImageSearchOptions struct { - RegistryAuth string - PrivilegeFunc RequestPrivilegeFunc - Filters filters.Args - Limit int -} - -// ResizeOptions holds parameters to resize a tty. -// It can be used to resize container ttys and -// exec process ttys too. -type ResizeOptions struct { - Height int - Width int -} - -// VersionResponse holds version information for the client and the server -type VersionResponse struct { - Client *Version - Server *Version -} - -// ServerOK returns true when the client could connect to the docker server -// and parse the information received. It returns false otherwise. -func (v VersionResponse) ServerOK() bool { - return v.Server != nil -} - -// NodeListOptions holds parameters to list nodes with. -type NodeListOptions struct { - Filter filters.Args -} - -// NodeRemoveOptions holds parameters to remove nodes with. -type NodeRemoveOptions struct { - Force bool -} - -// ServiceCreateOptions contains the options to use when creating a service. -type ServiceCreateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string -} - -// ServiceCreateResponse contains the information returned to a client -// on the creation of a new service. -type ServiceCreateResponse struct { - // ID is the ID of the created service. - ID string -} - -// ServiceUpdateOptions contains the options to be used for updating services. -type ServiceUpdateOptions struct { - // EncodedRegistryAuth is the encoded registry authorization credentials to - // use when updating the service. - // - // This field follows the format of the X-Registry-Auth header. - EncodedRegistryAuth string - - // TODO(stevvooe): Consider moving the version parameter of ServiceUpdate - // into this field. While it does open API users up to racy writes, most - // users may not need that level of consistency in practice. -} - -// ServiceListOptions holds parameters to list services with. -type ServiceListOptions struct { - Filter filters.Args -} - -// TaskListOptions holds parameters to list tasks with. -type TaskListOptions struct { - Filter filters.Args -} diff --git a/vendor/src/github.com/docker/engine-api/types/configs.go b/vendor/src/github.com/docker/engine-api/types/configs.go deleted file mode 100644 index 93384b9fad..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/configs.go +++ /dev/null @@ -1,53 +0,0 @@ -package types - -import ( - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" -) - -// configs holds structs used for internal communication between the -// frontend (such as an http server) and the backend (such as the -// docker daemon). - -// ContainerCreateConfig is the parameter set to ContainerCreate() -type ContainerCreateConfig struct { - Name string - Config *container.Config - HostConfig *container.HostConfig - NetworkingConfig *network.NetworkingConfig - AdjustCPUShares bool -} - -// ContainerRmConfig holds arguments for the container remove -// operation. This struct is used to tell the backend what operations -// to perform. -type ContainerRmConfig struct { - ForceRemove, RemoveVolume, RemoveLink bool -} - -// ContainerCommitConfig contains build configs for commit operation, -// and is used when making a commit with the current state of the container. 
-type ContainerCommitConfig struct { - Pause bool - Repo string - Tag string - Author string - Comment string - // merge container config into commit config before commit - MergeConfigs bool - Config *container.Config -} - -// ExecConfig is a small subset of the Config struct that holds the configuration -// for the exec feature of docker. -type ExecConfig struct { - User string // User that will run the command - Privileged bool // Is the container in privileged mode - Tty bool // Attach standard streams to a tty. - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStderr bool // Attach the standard error - AttachStdout bool // Attach the standard output - Detach bool // Execute in detach mode - DetachKeys string // Escape keys for detach - Cmd []string // Execution commands and args -} diff --git a/vendor/src/github.com/docker/engine-api/types/container/config.go b/vendor/src/github.com/docker/engine-api/types/container/config.go deleted file mode 100644 index 707fc8c174..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/container/config.go +++ /dev/null @@ -1,62 +0,0 @@ -package container - -import ( - "time" - - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" -) - -// HealthConfig holds configuration settings for the HEALTHCHECK feature. -type HealthConfig struct { - // Test is the test to perform to check that the container is healthy. - // An empty slice means to inherit the default. - // The options are: - // {} : inherit healthcheck - // {"NONE"} : disable healthcheck - // {"CMD", args...} : exec arguments directly - // {"CMD-SHELL", command} : run command with system's default shell - Test []string `json:",omitempty"` - - // Zero means to inherit. Durations are expressed as integer nanoseconds. - Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks. - Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung. - - // Retries is the number of consecutive failures needed to consider a container as unhealthy. - // Zero means inherit. - Retries int `json:",omitempty"` -} - -// Config contains the configuration data about a container. -// It should hold only portable information about the container. -// Here, "portable" means "independent from the host we are running on". -// Non-portable information *should* appear in HostConfig. -// All fields added to this struct must be marked `omitempty` to keep getting -// predictable hashes from the old `v1Compatibility` configuration. -type Config struct { - Hostname string // Hostname - Domainname string // Domainname - User string // User that will run the command(s) inside the container - AttachStdin bool // Attach the standard input, makes possible user interaction - AttachStdout bool // Attach the standard output - AttachStderr bool // Attach the standard error - ExposedPorts map[nat.Port]struct{} `json:",omitempty"` // List of exposed ports - Tty bool // Attach standard streams to a tty, including stdin if it is not closed. - OpenStdin bool // Open stdin - StdinOnce bool // If true, close stdin after the 1 attached client disconnects. 
- Env []string // List of environment variables to set in the container - Cmd strslice.StrSlice // Command to run when starting the container - Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy - ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) - Image string // Name of the image as it was passed by the operator (e.g. could be symbolic) - Volumes map[string]struct{} // List of volumes (mounts) used for the container - WorkingDir string // Current directory (PWD) in which the command will be launched - Entrypoint strslice.StrSlice // Entrypoint to run when starting the container - NetworkDisabled bool `json:",omitempty"` // Is network disabled - MacAddress string `json:",omitempty"` // Mac Address of the container - OnBuild []string // ONBUILD metadata that was defined in the image Dockerfile - Labels map[string]string // List of labels set on this container - StopSignal string `json:",omitempty"` // Signal to stop a container - StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container - Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT -} diff --git a/vendor/src/github.com/docker/engine-api/types/container/host_config.go b/vendor/src/github.com/docker/engine-api/types/container/host_config.go deleted file mode 100644 index a9ff755b04..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/container/host_config.go +++ /dev/null @@ -1,320 +0,0 @@ -package container - -import ( - "strings" - - "github.com/docker/engine-api/types/blkiodev" - "github.com/docker/engine-api/types/strslice" - "github.com/docker/go-connections/nat" - "github.com/docker/go-units" -) - -// NetworkMode represents the container network stack. -type NetworkMode string - -// Isolation represents the isolation technology of a container. The supported -// values are platform specific -type Isolation string - -// IsDefault indicates the default isolation technology of a container. On Linux this -// is the native driver. On Windows, this is a Windows Server Container. -func (i Isolation) IsDefault() bool { - return strings.ToLower(string(i)) == "default" || string(i) == "" -} - -// IpcMode represents the container ipc stack. -type IpcMode string - -// IsPrivate indicates whether the container uses its private ipc stack. -func (n IpcMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's ipc stack. -func (n IpcMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's ipc stack. -func (n IpcMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the ipc stack is valid. -func (n IpcMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose ipc stack is going to be used. -func (n IpcMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UsernsMode represents userns mode in the container. -type UsernsMode string - -// IsHost indicates whether the container uses the host's userns.
-func (n UsernsMode) IsHost() bool { - return n == "host" -} - -// IsPrivate indicates whether the container uses a private userns. -func (n UsernsMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// Valid indicates whether the userns is valid. -func (n UsernsMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// CgroupSpec represents the cgroup to use for the container. -type CgroupSpec string - -// IsContainer indicates whether the container is using another container's cgroup -func (c CgroupSpec) IsContainer() bool { - parts := strings.SplitN(string(c), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the cgroup spec is valid. -func (c CgroupSpec) Valid() bool { - return c.IsContainer() || c == "" -} - -// Container returns the name of the container whose cgroup will be used. -func (c CgroupSpec) Container() string { - parts := strings.SplitN(string(c), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// UTSMode represents the UTS namespace of the container. -type UTSMode string - -// IsPrivate indicates whether the container uses its private UTS namespace. -func (n UTSMode) IsPrivate() bool { - return !(n.IsHost()) -} - -// IsHost indicates whether the container uses the host's UTS namespace. -func (n UTSMode) IsHost() bool { - return n == "host" -} - -// Valid indicates whether the UTS namespace is valid. -func (n UTSMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - default: - return false - } - return true -} - -// PidMode represents the pid namespace of the container. -type PidMode string - -// IsPrivate indicates whether the container uses its own new pid namespace. -func (n PidMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsHost indicates whether the container uses the host's pid namespace. -func (n PidMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether the container uses a container's pid namespace. -func (n PidMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// Valid indicates whether the pid namespace is valid. -func (n PidMode) Valid() bool { - parts := strings.Split(string(n), ":") - switch mode := parts[0]; mode { - case "", "host": - case "container": - if len(parts) != 2 || parts[1] == "" { - return false - } - default: - return false - } - return true -} - -// Container returns the name of the container whose pid namespace is going to be used. -func (n PidMode) Container() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// DeviceMapping represents the device mapping between the host and the container. -type DeviceMapping struct { - PathOnHost string - PathInContainer string - CgroupPermissions string -} - -// RestartPolicy represents the restart policies of the container. -type RestartPolicy struct { - Name string - MaximumRetryCount int -} - -// IsNone indicates whether the container has the "no" restart policy. -// This means the container will not automatically restart when exiting. -func (rp *RestartPolicy) IsNone() bool { - return rp.Name == "no" || rp.Name == "" -} - -// IsAlways indicates whether the container has the "always" restart policy.
-// This means the container will automatically restart regardless of the exit status. -func (rp *RestartPolicy) IsAlways() bool { - return rp.Name == "always" -} - -// IsOnFailure indicates whether the container has the "on-failure" restart policy. -// This means the container will automatically restart if it exits with a non-zero exit status. -func (rp *RestartPolicy) IsOnFailure() bool { - return rp.Name == "on-failure" -} - -// IsUnlessStopped indicates whether the container has the -// "unless-stopped" restart policy. This means the container will -// automatically restart unless the user has put it into a stopped state. -func (rp *RestartPolicy) IsUnlessStopped() bool { - return rp.Name == "unless-stopped" -} - -// IsSame compares two RestartPolicy values to see if they are the same -func (rp *RestartPolicy) IsSame(tp *RestartPolicy) bool { - return rp.Name == tp.Name && rp.MaximumRetryCount == tp.MaximumRetryCount -} - -// LogConfig represents the logging configuration of the container. -type LogConfig struct { - Type string - Config map[string]string -} - -// Resources contains container's resources (cgroups config, ulimits...) -type Resources struct { - // Applicable to all platforms - CPUShares int64 `json:"CpuShares"` // CPU shares (relative weight vs. other containers) - Memory int64 // Memory limit (in bytes) - - // Applicable to UNIX platforms - CgroupParent string // Parent cgroup. - BlkioWeight uint16 // Block IO weight (relative weight vs. other containers) - BlkioWeightDevice []*blkiodev.WeightDevice - BlkioDeviceReadBps []*blkiodev.ThrottleDevice - BlkioDeviceWriteBps []*blkiodev.ThrottleDevice - BlkioDeviceReadIOps []*blkiodev.ThrottleDevice - BlkioDeviceWriteIOps []*blkiodev.ThrottleDevice - CPUPeriod int64 `json:"CpuPeriod"` // CPU CFS (Completely Fair Scheduler) period - CPUQuota int64 `json:"CpuQuota"` // CPU CFS (Completely Fair Scheduler) quota - CpusetCpus string // CpusetCpus 0-2, 0,1 - CpusetMems string // CpusetMems 0-2, 0,1 - Devices []DeviceMapping // List of devices to map inside the container - DiskQuota int64 // Disk limit (in bytes) - KernelMemory int64 // Kernel memory limit (in bytes) - MemoryReservation int64 // Memory soft limit (in bytes) - MemorySwap int64 // Total memory usage (memory + swap); set `-1` to enable unlimited swap - MemorySwappiness *int64 // Tuning container memory swappiness behaviour - OomKillDisable *bool // Whether to disable OOM Killer or not - PidsLimit int64 // Setting pids limit for a container - Ulimits []*units.Ulimit // List of ulimits to be set in the container - - // Applicable to Windows - CPUCount int64 `json:"CpuCount"` // CPU count - CPUPercent int64 `json:"CpuPercent"` // CPU percent - IOMaximumIOps uint64 // Maximum IOps for the container system drive - IOMaximumBandwidth uint64 // Maximum IO in bytes per second for the container system drive -} - -// UpdateConfig holds the mutable attributes of a Container. -// Those attributes can be updated at runtime. -type UpdateConfig struct { - // Contains container's resources (cgroups, ulimits) - Resources - RestartPolicy RestartPolicy -} - -// HostConfig is the non-portable Config structure of a container. -// Here, "non-portable" means "dependent on the host we are running on". -// Portable information *should* appear in Config.
-type HostConfig struct { - // Applicable to all platforms - Binds []string // List of volume bindings for this container - ContainerIDFile string // File (path) where the containerId is written - LogConfig LogConfig // Configuration of the logs for this container - NetworkMode NetworkMode // Network mode to use for the container - PortBindings nat.PortMap // Port mapping between the exposed port (container) and the host - RestartPolicy RestartPolicy // Restart policy to be used for the container - AutoRemove bool // Automatically remove container when it exits - VolumeDriver string // Name of the volume driver used to mount volumes - VolumesFrom []string // List of volumes to take from other containers - - // Applicable to UNIX platforms - CapAdd strslice.StrSlice // List of kernel capabilities to add to the container - CapDrop strslice.StrSlice // List of kernel capabilities to remove from the container - DNS []string `json:"Dns"` // List of DNS servers to look up - DNSOptions []string `json:"DnsOptions"` // List of DNSOption to look for - DNSSearch []string `json:"DnsSearch"` // List of DNSSearch to look for - ExtraHosts []string // List of extra hosts - GroupAdd []string // List of additional groups that the container process will run as - IpcMode IpcMode // IPC namespace to use for the container - Cgroup CgroupSpec // Cgroup to use for the container - Links []string // List of links (in the name:alias form) - OomScoreAdj int // Container preference for OOM-killing - PidMode PidMode // PID namespace to use for the container - Privileged bool // Is the container in privileged mode - PublishAllPorts bool // Should docker publish all exposed ports for the container - ReadonlyRootfs bool // Is the container root filesystem read-only - SecurityOpt []string // List of string values to customize labels for MLS systems, such as SELinux. - StorageOpt map[string]string `json:",omitempty"` // Storage driver options per container. - Tmpfs map[string]string `json:",omitempty"` // List of tmpfs (mounts) used for the container - UTSMode UTSMode // UTS namespace to use for the container - UsernsMode UsernsMode // The user namespace to use for the container - ShmSize int64 // Total shm memory usage - Sysctls map[string]string `json:",omitempty"` // List of Namespaced sysctls used for the container - Runtime string `json:",omitempty"` // Runtime to use with this container - - // Applicable to Windows - ConsoleSize [2]int // Initial console size - Isolation Isolation // Isolation technology of the container (e.g. default, hyperv) - - // Contains container's resources (cgroups, ulimits) - Resources -} diff --git a/vendor/src/github.com/docker/engine-api/types/container/hostconfig_unix.go b/vendor/src/github.com/docker/engine-api/types/container/hostconfig_unix.go deleted file mode 100644 index 4171059a47..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/container/hostconfig_unix.go +++ /dev/null @@ -1,81 +0,0 @@ -// +build !windows -
-package container - -import "strings" - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// NetworkName returns the name of the network stack.
-func (n NetworkMode) NetworkName() string { - if n.IsBridge() { - return "bridge" - } else if n.IsHost() { - return "host" - } else if n.IsContainer() { - return "container" - } else if n.IsNone() { - return "none" - } else if n.IsDefault() { - return "default" - } else if n.IsUserDefined() { - return n.UserDefined() - } - return "" -} - -// IsBridge indicates whether container uses the bridge network stack -func (n NetworkMode) IsBridge() bool { - return n == "bridge" -} - -// IsHost indicates whether container uses the host network stack. -func (n NetworkMode) IsHost() bool { - return n == "host" -} - -// IsContainer indicates whether container uses a container network stack. -func (n NetworkMode) IsContainer() bool { - parts := strings.SplitN(string(n), ":", 2) - return len(parts) > 1 && parts[0] == "container" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// ConnectedContainer returns the id of the container whose network this container is connected to. -func (n NetworkMode) ConnectedContainer() string { - parts := strings.SplitN(string(n), ":", 2) - if len(parts) > 1 { - return parts[1] - } - return "" -} - -// IsUserDefined indicates a user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsBridge() && !n.IsHost() && !n.IsNone() && !n.IsContainer() -} - -// UserDefined returns the name of the user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/src/github.com/docker/engine-api/types/container/hostconfig_windows.go b/vendor/src/github.com/docker/engine-api/types/container/hostconfig_windows.go deleted file mode 100644 index 0ee332ba68..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/container/hostconfig_windows.go +++ /dev/null @@ -1,87 +0,0 @@ -package container - -import ( - "strings" -) - -// IsDefault indicates whether container uses the default network stack. -func (n NetworkMode) IsDefault() bool { - return n == "default" -} - -// IsNone indicates whether container isn't using a network stack. -func (n NetworkMode) IsNone() bool { - return n == "none" -} - -// IsContainer indicates whether container uses a container network stack. -// Returns false as windows doesn't support this mode -func (n NetworkMode) IsContainer() bool { - return false -} - -// IsBridge indicates whether container uses the bridge network stack -// in windows it is given the name NAT -func (n NetworkMode) IsBridge() bool { - return n == "nat" -} - -// IsHost indicates whether container uses the host network stack. -// returns false as this is not supported by windows -func (n NetworkMode) IsHost() bool { - return false -} - -// IsPrivate indicates whether container uses its private network stack. -func (n NetworkMode) IsPrivate() bool { - return !(n.IsHost() || n.IsContainer()) -} - -// ConnectedContainer returns the id of the container whose network this container is connected to.
-// Returns a blank string on Windows -func (n NetworkMode) ConnectedContainer() string { - return "" -} - -// IsUserDefined indicates a user-created network -func (n NetworkMode) IsUserDefined() bool { - return !n.IsDefault() && !n.IsNone() && !n.IsBridge() -} - -// IsHyperV indicates the use of a Hyper-V partition for isolation -func (i Isolation) IsHyperV() bool { - return strings.ToLower(string(i)) == "hyperv" -} - -// IsProcess indicates the use of process isolation -func (i Isolation) IsProcess() bool { - return strings.ToLower(string(i)) == "process" -} - -// IsValid indicates if an isolation technology is valid -func (i Isolation) IsValid() bool { - return i.IsDefault() || i.IsHyperV() || i.IsProcess() -} - -// NetworkName returns the name of the network stack. -func (n NetworkMode) NetworkName() string { - if n.IsDefault() { - return "default" - } else if n.IsBridge() { - return "nat" - } else if n.IsNone() { - return "none" - } else if n.IsUserDefined() { - return n.UserDefined() - } - - return "" -} - -// UserDefined returns the name of the user-created network -func (n NetworkMode) UserDefined() string { - if n.IsUserDefined() { - return string(n) - } - return "" -} diff --git a/vendor/src/github.com/docker/engine-api/types/errors.go b/vendor/src/github.com/docker/engine-api/types/errors.go deleted file mode 100644 index 649ab95131..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/errors.go +++ /dev/null @@ -1,6 +0,0 @@ -package types - -// ErrorResponse is the response body of API errors. -type ErrorResponse struct { - Message string `json:"message"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/events/events.go b/vendor/src/github.com/docker/engine-api/types/events/events.go deleted file mode 100644 index 7129a65acf..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/events/events.go +++ /dev/null @@ -1,42 +0,0 @@ -package events - -const ( - // ContainerEventType is the event type that containers generate - ContainerEventType = "container" - // DaemonEventType is the event type that the daemon generates - DaemonEventType = "daemon" - // ImageEventType is the event type that images generate - ImageEventType = "image" - // NetworkEventType is the event type that networks generate - NetworkEventType = "network" - // PluginEventType is the event type that plugins generate - PluginEventType = "plugin" - // VolumeEventType is the event type that volumes generate - VolumeEventType = "volume" -) - -// Actor describes something that generates events, -// like a container, or a network, or a volume. -// It has a defined name and a set of attributes. -// The container attributes are its labels; other actors -// can generate these attributes from other properties. -type Actor struct { - ID string - Attributes map[string]string -} - -// Message represents the information an event contains -type Message struct { - // Deprecated information from JSONMessage. - // With data only in container events.
- Status string `json:"status,omitempty"` - ID string `json:"id,omitempty"` - From string `json:"from,omitempty"` - - Type string - Action string - Actor Actor - - Time int64 `json:"time,omitempty"` - TimeNano int64 `json:"timeNano,omitempty"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/filters/parse.go b/vendor/src/github.com/docker/engine-api/types/filters/parse.go deleted file mode 100644 index dc2c48b894..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/filters/parse.go +++ /dev/null @@ -1,307 +0,0 @@ -// Package filters provides helper function to parse and handle command line -// filter, used for example in docker ps or docker images commands. -package filters - -import ( - "encoding/json" - "errors" - "fmt" - "regexp" - "strings" - - "github.com/docker/engine-api/types/versions" -) - -// Args stores filter arguments as map key:{map key: bool}. -// It contains an aggregation of the map of arguments (which are in the form -// of -f 'key=value') based on the key, and stores values for the same key -// in a map with string keys and boolean values. -// e.g given -f 'label=label1=1' -f 'label=label2=2' -f 'image.name=ubuntu' -// the args will be {"image.name":{"ubuntu":true},"label":{"label1=1":true,"label2=2":true}} -type Args struct { - fields map[string]map[string]bool -} - -// NewArgs initializes a new Args struct. -func NewArgs() Args { - return Args{fields: map[string]map[string]bool{}} -} - -// ParseFlag parses the argument to the filter flag. Like -// -// `docker ps -f 'created=today' -f 'image.name=ubuntu*'` -// -// If prev map is provided, then it is appended to, and returned. By default a new -// map is created. -func ParseFlag(arg string, prev Args) (Args, error) { - filters := prev - if len(arg) == 0 { - return filters, nil - } - - if !strings.Contains(arg, "=") { - return filters, ErrBadFormat - } - - f := strings.SplitN(arg, "=", 2) - - name := strings.ToLower(strings.TrimSpace(f[0])) - value := strings.TrimSpace(f[1]) - - filters.Add(name, value) - - return filters, nil -} - -// ErrBadFormat is an error returned in case of bad format for a filter. -var ErrBadFormat = errors.New("bad format of filter (expected name=value)") - -// ToParam packs the Args into a string for easy transport from client to server. -func ToParam(a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - buf, err := json.Marshal(a.fields) - if err != nil { - return "", err - } - return string(buf), nil -} - -// ToParamWithVersion packs the Args into a string for easy transport from client to server. -// The generated string will depend on the specified version (corresponding to the API version). -func ToParamWithVersion(version string, a Args) (string, error) { - // this way we don't URL encode {}, just empty space - if a.Len() == 0 { - return "", nil - } - - // for daemons older than v1.10, filter must be of the form map[string][]string - buf := []byte{} - err := errors.New("") - if version != "" && versions.LessThan(version, "1.22") { - buf, err = json.Marshal(convertArgsToSlice(a.fields)) - } else { - buf, err = json.Marshal(a.fields) - } - if err != nil { - return "", err - } - return string(buf), nil -} - -// FromParam unpacks the filter Args. 
-func FromParam(p string) (Args, error) { - if len(p) == 0 { - return NewArgs(), nil - } - - r := strings.NewReader(p) - d := json.NewDecoder(r) - - m := map[string]map[string]bool{} - if err := d.Decode(&m); err != nil { - r.Seek(0, 0) - - // Allow parsing old arguments in slice format. - // Because other libraries might be sending them in this format. - deprecated := map[string][]string{} - if deprecatedErr := d.Decode(&deprecated); deprecatedErr == nil { - m = deprecatedArgs(deprecated) - } else { - return NewArgs(), err - } - } - return Args{m}, nil -} - -// Get returns the list of values associated with a field. -// It returns a slice of strings to keep backwards compatibility with old code. -func (filters Args) Get(field string) []string { - values := filters.fields[field] - if values == nil { - return make([]string, 0) - } - slice := make([]string, 0, len(values)) - for key := range values { - slice = append(slice, key) - } - return slice -} - -// Add adds a new value to a filter field. -func (filters Args) Add(name, value string) { - if _, ok := filters.fields[name]; ok { - filters.fields[name][value] = true - } else { - filters.fields[name] = map[string]bool{value: true} - } -} - -// Del removes a value from a filter field. -func (filters Args) Del(name, value string) { - if _, ok := filters.fields[name]; ok { - delete(filters.fields[name], value) - } -} - -// Len returns the number of fields in the arguments. -func (filters Args) Len() int { - return len(filters.fields) -} - -// MatchKVList returns true if the values for the specified field match the ones -// from the sources. -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}}, -// field is 'label' and sources are {'label1': '1', 'label2': '2'} -// it returns true. -func (filters Args) MatchKVList(field string, sources map[string]string) bool { - fieldValues := filters.fields[field] - - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - - if sources == nil || len(sources) == 0 { - return false - } - - for name2match := range fieldValues { - testKV := strings.SplitN(name2match, "=", 2) - - v, ok := sources[testKV[0]] - if !ok { - return false - } - if len(testKV) == 2 && testKV[1] != v { - return false - } - } - - return true -} - -// Match returns true if the values for the specified field match the source string -// e.g. given Args are {'label': {'label1=1','label2=1'}, 'image.name': {'ubuntu'}}, -// field is 'image.name' and source is 'ubuntu' -// it returns true. -func (filters Args) Match(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for name2match := range fieldValues { - match, err := regexp.MatchString(name2match, source) - if err != nil { - continue - } - if match { - return true - } - } - return false -} - -// ExactMatch returns true if the source matches exactly one of the filters. -func (filters Args) ExactMatch(field, source string) bool { - fieldValues, ok := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if !ok || len(fieldValues) == 0 { - return true - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// UniqueExactMatch returns true if there is only one filter and the source matches exactly this one.
-func (filters Args) UniqueExactMatch(field, source string) bool { - fieldValues := filters.fields[field] - //do not filter if there is no filter set or cannot determine filter - if len(fieldValues) == 0 { - return true - } - if len(filters.fields[field]) != 1 { - return false - } - - // try to match full name value to avoid O(N) regular expression matching - return fieldValues[source] -} - -// FuzzyMatch returns true if the source matches exactly one of the filters, -// or the source has one of the filters as a prefix. -func (filters Args) FuzzyMatch(field, source string) bool { - if filters.ExactMatch(field, source) { - return true - } - - fieldValues := filters.fields[field] - for prefix := range fieldValues { - if strings.HasPrefix(source, prefix) { - return true - } - } - return false -} - -// Include returns true if the name of the field to filter is in the filters. -func (filters Args) Include(field string) bool { - _, ok := filters.fields[field] - return ok -} - -// Validate ensures that all the fields in the filter are valid. -// It returns an error as soon as it finds an invalid field. -func (filters Args) Validate(accepted map[string]bool) error { - for name := range filters.fields { - if !accepted[name] { - return fmt.Errorf("Invalid filter '%s'", name) - } - } - return nil -} - -// WalkValues iterates over the list of filtered values for a field. -// It stops the iteration if it finds an error and it returns that error. -func (filters Args) WalkValues(field string, op func(value string) error) error { - if _, ok := filters.fields[field]; !ok { - return nil - } - for v := range filters.fields[field] { - if err := op(v); err != nil { - return err - } - } - return nil -} - -func deprecatedArgs(d map[string][]string) map[string]map[string]bool { - m := map[string]map[string]bool{} - for k, v := range d { - values := map[string]bool{} - for _, vv := range v { - values[vv] = true - } - m[k] = values - } - return m -} - -func convertArgsToSlice(f map[string]map[string]bool) map[string][]string { - m := map[string][]string{} - for k, v := range f { - values := []string{} - for kk := range v { - if v[kk] { - values = append(values, kk) - } - } - m[k] = values - } - return m -} diff --git a/vendor/src/github.com/docker/engine-api/types/network/network.go b/vendor/src/github.com/docker/engine-api/types/network/network.go deleted file mode 100644 index 47080b652e..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/network/network.go +++ /dev/null @@ -1,53 +0,0 @@ -package network - -// Address represents an IP address -type Address struct { - Addr string - PrefixLen int -} - -// IPAM represents IP Address Management -type IPAM struct { - Driver string - Options map[string]string //Per network IPAM driver options - Config []IPAMConfig -} - -// IPAMConfig represents IPAM configurations -type IPAMConfig struct { - Subnet string `json:",omitempty"` - IPRange string `json:",omitempty"` - Gateway string `json:",omitempty"` - AuxAddress map[string]string `json:"AuxiliaryAddresses,omitempty"` -} - -// EndpointIPAMConfig represents IPAM configurations for the endpoint -type EndpointIPAMConfig struct { - IPv4Address string `json:",omitempty"` - IPv6Address string `json:",omitempty"` - LinkLocalIPs []string `json:",omitempty"` -} - -// EndpointSettings stores the network endpoint details -type EndpointSettings struct { - // Configurations - IPAMConfig *EndpointIPAMConfig - Links []string - Aliases []string - // Operational data - NetworkID string - EndpointID string - Gateway string - 
IPAddress string - IPPrefixLen int - IPv6Gateway string - GlobalIPv6Address string - GlobalIPv6PrefixLen int - MacAddress string -} - -// NetworkingConfig represents the container's networking configuration for each of its interfaces -// Carries the networking configs specified in the `docker run` and `docker network connect` commands -type NetworkingConfig struct { - EndpointsConfig map[string]*EndpointSettings // Endpoint configs for each connecting network -} diff --git a/vendor/src/github.com/docker/engine-api/types/plugin.go b/vendor/src/github.com/docker/engine-api/types/plugin.go deleted file mode 100644 index 05030ff3de..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/plugin.go +++ /dev/null @@ -1,169 +0,0 @@ -// +build experimental - -package types - -import ( - "encoding/json" - "fmt" -) - -// PluginInstallOptions holds parameters to install a plugin. -type PluginInstallOptions struct { - Disabled bool - AcceptAllPermissions bool - RegistryAuth string // RegistryAuth is the base64 encoded credentials for the registry - PrivilegeFunc RequestPrivilegeFunc - AcceptPermissionsFunc func(PluginPrivileges) (bool, error) -} - -// PluginConfig represents the values of settings potentially modifiable by a user -type PluginConfig struct { - Mounts []PluginMount - Env []string - Args []string - Devices []PluginDevice -} - -// Plugin represents a Docker plugin for the remote API -type Plugin struct { - ID string `json:"Id,omitempty"` - Name string - Tag string - Active bool - Config PluginConfig - Manifest PluginManifest -} - -// PluginsListResponse contains the response for the remote API -type PluginsListResponse []*Plugin - -const ( - authzDriver = "AuthzDriver" - graphDriver = "GraphDriver" - ipamDriver = "IpamDriver" - networkDriver = "NetworkDriver" - volumeDriver = "VolumeDriver" -) - -// PluginInterfaceType represents a type that a plugin implements. -type PluginInterfaceType struct { - Prefix string // This is always "docker" - Capability string // Capability should be validated against the above list. - Version string // Plugin API version. Depends on the capability -} - -// UnmarshalJSON implements json.Unmarshaler for PluginInterfaceType -func (t *PluginInterfaceType) UnmarshalJSON(p []byte) error { - versionIndex := len(p) - prefixIndex := 0 - if len(p) < 2 || p[0] != '"' || p[len(p)-1] != '"' { - return fmt.Errorf("%q is not a plugin interface type", p) - } - p = p[1 : len(p)-1] -loop: - for i, b := range p { - switch b { - case '.': - prefixIndex = i - case '/': - versionIndex = i - break loop - } - } - t.Prefix = string(p[:prefixIndex]) - t.Capability = string(p[prefixIndex+1 : versionIndex]) - if versionIndex < len(p) { - t.Version = string(p[versionIndex+1:]) - } - return nil -} - -// MarshalJSON implements json.Marshaler for PluginInterfaceType -func (t *PluginInterfaceType) MarshalJSON() ([]byte, error) { - return json.Marshal(t.String()) -} - -// String implements fmt.Stringer for PluginInterfaceType -func (t PluginInterfaceType) String() string { - return fmt.Sprintf("%s.%s/%s", t.Prefix, t.Capability, t.Version) -} - -// PluginInterface describes the interface between Docker and plugin -type PluginInterface struct { - Types []PluginInterfaceType - Socket string -} - -// PluginSetting is to be embedded in other structs, if they are supposed to be -// modifiable by the user. 
-type PluginSetting struct { - Name string - Description string - Settable []string -} - -// PluginNetwork represents the network configuration for a plugin -type PluginNetwork struct { - Type string -} - -// PluginMount represents the mount configuration for a plugin -type PluginMount struct { - PluginSetting - Source *string - Destination string - Type string - Options []string -} - -// PluginEnv represents an environment variable for a plugin -type PluginEnv struct { - PluginSetting - Value *string -} - -// PluginArgs represents the command line arguments for a plugin -type PluginArgs struct { - PluginSetting - Value []string -} - -// PluginDevice represents a device for a plugin -type PluginDevice struct { - PluginSetting - Path *string -} - -// PluginUser represents the user for the plugin's process -type PluginUser struct { - UID uint32 `json:"Uid,omitempty"` - GID uint32 `json:"Gid,omitempty"` -} - -// PluginManifest represents the manifest of a plugin -type PluginManifest struct { - ManifestVersion string - Description string - Documentation string - Interface PluginInterface - Entrypoint []string - Workdir string - User PluginUser `json:",omitempty"` - Network PluginNetwork - Capabilities []string - Mounts []PluginMount - Devices []PluginDevice - Env []PluginEnv - Args PluginArgs -} - -// PluginPrivilege describes a permission the user has to accept -// upon installing a plugin. -type PluginPrivilege struct { - Name string - Description string - Value []string -} - -// PluginPrivileges is a list of PluginPrivilege -type PluginPrivileges []PluginPrivilege diff --git a/vendor/src/github.com/docker/engine-api/types/reference/image_reference.go b/vendor/src/github.com/docker/engine-api/types/reference/image_reference.go deleted file mode 100644 index be9cf8ebed..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/reference/image_reference.go +++ /dev/null @@ -1,34 +0,0 @@ -package reference - -import ( - distreference "github.com/docker/distribution/reference" -) - -// Parse parses the given references and returns the repository and -// tag (if present) from it. If there is an error during parsing, it will -// return an error. -func Parse(ref string) (string, string, error) { - distributionRef, err := distreference.ParseNamed(ref) - if err != nil { - return "", "", err - } - - tag := GetTagFromNamedRef(distributionRef) - return distributionRef.Name(), tag, nil -} - -// GetTagFromNamedRef returns a tag from the specified reference. -// This function is necessary as long as the docker "server" api makes the distinction between repository -// and tags. -func GetTagFromNamedRef(ref distreference.Named) string { - var tag string - switch x := ref.(type) { - case distreference.Digested: - tag = x.Digest().String() - case distreference.NamedTagged: - tag = x.Tag() - default: - tag = "latest" - } - return tag -} diff --git a/vendor/src/github.com/docker/engine-api/types/registry/registry.go b/vendor/src/github.com/docker/engine-api/types/registry/registry.go deleted file mode 100644 index d2aca6f024..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/registry/registry.go +++ /dev/null @@ -1,99 +0,0 @@ -package registry - -import ( - "encoding/json" - "net" -) - -// ServiceConfig stores daemon registry services configuration. 
-type ServiceConfig struct { - InsecureRegistryCIDRs []*NetIPNet `json:"InsecureRegistryCIDRs"` - IndexConfigs map[string]*IndexInfo `json:"IndexConfigs"` - Mirrors []string -} - -// NetIPNet is the net.IPNet type, which can be marshalled and -// unmarshalled to JSON -type NetIPNet net.IPNet - -// MarshalJSON returns the JSON representation of the IPNet -func (ipnet *NetIPNet) MarshalJSON() ([]byte, error) { - return json.Marshal((*net.IPNet)(ipnet).String()) -} - -// UnmarshalJSON sets the IPNet from a byte array of JSON -func (ipnet *NetIPNet) UnmarshalJSON(b []byte) (err error) { - var ipnetStr string - if err = json.Unmarshal(b, &ipnetStr); err == nil { - var cidr *net.IPNet - if _, cidr, err = net.ParseCIDR(ipnetStr); err == nil { - *ipnet = NetIPNet(*cidr) - } - } - return -} - -// IndexInfo contains information about a registry -// -// RepositoryInfo Examples: -// { -// "Index" : { -// "Name" : "docker.io", -// "Mirrors" : ["https://registry-2.docker.io/v1/", "https://registry-3.docker.io/v1/"], -// "Secure" : true, -// "Official" : true, -// }, -// "RemoteName" : "library/debian", -// "LocalName" : "debian", -// "CanonicalName" : "docker.io/debian" -// "Official" : true, -// } -// -// { -// "Index" : { -// "Name" : "127.0.0.1:5000", -// "Mirrors" : [], -// "Secure" : false, -// "Official" : false, -// }, -// "RemoteName" : "user/repo", -// "LocalName" : "127.0.0.1:5000/user/repo", -// "CanonicalName" : "127.0.0.1:5000/user/repo", -// "Official" : false, -// } -type IndexInfo struct { - // Name is the name of the registry, such as "docker.io" - Name string - // Mirrors is a list of mirrors, expressed as URIs - Mirrors []string - // Secure is set to false if the registry is part of the list of - // insecure registries. Insecure registries accept HTTP and/or accept - // HTTPS with certificates from unknown CAs. - Secure bool - // Official indicates whether this is an official registry - Official bool -} - -// SearchResult describes a search result returned from a registry -type SearchResult struct { - // StarCount indicates the number of stars this repository has - StarCount int `json:"star_count"` - // IsOfficial is true if the result is from an official repository. - IsOfficial bool `json:"is_official"` - // Name is the name of the repository - Name string `json:"name"` - // IsAutomated indicates whether the result is automated - IsAutomated bool `json:"is_automated"` - // Description is a textual description of the repository - Description string `json:"description"` -} - -// SearchResults lists a collection search results returned from a registry -type SearchResults struct { - // Query contains the query string that generated the search results - Query string `json:"query"` - // NumResults indicates the number of results the query returned - NumResults int `json:"num_results"` - // Results is a slice containing the actual results for the search - Results []SearchResult `json:"results"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/seccomp.go b/vendor/src/github.com/docker/engine-api/types/seccomp.go deleted file mode 100644 index 854f1c4538..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/seccomp.go +++ /dev/null @@ -1,73 +0,0 @@ -package types - -// Seccomp represents the config for a seccomp profile for syscall restriction. 
-type Seccomp struct { - DefaultAction Action `json:"defaultAction"` - Architectures []Arch `json:"architectures"` - Syscalls []*Syscall `json:"syscalls"` -} - -// Arch used for additional architectures -type Arch string - -// Additional architectures permitted to be used for system calls -// By default only the native architecture of the kernel is permitted -const ( - ArchX86 Arch = "SCMP_ARCH_X86" - ArchX86_64 Arch = "SCMP_ARCH_X86_64" - ArchX32 Arch = "SCMP_ARCH_X32" - ArchARM Arch = "SCMP_ARCH_ARM" - ArchAARCH64 Arch = "SCMP_ARCH_AARCH64" - ArchMIPS Arch = "SCMP_ARCH_MIPS" - ArchMIPS64 Arch = "SCMP_ARCH_MIPS64" - ArchMIPS64N32 Arch = "SCMP_ARCH_MIPS64N32" - ArchMIPSEL Arch = "SCMP_ARCH_MIPSEL" - ArchMIPSEL64 Arch = "SCMP_ARCH_MIPSEL64" - ArchMIPSEL64N32 Arch = "SCMP_ARCH_MIPSEL64N32" - ArchPPC Arch = "SCMP_ARCH_PPC" - ArchPPC64 Arch = "SCMP_ARCH_PPC64" - ArchPPC64LE Arch = "SCMP_ARCH_PPC64LE" - ArchS390 Arch = "SCMP_ARCH_S390" - ArchS390X Arch = "SCMP_ARCH_S390X" -) - -// Action taken upon Seccomp rule match -type Action string - -// Define actions for Seccomp rules -const ( - ActKill Action = "SCMP_ACT_KILL" - ActTrap Action = "SCMP_ACT_TRAP" - ActErrno Action = "SCMP_ACT_ERRNO" - ActTrace Action = "SCMP_ACT_TRACE" - ActAllow Action = "SCMP_ACT_ALLOW" -) - -// Operator used to match syscall arguments in Seccomp -type Operator string - -// Define operators for syscall arguments in Seccomp -const ( - OpNotEqual Operator = "SCMP_CMP_NE" - OpLessThan Operator = "SCMP_CMP_LT" - OpLessEqual Operator = "SCMP_CMP_LE" - OpEqualTo Operator = "SCMP_CMP_EQ" - OpGreaterEqual Operator = "SCMP_CMP_GE" - OpGreaterThan Operator = "SCMP_CMP_GT" - OpMaskedEqual Operator = "SCMP_CMP_MASKED_EQ" -) - -// Arg used for matching specific syscall arguments in Seccomp -type Arg struct { - Index uint `json:"index"` - Value uint64 `json:"value"` - ValueTwo uint64 `json:"valueTwo"` - Op Operator `json:"op"` -} - -// Syscall is used to match a syscall in Seccomp -type Syscall struct { - Name string `json:"name"` - Action Action `json:"action"` - Args []*Arg `json:"args"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/stats.go b/vendor/src/github.com/docker/engine-api/types/stats.go deleted file mode 100644 index b420ebe7f6..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/stats.go +++ /dev/null @@ -1,115 +0,0 @@ -// Package types is used for API stability in the types and response to the -// consumers of the API stats endpoint. -package types - -import "time" - -// ThrottlingData stores CPU throttling stats of one running container -type ThrottlingData struct { - // Number of periods with throttling active - Periods uint64 `json:"periods"` - // Number of periods when the container hits its throttling limit. - ThrottledPeriods uint64 `json:"throttled_periods"` - // Aggregate time the container was throttled for in nanoseconds. - ThrottledTime uint64 `json:"throttled_time"` -} - -// CPUUsage stores All CPU stats aggregated since container inception. -type CPUUsage struct { - // Total CPU time consumed. - // Units: nanoseconds. - TotalUsage uint64 `json:"total_usage"` - // Total CPU time consumed per core. - // Units: nanoseconds. - PercpuUsage []uint64 `json:"percpu_usage"` - // Time spent by tasks of the cgroup in kernel mode. - // Units: nanoseconds. - UsageInKernelmode uint64 `json:"usage_in_kernelmode"` - // Time spent by tasks of the cgroup in user mode. - // Units: nanoseconds. 
- UsageInUsermode uint64 `json:"usage_in_usermode"` -} - -// CPUStats aggregates and wraps all CPU related info of container -type CPUStats struct { - CPUUsage CPUUsage `json:"cpu_usage"` - SystemUsage uint64 `json:"system_cpu_usage"` - ThrottlingData ThrottlingData `json:"throttling_data,omitempty"` -} - -// MemoryStats aggregates All memory stats since container inception -type MemoryStats struct { - // current res_counter usage for memory - Usage uint64 `json:"usage"` - // maximum usage ever recorded. - MaxUsage uint64 `json:"max_usage"` - // TODO(vishh): Export these as stronger types. - // all the stats exported via memory.stat. - Stats map[string]uint64 `json:"stats"` - // number of times memory usage hits limits. - Failcnt uint64 `json:"failcnt"` - Limit uint64 `json:"limit"` -} - -// BlkioStatEntry is one small entity to store a piece of Blkio stats -// TODO Windows: This can be factored out -type BlkioStatEntry struct { - Major uint64 `json:"major"` - Minor uint64 `json:"minor"` - Op string `json:"op"` - Value uint64 `json:"value"` -} - -// BlkioStats stores All IO service stats for data read and write -// TODO Windows: This can be factored out -type BlkioStats struct { - // number of bytes transferred to and from the block device - IoServiceBytesRecursive []BlkioStatEntry `json:"io_service_bytes_recursive"` - IoServicedRecursive []BlkioStatEntry `json:"io_serviced_recursive"` - IoQueuedRecursive []BlkioStatEntry `json:"io_queue_recursive"` - IoServiceTimeRecursive []BlkioStatEntry `json:"io_service_time_recursive"` - IoWaitTimeRecursive []BlkioStatEntry `json:"io_wait_time_recursive"` - IoMergedRecursive []BlkioStatEntry `json:"io_merged_recursive"` - IoTimeRecursive []BlkioStatEntry `json:"io_time_recursive"` - SectorsRecursive []BlkioStatEntry `json:"sectors_recursive"` -} - -// NetworkStats aggregates All network stats of one container -// TODO Windows: This will require refactoring -type NetworkStats struct { - RxBytes uint64 `json:"rx_bytes"` - RxPackets uint64 `json:"rx_packets"` - RxErrors uint64 `json:"rx_errors"` - RxDropped uint64 `json:"rx_dropped"` - TxBytes uint64 `json:"tx_bytes"` - TxPackets uint64 `json:"tx_packets"` - TxErrors uint64 `json:"tx_errors"` - TxDropped uint64 `json:"tx_dropped"` -} - -// PidsStats contains the stats of a container's pids -type PidsStats struct { - // Current is the number of pids in the cgroup - Current uint64 `json:"current,omitempty"` - // Limit is the hard limit on the number of pids in the cgroup. - // A "Limit" of 0 means that there is no limit. 
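// A sketch of the usual way a client turns two consecutive samples of the
// CPUStats above into a usage percentage; this mirrors how the docker CLI's
// stats view has traditionally derived it on Linux, and is not part of the
// original file:
func cpuPercent(prev, cur CPUStats) float64 {
	// Delta of the container's cumulative CPU time vs. the whole system's.
	cpuDelta := float64(cur.CPUUsage.TotalUsage) - float64(prev.CPUUsage.TotalUsage)
	sysDelta := float64(cur.SystemUsage) - float64(prev.SystemUsage)
	if cpuDelta <= 0 || sysDelta <= 0 {
		return 0
	}
	// Scale by core count so a fully busy 4-core container reads as 400%.
	return (cpuDelta / sysDelta) * float64(len(cur.CPUUsage.PercpuUsage)) * 100
}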
- Limit uint64 `json:"limit,omitempty"` -} - -// Stats is the ultimate struct aggregating all types of stats of one container -type Stats struct { - Read time.Time `json:"read"` - PreCPUStats CPUStats `json:"precpu_stats,omitempty"` - CPUStats CPUStats `json:"cpu_stats,omitempty"` - MemoryStats MemoryStats `json:"memory_stats,omitempty"` - BlkioStats BlkioStats `json:"blkio_stats,omitempty"` - PidsStats PidsStats `json:"pids_stats,omitempty"` -} - -// StatsJSON extends Stats with per-network statistics -type StatsJSON struct { - Stats - - // Networks requires API version >= 1.21 - Networks map[string]NetworkStats `json:"networks,omitempty"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/strslice/strslice.go b/vendor/src/github.com/docker/engine-api/types/strslice/strslice.go deleted file mode 100644 index bad493fb89..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/strslice/strslice.go +++ /dev/null @@ -1,30 +0,0 @@ -package strslice - -import "encoding/json" - -// StrSlice represents a string or an array of strings. -// We need to override the json decoder to accept both options. -type StrSlice []string - -// UnmarshalJSON decodes the byte slice whether it's a string or an array of -// strings. This method is needed to implement json.Unmarshaler. -func (e *StrSlice) UnmarshalJSON(b []byte) error { - if len(b) == 0 { - // With no input, we preserve the existing value by returning nil and - // leaving the target alone. This allows defining default values for - // the type. - return nil - } - - p := make([]string, 0, 1) - if err := json.Unmarshal(b, &p); err != nil { - var s string - if err := json.Unmarshal(b, &s); err != nil { - return err - } - p = append(p, s) - } - - *e = p - return nil -} diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/common.go b/vendor/src/github.com/docker/engine-api/types/swarm/common.go deleted file mode 100644 index b87f545369..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/swarm/common.go +++ /dev/null @@ -1,21 +0,0 @@ -package swarm - -import "time" - -// Version represents the internal object version. -type Version struct { - Index uint64 `json:",omitempty"` -} - -// Meta is the base object inherited by most of the other ones. -type Meta struct { - Version Version `json:",omitempty"` - CreatedAt time.Time `json:",omitempty"` - UpdatedAt time.Time `json:",omitempty"` -} - -// Annotations represents how to describe an object. -type Annotations struct { - Name string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/container.go b/vendor/src/github.com/docker/engine-api/types/swarm/container.go deleted file mode 100644 index 29f2e8a647..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/swarm/container.go +++ /dev/null @@ -1,67 +0,0 @@ -package swarm - -import "time" - -// ContainerSpec represents the spec of a container. -type ContainerSpec struct { - Image string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Command []string `json:",omitempty"` - Args []string `json:",omitempty"` - Env []string `json:",omitempty"` - Dir string `json:",omitempty"` - User string `json:",omitempty"` - Mounts []Mount `json:",omitempty"` - StopGracePeriod *time.Duration `json:",omitempty"` -} - -// MountType represents the type of a mount.
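// To see what the StrSlice decoder above buys: both the shell form and the
// exec form of a command deserialize to the same slice type. A standalone
// sketch (the lowercase mirror type is ours):
package main

import (
	"encoding/json"
	"fmt"
)

type strSlice []string

func (e *strSlice) UnmarshalJSON(b []byte) error {
	if len(b) == 0 {
		return nil // preserve any existing value, as above
	}
	p := make([]string, 0, 1)
	if err := json.Unmarshal(b, &p); err != nil {
		// Not an array; fall back to a single string.
		var s string
		if err := json.Unmarshal(b, &s); err != nil {
			return err
		}
		p = append(p, s)
	}
	*e = p
	return nil
}

func main() {
	var shell, exec strSlice
	_ = json.Unmarshal([]byte(`"/bin/sh -c date"`), &shell)
	_ = json.Unmarshal([]byte(`["/bin/sh","-c","date"]`), &exec)
	fmt.Println(len(shell), shell) // 1 [/bin/sh -c date]
	fmt.Println(len(exec), exec)   // 3 [/bin/sh -c date]
}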
-type MountType string - -const ( - // MountTypeBind BIND - MountTypeBind MountType = "bind" - // MountTypeVolume VOLUME - MountTypeVolume MountType = "volume" -) - -// Mount represents a mount (volume). -type Mount struct { - Type MountType `json:",omitempty"` - Source string `json:",omitempty"` - Target string `json:",omitempty"` - ReadOnly bool `json:",omitempty"` - - BindOptions *BindOptions `json:",omitempty"` - VolumeOptions *VolumeOptions `json:",omitempty"` -} - -// MountPropagation represents the propagation of a mount. -type MountPropagation string - -const ( - // MountPropagationRPrivate RPRIVATE - MountPropagationRPrivate MountPropagation = "rprivate" - // MountPropagationPrivate PRIVATE - MountPropagationPrivate MountPropagation = "private" - // MountPropagationRShared RSHARED - MountPropagationRShared MountPropagation = "rshared" - // MountPropagationShared SHARED - MountPropagationShared MountPropagation = "shared" - // MountPropagationRSlave RSLAVE - MountPropagationRSlave MountPropagation = "rslave" - // MountPropagationSlave SLAVE - MountPropagationSlave MountPropagation = "slave" -) - -// BindOptions defines options specific to mounts of type "bind". -type BindOptions struct { - Propagation MountPropagation `json:",omitempty"` -} - -// VolumeOptions represents the options for a mount of type volume. -type VolumeOptions struct { - NoCopy bool `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - DriverConfig *Driver `json:",omitempty"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/network.go b/vendor/src/github.com/docker/engine-api/types/swarm/network.go deleted file mode 100644 index 84804da2fb..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/swarm/network.go +++ /dev/null @@ -1,99 +0,0 @@ -package swarm - -// Endpoint represents an endpoint. -type Endpoint struct { - Spec EndpointSpec `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` - VirtualIPs []EndpointVirtualIP `json:",omitempty"` -} - -// EndpointSpec represents the spec of an endpoint. -type EndpointSpec struct { - Mode ResolutionMode `json:",omitempty"` - Ports []PortConfig `json:",omitempty"` -} - -// ResolutionMode represents a resolution mode. -type ResolutionMode string - -const ( - // ResolutionModeVIP VIP - ResolutionModeVIP ResolutionMode = "vip" - // ResolutionModeDNSRR DNSRR - ResolutionModeDNSRR ResolutionMode = "dnsrr" -) - -// PortConfig represents the config of a port. -type PortConfig struct { - Name string `json:",omitempty"` - Protocol PortConfigProtocol `json:",omitempty"` - TargetPort uint32 `json:",omitempty"` - PublishedPort uint32 `json:",omitempty"` -} - -// PortConfigProtocol represents the protocol of a port. -type PortConfigProtocol string - -const ( - // TODO(stevvooe): These should be used generally, not just for PortConfig. - - // PortConfigProtocolTCP TCP - PortConfigProtocolTCP PortConfigProtocol = "tcp" - // PortConfigProtocolUDP UDP - PortConfigProtocolUDP PortConfigProtocol = "udp" -) - -// EndpointVirtualIP represents the virtual ip of a port. -type EndpointVirtualIP struct { - NetworkID string `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// Network represents a network. -type Network struct { - ID string - Meta - Spec NetworkSpec `json:",omitempty"` - DriverState Driver `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkSpec represents the spec of a network. 
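// A sketch of the two Mount flavors defined above, as a ContainerSpec would
// carry them; paths, names, and labels are illustrative:
var exampleMounts = []Mount{
	{
		// A read-only bind mount with explicit propagation.
		Type:        MountTypeBind,
		Source:      "/var/run/docker.sock",
		Target:      "/var/run/docker.sock",
		ReadOnly:    true,
		BindOptions: &BindOptions{Propagation: MountPropagationRPrivate},
	},
	{
		// A named volume created by the "local" driver on demand.
		Type:   MountTypeVolume,
		Source: "app-data",
		Target: "/data",
		VolumeOptions: &VolumeOptions{
			Labels:       map[string]string{"backup": "daily"},
			DriverConfig: &Driver{Name: "local"},
		},
	},
}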
-type NetworkSpec struct { - Annotations - DriverConfiguration *Driver `json:",omitempty"` - IPv6Enabled bool `json:",omitempty"` - Internal bool `json:",omitempty"` - IPAMOptions *IPAMOptions `json:",omitempty"` -} - -// NetworkAttachmentConfig represents the configuration of a network attachment. -type NetworkAttachmentConfig struct { - Target string `json:",omitempty"` - Aliases []string `json:",omitempty"` -} - -// NetworkAttachment represents a network attachment. -type NetworkAttachment struct { - Network Network `json:",omitempty"` - Addresses []string `json:",omitempty"` -} - -// IPAMOptions represents IPAM options. -type IPAMOptions struct { - Driver Driver `json:",omitempty"` - Configs []IPAMConfig `json:",omitempty"` -} - -// IPAMConfig represents IPAM configuration. -type IPAMConfig struct { - Subnet string `json:",omitempty"` - Range string `json:",omitempty"` - Gateway string `json:",omitempty"` -} - -// Driver represents a driver (network/volume). -type Driver struct { - Name string `json:",omitempty"` - Options map[string]string `json:",omitempty"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/node.go b/vendor/src/github.com/docker/engine-api/types/swarm/node.go deleted file mode 100644 index 9987662a58..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/swarm/node.go +++ /dev/null @@ -1,107 +0,0 @@ -package swarm - -// Node represents a node. -type Node struct { - ID string - Meta - - Spec NodeSpec `json:",omitempty"` - Description NodeDescription `json:",omitempty"` - Status NodeStatus `json:",omitempty"` - ManagerStatus *ManagerStatus `json:",omitempty"` -} - -// NodeSpec represents the spec of a node. -type NodeSpec struct { - Annotations - Role NodeRole `json:",omitempty"` - Availability NodeAvailability `json:",omitempty"` -} - -// NodeRole represents the role of a node. -type NodeRole string - -const ( - // NodeRoleWorker WORKER - NodeRoleWorker NodeRole = "worker" - // NodeRoleManager MANAGER - NodeRoleManager NodeRole = "manager" -) - -// NodeAvailability represents the availability of a node. -type NodeAvailability string - -const ( - // NodeAvailabilityActive ACTIVE - NodeAvailabilityActive NodeAvailability = "active" - // NodeAvailabilityPause PAUSE - NodeAvailabilityPause NodeAvailability = "pause" - // NodeAvailabilityDrain DRAIN - NodeAvailabilityDrain NodeAvailability = "drain" -) - -// NodeDescription represents the description of a node. -type NodeDescription struct { - Hostname string `json:",omitempty"` - Platform Platform `json:",omitempty"` - Resources Resources `json:",omitempty"` - Engine EngineDescription `json:",omitempty"` -} - -// Platform represents the platform (Arch/OS). -type Platform struct { - Architecture string `json:",omitempty"` - OS string `json:",omitempty"` -} - -// EngineDescription represents the description of an engine. -type EngineDescription struct { - EngineVersion string `json:",omitempty"` - Labels map[string]string `json:",omitempty"` - Plugins []PluginDescription `json:",omitempty"` -} - -// PluginDescription represents the description of an engine plugin. -type PluginDescription struct { - Type string `json:",omitempty"` - Name string `json:",omitempty"` -} - -// NodeStatus represents the status of a node. -type NodeStatus struct { - State NodeState `json:",omitempty"` - Message string `json:",omitempty"` -} - -// Reachability represents the reachability of a node.
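// A sketch of a NetworkSpec using the IPAM types above: an internal network
// with one statically configured pool; all names and addresses illustrative:
var exampleNetworkSpec = NetworkSpec{
	Annotations:         Annotations{Name: "backend"},
	DriverConfiguration: &Driver{Name: "overlay"},
	Internal:            true,
	IPAMOptions: &IPAMOptions{
		Driver: Driver{Name: "default"},
		Configs: []IPAMConfig{
			{Subnet: "10.10.0.0/24", Range: "10.10.0.128/25", Gateway: "10.10.0.1"},
		},
	},
}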
-type Reachability string - -const ( - // ReachabilityUnknown UNKNOWN - ReachabilityUnknown Reachability = "unknown" - // ReachabilityUnreachable UNREACHABLE - ReachabilityUnreachable Reachability = "unreachable" - // ReachabilityReachable REACHABLE - ReachabilityReachable Reachability = "reachable" -) - -// ManagerStatus represents the status of a manager. -type ManagerStatus struct { - Leader bool `json:",omitempty"` - Reachability Reachability `json:",omitempty"` - Addr string `json:",omitempty"` -} - -// NodeState represents the state of a node. -type NodeState string - -const ( - // NodeStateUnknown UNKNOWN - NodeStateUnknown NodeState = "unknown" - // NodeStateDown DOWN - NodeStateDown NodeState = "down" - // NodeStateReady READY - NodeStateReady NodeState = "ready" - // NodeStateDisconnected DISCONNECTED - NodeStateDisconnected NodeState = "disconnected" -) diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/service.go b/vendor/src/github.com/docker/engine-api/types/swarm/service.go deleted file mode 100644 index 676fc0e0bf..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/swarm/service.go +++ /dev/null @@ -1,73 +0,0 @@ -package swarm - -import "time" - -// Service represents a service. -type Service struct { - ID string - Meta - Spec ServiceSpec `json:",omitempty"` - Endpoint Endpoint `json:",omitempty"` - UpdateStatus UpdateStatus `json:",omitempty"` -} - -// ServiceSpec represents the spec of a service. -type ServiceSpec struct { - Annotations - - // TaskTemplate defines how the service should construct new tasks when - // orchestrating this service. - TaskTemplate TaskSpec `json:",omitempty"` - Mode ServiceMode `json:",omitempty"` - UpdateConfig *UpdateConfig `json:",omitempty"` - Networks []NetworkAttachmentConfig `json:",omitempty"` - EndpointSpec *EndpointSpec `json:",omitempty"` -} - -// ServiceMode represents the mode of a service. -type ServiceMode struct { - Replicated *ReplicatedService `json:",omitempty"` - Global *GlobalService `json:",omitempty"` -} - -// UpdateState is the state of a service update. -type UpdateState string - -const ( - // UpdateStateUpdating is the updating state. - UpdateStateUpdating UpdateState = "updating" - // UpdateStatePaused is the paused state. - UpdateStatePaused UpdateState = "paused" - // UpdateStateCompleted is the completed state. - UpdateStateCompleted UpdateState = "completed" -) - -// UpdateStatus reports the status of a service update. -type UpdateStatus struct { - State UpdateState `json:",omitempty"` - StartedAt time.Time `json:",omitempty"` - CompletedAt time.Time `json:",omitempty"` - Message string `json:",omitempty"` -} - -// ReplicatedService is a kind of ServiceMode. -type ReplicatedService struct { - Replicas *uint64 `json:",omitempty"` -} - -// GlobalService is a kind of ServiceMode. -type GlobalService struct{} - -const ( - // UpdateFailureActionPause PAUSE - UpdateFailureActionPause = "pause" - // UpdateFailureActionContinue CONTINUE - UpdateFailureActionContinue = "continue" -) - -// UpdateConfig represents the update configuration. 
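// Pulling the service types above together, a sketch of a replicated
// service: three replicas, updated one at a time, pausing on failure.
// Image and name are illustrative. Note that Replicas is a *uint64, so the
// literal needs an addressable value:
var exampleReplicas uint64 = 3

var exampleServiceSpec = ServiceSpec{
	Annotations:  Annotations{Name: "web"},
	TaskTemplate: TaskSpec{ContainerSpec: ContainerSpec{Image: "nginx:1.10"}},
	Mode:         ServiceMode{Replicated: &ReplicatedService{Replicas: &exampleReplicas}},
	UpdateConfig: &UpdateConfig{
		Parallelism:   1,
		Delay:         10 * time.Second,
		FailureAction: UpdateFailureActionPause,
	},
}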
-type UpdateConfig struct { - Parallelism uint64 `json:",omitempty"` - Delay time.Duration `json:",omitempty"` - FailureAction string `json:",omitempty"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go b/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go deleted file mode 100644 index 0a5414101d..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/swarm/swarm.go +++ /dev/null @@ -1,141 +0,0 @@ -package swarm - -import "time" - -// ClusterInfo represents info about the cluster for outputting in "info". -// It contains the same information as "Swarm", but without the JoinTokens. -type ClusterInfo struct { - ID string - Meta - Spec Spec -} - -// Swarm represents a swarm. -type Swarm struct { - ClusterInfo - JoinTokens JoinTokens -} - -// JoinTokens contains the tokens workers and managers need to join the swarm. -type JoinTokens struct { - Worker string - Manager string -} - -// Spec represents the spec of a swarm. -type Spec struct { - Annotations - - Orchestration OrchestrationConfig `json:",omitempty"` - Raft RaftConfig `json:",omitempty"` - Dispatcher DispatcherConfig `json:",omitempty"` - CAConfig CAConfig `json:",omitempty"` - TaskDefaults TaskDefaults `json:",omitempty"` -} - -// OrchestrationConfig represents orchestration configuration. -type OrchestrationConfig struct { - TaskHistoryRetentionLimit int64 `json:",omitempty"` -} - -// TaskDefaults parameterizes cluster-level task creation with default values. -type TaskDefaults struct { - // LogDriver selects the log driver to use for tasks created in the - // orchestrator if unspecified by a service. - // - // Updating this value will only have an effect on new tasks. Old tasks - // will continue to use their previously configured log driver until - // recreated. - LogDriver *Driver `json:",omitempty"` -} - -// RaftConfig represents raft configuration. -type RaftConfig struct { - SnapshotInterval uint64 `json:",omitempty"` - KeepOldSnapshots uint64 `json:",omitempty"` - LogEntriesForSlowFollowers uint64 `json:",omitempty"` - HeartbeatTick uint32 `json:",omitempty"` - ElectionTick uint32 `json:",omitempty"` -} - -// DispatcherConfig represents dispatcher configuration. -type DispatcherConfig struct { - HeartbeatPeriod uint64 `json:",omitempty"` -} - -// CAConfig represents CA configuration. -type CAConfig struct { - NodeCertExpiry time.Duration `json:",omitempty"` - ExternalCAs []*ExternalCA `json:",omitempty"` -} - -// ExternalCAProtocol represents the type of an external CA. -type ExternalCAProtocol string - -// ExternalCAProtocolCFSSL CFSSL -const ExternalCAProtocolCFSSL ExternalCAProtocol = "cfssl" - -// ExternalCA defines an external CA to be used by the cluster. -type ExternalCA struct { - Protocol ExternalCAProtocol - URL string - Options map[string]string `json:",omitempty"` -} - -// InitRequest is the request used to init a swarm. -type InitRequest struct { - ListenAddr string - AdvertiseAddr string - ForceNewCluster bool - Spec Spec -} - -// JoinRequest is the request used to join a swarm. -type JoinRequest struct { - ListenAddr string - AdvertiseAddr string - RemoteAddrs []string - JoinToken string // the token needed to join the swarm -} - -// LocalNodeState represents the state of the local node.
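// A sketch of the request bodies above in use: initializing a swarm on one
// node and joining it from another; addresses and the token are placeholders:
var exampleInit = InitRequest{
	ListenAddr:    "0.0.0.0:2377",
	AdvertiseAddr: "192.168.1.10:2377",
	Spec:          Spec{Annotations: Annotations{Name: "default"}},
}

var exampleJoin = JoinRequest{
	ListenAddr:  "0.0.0.0:2377",
	RemoteAddrs: []string{"192.168.1.10:2377"},
	JoinToken:   "SWMTKN-1-...", // a worker or manager token from JoinTokens
}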
-type LocalNodeState string - -const ( - // LocalNodeStateInactive INACTIVE - LocalNodeStateInactive LocalNodeState = "inactive" - // LocalNodeStatePending PENDING - LocalNodeStatePending LocalNodeState = "pending" - // LocalNodeStateActive ACTIVE - LocalNodeStateActive LocalNodeState = "active" - // LocalNodeStateError ERROR - LocalNodeStateError LocalNodeState = "error" -) - -// Info represents generic information about swarm. -type Info struct { - NodeID string - NodeAddr string - - LocalNodeState LocalNodeState - ControlAvailable bool - Error string - - RemoteManagers []Peer - Nodes int - Managers int - - Cluster ClusterInfo -} - -// Peer represents a peer. -type Peer struct { - NodeID string - Addr string -} - -// UpdateFlags contains flags for SwarmUpdate. -type UpdateFlags struct { - RotateWorkerToken bool - RotateManagerToken bool -} diff --git a/vendor/src/github.com/docker/engine-api/types/swarm/task.go b/vendor/src/github.com/docker/engine-api/types/swarm/task.go deleted file mode 100644 index fa8228a497..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/swarm/task.go +++ /dev/null @@ -1,115 +0,0 @@ -package swarm - -import "time" - -// TaskState represents the state of a task. -type TaskState string - -const ( - // TaskStateNew NEW - TaskStateNew TaskState = "new" - // TaskStateAllocated ALLOCATED - TaskStateAllocated TaskState = "allocated" - // TaskStatePending PENDING - TaskStatePending TaskState = "pending" - // TaskStateAssigned ASSIGNED - TaskStateAssigned TaskState = "assigned" - // TaskStateAccepted ACCEPTED - TaskStateAccepted TaskState = "accepted" - // TaskStatePreparing PREPARING - TaskStatePreparing TaskState = "preparing" - // TaskStateReady READY - TaskStateReady TaskState = "ready" - // TaskStateStarting STARTING - TaskStateStarting TaskState = "starting" - // TaskStateRunning RUNNING - TaskStateRunning TaskState = "running" - // TaskStateComplete COMPLETE - TaskStateComplete TaskState = "complete" - // TaskStateShutdown SHUTDOWN - TaskStateShutdown TaskState = "shutdown" - // TaskStateFailed FAILED - TaskStateFailed TaskState = "failed" - // TaskStateRejected REJECTED - TaskStateRejected TaskState = "rejected" -) - -// Task represents a task. -type Task struct { - ID string - Meta - - Spec TaskSpec `json:",omitempty"` - ServiceID string `json:",omitempty"` - Slot int `json:",omitempty"` - NodeID string `json:",omitempty"` - Status TaskStatus `json:",omitempty"` - DesiredState TaskState `json:",omitempty"` - NetworksAttachments []NetworkAttachment `json:",omitempty"` -} - -// TaskSpec represents the spec of a task. -type TaskSpec struct { - ContainerSpec ContainerSpec `json:",omitempty"` - Resources *ResourceRequirements `json:",omitempty"` - RestartPolicy *RestartPolicy `json:",omitempty"` - Placement *Placement `json:",omitempty"` - - // LogDriver specifies the LogDriver to use for tasks created from this - // spec. If not present, the one on cluster default on swarm.Spec will be - // used, finally falling back to the engine default if not specified. - LogDriver *Driver `json:",omitempty"` -} - -// Resources represents resources (CPU/Memory). -type Resources struct { - NanoCPUs int64 `json:",omitempty"` - MemoryBytes int64 `json:",omitempty"` -} - -// ResourceRequirements represents resources requirements. -type ResourceRequirements struct { - Limits *Resources `json:",omitempty"` - Reservations *Resources `json:",omitempty"` -} - -// Placement represents orchestration parameters. 
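// The Resources fields above use absolute units: NanoCPUs are billionths of
// a CPU (1e9 == one full CPU) and MemoryBytes is plain bytes. A sketch
// reserving half a CPU / 64 MiB with a limit of one CPU / 256 MiB:
var exampleRequirements = ResourceRequirements{
	Reservations: &Resources{
		NanoCPUs:    500000000, // 0.5 CPU
		MemoryBytes: 64 * 1024 * 1024,
	},
	Limits: &Resources{
		NanoCPUs:    1000000000, // 1 CPU
		MemoryBytes: 256 * 1024 * 1024,
	},
}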
-type Placement struct { - Constraints []string `json:",omitempty"` -} - -// RestartPolicy represents the restart policy. -type RestartPolicy struct { - Condition RestartPolicyCondition `json:",omitempty"` - Delay *time.Duration `json:",omitempty"` - MaxAttempts *uint64 `json:",omitempty"` - Window *time.Duration `json:",omitempty"` -} - -// RestartPolicyCondition represents when to restart. -type RestartPolicyCondition string - -const ( - // RestartPolicyConditionNone NONE - RestartPolicyConditionNone RestartPolicyCondition = "none" - // RestartPolicyConditionOnFailure ON_FAILURE - RestartPolicyConditionOnFailure RestartPolicyCondition = "on-failure" - // RestartPolicyConditionAny ANY - RestartPolicyConditionAny RestartPolicyCondition = "any" -) - -// TaskStatus represents the status of a task. -type TaskStatus struct { - Timestamp time.Time `json:",omitempty"` - State TaskState `json:",omitempty"` - Message string `json:",omitempty"` - Err string `json:",omitempty"` - ContainerStatus ContainerStatus `json:",omitempty"` -} - -// ContainerStatus represents the status of a container. -type ContainerStatus struct { - ContainerID string `json:",omitempty"` - PID int `json:",omitempty"` - ExitCode int `json:",omitempty"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/time/duration_convert.go b/vendor/src/github.com/docker/engine-api/types/time/duration_convert.go deleted file mode 100644 index 63e1eec19e..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/time/duration_convert.go +++ /dev/null @@ -1,12 +0,0 @@ -package time - -import ( - "strconv" - "time" -) - -// DurationToSecondsString converts the specified duration to the number -// seconds it represents, formatted as a string. -func DurationToSecondsString(duration time.Duration) string { - return strconv.FormatFloat(duration.Seconds(), 'f', 0, 64) -} diff --git a/vendor/src/github.com/docker/engine-api/types/time/timestamp.go b/vendor/src/github.com/docker/engine-api/types/time/timestamp.go deleted file mode 100644 index d3695ba723..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/time/timestamp.go +++ /dev/null @@ -1,124 +0,0 @@ -package time - -import ( - "fmt" - "math" - "strconv" - "strings" - "time" -) - -// These are additional predefined layouts for use in Time.Format and Time.Parse -// with --since and --until parameters for `docker logs` and `docker events` -const ( - rFC3339Local = "2006-01-02T15:04:05" // RFC3339 with local timezone - rFC3339NanoLocal = "2006-01-02T15:04:05.999999999" // RFC3339Nano with local timezone - dateWithZone = "2006-01-02Z07:00" // RFC3339 with time at 00:00:00 - dateLocal = "2006-01-02" // RFC3339 with local timezone and time at 00:00:00 -) - -// GetTimestamp tries to parse given string as golang duration, -// then RFC3339 time and finally as a Unix timestamp. If -// any of these were successful, it returns a Unix timestamp -// as string otherwise returns the given value back. -// In case of duration input, the returned timestamp is computed -// as the given reference time minus the amount of the duration. 
-func GetTimestamp(value string, reference time.Time) (string, error) { - if d, err := time.ParseDuration(value); value != "0" && err == nil { - return strconv.FormatInt(reference.Add(-d).Unix(), 10), nil - } - - var format string - var parseInLocation bool - - // If the string has a Z, a +, or three dashes, use Parse; otherwise use ParseInLocation. - parseInLocation = !(strings.ContainsAny(value, "zZ+") || strings.Count(value, "-") == 3) - - if strings.Contains(value, ".") { - if parseInLocation { - format = rFC3339NanoLocal - } else { - format = time.RFC3339Nano - } - } else if strings.Contains(value, "T") { - // we want the number of colons in the T portion of the timestamp - tcolons := strings.Count(value, ":") - // if parseInLocation is off and we have a +/- zone offset (not Z) then - // there will be an extra colon in the input for the tz offset, so subtract - // that colon from the tcolons count - if !parseInLocation && !strings.ContainsAny(value, "zZ") && tcolons > 0 { - tcolons-- - } - if parseInLocation { - switch tcolons { - case 0: - format = "2006-01-02T15" - case 1: - format = "2006-01-02T15:04" - default: - format = rFC3339Local - } - } else { - switch tcolons { - case 0: - format = "2006-01-02T15Z07:00" - case 1: - format = "2006-01-02T15:04Z07:00" - default: - format = time.RFC3339 - } - } - } else if parseInLocation { - format = dateLocal - } else { - format = dateWithZone - } - - var t time.Time - var err error - - if parseInLocation { - t, err = time.ParseInLocation(format, value, time.FixedZone(reference.Zone())) - } else { - t, err = time.Parse(format, value) - } - - if err != nil { - // If there is a `-` then it's an RFC3339-like timestamp; otherwise assume a Unix timestamp. - if strings.Contains(value, "-") { - return "", err // was probably an RFC3339-like timestamp but the parser failed with an error - } - return value, nil // Unix timestamp in and out case (meaning: the value passed at the command line is already in the right format for passing to the server) - } - - return fmt.Sprintf("%d.%09d", t.Unix(), int64(t.Nanosecond())), nil -} - -// ParseTimestamps returns seconds and nanoseconds from a timestamp that has -// the format "%d.%09d" (time.Unix(), int64(time.Nanosecond())), as produced by -// GetTimestamp above. If the incoming nanosecond portion is longer or shorter -// than 9 digits, it is converted to nanoseconds. The expectation is that the -// seconds and nanoseconds will be used to create a time variable. For example: -// seconds, nanoseconds, err := ParseTimestamps("1136073600.000000001", 0) -// if err == nil since := time.Unix(seconds, nanoseconds) -// It returns def (the default seconds) if value == "". -func ParseTimestamps(value string, def int64) (int64, int64, error) { - if value == "" { - return def, 0, nil - } - sa := strings.SplitN(value, ".", 2) - s, err := strconv.ParseInt(sa[0], 10, 64) - if err != nil { - return s, 0, err - } - if len(sa) != 2 { - return s, 0, nil - } - n, err := strconv.ParseInt(sa[1], 10, 64) - if err != nil { - return s, n, err - } - // Should already be in nanoseconds, but convert n to nanoseconds just in case. - n = int64(float64(n) * math.Pow(float64(10), float64(9-len(sa[1])))) - return s, n, nil -} diff --git a/vendor/src/github.com/docker/engine-api/types/types.go b/vendor/src/github.com/docker/engine-api/types/types.go deleted file mode 100644 index b6f9125923..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/types.go +++ /dev/null @@ -1,515 +0,0 @@ -package types - -import ( - "os" - "time" - - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/network" - "github.com/docker/engine-api/types/registry" - "github.com/docker/engine-api/types/swarm" - "github.com/docker/go-connections/nat" -) - -// ContainerCreateResponse contains the information returned to a client on the -// creation of a new container. -type ContainerCreateResponse struct { - // ID is the ID of the created container. - ID string `json:"Id"` - - // Warnings are any warnings encountered during the creation of the container. - Warnings []string `json:"Warnings"` -} - -// ContainerExecCreateResponse contains response of Remote API: -// POST "/containers/{name:.*}/exec" -type ContainerExecCreateResponse struct { - // ID is the exec ID. - ID string `json:"Id"` -} - -// ContainerUpdateResponse contains response of Remote API: -// POST "/containers/{name:.*}/update" -type ContainerUpdateResponse struct { - // Warnings are any warnings encountered during the updating of the container. - Warnings []string `json:"Warnings"` -} - -// AuthResponse contains response of Remote API: -// POST "/auth" -type AuthResponse struct { - // Status is the authentication status - Status string `json:"Status"` - - // IdentityToken is an opaque token used for authenticating - // a user after a successful login.
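// Back in the time helpers above, a usage sketch for both functions,
// assuming the package is still importable at its pre-move path:
package main

import (
	"fmt"
	"time"

	dtime "github.com/docker/engine-api/types/time"
)

func main() {
	now := time.Now()

	// Durations are taken relative to the reference time: "10m" resolves
	// to the Unix timestamp of ten minutes before now.
	ts, _ := dtime.GetTimestamp("10m", now)
	fmt.Println(ts) // a Unix-seconds string

	// Bare dates parse in the reference time's zone and come back in the
	// "%d.%09d" form.
	ts, _ = dtime.GetTimestamp("2016-09-29", now)
	fmt.Println(ts)

	// ParseTimestamps scales a short fractional part up to nanoseconds.
	sec, nsec, _ := dtime.ParseTimestamps("1136073600.5", 0)
	fmt.Println(sec, nsec) // 1136073600 500000000
}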
- IdentityToken string `json:"IdentityToken,omitempty"` -} - -// ContainerWaitResponse contains response of Remote API: -// POST "/containers/"+containerID+"/wait" -type ContainerWaitResponse struct { - // StatusCode is the status code of the wait job - StatusCode int `json:"StatusCode"` -} - -// ContainerCommitResponse contains response of Remote API: -// POST "/commit?container="+containerID -type ContainerCommitResponse struct { - ID string `json:"Id"` -} - -// ContainerChange contains response of Remote API: -// GET "/containers/{name:.*}/changes" -type ContainerChange struct { - Kind int - Path string -} - -// ImageHistory contains response of Remote API: -// GET "/images/{name:.*}/history" -type ImageHistory struct { - ID string `json:"Id"` - Created int64 - CreatedBy string - Tags []string - Size int64 - Comment string -} - -// ImageDelete contains response of Remote API: -// DELETE "/images/{name:.*}" -type ImageDelete struct { - Untagged string `json:",omitempty"` - Deleted string `json:",omitempty"` -} - -// Image contains response of Remote API: -// GET "/images/json" -type Image struct { - ID string `json:"Id"` - ParentID string `json:"ParentId"` - RepoTags []string - RepoDigests []string - Created int64 - Size int64 - VirtualSize int64 - Labels map[string]string -} - -// GraphDriverData returns Image's graph driver config info -// when calling inspect command -type GraphDriverData struct { - Name string - Data map[string]string -} - -// RootFS returns Image's RootFS description including the layer IDs. -type RootFS struct { - Type string - Layers []string `json:",omitempty"` - BaseLayer string `json:",omitempty"` -} - -// ImageInspect contains response of Remote API: -// GET "/images/{name:.*}/json" -type ImageInspect struct { - ID string `json:"Id"` - RepoTags []string - RepoDigests []string - Parent string - Comment string - Created string - Container string - ContainerConfig *container.Config - DockerVersion string - Author string - Config *container.Config - Architecture string - Os string - Size int64 - VirtualSize int64 - GraphDriver GraphDriverData - RootFS RootFS -} - -// Port stores open ports info of container -// e.g. {"PrivatePort": 8080, "PublicPort": 80, "Type": "tcp"} -type Port struct { - IP string `json:",omitempty"` - PrivatePort int - PublicPort int `json:",omitempty"` - Type string -} - -// Container contains response of Remote API: -// GET "/containers/json" -type Container struct { - ID string `json:"Id"` - Names []string - Image string - ImageID string - Command string - Created int64 - Ports []Port - SizeRw int64 `json:",omitempty"` - SizeRootFs int64 `json:",omitempty"` - Labels map[string]string - State string - Status string - HostConfig struct { - NetworkMode string `json:",omitempty"` - } - NetworkSettings *SummaryNetworkSettings - Mounts []MountPoint -} - -// CopyConfig contains request body of Remote API: -// POST "/containers/"+containerID+"/copy" -type CopyConfig struct { - Resource string -} - -// ContainerPathStat is used to encode the header from -// GET "/containers/{name:.*}/archive" -// "Name" is the file or directory name. 
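// A sketch of decoding that stat: it travels as base64-encoded JSON in the
// X-Docker-Container-Path-Stat response header of the archive endpoints
// (header name per the engine of this era; the sample value is fabricated):
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"os"
	"time"
)

func main() {
	// Stand-in for resp.Header.Get("X-Docker-Container-Path-Stat").
	hdr := base64.StdEncoding.EncodeToString([]byte(
		`{"name":"app.conf","size":12,"mode":420,"mtime":"2016-09-29T11:19:31Z","linkTarget":""}`))

	raw, err := base64.StdEncoding.DecodeString(hdr)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	var stat struct {
		Name  string      `json:"name"`
		Size  int64       `json:"size"`
		Mode  os.FileMode `json:"mode"`
		Mtime time.Time   `json:"mtime"`
	}
	if err := json.Unmarshal(raw, &stat); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("%s: %d bytes, mode %v\n", stat.Name, stat.Size, stat.Mode)
}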
-type ContainerPathStat struct { - Name string `json:"name"` - Size int64 `json:"size"` - Mode os.FileMode `json:"mode"` - Mtime time.Time `json:"mtime"` - LinkTarget string `json:"linkTarget"` -} - -// ContainerProcessList contains response of Remote API: -// GET "/containers/{name:.*}/top" -type ContainerProcessList struct { - Processes [][]string - Titles []string -} - -// Version contains response of Remote API: -// GET "/version" -type Version struct { - Version string - APIVersion string `json:"ApiVersion"` - GitCommit string - GoVersion string - Os string - Arch string - KernelVersion string `json:",omitempty"` - Experimental bool `json:",omitempty"` - BuildTime string `json:",omitempty"` -} - -// Info contains response of Remote API: -// GET "/info" -type Info struct { - ID string - Containers int - ContainersRunning int - ContainersPaused int - ContainersStopped int - Images int - Driver string - DriverStatus [][2]string - SystemStatus [][2]string - Plugins PluginsInfo - MemoryLimit bool - SwapLimit bool - KernelMemory bool - CPUCfsPeriod bool `json:"CpuCfsPeriod"` - CPUCfsQuota bool `json:"CpuCfsQuota"` - CPUShares bool - CPUSet bool - IPv4Forwarding bool - BridgeNfIptables bool - BridgeNfIP6tables bool `json:"BridgeNfIp6tables"` - Debug bool - NFd int - OomKillDisable bool - NGoroutines int - SystemTime string - ExecutionDriver string - LoggingDriver string - CgroupDriver string - NEventsListener int - KernelVersion string - OperatingSystem string - OSType string - Architecture string - IndexServerAddress string - RegistryConfig *registry.ServiceConfig - NCPU int - MemTotal int64 - DockerRootDir string - HTTPProxy string `json:"HttpProxy"` - HTTPSProxy string `json:"HttpsProxy"` - NoProxy string - Name string - Labels []string - ExperimentalBuild bool - ServerVersion string - ClusterStore string - ClusterAdvertise string - SecurityOptions []string - Runtimes map[string]Runtime - DefaultRuntime string - Swarm swarm.Info - // LiveRestoreEnabled determines whether containers should be kept - // running when the daemon is shutdown or upon daemon start if - // running containers are detected - LiveRestoreEnabled bool -} - -// PluginsInfo is a temp struct holding Plugins name -// registered with docker daemon. 
It is used by the Info struct. -type PluginsInfo struct { - // List of Volume plugins registered - Volume []string - // List of Network plugins registered - Network []string - // List of Authorization plugins registered - Authorization []string -} - -// ExecStartCheck is a temp struct used by execStart -// Config fields are part of ExecConfig in the runconfig package -type ExecStartCheck struct { - // ExecStart will first check if it's detached - Detach bool - // Check if there's a tty - Tty bool -} - -// HealthcheckResult stores information about a single run of a healthcheck probe -type HealthcheckResult struct { - Start time.Time // Start is the time this check started - End time.Time // End is the time this check ended - ExitCode int // ExitCode meanings: 0=healthy, 1=unhealthy, 2=reserved (considered unhealthy), else=error running probe - Output string // Output from last check -} - -// Health states -const ( - Starting = "starting" // Starting indicates that the container is not yet ready - Healthy = "healthy" // Healthy indicates that the container is running correctly - Unhealthy = "unhealthy" // Unhealthy indicates that the container has a problem -) - -// Health stores information about the container's healthcheck results -type Health struct { - Status string // Status is one of Starting, Healthy or Unhealthy - FailingStreak int // FailingStreak is the number of consecutive failures - Log []*HealthcheckResult // Log contains the last few results (oldest first) -} - -// ContainerState stores a container's running state. -// It's part of ContainerJSONBase and is returned by the "inspect" command. -type ContainerState struct { - Status string - Running bool - Paused bool - Restarting bool - OOMKilled bool - Dead bool - Pid int - ExitCode int - Error string - StartedAt string - FinishedAt string - Health *Health `json:",omitempty"` -} - -// ContainerNode stores information about the node that a container -// is running on. It's only available in Docker Swarm -type ContainerNode struct { - ID string - IPAddress string `json:"IP"` - Addr string - Name string - Cpus int - Memory int64 - Labels map[string]string -} - -// ContainerJSONBase contains response of Remote API: -// GET "/containers/{name:.*}/json" -type ContainerJSONBase struct { - ID string `json:"Id"` - Created string - Path string - Args []string - State *ContainerState - Image string - ResolvConfPath string - HostnamePath string - HostsPath string - LogPath string - Node *ContainerNode `json:",omitempty"` - Name string - RestartCount int - Driver string - MountLabel string - ProcessLabel string - AppArmorProfile string - ExecIDs []string - HostConfig *container.HostConfig - GraphDriver GraphDriverData - SizeRw *int64 `json:",omitempty"` - SizeRootFs *int64 `json:",omitempty"` -} - -// ContainerJSON is the newer inspect struct, which reports mounts as MountPoint entries -type ContainerJSON struct { - *ContainerJSONBase - Mounts []MountPoint - Config *container.Config - NetworkSettings *NetworkSettings -} - -// NetworkSettings exposes the network settings in the API -type NetworkSettings struct { - NetworkSettingsBase - DefaultNetworkSettings - Networks map[string]*network.EndpointSettings -} - -// SummaryNetworkSettings provides a summary of a container's networks -// in /containers/json -type SummaryNetworkSettings struct { - Networks map[string]*network.EndpointSettings -} - -// NetworkSettingsBase holds basic information about networks -type NetworkSettingsBase struct { - Bridge string // Bridge is the Bridge name the network uses (e.g.
`docker0`) - SandboxID string // SandboxID uniquely represents a container's network stack - HairpinMode bool // HairpinMode specifies if hairpin NAT should be enabled on the virtual interface - LinkLocalIPv6Address string // LinkLocalIPv6Address is an IPv6 unicast address using the link-local prefix - LinkLocalIPv6PrefixLen int // LinkLocalIPv6PrefixLen is the prefix length of an IPv6 unicast address - Ports nat.PortMap // Ports is a collection of PortBinding indexed by Port - SandboxKey string // SandboxKey identifies the sandbox - SecondaryIPAddresses []network.Address - SecondaryIPv6Addresses []network.Address -} - -// DefaultNetworkSettings holds network information -// during the 2 release deprecation period. -// It will be removed in Docker 1.11. -type DefaultNetworkSettings struct { - EndpointID string // EndpointID uniquely represents a service endpoint in a Sandbox - Gateway string // Gateway holds the gateway address for the network - GlobalIPv6Address string // GlobalIPv6Address holds network's global IPv6 address - GlobalIPv6PrefixLen int // GlobalIPv6PrefixLen represents mask length of network's global IPv6 address - IPAddress string // IPAddress holds the IPv4 address for the network - IPPrefixLen int // IPPrefixLen represents mask length of network's IPv4 address - IPv6Gateway string // IPv6Gateway holds gateway address specific for IPv6 - MacAddress string // MacAddress holds the MAC address for the network -} - -// MountPoint represents a mount point configuration inside the container. -type MountPoint struct { - Name string `json:",omitempty"` - Source string - Destination string - Driver string `json:",omitempty"` - Mode string - RW bool - Propagation string -} - -// Volume represents the configuration of a volume for the remote API -type Volume struct { - Name string // Name is the name of the volume - Driver string // Driver is the Driver name used to create the volume - Mountpoint string // Mountpoint is the location on disk of the volume - Status map[string]interface{} `json:",omitempty"` // Status provides low-level status information about the volume - Labels map[string]string // Labels is metadata specific to the volume - Scope string // Scope describes the level at which the volume exists (e.g. `global` for cluster-wide or `local` for machine level) -} - -// VolumesListResponse contains the response for the remote API: -// GET "/volumes" -type VolumesListResponse struct { - Volumes []*Volume // Volumes is the list of volumes being returned - Warnings []string // Warnings is a list of warnings that occurred when getting the list from the volume drivers -} - -// VolumeCreateRequest contains the response for the remote API: -// POST "/volumes/create" -type VolumeCreateRequest struct { - Name string // Name is the requested name of the volume - Driver string // Driver is the name of the driver that should be used to create the volume - DriverOpts map[string]string // DriverOpts holds the driver specific options to use for when creating the volume. - Labels map[string]string // Labels holds metadata specific to the volume being created. -} - -// NetworkResource is the body of the "get network" http response message -type NetworkResource struct { - Name string // Name is the requested name of the network - ID string `json:"Id"` // ID uniquely identifies a network on a single machine - Scope string // Scope describes the level at which the network exists (e.g. 
`global` for cluster-wide or `local` for machine level) - Driver string // Driver is the Driver name used to create the network (e.g. `bridge`, `overlay`) - EnableIPv6 bool // EnableIPv6 represents whether to enable IPv6 - IPAM network.IPAM // IPAM is the network's IP Address Management - Internal bool // Internal represents if the network is used internal only - Containers map[string]EndpointResource // Containers contains endpoints belonging to the network - Options map[string]string // Options holds the network specific options to use for when creating the network - Labels map[string]string // Labels holds metadata specific to the network being created -} - -// EndpointResource contains network resources allocated and used for a container in a network -type EndpointResource struct { - Name string - EndpointID string - MacAddress string - IPv4Address string - IPv6Address string -} - -// NetworkCreate is the expected body of the "create network" http request message -type NetworkCreate struct { - CheckDuplicate bool - Driver string - EnableIPv6 bool - IPAM network.IPAM - Internal bool - Options map[string]string - Labels map[string]string -} - -// NetworkCreateRequest is the request message sent to the server for network create call. -type NetworkCreateRequest struct { - NetworkCreate - Name string -} - -// NetworkCreateResponse is the response message sent by the server for network create call -type NetworkCreateResponse struct { - ID string `json:"Id"` - Warning string -} - -// NetworkConnect represents the data to be used to connect a container to the network -type NetworkConnect struct { - Container string - EndpointConfig *network.EndpointSettings `json:",omitempty"` -} - -// NetworkDisconnect represents the data to be used to disconnect a container from the network -type NetworkDisconnect struct { - Container string - Force bool -} - -// Checkpoint represents the details of a checkpoint -type Checkpoint struct { - Name string // Name is the name of the checkpoint -} - -// Runtime describes an OCI runtime -type Runtime struct { - Path string `json:"path"` - Args []string `json:"runtimeArgs,omitempty"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/versions/README.md b/vendor/src/github.com/docker/engine-api/types/versions/README.md deleted file mode 100644 index cdac50a53c..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/versions/README.md +++ /dev/null @@ -1,14 +0,0 @@ -## Legacy API type versions - -This package includes types for legacy API versions. The stable version of the API types live in `api/types/*.go`. - -Consider moving a type here when you need to keep backwards compatibility in the API. This legacy types are organized by the latest API version they appear in. For instance, types in the `v1p19` package are valid for API versions below or equal `1.19`. Types in the `v1p20` package are valid for the API version `1.20`, since the versions below that will use the legacy types in `v1p19`. - -### Package name conventions - -The package name convention is to use `v` as a prefix for the version number and `p`(patch) as a separator. We use this nomenclature due to a few restrictions in the Go package name convention: - -1. We cannot use `.` because it's interpreted by the language, think of `v1.20.CallFunction`. -2. We cannot use `_` because golint complains about it. The code is actually valid, but it looks probably more weird: `v1_20.CallFunction`. 
- -For instance, if you want to modify a type that was available in the version `1.21` of the API but it will have different fields in the version `1.22`, you want to create a new package under `api/types/versions/v1p21`. diff --git a/vendor/src/github.com/docker/engine-api/types/versions/compare.go b/vendor/src/github.com/docker/engine-api/types/versions/compare.go deleted file mode 100644 index 611d4fed66..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/versions/compare.go +++ /dev/null @@ -1,62 +0,0 @@ -package versions - -import ( - "strconv" - "strings" -) - -// compare compares two version strings -// returns -1 if v1 < v2, 1 if v1 > v2, 0 otherwise. -func compare(v1, v2 string) int { - var ( - currTab = strings.Split(v1, ".") - otherTab = strings.Split(v2, ".") - ) - - max := len(currTab) - if len(otherTab) > max { - max = len(otherTab) - } - for i := 0; i < max; i++ { - var currInt, otherInt int - - if len(currTab) > i { - currInt, _ = strconv.Atoi(currTab[i]) - } - if len(otherTab) > i { - otherInt, _ = strconv.Atoi(otherTab[i]) - } - if currInt > otherInt { - return 1 - } - if otherInt > currInt { - return -1 - } - } - return 0 -} - -// LessThan checks if a version is less than another -func LessThan(v, other string) bool { - return compare(v, other) == -1 -} - -// LessThanOrEqualTo checks if a version is less than or equal to another -func LessThanOrEqualTo(v, other string) bool { - return compare(v, other) <= 0 -} - -// GreaterThan checks if a version is greater than another -func GreaterThan(v, other string) bool { - return compare(v, other) == 1 -} - -// GreaterThanOrEqualTo checks if a version is greater than or equal to another -func GreaterThanOrEqualTo(v, other string) bool { - return compare(v, other) >= 0 -} - -// Equal checks if a version is equal to another -func Equal(v, other string) bool { - return compare(v, other) == 0 -} diff --git a/vendor/src/github.com/docker/engine-api/types/versions/v1p19/types.go b/vendor/src/github.com/docker/engine-api/types/versions/v1p19/types.go deleted file mode 100644 index 4ed4335881..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/versions/v1p19/types.go +++ /dev/null @@ -1,35 +0,0 @@ -// Package v1p19 provides specific API types for the API version 1, patch 19. -package v1p19 - -import ( - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/engine-api/types/versions/v1p20" - "github.com/docker/go-connections/nat" -) - -// ContainerJSON is a backcompatibility struct for APIs prior to 1.20. -// Note this is not used by the Windows daemon. -type ContainerJSON struct { - *types.ContainerJSONBase - Volumes map[string]string - VolumesRW map[string]bool - Config *ContainerConfig - NetworkSettings *v1p20.NetworkSettings -} - -// ContainerConfig is a backcompatibility struct for APIs prior to 1.20. 
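// The comparison above is numeric per dot-separated segment, which matters
// for API versions: "1.10" is greater than "1.9", where a plain string
// comparison would say the opposite. A usage sketch, assuming the package
// is still importable at its pre-move path:
package main

import (
	"fmt"

	"github.com/docker/engine-api/types/versions"
)

func main() {
	fmt.Println(versions.GreaterThan("1.10", "1.9"))        // true: 10 > 9
	fmt.Println("1.10" > "1.9")                             // false: lexical order
	fmt.Println(versions.LessThanOrEqualTo("1.19", "1.19")) // true
}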
-type ContainerConfig struct { - *container.Config - - MacAddress string - NetworkDisabled bool - ExposedPorts map[nat.Port]struct{} - - // backward compatibility, they now live in HostConfig - VolumeDriver string - Memory int64 - MemorySwap int64 - CPUShares int64 `json:"CpuShares"` - CPUSet string `json:"Cpuset"` -} diff --git a/vendor/src/github.com/docker/engine-api/types/versions/v1p20/types.go b/vendor/src/github.com/docker/engine-api/types/versions/v1p20/types.go deleted file mode 100644 index 5736efad00..0000000000 --- a/vendor/src/github.com/docker/engine-api/types/versions/v1p20/types.go +++ /dev/null @@ -1,40 +0,0 @@ -// Package v1p20 provides specific API types for the API version 1, patch 20. -package v1p20 - -import ( - "github.com/docker/engine-api/types" - "github.com/docker/engine-api/types/container" - "github.com/docker/go-connections/nat" -) - -// ContainerJSON is a backcompatibility struct for the API 1.20 -type ContainerJSON struct { - *types.ContainerJSONBase - Mounts []types.MountPoint - Config *ContainerConfig - NetworkSettings *NetworkSettings -} - -// ContainerConfig is a backcompatibility struct used in ContainerJSON for the API 1.20 -type ContainerConfig struct { - *container.Config - - MacAddress string - NetworkDisabled bool - ExposedPorts map[nat.Port]struct{} - - // backward compatibility, they now live in HostConfig - VolumeDriver string -} - -// StatsJSON is a backcompatibility struct used in Stats for APIs prior to 1.21 -type StatsJSON struct { - types.Stats - Network types.NetworkStats `json:"network,omitempty"` -} - -// NetworkSettings is a backward compatible struct for APIs prior to 1.21 -type NetworkSettings struct { - types.NetworkSettingsBase - types.DefaultNetworkSettings -} diff --git a/vendor/src/github.com/docker/go-connections/LICENSE b/vendor/src/github.com/docker/go-connections/LICENSE deleted file mode 100644 index b55b37bc31..0000000000 --- a/vendor/src/github.com/docker/go-connections/LICENSE +++ /dev/null @@ -1,191 +0,0 @@ - - Apache License - Version 2.0, January 2004 - https://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/src/github.com/docker/go-connections/nat/nat.go b/vendor/src/github.com/docker/go-connections/nat/nat.go deleted file mode 100644 index bca3c2c99a..0000000000 --- a/vendor/src/github.com/docker/go-connections/nat/nat.go +++ /dev/null @@ -1,243 +0,0 @@ -// Package nat is a convenience package for manipulation of strings describing network ports. -package nat - -import ( - "fmt" - "net" - "strconv" - "strings" -) - -const ( - // portSpecTemplate is the expected format for port specifications - portSpecTemplate = "ip:hostPort:containerPort" -) - -// PortBinding represents a binding between a Host IP address and a Host Port -type PortBinding struct { - // HostIP is the host IP Address - HostIP string `json:"HostIp"` - // HostPort is the host port number - HostPort string -} - -// PortMap is a collection of PortBinding indexed by Port -type PortMap map[Port][]PortBinding - -// PortSet is a collection of structs indexed by Port -type PortSet map[Port]struct{} - -// Port is a string containing port number and protocol in the format "80/tcp" -type Port string - -// NewPort creates a new instance of a Port given a protocol and port number or port range -func NewPort(proto, port string) (Port, error) { - // Check for parsing issues on "port" now so we can avoid having - // to check it later on. 
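A minimal sketch of how a caller might exercise these Port helpers, assuming the vendored import path `github.com/docker/go-connections/nat` and illustrative values:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// NewPort validates the port up front, so the accessors below can
	// safely ignore parse errors.
	p, err := nat.NewPort("tcp", "8080")
	if err != nil {
		panic(err)
	}
	fmt.Println(p.Proto()) // "tcp"
	fmt.Println(p.Port())  // "8080"
	fmt.Println(p.Int())   // 8080
}
```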
- - portStartInt, portEndInt, err := ParsePortRangeToInt(port) - if err != nil { - return "", err - } - - if portStartInt == portEndInt { - return Port(fmt.Sprintf("%d/%s", portStartInt, proto)), nil - } - return Port(fmt.Sprintf("%d-%d/%s", portStartInt, portEndInt, proto)), nil -} - -// ParsePort parses the port number string and returns an int -func ParsePort(rawPort string) (int, error) { - if len(rawPort) == 0 { - return 0, nil - } - port, err := strconv.ParseUint(rawPort, 10, 16) - if err != nil { - return 0, err - } - return int(port), nil -} - -// ParsePortRangeToInt parses the port range string and returns start/end ints -func ParsePortRangeToInt(rawPort string) (int, int, error) { - if len(rawPort) == 0 { - return 0, 0, nil - } - start, end, err := ParsePortRange(rawPort) - if err != nil { - return 0, 0, err - } - return int(start), int(end), nil -} - -// Proto returns the protocol of a Port -func (p Port) Proto() string { - proto, _ := SplitProtoPort(string(p)) - return proto -} - -// Port returns the port number of a Port -func (p Port) Port() string { - _, port := SplitProtoPort(string(p)) - return port -} - -// Int returns the port number of a Port as an int -func (p Port) Int() int { - portStr := p.Port() - if len(portStr) == 0 { - return 0 - } - - // We don't need to check for an error because we're going to - // assume that any error would have been found, and reported, in NewPort() - port, _ := strconv.ParseUint(portStr, 10, 16) - return int(port) -} - -// Range returns the start/end port numbers of a Port range as ints -func (p Port) Range() (int, int, error) { - return ParsePortRangeToInt(p.Port()) -} - -// SplitProtoPort splits a port in the format of proto/port -func SplitProtoPort(rawPort string) (string, string) { - parts := strings.Split(rawPort, "/") - l := len(parts) - if len(rawPort) == 0 || l == 0 || len(parts[0]) == 0 { - return "", "" - } - if l == 1 { - return "tcp", rawPort - } - if len(parts[1]) == 0 { - return "tcp", parts[0] - } - return parts[1], parts[0] -} - -func validateProto(proto string) bool { - for _, availableProto := range []string{"tcp", "udp"} { - if availableProto == proto { - return true - } - } - return false -} - -// ParsePortSpecs receives port specs in the format of ip:public:private/proto and parses -// these in to the internal types -func ParsePortSpecs(ports []string) (map[Port]struct{}, map[Port][]PortBinding, error) { - var ( - exposedPorts = make(map[Port]struct{}, len(ports)) - bindings = make(map[Port][]PortBinding) - ) - for _, rawPort := range ports { - portMappings, err := ParsePortSpec(rawPort) - if err != nil { - return nil, nil, err - } - - for _, portMapping := range portMappings { - port := portMapping.Port - if _, exists := exposedPorts[port]; !exists { - exposedPorts[port] = struct{}{} - } - bslice, exists := bindings[port] - if !exists { - bslice = []PortBinding{} - } - bindings[port] = append(bslice, portMapping.Binding) - } - } - return exposedPorts, bindings, nil -} - -// PortMapping is a data object mapping a Port to a PortBinding -type PortMapping struct { - Port Port - Binding PortBinding -} - -// ParsePortSpec parses a port specification string into a slice of PortMappings -func ParsePortSpec(rawPort string) ([]PortMapping, error) { - proto := "tcp" - - if i := strings.LastIndex(rawPort, "/"); i != -1 { - proto = rawPort[i+1:] - rawPort = rawPort[:i] - } - if !strings.Contains(rawPort, ":") { - rawPort = fmt.Sprintf("::%s", rawPort) - } else if len(strings.Split(rawPort, ":")) == 2 { - rawPort = 
fmt.Sprintf(":%s", rawPort) - } - - parts, err := PartParser(portSpecTemplate, rawPort) - if err != nil { - return nil, err - } - - var ( - containerPort = parts["containerPort"] - rawIP = parts["ip"] - hostPort = parts["hostPort"] - ) - - if rawIP != "" && net.ParseIP(rawIP) == nil { - return nil, fmt.Errorf("Invalid ip address: %s", rawIP) - } - if containerPort == "" { - return nil, fmt.Errorf("No port specified: %s", rawPort) - } - - startPort, endPort, err := ParsePortRange(containerPort) - if err != nil { - return nil, fmt.Errorf("Invalid containerPort: %s", containerPort) - } - - var startHostPort, endHostPort uint64 = 0, 0 - if len(hostPort) > 0 { - startHostPort, endHostPort, err = ParsePortRange(hostPort) - if err != nil { - return nil, fmt.Errorf("Invalid hostPort: %s", hostPort) - } - } - - if hostPort != "" && (endPort-startPort) != (endHostPort-startHostPort) { - // Allow host port range iff containerPort is not a range. - // In this case, use the host port range as the dynamic - // host port range to allocate into. - if endPort != startPort { - return nil, fmt.Errorf("Invalid ranges specified for container and host Ports: %s and %s", containerPort, hostPort) - } - } - - if !validateProto(strings.ToLower(proto)) { - return nil, fmt.Errorf("Invalid proto: %s", proto) - } - - ports := []PortMapping{} - for i := uint64(0); i <= (endPort - startPort); i++ { - containerPort = strconv.FormatUint(startPort+i, 10) - if len(hostPort) > 0 { - hostPort = strconv.FormatUint(startHostPort+i, 10) - } - // Set hostPort to a range only if there is a single container port - // and a dynamic host port. - if startPort == endPort && startHostPort != endHostPort { - hostPort = fmt.Sprintf("%s-%s", hostPort, strconv.FormatUint(endHostPort, 10)) - } - port, err := NewPort(strings.ToLower(proto), containerPort) - if err != nil { - return nil, err - } - - binding := PortBinding{ - HostIP: rawIP, - HostPort: hostPort, - } - ports = append(ports, PortMapping{Port: port, Binding: binding}) - } - return ports, nil -} diff --git a/vendor/src/github.com/docker/go-connections/nat/parse.go b/vendor/src/github.com/docker/go-connections/nat/parse.go deleted file mode 100644 index 872050205f..0000000000 --- a/vendor/src/github.com/docker/go-connections/nat/parse.go +++ /dev/null @@ -1,56 +0,0 @@ -package nat - -import ( - "fmt" - "strconv" - "strings" -) - -// PartParser parses and validates the specified string (data) using the specified template -// e.g. ip:public:private -> 192.168.0.1:80:8000 -func PartParser(template, data string) (map[string]string, error) { - // ip:public:private - var ( - templateParts = strings.Split(template, ":") - parts = strings.Split(data, ":") - out = make(map[string]string, len(templateParts)) - ) - if len(parts) != len(templateParts) { - return nil, fmt.Errorf("Invalid format to parse. 
%s should match template %s", data, template) - } - - for i, t := range templateParts { - value := "" - if len(parts) > i { - value = parts[i] - } - out[t] = value - } - return out, nil -} - -// ParsePortRange parses and validates the specified string as a port-range (8000-9000) -func ParsePortRange(ports string) (uint64, uint64, error) { - if ports == "" { - return 0, 0, fmt.Errorf("Empty string specified for ports.") - } - if !strings.Contains(ports, "-") { - start, err := strconv.ParseUint(ports, 10, 16) - end := start - return start, end, err - } - - parts := strings.Split(ports, "-") - start, err := strconv.ParseUint(parts[0], 10, 16) - if err != nil { - return 0, 0, err - } - end, err := strconv.ParseUint(parts[1], 10, 16) - if err != nil { - return 0, 0, err - } - if end < start { - return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports) - } - return start, end, nil -} diff --git a/vendor/src/github.com/docker/go-connections/nat/sort.go b/vendor/src/github.com/docker/go-connections/nat/sort.go deleted file mode 100644 index ce950171e3..0000000000 --- a/vendor/src/github.com/docker/go-connections/nat/sort.go +++ /dev/null @@ -1,96 +0,0 @@ -package nat - -import ( - "sort" - "strings" -) - -type portSorter struct { - ports []Port - by func(i, j Port) bool -} - -func (s *portSorter) Len() int { - return len(s.ports) -} - -func (s *portSorter) Swap(i, j int) { - s.ports[i], s.ports[j] = s.ports[j], s.ports[i] -} - -func (s *portSorter) Less(i, j int) bool { - ip := s.ports[i] - jp := s.ports[j] - - return s.by(ip, jp) -} - -// Sort sorts a list of ports using the provided predicate -// This function should compare `i` and `j`, returning true if `i` is -// considered to be less than `j` -func Sort(ports []Port, predicate func(i, j Port) bool) { - s := &portSorter{ports, predicate} - sort.Sort(s) -} - -type portMapEntry struct { - port Port - binding PortBinding -} - -type portMapSorter []portMapEntry - -func (s portMapSorter) Len() int { return len(s) } -func (s portMapSorter) Swap(i, j int) { s[i], s[j] = s[j], s[i] } - -// sorts the ports so that the order is: -// 1. port with larger specified bindings -// 2. larger port -// 3. port with tcp protocol -func (s portMapSorter) Less(i, j int) bool { - pi, pj := s[i].port, s[j].port - hpi, hpj := toInt(s[i].binding.HostPort), toInt(s[j].binding.HostPort) - return hpi > hpj || pi.Int() > pj.Int() || (pi.Int() == pj.Int() && strings.ToLower(pi.Proto()) == "tcp") -} - -// SortPortMap sorts the list of ports and their respective mapping. The ports -// with explicit HostPort will be placed first. 
-func SortPortMap(ports []Port, bindings PortMap) { - s := portMapSorter{} - for _, p := range ports { - if binding, ok := bindings[p]; ok { - for _, b := range binding { - s = append(s, portMapEntry{port: p, binding: b}) - } - bindings[p] = []PortBinding{} - } else { - s = append(s, portMapEntry{port: p}) - } - } - - sort.Sort(s) - var ( - i int - pm = make(map[Port]struct{}) - ) - // reorder ports - for _, entry := range s { - if _, ok := pm[entry.port]; !ok { - ports[i] = entry.port - pm[entry.port] = struct{}{} - i++ - } - // reorder bindings for this port - if _, ok := bindings[entry.port]; ok { - bindings[entry.port] = append(bindings[entry.port], entry.binding) - } - } -} - -func toInt(s string) uint64 { - i, _, err := ParsePortRange(s) - if err != nil { - i = 0 - } - return i -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/README.md b/vendor/src/github.com/docker/go-connections/sockets/README.md deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go b/vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go deleted file mode 100644 index 99846ffddb..0000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/inmem_socket.go +++ /dev/null @@ -1,81 +0,0 @@ -package sockets - -import ( - "errors" - "net" - "sync" -) - -var errClosed = errors.New("use of closed network connection") - -// InmemSocket implements net.Listener using in-memory only connections. -type InmemSocket struct { - chConn chan net.Conn - chClose chan struct{} - addr string - mu sync.Mutex -} - -// dummyAddr is used to satisfy net.Addr for the in-mem socket -// it is just stored as a string and returns the string for all calls -type dummyAddr string - -// NewInmemSocket creates an in-memory only net.Listener -// The addr argument can be any string, but is used to satisfy the `Addr()` part -// of the net.Listener interface -func NewInmemSocket(addr string, bufSize int) *InmemSocket { - return &InmemSocket{ - chConn: make(chan net.Conn, bufSize), - chClose: make(chan struct{}), - addr: addr, - } -} - -// Addr returns the socket's addr string to satisfy net.Listener -func (s *InmemSocket) Addr() net.Addr { - return dummyAddr(s.addr) -} - -// Accept implements the Accept method in the Listener interface; it waits for the next call and returns a generic Conn. -func (s *InmemSocket) Accept() (net.Conn, error) { - select { - case conn := <-s.chConn: - return conn, nil - case <-s.chClose: - return nil, errClosed - } -} - -// Close closes the listener. It will be unavailable for use once closed. 
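Taken together, the parsing and sorting helpers above compose naturally. A hedged end-to-end sketch, assuming the vendored import path and an illustrative `ip:hostPort:containerPort` spec:

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// Parse a port spec into the exposed-port set and the binding map.
	exposed, bindings, err := nat.ParsePortSpecs([]string{"127.0.0.1:8080:80/tcp"})
	if err != nil {
		panic(err)
	}

	// Collect the ports and sort them; entries with an explicit
	// HostPort sort first, as described above.
	ports := make([]nat.Port, 0, len(exposed))
	for p := range exposed {
		ports = append(ports, p)
	}
	nat.SortPortMap(ports, bindings)

	for _, p := range ports {
		fmt.Println(p, bindings[p]) // 80/tcp [{127.0.0.1 8080}]
	}
}
```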
-func (s *InmemSocket) Close() error { - s.mu.Lock() - defer s.mu.Unlock() - select { - case <-s.chClose: - default: - close(s.chClose) - } - return nil -} - -// Dial is used to establish a connection with the in-mem server -func (s *InmemSocket) Dial(network, addr string) (net.Conn, error) { - srvConn, clientConn := net.Pipe() - select { - case s.chConn <- srvConn: - case <-s.chClose: - return nil, errClosed - } - - return clientConn, nil -} - -// Network returns the addr string, satisfies net.Addr -func (a dummyAddr) Network() string { - return string(a) -} - -// String returns the string form -func (a dummyAddr) String() string { - return string(a) -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/proxy.go b/vendor/src/github.com/docker/go-connections/sockets/proxy.go deleted file mode 100644 index 98e9a1dc61..0000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/proxy.go +++ /dev/null @@ -1,51 +0,0 @@ -package sockets - -import ( - "net" - "net/url" - "os" - "strings" - - "golang.org/x/net/proxy" -) - -// GetProxyEnv allows access to the uppercase and the lowercase forms of -// proxy-related variables. See the Go specification for details on these -// variables. https://golang.org/pkg/net/http/ -func GetProxyEnv(key string) string { - proxyValue := os.Getenv(strings.ToUpper(key)) - if proxyValue == "" { - return os.Getenv(strings.ToLower(key)) - } - return proxyValue -} - -// DialerFromEnvironment takes in a "direct" *net.Dialer and returns a -// proxy.Dialer which will route the connections through the proxy using the -// given dialer. -func DialerFromEnvironment(direct *net.Dialer) (proxy.Dialer, error) { - allProxy := GetProxyEnv("all_proxy") - if len(allProxy) == 0 { - return direct, nil - } - - proxyURL, err := url.Parse(allProxy) - if err != nil { - return direct, err - } - - proxyFromURL, err := proxy.FromURL(proxyURL, direct) - if err != nil { - return direct, err - } - - noProxy := GetProxyEnv("no_proxy") - if len(noProxy) == 0 { - return proxyFromURL, nil - } - - perHost := proxy.NewPerHost(proxyFromURL, direct) - perHost.AddFromString(noProxy) - - return perHost, nil -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/sockets.go b/vendor/src/github.com/docker/go-connections/sockets/sockets.go deleted file mode 100644 index 1739cecf2a..0000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/sockets.go +++ /dev/null @@ -1,42 +0,0 @@ -// Package sockets provides helper functions to create and configure Unix or TCP sockets. -package sockets - -import ( - "net" - "net/http" - "time" -) - -// Why 32? See https://github.com/docker/docker/pull/8035. -const defaultTimeout = 32 * time.Second - -// ConfigureTransport configures the specified Transport according to the -// specified proto and addr. -// If the proto is unix (using a unix socket to communicate) or npipe the -// compression is disabled. -func ConfigureTransport(tr *http.Transport, proto, addr string) error { - switch proto { - case "unix": - // No need for compression in local communications. - tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return net.DialTimeout(proto, addr, defaultTimeout) - } - case "npipe": - // No need for compression in local communications. 
- tr.DisableCompression = true - tr.Dial = func(_, _ string) (net.Conn, error) { - return DialPipe(addr, defaultTimeout) - } - default: - tr.Proxy = http.ProxyFromEnvironment - dialer, err := DialerFromEnvironment(&net.Dialer{ - Timeout: defaultTimeout, - }) - if err != nil { - return err - } - tr.Dial = dialer.Dial - } - return nil -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go b/vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go deleted file mode 100644 index b255ac9ac7..0000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/sockets_unix.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !windows - -package sockets - -import ( - "net" - "syscall" - "time" -) - -// DialPipe connects to a Windows named pipe. -// This is not supported on other OSes. -func DialPipe(_ string, _ time.Duration) (net.Conn, error) { - return nil, syscall.EAFNOSUPPORT -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go b/vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go deleted file mode 100644 index 1f3540b2fe..0000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/sockets_windows.go +++ /dev/null @@ -1,13 +0,0 @@ -package sockets - -import ( - "net" - "time" - - "github.com/Microsoft/go-winio" -) - -// DialPipe connects to a Windows named pipe. -func DialPipe(addr string, timeout time.Duration) (net.Conn, error) { - return winio.DialPipe(addr, &timeout) -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go b/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go deleted file mode 100644 index 8a82727df0..0000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/tcp_socket.go +++ /dev/null @@ -1,22 +0,0 @@ -// Package sockets provides helper functions to create and configure Unix or TCP sockets. -package sockets - -import ( - "crypto/tls" - "net" -) - -// NewTCPSocket creates a TCP socket listener with the specified address and -// and the specified tls configuration. If TLSConfig is set, will encapsulate the -// TCP listener inside a TLS one. -func NewTCPSocket(addr string, tlsConfig *tls.Config) (net.Listener, error) { - l, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - if tlsConfig != nil { - tlsConfig.NextProtos = []string{"http/1.1"} - l = tls.NewListener(l, tlsConfig) - } - return l, nil -} diff --git a/vendor/src/github.com/docker/go-connections/sockets/unix_socket.go b/vendor/src/github.com/docker/go-connections/sockets/unix_socket.go deleted file mode 100644 index d1627349f8..0000000000 --- a/vendor/src/github.com/docker/go-connections/sockets/unix_socket.go +++ /dev/null @@ -1,80 +0,0 @@ -// +build linux freebsd solaris - -package sockets - -import ( - "fmt" - "net" - "os" - "strconv" - "syscall" - - "github.com/Sirupsen/logrus" - "github.com/opencontainers/runc/libcontainer/user" -) - -// NewUnixSocket creates a unix socket with the specified path and group. 
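One plausible use of the `ConfigureTransport` helper above is pointing an `http.Client` at a local unix socket. A sketch only; the socket path and request path are illustrative, and error handling is minimal:

```go
package main

import (
	"fmt"
	"net/http"

	"github.com/docker/go-connections/sockets"
)

func main() {
	tr := &http.Transport{}
	// For "unix", compression is disabled and Dial is overridden to hit
	// the socket regardless of the URL's host component.
	if err := sockets.ConfigureTransport(tr, "unix", "/var/run/docker.sock"); err != nil {
		panic(err)
	}
	client := &http.Client{Transport: tr}

	// The host is a placeholder: the transport dials the socket directly.
	resp, err := client.Get("http://localhost/_ping")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)
}
```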
-func NewUnixSocket(path, group string) (net.Listener, error) { - if err := syscall.Unlink(path); err != nil && !os.IsNotExist(err) { - return nil, err - } - mask := syscall.Umask(0777) - defer syscall.Umask(mask) - l, err := net.Listen("unix", path) - if err != nil { - return nil, err - } - if err := setSocketGroup(path, group); err != nil { - l.Close() - return nil, err - } - if err := os.Chmod(path, 0660); err != nil { - l.Close() - return nil, err - } - return l, nil -} - -func setSocketGroup(path, group string) error { - if group == "" { - return nil - } - if err := changeGroup(path, group); err != nil { - if group != "docker" { - return err - } - logrus.Debugf("Warning: could not change group %s to docker: %v", path, err) - } - return nil -} - -func changeGroup(path string, nameOrGid string) error { - gid, err := lookupGidByName(nameOrGid) - if err != nil { - return err - } - logrus.Debugf("%s group found. gid: %d", nameOrGid, gid) - return os.Chown(path, 0, gid) -} - -func lookupGidByName(nameOrGid string) (int, error) { - groupFile, err := user.GetGroupPath() - if err != nil { - return -1, err - } - groups, err := user.ParseGroupFileFilter(groupFile, func(g user.Group) bool { - return g.Name == nameOrGid || strconv.Itoa(g.Gid) == nameOrGid - }) - if err != nil { - return -1, err - } - if groups != nil && len(groups) > 0 { - return groups[0].Gid, nil - } - gid, err := strconv.Atoi(nameOrGid) - if err == nil { - logrus.Warnf("Could not find GID %d", gid) - return gid, nil - } - return -1, fmt.Errorf("Group %s not found", nameOrGid) -} diff --git a/vendor/src/github.com/docker/go-connections/tlsconfig/config.go b/vendor/src/github.com/docker/go-connections/tlsconfig/config.go deleted file mode 100644 index 1ba04395e2..0000000000 --- a/vendor/src/github.com/docker/go-connections/tlsconfig/config.go +++ /dev/null @@ -1,122 +0,0 @@ -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -// As a reminder from https://golang.org/pkg/crypto/tls/#Config: -// A Config structure is used to configure a TLS client or server. After one has been passed to a TLS function it must not be modified. -// A Config may be reused; the tls package will also not modify it. -package tlsconfig - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io/ioutil" - "os" - - "github.com/Sirupsen/logrus" -) - -// Options represents the information needed to create client and server TLS configurations. -type Options struct { - CAFile string - - // If either CertFile or KeyFile is empty, Client() will not load them - // preventing the client from authenticating to the server. - // However, Server() requires them and will error out if they are empty. - CertFile string - KeyFile string - - // client-only option - InsecureSkipVerify bool - // server-only option - ClientAuth tls.ClientAuthType -} - -// Extra (server-side) accepted CBC cipher suites - will phase out in the future -var acceptedCBCCiphers = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, -} - -// DefaultServerAcceptedCiphers should be uses by code which already has a crypto/tls -// options struct but wants to use a commonly accepted set of TLS cipher suites, with -// known weak algorithms removed. -var DefaultServerAcceptedCiphers = append(clientCipherSuites, acceptedCBCCiphers...) 
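A sketch of how a caller might build a client `tls.Config` from these options, using the `Client` helper this file defines just below; the certificate paths are illustrative:

```go
package main

import (
	"crypto/tls"
	"fmt"

	"github.com/docker/go-connections/tlsconfig"
)

func main() {
	cfg, err := tlsconfig.Client(tlsconfig.Options{
		CAFile:   "/certs/ca.pem",   // trusted CA bundle for server verification
		CertFile: "/certs/cert.pem", // optional client certificate...
		KeyFile:  "/certs/key.pem",  // ...and its unencrypted key
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.MinVersion == tls.VersionTLS12) // true: the client default
}
```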
- -// ServerDefault is a secure-enough TLS configuration for the server TLS configuration. -var ServerDefault = tls.Config{ - // Avoid fallback to SSL protocols < TLS1.0 - MinVersion: tls.VersionTLS10, - PreferServerCipherSuites: true, - CipherSuites: DefaultServerAcceptedCiphers, -} - -// ClientDefault is a secure-enough TLS configuration for the client TLS configuration. -var ClientDefault = tls.Config{ - // Prefer TLS1.2 as the client minimum - MinVersion: tls.VersionTLS12, - CipherSuites: clientCipherSuites, -} - -// certPool returns an X.509 certificate pool from `caFile`, the certificate file. -func certPool(caFile string) (*x509.CertPool, error) { - // If we should verify the server, we need to load a trusted ca - certPool := x509.NewCertPool() - pem, err := ioutil.ReadFile(caFile) - if err != nil { - return nil, fmt.Errorf("Could not read CA certificate %q: %v", caFile, err) - } - if !certPool.AppendCertsFromPEM(pem) { - return nil, fmt.Errorf("failed to append certificates from PEM file: %q", caFile) - } - logrus.Debugf("Trusting %d certs", len(certPool.Subjects())) - return certPool, nil -} - -// Client returns a TLS configuration meant to be used by a client. -func Client(options Options) (*tls.Config, error) { - tlsConfig := ClientDefault - tlsConfig.InsecureSkipVerify = options.InsecureSkipVerify - if !options.InsecureSkipVerify && options.CAFile != "" { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.RootCAs = CAs - } - - if options.CertFile != "" || options.KeyFile != "" { - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - return nil, fmt.Errorf("Could not load X509 key pair: %v. Make sure the key is not encrypted", err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - } - - return &tlsConfig, nil -} - -// Server returns a TLS configuration meant to be used by a server. -func Server(options Options) (*tls.Config, error) { - tlsConfig := ServerDefault - tlsConfig.ClientAuth = options.ClientAuth - tlsCert, err := tls.LoadX509KeyPair(options.CertFile, options.KeyFile) - if err != nil { - if os.IsNotExist(err) { - return nil, fmt.Errorf("Could not load X509 key pair (cert: %q, key: %q): %v", options.CertFile, options.KeyFile, err) - } - return nil, fmt.Errorf("Error reading X509 key pair (cert: %q, key: %q): %v. Make sure the key is not encrypted.", options.CertFile, options.KeyFile, err) - } - tlsConfig.Certificates = []tls.Certificate{tlsCert} - if options.ClientAuth >= tls.VerifyClientCertIfGiven { - CAs, err := certPool(options.CAFile) - if err != nil { - return nil, err - } - tlsConfig.ClientCAs = CAs - } - return &tlsConfig, nil -} diff --git a/vendor/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go b/vendor/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go deleted file mode 100644 index 6b4c6a7c0d..0000000000 --- a/vendor/src/github.com/docker/go-connections/tlsconfig/config_client_ciphers.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. 
-// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go b/vendor/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go deleted file mode 100644 index ee22df47cb..0000000000 --- a/vendor/src/github.com/docker/go-connections/tlsconfig/config_legacy_client_ciphers.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build !go1.5 - -// Package tlsconfig provides primitives to retrieve secure-enough TLS configurations for both clients and servers. -// -package tlsconfig - -import ( - "crypto/tls" -) - -// Client TLS cipher suites (dropping CBC ciphers for client preferred suite set) -var clientCipherSuites = []uint16{ - tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, - tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, -} diff --git a/vendor/src/github.com/docker/go-events/.gitignore b/vendor/src/github.com/docker/go-events/.gitignore deleted file mode 100644 index daf913b1b3..0000000000 --- a/vendor/src/github.com/docker/go-events/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/src/github.com/docker/go-events/CONTRIBUTING.md b/vendor/src/github.com/docker/go-events/CONTRIBUTING.md deleted file mode 100644 index d813af779b..0000000000 --- a/vendor/src/github.com/docker/go-events/CONTRIBUTING.md +++ /dev/null @@ -1,70 +0,0 @@ -# Contributing to Docker open source projects - -Want to hack on go-events? Awesome! Here are instructions to get you started. - -go-events is part of the [Docker](https://www.docker.com) project, and -follows the same rules and principles. If you're already familiar with the way -Docker does things, you'll feel right at home. - -Otherwise, go read Docker's -[contributions guidelines](https://github.com/docker/docker/blob/master/CONTRIBUTING.md), -[issue triaging](https://github.com/docker/docker/blob/master/project/ISSUE-TRIAGE.md), -[review process](https://github.com/docker/docker/blob/master/project/REVIEWING.md) and -[branches and tags](https://github.com/docker/docker/blob/master/project/BRANCHES-AND-TAGS.md). - -For an in-depth description of our contribution process, visit the -contributors guide: [Understand how to contribute](https://docs.docker.com/opensource/workflow/make-a-contribution/) - -### Sign your work - -The sign-off is a simple line at the end of the explanation for the patch. Your -signature certifies that you wrote the patch or otherwise have the right to pass -it on as an open-source patch. The rules are pretty simple: if you can certify -the below (from [developercertificate.org](http://developercertificate.org/)): - -``` -Developer Certificate of Origin -Version 1.1 - -Copyright (C) 2004, 2006 The Linux Foundation and its contributors. -660 York Street, Suite 102, -San Francisco, CA 94110 USA - -Everyone is permitted to copy and distribute verbatim copies of this -license document, but changing it is not allowed. 
- -Developer's Certificate of Origin 1.1 - -By making a contribution to this project, I certify that: - -(a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - -(b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - -(c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - -(d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. -``` - -Then you just add a line to every git commit message: - - Signed-off-by: Joe Smith - -Use your real name (sorry, no pseudonyms or anonymous contributions.) - -If you set your `user.name` and `user.email` git configs, you can sign your -commit automatically with `git commit -s`. diff --git a/vendor/src/github.com/docker/go-events/LICENSE b/vendor/src/github.com/docker/go-events/LICENSE deleted file mode 100644 index 6d630cf595..0000000000 --- a/vendor/src/github.com/docker/go-events/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
- - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/src/github.com/docker/go-events/MAINTAINERS b/vendor/src/github.com/docker/go-events/MAINTAINERS deleted file mode 100644 index e414d82e96..0000000000 --- a/vendor/src/github.com/docker/go-events/MAINTAINERS +++ /dev/null @@ -1,46 +0,0 @@ -# go-events maintainers file -# -# This file describes who runs the docker/go-events project and how. -# This is a living document - if you see something out of date or missing, speak up! -# -# It is structured to be consumable by both humans and programs. -# To extract its contents programmatically, use any TOML-compliant parser. -# -# This file is compiled into the MAINTAINERS file in docker/opensource. -# -[Org] - [Org."Core maintainers"] - people = [ - "aaronlehmann", - "aluzzardi", - "lk4d4", - "stevvooe", - ] - -[people] - -# A reference list of all people associated with the project. -# All other sections should refer to people by their canonical key -# in the people section. 
- - # ADD YOURSELF HERE IN ALPHABETICAL ORDER - - [people.aaronlehmann] - Name = "Aaron Lehmann" - Email = "aaron.lehmann@docker.com" - GitHub = "aaronlehmann" - - [people.aluzzardi] - Name = "Andrea Luzzardi" - Email = "al@docker.com" - GitHub = "aluzzardi" - - [people.lk4d4] - Name = "Alexander Morozov" - Email = "lk4d4@docker.com" - GitHub = "lk4d4" - - [people.stevvooe] - Name = "Stephen Day" - Email = "stephen.day@docker.com" - GitHub = "stevvooe" diff --git a/vendor/src/github.com/docker/go-events/README.md b/vendor/src/github.com/docker/go-events/README.md deleted file mode 100644 index 0acafc279a..0000000000 --- a/vendor/src/github.com/docker/go-events/README.md +++ /dev/null @@ -1,117 +0,0 @@ -# Docker Events Package - -[![GoDoc](https://godoc.org/github.com/docker/go-events?status.svg)](https://godoc.org/github.com/docker/go-events) -[![Circle CI](https://circleci.com/gh/docker/go-events.svg?style=shield)](https://circleci.com/gh/docker/go-events) - -The Docker `events` package implements a composable event distribution package -for Go. - -Originally created to implement the [notifications in Docker Registry -2](https://github.com/docker/distribution/blob/master/docs/notifications.md), -we've found the pattern to be useful in other applications. This package is -most of the same code with slightly updated interfaces. Much of the internals -have been made available. - -## Usage - -The `events` package centers around a `Sink` type. Events are written with -calls to `Sink.Write(event Event)`. Sinks can be wired up in various -configurations to achieve interesting behavior. - -The canonical example is that employed by the -[docker/distribution/notifications](https://godoc.org/github.com/docker/distribution/notifications) -package. Let's say we have a type `httpSink` where we'd like to queue -notifications. As a rule, it should send a single http request and return an -error if it fails: - -```go -func (h *httpSink) Write(event Event) error { - p, err := json.Marshal(event) - if err != nil { - return err - } - body := bytes.NewReader(p) - resp, err := h.client.Post(h.url, "application/json", body) - if err != nil { - return err - } - defer resp.Body.Close() - - if resp.Status != 200 { - return errors.New("unexpected status") - } - - return nil -} - -// implement (*httpSink).Close() -``` - -With just that, we can start using components from this package. One can call -`(*httpSink).Write` to send events as the body of a post request to a -configured URL. - -### Retries - -HTTP can be unreliable. The first feature we'd like is to have some retry: - -```go -hs := newHTTPSink(/*...*/) -retry := NewRetryingSink(hs, NewBreaker(5, time.Second)) -``` - -We now have a sink that will retry events against the `httpSink` until they -succeed. The retry will backoff for one second after 5 consecutive failures -using the breaker strategy. - -### Queues - -This isn't quite enough. We we want a sink that doesn't block while we are -waiting for events to be sent. Let's add a `Queue`: - -```go -queue := NewQueue(retry) -``` - -Now, we have an unbounded queue that will work through all events sent with -`(*Queue).Write`. Events can be added asynchronously to the queue without -blocking the current execution path. This is ideal for use in an http request. - -### Broadcast - -It usually turns out that you want to send to more than one listener. We can -use `Broadcaster` to support this: - -```go -var broadcast = NewBroadcaster() // make it available somewhere in your application. 
-broadcast.Add(queue) // add your queue! -broadcast.Add(queue2) // and another! -``` - -With the above, we can now call `broadcast.Write` in our http handlers and have -all the events distributed to each queue. Because the events are queued, not -listener blocks another. - -### Extending - -For the most part, the above is sufficient for a lot of applications. However, -extending the above functionality can be done implementing your own `Sink`. The -behavior and semantics of the sink can be completely dependent on the -application requirements. The interface is provided below for reference: - -```go -type Sink { - Write(Event) error - Close() error -} -``` - -Application behavior can be controlled by how `Write` behaves. The examples -above are designed to queue the message and return as quickly as possible. -Other implementations may block until the event is committed to durable -storage. - -## Copyright and license - -Copyright © 2016 Docker, Inc. go-events is licensed under the Apache License, -Version 2.0. See [LICENSE](LICENSE) for the full license text. diff --git a/vendor/src/github.com/docker/go-events/broadcast.go b/vendor/src/github.com/docker/go-events/broadcast.go deleted file mode 100644 index e73d758bcf..0000000000 --- a/vendor/src/github.com/docker/go-events/broadcast.go +++ /dev/null @@ -1,158 +0,0 @@ -package events - -import "github.com/Sirupsen/logrus" - -// Broadcaster sends events to multiple, reliable Sinks. The goal of this -// component is to dispatch events to configured endpoints. Reliability can be -// provided by wrapping incoming sinks. -type Broadcaster struct { - sinks []Sink - events chan Event - adds chan configureRequest - removes chan configureRequest - closed chan chan struct{} -} - -// NewBroadcaster appends one or more sinks to the list of sinks. The -// broadcaster behavior will be affected by the properties of the sink. -// Generally, the sink should accept all messages and deal with reliability on -// its own. Use of EventQueue and RetryingSink should be used here. -func NewBroadcaster(sinks ...Sink) *Broadcaster { - b := Broadcaster{ - sinks: sinks, - events: make(chan Event), - adds: make(chan configureRequest), - removes: make(chan configureRequest), - closed: make(chan chan struct{}), - } - - // Start the broadcaster - go b.run() - - return &b -} - -// Write accepts an event to be dispatched to all sinks. This method will never -// fail and should never block (hopefully!). The caller cedes the memory to the -// broadcaster and should not modify it after calling write. -func (b *Broadcaster) Write(event Event) error { - select { - case b.events <- event: - case <-b.closed: - return ErrSinkClosed - } - return nil -} - -// Add the sink to the broadcaster. -// -// The provided sink must be comparable with equality. Typically, this just -// works with a regular pointer type. -func (b *Broadcaster) Add(sink Sink) error { - return b.configure(b.adds, sink) -} - -// Remove the provided sink. 
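The README's "Extending" section is easiest to see with a concrete sink. A hypothetical sketch, assuming the `Event` and `Sink` definitions from `event.go` further down; `logSink` is invented purely for illustration:

```go
package main

import (
	"fmt"

	events "github.com/docker/go-events"
)

// logSink is a hypothetical Sink that prints each event it receives.
type logSink struct{}

func (s *logSink) Write(event events.Event) error {
	fmt.Printf("event: %v\n", event)
	return nil
}

func (s *logSink) Close() error { return nil }

func main() {
	// Register the sink on a Broadcaster; the run loop fans each
	// written event out to every registered sink.
	b := events.NewBroadcaster(&logSink{})
	_ = b.Write("hello")
	_ = b.Close()
}
```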
-func (b *Broadcaster) Remove(sink Sink) error { - return b.configure(b.removes, sink) -} - -type configureRequest struct { - sink Sink - response chan error -} - -func (b *Broadcaster) configure(ch chan configureRequest, sink Sink) error { - response := make(chan error, 1) - - for { - select { - case ch <- configureRequest{ - sink: sink, - response: response}: - ch = nil - case err := <-response: - return err - case <-b.closed: - return ErrSinkClosed - } - } -} - -// Close the broadcaster, ensuring that all messages are flushed to the -// underlying sink before returning. -func (b *Broadcaster) Close() error { - select { - case <-b.closed: - // already closed - return ErrSinkClosed - default: - // do a little chan handoff dance to synchronize closing - closed := make(chan struct{}) - b.closed <- closed - close(b.closed) - <-closed - return nil - } -} - -// run is the main broadcast loop, started when the broadcaster is created. -// Under normal conditions, it waits for events on the event channel. After -// Close is called, this goroutine will exit. -func (b *Broadcaster) run() { - remove := func(target Sink) { - for i, sink := range b.sinks { - if sink == target { - b.sinks = append(b.sinks[:i], b.sinks[i+1:]...) - break - } - } - } - - for { - select { - case event := <-b.events: - for _, sink := range b.sinks { - if err := sink.Write(event); err != nil { - if err == ErrSinkClosed { - // remove closed sinks - remove(sink) - continue - } - logrus.WithField("event", event).WithField("events.sink", sink).WithError(err). - Errorf("broadcaster: dropping event") - } - } - case request := <-b.adds: - // while we have to iterate for add/remove, common iteration for - // send is faster against slice. - - var found bool - for _, sink := range b.sinks { - if request.sink == sink { - found = true - break - } - } - - if !found { - b.sinks = append(b.sinks, request.sink) - } - // b.sinks[request.sink] = struct{}{} - request.response <- nil - case request := <-b.removes: - remove(request.sink) - request.response <- nil - case closing := <-b.closed: - // close all the underlying sinks - for _, sink := range b.sinks { - if err := sink.Close(); err != nil && err != ErrSinkClosed { - logrus.WithField("events.sink", sink).WithError(err). - Errorf("broadcaster: closing sink failed") - } - } - closing <- struct{}{} - return - } - } -} diff --git a/vendor/src/github.com/docker/go-events/channel.go b/vendor/src/github.com/docker/go-events/channel.go deleted file mode 100644 index ca24f8d5a2..0000000000 --- a/vendor/src/github.com/docker/go-events/channel.go +++ /dev/null @@ -1,47 +0,0 @@ -package events - -// Channel provides a sink that can be listened on. The writer and channel -// listener must operate in separate goroutines. -// -// Consumers should listen on Channel.C until Closed is closed. -type Channel struct { - C chan Event - - closed chan struct{} -} - -// NewChannel returns a channel. If buffer is zero, the channel is -// unbuffered. -func NewChannel(buffer int) *Channel { - return &Channel{ - C: make(chan Event, buffer), - closed: make(chan struct{}), - } -} - -// Done returns a channel that will always proceed once the sink is closed. -func (ch *Channel) Done() chan struct{} { - return ch.closed -} - -// Write the event to the channel. Must be called in a separate goroutine from -// the listener. -func (ch *Channel) Write(event Event) error { - select { - case ch.C <- event: - return nil - case <-ch.closed: - return ErrSinkClosed - } -} - -// Close the channel sink. 
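A sketch of the consumption pattern the comments above prescribe for `Channel`: the writer and the listener run in separate goroutines. Values are illustrative:

```go
package main

import (
	"fmt"

	events "github.com/docker/go-events"
)

func main() {
	ch := events.NewChannel(10) // buffer of 10 events

	done := make(chan struct{})
	go func() {
		defer close(done)
		for {
			select {
			case ev := <-ch.C:
				fmt.Println("got:", ev)
			case <-ch.Done():
				return // sink closed; note buffered events may go undrained
			}
		}
	}()

	_ = ch.Write("hello") // blocks only when the buffer is full
	_ = ch.Close()
	<-done
}
```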
-func (ch *Channel) Close() error { - select { - case <-ch.closed: - return ErrSinkClosed - default: - close(ch.closed) - return nil - } -} diff --git a/vendor/src/github.com/docker/go-events/errors.go b/vendor/src/github.com/docker/go-events/errors.go deleted file mode 100644 index 56db7c2510..0000000000 --- a/vendor/src/github.com/docker/go-events/errors.go +++ /dev/null @@ -1,10 +0,0 @@ -package events - -import "fmt" - -var ( - // ErrSinkClosed is returned if a write is issued to a sink that has been - // closed. If encountered, the error should be considered terminal and - // retries will not be successful. - ErrSinkClosed = fmt.Errorf("events: sink closed") -) diff --git a/vendor/src/github.com/docker/go-events/event.go b/vendor/src/github.com/docker/go-events/event.go deleted file mode 100644 index f0f1d9ea5f..0000000000 --- a/vendor/src/github.com/docker/go-events/event.go +++ /dev/null @@ -1,15 +0,0 @@ -package events - -// Event marks items that can be sent as events. -type Event interface{} - -// Sink accepts and sends events. -type Sink interface { - // Write an event to the Sink. If no error is returned, the caller will - // assume that all events have been committed to the sink. If an error is - // received, the caller may retry sending the event. - Write(event Event) error - - // Close the sink, possibly waiting for pending events to flush. - Close() error -} diff --git a/vendor/src/github.com/docker/go-events/filter.go b/vendor/src/github.com/docker/go-events/filter.go deleted file mode 100644 index f2765cfe6b..0000000000 --- a/vendor/src/github.com/docker/go-events/filter.go +++ /dev/null @@ -1,52 +0,0 @@ -package events - -// Matcher matches events. -type Matcher interface { - Match(event Event) bool -} - -// MatcherFunc implements matcher with just a function. -type MatcherFunc func(event Event) bool - -// Match calls the wrapped function. -func (fn MatcherFunc) Match(event Event) bool { - return fn(event) -} - -// Filter provides an event sink that sends only events that are accepted by a -// Matcher. No methods on filter are goroutine safe. -type Filter struct { - dst Sink - matcher Matcher - closed bool -} - -// NewFilter returns a new filter that will send to events to dst that return -// true for Matcher. -func NewFilter(dst Sink, matcher Matcher) Sink { - return &Filter{dst: dst, matcher: matcher} -} - -// Write an event to the filter. -func (f *Filter) Write(event Event) error { - if f.closed { - return ErrSinkClosed - } - - if f.matcher.Match(event) { - return f.dst.Write(event) - } - - return nil -} - -// Close the filter and allow no more events to pass through. -func (f *Filter) Close() error { - // TODO(stevvooe): Not all sinks should have Close. - if f.closed { - return ErrSinkClosed - } - - f.closed = true - return f.dst.Close() -} diff --git a/vendor/src/github.com/docker/go-events/queue.go b/vendor/src/github.com/docker/go-events/queue.go deleted file mode 100644 index 8032f090b6..0000000000 --- a/vendor/src/github.com/docker/go-events/queue.go +++ /dev/null @@ -1,111 +0,0 @@ -package events - -import ( - "container/list" - "sync" - - "github.com/Sirupsen/logrus" -) - -// Queue accepts all messages into a queue for asynchronous consumption -// by a sink. It is unbounded and thread safe but the sink must be reliable or -// events will be dropped. -type Queue struct { - dst Sink - events *list.List - cond *sync.Cond - mu sync.Mutex - closed bool -} - -// NewQueue returns a queue to the provided Sink dst. 
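A sketch pairing `Filter` with a `MatcherFunc`; the strings-only predicate is illustrative:

```go
package main

import (
	"fmt"

	events "github.com/docker/go-events"
)

func main() {
	dst := events.NewChannel(1)

	// Forward only string events to dst; everything else is dropped.
	filtered := events.NewFilter(dst, events.MatcherFunc(func(e events.Event) bool {
		_, ok := e.(string)
		return ok
	}))

	_ = filtered.Write(42)     // rejected by the matcher, Write returns nil
	_ = filtered.Write("kept") // forwarded to dst
	fmt.Println(<-dst.C)       // "kept"
	_ = filtered.Close()       // also closes dst
}
```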
-func NewQueue(dst Sink) *Queue {
-	eq := Queue{
-		dst:    dst,
-		events: list.New(),
-	}
-
-	eq.cond = sync.NewCond(&eq.mu)
-	go eq.run()
-	return &eq
-}
-
-// Write accepts the events into the queue, only failing if the queue has
-// been closed.
-func (eq *Queue) Write(event Event) error {
-	eq.mu.Lock()
-	defer eq.mu.Unlock()
-
-	if eq.closed {
-		return ErrSinkClosed
-	}
-
-	eq.events.PushBack(event)
-	eq.cond.Signal() // signal waiters
-
-	return nil
-}
-
-// Close shuts down the event queue, flushing any remaining events to the
-// destination sink before closing it.
-func (eq *Queue) Close() error {
-	eq.mu.Lock()
-	defer eq.mu.Unlock()
-
-	if eq.closed {
-		return ErrSinkClosed
-	}
-
-	// set closed flag
-	eq.closed = true
-	eq.cond.Signal() // signal flushes queue
-	eq.cond.Wait()   // wait for signal from last flush
-	return eq.dst.Close()
-}
-
-// run is the main goroutine to flush events to the target sink.
-func (eq *Queue) run() {
-	for {
-		event := eq.next()
-
-		if event == nil {
-			return // nil block means event queue is closed.
-		}
-
-		if err := eq.dst.Write(event); err != nil {
-			// TODO(aaronl): Dropping events could be bad depending
-			// on the application. We should have a way of
-			// communicating this condition. However, logging
-			// at a log level above debug may not be appropriate.
-			// Eventually, go-events should not use logrus at all,
-			// and should bubble up conditions like this through
-			// error values.
-			logrus.WithFields(logrus.Fields{
-				"event": event,
-				"sink":  eq.dst,
-			}).WithError(err).Debug("eventqueue: dropped event")
-		}
-	}
-}
-
-// next encompasses the critical section of the run loop. When the queue is
-// empty, it will block on the condition. If new data arrives, it will wake
-// and return a block. When closed, nil will be returned.
-func (eq *Queue) next() Event {
-	eq.mu.Lock()
-	defer eq.mu.Unlock()
-
-	for eq.events.Len() < 1 {
-		if eq.closed {
-			eq.cond.Broadcast()
-			return nil
-		}
-
-		eq.cond.Wait()
-	}
-
-	front := eq.events.Front()
-	block := front.Value.(Event)
-	eq.events.Remove(front)
-
-	return block
-}
diff --git a/vendor/src/github.com/docker/go-events/retry.go b/vendor/src/github.com/docker/go-events/retry.go
deleted file mode 100644
index 4ddb3ac6a7..0000000000
--- a/vendor/src/github.com/docker/go-events/retry.go
+++ /dev/null
@@ -1,249 +0,0 @@
-package events
-
-import (
-	"math/rand"
-	"sync"
-	"sync/atomic"
-	"time"
-
-	"github.com/Sirupsen/logrus"
-)
-
-// RetryingSink retries the write until success or an ErrSinkClosed is
-// returned. The underlying sink must have a nonzero probability of
-// succeeding, or the sink will block. Retry behavior is configured with a
-// RetryStrategy. Concurrent calls to a retrying sink are serialized through
-// the sink, meaning that if one is in-flight, another will not proceed.
-type RetryingSink struct {
-	sink     Sink
-	strategy RetryStrategy
-	closed   chan struct{}
-}
-
-// NewRetryingSink returns a sink that will retry writes to a sink, backing
-// off on failure according to the provided RetryStrategy.
-func NewRetryingSink(sink Sink, strategy RetryStrategy) *RetryingSink {
-	rs := &RetryingSink{
-		sink:     sink,
-		strategy: strategy,
-		closed:   make(chan struct{}),
-	}
-
-	return rs
-}
-
-// Write attempts to flush the events to the downstream sink until it succeeds
-// or the sink is closed.
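// A sketch wiring the RetryingSink below to a circuit-breaker strategy.
// NewBreaker is defined later in this file (NewExponentialBackoff, also
// below, could be swapped in instead); flakySink is illustrative only.
package main

import (
	"errors"
	"fmt"
	"time"

	events "github.com/docker/go-events"
)

type flakySink struct{ calls int }

func (s *flakySink) Write(event events.Event) error {
	s.calls++
	if s.calls%3 != 0 {
		return errors.New("transient failure") // fails two of every three writes
	}
	fmt.Println("delivered:", event)
	return nil
}

func (s *flakySink) Close() error { return nil }

func main() {
	// Back off for 100ms once 2 consecutive failures have been observed.
	rs := events.NewRetryingSink(&flakySink{}, events.NewBreaker(2, 100*time.Millisecond))
	rs.Write("event") // retried until the flaky sink accepts it
	rs.Close()
}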
-func (rs *RetryingSink) Write(event Event) error {
-	logger := logrus.WithField("event", event)
-
-retry:
-	select {
-	case <-rs.closed:
-		return ErrSinkClosed
-	default:
-	}
-
-	if backoff := rs.strategy.Proceed(event); backoff > 0 {
-		select {
-		case <-time.After(backoff):
-			// TODO(stevvooe): This branch holds up the next try. Before, we
-			// would simply break to the "retry" label and then possibly wait
-			// again. However, this requires all retry strategies to have a
-			// large probability of probing the sink for success, rather than
-			// just backing off and sending the request.
-		case <-rs.closed:
-			return ErrSinkClosed
-		}
-	}
-
-	if err := rs.sink.Write(event); err != nil {
-		if err == ErrSinkClosed {
-			// terminal!
-			return err
-		}
-
-		logger := logger.WithError(err) // shadow!!
-
-		if rs.strategy.Failure(event, err) {
-			logger.Errorf("retryingsink: dropped event")
-			return nil
-		}
-
-		logger.Errorf("retryingsink: error writing event, retrying")
-		goto retry
-	}
-
-	rs.strategy.Success(event)
-	return nil
-}
-
-// Close closes the sink and the underlying sink.
-func (rs *RetryingSink) Close() error {
-	select {
-	case <-rs.closed:
-		return ErrSinkClosed
-	default:
-		close(rs.closed)
-		return rs.sink.Close()
-	}
-}
-
-// RetryStrategy defines a strategy for retrying event sink writes.
-//
-// All methods should be goroutine safe.
-type RetryStrategy interface {
-	// Proceed is called before every event send. If Proceed returns a
-	// positive, non-zero duration, the retryer will back off by the provided
-	// duration.
-	//
-	// An event is provided, but may be ignored.
-	Proceed(event Event) time.Duration
-
-	// Failure reports a failure to the strategy. If this method returns true,
-	// the event should be dropped.
-	Failure(event Event, err error) bool
-
-	// Success should be called when an event is sent successfully.
-	Success(event Event)
-}
-
-// Breaker implements a circuit breaker retry strategy.
-//
-// The current implementation never drops events.
-type Breaker struct {
-	threshold int
-	recent    int
-	last      time.Time
-	backoff   time.Duration // time after which we retry after failure.
-	mu        sync.Mutex
-}
-
-var _ RetryStrategy = &Breaker{}
-
-// NewBreaker returns a breaker that will back off after the threshold has been
-// tripped. A Breaker is thread safe and may be shared by many goroutines.
-func NewBreaker(threshold int, backoff time.Duration) *Breaker {
-	return &Breaker{
-		threshold: threshold,
-		backoff:   backoff,
-	}
-}
-
-// Proceed checks the failures against the threshold.
-func (b *Breaker) Proceed(event Event) time.Duration {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-
-	if b.recent < b.threshold {
-		return 0
-	}
-
-	return b.last.Add(b.backoff).Sub(time.Now())
-}
-
-// Success resets the breaker.
-func (b *Breaker) Success(event Event) {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-
-	b.recent = 0
-	b.last = time.Time{}
-}
-
-// Failure records the failure and latest failure time.
-func (b *Breaker) Failure(event Event, err error) bool {
-	b.mu.Lock()
-	defer b.mu.Unlock()
-
-	b.recent++
-	b.last = time.Now().UTC()
-	return false // never drop events.
-}
-
-var (
-	// DefaultExponentialBackoffConfig provides a default configuration for
-	// exponential backoff.
-	DefaultExponentialBackoffConfig = ExponentialBackoffConfig{
-		Base:   time.Second,
-		Factor: time.Second,
-		Max:    20 * time.Second,
-	}
-)
-
-// ExponentialBackoffConfig configures backoff parameters.
-//
-// Note that these parameters operate on the upper bound for choosing a random
-// value. For example, at Base=1s, a random value in [0,1s) will be chosen for
-// the backoff value.
-type ExponentialBackoffConfig struct {
-	// Base is the minimum bound for backing off after failure.
-	Base time.Duration
-
-	// Factor sets the amount of time by which the backoff grows with each
-	// failure.
-	Factor time.Duration
-
-	// Max is the absolute maximum bound for a single backoff.
-	Max time.Duration
-}
-
-// ExponentialBackoff implements random backoff with exponentially increasing
-// bounds as the number of consecutive failures increases.
-type ExponentialBackoff struct {
-	config   ExponentialBackoffConfig
-	failures uint64 // consecutive failure counter.
-}
-
-// NewExponentialBackoff returns an exponential backoff strategy with the
-// desired config. Zero-valued fields in config fall back to the
-// corresponding fields of DefaultExponentialBackoffConfig.
-func NewExponentialBackoff(config ExponentialBackoffConfig) *ExponentialBackoff {
-	return &ExponentialBackoff{
-		config: config,
-	}
-}
-
-// Proceed returns the next randomly bound exponential backoff time.
-func (b *ExponentialBackoff) Proceed(event Event) time.Duration {
-	return b.backoff(atomic.LoadUint64(&b.failures))
-}
-
-// Success resets the failures counter.
-func (b *ExponentialBackoff) Success(event Event) {
-	atomic.StoreUint64(&b.failures, 0)
-}
-
-// Failure increments the failure counter.
-func (b *ExponentialBackoff) Failure(event Event, err error) bool {
-	atomic.AddUint64(&b.failures, 1)
-	return false
-}
-
-// backoff calculates the amount of time to wait based on the number of
-// consecutive failures.
-func (b *ExponentialBackoff) backoff(failures uint64) time.Duration {
-	if failures <= 0 {
-		// proceed normally when there are no failures.
-		return 0
-	}
-
-	factor := b.config.Factor
-	if factor <= 0 {
-		factor = DefaultExponentialBackoffConfig.Factor
-	}
-
-	backoff := b.config.Base + factor*time.Duration(1<<(failures-1))
-
-	max := b.config.Max
-	if max <= 0 {
-		max = DefaultExponentialBackoffConfig.Max
-	}
-
-	if backoff > max || backoff < 0 {
-		backoff = max
-	}
-
-	// Choose a uniformly distributed value from [0, backoff).
-	return time.Duration(rand.Int63n(int64(backoff)))
-}
diff --git a/vendor/src/github.com/docker/go-units/LICENSE b/vendor/src/github.com/docker/go-units/LICENSE
deleted file mode 100644
index b55b37bc31..0000000000
--- a/vendor/src/github.com/docker/go-units/LICENSE
+++ /dev/null
@@ -1,191 +0,0 @@
-
-                                 Apache License
-                           Version 2.0, January 2004
-                        https://www.apache.org/licenses/
-
-   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-   1. Definitions.
-
-      "License" shall mean the terms and conditions for use, reproduction,
-      and distribution as defined by Sections 1 through 9 of this document.
-
-      "Licensor" shall mean the copyright owner or entity authorized by
-      the copyright owner that is granting the License.
-
-      "Legal Entity" shall mean the union of the acting entity and all
-      other entities that control, are controlled by, or are under common
-      control with that entity. For the purposes of this definition,
-      "control" means (i) the power, direct or indirect, to cause the
-      direction or management of such entity, whether by contract or
-      otherwise, or (ii) ownership of fifty percent (50%) or more of the
-      outstanding shares, or (iii) beneficial ownership of such entity.
-
-      "You" (or "Your") shall mean an individual or Legal Entity
-      exercising permissions granted by this License.
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - Copyright 2015 Docker, Inc. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - https://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/src/github.com/docker/go-units/README.md b/vendor/src/github.com/docker/go-units/README.md deleted file mode 100644 index e2fb4051f4..0000000000 --- a/vendor/src/github.com/docker/go-units/README.md +++ /dev/null @@ -1,13 +0,0 @@ -[![GoDoc](https://godoc.org/github.com/docker/go-units?status.svg)](https://godoc.org/github.com/docker/go-units) - -# Introduction - -go-units is a library to transform human friendly measurements into machine friendly values. - -## Usage - -See the [docs in godoc](https://godoc.org/github.com/docker/go-units) for examples and documentation. - -## License - -go-units is licensed under the Apache License, Version 2.0. See [LICENSE](LICENSE) for the full license text. 
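// A quick sketch of the go-units helpers deleted in the hunks below:
// durations are rendered in the approximate, human-friendly forms the
// Docker CLI uses (HumanDuration is defined in duration.go below).
package main

import (
	"fmt"
	"time"

	units "github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanDuration(47 * time.Hour))      // "47 hours"
	fmt.Println(units.HumanDuration(13 * 24 * time.Hour)) // "13 days"
	fmt.Println(units.HumanDuration(30 * 24 * time.Hour)) // "4 weeks"
}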
diff --git a/vendor/src/github.com/docker/go-units/circle.yml b/vendor/src/github.com/docker/go-units/circle.yml deleted file mode 100644 index 9043b35478..0000000000 --- a/vendor/src/github.com/docker/go-units/circle.yml +++ /dev/null @@ -1,11 +0,0 @@ -dependencies: - post: - # install golint - - go get github.com/golang/lint/golint - -test: - pre: - # run analysis before tests - - go vet ./... - - test -z "$(golint ./... | tee /dev/stderr)" - - test -z "$(gofmt -s -l . | tee /dev/stderr)" diff --git a/vendor/src/github.com/docker/go-units/duration.go b/vendor/src/github.com/docker/go-units/duration.go deleted file mode 100644 index c219a8a968..0000000000 --- a/vendor/src/github.com/docker/go-units/duration.go +++ /dev/null @@ -1,33 +0,0 @@ -// Package units provides helper function to parse and print size and time units -// in human-readable format. -package units - -import ( - "fmt" - "time" -) - -// HumanDuration returns a human-readable approximation of a duration -// (eg. "About a minute", "4 hours ago", etc.). -func HumanDuration(d time.Duration) string { - if seconds := int(d.Seconds()); seconds < 1 { - return "Less than a second" - } else if seconds < 60 { - return fmt.Sprintf("%d seconds", seconds) - } else if minutes := int(d.Minutes()); minutes == 1 { - return "About a minute" - } else if minutes < 60 { - return fmt.Sprintf("%d minutes", minutes) - } else if hours := int(d.Hours()); hours == 1 { - return "About an hour" - } else if hours < 48 { - return fmt.Sprintf("%d hours", hours) - } else if hours < 24*7*2 { - return fmt.Sprintf("%d days", hours/24) - } else if hours < 24*30*3 { - return fmt.Sprintf("%d weeks", hours/24/7) - } else if hours < 24*365*2 { - return fmt.Sprintf("%d months", hours/24/30) - } - return fmt.Sprintf("%d years", int(d.Hours())/24/365) -} diff --git a/vendor/src/github.com/docker/go-units/size.go b/vendor/src/github.com/docker/go-units/size.go deleted file mode 100644 index 3b59daff31..0000000000 --- a/vendor/src/github.com/docker/go-units/size.go +++ /dev/null @@ -1,95 +0,0 @@ -package units - -import ( - "fmt" - "regexp" - "strconv" - "strings" -) - -// See: http://en.wikipedia.org/wiki/Binary_prefix -const ( - // Decimal - - KB = 1000 - MB = 1000 * KB - GB = 1000 * MB - TB = 1000 * GB - PB = 1000 * TB - - // Binary - - KiB = 1024 - MiB = 1024 * KiB - GiB = 1024 * MiB - TiB = 1024 * GiB - PiB = 1024 * TiB -) - -type unitMap map[string]int64 - -var ( - decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB} - binaryMap = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB} - sizeRegex = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`) -) - -var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"} -var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"} - -// CustomSize returns a human-readable approximation of a size -// using custom format. -func CustomSize(format string, size float64, base float64, _map []string) string { - i := 0 - for size >= base { - size = size / base - i++ - } - return fmt.Sprintf(format, size, _map[i]) -} - -// HumanSize returns a human-readable approximation of a size -// capped at 4 valid numbers (eg. "2.746 MB", "796 KB"). -func HumanSize(size float64) string { - return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs) -} - -// BytesSize returns a human-readable size in bytes, kibibytes, -// mebibytes, gibibytes, or tebibytes (eg. "44kiB", "17MiB"). 
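// Decimal vs. binary rendering with the size helpers above: HumanSize uses
// powers of 1000, BytesSize powers of 1024, and CustomSize exposes the
// format and unit table directly. A minimal sketch:
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	fmt.Println(units.HumanSize(1048576)) // "1.049 MB" (decimal)
	fmt.Println(units.BytesSize(1048576)) // "1 MiB" (binary)
	fmt.Println(units.CustomSize("%.2f%s", 1048576, 1024.0, []string{"B", "K", "M"})) // "1.00M"
}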
-func BytesSize(size float64) string { - return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs) -} - -// FromHumanSize returns an integer from a human-readable specification of a -// size using SI standard (eg. "44kB", "17MB"). -func FromHumanSize(size string) (int64, error) { - return parseSize(size, decimalMap) -} - -// RAMInBytes parses a human-readable string representing an amount of RAM -// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and -// returns the number of bytes, or -1 if the string is unparseable. -// Units are case-insensitive, and the 'b' suffix is optional. -func RAMInBytes(size string) (int64, error) { - return parseSize(size, binaryMap) -} - -// Parses the human-readable size string into the amount it represents. -func parseSize(sizeStr string, uMap unitMap) (int64, error) { - matches := sizeRegex.FindStringSubmatch(sizeStr) - if len(matches) != 3 { - return -1, fmt.Errorf("invalid size: '%s'", sizeStr) - } - - size, err := strconv.ParseInt(matches[1], 10, 0) - if err != nil { - return -1, err - } - - unitPrefix := strings.ToLower(matches[2]) - if mul, ok := uMap[unitPrefix]; ok { - size *= mul - } - - return size, nil -} diff --git a/vendor/src/github.com/docker/go-units/ulimit.go b/vendor/src/github.com/docker/go-units/ulimit.go deleted file mode 100644 index f0a7be2921..0000000000 --- a/vendor/src/github.com/docker/go-units/ulimit.go +++ /dev/null @@ -1,109 +0,0 @@ -package units - -import ( - "fmt" - "strconv" - "strings" -) - -// Ulimit is a human friendly version of Rlimit. -type Ulimit struct { - Name string - Hard int64 - Soft int64 -} - -// Rlimit specifies the resource limits, such as max open files. -type Rlimit struct { - Type int `json:"type,omitempty"` - Hard uint64 `json:"hard,omitempty"` - Soft uint64 `json:"soft,omitempty"` -} - -const ( - // magic numbers for making the syscall - // some of these are defined in the syscall package, but not all. - // Also since Windows client doesn't get access to the syscall package, need to - // define these here - rlimitAs = 9 - rlimitCore = 4 - rlimitCPU = 0 - rlimitData = 2 - rlimitFsize = 1 - rlimitLocks = 10 - rlimitMemlock = 8 - rlimitMsgqueue = 12 - rlimitNice = 13 - rlimitNofile = 7 - rlimitNproc = 6 - rlimitRss = 5 - rlimitRtprio = 14 - rlimitRttime = 15 - rlimitSigpending = 11 - rlimitStack = 3 -) - -var ulimitNameMapping = map[string]int{ - //"as": rlimitAs, // Disabled since this doesn't seem usable with the way Docker inits a container. - "core": rlimitCore, - "cpu": rlimitCPU, - "data": rlimitData, - "fsize": rlimitFsize, - "locks": rlimitLocks, - "memlock": rlimitMemlock, - "msgqueue": rlimitMsgqueue, - "nice": rlimitNice, - "nofile": rlimitNofile, - "nproc": rlimitNproc, - "rss": rlimitRss, - "rtprio": rlimitRtprio, - "rttime": rlimitRttime, - "sigpending": rlimitSigpending, - "stack": rlimitStack, -} - -// ParseUlimit parses and returns a Ulimit from the specified string. 
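// Going the other way with the parsers above: FromHumanSize applies SI
// (decimal) multipliers, RAMInBytes binary multipliers, both via parseSize.
// A minimal sketch:
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	n, _ := units.FromHumanSize("17MB") // 17000000
	fmt.Println(n)
	m, _ := units.RAMInBytes("2g") // 2147483648; units are case-insensitive
	fmt.Println(m)
}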
-func ParseUlimit(val string) (*Ulimit, error) { - parts := strings.SplitN(val, "=", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("invalid ulimit argument: %s", val) - } - - if _, exists := ulimitNameMapping[parts[0]]; !exists { - return nil, fmt.Errorf("invalid ulimit type: %s", parts[0]) - } - - limitVals := strings.SplitN(parts[1], ":", 2) - if len(limitVals) > 2 { - return nil, fmt.Errorf("too many limit value arguments - %s, can only have up to two, `soft[:hard]`", parts[1]) - } - - soft, err := strconv.ParseInt(limitVals[0], 10, 64) - if err != nil { - return nil, err - } - - hard := soft // in case no hard was set - if len(limitVals) == 2 { - hard, err = strconv.ParseInt(limitVals[1], 10, 64) - } - if soft > hard { - return nil, fmt.Errorf("ulimit soft limit must be less than or equal to hard limit: %d > %d", soft, hard) - } - - return &Ulimit{Name: parts[0], Soft: soft, Hard: hard}, nil -} - -// GetRlimit returns the RLimit corresponding to Ulimit. -func (u *Ulimit) GetRlimit() (*Rlimit, error) { - t, exists := ulimitNameMapping[u.Name] - if !exists { - return nil, fmt.Errorf("invalid ulimit name %s", u.Name) - } - - return &Rlimit{Type: t, Soft: uint64(u.Soft), Hard: uint64(u.Hard)}, nil -} - -func (u *Ulimit) String() string { - return fmt.Sprintf("%s=%d:%d", u.Name, u.Soft, u.Hard) -} diff --git a/vendor/src/github.com/docker/go/LICENSE b/vendor/src/github.com/docker/go/LICENSE deleted file mode 100644 index 7448756763..0000000000 --- a/vendor/src/github.com/docker/go/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2012 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/src/github.com/docker/go/canonical/json/decode.go b/vendor/src/github.com/docker/go/canonical/json/decode.go deleted file mode 100644 index 35bac2e2b4..0000000000 --- a/vendor/src/github.com/docker/go/canonical/json/decode.go +++ /dev/null @@ -1,1094 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
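// A sketch of the go-units ulimit helpers above: parse the "name=soft[:hard]"
// syntax and convert the result to the syscall-level Rlimit.
package main

import (
	"fmt"

	units "github.com/docker/go-units"
)

func main() {
	u, err := units.ParseUlimit("nofile=1024:2048")
	if err != nil {
		panic(err)
	}
	fmt.Println(u) // "nofile=1024:2048"

	r, _ := u.GetRlimit()
	fmt.Println(r.Type, r.Soft, r.Hard) // 7 1024 2048 (rlimitNofile)
}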
- -// Represents JSON data structure using native Go types: booleans, floats, -// strings, arrays, and maps. - -package json - -import ( - "bytes" - "encoding" - "encoding/base64" - "errors" - "fmt" - "reflect" - "runtime" - "strconv" - "unicode" - "unicode/utf16" - "unicode/utf8" -) - -// Unmarshal parses the JSON-encoded data and stores the result -// in the value pointed to by v. -// -// Unmarshal uses the inverse of the encodings that -// Marshal uses, allocating maps, slices, and pointers as necessary, -// with the following additional rules: -// -// To unmarshal JSON into a pointer, Unmarshal first handles the case of -// the JSON being the JSON literal null. In that case, Unmarshal sets -// the pointer to nil. Otherwise, Unmarshal unmarshals the JSON into -// the value pointed at by the pointer. If the pointer is nil, Unmarshal -// allocates a new value for it to point to. -// -// To unmarshal JSON into a struct, Unmarshal matches incoming object -// keys to the keys used by Marshal (either the struct field name or its tag), -// preferring an exact match but also accepting a case-insensitive match. -// -// To unmarshal JSON into an interface value, -// Unmarshal stores one of these in the interface value: -// -// bool, for JSON booleans -// float64, for JSON numbers -// string, for JSON strings -// []interface{}, for JSON arrays -// map[string]interface{}, for JSON objects -// nil for JSON null -// -// To unmarshal a JSON array into a slice, Unmarshal resets the slice to nil -// and then appends each element to the slice. -// -// To unmarshal a JSON object into a map, Unmarshal replaces the map -// with an empty map and then adds key-value pairs from the object to -// the map. -// -// If a JSON value is not appropriate for a given target type, -// or if a JSON number overflows the target type, Unmarshal -// skips that field and completes the unmarshalling as best it can. -// If no more serious errors are encountered, Unmarshal returns -// an UnmarshalTypeError describing the earliest such error. -// -// The JSON null value unmarshals into an interface, map, pointer, or slice -// by setting that Go value to nil. Because null is often used in JSON to mean -// ``not present,'' unmarshaling a JSON null into any other Go type has no effect -// on the value and produces no error. -// -// When unmarshaling quoted strings, invalid UTF-8 or -// invalid UTF-16 surrogate pairs are not treated as an error. -// Instead, they are replaced by the Unicode replacement -// character U+FFFD. -// -func Unmarshal(data []byte, v interface{}) error { - // Check for well-formedness. - // Avoids filling out half a data structure - // before discovering a JSON syntax error. - var d decodeState - err := checkValid(data, &d.scan) - if err != nil { - return err - } - - d.init(data) - return d.unmarshal(v) -} - -// Unmarshaler is the interface implemented by objects -// that can unmarshal a JSON description of themselves. -// The input can be assumed to be a valid encoding of -// a JSON value. UnmarshalJSON must copy the JSON data -// if it wishes to retain the data after returning. -type Unmarshaler interface { - UnmarshalJSON([]byte) error -} - -// An UnmarshalTypeError describes a JSON value that was -// not appropriate for a value of a specific Go type. 
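// A usage sketch for the Unmarshal function documented above, using this
// vendored canonical-JSON package in place of encoding/json. The Image type
// is illustrative only.
package main

import (
	"fmt"

	json "github.com/docker/go/canonical/json"
)

type Image struct {
	Name string `json:"name"`
	Size int64  `json:"size,omitempty"`
}

func main() {
	var img Image
	if err := json.Unmarshal([]byte(`{"name": "busybox", "size": 1199417}`), &img); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", img) // {Name:busybox Size:1199417}
}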
-type UnmarshalTypeError struct { - Value string // description of JSON value - "bool", "array", "number -5" - Type reflect.Type // type of Go value it could not be assigned to - Offset int64 // error occurred after reading Offset bytes -} - -func (e *UnmarshalTypeError) Error() string { - return "json: cannot unmarshal " + e.Value + " into Go value of type " + e.Type.String() -} - -// An UnmarshalFieldError describes a JSON object key that -// led to an unexported (and therefore unwritable) struct field. -// (No longer used; kept for compatibility.) -type UnmarshalFieldError struct { - Key string - Type reflect.Type - Field reflect.StructField -} - -func (e *UnmarshalFieldError) Error() string { - return "json: cannot unmarshal object key " + strconv.Quote(e.Key) + " into unexported field " + e.Field.Name + " of type " + e.Type.String() -} - -// An InvalidUnmarshalError describes an invalid argument passed to Unmarshal. -// (The argument to Unmarshal must be a non-nil pointer.) -type InvalidUnmarshalError struct { - Type reflect.Type -} - -func (e *InvalidUnmarshalError) Error() string { - if e.Type == nil { - return "json: Unmarshal(nil)" - } - - if e.Type.Kind() != reflect.Ptr { - return "json: Unmarshal(non-pointer " + e.Type.String() + ")" - } - return "json: Unmarshal(nil " + e.Type.String() + ")" -} - -func (d *decodeState) unmarshal(v interface{}) (err error) { - defer func() { - if r := recover(); r != nil { - if _, ok := r.(runtime.Error); ok { - panic(r) - } - err = r.(error) - } - }() - - rv := reflect.ValueOf(v) - if rv.Kind() != reflect.Ptr || rv.IsNil() { - return &InvalidUnmarshalError{reflect.TypeOf(v)} - } - - d.scan.reset() - // We decode rv not rv.Elem because the Unmarshaler interface - // test must be applied at the top level of the value. - d.value(rv) - return d.savedError -} - -// A Number represents a JSON number literal. -type Number string - -// String returns the literal text of the number. -func (n Number) String() string { return string(n) } - -// Float64 returns the number as a float64. -func (n Number) Float64() (float64, error) { - return strconv.ParseFloat(string(n), 64) -} - -// Int64 returns the number as an int64. -func (n Number) Int64() (int64, error) { - return strconv.ParseInt(string(n), 10, 64) -} - -// decodeState represents the state while decoding a JSON value. -type decodeState struct { - data []byte - off int // read offset in data - scan scanner - nextscan scanner // for calls to nextValue - savedError error - useNumber bool - canonical bool -} - -// errPhase is used for errors that should not happen unless -// there is a bug in the JSON decoder or something is editing -// the data slice while the decoder executes. -var errPhase = errors.New("JSON decoder out of sync - data changing underfoot?") - -func (d *decodeState) init(data []byte) *decodeState { - d.data = data - d.off = 0 - d.savedError = nil - return d -} - -// error aborts the decoding by panicking with err. -func (d *decodeState) error(err error) { - panic(err) -} - -// saveError saves the first err it is called with, -// for reporting at the end of the unmarshal. -func (d *decodeState) saveError(err error) { - if d.savedError == nil { - d.savedError = err - } -} - -// next cuts off and returns the next full JSON value in d.data[d.off:]. -// The next value is known to be an object or array, not a literal. 
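// The Number type above preserves the literal text of a JSON number, so a
// caller can defer the float64/int64 choice and avoid precision loss. A
// minimal sketch:
package main

import (
	"fmt"

	json "github.com/docker/go/canonical/json"
)

func main() {
	n := json.Number("9007199254740993") // 2^53+1: too large for an exact float64
	i, _ := n.Int64()
	f, _ := n.Float64()
	fmt.Println(n.String(), i, f) // the int64 is exact; the float64 is rounded
}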
-func (d *decodeState) next() []byte { - c := d.data[d.off] - item, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // Our scanner has seen the opening brace/bracket - // and thinks we're still in the middle of the object. - // invent a closing brace/bracket to get it out. - if c == '{' { - d.scan.step(&d.scan, '}') - } else { - d.scan.step(&d.scan, ']') - } - - return item -} - -// scanWhile processes bytes in d.data[d.off:] until it -// receives a scan code not equal to op. -// It updates d.off and returns the new scan code. -func (d *decodeState) scanWhile(op int) int { - var newOp int - for { - if d.off >= len(d.data) { - newOp = d.scan.eof() - d.off = len(d.data) + 1 // mark processed EOF with len+1 - } else { - c := int(d.data[d.off]) - d.off++ - newOp = d.scan.step(&d.scan, c) - } - if newOp != op { - break - } - } - return newOp -} - -// value decodes a JSON value from d.data[d.off:] into the value. -// it updates d.off to point past the decoded value. -func (d *decodeState) value(v reflect.Value) { - if !v.IsValid() { - _, rest, err := nextValue(d.data[d.off:], &d.nextscan) - if err != nil { - d.error(err) - } - d.off = len(d.data) - len(rest) - - // d.scan thinks we're still at the beginning of the item. - // Feed in an empty string - the shortest, simplest value - - // so that it knows we got to the end of the value. - if d.scan.redo { - // rewind. - d.scan.redo = false - d.scan.step = stateBeginValue - } - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - - n := len(d.scan.parseState) - if n > 0 && d.scan.parseState[n-1] == parseObjectKey { - // d.scan thinks we just read an object key; finish the object - d.scan.step(&d.scan, ':') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '"') - d.scan.step(&d.scan, '}') - } - - return - } - - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(v) - - case scanBeginObject: - d.object(v) - - case scanBeginLiteral: - d.literal(v) - } -} - -type unquotedValue struct{} - -// valueQuoted is like value but decodes a -// quoted string literal or literal null into an interface value. -// If it finds anything other than a quoted string literal or null, -// valueQuoted returns unquotedValue{}. -func (d *decodeState) valueQuoted() interface{} { - switch op := d.scanWhile(scanSkipSpace); op { - default: - d.error(errPhase) - - case scanBeginArray: - d.array(reflect.Value{}) - - case scanBeginObject: - d.object(reflect.Value{}) - - case scanBeginLiteral: - switch v := d.literalInterface().(type) { - case nil, string: - return v - } - } - return unquotedValue{} -} - -// indirect walks down v allocating pointers as needed, -// until it gets to a non-pointer. -// if it encounters an Unmarshaler, indirect stops and returns that. -// if decodingNull is true, indirect stops at the last pointer so it can be set to nil. -func (d *decodeState) indirect(v reflect.Value, decodingNull bool) (Unmarshaler, encoding.TextUnmarshaler, reflect.Value) { - // If v is a named type and is addressable, - // start with its address, so that if the type has pointer methods, - // we find them. - if v.Kind() != reflect.Ptr && v.Type().Name() != "" && v.CanAddr() { - v = v.Addr() - } - for { - // Load value from interface, but only if the result will be - // usefully addressable. 
- if v.Kind() == reflect.Interface && !v.IsNil() { - e := v.Elem() - if e.Kind() == reflect.Ptr && !e.IsNil() && (!decodingNull || e.Elem().Kind() == reflect.Ptr) { - v = e - continue - } - } - - if v.Kind() != reflect.Ptr { - break - } - - if v.Elem().Kind() != reflect.Ptr && decodingNull && v.CanSet() { - break - } - if v.IsNil() { - v.Set(reflect.New(v.Type().Elem())) - } - if v.Type().NumMethod() > 0 { - if u, ok := v.Interface().(Unmarshaler); ok { - return u, nil, reflect.Value{} - } - if u, ok := v.Interface().(encoding.TextUnmarshaler); ok { - return nil, u, reflect.Value{} - } - } - v = v.Elem() - } - return nil, nil, v -} - -// array consumes an array from d.data[d.off-1:], decoding into the value v. -// the first byte of the array ('[') has been read already. -func (d *decodeState) array(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - } - - v = pv - - // Check type of target. - switch v.Kind() { - case reflect.Interface: - if v.NumMethod() == 0 { - // Decoding into nil interface? Switch to non-reflect code. - v.Set(reflect.ValueOf(d.arrayInterface())) - return - } - // Otherwise it's invalid. - fallthrough - default: - d.saveError(&UnmarshalTypeError{"array", v.Type(), int64(d.off)}) - d.off-- - d.next() - return - case reflect.Array: - case reflect.Slice: - break - } - - i := 0 - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - // Get element of array, growing if necessary. - if v.Kind() == reflect.Slice { - // Grow slice if necessary - if i >= v.Cap() { - newcap := v.Cap() + v.Cap()/2 - if newcap < 4 { - newcap = 4 - } - newv := reflect.MakeSlice(v.Type(), v.Len(), newcap) - reflect.Copy(newv, v) - v.Set(newv) - } - if i >= v.Len() { - v.SetLen(i + 1) - } - } - - if i < v.Len() { - // Decode into element. - d.value(v.Index(i)) - } else { - // Ran out of fixed array: skip. - d.value(reflect.Value{}) - } - i++ - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - - if i < v.Len() { - if v.Kind() == reflect.Array { - // Array. Zero the rest. - z := reflect.Zero(v.Type().Elem()) - for ; i < v.Len(); i++ { - v.Index(i).Set(z) - } - } else { - v.SetLen(i) - } - } - if i == 0 && v.Kind() == reflect.Slice { - v.Set(reflect.MakeSlice(v.Type(), 0, 0)) - } -} - -var nullLiteral = []byte("null") - -// object consumes an object from d.data[d.off-1:], decoding into the value v. -// the first byte ('{') of the object has been read already. -func (d *decodeState) object(v reflect.Value) { - // Check for unmarshaler. - u, ut, pv := d.indirect(v, false) - if u != nil { - d.off-- - err := u.UnmarshalJSON(d.next()) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - v = pv - - // Decoding into nil interface? Switch to non-reflect code. 
- if v.Kind() == reflect.Interface && v.NumMethod() == 0 { - v.Set(reflect.ValueOf(d.objectInterface())) - return - } - - // Check type of target: struct or map[string]T - switch v.Kind() { - case reflect.Map: - // map must have string kind - t := v.Type() - if t.Key().Kind() != reflect.String { - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - if v.IsNil() { - v.Set(reflect.MakeMap(t)) - } - case reflect.Struct: - - default: - d.saveError(&UnmarshalTypeError{"object", v.Type(), int64(d.off)}) - d.off-- - d.next() // skip over { } in input - return - } - - var mapElem reflect.Value - - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquoteBytes(item) - if !ok { - d.error(errPhase) - } - - // Figure out field corresponding to key. - var subv reflect.Value - destring := false // whether the value is wrapped in a string to be decoded first - - if v.Kind() == reflect.Map { - elemType := v.Type().Elem() - if !mapElem.IsValid() { - mapElem = reflect.New(elemType).Elem() - } else { - mapElem.Set(reflect.Zero(elemType)) - } - subv = mapElem - } else { - var f *field - fields := cachedTypeFields(v.Type(), false) - for i := range fields { - ff := &fields[i] - if bytes.Equal(ff.nameBytes, key) { - f = ff - break - } - if f == nil && ff.equalFold(ff.nameBytes, key) { - f = ff - } - } - if f != nil { - subv = v - destring = f.quoted - for _, i := range f.index { - if subv.Kind() == reflect.Ptr { - if subv.IsNil() { - subv.Set(reflect.New(subv.Type().Elem())) - } - subv = subv.Elem() - } - subv = subv.Field(i) - } - } - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - if destring { - switch qv := d.valueQuoted().(type) { - case nil: - d.literalStore(nullLiteral, subv, false) - case string: - d.literalStore([]byte(qv), subv, true) - default: - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal unquoted value into %v", subv.Type())) - } - } else { - d.value(subv) - } - - // Write value back to map; - // if using struct, subv points into struct already. - if v.Kind() == reflect.Map { - kv := reflect.ValueOf(key).Convert(v.Type().Key()) - v.SetMapIndex(kv, subv) - } - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } -} - -// literal consumes a literal from d.data[d.off-1:], decoding into the value v. -// The first byte of the literal has been read already -// (that's how the caller knows it's a literal). -func (d *decodeState) literal(v reflect.Value) { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - - d.literalStore(d.data[start:d.off], v, false) -} - -// convertNumber converts the number literal s to a float64 or a Number -// depending on the setting of d.useNumber. 
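// The destring path in object() above implements the ",string" struct tag
// option: the value arrives wrapped in a JSON string and is unquoted before
// decoding. A minimal sketch with an illustrative Config type:
package main

import (
	"fmt"

	json "github.com/docker/go/canonical/json"
)

type Config struct {
	Retries int64 `json:"retries,string"`
}

func main() {
	var c Config
	if err := json.Unmarshal([]byte(`{"retries": "42"}`), &c); err != nil {
		panic(err)
	}
	fmt.Println(c.Retries) // 42
}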
-func (d *decodeState) convertNumber(s string) (interface{}, error) { - if d.useNumber { - return Number(s), nil - } - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return nil, &UnmarshalTypeError{"number " + s, reflect.TypeOf(0.0), int64(d.off)} - } - return f, nil -} - -var numberType = reflect.TypeOf(Number("")) - -// literalStore decodes a literal stored in item into v. -// -// fromQuoted indicates whether this literal came from unwrapping a -// string from the ",string" struct tag option. this is used only to -// produce more helpful error messages. -func (d *decodeState) literalStore(item []byte, v reflect.Value, fromQuoted bool) { - // Check for unmarshaler. - if len(item) == 0 { - //Empty string given - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - return - } - wantptr := item[0] == 'n' // null - u, ut, pv := d.indirect(v, wantptr) - if u != nil { - err := u.UnmarshalJSON(item) - if err != nil { - d.error(err) - } - return - } - if ut != nil { - if item[0] != '"' { - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - return - } - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - err := ut.UnmarshalText(s) - if err != nil { - d.error(err) - } - return - } - - v = pv - - switch c := item[0]; c { - case 'n': // null - switch v.Kind() { - case reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice: - v.Set(reflect.Zero(v.Type())) - // otherwise, ignore null for primitives/string - } - case 't', 'f': // true, false - value := c == 't' - switch v.Kind() { - default: - if fromQuoted { - d.saveError(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - case reflect.Bool: - v.SetBool(value) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(value)) - } else { - d.saveError(&UnmarshalTypeError{"bool", v.Type(), int64(d.off)}) - } - } - - case '"': // string - s, ok := unquoteBytes(item) - if !ok { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - switch v.Kind() { - default: - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - case reflect.Slice: - if v.Type().Elem().Kind() != reflect.Uint8 { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - break - } - b := make([]byte, base64.StdEncoding.DecodedLen(len(s))) - n, err := base64.StdEncoding.Decode(b, s) - if err != nil { - d.saveError(err) - break - } - v.Set(reflect.ValueOf(b[0:n])) - case reflect.String: - v.SetString(string(s)) - case reflect.Interface: - if v.NumMethod() == 0 { - v.Set(reflect.ValueOf(string(s))) - } else { - d.saveError(&UnmarshalTypeError{"string", v.Type(), int64(d.off)}) - } - } - - default: // number - if c != '-' && (c < '0' || c > '9') { - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(errPhase) - } - } - s := string(item) - switch v.Kind() { - default: - if v.Kind() == reflect.String && 
v.Type() == numberType { - v.SetString(s) - break - } - if fromQuoted { - d.error(fmt.Errorf("json: invalid use of ,string struct tag, trying to unmarshal %q into %v", item, v.Type())) - } else { - d.error(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - } - case reflect.Interface: - n, err := d.convertNumber(s) - if err != nil { - d.saveError(err) - break - } - if v.NumMethod() != 0 { - d.saveError(&UnmarshalTypeError{"number", v.Type(), int64(d.off)}) - break - } - v.Set(reflect.ValueOf(n)) - - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - n, err := strconv.ParseInt(s, 10, 64) - if err != nil || v.OverflowInt(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetInt(n) - - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - n, err := strconv.ParseUint(s, 10, 64) - if err != nil || v.OverflowUint(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetUint(n) - - case reflect.Float32, reflect.Float64: - n, err := strconv.ParseFloat(s, v.Type().Bits()) - if err != nil || v.OverflowFloat(n) { - d.saveError(&UnmarshalTypeError{"number " + s, v.Type(), int64(d.off)}) - break - } - v.SetFloat(n) - } - } -} - -// The xxxInterface routines build up a value to be stored -// in an empty interface. They are not strictly necessary, -// but they avoid the weight of reflection in this common case. - -// valueInterface is like value but returns interface{} -func (d *decodeState) valueInterface() interface{} { - switch d.scanWhile(scanSkipSpace) { - default: - d.error(errPhase) - panic("unreachable") - case scanBeginArray: - return d.arrayInterface() - case scanBeginObject: - return d.objectInterface() - case scanBeginLiteral: - return d.literalInterface() - } -} - -// arrayInterface is like array but returns []interface{}. -func (d *decodeState) arrayInterface() []interface{} { - var v = make([]interface{}, 0) - for { - // Look ahead for ] - can only happen on first iteration. - op := d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - - // Back up so d.value can have the byte we just read. - d.off-- - d.scan.undo(op) - - v = append(v, d.valueInterface()) - - // Next token must be , or ]. - op = d.scanWhile(scanSkipSpace) - if op == scanEndArray { - break - } - if op != scanArrayValue { - d.error(errPhase) - } - } - return v -} - -// objectInterface is like object but returns map[string]interface{}. -func (d *decodeState) objectInterface() map[string]interface{} { - m := make(map[string]interface{}) - for { - // Read opening " of string key or closing }. - op := d.scanWhile(scanSkipSpace) - if op == scanEndObject { - // closing } - can only happen on first iteration. - break - } - if op != scanBeginLiteral { - d.error(errPhase) - } - - // Read string key. - start := d.off - 1 - op = d.scanWhile(scanContinue) - item := d.data[start : d.off-1] - key, ok := unquote(item) - if !ok { - d.error(errPhase) - } - - // Read : before value. - if op == scanSkipSpace { - op = d.scanWhile(scanSkipSpace) - } - if op != scanObjectKey { - d.error(errPhase) - } - - // Read value. - m[key] = d.valueInterface() - - // Next token must be , or }. - op = d.scanWhile(scanSkipSpace) - if op == scanEndObject { - break - } - if op != scanObjectValue { - d.error(errPhase) - } - } - return m -} - -// literalInterface is like literal but returns an interface value. 
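// The xxxInterface routines above back decoding into an untyped value:
// objects become map[string]interface{}, arrays []interface{}, and numbers
// float64 (when Number decoding is not enabled), as documented on Unmarshal.
package main

import (
	"fmt"

	json "github.com/docker/go/canonical/json"
)

func main() {
	var v interface{}
	if err := json.Unmarshal([]byte(`{"ok": true, "ids": [1, 2]}`), &v); err != nil {
		panic(err)
	}
	m := v.(map[string]interface{})
	fmt.Println(m["ok"], m["ids"].([]interface{})[0].(float64)) // true 1
}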
-func (d *decodeState) literalInterface() interface{} { - // All bytes inside literal return scanContinue op code. - start := d.off - 1 - op := d.scanWhile(scanContinue) - - // Scan read one byte too far; back up. - d.off-- - d.scan.undo(op) - item := d.data[start:d.off] - - switch c := item[0]; c { - case 'n': // null - return nil - - case 't', 'f': // true, false - return c == 't' - - case '"': // string - s, ok := unquote(item) - if !ok { - d.error(errPhase) - } - return s - - default: // number - if c != '-' && (c < '0' || c > '9') { - d.error(errPhase) - } - n, err := d.convertNumber(string(item)) - if err != nil { - d.saveError(err) - } - return n - } -} - -// getu4 decodes \uXXXX from the beginning of s, returning the hex value, -// or it returns -1. -func getu4(s []byte) rune { - if len(s) < 6 || s[0] != '\\' || s[1] != 'u' { - return -1 - } - r, err := strconv.ParseUint(string(s[2:6]), 16, 64) - if err != nil { - return -1 - } - return rune(r) -} - -// unquote converts a quoted JSON string literal s into an actual string t. -// The rules are different than for Go, so cannot use strconv.Unquote. -func unquote(s []byte) (t string, ok bool) { - s, ok = unquoteBytes(s) - t = string(s) - return -} - -func unquoteBytes(s []byte) (t []byte, ok bool) { - if len(s) < 2 || s[0] != '"' || s[len(s)-1] != '"' { - return - } - s = s[1 : len(s)-1] - - // Check for unusual characters. If there are none, - // then no unquoting is needed, so return a slice of the - // original bytes. - r := 0 - for r < len(s) { - c := s[r] - if c == '\\' || c == '"' || c < ' ' { - break - } - if c < utf8.RuneSelf { - r++ - continue - } - rr, size := utf8.DecodeRune(s[r:]) - if rr == utf8.RuneError && size == 1 { - break - } - r += size - } - if r == len(s) { - return s, true - } - - b := make([]byte, len(s)+2*utf8.UTFMax) - w := copy(b, s[0:r]) - for r < len(s) { - // Out of room? Can only happen if s is full of - // malformed UTF-8 and we're replacing each - // byte with RuneError. - if w >= len(b)-2*utf8.UTFMax { - nb := make([]byte, (len(b)+utf8.UTFMax)*2) - copy(nb, b[0:w]) - b = nb - } - switch c := s[r]; { - case c == '\\': - r++ - if r >= len(s) { - return - } - switch s[r] { - default: - return - case '"', '\\', '/', '\'': - b[w] = s[r] - r++ - w++ - case 'b': - b[w] = '\b' - r++ - w++ - case 'f': - b[w] = '\f' - r++ - w++ - case 'n': - b[w] = '\n' - r++ - w++ - case 'r': - b[w] = '\r' - r++ - w++ - case 't': - b[w] = '\t' - r++ - w++ - case 'u': - r-- - rr := getu4(s[r:]) - if rr < 0 { - return - } - r += 6 - if utf16.IsSurrogate(rr) { - rr1 := getu4(s[r:]) - if dec := utf16.DecodeRune(rr, rr1); dec != unicode.ReplacementChar { - // A valid pair; consume. - r += 6 - w += utf8.EncodeRune(b[w:], dec) - break - } - // Invalid surrogate; fall back to replacement rune. - rr = unicode.ReplacementChar - } - w += utf8.EncodeRune(b[w:], rr) - } - - // Quote, control characters are invalid. - case c == '"', c < ' ': - return - - // ASCII - case c < utf8.RuneSelf: - b[w] = c - r++ - w++ - - // Coerce to well-formed UTF-8. - default: - rr, size := utf8.DecodeRune(s[r:]) - r += size - w += utf8.EncodeRune(b[w:], rr) - } - } - return b[0:w], true -} diff --git a/vendor/src/github.com/docker/go/canonical/json/encode.go b/vendor/src/github.com/docker/go/canonical/json/encode.go deleted file mode 100644 index 0fab020e2c..0000000000 --- a/vendor/src/github.com/docker/go/canonical/json/encode.go +++ /dev/null @@ -1,1245 +0,0 @@ -// Copyright 2010 The Go Authors. All rights reserved. 
diff --git a/vendor/src/github.com/docker/go/canonical/json/encode.go b/vendor/src/github.com/docker/go/canonical/json/encode.go
deleted file mode 100644
index 0fab020e2c..0000000000
--- a/vendor/src/github.com/docker/go/canonical/json/encode.go
+++ /dev/null
@@ -1,1245 +0,0 @@
-// Copyright 2010 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package json implements encoding and decoding of JSON objects as defined in
-// RFC 4627. The mapping between JSON objects and Go values is described
-// in the documentation for the Marshal and Unmarshal functions.
-//
-// See "JSON and Go" for an introduction to this package:
-// https://golang.org/doc/articles/json_and_go.html
-package json
-
-import (
-	"bytes"
-	"encoding"
-	"encoding/base64"
-	"math"
-	"reflect"
-	"runtime"
-	"sort"
-	"strconv"
-	"strings"
-	"sync"
-	"unicode"
-	"unicode/utf8"
-)
-
-// Marshal returns the JSON encoding of v.
-//
-// Marshal traverses the value v recursively.
-// If an encountered value implements the Marshaler interface
-// and is not a nil pointer, Marshal calls its MarshalJSON method
-// to produce JSON. The nil pointer exception is not strictly necessary
-// but mimics a similar, necessary exception in the behavior of
-// UnmarshalJSON.
-//
-// Otherwise, Marshal uses the following type-dependent default encodings:
-//
-// Boolean values encode as JSON booleans.
-//
-// Floating point, integer, and Number values encode as JSON numbers.
-//
-// String values encode as JSON strings coerced to valid UTF-8,
-// replacing invalid bytes with the Unicode replacement rune.
-// The angle brackets "<" and ">" are escaped to "\u003c" and "\u003e"
-// to keep some browsers from misinterpreting JSON output as HTML.
-// Ampersand "&" is also escaped to "\u0026" for the same reason.
-//
-// Array and slice values encode as JSON arrays, except that
-// []byte encodes as a base64-encoded string, and a nil slice
-// encodes as the null JSON object.
-//
-// Struct values encode as JSON objects. Each exported struct field
-// becomes a member of the object unless
-//   - the field's tag is "-", or
-//   - the field is empty and its tag specifies the "omitempty" option.
-// The empty values are false, 0, any
-// nil pointer or interface value, and any array, slice, map, or string of
-// length zero. The object's default key string is the struct field name
-// but can be specified in the struct field's tag value. The "json" key in
-// the struct field's tag value is the key name, followed by an optional comma
-// and options. Examples:
-//
-//   // Field is ignored by this package.
-//   Field int `json:"-"`
-//
-//   // Field appears in JSON as key "myName".
-//   Field int `json:"myName"`
-//
-//   // Field appears in JSON as key "myName" and
-//   // the field is omitted from the object if its value is empty,
-//   // as defined above.
-//   Field int `json:"myName,omitempty"`
-//
-//   // Field appears in JSON as key "Field" (the default), but
-//   // the field is skipped if empty.
-//   // Note the leading comma.
-//   Field int `json:",omitempty"`
-//
-// The "string" option signals that a field is stored as JSON inside a
-// JSON-encoded string. It applies only to fields of string, floating point,
-// integer, or boolean types. This extra level of encoding is sometimes used
-// when communicating with JavaScript programs:
-//
-//    Int64String int64 `json:",string"`
-//
-// The key name will be used if it's a non-empty string consisting of
-// only Unicode letters, digits, dollar signs, percent signs, hyphens,
-// underscores and slashes.
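The tag options documented above compose; a small sketch (type and field names invented for illustration) showing the four common forms side by side against upstream encoding/json:

package main

import (
	"encoding/json"
	"fmt"
)

type Record struct {
	Secret string `json:"-"`               // never emitted
	Name   string `json:"myName"`          // renamed key
	Count  int    `json:"count,omitempty"` // dropped while zero
	ID     int64  `json:",string"`         // number wrapped in a JSON string
}

func main() {
	b, _ := json.Marshal(Record{Secret: "x", Name: "docker", ID: 42})
	fmt.Println(string(b)) // {"myName":"docker","ID":"42"}
}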
-//
-// Anonymous struct fields are usually marshaled as if their inner exported fields
-// were fields in the outer struct, subject to the usual Go visibility rules amended
-// as described in the next paragraph.
-// An anonymous struct field with a name given in its JSON tag is treated as
-// having that name, rather than being anonymous.
-// An anonymous struct field of interface type is treated the same as having
-// that type as its name, rather than being anonymous.
-//
-// The Go visibility rules for struct fields are amended for JSON when
-// deciding which field to marshal or unmarshal. If there are
-// multiple fields at the same level, and that level is the least
-// nested (and would therefore be the nesting level selected by the
-// usual Go rules), the following extra rules apply:
-//
-// 1) Of those fields, if any are JSON-tagged, only tagged fields are considered,
-// even if there are multiple untagged fields that would otherwise conflict.
-// 2) If there is exactly one field (tagged or not according to the first rule), that is selected.
-// 3) Otherwise there are multiple fields, and all are ignored; no error occurs.
-//
-// Handling of anonymous struct fields is new in Go 1.1.
-// Prior to Go 1.1, anonymous struct fields were ignored. To force ignoring of
-// an anonymous struct field in both current and earlier versions, give the field
-// a JSON tag of "-".
-//
-// Map values encode as JSON objects.
-// The map's key type must be string; the map keys are used as JSON object
-// keys, subject to the UTF-8 coercion described for string values above.
-//
-// Pointer values encode as the value pointed to.
-// A nil pointer encodes as the null JSON object.
-//
-// Interface values encode as the value contained in the interface.
-// A nil interface value encodes as the null JSON object.
-//
-// Channel, complex, and function values cannot be encoded in JSON.
-// Attempting to encode such a value causes Marshal to return
-// an UnsupportedTypeError.
-//
-// JSON cannot represent cyclic data structures and Marshal does not
-// handle them. Passing cyclic structures to Marshal will result in
-// an infinite recursion.
-//
-func Marshal(v interface{}) ([]byte, error) {
-	return marshal(v, false)
-}
-
-// MarshalIndent is like Marshal but applies Indent to format the output.
-func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) {
-	b, err := Marshal(v)
-	if err != nil {
-		return nil, err
-	}
-	var buf bytes.Buffer
-	err = Indent(&buf, b, prefix, indent)
-	if err != nil {
-		return nil, err
-	}
-	return buf.Bytes(), nil
-}
-
-// MarshalCanonical is like Marshal but encodes into Canonical JSON.
-// Read more at: http://wiki.laptop.org/go/Canonical_JSON
-func MarshalCanonical(v interface{}) ([]byte, error) {
-	return marshal(v, true)
-}
-
-func marshal(v interface{}, canonical bool) ([]byte, error) {
-	e := &encodeState{canonical: canonical}
-	err := e.marshal(v)
-	if err != nil {
-		return nil, err
-	}
-	return e.Bytes(), nil
-}
-
-// HTMLEscape appends to dst the JSON-encoded src with <, >, &, U+2028 and U+2029
-// characters inside string literals changed to \u003c, \u003e, \u0026, \u2028, \u2029
-// so that the JSON will be safe to embed inside HTML